author    Wim <wim@42.be>  2022-04-12 00:30:21 +0200
committer GitHub <noreply@github.com>  2022-04-12 00:30:21 +0200
commit    281ef53e7de5d30114dbf57a4b506b2d8d2720cc (patch)
tree      5fe13b85ffe312053452e0d7107ca4b174a412e0 /vendor/github.com
parent    f044b948e257814e8e1f70d4b66821bfd9c2ff06 (diff)
download  matterbridge-msglm-281ef53e7de5d30114dbf57a4b506b2d8d2720cc.tar.gz
matterbridge-msglm-281ef53e7de5d30114dbf57a4b506b2d8d2720cc.tar.bz2
matterbridge-msglm-281ef53e7de5d30114dbf57a4b506b2d8d2720cc.zip
Update dependencies (#1800)
Diffstat (limited to 'vendor/github.com')
-rw-r--r--  vendor/github.com/SevereCloud/vksdk/v2/README.md  2
-rw-r--r--  vendor/github.com/SevereCloud/vksdk/v2/api/account.go  2
-rw-r--r--  vendor/github.com/SevereCloud/vksdk/v2/api/store.go  2
-rw-r--r--  vendor/github.com/SevereCloud/vksdk/v2/api/video.go  35
-rw-r--r--  vendor/github.com/SevereCloud/vksdk/v2/doc.go  2
-rw-r--r--  vendor/github.com/SevereCloud/vksdk/v2/events/context.go  5
-rw-r--r--  vendor/github.com/SevereCloud/vksdk/v2/events/events.go  2
-rw-r--r--  vendor/github.com/SevereCloud/vksdk/v2/internal/transport.go  1
-rw-r--r--  vendor/github.com/SevereCloud/vksdk/v2/object/account.go  1
-rw-r--r--  vendor/github.com/SevereCloud/vksdk/v2/object/messages.go  1
-rw-r--r--  vendor/github.com/SevereCloud/vksdk/v2/object/users.go  211
-rw-r--r--  vendor/github.com/SevereCloud/vksdk/v2/object/video.go  24
-rw-r--r--  vendor/github.com/klauspost/compress/README.md  51
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/autogen.go  5
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/bitreader.go  126
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/compress.go  9
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/decompress.go  509
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s  488
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s.in  197
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/decompress_amd64.go  181
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/decompress_amd64.s  506
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/decompress_amd64.s.in  195
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/decompress_generic.go  193
-rw-r--r--  vendor/github.com/klauspost/compress/huff0/huff0.go  2
-rw-r--r--  vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go  4
-rw-r--r--  vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s  1873
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/README.md  112
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/bitreader.go  4
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/blockdec.go  440
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/bytebuf.go  3
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/decoder.go  631
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/decoder_options.go  24
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/enc_fast.go  4
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/encoder.go  66
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/encoder_options.go  1
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/framedec.go  203
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/fuzz.go  11
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/fuzz_none.go  11
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/history.go  46
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/seqdec.go  349
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/zip.go  18
-rw-r--r--  vendor/github.com/klauspost/compress/zstd/zstd.go  11
-rw-r--r--  vendor/github.com/lrstanley/girc/.editorconfig  45
-rw-r--r--  vendor/github.com/lrstanley/girc/.golangci.yml  37
-rw-r--r--  vendor/github.com/lrstanley/girc/.travis.yml  25
-rw-r--r--  vendor/github.com/lrstanley/girc/CODE_OF_CONDUCT.md  122
-rw-r--r--  vendor/github.com/lrstanley/girc/CONTRIBUTING.md  120
-rw-r--r--  vendor/github.com/lrstanley/girc/README.md  136
-rw-r--r--  vendor/github.com/lrstanley/girc/SECURITY.md  54
-rw-r--r--  vendor/github.com/lrstanley/girc/SUPPORT.md  56
-rw-r--r--  vendor/github.com/lrstanley/girc/builtin.go  2
-rw-r--r--  vendor/github.com/lrstanley/girc/cap.go  2
-rw-r--r--  vendor/github.com/lrstanley/girc/client.go  7
-rw-r--r--  vendor/github.com/lrstanley/girc/commands.go  6
-rw-r--r--  vendor/github.com/lrstanley/girc/constants.go  2
-rw-r--r--  vendor/github.com/lrstanley/girc/ctcp.go  2
-rw-r--r--  vendor/github.com/lrstanley/girc/event.go  3
-rw-r--r--  vendor/github.com/lrstanley/girc/format.go  12
-rw-r--r--  vendor/github.com/lrstanley/girc/handler.go  2
-rw-r--r--  vendor/github.com/lrstanley/girc/modes.go  26
60 files changed, 5454 insertions, 1766 deletions
diff --git a/vendor/github.com/SevereCloud/vksdk/v2/README.md b/vendor/github.com/SevereCloud/vksdk/v2/README.md
index 580f6a95..f4986579 100644
--- a/vendor/github.com/SevereCloud/vksdk/v2/README.md
+++ b/vendor/github.com/SevereCloud/vksdk/v2/README.md
@@ -1,7 +1,7 @@
# VK SDK for Golang
[![PkgGoDev](https://pkg.go.dev/badge/github.com/SevereCloud/vksdk/v2/v2)](https://pkg.go.dev/github.com/SevereCloud/vksdk/v2?tab=subdirectories)
-[![VK Developers](https://img.shields.io/badge/developers-%234a76a8.svg?logo=VK&logoColor=white)](https://vk.com/dev/)
+[![VK Developers](https://img.shields.io/badge/developers-%234a76a8.svg?logo=VK&logoColor=white)](https://dev.vk.com/)
[![codecov](https://codecov.io/gh/SevereCloud/vksdk/branch/master/graph/badge.svg)](https://codecov.io/gh/SevereCloud/vksdk)
[![VK chat](https://img.shields.io/badge/VK%20chat-%234a76a8.svg?logo=VK&logoColor=white)](https://vk.me/join/AJQ1d6Or8Q00Y_CSOESfbqGt)
[![release](https://img.shields.io/github/v/tag/SevereCloud/vksdk?label=release)](https://github.com/SevereCloud/vksdk/releases)
diff --git a/vendor/github.com/SevereCloud/vksdk/v2/api/account.go b/vendor/github.com/SevereCloud/vksdk/v2/api/account.go
index 7e38ce2b..5732ed61 100644
--- a/vendor/github.com/SevereCloud/vksdk/v2/api/account.go
+++ b/vendor/github.com/SevereCloud/vksdk/v2/api/account.go
@@ -141,6 +141,8 @@ func (vk *VK) AccountSetInfo(params Params) (response int, err error) {
// AccountSetNameInMenu sets an application screen name
// (up to 17 characters), that is shown to the user in the left menu.
//
+// Deprecated: This method is deprecated and may be disabled soon, please avoid using it.
+//
// https://vk.com/dev/account.setNameInMenu
func (vk *VK) AccountSetNameInMenu(params Params) (response int, err error) {
err = vk.RequestUnmarshal("account.setNameInMenu", &response, params)
diff --git a/vendor/github.com/SevereCloud/vksdk/v2/api/store.go b/vendor/github.com/SevereCloud/vksdk/v2/api/store.go
index a2df4ca0..4a9f5935 100644
--- a/vendor/github.com/SevereCloud/vksdk/v2/api/store.go
+++ b/vendor/github.com/SevereCloud/vksdk/v2/api/store.go
@@ -1,4 +1,4 @@
-package api // import "github.com/SevereCloud/vksdk/api"
+package api // import "github.com/SevereCloud/vksdk/v2/api"
import (
"github.com/SevereCloud/vksdk/v2/object"
diff --git a/vendor/github.com/SevereCloud/vksdk/v2/api/video.go b/vendor/github.com/SevereCloud/vksdk/v2/api/video.go
index b6e85b9a..01b7f83e 100644
--- a/vendor/github.com/SevereCloud/vksdk/v2/api/video.go
+++ b/vendor/github.com/SevereCloud/vksdk/v2/api/video.go
@@ -235,6 +235,17 @@ func (vk *VK) VideoGetCommentsExtended(params Params) (response VideoGetComments
return
}
+// VideoLiveGetCategoriesResponse struct.
+type VideoLiveGetCategoriesResponse []object.VideoLiveCategory
+
+// VideoLiveGetCategories method.
+//
+// https://vk.com/dev/video.liveGetCategories
+func (vk *VK) VideoLiveGetCategories(params Params) (response VideoLiveGetCategoriesResponse, err error) {
+ err = vk.RequestUnmarshal("video.liveGetCategories", &response, params)
+ return
+}
+
// VideoRemoveFromAlbum allows you to remove the video from the album.
//
// https://vk.com/dev/video.removeFromAlbum
@@ -336,3 +347,27 @@ func (vk *VK) VideoSearchExtended(params Params) (response VideoSearchExtendedRe
return
}
+
+// VideoStartStreamingResponse struct.
+type VideoStartStreamingResponse object.VideoLive
+
+// VideoStartStreaming method.
+//
+// https://vk.com/dev/video.startStreaming
+func (vk *VK) VideoStartStreaming(params Params) (response VideoStartStreamingResponse, err error) {
+ err = vk.RequestUnmarshal("video.startStreaming", &response, params)
+ return
+}
+
+// VideoStopStreamingResponse struct.
+type VideoStopStreamingResponse struct {
+ UniqueViewers int `json:"unique_viewers"`
+}
+
+// VideoStopStreaming method.
+//
+// https://vk.com/dev/video.stopStreaming
+func (vk *VK) VideoStopStreaming(params Params) (response VideoStopStreamingResponse, err error) {
+ err = vk.RequestUnmarshal("video.stopStreaming", &response, params)
+ return
+}
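
The new live-streaming endpoints above are thin wrappers around RequestUnmarshal. A minimal, illustrative sketch of calling them (not part of this diff; the token and parameter values are placeholders):

```go
package main

import (
	"fmt"
	"log"

	"github.com/SevereCloud/vksdk/v2/api"
)

func main() {
	vk := api.NewVK("USER_TOKEN") // placeholder token

	// Start a stream; "name" is an illustrative parameter.
	live, err := vk.VideoStartStreaming(api.Params{"name": "test stream"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("push to:", live.Stream.URL, "with key:", live.Stream.Key)

	// ...broadcast...

	stopped, err := vk.VideoStopStreaming(api.Params{
		"owner_id": live.OwnerID,
		"video_id": live.VideoID,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("unique viewers:", stopped.UniqueViewers)
}
```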
diff --git a/vendor/github.com/SevereCloud/vksdk/v2/doc.go b/vendor/github.com/SevereCloud/vksdk/v2/doc.go
index 9d70254e..3e3c12a1 100644
--- a/vendor/github.com/SevereCloud/vksdk/v2/doc.go
+++ b/vendor/github.com/SevereCloud/vksdk/v2/doc.go
@@ -7,6 +7,6 @@ package vksdk
// Module constants.
const (
- Version = "2.13.1"
+ Version = "2.14.0"
API = "5.131"
)
diff --git a/vendor/github.com/SevereCloud/vksdk/v2/events/context.go b/vendor/github.com/SevereCloud/vksdk/v2/events/context.go
index 91264cc7..3c9e69f9 100644
--- a/vendor/github.com/SevereCloud/vksdk/v2/events/context.go
+++ b/vendor/github.com/SevereCloud/vksdk/v2/events/context.go
@@ -15,3 +15,8 @@ func GroupIDFromContext(ctx context.Context) int {
func EventIDFromContext(ctx context.Context) string {
return ctx.Value(internal.EventIDKey).(string)
}
+
+// VersionFromContext returns the version from context.
+func VersionFromContext(ctx context.Context) string {
+ return ctx.Value(internal.EventVersionKey).(string)
+}
diff --git a/vendor/github.com/SevereCloud/vksdk/v2/events/events.go b/vendor/github.com/SevereCloud/vksdk/v2/events/events.go
index 3f907d48..2bda613a 100644
--- a/vendor/github.com/SevereCloud/vksdk/v2/events/events.go
+++ b/vendor/github.com/SevereCloud/vksdk/v2/events/events.go
@@ -81,6 +81,7 @@ type GroupEvent struct {
Object json.RawMessage `json:"object"`
GroupID int `json:"group_id"`
EventID string `json:"event_id"`
+ V string `json:"v"`
Secret string `json:"secret"`
}
@@ -158,6 +159,7 @@ func NewFuncList() *FuncList {
func (fl FuncList) Handler(ctx context.Context, e GroupEvent) error { // nolint:gocyclo
ctx = context.WithValue(ctx, internal.GroupIDKey, e.GroupID)
ctx = context.WithValue(ctx, internal.EventIDKey, e.EventID)
+ ctx = context.WithValue(ctx, internal.EventVersionKey, e.V)
if sliceFunc, ok := fl.special[e.Type]; ok {
for _, f := range sliceFunc {
diff --git a/vendor/github.com/SevereCloud/vksdk/v2/internal/transport.go b/vendor/github.com/SevereCloud/vksdk/v2/internal/transport.go
index 6611b496..595b253e 100644
--- a/vendor/github.com/SevereCloud/vksdk/v2/internal/transport.go
+++ b/vendor/github.com/SevereCloud/vksdk/v2/internal/transport.go
@@ -28,6 +28,7 @@ const (
CallbackRetryCounterKey
CallbackRetryAfterKey
CallbackRemove
+ EventVersionKey
)
// ContextClient return *http.Client.
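
The three hunks above add a `v` (API version) field to GroupEvent, store it in the request context, and expose it via VersionFromContext. A hedged sketch of reading it inside a callback handler (not part of this diff; it assumes the existing FuncList.MessageNew registration):

```go
package main

import (
	"context"
	"log"

	"github.com/SevereCloud/vksdk/v2/events"
)

func main() {
	fl := events.NewFuncList()

	fl.MessageNew(func(ctx context.Context, obj events.MessageNewObject) {
		// VersionFromContext returns the new "v" value set by FuncList.Handler.
		log.Printf("group %d, event %s, API version %s",
			events.GroupIDFromContext(ctx),
			events.EventIDFromContext(ctx),
			events.VersionFromContext(ctx))
	})

	// In a real bot, GroupEvent values are fed through fl.Handler by the
	// callback or longpoll transport.
	_ = fl
}
```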
diff --git a/vendor/github.com/SevereCloud/vksdk/v2/object/account.go b/vendor/github.com/SevereCloud/vksdk/v2/object/account.go
index ca04dce1..7806a472 100644
--- a/vendor/github.com/SevereCloud/vksdk/v2/object/account.go
+++ b/vendor/github.com/SevereCloud/vksdk/v2/object/account.go
@@ -84,7 +84,6 @@ type AccountAccountCounters struct {
// AccountInfo struct.
type AccountInfo struct {
-
// Country code.
Country string `json:"country"`
diff --git a/vendor/github.com/SevereCloud/vksdk/v2/object/messages.go b/vendor/github.com/SevereCloud/vksdk/v2/object/messages.go
index 2bf69096..e06bf51e 100644
--- a/vendor/github.com/SevereCloud/vksdk/v2/object/messages.go
+++ b/vendor/github.com/SevereCloud/vksdk/v2/object/messages.go
@@ -409,7 +409,6 @@ type MessageContentSource struct {
Type string `json:"type"`
MessageContentSourceMessage // type message
MessageContentSourceURL // type url
-
}
// NewMessageContentSourceMessage ...
diff --git a/vendor/github.com/SevereCloud/vksdk/v2/object/users.go b/vendor/github.com/SevereCloud/vksdk/v2/object/users.go
index 7bf93b40..91ef92be 100644
--- a/vendor/github.com/SevereCloud/vksdk/v2/object/users.go
+++ b/vendor/github.com/SevereCloud/vksdk/v2/object/users.go
@@ -24,110 +24,113 @@ const (
// UsersUser struct.
type UsersUser struct {
- ID int `json:"id"`
- FirstName string `json:"first_name"`
- LastName string `json:"last_name"`
- FirstNameNom string `json:"first_name_nom"`
- FirstNameGen string `json:"first_name_gen"`
- FirstNameDat string `json:"first_name_dat"`
- FirstNameAcc string `json:"first_name_acc"`
- FirstNameIns string `json:"first_name_ins"`
- FirstNameAbl string `json:"first_name_abl"`
- LastNameNom string `json:"last_name_nom"`
- LastNameGen string `json:"last_name_gen"`
- LastNameDat string `json:"last_name_dat"`
- LastNameAcc string `json:"last_name_acc"`
- LastNameIns string `json:"last_name_ins"`
- LastNameAbl string `json:"last_name_abl"`
- MaidenName string `json:"maiden_name"`
- Sex int `json:"sex"`
- Nickname string `json:"nickname"`
- Domain string `json:"domain"`
- ScreenName string `json:"screen_name"`
- Bdate string `json:"bdate"`
- City BaseObject `json:"city"`
- Country BaseObject `json:"country"`
- Photo50 string `json:"photo_50"`
- Photo100 string `json:"photo_100"`
- Photo200 string `json:"photo_200"`
- PhotoMax string `json:"photo_max"`
- Photo200Orig string `json:"photo_200_orig"`
- Photo400Orig string `json:"photo_400_orig"`
- PhotoMaxOrig string `json:"photo_max_orig"`
- PhotoID string `json:"photo_id"`
- FriendStatus int `json:"friend_status"` // see FriendStatus const
- OnlineApp int `json:"online_app"`
- Online BaseBoolInt `json:"online"`
- OnlineMobile BaseBoolInt `json:"online_mobile"`
- HasPhoto BaseBoolInt `json:"has_photo"`
- HasMobile BaseBoolInt `json:"has_mobile"`
- IsClosed BaseBoolInt `json:"is_closed"`
- IsFriend BaseBoolInt `json:"is_friend"`
- IsFavorite BaseBoolInt `json:"is_favorite"`
- IsHiddenFromFeed BaseBoolInt `json:"is_hidden_from_feed"`
- CanAccessClosed BaseBoolInt `json:"can_access_closed"`
- CanBeInvitedGroup BaseBoolInt `json:"can_be_invited_group"`
- CanPost BaseBoolInt `json:"can_post"`
- CanSeeAllPosts BaseBoolInt `json:"can_see_all_posts"`
- CanSeeAudio BaseBoolInt `json:"can_see_audio"`
- CanWritePrivateMessage BaseBoolInt `json:"can_write_private_message"`
- CanSendFriendRequest BaseBoolInt `json:"can_send_friend_request"`
- CanCallFromGroup BaseBoolInt `json:"can_call_from_group"`
- Verified BaseBoolInt `json:"verified"`
- Trending BaseBoolInt `json:"trending"`
- Blacklisted BaseBoolInt `json:"blacklisted"`
- BlacklistedByMe BaseBoolInt `json:"blacklisted_by_me"`
- Facebook string `json:"facebook"`
- FacebookName string `json:"facebook_name"`
- Twitter string `json:"twitter"`
- Instagram string `json:"instagram"`
- Site string `json:"site"`
- Status string `json:"status"`
- StatusAudio AudioAudio `json:"status_audio"`
- LastSeen UsersLastSeen `json:"last_seen"`
- CropPhoto UsersCropPhoto `json:"crop_photo"`
- FollowersCount int `json:"followers_count"`
- CommonCount int `json:"common_count"`
- Occupation UsersOccupation `json:"occupation"`
- Career []UsersCareer `json:"career"`
- Military []UsersMilitary `json:"military"`
- University int `json:"university"`
- UniversityName string `json:"university_name"`
- Faculty int `json:"faculty"`
- FacultyName string `json:"faculty_name"`
- Graduation int `json:"graduation"`
- EducationForm string `json:"education_form"`
- EducationStatus string `json:"education_status"`
- HomeTown string `json:"home_town"`
- Relation int `json:"relation"`
- Personal UsersPersonal `json:"personal"`
- Interests string `json:"interests"`
- Music string `json:"music"`
- Activities string `json:"activities"`
- Movies string `json:"movies"`
- Tv string `json:"tv"`
- Books string `json:"books"`
- Games string `json:"games"`
- Universities []UsersUniversity `json:"universities"`
- Schools []UsersSchool `json:"schools"`
- About string `json:"about"`
- Relatives []UsersRelative `json:"relatives"`
- Quotes string `json:"quotes"`
- Lists []int `json:"lists"`
- Deactivated string `json:"deactivated"`
- WallDefault string `json:"wall_default"`
- Timezone int `json:"timezone"`
- Exports UsersExports `json:"exports"`
- Counters UsersUserCounters `json:"counters"`
- MobilePhone string `json:"mobile_phone"`
- HomePhone string `json:"home_phone"`
- FoundWith int `json:"found_with"` // TODO: check it
- OnlineInfo UsersOnlineInfo `json:"online_info"`
- Mutual FriendsRequestsMutual `json:"mutual"`
- TrackCode string `json:"track_code"`
- RelationPartner UsersUserMin `json:"relation_partner"`
- Type string `json:"type"`
- Skype string `json:"skype"`
+ ID int `json:"id"`
+ FirstName string `json:"first_name"`
+ LastName string `json:"last_name"`
+ FirstNameNom string `json:"first_name_nom"`
+ FirstNameGen string `json:"first_name_gen"`
+ FirstNameDat string `json:"first_name_dat"`
+ FirstNameAcc string `json:"first_name_acc"`
+ FirstNameIns string `json:"first_name_ins"`
+ FirstNameAbl string `json:"first_name_abl"`
+ LastNameNom string `json:"last_name_nom"`
+ LastNameGen string `json:"last_name_gen"`
+ LastNameDat string `json:"last_name_dat"`
+ LastNameAcc string `json:"last_name_acc"`
+ LastNameIns string `json:"last_name_ins"`
+ LastNameAbl string `json:"last_name_abl"`
+ MaidenName string `json:"maiden_name"`
+ Sex int `json:"sex"`
+ Nickname string `json:"nickname"`
+ Domain string `json:"domain"`
+ ScreenName string `json:"screen_name"`
+ Bdate string `json:"bdate"`
+ City BaseObject `json:"city"`
+ Country BaseObject `json:"country"`
+ Photo50 string `json:"photo_50"`
+ Photo100 string `json:"photo_100"`
+ Photo200 string `json:"photo_200"`
+ PhotoMax string `json:"photo_max"`
+ Photo200Orig string `json:"photo_200_orig"`
+ Photo400Orig string `json:"photo_400_orig"`
+ PhotoMaxOrig string `json:"photo_max_orig"`
+ PhotoID string `json:"photo_id"`
+ FriendStatus int `json:"friend_status"` // see FriendStatus const
+ OnlineApp int `json:"online_app"`
+ Online BaseBoolInt `json:"online"`
+ OnlineMobile BaseBoolInt `json:"online_mobile"`
+ HasPhoto BaseBoolInt `json:"has_photo"`
+ HasMobile BaseBoolInt `json:"has_mobile"`
+ IsClosed BaseBoolInt `json:"is_closed"`
+ IsFriend BaseBoolInt `json:"is_friend"`
+ IsFavorite BaseBoolInt `json:"is_favorite"`
+ IsHiddenFromFeed BaseBoolInt `json:"is_hidden_from_feed"`
+ CanAccessClosed BaseBoolInt `json:"can_access_closed"`
+ CanBeInvitedGroup BaseBoolInt `json:"can_be_invited_group"`
+ CanPost BaseBoolInt `json:"can_post"`
+ CanSeeAllPosts BaseBoolInt `json:"can_see_all_posts"`
+ CanSeeAudio BaseBoolInt `json:"can_see_audio"`
+ CanWritePrivateMessage BaseBoolInt `json:"can_write_private_message"`
+ CanSendFriendRequest BaseBoolInt `json:"can_send_friend_request"`
+ CanCallFromGroup BaseBoolInt `json:"can_call_from_group"`
+ Verified BaseBoolInt `json:"verified"`
+ Trending BaseBoolInt `json:"trending"`
+ Blacklisted BaseBoolInt `json:"blacklisted"`
+ BlacklistedByMe BaseBoolInt `json:"blacklisted_by_me"`
+ // Deprecated: Facebook and Instagram are banned in Russia; Meta has been designated an extremist organization...
+ Facebook string `json:"facebook"`
+ // Deprecated: Facebook and Instagram are banned in Russia; Meta has been designated an extremist organization...
+ FacebookName string `json:"facebook_name"`
+ // Deprecated: Facebook and Instagram are banned in Russia; Meta has been designated an extremist organization...
+ Instagram string `json:"instagram"`
+ Twitter string `json:"twitter"`
+ Site string `json:"site"`
+ Status string `json:"status"`
+ StatusAudio AudioAudio `json:"status_audio"`
+ LastSeen UsersLastSeen `json:"last_seen"`
+ CropPhoto UsersCropPhoto `json:"crop_photo"`
+ FollowersCount int `json:"followers_count"`
+ CommonCount int `json:"common_count"`
+ Occupation UsersOccupation `json:"occupation"`
+ Career []UsersCareer `json:"career"`
+ Military []UsersMilitary `json:"military"`
+ University int `json:"university"`
+ UniversityName string `json:"university_name"`
+ Faculty int `json:"faculty"`
+ FacultyName string `json:"faculty_name"`
+ Graduation int `json:"graduation"`
+ EducationForm string `json:"education_form"`
+ EducationStatus string `json:"education_status"`
+ HomeTown string `json:"home_town"`
+ Relation int `json:"relation"`
+ Personal UsersPersonal `json:"personal"`
+ Interests string `json:"interests"`
+ Music string `json:"music"`
+ Activities string `json:"activities"`
+ Movies string `json:"movies"`
+ Tv string `json:"tv"`
+ Books string `json:"books"`
+ Games string `json:"games"`
+ Universities []UsersUniversity `json:"universities"`
+ Schools []UsersSchool `json:"schools"`
+ About string `json:"about"`
+ Relatives []UsersRelative `json:"relatives"`
+ Quotes string `json:"quotes"`
+ Lists []int `json:"lists"`
+ Deactivated string `json:"deactivated"`
+ WallDefault string `json:"wall_default"`
+ Timezone int `json:"timezone"`
+ Exports UsersExports `json:"exports"`
+ Counters UsersUserCounters `json:"counters"`
+ MobilePhone string `json:"mobile_phone"`
+ HomePhone string `json:"home_phone"`
+ FoundWith int `json:"found_with"` // TODO: check it
+ OnlineInfo UsersOnlineInfo `json:"online_info"`
+ Mutual FriendsRequestsMutual `json:"mutual"`
+ TrackCode string `json:"track_code"`
+ RelationPartner UsersUserMin `json:"relation_partner"`
+ Type string `json:"type"`
+ Skype string `json:"skype"`
}
// ToMention return mention.
diff --git a/vendor/github.com/SevereCloud/vksdk/v2/object/video.go b/vendor/github.com/SevereCloud/vksdk/v2/object/video.go
index 6c48224b..d86143a9 100644
--- a/vendor/github.com/SevereCloud/vksdk/v2/object/video.go
+++ b/vendor/github.com/SevereCloud/vksdk/v2/object/video.go
@@ -297,3 +297,27 @@ type VideoVideoImage struct {
BaseImage
WithPadding BaseBoolInt `json:"with_padding"`
}
+
+// VideoLive struct.
+type VideoLive struct {
+ OwnerID int `json:"owner_id"`
+ VideoID int `json:"video_id"`
+ Name string `json:"name"`
+ Description string `json:"description"`
+ AccessKey string `json:"access_key"`
+ Stream VideoLiveStream `json:"stream"`
+}
+
+// VideoLiveStream struct.
+type VideoLiveStream struct {
+ URL string `json:"url"`
+ Key string `json:"key"`
+ OKMPURL string `json:"okmp_url"`
+}
+
+// VideoLiveCategory struct.
+type VideoLiveCategory struct {
+ ID int `json:"id"`
+ Label string `json:"label"`
+ Sublist []VideoLiveCategory `json:"sublist,omitempty"`
+}
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
index e8ff994f..0e2dc116 100644
--- a/vendor/github.com/klauspost/compress/README.md
+++ b/vendor/github.com/klauspost/compress/README.md
@@ -17,6 +17,42 @@ This package provides various compression algorithms.
# changelog
+* Mar 3, 2022 (v1.15.0)
+ * zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498)
+ * zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505)
+ * huff0: Prevent single blocks exceeding 16 bits by @klauspost in [#507](https://github.com/klauspost/compress/pull/507)
+ * flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509)
+ * gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400)
+ * gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510)
+
+<details>
+ <summary>See Details</summary>
+Both compression and decompression now support "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines.
+
+Asynchronous stream decompression is now faster, since the goroutine allocation splits the workload much more effectively. Typical streams will fully use about 2 cores for decompression. When a stream has finished decoding, no goroutines are left over, so decoders can now safely be pooled and still be garbage collected.
+
+While the release has been extensively tested, it is recommended to test when upgrading.
+</details>
+
+* Feb 22, 2022 (v1.14.4)
+ * flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503)
+ * zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502)
+ * zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501)
+ * huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500)
+
+* Feb 17, 2022 (v1.14.3)
+ * flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478)
+ * flate: Faster decompression speed, ~5-10%. [#483](https://github.com/klauspost/compress/pull/483)
+ * s2: Faster compression with Go v1.18 and amd64 microarch level 3+. [#484](https://github.com/klauspost/compress/pull/484) [#486](https://github.com/klauspost/compress/pull/486)
+
+* Jan 25, 2022 (v1.14.2)
+ * zstd: improve header decoder by @dsnet [#476](https://github.com/klauspost/compress/pull/476)
+ * zstd: Add bigger default blocks [#469](https://github.com/klauspost/compress/pull/469)
+ * zstd: Remove unused decompression buffer [#470](https://github.com/klauspost/compress/pull/470)
+ * zstd: Fix logically dead code by @ningmingxiao [#472](https://github.com/klauspost/compress/pull/472)
+ * flate: Improve level 7-9 [#471](https://github.com/klauspost/compress/pull/471) [#473](https://github.com/klauspost/compress/pull/473)
+ * zstd: Add noasm tag for xxhash [#475](https://github.com/klauspost/compress/pull/475)
+
* Jan 11, 2022 (v1.14.1)
* s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462)
* flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458)
@@ -53,6 +89,9 @@ This package provides various compression algorithms.
* zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382)
* zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380)
+<details>
+ <summary>See changes to v1.12.x</summary>
+
* May 25, 2021 (v1.12.3)
* deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374)
* deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375)
@@ -74,9 +113,10 @@ This package provides various compression algorithms.
* s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352)
* zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346)
* s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349)
+</details>
<details>
- <summary>See changes prior to v1.12.1</summary>
+ <summary>See changes to v1.11.x</summary>
* Mar 26, 2021 (v1.11.13)
* zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345)
@@ -135,7 +175,7 @@ This package provides various compression algorithms.
</details>
<details>
- <summary>See changes prior to v1.11.0</summary>
+ <summary>See changes to v1.10.x</summary>
* July 8, 2020 (v1.10.11)
* zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278)
@@ -297,11 +337,6 @@ This package provides various compression algorithms.
# deflate usage
-* [High Throughput Benchmark](http://blog.klauspost.com/go-gzipdeflate-benchmarks/).
-* [Small Payload/Webserver Benchmarks](http://blog.klauspost.com/gzip-performance-for-go-webservers/).
-* [Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
-* [Re-balancing Deflate Compression Levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/)
-
The packages are drop-in replacements for standard libraries. Simply replace the import path to use them:
| old import | new import | Documentation
@@ -323,6 +358,8 @@ Memory usage is typically 1MB for a Writer. stdlib is in the same range.
If you expect to have a lot of concurrently allocated Writers consider using
the stateless compress described below.
+For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing).
+
# Stateless compression
This package offers stateless compression as a special option for gzip/deflate.
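
The v1.15.0 note above on synchronous stream operation maps onto the concurrency options of the zstd package. A minimal sketch, assuming zstd.WithEncoderConcurrency and zstd.WithDecoderConcurrency (present in this version), that keeps both sides goroutine-free (illustrative addition, not part of this diff):

```go
package main

import (
	"bytes"
	"io"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var compressed bytes.Buffer

	// Concurrency 1 selects the "synchronous" stream mode described in the
	// v1.15.0 notes: no extra goroutines are spawned for this writer.
	enc, err := zstd.NewWriter(&compressed, zstd.WithEncoderConcurrency(1))
	if err != nil {
		log.Fatal(err)
	}
	if _, err := enc.Write([]byte("hello, zstd")); err != nil {
		log.Fatal(err)
	}
	if err := enc.Close(); err != nil {
		log.Fatal(err)
	}

	// The same applies on the decoder side.
	dec, err := zstd.NewReader(&compressed, zstd.WithDecoderConcurrency(1))
	if err != nil {
		log.Fatal(err)
	}
	defer dec.Close()

	out, err := io.ReadAll(dec)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("decoded: %s", out)
}
```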
diff --git a/vendor/github.com/klauspost/compress/huff0/autogen.go b/vendor/github.com/klauspost/compress/huff0/autogen.go
new file mode 100644
index 00000000..ff2c69d6
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/autogen.go
@@ -0,0 +1,5 @@
+package huff0
+
+//go:generate go run generate.go
+//go:generate asmfmt -w decompress_amd64.s
+//go:generate asmfmt -w decompress_8b_amd64.s
diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go
index a4979e88..451160ed 100644
--- a/vendor/github.com/klauspost/compress/huff0/bitreader.go
+++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go
@@ -8,118 +8,13 @@ package huff0
import (
"encoding/binary"
"errors"
+ "fmt"
"io"
)
// bitReader reads a bitstream in reverse.
// The last set bit indicates the start of the stream and is used
// for aligning the input.
-type bitReader struct {
- in []byte
- off uint // next byte to read is at in[off - 1]
- value uint64
- bitsRead uint8
-}
-
-// init initializes and resets the bit reader.
-func (b *bitReader) init(in []byte) error {
- if len(in) < 1 {
- return errors.New("corrupt stream: too short")
- }
- b.in = in
- b.off = uint(len(in))
- // The highest bit of the last byte indicates where to start
- v := in[len(in)-1]
- if v == 0 {
- return errors.New("corrupt stream, did not find end of stream")
- }
- b.bitsRead = 64
- b.value = 0
- if len(in) >= 8 {
- b.fillFastStart()
- } else {
- b.fill()
- b.fill()
- }
- b.bitsRead += 8 - uint8(highBit32(uint32(v)))
- return nil
-}
-
-// peekBitsFast requires that at least one bit is requested every time.
-// There are no checks if the buffer is filled.
-func (b *bitReader) peekBitsFast(n uint8) uint16 {
- const regMask = 64 - 1
- v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
- return v
-}
-
-// fillFast() will make sure at least 32 bits are available.
-// There must be at least 4 bytes available.
-func (b *bitReader) fillFast() {
- if b.bitsRead < 32 {
- return
- }
-
- // 2 bounds checks.
- v := b.in[b.off-4 : b.off]
- v = v[:4]
- low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
- b.value = (b.value << 32) | uint64(low)
- b.bitsRead -= 32
- b.off -= 4
-}
-
-func (b *bitReader) advance(n uint8) {
- b.bitsRead += n
-}
-
-// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read.
-func (b *bitReader) fillFastStart() {
- // Do single re-slice to avoid bounds checks.
- b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
- b.bitsRead = 0
- b.off -= 8
-}
-
-// fill() will make sure at least 32 bits are available.
-func (b *bitReader) fill() {
- if b.bitsRead < 32 {
- return
- }
- if b.off > 4 {
- v := b.in[b.off-4:]
- v = v[:4]
- low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
- b.value = (b.value << 32) | uint64(low)
- b.bitsRead -= 32
- b.off -= 4
- return
- }
- for b.off > 0 {
- b.value = (b.value << 8) | uint64(b.in[b.off-1])
- b.bitsRead -= 8
- b.off--
- }
-}
-
-// finished returns true if all bits have been read from the bit stream.
-func (b *bitReader) finished() bool {
- return b.off == 0 && b.bitsRead >= 64
-}
-
-// close the bitstream and returns an error if out-of-buffer reads occurred.
-func (b *bitReader) close() error {
- // Release reference.
- b.in = nil
- if b.bitsRead > 64 {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-
-// bitReader reads a bitstream in reverse.
-// The last set bit indicates the start of the stream and is used
-// for aligning the input.
type bitReaderBytes struct {
in []byte
off uint // next byte to read is at in[off - 1]
@@ -213,10 +108,17 @@ func (b *bitReaderBytes) finished() bool {
return b.off == 0 && b.bitsRead >= 64
}
+func (b *bitReaderBytes) remaining() uint {
+ return b.off*8 + uint(64-b.bitsRead)
+}
+
// close the bitstream and returns an error if out-of-buffer reads occurred.
func (b *bitReaderBytes) close() error {
// Release reference.
b.in = nil
+ if b.remaining() > 0 {
+ return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining())
+ }
if b.bitsRead > 64 {
return io.ErrUnexpectedEOF
}
@@ -263,6 +165,11 @@ func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 {
return uint16(b.value >> ((64 - n) & 63))
}
+// peekTopBits(n) is equivalent to peekBitsFast(64 - n)
+func (b *bitReaderShifted) peekTopBits(n uint8) uint16 {
+ return uint16(b.value >> n)
+}
+
func (b *bitReaderShifted) advance(n uint8) {
b.bitsRead += n
b.value <<= n & 63
@@ -318,10 +225,17 @@ func (b *bitReaderShifted) finished() bool {
return b.off == 0 && b.bitsRead >= 64
}
+func (b *bitReaderShifted) remaining() uint {
+ return b.off*8 + uint(64-b.bitsRead)
+}
+
// close the bitstream and returns an error if out-of-buffer reads occurred.
func (b *bitReaderShifted) close() error {
// Release reference.
b.in = nil
+ if b.remaining() > 0 {
+ return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining())
+ }
if b.bitsRead > 64 {
return io.ErrUnexpectedEOF
}
diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go
index 8323dc05..bc95ac62 100644
--- a/vendor/github.com/klauspost/compress/huff0/compress.go
+++ b/vendor/github.com/klauspost/compress/huff0/compress.go
@@ -2,6 +2,7 @@ package huff0
import (
"fmt"
+ "math"
"runtime"
"sync"
)
@@ -289,6 +290,10 @@ func (s *Scratch) compress4X(src []byte) ([]byte, error) {
if err != nil {
return nil, err
}
+ if len(s.Out)-idx > math.MaxUint16 {
+ // We cannot store the size in the jump table
+ return nil, ErrIncompressible
+ }
// Write compressed length as little endian before block.
if i < 3 {
// Last length is not written.
@@ -332,6 +337,10 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) {
return nil, errs[i]
}
o := s.tmpOut[i]
+ if len(o) > math.MaxUint16 {
+ // We cannot store the size in the jump table
+ return nil, ErrIncompressible
+ }
// Write compressed length as little endian before block.
if i < 3 {
// Last length is not written.
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go
index 2a06bd1a..04f65299 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress.go
+++ b/vendor/github.com/klauspost/compress/huff0/decompress.go
@@ -4,6 +4,7 @@ import (
"errors"
"fmt"
"io"
+ "sync"
"github.com/klauspost/compress/fse"
)
@@ -216,6 +217,7 @@ func (s *Scratch) Decoder() *Decoder {
return &Decoder{
dt: s.dt,
actualTableLog: s.actualTableLog,
+ bufs: &s.decPool,
}
}
@@ -223,6 +225,15 @@ func (s *Scratch) Decoder() *Decoder {
type Decoder struct {
dt dTable
actualTableLog uint8
+ bufs *sync.Pool
+}
+
+func (d *Decoder) buffer() *[4][256]byte {
+ buf, ok := d.bufs.Get().(*[4][256]byte)
+ if ok {
+ return buf
+ }
+ return &[4][256]byte{}
}
// Decompress1X will decompress a 1X encoded stream.
@@ -249,7 +260,8 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
dt := d.dt.single[:tlSize]
// Use temp table to avoid bound checks/append penalty.
- var buf [256]byte
+ bufs := d.buffer()
+ buf := &bufs[0]
var off uint8
for br.off >= 8 {
@@ -277,6 +289,7 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
if off == 0 {
if len(dst)+256 > maxDecodedSize {
br.close()
+ d.bufs.Put(bufs)
return nil, ErrMaxDecodedSizeExceeded
}
dst = append(dst, buf[:]...)
@@ -284,6 +297,7 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
}
if len(dst)+int(off) > maxDecodedSize {
+ d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
@@ -310,6 +324,7 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
}
}
if len(dst) >= maxDecodedSize {
+ d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
@@ -319,6 +334,7 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
bitsLeft -= nBits
dst = append(dst, uint8(v.entry>>8))
}
+ d.bufs.Put(bufs)
return dst, br.close()
}
@@ -341,7 +357,8 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
dt := d.dt.single[:256]
// Use temp table to avoid bound checks/append penalty.
- var buf [256]byte
+ bufs := d.buffer()
+ buf := &bufs[0]
var off uint8
switch d.actualTableLog {
@@ -369,6 +386,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
if off == 0 {
if len(dst)+256 > maxDecodedSize {
br.close()
+ d.bufs.Put(bufs)
return nil, ErrMaxDecodedSizeExceeded
}
dst = append(dst, buf[:]...)
@@ -398,6 +416,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
if off == 0 {
if len(dst)+256 > maxDecodedSize {
br.close()
+ d.bufs.Put(bufs)
return nil, ErrMaxDecodedSizeExceeded
}
dst = append(dst, buf[:]...)
@@ -426,6 +445,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
off += 4
if off == 0 {
if len(dst)+256 > maxDecodedSize {
+ d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
@@ -455,6 +475,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
off += 4
if off == 0 {
if len(dst)+256 > maxDecodedSize {
+ d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
@@ -484,6 +505,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
off += 4
if off == 0 {
if len(dst)+256 > maxDecodedSize {
+ d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
@@ -513,6 +535,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
off += 4
if off == 0 {
if len(dst)+256 > maxDecodedSize {
+ d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
@@ -542,6 +565,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
off += 4
if off == 0 {
if len(dst)+256 > maxDecodedSize {
+ d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
@@ -571,6 +595,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
off += 4
if off == 0 {
if len(dst)+256 > maxDecodedSize {
+ d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
@@ -578,10 +603,12 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
}
}
default:
+ d.bufs.Put(bufs)
return nil, fmt.Errorf("invalid tablelog: %d", d.actualTableLog)
}
if len(dst)+int(off) > maxDecodedSize {
+ d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
@@ -601,6 +628,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
}
if len(dst) >= maxDecodedSize {
br.close()
+ d.bufs.Put(bufs)
return nil, ErrMaxDecodedSizeExceeded
}
v := dt[br.peekByteFast()>>shift]
@@ -609,6 +637,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
bitsLeft -= int8(nBits)
dst = append(dst, uint8(v.entry>>8))
}
+ d.bufs.Put(bufs)
return dst, br.close()
}
@@ -628,7 +657,8 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
dt := d.dt.single[:256]
// Use temp table to avoid bound checks/append penalty.
- var buf [256]byte
+ bufs := d.buffer()
+ buf := &bufs[0]
var off uint8
const shift = 56
@@ -655,6 +685,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
off += 4
if off == 0 {
if len(dst)+256 > maxDecodedSize {
+ d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
@@ -663,6 +694,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
}
if len(dst)+int(off) > maxDecodedSize {
+ d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
@@ -679,6 +711,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
}
}
if len(dst) >= maxDecodedSize {
+ d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
@@ -688,6 +721,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
bitsLeft -= int8(nBits)
dst = append(dst, uint8(v.entry>>8))
}
+ d.bufs.Put(bufs)
return dst, br.close()
}
@@ -695,192 +729,6 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
// The length of the supplied input must match the end of a block exactly.
// The *capacity* of the dst slice must match the destination size of
// the uncompressed data exactly.
-func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
- if len(d.dt.single) == 0 {
- return nil, errors.New("no table loaded")
- }
- if len(src) < 6+(4*1) {
- return nil, errors.New("input too small")
- }
- if use8BitTables && d.actualTableLog <= 8 {
- return d.decompress4X8bit(dst, src)
- }
-
- var br [4]bitReaderShifted
- start := 6
- for i := 0; i < 3; i++ {
- length := int(src[i*2]) | (int(src[i*2+1]) << 8)
- if start+length >= len(src) {
- return nil, errors.New("truncated input (or invalid offset)")
- }
- err := br[i].init(src[start : start+length])
- if err != nil {
- return nil, err
- }
- start += length
- }
- err := br[3].init(src[start:])
- if err != nil {
- return nil, err
- }
-
- // destination, offset to match first output
- dstSize := cap(dst)
- dst = dst[:dstSize]
- out := dst
- dstEvery := (dstSize + 3) / 4
-
- const tlSize = 1 << tableLogMax
- const tlMask = tlSize - 1
- single := d.dt.single[:tlSize]
-
- // Use temp table to avoid bound checks/append penalty.
- var buf [256]byte
- var off uint8
- var decoded int
-
- // Decode 2 values from each decoder/loop.
- const bufoff = 256 / 4
- for {
- if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
- break
- }
-
- {
- const stream = 0
- const stream2 = 1
- br[stream].fillFast()
- br[stream2].fillFast()
-
- val := br[stream].peekBitsFast(d.actualTableLog)
- val2 := br[stream2].peekBitsFast(d.actualTableLog)
- v := single[val&tlMask]
- v2 := single[val2&tlMask]
- br[stream].advance(uint8(v.entry))
- br[stream2].advance(uint8(v2.entry))
- buf[off+bufoff*stream] = uint8(v.entry >> 8)
- buf[off+bufoff*stream2] = uint8(v2.entry >> 8)
-
- val = br[stream].peekBitsFast(d.actualTableLog)
- val2 = br[stream2].peekBitsFast(d.actualTableLog)
- v = single[val&tlMask]
- v2 = single[val2&tlMask]
- br[stream].advance(uint8(v.entry))
- br[stream2].advance(uint8(v2.entry))
- buf[off+bufoff*stream+1] = uint8(v.entry >> 8)
- buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8)
- }
-
- {
- const stream = 2
- const stream2 = 3
- br[stream].fillFast()
- br[stream2].fillFast()
-
- val := br[stream].peekBitsFast(d.actualTableLog)
- val2 := br[stream2].peekBitsFast(d.actualTableLog)
- v := single[val&tlMask]
- v2 := single[val2&tlMask]
- br[stream].advance(uint8(v.entry))
- br[stream2].advance(uint8(v2.entry))
- buf[off+bufoff*stream] = uint8(v.entry >> 8)
- buf[off+bufoff*stream2] = uint8(v2.entry >> 8)
-
- val = br[stream].peekBitsFast(d.actualTableLog)
- val2 = br[stream2].peekBitsFast(d.actualTableLog)
- v = single[val&tlMask]
- v2 = single[val2&tlMask]
- br[stream].advance(uint8(v.entry))
- br[stream2].advance(uint8(v2.entry))
- buf[off+bufoff*stream+1] = uint8(v.entry >> 8)
- buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8)
- }
-
- off += 2
-
- if off == bufoff {
- if bufoff > dstEvery {
- return nil, errors.New("corruption detected: stream overrun 1")
- }
- copy(out, buf[:bufoff])
- copy(out[dstEvery:], buf[bufoff:bufoff*2])
- copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3])
- copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4])
- off = 0
- out = out[bufoff:]
- decoded += 256
- // There must at least be 3 buffers left.
- if len(out) < dstEvery*3 {
- return nil, errors.New("corruption detected: stream overrun 2")
- }
- }
- }
- if off > 0 {
- ioff := int(off)
- if len(out) < dstEvery*3+ioff {
- return nil, errors.New("corruption detected: stream overrun 3")
- }
- copy(out, buf[:off])
- copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2])
- copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3])
- copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4])
- decoded += int(off) * 4
- out = out[off:]
- }
-
- // Decode remaining.
- for i := range br {
- offset := dstEvery * i
- br := &br[i]
- bitsLeft := br.off*8 + uint(64-br.bitsRead)
- for bitsLeft > 0 {
- br.fill()
- if false && br.bitsRead >= 32 {
- if br.off >= 4 {
- v := br.in[br.off-4:]
- v = v[:4]
- low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
- br.value = (br.value << 32) | uint64(low)
- br.bitsRead -= 32
- br.off -= 4
- } else {
- for br.off > 0 {
- br.value = (br.value << 8) | uint64(br.in[br.off-1])
- br.bitsRead -= 8
- br.off--
- }
- }
- }
- // end inline...
- if offset >= len(out) {
- return nil, errors.New("corruption detected: stream overrun 4")
- }
-
- // Read value and increment offset.
- val := br.peekBitsFast(d.actualTableLog)
- v := single[val&tlMask].entry
- nBits := uint8(v)
- br.advance(nBits)
- bitsLeft -= uint(nBits)
- out[offset] = uint8(v >> 8)
- offset++
- }
- decoded += offset - dstEvery*i
- err = br.close()
- if err != nil {
- return nil, err
- }
- }
- if dstSize != decoded {
- return nil, errors.New("corruption detected: short output block")
- }
- return dst, nil
-}
-
-// Decompress4X will decompress a 4X encoded stream.
-// The length of the supplied input must match the end of a block exactly.
-// The *capacity* of the dst slice must match the destination size of
-// the uncompressed data exactly.
func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
if d.actualTableLog == 8 {
return d.decompress4X8bitExactly(dst, src)
@@ -916,12 +764,12 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
single := d.dt.single[:tlSize]
// Use temp table to avoid bound checks/append penalty.
- var buf [256]byte
+ buf := d.buffer()
var off uint8
var decoded int
// Decode 4 values from each decoder/loop.
- const bufoff = 256 / 4
+ const bufoff = 256
for {
if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
break
@@ -942,8 +790,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
br1.value <<= v & 63
br2.bitsRead += uint8(v2)
br2.value <<= v2 & 63
- buf[off+bufoff*stream] = uint8(v >> 8)
- buf[off+bufoff*stream2] = uint8(v2 >> 8)
+ buf[stream][off] = uint8(v >> 8)
+ buf[stream2][off] = uint8(v2 >> 8)
v = single[uint8(br1.value>>shift)].entry
v2 = single[uint8(br2.value>>shift)].entry
@@ -951,8 +799,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
br1.value <<= v & 63
br2.bitsRead += uint8(v2)
br2.value <<= v2 & 63
- buf[off+bufoff*stream+1] = uint8(v >> 8)
- buf[off+bufoff*stream2+1] = uint8(v2 >> 8)
+ buf[stream][off+1] = uint8(v >> 8)
+ buf[stream2][off+1] = uint8(v2 >> 8)
v = single[uint8(br1.value>>shift)].entry
v2 = single[uint8(br2.value>>shift)].entry
@@ -960,8 +808,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
br1.value <<= v & 63
br2.bitsRead += uint8(v2)
br2.value <<= v2 & 63
- buf[off+bufoff*stream+2] = uint8(v >> 8)
- buf[off+bufoff*stream2+2] = uint8(v2 >> 8)
+ buf[stream][off+2] = uint8(v >> 8)
+ buf[stream2][off+2] = uint8(v2 >> 8)
v = single[uint8(br1.value>>shift)].entry
v2 = single[uint8(br2.value>>shift)].entry
@@ -969,8 +817,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
br1.value <<= v & 63
br2.bitsRead += uint8(v2)
br2.value <<= v2 & 63
- buf[off+bufoff*stream2+3] = uint8(v2 >> 8)
- buf[off+bufoff*stream+3] = uint8(v >> 8)
+ buf[stream][off+3] = uint8(v >> 8)
+ buf[stream2][off+3] = uint8(v2 >> 8)
}
{
@@ -987,8 +835,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
br1.value <<= v & 63
br2.bitsRead += uint8(v2)
br2.value <<= v2 & 63
- buf[off+bufoff*stream] = uint8(v >> 8)
- buf[off+bufoff*stream2] = uint8(v2 >> 8)
+ buf[stream][off] = uint8(v >> 8)
+ buf[stream2][off] = uint8(v2 >> 8)
v = single[uint8(br1.value>>shift)].entry
v2 = single[uint8(br2.value>>shift)].entry
@@ -996,8 +844,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
br1.value <<= v & 63
br2.bitsRead += uint8(v2)
br2.value <<= v2 & 63
- buf[off+bufoff*stream+1] = uint8(v >> 8)
- buf[off+bufoff*stream2+1] = uint8(v2 >> 8)
+ buf[stream][off+1] = uint8(v >> 8)
+ buf[stream2][off+1] = uint8(v2 >> 8)
v = single[uint8(br1.value>>shift)].entry
v2 = single[uint8(br2.value>>shift)].entry
@@ -1005,8 +853,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
br1.value <<= v & 63
br2.bitsRead += uint8(v2)
br2.value <<= v2 & 63
- buf[off+bufoff*stream+2] = uint8(v >> 8)
- buf[off+bufoff*stream2+2] = uint8(v2 >> 8)
+ buf[stream][off+2] = uint8(v >> 8)
+ buf[stream2][off+2] = uint8(v2 >> 8)
v = single[uint8(br1.value>>shift)].entry
v2 = single[uint8(br2.value>>shift)].entry
@@ -1014,25 +862,26 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
br1.value <<= v & 63
br2.bitsRead += uint8(v2)
br2.value <<= v2 & 63
- buf[off+bufoff*stream2+3] = uint8(v2 >> 8)
- buf[off+bufoff*stream+3] = uint8(v >> 8)
+ buf[stream][off+3] = uint8(v >> 8)
+ buf[stream2][off+3] = uint8(v2 >> 8)
}
off += 4
- if off == bufoff {
+ if off == 0 {
if bufoff > dstEvery {
+ d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 1")
}
- copy(out, buf[:bufoff])
- copy(out[dstEvery:], buf[bufoff:bufoff*2])
- copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3])
- copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4])
- off = 0
+ copy(out, buf[0][:])
+ copy(out[dstEvery:], buf[1][:])
+ copy(out[dstEvery*2:], buf[2][:])
+ copy(out[dstEvery*3:], buf[3][:])
out = out[bufoff:]
- decoded += 256
+ decoded += bufoff * 4
// There must at least be 3 buffers left.
if len(out) < dstEvery*3 {
+ d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 2")
}
}
@@ -1040,23 +889,31 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
if off > 0 {
ioff := int(off)
if len(out) < dstEvery*3+ioff {
+ d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 3")
}
- copy(out, buf[:off])
- copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2])
- copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3])
- copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4])
+ copy(out, buf[0][:off])
+ copy(out[dstEvery:], buf[1][:off])
+ copy(out[dstEvery*2:], buf[2][:off])
+ copy(out[dstEvery*3:], buf[3][:off])
decoded += int(off) * 4
out = out[off:]
}
// Decode remaining.
+ // Decode remaining.
+ remainBytes := dstEvery - (decoded / 4)
for i := range br {
offset := dstEvery * i
+ endsAt := offset + remainBytes
+ if endsAt > len(out) {
+ endsAt = len(out)
+ }
br := &br[i]
- bitsLeft := int(br.off*8) + int(64-br.bitsRead)
+ bitsLeft := br.remaining()
for bitsLeft > 0 {
if br.finished() {
+ d.bufs.Put(buf)
return nil, io.ErrUnexpectedEOF
}
if br.bitsRead >= 56 {
@@ -1076,7 +933,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
}
}
// end inline...
- if offset >= len(out) {
+ if offset >= endsAt {
+ d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 4")
}
@@ -1084,16 +942,22 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
v := single[uint8(br.value>>shift)].entry
nBits := uint8(v)
br.advance(nBits)
- bitsLeft -= int(nBits)
+ bitsLeft -= uint(nBits)
out[offset] = uint8(v >> 8)
offset++
}
+ if offset != endsAt {
+ d.bufs.Put(buf)
+ return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
+ }
decoded += offset - dstEvery*i
err = br.close()
if err != nil {
+ d.bufs.Put(buf)
return nil, err
}
}
+ d.bufs.Put(buf)
if dstSize != decoded {
return nil, errors.New("corruption detected: short output block")
}
@@ -1135,12 +999,12 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
single := d.dt.single[:tlSize]
// Use temp table to avoid bound checks/append penalty.
- var buf [256]byte
+ buf := d.buffer()
var off uint8
var decoded int
// Decode 4 values from each decoder/loop.
- const bufoff = 256 / 4
+ const bufoff = 256
for {
if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
break
@@ -1150,104 +1014,109 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
// Interleave 2 decodes.
const stream = 0
const stream2 = 1
- br[stream].fillFast()
- br[stream2].fillFast()
-
- v := single[uint8(br[stream].value>>shift)].entry
- v2 := single[uint8(br[stream2].value>>shift)].entry
- br[stream].bitsRead += uint8(v)
- br[stream].value <<= v & 63
- br[stream2].bitsRead += uint8(v2)
- br[stream2].value <<= v2 & 63
- buf[off+bufoff*stream] = uint8(v >> 8)
- buf[off+bufoff*stream2] = uint8(v2 >> 8)
-
- v = single[uint8(br[stream].value>>shift)].entry
- v2 = single[uint8(br[stream2].value>>shift)].entry
- br[stream].bitsRead += uint8(v)
- br[stream].value <<= v & 63
- br[stream2].bitsRead += uint8(v2)
- br[stream2].value <<= v2 & 63
- buf[off+bufoff*stream+1] = uint8(v >> 8)
- buf[off+bufoff*stream2+1] = uint8(v2 >> 8)
-
- v = single[uint8(br[stream].value>>shift)].entry
- v2 = single[uint8(br[stream2].value>>shift)].entry
- br[stream].bitsRead += uint8(v)
- br[stream].value <<= v & 63
- br[stream2].bitsRead += uint8(v2)
- br[stream2].value <<= v2 & 63
- buf[off+bufoff*stream+2] = uint8(v >> 8)
- buf[off+bufoff*stream2+2] = uint8(v2 >> 8)
-
- v = single[uint8(br[stream].value>>shift)].entry
- v2 = single[uint8(br[stream2].value>>shift)].entry
- br[stream].bitsRead += uint8(v)
- br[stream].value <<= v & 63
- br[stream2].bitsRead += uint8(v2)
- br[stream2].value <<= v2 & 63
- buf[off+bufoff*stream+3] = uint8(v >> 8)
- buf[off+bufoff*stream2+3] = uint8(v2 >> 8)
+ br1 := &br[stream]
+ br2 := &br[stream2]
+ br1.fillFast()
+ br2.fillFast()
+
+ v := single[uint8(br1.value>>shift)].entry
+ v2 := single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off] = uint8(v >> 8)
+ buf[stream2][off] = uint8(v2 >> 8)
+
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off+1] = uint8(v >> 8)
+ buf[stream2][off+1] = uint8(v2 >> 8)
+
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off+2] = uint8(v >> 8)
+ buf[stream2][off+2] = uint8(v2 >> 8)
+
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off+3] = uint8(v >> 8)
+ buf[stream2][off+3] = uint8(v2 >> 8)
}
{
const stream = 2
const stream2 = 3
- br[stream].fillFast()
- br[stream2].fillFast()
-
- v := single[uint8(br[stream].value>>shift)].entry
- v2 := single[uint8(br[stream2].value>>shift)].entry
- br[stream].bitsRead += uint8(v)
- br[stream].value <<= v & 63
- br[stream2].bitsRead += uint8(v2)
- br[stream2].value <<= v2 & 63
- buf[off+bufoff*stream] = uint8(v >> 8)
- buf[off+bufoff*stream2] = uint8(v2 >> 8)
-
- v = single[uint8(br[stream].value>>shift)].entry
- v2 = single[uint8(br[stream2].value>>shift)].entry
- br[stream].bitsRead += uint8(v)
- br[stream].value <<= v & 63
- br[stream2].bitsRead += uint8(v2)
- br[stream2].value <<= v2 & 63
- buf[off+bufoff*stream+1] = uint8(v >> 8)
- buf[off+bufoff*stream2+1] = uint8(v2 >> 8)
-
- v = single[uint8(br[stream].value>>shift)].entry
- v2 = single[uint8(br[stream2].value>>shift)].entry
- br[stream].bitsRead += uint8(v)
- br[stream].value <<= v & 63
- br[stream2].bitsRead += uint8(v2)
- br[stream2].value <<= v2 & 63
- buf[off+bufoff*stream+2] = uint8(v >> 8)
- buf[off+bufoff*stream2+2] = uint8(v2 >> 8)
-
- v = single[uint8(br[stream].value>>shift)].entry
- v2 = single[uint8(br[stream2].value>>shift)].entry
- br[stream].bitsRead += uint8(v)
- br[stream].value <<= v & 63
- br[stream2].bitsRead += uint8(v2)
- br[stream2].value <<= v2 & 63
- buf[off+bufoff*stream+3] = uint8(v >> 8)
- buf[off+bufoff*stream2+3] = uint8(v2 >> 8)
+ br1 := &br[stream]
+ br2 := &br[stream2]
+ br1.fillFast()
+ br2.fillFast()
+
+ v := single[uint8(br1.value>>shift)].entry
+ v2 := single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off] = uint8(v >> 8)
+ buf[stream2][off] = uint8(v2 >> 8)
+
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off+1] = uint8(v >> 8)
+ buf[stream2][off+1] = uint8(v2 >> 8)
+
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off+2] = uint8(v >> 8)
+ buf[stream2][off+2] = uint8(v2 >> 8)
+
+ v = single[uint8(br1.value>>shift)].entry
+ v2 = single[uint8(br2.value>>shift)].entry
+ br1.bitsRead += uint8(v)
+ br1.value <<= v & 63
+ br2.bitsRead += uint8(v2)
+ br2.value <<= v2 & 63
+ buf[stream][off+3] = uint8(v >> 8)
+ buf[stream2][off+3] = uint8(v2 >> 8)
}
off += 4
- if off == bufoff {
+ if off == 0 {
if bufoff > dstEvery {
+ d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 1")
}
- copy(out, buf[:bufoff])
- copy(out[dstEvery:], buf[bufoff:bufoff*2])
- copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3])
- copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4])
- off = 0
+ copy(out, buf[0][:])
+ copy(out[dstEvery:], buf[1][:])
+ copy(out[dstEvery*2:], buf[2][:])
+ copy(out[dstEvery*3:], buf[3][:])
out = out[bufoff:]
- decoded += 256
+ decoded += bufoff * 4
// There must at least be 3 buffers left.
if len(out) < dstEvery*3 {
+ d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 2")
}
}
@@ -1257,21 +1126,27 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
if len(out) < dstEvery*3+ioff {
return nil, errors.New("corruption detected: stream overrun 3")
}
- copy(out, buf[:off])
- copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2])
- copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3])
- copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4])
+ copy(out, buf[0][:off])
+ copy(out[dstEvery:], buf[1][:off])
+ copy(out[dstEvery*2:], buf[2][:off])
+ copy(out[dstEvery*3:], buf[3][:off])
decoded += int(off) * 4
out = out[off:]
}
// Decode remaining.
+ remainBytes := dstEvery - (decoded / 4)
for i := range br {
offset := dstEvery * i
+ endsAt := offset + remainBytes
+ if endsAt > len(out) {
+ endsAt = len(out)
+ }
br := &br[i]
- bitsLeft := int(br.off*8) + int(64-br.bitsRead)
+ bitsLeft := br.remaining()
for bitsLeft > 0 {
if br.finished() {
+ d.bufs.Put(buf)
return nil, io.ErrUnexpectedEOF
}
if br.bitsRead >= 56 {
@@ -1291,7 +1166,8 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
}
}
// end inline...
- if offset >= len(out) {
+ if offset >= endsAt {
+ d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 4")
}
@@ -1299,16 +1175,23 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
v := single[br.peekByteFast()].entry
nBits := uint8(v)
br.advance(nBits)
- bitsLeft -= int(nBits)
+ bitsLeft -= uint(nBits)
out[offset] = uint8(v >> 8)
offset++
}
+ if offset != endsAt {
+ d.bufs.Put(buf)
+ return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
+ }
+
decoded += offset - dstEvery*i
err = br.close()
if err != nil {
+ d.bufs.Put(buf)
return nil, err
}
}
+ d.bufs.Put(buf)
if dstSize != decoded {
return nil, errors.New("corruption detected: short output block")
}
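
The hunks above replace the flat scratch slice indexed with off+bufoff*stream by a pooled [4][256]byte table indexed as buf[stream][off]. Because off is a uint8, it wraps to zero after a full 256-byte round, which is why the flush condition changes from "off == bufoff" to "off == 0" and each flush now accounts for bufoff*4 decoded bytes. A minimal, self-contained Go sketch of that flush pattern follows; flush4, the marker values and dstEvery = 300 are illustrative only, not library code.

package main

import "fmt"

// flush4 copies one full round of per-stream scratch buffers into the
// interleaved output: stream i owns the window starting at out[dstEvery*i].
func flush4(out []byte, buf *[4][256]byte, dstEvery int) {
	copy(out, buf[0][:])
	copy(out[dstEvery:], buf[1][:])
	copy(out[dstEvery*2:], buf[2][:])
	copy(out[dstEvery*3:], buf[3][:])
}

func main() {
	var buf [4][256]byte
	for s := range buf {
		for i := range buf[s] {
			buf[s][i] = byte(s) // mark each stream with its index
		}
	}
	out := make([]byte, 4*300) // pretend dstEvery is 300 bytes
	flush4(out, &buf, 300)
	fmt.Println(out[0], out[300], out[600], out[900]) // 0 1 2 3
}
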
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s
new file mode 100644
index 00000000..0d6cb1a9
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s
@@ -0,0 +1,488 @@
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+#include "funcdata.h"
+#include "go_asm.h"
+
+#define bufoff 256 // see decompress.go, we're using [4][256]byte table
+
+// func decompress4x_8b_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
+//	peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
+TEXT ·decompress4x_8b_loop_x86(SB), NOSPLIT, $8
+#define off R8
+#define buffer DI
+#define table SI
+
+#define br_bits_read R9
+#define br_value R10
+#define br_offset R11
+#define peek_bits R12
+#define exhausted DX
+
+#define br0 R13
+#define br1 R14
+#define br2 R15
+#define br3 BP
+
+ MOVQ BP, 0(SP)
+
+ XORQ exhausted, exhausted // exhausted = false
+ XORQ off, off // off = 0
+
+ MOVBQZX peekBits+32(FP), peek_bits
+ MOVQ buf+40(FP), buffer
+ MOVQ tbl+48(FP), table
+
+ MOVQ pbr0+0(FP), br0
+ MOVQ pbr1+8(FP), br1
+ MOVQ pbr2+16(FP), br2
+ MOVQ pbr3+24(FP), br3
+
+main_loop:
+
+ // const stream = 0
+ // br0.fillFast()
+ MOVBQZX bitReaderShifted_bitsRead(br0), br_bits_read
+ MOVQ bitReaderShifted_value(br0), br_value
+ MOVQ bitReaderShifted_off(br0), br_offset
+
+ // if b.bitsRead >= 32 {
+ CMPQ br_bits_read, $32
+ JB skip_fill0
+
+ SUBQ $32, br_bits_read // b.bitsRead -= 32
+ SUBQ $4, br_offset // b.off -= 4
+
+ // v := b.in[b.off-4 : b.off]
+ // v = v[:4]
+ // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ MOVQ bitReaderShifted_in(br0), AX
+ MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+ MOVQ br_bits_read, CX
+ SHLQ CL, AX
+ ORQ AX, br_value
+
+ // exhausted = exhausted || (br0.off < 4)
+ CMPQ br_offset, $4
+ SETLT DL
+ ORB DL, DH
+
+ // }
+skip_fill0:
+
+ // val0 := br0.peekTopBits(peekBits)
+ MOVQ br_value, AX
+ MOVQ peek_bits, CX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+ // v0 := table[val0&mask]
+ MOVW 0(table)(AX*2), AX // AX - v0
+
+ // br0.advance(uint8(v0.entry))
+ MOVB AH, BL // BL = uint8(v0.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // val1 := br0.peekTopBits(peekBits)
+ MOVQ peek_bits, CX
+ MOVQ br_value, AX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+ // v1 := table[val1&mask]
+ MOVW 0(table)(AX*2), AX // AX - v1
+
+ // br0.advance(uint8(v1.entry))
+ MOVB AH, BH // BH = uint8(v1.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CX, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // these two writes get coalesced
+ // buf[stream][off] = uint8(v0.entry >> 8)
+ // buf[stream][off+1] = uint8(v1.entry >> 8)
+ MOVW BX, 0(buffer)(off*1)
+
+ // SECOND PART:
+ // val2 := br0.peekTopBits(peekBits)
+ MOVQ br_value, AX
+ MOVQ peek_bits, CX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+	// v2 := table[val2&mask]
+	MOVW 0(table)(AX*2), AX // AX - v2
+
+	// br0.advance(uint8(v2.entry))
+	MOVB AH, BL // BL = uint8(v2.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // val3 := br0.peekTopBits(peekBits)
+ MOVQ peek_bits, CX
+ MOVQ br_value, AX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+	// v3 := table[val3&mask]
+	MOVW 0(table)(AX*2), AX // AX - v3
+
+	// br0.advance(uint8(v3.entry))
+	MOVB AH, BH // BH = uint8(v3.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CX, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // these two writes get coalesced
+ // buf[stream][off+2] = uint8(v2.entry >> 8)
+ // buf[stream][off+3] = uint8(v3.entry >> 8)
+ MOVW BX, 0+2(buffer)(off*1)
+
+	// update the bitReader structure
+ MOVB br_bits_read, bitReaderShifted_bitsRead(br0)
+ MOVQ br_value, bitReaderShifted_value(br0)
+ MOVQ br_offset, bitReaderShifted_off(br0)
+
+ // const stream = 1
+ // br1.fillFast()
+ MOVBQZX bitReaderShifted_bitsRead(br1), br_bits_read
+ MOVQ bitReaderShifted_value(br1), br_value
+ MOVQ bitReaderShifted_off(br1), br_offset
+
+ // if b.bitsRead >= 32 {
+ CMPQ br_bits_read, $32
+ JB skip_fill1
+
+ SUBQ $32, br_bits_read // b.bitsRead -= 32
+ SUBQ $4, br_offset // b.off -= 4
+
+ // v := b.in[b.off-4 : b.off]
+ // v = v[:4]
+ // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ MOVQ bitReaderShifted_in(br1), AX
+ MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+ MOVQ br_bits_read, CX
+ SHLQ CL, AX
+ ORQ AX, br_value
+
+ // exhausted = exhausted || (br1.off < 4)
+ CMPQ br_offset, $4
+ SETLT DL
+ ORB DL, DH
+
+ // }
+skip_fill1:
+
+ // val0 := br1.peekTopBits(peekBits)
+ MOVQ br_value, AX
+ MOVQ peek_bits, CX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+ // v0 := table[val0&mask]
+ MOVW 0(table)(AX*2), AX // AX - v0
+
+ // br1.advance(uint8(v0.entry))
+ MOVB AH, BL // BL = uint8(v0.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // val1 := br1.peekTopBits(peekBits)
+ MOVQ peek_bits, CX
+ MOVQ br_value, AX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+ // v1 := table[val1&mask]
+ MOVW 0(table)(AX*2), AX // AX - v1
+
+ // br1.advance(uint8(v1.entry))
+ MOVB AH, BH // BH = uint8(v1.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CX, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // these two writes get coalesced
+ // buf[stream][off] = uint8(v0.entry >> 8)
+ // buf[stream][off+1] = uint8(v1.entry >> 8)
+ MOVW BX, 256(buffer)(off*1)
+
+ // SECOND PART:
+ // val2 := br1.peekTopBits(peekBits)
+ MOVQ br_value, AX
+ MOVQ peek_bits, CX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+	// v2 := table[val2&mask]
+	MOVW 0(table)(AX*2), AX // AX - v2
+
+	// br1.advance(uint8(v2.entry))
+	MOVB AH, BL // BL = uint8(v2.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // val3 := br1.peekTopBits(peekBits)
+ MOVQ peek_bits, CX
+ MOVQ br_value, AX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+	// v3 := table[val3&mask]
+	MOVW 0(table)(AX*2), AX // AX - v3
+
+	// br1.advance(uint8(v3.entry))
+	MOVB AH, BH // BH = uint8(v3.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CX, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // these two writes get coalesced
+ // buf[stream][off+2] = uint8(v2.entry >> 8)
+ // buf[stream][off+3] = uint8(v3.entry >> 8)
+ MOVW BX, 256+2(buffer)(off*1)
+
+	// update the bitReader structure
+ MOVB br_bits_read, bitReaderShifted_bitsRead(br1)
+ MOVQ br_value, bitReaderShifted_value(br1)
+ MOVQ br_offset, bitReaderShifted_off(br1)
+
+ // const stream = 2
+ // br2.fillFast()
+ MOVBQZX bitReaderShifted_bitsRead(br2), br_bits_read
+ MOVQ bitReaderShifted_value(br2), br_value
+ MOVQ bitReaderShifted_off(br2), br_offset
+
+ // if b.bitsRead >= 32 {
+ CMPQ br_bits_read, $32
+ JB skip_fill2
+
+ SUBQ $32, br_bits_read // b.bitsRead -= 32
+ SUBQ $4, br_offset // b.off -= 4
+
+ // v := b.in[b.off-4 : b.off]
+ // v = v[:4]
+ // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ MOVQ bitReaderShifted_in(br2), AX
+ MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+ MOVQ br_bits_read, CX
+ SHLQ CL, AX
+ ORQ AX, br_value
+
+ // exhausted = exhausted || (br2.off < 4)
+ CMPQ br_offset, $4
+ SETLT DL
+ ORB DL, DH
+
+ // }
+skip_fill2:
+
+ // val0 := br2.peekTopBits(peekBits)
+ MOVQ br_value, AX
+ MOVQ peek_bits, CX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+ // v0 := table[val0&mask]
+ MOVW 0(table)(AX*2), AX // AX - v0
+
+ // br2.advance(uint8(v0.entry))
+ MOVB AH, BL // BL = uint8(v0.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // val1 := br2.peekTopBits(peekBits)
+ MOVQ peek_bits, CX
+ MOVQ br_value, AX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+ // v1 := table[val1&mask]
+ MOVW 0(table)(AX*2), AX // AX - v1
+
+ // br2.advance(uint8(v1.entry))
+ MOVB AH, BH // BH = uint8(v1.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CX, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // these two writes get coalesced
+ // buf[stream][off] = uint8(v0.entry >> 8)
+ // buf[stream][off+1] = uint8(v1.entry >> 8)
+ MOVW BX, 512(buffer)(off*1)
+
+ // SECOND PART:
+ // val2 := br2.peekTopBits(peekBits)
+ MOVQ br_value, AX
+ MOVQ peek_bits, CX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+	// v2 := table[val2&mask]
+	MOVW 0(table)(AX*2), AX // AX - v2
+
+	// br2.advance(uint8(v2.entry))
+	MOVB AH, BL // BL = uint8(v2.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // val3 := br2.peekTopBits(peekBits)
+ MOVQ peek_bits, CX
+ MOVQ br_value, AX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+	// v3 := table[val3&mask]
+	MOVW 0(table)(AX*2), AX // AX - v3
+
+	// br2.advance(uint8(v3.entry))
+	MOVB AH, BH // BH = uint8(v3.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CX, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // these two writes get coalesced
+ // buf[stream][off+2] = uint8(v2.entry >> 8)
+ // buf[stream][off+3] = uint8(v3.entry >> 8)
+ MOVW BX, 512+2(buffer)(off*1)
+
+	// update the bitReader structure
+ MOVB br_bits_read, bitReaderShifted_bitsRead(br2)
+ MOVQ br_value, bitReaderShifted_value(br2)
+ MOVQ br_offset, bitReaderShifted_off(br2)
+
+ // const stream = 3
+ // br3.fillFast()
+ MOVBQZX bitReaderShifted_bitsRead(br3), br_bits_read
+ MOVQ bitReaderShifted_value(br3), br_value
+ MOVQ bitReaderShifted_off(br3), br_offset
+
+ // if b.bitsRead >= 32 {
+ CMPQ br_bits_read, $32
+ JB skip_fill3
+
+ SUBQ $32, br_bits_read // b.bitsRead -= 32
+ SUBQ $4, br_offset // b.off -= 4
+
+ // v := b.in[b.off-4 : b.off]
+ // v = v[:4]
+ // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ MOVQ bitReaderShifted_in(br3), AX
+ MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+ MOVQ br_bits_read, CX
+ SHLQ CL, AX
+ ORQ AX, br_value
+
+ // exhausted = exhausted || (br3.off < 4)
+ CMPQ br_offset, $4
+ SETLT DL
+ ORB DL, DH
+
+ // }
+skip_fill3:
+
+ // val0 := br3.peekTopBits(peekBits)
+ MOVQ br_value, AX
+ MOVQ peek_bits, CX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+ // v0 := table[val0&mask]
+ MOVW 0(table)(AX*2), AX // AX - v0
+
+ // br3.advance(uint8(v0.entry))
+ MOVB AH, BL // BL = uint8(v0.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // val1 := br3.peekTopBits(peekBits)
+ MOVQ peek_bits, CX
+ MOVQ br_value, AX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+ // v1 := table[val1&mask]
+ MOVW 0(table)(AX*2), AX // AX - v1
+
+ // br3.advance(uint8(v1.entry))
+ MOVB AH, BH // BH = uint8(v1.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CX, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // these two writes get coalesced
+ // buf[stream][off] = uint8(v0.entry >> 8)
+ // buf[stream][off+1] = uint8(v1.entry >> 8)
+ MOVW BX, 768(buffer)(off*1)
+
+ // SECOND PART:
+ // val2 := br3.peekTopBits(peekBits)
+ MOVQ br_value, AX
+ MOVQ peek_bits, CX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+	// v2 := table[val2&mask]
+	MOVW 0(table)(AX*2), AX // AX - v2
+
+	// br3.advance(uint8(v2.entry))
+	MOVB AH, BL // BL = uint8(v2.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // val3 := br3.peekTopBits(peekBits)
+ MOVQ peek_bits, CX
+ MOVQ br_value, AX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+	// v3 := table[val3&mask]
+	MOVW 0(table)(AX*2), AX // AX - v3
+
+	// br3.advance(uint8(v3.entry))
+	MOVB AH, BH // BH = uint8(v3.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CX, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // these two writes get coalesced
+ // buf[stream][off+2] = uint8(v2.entry >> 8)
+ // buf[stream][off+3] = uint8(v3.entry >> 8)
+ MOVW BX, 768+2(buffer)(off*1)
+
+	// update the bitReader structure
+ MOVB br_bits_read, bitReaderShifted_bitsRead(br3)
+ MOVQ br_value, bitReaderShifted_value(br3)
+ MOVQ br_offset, bitReaderShifted_off(br3)
+
+	ADDQ $4, off // off += 4
+
+ TESTB DH, DH // any br[i].ofs < 4?
+ JNZ end
+
+ CMPQ off, $bufoff
+ JL main_loop
+
+end:
+ MOVQ 0(SP), BP
+
+ MOVB off, ret+56(FP)
+ RET
+
+#undef off
+#undef buffer
+#undef table
+
+#undef br_bits_read
+#undef br_value
+#undef br_offset
+#undef peek_bits
+#undef exhausted
+
+#undef br0
+#undef br1
+#undef br2
+#undef br3
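
The assembly above keeps each bitReaderShifted's fields in registers and, per stream and per iteration, repeats fill, peek, table lookup and advance. For readers following the embedded comments rather than the instructions, one such step looks roughly like the plain-Go sketch below; bitReader, decodeOne and the identity table are simplified stand-ins, not the package's actual bitReaderShifted or dEntrySingle.

package main

import "fmt"

// bitReader is a simplified stand-in for the state the assembly keeps in
// registers: value holds up to 64 bits left-aligned, bitsRead counts how
// many of them are already consumed, off indexes the backing input.
type bitReader struct {
	in       []byte
	off      int
	value    uint64
	bitsRead uint8
}

// fillFast mirrors the commented refill in the order the assembly does it:
// once 32 or more bits are consumed, pull in four more bytes.
func (b *bitReader) fillFast() {
	if b.bitsRead < 32 {
		return
	}
	b.bitsRead -= 32
	b.off -= 4
	v := b.in[b.off : b.off+4]
	low := uint32(v[0]) | uint32(v[1])<<8 | uint32(v[2])<<16 | uint32(v[3])<<24
	b.value |= uint64(low) << (b.bitsRead & 63)
}

// decodeOne is one peek/lookup/advance, as in the val0/v0 comments: the low
// byte of a table entry is the bit count, the high byte the decoded symbol.
func decodeOne(b *bitReader, table []uint16, peekBits uint8) byte {
	val := b.value >> peekBits     // peekTopBits
	entry := table[val]            // dEntrySingle-style entry
	b.value <<= uint64(entry) & 63 // shift out the consumed bits
	b.bitsRead += uint8(entry)
	return byte(entry >> 8)
}

func main() {
	// Contrived tableLog=8 identity table: every 8-bit pattern decodes to
	// itself and consumes 8 bits, just to exercise decodeOne.
	table := make([]uint16, 256)
	for i := range table {
		table[i] = uint16(i)<<8 | 8
	}
	br := &bitReader{value: uint64(0xAB) << 56}
	fmt.Printf("%#x\n", decodeOne(br, table, 56)) // 0xab
}
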
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s.in b/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s.in
new file mode 100644
index 00000000..6d477a2c
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s.in
@@ -0,0 +1,197 @@
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+#include "funcdata.h"
+#include "go_asm.h"
+
+
+#define bufoff 256 // see decompress.go, we're using [4][256]byte table
+
+//func decompress4x_8b_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
+//	peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
+TEXT ·decompress4x_8b_loop_x86(SB), NOSPLIT, $8
+#define off R8
+#define buffer DI
+#define table SI
+
+#define br_bits_read R9
+#define br_value R10
+#define br_offset R11
+#define peek_bits R12
+#define exhausted DX
+
+#define br0 R13
+#define br1 R14
+#define br2 R15
+#define br3 BP
+
+ MOVQ BP, 0(SP)
+
+ XORQ exhausted, exhausted // exhausted = false
+ XORQ off, off // off = 0
+
+ MOVBQZX peekBits+32(FP), peek_bits
+ MOVQ buf+40(FP), buffer
+ MOVQ tbl+48(FP), table
+
+ MOVQ pbr0+0(FP), br0
+ MOVQ pbr1+8(FP), br1
+ MOVQ pbr2+16(FP), br2
+ MOVQ pbr3+24(FP), br3
+
+main_loop:
+{{ define "decode_2_values_x86" }}
+ // const stream = {{ var "id" }}
+ // br{{ var "id"}}.fillFast()
+ MOVBQZX bitReaderShifted_bitsRead(br{{ var "id" }}), br_bits_read
+ MOVQ bitReaderShifted_value(br{{ var "id" }}), br_value
+ MOVQ bitReaderShifted_off(br{{ var "id" }}), br_offset
+
+ // if b.bitsRead >= 32 {
+ CMPQ br_bits_read, $32
+ JB skip_fill{{ var "id" }}
+
+ SUBQ $32, br_bits_read // b.bitsRead -= 32
+ SUBQ $4, br_offset // b.off -= 4
+
+ // v := b.in[b.off-4 : b.off]
+ // v = v[:4]
+ // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ MOVQ bitReaderShifted_in(br{{ var "id" }}), AX
+ MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+ MOVQ br_bits_read, CX
+ SHLQ CL, AX
+ ORQ AX, br_value
+
+ // exhausted = exhausted || (br{{ var "id"}}.off < 4)
+ CMPQ br_offset, $4
+ SETLT DL
+ ORB DL, DH
+ // }
+skip_fill{{ var "id" }}:
+
+ // val0 := br{{ var "id"}}.peekTopBits(peekBits)
+ MOVQ br_value, AX
+ MOVQ peek_bits, CX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+ // v0 := table[val0&mask]
+ MOVW 0(table)(AX*2), AX // AX - v0
+
+ // br{{ var "id"}}.advance(uint8(v0.entry))
+ MOVB AH, BL // BL = uint8(v0.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // val1 := br{{ var "id"}}.peekTopBits(peekBits)
+ MOVQ peek_bits, CX
+ MOVQ br_value, AX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+ // v1 := table[val1&mask]
+ MOVW 0(table)(AX*2), AX // AX - v1
+
+ // br{{ var "id"}}.advance(uint8(v1.entry))
+ MOVB AH, BH // BH = uint8(v1.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CX, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+
+ // these two writes get coalesced
+ // buf[stream][off] = uint8(v0.entry >> 8)
+ // buf[stream][off+1] = uint8(v1.entry >> 8)
+ MOVW BX, {{ var "bufofs" }}(buffer)(off*1)
+
+ // SECOND PART:
+ // val2 := br{{ var "id"}}.peekTopBits(peekBits)
+ MOVQ br_value, AX
+ MOVQ peek_bits, CX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+	// v2 := table[val2&mask]
+	MOVW 0(table)(AX*2), AX // AX - v2
+
+	// br{{ var "id"}}.advance(uint8(v2.entry))
+	MOVB AH, BL // BL = uint8(v2.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // val3 := br{{ var "id"}}.peekTopBits(peekBits)
+ MOVQ peek_bits, CX
+ MOVQ br_value, AX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+	// v3 := table[val3&mask]
+	MOVW 0(table)(AX*2), AX // AX - v3
+
+	// br{{ var "id"}}.advance(uint8(v3.entry))
+	MOVB AH, BH // BH = uint8(v3.entry >> 8)
+ MOVBQZX AL, CX
+ SHLQ CX, br_value // value <<= n
+ ADDQ CX, br_bits_read // bits_read += n
+
+
+ // these two writes get coalesced
+ // buf[stream][off+2] = uint8(v2.entry >> 8)
+ // buf[stream][off+3] = uint8(v3.entry >> 8)
+ MOVW BX, {{ var "bufofs" }}+2(buffer)(off*1)
+
+	// update the bitReader structure
+ MOVB br_bits_read, bitReaderShifted_bitsRead(br{{ var "id" }})
+ MOVQ br_value, bitReaderShifted_value(br{{ var "id" }})
+ MOVQ br_offset, bitReaderShifted_off(br{{ var "id" }})
+{{ end }}
+
+ {{ set "id" "0" }}
+ {{ set "ofs" "0" }}
+ {{ set "bufofs" "0" }} {{/* id * bufoff */}}
+ {{ template "decode_2_values_x86" . }}
+
+ {{ set "id" "1" }}
+ {{ set "ofs" "8" }}
+ {{ set "bufofs" "256" }}
+ {{ template "decode_2_values_x86" . }}
+
+ {{ set "id" "2" }}
+ {{ set "ofs" "16" }}
+ {{ set "bufofs" "512" }}
+ {{ template "decode_2_values_x86" . }}
+
+ {{ set "id" "3" }}
+ {{ set "ofs" "24" }}
+ {{ set "bufofs" "768" }}
+ {{ template "decode_2_values_x86" . }}
+
+	ADDQ $4, off // off += 4
+
+ TESTB DH, DH // any br[i].ofs < 4?
+ JNZ end
+
+ CMPQ off, $bufoff
+ JL main_loop
+end:
+ MOVQ 0(SP), BP
+
+ MOVB off, ret+56(FP)
+ RET
+#undef off
+#undef buffer
+#undef table
+
+#undef br_bits_read
+#undef br_value
+#undef br_offset
+#undef peek_bits
+#undef exhausted
+
+#undef br0
+#undef br1
+#undef br2
+#undef br3
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
new file mode 100644
index 00000000..d47f6644
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
@@ -0,0 +1,181 @@
+//go:build amd64 && !appengine && !noasm && gc
+// +build amd64,!appengine,!noasm,gc
+
+// This file contains the specialisation of Decoder.Decompress4X
+// that uses an asm implementation of its main loop.
+package huff0
+
+import (
+ "errors"
+ "fmt"
+)
+
+// decompress4x_main_loop_x86 is an x86 assembler implementation
+// of Decompress4X when tablelog > 8.
+// go:noescape
+func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
+ peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
+
+// decompress4x_8b_loop_x86 is an x86 assembler implementation
+// of Decompress4X when tablelog <= 8 which decodes 4 entries
+// per loop.
+// go:noescape
+func decompress4x_8b_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
+ peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
+
+// fallback8BitSize is the size where using Go version is faster.
+const fallback8BitSize = 800
+
+// Decompress4X will decompress a 4X encoded stream.
+// The length of the supplied input must match the end of a block exactly.
+// The *capacity* of the dst slice must match the destination size of
+// the uncompressed data exactly.
+func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
+ if len(d.dt.single) == 0 {
+ return nil, errors.New("no table loaded")
+ }
+ if len(src) < 6+(4*1) {
+ return nil, errors.New("input too small")
+ }
+
+ use8BitTables := d.actualTableLog <= 8
+ if cap(dst) < fallback8BitSize && use8BitTables {
+ return d.decompress4X8bit(dst, src)
+ }
+ var br [4]bitReaderShifted
+ // Decode "jump table"
+ start := 6
+ for i := 0; i < 3; i++ {
+ length := int(src[i*2]) | (int(src[i*2+1]) << 8)
+ if start+length >= len(src) {
+ return nil, errors.New("truncated input (or invalid offset)")
+ }
+ err := br[i].init(src[start : start+length])
+ if err != nil {
+ return nil, err
+ }
+ start += length
+ }
+ err := br[3].init(src[start:])
+ if err != nil {
+ return nil, err
+ }
+
+ // destination, offset to match first output
+ dstSize := cap(dst)
+ dst = dst[:dstSize]
+ out := dst
+ dstEvery := (dstSize + 3) / 4
+
+ const tlSize = 1 << tableLogMax
+ const tlMask = tlSize - 1
+ single := d.dt.single[:tlSize]
+
+ // Use temp table to avoid bound checks/append penalty.
+ buf := d.buffer()
+ var off uint8
+ var decoded int
+
+ const debug = false
+
+ // see: bitReaderShifted.peekBitsFast()
+ peekBits := uint8((64 - d.actualTableLog) & 63)
+
+ // Decode 2 values from each decoder/loop.
+ const bufoff = 256
+ for {
+ if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
+ break
+ }
+
+ if use8BitTables {
+ off = decompress4x_8b_loop_x86(&br[0], &br[1], &br[2], &br[3], peekBits, &buf[0][0], &single[0])
+ } else {
+ off = decompress4x_main_loop_x86(&br[0], &br[1], &br[2], &br[3], peekBits, &buf[0][0], &single[0])
+ }
+ if debug {
+ fmt.Print("DEBUG: ")
+ fmt.Printf("off=%d,", off)
+ for i := 0; i < 4; i++ {
+ fmt.Printf(" br[%d]={bitsRead=%d, value=%x, off=%d}",
+ i, br[i].bitsRead, br[i].value, br[i].off)
+ }
+ fmt.Println("")
+ }
+
+ if off != 0 {
+ break
+ }
+
+ if bufoff > dstEvery {
+ d.bufs.Put(buf)
+ return nil, errors.New("corruption detected: stream overrun 1")
+ }
+ copy(out, buf[0][:])
+ copy(out[dstEvery:], buf[1][:])
+ copy(out[dstEvery*2:], buf[2][:])
+ copy(out[dstEvery*3:], buf[3][:])
+ out = out[bufoff:]
+ decoded += bufoff * 4
+		// There must be at least 3 buffers left.
+ if len(out) < dstEvery*3 {
+ d.bufs.Put(buf)
+ return nil, errors.New("corruption detected: stream overrun 2")
+ }
+ }
+ if off > 0 {
+ ioff := int(off)
+ if len(out) < dstEvery*3+ioff {
+ d.bufs.Put(buf)
+ return nil, errors.New("corruption detected: stream overrun 3")
+ }
+ copy(out, buf[0][:off])
+ copy(out[dstEvery:], buf[1][:off])
+ copy(out[dstEvery*2:], buf[2][:off])
+ copy(out[dstEvery*3:], buf[3][:off])
+ decoded += int(off) * 4
+ out = out[off:]
+ }
+
+ // Decode remaining.
+ remainBytes := dstEvery - (decoded / 4)
+ for i := range br {
+ offset := dstEvery * i
+ endsAt := offset + remainBytes
+ if endsAt > len(out) {
+ endsAt = len(out)
+ }
+ br := &br[i]
+ bitsLeft := br.remaining()
+ for bitsLeft > 0 {
+ br.fill()
+ if offset >= endsAt {
+ d.bufs.Put(buf)
+ return nil, errors.New("corruption detected: stream overrun 4")
+ }
+
+ // Read value and increment offset.
+ val := br.peekBitsFast(d.actualTableLog)
+ v := single[val&tlMask].entry
+ nBits := uint8(v)
+ br.advance(nBits)
+ bitsLeft -= uint(nBits)
+ out[offset] = uint8(v >> 8)
+ offset++
+ }
+ if offset != endsAt {
+ d.bufs.Put(buf)
+ return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
+ }
+ decoded += offset - dstEvery*i
+ err = br.close()
+ if err != nil {
+ return nil, err
+ }
+ }
+ d.bufs.Put(buf)
+ if dstSize != decoded {
+ return nil, errors.New("corruption detected: short output block")
+ }
+ return dst, nil
+}
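
After the assembly main loop returns a non-zero off (meaning a stream ran low before the [4][256]byte buffer filled), the wrapper above flushes the partial buffers and finishes with the scalar tail, bounding each stream by endsAt = offset + remainBytes and clamping to len(out) for the last, possibly shorter, stream. Below is a small sketch of that bookkeeping with hypothetical sizes; the numbers are illustrative only.

package main

import "fmt"

func main() {
	// Hypothetical block: dstSize bytes split over 4 streams, with the
	// buffered fast loop having already emitted 192 bytes per stream.
	dstSize := 998
	dstEvery := (dstSize + 3) / 4 // 250
	decoded := 4 * 192
	outLen := dstSize - decoded/4 // length of the remaining output window

	remainBytes := dstEvery - decoded/4
	for i := 0; i < 4; i++ {
		offset := dstEvery * i
		endsAt := offset + remainBytes
		if endsAt > outLen {
			endsAt = outLen // the last stream may be shorter than dstEvery
		}
		fmt.Printf("stream %d decodes out[%d:%d]\n", i, offset, endsAt)
	}
}
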
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
new file mode 100644
index 00000000..2edad3ea
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
@@ -0,0 +1,506 @@
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+#include "funcdata.h"
+#include "go_asm.h"
+
+#ifdef GOAMD64_v4
+#ifndef GOAMD64_v3
+#define GOAMD64_v3
+#endif
+#endif
+
+#define bufoff 256 // see decompress.go, we're using [4][256]byte table
+
+// func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
+//	peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
+TEXT ·decompress4x_main_loop_x86(SB), NOSPLIT, $8
+#define off R8
+#define buffer DI
+#define table SI
+
+#define br_bits_read R9
+#define br_value R10
+#define br_offset R11
+#define peek_bits R12
+#define exhausted DX
+
+#define br0 R13
+#define br1 R14
+#define br2 R15
+#define br3 BP
+
+ MOVQ BP, 0(SP)
+
+ XORQ exhausted, exhausted // exhausted = false
+ XORQ off, off // off = 0
+
+ MOVBQZX peekBits+32(FP), peek_bits
+ MOVQ buf+40(FP), buffer
+ MOVQ tbl+48(FP), table
+
+ MOVQ pbr0+0(FP), br0
+ MOVQ pbr1+8(FP), br1
+ MOVQ pbr2+16(FP), br2
+ MOVQ pbr3+24(FP), br3
+
+main_loop:
+
+ // const stream = 0
+ // br0.fillFast()
+ MOVBQZX bitReaderShifted_bitsRead(br0), br_bits_read
+ MOVQ bitReaderShifted_value(br0), br_value
+ MOVQ bitReaderShifted_off(br0), br_offset
+
+ // We must have at least 2 * max tablelog left
+ CMPQ br_bits_read, $64-22
+ JBE skip_fill0
+
+ SUBQ $32, br_bits_read // b.bitsRead -= 32
+ SUBQ $4, br_offset // b.off -= 4
+
+ // v := b.in[b.off-4 : b.off]
+ // v = v[:4]
+ // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ MOVQ bitReaderShifted_in(br0), AX
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+#ifdef GOAMD64_v3
+ SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
+
+#else
+ MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
+ MOVQ br_bits_read, CX
+ SHLQ CL, AX
+
+#endif
+
+ ORQ AX, br_value
+
+ // exhausted = exhausted || (br0.off < 4)
+ CMPQ br_offset, $4
+ SETLT DL
+ ORB DL, DH
+
+ // }
+skip_fill0:
+
+ // val0 := br0.peekTopBits(peekBits)
+#ifdef GOAMD64_v3
+ SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
+
+#else
+ MOVQ br_value, AX
+ MOVQ peek_bits, CX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+#endif
+
+ // v0 := table[val0&mask]
+ MOVW 0(table)(AX*2), AX // AX - v0
+
+ // br0.advance(uint8(v0.entry))
+ MOVB AH, BL // BL = uint8(v0.entry >> 8)
+
+#ifdef GOAMD64_v3
+ MOVBQZX AL, CX
+ SHLXQ AX, br_value, br_value // value <<= n
+
+#else
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+
+#endif
+
+ ADDQ CX, br_bits_read // bits_read += n
+
+#ifdef GOAMD64_v3
+ SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
+
+#else
+ // val1 := br0.peekTopBits(peekBits)
+ MOVQ peek_bits, CX
+ MOVQ br_value, AX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+#endif
+
+ // v1 := table[val1&mask]
+ MOVW 0(table)(AX*2), AX // AX - v1
+
+ // br0.advance(uint8(v1.entry))
+ MOVB AH, BH // BH = uint8(v1.entry >> 8)
+
+#ifdef GOAMD64_v3
+ MOVBQZX AL, CX
+ SHLXQ AX, br_value, br_value // value <<= n
+
+#else
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+
+#endif
+
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // these two writes get coalesced
+ // buf[stream][off] = uint8(v0.entry >> 8)
+ // buf[stream][off+1] = uint8(v1.entry >> 8)
+ MOVW BX, 0(buffer)(off*1)
+
+	// update the bitReader structure
+ MOVB br_bits_read, bitReaderShifted_bitsRead(br0)
+ MOVQ br_value, bitReaderShifted_value(br0)
+ MOVQ br_offset, bitReaderShifted_off(br0)
+
+ // const stream = 1
+ // br1.fillFast()
+ MOVBQZX bitReaderShifted_bitsRead(br1), br_bits_read
+ MOVQ bitReaderShifted_value(br1), br_value
+ MOVQ bitReaderShifted_off(br1), br_offset
+
+ // We must have at least 2 * max tablelog left
+ CMPQ br_bits_read, $64-22
+ JBE skip_fill1
+
+ SUBQ $32, br_bits_read // b.bitsRead -= 32
+ SUBQ $4, br_offset // b.off -= 4
+
+ // v := b.in[b.off-4 : b.off]
+ // v = v[:4]
+ // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ MOVQ bitReaderShifted_in(br1), AX
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+#ifdef GOAMD64_v3
+ SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
+
+#else
+ MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
+ MOVQ br_bits_read, CX
+ SHLQ CL, AX
+
+#endif
+
+ ORQ AX, br_value
+
+ // exhausted = exhausted || (br1.off < 4)
+ CMPQ br_offset, $4
+ SETLT DL
+ ORB DL, DH
+
+ // }
+skip_fill1:
+
+ // val0 := br1.peekTopBits(peekBits)
+#ifdef GOAMD64_v3
+ SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
+
+#else
+ MOVQ br_value, AX
+ MOVQ peek_bits, CX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+#endif
+
+ // v0 := table[val0&mask]
+ MOVW 0(table)(AX*2), AX // AX - v0
+
+ // br1.advance(uint8(v0.entry))
+ MOVB AH, BL // BL = uint8(v0.entry >> 8)
+
+#ifdef GOAMD64_v3
+ MOVBQZX AL, CX
+ SHLXQ AX, br_value, br_value // value <<= n
+
+#else
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+
+#endif
+
+ ADDQ CX, br_bits_read // bits_read += n
+
+#ifdef GOAMD64_v3
+ SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
+
+#else
+ // val1 := br1.peekTopBits(peekBits)
+ MOVQ peek_bits, CX
+ MOVQ br_value, AX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+#endif
+
+ // v1 := table[val1&mask]
+ MOVW 0(table)(AX*2), AX // AX - v1
+
+ // br1.advance(uint8(v1.entry))
+ MOVB AH, BH // BH = uint8(v1.entry >> 8)
+
+#ifdef GOAMD64_v3
+ MOVBQZX AL, CX
+ SHLXQ AX, br_value, br_value // value <<= n
+
+#else
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+
+#endif
+
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // these two writes get coalesced
+ // buf[stream][off] = uint8(v0.entry >> 8)
+ // buf[stream][off+1] = uint8(v1.entry >> 8)
+ MOVW BX, 256(buffer)(off*1)
+
+	// update the bitReader structure
+ MOVB br_bits_read, bitReaderShifted_bitsRead(br1)
+ MOVQ br_value, bitReaderShifted_value(br1)
+ MOVQ br_offset, bitReaderShifted_off(br1)
+
+ // const stream = 2
+ // br2.fillFast()
+ MOVBQZX bitReaderShifted_bitsRead(br2), br_bits_read
+ MOVQ bitReaderShifted_value(br2), br_value
+ MOVQ bitReaderShifted_off(br2), br_offset
+
+ // We must have at least 2 * max tablelog left
+ CMPQ br_bits_read, $64-22
+ JBE skip_fill2
+
+ SUBQ $32, br_bits_read // b.bitsRead -= 32
+ SUBQ $4, br_offset // b.off -= 4
+
+ // v := b.in[b.off-4 : b.off]
+ // v = v[:4]
+ // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ MOVQ bitReaderShifted_in(br2), AX
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+#ifdef GOAMD64_v3
+ SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
+
+#else
+ MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
+ MOVQ br_bits_read, CX
+ SHLQ CL, AX
+
+#endif
+
+ ORQ AX, br_value
+
+ // exhausted = exhausted || (br2.off < 4)
+ CMPQ br_offset, $4
+ SETLT DL
+ ORB DL, DH
+
+ // }
+skip_fill2:
+
+ // val0 := br2.peekTopBits(peekBits)
+#ifdef GOAMD64_v3
+ SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
+
+#else
+ MOVQ br_value, AX
+ MOVQ peek_bits, CX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+#endif
+
+ // v0 := table[val0&mask]
+ MOVW 0(table)(AX*2), AX // AX - v0
+
+ // br2.advance(uint8(v0.entry))
+ MOVB AH, BL // BL = uint8(v0.entry >> 8)
+
+#ifdef GOAMD64_v3
+ MOVBQZX AL, CX
+ SHLXQ AX, br_value, br_value // value <<= n
+
+#else
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+
+#endif
+
+ ADDQ CX, br_bits_read // bits_read += n
+
+#ifdef GOAMD64_v3
+ SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
+
+#else
+ // val1 := br2.peekTopBits(peekBits)
+ MOVQ peek_bits, CX
+ MOVQ br_value, AX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+#endif
+
+ // v1 := table[val1&mask]
+ MOVW 0(table)(AX*2), AX // AX - v1
+
+ // br2.advance(uint8(v1.entry))
+ MOVB AH, BH // BH = uint8(v1.entry >> 8)
+
+#ifdef GOAMD64_v3
+ MOVBQZX AL, CX
+ SHLXQ AX, br_value, br_value // value <<= n
+
+#else
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+
+#endif
+
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // these two writes get coalesced
+ // buf[stream][off] = uint8(v0.entry >> 8)
+ // buf[stream][off+1] = uint8(v1.entry >> 8)
+ MOVW BX, 512(buffer)(off*1)
+
+	// update the bitReader structure
+ MOVB br_bits_read, bitReaderShifted_bitsRead(br2)
+ MOVQ br_value, bitReaderShifted_value(br2)
+ MOVQ br_offset, bitReaderShifted_off(br2)
+
+ // const stream = 3
+ // br3.fillFast()
+ MOVBQZX bitReaderShifted_bitsRead(br3), br_bits_read
+ MOVQ bitReaderShifted_value(br3), br_value
+ MOVQ bitReaderShifted_off(br3), br_offset
+
+ // We must have at least 2 * max tablelog left
+ CMPQ br_bits_read, $64-22
+ JBE skip_fill3
+
+ SUBQ $32, br_bits_read // b.bitsRead -= 32
+ SUBQ $4, br_offset // b.off -= 4
+
+ // v := b.in[b.off-4 : b.off]
+ // v = v[:4]
+ // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ MOVQ bitReaderShifted_in(br3), AX
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+#ifdef GOAMD64_v3
+ SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
+
+#else
+ MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
+ MOVQ br_bits_read, CX
+ SHLQ CL, AX
+
+#endif
+
+ ORQ AX, br_value
+
+ // exhausted = exhausted || (br3.off < 4)
+ CMPQ br_offset, $4
+ SETLT DL
+ ORB DL, DH
+
+ // }
+skip_fill3:
+
+ // val0 := br3.peekTopBits(peekBits)
+#ifdef GOAMD64_v3
+ SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
+
+#else
+ MOVQ br_value, AX
+ MOVQ peek_bits, CX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+#endif
+
+ // v0 := table[val0&mask]
+ MOVW 0(table)(AX*2), AX // AX - v0
+
+ // br3.advance(uint8(v0.entry))
+ MOVB AH, BL // BL = uint8(v0.entry >> 8)
+
+#ifdef GOAMD64_v3
+ MOVBQZX AL, CX
+ SHLXQ AX, br_value, br_value // value <<= n
+
+#else
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+
+#endif
+
+ ADDQ CX, br_bits_read // bits_read += n
+
+#ifdef GOAMD64_v3
+ SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
+
+#else
+ // val1 := br3.peekTopBits(peekBits)
+ MOVQ peek_bits, CX
+ MOVQ br_value, AX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+#endif
+
+ // v1 := table[val1&mask]
+ MOVW 0(table)(AX*2), AX // AX - v1
+
+ // br3.advance(uint8(v1.entry))
+ MOVB AH, BH // BH = uint8(v1.entry >> 8)
+
+#ifdef GOAMD64_v3
+ MOVBQZX AL, CX
+ SHLXQ AX, br_value, br_value // value <<= n
+
+#else
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+
+#endif
+
+ ADDQ CX, br_bits_read // bits_read += n
+
+ // these two writes get coalesced
+ // buf[stream][off] = uint8(v0.entry >> 8)
+ // buf[stream][off+1] = uint8(v1.entry >> 8)
+ MOVW BX, 768(buffer)(off*1)
+
+	// update the bitReader structure
+ MOVB br_bits_read, bitReaderShifted_bitsRead(br3)
+ MOVQ br_value, bitReaderShifted_value(br3)
+ MOVQ br_offset, bitReaderShifted_off(br3)
+
+ ADDQ $2, off // off += 2
+
+ TESTB DH, DH // any br[i].ofs < 4?
+ JNZ end
+
+ CMPQ off, $bufoff
+ JL main_loop
+
+end:
+ MOVQ 0(SP), BP
+
+ MOVB off, ret+56(FP)
+ RET
+
+#undef off
+#undef buffer
+#undef table
+
+#undef br_bits_read
+#undef br_value
+#undef br_offset
+#undef peek_bits
+#undef exhausted
+
+#undef br0
+#undef br1
+#undef br2
+#undef br3
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s.in b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s.in
new file mode 100644
index 00000000..330d86ae
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s.in
@@ -0,0 +1,195 @@
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+#include "funcdata.h"
+#include "go_asm.h"
+
+#ifdef GOAMD64_v4
+#ifndef GOAMD64_v3
+#define GOAMD64_v3
+#endif
+#endif
+
+#define bufoff 256 // see decompress.go, we're using [4][256]byte table
+
+//func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
+//	peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
+TEXT ·decompress4x_main_loop_x86(SB), NOSPLIT, $8
+#define off R8
+#define buffer DI
+#define table SI
+
+#define br_bits_read R9
+#define br_value R10
+#define br_offset R11
+#define peek_bits R12
+#define exhausted DX
+
+#define br0 R13
+#define br1 R14
+#define br2 R15
+#define br3 BP
+
+ MOVQ BP, 0(SP)
+
+ XORQ exhausted, exhausted // exhausted = false
+ XORQ off, off // off = 0
+
+ MOVBQZX peekBits+32(FP), peek_bits
+ MOVQ buf+40(FP), buffer
+ MOVQ tbl+48(FP), table
+
+ MOVQ pbr0+0(FP), br0
+ MOVQ pbr1+8(FP), br1
+ MOVQ pbr2+16(FP), br2
+ MOVQ pbr3+24(FP), br3
+
+main_loop:
+{{ define "decode_2_values_x86" }}
+ // const stream = {{ var "id" }}
+ // br{{ var "id"}}.fillFast()
+ MOVBQZX bitReaderShifted_bitsRead(br{{ var "id" }}), br_bits_read
+ MOVQ bitReaderShifted_value(br{{ var "id" }}), br_value
+ MOVQ bitReaderShifted_off(br{{ var "id" }}), br_offset
+
+ // We must have at least 2 * max tablelog left
+ CMPQ br_bits_read, $64-22
+ JBE skip_fill{{ var "id" }}
+
+ SUBQ $32, br_bits_read // b.bitsRead -= 32
+ SUBQ $4, br_offset // b.off -= 4
+
+ // v := b.in[b.off-4 : b.off]
+ // v = v[:4]
+ // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ MOVQ bitReaderShifted_in(br{{ var "id" }}), AX
+
+ // b.value |= uint64(low) << (b.bitsRead & 63)
+#ifdef GOAMD64_v3
+ SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
+#else
+ MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
+ MOVQ br_bits_read, CX
+ SHLQ CL, AX
+#endif
+
+ ORQ AX, br_value
+
+ // exhausted = exhausted || (br{{ var "id"}}.off < 4)
+ CMPQ br_offset, $4
+ SETLT DL
+ ORB DL, DH
+ // }
+skip_fill{{ var "id" }}:
+
+ // val0 := br{{ var "id"}}.peekTopBits(peekBits)
+#ifdef GOAMD64_v3
+ SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
+#else
+ MOVQ br_value, AX
+ MOVQ peek_bits, CX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+#endif
+
+ // v0 := table[val0&mask]
+ MOVW 0(table)(AX*2), AX // AX - v0
+
+ // br{{ var "id"}}.advance(uint8(v0.entry))
+ MOVB AH, BL // BL = uint8(v0.entry >> 8)
+
+#ifdef GOAMD64_v3
+ MOVBQZX AL, CX
+ SHLXQ AX, br_value, br_value // value <<= n
+#else
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+#endif
+
+ ADDQ CX, br_bits_read // bits_read += n
+
+
+#ifdef GOAMD64_v3
+ SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
+#else
+ // val1 := br{{ var "id"}}.peekTopBits(peekBits)
+ MOVQ peek_bits, CX
+ MOVQ br_value, AX
+ SHRQ CL, AX // AX = (value >> peek_bits) & mask
+#endif
+
+ // v1 := table[val1&mask]
+ MOVW 0(table)(AX*2), AX // AX - v1
+
+ // br{{ var "id"}}.advance(uint8(v1.entry))
+ MOVB AH, BH // BH = uint8(v1.entry >> 8)
+
+#ifdef GOAMD64_v3
+ MOVBQZX AL, CX
+ SHLXQ AX, br_value, br_value // value <<= n
+#else
+ MOVBQZX AL, CX
+ SHLQ CL, br_value // value <<= n
+#endif
+
+ ADDQ CX, br_bits_read // bits_read += n
+
+
+ // these two writes get coalesced
+ // buf[stream][off] = uint8(v0.entry >> 8)
+ // buf[stream][off+1] = uint8(v1.entry >> 8)
+ MOVW BX, {{ var "bufofs" }}(buffer)(off*1)
+
+	// update the bitReader structure
+ MOVB br_bits_read, bitReaderShifted_bitsRead(br{{ var "id" }})
+ MOVQ br_value, bitReaderShifted_value(br{{ var "id" }})
+ MOVQ br_offset, bitReaderShifted_off(br{{ var "id" }})
+{{ end }}
+
+ {{ set "id" "0" }}
+ {{ set "ofs" "0" }}
+ {{ set "bufofs" "0" }} {{/* id * bufoff */}}
+ {{ template "decode_2_values_x86" . }}
+
+ {{ set "id" "1" }}
+ {{ set "ofs" "8" }}
+ {{ set "bufofs" "256" }}
+ {{ template "decode_2_values_x86" . }}
+
+ {{ set "id" "2" }}
+ {{ set "ofs" "16" }}
+ {{ set "bufofs" "512" }}
+ {{ template "decode_2_values_x86" . }}
+
+ {{ set "id" "3" }}
+ {{ set "ofs" "24" }}
+ {{ set "bufofs" "768" }}
+ {{ template "decode_2_values_x86" . }}
+
+ ADDQ $2, off // off += 2
+
+ TESTB DH, DH // any br[i].ofs < 4?
+ JNZ end
+
+ CMPQ off, $bufoff
+ JL main_loop
+end:
+ MOVQ 0(SP), BP
+
+ MOVB off, ret+56(FP)
+ RET
+#undef off
+#undef buffer
+#undef table
+
+#undef br_bits_read
+#undef br_value
+#undef br_offset
+#undef peek_bits
+#undef exhausted
+
+#undef br0
+#undef br1
+#undef br2
+#undef br3
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go
new file mode 100644
index 00000000..126b4d68
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go
@@ -0,0 +1,193 @@
+//go:build !amd64 || appengine || !gc || noasm
+// +build !amd64 appengine !gc noasm
+
+// This file contains a generic implementation of Decoder.Decompress4X.
+package huff0
+
+import (
+ "errors"
+ "fmt"
+)
+
+// Decompress4X will decompress a 4X encoded stream.
+// The length of the supplied input must match the end of a block exactly.
+// The *capacity* of the dst slice must match the destination size of
+// the uncompressed data exactly.
+func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
+ if len(d.dt.single) == 0 {
+ return nil, errors.New("no table loaded")
+ }
+ if len(src) < 6+(4*1) {
+ return nil, errors.New("input too small")
+ }
+ if use8BitTables && d.actualTableLog <= 8 {
+ return d.decompress4X8bit(dst, src)
+ }
+
+ var br [4]bitReaderShifted
+ // Decode "jump table"
+ start := 6
+ for i := 0; i < 3; i++ {
+ length := int(src[i*2]) | (int(src[i*2+1]) << 8)
+ if start+length >= len(src) {
+ return nil, errors.New("truncated input (or invalid offset)")
+ }
+ err := br[i].init(src[start : start+length])
+ if err != nil {
+ return nil, err
+ }
+ start += length
+ }
+ err := br[3].init(src[start:])
+ if err != nil {
+ return nil, err
+ }
+
+ // destination, offset to match first output
+ dstSize := cap(dst)
+ dst = dst[:dstSize]
+ out := dst
+ dstEvery := (dstSize + 3) / 4
+
+ const tlSize = 1 << tableLogMax
+ const tlMask = tlSize - 1
+ single := d.dt.single[:tlSize]
+
+ // Use temp table to avoid bound checks/append penalty.
+ buf := d.buffer()
+ var off uint8
+ var decoded int
+
+ // Decode 2 values from each decoder/loop.
+ const bufoff = 256
+ for {
+ if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
+ break
+ }
+
+ {
+ const stream = 0
+ const stream2 = 1
+ br[stream].fillFast()
+ br[stream2].fillFast()
+
+ val := br[stream].peekBitsFast(d.actualTableLog)
+ val2 := br[stream2].peekBitsFast(d.actualTableLog)
+ v := single[val&tlMask]
+ v2 := single[val2&tlMask]
+ br[stream].advance(uint8(v.entry))
+ br[stream2].advance(uint8(v2.entry))
+ buf[stream][off] = uint8(v.entry >> 8)
+ buf[stream2][off] = uint8(v2.entry >> 8)
+
+ val = br[stream].peekBitsFast(d.actualTableLog)
+ val2 = br[stream2].peekBitsFast(d.actualTableLog)
+ v = single[val&tlMask]
+ v2 = single[val2&tlMask]
+ br[stream].advance(uint8(v.entry))
+ br[stream2].advance(uint8(v2.entry))
+ buf[stream][off+1] = uint8(v.entry >> 8)
+ buf[stream2][off+1] = uint8(v2.entry >> 8)
+ }
+
+ {
+ const stream = 2
+ const stream2 = 3
+ br[stream].fillFast()
+ br[stream2].fillFast()
+
+ val := br[stream].peekBitsFast(d.actualTableLog)
+ val2 := br[stream2].peekBitsFast(d.actualTableLog)
+ v := single[val&tlMask]
+ v2 := single[val2&tlMask]
+ br[stream].advance(uint8(v.entry))
+ br[stream2].advance(uint8(v2.entry))
+ buf[stream][off] = uint8(v.entry >> 8)
+ buf[stream2][off] = uint8(v2.entry >> 8)
+
+ val = br[stream].peekBitsFast(d.actualTableLog)
+ val2 = br[stream2].peekBitsFast(d.actualTableLog)
+ v = single[val&tlMask]
+ v2 = single[val2&tlMask]
+ br[stream].advance(uint8(v.entry))
+ br[stream2].advance(uint8(v2.entry))
+ buf[stream][off+1] = uint8(v.entry >> 8)
+ buf[stream2][off+1] = uint8(v2.entry >> 8)
+ }
+
+ off += 2
+
+ if off == 0 {
+ if bufoff > dstEvery {
+ d.bufs.Put(buf)
+ return nil, errors.New("corruption detected: stream overrun 1")
+ }
+ copy(out, buf[0][:])
+ copy(out[dstEvery:], buf[1][:])
+ copy(out[dstEvery*2:], buf[2][:])
+ copy(out[dstEvery*3:], buf[3][:])
+ out = out[bufoff:]
+ decoded += bufoff * 4
+			// There must be at least 3 buffers left.
+ if len(out) < dstEvery*3 {
+ d.bufs.Put(buf)
+ return nil, errors.New("corruption detected: stream overrun 2")
+ }
+ }
+ }
+ if off > 0 {
+ ioff := int(off)
+ if len(out) < dstEvery*3+ioff {
+ d.bufs.Put(buf)
+ return nil, errors.New("corruption detected: stream overrun 3")
+ }
+ copy(out, buf[0][:off])
+ copy(out[dstEvery:], buf[1][:off])
+ copy(out[dstEvery*2:], buf[2][:off])
+ copy(out[dstEvery*3:], buf[3][:off])
+ decoded += int(off) * 4
+ out = out[off:]
+ }
+
+ // Decode remaining.
+ remainBytes := dstEvery - (decoded / 4)
+ for i := range br {
+ offset := dstEvery * i
+ endsAt := offset + remainBytes
+ if endsAt > len(out) {
+ endsAt = len(out)
+ }
+ br := &br[i]
+ bitsLeft := br.remaining()
+ for bitsLeft > 0 {
+ br.fill()
+ if offset >= endsAt {
+ d.bufs.Put(buf)
+ return nil, errors.New("corruption detected: stream overrun 4")
+ }
+
+ // Read value and increment offset.
+ val := br.peekBitsFast(d.actualTableLog)
+ v := single[val&tlMask].entry
+ nBits := uint8(v)
+ br.advance(nBits)
+ bitsLeft -= uint(nBits)
+ out[offset] = uint8(v >> 8)
+ offset++
+ }
+ if offset != endsAt {
+ d.bufs.Put(buf)
+ return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
+ }
+ decoded += offset - dstEvery*i
+ err = br.close()
+ if err != nil {
+ return nil, err
+ }
+ }
+ d.bufs.Put(buf)
+ if dstSize != decoded {
+ return nil, errors.New("corruption detected: short output block")
+ }
+ return dst, nil
+}
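
Both the generic and the amd64 paths begin by splitting src into four streams via the 6-byte "jump table": three little-endian uint16 lengths, with the fourth stream taking whatever remains. The standalone sketch below mirrors that layout; splitStreams and the toy payload are illustrative, not part of the package.

package main

import (
	"encoding/binary"
	"fmt"
)

// splitStreams parses the 6-byte jump table and returns the four streams,
// rejecting truncated input the same way the decoder does.
func splitStreams(src []byte) ([4][]byte, error) {
	var streams [4][]byte
	if len(src) < 6+4 {
		return streams, fmt.Errorf("input too small")
	}
	start := 6
	for i := 0; i < 3; i++ {
		length := int(binary.LittleEndian.Uint16(src[i*2:]))
		if start+length >= len(src) {
			return streams, fmt.Errorf("truncated input (or invalid offset)")
		}
		streams[i] = src[start : start+length]
		start += length
	}
	streams[3] = src[start:]
	return streams, nil
}

func main() {
	// Toy payload: three 2-byte streams and a 3-byte fourth stream.
	src := []byte{2, 0, 2, 0, 2, 0, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i'}
	streams, err := splitStreams(src)
	fmt.Println(streams, err)
}
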
diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go
index 3ee00ecb..e8ad17ad 100644
--- a/vendor/github.com/klauspost/compress/huff0/huff0.go
+++ b/vendor/github.com/klauspost/compress/huff0/huff0.go
@@ -8,6 +8,7 @@ import (
"fmt"
"math"
"math/bits"
+ "sync"
"github.com/klauspost/compress/fse"
)
@@ -116,6 +117,7 @@ type Scratch struct {
nodes []nodeElt
tmpOut [4][]byte
fse *fse.Scratch
+ decPool sync.Pool // *[4][256]byte buffers.
huffWeight [maxSymbolValue + 1]byte
}
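
decPool is documented as holding *[4][256]byte buffers; the d.buffer() and d.bufs.Put(buf) calls in the decoders above hand these out and return them, presumably backed by this pool. Below is a minimal sketch of that pooling pattern; bufPool and its method names are illustrative, not the package's API.

package main

import "sync"

// bufPool reuses *[4][256]byte scratch tables across decode calls instead
// of allocating a fresh one per call.
type bufPool struct{ p sync.Pool }

func (b *bufPool) get() *[4][256]byte {
	if v := b.p.Get(); v != nil {
		return v.(*[4][256]byte)
	}
	return new([4][256]byte)
}

func (b *bufPool) put(buf *[4][256]byte) { b.p.Put(buf) }

func main() {
	var pool bufPool
	buf := pool.get()
	buf[0][0] = 42 // use the scratch space
	pool.put(buf)  // return it for reuse; contents need not be cleared
}
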
diff --git a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go
index c8cf7b69..d9312e5b 100644
--- a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go
+++ b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.go
@@ -1,7 +1,7 @@
// Code generated by command: go run gen.go -out ../encodeblock_amd64.s -stubs ../encodeblock_amd64.go -pkg=s2. DO NOT EDIT.
-//go:build !appengine && !noasm && gc
-// +build !appengine,!noasm,gc
+//go:build !appengine && !noasm && gc && !noasm
+// +build !appengine,!noasm,gc,!noasm
package s2
diff --git a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s
index 1ac65a0e..729dbf53 100644
--- a/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s
+++ b/vendor/github.com/klauspost/compress/s2/encodeblock_amd64.s
@@ -1,13 +1,12 @@
// Code generated by command: go run gen.go -out ../encodeblock_amd64.s -stubs ../encodeblock_amd64.go -pkg=s2. DO NOT EDIT.
-// +build !appengine
-// +build !noasm
-// +build gc
+//go:build !appengine && !noasm && gc && !noasm
+// +build !appengine,!noasm,gc,!noasm
#include "textflag.h"
// func encodeBlockAsm(dst []byte, src []byte) int
-// Requires: SSE2
+// Requires: BMI, SSE2
TEXT ·encodeBlockAsm(SB), $65560-56
MOVQ dst_base+0(FP), AX
MOVQ $0x00000200, CX
@@ -243,35 +242,68 @@ emit_literal_done_repeat_emit_encodeBlockAsm:
// matchLen
XORL R12, R12
CMPL R9, $0x08
- JL matchlen_single_repeat_extend_encodeBlockAsm
+ JL matchlen_match4_repeat_extend_encodeBlockAsm
matchlen_loopback_repeat_extend_encodeBlockAsm:
MOVQ (R10)(R12*1), R11
XORQ (SI)(R12*1), R11
TESTQ R11, R11
JZ matchlen_loop_repeat_extend_encodeBlockAsm
- BSFQ R11, R11
- SARQ $0x03, R11
- LEAL (R12)(R11*1), R12
- JMP repeat_extend_forward_end_encodeBlockAsm
+
+#ifdef GOAMD64_v3
+ TZCNTQ R11, R11
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef GOAMD64_v4
+ TZCNTQ R11, R11
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef TZCNTQ_EMITTED
+#undef TZCNTQ_EMITTED
+#else
+ BSFQ R11, R11
+
+#endif
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
+ JMP repeat_extend_forward_end_encodeBlockAsm
matchlen_loop_repeat_extend_encodeBlockAsm:
LEAL -8(R9), R9
LEAL 8(R12), R12
CMPL R9, $0x08
JGE matchlen_loopback_repeat_extend_encodeBlockAsm
-
-matchlen_single_repeat_extend_encodeBlockAsm:
- TESTL R9, R9
- JZ repeat_extend_forward_end_encodeBlockAsm
-
-matchlen_single_loopback_repeat_extend_encodeBlockAsm:
+ JZ repeat_extend_forward_end_encodeBlockAsm
+
+matchlen_match4_repeat_extend_encodeBlockAsm:
+ CMPL R9, $0x04
+ JL matchlen_match2_repeat_extend_encodeBlockAsm
+ MOVL (R10)(R12*1), R11
+ CMPL (SI)(R12*1), R11
+ JNE matchlen_match2_repeat_extend_encodeBlockAsm
+ SUBL $0x04, R9
+ LEAL 4(R12), R12
+
+matchlen_match2_repeat_extend_encodeBlockAsm:
+ CMPL R9, $0x02
+ JL matchlen_match1_repeat_extend_encodeBlockAsm
+ MOVW (R10)(R12*1), R11
+ CMPW (SI)(R12*1), R11
+ JNE matchlen_match1_repeat_extend_encodeBlockAsm
+ SUBL $0x02, R9
+ LEAL 2(R12), R12
+
+matchlen_match1_repeat_extend_encodeBlockAsm:
+ CMPL R9, $0x01
+ JL repeat_extend_forward_end_encodeBlockAsm
MOVB (R10)(R12*1), R11
CMPB (SI)(R12*1), R11
JNE repeat_extend_forward_end_encodeBlockAsm
LEAL 1(R12), R12
- DECL R9
- JNZ matchlen_single_loopback_repeat_extend_encodeBlockAsm
repeat_extend_forward_end_encodeBlockAsm:
ADDL R12, CX
@@ -748,35 +780,68 @@ match_nolit_loop_encodeBlockAsm:
// matchLen
XORL R10, R10
CMPL DI, $0x08
- JL matchlen_single_match_nolit_encodeBlockAsm
+ JL matchlen_match4_match_nolit_encodeBlockAsm
matchlen_loopback_match_nolit_encodeBlockAsm:
MOVQ (R8)(R10*1), R9
XORQ (SI)(R10*1), R9
TESTQ R9, R9
JZ matchlen_loop_match_nolit_encodeBlockAsm
- BSFQ R9, R9
- SARQ $0x03, R9
- LEAL (R10)(R9*1), R10
- JMP match_nolit_end_encodeBlockAsm
+
+#ifdef GOAMD64_v3
+ TZCNTQ R9, R9
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef GOAMD64_v4
+ TZCNTQ R9, R9
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef TZCNTQ_EMITTED
+#undef TZCNTQ_EMITTED
+#else
+ BSFQ R9, R9
+
+#endif
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
+ JMP match_nolit_end_encodeBlockAsm
matchlen_loop_match_nolit_encodeBlockAsm:
LEAL -8(DI), DI
LEAL 8(R10), R10
CMPL DI, $0x08
JGE matchlen_loopback_match_nolit_encodeBlockAsm
-
-matchlen_single_match_nolit_encodeBlockAsm:
- TESTL DI, DI
- JZ match_nolit_end_encodeBlockAsm
-
-matchlen_single_loopback_match_nolit_encodeBlockAsm:
+ JZ match_nolit_end_encodeBlockAsm
+
+matchlen_match4_match_nolit_encodeBlockAsm:
+ CMPL DI, $0x04
+ JL matchlen_match2_match_nolit_encodeBlockAsm
+ MOVL (R8)(R10*1), R9
+ CMPL (SI)(R10*1), R9
+ JNE matchlen_match2_match_nolit_encodeBlockAsm
+ SUBL $0x04, DI
+ LEAL 4(R10), R10
+
+matchlen_match2_match_nolit_encodeBlockAsm:
+ CMPL DI, $0x02
+ JL matchlen_match1_match_nolit_encodeBlockAsm
+ MOVW (R8)(R10*1), R9
+ CMPW (SI)(R10*1), R9
+ JNE matchlen_match1_match_nolit_encodeBlockAsm
+ SUBL $0x02, DI
+ LEAL 2(R10), R10
+
+matchlen_match1_match_nolit_encodeBlockAsm:
+ CMPL DI, $0x01
+ JL match_nolit_end_encodeBlockAsm
MOVB (R8)(R10*1), R9
CMPB (SI)(R10*1), R9
JNE match_nolit_end_encodeBlockAsm
LEAL 1(R10), R10
- DECL DI
- JNZ matchlen_single_loopback_match_nolit_encodeBlockAsm
match_nolit_end_encodeBlockAsm:
ADDL R10, CX
@@ -1162,7 +1227,7 @@ emit_literal_done_emit_remainder_encodeBlockAsm:
RET
// func encodeBlockAsm4MB(dst []byte, src []byte) int
-// Requires: SSE2
+// Requires: BMI, SSE2
TEXT ·encodeBlockAsm4MB(SB), $65560-56
MOVQ dst_base+0(FP), AX
MOVQ $0x00000200, CX
@@ -1390,35 +1455,68 @@ emit_literal_done_repeat_emit_encodeBlockAsm4MB:
// matchLen
XORL R12, R12
CMPL R9, $0x08
- JL matchlen_single_repeat_extend_encodeBlockAsm4MB
+ JL matchlen_match4_repeat_extend_encodeBlockAsm4MB
matchlen_loopback_repeat_extend_encodeBlockAsm4MB:
MOVQ (R10)(R12*1), R11
XORQ (SI)(R12*1), R11
TESTQ R11, R11
JZ matchlen_loop_repeat_extend_encodeBlockAsm4MB
- BSFQ R11, R11
- SARQ $0x03, R11
- LEAL (R12)(R11*1), R12
- JMP repeat_extend_forward_end_encodeBlockAsm4MB
+
+#ifdef GOAMD64_v3
+ TZCNTQ R11, R11
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef GOAMD64_v4
+ TZCNTQ R11, R11
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef TZCNTQ_EMITTED
+#undef TZCNTQ_EMITTED
+#else
+ BSFQ R11, R11
+
+#endif
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
+ JMP repeat_extend_forward_end_encodeBlockAsm4MB
matchlen_loop_repeat_extend_encodeBlockAsm4MB:
LEAL -8(R9), R9
LEAL 8(R12), R12
CMPL R9, $0x08
JGE matchlen_loopback_repeat_extend_encodeBlockAsm4MB
-
-matchlen_single_repeat_extend_encodeBlockAsm4MB:
- TESTL R9, R9
- JZ repeat_extend_forward_end_encodeBlockAsm4MB
-
-matchlen_single_loopback_repeat_extend_encodeBlockAsm4MB:
+ JZ repeat_extend_forward_end_encodeBlockAsm4MB
+
+matchlen_match4_repeat_extend_encodeBlockAsm4MB:
+ CMPL R9, $0x04
+ JL matchlen_match2_repeat_extend_encodeBlockAsm4MB
+ MOVL (R10)(R12*1), R11
+ CMPL (SI)(R12*1), R11
+ JNE matchlen_match2_repeat_extend_encodeBlockAsm4MB
+ SUBL $0x04, R9
+ LEAL 4(R12), R12
+
+matchlen_match2_repeat_extend_encodeBlockAsm4MB:
+ CMPL R9, $0x02
+ JL matchlen_match1_repeat_extend_encodeBlockAsm4MB
+ MOVW (R10)(R12*1), R11
+ CMPW (SI)(R12*1), R11
+ JNE matchlen_match1_repeat_extend_encodeBlockAsm4MB
+ SUBL $0x02, R9
+ LEAL 2(R12), R12
+
+matchlen_match1_repeat_extend_encodeBlockAsm4MB:
+ CMPL R9, $0x01
+ JL repeat_extend_forward_end_encodeBlockAsm4MB
MOVB (R10)(R12*1), R11
CMPB (SI)(R12*1), R11
JNE repeat_extend_forward_end_encodeBlockAsm4MB
LEAL 1(R12), R12
- DECL R9
- JNZ matchlen_single_loopback_repeat_extend_encodeBlockAsm4MB
repeat_extend_forward_end_encodeBlockAsm4MB:
ADDL R12, CX
@@ -1854,35 +1952,68 @@ match_nolit_loop_encodeBlockAsm4MB:
// matchLen
XORL R10, R10
CMPL DI, $0x08
- JL matchlen_single_match_nolit_encodeBlockAsm4MB
+ JL matchlen_match4_match_nolit_encodeBlockAsm4MB
matchlen_loopback_match_nolit_encodeBlockAsm4MB:
MOVQ (R8)(R10*1), R9
XORQ (SI)(R10*1), R9
TESTQ R9, R9
JZ matchlen_loop_match_nolit_encodeBlockAsm4MB
- BSFQ R9, R9
- SARQ $0x03, R9
- LEAL (R10)(R9*1), R10
- JMP match_nolit_end_encodeBlockAsm4MB
+
+#ifdef GOAMD64_v3
+ TZCNTQ R9, R9
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef GOAMD64_v4
+ TZCNTQ R9, R9
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef TZCNTQ_EMITTED
+#undef TZCNTQ_EMITTED
+#else
+ BSFQ R9, R9
+
+#endif
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
+ JMP match_nolit_end_encodeBlockAsm4MB
matchlen_loop_match_nolit_encodeBlockAsm4MB:
LEAL -8(DI), DI
LEAL 8(R10), R10
CMPL DI, $0x08
JGE matchlen_loopback_match_nolit_encodeBlockAsm4MB
-
-matchlen_single_match_nolit_encodeBlockAsm4MB:
- TESTL DI, DI
- JZ match_nolit_end_encodeBlockAsm4MB
-
-matchlen_single_loopback_match_nolit_encodeBlockAsm4MB:
+ JZ match_nolit_end_encodeBlockAsm4MB
+
+matchlen_match4_match_nolit_encodeBlockAsm4MB:
+ CMPL DI, $0x04
+ JL matchlen_match2_match_nolit_encodeBlockAsm4MB
+ MOVL (R8)(R10*1), R9
+ CMPL (SI)(R10*1), R9
+ JNE matchlen_match2_match_nolit_encodeBlockAsm4MB
+ SUBL $0x04, DI
+ LEAL 4(R10), R10
+
+matchlen_match2_match_nolit_encodeBlockAsm4MB:
+ CMPL DI, $0x02
+ JL matchlen_match1_match_nolit_encodeBlockAsm4MB
+ MOVW (R8)(R10*1), R9
+ CMPW (SI)(R10*1), R9
+ JNE matchlen_match1_match_nolit_encodeBlockAsm4MB
+ SUBL $0x02, DI
+ LEAL 2(R10), R10
+
+matchlen_match1_match_nolit_encodeBlockAsm4MB:
+ CMPL DI, $0x01
+ JL match_nolit_end_encodeBlockAsm4MB
MOVB (R8)(R10*1), R9
CMPB (SI)(R10*1), R9
JNE match_nolit_end_encodeBlockAsm4MB
LEAL 1(R10), R10
- DECL DI
- JNZ matchlen_single_loopback_match_nolit_encodeBlockAsm4MB
match_nolit_end_encodeBlockAsm4MB:
ADDL R10, CX
@@ -2238,7 +2369,7 @@ emit_literal_done_emit_remainder_encodeBlockAsm4MB:
RET
// func encodeBlockAsm12B(dst []byte, src []byte) int
-// Requires: SSE2
+// Requires: BMI, SSE2
TEXT ·encodeBlockAsm12B(SB), $16408-56
MOVQ dst_base+0(FP), AX
MOVQ $0x00000080, CX
@@ -2455,35 +2586,68 @@ emit_literal_done_repeat_emit_encodeBlockAsm12B:
// matchLen
XORL R12, R12
CMPL R9, $0x08
- JL matchlen_single_repeat_extend_encodeBlockAsm12B
+ JL matchlen_match4_repeat_extend_encodeBlockAsm12B
matchlen_loopback_repeat_extend_encodeBlockAsm12B:
MOVQ (R10)(R12*1), R11
XORQ (SI)(R12*1), R11
TESTQ R11, R11
JZ matchlen_loop_repeat_extend_encodeBlockAsm12B
- BSFQ R11, R11
- SARQ $0x03, R11
- LEAL (R12)(R11*1), R12
- JMP repeat_extend_forward_end_encodeBlockAsm12B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R11, R11
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef GOAMD64_v4
+ TZCNTQ R11, R11
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef TZCNTQ_EMITTED
+#undef TZCNTQ_EMITTED
+#else
+ BSFQ R11, R11
+
+#endif
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
+ JMP repeat_extend_forward_end_encodeBlockAsm12B
matchlen_loop_repeat_extend_encodeBlockAsm12B:
LEAL -8(R9), R9
LEAL 8(R12), R12
CMPL R9, $0x08
JGE matchlen_loopback_repeat_extend_encodeBlockAsm12B
-
-matchlen_single_repeat_extend_encodeBlockAsm12B:
- TESTL R9, R9
- JZ repeat_extend_forward_end_encodeBlockAsm12B
-
-matchlen_single_loopback_repeat_extend_encodeBlockAsm12B:
+ JZ repeat_extend_forward_end_encodeBlockAsm12B
+
+matchlen_match4_repeat_extend_encodeBlockAsm12B:
+ CMPL R9, $0x04
+ JL matchlen_match2_repeat_extend_encodeBlockAsm12B
+ MOVL (R10)(R12*1), R11
+ CMPL (SI)(R12*1), R11
+ JNE matchlen_match2_repeat_extend_encodeBlockAsm12B
+ SUBL $0x04, R9
+ LEAL 4(R12), R12
+
+matchlen_match2_repeat_extend_encodeBlockAsm12B:
+ CMPL R9, $0x02
+ JL matchlen_match1_repeat_extend_encodeBlockAsm12B
+ MOVW (R10)(R12*1), R11
+ CMPW (SI)(R12*1), R11
+ JNE matchlen_match1_repeat_extend_encodeBlockAsm12B
+ SUBL $0x02, R9
+ LEAL 2(R12), R12
+
+matchlen_match1_repeat_extend_encodeBlockAsm12B:
+ CMPL R9, $0x01
+ JL repeat_extend_forward_end_encodeBlockAsm12B
MOVB (R10)(R12*1), R11
CMPB (SI)(R12*1), R11
JNE repeat_extend_forward_end_encodeBlockAsm12B
LEAL 1(R12), R12
- DECL R9
- JNZ matchlen_single_loopback_repeat_extend_encodeBlockAsm12B
repeat_extend_forward_end_encodeBlockAsm12B:
ADDL R12, CX
@@ -2804,35 +2968,68 @@ match_nolit_loop_encodeBlockAsm12B:
// matchLen
XORL R10, R10
CMPL DI, $0x08
- JL matchlen_single_match_nolit_encodeBlockAsm12B
+ JL matchlen_match4_match_nolit_encodeBlockAsm12B
matchlen_loopback_match_nolit_encodeBlockAsm12B:
MOVQ (R8)(R10*1), R9
XORQ (SI)(R10*1), R9
TESTQ R9, R9
JZ matchlen_loop_match_nolit_encodeBlockAsm12B
- BSFQ R9, R9
- SARQ $0x03, R9
- LEAL (R10)(R9*1), R10
- JMP match_nolit_end_encodeBlockAsm12B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R9, R9
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef GOAMD64_v4
+ TZCNTQ R9, R9
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef TZCNTQ_EMITTED
+#undef TZCNTQ_EMITTED
+#else
+ BSFQ R9, R9
+
+#endif
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
+ JMP match_nolit_end_encodeBlockAsm12B
matchlen_loop_match_nolit_encodeBlockAsm12B:
LEAL -8(DI), DI
LEAL 8(R10), R10
CMPL DI, $0x08
JGE matchlen_loopback_match_nolit_encodeBlockAsm12B
-
-matchlen_single_match_nolit_encodeBlockAsm12B:
- TESTL DI, DI
- JZ match_nolit_end_encodeBlockAsm12B
-
-matchlen_single_loopback_match_nolit_encodeBlockAsm12B:
+ JZ match_nolit_end_encodeBlockAsm12B
+
+matchlen_match4_match_nolit_encodeBlockAsm12B:
+ CMPL DI, $0x04
+ JL matchlen_match2_match_nolit_encodeBlockAsm12B
+ MOVL (R8)(R10*1), R9
+ CMPL (SI)(R10*1), R9
+ JNE matchlen_match2_match_nolit_encodeBlockAsm12B
+ SUBL $0x04, DI
+ LEAL 4(R10), R10
+
+matchlen_match2_match_nolit_encodeBlockAsm12B:
+ CMPL DI, $0x02
+ JL matchlen_match1_match_nolit_encodeBlockAsm12B
+ MOVW (R8)(R10*1), R9
+ CMPW (SI)(R10*1), R9
+ JNE matchlen_match1_match_nolit_encodeBlockAsm12B
+ SUBL $0x02, DI
+ LEAL 2(R10), R10
+
+matchlen_match1_match_nolit_encodeBlockAsm12B:
+ CMPL DI, $0x01
+ JL match_nolit_end_encodeBlockAsm12B
MOVB (R8)(R10*1), R9
CMPB (SI)(R10*1), R9
JNE match_nolit_end_encodeBlockAsm12B
LEAL 1(R10), R10
- DECL DI
- JNZ matchlen_single_loopback_match_nolit_encodeBlockAsm12B
match_nolit_end_encodeBlockAsm12B:
ADDL R10, CX
@@ -3085,7 +3282,7 @@ emit_literal_done_emit_remainder_encodeBlockAsm12B:
RET
// func encodeBlockAsm10B(dst []byte, src []byte) int
-// Requires: SSE2
+// Requires: BMI, SSE2
TEXT ·encodeBlockAsm10B(SB), $4120-56
MOVQ dst_base+0(FP), AX
MOVQ $0x00000020, CX
@@ -3302,35 +3499,68 @@ emit_literal_done_repeat_emit_encodeBlockAsm10B:
// matchLen
XORL R12, R12
CMPL R9, $0x08
- JL matchlen_single_repeat_extend_encodeBlockAsm10B
+ JL matchlen_match4_repeat_extend_encodeBlockAsm10B
matchlen_loopback_repeat_extend_encodeBlockAsm10B:
MOVQ (R10)(R12*1), R11
XORQ (SI)(R12*1), R11
TESTQ R11, R11
JZ matchlen_loop_repeat_extend_encodeBlockAsm10B
- BSFQ R11, R11
- SARQ $0x03, R11
- LEAL (R12)(R11*1), R12
- JMP repeat_extend_forward_end_encodeBlockAsm10B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R11, R11
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef GOAMD64_v4
+ TZCNTQ R11, R11
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef TZCNTQ_EMITTED
+#undef TZCNTQ_EMITTED
+#else
+ BSFQ R11, R11
+
+#endif
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
+ JMP repeat_extend_forward_end_encodeBlockAsm10B
matchlen_loop_repeat_extend_encodeBlockAsm10B:
LEAL -8(R9), R9
LEAL 8(R12), R12
CMPL R9, $0x08
JGE matchlen_loopback_repeat_extend_encodeBlockAsm10B
-
-matchlen_single_repeat_extend_encodeBlockAsm10B:
- TESTL R9, R9
- JZ repeat_extend_forward_end_encodeBlockAsm10B
-
-matchlen_single_loopback_repeat_extend_encodeBlockAsm10B:
+ JZ repeat_extend_forward_end_encodeBlockAsm10B
+
+matchlen_match4_repeat_extend_encodeBlockAsm10B:
+ CMPL R9, $0x04
+ JL matchlen_match2_repeat_extend_encodeBlockAsm10B
+ MOVL (R10)(R12*1), R11
+ CMPL (SI)(R12*1), R11
+ JNE matchlen_match2_repeat_extend_encodeBlockAsm10B
+ SUBL $0x04, R9
+ LEAL 4(R12), R12
+
+matchlen_match2_repeat_extend_encodeBlockAsm10B:
+ CMPL R9, $0x02
+ JL matchlen_match1_repeat_extend_encodeBlockAsm10B
+ MOVW (R10)(R12*1), R11
+ CMPW (SI)(R12*1), R11
+ JNE matchlen_match1_repeat_extend_encodeBlockAsm10B
+ SUBL $0x02, R9
+ LEAL 2(R12), R12
+
+matchlen_match1_repeat_extend_encodeBlockAsm10B:
+ CMPL R9, $0x01
+ JL repeat_extend_forward_end_encodeBlockAsm10B
MOVB (R10)(R12*1), R11
CMPB (SI)(R12*1), R11
JNE repeat_extend_forward_end_encodeBlockAsm10B
LEAL 1(R12), R12
- DECL R9
- JNZ matchlen_single_loopback_repeat_extend_encodeBlockAsm10B
repeat_extend_forward_end_encodeBlockAsm10B:
ADDL R12, CX
@@ -3651,35 +3881,68 @@ match_nolit_loop_encodeBlockAsm10B:
// matchLen
XORL R10, R10
CMPL DI, $0x08
- JL matchlen_single_match_nolit_encodeBlockAsm10B
+ JL matchlen_match4_match_nolit_encodeBlockAsm10B
matchlen_loopback_match_nolit_encodeBlockAsm10B:
MOVQ (R8)(R10*1), R9
XORQ (SI)(R10*1), R9
TESTQ R9, R9
JZ matchlen_loop_match_nolit_encodeBlockAsm10B
- BSFQ R9, R9
- SARQ $0x03, R9
- LEAL (R10)(R9*1), R10
- JMP match_nolit_end_encodeBlockAsm10B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R9, R9
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef GOAMD64_v4
+ TZCNTQ R9, R9
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef TZCNTQ_EMITTED
+#undef TZCNTQ_EMITTED
+#else
+ BSFQ R9, R9
+
+#endif
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
+ JMP match_nolit_end_encodeBlockAsm10B
matchlen_loop_match_nolit_encodeBlockAsm10B:
LEAL -8(DI), DI
LEAL 8(R10), R10
CMPL DI, $0x08
JGE matchlen_loopback_match_nolit_encodeBlockAsm10B
-
-matchlen_single_match_nolit_encodeBlockAsm10B:
- TESTL DI, DI
- JZ match_nolit_end_encodeBlockAsm10B
-
-matchlen_single_loopback_match_nolit_encodeBlockAsm10B:
+ JZ match_nolit_end_encodeBlockAsm10B
+
+matchlen_match4_match_nolit_encodeBlockAsm10B:
+ CMPL DI, $0x04
+ JL matchlen_match2_match_nolit_encodeBlockAsm10B
+ MOVL (R8)(R10*1), R9
+ CMPL (SI)(R10*1), R9
+ JNE matchlen_match2_match_nolit_encodeBlockAsm10B
+ SUBL $0x04, DI
+ LEAL 4(R10), R10
+
+matchlen_match2_match_nolit_encodeBlockAsm10B:
+ CMPL DI, $0x02
+ JL matchlen_match1_match_nolit_encodeBlockAsm10B
+ MOVW (R8)(R10*1), R9
+ CMPW (SI)(R10*1), R9
+ JNE matchlen_match1_match_nolit_encodeBlockAsm10B
+ SUBL $0x02, DI
+ LEAL 2(R10), R10
+
+matchlen_match1_match_nolit_encodeBlockAsm10B:
+ CMPL DI, $0x01
+ JL match_nolit_end_encodeBlockAsm10B
MOVB (R8)(R10*1), R9
CMPB (SI)(R10*1), R9
JNE match_nolit_end_encodeBlockAsm10B
LEAL 1(R10), R10
- DECL DI
- JNZ matchlen_single_loopback_match_nolit_encodeBlockAsm10B
match_nolit_end_encodeBlockAsm10B:
ADDL R10, CX
@@ -3932,7 +4195,7 @@ emit_literal_done_emit_remainder_encodeBlockAsm10B:
RET
// func encodeBlockAsm8B(dst []byte, src []byte) int
-// Requires: SSE2
+// Requires: BMI, SSE2
TEXT ·encodeBlockAsm8B(SB), $1048-56
MOVQ dst_base+0(FP), AX
MOVQ $0x00000008, CX
@@ -4149,35 +4412,68 @@ emit_literal_done_repeat_emit_encodeBlockAsm8B:
// matchLen
XORL R12, R12
CMPL R9, $0x08
- JL matchlen_single_repeat_extend_encodeBlockAsm8B
+ JL matchlen_match4_repeat_extend_encodeBlockAsm8B
matchlen_loopback_repeat_extend_encodeBlockAsm8B:
MOVQ (R10)(R12*1), R11
XORQ (SI)(R12*1), R11
TESTQ R11, R11
JZ matchlen_loop_repeat_extend_encodeBlockAsm8B
- BSFQ R11, R11
- SARQ $0x03, R11
- LEAL (R12)(R11*1), R12
- JMP repeat_extend_forward_end_encodeBlockAsm8B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R11, R11
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef GOAMD64_v4
+ TZCNTQ R11, R11
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef TZCNTQ_EMITTED
+#undef TZCNTQ_EMITTED
+#else
+ BSFQ R11, R11
+
+#endif
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
+ JMP repeat_extend_forward_end_encodeBlockAsm8B
matchlen_loop_repeat_extend_encodeBlockAsm8B:
LEAL -8(R9), R9
LEAL 8(R12), R12
CMPL R9, $0x08
JGE matchlen_loopback_repeat_extend_encodeBlockAsm8B
-
-matchlen_single_repeat_extend_encodeBlockAsm8B:
- TESTL R9, R9
- JZ repeat_extend_forward_end_encodeBlockAsm8B
-
-matchlen_single_loopback_repeat_extend_encodeBlockAsm8B:
+ JZ repeat_extend_forward_end_encodeBlockAsm8B
+
+matchlen_match4_repeat_extend_encodeBlockAsm8B:
+ CMPL R9, $0x04
+ JL matchlen_match2_repeat_extend_encodeBlockAsm8B
+ MOVL (R10)(R12*1), R11
+ CMPL (SI)(R12*1), R11
+ JNE matchlen_match2_repeat_extend_encodeBlockAsm8B
+ SUBL $0x04, R9
+ LEAL 4(R12), R12
+
+matchlen_match2_repeat_extend_encodeBlockAsm8B:
+ CMPL R9, $0x02
+ JL matchlen_match1_repeat_extend_encodeBlockAsm8B
+ MOVW (R10)(R12*1), R11
+ CMPW (SI)(R12*1), R11
+ JNE matchlen_match1_repeat_extend_encodeBlockAsm8B
+ SUBL $0x02, R9
+ LEAL 2(R12), R12
+
+matchlen_match1_repeat_extend_encodeBlockAsm8B:
+ CMPL R9, $0x01
+ JL repeat_extend_forward_end_encodeBlockAsm8B
MOVB (R10)(R12*1), R11
CMPB (SI)(R12*1), R11
JNE repeat_extend_forward_end_encodeBlockAsm8B
LEAL 1(R12), R12
- DECL R9
- JNZ matchlen_single_loopback_repeat_extend_encodeBlockAsm8B
repeat_extend_forward_end_encodeBlockAsm8B:
ADDL R12, CX
@@ -4488,35 +4784,68 @@ match_nolit_loop_encodeBlockAsm8B:
// matchLen
XORL R10, R10
CMPL DI, $0x08
- JL matchlen_single_match_nolit_encodeBlockAsm8B
+ JL matchlen_match4_match_nolit_encodeBlockAsm8B
matchlen_loopback_match_nolit_encodeBlockAsm8B:
MOVQ (R8)(R10*1), R9
XORQ (SI)(R10*1), R9
TESTQ R9, R9
JZ matchlen_loop_match_nolit_encodeBlockAsm8B
- BSFQ R9, R9
- SARQ $0x03, R9
- LEAL (R10)(R9*1), R10
- JMP match_nolit_end_encodeBlockAsm8B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R9, R9
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef GOAMD64_v4
+ TZCNTQ R9, R9
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef TZCNTQ_EMITTED
+#undef TZCNTQ_EMITTED
+#else
+ BSFQ R9, R9
+
+#endif
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
+ JMP match_nolit_end_encodeBlockAsm8B
matchlen_loop_match_nolit_encodeBlockAsm8B:
LEAL -8(DI), DI
LEAL 8(R10), R10
CMPL DI, $0x08
JGE matchlen_loopback_match_nolit_encodeBlockAsm8B
-
-matchlen_single_match_nolit_encodeBlockAsm8B:
- TESTL DI, DI
- JZ match_nolit_end_encodeBlockAsm8B
-
-matchlen_single_loopback_match_nolit_encodeBlockAsm8B:
+ JZ match_nolit_end_encodeBlockAsm8B
+
+matchlen_match4_match_nolit_encodeBlockAsm8B:
+ CMPL DI, $0x04
+ JL matchlen_match2_match_nolit_encodeBlockAsm8B
+ MOVL (R8)(R10*1), R9
+ CMPL (SI)(R10*1), R9
+ JNE matchlen_match2_match_nolit_encodeBlockAsm8B
+ SUBL $0x04, DI
+ LEAL 4(R10), R10
+
+matchlen_match2_match_nolit_encodeBlockAsm8B:
+ CMPL DI, $0x02
+ JL matchlen_match1_match_nolit_encodeBlockAsm8B
+ MOVW (R8)(R10*1), R9
+ CMPW (SI)(R10*1), R9
+ JNE matchlen_match1_match_nolit_encodeBlockAsm8B
+ SUBL $0x02, DI
+ LEAL 2(R10), R10
+
+matchlen_match1_match_nolit_encodeBlockAsm8B:
+ CMPL DI, $0x01
+ JL match_nolit_end_encodeBlockAsm8B
MOVB (R8)(R10*1), R9
CMPB (SI)(R10*1), R9
JNE match_nolit_end_encodeBlockAsm8B
LEAL 1(R10), R10
- DECL DI
- JNZ matchlen_single_loopback_match_nolit_encodeBlockAsm8B
match_nolit_end_encodeBlockAsm8B:
ADDL R10, CX
@@ -4763,7 +5092,7 @@ emit_literal_done_emit_remainder_encodeBlockAsm8B:
RET
// func encodeBetterBlockAsm(dst []byte, src []byte) int
-// Requires: SSE2
+// Requires: BMI, SSE2
TEXT ·encodeBetterBlockAsm(SB), $327704-56
MOVQ dst_base+0(FP), AX
MOVQ $0x00000a00, CX
@@ -4885,35 +5214,68 @@ match_dst_size_check_encodeBetterBlockAsm:
// matchLen
XORL R12, R12
CMPL R8, $0x08
- JL matchlen_single_match_nolit_encodeBetterBlockAsm
+ JL matchlen_match4_match_nolit_encodeBetterBlockAsm
matchlen_loopback_match_nolit_encodeBetterBlockAsm:
MOVQ (R9)(R12*1), R11
XORQ (R10)(R12*1), R11
TESTQ R11, R11
JZ matchlen_loop_match_nolit_encodeBetterBlockAsm
- BSFQ R11, R11
- SARQ $0x03, R11
- LEAL (R12)(R11*1), R12
- JMP match_nolit_end_encodeBetterBlockAsm
+
+#ifdef GOAMD64_v3
+ TZCNTQ R11, R11
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef GOAMD64_v4
+ TZCNTQ R11, R11
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef TZCNTQ_EMITTED
+#undef TZCNTQ_EMITTED
+#else
+ BSFQ R11, R11
+
+#endif
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
+ JMP match_nolit_end_encodeBetterBlockAsm
matchlen_loop_match_nolit_encodeBetterBlockAsm:
LEAL -8(R8), R8
LEAL 8(R12), R12
CMPL R8, $0x08
JGE matchlen_loopback_match_nolit_encodeBetterBlockAsm
-
-matchlen_single_match_nolit_encodeBetterBlockAsm:
- TESTL R8, R8
- JZ match_nolit_end_encodeBetterBlockAsm
-
-matchlen_single_loopback_match_nolit_encodeBetterBlockAsm:
+ JZ match_nolit_end_encodeBetterBlockAsm
+
+matchlen_match4_match_nolit_encodeBetterBlockAsm:
+ CMPL R8, $0x04
+ JL matchlen_match2_match_nolit_encodeBetterBlockAsm
+ MOVL (R9)(R12*1), R11
+ CMPL (R10)(R12*1), R11
+ JNE matchlen_match2_match_nolit_encodeBetterBlockAsm
+ SUBL $0x04, R8
+ LEAL 4(R12), R12
+
+matchlen_match2_match_nolit_encodeBetterBlockAsm:
+ CMPL R8, $0x02
+ JL matchlen_match1_match_nolit_encodeBetterBlockAsm
+ MOVW (R9)(R12*1), R11
+ CMPW (R10)(R12*1), R11
+ JNE matchlen_match1_match_nolit_encodeBetterBlockAsm
+ SUBL $0x02, R8
+ LEAL 2(R12), R12
+
+matchlen_match1_match_nolit_encodeBetterBlockAsm:
+ CMPL R8, $0x01
+ JL match_nolit_end_encodeBetterBlockAsm
MOVB (R9)(R12*1), R11
CMPB (R10)(R12*1), R11
JNE match_nolit_end_encodeBetterBlockAsm
LEAL 1(R12), R12
- DECL R8
- JNZ matchlen_single_loopback_match_nolit_encodeBetterBlockAsm
match_nolit_end_encodeBetterBlockAsm:
MOVL CX, R8
@@ -5719,7 +6081,7 @@ emit_literal_done_emit_remainder_encodeBetterBlockAsm:
RET
// func encodeBetterBlockAsm4MB(dst []byte, src []byte) int
-// Requires: SSE2
+// Requires: BMI, SSE2
TEXT ·encodeBetterBlockAsm4MB(SB), $327704-56
MOVQ dst_base+0(FP), AX
MOVQ $0x00000a00, CX
@@ -5841,35 +6203,68 @@ match_dst_size_check_encodeBetterBlockAsm4MB:
// matchLen
XORL R12, R12
CMPL R8, $0x08
- JL matchlen_single_match_nolit_encodeBetterBlockAsm4MB
+ JL matchlen_match4_match_nolit_encodeBetterBlockAsm4MB
matchlen_loopback_match_nolit_encodeBetterBlockAsm4MB:
MOVQ (R9)(R12*1), R11
XORQ (R10)(R12*1), R11
TESTQ R11, R11
JZ matchlen_loop_match_nolit_encodeBetterBlockAsm4MB
- BSFQ R11, R11
- SARQ $0x03, R11
- LEAL (R12)(R11*1), R12
- JMP match_nolit_end_encodeBetterBlockAsm4MB
+
+#ifdef GOAMD64_v3
+ TZCNTQ R11, R11
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef GOAMD64_v4
+ TZCNTQ R11, R11
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef TZCNTQ_EMITTED
+#undef TZCNTQ_EMITTED
+#else
+ BSFQ R11, R11
+
+#endif
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
+ JMP match_nolit_end_encodeBetterBlockAsm4MB
matchlen_loop_match_nolit_encodeBetterBlockAsm4MB:
LEAL -8(R8), R8
LEAL 8(R12), R12
CMPL R8, $0x08
JGE matchlen_loopback_match_nolit_encodeBetterBlockAsm4MB
-
-matchlen_single_match_nolit_encodeBetterBlockAsm4MB:
- TESTL R8, R8
- JZ match_nolit_end_encodeBetterBlockAsm4MB
-
-matchlen_single_loopback_match_nolit_encodeBetterBlockAsm4MB:
+ JZ match_nolit_end_encodeBetterBlockAsm4MB
+
+matchlen_match4_match_nolit_encodeBetterBlockAsm4MB:
+ CMPL R8, $0x04
+ JL matchlen_match2_match_nolit_encodeBetterBlockAsm4MB
+ MOVL (R9)(R12*1), R11
+ CMPL (R10)(R12*1), R11
+ JNE matchlen_match2_match_nolit_encodeBetterBlockAsm4MB
+ SUBL $0x04, R8
+ LEAL 4(R12), R12
+
+matchlen_match2_match_nolit_encodeBetterBlockAsm4MB:
+ CMPL R8, $0x02
+ JL matchlen_match1_match_nolit_encodeBetterBlockAsm4MB
+ MOVW (R9)(R12*1), R11
+ CMPW (R10)(R12*1), R11
+ JNE matchlen_match1_match_nolit_encodeBetterBlockAsm4MB
+ SUBL $0x02, R8
+ LEAL 2(R12), R12
+
+matchlen_match1_match_nolit_encodeBetterBlockAsm4MB:
+ CMPL R8, $0x01
+ JL match_nolit_end_encodeBetterBlockAsm4MB
MOVB (R9)(R12*1), R11
CMPB (R10)(R12*1), R11
JNE match_nolit_end_encodeBetterBlockAsm4MB
LEAL 1(R12), R12
- DECL R8
- JNZ matchlen_single_loopback_match_nolit_encodeBetterBlockAsm4MB
match_nolit_end_encodeBetterBlockAsm4MB:
MOVL CX, R8
@@ -6618,7 +7013,7 @@ emit_literal_done_emit_remainder_encodeBetterBlockAsm4MB:
RET
// func encodeBetterBlockAsm12B(dst []byte, src []byte) int
-// Requires: SSE2
+// Requires: BMI, SSE2
TEXT ·encodeBetterBlockAsm12B(SB), $81944-56
MOVQ dst_base+0(FP), AX
MOVQ $0x00000280, CX
@@ -6732,35 +7127,68 @@ match_dst_size_check_encodeBetterBlockAsm12B:
// matchLen
XORL R12, R12
CMPL R8, $0x08
- JL matchlen_single_match_nolit_encodeBetterBlockAsm12B
+ JL matchlen_match4_match_nolit_encodeBetterBlockAsm12B
matchlen_loopback_match_nolit_encodeBetterBlockAsm12B:
MOVQ (R9)(R12*1), R11
XORQ (R10)(R12*1), R11
TESTQ R11, R11
JZ matchlen_loop_match_nolit_encodeBetterBlockAsm12B
- BSFQ R11, R11
- SARQ $0x03, R11
- LEAL (R12)(R11*1), R12
- JMP match_nolit_end_encodeBetterBlockAsm12B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R11, R11
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef GOAMD64_v4
+ TZCNTQ R11, R11
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef TZCNTQ_EMITTED
+#undef TZCNTQ_EMITTED
+#else
+ BSFQ R11, R11
+
+#endif
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
+ JMP match_nolit_end_encodeBetterBlockAsm12B
matchlen_loop_match_nolit_encodeBetterBlockAsm12B:
LEAL -8(R8), R8
LEAL 8(R12), R12
CMPL R8, $0x08
JGE matchlen_loopback_match_nolit_encodeBetterBlockAsm12B
-
-matchlen_single_match_nolit_encodeBetterBlockAsm12B:
- TESTL R8, R8
- JZ match_nolit_end_encodeBetterBlockAsm12B
-
-matchlen_single_loopback_match_nolit_encodeBetterBlockAsm12B:
+ JZ match_nolit_end_encodeBetterBlockAsm12B
+
+matchlen_match4_match_nolit_encodeBetterBlockAsm12B:
+ CMPL R8, $0x04
+ JL matchlen_match2_match_nolit_encodeBetterBlockAsm12B
+ MOVL (R9)(R12*1), R11
+ CMPL (R10)(R12*1), R11
+ JNE matchlen_match2_match_nolit_encodeBetterBlockAsm12B
+ SUBL $0x04, R8
+ LEAL 4(R12), R12
+
+matchlen_match2_match_nolit_encodeBetterBlockAsm12B:
+ CMPL R8, $0x02
+ JL matchlen_match1_match_nolit_encodeBetterBlockAsm12B
+ MOVW (R9)(R12*1), R11
+ CMPW (R10)(R12*1), R11
+ JNE matchlen_match1_match_nolit_encodeBetterBlockAsm12B
+ SUBL $0x02, R8
+ LEAL 2(R12), R12
+
+matchlen_match1_match_nolit_encodeBetterBlockAsm12B:
+ CMPL R8, $0x01
+ JL match_nolit_end_encodeBetterBlockAsm12B
MOVB (R9)(R12*1), R11
CMPB (R10)(R12*1), R11
JNE match_nolit_end_encodeBetterBlockAsm12B
LEAL 1(R12), R12
- DECL R8
- JNZ matchlen_single_loopback_match_nolit_encodeBetterBlockAsm12B
match_nolit_end_encodeBetterBlockAsm12B:
MOVL CX, R8
@@ -7363,7 +7791,7 @@ emit_literal_done_emit_remainder_encodeBetterBlockAsm12B:
RET
// func encodeBetterBlockAsm10B(dst []byte, src []byte) int
-// Requires: SSE2
+// Requires: BMI, SSE2
TEXT ·encodeBetterBlockAsm10B(SB), $20504-56
MOVQ dst_base+0(FP), AX
MOVQ $0x000000a0, CX
@@ -7477,35 +7905,68 @@ match_dst_size_check_encodeBetterBlockAsm10B:
// matchLen
XORL R12, R12
CMPL R8, $0x08
- JL matchlen_single_match_nolit_encodeBetterBlockAsm10B
+ JL matchlen_match4_match_nolit_encodeBetterBlockAsm10B
matchlen_loopback_match_nolit_encodeBetterBlockAsm10B:
MOVQ (R9)(R12*1), R11
XORQ (R10)(R12*1), R11
TESTQ R11, R11
JZ matchlen_loop_match_nolit_encodeBetterBlockAsm10B
- BSFQ R11, R11
- SARQ $0x03, R11
- LEAL (R12)(R11*1), R12
- JMP match_nolit_end_encodeBetterBlockAsm10B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R11, R11
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef GOAMD64_v4
+ TZCNTQ R11, R11
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef TZCNTQ_EMITTED
+#undef TZCNTQ_EMITTED
+#else
+ BSFQ R11, R11
+
+#endif
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
+ JMP match_nolit_end_encodeBetterBlockAsm10B
matchlen_loop_match_nolit_encodeBetterBlockAsm10B:
LEAL -8(R8), R8
LEAL 8(R12), R12
CMPL R8, $0x08
JGE matchlen_loopback_match_nolit_encodeBetterBlockAsm10B
-
-matchlen_single_match_nolit_encodeBetterBlockAsm10B:
- TESTL R8, R8
- JZ match_nolit_end_encodeBetterBlockAsm10B
-
-matchlen_single_loopback_match_nolit_encodeBetterBlockAsm10B:
+ JZ match_nolit_end_encodeBetterBlockAsm10B
+
+matchlen_match4_match_nolit_encodeBetterBlockAsm10B:
+ CMPL R8, $0x04
+ JL matchlen_match2_match_nolit_encodeBetterBlockAsm10B
+ MOVL (R9)(R12*1), R11
+ CMPL (R10)(R12*1), R11
+ JNE matchlen_match2_match_nolit_encodeBetterBlockAsm10B
+ SUBL $0x04, R8
+ LEAL 4(R12), R12
+
+matchlen_match2_match_nolit_encodeBetterBlockAsm10B:
+ CMPL R8, $0x02
+ JL matchlen_match1_match_nolit_encodeBetterBlockAsm10B
+ MOVW (R9)(R12*1), R11
+ CMPW (R10)(R12*1), R11
+ JNE matchlen_match1_match_nolit_encodeBetterBlockAsm10B
+ SUBL $0x02, R8
+ LEAL 2(R12), R12
+
+matchlen_match1_match_nolit_encodeBetterBlockAsm10B:
+ CMPL R8, $0x01
+ JL match_nolit_end_encodeBetterBlockAsm10B
MOVB (R9)(R12*1), R11
CMPB (R10)(R12*1), R11
JNE match_nolit_end_encodeBetterBlockAsm10B
LEAL 1(R12), R12
- DECL R8
- JNZ matchlen_single_loopback_match_nolit_encodeBetterBlockAsm10B
match_nolit_end_encodeBetterBlockAsm10B:
MOVL CX, R8
@@ -8108,7 +8569,7 @@ emit_literal_done_emit_remainder_encodeBetterBlockAsm10B:
RET
// func encodeBetterBlockAsm8B(dst []byte, src []byte) int
-// Requires: SSE2
+// Requires: BMI, SSE2
TEXT ·encodeBetterBlockAsm8B(SB), $5144-56
MOVQ dst_base+0(FP), AX
MOVQ $0x00000028, CX
@@ -8222,35 +8683,68 @@ match_dst_size_check_encodeBetterBlockAsm8B:
// matchLen
XORL R12, R12
CMPL R8, $0x08
- JL matchlen_single_match_nolit_encodeBetterBlockAsm8B
+ JL matchlen_match4_match_nolit_encodeBetterBlockAsm8B
matchlen_loopback_match_nolit_encodeBetterBlockAsm8B:
MOVQ (R9)(R12*1), R11
XORQ (R10)(R12*1), R11
TESTQ R11, R11
JZ matchlen_loop_match_nolit_encodeBetterBlockAsm8B
- BSFQ R11, R11
- SARQ $0x03, R11
- LEAL (R12)(R11*1), R12
- JMP match_nolit_end_encodeBetterBlockAsm8B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R11, R11
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef GOAMD64_v4
+ TZCNTQ R11, R11
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef TZCNTQ_EMITTED
+#undef TZCNTQ_EMITTED
+#else
+ BSFQ R11, R11
+
+#endif
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
+ JMP match_nolit_end_encodeBetterBlockAsm8B
matchlen_loop_match_nolit_encodeBetterBlockAsm8B:
LEAL -8(R8), R8
LEAL 8(R12), R12
CMPL R8, $0x08
JGE matchlen_loopback_match_nolit_encodeBetterBlockAsm8B
-
-matchlen_single_match_nolit_encodeBetterBlockAsm8B:
- TESTL R8, R8
- JZ match_nolit_end_encodeBetterBlockAsm8B
-
-matchlen_single_loopback_match_nolit_encodeBetterBlockAsm8B:
+ JZ match_nolit_end_encodeBetterBlockAsm8B
+
+matchlen_match4_match_nolit_encodeBetterBlockAsm8B:
+ CMPL R8, $0x04
+ JL matchlen_match2_match_nolit_encodeBetterBlockAsm8B
+ MOVL (R9)(R12*1), R11
+ CMPL (R10)(R12*1), R11
+ JNE matchlen_match2_match_nolit_encodeBetterBlockAsm8B
+ SUBL $0x04, R8
+ LEAL 4(R12), R12
+
+matchlen_match2_match_nolit_encodeBetterBlockAsm8B:
+ CMPL R8, $0x02
+ JL matchlen_match1_match_nolit_encodeBetterBlockAsm8B
+ MOVW (R9)(R12*1), R11
+ CMPW (R10)(R12*1), R11
+ JNE matchlen_match1_match_nolit_encodeBetterBlockAsm8B
+ SUBL $0x02, R8
+ LEAL 2(R12), R12
+
+matchlen_match1_match_nolit_encodeBetterBlockAsm8B:
+ CMPL R8, $0x01
+ JL match_nolit_end_encodeBetterBlockAsm8B
MOVB (R9)(R12*1), R11
CMPB (R10)(R12*1), R11
JNE match_nolit_end_encodeBetterBlockAsm8B
LEAL 1(R12), R12
- DECL R8
- JNZ matchlen_single_loopback_match_nolit_encodeBetterBlockAsm8B
match_nolit_end_encodeBetterBlockAsm8B:
MOVL CX, R8
@@ -8843,7 +9337,7 @@ emit_literal_done_emit_remainder_encodeBetterBlockAsm8B:
RET
// func encodeSnappyBlockAsm(dst []byte, src []byte) int
-// Requires: SSE2
+// Requires: BMI, SSE2
TEXT ·encodeSnappyBlockAsm(SB), $65560-56
MOVQ dst_base+0(FP), AX
MOVQ $0x00000200, CX
@@ -9079,35 +9573,68 @@ emit_literal_done_repeat_emit_encodeSnappyBlockAsm:
// matchLen
XORL R11, R11
CMPL R8, $0x08
- JL matchlen_single_repeat_extend_encodeSnappyBlockAsm
+ JL matchlen_match4_repeat_extend_encodeSnappyBlockAsm
matchlen_loopback_repeat_extend_encodeSnappyBlockAsm:
MOVQ (R9)(R11*1), R10
XORQ (SI)(R11*1), R10
TESTQ R10, R10
JZ matchlen_loop_repeat_extend_encodeSnappyBlockAsm
- BSFQ R10, R10
- SARQ $0x03, R10
- LEAL (R11)(R10*1), R11
- JMP repeat_extend_forward_end_encodeSnappyBlockAsm
+
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef GOAMD64_v4
+ TZCNTQ R10, R10
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef TZCNTQ_EMITTED
+#undef TZCNTQ_EMITTED
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
+ JMP repeat_extend_forward_end_encodeSnappyBlockAsm
matchlen_loop_repeat_extend_encodeSnappyBlockAsm:
LEAL -8(R8), R8
LEAL 8(R11), R11
CMPL R8, $0x08
JGE matchlen_loopback_repeat_extend_encodeSnappyBlockAsm
-
-matchlen_single_repeat_extend_encodeSnappyBlockAsm:
- TESTL R8, R8
- JZ repeat_extend_forward_end_encodeSnappyBlockAsm
-
-matchlen_single_loopback_repeat_extend_encodeSnappyBlockAsm:
+ JZ repeat_extend_forward_end_encodeSnappyBlockAsm
+
+matchlen_match4_repeat_extend_encodeSnappyBlockAsm:
+ CMPL R8, $0x04
+ JL matchlen_match2_repeat_extend_encodeSnappyBlockAsm
+ MOVL (R9)(R11*1), R10
+ CMPL (SI)(R11*1), R10
+ JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm
+ SUBL $0x04, R8
+ LEAL 4(R11), R11
+
+matchlen_match2_repeat_extend_encodeSnappyBlockAsm:
+ CMPL R8, $0x02
+ JL matchlen_match1_repeat_extend_encodeSnappyBlockAsm
+ MOVW (R9)(R11*1), R10
+ CMPW (SI)(R11*1), R10
+ JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm
+ SUBL $0x02, R8
+ LEAL 2(R11), R11
+
+matchlen_match1_repeat_extend_encodeSnappyBlockAsm:
+ CMPL R8, $0x01
+ JL repeat_extend_forward_end_encodeSnappyBlockAsm
MOVB (R9)(R11*1), R10
CMPB (SI)(R11*1), R10
JNE repeat_extend_forward_end_encodeSnappyBlockAsm
LEAL 1(R11), R11
- DECL R8
- JNZ matchlen_single_loopback_repeat_extend_encodeSnappyBlockAsm
repeat_extend_forward_end_encodeSnappyBlockAsm:
ADDL R11, CX
@@ -9380,35 +9907,68 @@ match_nolit_loop_encodeSnappyBlockAsm:
// matchLen
XORL R10, R10
CMPL DI, $0x08
- JL matchlen_single_match_nolit_encodeSnappyBlockAsm
+ JL matchlen_match4_match_nolit_encodeSnappyBlockAsm
matchlen_loopback_match_nolit_encodeSnappyBlockAsm:
MOVQ (R8)(R10*1), R9
XORQ (SI)(R10*1), R9
TESTQ R9, R9
JZ matchlen_loop_match_nolit_encodeSnappyBlockAsm
- BSFQ R9, R9
- SARQ $0x03, R9
- LEAL (R10)(R9*1), R10
- JMP match_nolit_end_encodeSnappyBlockAsm
+
+#ifdef GOAMD64_v3
+ TZCNTQ R9, R9
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef GOAMD64_v4
+ TZCNTQ R9, R9
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef TZCNTQ_EMITTED
+#undef TZCNTQ_EMITTED
+#else
+ BSFQ R9, R9
+
+#endif
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
+ JMP match_nolit_end_encodeSnappyBlockAsm
matchlen_loop_match_nolit_encodeSnappyBlockAsm:
LEAL -8(DI), DI
LEAL 8(R10), R10
CMPL DI, $0x08
JGE matchlen_loopback_match_nolit_encodeSnappyBlockAsm
-
-matchlen_single_match_nolit_encodeSnappyBlockAsm:
- TESTL DI, DI
- JZ match_nolit_end_encodeSnappyBlockAsm
-
-matchlen_single_loopback_match_nolit_encodeSnappyBlockAsm:
+ JZ match_nolit_end_encodeSnappyBlockAsm
+
+matchlen_match4_match_nolit_encodeSnappyBlockAsm:
+ CMPL DI, $0x04
+ JL matchlen_match2_match_nolit_encodeSnappyBlockAsm
+ MOVL (R8)(R10*1), R9
+ CMPL (SI)(R10*1), R9
+ JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm
+ SUBL $0x04, DI
+ LEAL 4(R10), R10
+
+matchlen_match2_match_nolit_encodeSnappyBlockAsm:
+ CMPL DI, $0x02
+ JL matchlen_match1_match_nolit_encodeSnappyBlockAsm
+ MOVW (R8)(R10*1), R9
+ CMPW (SI)(R10*1), R9
+ JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm
+ SUBL $0x02, DI
+ LEAL 2(R10), R10
+
+matchlen_match1_match_nolit_encodeSnappyBlockAsm:
+ CMPL DI, $0x01
+ JL match_nolit_end_encodeSnappyBlockAsm
MOVB (R8)(R10*1), R9
CMPB (SI)(R10*1), R9
JNE match_nolit_end_encodeSnappyBlockAsm
LEAL 1(R10), R10
- DECL DI
- JNZ matchlen_single_loopback_match_nolit_encodeSnappyBlockAsm
match_nolit_end_encodeSnappyBlockAsm:
ADDL R10, CX
@@ -9660,7 +10220,7 @@ emit_literal_done_emit_remainder_encodeSnappyBlockAsm:
RET
// func encodeSnappyBlockAsm64K(dst []byte, src []byte) int
-// Requires: SSE2
+// Requires: BMI, SSE2
TEXT ·encodeSnappyBlockAsm64K(SB), $65560-56
MOVQ dst_base+0(FP), AX
MOVQ $0x00000200, CX
@@ -9877,35 +10437,68 @@ emit_literal_done_repeat_emit_encodeSnappyBlockAsm64K:
// matchLen
XORL R11, R11
CMPL R8, $0x08
- JL matchlen_single_repeat_extend_encodeSnappyBlockAsm64K
+ JL matchlen_match4_repeat_extend_encodeSnappyBlockAsm64K
matchlen_loopback_repeat_extend_encodeSnappyBlockAsm64K:
MOVQ (R9)(R11*1), R10
XORQ (SI)(R11*1), R10
TESTQ R10, R10
JZ matchlen_loop_repeat_extend_encodeSnappyBlockAsm64K
- BSFQ R10, R10
- SARQ $0x03, R10
- LEAL (R11)(R10*1), R11
- JMP repeat_extend_forward_end_encodeSnappyBlockAsm64K
+
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef GOAMD64_v4
+ TZCNTQ R10, R10
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef TZCNTQ_EMITTED
+#undef TZCNTQ_EMITTED
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
+ JMP repeat_extend_forward_end_encodeSnappyBlockAsm64K
matchlen_loop_repeat_extend_encodeSnappyBlockAsm64K:
LEAL -8(R8), R8
LEAL 8(R11), R11
CMPL R8, $0x08
JGE matchlen_loopback_repeat_extend_encodeSnappyBlockAsm64K
-
-matchlen_single_repeat_extend_encodeSnappyBlockAsm64K:
- TESTL R8, R8
- JZ repeat_extend_forward_end_encodeSnappyBlockAsm64K
-
-matchlen_single_loopback_repeat_extend_encodeSnappyBlockAsm64K:
+ JZ repeat_extend_forward_end_encodeSnappyBlockAsm64K
+
+matchlen_match4_repeat_extend_encodeSnappyBlockAsm64K:
+ CMPL R8, $0x04
+ JL matchlen_match2_repeat_extend_encodeSnappyBlockAsm64K
+ MOVL (R9)(R11*1), R10
+ CMPL (SI)(R11*1), R10
+ JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm64K
+ SUBL $0x04, R8
+ LEAL 4(R11), R11
+
+matchlen_match2_repeat_extend_encodeSnappyBlockAsm64K:
+ CMPL R8, $0x02
+ JL matchlen_match1_repeat_extend_encodeSnappyBlockAsm64K
+ MOVW (R9)(R11*1), R10
+ CMPW (SI)(R11*1), R10
+ JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm64K
+ SUBL $0x02, R8
+ LEAL 2(R11), R11
+
+matchlen_match1_repeat_extend_encodeSnappyBlockAsm64K:
+ CMPL R8, $0x01
+ JL repeat_extend_forward_end_encodeSnappyBlockAsm64K
MOVB (R9)(R11*1), R10
CMPB (SI)(R11*1), R10
JNE repeat_extend_forward_end_encodeSnappyBlockAsm64K
LEAL 1(R11), R11
- DECL R8
- JNZ matchlen_single_loopback_repeat_extend_encodeSnappyBlockAsm64K
repeat_extend_forward_end_encodeSnappyBlockAsm64K:
ADDL R11, CX
@@ -10135,35 +10728,68 @@ match_nolit_loop_encodeSnappyBlockAsm64K:
// matchLen
XORL R10, R10
CMPL DI, $0x08
- JL matchlen_single_match_nolit_encodeSnappyBlockAsm64K
+ JL matchlen_match4_match_nolit_encodeSnappyBlockAsm64K
matchlen_loopback_match_nolit_encodeSnappyBlockAsm64K:
MOVQ (R8)(R10*1), R9
XORQ (SI)(R10*1), R9
TESTQ R9, R9
JZ matchlen_loop_match_nolit_encodeSnappyBlockAsm64K
- BSFQ R9, R9
- SARQ $0x03, R9
- LEAL (R10)(R9*1), R10
- JMP match_nolit_end_encodeSnappyBlockAsm64K
+
+#ifdef GOAMD64_v3
+ TZCNTQ R9, R9
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef GOAMD64_v4
+ TZCNTQ R9, R9
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef TZCNTQ_EMITTED
+#undef TZCNTQ_EMITTED
+#else
+ BSFQ R9, R9
+
+#endif
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
+ JMP match_nolit_end_encodeSnappyBlockAsm64K
matchlen_loop_match_nolit_encodeSnappyBlockAsm64K:
LEAL -8(DI), DI
LEAL 8(R10), R10
CMPL DI, $0x08
JGE matchlen_loopback_match_nolit_encodeSnappyBlockAsm64K
-
-matchlen_single_match_nolit_encodeSnappyBlockAsm64K:
- TESTL DI, DI
- JZ match_nolit_end_encodeSnappyBlockAsm64K
-
-matchlen_single_loopback_match_nolit_encodeSnappyBlockAsm64K:
+ JZ match_nolit_end_encodeSnappyBlockAsm64K
+
+matchlen_match4_match_nolit_encodeSnappyBlockAsm64K:
+ CMPL DI, $0x04
+ JL matchlen_match2_match_nolit_encodeSnappyBlockAsm64K
+ MOVL (R8)(R10*1), R9
+ CMPL (SI)(R10*1), R9
+ JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm64K
+ SUBL $0x04, DI
+ LEAL 4(R10), R10
+
+matchlen_match2_match_nolit_encodeSnappyBlockAsm64K:
+ CMPL DI, $0x02
+ JL matchlen_match1_match_nolit_encodeSnappyBlockAsm64K
+ MOVW (R8)(R10*1), R9
+ CMPW (SI)(R10*1), R9
+ JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm64K
+ SUBL $0x02, DI
+ LEAL 2(R10), R10
+
+matchlen_match1_match_nolit_encodeSnappyBlockAsm64K:
+ CMPL DI, $0x01
+ JL match_nolit_end_encodeSnappyBlockAsm64K
MOVB (R8)(R10*1), R9
CMPB (SI)(R10*1), R9
JNE match_nolit_end_encodeSnappyBlockAsm64K
LEAL 1(R10), R10
- DECL DI
- JNZ matchlen_single_loopback_match_nolit_encodeSnappyBlockAsm64K
match_nolit_end_encodeSnappyBlockAsm64K:
ADDL R10, CX
@@ -10372,7 +10998,7 @@ emit_literal_done_emit_remainder_encodeSnappyBlockAsm64K:
RET
// func encodeSnappyBlockAsm12B(dst []byte, src []byte) int
-// Requires: SSE2
+// Requires: BMI, SSE2
TEXT ·encodeSnappyBlockAsm12B(SB), $16408-56
MOVQ dst_base+0(FP), AX
MOVQ $0x00000080, CX
@@ -10589,35 +11215,68 @@ emit_literal_done_repeat_emit_encodeSnappyBlockAsm12B:
// matchLen
XORL R11, R11
CMPL R8, $0x08
- JL matchlen_single_repeat_extend_encodeSnappyBlockAsm12B
+ JL matchlen_match4_repeat_extend_encodeSnappyBlockAsm12B
matchlen_loopback_repeat_extend_encodeSnappyBlockAsm12B:
MOVQ (R9)(R11*1), R10
XORQ (SI)(R11*1), R10
TESTQ R10, R10
JZ matchlen_loop_repeat_extend_encodeSnappyBlockAsm12B
- BSFQ R10, R10
- SARQ $0x03, R10
- LEAL (R11)(R10*1), R11
- JMP repeat_extend_forward_end_encodeSnappyBlockAsm12B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef GOAMD64_v4
+ TZCNTQ R10, R10
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef TZCNTQ_EMITTED
+#undef TZCNTQ_EMITTED
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
+ JMP repeat_extend_forward_end_encodeSnappyBlockAsm12B
matchlen_loop_repeat_extend_encodeSnappyBlockAsm12B:
LEAL -8(R8), R8
LEAL 8(R11), R11
CMPL R8, $0x08
JGE matchlen_loopback_repeat_extend_encodeSnappyBlockAsm12B
-
-matchlen_single_repeat_extend_encodeSnappyBlockAsm12B:
- TESTL R8, R8
- JZ repeat_extend_forward_end_encodeSnappyBlockAsm12B
-
-matchlen_single_loopback_repeat_extend_encodeSnappyBlockAsm12B:
+ JZ repeat_extend_forward_end_encodeSnappyBlockAsm12B
+
+matchlen_match4_repeat_extend_encodeSnappyBlockAsm12B:
+ CMPL R8, $0x04
+ JL matchlen_match2_repeat_extend_encodeSnappyBlockAsm12B
+ MOVL (R9)(R11*1), R10
+ CMPL (SI)(R11*1), R10
+ JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm12B
+ SUBL $0x04, R8
+ LEAL 4(R11), R11
+
+matchlen_match2_repeat_extend_encodeSnappyBlockAsm12B:
+ CMPL R8, $0x02
+ JL matchlen_match1_repeat_extend_encodeSnappyBlockAsm12B
+ MOVW (R9)(R11*1), R10
+ CMPW (SI)(R11*1), R10
+ JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm12B
+ SUBL $0x02, R8
+ LEAL 2(R11), R11
+
+matchlen_match1_repeat_extend_encodeSnappyBlockAsm12B:
+ CMPL R8, $0x01
+ JL repeat_extend_forward_end_encodeSnappyBlockAsm12B
MOVB (R9)(R11*1), R10
CMPB (SI)(R11*1), R10
JNE repeat_extend_forward_end_encodeSnappyBlockAsm12B
LEAL 1(R11), R11
- DECL R8
- JNZ matchlen_single_loopback_repeat_extend_encodeSnappyBlockAsm12B
repeat_extend_forward_end_encodeSnappyBlockAsm12B:
ADDL R11, CX
@@ -10847,35 +11506,68 @@ match_nolit_loop_encodeSnappyBlockAsm12B:
// matchLen
XORL R10, R10
CMPL DI, $0x08
- JL matchlen_single_match_nolit_encodeSnappyBlockAsm12B
+ JL matchlen_match4_match_nolit_encodeSnappyBlockAsm12B
matchlen_loopback_match_nolit_encodeSnappyBlockAsm12B:
MOVQ (R8)(R10*1), R9
XORQ (SI)(R10*1), R9
TESTQ R9, R9
JZ matchlen_loop_match_nolit_encodeSnappyBlockAsm12B
- BSFQ R9, R9
- SARQ $0x03, R9
- LEAL (R10)(R9*1), R10
- JMP match_nolit_end_encodeSnappyBlockAsm12B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R9, R9
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef GOAMD64_v4
+ TZCNTQ R9, R9
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef TZCNTQ_EMITTED
+#undef TZCNTQ_EMITTED
+#else
+ BSFQ R9, R9
+
+#endif
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
+ JMP match_nolit_end_encodeSnappyBlockAsm12B
matchlen_loop_match_nolit_encodeSnappyBlockAsm12B:
LEAL -8(DI), DI
LEAL 8(R10), R10
CMPL DI, $0x08
JGE matchlen_loopback_match_nolit_encodeSnappyBlockAsm12B
-
-matchlen_single_match_nolit_encodeSnappyBlockAsm12B:
- TESTL DI, DI
- JZ match_nolit_end_encodeSnappyBlockAsm12B
-
-matchlen_single_loopback_match_nolit_encodeSnappyBlockAsm12B:
+ JZ match_nolit_end_encodeSnappyBlockAsm12B
+
+matchlen_match4_match_nolit_encodeSnappyBlockAsm12B:
+ CMPL DI, $0x04
+ JL matchlen_match2_match_nolit_encodeSnappyBlockAsm12B
+ MOVL (R8)(R10*1), R9
+ CMPL (SI)(R10*1), R9
+ JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm12B
+ SUBL $0x04, DI
+ LEAL 4(R10), R10
+
+matchlen_match2_match_nolit_encodeSnappyBlockAsm12B:
+ CMPL DI, $0x02
+ JL matchlen_match1_match_nolit_encodeSnappyBlockAsm12B
+ MOVW (R8)(R10*1), R9
+ CMPW (SI)(R10*1), R9
+ JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm12B
+ SUBL $0x02, DI
+ LEAL 2(R10), R10
+
+matchlen_match1_match_nolit_encodeSnappyBlockAsm12B:
+ CMPL DI, $0x01
+ JL match_nolit_end_encodeSnappyBlockAsm12B
MOVB (R8)(R10*1), R9
CMPB (SI)(R10*1), R9
JNE match_nolit_end_encodeSnappyBlockAsm12B
LEAL 1(R10), R10
- DECL DI
- JNZ matchlen_single_loopback_match_nolit_encodeSnappyBlockAsm12B
match_nolit_end_encodeSnappyBlockAsm12B:
ADDL R10, CX
@@ -11084,7 +11776,7 @@ emit_literal_done_emit_remainder_encodeSnappyBlockAsm12B:
RET
// func encodeSnappyBlockAsm10B(dst []byte, src []byte) int
-// Requires: SSE2
+// Requires: BMI, SSE2
TEXT ·encodeSnappyBlockAsm10B(SB), $4120-56
MOVQ dst_base+0(FP), AX
MOVQ $0x00000020, CX
@@ -11301,35 +11993,68 @@ emit_literal_done_repeat_emit_encodeSnappyBlockAsm10B:
// matchLen
XORL R11, R11
CMPL R8, $0x08
- JL matchlen_single_repeat_extend_encodeSnappyBlockAsm10B
+ JL matchlen_match4_repeat_extend_encodeSnappyBlockAsm10B
matchlen_loopback_repeat_extend_encodeSnappyBlockAsm10B:
MOVQ (R9)(R11*1), R10
XORQ (SI)(R11*1), R10
TESTQ R10, R10
JZ matchlen_loop_repeat_extend_encodeSnappyBlockAsm10B
- BSFQ R10, R10
- SARQ $0x03, R10
- LEAL (R11)(R10*1), R11
- JMP repeat_extend_forward_end_encodeSnappyBlockAsm10B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef GOAMD64_v4
+ TZCNTQ R10, R10
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef TZCNTQ_EMITTED
+#undef TZCNTQ_EMITTED
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
+ JMP repeat_extend_forward_end_encodeSnappyBlockAsm10B
matchlen_loop_repeat_extend_encodeSnappyBlockAsm10B:
LEAL -8(R8), R8
LEAL 8(R11), R11
CMPL R8, $0x08
JGE matchlen_loopback_repeat_extend_encodeSnappyBlockAsm10B
-
-matchlen_single_repeat_extend_encodeSnappyBlockAsm10B:
- TESTL R8, R8
- JZ repeat_extend_forward_end_encodeSnappyBlockAsm10B
-
-matchlen_single_loopback_repeat_extend_encodeSnappyBlockAsm10B:
+ JZ repeat_extend_forward_end_encodeSnappyBlockAsm10B
+
+matchlen_match4_repeat_extend_encodeSnappyBlockAsm10B:
+ CMPL R8, $0x04
+ JL matchlen_match2_repeat_extend_encodeSnappyBlockAsm10B
+ MOVL (R9)(R11*1), R10
+ CMPL (SI)(R11*1), R10
+ JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm10B
+ SUBL $0x04, R8
+ LEAL 4(R11), R11
+
+matchlen_match2_repeat_extend_encodeSnappyBlockAsm10B:
+ CMPL R8, $0x02
+ JL matchlen_match1_repeat_extend_encodeSnappyBlockAsm10B
+ MOVW (R9)(R11*1), R10
+ CMPW (SI)(R11*1), R10
+ JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm10B
+ SUBL $0x02, R8
+ LEAL 2(R11), R11
+
+matchlen_match1_repeat_extend_encodeSnappyBlockAsm10B:
+ CMPL R8, $0x01
+ JL repeat_extend_forward_end_encodeSnappyBlockAsm10B
MOVB (R9)(R11*1), R10
CMPB (SI)(R11*1), R10
JNE repeat_extend_forward_end_encodeSnappyBlockAsm10B
LEAL 1(R11), R11
- DECL R8
- JNZ matchlen_single_loopback_repeat_extend_encodeSnappyBlockAsm10B
repeat_extend_forward_end_encodeSnappyBlockAsm10B:
ADDL R11, CX
@@ -11559,35 +12284,68 @@ match_nolit_loop_encodeSnappyBlockAsm10B:
// matchLen
XORL R10, R10
CMPL DI, $0x08
- JL matchlen_single_match_nolit_encodeSnappyBlockAsm10B
+ JL matchlen_match4_match_nolit_encodeSnappyBlockAsm10B
matchlen_loopback_match_nolit_encodeSnappyBlockAsm10B:
MOVQ (R8)(R10*1), R9
XORQ (SI)(R10*1), R9
TESTQ R9, R9
JZ matchlen_loop_match_nolit_encodeSnappyBlockAsm10B
- BSFQ R9, R9
- SARQ $0x03, R9
- LEAL (R10)(R9*1), R10
- JMP match_nolit_end_encodeSnappyBlockAsm10B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R9, R9
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef GOAMD64_v4
+ TZCNTQ R9, R9
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef TZCNTQ_EMITTED
+#undef TZCNTQ_EMITTED
+#else
+ BSFQ R9, R9
+
+#endif
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
+ JMP match_nolit_end_encodeSnappyBlockAsm10B
matchlen_loop_match_nolit_encodeSnappyBlockAsm10B:
LEAL -8(DI), DI
LEAL 8(R10), R10
CMPL DI, $0x08
JGE matchlen_loopback_match_nolit_encodeSnappyBlockAsm10B
-
-matchlen_single_match_nolit_encodeSnappyBlockAsm10B:
- TESTL DI, DI
- JZ match_nolit_end_encodeSnappyBlockAsm10B
-
-matchlen_single_loopback_match_nolit_encodeSnappyBlockAsm10B:
+ JZ match_nolit_end_encodeSnappyBlockAsm10B
+
+matchlen_match4_match_nolit_encodeSnappyBlockAsm10B:
+ CMPL DI, $0x04
+ JL matchlen_match2_match_nolit_encodeSnappyBlockAsm10B
+ MOVL (R8)(R10*1), R9
+ CMPL (SI)(R10*1), R9
+ JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm10B
+ SUBL $0x04, DI
+ LEAL 4(R10), R10
+
+matchlen_match2_match_nolit_encodeSnappyBlockAsm10B:
+ CMPL DI, $0x02
+ JL matchlen_match1_match_nolit_encodeSnappyBlockAsm10B
+ MOVW (R8)(R10*1), R9
+ CMPW (SI)(R10*1), R9
+ JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm10B
+ SUBL $0x02, DI
+ LEAL 2(R10), R10
+
+matchlen_match1_match_nolit_encodeSnappyBlockAsm10B:
+ CMPL DI, $0x01
+ JL match_nolit_end_encodeSnappyBlockAsm10B
MOVB (R8)(R10*1), R9
CMPB (SI)(R10*1), R9
JNE match_nolit_end_encodeSnappyBlockAsm10B
LEAL 1(R10), R10
- DECL DI
- JNZ matchlen_single_loopback_match_nolit_encodeSnappyBlockAsm10B
match_nolit_end_encodeSnappyBlockAsm10B:
ADDL R10, CX
@@ -11796,7 +12554,7 @@ emit_literal_done_emit_remainder_encodeSnappyBlockAsm10B:
RET
// func encodeSnappyBlockAsm8B(dst []byte, src []byte) int
-// Requires: SSE2
+// Requires: BMI, SSE2
TEXT ·encodeSnappyBlockAsm8B(SB), $1048-56
MOVQ dst_base+0(FP), AX
MOVQ $0x00000008, CX
@@ -12013,35 +12771,68 @@ emit_literal_done_repeat_emit_encodeSnappyBlockAsm8B:
// matchLen
XORL R11, R11
CMPL R8, $0x08
- JL matchlen_single_repeat_extend_encodeSnappyBlockAsm8B
+ JL matchlen_match4_repeat_extend_encodeSnappyBlockAsm8B
matchlen_loopback_repeat_extend_encodeSnappyBlockAsm8B:
MOVQ (R9)(R11*1), R10
XORQ (SI)(R11*1), R10
TESTQ R10, R10
JZ matchlen_loop_repeat_extend_encodeSnappyBlockAsm8B
- BSFQ R10, R10
- SARQ $0x03, R10
- LEAL (R11)(R10*1), R11
- JMP repeat_extend_forward_end_encodeSnappyBlockAsm8B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R10, R10
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef GOAMD64_v4
+ TZCNTQ R10, R10
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef TZCNTQ_EMITTED
+#undef TZCNTQ_EMITTED
+#else
+ BSFQ R10, R10
+
+#endif
+ SARQ $0x03, R10
+ LEAL (R11)(R10*1), R11
+ JMP repeat_extend_forward_end_encodeSnappyBlockAsm8B
matchlen_loop_repeat_extend_encodeSnappyBlockAsm8B:
LEAL -8(R8), R8
LEAL 8(R11), R11
CMPL R8, $0x08
JGE matchlen_loopback_repeat_extend_encodeSnappyBlockAsm8B
-
-matchlen_single_repeat_extend_encodeSnappyBlockAsm8B:
- TESTL R8, R8
- JZ repeat_extend_forward_end_encodeSnappyBlockAsm8B
-
-matchlen_single_loopback_repeat_extend_encodeSnappyBlockAsm8B:
+ JZ repeat_extend_forward_end_encodeSnappyBlockAsm8B
+
+matchlen_match4_repeat_extend_encodeSnappyBlockAsm8B:
+ CMPL R8, $0x04
+ JL matchlen_match2_repeat_extend_encodeSnappyBlockAsm8B
+ MOVL (R9)(R11*1), R10
+ CMPL (SI)(R11*1), R10
+ JNE matchlen_match2_repeat_extend_encodeSnappyBlockAsm8B
+ SUBL $0x04, R8
+ LEAL 4(R11), R11
+
+matchlen_match2_repeat_extend_encodeSnappyBlockAsm8B:
+ CMPL R8, $0x02
+ JL matchlen_match1_repeat_extend_encodeSnappyBlockAsm8B
+ MOVW (R9)(R11*1), R10
+ CMPW (SI)(R11*1), R10
+ JNE matchlen_match1_repeat_extend_encodeSnappyBlockAsm8B
+ SUBL $0x02, R8
+ LEAL 2(R11), R11
+
+matchlen_match1_repeat_extend_encodeSnappyBlockAsm8B:
+ CMPL R8, $0x01
+ JL repeat_extend_forward_end_encodeSnappyBlockAsm8B
MOVB (R9)(R11*1), R10
CMPB (SI)(R11*1), R10
JNE repeat_extend_forward_end_encodeSnappyBlockAsm8B
LEAL 1(R11), R11
- DECL R8
- JNZ matchlen_single_loopback_repeat_extend_encodeSnappyBlockAsm8B
repeat_extend_forward_end_encodeSnappyBlockAsm8B:
ADDL R11, CX
@@ -12269,35 +13060,68 @@ match_nolit_loop_encodeSnappyBlockAsm8B:
// matchLen
XORL R10, R10
CMPL DI, $0x08
- JL matchlen_single_match_nolit_encodeSnappyBlockAsm8B
+ JL matchlen_match4_match_nolit_encodeSnappyBlockAsm8B
matchlen_loopback_match_nolit_encodeSnappyBlockAsm8B:
MOVQ (R8)(R10*1), R9
XORQ (SI)(R10*1), R9
TESTQ R9, R9
JZ matchlen_loop_match_nolit_encodeSnappyBlockAsm8B
- BSFQ R9, R9
- SARQ $0x03, R9
- LEAL (R10)(R9*1), R10
- JMP match_nolit_end_encodeSnappyBlockAsm8B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R9, R9
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef GOAMD64_v4
+ TZCNTQ R9, R9
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef TZCNTQ_EMITTED
+#undef TZCNTQ_EMITTED
+#else
+ BSFQ R9, R9
+
+#endif
+ SARQ $0x03, R9
+ LEAL (R10)(R9*1), R10
+ JMP match_nolit_end_encodeSnappyBlockAsm8B
matchlen_loop_match_nolit_encodeSnappyBlockAsm8B:
LEAL -8(DI), DI
LEAL 8(R10), R10
CMPL DI, $0x08
JGE matchlen_loopback_match_nolit_encodeSnappyBlockAsm8B
-
-matchlen_single_match_nolit_encodeSnappyBlockAsm8B:
- TESTL DI, DI
- JZ match_nolit_end_encodeSnappyBlockAsm8B
-
-matchlen_single_loopback_match_nolit_encodeSnappyBlockAsm8B:
+ JZ match_nolit_end_encodeSnappyBlockAsm8B
+
+matchlen_match4_match_nolit_encodeSnappyBlockAsm8B:
+ CMPL DI, $0x04
+ JL matchlen_match2_match_nolit_encodeSnappyBlockAsm8B
+ MOVL (R8)(R10*1), R9
+ CMPL (SI)(R10*1), R9
+ JNE matchlen_match2_match_nolit_encodeSnappyBlockAsm8B
+ SUBL $0x04, DI
+ LEAL 4(R10), R10
+
+matchlen_match2_match_nolit_encodeSnappyBlockAsm8B:
+ CMPL DI, $0x02
+ JL matchlen_match1_match_nolit_encodeSnappyBlockAsm8B
+ MOVW (R8)(R10*1), R9
+ CMPW (SI)(R10*1), R9
+ JNE matchlen_match1_match_nolit_encodeSnappyBlockAsm8B
+ SUBL $0x02, DI
+ LEAL 2(R10), R10
+
+matchlen_match1_match_nolit_encodeSnappyBlockAsm8B:
+ CMPL DI, $0x01
+ JL match_nolit_end_encodeSnappyBlockAsm8B
MOVB (R8)(R10*1), R9
CMPB (SI)(R10*1), R9
JNE match_nolit_end_encodeSnappyBlockAsm8B
LEAL 1(R10), R10
- DECL DI
- JNZ matchlen_single_loopback_match_nolit_encodeSnappyBlockAsm8B
match_nolit_end_encodeSnappyBlockAsm8B:
ADDL R10, CX
@@ -12504,7 +13328,7 @@ emit_literal_done_emit_remainder_encodeSnappyBlockAsm8B:
RET
// func encodeSnappyBetterBlockAsm(dst []byte, src []byte) int
-// Requires: SSE2
+// Requires: BMI, SSE2
TEXT ·encodeSnappyBetterBlockAsm(SB), $327704-56
MOVQ dst_base+0(FP), AX
MOVQ $0x00000a00, CX
@@ -12626,35 +13450,68 @@ match_dst_size_check_encodeSnappyBetterBlockAsm:
// matchLen
XORL R12, R12
CMPL R8, $0x08
- JL matchlen_single_match_nolit_encodeSnappyBetterBlockAsm
+ JL matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm
matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm:
MOVQ (R9)(R12*1), R11
XORQ (R10)(R12*1), R11
TESTQ R11, R11
JZ matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm
- BSFQ R11, R11
- SARQ $0x03, R11
- LEAL (R12)(R11*1), R12
- JMP match_nolit_end_encodeSnappyBetterBlockAsm
+
+#ifdef GOAMD64_v3
+ TZCNTQ R11, R11
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef GOAMD64_v4
+ TZCNTQ R11, R11
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef TZCNTQ_EMITTED
+#undef TZCNTQ_EMITTED
+#else
+ BSFQ R11, R11
+
+#endif
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
+ JMP match_nolit_end_encodeSnappyBetterBlockAsm
matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm:
LEAL -8(R8), R8
LEAL 8(R12), R12
CMPL R8, $0x08
JGE matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm
-
-matchlen_single_match_nolit_encodeSnappyBetterBlockAsm:
- TESTL R8, R8
- JZ match_nolit_end_encodeSnappyBetterBlockAsm
-
-matchlen_single_loopback_match_nolit_encodeSnappyBetterBlockAsm:
+ JZ match_nolit_end_encodeSnappyBetterBlockAsm
+
+matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm:
+ CMPL R8, $0x04
+ JL matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm
+ MOVL (R9)(R12*1), R11
+ CMPL (R10)(R12*1), R11
+ JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm
+ SUBL $0x04, R8
+ LEAL 4(R12), R12
+
+matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm:
+ CMPL R8, $0x02
+ JL matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm
+ MOVW (R9)(R12*1), R11
+ CMPW (R10)(R12*1), R11
+ JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm
+ SUBL $0x02, R8
+ LEAL 2(R12), R12
+
+matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm:
+ CMPL R8, $0x01
+ JL match_nolit_end_encodeSnappyBetterBlockAsm
MOVB (R9)(R12*1), R11
CMPB (R10)(R12*1), R11
JNE match_nolit_end_encodeSnappyBetterBlockAsm
LEAL 1(R12), R12
- DECL R8
- JNZ matchlen_single_loopback_match_nolit_encodeSnappyBetterBlockAsm
match_nolit_end_encodeSnappyBetterBlockAsm:
MOVL CX, R8
@@ -13086,7 +13943,7 @@ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm:
RET
// func encodeSnappyBetterBlockAsm64K(dst []byte, src []byte) int
-// Requires: SSE2
+// Requires: BMI, SSE2
TEXT ·encodeSnappyBetterBlockAsm64K(SB), $327704-56
MOVQ dst_base+0(FP), AX
MOVQ $0x00000a00, CX
@@ -13200,35 +14057,68 @@ match_dst_size_check_encodeSnappyBetterBlockAsm64K:
// matchLen
XORL R12, R12
CMPL R8, $0x08
- JL matchlen_single_match_nolit_encodeSnappyBetterBlockAsm64K
+ JL matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm64K
matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm64K:
MOVQ (R9)(R12*1), R11
XORQ (R10)(R12*1), R11
TESTQ R11, R11
JZ matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm64K
- BSFQ R11, R11
- SARQ $0x03, R11
- LEAL (R12)(R11*1), R12
- JMP match_nolit_end_encodeSnappyBetterBlockAsm64K
+
+#ifdef GOAMD64_v3
+ TZCNTQ R11, R11
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef GOAMD64_v4
+ TZCNTQ R11, R11
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef TZCNTQ_EMITTED
+#undef TZCNTQ_EMITTED
+#else
+ BSFQ R11, R11
+
+#endif
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
+ JMP match_nolit_end_encodeSnappyBetterBlockAsm64K
matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm64K:
LEAL -8(R8), R8
LEAL 8(R12), R12
CMPL R8, $0x08
JGE matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm64K
-
-matchlen_single_match_nolit_encodeSnappyBetterBlockAsm64K:
- TESTL R8, R8
- JZ match_nolit_end_encodeSnappyBetterBlockAsm64K
-
-matchlen_single_loopback_match_nolit_encodeSnappyBetterBlockAsm64K:
+ JZ match_nolit_end_encodeSnappyBetterBlockAsm64K
+
+matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm64K:
+ CMPL R8, $0x04
+ JL matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm64K
+ MOVL (R9)(R12*1), R11
+ CMPL (R10)(R12*1), R11
+ JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm64K
+ SUBL $0x04, R8
+ LEAL 4(R12), R12
+
+matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm64K:
+ CMPL R8, $0x02
+ JL matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm64K
+ MOVW (R9)(R12*1), R11
+ CMPW (R10)(R12*1), R11
+ JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm64K
+ SUBL $0x02, R8
+ LEAL 2(R12), R12
+
+matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm64K:
+ CMPL R8, $0x01
+ JL match_nolit_end_encodeSnappyBetterBlockAsm64K
MOVB (R9)(R12*1), R11
CMPB (R10)(R12*1), R11
JNE match_nolit_end_encodeSnappyBetterBlockAsm64K
LEAL 1(R12), R12
- DECL R8
- JNZ matchlen_single_loopback_match_nolit_encodeSnappyBetterBlockAsm64K
match_nolit_end_encodeSnappyBetterBlockAsm64K:
MOVL CX, R8
@@ -13589,7 +14479,7 @@ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm64K:
RET
// func encodeSnappyBetterBlockAsm12B(dst []byte, src []byte) int
-// Requires: SSE2
+// Requires: BMI, SSE2
TEXT ·encodeSnappyBetterBlockAsm12B(SB), $81944-56
MOVQ dst_base+0(FP), AX
MOVQ $0x00000280, CX
@@ -13703,35 +14593,68 @@ match_dst_size_check_encodeSnappyBetterBlockAsm12B:
// matchLen
XORL R12, R12
CMPL R8, $0x08
- JL matchlen_single_match_nolit_encodeSnappyBetterBlockAsm12B
+ JL matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm12B
matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm12B:
MOVQ (R9)(R12*1), R11
XORQ (R10)(R12*1), R11
TESTQ R11, R11
JZ matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm12B
- BSFQ R11, R11
- SARQ $0x03, R11
- LEAL (R12)(R11*1), R12
- JMP match_nolit_end_encodeSnappyBetterBlockAsm12B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R11, R11
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef GOAMD64_v4
+ TZCNTQ R11, R11
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef TZCNTQ_EMITTED
+#undef TZCNTQ_EMITTED
+#else
+ BSFQ R11, R11
+
+#endif
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
+ JMP match_nolit_end_encodeSnappyBetterBlockAsm12B
matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm12B:
LEAL -8(R8), R8
LEAL 8(R12), R12
CMPL R8, $0x08
JGE matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm12B
-
-matchlen_single_match_nolit_encodeSnappyBetterBlockAsm12B:
- TESTL R8, R8
- JZ match_nolit_end_encodeSnappyBetterBlockAsm12B
-
-matchlen_single_loopback_match_nolit_encodeSnappyBetterBlockAsm12B:
+ JZ match_nolit_end_encodeSnappyBetterBlockAsm12B
+
+matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm12B:
+ CMPL R8, $0x04
+ JL matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm12B
+ MOVL (R9)(R12*1), R11
+ CMPL (R10)(R12*1), R11
+ JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm12B
+ SUBL $0x04, R8
+ LEAL 4(R12), R12
+
+matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm12B:
+ CMPL R8, $0x02
+ JL matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm12B
+ MOVW (R9)(R12*1), R11
+ CMPW (R10)(R12*1), R11
+ JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm12B
+ SUBL $0x02, R8
+ LEAL 2(R12), R12
+
+matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm12B:
+ CMPL R8, $0x01
+ JL match_nolit_end_encodeSnappyBetterBlockAsm12B
MOVB (R9)(R12*1), R11
CMPB (R10)(R12*1), R11
JNE match_nolit_end_encodeSnappyBetterBlockAsm12B
LEAL 1(R12), R12
- DECL R8
- JNZ matchlen_single_loopback_match_nolit_encodeSnappyBetterBlockAsm12B
match_nolit_end_encodeSnappyBetterBlockAsm12B:
MOVL CX, R8
@@ -14092,7 +15015,7 @@ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm12B:
RET
// func encodeSnappyBetterBlockAsm10B(dst []byte, src []byte) int
-// Requires: SSE2
+// Requires: BMI, SSE2
TEXT ·encodeSnappyBetterBlockAsm10B(SB), $20504-56
MOVQ dst_base+0(FP), AX
MOVQ $0x000000a0, CX
@@ -14206,35 +15129,68 @@ match_dst_size_check_encodeSnappyBetterBlockAsm10B:
// matchLen
XORL R12, R12
CMPL R8, $0x08
- JL matchlen_single_match_nolit_encodeSnappyBetterBlockAsm10B
+ JL matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm10B
matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm10B:
MOVQ (R9)(R12*1), R11
XORQ (R10)(R12*1), R11
TESTQ R11, R11
JZ matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm10B
- BSFQ R11, R11
- SARQ $0x03, R11
- LEAL (R12)(R11*1), R12
- JMP match_nolit_end_encodeSnappyBetterBlockAsm10B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R11, R11
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef GOAMD64_v4
+ TZCNTQ R11, R11
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef TZCNTQ_EMITTED
+#undef TZCNTQ_EMITTED
+#else
+ BSFQ R11, R11
+
+#endif
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
+ JMP match_nolit_end_encodeSnappyBetterBlockAsm10B
matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm10B:
LEAL -8(R8), R8
LEAL 8(R12), R12
CMPL R8, $0x08
JGE matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm10B
-
-matchlen_single_match_nolit_encodeSnappyBetterBlockAsm10B:
- TESTL R8, R8
- JZ match_nolit_end_encodeSnappyBetterBlockAsm10B
-
-matchlen_single_loopback_match_nolit_encodeSnappyBetterBlockAsm10B:
+ JZ match_nolit_end_encodeSnappyBetterBlockAsm10B
+
+matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm10B:
+ CMPL R8, $0x04
+ JL matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm10B
+ MOVL (R9)(R12*1), R11
+ CMPL (R10)(R12*1), R11
+ JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm10B
+ SUBL $0x04, R8
+ LEAL 4(R12), R12
+
+matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm10B:
+ CMPL R8, $0x02
+ JL matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm10B
+ MOVW (R9)(R12*1), R11
+ CMPW (R10)(R12*1), R11
+ JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm10B
+ SUBL $0x02, R8
+ LEAL 2(R12), R12
+
+matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm10B:
+ CMPL R8, $0x01
+ JL match_nolit_end_encodeSnappyBetterBlockAsm10B
MOVB (R9)(R12*1), R11
CMPB (R10)(R12*1), R11
JNE match_nolit_end_encodeSnappyBetterBlockAsm10B
LEAL 1(R12), R12
- DECL R8
- JNZ matchlen_single_loopback_match_nolit_encodeSnappyBetterBlockAsm10B
match_nolit_end_encodeSnappyBetterBlockAsm10B:
MOVL CX, R8
@@ -14595,7 +15551,7 @@ emit_literal_done_emit_remainder_encodeSnappyBetterBlockAsm10B:
RET
// func encodeSnappyBetterBlockAsm8B(dst []byte, src []byte) int
-// Requires: SSE2
+// Requires: BMI, SSE2
TEXT ·encodeSnappyBetterBlockAsm8B(SB), $5144-56
MOVQ dst_base+0(FP), AX
MOVQ $0x00000028, CX
@@ -14709,35 +15665,68 @@ match_dst_size_check_encodeSnappyBetterBlockAsm8B:
// matchLen
XORL R12, R12
CMPL R8, $0x08
- JL matchlen_single_match_nolit_encodeSnappyBetterBlockAsm8B
+ JL matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm8B
matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm8B:
MOVQ (R9)(R12*1), R11
XORQ (R10)(R12*1), R11
TESTQ R11, R11
JZ matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm8B
- BSFQ R11, R11
- SARQ $0x03, R11
- LEAL (R12)(R11*1), R12
- JMP match_nolit_end_encodeSnappyBetterBlockAsm8B
+
+#ifdef GOAMD64_v3
+ TZCNTQ R11, R11
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef GOAMD64_v4
+ TZCNTQ R11, R11
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef TZCNTQ_EMITTED
+#undef TZCNTQ_EMITTED
+#else
+ BSFQ R11, R11
+
+#endif
+ SARQ $0x03, R11
+ LEAL (R12)(R11*1), R12
+ JMP match_nolit_end_encodeSnappyBetterBlockAsm8B
matchlen_loop_match_nolit_encodeSnappyBetterBlockAsm8B:
LEAL -8(R8), R8
LEAL 8(R12), R12
CMPL R8, $0x08
JGE matchlen_loopback_match_nolit_encodeSnappyBetterBlockAsm8B
-
-matchlen_single_match_nolit_encodeSnappyBetterBlockAsm8B:
- TESTL R8, R8
- JZ match_nolit_end_encodeSnappyBetterBlockAsm8B
-
-matchlen_single_loopback_match_nolit_encodeSnappyBetterBlockAsm8B:
+ JZ match_nolit_end_encodeSnappyBetterBlockAsm8B
+
+matchlen_match4_match_nolit_encodeSnappyBetterBlockAsm8B:
+ CMPL R8, $0x04
+ JL matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm8B
+ MOVL (R9)(R12*1), R11
+ CMPL (R10)(R12*1), R11
+ JNE matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm8B
+ SUBL $0x04, R8
+ LEAL 4(R12), R12
+
+matchlen_match2_match_nolit_encodeSnappyBetterBlockAsm8B:
+ CMPL R8, $0x02
+ JL matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm8B
+ MOVW (R9)(R12*1), R11
+ CMPW (R10)(R12*1), R11
+ JNE matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm8B
+ SUBL $0x02, R8
+ LEAL 2(R12), R12
+
+matchlen_match1_match_nolit_encodeSnappyBetterBlockAsm8B:
+ CMPL R8, $0x01
+ JL match_nolit_end_encodeSnappyBetterBlockAsm8B
MOVB (R9)(R12*1), R11
CMPB (R10)(R12*1), R11
JNE match_nolit_end_encodeSnappyBetterBlockAsm8B
LEAL 1(R12), R12
- DECL R8
- JNZ matchlen_single_loopback_match_nolit_encodeSnappyBetterBlockAsm8B
match_nolit_end_encodeSnappyBetterBlockAsm8B:
MOVL CX, R8
@@ -15635,6 +16624,7 @@ gen_emit_copy_end_snappy:
RET
// func matchLen(a []byte, b []byte) int
+// Requires: BMI
TEXT ·matchLen(SB), NOSPLIT, $0-56
MOVQ a_base+0(FP), AX
MOVQ b_base+24(FP), CX
@@ -15643,35 +16633,68 @@ TEXT ·matchLen(SB), NOSPLIT, $0-56
// matchLen
XORL SI, SI
CMPL DX, $0x08
- JL matchlen_single_standalone
+ JL matchlen_match4_standalone
matchlen_loopback_standalone:
MOVQ (AX)(SI*1), BX
XORQ (CX)(SI*1), BX
TESTQ BX, BX
JZ matchlen_loop_standalone
- BSFQ BX, BX
- SARQ $0x03, BX
- LEAL (SI)(BX*1), SI
- JMP gen_match_len_end
+
+#ifdef GOAMD64_v3
+ TZCNTQ BX, BX
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef GOAMD64_v4
+ TZCNTQ BX, BX
+
+#define TZCNTQ_EMITTED 1
+#endif
+
+#ifdef TZCNTQ_EMITTED
+#undef TZCNTQ_EMITTED
+#else
+ BSFQ BX, BX
+
+#endif
+ SARQ $0x03, BX
+ LEAL (SI)(BX*1), SI
+ JMP gen_match_len_end
matchlen_loop_standalone:
LEAL -8(DX), DX
LEAL 8(SI), SI
CMPL DX, $0x08
JGE matchlen_loopback_standalone
+ JZ gen_match_len_end
-matchlen_single_standalone:
- TESTL DX, DX
- JZ gen_match_len_end
-
-matchlen_single_loopback_standalone:
+matchlen_match4_standalone:
+ CMPL DX, $0x04
+ JL matchlen_match2_standalone
+ MOVL (AX)(SI*1), BX
+ CMPL (CX)(SI*1), BX
+ JNE matchlen_match2_standalone
+ SUBL $0x04, DX
+ LEAL 4(SI), SI
+
+matchlen_match2_standalone:
+ CMPL DX, $0x02
+ JL matchlen_match1_standalone
+ MOVW (AX)(SI*1), BX
+ CMPW (CX)(SI*1), BX
+ JNE matchlen_match1_standalone
+ SUBL $0x02, DX
+ LEAL 2(SI), SI
+
+matchlen_match1_standalone:
+ CMPL DX, $0x01
+ JL gen_match_len_end
MOVB (AX)(SI*1), BL
CMPB (CX)(SI*1), BL
JNE gen_match_len_end
LEAL 1(SI), SI
- DECL DX
- JNZ matchlen_single_loopback_standalone
gen_match_len_end:
MOVQ SI, ret+48(FP)
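For readers who do not want to trace the assembly above, here is an editorial sketch in plain Go (not part of the generated code) of what `matchLen` computes: the length of the common prefix of two slices, compared 8 bytes at a time, with the trailing-zero count (the TZCNT/BSF above) turning the first differing bit into a byte offset. The 4/2/1-byte tail in the new assembly is replaced by a simple byte loop here; the result is the same.

```go
import (
	"encoding/binary"
	"math/bits"
)

// matchLenRef is a reference sketch, not the generated assembly.
// It returns the number of leading bytes that a and b have in common.
func matchLenRef(a, b []byte) (n int) {
	// Compare 8 bytes at a time; the first differing bit, divided by 8,
	// gives the number of extra matching bytes in that word.
	for len(a) >= 8 && len(b) >= 8 {
		if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
			return n + bits.TrailingZeros64(diff)>>3
		}
		a, b, n = a[8:], b[8:], n+8
	}
	// Tail: compare the remaining bytes one at a time.
	for i := range a {
		if i >= len(b) || a[i] != b[i] {
			break
		}
		n++
	}
	return n
}
```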
diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md
index c8f0f16f..e3445ac1 100644
--- a/vendor/github.com/klauspost/compress/zstd/README.md
+++ b/vendor/github.com/klauspost/compress/zstd/README.md
@@ -78,6 +78,9 @@ of a stream. This is independent of the `WithEncoderConcurrency(n)`, but that is
in the future. So if you want to limit concurrency for future updates, specify the concurrency
you would like.
+If you would like stream encoding to be done without spawning async goroutines, use `WithEncoderConcurrency(1)`,
+which will compress input as each block is completed, blocking on writes until each has completed.
+
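A minimal sketch of that synchronous mode, assuming the stream API shown elsewhere in this README (the `CompressSync` name is illustrative):

```go
import (
	"io"

	"github.com/klauspost/compress/zstd"
)

// CompressSync compresses in to out without async goroutines: each block is
// compressed on the calling goroutine, and writes block until completed.
func CompressSync(in io.Reader, out io.Writer) error {
	enc, err := zstd.NewWriter(out, zstd.WithEncoderConcurrency(1))
	if err != nil {
		return err
	}
	if _, err := io.Copy(enc, in); err != nil {
		enc.Close()
		return err
	}
	return enc.Close()
}
```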
You can specify your desired compression level using `WithEncoderLevel()` option. Currently only pre-defined
compression settings can be specified.
@@ -104,7 +107,8 @@ and seems to ignore concatenated streams, even though [it is part of the spec](h
For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`.
`EncodeAll` will encode all input in src and append it to dst.
-This function can be called concurrently, but each call will only run on a single goroutine.
+This function can be called concurrently.
+Each call will only run on the same goroutine as the caller.
Encoded blocks can be concatenated and the result will be the combined input stream.
Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`.
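As a sketch of that pattern (the `encoder` variable and `CompressBuffer` helper are illustrative, not part of the package):

```go
import "github.com/klauspost/compress/zstd"

// Create an encoder once and reuse it. EncodeAll may be called from many
// goroutines; each call runs on the caller's goroutine.
var encoder, _ = zstd.NewWriter(nil)

// CompressBuffer compresses src into a freshly allocated destination.
func CompressBuffer(src []byte) []byte {
	return encoder.EncodeAll(src, nil)
}
```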
@@ -149,10 +153,10 @@ http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip
This package:
file out level insize outsize millis mb/s
-silesia.tar zskp 1 211947520 73101992 643 313.87
-silesia.tar zskp 2 211947520 67504318 969 208.38
-silesia.tar zskp 3 211947520 64595893 2007 100.68
-silesia.tar zskp 4 211947520 60995370 8825 22.90
+silesia.tar zskp 1 211947520 73821326 634 318.47
+silesia.tar zskp 2 211947520 67655404 1508 133.96
+silesia.tar zskp 3 211947520 64746933 3000 67.37
+silesia.tar zskp 4 211947520 60073508 16926 11.94
cgo zstd:
silesia.tar zstd 1 211947520 73605392 543 371.56
@@ -161,94 +165,94 @@ silesia.tar zstd 6 211947520 62916450 1913 105.66
silesia.tar zstd 9 211947520 60212393 5063 39.92
gzip, stdlib/this package:
-silesia.tar gzstd 1 211947520 80007735 1654 122.21
-silesia.tar gzkp 1 211947520 80136201 1152 175.45
+silesia.tar gzstd 1 211947520 80007735 1498 134.87
+silesia.tar gzkp 1 211947520 80088272 1009 200.31
GOB stream of binary data. Highly compressible.
https://files.klauspost.com/compress/gob-stream.7z
file out level insize outsize millis mb/s
-gob-stream zskp 1 1911399616 235022249 3088 590.30
-gob-stream zskp 2 1911399616 205669791 3786 481.34
-gob-stream zskp 3 1911399616 175034659 9636 189.17
-gob-stream zskp 4 1911399616 165609838 50369 36.19
+gob-stream zskp 1 1911399616 233948096 3230 564.34
+gob-stream zskp 2 1911399616 203997694 4997 364.73
+gob-stream zskp 3 1911399616 173526523 13435 135.68
+gob-stream zskp 4 1911399616 162195235 47559 38.33
gob-stream zstd 1 1911399616 249810424 2637 691.26
gob-stream zstd 3 1911399616 208192146 3490 522.31
gob-stream zstd 6 1911399616 193632038 6687 272.56
gob-stream zstd 9 1911399616 177620386 16175 112.70
-gob-stream gzstd 1 1911399616 357382641 10251 177.82
-gob-stream gzkp 1 1911399616 359753026 5438 335.20
+gob-stream gzstd 1 1911399616 357382013 9046 201.49
+gob-stream gzkp 1 1911399616 359136669 4885 373.08
The test data for the Large Text Compression Benchmark is the first
10^9 bytes of the English Wikipedia dump on Mar. 3, 2006.
http://mattmahoney.net/dc/textdata.html
file out level insize outsize millis mb/s
-enwik9 zskp 1 1000000000 343848582 3609 264.18
-enwik9 zskp 2 1000000000 317276632 5746 165.97
-enwik9 zskp 3 1000000000 292243069 12162 78.41
-enwik9 zskp 4 1000000000 262183768 82837 11.51
+enwik9 zskp 1 1000000000 343833605 3687 258.64
+enwik9 zskp 2 1000000000 317001237 7672 124.29
+enwik9 zskp 3 1000000000 291915823 15923 59.89
+enwik9 zskp 4 1000000000 261710291 77697 12.27
enwik9 zstd 1 1000000000 358072021 3110 306.65
enwik9 zstd 3 1000000000 313734672 4784 199.35
enwik9 zstd 6 1000000000 295138875 10290 92.68
enwik9 zstd 9 1000000000 278348700 28549 33.40
-enwik9 gzstd 1 1000000000 382578136 9604 99.30
-enwik9 gzkp 1 1000000000 383825945 6544 145.73
+enwik9 gzstd 1 1000000000 382578136 8608 110.78
+enwik9 gzkp 1 1000000000 382781160 5628 169.45
Highly compressible JSON file.
https://files.klauspost.com/compress/github-june-2days-2019.json.zst
file out level insize outsize millis mb/s
-github-june-2days-2019.json zskp 1 6273951764 699045015 10620 563.40
-github-june-2days-2019.json zskp 2 6273951764 617881763 11687 511.96
-github-june-2days-2019.json zskp 3 6273951764 524340691 34043 175.75
-github-june-2days-2019.json zskp 4 6273951764 470320075 170190 35.16
+github-june-2days-2019.json zskp 1 6273951764 697439532 9789 611.17
+github-june-2days-2019.json zskp 2 6273951764 610876538 18553 322.49
+github-june-2days-2019.json zskp 3 6273951764 517662858 44186 135.41
+github-june-2days-2019.json zskp 4 6273951764 464617114 165373 36.18
github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00
github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57
github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18
github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16
-github-june-2days-2019.json gzstd 1 6273951764 1164400847 29948 199.79
-github-june-2days-2019.json gzkp 1 6273951764 1125417694 21788 274.61
+github-june-2days-2019.json gzstd 1 6273951764 1164397768 26793 223.32
+github-june-2days-2019.json gzkp 1 6273951764 1120631856 17693 338.16
VM Image, Linux mint with a few installed applications:
https://files.klauspost.com/compress/rawstudio-mint14.7z
file out level insize outsize millis mb/s
-rawstudio-mint14.tar zskp 1 8558382592 3667489370 20210 403.84
-rawstudio-mint14.tar zskp 2 8558382592 3364592300 31873 256.07
-rawstudio-mint14.tar zskp 3 8558382592 3158085214 77675 105.08
-rawstudio-mint14.tar zskp 4 8558382592 2965110639 857750 9.52
+rawstudio-mint14.tar zskp 1 8558382592 3718400221 18206 448.29
+rawstudio-mint14.tar zskp 2 8558382592 3326118337 37074 220.15
+rawstudio-mint14.tar zskp 3 8558382592 3163842361 87306 93.49
+rawstudio-mint14.tar zskp 4 8558382592 2970480650 783862 10.41
rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27
rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92
rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77
rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91
-rawstudio-mint14.tar gzstd 1 8558382592 3926257486 57722 141.40
-rawstudio-mint14.tar gzkp 1 8558382592 3962605659 45113 180.92
+rawstudio-mint14.tar gzstd 1 8558382592 3926234992 51345 158.96
+rawstudio-mint14.tar gzkp 1 8558382592 3960117298 36722 222.26
CSV data:
https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst
file out level insize outsize millis mb/s
-nyc-taxi-data-10M.csv zskp 1 3325605752 641339945 8925 355.35
-nyc-taxi-data-10M.csv zskp 2 3325605752 591748091 11268 281.44
-nyc-taxi-data-10M.csv zskp 3 3325605752 530289687 25239 125.66
-nyc-taxi-data-10M.csv zskp 4 3325605752 476268884 135958 23.33
+nyc-taxi-data-10M.csv zskp 1 3325605752 641319332 9462 335.17
+nyc-taxi-data-10M.csv zskp 2 3325605752 588976126 17570 180.50
+nyc-taxi-data-10M.csv zskp 3 3325605752 529329260 32432 97.79
+nyc-taxi-data-10M.csv zskp 4 3325605752 474949772 138025 22.98
nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18
nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07
nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27
nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12
-nyc-taxi-data-10M.csv gzstd 1 3325605752 928656485 23876 132.83
-nyc-taxi-data-10M.csv gzkp 1 3325605752 922257165 16780 189.00
+nyc-taxi-data-10M.csv gzstd 1 3325605752 928654908 21270 149.11
+nyc-taxi-data-10M.csv gzkp 1 3325605752 922273214 13929 227.68
```
## Decompressor
@@ -283,8 +287,13 @@ func Decompress(in io.Reader, out io.Writer) error {
}
```
-It is important to use the "Close" function when you no longer need the Reader to stop running goroutines.
-See "Allocation-less operation" below.
+When running with default settings, it is important to use the "Close" function when you no longer need the Reader,
+so that its running goroutines are stopped.
+Goroutines will exit once an error has been returned, including `io.EOF` at the end of a stream.
+
+Streams are decoded concurrently in 4 asynchronous stages to give the best possible throughput.
+However, if you prefer synchronous decompression, use `WithDecoderConcurrency(1)`, which will decompress data
+only as it is requested.
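A sketch of that synchronous variant (the `DecompressSync` name is illustrative):

```go
import (
	"io"

	"github.com/klauspost/compress/zstd"
)

// DecompressSync decodes a stream without async stages; Close still
// releases the decoder's resources.
func DecompressSync(in io.Reader, out io.Writer) error {
	d, err := zstd.NewReader(in, zstd.WithDecoderConcurrency(1))
	if err != nil {
		return err
	}
	defer d.Close()
	_, err = io.Copy(out, d)
	return err
}
```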
For decoding buffers, it could look something like this:
@@ -293,7 +302,7 @@ import "github.com/klauspost/compress/zstd"
// Create a reader that caches decompressors.
// For this operation type we supply a nil Reader.
-var decoder, _ = zstd.NewReader(nil)
+var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))
// Decompress a buffer. We don't supply a destination buffer,
// so it will be allocated by the decoder.
@@ -303,9 +312,12 @@ func Decompress(src []byte) ([]byte, error) {
```
Both of these cases should provide the functionality needed.
-The decoder can be used for *concurrent* decompression of multiple buffers.
+The decoder can be used for *concurrent* decompression of multiple buffers.
+By default 4 decompressors will be created.
+
It will only allow a certain number of concurrent operations to run.
-To tweak that yourself use the `WithDecoderConcurrency(n)` option when creating the decoder.
+To tweak that yourself use the `WithDecoderConcurrency(n)` option when creating the decoder.
+It is possible to use `WithDecoderConcurrency(0)` to create GOMAXPROCS decoders.
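For illustration only (the helper below is not part of the package), decoding several independent buffers through one shared decoder could look like this:

```go
import (
	"sync"

	"github.com/klauspost/compress/zstd"
)

// One decoder shared by all goroutines; the decoder itself limits how many
// decodes run concurrently.
var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))

// DecodeMany decodes each input buffer on its own goroutine.
func DecodeMany(inputs [][]byte) ([][]byte, []error) {
	out := make([][]byte, len(inputs))
	errs := make([]error, len(inputs))
	var wg sync.WaitGroup
	for i, in := range inputs {
		wg.Add(1)
		go func(i int, in []byte) {
			defer wg.Done()
			out[i], errs[i] = decoder.DecodeAll(in, nil)
		}(i, in)
	}
	wg.Wait()
	return out, errs
}
```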
### Dictionaries
@@ -357,19 +369,21 @@ In this case no unneeded allocations should be made.
The buffer decoder does everything on the same goroutine and does nothing concurrently.
It can however decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that.
-The stream decoder operates on
+The stream decoder will create goroutines that:
-* One goroutine reads input and splits the input to several block decoders.
-* A number of decoders will decode blocks.
-* A goroutine coordinates these blocks and sends history from one to the next.
+1) Read input and split it into blocks.
+2) Decompress literals.
+3) Decompress sequences.
+4) Reconstruct the output stream.
So effectively this also means the decoder will "read ahead" and prepare data to always be available for output.
+For streams, the concurrency level will determine how many blocks ahead the decompression will start.
+
Since "blocks" are quite dependent on the output of the previous block, stream decoding will only have limited concurrency.
-In practice this means that concurrency is often limited to utilizing about 2 cores effectively.
-
-
+In practice this means that concurrency is often limited to utilizing about 3 cores effectively.
+
### Benchmarks
These are some examples of performance compared to [datadog cgo library](https://github.com/DataDog/zstd).
diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go
index 753d17df..d7cd15ba 100644
--- a/vendor/github.com/klauspost/compress/zstd/bitreader.go
+++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go
@@ -7,6 +7,7 @@ package zstd
import (
"encoding/binary"
"errors"
+ "fmt"
"io"
"math/bits"
)
@@ -132,6 +133,9 @@ func (b *bitReader) remain() uint {
func (b *bitReader) close() error {
// Release reference.
b.in = nil
+ if !b.finished() {
+ return fmt.Errorf("%d extra bits on block, should be 0", b.remain())
+ }
if b.bitsRead > 64 {
return io.ErrUnexpectedEOF
}
diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go
index dc587b2c..7d567a54 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go
@@ -76,16 +76,25 @@ type blockDec struct {
// Window size of the block.
WindowSize uint64
- history chan *history
- input chan struct{}
- result chan decodeOutput
- err error
- decWG sync.WaitGroup
+ err error
+
+ // Check against this crc
+ checkCRC []byte
// Frame to use for singlethreaded decoding.
// Should not be used by the decoder itself since parent may be another frame.
localFrame *frameDec
+ sequence []seqVals
+
+ async struct {
+ newHist *history
+ literals []byte
+ seqData []byte
+ seqSize int // Size of uncompressed sequences
+ fcs uint64
+ }
+
// Block is RLE, this is the size.
RLESize uint32
tmp [4]byte
@@ -108,13 +117,8 @@ func (b *blockDec) String() string {
func newBlockDec(lowMem bool) *blockDec {
b := blockDec{
- lowMem: lowMem,
- result: make(chan decodeOutput, 1),
- input: make(chan struct{}, 1),
- history: make(chan *history, 1),
+ lowMem: lowMem,
}
- b.decWG.Add(1)
- go b.startDecoder()
return &b
}
@@ -137,6 +141,12 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
case blockTypeReserved:
return ErrReservedBlockType
case blockTypeRLE:
+ if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) {
+ if debugDecoder {
+ printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b)
+ }
+ return ErrWindowSizeExceeded
+ }
b.RLESize = uint32(cSize)
if b.lowMem {
maxSize = cSize
@@ -157,7 +167,19 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
}
return ErrCompressedSizeTooBig
}
+ // Empty compressed blocks must at least be 2 bytes
+ // for Literals_Block_Type and one for Sequences_Section_Header.
+ if cSize < 2 {
+ return ErrBlockTooSmall
+ }
case blockTypeRaw:
+ if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) {
+ if debugDecoder {
+ printf("raw block too big: csize:%d block: %+v\n", uint64(cSize), b)
+ }
+ return ErrWindowSizeExceeded
+ }
+
b.RLESize = 0
// We do not need a destination for raw blocks.
maxSize = -1
@@ -192,85 +214,14 @@ func (b *blockDec) sendErr(err error) {
b.Last = true
b.Type = blockTypeReserved
b.err = err
- b.input <- struct{}{}
}
// Close will release resources.
// Closed blockDec cannot be reset.
func (b *blockDec) Close() {
- close(b.input)
- close(b.history)
- close(b.result)
- b.decWG.Wait()
-}
-
-// decodeAsync will prepare decoding the block when it receives input.
-// This will separate output and history.
-func (b *blockDec) startDecoder() {
- defer b.decWG.Done()
- for range b.input {
- //println("blockDec: Got block input")
- switch b.Type {
- case blockTypeRLE:
- if cap(b.dst) < int(b.RLESize) {
- if b.lowMem {
- b.dst = make([]byte, b.RLESize)
- } else {
- b.dst = make([]byte, maxBlockSize)
- }
- }
- o := decodeOutput{
- d: b,
- b: b.dst[:b.RLESize],
- err: nil,
- }
- v := b.data[0]
- for i := range o.b {
- o.b[i] = v
- }
- hist := <-b.history
- hist.append(o.b)
- b.result <- o
- case blockTypeRaw:
- o := decodeOutput{
- d: b,
- b: b.data,
- err: nil,
- }
- hist := <-b.history
- hist.append(o.b)
- b.result <- o
- case blockTypeCompressed:
- b.dst = b.dst[:0]
- err := b.decodeCompressed(nil)
- o := decodeOutput{
- d: b,
- b: b.dst,
- err: err,
- }
- if debugDecoder {
- println("Decompressed to", len(b.dst), "bytes, error:", err)
- }
- b.result <- o
- case blockTypeReserved:
- // Used for returning errors.
- <-b.history
- b.result <- decodeOutput{
- d: b,
- b: nil,
- err: b.err,
- }
- default:
- panic("Invalid block type")
- }
- if debugDecoder {
- println("blockDec: Finished block")
- }
- }
}
-// decodeAsync will prepare decoding the block when it receives the history.
-// If history is provided, it will not fetch it from the channel.
+// decodeBuf
func (b *blockDec) decodeBuf(hist *history) error {
switch b.Type {
case blockTypeRLE:
@@ -293,14 +244,23 @@ func (b *blockDec) decodeBuf(hist *history) error {
return nil
case blockTypeCompressed:
saved := b.dst
- b.dst = hist.b
- hist.b = nil
+ // Append directly to history
+ if hist.ignoreBuffer == 0 {
+ b.dst = hist.b
+ hist.b = nil
+ } else {
+ b.dst = b.dst[:0]
+ }
err := b.decodeCompressed(hist)
if debugDecoder {
println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err)
}
- hist.b = b.dst
- b.dst = saved
+ if hist.ignoreBuffer == 0 {
+ hist.b = b.dst
+ b.dst = saved
+ } else {
+ hist.appendKeep(b.dst)
+ }
return err
case blockTypeReserved:
// Used for returning errors.
@@ -310,30 +270,18 @@ func (b *blockDec) decodeBuf(hist *history) error {
}
}
-// decodeCompressed will start decompressing a block.
-// If no history is supplied the decoder will decodeAsync as much as possible
-// before fetching from blockDec.history
-func (b *blockDec) decodeCompressed(hist *history) error {
- in := b.data
- delayedHistory := hist == nil
-
- if delayedHistory {
- // We must always grab history.
- defer func() {
- if hist == nil {
- <-b.history
- }
- }()
- }
+func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err error) {
// There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header
if len(in) < 2 {
- return ErrBlockTooSmall
+ return in, ErrBlockTooSmall
}
+
litType := literalsBlockType(in[0] & 3)
var litRegenSize int
var litCompSize int
sizeFormat := (in[0] >> 2) & 3
var fourStreams bool
+ var literals []byte
switch litType {
case literalsBlockRaw, literalsBlockRLE:
switch sizeFormat {
@@ -349,7 +297,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
// Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes.
if len(in) < 3 {
println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
- return ErrBlockTooSmall
+ return in, ErrBlockTooSmall
}
litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12)
in = in[3:]
@@ -360,7 +308,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
// Both Regenerated_Size and Compressed_Size use 10 bits (0-1023).
if len(in) < 3 {
println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
- return ErrBlockTooSmall
+ return in, ErrBlockTooSmall
}
n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12)
litRegenSize = int(n & 1023)
@@ -371,7 +319,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
fourStreams = true
if len(in) < 4 {
println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
- return ErrBlockTooSmall
+ return in, ErrBlockTooSmall
}
n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20)
litRegenSize = int(n & 16383)
@@ -381,7 +329,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
fourStreams = true
if len(in) < 5 {
println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
- return ErrBlockTooSmall
+ return in, ErrBlockTooSmall
}
n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28)
litRegenSize = int(n & 262143)
@@ -392,13 +340,15 @@ func (b *blockDec) decodeCompressed(hist *history) error {
if debugDecoder {
println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams)
}
- var literals []byte
- var huff *huff0.Scratch
+ if litRegenSize > int(b.WindowSize) || litRegenSize > maxCompressedBlockSize {
+ return in, ErrWindowSizeExceeded
+ }
+
switch litType {
case literalsBlockRaw:
if len(in) < litRegenSize {
println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize)
- return ErrBlockTooSmall
+ return in, ErrBlockTooSmall
}
literals = in[:litRegenSize]
in = in[litRegenSize:]
@@ -406,7 +356,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
case literalsBlockRLE:
if len(in) < 1 {
println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1)
- return ErrBlockTooSmall
+ return in, ErrBlockTooSmall
}
if cap(b.literalBuf) < litRegenSize {
if b.lowMem {
@@ -417,7 +367,6 @@ func (b *blockDec) decodeCompressed(hist *history) error {
b.literalBuf = make([]byte, litRegenSize)
} else {
b.literalBuf = make([]byte, litRegenSize, maxCompressedLiteralSize)
-
}
}
}
@@ -433,7 +382,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
case literalsBlockTreeless:
if len(in) < litCompSize {
println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize)
- return ErrBlockTooSmall
+ return in, ErrBlockTooSmall
}
// Store compressed literals, so we defer decoding until we get history.
literals = in[:litCompSize]
@@ -441,31 +390,65 @@ func (b *blockDec) decodeCompressed(hist *history) error {
if debugDecoder {
printf("Found %d compressed literals\n", litCompSize)
}
+ huff := hist.huffTree
+ if huff == nil {
+ return in, errors.New("literal block was treeless, but no history was defined")
+ }
+ // Ensure we have space to store it.
+ if cap(b.literalBuf) < litRegenSize {
+ if b.lowMem {
+ b.literalBuf = make([]byte, 0, litRegenSize)
+ } else {
+ b.literalBuf = make([]byte, 0, maxCompressedLiteralSize)
+ }
+ }
+ var err error
+ // Use our out buffer.
+ huff.MaxDecodedSize = maxCompressedBlockSize
+ if fourStreams {
+ literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
+ } else {
+ literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals)
+ }
+ // Make sure we don't leak our literals buffer
+ if err != nil {
+ println("decompressing literals:", err)
+ return in, err
+ }
+ if len(literals) != litRegenSize {
+ return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
+ }
+
case literalsBlockCompressed:
if len(in) < litCompSize {
println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize)
- return ErrBlockTooSmall
+ return in, ErrBlockTooSmall
}
literals = in[:litCompSize]
in = in[litCompSize:]
- huff = huffDecoderPool.Get().(*huff0.Scratch)
- var err error
// Ensure we have space to store it.
if cap(b.literalBuf) < litRegenSize {
if b.lowMem {
b.literalBuf = make([]byte, 0, litRegenSize)
} else {
- b.literalBuf = make([]byte, 0, maxCompressedLiteralSize)
+ b.literalBuf = make([]byte, 0, maxCompressedBlockSize)
}
}
- if huff == nil {
- huff = &huff0.Scratch{}
+ huff := hist.huffTree
+ if huff == nil || (hist.dict != nil && huff == hist.dict.litEnc) {
+ huff = huffDecoderPool.Get().(*huff0.Scratch)
+ if huff == nil {
+ huff = &huff0.Scratch{}
+ }
}
+ var err error
huff, literals, err = huff0.ReadTable(literals, huff)
if err != nil {
println("reading huffman table:", err)
- return err
+ return in, err
}
+ hist.huffTree = huff
+ huff.MaxDecodedSize = maxCompressedBlockSize
// Use our out buffer.
if fourStreams {
literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
@@ -474,27 +457,56 @@ func (b *blockDec) decodeCompressed(hist *history) error {
}
if err != nil {
println("decoding compressed literals:", err)
- return err
+ return in, err
}
// Make sure we don't leak our literals buffer
if len(literals) != litRegenSize {
- return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
+ return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
}
if debugDecoder {
printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize)
}
}
+ hist.decoders.literals = literals
+ return in, nil
+}
+
+// decodeCompressed will start decompressing a block.
+func (b *blockDec) decodeCompressed(hist *history) error {
+ in := b.data
+ in, err := b.decodeLiterals(in, hist)
+ if err != nil {
+ return err
+ }
+ err = b.prepareSequences(in, hist)
+ if err != nil {
+ return err
+ }
+ if hist.decoders.nSeqs == 0 {
+ b.dst = append(b.dst, hist.decoders.literals...)
+ return nil
+ }
+ err = hist.decoders.decodeSync(hist)
+ if err != nil {
+ return err
+ }
+ b.dst = hist.decoders.out
+ hist.recentOffsets = hist.decoders.prevOffset
+ return nil
+}
+func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
+ if debugDecoder {
+ printf("prepareSequences: %d byte(s) input\n", len(in))
+ }
// Decode Sequences
// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section
if len(in) < 1 {
return ErrBlockTooSmall
}
+ var nSeqs int
seqHeader := in[0]
- nSeqs := 0
switch {
- case seqHeader == 0:
- in = in[1:]
case seqHeader < 128:
nSeqs = int(seqHeader)
in = in[1:]
@@ -511,8 +523,16 @@ func (b *blockDec) decodeCompressed(hist *history) error {
nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8)
in = in[3:]
}
+ if nSeqs == 0 && len(in) != 0 {
+ // When no sequences, there should not be any more data...
+ if debugDecoder {
+ printf("prepareSequences: 0 sequences, but %d byte(s) left on stream\n", len(in))
+ }
+ return ErrUnexpectedBlockSize
+ }
- var seqs = &sequenceDecs{}
+ var seqs = &hist.decoders
+ seqs.nSeqs = nSeqs
if nSeqs > 0 {
if len(in) < 1 {
return ErrBlockTooSmall
@@ -541,6 +561,9 @@ func (b *blockDec) decodeCompressed(hist *history) error {
}
switch mode {
case compModePredefined:
+ if seq.fse != nil && !seq.fse.preDefined {
+ fseDecoderPool.Put(seq.fse)
+ }
seq.fse = &fsePredef[i]
case compModeRLE:
if br.remain() < 1 {
@@ -548,34 +571,36 @@ func (b *blockDec) decodeCompressed(hist *history) error {
}
v := br.Uint8()
br.advance(1)
- dec := fseDecoderPool.Get().(*fseDecoder)
+ if seq.fse == nil || seq.fse.preDefined {
+ seq.fse = fseDecoderPool.Get().(*fseDecoder)
+ }
symb, err := decSymbolValue(v, symbolTableX[i])
if err != nil {
printf("RLE Transform table (%v) error: %v", tableIndex(i), err)
return err
}
- dec.setRLE(symb)
- seq.fse = dec
+ seq.fse.setRLE(symb)
if debugDecoder {
printf("RLE set to %+v, code: %v", symb, v)
}
case compModeFSE:
println("Reading table for", tableIndex(i))
- dec := fseDecoderPool.Get().(*fseDecoder)
- err := dec.readNCount(&br, uint16(maxTableSymbol[i]))
+ if seq.fse == nil || seq.fse.preDefined {
+ seq.fse = fseDecoderPool.Get().(*fseDecoder)
+ }
+ err := seq.fse.readNCount(&br, uint16(maxTableSymbol[i]))
if err != nil {
println("Read table error:", err)
return err
}
- err = dec.transform(symbolTableX[i])
+ err = seq.fse.transform(symbolTableX[i])
if err != nil {
println("Transform table error:", err)
return err
}
if debugDecoder {
- println("Read table ok", "symbolLen:", dec.symbolLen)
+ println("Read table ok", "symbolLen:", seq.fse.symbolLen)
}
- seq.fse = dec
case compModeRepeat:
seq.repeat = true
}
@@ -585,140 +610,89 @@ func (b *blockDec) decodeCompressed(hist *history) error {
}
in = br.unread()
}
-
- // Wait for history.
- // All time spent after this is critical since it is strictly sequential.
- if hist == nil {
- hist = <-b.history
- if hist.error {
- return ErrDecoderClosed
- }
- }
-
- // Decode treeless literal block.
- if litType == literalsBlockTreeless {
- // TODO: We could send the history early WITHOUT the stream history.
- // This would allow decoding treeless literals before the byte history is available.
- // Silencia stats: Treeless 4393, with: 32775, total: 37168, 11% treeless.
- // So not much obvious gain here.
-
- if hist.huffTree == nil {
- return errors.New("literal block was treeless, but no history was defined")
- }
- // Ensure we have space to store it.
- if cap(b.literalBuf) < litRegenSize {
- if b.lowMem {
- b.literalBuf = make([]byte, 0, litRegenSize)
- } else {
- b.literalBuf = make([]byte, 0, maxCompressedLiteralSize)
- }
- }
- var err error
- // Use our out buffer.
- huff = hist.huffTree
- if fourStreams {
- literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
- } else {
- literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals)
- }
- // Make sure we don't leak our literals buffer
- if err != nil {
- println("decompressing literals:", err)
- return err
- }
- if len(literals) != litRegenSize {
- return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
- }
- } else {
- if hist.huffTree != nil && huff != nil {
- if hist.dict == nil || hist.dict.litEnc != hist.huffTree {
- huffDecoderPool.Put(hist.huffTree)
- }
- hist.huffTree = nil
- }
- }
- if huff != nil {
- hist.huffTree = huff
- }
if debugDecoder {
- println("Final literals:", len(literals), "hash:", xxhash.Sum64(literals), "and", nSeqs, "sequences.")
+ println("Literals:", len(seqs.literals), "hash:", xxhash.Sum64(seqs.literals), "and", seqs.nSeqs, "sequences.")
}
if nSeqs == 0 {
- // Decompressed content is defined entirely as Literals Section content.
- b.dst = append(b.dst, literals...)
- if delayedHistory {
- hist.append(literals)
+ if len(b.sequence) > 0 {
+ b.sequence = b.sequence[:0]
}
return nil
}
-
- seqs, err := seqs.mergeHistory(&hist.decoders)
- if err != nil {
- return err
- }
- if debugDecoder {
- println("History merged ok")
+ br := seqs.br
+ if br == nil {
+ br = &bitReader{}
}
- br := &bitReader{}
if err := br.init(in); err != nil {
return err
}
- // TODO: Investigate if sending history without decoders are faster.
- // This would allow the sequences to be decoded async and only have to construct stream history.
- // If only recent offsets were not transferred, this would be an obvious win.
- // Also, if first 3 sequences don't reference recent offsets, all sequences can be decoded.
+ if err := seqs.initialize(br, hist, b.dst); err != nil {
+ println("initializing sequences:", err)
+ return err
+ }
+ return nil
+}
+
+func (b *blockDec) decodeSequences(hist *history) error {
+ if cap(b.sequence) < hist.decoders.nSeqs {
+ if b.lowMem {
+ b.sequence = make([]seqVals, 0, hist.decoders.nSeqs)
+ } else {
+ b.sequence = make([]seqVals, 0, 0x7F00+0xffff)
+ }
+ }
+ b.sequence = b.sequence[:hist.decoders.nSeqs]
+ if hist.decoders.nSeqs == 0 {
+ hist.decoders.seqSize = len(hist.decoders.literals)
+ return nil
+ }
+ hist.decoders.windowSize = hist.windowSize
+ hist.decoders.prevOffset = hist.recentOffsets
+ err := hist.decoders.decode(b.sequence)
+ hist.recentOffsets = hist.decoders.prevOffset
+ return err
+}
+func (b *blockDec) executeSequences(hist *history) error {
hbytes := hist.b
if len(hbytes) > hist.windowSize {
hbytes = hbytes[len(hbytes)-hist.windowSize:]
- // We do not need history any more.
+ // We do not need history anymore.
if hist.dict != nil {
hist.dict.content = nil
}
}
-
- if err := seqs.initialize(br, hist, literals, b.dst); err != nil {
- println("initializing sequences:", err)
- return err
- }
-
- err = seqs.decode(nSeqs, br, hbytes)
+ hist.decoders.windowSize = hist.windowSize
+ hist.decoders.out = b.dst[:0]
+ err := hist.decoders.execute(b.sequence, hbytes)
if err != nil {
return err
}
- if !br.finished() {
- return fmt.Errorf("%d extra bits on block, should be 0", br.remain())
- }
+ return b.updateHistory(hist)
+}
- err = br.close()
- if err != nil {
- printf("Closing sequences: %v, %+v\n", err, *br)
- }
+func (b *blockDec) updateHistory(hist *history) error {
if len(b.data) > maxCompressedBlockSize {
return fmt.Errorf("compressed block size too large (%d)", len(b.data))
}
// Set output and release references.
- b.dst = seqs.out
- seqs.out, seqs.literals, seqs.hist = nil, nil, nil
+ b.dst = hist.decoders.out
+ hist.recentOffsets = hist.decoders.prevOffset
- if !delayedHistory {
- // If we don't have delayed history, no need to update.
- hist.recentOffsets = seqs.prevOffset
- return nil
- }
if b.Last {
// if last block we don't care about history.
println("Last block, no history returned")
hist.b = hist.b[:0]
return nil
+ } else {
+ hist.append(b.dst)
+ if debugDecoder {
+ println("Finished block with ", len(b.sequence), "sequences. Added", len(b.dst), "to history, now length", len(hist.b))
+ }
}
- hist.append(b.dst)
- hist.recentOffsets = seqs.prevOffset
- if debugDecoder {
- println("Finished block with literals:", len(literals), "and", nSeqs, "sequences.")
- }
+ hist.decoders.out, hist.decoders.literals = nil, nil
return nil
}
diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go
index aab71c6c..b80191e4 100644
--- a/vendor/github.com/klauspost/compress/zstd/bytebuf.go
+++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go
@@ -113,6 +113,9 @@ func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) {
func (r *readerWrapper) readByte() (byte, error) {
n2, err := r.r.Read(r.tmp[:1])
if err != nil {
+ if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
return 0, err
}
if n2 != 1 {
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go
index f430f58b..9fcdaac1 100644
--- a/vendor/github.com/klauspost/compress/zstd/decoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/decoder.go
@@ -5,9 +5,13 @@
package zstd
import (
- "errors"
+ "bytes"
+ "context"
+ "encoding/binary"
"io"
"sync"
+
+ "github.com/klauspost/compress/zstd/internal/xxhash"
)
// Decoder provides decoding of zstandard streams.
@@ -22,12 +26,19 @@ type Decoder struct {
// Unreferenced decoders, ready for use.
decoders chan *blockDec
- // Streams ready to be decoded.
- stream chan decodeStream
-
// Current read position used for Reader functionality.
current decoderState
+ // sync stream decoding
+ syncStream struct {
+ decodedFrame uint64
+ br readerWrapper
+ enabled bool
+ inFrame bool
+ }
+
+ frame *frameDec
+
// Custom dictionaries.
// Always uses copies.
dicts map[uint32]dict
@@ -46,7 +57,10 @@ type decoderState struct {
output chan decodeOutput
// cancel remaining output.
- cancel chan struct{}
+ cancel context.CancelFunc
+
+ // crc of current frame
+ crc *xxhash.Digest
flushed bool
}
@@ -81,7 +95,7 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {
return nil, err
}
}
- d.current.output = make(chan decodeOutput, d.o.concurrent)
+ d.current.crc = xxhash.New()
d.current.flushed = true
if r == nil {
@@ -130,7 +144,7 @@ func (d *Decoder) Read(p []byte) (int, error) {
break
}
if !d.nextBlock(n == 0) {
- return n, nil
+ return n, d.current.err
}
}
}
@@ -162,6 +176,7 @@ func (d *Decoder) Reset(r io.Reader) error {
d.drainOutput()
+ d.syncStream.br.r = nil
if r == nil {
d.current.err = ErrDecoderNilInput
if len(d.current.b) > 0 {
@@ -195,33 +210,39 @@ func (d *Decoder) Reset(r io.Reader) error {
}
return nil
}
-
- if d.stream == nil {
- d.stream = make(chan decodeStream, 1)
- d.streamWg.Add(1)
- go d.startStreamDecoder(d.stream)
- }
-
// Remove current block.
+ d.stashDecoder()
d.current.decodeOutput = decodeOutput{}
d.current.err = nil
- d.current.cancel = make(chan struct{})
d.current.flushed = false
d.current.d = nil
- d.stream <- decodeStream{
- r: r,
- output: d.current.output,
- cancel: d.current.cancel,
+ // Ensure no-one else is still running...
+ d.streamWg.Wait()
+ if d.frame == nil {
+ d.frame = newFrameDec(d.o)
+ }
+
+ if d.o.concurrent == 1 {
+ return d.startSyncDecoder(r)
}
+
+ d.current.output = make(chan decodeOutput, d.o.concurrent)
+ ctx, cancel := context.WithCancel(context.Background())
+ d.current.cancel = cancel
+ d.streamWg.Add(1)
+ go d.startStreamDecoder(ctx, r, d.current.output)
+
return nil
}
// drainOutput will drain the output until errEndOfStream is sent.
func (d *Decoder) drainOutput() {
if d.current.cancel != nil {
- println("cancelling current")
- close(d.current.cancel)
+ if debugDecoder {
+ println("cancelling current")
+ }
+ d.current.cancel()
d.current.cancel = nil
}
if d.current.d != nil {
@@ -243,12 +264,9 @@ func (d *Decoder) drainOutput() {
}
d.decoders <- v.d
}
- if v.err == errEndOfStream {
- println("current flushed")
- d.current.flushed = true
- return
- }
}
+ d.current.output = nil
+ d.current.flushed = true
}
// WriteTo writes data to w until there's no more data to write or when an error occurs.
@@ -287,7 +305,7 @@ func (d *Decoder) WriteTo(w io.Writer) (int64, error) {
// DecodeAll can be used concurrently.
// The Decoder concurrency limits will be respected.
func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
- if d.current.err == ErrDecoderClosed {
+ if d.decoders == nil {
return dst, ErrDecoderClosed
}
@@ -300,6 +318,9 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
}
frame.rawInput = nil
frame.bBuf = nil
+ if frame.history.decoders.br != nil {
+ frame.history.decoders.br.in = nil
+ }
d.decoders <- block
}()
frame.bBuf = input
@@ -307,27 +328,31 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
for {
frame.history.reset()
err := frame.reset(&frame.bBuf)
- if err == io.EOF {
- if debugDecoder {
- println("frame reset return EOF")
+ if err != nil {
+ if err == io.EOF {
+ if debugDecoder {
+ println("frame reset return EOF")
+ }
+ return dst, nil
}
- return dst, nil
+ return dst, err
}
if frame.DictionaryID != nil {
dict, ok := d.dicts[*frame.DictionaryID]
if !ok {
return nil, ErrUnknownDictionary
}
+ if debugDecoder {
+ println("setting dict", frame.DictionaryID)
+ }
frame.history.setDict(&dict)
}
- if err != nil {
- return dst, err
- }
- if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
+
+ if frame.FrameContentSize != fcsUnknown && frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
return dst, ErrDecoderSizeExceeded
}
- if frame.FrameContentSize > 0 && frame.FrameContentSize < 1<<30 {
- // Never preallocate moe than 1 GB up front.
+ if frame.FrameContentSize < 1<<30 {
+ // Never preallocate more than 1 GB up front.
if cap(dst)-len(dst) < int(frame.FrameContentSize) {
dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize))
copy(dst2, dst)
@@ -368,33 +393,170 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
// If non-blocking mode is used the returned boolean will be false
// if no data was available without blocking.
func (d *Decoder) nextBlock(blocking bool) (ok bool) {
- if d.current.d != nil {
- if debugDecoder {
- printf("re-adding current decoder %p", d.current.d)
- }
- d.decoders <- d.current.d
- d.current.d = nil
- }
if d.current.err != nil {
// Keep error state.
- return blocking
+ return false
}
+ d.current.b = d.current.b[:0]
+ // SYNC:
+ if d.syncStream.enabled {
+ if !blocking {
+ return false
+ }
+ ok = d.nextBlockSync()
+ if !ok {
+ d.stashDecoder()
+ }
+ return ok
+ }
+
+ //ASYNC:
+ d.stashDecoder()
if blocking {
- d.current.decodeOutput = <-d.current.output
+ d.current.decodeOutput, ok = <-d.current.output
} else {
select {
- case d.current.decodeOutput = <-d.current.output:
+ case d.current.decodeOutput, ok = <-d.current.output:
default:
return false
}
}
+ if !ok {
+ // This should not happen, so signal error state...
+ d.current.err = io.ErrUnexpectedEOF
+ return false
+ }
+ next := d.current.decodeOutput
+ if next.d != nil && next.d.async.newHist != nil {
+ d.current.crc.Reset()
+ }
if debugDecoder {
- println("got", len(d.current.b), "bytes, error:", d.current.err)
+ var tmp [4]byte
+ binary.LittleEndian.PutUint32(tmp[:], uint32(xxhash.Sum64(next.b)))
+ println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp)
+ }
+
+ if len(next.b) > 0 {
+ n, err := d.current.crc.Write(next.b)
+ if err == nil {
+ if n != len(next.b) {
+ d.current.err = io.ErrShortWrite
+ }
+ }
+ }
+ if next.err == nil && next.d != nil && len(next.d.checkCRC) != 0 {
+ got := d.current.crc.Sum64()
+ var tmp [4]byte
+ binary.LittleEndian.PutUint32(tmp[:], uint32(got))
+ if !bytes.Equal(tmp[:], next.d.checkCRC) && !ignoreCRC {
+ if debugDecoder {
+ println("CRC Check Failed:", tmp[:], " (got) !=", next.d.checkCRC, "(on stream)")
+ }
+ d.current.err = ErrCRCMismatch
+ } else {
+ if debugDecoder {
+ println("CRC ok", tmp[:])
+ }
+ }
+ }
+
+ return true
+}
+
+func (d *Decoder) nextBlockSync() (ok bool) {
+ if d.current.d == nil {
+ d.current.d = <-d.decoders
+ }
+ for len(d.current.b) == 0 {
+ if !d.syncStream.inFrame {
+ d.frame.history.reset()
+ d.current.err = d.frame.reset(&d.syncStream.br)
+ if d.current.err != nil {
+ return false
+ }
+ if d.frame.DictionaryID != nil {
+ dict, ok := d.dicts[*d.frame.DictionaryID]
+ if !ok {
+ d.current.err = ErrUnknownDictionary
+ return false
+ } else {
+ d.frame.history.setDict(&dict)
+ }
+ }
+ if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize {
+ d.current.err = ErrDecoderSizeExceeded
+ return false
+ }
+
+ d.syncStream.decodedFrame = 0
+ d.syncStream.inFrame = true
+ }
+ d.current.err = d.frame.next(d.current.d)
+ if d.current.err != nil {
+ return false
+ }
+ d.frame.history.ensureBlock()
+ if debugDecoder {
+ println("History trimmed:", len(d.frame.history.b), "decoded already:", d.syncStream.decodedFrame)
+ }
+ histBefore := len(d.frame.history.b)
+ d.current.err = d.current.d.decodeBuf(&d.frame.history)
+
+ if d.current.err != nil {
+ println("error after:", d.current.err)
+ return false
+ }
+ d.current.b = d.frame.history.b[histBefore:]
+ if debugDecoder {
+ println("history after:", len(d.frame.history.b))
+ }
+
+ // Check frame size (before CRC)
+ d.syncStream.decodedFrame += uint64(len(d.current.b))
+ if d.syncStream.decodedFrame > d.frame.FrameContentSize {
+ if debugDecoder {
+ printf("DecodedFrame (%d) > FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize)
+ }
+ d.current.err = ErrFrameSizeExceeded
+ return false
+ }
+
+ // Check FCS
+ if d.current.d.Last && d.frame.FrameContentSize != fcsUnknown && d.syncStream.decodedFrame != d.frame.FrameContentSize {
+ if debugDecoder {
+ printf("DecodedFrame (%d) != FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize)
+ }
+ d.current.err = ErrFrameSizeMismatch
+ return false
+ }
+
+ // Update/Check CRC
+ if d.frame.HasCheckSum {
+ d.frame.crc.Write(d.current.b)
+ if d.current.d.Last {
+ d.current.err = d.frame.checkCRC()
+ if d.current.err != nil {
+ println("CRC error:", d.current.err)
+ return false
+ }
+ }
+ }
+ d.syncStream.inFrame = !d.current.d.Last
}
return true
}
+func (d *Decoder) stashDecoder() {
+ if d.current.d != nil {
+ if debugDecoder {
+ printf("re-adding current decoder %p", d.current.d)
+ }
+ d.decoders <- d.current.d
+ d.current.d = nil
+ }
+}
+
// Close will release all resources.
// It is NOT possible to reuse the decoder after this.
func (d *Decoder) Close() {
@@ -402,10 +564,10 @@ func (d *Decoder) Close() {
return
}
d.drainOutput()
- if d.stream != nil {
- close(d.stream)
+ if d.current.cancel != nil {
+ d.current.cancel()
d.streamWg.Wait()
- d.stream = nil
+ d.current.cancel = nil
}
if d.decoders != nil {
close(d.decoders)
@@ -456,100 +618,307 @@ type decodeOutput struct {
err error
}
-type decodeStream struct {
- r io.Reader
-
- // Blocks ready to be written to output.
- output chan decodeOutput
-
- // cancel reading from the input
- cancel chan struct{}
+func (d *Decoder) startSyncDecoder(r io.Reader) error {
+ d.frame.history.reset()
+ d.syncStream.br = readerWrapper{r: r}
+ d.syncStream.inFrame = false
+ d.syncStream.enabled = true
+ d.syncStream.decodedFrame = 0
+ return nil
}
-// errEndOfStream indicates that everything from the stream was read.
-var errEndOfStream = errors.New("end-of-stream")
-
// Create Decoder:
-// Spawn n block decoders. These accept tasks to decode a block.
-// Create goroutine that handles stream processing, this will send history to decoders as they are available.
-// Decoders update the history as they decode.
-// When a block is returned:
-// a) history is sent to the next decoder,
-// b) content written to CRC.
-// c) return data to WRITER.
-// d) wait for next block to return data.
-// Once WRITTEN, the decoders reused by the writer frame decoder for re-use.
-func (d *Decoder) startStreamDecoder(inStream chan decodeStream) {
+// ASYNC:
+// Spawn 4 go routines.
+// 0: Read frames and decode blocks.
+// 1: Decode block and literals. Receives hufftree and seqdecs, returns seqdecs and huff tree.
+// 2: Wait for recentOffsets if needed. Decode sequences, send recentOffsets.
+// 3: Wait for stream history, execute sequences, send stream history.
+func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) {
defer d.streamWg.Done()
- frame := newFrameDec(d.o)
- for stream := range inStream {
- if debugDecoder {
- println("got new stream")
+ br := readerWrapper{r: r}
+
+ var seqPrepare = make(chan *blockDec, d.o.concurrent)
+ var seqDecode = make(chan *blockDec, d.o.concurrent)
+ var seqExecute = make(chan *blockDec, d.o.concurrent)
+
+ // Async 1: Prepare blocks...
+ go func() {
+ var hist history
+ var hasErr bool
+ for block := range seqPrepare {
+ if hasErr {
+ if block != nil {
+ seqDecode <- block
+ }
+ continue
+ }
+ if block.async.newHist != nil {
+ if debugDecoder {
+ println("Async 1: new history")
+ }
+ hist.reset()
+ if block.async.newHist.dict != nil {
+ hist.setDict(block.async.newHist.dict)
+ }
+ }
+ if block.err != nil || block.Type != blockTypeCompressed {
+ hasErr = block.err != nil
+ seqDecode <- block
+ continue
+ }
+
+ remain, err := block.decodeLiterals(block.data, &hist)
+ block.err = err
+ hasErr = block.err != nil
+ if err == nil {
+ block.async.literals = hist.decoders.literals
+ block.async.seqData = remain
+ } else if debugDecoder {
+ println("decodeLiterals error:", err)
+ }
+ seqDecode <- block
}
- br := readerWrapper{r: stream.r}
- decodeStream:
- for {
- frame.history.reset()
- err := frame.reset(&br)
- if debugDecoder && err != nil {
- println("Frame decoder returned", err)
+ close(seqDecode)
+ }()
+
+ // Async 2: Decode sequences...
+ go func() {
+ var hist history
+ var hasErr bool
+
+ for block := range seqDecode {
+ if hasErr {
+ if block != nil {
+ seqExecute <- block
+ }
+ continue
}
- if err == nil && frame.DictionaryID != nil {
- dict, ok := d.dicts[*frame.DictionaryID]
- if !ok {
- err = ErrUnknownDictionary
- } else {
- frame.history.setDict(&dict)
+ if block.async.newHist != nil {
+ if debugDecoder {
+ println("Async 2: new history, recent:", block.async.newHist.recentOffsets)
+ }
+ hist.decoders = block.async.newHist.decoders
+ hist.recentOffsets = block.async.newHist.recentOffsets
+ hist.windowSize = block.async.newHist.windowSize
+ if block.async.newHist.dict != nil {
+ hist.setDict(block.async.newHist.dict)
}
}
- if err != nil {
- stream.output <- decodeOutput{
- err: err,
+ if block.err != nil || block.Type != blockTypeCompressed {
+ hasErr = block.err != nil
+ seqExecute <- block
+ continue
+ }
+
+ hist.decoders.literals = block.async.literals
+ block.err = block.prepareSequences(block.async.seqData, &hist)
+ if debugDecoder && block.err != nil {
+ println("prepareSequences returned:", block.err)
+ }
+ hasErr = block.err != nil
+ if block.err == nil {
+ block.err = block.decodeSequences(&hist)
+ if debugDecoder && block.err != nil {
+ println("decodeSequences returned:", block.err)
}
- break
+ hasErr = block.err != nil
+ // block.async.sequence = hist.decoders.seq[:hist.decoders.nSeqs]
+ block.async.seqSize = hist.decoders.seqSize
}
- if debugDecoder {
- println("starting frame decoder")
- }
-
- // This goroutine will forward history between frames.
- frame.frameDone.Add(1)
- frame.initAsync()
-
- go frame.startDecoder(stream.output)
- decodeFrame:
- // Go through all blocks of the frame.
- for {
- dec := <-d.decoders
- select {
- case <-stream.cancel:
- if !frame.sendErr(dec, io.EOF) {
- // To not let the decoder dangle, send it back.
- stream.output <- decodeOutput{d: dec}
+ seqExecute <- block
+ }
+ close(seqExecute)
+ }()
+
+ var wg sync.WaitGroup
+ wg.Add(1)
+
+ // Async 3: Execute sequences...
+ frameHistCache := d.frame.history.b
+ go func() {
+ var hist history
+ var decodedFrame uint64
+ var fcs uint64
+ var hasErr bool
+ for block := range seqExecute {
+ out := decodeOutput{err: block.err, d: block}
+ if block.err != nil || hasErr {
+ hasErr = true
+ output <- out
+ continue
+ }
+ if block.async.newHist != nil {
+ if debugDecoder {
+ println("Async 3: new history")
+ }
+ hist.windowSize = block.async.newHist.windowSize
+ hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer
+ if block.async.newHist.dict != nil {
+ hist.setDict(block.async.newHist.dict)
+ }
+
+ if cap(hist.b) < hist.allocFrameBuffer {
+ if cap(frameHistCache) >= hist.allocFrameBuffer {
+ hist.b = frameHistCache
+ } else {
+ hist.b = make([]byte, 0, hist.allocFrameBuffer)
+ println("Alloc history sized", hist.allocFrameBuffer)
+ }
+ }
+ hist.b = hist.b[:0]
+ fcs = block.async.fcs
+ decodedFrame = 0
+ }
+ do := decodeOutput{err: block.err, d: block}
+ switch block.Type {
+ case blockTypeRLE:
+ if debugDecoder {
+ println("add rle block length:", block.RLESize)
+ }
+
+ if cap(block.dst) < int(block.RLESize) {
+ if block.lowMem {
+ block.dst = make([]byte, block.RLESize)
+ } else {
+ block.dst = make([]byte, maxBlockSize)
}
- break decodeStream
- default:
}
- err := frame.next(dec)
- switch err {
- case io.EOF:
- // End of current frame, no error
- println("EOF on next block")
- break decodeFrame
- case nil:
- continue
- default:
- println("block decoder returned", err)
- break decodeStream
+ block.dst = block.dst[:block.RLESize]
+ v := block.data[0]
+ for i := range block.dst {
+ block.dst[i] = v
+ }
+ hist.append(block.dst)
+ do.b = block.dst
+ case blockTypeRaw:
+ if debugDecoder {
+ println("add raw block length:", len(block.data))
+ }
+ hist.append(block.data)
+ do.b = block.data
+ case blockTypeCompressed:
+ if debugDecoder {
+ println("execute with history length:", len(hist.b), "window:", hist.windowSize)
+ }
+ hist.decoders.seqSize = block.async.seqSize
+ hist.decoders.literals = block.async.literals
+ do.err = block.executeSequences(&hist)
+ hasErr = do.err != nil
+ if debugDecoder && hasErr {
+ println("executeSequences returned:", do.err)
+ }
+ do.b = block.dst
+ }
+ if !hasErr {
+ decodedFrame += uint64(len(do.b))
+ if decodedFrame > fcs {
+ println("fcs exceeded", block.Last, fcs, decodedFrame)
+ do.err = ErrFrameSizeExceeded
+ hasErr = true
+ } else if block.Last && fcs != fcsUnknown && decodedFrame != fcs {
+ do.err = ErrFrameSizeMismatch
+ hasErr = true
+ } else {
+ if debugDecoder {
+ println("fcs ok", block.Last, fcs, decodedFrame)
+ }
}
}
- // All blocks have started decoding, check if there are more frames.
- println("waiting for done")
- frame.frameDone.Wait()
- println("done waiting...")
+ output <- do
+ }
+ close(output)
+ frameHistCache = hist.b
+ wg.Done()
+ if debugDecoder {
+ println("decoder goroutines finished")
+ }
+ }()
+
+decodeStream:
+ for {
+ frame := d.frame
+ if debugDecoder {
+ println("New frame...")
+ }
+ var historySent bool
+ frame.history.reset()
+ err := frame.reset(&br)
+ if debugDecoder && err != nil {
+ println("Frame decoder returned", err)
+ }
+ if err == nil && frame.DictionaryID != nil {
+ dict, ok := d.dicts[*frame.DictionaryID]
+ if !ok {
+ err = ErrUnknownDictionary
+ } else {
+ frame.history.setDict(&dict)
+ }
+ }
+ if err == nil && d.frame.WindowSize > d.o.maxWindowSize {
+ err = ErrDecoderSizeExceeded
+ }
+ if err != nil {
+ select {
+ case <-ctx.Done():
+ case dec := <-d.decoders:
+ dec.sendErr(err)
+ seqPrepare <- dec
+ }
+ break decodeStream
+ }
+
+ // Go through all blocks of the frame.
+ for {
+ var dec *blockDec
+ select {
+ case <-ctx.Done():
+ break decodeStream
+ case dec = <-d.decoders:
+ // Once we have a decoder, we MUST return it.
+ }
+ err := frame.next(dec)
+ if !historySent {
+ h := frame.history
+ if debugDecoder {
+ println("Alloc History:", h.allocFrameBuffer)
+ }
+ dec.async.newHist = &h
+ dec.async.fcs = frame.FrameContentSize
+ historySent = true
+ } else {
+ dec.async.newHist = nil
+ }
+ if debugDecoder && err != nil {
+ println("next block returned error:", err)
+ }
+ dec.err = err
+ dec.checkCRC = nil
+ if dec.Last && frame.HasCheckSum && err == nil {
+ crc, err := frame.rawInput.readSmall(4)
+ if err != nil {
+ println("CRC missing?", err)
+ dec.err = err
+ }
+ var tmp [4]byte
+ copy(tmp[:], crc)
+ dec.checkCRC = tmp[:]
+ if debugDecoder {
+ println("found crc to check:", dec.checkCRC)
+ }
+ }
+ err = dec.err
+ last := dec.Last
+ seqPrepare <- dec
+ if err != nil {
+ break decodeStream
+ }
+ if last {
+ break
+ }
}
- frame.frameDone.Wait()
- println("Sending EOS")
- stream.output <- decodeOutput{err: errEndOfStream}
}
+ close(seqPrepare)
+ wg.Wait()
+ d.frame.history.b = frameHistCache
}
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go
index 95cc9b8b..fd05c9bb 100644
--- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go
+++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go
@@ -28,6 +28,9 @@ func (o *decoderOptions) setDefault() {
concurrent: runtime.GOMAXPROCS(0),
maxWindowSize: MaxWindowSize,
}
+ if o.concurrent > 4 {
+ o.concurrent = 4
+ }
o.maxDecodedSize = 1 << 63
}
@@ -37,16 +40,25 @@ func WithDecoderLowmem(b bool) DOption {
return func(o *decoderOptions) error { o.lowMem = b; return nil }
}
-// WithDecoderConcurrency will set the concurrency,
-// meaning the maximum number of decoders to run concurrently.
-// The value supplied must be at least 1.
-// By default this will be set to GOMAXPROCS.
+// WithDecoderConcurrency sets the number of created decoders.
+// When decoding blocks with DecodeAll, this will limit the number
+// of possible concurrently running decodes.
+// When decoding streams, this will limit the number of
+// inflight blocks.
+// When decoding streams and setting the maximum to 1,
+// no async decoding will be done.
+// When a value of 0 is provided, GOMAXPROCS will be used.
+// By default this will be set to 4 or GOMAXPROCS, whichever is lower.
func WithDecoderConcurrency(n int) DOption {
return func(o *decoderOptions) error {
- if n <= 0 {
+ if n < 0 {
return errors.New("concurrency must be at least 1")
}
- o.concurrent = n
+ if n == 0 {
+ o.concurrent = runtime.GOMAXPROCS(0)
+ } else {
+ o.concurrent = n
+ }
return nil
}
}
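For reference, a minimal usage sketch of the updated WithDecoderConcurrency option (illustrative only, not part of the patch; it assumes the package's exported NewReader/Decoder API as documented in the comment above):

package main

import (
	"io"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// 1 disables async stream decoding; 0 would select GOMAXPROCS.
	// With no option set, the default is now the lower of 4 and GOMAXPROCS.
	dec, err := zstd.NewReader(os.Stdin, zstd.WithDecoderConcurrency(1))
	if err != nil {
		panic(err)
	}
	defer dec.Close()
	if _, err := io.Copy(os.Stdout, dec); err != nil {
		panic(err)
	}
}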
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
index 5f08a283..f51ab529 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
@@ -85,7 +85,7 @@ func (e *fastEncoder) Encode(blk *blockEnc, src []byte) {
// TEMPLATE
const hashLog = tableBits
// seems global, but would be nice to tweak.
- const kSearchStrength = 7
+ const kSearchStrength = 6
// nextEmit is where in src the next emitLiteral should start from.
nextEmit := s
@@ -334,7 +334,7 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
// TEMPLATE
const hashLog = tableBits
// seems global, but would be nice to tweak.
- const kSearchStrength = 8
+ const kSearchStrength = 6
// nextEmit is where in src the next emitLiteral should start from.
nextEmit := s
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go
index e6e31596..dcc987a7 100644
--- a/vendor/github.com/klauspost/compress/zstd/encoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/encoder.go
@@ -98,23 +98,25 @@ func (e *Encoder) Reset(w io.Writer) {
if cap(s.filling) == 0 {
s.filling = make([]byte, 0, e.o.blockSize)
}
- if cap(s.current) == 0 {
- s.current = make([]byte, 0, e.o.blockSize)
- }
- if cap(s.previous) == 0 {
- s.previous = make([]byte, 0, e.o.blockSize)
+ if e.o.concurrent > 1 {
+ if cap(s.current) == 0 {
+ s.current = make([]byte, 0, e.o.blockSize)
+ }
+ if cap(s.previous) == 0 {
+ s.previous = make([]byte, 0, e.o.blockSize)
+ }
+ s.current = s.current[:0]
+ s.previous = s.previous[:0]
+ if s.writing == nil {
+ s.writing = &blockEnc{lowMem: e.o.lowMem}
+ s.writing.init()
+ }
+ s.writing.initNewEncode()
}
if s.encoder == nil {
s.encoder = e.o.encoder()
}
- if s.writing == nil {
- s.writing = &blockEnc{lowMem: e.o.lowMem}
- s.writing.init()
- }
- s.writing.initNewEncode()
s.filling = s.filling[:0]
- s.current = s.current[:0]
- s.previous = s.previous[:0]
s.encoder.Reset(e.o.dict, false)
s.headerWritten = false
s.eofWritten = false
@@ -258,6 +260,46 @@ func (e *Encoder) nextBlock(final bool) error {
return s.err
}
+ // SYNC:
+ if e.o.concurrent == 1 {
+ src := s.filling
+ s.nInput += int64(len(s.filling))
+ if debugEncoder {
+ println("Adding sync block,", len(src), "bytes, final:", final)
+ }
+ enc := s.encoder
+ blk := enc.Block()
+ blk.reset(nil)
+ enc.Encode(blk, src)
+ blk.last = final
+ if final {
+ s.eofWritten = true
+ }
+
+ err := errIncompressible
+ // If we got the exact same number of literals as input,
+ // assume the literals cannot be compressed.
+ if len(src) != len(blk.literals) || len(src) != e.o.blockSize {
+ err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
+ }
+ switch err {
+ case errIncompressible:
+ if debugEncoder {
+ println("Storing incompressible block as raw")
+ }
+ blk.encodeRaw(src)
+ // In fast mode, we do not transfer offsets, so we don't have to deal with changing the.
+ case nil:
+ default:
+ s.err = err
+ return err
+ }
+ _, s.err = s.w.Write(blk.output)
+ s.nWritten += int64(len(blk.output))
+ s.filling = s.filling[:0]
+ return s.err
+ }
+
// Move blocks forward.
s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current
s.nInput += int64(len(s.current))
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
index 5f2e1d02..44d8dbd1 100644
--- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go
+++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
@@ -76,6 +76,7 @@ func WithEncoderCRC(b bool) EOption {
// WithEncoderConcurrency will set the concurrency,
// meaning the maximum number of encoders to run concurrently.
// The value supplied must be at least 1.
+// For streams, setting a value of 1 will disable async compression.
// By default this will be set to GOMAXPROCS.
func WithEncoderConcurrency(n int) EOption {
return func(o *encoderOptions) error {
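Likewise, a small sketch of the synchronous streaming path that the new comment describes (illustrative only, not part of the patch; with WithEncoderConcurrency(1) the encoder takes the new single-goroutine block path added in encoder.go above):

package main

import (
	"io"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Concurrency 1 selects the fully synchronous streaming path.
	enc, err := zstd.NewWriter(os.Stdout, zstd.WithEncoderConcurrency(1))
	if err != nil {
		panic(err)
	}
	if _, err := io.Copy(enc, os.Stdin); err != nil {
		enc.Close()
		panic(err)
	}
	// Close flushes the final block and writes the checksum if enabled.
	if err := enc.Close(); err != nil {
		panic(err)
	}
}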
diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go
index 989c79f8..11089d22 100644
--- a/vendor/github.com/klauspost/compress/zstd/framedec.go
+++ b/vendor/github.com/klauspost/compress/zstd/framedec.go
@@ -8,23 +8,17 @@ import (
"bytes"
"encoding/hex"
"errors"
- "hash"
"io"
- "sync"
"github.com/klauspost/compress/zstd/internal/xxhash"
)
type frameDec struct {
- o decoderOptions
- crc hash.Hash64
- offset int64
+ o decoderOptions
+ crc *xxhash.Digest
WindowSize uint64
- // In order queue of blocks being decoded.
- decoding chan *blockDec
-
// Frame history passed between blocks
history history
@@ -34,15 +28,10 @@ type frameDec struct {
bBuf byteBuf
FrameContentSize uint64
- frameDone sync.WaitGroup
DictionaryID *uint32
HasCheckSum bool
SingleSegment bool
-
- // asyncRunning indicates whether the async routine processes input on 'decoding'.
- asyncRunningMu sync.Mutex
- asyncRunning bool
}
const (
@@ -208,7 +197,7 @@ func (d *frameDec) reset(br byteBuffer) error {
default:
fcsSize = 1 << v
}
- d.FrameContentSize = 0
+ d.FrameContentSize = fcsUnknown
if fcsSize > 0 {
b, err := br.readSmall(fcsSize)
if err != nil {
@@ -229,9 +218,10 @@ func (d *frameDec) reset(br byteBuffer) error {
d.FrameContentSize = uint64(d1) | (uint64(d2) << 32)
}
if debugDecoder {
- println("field size bits:", v, "fcsSize:", fcsSize, "FrameContentSize:", d.FrameContentSize, hex.EncodeToString(b[:fcsSize]), "singleseg:", d.SingleSegment, "window:", d.WindowSize)
+ println("Read FCS:", d.FrameContentSize)
}
}
+
// Move this to shared.
d.HasCheckSum = fhd&(1<<2) != 0
if d.HasCheckSum {
@@ -264,10 +254,16 @@ func (d *frameDec) reset(br byteBuffer) error {
}
d.history.windowSize = int(d.WindowSize)
if d.o.lowMem && d.history.windowSize < maxBlockSize {
- d.history.maxSize = d.history.windowSize * 2
+ d.history.allocFrameBuffer = d.history.windowSize * 2
+ // TODO: Maybe use FrameContent size
} else {
- d.history.maxSize = d.history.windowSize + maxBlockSize
+ d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize
}
+
+ if debugDecoder {
+ println("Frame: Dict:", d.DictionaryID, "FrameContentSize:", d.FrameContentSize, "singleseg:", d.SingleSegment, "window:", d.WindowSize, "crc:", d.HasCheckSum)
+ }
+
// history contains input - maybe we do something
d.rawInput = br
return nil
@@ -276,49 +272,18 @@ func (d *frameDec) reset(br byteBuffer) error {
// next will start decoding the next block from stream.
func (d *frameDec) next(block *blockDec) error {
if debugDecoder {
- printf("decoding new block %p:%p", block, block.data)
+ println("decoding new block")
}
err := block.reset(d.rawInput, d.WindowSize)
if err != nil {
println("block error:", err)
// Signal the frame decoder we have a problem.
- d.sendErr(block, err)
+ block.sendErr(err)
return err
}
- block.input <- struct{}{}
- if debugDecoder {
- println("next block:", block)
- }
- d.asyncRunningMu.Lock()
- defer d.asyncRunningMu.Unlock()
- if !d.asyncRunning {
- return nil
- }
- if block.Last {
- // We indicate the frame is done by sending io.EOF
- d.decoding <- block
- return io.EOF
- }
- d.decoding <- block
return nil
}
-// sendEOF will queue an error block on the frame.
-// This will cause the frame decoder to return when it encounters the block.
-// Returns true if the decoder was added.
-func (d *frameDec) sendErr(block *blockDec, err error) bool {
- d.asyncRunningMu.Lock()
- defer d.asyncRunningMu.Unlock()
- if !d.asyncRunning {
- return false
- }
-
- println("sending error", err.Error())
- block.sendErr(err)
- d.decoding <- block
- return true
-}
-
// checkCRC will check the checksum if the frame has one.
// Will return ErrCRCMismatch if crc check failed, otherwise nil.
func (d *frameDec) checkCRC() error {
@@ -340,7 +305,7 @@ func (d *frameDec) checkCRC() error {
return err
}
- if !bytes.Equal(tmp[:], want) {
+ if !bytes.Equal(tmp[:], want) && !ignoreCRC {
if debugDecoder {
println("CRC Check Failed:", tmp[:], "!=", want)
}
@@ -352,131 +317,13 @@ func (d *frameDec) checkCRC() error {
return nil
}
-func (d *frameDec) initAsync() {
- if !d.o.lowMem && !d.SingleSegment {
- // set max extra size history to 2MB.
- d.history.maxSize = d.history.windowSize + maxBlockSize
- }
- // re-alloc if more than one extra block size.
- if d.o.lowMem && cap(d.history.b) > d.history.maxSize+maxBlockSize {
- d.history.b = make([]byte, 0, d.history.maxSize)
- }
- if cap(d.history.b) < d.history.maxSize {
- d.history.b = make([]byte, 0, d.history.maxSize)
- }
- if cap(d.decoding) < d.o.concurrent {
- d.decoding = make(chan *blockDec, d.o.concurrent)
- }
- if debugDecoder {
- h := d.history
- printf("history init. len: %d, cap: %d", len(h.b), cap(h.b))
- }
- d.asyncRunningMu.Lock()
- d.asyncRunning = true
- d.asyncRunningMu.Unlock()
-}
-
-// startDecoder will start decoding blocks and write them to the writer.
-// The decoder will stop as soon as an error occurs or at end of frame.
-// When the frame has finished decoding the *bufio.Reader
-// containing the remaining input will be sent on frameDec.frameDone.
-func (d *frameDec) startDecoder(output chan decodeOutput) {
- written := int64(0)
-
- defer func() {
- d.asyncRunningMu.Lock()
- d.asyncRunning = false
- d.asyncRunningMu.Unlock()
-
- // Drain the currently decoding.
- d.history.error = true
- flushdone:
- for {
- select {
- case b := <-d.decoding:
- b.history <- &d.history
- output <- <-b.result
- default:
- break flushdone
- }
- }
- println("frame decoder done, signalling done")
- d.frameDone.Done()
- }()
- // Get decoder for first block.
- block := <-d.decoding
- block.history <- &d.history
- for {
- var next *blockDec
- // Get result
- r := <-block.result
- if r.err != nil {
- println("Result contained error", r.err)
- output <- r
- return
- }
- if debugDecoder {
- println("got result, from ", d.offset, "to", d.offset+int64(len(r.b)))
- d.offset += int64(len(r.b))
- }
- if !block.Last {
- // Send history to next block
- select {
- case next = <-d.decoding:
- if debugDecoder {
- println("Sending ", len(d.history.b), "bytes as history")
- }
- next.history <- &d.history
- default:
- // Wait until we have sent the block, so
- // other decoders can potentially get the decoder.
- next = nil
- }
- }
-
- // Add checksum, async to decoding.
- if d.HasCheckSum {
- n, err := d.crc.Write(r.b)
- if err != nil {
- r.err = err
- if n != len(r.b) {
- r.err = io.ErrShortWrite
- }
- output <- r
- return
- }
- }
- written += int64(len(r.b))
- if d.SingleSegment && uint64(written) > d.FrameContentSize {
- println("runDecoder: single segment and", uint64(written), ">", d.FrameContentSize)
- r.err = ErrFrameSizeExceeded
- output <- r
- return
- }
- if block.Last {
- r.err = d.checkCRC()
- output <- r
- return
- }
- output <- r
- if next == nil {
- // There was no decoder available, we wait for one now that we have sent to the writer.
- if debugDecoder {
- println("Sending ", len(d.history.b), " bytes as history")
- }
- next = <-d.decoding
- next.history <- &d.history
- }
- block = next
- }
-}
-
// runDecoder will create a sync decoder that will decode a block of data.
func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
saved := d.history.b
// We use the history for output to avoid copying it.
d.history.b = dst
+ d.history.ignoreBuffer = len(dst)
// Store input length, so we only check new data.
crcStart := len(dst)
var err error
@@ -489,22 +336,30 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
println("next block:", dec)
}
err = dec.decodeBuf(&d.history)
- if err != nil || dec.Last {
+ if err != nil {
break
}
if uint64(len(d.history.b)) > d.o.maxDecodedSize {
err = ErrDecoderSizeExceeded
break
}
- if d.SingleSegment && uint64(len(d.history.b)) > d.o.maxDecodedSize {
- println("runDecoder: single segment and", uint64(len(d.history.b)), ">", d.o.maxDecodedSize)
+ if uint64(len(d.history.b)-crcStart) > d.FrameContentSize {
+ println("runDecoder: FrameContentSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.FrameContentSize)
err = ErrFrameSizeExceeded
break
}
+ if dec.Last {
+ break
+ }
+ if debugDecoder {
+ println("runDecoder: FrameContentSize", uint64(len(d.history.b)-crcStart), "<=", d.FrameContentSize)
+ }
}
dst = d.history.b
if err == nil {
- if d.HasCheckSum {
+ if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize {
+ err = ErrFrameSizeMismatch
+ } else if d.HasCheckSum {
var n int
n, err = d.crc.Write(dst[crcStart:])
if err == nil {
diff --git a/vendor/github.com/klauspost/compress/zstd/fuzz.go b/vendor/github.com/klauspost/compress/zstd/fuzz.go
new file mode 100644
index 00000000..7f2210e0
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/fuzz.go
@@ -0,0 +1,11 @@
+//go:build ignorecrc
+// +build ignorecrc
+
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+// ignoreCRC can be used for fuzz testing to ignore CRC values...
+const ignoreCRC = true
diff --git a/vendor/github.com/klauspost/compress/zstd/fuzz_none.go b/vendor/github.com/klauspost/compress/zstd/fuzz_none.go
new file mode 100644
index 00000000..6811c68a
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/fuzz_none.go
@@ -0,0 +1,11 @@
+//go:build !ignorecrc
+// +build !ignorecrc
+
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+// ignoreCRC can be used for fuzz testing to ignore CRC values...
+const ignoreCRC = false
diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go
index f783e32d..28b40153 100644
--- a/vendor/github.com/klauspost/compress/zstd/history.go
+++ b/vendor/github.com/klauspost/compress/zstd/history.go
@@ -10,20 +10,31 @@ import (
// history contains the information transferred between blocks.
type history struct {
- b []byte
- huffTree *huff0.Scratch
- recentOffsets [3]int
+ // Literal decompression
+ huffTree *huff0.Scratch
+
+ // Sequence decompression
decoders sequenceDecs
- windowSize int
- maxSize int
- error bool
- dict *dict
+ recentOffsets [3]int
+
+ // History buffer...
+ b []byte
+
+ // ignoreBuffer is the number of leading bytes in b to skip
+ // when checking for matches in history.
+ ignoreBuffer int
+
+ windowSize int
+ allocFrameBuffer int // needed?
+ error bool
+ dict *dict
}
// reset will reset the history to initial state of a frame.
// The history must already have been initialized to the desired size.
func (h *history) reset() {
h.b = h.b[:0]
+ h.ignoreBuffer = 0
h.error = false
h.recentOffsets = [3]int{1, 4, 8}
if f := h.decoders.litLengths.fse; f != nil && !f.preDefined {
@@ -35,7 +46,7 @@ func (h *history) reset() {
if f := h.decoders.matchLengths.fse; f != nil && !f.preDefined {
fseDecoderPool.Put(f)
}
- h.decoders = sequenceDecs{}
+ h.decoders = sequenceDecs{br: h.decoders.br}
if h.huffTree != nil {
if h.dict == nil || h.dict.litEnc != h.huffTree {
huffDecoderPool.Put(h.huffTree)
@@ -54,6 +65,7 @@ func (h *history) setDict(dict *dict) {
h.decoders.litLengths = dict.llDec
h.decoders.offsets = dict.ofDec
h.decoders.matchLengths = dict.mlDec
+ h.decoders.dict = dict.content
h.recentOffsets = dict.offsets
h.huffTree = dict.litEnc
}
@@ -83,6 +95,24 @@ func (h *history) append(b []byte) {
copy(h.b[h.windowSize-len(b):], b)
}
+// ensureBlock will ensure there is space for at least one block...
+func (h *history) ensureBlock() {
+ if cap(h.b) < h.allocFrameBuffer {
+ h.b = make([]byte, 0, h.allocFrameBuffer)
+ return
+ }
+
+ avail := cap(h.b) - len(h.b)
+ if avail >= h.windowSize || avail > maxCompressedBlockSize {
+ return
+ }
+ // Move data down so we only have window size left.
+ // We know we have less than window size in b at this point.
+ discard := len(h.b) - h.windowSize
+ copy(h.b, h.b[discard:])
+ h.b = h.b[:h.windowSize]
+}
+
// append bytes to history without ever discarding anything.
func (h *history) appendKeep(b []byte) {
h.b = append(h.b, b...)
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go
index bc731e4c..819f1461 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go
@@ -20,6 +20,10 @@ type seq struct {
llCode, mlCode, ofCode uint8
}
+type seqVals struct {
+ ll, ml, mo int
+}
+
func (s seq) String() string {
if s.offset <= 3 {
if s.offset == 0 {
@@ -61,16 +65,18 @@ type sequenceDecs struct {
offsets sequenceDec
matchLengths sequenceDec
prevOffset [3]int
- hist []byte
dict []byte
literals []byte
out []byte
+ nSeqs int
+ br *bitReader
+ seqSize int
windowSize int
maxBits uint8
}
// initialize all 3 decoders from the stream input.
-func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out []byte) error {
+func (s *sequenceDecs) initialize(br *bitReader, hist *history, out []byte) error {
if err := s.litLengths.init(br); err != nil {
return errors.New("litLengths:" + err.Error())
}
@@ -80,8 +86,7 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out []
if err := s.matchLengths.init(br); err != nil {
return errors.New("matchLengths:" + err.Error())
}
- s.literals = literals
- s.hist = hist.b
+ s.br = br
s.prevOffset = hist.recentOffsets
s.maxBits = s.litLengths.fse.maxBits + s.offsets.fse.maxBits + s.matchLengths.fse.maxBits
s.windowSize = hist.windowSize
@@ -94,11 +99,261 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out []
}
// decode sequences from the stream with the provided history.
-func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
+func (s *sequenceDecs) decode(seqs []seqVals) error {
+ br := s.br
+
+ // Grab full sizes tables, to avoid bounds checks.
+ llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
+ llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
+ s.seqSize = 0
+ litRemain := len(s.literals)
+ maxBlockSize := maxCompressedBlockSize
+ if s.windowSize < maxBlockSize {
+ maxBlockSize = s.windowSize
+ }
+ for i := range seqs {
+ var ll, mo, ml int
+ if br.off > 4+((maxOffsetBits+16+16)>>3) {
+ // inlined function:
+ // ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
+
+ // Final will not read from stream.
+ var llB, mlB, moB uint8
+ ll, llB = llState.final()
+ ml, mlB = mlState.final()
+ mo, moB = ofState.final()
+
+ // extra bits are stored in reverse order.
+ br.fillFast()
+ mo += br.getBits(moB)
+ if s.maxBits > 32 {
+ br.fillFast()
+ }
+ ml += br.getBits(mlB)
+ ll += br.getBits(llB)
+
+ if moB > 1 {
+ s.prevOffset[2] = s.prevOffset[1]
+ s.prevOffset[1] = s.prevOffset[0]
+ s.prevOffset[0] = mo
+ } else {
+ // mo = s.adjustOffset(mo, ll, moB)
+ // Inlined for rather big speedup
+ if ll == 0 {
+ // There is an exception though, when current sequence's literals_length = 0.
+ // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
+ // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
+ mo++
+ }
+
+ if mo == 0 {
+ mo = s.prevOffset[0]
+ } else {
+ var temp int
+ if mo == 3 {
+ temp = s.prevOffset[0] - 1
+ } else {
+ temp = s.prevOffset[mo]
+ }
+
+ if temp == 0 {
+ // 0 is not valid; input is corrupted; force offset to 1
+ println("WARNING: temp was 0")
+ temp = 1
+ }
+
+ if mo != 1 {
+ s.prevOffset[2] = s.prevOffset[1]
+ }
+ s.prevOffset[1] = s.prevOffset[0]
+ s.prevOffset[0] = temp
+ mo = temp
+ }
+ }
+ br.fillFast()
+ } else {
+ if br.overread() {
+ if debugDecoder {
+ printf("reading sequence %d, exceeded available data\n", i)
+ }
+ return io.ErrUnexpectedEOF
+ }
+ ll, mo, ml = s.next(br, llState, mlState, ofState)
+ br.fill()
+ }
+
+ if debugSequences {
+ println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml)
+ }
+ // Evaluate.
+ // We might be doing this async, so do it early.
+ if mo == 0 && ml > 0 {
+ return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
+ }
+ if ml > maxMatchLen {
+ return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
+ }
+ s.seqSize += ll + ml
+ if s.seqSize > maxBlockSize {
+ return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
+ }
+ litRemain -= ll
+ if litRemain < 0 {
+ return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll)
+ }
+ seqs[i] = seqVals{
+ ll: ll,
+ ml: ml,
+ mo: mo,
+ }
+ if i == len(seqs)-1 {
+ // This is the last sequence, so we shouldn't update state.
+ break
+ }
+
+ // Manually inlined, ~ 5-20% faster
+ // Update all 3 states at once. Approx 20% faster.
+ nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits()
+ if nBits == 0 {
+ llState = llTable[llState.newState()&maxTableMask]
+ mlState = mlTable[mlState.newState()&maxTableMask]
+ ofState = ofTable[ofState.newState()&maxTableMask]
+ } else {
+ bits := br.get32BitsFast(nBits)
+ lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
+ llState = llTable[(llState.newState()+lowBits)&maxTableMask]
+
+ lowBits = uint16(bits >> (ofState.nbBits() & 31))
+ lowBits &= bitMask[mlState.nbBits()&15]
+ mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask]
+
+ lowBits = uint16(bits) & bitMask[ofState.nbBits()&15]
+ ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask]
+ }
+ }
+ s.seqSize += litRemain
+ if s.seqSize > maxBlockSize {
+ return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
+ }
+ err := br.close()
+ if err != nil {
+ printf("Closing sequences: %v, %+v\n", err, *br)
+ }
+ return err
+}
+
+// execute will execute the decoded sequence with the provided history.
+// The sequence must be evaluated before being sent.
+func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
+ // Ensure we have enough output size...
+ if len(s.out)+s.seqSize > cap(s.out) {
+ addBytes := s.seqSize + len(s.out)
+ s.out = append(s.out, make([]byte, addBytes)...)
+ s.out = s.out[:len(s.out)-addBytes]
+ }
+
+ if debugDecoder {
+ printf("Execute %d seqs with hist %d, dict %d, literals: %d into %d bytes\n", len(seqs), len(hist), len(s.dict), len(s.literals), s.seqSize)
+ }
+
+ var t = len(s.out)
+ out := s.out[:t+s.seqSize]
+
+ for _, seq := range seqs {
+ // Add literals
+ copy(out[t:], s.literals[:seq.ll])
+ t += seq.ll
+ s.literals = s.literals[seq.ll:]
+
+ // Copy from dictionary...
+ if seq.mo > t+len(hist) || seq.mo > s.windowSize {
+ if len(s.dict) == 0 {
+ return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist))
+ }
+
+ // we may be in dictionary.
+ dictO := len(s.dict) - (seq.mo - (t + len(hist)))
+ if dictO < 0 || dictO >= len(s.dict) {
+ return fmt.Errorf("match offset (%d) bigger than current history+dict (%d)", seq.mo, t+len(hist)+len(s.dict))
+ }
+ end := dictO + seq.ml
+ if end > len(s.dict) {
+ n := len(s.dict) - dictO
+ copy(out[t:], s.dict[dictO:])
+ t += n
+ seq.ml -= n
+ } else {
+ copy(out[t:], s.dict[dictO:end])
+ t += end - dictO
+ continue
+ }
+ }
+
+ // Copy from history.
+ if v := seq.mo - t; v > 0 {
+ // v is the start position in history from end.
+ start := len(hist) - v
+ if seq.ml > v {
+ // Some goes into current block.
+ // Copy remainder of history
+ copy(out[t:], hist[start:])
+ t += v
+ seq.ml -= v
+ } else {
+ copy(out[t:], hist[start:start+seq.ml])
+ t += seq.ml
+ continue
+ }
+ }
+ // We must be in current buffer now
+ if seq.ml > 0 {
+ start := t - seq.mo
+ if seq.ml <= t-start {
+ // No overlap
+ copy(out[t:], out[start:start+seq.ml])
+ t += seq.ml
+ continue
+ } else {
+ // Overlapping copy
+ // Extend destination slice and copy one byte at the time.
+ src := out[start : start+seq.ml]
+ dst := out[t:]
+ dst = dst[:len(src)]
+ t += len(src)
+ // Destination is the space we just added.
+ for i := range src {
+ dst[i] = src[i]
+ }
+ }
+ }
+ }
+ // Add final literals
+ copy(out[t:], s.literals)
+ if debugDecoder {
+ t += len(s.literals)
+ if t != len(out) {
+ panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize))
+ }
+ }
+ s.out = out
+
+ return nil
+}
+
+// decode sequences from the stream with the provided history.
+func (s *sequenceDecs) decodeSync(history *history) error {
+ br := s.br
+ seqs := s.nSeqs
startSize := len(s.out)
// Grab full sizes tables, to avoid bounds checks.
llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
+ hist := history.b[history.ignoreBuffer:]
+ out := s.out
+ maxBlockSize := maxCompressedBlockSize
+ if s.windowSize < maxBlockSize {
+ maxBlockSize = s.windowSize
+ }
for i := seqs - 1; i >= 0; i-- {
if br.overread() {
@@ -151,7 +406,7 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
if temp == 0 {
// 0 is not valid; input is corrupted; force offset to 1
- println("temp was 0")
+ println("WARNING: temp was 0")
temp = 1
}
@@ -176,51 +431,49 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
if ll > len(s.literals) {
return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals))
}
- size := ll + ml + len(s.out)
+ size := ll + ml + len(out)
if size-startSize > maxBlockSize {
- return fmt.Errorf("output (%d) bigger than max block size", size)
+ return fmt.Errorf("output (%d) bigger than max block size (%d)", size, maxBlockSize)
}
- if size > cap(s.out) {
+ if size > cap(out) {
// Not enough size, which can happen under high volume block streaming conditions
// but could be if destination slice is too small for sync operations.
// over-allocating here can create a large amount of GC pressure so we try to keep
// it as contained as possible
- used := len(s.out) - startSize
+ used := len(out) - startSize
addBytes := 256 + ll + ml + used>>2
// Clamp to max block size.
if used+addBytes > maxBlockSize {
addBytes = maxBlockSize - used
}
- s.out = append(s.out, make([]byte, addBytes)...)
- s.out = s.out[:len(s.out)-addBytes]
+ out = append(out, make([]byte, addBytes)...)
+ out = out[:len(out)-addBytes]
}
if ml > maxMatchLen {
return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
}
// Add literals
- s.out = append(s.out, s.literals[:ll]...)
+ out = append(out, s.literals[:ll]...)
s.literals = s.literals[ll:]
- out := s.out
if mo == 0 && ml > 0 {
return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
}
- if mo > len(s.out)+len(hist) || mo > s.windowSize {
+ if mo > len(out)+len(hist) || mo > s.windowSize {
if len(s.dict) == 0 {
- return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist))
+ return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist))
}
// we may be in dictionary.
- dictO := len(s.dict) - (mo - (len(s.out) + len(hist)))
+ dictO := len(s.dict) - (mo - (len(out) + len(hist)))
if dictO < 0 || dictO >= len(s.dict) {
- return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist))
+ return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist))
}
end := dictO + ml
if end > len(s.dict) {
out = append(out, s.dict[dictO:]...)
- mo -= len(s.dict) - dictO
ml -= len(s.dict) - dictO
} else {
out = append(out, s.dict[dictO:end]...)
@@ -231,26 +484,25 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
// Copy from history.
// TODO: Blocks without history could be made to ignore this completely.
- if v := mo - len(s.out); v > 0 {
+ if v := mo - len(out); v > 0 {
// v is the start position in history from end.
- start := len(s.hist) - v
+ start := len(hist) - v
if ml > v {
// Some goes into current block.
// Copy remainder of history
- out = append(out, s.hist[start:]...)
- mo -= v
+ out = append(out, hist[start:]...)
ml -= v
} else {
- out = append(out, s.hist[start:start+ml]...)
+ out = append(out, hist[start:start+ml]...)
ml = 0
}
}
// We must be in current buffer now
if ml > 0 {
- start := len(s.out) - mo
- if ml <= len(s.out)-start {
+ start := len(out) - mo
+ if ml <= len(out)-start {
// No overlap
- out = append(out, s.out[start:start+ml]...)
+ out = append(out, out[start:start+ml]...)
} else {
// Overlapping copy
// Extend destination slice and copy one byte at the time.
@@ -264,7 +516,6 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
}
}
}
- s.out = out
if i == 0 {
// This is the last sequence, so we shouldn't update state.
break
@@ -291,9 +542,14 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
}
}
+ // Check if space for literals
+ if len(s.literals)+len(s.out)-startSize > maxBlockSize {
+ return fmt.Errorf("output (%d) bigger than max block size (%d)", len(s.out), maxBlockSize)
+ }
+
// Add final literals
- s.out = append(s.out, s.literals...)
- return nil
+ s.out = append(out, s.literals...)
+ return br.close()
}
// update states, at least 27 bits must be available.
@@ -457,36 +713,3 @@ func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int {
s.prevOffset[0] = temp
return temp
}
-
-// mergeHistory will merge history.
-func (s *sequenceDecs) mergeHistory(hist *sequenceDecs) (*sequenceDecs, error) {
- for i := uint(0); i < 3; i++ {
- var sNew, sHist *sequenceDec
- switch i {
- default:
- // same as "case 0":
- sNew = &s.litLengths
- sHist = &hist.litLengths
- case 1:
- sNew = &s.offsets
- sHist = &hist.offsets
- case 2:
- sNew = &s.matchLengths
- sHist = &hist.matchLengths
- }
- if sNew.repeat {
- if sHist.fse == nil {
- return nil, fmt.Errorf("sequence stream %d, repeat requested, but no history", i)
- }
- continue
- }
- if sNew.fse == nil {
- return nil, fmt.Errorf("sequence stream %d, no fse found", i)
- }
- if sHist.fse != nil && !sHist.fse.preDefined {
- fseDecoderPool.Put(sHist.fse)
- }
- sHist.fse = sNew.fse
- }
- return hist, nil
-}
diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go
index 967f29b3..ffffcbc2 100644
--- a/vendor/github.com/klauspost/compress/zstd/zip.go
+++ b/vendor/github.com/klauspost/compress/zstd/zip.go
@@ -20,7 +20,7 @@ const ZipMethodPKWare = 20
var zipReaderPool sync.Pool
-// newZipReader cannot be used since we would leak goroutines...
+// newZipReader creates a pooled zip decompressor.
func newZipReader(r io.Reader) io.ReadCloser {
dec, ok := zipReaderPool.Get().(*Decoder)
if ok {
@@ -44,10 +44,14 @@ func (r *pooledZipReader) Read(p []byte) (n int, err error) {
r.mu.Lock()
defer r.mu.Unlock()
if r.dec == nil {
- return 0, errors.New("Read after Close")
+ return 0, errors.New("read after close or EOF")
}
dec, err := r.dec.Read(p)
-
+ if err == io.EOF {
+ err = r.dec.Reset(nil)
+ zipReaderPool.Put(r.dec)
+ r.dec = nil
+ }
return dec, err
}
@@ -112,11 +116,5 @@ func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) {
// ZipDecompressor returns a decompressor that can be registered with zip libraries.
// See ZipCompressor for example.
func ZipDecompressor() func(r io.Reader) io.ReadCloser {
- return func(r io.Reader) io.ReadCloser {
- d, err := NewReader(r, WithDecoderConcurrency(1), WithDecoderLowmem(true))
- if err != nil {
- panic(err)
- }
- return d.IOReadCloser()
- }
+ return newZipReader
}
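For context, a sketch of how the pooled ZipDecompressor is wired into archive/zip (illustrative only, not part of the patch; it assumes the package also exports a zstd zip method ID such as ZipMethodWinZip alongside ZipMethodPKWare):

package zipzstd

import (
	"archive/zip"

	"github.com/klauspost/compress/zstd"
)

// openZstdZip opens a zip file whose entries may be zstd-compressed.
func openZstdZip(path string) (*zip.ReadCloser, error) {
	zr, err := zip.OpenReader(path)
	if err != nil {
		return nil, err
	}
	// The registered function hands out pooled *zstd.Decoder instances;
	// per the pooledZipReader change above, decoders are reset and
	// returned to the pool once an entry is read to EOF.
	zr.RegisterDecompressor(zstd.ZipMethodWinZip, zstd.ZipDecompressor())
	return zr, nil
}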
diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go
index ef1d49a0..c1c90b4a 100644
--- a/vendor/github.com/klauspost/compress/zstd/zstd.go
+++ b/vendor/github.com/klauspost/compress/zstd/zstd.go
@@ -39,6 +39,9 @@ const zstdMinMatch = 3
// Reset the buffer offset when reaching this.
const bufferReset = math.MaxInt32 - MaxWindowSize
+// fcsUnknown is used for unknown frame content size.
+const fcsUnknown = math.MaxUint64
+
var (
// ErrReservedBlockType is returned when a reserved block type is found.
// Typically this indicates wrong or corrupted input.
@@ -52,6 +55,10 @@ var (
// Typically returned on invalid input.
ErrBlockTooSmall = errors.New("block too small")
+ // ErrUnexpectedBlockSize is returned when a block has unexpected size.
+ // Typically returned on invalid input.
+ ErrUnexpectedBlockSize = errors.New("unexpected block size")
+
// ErrMagicMismatch is returned when a "magic" number isn't what is expected.
// Typically this indicates wrong or corrupted input.
ErrMagicMismatch = errors.New("invalid input: magic number mismatch")
@@ -75,6 +82,10 @@ var (
// This is only returned if SingleSegment is specified on the frame.
ErrFrameSizeExceeded = errors.New("frame size exceeded")
+ // ErrFrameSizeMismatch is returned if the stated frame size does not match the expected size.
+ // This is only returned if SingleSegment is specified on the frame.
+ ErrFrameSizeMismatch = errors.New("frame size does not match size on stream")
+
// ErrCRCMismatch is returned if CRC mismatches.
ErrCRCMismatch = errors.New("CRC check failed")
diff --git a/vendor/github.com/lrstanley/girc/.editorconfig b/vendor/github.com/lrstanley/girc/.editorconfig
new file mode 100644
index 00000000..a324b1ef
--- /dev/null
+++ b/vendor/github.com/lrstanley/girc/.editorconfig
@@ -0,0 +1,45 @@
+# THIS FILE IS GENERATED! DO NOT EDIT! Maintained by Terraform.
+#
+# editorconfig.org
+root = true
+
+[*]
+charset = utf-8
+end_of_line = lf
+indent_size = 4
+indent_style = space
+insert_final_newline = true
+trim_trailing_whitespace = true
+
+[*.tf]
+indent_size = 2
+
+[*.go]
+indent_style = tab
+indent_size = 4
+
+[*.md]
+trim_trailing_whitespace = false
+
+[*.{md,py,sh,yml,yaml}]
+max_line_length = 105
+
+[*.{yml,yaml,toml}]
+indent_size = 2
+
+[*.json]
+indent_size = 2
+insert_final_newline = ignore
+
+[*.html]
+indent_size = 2
+
+[Makefile]
+indent_style = tab
+
+[**.min.js]
+indent_style = ignore
+insert_final_newline = ignore
+
+[*.bat]
+indent_style = tab
diff --git a/vendor/github.com/lrstanley/girc/.golangci.yml b/vendor/github.com/lrstanley/girc/.golangci.yml
new file mode 100644
index 00000000..a1fa835e
--- /dev/null
+++ b/vendor/github.com/lrstanley/girc/.golangci.yml
@@ -0,0 +1,37 @@
+# THIS FILE IS GENERATED! DO NOT EDIT! Maintained by Terraform.
+run:
+ tests: False
+ timeout: 3m
+
+issues:
+ max-per-linter: 0
+ max-same-issues: 0
+
+severity:
+ default-severity: error
+ rules:
+ - linters:
+ - errcheck
+ - gocritic
+ severity: warning
+
+linters:
+ enable:
+ - asciicheck
+ - exportloopref
+ - gci
+ - gocritic
+ - gofmt
+ - misspell
+
+linters-settings:
+ gocritic:
+ disabled-checks:
+ - hugeParam
+ enabled-tags:
+ - diagnostic
+ - opinionated
+ - performance
+ - style
+ govet:
+ check-shadowing: true
diff --git a/vendor/github.com/lrstanley/girc/.travis.yml b/vendor/github.com/lrstanley/girc/.travis.yml
deleted file mode 100644
index 96a1bb8a..00000000
--- a/vendor/github.com/lrstanley/girc/.travis.yml
+++ /dev/null
@@ -1,25 +0,0 @@
-language: go
-go:
-- 1.11.x
-- tip
-before_install:
-- go get -v golang.org/x/lint/golint
-script:
-- $HOME/gopath/bin/golint -min_confidence 0.9 -set_exit_status
-- GORACE="exitcode=1 halt_on_error=1" go test -v -coverprofile=coverage.txt -race -timeout 3m -count 3 -cpu 1,4
-- go vet -v .
-after_success:
- - bash <(curl -s https://codecov.io/bash)
-branches:
- only:
- - master
-notifications:
- irc:
- channels:
- - irc.byteirc.org#/dev/null
- template:
- - "%{repository} #%{build_number} %{branch}/%{commit}: %{author} -- %{message}
- %{build_url}"
- on_success: change
- on_failure: change
- skip_join: false
diff --git a/vendor/github.com/lrstanley/girc/CODE_OF_CONDUCT.md b/vendor/github.com/lrstanley/girc/CODE_OF_CONDUCT.md
new file mode 100644
index 00000000..0eb65874
--- /dev/null
+++ b/vendor/github.com/lrstanley/girc/CODE_OF_CONDUCT.md
@@ -0,0 +1,122 @@
+<!-- THIS FILE IS GENERATED! DO NOT EDIT! Maintained by Terraform. -->
+# Code of Conduct
+
+## Our Pledge :purple_heart:
+
+We as members, contributors, and leaders pledge to make participation in our
+community a harassment-free experience for everyone, regardless of age, body
+size, visible or invisible disability, ethnicity, sex characteristics, gender
+identity and expression, level of experience, education, socio-economic status,
+nationality, personal appearance, race, caste, color, religion, or sexual
+identity and orientation.
+
+We pledge to act and interact in ways that contribute to an open, welcoming,
+diverse, inclusive, and healthy community.
+
+## Our Standards
+
+Examples of behavior that contributes to a positive environment for our
+community include:
+
+* Demonstrating empathy and kindness toward other people
+* Being respectful of differing opinions, viewpoints, and experiences
+* Giving and gracefully accepting constructive feedback
+* Accepting responsibility and apologizing to those affected by our mistakes,
+ and learning from the experience
+* Focusing on what is best not just for us as individuals, but for the overall
+ community
+
+Examples of unacceptable behavior include:
+
+* The use of sexualized language or imagery, and sexual attention or advances of
+ any kind
+* Trolling, insulting or derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or email address,
+ without their explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Enforcement Responsibilities
+
+Community leaders are responsible for clarifying and enforcing our standards of
+acceptable behavior and will take appropriate and fair corrective action in
+response to any behavior that they deem inappropriate, threatening, offensive,
+or harmful.
+
+Community leaders have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions that are
+not aligned to this Code of Conduct, and will communicate reasons for moderation
+decisions when appropriate.
+
+## Scope
+
+This Code of Conduct applies within all community spaces, and also applies when
+an individual is officially representing the community in public spaces.
+Examples of representing our community include using an official e-mail address,
+posting via an official social media account, or acting as an appointed
+representative at an online or offline event.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported to the community leaders responsible for enforcement at
+disclosure@liamstanley.io. All complaints will be reviewed and investigated
+promptly and fairly.
+
+All community leaders are obligated to respect the privacy and security of the
+reporter of any incident.
+
+## Enforcement Guidelines
+
+Community leaders will follow these Community Impact Guidelines in determining
+the consequences for any action they deem in violation of this Code of Conduct:
+
+### 1. Correction
+
+**Community Impact**: Use of inappropriate language or other behavior deemed
+unprofessional or unwelcome in the community.
+
+**Consequence**: A private, written warning from community leaders, providing
+clarity around the nature of the violation and an explanation of why the
+behavior was inappropriate. A public apology may be requested.
+
+### 2. Warning
+
+**Community Impact**: A violation through a single incident or series of
+actions.
+
+**Consequence**: A warning with consequences for continued behavior. No
+interaction with the people involved, including unsolicited interaction with
+those enforcing the Code of Conduct, for a specified period of time. This
+includes avoiding interactions in community spaces as well as external channels
+like social media. Violating these terms may lead to a temporary or permanent
+ban.
+
+### 3. Temporary Ban
+
+**Community Impact**: A serious violation of community standards, including
+sustained inappropriate behavior.
+
+**Consequence**: A temporary ban from any sort of interaction or public
+communication with the community for a specified period of time. No public or
+private interaction with the people involved, including unsolicited interaction
+with those enforcing the Code of Conduct, is allowed during this period.
+Violating these terms may lead to a permanent ban.
+
+### 4. Permanent Ban
+
+**Community Impact**: Demonstrating a pattern of violation of community
+standards, including sustained inappropriate behavior, harassment of an
+individual, or aggression toward or disparagement of classes of individuals.
+
+**Consequence**: A permanent ban from any sort of public interaction within the
+community.
+
+## Attribution
+
+This Code of Conduct is adapted from the Contributor Covenant,
+version 2.1, available [here](https://www.contributor-covenant.org/version/2/1/code_of_conduct.html).
+
+For answers to common questions about this code of conduct, see the [FAQ](https://www.contributor-covenant.org/faq).
+Translations are available at [translations](https://www.contributor-covenant.org/translations).
diff --git a/vendor/github.com/lrstanley/girc/CONTRIBUTING.md b/vendor/github.com/lrstanley/girc/CONTRIBUTING.md
index 7913630c..9c237e3c 100644
--- a/vendor/github.com/lrstanley/girc/CONTRIBUTING.md
+++ b/vendor/github.com/lrstanley/girc/CONTRIBUTING.md
@@ -1,32 +1,106 @@
-# Contributing
+<!-- THIS FILE IS GENERATED! DO NOT EDIT! Maintained by Terraform. -->
+# :handshake: Contributing
-## Issue submission
+This document outlines some of the guidelines that we try and adhere to while
+working on this project.
- * When submitting an issue or bug report, please ensure to provide as much
- information as possible, please ensure that you are running on the latest
- stable version (tagged), or when using master, provide the specific commit
- being used.
+> :point_right: **Note**: before participating in the community, please read our
+> [Code of Conduct][coc].
+> By interacting with this repository, organization, or community you agree to
+> abide by our Code of Conduct.
+>
+> Additionally, if you contribute **any source code** to this repository, you
+> agree to the terms of the [Developer Certificate of Origin][dco]. This helps
+> ensure that contributions aren't in violation of 3rd party license terms.
+
+## :lady_beetle: Issue submission
+
+When [submitting an issue][issues] or bug report,
+please follow these guidelines:
+
+ * Provide as much information as possible (logs, metrics, screenshots,
+ runtime environment, etc).
+ * Ensure that you are running on the latest stable version (tagged), or
+ when using `master`, provide the specific commit being used.
* Provide the minimum needed viable source to replicate the problem.
-## Pull requests
+## :bulb: Feature requests
+
+When [submitting a feature request][issues], please
+follow these guidelines:
+
+ * Does this feature benefit others, or just your use case? If the latter,
+ it will likely be declined unless it has a broader benefit to others.
+ * Please include the pros and cons of the feature.
+ * If possible, describe how the feature would work, and any diagrams/mock
+ examples of what the feature would look like.
+
+## :rocket: Pull requests
To review what is currently being worked on, or looked into, feel free to head
-over to the [issues list](../../issues).
-
-Below are a few guidelines if you would like to contribute. Keep the code
-clean, standardized, and much of the quality should match Golang's standard
-library and common idioms.
-
- * Always test using the latest Go version.
- * Always use `gofmt` before committing anything.
- * Always have proper documentation before committing.
- * Keep the same whitespacing, documentation, and newline format as the
- rest of the project.
- * Only use 3rd party libraries if necessary. If only a small portion of
+over to the [open pull requests][pull-requests] or [issues list][issues].
+
+## :raised_back_of_hand: Assistance with discussions
+
+ * Take a look at the [open discussions][discussions], and if you feel like
+ you'd like to help out other members of the community, it would be much
+ appreciated!
+
+## :pushpin: Guidelines
+
+### :test_tube: Language agnostic
+
+Below are a few guidelines if you would like to contribute:
+
+ * If the feature is large or the bugfix has potential breaking changes,
+ please open an issue first to ensure the changes go down the best path.
+ * If possible, break the changes into smaller PRs. Pull requests should be
+ focused on a specific feature/fix.
+ * Pull requests will only be accepted with sufficient documentation
+ describing the new functionality/fixes.
+ * Keep the code simple where possible. Code that is smaller/more compact
+ does not mean better. Don't do magic behind the scenes.
+ * Use the same formatting/styling/structure as existing code.
+ * Follow idioms and community-best-practices of the related language,
+ unless the previous above guidelines override what the community
+ recommends.
+ * Always test your changes, both the features/fixes being implemented, but
+ also in the standard way that a user would use the project (not just
+ your configuration that fixes your issue).
+ * Only use 3rd party libraries when necessary. If only a small portion of
the library is needed, simply rewrite it within the library to prevent
useless imports.
- * Also see [golang/go/wiki/CodeReviewComments](https://github.com/golang/go/wiki/CodeReviewComments)
-If you would like to assist, and the pull request is quite large and/or it has
-the potential of being a breaking change, please open an issue first so it can
-be discussed.
+### :hamster: Golang
+
+ * See [golang/go/wiki/CodeReviewComments](https://github.com/golang/go/wiki/CodeReviewComments)
+ * This project uses [golangci-lint](https://golangci-lint.run/) for
+ Go-related files. This should be available for any editor that supports
+ `gopls`, however you can also run it locally with `golangci-lint run`
+ after installing it.
+
+
+
+
+## :clipboard: References
+
+ * [Open Source: How to Contribute](https://opensource.guide/how-to-contribute/)
+ * [About pull requests](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/about-pull-requests)
+ * [GitHub Docs](https://docs.github.com/)
+
+## :speech_balloon: What to do next?
+
+ * :old_key: Find a vulnerability? Check out our [Security and Disclosure][security] policy.
+ * :link: Repository [License][license].
+ * [Support][support]
+ * [Code of Conduct][coc].
+
+<!-- definitions -->
+[coc]: https://github.com/lrstanley/girc/blob/master/CODE_OF_CONDUCT.md
+[dco]: https://developercertificate.org/
+[discussions]: https://github.com/lrstanley/girc/discussions
+[issues]: https://github.com/lrstanley/girc/issues/new/choose
+[license]: https://github.com/lrstanley/girc/blob/master/LICENSE
+[pull-requests]: https://github.com/lrstanley/girc/issues/new/choose
+[security]: https://github.com/lrstanley/girc/security/policy
+[support]: https://github.com/lrstanley/girc/blob/master/SUPPORT.md
diff --git a/vendor/github.com/lrstanley/girc/README.md b/vendor/github.com/lrstanley/girc/README.md
index 62f26a76..0a5fba99 100644
--- a/vendor/github.com/lrstanley/girc/README.md
+++ b/vendor/github.com/lrstanley/girc/README.md
@@ -1,12 +1,49 @@
<p align="center"><a href="https://pkg.go.dev/github.com/lrstanley/girc"><img width="270" src="http://i.imgur.com/DEnyrdB.png"></a></p>
-<p align="center">girc, a flexible IRC library for Go</p>
+<!-- template:begin:header -->
+<!-- do not edit anything in this "template" block, its auto-generated -->
+<p align="center">girc -- :bomb: girc is a flexible IRC library for Go :ok_hand:</p>
<p align="center">
- <a href="https://github.com/lrstanley/girc/actions"><img src="https://github.com/lrstanley/girc/workflows/test/badge.svg" alt="Test Status"></a>
- <a href="https://codecov.io/gh/lrstanley/girc"><img src="https://codecov.io/gh/lrstanley/girc/branch/master/graph/badge.svg" alt="Coverage Status"></a>
- <a href="https://pkg.go.dev/github.com/lrstanley/girc"><img src="https://pkg.go.dev/badge/github.com/lrstanley/girc" alt="GoDoc"></a>
- <a href="https://goreportcard.com/report/github.com/lrstanley/girc"><img src="https://goreportcard.com/badge/github.com/lrstanley/girc" alt="Go Report Card"></a>
- <a href="https://liam.sh/chat"><img src="https://img.shields.io/badge/community-chat%20with%20us-green.svg" alt="Community Chat"></a>
+
+ <a href="https://github.com/lrstanley/girc/actions?query=workflow%3Atest+event%3Apush">
+ <img alt="GitHub Workflow Status (test @ master)" src="https://img.shields.io/github/workflow/status/lrstanley/girc/test/master?label=test&style=flat-square&event=push">
+ </a>
+
+ <img alt="Code Coverage" src="https://img.shields.io/codecov/c/github/lrstanley/girc/master?style=flat-square">
+
+ <a href="https://pkg.go.dev/github.com/lrstanley/girc">
+ <img alt="Go Documentation" src="https://pkg.go.dev/badge/github.com/lrstanley/girc?style=flat-square">
+ </a>
+ <a href="https://goreportcard.com/report/github.com/lrstanley/girc">
+ <img alt="Go Report Card" src="https://goreportcard.com/badge/github.com/lrstanley/girc?style=flat-square">
+ </a>
+ <img alt="Bug reports" src="https://img.shields.io/github/issues/lrstanley/girc/bug?label=issues&style=flat-square">
+ <img alt="Feature requests" src="https://img.shields.io/github/issues/lrstanley/girc/enhancement?label=feature%20requests&style=flat-square">
+ <a href="https://github.com/lrstanley/girc/pulls">
+ <img alt="Open Pull Requests" src="https://img.shields.io/github/issues-pr/lrstanley/girc?style=flat-square">
+ </a>
+ <a href="https://github.com/lrstanley/girc/tags">
+ <img alt="Latest Semver Tag" src="https://img.shields.io/github/v/tag/lrstanley/girc?style=flat-square">
+ </a>
+ <img alt="Last commit" src="https://img.shields.io/github/last-commit/lrstanley/girc?style=flat-square">
+ <a href="https://github.com/lrstanley/girc/discussions/new?category=q-a">
+ <img alt="Ask a Question" src="https://img.shields.io/badge/discussions-ask_a_question!-green?style=flat-square">
+ </a>
+ <a href="https://liam.sh/chat"><img src="https://img.shields.io/badge/discord-bytecord-blue.svg?style=flat-square" alt="Discord Chat"></a>
</p>
+<!-- template:end:header -->
+
+<!-- template:begin:toc -->
+<!-- do not edit anything in this "template" block, its auto-generated -->
+## :link: Table of Contents
+
+ - [Features](#features)
+ - [Installing](#installing)
+ - [Examples](#examples)
+ - [References](#references)
+ - [Support & Assistance](#raising_hand_man-support-assistance)
+ - [Contributing](#handshake-contributing)
+ - [License](#balance_scale-license)
+<!-- template:end:toc -->
## Features
@@ -48,35 +85,6 @@ usecases/examples/projects which utilize girc:
Working on a project and want to add it to the list? Submit a pull request!
-## Contributing
-
-Please review the [CONTRIBUTING](CONTRIBUTING.md) doc for submitting issues/a guide
-on submitting pull requests and helping out.
-
-## License
-
- Copyright (c) 2016 Liam Stanley <me@liamstanley.io>
-
- Permission is hereby granted, free of charge, to any person obtaining a copy
- of this software and associated documentation files (the "Software"), to deal
- in the Software without restriction, including without limitation the rights
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the Software is
- furnished to do so, subject to the following conditions:
-
- The above copyright notice and this permission notice shall be included in all
- copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- SOFTWARE.
-
-girc artwork licensed under [CC 3.0](http://creativecommons.org/licenses/by/3.0/) based on Renee French under Creative Commons 3.0 Attributions.
-
## References
* [IRCv3: Specification Docs](http://ircv3.net/irc/)
@@ -93,3 +101,61 @@ girc artwork licensed under [CC 3.0](http://creativecommons.org/licenses/by/3.0/
* [RFC7194: Default Port for Internet Relay Chat (IRC) via TLS/SSL](https://tools.ietf.org/html/rfc7194)
* [RFC4422: Simple Authentication and Security Layer](https://tools.ietf.org/html/rfc4422) ([SASL EXTERNAL](https://tools.ietf.org/html/rfc4422#appendix-A))
* [RFC4616: The PLAIN SASL Mechanism](https://tools.ietf.org/html/rfc4616)
+
+
+<!-- template:begin:support -->
+<!-- do not edit anything in this "template" block, its auto-generated -->
+## :raising_hand_man: Support & Assistance
+
+ * :heart: Please review the [Code of Conduct](CODE_OF_CONDUCT.md) for
+ guidelines on ensuring everyone has the best experience interacting with
+ the community.
+ * :raising_hand_man: Take a look at the [support](SUPPORT.md) document on
+ guidelines for tips on how to ask the right questions.
+ * :lady_beetle: For all features/bugs/issues/questions/etc, [head over here](https://github.com/lrstanley/girc/issues/new/choose).
+<!-- template:end:support -->
+
+<!-- template:begin:contributing -->
+<!-- do not edit anything in this "template" block, it's auto-generated -->
+## :handshake: Contributing
+
+ * :heart: Please review the [Code of Conduct](CODE_OF_CONDUCT.md) for guidelines
+ on ensuring everyone has the best experience interacting with the
+ community.
+ * :clipboard: Please review the [contributing](CONTRIBUTING.md) doc for submitting
+ issues/a guide on submitting pull requests and helping out.
+ * :old_key: For anything security-related, please review this repository's [security policy](https://github.com/lrstanley/girc/security/policy).
+<!-- template:end:contributing -->
+
+<!-- template:begin:license -->
+<!-- do not edit anything in this "template" block, it's auto-generated -->
+## :balance_scale: License
+
+```
+MIT License
+
+Copyright (c) 2016 Liam Stanley <me@liamstanley.io>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+```
+
+_Also located [here](LICENSE)_
+<!-- template:end:license -->
+girc artwork licensed under [CC 3.0](http://creativecommons.org/licenses/by/3.0/)
+based on Renee French under Creative Commons 3.0 Attributions.
diff --git a/vendor/github.com/lrstanley/girc/SECURITY.md b/vendor/github.com/lrstanley/girc/SECURITY.md
new file mode 100644
index 00000000..7c951d0c
--- /dev/null
+++ b/vendor/github.com/lrstanley/girc/SECURITY.md
@@ -0,0 +1,54 @@
+<!-- THIS FILE IS GENERATED! DO NOT EDIT! Maintained by Terraform. -->
+# :old_key: Security Policy
+
+## :heavy_check_mark: Supported Versions
+
+The following restrictions apply to versions that are still supported in terms of security and bug fixes:
+
+ * :grey_question: Must be using the latest major/minor version.
+ * :grey_question: Must be using a supported platform for the repository (e.g. OS, browser, etc.), and that platform must
+ be within its supported versions (for example: don't use a legacy or unsupported version of Ubuntu or
+ Google Chrome).
+ * :grey_question: Repository must not be archived (unless the vulnerability is critical and the repository is moderately
+ popular).
+ * :heavy_check_mark:
+
+If one of the above doesn't apply to you, feel free to submit an issue and we can discuss the
+issue/vulnerability further.
+
+
+## :lady_beetle: Reporting a Vulnerability
+
+Best method of contact: [GPG :key:](https://github.com/lrstanley.gpg)
+
+ * :speech_balloon: [Discord][chat]: message `/home/liam#0000`.
+ * :email: Email: `security@liamstanley.io`
+
+Backup contacts (if I am unresponsive after **48h**): [GPG :key:](https://github.com/FM1337.gpg)
+ * :speech_balloon: [Discord][chat]: message `Allen#7440`.
+ * :email: Email: `security@allenlydiard.ca`
+
+If you feel that this disclosure doesn't include a critical vulnerability and there is no sensitive
+information in the disclosure, you don't have to use the GPG key. For all other situations, please
+use it.
+
+### :stopwatch: Vulnerability disclosure expectations
+
+ * :no_bell: We expect you to not share this information with others, unless:
+ * The maximum timeline for initial response has been exceeded (shown below).
+ * The maximum resolution time has been exceeded (shown below).
+ * :mag_right: We expect you to responsibly investigate this vulnerability -- please do not utilize the
+ vulnerability beyond the initial findings.
+ * :stopwatch: Initial response within 48h; however, if the primary contact shown above is unavailable, please
+ use the backup contacts provided. The maximum timeline for an initial response should be within
+ 7 days.
+ * :stopwatch: Depending on the severity of the disclosure, resolution time may be anywhere from 24h to 2
+ weeks after initial response, though in most cases it will likely be closer to the former.
+ * If the vulnerability is very low/low in terms of risk, the above timelines **will not apply**.
+ * :toolbox: Before the release of resolved versions, a [GitHub Security Advisory][advisory-docs]
+ will be released on the respective repository. [Browse all advisories here][advisory].
+
+<!-- definitions -->
+[chat]: https://liam.sh/chat
+[advisory]: https://github.com/advisories?query=type%3Areviewed+ecosystem%3Ago
+[advisory-docs]: https://docs.github.com/en/code-security/repository-security-advisories/creating-a-repository-security-advisory
diff --git a/vendor/github.com/lrstanley/girc/SUPPORT.md b/vendor/github.com/lrstanley/girc/SUPPORT.md
new file mode 100644
index 00000000..aca24782
--- /dev/null
+++ b/vendor/github.com/lrstanley/girc/SUPPORT.md
@@ -0,0 +1,56 @@
+# :raising_hand_man: Support
+
+This document explains where and how to get help with most of my projects.
+Please ensure you read through it thoroughly.
+
+> :point_right: **Note**: before participating in the community, please read our
+> [Code of Conduct][coc].
+> By interacting with this repository, organization, or community you agree to
+> abide by its terms.
+
+## :grey_question: Asking quality questions
+
+Questions can go to [GitHub Discussions][discussions], or you can join
+the Discord [here][chat].
+
+Help me help you! Spend time framing questions and add links and resources.
+Spending the extra time up front can help save everyone time in the long run.
+Here are some tips:
+
+* Don't fall for the [XY problem][xy].
+* Search to find out if a similar question has been asked or if a similar
+ issue/bug has been reported.
+* Try to define what you need help with:
+ * Is there something in particular you want to do?
+ * What problem are you encountering and what steps have you taken to try
+ and fix it?
+ * Is there a concept you don't understand?
+* Provide sample code, such as a [CodeSandbox][cs] or a simple snippet, if
+ possible.
+* Screenshots can help, but if there's important text such as code or error
+ messages in them, please also provide those.
+* The more time you put into asking your question, the better I and others
+ can help you.
+
+## :old_key: Security
+
+For any security or vulnerability related disclosure, please follow the
+guidelines outlined in our [security policy][security].
+
+## :handshake: Contributions
+
+See [`CONTRIBUTING.md`][contributing] on how to contribute.
+
+<!-- definitions -->
+[coc]: https://github.com/lrstanley/girc/blob/master/CODE_OF_CONDUCT.md
+[contributing]: https://github.com/lrstanley/girc/blob/master/CONTRIBUTING.md
+[discussions]: https://github.com/lrstanley/girc/discussions/categories/q-a
+[issues]: https://github.com/lrstanley/girc/issues/new/choose
+[license]: https://github.com/lrstanley/girc/blob/master/LICENSE
+[pull-requests]: https://github.com/lrstanley/girc/issues/new/choose
+[security]: https://github.com/lrstanley/girc/security/policy
+[support]: https://github.com/lrstanley/girc/blob/master/SUPPORT.md
+
+[xy]: https://meta.stackexchange.com/questions/66377/what-is-the-xy-problem/66378#66378
+[chat]: https://liam.sh/chat
+[cs]: https://codesandbox.io
diff --git a/vendor/github.com/lrstanley/girc/builtin.go b/vendor/github.com/lrstanley/girc/builtin.go
index 914b5fab..e60c577a 100644
--- a/vendor/github.com/lrstanley/girc/builtin.go
+++ b/vendor/github.com/lrstanley/girc/builtin.go
@@ -427,7 +427,7 @@ func handleMOTD(c *Client, e Event) {
}
// Otherwise, assume we're getting sent the MOTD line-by-line.
- if len(c.state.motd) != 0 {
+ if c.state.motd != "" {
c.state.motd += "\n"
}
c.state.motd += e.Last()
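The change above, like the similar ones in the commands.go, ctcp.go, event.go, format.go, handler.go and modes.go hunks further down, swaps `len(s) == 0`-style checks for direct comparisons against the empty string, the more idiomatic form for Go strings. A minimal standalone sketch (not girc code) of the equivalence:

```go
package main

import "fmt"

func main() {
	motd := ""

	// For a string, comparing against "" and checking len() == 0 are
	// equivalent; the comparison form simply reads more clearly.
	fmt.Println(motd == "", len(motd) == 0) // true true

	motd += "welcome to the server"
	fmt.Println(motd != "") // true
}
```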
diff --git a/vendor/github.com/lrstanley/girc/cap.go b/vendor/github.com/lrstanley/girc/cap.go
index dcf53f0e..631b925b 100644
--- a/vendor/github.com/lrstanley/girc/cap.go
+++ b/vendor/github.com/lrstanley/girc/cap.go
@@ -64,7 +64,7 @@ func possibleCapList(c *Client) map[string][]string {
if !c.Config.DisableSTS && !c.Config.SSL {
// If fallback supported, and we failed recently, don't try negotiating STS.
// ONLY do this fallback if we're expired (primarily useful during the first
- // sts negotation).
+ // sts negotiation).
if time.Since(c.state.sts.lastFailed) < 5*time.Minute && !c.Config.DisableSTSFallback {
c.debug.Println("skipping strict transport policy negotiation; failed within the last 5 minutes")
} else {
diff --git a/vendor/github.com/lrstanley/girc/client.go b/vendor/github.com/lrstanley/girc/client.go
index 3308b399..db6ec080 100644
--- a/vendor/github.com/lrstanley/girc/client.go
+++ b/vendor/github.com/lrstanley/girc/client.go
@@ -10,7 +10,6 @@ import (
"errors"
"fmt"
"io"
- "io/ioutil"
"log"
"net"
"os"
@@ -268,7 +267,7 @@ func New(config Config) *Client {
if envDebug {
c.debug = log.New(os.Stderr, "debug:", log.Ltime|log.Lshortfile)
} else {
- c.debug = log.New(ioutil.Discard, "", 0)
+ c.debug = log.New(io.Discard, "", 0)
}
} else {
if envDebug {
@@ -411,7 +410,7 @@ func (c *Client) execLoop(ctx context.Context, errs chan error, wg *sync.WaitGro
// Handles incoming ERROR responses. These are only ever sent
// by the server (with the exception that this library may use
// them as a lower level way of signalling to disconnect due
- // to some other client-choosen error), and should always be
+ // to some other client-chosen error), and should always be
// followed up by the server disconnecting the client. If for
// some reason the server doesn't disconnect the client, or
// if this library is the source of the error, this should
@@ -446,7 +445,7 @@ func (c *Client) DisableTracking() {
// Server returns the string representation of host+port pair for the connection.
func (c *Client) Server() string {
c.state.Lock()
- defer c.state.Lock()
+ defer c.state.Unlock()
return c.server()
}
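The `Server()` hunk above fixes a genuine bug: the method previously deferred `c.state.Lock()` instead of `c.state.Unlock()`, so the mutex was never released and the deferred second `Lock()` would block, deadlocking the caller. A minimal sketch of the corrected pattern, using a hypothetical stand-in type rather than girc's real state struct:

```go
package main

import (
	"fmt"
	"sync"
)

// clientState is a hypothetical stand-in for girc's internal client state.
type clientState struct {
	mu     sync.Mutex
	server string
}

// Server follows the corrected pattern: take the lock, then defer the
// Unlock so the mutex is released on every return path.
func (s *clientState) Server() string {
	s.mu.Lock()
	defer s.mu.Unlock() // deferring s.mu.Lock() here would never release the mutex
	return s.server
}

func main() {
	s := &clientState{server: "irc.example.com:6667"}
	fmt.Println(s.Server())
	fmt.Println(s.Server()) // safe to call repeatedly once Unlock is deferred
}
```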
diff --git a/vendor/github.com/lrstanley/girc/commands.go b/vendor/github.com/lrstanley/girc/commands.go
index 2ee0a23a..91a8b96a 100644
--- a/vendor/github.com/lrstanley/girc/commands.go
+++ b/vendor/github.com/lrstanley/girc/commands.go
@@ -5,9 +5,9 @@
package girc
import (
- "strconv"
"errors"
"fmt"
+ "strconv"
)
// Commands holds a large list of useful methods to interact with the server,
@@ -37,7 +37,7 @@ func (cmd *Commands) Join(channels ...string) {
continue
}
- if len(buffer) == 0 {
+ if buffer == "" {
buffer = channels[i]
} else {
buffer += "," + channels[i]
@@ -341,7 +341,7 @@ func (cmd *Commands) List(channels ...string) {
continue
}
- if len(buffer) == 0 {
+ if buffer == "" {
buffer = channels[i]
} else {
buffer += "," + channels[i]
diff --git a/vendor/github.com/lrstanley/girc/constants.go b/vendor/github.com/lrstanley/girc/constants.go
index 8f5f91b1..d6058089 100644
--- a/vendor/github.com/lrstanley/girc/constants.go
+++ b/vendor/github.com/lrstanley/girc/constants.go
@@ -149,7 +149,7 @@ const (
RPL_ENDOFWHOWAS = "369"
RPL_LISTSTART = "321"
RPL_LIST = "322"
- RPL_LISTEND = "323"
+ RPL_LISTEND = "323" //nolint:misspell // it's correct.
RPL_UNIQOPIS = "325"
RPL_CHANNELMODEIS = "324"
RPL_NOTOPIC = "331"
diff --git a/vendor/github.com/lrstanley/girc/ctcp.go b/vendor/github.com/lrstanley/girc/ctcp.go
index 637615ef..caa9429b 100644
--- a/vendor/github.com/lrstanley/girc/ctcp.go
+++ b/vendor/github.com/lrstanley/girc/ctcp.go
@@ -104,7 +104,7 @@ func EncodeCTCP(ctcp *CTCPEvent) (out string) {
// EncodeCTCPRaw is much like EncodeCTCP, however accepts a raw command and
// string as input.
func EncodeCTCPRaw(cmd, text string) (out string) {
- if len(cmd) <= 0 {
+ if cmd == "" {
return ""
}
diff --git a/vendor/github.com/lrstanley/girc/event.go b/vendor/github.com/lrstanley/girc/event.go
index 3eb187f9..0ca06976 100644
--- a/vendor/github.com/lrstanley/girc/event.go
+++ b/vendor/github.com/lrstanley/girc/event.go
@@ -283,7 +283,6 @@ func (e *Event) Bytes() []byte {
// Space separated list of arguments.
if len(e.Params) > 0 {
- // buffer.WriteByte(eventSpace)
for i := 0; i < len(e.Params); i++ {
if i == len(e.Params)-1 && (strings.Contains(e.Params[i], " ") || strings.HasPrefix(e.Params[i], ":") || e.Params[i] == "") {
buffer.WriteString(string(eventSpace) + string(messagePrefix) + e.Params[i])
@@ -621,7 +620,7 @@ func (s *Source) IsHostmask() bool {
// IsServer returns true if this source looks like a server name.
func (s *Source) IsServer() bool {
- return len(s.Ident) <= 0 && len(s.Host) <= 0
+ return s.Ident == "" && s.Host == ""
}
// writeTo is a utility function to write the source to the bytes.Buffer
diff --git a/vendor/github.com/lrstanley/girc/format.go b/vendor/github.com/lrstanley/girc/format.go
index e1efb756..85e3e387 100644
--- a/vendor/github.com/lrstanley/girc/format.go
+++ b/vendor/github.com/lrstanley/girc/format.go
@@ -127,10 +127,10 @@ func Fmt(text string) string {
// See Fmt() for more information.
func TrimFmt(text string) string {
for color := range fmtColors {
- text = strings.Replace(text, string(fmtOpenChar)+color+string(fmtCloseChar), "", -1)
+ text = strings.ReplaceAll(text, string(fmtOpenChar)+color+string(fmtCloseChar), "")
}
for code := range fmtCodes {
- text = strings.Replace(text, string(fmtOpenChar)+code+string(fmtCloseChar), "", -1)
+ text = strings.ReplaceAll(text, string(fmtOpenChar)+code+string(fmtCloseChar), "")
}
return text
@@ -138,7 +138,7 @@ func TrimFmt(text string) string {
// This is really the fastest way of doing this (marginally better than
// actually trying to parse it manually.)
-var reStripColor = regexp.MustCompile(`\x03([019]?[0-9](,[019]?[0-9])?)?`)
+var reStripColor = regexp.MustCompile(`\x03([019]?\d(,[019]?\d)?)?`)
// StripRaw tries to strip all ASCII format codes that are used for IRC.
// Primarily, foreground/background colors, and other control bytes like
@@ -148,7 +148,7 @@ func StripRaw(text string) string {
text = reStripColor.ReplaceAllString(text, "")
for _, code := range fmtCodes {
- text = strings.Replace(text, code, "", -1)
+ text = strings.ReplaceAll(text, code, "")
}
return text
@@ -219,7 +219,7 @@ func IsValidChannel(channel string) bool {
// digit = 0x30-0x39
// special = 0x5B-0x60 / 0x7B-0x7D
func IsValidNick(nick string) bool {
- if len(nick) <= 0 {
+ if nick == "" {
return false
}
@@ -256,7 +256,7 @@ func IsValidNick(nick string) bool {
// user = 1*( %x01-09 / %x0B-0C / %x0E-1F / %x21-3F / %x41-FF )
// ; any octet except NUL, CR, LF, " " and "@"
func IsValidUser(name string) bool {
- if len(name) <= 0 {
+ if name == "" {
return false
}
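The format.go hunks above replace `strings.Replace(s, old, new, -1)` with `strings.ReplaceAll(s, old, new)`, a direct equivalent available since Go 1.12, and rewrite `[0-9]` as `\d` in the color-stripping pattern (the two match the same characters in Go's RE2 regexp). A small standalone sketch with made-up format codes, not girc's actual tables:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	text := "{b}bold{b} and \x0304red\x03 text"

	// ReplaceAll(s, old, new) is shorthand for Replace(s, old, new, -1):
	// every occurrence is replaced, not just the first n.
	viaReplace := strings.Replace(text, "{b}", "", -1)
	viaReplaceAll := strings.ReplaceAll(text, "{b}", "")
	fmt.Println(viaReplace == viaReplaceAll) // true

	// \d and [0-9] are interchangeable here; both strip IRC color
	// sequences such as "\x0304".
	reColor := regexp.MustCompile(`\x03([019]?\d(,[019]?\d)?)?`)
	fmt.Println(reColor.ReplaceAllString(viaReplaceAll, "")) // "bold and red text"
}
```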
diff --git a/vendor/github.com/lrstanley/girc/handler.go b/vendor/github.com/lrstanley/girc/handler.go
index 712886b3..55fd4e9c 100644
--- a/vendor/github.com/lrstanley/girc/handler.go
+++ b/vendor/github.com/lrstanley/girc/handler.go
@@ -287,7 +287,7 @@ func (c *Caller) Remove(cuid string) (success bool) {
// on your own.
func (c *Caller) remove(cuid string) (success bool) {
cmd, uid := c.cuidToID(cuid)
- if len(cmd) == 0 || len(uid) == 0 {
+ if cmd == "" || uid == "" {
return false
}
diff --git a/vendor/github.com/lrstanley/girc/modes.go b/vendor/github.com/lrstanley/girc/modes.go
index 864df71f..35ff103a 100644
--- a/vendor/github.com/lrstanley/girc/modes.go
+++ b/vendor/github.com/lrstanley/girc/modes.go
@@ -34,7 +34,7 @@ func (c *CMode) Short() string {
// String returns a string representation of a mode, including optional
// arguments. E.g. "+b user*!ident@host.*.com"
func (c *CMode) String() string {
- if len(c.args) == 0 {
+ if c.args == "" {
return c.Short()
}
@@ -106,7 +106,7 @@ func (c *CModes) HasMode(mode string) bool {
func (c *CModes) Get(mode string) (args string, ok bool) {
for i := 0; i < len(c.modes); i++ {
if string(c.modes[i].name) == mode {
- if len(c.modes[i].args) == 0 {
+ if c.modes[i].args == "" {
return "", false
}
@@ -157,7 +157,7 @@ func (c *CModes) hasArg(set bool, mode byte) (hasArgs, isSetting bool) {
// For example, the latter would mean applying an incoming MODE with the modes
// stored for a channel.
func (c *CModes) Apply(modes []CMode) {
- var new []CMode
+ var newModes []CMode
for j := 0; j < len(c.modes); j++ {
isin := false
@@ -166,14 +166,14 @@ func (c *CModes) Apply(modes []CMode) {
continue
}
if c.modes[j].name == modes[i].name && modes[i].add {
- new = append(new, modes[i])
+ newModes = append(newModes, modes[i])
isin = true
break
}
}
if !isin {
- new = append(new, c.modes[j])
+ newModes = append(newModes, c.modes[j])
}
}
@@ -183,19 +183,19 @@ func (c *CModes) Apply(modes []CMode) {
}
isin := false
- for j := 0; j < len(new); j++ {
- if modes[i].name == new[j].name {
+ for j := 0; j < len(newModes); j++ {
+ if modes[i].name == newModes[j].name {
isin = true
break
}
}
if !isin {
- new = append(new, modes[i])
+ newModes = append(newModes, modes[i])
}
}
- c.modes = new
+ c.modes = newModes
}
// Parse parses a set of flags and args, returning the necessary list of
@@ -221,7 +221,7 @@ func (c *CModes) Parse(flags string, args []string) (out []CMode) {
}
hasArgs, isSetting := c.hasArg(add, flags[i])
- if hasArgs && len(args) >= argCount+1 {
+ if hasArgs && len(args) > argCount {
mode.args = args[argCount]
argCount++
}
@@ -351,7 +351,7 @@ func handleMODE(c *Client, e Event) {
// Loop through and update users modes as necessary.
for i := 0; i < len(modes); i++ {
- if modes[i].setting || len(modes[i].args) == 0 {
+ if modes[i].setting || modes[i].args == "" {
continue
}
@@ -493,8 +493,8 @@ func (m *Perms) reset() {
// set translates raw prefix characters into proper permissions. Only
// use this function when you have a session lock.
-func (m *Perms) set(prefix string, append bool) {
- if !append {
+func (m *Perms) set(prefix string, add bool) {
+ if !add {
m.reset()
}