path: root/vendor/golang.org/x/crypto/internal/chacha20
author    Wim <wim@42.be>  2020-01-09 21:02:56 +0100
committer GitHub <noreply@github.com>  2020-01-09 21:02:56 +0100
commit    0f708daf2d14dcca261ef98cc698a1b1f2a6aa74 (patch)
tree      022eee21366d6a9a00feaeff918972d9e72632c2 /vendor/golang.org/x/crypto/internal/chacha20
parent    b9354de8fd5e424ac2f246fff1a03b27e8094fd8 (diff)
Update dependencies (#975)
Diffstat (limited to 'vendor/golang.org/x/crypto/internal/chacha20')
-rw-r--r--  vendor/golang.org/x/crypto/internal/chacha20/asm_arm64.s       | 308
-rw-r--r--  vendor/golang.org/x/crypto/internal/chacha20/asm_ppc64le.s     | 668
-rw-r--r--  vendor/golang.org/x/crypto/internal/chacha20/chacha_arm64.go   |  31
-rw-r--r--  vendor/golang.org/x/crypto/internal/chacha20/chacha_generic.go | 264
-rw-r--r--  vendor/golang.org/x/crypto/internal/chacha20/chacha_noasm.go   |  16
-rw-r--r--  vendor/golang.org/x/crypto/internal/chacha20/chacha_ppc64le.go |  52
-rw-r--r--  vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.go   |  29
-rw-r--r--  vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.s    | 260
-rw-r--r--  vendor/golang.org/x/crypto/internal/chacha20/xor.go            |  43
9 files changed, 0 insertions, 1671 deletions
diff --git a/vendor/golang.org/x/crypto/internal/chacha20/asm_arm64.s b/vendor/golang.org/x/crypto/internal/chacha20/asm_arm64.s
deleted file mode 100644
index b3a16ef7..00000000
--- a/vendor/golang.org/x/crypto/internal/chacha20/asm_arm64.s
+++ /dev/null
@@ -1,308 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build go1.11
-// +build !gccgo,!appengine
-
-#include "textflag.h"
-
-#define NUM_ROUNDS 10
-
-// func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32)
-TEXT ·xorKeyStreamVX(SB), NOSPLIT, $0
- MOVD dst+0(FP), R1
- MOVD src+24(FP), R2
- MOVD src_len+32(FP), R3
- MOVD key+48(FP), R4
- MOVD nonce+56(FP), R6
- MOVD counter+64(FP), R7
-
- MOVD $·constants(SB), R10
- MOVD $·incRotMatrix(SB), R11
-
- MOVW (R7), R20
-
- AND $~255, R3, R13
- ADD R2, R13, R12 // R12 for block end
- AND $255, R3, R13
-loop:
- MOVD $NUM_ROUNDS, R21
- VLD1 (R11), [V30.S4, V31.S4]
-
-	// load constants
- // VLD4R (R10), [V0.S4, V1.S4, V2.S4, V3.S4]
- WORD $0x4D60E940
-
- // load keys
- // VLD4R 16(R4), [V4.S4, V5.S4, V6.S4, V7.S4]
- WORD $0x4DFFE884
- // VLD4R 16(R4), [V8.S4, V9.S4, V10.S4, V11.S4]
- WORD $0x4DFFE888
- SUB $32, R4
-
- // load counter + nonce
- // VLD1R (R7), [V12.S4]
- WORD $0x4D40C8EC
-
- // VLD3R (R6), [V13.S4, V14.S4, V15.S4]
- WORD $0x4D40E8CD
-
- // update counter
- VADD V30.S4, V12.S4, V12.S4
-
-chacha:
- // V0..V3 += V4..V7
- // V12..V15 <<<= ((V12..V15 XOR V0..V3), 16)
- VADD V0.S4, V4.S4, V0.S4
- VADD V1.S4, V5.S4, V1.S4
- VADD V2.S4, V6.S4, V2.S4
- VADD V3.S4, V7.S4, V3.S4
- VEOR V12.B16, V0.B16, V12.B16
- VEOR V13.B16, V1.B16, V13.B16
- VEOR V14.B16, V2.B16, V14.B16
- VEOR V15.B16, V3.B16, V15.B16
- VREV32 V12.H8, V12.H8
- VREV32 V13.H8, V13.H8
- VREV32 V14.H8, V14.H8
- VREV32 V15.H8, V15.H8
- // V8..V11 += V12..V15
- // V4..V7 <<<= ((V4..V7 XOR V8..V11), 12)
- VADD V8.S4, V12.S4, V8.S4
- VADD V9.S4, V13.S4, V9.S4
- VADD V10.S4, V14.S4, V10.S4
- VADD V11.S4, V15.S4, V11.S4
- VEOR V8.B16, V4.B16, V16.B16
- VEOR V9.B16, V5.B16, V17.B16
- VEOR V10.B16, V6.B16, V18.B16
- VEOR V11.B16, V7.B16, V19.B16
- VSHL $12, V16.S4, V4.S4
- VSHL $12, V17.S4, V5.S4
- VSHL $12, V18.S4, V6.S4
- VSHL $12, V19.S4, V7.S4
- VSRI $20, V16.S4, V4.S4
- VSRI $20, V17.S4, V5.S4
- VSRI $20, V18.S4, V6.S4
- VSRI $20, V19.S4, V7.S4
-
- // V0..V3 += V4..V7
- // V12..V15 <<<= ((V12..V15 XOR V0..V3), 8)
- VADD V0.S4, V4.S4, V0.S4
- VADD V1.S4, V5.S4, V1.S4
- VADD V2.S4, V6.S4, V2.S4
- VADD V3.S4, V7.S4, V3.S4
- VEOR V12.B16, V0.B16, V12.B16
- VEOR V13.B16, V1.B16, V13.B16
- VEOR V14.B16, V2.B16, V14.B16
- VEOR V15.B16, V3.B16, V15.B16
- VTBL V31.B16, [V12.B16], V12.B16
- VTBL V31.B16, [V13.B16], V13.B16
- VTBL V31.B16, [V14.B16], V14.B16
- VTBL V31.B16, [V15.B16], V15.B16
-
- // V8..V11 += V12..V15
- // V4..V7 <<<= ((V4..V7 XOR V8..V11), 7)
- VADD V12.S4, V8.S4, V8.S4
- VADD V13.S4, V9.S4, V9.S4
- VADD V14.S4, V10.S4, V10.S4
- VADD V15.S4, V11.S4, V11.S4
- VEOR V8.B16, V4.B16, V16.B16
- VEOR V9.B16, V5.B16, V17.B16
- VEOR V10.B16, V6.B16, V18.B16
- VEOR V11.B16, V7.B16, V19.B16
- VSHL $7, V16.S4, V4.S4
- VSHL $7, V17.S4, V5.S4
- VSHL $7, V18.S4, V6.S4
- VSHL $7, V19.S4, V7.S4
- VSRI $25, V16.S4, V4.S4
- VSRI $25, V17.S4, V5.S4
- VSRI $25, V18.S4, V6.S4
- VSRI $25, V19.S4, V7.S4
-
- // V0..V3 += V5..V7, V4
- // V15,V12-V14 <<<= ((V15,V12-V14 XOR V0..V3), 16)
- VADD V0.S4, V5.S4, V0.S4
- VADD V1.S4, V6.S4, V1.S4
- VADD V2.S4, V7.S4, V2.S4
- VADD V3.S4, V4.S4, V3.S4
- VEOR V15.B16, V0.B16, V15.B16
- VEOR V12.B16, V1.B16, V12.B16
- VEOR V13.B16, V2.B16, V13.B16
- VEOR V14.B16, V3.B16, V14.B16
- VREV32 V12.H8, V12.H8
- VREV32 V13.H8, V13.H8
- VREV32 V14.H8, V14.H8
- VREV32 V15.H8, V15.H8
-
- // V10 += V15; V5 <<<= ((V10 XOR V5), 12)
- // ...
- VADD V15.S4, V10.S4, V10.S4
- VADD V12.S4, V11.S4, V11.S4
- VADD V13.S4, V8.S4, V8.S4
- VADD V14.S4, V9.S4, V9.S4
- VEOR V10.B16, V5.B16, V16.B16
- VEOR V11.B16, V6.B16, V17.B16
- VEOR V8.B16, V7.B16, V18.B16
- VEOR V9.B16, V4.B16, V19.B16
- VSHL $12, V16.S4, V5.S4
- VSHL $12, V17.S4, V6.S4
- VSHL $12, V18.S4, V7.S4
- VSHL $12, V19.S4, V4.S4
- VSRI $20, V16.S4, V5.S4
- VSRI $20, V17.S4, V6.S4
- VSRI $20, V18.S4, V7.S4
- VSRI $20, V19.S4, V4.S4
-
- // V0 += V5; V15 <<<= ((V0 XOR V15), 8)
- // ...
- VADD V5.S4, V0.S4, V0.S4
- VADD V6.S4, V1.S4, V1.S4
- VADD V7.S4, V2.S4, V2.S4
- VADD V4.S4, V3.S4, V3.S4
- VEOR V0.B16, V15.B16, V15.B16
- VEOR V1.B16, V12.B16, V12.B16
- VEOR V2.B16, V13.B16, V13.B16
- VEOR V3.B16, V14.B16, V14.B16
- VTBL V31.B16, [V12.B16], V12.B16
- VTBL V31.B16, [V13.B16], V13.B16
- VTBL V31.B16, [V14.B16], V14.B16
- VTBL V31.B16, [V15.B16], V15.B16
-
- // V10 += V15; V5 <<<= ((V10 XOR V5), 7)
- // ...
- VADD V15.S4, V10.S4, V10.S4
- VADD V12.S4, V11.S4, V11.S4
- VADD V13.S4, V8.S4, V8.S4
- VADD V14.S4, V9.S4, V9.S4
- VEOR V10.B16, V5.B16, V16.B16
- VEOR V11.B16, V6.B16, V17.B16
- VEOR V8.B16, V7.B16, V18.B16
- VEOR V9.B16, V4.B16, V19.B16
- VSHL $7, V16.S4, V5.S4
- VSHL $7, V17.S4, V6.S4
- VSHL $7, V18.S4, V7.S4
- VSHL $7, V19.S4, V4.S4
- VSRI $25, V16.S4, V5.S4
- VSRI $25, V17.S4, V6.S4
- VSRI $25, V18.S4, V7.S4
- VSRI $25, V19.S4, V4.S4
-
- SUB $1, R21
- CBNZ R21, chacha
-
- // VLD4R (R10), [V16.S4, V17.S4, V18.S4, V19.S4]
- WORD $0x4D60E950
-
- // VLD4R 16(R4), [V20.S4, V21.S4, V22.S4, V23.S4]
- WORD $0x4DFFE894
- VADD V30.S4, V12.S4, V12.S4
- VADD V16.S4, V0.S4, V0.S4
- VADD V17.S4, V1.S4, V1.S4
- VADD V18.S4, V2.S4, V2.S4
- VADD V19.S4, V3.S4, V3.S4
- // VLD4R 16(R4), [V24.S4, V25.S4, V26.S4, V27.S4]
- WORD $0x4DFFE898
- // restore R4
- SUB $32, R4
-
- // load counter + nonce
- // VLD1R (R7), [V28.S4]
- WORD $0x4D40C8FC
- // VLD3R (R6), [V29.S4, V30.S4, V31.S4]
- WORD $0x4D40E8DD
-
- VADD V20.S4, V4.S4, V4.S4
- VADD V21.S4, V5.S4, V5.S4
- VADD V22.S4, V6.S4, V6.S4
- VADD V23.S4, V7.S4, V7.S4
- VADD V24.S4, V8.S4, V8.S4
- VADD V25.S4, V9.S4, V9.S4
- VADD V26.S4, V10.S4, V10.S4
- VADD V27.S4, V11.S4, V11.S4
- VADD V28.S4, V12.S4, V12.S4
- VADD V29.S4, V13.S4, V13.S4
- VADD V30.S4, V14.S4, V14.S4
- VADD V31.S4, V15.S4, V15.S4
-
- VZIP1 V1.S4, V0.S4, V16.S4
- VZIP2 V1.S4, V0.S4, V17.S4
- VZIP1 V3.S4, V2.S4, V18.S4
- VZIP2 V3.S4, V2.S4, V19.S4
- VZIP1 V5.S4, V4.S4, V20.S4
- VZIP2 V5.S4, V4.S4, V21.S4
- VZIP1 V7.S4, V6.S4, V22.S4
- VZIP2 V7.S4, V6.S4, V23.S4
- VZIP1 V9.S4, V8.S4, V24.S4
- VZIP2 V9.S4, V8.S4, V25.S4
- VZIP1 V11.S4, V10.S4, V26.S4
- VZIP2 V11.S4, V10.S4, V27.S4
- VZIP1 V13.S4, V12.S4, V28.S4
- VZIP2 V13.S4, V12.S4, V29.S4
- VZIP1 V15.S4, V14.S4, V30.S4
- VZIP2 V15.S4, V14.S4, V31.S4
- VZIP1 V18.D2, V16.D2, V0.D2
- VZIP2 V18.D2, V16.D2, V4.D2
- VZIP1 V19.D2, V17.D2, V8.D2
- VZIP2 V19.D2, V17.D2, V12.D2
- VLD1.P 64(R2), [V16.B16, V17.B16, V18.B16, V19.B16]
-
- VZIP1 V22.D2, V20.D2, V1.D2
- VZIP2 V22.D2, V20.D2, V5.D2
- VZIP1 V23.D2, V21.D2, V9.D2
- VZIP2 V23.D2, V21.D2, V13.D2
- VLD1.P 64(R2), [V20.B16, V21.B16, V22.B16, V23.B16]
- VZIP1 V26.D2, V24.D2, V2.D2
- VZIP2 V26.D2, V24.D2, V6.D2
- VZIP1 V27.D2, V25.D2, V10.D2
- VZIP2 V27.D2, V25.D2, V14.D2
- VLD1.P 64(R2), [V24.B16, V25.B16, V26.B16, V27.B16]
- VZIP1 V30.D2, V28.D2, V3.D2
- VZIP2 V30.D2, V28.D2, V7.D2
- VZIP1 V31.D2, V29.D2, V11.D2
- VZIP2 V31.D2, V29.D2, V15.D2
- VLD1.P 64(R2), [V28.B16, V29.B16, V30.B16, V31.B16]
- VEOR V0.B16, V16.B16, V16.B16
- VEOR V1.B16, V17.B16, V17.B16
- VEOR V2.B16, V18.B16, V18.B16
- VEOR V3.B16, V19.B16, V19.B16
- VST1.P [V16.B16, V17.B16, V18.B16, V19.B16], 64(R1)
- VEOR V4.B16, V20.B16, V20.B16
- VEOR V5.B16, V21.B16, V21.B16
- VEOR V6.B16, V22.B16, V22.B16
- VEOR V7.B16, V23.B16, V23.B16
- VST1.P [V20.B16, V21.B16, V22.B16, V23.B16], 64(R1)
- VEOR V8.B16, V24.B16, V24.B16
- VEOR V9.B16, V25.B16, V25.B16
- VEOR V10.B16, V26.B16, V26.B16
- VEOR V11.B16, V27.B16, V27.B16
- VST1.P [V24.B16, V25.B16, V26.B16, V27.B16], 64(R1)
- VEOR V12.B16, V28.B16, V28.B16
- VEOR V13.B16, V29.B16, V29.B16
- VEOR V14.B16, V30.B16, V30.B16
- VEOR V15.B16, V31.B16, V31.B16
- VST1.P [V28.B16, V29.B16, V30.B16, V31.B16], 64(R1)
-
- ADD $4, R20
- MOVW R20, (R7) // update counter
-
- CMP R2, R12
- BGT loop
-
- RET
-
-
-DATA ·constants+0x00(SB)/4, $0x61707865
-DATA ·constants+0x04(SB)/4, $0x3320646e
-DATA ·constants+0x08(SB)/4, $0x79622d32
-DATA ·constants+0x0c(SB)/4, $0x6b206574
-GLOBL ·constants(SB), NOPTR|RODATA, $32
-
-DATA ·incRotMatrix+0x00(SB)/4, $0x00000000
-DATA ·incRotMatrix+0x04(SB)/4, $0x00000001
-DATA ·incRotMatrix+0x08(SB)/4, $0x00000002
-DATA ·incRotMatrix+0x0c(SB)/4, $0x00000003
-DATA ·incRotMatrix+0x10(SB)/4, $0x02010003
-DATA ·incRotMatrix+0x14(SB)/4, $0x06050407
-DATA ·incRotMatrix+0x18(SB)/4, $0x0A09080B
-DATA ·incRotMatrix+0x1c(SB)/4, $0x0E0D0C0F
-GLOBL ·incRotMatrix(SB), NOPTR|RODATA, $32
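
A note on the rotations in the deleted file above: NEON has no 32-bit rotate instruction, so each VSHL $n / VSRI $(32-n) pair emulates a left rotation by n bits, while the 16-bit rotation uses VREV32 and the 8-bit one a VTBL byte shuffle driven by the second half of incRotMatrix. A scalar Go sketch of what each VSHL/VSRI pair computes per 32-bit lane (rotl32 and the package name are invented here):

    package sketch

    // rotl32 is the scalar equivalent of a VSHL $n / VSRI $(32-n) pair:
    // a 32-bit left rotation by n.
    func rotl32(x uint32, n uint) uint32 {
        return (x << n) | (x >> (32 - n))
    }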
diff --git a/vendor/golang.org/x/crypto/internal/chacha20/asm_ppc64le.s b/vendor/golang.org/x/crypto/internal/chacha20/asm_ppc64le.s
deleted file mode 100644
index cde3fc98..00000000
--- a/vendor/golang.org/x/crypto/internal/chacha20/asm_ppc64le.s
+++ /dev/null
@@ -1,668 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Based on CRYPTOGAMS code with the following comment:
-// # ====================================================================
-// # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
-// # project. The module is, however, dual licensed under OpenSSL and
-// # CRYPTOGAMS licenses depending on where you obtain it. For further
-// # details see http://www.openssl.org/~appro/cryptogams/.
-// # ====================================================================
-
-// Original code can be found at the link below:
-// https://github.com/dot-asm/cryptogams/commit/a60f5b50ed908e91e5c39ca79126a4a876d5d8ff
-
-// There are some differences between the CRYPTOGAMS code and this one. The
-// round loop for "_int" isn't the same as the original. Some adjustments were
-// necessary because there are fewer vector registers available. For example,
-// some X variables (r12, r13, r14, and r15) share the same register used by
-// the counter. The original code names the counter ctr; here we use CNT
-// because the Go assembler reserves CTR as the counter register name.
-
-// +build ppc64le,!gccgo,!appengine
-
-#include "textflag.h"
-
-#define OUT R3
-#define INP R4
-#define LEN R5
-#define KEY R6
-#define CNT R7
-
-#define TEMP R8
-
-#define X0 R11
-#define X1 R12
-#define X2 R14
-#define X3 R15
-#define X4 R16
-#define X5 R17
-#define X6 R18
-#define X7 R19
-#define X8 R20
-#define X9 R21
-#define X10 R22
-#define X11 R23
-#define X12 R24
-#define X13 R25
-#define X14 R26
-#define X15 R27
-
-#define CON0 X0
-#define CON1 X1
-#define CON2 X2
-#define CON3 X3
-
-#define KEY0 X4
-#define KEY1 X5
-#define KEY2 X6
-#define KEY3 X7
-#define KEY4 X8
-#define KEY5 X9
-#define KEY6 X10
-#define KEY7 X11
-
-#define CNT0 X12
-#define CNT1 X13
-#define CNT2 X14
-#define CNT3 X15
-
-#define TMP0 R9
-#define TMP1 R10
-#define TMP2 R28
-#define TMP3 R29
-
-#define CONSTS R8
-
-#define A0 V0
-#define B0 V1
-#define C0 V2
-#define D0 V3
-#define A1 V4
-#define B1 V5
-#define C1 V6
-#define D1 V7
-#define A2 V8
-#define B2 V9
-#define C2 V10
-#define D2 V11
-#define T0 V12
-#define T1 V13
-#define T2 V14
-
-#define K0 V15
-#define K1 V16
-#define K2 V17
-#define K3 V18
-#define K4 V19
-#define K5 V20
-
-#define FOUR V21
-#define SIXTEEN V22
-#define TWENTY4 V23
-#define TWENTY V24
-#define TWELVE V25
-#define TWENTY5 V26
-#define SEVEN V27
-
-#define INPPERM V28
-#define OUTPERM V29
-#define OUTMASK V30
-
-#define DD0 V31
-#define DD1 SEVEN
-#define DD2 T0
-#define DD3 T1
-#define DD4 T2
-
-DATA ·consts+0x00(SB)/8, $0x3320646e61707865
-DATA ·consts+0x08(SB)/8, $0x6b20657479622d32
-DATA ·consts+0x10(SB)/8, $0x0000000000000001
-DATA ·consts+0x18(SB)/8, $0x0000000000000000
-DATA ·consts+0x20(SB)/8, $0x0000000000000004
-DATA ·consts+0x28(SB)/8, $0x0000000000000000
-DATA ·consts+0x30(SB)/8, $0x0a0b08090e0f0c0d
-DATA ·consts+0x38(SB)/8, $0x0203000106070405
-DATA ·consts+0x40(SB)/8, $0x090a0b080d0e0f0c
-DATA ·consts+0x48(SB)/8, $0x0102030005060704
-GLOBL ·consts(SB), RODATA, $80
-
-// func chaCha20_ctr32_vmx(out, inp *byte, len int, key *[8]uint32, counter *uint32)
-TEXT ·chaCha20_ctr32_vmx(SB),NOSPLIT|NOFRAME,$0
- // Load the arguments inside the registers
- MOVD out+0(FP), OUT
- MOVD inp+8(FP), INP
- MOVD len+16(FP), LEN
- MOVD key+24(FP), KEY
- MOVD counter+32(FP), CNT
-
- MOVD $·consts(SB), CONSTS // point to consts addr
-
- MOVD $16, X0
- MOVD $32, X1
- MOVD $48, X2
- MOVD $64, X3
- MOVD $31, X4
- MOVD $15, X5
-
- // Load key
- LVX (KEY)(R0), K1
- LVSR (KEY)(R0), T0
- LVX (KEY)(X0), K2
- LVX (KEY)(X4), DD0
-
- // Load counter
- LVX (CNT)(R0), K3
- LVSR (CNT)(R0), T1
- LVX (CNT)(X5), DD1
-
- // Load constants
- LVX (CONSTS)(R0), K0
- LVX (CONSTS)(X0), K5
- LVX (CONSTS)(X1), FOUR
- LVX (CONSTS)(X2), SIXTEEN
- LVX (CONSTS)(X3), TWENTY4
-
- // Align key and counter
- VPERM K2, K1, T0, K1
- VPERM DD0, K2, T0, K2
- VPERM DD1, K3, T1, K3
-
- // Load counter to GPR
- MOVWZ 0(CNT), CNT0
- MOVWZ 4(CNT), CNT1
- MOVWZ 8(CNT), CNT2
- MOVWZ 12(CNT), CNT3
-
- // Adjust vectors for the initial state
- VADDUWM K3, K5, K3
- VADDUWM K3, K5, K4
- VADDUWM K4, K5, K5
-
- // Synthesized constants
- VSPLTISW $-12, TWENTY
- VSPLTISW $12, TWELVE
- VSPLTISW $-7, TWENTY5
-
- VXOR T0, T0, T0
- VSPLTISW $-1, OUTMASK
- LVSR (INP)(R0), INPPERM
- LVSL (OUT)(R0), OUTPERM
- VPERM OUTMASK, T0, OUTPERM, OUTMASK
-
-loop_outer_vmx:
- // Load constant
- MOVD $0x61707865, CON0
- MOVD $0x3320646e, CON1
- MOVD $0x79622d32, CON2
- MOVD $0x6b206574, CON3
-
- VOR K0, K0, A0
- VOR K0, K0, A1
- VOR K0, K0, A2
- VOR K1, K1, B0
-
- MOVD $10, TEMP
-
- // Load key to GPR
- MOVWZ 0(KEY), X4
- MOVWZ 4(KEY), X5
- MOVWZ 8(KEY), X6
- MOVWZ 12(KEY), X7
- VOR K1, K1, B1
- VOR K1, K1, B2
- MOVWZ 16(KEY), X8
- MOVWZ 0(CNT), X12
- MOVWZ 20(KEY), X9
- MOVWZ 4(CNT), X13
- VOR K2, K2, C0
- VOR K2, K2, C1
- MOVWZ 24(KEY), X10
- MOVWZ 8(CNT), X14
- VOR K2, K2, C2
- VOR K3, K3, D0
- MOVWZ 28(KEY), X11
- MOVWZ 12(CNT), X15
- VOR K4, K4, D1
- VOR K5, K5, D2
-
- MOVD X4, TMP0
- MOVD X5, TMP1
- MOVD X6, TMP2
- MOVD X7, TMP3
- VSPLTISW $7, SEVEN
-
- MOVD TEMP, CTR
-
-loop_vmx:
-	// CRYPTOGAMS generates this loop with a Perl macro. That isn't possible
-	// with Go assembly macros, so the macro expansion is written out here to
-	// preserve the algorithm's efficiency.
- // This loop generates three keystream blocks using VMX instructions and,
- // in parallel, one keystream block using scalar instructions.
- ADD X4, X0, X0
- ADD X5, X1, X1
- VADDUWM A0, B0, A0
- VADDUWM A1, B1, A1
- ADD X6, X2, X2
- ADD X7, X3, X3
- VADDUWM A2, B2, A2
- VXOR D0, A0, D0
- XOR X0, X12, X12
- XOR X1, X13, X13
- VXOR D1, A1, D1
- VXOR D2, A2, D2
- XOR X2, X14, X14
- XOR X3, X15, X15
- VPERM D0, D0, SIXTEEN, D0
- VPERM D1, D1, SIXTEEN, D1
- ROTLW $16, X12, X12
- ROTLW $16, X13, X13
- VPERM D2, D2, SIXTEEN, D2
- VADDUWM C0, D0, C0
- ROTLW $16, X14, X14
- ROTLW $16, X15, X15
- VADDUWM C1, D1, C1
- VADDUWM C2, D2, C2
- ADD X12, X8, X8
- ADD X13, X9, X9
- VXOR B0, C0, T0
- VXOR B1, C1, T1
- ADD X14, X10, X10
- ADD X15, X11, X11
- VXOR B2, C2, T2
- VRLW T0, TWELVE, B0
- XOR X8, X4, X4
- XOR X9, X5, X5
- VRLW T1, TWELVE, B1
- VRLW T2, TWELVE, B2
- XOR X10, X6, X6
- XOR X11, X7, X7
- VADDUWM A0, B0, A0
- VADDUWM A1, B1, A1
- ROTLW $12, X4, X4
- ROTLW $12, X5, X5
- VADDUWM A2, B2, A2
- VXOR D0, A0, D0
- ROTLW $12, X6, X6
- ROTLW $12, X7, X7
- VXOR D1, A1, D1
- VXOR D2, A2, D2
- ADD X4, X0, X0
- ADD X5, X1, X1
- VPERM D0, D0, TWENTY4, D0
- VPERM D1, D1, TWENTY4, D1
- ADD X6, X2, X2
- ADD X7, X3, X3
- VPERM D2, D2, TWENTY4, D2
- VADDUWM C0, D0, C0
- XOR X0, X12, X12
- XOR X1, X13, X13
- VADDUWM C1, D1, C1
- VADDUWM C2, D2, C2
- XOR X2, X14, X14
- XOR X3, X15, X15
- VXOR B0, C0, T0
- VXOR B1, C1, T1
- ROTLW $8, X12, X12
- ROTLW $8, X13, X13
- VXOR B2, C2, T2
- VRLW T0, SEVEN, B0
- ROTLW $8, X14, X14
- ROTLW $8, X15, X15
- VRLW T1, SEVEN, B1
- VRLW T2, SEVEN, B2
- ADD X12, X8, X8
- ADD X13, X9, X9
- VSLDOI $8, C0, C0, C0
- VSLDOI $8, C1, C1, C1
- ADD X14, X10, X10
- ADD X15, X11, X11
- VSLDOI $8, C2, C2, C2
- VSLDOI $12, B0, B0, B0
- XOR X8, X4, X4
- XOR X9, X5, X5
- VSLDOI $12, B1, B1, B1
- VSLDOI $12, B2, B2, B2
- XOR X10, X6, X6
- XOR X11, X7, X7
- VSLDOI $4, D0, D0, D0
- VSLDOI $4, D1, D1, D1
- ROTLW $7, X4, X4
- ROTLW $7, X5, X5
- VSLDOI $4, D2, D2, D2
- VADDUWM A0, B0, A0
- ROTLW $7, X6, X6
- ROTLW $7, X7, X7
- VADDUWM A1, B1, A1
- VADDUWM A2, B2, A2
- ADD X5, X0, X0
- ADD X6, X1, X1
- VXOR D0, A0, D0
- VXOR D1, A1, D1
- ADD X7, X2, X2
- ADD X4, X3, X3
- VXOR D2, A2, D2
- VPERM D0, D0, SIXTEEN, D0
- XOR X0, X15, X15
- XOR X1, X12, X12
- VPERM D1, D1, SIXTEEN, D1
- VPERM D2, D2, SIXTEEN, D2
- XOR X2, X13, X13
- XOR X3, X14, X14
- VADDUWM C0, D0, C0
- VADDUWM C1, D1, C1
- ROTLW $16, X15, X15
- ROTLW $16, X12, X12
- VADDUWM C2, D2, C2
- VXOR B0, C0, T0
- ROTLW $16, X13, X13
- ROTLW $16, X14, X14
- VXOR B1, C1, T1
- VXOR B2, C2, T2
- ADD X15, X10, X10
- ADD X12, X11, X11
- VRLW T0, TWELVE, B0
- VRLW T1, TWELVE, B1
- ADD X13, X8, X8
- ADD X14, X9, X9
- VRLW T2, TWELVE, B2
- VADDUWM A0, B0, A0
- XOR X10, X5, X5
- XOR X11, X6, X6
- VADDUWM A1, B1, A1
- VADDUWM A2, B2, A2
- XOR X8, X7, X7
- XOR X9, X4, X4
- VXOR D0, A0, D0
- VXOR D1, A1, D1
- ROTLW $12, X5, X5
- ROTLW $12, X6, X6
- VXOR D2, A2, D2
- VPERM D0, D0, TWENTY4, D0
- ROTLW $12, X7, X7
- ROTLW $12, X4, X4
- VPERM D1, D1, TWENTY4, D1
- VPERM D2, D2, TWENTY4, D2
- ADD X5, X0, X0
- ADD X6, X1, X1
- VADDUWM C0, D0, C0
- VADDUWM C1, D1, C1
- ADD X7, X2, X2
- ADD X4, X3, X3
- VADDUWM C2, D2, C2
- VXOR B0, C0, T0
- XOR X0, X15, X15
- XOR X1, X12, X12
- VXOR B1, C1, T1
- VXOR B2, C2, T2
- XOR X2, X13, X13
- XOR X3, X14, X14
- VRLW T0, SEVEN, B0
- VRLW T1, SEVEN, B1
- ROTLW $8, X15, X15
- ROTLW $8, X12, X12
- VRLW T2, SEVEN, B2
- VSLDOI $8, C0, C0, C0
- ROTLW $8, X13, X13
- ROTLW $8, X14, X14
- VSLDOI $8, C1, C1, C1
- VSLDOI $8, C2, C2, C2
- ADD X15, X10, X10
- ADD X12, X11, X11
- VSLDOI $4, B0, B0, B0
- VSLDOI $4, B1, B1, B1
- ADD X13, X8, X8
- ADD X14, X9, X9
- VSLDOI $4, B2, B2, B2
- VSLDOI $12, D0, D0, D0
- XOR X10, X5, X5
- XOR X11, X6, X6
- VSLDOI $12, D1, D1, D1
- VSLDOI $12, D2, D2, D2
- XOR X8, X7, X7
- XOR X9, X4, X4
- ROTLW $7, X5, X5
- ROTLW $7, X6, X6
- ROTLW $7, X7, X7
- ROTLW $7, X4, X4
- BC 0x10, 0, loop_vmx
-
- SUB $256, LEN, LEN
-
- // Accumulate key block
- ADD $0x61707865, X0, X0
- ADD $0x3320646e, X1, X1
- ADD $0x79622d32, X2, X2
- ADD $0x6b206574, X3, X3
- ADD TMP0, X4, X4
- ADD TMP1, X5, X5
- ADD TMP2, X6, X6
- ADD TMP3, X7, X7
- MOVWZ 16(KEY), TMP0
- MOVWZ 20(KEY), TMP1
- MOVWZ 24(KEY), TMP2
- MOVWZ 28(KEY), TMP3
- ADD TMP0, X8, X8
- ADD TMP1, X9, X9
- ADD TMP2, X10, X10
- ADD TMP3, X11, X11
-
- MOVWZ 12(CNT), TMP0
- MOVWZ 8(CNT), TMP1
- MOVWZ 4(CNT), TMP2
- MOVWZ 0(CNT), TEMP
- ADD TMP0, X15, X15
- ADD TMP1, X14, X14
- ADD TMP2, X13, X13
- ADD TEMP, X12, X12
-
- // Accumulate key block
- VADDUWM A0, K0, A0
- VADDUWM A1, K0, A1
- VADDUWM A2, K0, A2
- VADDUWM B0, K1, B0
- VADDUWM B1, K1, B1
- VADDUWM B2, K1, B2
- VADDUWM C0, K2, C0
- VADDUWM C1, K2, C1
- VADDUWM C2, K2, C2
- VADDUWM D0, K3, D0
- VADDUWM D1, K4, D1
- VADDUWM D2, K5, D2
-
- // Increment counter
- ADD $4, TEMP, TEMP
- MOVW TEMP, 0(CNT)
-
- VADDUWM K3, FOUR, K3
- VADDUWM K4, FOUR, K4
- VADDUWM K5, FOUR, K5
-
- // XOR the input slice (INP) with the keystream, which is stored in GPRs (X0-X3).
-
- // Load input (aligned or not)
- MOVWZ 0(INP), TMP0
- MOVWZ 4(INP), TMP1
- MOVWZ 8(INP), TMP2
- MOVWZ 12(INP), TMP3
-
- // XOR with input
- XOR TMP0, X0, X0
- XOR TMP1, X1, X1
- XOR TMP2, X2, X2
- XOR TMP3, X3, X3
- MOVWZ 16(INP), TMP0
- MOVWZ 20(INP), TMP1
- MOVWZ 24(INP), TMP2
- MOVWZ 28(INP), TMP3
- XOR TMP0, X4, X4
- XOR TMP1, X5, X5
- XOR TMP2, X6, X6
- XOR TMP3, X7, X7
- MOVWZ 32(INP), TMP0
- MOVWZ 36(INP), TMP1
- MOVWZ 40(INP), TMP2
- MOVWZ 44(INP), TMP3
- XOR TMP0, X8, X8
- XOR TMP1, X9, X9
- XOR TMP2, X10, X10
- XOR TMP3, X11, X11
- MOVWZ 48(INP), TMP0
- MOVWZ 52(INP), TMP1
- MOVWZ 56(INP), TMP2
- MOVWZ 60(INP), TMP3
- XOR TMP0, X12, X12
- XOR TMP1, X13, X13
- XOR TMP2, X14, X14
- XOR TMP3, X15, X15
-
- // Store output (aligned or not)
- MOVW X0, 0(OUT)
- MOVW X1, 4(OUT)
- MOVW X2, 8(OUT)
- MOVW X3, 12(OUT)
-
- ADD $64, INP, INP // INP points to the end of the slice for the alignment code below
-
- MOVW X4, 16(OUT)
- MOVD $16, TMP0
- MOVW X5, 20(OUT)
- MOVD $32, TMP1
- MOVW X6, 24(OUT)
- MOVD $48, TMP2
- MOVW X7, 28(OUT)
- MOVD $64, TMP3
- MOVW X8, 32(OUT)
- MOVW X9, 36(OUT)
- MOVW X10, 40(OUT)
- MOVW X11, 44(OUT)
- MOVW X12, 48(OUT)
- MOVW X13, 52(OUT)
- MOVW X14, 56(OUT)
- MOVW X15, 60(OUT)
- ADD $64, OUT, OUT
-
- // Load input
- LVX (INP)(R0), DD0
- LVX (INP)(TMP0), DD1
- LVX (INP)(TMP1), DD2
- LVX (INP)(TMP2), DD3
- LVX (INP)(TMP3), DD4
- ADD $64, INP, INP
-
- VPERM DD1, DD0, INPPERM, DD0 // Align input
- VPERM DD2, DD1, INPPERM, DD1
- VPERM DD3, DD2, INPPERM, DD2
- VPERM DD4, DD3, INPPERM, DD3
- VXOR A0, DD0, A0 // XOR with input
- VXOR B0, DD1, B0
- LVX (INP)(TMP0), DD1 // Keep loading input
- VXOR C0, DD2, C0
- LVX (INP)(TMP1), DD2
- VXOR D0, DD3, D0
- LVX (INP)(TMP2), DD3
- LVX (INP)(TMP3), DD0
- ADD $64, INP, INP
- MOVD $63, TMP3 // 63 is not a typo
- VPERM A0, A0, OUTPERM, A0
- VPERM B0, B0, OUTPERM, B0
- VPERM C0, C0, OUTPERM, C0
- VPERM D0, D0, OUTPERM, D0
-
- VPERM DD1, DD4, INPPERM, DD4 // Align input
- VPERM DD2, DD1, INPPERM, DD1
- VPERM DD3, DD2, INPPERM, DD2
- VPERM DD0, DD3, INPPERM, DD3
- VXOR A1, DD4, A1
- VXOR B1, DD1, B1
- LVX (INP)(TMP0), DD1 // Keep loading
- VXOR C1, DD2, C1
- LVX (INP)(TMP1), DD2
- VXOR D1, DD3, D1
- LVX (INP)(TMP2), DD3
-
- // Note that the LVX address is always rounded down to the nearest 16-byte
- // boundary, and that it always points to at most 15 bytes beyond the end of
- // the slice, so we cannot cross a page boundary.
- LVX (INP)(TMP3), DD4 // Redundant in aligned case.
- ADD $64, INP, INP
- VPERM A1, A1, OUTPERM, A1 // Pre-misalign output
- VPERM B1, B1, OUTPERM, B1
- VPERM C1, C1, OUTPERM, C1
- VPERM D1, D1, OUTPERM, D1
-
- VPERM DD1, DD0, INPPERM, DD0 // Align Input
- VPERM DD2, DD1, INPPERM, DD1
- VPERM DD3, DD2, INPPERM, DD2
- VPERM DD4, DD3, INPPERM, DD3
- VXOR A2, DD0, A2
- VXOR B2, DD1, B2
- VXOR C2, DD2, C2
- VXOR D2, DD3, D2
- VPERM A2, A2, OUTPERM, A2
- VPERM B2, B2, OUTPERM, B2
- VPERM C2, C2, OUTPERM, C2
- VPERM D2, D2, OUTPERM, D2
-
- ANDCC $15, OUT, X1 // Is out aligned?
- MOVD OUT, X0
-
- VSEL A0, B0, OUTMASK, DD0 // Collect pre-misaligned output
- VSEL B0, C0, OUTMASK, DD1
- VSEL C0, D0, OUTMASK, DD2
- VSEL D0, A1, OUTMASK, DD3
- VSEL A1, B1, OUTMASK, B0
- VSEL B1, C1, OUTMASK, C0
- VSEL C1, D1, OUTMASK, D0
- VSEL D1, A2, OUTMASK, A1
- VSEL A2, B2, OUTMASK, B1
- VSEL B2, C2, OUTMASK, C1
- VSEL C2, D2, OUTMASK, D1
-
- STVX DD0, (OUT+TMP0)
- STVX DD1, (OUT+TMP1)
- STVX DD2, (OUT+TMP2)
- ADD $64, OUT, OUT
- STVX DD3, (OUT+R0)
- STVX B0, (OUT+TMP0)
- STVX C0, (OUT+TMP1)
- STVX D0, (OUT+TMP2)
- ADD $64, OUT, OUT
- STVX A1, (OUT+R0)
- STVX B1, (OUT+TMP0)
- STVX C1, (OUT+TMP1)
- STVX D1, (OUT+TMP2)
- ADD $64, OUT, OUT
-
- BEQ aligned_vmx
-
- SUB X1, OUT, X2 // in misaligned case edges
- MOVD $0, X3 // are written byte-by-byte
-
-unaligned_tail_vmx:
- STVEBX D2, (X2+X3)
- ADD $1, X3, X3
- CMPW X3, X1
- BNE unaligned_tail_vmx
- SUB X1, X0, X2
-
-unaligned_head_vmx:
- STVEBX A0, (X2+X1)
- CMPW X1, $15
- ADD $1, X1, X1
- BNE unaligned_head_vmx
-
- CMPU LEN, $255 // done with 256-byte block yet?
- BGT loop_outer_vmx
-
- JMP done_vmx
-
-aligned_vmx:
- STVX A0, (X0+R0)
- CMPU LEN, $255 // done with 256-byte block yet?
- BGT loop_outer_vmx
-
-done_vmx:
- RET
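
For orientation, the routine above consumes the input in 256-byte strides: each pass through loop_outer_vmx produces four 64-byte keystream blocks, three in VMX registers and one in scalar GPRs (X0-X15), XORs them with the input, and advances the stored 32-bit block counter by 4 (the ADD $4, TEMP near the end of the loop). A Go-shaped sketch of that control flow only, with fourBlocks as a hypothetical stand-in for the interleaved vector/scalar work:

    package sketch

    // fourBlocks stands in for the interleaved vector/scalar work in
    // chaCha20_ctr32_vmx: three 64-byte keystream blocks computed in VMX
    // registers plus one computed in GPRs. The body here is a stub.
    func fourBlocks(key [8]uint32, nonce [3]uint32, counter uint32) [256]byte {
        var ks [256]byte
        // ... the real code fills ks from the ChaCha20 state ...
        return ks
    }

    // xorKeyStream256 mirrors the outer-loop shape of the deleted assembly:
    // 256 input bytes and 4 counter values consumed per pass.
    func xorKeyStream256(dst, src []byte, key [8]uint32, nonce [3]uint32, counter uint32) uint32 {
        for len(src) >= 256 {
            ks := fourBlocks(key, nonce, counter)
            for i := 0; i < 256; i++ {
                dst[i] = src[i] ^ ks[i]
            }
            counter += 4
            dst, src = dst[256:], src[256:]
        }
        return counter
    }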
diff --git a/vendor/golang.org/x/crypto/internal/chacha20/chacha_arm64.go b/vendor/golang.org/x/crypto/internal/chacha20/chacha_arm64.go
deleted file mode 100644
index ad74e23a..00000000
--- a/vendor/golang.org/x/crypto/internal/chacha20/chacha_arm64.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build go1.11
-// +build !gccgo
-
-package chacha20
-
-const (
- haveAsm = true
- bufSize = 256
-)
-
-//go:noescape
-func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32)
-
-func (c *Cipher) xorKeyStreamAsm(dst, src []byte) {
-
- if len(src) >= bufSize {
- xorKeyStreamVX(dst, src, &c.key, &c.nonce, &c.counter)
- }
-
- if len(src)%bufSize != 0 {
- i := len(src) - len(src)%bufSize
- c.buf = [bufSize]byte{}
- copy(c.buf[:], src[i:])
- xorKeyStreamVX(c.buf[:], c.buf[:], &c.key, &c.nonce, &c.counter)
- c.len = bufSize - copy(dst[i:], c.buf[:len(src)%bufSize])
- }
-}
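
A worked example of the tail handling above: for len(src) == 300, the first xorKeyStreamVX call XORs the leading 256 bytes directly into dst; the trailing 44 bytes are copied into c.buf, a full 256-byte keystream pass runs over the buffer, copy(dst[256:], c.buf[:44]) writes the 44 result bytes out, and c.len = 256 - 44 = 212 unused keystream bytes remain buffered for the next call.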
diff --git a/vendor/golang.org/x/crypto/internal/chacha20/chacha_generic.go b/vendor/golang.org/x/crypto/internal/chacha20/chacha_generic.go
deleted file mode 100644
index 6570847f..00000000
--- a/vendor/golang.org/x/crypto/internal/chacha20/chacha_generic.go
+++ /dev/null
@@ -1,264 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package chacha20 implements the core ChaCha20 function as specified
-// in https://tools.ietf.org/html/rfc7539#section-2.3.
-package chacha20
-
-import (
- "crypto/cipher"
- "encoding/binary"
-
- "golang.org/x/crypto/internal/subtle"
-)
-
-// assert that *Cipher implements cipher.Stream
-var _ cipher.Stream = (*Cipher)(nil)
-
-// Cipher is a stateful instance of ChaCha20 using a particular key
-// and nonce. A *Cipher implements the cipher.Stream interface.
-type Cipher struct {
- key [8]uint32
- counter uint32 // incremented after each block
- nonce [3]uint32
- buf [bufSize]byte // buffer for unused keystream bytes
- len int // number of unused keystream bytes at end of buf
-}
-
-// New creates a new ChaCha20 stream cipher with the given key and nonce.
-// The initial counter value is set to 0.
-func New(key [8]uint32, nonce [3]uint32) *Cipher {
- return &Cipher{key: key, nonce: nonce}
-}
-
-// ChaCha20 constants spelling "expand 32-byte k"
-const (
- j0 uint32 = 0x61707865
- j1 uint32 = 0x3320646e
- j2 uint32 = 0x79622d32
- j3 uint32 = 0x6b206574
-)
-
-func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) {
- a += b
- d ^= a
- d = (d << 16) | (d >> 16)
- c += d
- b ^= c
- b = (b << 12) | (b >> 20)
- a += b
- d ^= a
- d = (d << 8) | (d >> 24)
- c += d
- b ^= c
- b = (b << 7) | (b >> 25)
- return a, b, c, d
-}
-
-// XORKeyStream XORs each byte in the given slice with a byte from the
-// cipher's key stream. Dst and src must overlap entirely or not at all.
-//
-// If len(dst) < len(src), XORKeyStream will panic. It is acceptable
-// to pass a dst bigger than src, and in that case, XORKeyStream will
-// only update dst[:len(src)] and will not touch the rest of dst.
-//
-// Multiple calls to XORKeyStream behave as if the concatenation of
-// the src buffers was passed in a single run. That is, Cipher
-// maintains state and does not reset at each XORKeyStream call.
-func (s *Cipher) XORKeyStream(dst, src []byte) {
- if len(dst) < len(src) {
- panic("chacha20: output smaller than input")
- }
- if subtle.InexactOverlap(dst[:len(src)], src) {
- panic("chacha20: invalid buffer overlap")
- }
-
- // xor src with buffered keystream first
- if s.len != 0 {
- buf := s.buf[len(s.buf)-s.len:]
- if len(src) < len(buf) {
- buf = buf[:len(src)]
- }
- td, ts := dst[:len(buf)], src[:len(buf)] // BCE hint
- for i, b := range buf {
- td[i] = ts[i] ^ b
- }
- s.len -= len(buf)
- if s.len != 0 {
- return
- }
- s.buf = [len(s.buf)]byte{} // zero the empty buffer
- src = src[len(buf):]
- dst = dst[len(buf):]
- }
-
- if len(src) == 0 {
- return
- }
- if haveAsm {
- if uint64(len(src))+uint64(s.counter)*64 > (1<<38)-64 {
- panic("chacha20: counter overflow")
- }
- s.xorKeyStreamAsm(dst, src)
- return
- }
-
- // set up a 64-byte buffer to pad out the final block if needed
- // (hoisted out of the main loop to avoid spills)
- rem := len(src) % 64 // length of final block
- fin := len(src) - rem // index of final block
- if rem > 0 {
- copy(s.buf[len(s.buf)-64:], src[fin:])
- }
-
- // pre-calculate most of the first round
- s1, s5, s9, s13 := quarterRound(j1, s.key[1], s.key[5], s.nonce[0])
- s2, s6, s10, s14 := quarterRound(j2, s.key[2], s.key[6], s.nonce[1])
- s3, s7, s11, s15 := quarterRound(j3, s.key[3], s.key[7], s.nonce[2])
-
- n := len(src)
- src, dst = src[:n:n], dst[:n:n] // BCE hint
- for i := 0; i < n; i += 64 {
- // calculate the remainder of the first round
- s0, s4, s8, s12 := quarterRound(j0, s.key[0], s.key[4], s.counter)
-
- // execute the second round
- x0, x5, x10, x15 := quarterRound(s0, s5, s10, s15)
- x1, x6, x11, x12 := quarterRound(s1, s6, s11, s12)
- x2, x7, x8, x13 := quarterRound(s2, s7, s8, s13)
- x3, x4, x9, x14 := quarterRound(s3, s4, s9, s14)
-
- // execute the remaining 18 rounds
- for i := 0; i < 9; i++ {
- x0, x4, x8, x12 = quarterRound(x0, x4, x8, x12)
- x1, x5, x9, x13 = quarterRound(x1, x5, x9, x13)
- x2, x6, x10, x14 = quarterRound(x2, x6, x10, x14)
- x3, x7, x11, x15 = quarterRound(x3, x7, x11, x15)
-
- x0, x5, x10, x15 = quarterRound(x0, x5, x10, x15)
- x1, x6, x11, x12 = quarterRound(x1, x6, x11, x12)
- x2, x7, x8, x13 = quarterRound(x2, x7, x8, x13)
- x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14)
- }
-
- x0 += j0
- x1 += j1
- x2 += j2
- x3 += j3
-
- x4 += s.key[0]
- x5 += s.key[1]
- x6 += s.key[2]
- x7 += s.key[3]
- x8 += s.key[4]
- x9 += s.key[5]
- x10 += s.key[6]
- x11 += s.key[7]
-
- x12 += s.counter
- x13 += s.nonce[0]
- x14 += s.nonce[1]
- x15 += s.nonce[2]
-
- // increment the counter
- s.counter += 1
- if s.counter == 0 {
- panic("chacha20: counter overflow")
- }
-
- // pad to 64 bytes if needed
- in, out := src[i:], dst[i:]
- if i == fin {
- // src[fin:] has already been copied into s.buf before
- // the main loop
- in, out = s.buf[len(s.buf)-64:], s.buf[len(s.buf)-64:]
- }
- in, out = in[:64], out[:64] // BCE hint
-
- // XOR the key stream with the source and write out the result
- xor(out[0:], in[0:], x0)
- xor(out[4:], in[4:], x1)
- xor(out[8:], in[8:], x2)
- xor(out[12:], in[12:], x3)
- xor(out[16:], in[16:], x4)
- xor(out[20:], in[20:], x5)
- xor(out[24:], in[24:], x6)
- xor(out[28:], in[28:], x7)
- xor(out[32:], in[32:], x8)
- xor(out[36:], in[36:], x9)
- xor(out[40:], in[40:], x10)
- xor(out[44:], in[44:], x11)
- xor(out[48:], in[48:], x12)
- xor(out[52:], in[52:], x13)
- xor(out[56:], in[56:], x14)
- xor(out[60:], in[60:], x15)
- }
- // copy any trailing bytes out of the buffer and into dst
- if rem != 0 {
- s.len = 64 - rem
- copy(dst[fin:], s.buf[len(s.buf)-64:])
- }
-}
-
-// Advance discards bytes in the key stream until the next 64 byte block
-// boundary is reached and updates the counter accordingly. If the key
-// stream is already at a block boundary no bytes will be discarded and
-// the counter will be unchanged.
-func (s *Cipher) Advance() {
- s.len -= s.len % 64
- if s.len == 0 {
- s.buf = [len(s.buf)]byte{}
- }
-}
-
-// XORKeyStream crypts bytes from in to out using the given key and counters.
-// In and out must overlap entirely or not at all. Counter contains the raw
-// ChaCha20 counter bytes (i.e. block counter followed by nonce).
-func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) {
- s := Cipher{
- key: [8]uint32{
- binary.LittleEndian.Uint32(key[0:4]),
- binary.LittleEndian.Uint32(key[4:8]),
- binary.LittleEndian.Uint32(key[8:12]),
- binary.LittleEndian.Uint32(key[12:16]),
- binary.LittleEndian.Uint32(key[16:20]),
- binary.LittleEndian.Uint32(key[20:24]),
- binary.LittleEndian.Uint32(key[24:28]),
- binary.LittleEndian.Uint32(key[28:32]),
- },
- nonce: [3]uint32{
- binary.LittleEndian.Uint32(counter[4:8]),
- binary.LittleEndian.Uint32(counter[8:12]),
- binary.LittleEndian.Uint32(counter[12:16]),
- },
- counter: binary.LittleEndian.Uint32(counter[0:4]),
- }
- s.XORKeyStream(out, in)
-}
-
-// HChaCha20 uses the ChaCha20 core to generate a derived key from a key and a
-// nonce. It should only be used as part of the XChaCha20 construction.
-func HChaCha20(key *[8]uint32, nonce *[4]uint32) [8]uint32 {
- x0, x1, x2, x3 := j0, j1, j2, j3
- x4, x5, x6, x7 := key[0], key[1], key[2], key[3]
- x8, x9, x10, x11 := key[4], key[5], key[6], key[7]
- x12, x13, x14, x15 := nonce[0], nonce[1], nonce[2], nonce[3]
-
- for i := 0; i < 10; i++ {
- x0, x4, x8, x12 = quarterRound(x0, x4, x8, x12)
- x1, x5, x9, x13 = quarterRound(x1, x5, x9, x13)
- x2, x6, x10, x14 = quarterRound(x2, x6, x10, x14)
- x3, x7, x11, x15 = quarterRound(x3, x7, x11, x15)
-
- x0, x5, x10, x15 = quarterRound(x0, x5, x10, x15)
- x1, x6, x11, x12 = quarterRound(x1, x6, x11, x12)
- x2, x7, x8, x13 = quarterRound(x2, x7, x8, x13)
- x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14)
- }
-
- var out [8]uint32
- out[0], out[1], out[2], out[3] = x0, x1, x2, x3
- out[4], out[5], out[6], out[7] = x12, x13, x14, x15
- return out
-}
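
The quarterRound function above matches the QUARTERROUND test vector from RFC 7539, section 2.1.1, which makes a convenient sanity check. A self-contained sketch; the function body is copied verbatim from the deleted file:

    package main

    import "fmt"

    // quarterRound is copied verbatim from the deleted chacha_generic.go.
    func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) {
        a += b
        d ^= a
        d = (d << 16) | (d >> 16)
        c += d
        b ^= c
        b = (b << 12) | (b >> 20)
        a += b
        d ^= a
        d = (d << 8) | (d >> 24)
        c += d
        b ^= c
        b = (b << 7) | (b >> 25)
        return a, b, c, d
    }

    func main() {
        a, b, c, d := quarterRound(0x11111111, 0x01020304, 0x9b8d6f43, 0x01234567)
        // RFC 7539 expects: ea2a92f4 cb1cf8ce 4581472e 5881c4bb
        fmt.Printf("%08x %08x %08x %08x\n", a, b, c, d)
    }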
diff --git a/vendor/golang.org/x/crypto/internal/chacha20/chacha_noasm.go b/vendor/golang.org/x/crypto/internal/chacha20/chacha_noasm.go
deleted file mode 100644
index bf8beba6..00000000
--- a/vendor/golang.org/x/crypto/internal/chacha20/chacha_noasm.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !ppc64le,!arm64,!s390x arm64,!go1.11 gccgo appengine
-
-package chacha20
-
-const (
- bufSize = 64
- haveAsm = false
-)
-
-func (*Cipher) xorKeyStreamAsm(dst, src []byte) {
- panic("not implemented")
-}
diff --git a/vendor/golang.org/x/crypto/internal/chacha20/chacha_ppc64le.go b/vendor/golang.org/x/crypto/internal/chacha20/chacha_ppc64le.go
deleted file mode 100644
index 638cb5e5..00000000
--- a/vendor/golang.org/x/crypto/internal/chacha20/chacha_ppc64le.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ppc64le,!gccgo,!appengine
-
-package chacha20
-
-import "encoding/binary"
-
-const (
- bufSize = 256
- haveAsm = true
-)
-
-//go:noescape
-func chaCha20_ctr32_vmx(out, inp *byte, len int, key *[8]uint32, counter *uint32)
-
-func (c *Cipher) xorKeyStreamAsm(dst, src []byte) {
- if len(src) >= bufSize {
- chaCha20_ctr32_vmx(&dst[0], &src[0], len(src)-len(src)%bufSize, &c.key, &c.counter)
- }
- if len(src)%bufSize != 0 {
- chaCha20_ctr32_vmx(&c.buf[0], &c.buf[0], bufSize, &c.key, &c.counter)
- start := len(src) - len(src)%bufSize
- ts, td, tb := src[start:], dst[start:], c.buf[:]
- // Unroll loop to XOR 32 bytes per iteration.
- for i := 0; i < len(ts)-32; i += 32 {
- td, tb = td[:len(ts)], tb[:len(ts)] // bounds check elimination
- s0 := binary.LittleEndian.Uint64(ts[0:8])
- s1 := binary.LittleEndian.Uint64(ts[8:16])
- s2 := binary.LittleEndian.Uint64(ts[16:24])
- s3 := binary.LittleEndian.Uint64(ts[24:32])
- b0 := binary.LittleEndian.Uint64(tb[0:8])
- b1 := binary.LittleEndian.Uint64(tb[8:16])
- b2 := binary.LittleEndian.Uint64(tb[16:24])
- b3 := binary.LittleEndian.Uint64(tb[24:32])
- binary.LittleEndian.PutUint64(td[0:8], s0^b0)
- binary.LittleEndian.PutUint64(td[8:16], s1^b1)
- binary.LittleEndian.PutUint64(td[16:24], s2^b2)
- binary.LittleEndian.PutUint64(td[24:32], s3^b3)
- ts, td, tb = ts[32:], td[32:], tb[32:]
- }
- td, tb = td[:len(ts)], tb[:len(ts)] // bounds check elimination
- for i, v := range ts {
- td[i] = tb[i] ^ v
- }
- c.len = bufSize - (len(src) % bufSize)
-
- }
-
-}
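
The unrolled loop above swaps byte-at-a-time XOR for 64-bit loads and stores over the keystream tail. The same technique in isolation, as a self-contained sketch (xorWords and the package name are invented here):

    package sketch

    import "encoding/binary"

    // xorWords XORs a with b into dst 8 bytes at a time, then finishes
    // the last len(a) % 8 bytes one at a time. All three slices must
    // have the same length.
    func xorWords(dst, a, b []byte) {
        n := len(a) &^ 7 // largest multiple of 8 <= len(a)
        for i := 0; i < n; i += 8 {
            x := binary.LittleEndian.Uint64(a[i:])
            y := binary.LittleEndian.Uint64(b[i:])
            binary.LittleEndian.PutUint64(dst[i:], x^y)
        }
        for i := n; i < len(a); i++ {
            dst[i] = a[i] ^ b[i]
        }
    }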
diff --git a/vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.go b/vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.go
deleted file mode 100644
index aad645b4..00000000
--- a/vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build s390x,!gccgo,!appengine
-
-package chacha20
-
-import (
- "golang.org/x/sys/cpu"
-)
-
-var haveAsm = cpu.S390X.HasVX
-
-const bufSize = 256
-
-// xorKeyStreamVX is an assembly implementation of XORKeyStream. It must only
-// be called when the vector facility is available.
-// Implementation in chacha_s390x.s.
-//go:noescape
-func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32, buf *[256]byte, len *int)
-
-func (c *Cipher) xorKeyStreamAsm(dst, src []byte) {
- xorKeyStreamVX(dst, src, &c.key, &c.nonce, &c.counter, &c.buf, &c.len)
-}
-
-// EXRL targets, DO NOT CALL!
-func mvcSrcToBuf()
-func mvcBufToDst()
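
Unlike the other ports, haveAsm here is a runtime variable rather than a compile-time constant: the z/Architecture vector facility is probed through golang.org/x/sys/cpu, and the generic path is used when it is absent. A minimal standalone probe using the same API:

    package main

    import (
        "fmt"

        "golang.org/x/sys/cpu"
    )

    func main() {
        // cpu.S390X.HasVX reports whether the vector facility is available.
        if cpu.S390X.HasVX {
            fmt.Println("vector (VX) assembly path available")
        } else {
            fmt.Println("falling back to the generic Go implementation")
        }
    }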
diff --git a/vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.s b/vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.s
deleted file mode 100644
index 57df4044..00000000
--- a/vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.s
+++ /dev/null
@@ -1,260 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build s390x,!gccgo,!appengine
-
-#include "go_asm.h"
-#include "textflag.h"
-
-// This is an implementation of the ChaCha20 encryption algorithm as
-// specified in RFC 7539. It uses vector instructions to compute
-// 4 keystream blocks in parallel (256 bytes) which are then XORed
-// with the bytes in the input slice.
-
-GLOBL ·constants<>(SB), RODATA|NOPTR, $32
-// BSWAP: swap bytes in each 4-byte element
-DATA ·constants<>+0x00(SB)/4, $0x03020100
-DATA ·constants<>+0x04(SB)/4, $0x07060504
-DATA ·constants<>+0x08(SB)/4, $0x0b0a0908
-DATA ·constants<>+0x0c(SB)/4, $0x0f0e0d0c
-// J0: [j0, j1, j2, j3]
-DATA ·constants<>+0x10(SB)/4, $0x61707865
-DATA ·constants<>+0x14(SB)/4, $0x3320646e
-DATA ·constants<>+0x18(SB)/4, $0x79622d32
-DATA ·constants<>+0x1c(SB)/4, $0x6b206574
-
-// EXRL targets:
-TEXT ·mvcSrcToBuf(SB), NOFRAME|NOSPLIT, $0
- MVC $1, (R1), (R8)
- RET
-
-TEXT ·mvcBufToDst(SB), NOFRAME|NOSPLIT, $0
- MVC $1, (R8), (R9)
- RET
-
-#define BSWAP V5
-#define J0 V6
-#define KEY0 V7
-#define KEY1 V8
-#define NONCE V9
-#define CTR V10
-#define M0 V11
-#define M1 V12
-#define M2 V13
-#define M3 V14
-#define INC V15
-#define X0 V16
-#define X1 V17
-#define X2 V18
-#define X3 V19
-#define X4 V20
-#define X5 V21
-#define X6 V22
-#define X7 V23
-#define X8 V24
-#define X9 V25
-#define X10 V26
-#define X11 V27
-#define X12 V28
-#define X13 V29
-#define X14 V30
-#define X15 V31
-
-#define NUM_ROUNDS 20
-
-#define ROUND4(a0, a1, a2, a3, b0, b1, b2, b3, c0, c1, c2, c3, d0, d1, d2, d3) \
- VAF a1, a0, a0 \
- VAF b1, b0, b0 \
- VAF c1, c0, c0 \
- VAF d1, d0, d0 \
- VX a0, a2, a2 \
- VX b0, b2, b2 \
- VX c0, c2, c2 \
- VX d0, d2, d2 \
- VERLLF $16, a2, a2 \
- VERLLF $16, b2, b2 \
- VERLLF $16, c2, c2 \
- VERLLF $16, d2, d2 \
- VAF a2, a3, a3 \
- VAF b2, b3, b3 \
- VAF c2, c3, c3 \
- VAF d2, d3, d3 \
- VX a3, a1, a1 \
- VX b3, b1, b1 \
- VX c3, c1, c1 \
- VX d3, d1, d1 \
- VERLLF $12, a1, a1 \
- VERLLF $12, b1, b1 \
- VERLLF $12, c1, c1 \
- VERLLF $12, d1, d1 \
- VAF a1, a0, a0 \
- VAF b1, b0, b0 \
- VAF c1, c0, c0 \
- VAF d1, d0, d0 \
- VX a0, a2, a2 \
- VX b0, b2, b2 \
- VX c0, c2, c2 \
- VX d0, d2, d2 \
- VERLLF $8, a2, a2 \
- VERLLF $8, b2, b2 \
- VERLLF $8, c2, c2 \
- VERLLF $8, d2, d2 \
- VAF a2, a3, a3 \
- VAF b2, b3, b3 \
- VAF c2, c3, c3 \
- VAF d2, d3, d3 \
- VX a3, a1, a1 \
- VX b3, b1, b1 \
- VX c3, c1, c1 \
- VX d3, d1, d1 \
- VERLLF $7, a1, a1 \
- VERLLF $7, b1, b1 \
- VERLLF $7, c1, c1 \
- VERLLF $7, d1, d1
-
-#define PERMUTE(mask, v0, v1, v2, v3) \
- VPERM v0, v0, mask, v0 \
- VPERM v1, v1, mask, v1 \
- VPERM v2, v2, mask, v2 \
- VPERM v3, v3, mask, v3
-
-#define ADDV(x, v0, v1, v2, v3) \
- VAF x, v0, v0 \
- VAF x, v1, v1 \
- VAF x, v2, v2 \
- VAF x, v3, v3
-
-#define XORV(off, dst, src, v0, v1, v2, v3) \
- VLM off(src), M0, M3 \
- PERMUTE(BSWAP, v0, v1, v2, v3) \
- VX v0, M0, M0 \
- VX v1, M1, M1 \
- VX v2, M2, M2 \
- VX v3, M3, M3 \
- VSTM M0, M3, off(dst)
-
-#define SHUFFLE(a, b, c, d, t, u, v, w) \
- VMRHF a, c, t \ // t = {a[0], c[0], a[1], c[1]}
- VMRHF b, d, u \ // u = {b[0], d[0], b[1], d[1]}
- VMRLF a, c, v \ // v = {a[2], c[2], a[3], c[3]}
- VMRLF b, d, w \ // w = {b[2], d[2], b[3], d[3]}
- VMRHF t, u, a \ // a = {a[0], b[0], c[0], d[0]}
- VMRLF t, u, b \ // b = {a[1], b[1], c[1], d[1]}
- VMRHF v, w, c \ // c = {a[2], b[2], c[2], d[2]}
- VMRLF v, w, d // d = {a[3], b[3], c[3], d[3]}
-
-// func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32, buf *[256]byte, len *int)
-TEXT ·xorKeyStreamVX(SB), NOSPLIT, $0
- MOVD $·constants<>(SB), R1
- MOVD dst+0(FP), R2 // R2=&dst[0]
- LMG src+24(FP), R3, R4 // R3=&src[0] R4=len(src)
- MOVD key+48(FP), R5 // R5=key
- MOVD nonce+56(FP), R6 // R6=nonce
- MOVD counter+64(FP), R7 // R7=counter
- MOVD buf+72(FP), R8 // R8=buf
- MOVD len+80(FP), R9 // R9=len
-
- // load BSWAP and J0
- VLM (R1), BSWAP, J0
-
- // set up tail buffer
- ADD $-1, R4, R12
- MOVBZ R12, R12
- CMPUBEQ R12, $255, aligned
- MOVD R4, R1
- AND $~255, R1
- MOVD $(R3)(R1*1), R1
- EXRL $·mvcSrcToBuf(SB), R12
- MOVD $255, R0
- SUB R12, R0
- MOVD R0, (R9) // update len
-
-aligned:
- // setup
- MOVD $95, R0
- VLM (R5), KEY0, KEY1
- VLL R0, (R6), NONCE
- VZERO M0
- VLEIB $7, $32, M0
- VSRLB M0, NONCE, NONCE
-
- // initialize counter values
- VLREPF (R7), CTR
- VZERO INC
- VLEIF $1, $1, INC
- VLEIF $2, $2, INC
- VLEIF $3, $3, INC
- VAF INC, CTR, CTR
- VREPIF $4, INC
-
-chacha:
- VREPF $0, J0, X0
- VREPF $1, J0, X1
- VREPF $2, J0, X2
- VREPF $3, J0, X3
- VREPF $0, KEY0, X4
- VREPF $1, KEY0, X5
- VREPF $2, KEY0, X6
- VREPF $3, KEY0, X7
- VREPF $0, KEY1, X8
- VREPF $1, KEY1, X9
- VREPF $2, KEY1, X10
- VREPF $3, KEY1, X11
- VLR CTR, X12
- VREPF $1, NONCE, X13
- VREPF $2, NONCE, X14
- VREPF $3, NONCE, X15
-
- MOVD $(NUM_ROUNDS/2), R1
-
-loop:
- ROUND4(X0, X4, X12, X8, X1, X5, X13, X9, X2, X6, X14, X10, X3, X7, X15, X11)
- ROUND4(X0, X5, X15, X10, X1, X6, X12, X11, X2, X7, X13, X8, X3, X4, X14, X9)
-
- ADD $-1, R1
- BNE loop
-
- // decrement length
- ADD $-256, R4
- BLT tail
-
-continue:
- // rearrange vectors
- SHUFFLE(X0, X1, X2, X3, M0, M1, M2, M3)
- ADDV(J0, X0, X1, X2, X3)
- SHUFFLE(X4, X5, X6, X7, M0, M1, M2, M3)
- ADDV(KEY0, X4, X5, X6, X7)
- SHUFFLE(X8, X9, X10, X11, M0, M1, M2, M3)
- ADDV(KEY1, X8, X9, X10, X11)
- VAF CTR, X12, X12
- SHUFFLE(X12, X13, X14, X15, M0, M1, M2, M3)
- ADDV(NONCE, X12, X13, X14, X15)
-
- // increment counters
- VAF INC, CTR, CTR
-
- // xor keystream with plaintext
- XORV(0*64, R2, R3, X0, X4, X8, X12)
- XORV(1*64, R2, R3, X1, X5, X9, X13)
- XORV(2*64, R2, R3, X2, X6, X10, X14)
- XORV(3*64, R2, R3, X3, X7, X11, X15)
-
- // increment pointers
- MOVD $256(R2), R2
- MOVD $256(R3), R3
-
- CMPBNE R4, $0, chacha
- CMPUBEQ R12, $255, return
- EXRL $·mvcBufToDst(SB), R12 // len was updated during setup
-
-return:
- VSTEF $0, CTR, (R7)
- RET
-
-tail:
- MOVD R2, R9
- MOVD R8, R2
- MOVD R8, R3
- MOVD $0, R4
- JMP continue
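
The SHUFFLE macro above is a 4x4 transpose of 32-bit lanes: before it runs, each vector holds the same state word from four parallel blocks; afterwards, each vector holds four consecutive words of one block, ready to be XORed against 64 contiguous input bytes. The same permutation written out over Go arrays (a sketch; the comments mirror the VMRHF/VMRLF steps):

    package sketch

    // transpose4x4 performs the word transpose done by SHUFFLE.
    func transpose4x4(a, b, c, d *[4]uint32) {
        t := [4]uint32{a[0], c[0], a[1], c[1]} // VMRHF a, c, t
        u := [4]uint32{b[0], d[0], b[1], d[1]} // VMRHF b, d, u
        v := [4]uint32{a[2], c[2], a[3], c[3]} // VMRLF a, c, v
        w := [4]uint32{b[2], d[2], b[3], d[3]} // VMRLF b, d, w
        *a = [4]uint32{t[0], u[0], t[1], u[1]} // {a0, b0, c0, d0}
        *b = [4]uint32{t[2], u[2], t[3], u[3]} // {a1, b1, c1, d1}
        *c = [4]uint32{v[0], w[0], v[1], w[1]} // {a2, b2, c2, d2}
        *d = [4]uint32{v[2], w[2], v[3], w[3]} // {a3, b3, c3, d3}
    }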
diff --git a/vendor/golang.org/x/crypto/internal/chacha20/xor.go b/vendor/golang.org/x/crypto/internal/chacha20/xor.go
deleted file mode 100644
index 9c5ba0b3..00000000
--- a/vendor/golang.org/x/crypto/internal/chacha20/xor.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package chacha20
-
-import (
- "runtime"
-)
-
-// Platforms that have fast unaligned 32-bit little endian accesses.
-const unaligned = runtime.GOARCH == "386" ||
- runtime.GOARCH == "amd64" ||
- runtime.GOARCH == "arm64" ||
- runtime.GOARCH == "ppc64le" ||
- runtime.GOARCH == "s390x"
-
-// xor reads a little endian uint32 from src, XORs it with u and
-// places the result in little endian byte order in dst.
-func xor(dst, src []byte, u uint32) {
- _, _ = src[3], dst[3] // eliminate bounds checks
- if unaligned {
- // The compiler should optimize this code into
- // 32-bit unaligned little endian loads and stores.
- // TODO: delete once the compiler does a reliably
- // good job with the generic code below.
- // See issue #25111 for more details.
- v := uint32(src[0])
- v |= uint32(src[1]) << 8
- v |= uint32(src[2]) << 16
- v |= uint32(src[3]) << 24
- v ^= u
- dst[0] = byte(v)
- dst[1] = byte(v >> 8)
- dst[2] = byte(v >> 16)
- dst[3] = byte(v >> 24)
- } else {
- dst[0] = src[0] ^ byte(u)
- dst[1] = src[1] ^ byte(u>>8)
- dst[2] = src[2] ^ byte(u>>16)
- dst[3] = src[3] ^ byte(u>>24)
- }
-}
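
A worked example of xor above: with src = {0x01, 0x02, 0x03, 0x04} and u = 0xdeadbeef, the little-endian load gives v = 0x04030201, v ^ u = 0xdaaebcee, and dst becomes {0xee, 0xbc, 0xae, 0xda}, exactly the bytes the fallback branch would produce one at a time.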