author | Wim <wim@42.be> | 2022-01-31 00:27:37 +0100
committer | Wim <wim@42.be> | 2022-03-20 14:57:48 +0100
commit | e3cafeaf9292f67459ff1d186f68283bfaedf2ae (patch)
tree | b69c39620aa91dba695b3b935c6651c0fb37ce75 /vendor/filippo.io
parent | e7b193788a56ee7cdb02a87a9db0ad6724ef66d5 (diff)
download | matterbridge-msglm-e3cafeaf9292f67459ff1d186f68283bfaedf2ae.tar.gz matterbridge-msglm-e3cafeaf9292f67459ff1d186f68283bfaedf2ae.tar.bz2 matterbridge-msglm-e3cafeaf9292f67459ff1d186f68283bfaedf2ae.zip
Add dependencies/vendor (whatsapp)
Diffstat (limited to 'vendor/filippo.io')
-rw-r--r-- | vendor/filippo.io/edwards25519/LICENSE | 27
-rw-r--r-- | vendor/filippo.io/edwards25519/README.md | 14
-rw-r--r-- | vendor/filippo.io/edwards25519/doc.go | 20
-rw-r--r-- | vendor/filippo.io/edwards25519/edwards25519.go | 428
-rw-r--r-- | vendor/filippo.io/edwards25519/extra.go | 343
-rw-r--r-- | vendor/filippo.io/edwards25519/field/fe.go | 419
-rw-r--r-- | vendor/filippo.io/edwards25519/field/fe_amd64.go | 13
-rw-r--r-- | vendor/filippo.io/edwards25519/field/fe_amd64.s | 378
-rw-r--r-- | vendor/filippo.io/edwards25519/field/fe_amd64_noasm.go | 12
-rw-r--r-- | vendor/filippo.io/edwards25519/field/fe_arm64.go | 16
-rw-r--r-- | vendor/filippo.io/edwards25519/field/fe_arm64.s | 42
-rw-r--r-- | vendor/filippo.io/edwards25519/field/fe_arm64_noasm.go | 12
-rw-r--r-- | vendor/filippo.io/edwards25519/field/fe_generic.go | 264
-rw-r--r-- | vendor/filippo.io/edwards25519/scalar.go | 1027
-rw-r--r-- | vendor/filippo.io/edwards25519/scalarmult.go | 214
-rw-r--r-- | vendor/filippo.io/edwards25519/tables.go | 129
16 files changed, 3358 insertions, 0 deletions
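The vendored README and doc.go in the diff below describe the package's intent: low-level group logic on the edwards25519 curve for consumers that cannot use crypto/ed25519 or golang.org/x/crypto/curve25519 directly. As a rough orientation only, here is a minimal, hedged sketch of how a consumer of this vendored library might drive the Point and Scalar types shown in the diff; it is not code from this commit, and it assumes the ScalarBaseMult method from the vendored scalarmult.go (listed in the diffstat but not excerpted here).

```go
package main

import (
	"crypto/rand"
	"fmt"

	"filippo.io/edwards25519"
)

func main() {
	// Derive a scalar from 64 uniformly random bytes; SetUniformBytes
	// reduces the value modulo the group order l.
	var wide [64]byte
	if _, err := rand.Read(wide[:]); err != nil {
		panic(err)
	}
	s, err := edwards25519.NewScalar().SetUniformBytes(wide[:])
	if err != nil {
		panic(err)
	}

	// Compute s*B for the canonical generator B (ScalarBaseMult is
	// assumed from the vendored scalarmult.go) and encode the result
	// as the canonical 32-byte RFC 8032 form.
	p := new(edwards25519.Point).ScalarBaseMult(s)
	fmt.Printf("point: %x\n", p.Bytes())

	// Round-trip the encoding back into a Point and compare.
	q, err := new(edwards25519.Point).SetBytes(p.Bytes())
	if err != nil {
		panic(err)
	}
	fmt.Println("round-trip equal:", p.Equal(q) == 1)
}
```

Per the vendored README, most code should keep using crypto/ed25519 or golang.org/x/crypto/curve25519; this package is only needed where the higher-level primitives require direct group operations, which is why it arrives here solely as a vendored dependency of the WhatsApp bridge changes.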
diff --git a/vendor/filippo.io/edwards25519/LICENSE b/vendor/filippo.io/edwards25519/LICENSE new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/vendor/filippo.io/edwards25519/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/filippo.io/edwards25519/README.md b/vendor/filippo.io/edwards25519/README.md new file mode 100644 index 00000000..e87d1654 --- /dev/null +++ b/vendor/filippo.io/edwards25519/README.md @@ -0,0 +1,14 @@ +# filippo.io/edwards25519 + +``` +import "filippo.io/edwards25519" +``` + +This library implements the edwards25519 elliptic curve, exposing the necessary APIs to build a wide array of higher-level primitives. +Read the docs at [pkg.go.dev/filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519). + +The code is originally derived from Adam Langley's internal implementation in the Go standard library, and includes George Tankersley's [performance improvements](https://golang.org/cl/71950). It was then further developed by Henry de Valence for use in ristretto255. + +Most users don't need this package, and should instead use `crypto/ed25519` for signatures, `golang.org/x/crypto/curve25519` for Diffie-Hellman, or `github.com/gtank/ristretto255` for prime order group logic. However, for anyone currently using a fork of `crypto/ed25519/internal/edwards25519` or `github.com/agl/edwards25519`, this package should be a safer, faster, and more powerful alternative. + +Since this package is meant to curb proliferation of edwards25519 implementations in the Go ecosystem, it welcomes requests for new APIs or reviewable performance improvements. diff --git a/vendor/filippo.io/edwards25519/doc.go b/vendor/filippo.io/edwards25519/doc.go new file mode 100644 index 00000000..d8608b06 --- /dev/null +++ b/vendor/filippo.io/edwards25519/doc.go @@ -0,0 +1,20 @@ +// Copyright (c) 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package edwards25519 implements group logic for the twisted Edwards curve +// +// -x^2 + y^2 = 1 + -(121665/121666)*x^2*y^2 +// +// This is better known as the Edwards curve equivalent to Curve25519, and is +// the curve used by the Ed25519 signature scheme. +// +// Most users don't need this package, and should instead use crypto/ed25519 for +// signatures, golang.org/x/crypto/curve25519 for Diffie-Hellman, or +// github.com/gtank/ristretto255 for prime order group logic. +// +// However, developers who do need to interact with low-level edwards25519 +// operations can use this package, which is an extended version of +// crypto/ed25519/internal/edwards25519 from the standard library repackaged as +// an importable module. +package edwards25519 diff --git a/vendor/filippo.io/edwards25519/edwards25519.go b/vendor/filippo.io/edwards25519/edwards25519.go new file mode 100644 index 00000000..e22a7c2d --- /dev/null +++ b/vendor/filippo.io/edwards25519/edwards25519.go @@ -0,0 +1,428 @@ +// Copyright (c) 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package edwards25519 + +import ( + "errors" + + "filippo.io/edwards25519/field" +) + +// Point types. + +type projP1xP1 struct { + X, Y, Z, T field.Element +} + +type projP2 struct { + X, Y, Z field.Element +} + +// Point represents a point on the edwards25519 curve. +// +// This type works similarly to math/big.Int, and all arguments and receivers +// are allowed to alias. +// +// The zero value is NOT valid, and it may be used only as a receiver. +type Point struct { + // The point is internally represented in extended coordinates (X, Y, Z, T) + // where x = X/Z, y = Y/Z, and xy = T/Z per https://eprint.iacr.org/2008/522. + x, y, z, t field.Element + + // Make the type not comparable (i.e. used with == or as a map key), as + // equivalent points can be represented by different Go values. + _ incomparable +} + +type incomparable [0]func() + +func checkInitialized(points ...*Point) { + for _, p := range points { + if p.x == (field.Element{}) && p.y == (field.Element{}) { + panic("edwards25519: use of uninitialized Point") + } + } +} + +type projCached struct { + YplusX, YminusX, Z, T2d field.Element +} + +type affineCached struct { + YplusX, YminusX, T2d field.Element +} + +// Constructors. + +func (v *projP2) Zero() *projP2 { + v.X.Zero() + v.Y.One() + v.Z.One() + return v +} + +// identity is the point at infinity. +var identity, _ = new(Point).SetBytes([]byte{ + 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}) + +// NewIdentityPoint returns a new Point set to the identity. +func NewIdentityPoint() *Point { + return new(Point).Set(identity) +} + +// generator is the canonical curve basepoint. See TestGenerator for the +// correspondence of this encoding with the values in RFC 8032. +var generator, _ = new(Point).SetBytes([]byte{ + 0x58, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, + 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66}) + +// NewGeneratorPoint returns a new Point set to the canonical generator. 
+func NewGeneratorPoint() *Point { + return new(Point).Set(generator) +} + +func (v *projCached) Zero() *projCached { + v.YplusX.One() + v.YminusX.One() + v.Z.One() + v.T2d.Zero() + return v +} + +func (v *affineCached) Zero() *affineCached { + v.YplusX.One() + v.YminusX.One() + v.T2d.Zero() + return v +} + +// Assignments. + +// Set sets v = u, and returns v. +func (v *Point) Set(u *Point) *Point { + *v = *u + return v +} + +// Encoding. + +// Bytes returns the canonical 32-byte encoding of v, according to RFC 8032, +// Section 5.1.2. +func (v *Point) Bytes() []byte { + // This function is outlined to make the allocations inline in the caller + // rather than happen on the heap. + var buf [32]byte + return v.bytes(&buf) +} + +func (v *Point) bytes(buf *[32]byte) []byte { + checkInitialized(v) + + var zInv, x, y field.Element + zInv.Invert(&v.z) // zInv = 1 / Z + x.Multiply(&v.x, &zInv) // x = X / Z + y.Multiply(&v.y, &zInv) // y = Y / Z + + out := copyFieldElement(buf, &y) + out[31] |= byte(x.IsNegative() << 7) + return out +} + +var feOne = new(field.Element).One() + +// SetBytes sets v = x, where x is a 32-byte encoding of v. If x does not +// represent a valid point on the curve, SetBytes returns nil and an error and +// the receiver is unchanged. Otherwise, SetBytes returns v. +// +// Note that SetBytes accepts all non-canonical encodings of valid points. +// That is, it follows decoding rules that match most implementations in +// the ecosystem rather than RFC 8032. +func (v *Point) SetBytes(x []byte) (*Point, error) { + // Specifically, the non-canonical encodings that are accepted are + // 1) the ones where the field element is not reduced (see the + // (*field.Element).SetBytes docs) and + // 2) the ones where the x-coordinate is zero and the sign bit is set. + // + // This is consistent with crypto/ed25519/internal/edwards25519. Read more + // at https://hdevalence.ca/blog/2020-10-04-its-25519am, specifically the + // "Canonical A, R" section. + + y, err := new(field.Element).SetBytes(x) + if err != nil { + return nil, errors.New("edwards25519: invalid point encoding length") + } + + // -x² + y² = 1 + dx²y² + // x² + dx²y² = x²(dy² + 1) = y² - 1 + // x² = (y² - 1) / (dy² + 1) + + // u = y² - 1 + y2 := new(field.Element).Square(y) + u := new(field.Element).Subtract(y2, feOne) + + // v = dy² + 1 + vv := new(field.Element).Multiply(y2, d) + vv = vv.Add(vv, feOne) + + // x = +√(u/v) + xx, wasSquare := new(field.Element).SqrtRatio(u, vv) + if wasSquare == 0 { + return nil, errors.New("edwards25519: invalid point encoding") + } + + // Select the negative square root if the sign bit is set. + xxNeg := new(field.Element).Negate(xx) + xx = xx.Select(xxNeg, xx, int(x[31]>>7)) + + v.x.Set(xx) + v.y.Set(y) + v.z.One() + v.t.Multiply(xx, y) // xy = T / Z + + return v, nil +} + +func copyFieldElement(buf *[32]byte, v *field.Element) []byte { + copy(buf[:], v.Bytes()) + return buf[:] +} + +// Conversions. 
+ +func (v *projP2) FromP1xP1(p *projP1xP1) *projP2 { + v.X.Multiply(&p.X, &p.T) + v.Y.Multiply(&p.Y, &p.Z) + v.Z.Multiply(&p.Z, &p.T) + return v +} + +func (v *projP2) FromP3(p *Point) *projP2 { + v.X.Set(&p.x) + v.Y.Set(&p.y) + v.Z.Set(&p.z) + return v +} + +func (v *Point) fromP1xP1(p *projP1xP1) *Point { + v.x.Multiply(&p.X, &p.T) + v.y.Multiply(&p.Y, &p.Z) + v.z.Multiply(&p.Z, &p.T) + v.t.Multiply(&p.X, &p.Y) + return v +} + +func (v *Point) fromP2(p *projP2) *Point { + v.x.Multiply(&p.X, &p.Z) + v.y.Multiply(&p.Y, &p.Z) + v.z.Square(&p.Z) + v.t.Multiply(&p.X, &p.Y) + return v +} + +// d is a constant in the curve equation. +var d, _ = new(field.Element).SetBytes([]byte{ + 0xa3, 0x78, 0x59, 0x13, 0xca, 0x4d, 0xeb, 0x75, + 0xab, 0xd8, 0x41, 0x41, 0x4d, 0x0a, 0x70, 0x00, + 0x98, 0xe8, 0x79, 0x77, 0x79, 0x40, 0xc7, 0x8c, + 0x73, 0xfe, 0x6f, 0x2b, 0xee, 0x6c, 0x03, 0x52}) +var d2 = new(field.Element).Add(d, d) + +func (v *projCached) FromP3(p *Point) *projCached { + v.YplusX.Add(&p.y, &p.x) + v.YminusX.Subtract(&p.y, &p.x) + v.Z.Set(&p.z) + v.T2d.Multiply(&p.t, d2) + return v +} + +func (v *affineCached) FromP3(p *Point) *affineCached { + v.YplusX.Add(&p.y, &p.x) + v.YminusX.Subtract(&p.y, &p.x) + v.T2d.Multiply(&p.t, d2) + + var invZ field.Element + invZ.Invert(&p.z) + v.YplusX.Multiply(&v.YplusX, &invZ) + v.YminusX.Multiply(&v.YminusX, &invZ) + v.T2d.Multiply(&v.T2d, &invZ) + return v +} + +// (Re)addition and subtraction. + +// Add sets v = p + q, and returns v. +func (v *Point) Add(p, q *Point) *Point { + checkInitialized(p, q) + qCached := new(projCached).FromP3(q) + result := new(projP1xP1).Add(p, qCached) + return v.fromP1xP1(result) +} + +// Subtract sets v = p - q, and returns v. +func (v *Point) Subtract(p, q *Point) *Point { + checkInitialized(p, q) + qCached := new(projCached).FromP3(q) + result := new(projP1xP1).Sub(p, qCached) + return v.fromP1xP1(result) +} + +func (v *projP1xP1) Add(p *Point, q *projCached) *projP1xP1 { + var YplusX, YminusX, PP, MM, TT2d, ZZ2 field.Element + + YplusX.Add(&p.y, &p.x) + YminusX.Subtract(&p.y, &p.x) + + PP.Multiply(&YplusX, &q.YplusX) + MM.Multiply(&YminusX, &q.YminusX) + TT2d.Multiply(&p.t, &q.T2d) + ZZ2.Multiply(&p.z, &q.Z) + + ZZ2.Add(&ZZ2, &ZZ2) + + v.X.Subtract(&PP, &MM) + v.Y.Add(&PP, &MM) + v.Z.Add(&ZZ2, &TT2d) + v.T.Subtract(&ZZ2, &TT2d) + return v +} + +func (v *projP1xP1) Sub(p *Point, q *projCached) *projP1xP1 { + var YplusX, YminusX, PP, MM, TT2d, ZZ2 field.Element + + YplusX.Add(&p.y, &p.x) + YminusX.Subtract(&p.y, &p.x) + + PP.Multiply(&YplusX, &q.YminusX) // flipped sign + MM.Multiply(&YminusX, &q.YplusX) // flipped sign + TT2d.Multiply(&p.t, &q.T2d) + ZZ2.Multiply(&p.z, &q.Z) + + ZZ2.Add(&ZZ2, &ZZ2) + + v.X.Subtract(&PP, &MM) + v.Y.Add(&PP, &MM) + v.Z.Subtract(&ZZ2, &TT2d) // flipped sign + v.T.Add(&ZZ2, &TT2d) // flipped sign + return v +} + +func (v *projP1xP1) AddAffine(p *Point, q *affineCached) *projP1xP1 { + var YplusX, YminusX, PP, MM, TT2d, Z2 field.Element + + YplusX.Add(&p.y, &p.x) + YminusX.Subtract(&p.y, &p.x) + + PP.Multiply(&YplusX, &q.YplusX) + MM.Multiply(&YminusX, &q.YminusX) + TT2d.Multiply(&p.t, &q.T2d) + + Z2.Add(&p.z, &p.z) + + v.X.Subtract(&PP, &MM) + v.Y.Add(&PP, &MM) + v.Z.Add(&Z2, &TT2d) + v.T.Subtract(&Z2, &TT2d) + return v +} + +func (v *projP1xP1) SubAffine(p *Point, q *affineCached) *projP1xP1 { + var YplusX, YminusX, PP, MM, TT2d, Z2 field.Element + + YplusX.Add(&p.y, &p.x) + YminusX.Subtract(&p.y, &p.x) + + PP.Multiply(&YplusX, &q.YminusX) // flipped sign + MM.Multiply(&YminusX, &q.YplusX) // 
flipped sign + TT2d.Multiply(&p.t, &q.T2d) + + Z2.Add(&p.z, &p.z) + + v.X.Subtract(&PP, &MM) + v.Y.Add(&PP, &MM) + v.Z.Subtract(&Z2, &TT2d) // flipped sign + v.T.Add(&Z2, &TT2d) // flipped sign + return v +} + +// Doubling. + +func (v *projP1xP1) Double(p *projP2) *projP1xP1 { + var XX, YY, ZZ2, XplusYsq field.Element + + XX.Square(&p.X) + YY.Square(&p.Y) + ZZ2.Square(&p.Z) + ZZ2.Add(&ZZ2, &ZZ2) + XplusYsq.Add(&p.X, &p.Y) + XplusYsq.Square(&XplusYsq) + + v.Y.Add(&YY, &XX) + v.Z.Subtract(&YY, &XX) + + v.X.Subtract(&XplusYsq, &v.Y) + v.T.Subtract(&ZZ2, &v.Z) + return v +} + +// Negation. + +// Negate sets v = -p, and returns v. +func (v *Point) Negate(p *Point) *Point { + checkInitialized(p) + v.x.Negate(&p.x) + v.y.Set(&p.y) + v.z.Set(&p.z) + v.t.Negate(&p.t) + return v +} + +// Equal returns 1 if v is equivalent to u, and 0 otherwise. +func (v *Point) Equal(u *Point) int { + checkInitialized(v, u) + + var t1, t2, t3, t4 field.Element + t1.Multiply(&v.x, &u.z) + t2.Multiply(&u.x, &v.z) + t3.Multiply(&v.y, &u.z) + t4.Multiply(&u.y, &v.z) + + return t1.Equal(&t2) & t3.Equal(&t4) +} + +// Constant-time operations + +// Select sets v to a if cond == 1 and to b if cond == 0. +func (v *projCached) Select(a, b *projCached, cond int) *projCached { + v.YplusX.Select(&a.YplusX, &b.YplusX, cond) + v.YminusX.Select(&a.YminusX, &b.YminusX, cond) + v.Z.Select(&a.Z, &b.Z, cond) + v.T2d.Select(&a.T2d, &b.T2d, cond) + return v +} + +// Select sets v to a if cond == 1 and to b if cond == 0. +func (v *affineCached) Select(a, b *affineCached, cond int) *affineCached { + v.YplusX.Select(&a.YplusX, &b.YplusX, cond) + v.YminusX.Select(&a.YminusX, &b.YminusX, cond) + v.T2d.Select(&a.T2d, &b.T2d, cond) + return v +} + +// CondNeg negates v if cond == 1 and leaves it unchanged if cond == 0. +func (v *projCached) CondNeg(cond int) *projCached { + v.YplusX.Swap(&v.YminusX, cond) + v.T2d.Select(new(field.Element).Negate(&v.T2d), &v.T2d, cond) + return v +} + +// CondNeg negates v if cond == 1 and leaves it unchanged if cond == 0. +func (v *affineCached) CondNeg(cond int) *affineCached { + v.YplusX.Swap(&v.YminusX, cond) + v.T2d.Select(new(field.Element).Negate(&v.T2d), &v.T2d, cond) + return v +} diff --git a/vendor/filippo.io/edwards25519/extra.go b/vendor/filippo.io/edwards25519/extra.go new file mode 100644 index 00000000..f5e59080 --- /dev/null +++ b/vendor/filippo.io/edwards25519/extra.go @@ -0,0 +1,343 @@ +// Copyright (c) 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package edwards25519 + +// This file contains additional functionality that is not included in the +// upstream crypto/ed25519/internal/edwards25519 package. + +import ( + "errors" + + "filippo.io/edwards25519/field" +) + +// ExtendedCoordinates returns v in extended coordinates (X:Y:Z:T) where +// x = X/Z, y = Y/Z, and xy = T/Z as in https://eprint.iacr.org/2008/522. +func (v *Point) ExtendedCoordinates() (X, Y, Z, T *field.Element) { + // This function is outlined to make the allocations inline in the caller + // rather than happen on the heap. Don't change the style without making + // sure it doesn't increase the inliner cost. 
+ var e [4]field.Element + X, Y, Z, T = v.extendedCoordinates(&e) + return +} + +func (v *Point) extendedCoordinates(e *[4]field.Element) (X, Y, Z, T *field.Element) { + checkInitialized(v) + X = e[0].Set(&v.x) + Y = e[1].Set(&v.y) + Z = e[2].Set(&v.z) + T = e[3].Set(&v.t) + return +} + +// SetExtendedCoordinates sets v = (X:Y:Z:T) in extended coordinates where +// x = X/Z, y = Y/Z, and xy = T/Z as in https://eprint.iacr.org/2008/522. +// +// If the coordinates are invalid or don't represent a valid point on the curve, +// SetExtendedCoordinates returns nil and an error and the receiver is +// unchanged. Otherwise, SetExtendedCoordinates returns v. +func (v *Point) SetExtendedCoordinates(X, Y, Z, T *field.Element) (*Point, error) { + if !isOnCurve(X, Y, Z, T) { + return nil, errors.New("edwards25519: invalid point coordinates") + } + v.x.Set(X) + v.y.Set(Y) + v.z.Set(Z) + v.t.Set(T) + return v, nil +} + +func isOnCurve(X, Y, Z, T *field.Element) bool { + var lhs, rhs field.Element + XX := new(field.Element).Square(X) + YY := new(field.Element).Square(Y) + ZZ := new(field.Element).Square(Z) + TT := new(field.Element).Square(T) + // -x² + y² = 1 + dx²y² + // -(X/Z)² + (Y/Z)² = 1 + d(T/Z)² + // -X² + Y² = Z² + dT² + lhs.Subtract(YY, XX) + rhs.Multiply(d, TT).Add(&rhs, ZZ) + if lhs.Equal(&rhs) != 1 { + return false + } + // xy = T/Z + // XY/Z² = T/Z + // XY = TZ + lhs.Multiply(X, Y) + rhs.Multiply(T, Z) + return lhs.Equal(&rhs) == 1 +} + +// BytesMontgomery converts v to a point on the birationally-equivalent +// Curve25519 Montgomery curve, and returns its canonical 32 bytes encoding +// according to RFC 7748. +// +// Note that BytesMontgomery only encodes the u-coordinate, so v and -v encode +// to the same value. If v is the identity point, BytesMontgomery returns 32 +// zero bytes, analogously to the X25519 function. +func (v *Point) BytesMontgomery() []byte { + // This function is outlined to make the allocations inline in the caller + // rather than happen on the heap. + var buf [32]byte + return v.bytesMontgomery(&buf) +} + +func (v *Point) bytesMontgomery(buf *[32]byte) []byte { + checkInitialized(v) + + // RFC 7748, Section 4.1 provides the bilinear map to calculate the + // Montgomery u-coordinate + // + // u = (1 + y) / (1 - y) + // + // where y = Y / Z. + + var y, recip, u field.Element + + y.Multiply(&v.y, y.Invert(&v.z)) // y = Y / Z + recip.Invert(recip.Subtract(feOne, &y)) // r = 1/(1 - y) + u.Multiply(u.Add(feOne, &y), &recip) // u = (1 + y)*r + + return copyFieldElement(buf, &u) +} + +// MultByCofactor sets v = 8 * p, and returns v. +func (v *Point) MultByCofactor(p *Point) *Point { + checkInitialized(p) + result := projP1xP1{} + pp := (&projP2{}).FromP3(p) + result.Double(pp) + pp.FromP1xP1(&result) + result.Double(pp) + pp.FromP1xP1(&result) + result.Double(pp) + return v.fromP1xP1(&result) +} + +// Given k > 0, set s = s**(2*i). +func (s *Scalar) pow2k(k int) { + for i := 0; i < k; i++ { + s.Multiply(s, s) + } +} + +// Invert sets s to the inverse of a nonzero scalar v, and returns s. +// +// If t is zero, Invert returns zero. +func (s *Scalar) Invert(t *Scalar) *Scalar { + // Uses a hardcoded sliding window of width 4. 
+ var table [8]Scalar + var tt Scalar + tt.Multiply(t, t) + table[0] = *t + for i := 0; i < 7; i++ { + table[i+1].Multiply(&table[i], &tt) + } + // Now table = [t**1, t**3, t**7, t**11, t**13, t**15] + // so t**k = t[k/2] for odd k + + // To compute the sliding window digits, use the following Sage script: + + // sage: import itertools + // sage: def sliding_window(w,k): + // ....: digits = [] + // ....: while k > 0: + // ....: if k % 2 == 1: + // ....: kmod = k % (2**w) + // ....: digits.append(kmod) + // ....: k = k - kmod + // ....: else: + // ....: digits.append(0) + // ....: k = k // 2 + // ....: return digits + + // Now we can compute s roughly as follows: + + // sage: s = 1 + // sage: for coeff in reversed(sliding_window(4,l-2)): + // ....: s = s*s + // ....: if coeff > 0 : + // ....: s = s*t**coeff + + // This works on one bit at a time, with many runs of zeros. + // The digits can be collapsed into [(count, coeff)] as follows: + + // sage: [(len(list(group)),d) for d,group in itertools.groupby(sliding_window(4,l-2))] + + // Entries of the form (k, 0) turn into pow2k(k) + // Entries of the form (1, coeff) turn into a squaring and then a table lookup. + // We can fold the squaring into the previous pow2k(k) as pow2k(k+1). + + *s = table[1/2] + s.pow2k(127 + 1) + s.Multiply(s, &table[1/2]) + s.pow2k(4 + 1) + s.Multiply(s, &table[9/2]) + s.pow2k(3 + 1) + s.Multiply(s, &table[11/2]) + s.pow2k(3 + 1) + s.Multiply(s, &table[13/2]) + s.pow2k(3 + 1) + s.Multiply(s, &table[15/2]) + s.pow2k(4 + 1) + s.Multiply(s, &table[7/2]) + s.pow2k(4 + 1) + s.Multiply(s, &table[15/2]) + s.pow2k(3 + 1) + s.Multiply(s, &table[5/2]) + s.pow2k(3 + 1) + s.Multiply(s, &table[1/2]) + s.pow2k(4 + 1) + s.Multiply(s, &table[15/2]) + s.pow2k(4 + 1) + s.Multiply(s, &table[15/2]) + s.pow2k(4 + 1) + s.Multiply(s, &table[7/2]) + s.pow2k(3 + 1) + s.Multiply(s, &table[3/2]) + s.pow2k(4 + 1) + s.Multiply(s, &table[11/2]) + s.pow2k(5 + 1) + s.Multiply(s, &table[11/2]) + s.pow2k(9 + 1) + s.Multiply(s, &table[9/2]) + s.pow2k(3 + 1) + s.Multiply(s, &table[3/2]) + s.pow2k(4 + 1) + s.Multiply(s, &table[3/2]) + s.pow2k(4 + 1) + s.Multiply(s, &table[3/2]) + s.pow2k(4 + 1) + s.Multiply(s, &table[9/2]) + s.pow2k(3 + 1) + s.Multiply(s, &table[7/2]) + s.pow2k(3 + 1) + s.Multiply(s, &table[3/2]) + s.pow2k(3 + 1) + s.Multiply(s, &table[13/2]) + s.pow2k(3 + 1) + s.Multiply(s, &table[7/2]) + s.pow2k(4 + 1) + s.Multiply(s, &table[9/2]) + s.pow2k(3 + 1) + s.Multiply(s, &table[15/2]) + s.pow2k(4 + 1) + s.Multiply(s, &table[11/2]) + + return s +} + +// MultiScalarMult sets v = sum(scalars[i] * points[i]), and returns v. +// +// Execution time depends only on the lengths of the two slices, which must match. +func (v *Point) MultiScalarMult(scalars []*Scalar, points []*Point) *Point { + if len(scalars) != len(points) { + panic("edwards25519: called MultiScalarMult with different size inputs") + } + checkInitialized(points...) + + // Proceed as in the single-base case, but share doublings + // between each point in the multiscalar equation. 
+ + // Build lookup tables for each point + tables := make([]projLookupTable, len(points)) + for i := range tables { + tables[i].FromP3(points[i]) + } + // Compute signed radix-16 digits for each scalar + digits := make([][64]int8, len(scalars)) + for i := range digits { + digits[i] = scalars[i].signedRadix16() + } + + // Unwrap first loop iteration to save computing 16*identity + multiple := &projCached{} + tmp1 := &projP1xP1{} + tmp2 := &projP2{} + // Lookup-and-add the appropriate multiple of each input point + for j := range tables { + tables[j].SelectInto(multiple, digits[j][63]) + tmp1.Add(v, multiple) // tmp1 = v + x_(j,63)*Q in P1xP1 coords + v.fromP1xP1(tmp1) // update v + } + tmp2.FromP3(v) // set up tmp2 = v in P2 coords for next iteration + for i := 62; i >= 0; i-- { + tmp1.Double(tmp2) // tmp1 = 2*(prev) in P1xP1 coords + tmp2.FromP1xP1(tmp1) // tmp2 = 2*(prev) in P2 coords + tmp1.Double(tmp2) // tmp1 = 4*(prev) in P1xP1 coords + tmp2.FromP1xP1(tmp1) // tmp2 = 4*(prev) in P2 coords + tmp1.Double(tmp2) // tmp1 = 8*(prev) in P1xP1 coords + tmp2.FromP1xP1(tmp1) // tmp2 = 8*(prev) in P2 coords + tmp1.Double(tmp2) // tmp1 = 16*(prev) in P1xP1 coords + v.fromP1xP1(tmp1) // v = 16*(prev) in P3 coords + // Lookup-and-add the appropriate multiple of each input point + for j := range tables { + tables[j].SelectInto(multiple, digits[j][i]) + tmp1.Add(v, multiple) // tmp1 = v + x_(j,i)*Q in P1xP1 coords + v.fromP1xP1(tmp1) // update v + } + tmp2.FromP3(v) // set up tmp2 = v in P2 coords for next iteration + } + return v +} + +// VarTimeMultiScalarMult sets v = sum(scalars[i] * points[i]), and returns v. +// +// Execution time depends on the inputs. +func (v *Point) VarTimeMultiScalarMult(scalars []*Scalar, points []*Point) *Point { + if len(scalars) != len(points) { + panic("edwards25519: called VarTimeMultiScalarMult with different size inputs") + } + checkInitialized(points...) + + // Generalize double-base NAF computation to arbitrary sizes. + // Here all the points are dynamic, so we only use the smaller + // tables. + + // Build lookup tables for each point + tables := make([]nafLookupTable5, len(points)) + for i := range tables { + tables[i].FromP3(points[i]) + } + // Compute a NAF for each scalar + nafs := make([][256]int8, len(scalars)) + for i := range nafs { + nafs[i] = scalars[i].nonAdjacentForm(5) + } + + multiple := &projCached{} + tmp1 := &projP1xP1{} + tmp2 := &projP2{} + tmp2.Zero() + + // Move from high to low bits, doubling the accumulator + // at each iteration and checking whether there is a nonzero + // coefficient to look up a multiple of. + // + // Skip trying to find the first nonzero coefficent, because + // searching might be more work than a few extra doublings. + for i := 255; i >= 0; i-- { + tmp1.Double(tmp2) + + for j := range nafs { + if nafs[j][i] > 0 { + v.fromP1xP1(tmp1) + tables[j].SelectInto(multiple, nafs[j][i]) + tmp1.Add(v, multiple) + } else if nafs[j][i] < 0 { + v.fromP1xP1(tmp1) + tables[j].SelectInto(multiple, -nafs[j][i]) + tmp1.Sub(v, multiple) + } + } + + tmp2.FromP1xP1(tmp1) + } + + v.fromP2(tmp2) + return v +} diff --git a/vendor/filippo.io/edwards25519/field/fe.go b/vendor/filippo.io/edwards25519/field/fe.go new file mode 100644 index 00000000..e5f53859 --- /dev/null +++ b/vendor/filippo.io/edwards25519/field/fe.go @@ -0,0 +1,419 @@ +// Copyright (c) 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package field implements fast arithmetic modulo 2^255-19. +package field + +import ( + "crypto/subtle" + "encoding/binary" + "errors" + "math/bits" +) + +// Element represents an element of the field GF(2^255-19). Note that this +// is not a cryptographically secure group, and should only be used to interact +// with edwards25519.Point coordinates. +// +// This type works similarly to math/big.Int, and all arguments and receivers +// are allowed to alias. +// +// The zero value is a valid zero element. +type Element struct { + // An element t represents the integer + // t.l0 + t.l1*2^51 + t.l2*2^102 + t.l3*2^153 + t.l4*2^204 + // + // Between operations, all limbs are expected to be lower than 2^52. + l0 uint64 + l1 uint64 + l2 uint64 + l3 uint64 + l4 uint64 +} + +const maskLow51Bits uint64 = (1 << 51) - 1 + +var feZero = &Element{0, 0, 0, 0, 0} + +// Zero sets v = 0, and returns v. +func (v *Element) Zero() *Element { + *v = *feZero + return v +} + +var feOne = &Element{1, 0, 0, 0, 0} + +// One sets v = 1, and returns v. +func (v *Element) One() *Element { + *v = *feOne + return v +} + +// reduce reduces v modulo 2^255 - 19 and returns it. +func (v *Element) reduce() *Element { + v.carryPropagate() + + // After the light reduction we now have a field element representation + // v < 2^255 + 2^13 * 19, but need v < 2^255 - 19. + + // If v >= 2^255 - 19, then v + 19 >= 2^255, which would overflow 2^255 - 1, + // generating a carry. That is, c will be 0 if v < 2^255 - 19, and 1 otherwise. + c := (v.l0 + 19) >> 51 + c = (v.l1 + c) >> 51 + c = (v.l2 + c) >> 51 + c = (v.l3 + c) >> 51 + c = (v.l4 + c) >> 51 + + // If v < 2^255 - 19 and c = 0, this will be a no-op. Otherwise, it's + // effectively applying the reduction identity to the carry. + v.l0 += 19 * c + + v.l1 += v.l0 >> 51 + v.l0 = v.l0 & maskLow51Bits + v.l2 += v.l1 >> 51 + v.l1 = v.l1 & maskLow51Bits + v.l3 += v.l2 >> 51 + v.l2 = v.l2 & maskLow51Bits + v.l4 += v.l3 >> 51 + v.l3 = v.l3 & maskLow51Bits + // no additional carry + v.l4 = v.l4 & maskLow51Bits + + return v +} + +// Add sets v = a + b, and returns v. +func (v *Element) Add(a, b *Element) *Element { + v.l0 = a.l0 + b.l0 + v.l1 = a.l1 + b.l1 + v.l2 = a.l2 + b.l2 + v.l3 = a.l3 + b.l3 + v.l4 = a.l4 + b.l4 + // Using the generic implementation here is actually faster than the + // assembly. Probably because the body of this function is so simple that + // the compiler can figure out better optimizations by inlining the carry + // propagation. + return v.carryPropagateGeneric() +} + +// Subtract sets v = a - b, and returns v. +func (v *Element) Subtract(a, b *Element) *Element { + // We first add 2 * p, to guarantee the subtraction won't underflow, and + // then subtract b (which can be up to 2^255 + 2^13 * 19). + v.l0 = (a.l0 + 0xFFFFFFFFFFFDA) - b.l0 + v.l1 = (a.l1 + 0xFFFFFFFFFFFFE) - b.l1 + v.l2 = (a.l2 + 0xFFFFFFFFFFFFE) - b.l2 + v.l3 = (a.l3 + 0xFFFFFFFFFFFFE) - b.l3 + v.l4 = (a.l4 + 0xFFFFFFFFFFFFE) - b.l4 + return v.carryPropagate() +} + +// Negate sets v = -a, and returns v. +func (v *Element) Negate(a *Element) *Element { + return v.Subtract(feZero, a) +} + +// Invert sets v = 1/z mod p, and returns v. +// +// If z == 0, Invert returns v = 0. +func (v *Element) Invert(z *Element) *Element { + // Inversion is implemented as exponentiation with exponent p − 2. It uses the + // same sequence of 255 squarings and 11 multiplications as [Curve25519]. 
+ var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t Element + + z2.Square(z) // 2 + t.Square(&z2) // 4 + t.Square(&t) // 8 + z9.Multiply(&t, z) // 9 + z11.Multiply(&z9, &z2) // 11 + t.Square(&z11) // 22 + z2_5_0.Multiply(&t, &z9) // 31 = 2^5 - 2^0 + + t.Square(&z2_5_0) // 2^6 - 2^1 + for i := 0; i < 4; i++ { + t.Square(&t) // 2^10 - 2^5 + } + z2_10_0.Multiply(&t, &z2_5_0) // 2^10 - 2^0 + + t.Square(&z2_10_0) // 2^11 - 2^1 + for i := 0; i < 9; i++ { + t.Square(&t) // 2^20 - 2^10 + } + z2_20_0.Multiply(&t, &z2_10_0) // 2^20 - 2^0 + + t.Square(&z2_20_0) // 2^21 - 2^1 + for i := 0; i < 19; i++ { + t.Square(&t) // 2^40 - 2^20 + } + t.Multiply(&t, &z2_20_0) // 2^40 - 2^0 + + t.Square(&t) // 2^41 - 2^1 + for i := 0; i < 9; i++ { + t.Square(&t) // 2^50 - 2^10 + } + z2_50_0.Multiply(&t, &z2_10_0) // 2^50 - 2^0 + + t.Square(&z2_50_0) // 2^51 - 2^1 + for i := 0; i < 49; i++ { + t.Square(&t) // 2^100 - 2^50 + } + z2_100_0.Multiply(&t, &z2_50_0) // 2^100 - 2^0 + + t.Square(&z2_100_0) // 2^101 - 2^1 + for i := 0; i < 99; i++ { + t.Square(&t) // 2^200 - 2^100 + } + t.Multiply(&t, &z2_100_0) // 2^200 - 2^0 + + t.Square(&t) // 2^201 - 2^1 + for i := 0; i < 49; i++ { + t.Square(&t) // 2^250 - 2^50 + } + t.Multiply(&t, &z2_50_0) // 2^250 - 2^0 + + t.Square(&t) // 2^251 - 2^1 + t.Square(&t) // 2^252 - 2^2 + t.Square(&t) // 2^253 - 2^3 + t.Square(&t) // 2^254 - 2^4 + t.Square(&t) // 2^255 - 2^5 + + return v.Multiply(&t, &z11) // 2^255 - 21 +} + +// Set sets v = a, and returns v. +func (v *Element) Set(a *Element) *Element { + *v = *a + return v +} + +// SetBytes sets v to x, where x is a 32-byte little-endian encoding. If x is +// not of the right length, SetUniformBytes returns nil and an error, and the +// receiver is unchanged. +// +// Consistent with RFC 7748, the most significant bit (the high bit of the +// last byte) is ignored, and non-canonical values (2^255-19 through 2^255-1) +// are accepted. Note that this is laxer than specified by RFC 8032. +func (v *Element) SetBytes(x []byte) (*Element, error) { + if len(x) != 32 { + return nil, errors.New("edwards25519: invalid field element input size") + } + + // Bits 0:51 (bytes 0:8, bits 0:64, shift 0, mask 51). + v.l0 = binary.LittleEndian.Uint64(x[0:8]) + v.l0 &= maskLow51Bits + // Bits 51:102 (bytes 6:14, bits 48:112, shift 3, mask 51). + v.l1 = binary.LittleEndian.Uint64(x[6:14]) >> 3 + v.l1 &= maskLow51Bits + // Bits 102:153 (bytes 12:20, bits 96:160, shift 6, mask 51). + v.l2 = binary.LittleEndian.Uint64(x[12:20]) >> 6 + v.l2 &= maskLow51Bits + // Bits 153:204 (bytes 19:27, bits 152:216, shift 1, mask 51). + v.l3 = binary.LittleEndian.Uint64(x[19:27]) >> 1 + v.l3 &= maskLow51Bits + // Bits 204:251 (bytes 24:32, bits 192:256, shift 12, mask 51). + // Note: not bytes 25:33, shift 4, to avoid overread. + v.l4 = binary.LittleEndian.Uint64(x[24:32]) >> 12 + v.l4 &= maskLow51Bits + + return v, nil +} + +// Bytes returns the canonical 32-byte little-endian encoding of v. +func (v *Element) Bytes() []byte { + // This function is outlined to make the allocations inline in the caller + // rather than happen on the heap. 
+ var out [32]byte + return v.bytes(&out) +} + +func (v *Element) bytes(out *[32]byte) []byte { + t := *v + t.reduce() + + var buf [8]byte + for i, l := range [5]uint64{t.l0, t.l1, t.l2, t.l3, t.l4} { + bitsOffset := i * 51 + binary.LittleEndian.PutUint64(buf[:], l<<uint(bitsOffset%8)) + for i, bb := range buf { + off := bitsOffset/8 + i + if off >= len(out) { + break + } + out[off] |= bb + } + } + + return out[:] +} + +// Equal returns 1 if v and u are equal, and 0 otherwise. +func (v *Element) Equal(u *Element) int { + sa, sv := u.Bytes(), v.Bytes() + return subtle.ConstantTimeCompare(sa, sv) +} + +// mask64Bits returns 0xffffffff if cond is 1, and 0 otherwise. +func mask64Bits(cond int) uint64 { return ^(uint64(cond) - 1) } + +// Select sets v to a if cond == 1, and to b if cond == 0. +func (v *Element) Select(a, b *Element, cond int) *Element { + m := mask64Bits(cond) + v.l0 = (m & a.l0) | (^m & b.l0) + v.l1 = (m & a.l1) | (^m & b.l1) + v.l2 = (m & a.l2) | (^m & b.l2) + v.l3 = (m & a.l3) | (^m & b.l3) + v.l4 = (m & a.l4) | (^m & b.l4) + return v +} + +// Swap swaps v and u if cond == 1 or leaves them unchanged if cond == 0, and returns v. +func (v *Element) Swap(u *Element, cond int) { + m := mask64Bits(cond) + t := m & (v.l0 ^ u.l0) + v.l0 ^= t + u.l0 ^= t + t = m & (v.l1 ^ u.l1) + v.l1 ^= t + u.l1 ^= t + t = m & (v.l2 ^ u.l2) + v.l2 ^= t + u.l2 ^= t + t = m & (v.l3 ^ u.l3) + v.l3 ^= t + u.l3 ^= t + t = m & (v.l4 ^ u.l4) + v.l4 ^= t + u.l4 ^= t +} + +// IsNegative returns 1 if v is negative, and 0 otherwise. +func (v *Element) IsNegative() int { + return int(v.Bytes()[0] & 1) +} + +// Absolute sets v to |u|, and returns v. +func (v *Element) Absolute(u *Element) *Element { + return v.Select(new(Element).Negate(u), u, u.IsNegative()) +} + +// Multiply sets v = x * y, and returns v. +func (v *Element) Multiply(x, y *Element) *Element { + feMul(v, x, y) + return v +} + +// Square sets v = x * x, and returns v. +func (v *Element) Square(x *Element) *Element { + feSquare(v, x) + return v +} + +// Mult32 sets v = x * y, and returns v. +func (v *Element) Mult32(x *Element, y uint32) *Element { + x0lo, x0hi := mul51(x.l0, y) + x1lo, x1hi := mul51(x.l1, y) + x2lo, x2hi := mul51(x.l2, y) + x3lo, x3hi := mul51(x.l3, y) + x4lo, x4hi := mul51(x.l4, y) + v.l0 = x0lo + 19*x4hi // carried over per the reduction identity + v.l1 = x1lo + x0hi + v.l2 = x2lo + x1hi + v.l3 = x3lo + x2hi + v.l4 = x4lo + x3hi + // The hi portions are going to be only 32 bits, plus any previous excess, + // so we can skip the carry propagation. + return v +} + +// mul51 returns lo + hi * 2⁵¹ = a * b. +func mul51(a uint64, b uint32) (lo uint64, hi uint64) { + mh, ml := bits.Mul64(a, uint64(b)) + lo = ml & maskLow51Bits + hi = (mh << 13) | (ml >> 51) + return +} + +// Pow22523 set v = x^((p-5)/8), and returns v. (p-5)/8 is 2^252-3. 
+func (v *Element) Pow22523(x *Element) *Element { + var t0, t1, t2 Element + + t0.Square(x) // x^2 + t1.Square(&t0) // x^4 + t1.Square(&t1) // x^8 + t1.Multiply(x, &t1) // x^9 + t0.Multiply(&t0, &t1) // x^11 + t0.Square(&t0) // x^22 + t0.Multiply(&t1, &t0) // x^31 + t1.Square(&t0) // x^62 + for i := 1; i < 5; i++ { // x^992 + t1.Square(&t1) + } + t0.Multiply(&t1, &t0) // x^1023 -> 1023 = 2^10 - 1 + t1.Square(&t0) // 2^11 - 2 + for i := 1; i < 10; i++ { // 2^20 - 2^10 + t1.Square(&t1) + } + t1.Multiply(&t1, &t0) // 2^20 - 1 + t2.Square(&t1) // 2^21 - 2 + for i := 1; i < 20; i++ { // 2^40 - 2^20 + t2.Square(&t2) + } + t1.Multiply(&t2, &t1) // 2^40 - 1 + t1.Square(&t1) // 2^41 - 2 + for i := 1; i < 10; i++ { // 2^50 - 2^10 + t1.Square(&t1) + } + t0.Multiply(&t1, &t0) // 2^50 - 1 + t1.Square(&t0) // 2^51 - 2 + for i := 1; i < 50; i++ { // 2^100 - 2^50 + t1.Square(&t1) + } + t1.Multiply(&t1, &t0) // 2^100 - 1 + t2.Square(&t1) // 2^101 - 2 + for i := 1; i < 100; i++ { // 2^200 - 2^100 + t2.Square(&t2) + } + t1.Multiply(&t2, &t1) // 2^200 - 1 + t1.Square(&t1) // 2^201 - 2 + for i := 1; i < 50; i++ { // 2^250 - 2^50 + t1.Square(&t1) + } + t0.Multiply(&t1, &t0) // 2^250 - 1 + t0.Square(&t0) // 2^251 - 2 + t0.Square(&t0) // 2^252 - 4 + return v.Multiply(&t0, x) // 2^252 - 3 -> x^(2^252-3) +} + +// sqrtM1 is 2^((p-1)/4), which squared is equal to -1 by Euler's Criterion. +var sqrtM1 = &Element{1718705420411056, 234908883556509, + 2233514472574048, 2117202627021982, 765476049583133} + +// SqrtRatio sets r to the non-negative square root of the ratio of u and v. +// +// If u/v is square, SqrtRatio returns r and 1. If u/v is not square, SqrtRatio +// sets r according to Section 4.3 of draft-irtf-cfrg-ristretto255-decaf448-00, +// and returns r and 0. +func (r *Element) SqrtRatio(u, v *Element) (rr *Element, wasSquare int) { + var a, b Element + + // r = (u * v3) * (u * v7)^((p-5)/8) + v2 := a.Square(v) + uv3 := b.Multiply(u, b.Multiply(v2, v)) + uv7 := a.Multiply(uv3, a.Square(v2)) + r.Multiply(uv3, r.Pow22523(uv7)) + + check := a.Multiply(v, a.Square(r)) // check = v * r^2 + + uNeg := b.Negate(u) + correctSignSqrt := check.Equal(u) + flippedSignSqrt := check.Equal(uNeg) + flippedSignSqrtI := check.Equal(uNeg.Multiply(uNeg, sqrtM1)) + + rPrime := b.Multiply(r, sqrtM1) // r_prime = SQRT_M1 * r + // r = CT_SELECT(r_prime IF flipped_sign_sqrt | flipped_sign_sqrt_i ELSE r) + r.Select(rPrime, r, flippedSignSqrt|flippedSignSqrtI) + + r.Absolute(r) // Choose the nonnegative square root. + return r, correctSignSqrt | flippedSignSqrt +} diff --git a/vendor/filippo.io/edwards25519/field/fe_amd64.go b/vendor/filippo.io/edwards25519/field/fe_amd64.go new file mode 100644 index 00000000..44dc8e8c --- /dev/null +++ b/vendor/filippo.io/edwards25519/field/fe_amd64.go @@ -0,0 +1,13 @@ +// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT. + +// +build amd64,gc,!purego + +package field + +// feMul sets out = a * b. It works like feMulGeneric. +//go:noescape +func feMul(out *Element, a *Element, b *Element) + +// feSquare sets out = a * a. It works like feSquareGeneric. 
+//go:noescape +func feSquare(out *Element, a *Element) diff --git a/vendor/filippo.io/edwards25519/field/fe_amd64.s b/vendor/filippo.io/edwards25519/field/fe_amd64.s new file mode 100644 index 00000000..0aa1e86d --- /dev/null +++ b/vendor/filippo.io/edwards25519/field/fe_amd64.s @@ -0,0 +1,378 @@ +// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT. + +// +build amd64,gc,!purego + +#include "textflag.h" + +// func feMul(out *Element, a *Element, b *Element) +TEXT ·feMul(SB), NOSPLIT, $0-24 + MOVQ a+8(FP), CX + MOVQ b+16(FP), BX + + // r0 = a0×b0 + MOVQ (CX), AX + MULQ (BX) + MOVQ AX, DI + MOVQ DX, SI + + // r0 += 19×a1×b4 + MOVQ 8(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 32(BX) + ADDQ AX, DI + ADCQ DX, SI + + // r0 += 19×a2×b3 + MOVQ 16(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 24(BX) + ADDQ AX, DI + ADCQ DX, SI + + // r0 += 19×a3×b2 + MOVQ 24(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 16(BX) + ADDQ AX, DI + ADCQ DX, SI + + // r0 += 19×a4×b1 + MOVQ 32(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 8(BX) + ADDQ AX, DI + ADCQ DX, SI + + // r1 = a0×b1 + MOVQ (CX), AX + MULQ 8(BX) + MOVQ AX, R9 + MOVQ DX, R8 + + // r1 += a1×b0 + MOVQ 8(CX), AX + MULQ (BX) + ADDQ AX, R9 + ADCQ DX, R8 + + // r1 += 19×a2×b4 + MOVQ 16(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 32(BX) + ADDQ AX, R9 + ADCQ DX, R8 + + // r1 += 19×a3×b3 + MOVQ 24(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 24(BX) + ADDQ AX, R9 + ADCQ DX, R8 + + // r1 += 19×a4×b2 + MOVQ 32(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 16(BX) + ADDQ AX, R9 + ADCQ DX, R8 + + // r2 = a0×b2 + MOVQ (CX), AX + MULQ 16(BX) + MOVQ AX, R11 + MOVQ DX, R10 + + // r2 += a1×b1 + MOVQ 8(CX), AX + MULQ 8(BX) + ADDQ AX, R11 + ADCQ DX, R10 + + // r2 += a2×b0 + MOVQ 16(CX), AX + MULQ (BX) + ADDQ AX, R11 + ADCQ DX, R10 + + // r2 += 19×a3×b4 + MOVQ 24(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 32(BX) + ADDQ AX, R11 + ADCQ DX, R10 + + // r2 += 19×a4×b3 + MOVQ 32(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 24(BX) + ADDQ AX, R11 + ADCQ DX, R10 + + // r3 = a0×b3 + MOVQ (CX), AX + MULQ 24(BX) + MOVQ AX, R13 + MOVQ DX, R12 + + // r3 += a1×b2 + MOVQ 8(CX), AX + MULQ 16(BX) + ADDQ AX, R13 + ADCQ DX, R12 + + // r3 += a2×b1 + MOVQ 16(CX), AX + MULQ 8(BX) + ADDQ AX, R13 + ADCQ DX, R12 + + // r3 += a3×b0 + MOVQ 24(CX), AX + MULQ (BX) + ADDQ AX, R13 + ADCQ DX, R12 + + // r3 += 19×a4×b4 + MOVQ 32(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 32(BX) + ADDQ AX, R13 + ADCQ DX, R12 + + // r4 = a0×b4 + MOVQ (CX), AX + MULQ 32(BX) + MOVQ AX, R15 + MOVQ DX, R14 + + // r4 += a1×b3 + MOVQ 8(CX), AX + MULQ 24(BX) + ADDQ AX, R15 + ADCQ DX, R14 + + // r4 += a2×b2 + MOVQ 16(CX), AX + MULQ 16(BX) + ADDQ AX, R15 + ADCQ DX, R14 + + // r4 += a3×b1 + MOVQ 24(CX), AX + MULQ 8(BX) + ADDQ AX, R15 + ADCQ DX, R14 + + // r4 += a4×b0 + MOVQ 32(CX), AX + MULQ (BX) + ADDQ AX, R15 + ADCQ DX, R14 + + // First reduction chain + MOVQ $0x0007ffffffffffff, AX + SHLQ $0x0d, DI, SI + SHLQ $0x0d, R9, R8 + SHLQ $0x0d, R11, R10 + SHLQ $0x0d, R13, R12 + SHLQ $0x0d, R15, R14 + ANDQ AX, DI + IMUL3Q $0x13, R14, R14 + ADDQ R14, DI + ANDQ AX, R9 + ADDQ SI, R9 + ANDQ AX, R11 + ADDQ R8, R11 + ANDQ AX, R13 + ADDQ R10, R13 + ANDQ AX, R15 + ADDQ R12, R15 + + // Second reduction chain (carryPropagate) + MOVQ DI, SI + SHRQ $0x33, SI + MOVQ R9, R8 + SHRQ $0x33, R8 + MOVQ R11, R10 + SHRQ $0x33, R10 + MOVQ R13, R12 + SHRQ $0x33, R12 + MOVQ R15, R14 + SHRQ $0x33, R14 + ANDQ AX, DI + IMUL3Q $0x13, R14, R14 + ADDQ R14, DI + ANDQ AX, R9 + ADDQ SI, R9 + ANDQ AX, R11 + ADDQ R8, R11 + ANDQ AX, R13 + ADDQ R10, R13 + ANDQ AX, R15 + ADDQ 
R12, R15 + + // Store output + MOVQ out+0(FP), AX + MOVQ DI, (AX) + MOVQ R9, 8(AX) + MOVQ R11, 16(AX) + MOVQ R13, 24(AX) + MOVQ R15, 32(AX) + RET + +// func feSquare(out *Element, a *Element) +TEXT ·feSquare(SB), NOSPLIT, $0-16 + MOVQ a+8(FP), CX + + // r0 = l0×l0 + MOVQ (CX), AX + MULQ (CX) + MOVQ AX, SI + MOVQ DX, BX + + // r0 += 38×l1×l4 + MOVQ 8(CX), AX + IMUL3Q $0x26, AX, AX + MULQ 32(CX) + ADDQ AX, SI + ADCQ DX, BX + + // r0 += 38×l2×l3 + MOVQ 16(CX), AX + IMUL3Q $0x26, AX, AX + MULQ 24(CX) + ADDQ AX, SI + ADCQ DX, BX + + // r1 = 2×l0×l1 + MOVQ (CX), AX + SHLQ $0x01, AX + MULQ 8(CX) + MOVQ AX, R8 + MOVQ DX, DI + + // r1 += 38×l2×l4 + MOVQ 16(CX), AX + IMUL3Q $0x26, AX, AX + MULQ 32(CX) + ADDQ AX, R8 + ADCQ DX, DI + + // r1 += 19×l3×l3 + MOVQ 24(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 24(CX) + ADDQ AX, R8 + ADCQ DX, DI + + // r2 = 2×l0×l2 + MOVQ (CX), AX + SHLQ $0x01, AX + MULQ 16(CX) + MOVQ AX, R10 + MOVQ DX, R9 + + // r2 += l1×l1 + MOVQ 8(CX), AX + MULQ 8(CX) + ADDQ AX, R10 + ADCQ DX, R9 + + // r2 += 38×l3×l4 + MOVQ 24(CX), AX + IMUL3Q $0x26, AX, AX + MULQ 32(CX) + ADDQ AX, R10 + ADCQ DX, R9 + + // r3 = 2×l0×l3 + MOVQ (CX), AX + SHLQ $0x01, AX + MULQ 24(CX) + MOVQ AX, R12 + MOVQ DX, R11 + + // r3 += 2×l1×l2 + MOVQ 8(CX), AX + IMUL3Q $0x02, AX, AX + MULQ 16(CX) + ADDQ AX, R12 + ADCQ DX, R11 + + // r3 += 19×l4×l4 + MOVQ 32(CX), AX + IMUL3Q $0x13, AX, AX + MULQ 32(CX) + ADDQ AX, R12 + ADCQ DX, R11 + + // r4 = 2×l0×l4 + MOVQ (CX), AX + SHLQ $0x01, AX + MULQ 32(CX) + MOVQ AX, R14 + MOVQ DX, R13 + + // r4 += 2×l1×l3 + MOVQ 8(CX), AX + IMUL3Q $0x02, AX, AX + MULQ 24(CX) + ADDQ AX, R14 + ADCQ DX, R13 + + // r4 += l2×l2 + MOVQ 16(CX), AX + MULQ 16(CX) + ADDQ AX, R14 + ADCQ DX, R13 + + // First reduction chain + MOVQ $0x0007ffffffffffff, AX + SHLQ $0x0d, SI, BX + SHLQ $0x0d, R8, DI + SHLQ $0x0d, R10, R9 + SHLQ $0x0d, R12, R11 + SHLQ $0x0d, R14, R13 + ANDQ AX, SI + IMUL3Q $0x13, R13, R13 + ADDQ R13, SI + ANDQ AX, R8 + ADDQ BX, R8 + ANDQ AX, R10 + ADDQ DI, R10 + ANDQ AX, R12 + ADDQ R9, R12 + ANDQ AX, R14 + ADDQ R11, R14 + + // Second reduction chain (carryPropagate) + MOVQ SI, BX + SHRQ $0x33, BX + MOVQ R8, DI + SHRQ $0x33, DI + MOVQ R10, R9 + SHRQ $0x33, R9 + MOVQ R12, R11 + SHRQ $0x33, R11 + MOVQ R14, R13 + SHRQ $0x33, R13 + ANDQ AX, SI + IMUL3Q $0x13, R13, R13 + ADDQ R13, SI + ANDQ AX, R8 + ADDQ BX, R8 + ANDQ AX, R10 + ADDQ DI, R10 + ANDQ AX, R12 + ADDQ R9, R12 + ANDQ AX, R14 + ADDQ R11, R14 + + // Store output + MOVQ out+0(FP), AX + MOVQ SI, (AX) + MOVQ R8, 8(AX) + MOVQ R10, 16(AX) + MOVQ R12, 24(AX) + MOVQ R14, 32(AX) + RET diff --git a/vendor/filippo.io/edwards25519/field/fe_amd64_noasm.go b/vendor/filippo.io/edwards25519/field/fe_amd64_noasm.go new file mode 100644 index 00000000..ddb6c9b8 --- /dev/null +++ b/vendor/filippo.io/edwards25519/field/fe_amd64_noasm.go @@ -0,0 +1,12 @@ +// Copyright (c) 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !amd64 || !gc || purego +// +build !amd64 !gc purego + +package field + +func feMul(v, x, y *Element) { feMulGeneric(v, x, y) } + +func feSquare(v, x *Element) { feSquareGeneric(v, x) } diff --git a/vendor/filippo.io/edwards25519/field/fe_arm64.go b/vendor/filippo.io/edwards25519/field/fe_arm64.go new file mode 100644 index 00000000..af459ef5 --- /dev/null +++ b/vendor/filippo.io/edwards25519/field/fe_arm64.go @@ -0,0 +1,16 @@ +// Copyright (c) 2020 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build arm64 && gc && !purego +// +build arm64,gc,!purego + +package field + +//go:noescape +func carryPropagate(v *Element) + +func (v *Element) carryPropagate() *Element { + carryPropagate(v) + return v +} diff --git a/vendor/filippo.io/edwards25519/field/fe_arm64.s b/vendor/filippo.io/edwards25519/field/fe_arm64.s new file mode 100644 index 00000000..751ab2ad --- /dev/null +++ b/vendor/filippo.io/edwards25519/field/fe_arm64.s @@ -0,0 +1,42 @@ +// Copyright (c) 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm64,gc,!purego + +#include "textflag.h" + +// carryPropagate works exactly like carryPropagateGeneric and uses the +// same AND, ADD, and LSR+MADD instructions emitted by the compiler, but +// avoids loading R0-R4 twice and uses LDP and STP. +// +// See https://golang.org/issues/43145 for the main compiler issue. +// +// func carryPropagate(v *Element) +TEXT ·carryPropagate(SB),NOFRAME|NOSPLIT,$0-8 + MOVD v+0(FP), R20 + + LDP 0(R20), (R0, R1) + LDP 16(R20), (R2, R3) + MOVD 32(R20), R4 + + AND $0x7ffffffffffff, R0, R10 + AND $0x7ffffffffffff, R1, R11 + AND $0x7ffffffffffff, R2, R12 + AND $0x7ffffffffffff, R3, R13 + AND $0x7ffffffffffff, R4, R14 + + ADD R0>>51, R11, R11 + ADD R1>>51, R12, R12 + ADD R2>>51, R13, R13 + ADD R3>>51, R14, R14 + // R4>>51 * 19 + R10 -> R10 + LSR $51, R4, R21 + MOVD $19, R22 + MADD R22, R10, R21, R10 + + STP (R10, R11), 0(R20) + STP (R12, R13), 16(R20) + MOVD R14, 32(R20) + + RET diff --git a/vendor/filippo.io/edwards25519/field/fe_arm64_noasm.go b/vendor/filippo.io/edwards25519/field/fe_arm64_noasm.go new file mode 100644 index 00000000..234a5b2e --- /dev/null +++ b/vendor/filippo.io/edwards25519/field/fe_arm64_noasm.go @@ -0,0 +1,12 @@ +// Copyright (c) 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !arm64 || !gc || purego +// +build !arm64 !gc purego + +package field + +func (v *Element) carryPropagate() *Element { + return v.carryPropagateGeneric() +} diff --git a/vendor/filippo.io/edwards25519/field/fe_generic.go b/vendor/filippo.io/edwards25519/field/fe_generic.go new file mode 100644 index 00000000..bccf8511 --- /dev/null +++ b/vendor/filippo.io/edwards25519/field/fe_generic.go @@ -0,0 +1,264 @@ +// Copyright (c) 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package field + +import "math/bits" + +// uint128 holds a 128-bit number as two 64-bit limbs, for use with the +// bits.Mul64 and bits.Add64 intrinsics. +type uint128 struct { + lo, hi uint64 +} + +// mul64 returns a * b. +func mul64(a, b uint64) uint128 { + hi, lo := bits.Mul64(a, b) + return uint128{lo, hi} +} + +// addMul64 returns v + a * b. +func addMul64(v uint128, a, b uint64) uint128 { + hi, lo := bits.Mul64(a, b) + lo, c := bits.Add64(lo, v.lo, 0) + hi, _ = bits.Add64(hi, v.hi, c) + return uint128{lo, hi} +} + +// shiftRightBy51 returns a >> 51. a is assumed to be at most 115 bits. 
+func shiftRightBy51(a uint128) uint64 { + return (a.hi << (64 - 51)) | (a.lo >> 51) +} + +func feMulGeneric(v, a, b *Element) { + a0 := a.l0 + a1 := a.l1 + a2 := a.l2 + a3 := a.l3 + a4 := a.l4 + + b0 := b.l0 + b1 := b.l1 + b2 := b.l2 + b3 := b.l3 + b4 := b.l4 + + // Limb multiplication works like pen-and-paper columnar multiplication, but + // with 51-bit limbs instead of digits. + // + // a4 a3 a2 a1 a0 x + // b4 b3 b2 b1 b0 = + // ------------------------ + // a4b0 a3b0 a2b0 a1b0 a0b0 + + // a4b1 a3b1 a2b1 a1b1 a0b1 + + // a4b2 a3b2 a2b2 a1b2 a0b2 + + // a4b3 a3b3 a2b3 a1b3 a0b3 + + // a4b4 a3b4 a2b4 a1b4 a0b4 = + // ---------------------------------------------- + // r8 r7 r6 r5 r4 r3 r2 r1 r0 + // + // We can then use the reduction identity (a * 2²⁵⁵ + b = a * 19 + b) to + // reduce the limbs that would overflow 255 bits. r5 * 2²⁵⁵ becomes 19 * r5, + // r6 * 2³⁰⁶ becomes 19 * r6 * 2⁵¹, etc. + // + // Reduction can be carried out simultaneously to multiplication. For + // example, we do not compute r5: whenever the result of a multiplication + // belongs to r5, like a1b4, we multiply it by 19 and add the result to r0. + // + // a4b0 a3b0 a2b0 a1b0 a0b0 + + // a3b1 a2b1 a1b1 a0b1 19×a4b1 + + // a2b2 a1b2 a0b2 19×a4b2 19×a3b2 + + // a1b3 a0b3 19×a4b3 19×a3b3 19×a2b3 + + // a0b4 19×a4b4 19×a3b4 19×a2b4 19×a1b4 = + // -------------------------------------- + // r4 r3 r2 r1 r0 + // + // Finally we add up the columns into wide, overlapping limbs. + + a1_19 := a1 * 19 + a2_19 := a2 * 19 + a3_19 := a3 * 19 + a4_19 := a4 * 19 + + // r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1) + r0 := mul64(a0, b0) + r0 = addMul64(r0, a1_19, b4) + r0 = addMul64(r0, a2_19, b3) + r0 = addMul64(r0, a3_19, b2) + r0 = addMul64(r0, a4_19, b1) + + // r1 = a0×b1 + a1×b0 + 19×(a2×b4 + a3×b3 + a4×b2) + r1 := mul64(a0, b1) + r1 = addMul64(r1, a1, b0) + r1 = addMul64(r1, a2_19, b4) + r1 = addMul64(r1, a3_19, b3) + r1 = addMul64(r1, a4_19, b2) + + // r2 = a0×b2 + a1×b1 + a2×b0 + 19×(a3×b4 + a4×b3) + r2 := mul64(a0, b2) + r2 = addMul64(r2, a1, b1) + r2 = addMul64(r2, a2, b0) + r2 = addMul64(r2, a3_19, b4) + r2 = addMul64(r2, a4_19, b3) + + // r3 = a0×b3 + a1×b2 + a2×b1 + a3×b0 + 19×a4×b4 + r3 := mul64(a0, b3) + r3 = addMul64(r3, a1, b2) + r3 = addMul64(r3, a2, b1) + r3 = addMul64(r3, a3, b0) + r3 = addMul64(r3, a4_19, b4) + + // r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0 + r4 := mul64(a0, b4) + r4 = addMul64(r4, a1, b3) + r4 = addMul64(r4, a2, b2) + r4 = addMul64(r4, a3, b1) + r4 = addMul64(r4, a4, b0) + + // After the multiplication, we need to reduce (carry) the five coefficients + // to obtain a result with limbs that are at most slightly larger than 2⁵¹, + // to respect the Element invariant. + // + // Overall, the reduction works the same as carryPropagate, except with + // wider inputs: we take the carry for each coefficient by shifting it right + // by 51, and add it to the limb above it. The top carry is multiplied by 19 + // according to the reduction identity and added to the lowest limb. + // + // The largest coefficient (r0) will be at most 111 bits, which guarantees + // that all carries are at most 111 - 51 = 60 bits, which fits in a uint64. 
+ // + // r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1) + // r0 < 2⁵²×2⁵² + 19×(2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵²) + // r0 < (1 + 19 × 4) × 2⁵² × 2⁵² + // r0 < 2⁷ × 2⁵² × 2⁵² + // r0 < 2¹¹¹ + // + // Moreover, the top coefficient (r4) is at most 107 bits, so c4 is at most + // 56 bits, and c4 * 19 is at most 61 bits, which again fits in a uint64 and + // allows us to easily apply the reduction identity. + // + // r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0 + // r4 < 5 × 2⁵² × 2⁵² + // r4 < 2¹⁰⁷ + // + + c0 := shiftRightBy51(r0) + c1 := shiftRightBy51(r1) + c2 := shiftRightBy51(r2) + c3 := shiftRightBy51(r3) + c4 := shiftRightBy51(r4) + + rr0 := r0.lo&maskLow51Bits + c4*19 + rr1 := r1.lo&maskLow51Bits + c0 + rr2 := r2.lo&maskLow51Bits + c1 + rr3 := r3.lo&maskLow51Bits + c2 + rr4 := r4.lo&maskLow51Bits + c3 + + // Now all coefficients fit into 64-bit registers but are still too large to + // be passed around as a Element. We therefore do one last carry chain, + // where the carries will be small enough to fit in the wiggle room above 2⁵¹. + *v = Element{rr0, rr1, rr2, rr3, rr4} + v.carryPropagate() +} + +func feSquareGeneric(v, a *Element) { + l0 := a.l0 + l1 := a.l1 + l2 := a.l2 + l3 := a.l3 + l4 := a.l4 + + // Squaring works precisely like multiplication above, but thanks to its + // symmetry we get to group a few terms together. + // + // l4 l3 l2 l1 l0 x + // l4 l3 l2 l1 l0 = + // ------------------------ + // l4l0 l3l0 l2l0 l1l0 l0l0 + + // l4l1 l3l1 l2l1 l1l1 l0l1 + + // l4l2 l3l2 l2l2 l1l2 l0l2 + + // l4l3 l3l3 l2l3 l1l3 l0l3 + + // l4l4 l3l4 l2l4 l1l4 l0l4 = + // ---------------------------------------------- + // r8 r7 r6 r5 r4 r3 r2 r1 r0 + // + // l4l0 l3l0 l2l0 l1l0 l0l0 + + // l3l1 l2l1 l1l1 l0l1 19×l4l1 + + // l2l2 l1l2 l0l2 19×l4l2 19×l3l2 + + // l1l3 l0l3 19×l4l3 19×l3l3 19×l2l3 + + // l0l4 19×l4l4 19×l3l4 19×l2l4 19×l1l4 = + // -------------------------------------- + // r4 r3 r2 r1 r0 + // + // With precomputed 2×, 19×, and 2×19× terms, we can compute each limb with + // only three Mul64 and four Add64, instead of five and eight. 
+ + l0_2 := l0 * 2 + l1_2 := l1 * 2 + + l1_38 := l1 * 38 + l2_38 := l2 * 38 + l3_38 := l3 * 38 + + l3_19 := l3 * 19 + l4_19 := l4 * 19 + + // r0 = l0×l0 + 19×(l1×l4 + l2×l3 + l3×l2 + l4×l1) = l0×l0 + 19×2×(l1×l4 + l2×l3) + r0 := mul64(l0, l0) + r0 = addMul64(r0, l1_38, l4) + r0 = addMul64(r0, l2_38, l3) + + // r1 = l0×l1 + l1×l0 + 19×(l2×l4 + l3×l3 + l4×l2) = 2×l0×l1 + 19×2×l2×l4 + 19×l3×l3 + r1 := mul64(l0_2, l1) + r1 = addMul64(r1, l2_38, l4) + r1 = addMul64(r1, l3_19, l3) + + // r2 = l0×l2 + l1×l1 + l2×l0 + 19×(l3×l4 + l4×l3) = 2×l0×l2 + l1×l1 + 19×2×l3×l4 + r2 := mul64(l0_2, l2) + r2 = addMul64(r2, l1, l1) + r2 = addMul64(r2, l3_38, l4) + + // r3 = l0×l3 + l1×l2 + l2×l1 + l3×l0 + 19×l4×l4 = 2×l0×l3 + 2×l1×l2 + 19×l4×l4 + r3 := mul64(l0_2, l3) + r3 = addMul64(r3, l1_2, l2) + r3 = addMul64(r3, l4_19, l4) + + // r4 = l0×l4 + l1×l3 + l2×l2 + l3×l1 + l4×l0 = 2×l0×l4 + 2×l1×l3 + l2×l2 + r4 := mul64(l0_2, l4) + r4 = addMul64(r4, l1_2, l3) + r4 = addMul64(r4, l2, l2) + + c0 := shiftRightBy51(r0) + c1 := shiftRightBy51(r1) + c2 := shiftRightBy51(r2) + c3 := shiftRightBy51(r3) + c4 := shiftRightBy51(r4) + + rr0 := r0.lo&maskLow51Bits + c4*19 + rr1 := r1.lo&maskLow51Bits + c0 + rr2 := r2.lo&maskLow51Bits + c1 + rr3 := r3.lo&maskLow51Bits + c2 + rr4 := r4.lo&maskLow51Bits + c3 + + *v = Element{rr0, rr1, rr2, rr3, rr4} + v.carryPropagate() +} + +// carryPropagate brings the limbs below 52 bits by applying the reduction +// identity (a * 2²⁵⁵ + b = a * 19 + b) to the l4 carry. +func (v *Element) carryPropagateGeneric() *Element { + c0 := v.l0 >> 51 + c1 := v.l1 >> 51 + c2 := v.l2 >> 51 + c3 := v.l3 >> 51 + c4 := v.l4 >> 51 + + v.l0 = v.l0&maskLow51Bits + c4*19 + v.l1 = v.l1&maskLow51Bits + c0 + v.l2 = v.l2&maskLow51Bits + c1 + v.l3 = v.l3&maskLow51Bits + c2 + v.l4 = v.l4&maskLow51Bits + c3 + + return v +} diff --git a/vendor/filippo.io/edwards25519/scalar.go b/vendor/filippo.io/edwards25519/scalar.go new file mode 100644 index 00000000..f3da71ce --- /dev/null +++ b/vendor/filippo.io/edwards25519/scalar.go @@ -0,0 +1,1027 @@ +// Copyright (c) 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package edwards25519 + +import ( + "crypto/subtle" + "encoding/binary" + "errors" +) + +// A Scalar is an integer modulo +// +// l = 2^252 + 27742317777372353535851937790883648493 +// +// which is the prime order of the edwards25519 group. +// +// This type works similarly to math/big.Int, and all arguments and +// receivers are allowed to alias. +// +// The zero value is a valid zero element. +type Scalar struct { + // s is the Scalar value in little-endian. The value is always reduced + // between operations. + s [32]byte +} + +var ( + scZero = Scalar{[32]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}} + + scOne = Scalar{[32]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}} + + scMinusOne = Scalar{[32]byte{236, 211, 245, 92, 26, 99, 18, 88, 214, 156, 247, 162, 222, 249, 222, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16}} +) + +// NewScalar returns a new zero Scalar. +func NewScalar() *Scalar { + return &Scalar{} +} + +// MultiplyAdd sets s = x * y + z mod l, and returns s. +func (s *Scalar) MultiplyAdd(x, y, z *Scalar) *Scalar { + scMulAdd(&s.s, &x.s, &y.s, &z.s) + return s +} + +// Add sets s = x + y mod l, and returns s. 
+func (s *Scalar) Add(x, y *Scalar) *Scalar { + // s = 1 * x + y mod l + scMulAdd(&s.s, &scOne.s, &x.s, &y.s) + return s +} + +// Subtract sets s = x - y mod l, and returns s. +func (s *Scalar) Subtract(x, y *Scalar) *Scalar { + // s = -1 * y + x mod l + scMulAdd(&s.s, &scMinusOne.s, &y.s, &x.s) + return s +} + +// Negate sets s = -x mod l, and returns s. +func (s *Scalar) Negate(x *Scalar) *Scalar { + // s = -1 * x + 0 mod l + scMulAdd(&s.s, &scMinusOne.s, &x.s, &scZero.s) + return s +} + +// Multiply sets s = x * y mod l, and returns s. +func (s *Scalar) Multiply(x, y *Scalar) *Scalar { + // s = x * y + 0 mod l + scMulAdd(&s.s, &x.s, &y.s, &scZero.s) + return s +} + +// Set sets s = x, and returns s. +func (s *Scalar) Set(x *Scalar) *Scalar { + *s = *x + return s +} + +// SetUniformBytes sets s to an uniformly distributed value given 64 uniformly +// distributed random bytes. If x is not of the right length, SetUniformBytes +// returns nil and an error, and the receiver is unchanged. +func (s *Scalar) SetUniformBytes(x []byte) (*Scalar, error) { + if len(x) != 64 { + return nil, errors.New("edwards25519: invalid SetUniformBytes input length") + } + var wideBytes [64]byte + copy(wideBytes[:], x[:]) + scReduce(&s.s, &wideBytes) + return s, nil +} + +// SetCanonicalBytes sets s = x, where x is a 32-byte little-endian encoding of +// s, and returns s. If x is not a canonical encoding of s, SetCanonicalBytes +// returns nil and an error, and the receiver is unchanged. +func (s *Scalar) SetCanonicalBytes(x []byte) (*Scalar, error) { + if len(x) != 32 { + return nil, errors.New("invalid scalar length") + } + ss := &Scalar{} + copy(ss.s[:], x) + if !isReduced(ss) { + return nil, errors.New("invalid scalar encoding") + } + s.s = ss.s + return s, nil +} + +// isReduced returns whether the given scalar is reduced modulo l. +func isReduced(s *Scalar) bool { + for i := len(s.s) - 1; i >= 0; i-- { + switch { + case s.s[i] > scMinusOne.s[i]: + return false + case s.s[i] < scMinusOne.s[i]: + return true + } + } + return true +} + +// SetBytesWithClamping applies the buffer pruning described in RFC 8032, +// Section 5.1.5 (also known as clamping) and sets s to the result. The input +// must be 32 bytes, and it is not modified. If x is not of the right length, +// SetBytesWithClamping returns nil and an error, and the receiver is unchanged. +// +// Note that since Scalar values are always reduced modulo the prime order of +// the curve, the resulting value will not preserve any of the cofactor-clearing +// properties that clamping is meant to provide. It will however work as +// expected as long as it is applied to points on the prime order subgroup, like +// in Ed25519. In fact, it is lost to history why RFC 8032 adopted the +// irrelevant RFC 7748 clamping, but it is now required for compatibility. +func (s *Scalar) SetBytesWithClamping(x []byte) (*Scalar, error) { + // The description above omits the purpose of the high bits of the clamping + // for brevity, but those are also lost to reductions, and are also + // irrelevant to edwards25519 as they protect against a specific + // implementation bug that was once observed in a generic Montgomery ladder. + if len(x) != 32 { + return nil, errors.New("edwards25519: invalid SetBytesWithClamping input length") + } + var wideBytes [64]byte + copy(wideBytes[:], x[:]) + wideBytes[0] &= 248 + wideBytes[31] &= 63 + wideBytes[31] |= 64 + scReduce(&s.s, &wideBytes) + return s, nil +} + +// Bytes returns the canonical 32-byte little-endian encoding of s. 
+func (s *Scalar) Bytes() []byte { + buf := make([]byte, 32) + copy(buf, s.s[:]) + return buf +} + +// Equal returns 1 if s and t are equal, and 0 otherwise. +func (s *Scalar) Equal(t *Scalar) int { + return subtle.ConstantTimeCompare(s.s[:], t.s[:]) +} + +// scMulAdd and scReduce are ported from the public domain, “ref10” +// implementation of ed25519 from SUPERCOP. + +func load3(in []byte) int64 { + r := int64(in[0]) + r |= int64(in[1]) << 8 + r |= int64(in[2]) << 16 + return r +} + +func load4(in []byte) int64 { + r := int64(in[0]) + r |= int64(in[1]) << 8 + r |= int64(in[2]) << 16 + r |= int64(in[3]) << 24 + return r +} + +// Input: +// a[0]+256*a[1]+...+256^31*a[31] = a +// b[0]+256*b[1]+...+256^31*b[31] = b +// c[0]+256*c[1]+...+256^31*c[31] = c +// +// Output: +// s[0]+256*s[1]+...+256^31*s[31] = (ab+c) mod l +// where l = 2^252 + 27742317777372353535851937790883648493. +func scMulAdd(s, a, b, c *[32]byte) { + a0 := 2097151 & load3(a[:]) + a1 := 2097151 & (load4(a[2:]) >> 5) + a2 := 2097151 & (load3(a[5:]) >> 2) + a3 := 2097151 & (load4(a[7:]) >> 7) + a4 := 2097151 & (load4(a[10:]) >> 4) + a5 := 2097151 & (load3(a[13:]) >> 1) + a6 := 2097151 & (load4(a[15:]) >> 6) + a7 := 2097151 & (load3(a[18:]) >> 3) + a8 := 2097151 & load3(a[21:]) + a9 := 2097151 & (load4(a[23:]) >> 5) + a10 := 2097151 & (load3(a[26:]) >> 2) + a11 := (load4(a[28:]) >> 7) + b0 := 2097151 & load3(b[:]) + b1 := 2097151 & (load4(b[2:]) >> 5) + b2 := 2097151 & (load3(b[5:]) >> 2) + b3 := 2097151 & (load4(b[7:]) >> 7) + b4 := 2097151 & (load4(b[10:]) >> 4) + b5 := 2097151 & (load3(b[13:]) >> 1) + b6 := 2097151 & (load4(b[15:]) >> 6) + b7 := 2097151 & (load3(b[18:]) >> 3) + b8 := 2097151 & load3(b[21:]) + b9 := 2097151 & (load4(b[23:]) >> 5) + b10 := 2097151 & (load3(b[26:]) >> 2) + b11 := (load4(b[28:]) >> 7) + c0 := 2097151 & load3(c[:]) + c1 := 2097151 & (load4(c[2:]) >> 5) + c2 := 2097151 & (load3(c[5:]) >> 2) + c3 := 2097151 & (load4(c[7:]) >> 7) + c4 := 2097151 & (load4(c[10:]) >> 4) + c5 := 2097151 & (load3(c[13:]) >> 1) + c6 := 2097151 & (load4(c[15:]) >> 6) + c7 := 2097151 & (load3(c[18:]) >> 3) + c8 := 2097151 & load3(c[21:]) + c9 := 2097151 & (load4(c[23:]) >> 5) + c10 := 2097151 & (load3(c[26:]) >> 2) + c11 := (load4(c[28:]) >> 7) + var carry [23]int64 + + s0 := c0 + a0*b0 + s1 := c1 + a0*b1 + a1*b0 + s2 := c2 + a0*b2 + a1*b1 + a2*b0 + s3 := c3 + a0*b3 + a1*b2 + a2*b1 + a3*b0 + s4 := c4 + a0*b4 + a1*b3 + a2*b2 + a3*b1 + a4*b0 + s5 := c5 + a0*b5 + a1*b4 + a2*b3 + a3*b2 + a4*b1 + a5*b0 + s6 := c6 + a0*b6 + a1*b5 + a2*b4 + a3*b3 + a4*b2 + a5*b1 + a6*b0 + s7 := c7 + a0*b7 + a1*b6 + a2*b5 + a3*b4 + a4*b3 + a5*b2 + a6*b1 + a7*b0 + s8 := c8 + a0*b8 + a1*b7 + a2*b6 + a3*b5 + a4*b4 + a5*b3 + a6*b2 + a7*b1 + a8*b0 + s9 := c9 + a0*b9 + a1*b8 + a2*b7 + a3*b6 + a4*b5 + a5*b4 + a6*b3 + a7*b2 + a8*b1 + a9*b0 + s10 := c10 + a0*b10 + a1*b9 + a2*b8 + a3*b7 + a4*b6 + a5*b5 + a6*b4 + a7*b3 + a8*b2 + a9*b1 + a10*b0 + s11 := c11 + a0*b11 + a1*b10 + a2*b9 + a3*b8 + a4*b7 + a5*b6 + a6*b5 + a7*b4 + a8*b3 + a9*b2 + a10*b1 + a11*b0 + s12 := a1*b11 + a2*b10 + a3*b9 + a4*b8 + a5*b7 + a6*b6 + a7*b5 + a8*b4 + a9*b3 + a10*b2 + a11*b1 + s13 := a2*b11 + a3*b10 + a4*b9 + a5*b8 + a6*b7 + a7*b6 + a8*b5 + a9*b4 + a10*b3 + a11*b2 + s14 := a3*b11 + a4*b10 + a5*b9 + a6*b8 + a7*b7 + a8*b6 + a9*b5 + a10*b4 + a11*b3 + s15 := a4*b11 + a5*b10 + a6*b9 + a7*b8 + a8*b7 + a9*b6 + a10*b5 + a11*b4 + s16 := a5*b11 + a6*b10 + a7*b9 + a8*b8 + a9*b7 + a10*b6 + a11*b5 + s17 := a6*b11 + a7*b10 + a8*b9 + a9*b8 + a10*b7 + a11*b6 + s18 := a7*b11 + a8*b10 + a9*b9 + 
a10*b8 + a11*b7 + s19 := a8*b11 + a9*b10 + a10*b9 + a11*b8 + s20 := a9*b11 + a10*b10 + a11*b9 + s21 := a10*b11 + a11*b10 + s22 := a11 * b11 + s23 := int64(0) + + carry[0] = (s0 + (1 << 20)) >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[2] = (s2 + (1 << 20)) >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[4] = (s4 + (1 << 20)) >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[12] = (s12 + (1 << 20)) >> 21 + s13 += carry[12] + s12 -= carry[12] << 21 + carry[14] = (s14 + (1 << 20)) >> 21 + s15 += carry[14] + s14 -= carry[14] << 21 + carry[16] = (s16 + (1 << 20)) >> 21 + s17 += carry[16] + s16 -= carry[16] << 21 + carry[18] = (s18 + (1 << 20)) >> 21 + s19 += carry[18] + s18 -= carry[18] << 21 + carry[20] = (s20 + (1 << 20)) >> 21 + s21 += carry[20] + s20 -= carry[20] << 21 + carry[22] = (s22 + (1 << 20)) >> 21 + s23 += carry[22] + s22 -= carry[22] << 21 + + carry[1] = (s1 + (1 << 20)) >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[3] = (s3 + (1 << 20)) >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[5] = (s5 + (1 << 20)) >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + carry[13] = (s13 + (1 << 20)) >> 21 + s14 += carry[13] + s13 -= carry[13] << 21 + carry[15] = (s15 + (1 << 20)) >> 21 + s16 += carry[15] + s15 -= carry[15] << 21 + carry[17] = (s17 + (1 << 20)) >> 21 + s18 += carry[17] + s17 -= carry[17] << 21 + carry[19] = (s19 + (1 << 20)) >> 21 + s20 += carry[19] + s19 -= carry[19] << 21 + carry[21] = (s21 + (1 << 20)) >> 21 + s22 += carry[21] + s21 -= carry[21] << 21 + + s11 += s23 * 666643 + s12 += s23 * 470296 + s13 += s23 * 654183 + s14 -= s23 * 997805 + s15 += s23 * 136657 + s16 -= s23 * 683901 + s23 = 0 + + s10 += s22 * 666643 + s11 += s22 * 470296 + s12 += s22 * 654183 + s13 -= s22 * 997805 + s14 += s22 * 136657 + s15 -= s22 * 683901 + s22 = 0 + + s9 += s21 * 666643 + s10 += s21 * 470296 + s11 += s21 * 654183 + s12 -= s21 * 997805 + s13 += s21 * 136657 + s14 -= s21 * 683901 + s21 = 0 + + s8 += s20 * 666643 + s9 += s20 * 470296 + s10 += s20 * 654183 + s11 -= s20 * 997805 + s12 += s20 * 136657 + s13 -= s20 * 683901 + s20 = 0 + + s7 += s19 * 666643 + s8 += s19 * 470296 + s9 += s19 * 654183 + s10 -= s19 * 997805 + s11 += s19 * 136657 + s12 -= s19 * 683901 + s19 = 0 + + s6 += s18 * 666643 + s7 += s18 * 470296 + s8 += s18 * 654183 + s9 -= s18 * 997805 + s10 += s18 * 136657 + s11 -= s18 * 683901 + s18 = 0 + + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[12] = (s12 + (1 << 20)) >> 21 + s13 += carry[12] + s12 -= carry[12] << 21 + carry[14] = (s14 + (1 << 20)) >> 21 + s15 += carry[14] + s14 -= carry[14] << 21 + carry[16] = (s16 + (1 << 20)) >> 21 + s17 += carry[16] + s16 -= carry[16] << 21 + + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= 
carry[11] << 21 + carry[13] = (s13 + (1 << 20)) >> 21 + s14 += carry[13] + s13 -= carry[13] << 21 + carry[15] = (s15 + (1 << 20)) >> 21 + s16 += carry[15] + s15 -= carry[15] << 21 + + s5 += s17 * 666643 + s6 += s17 * 470296 + s7 += s17 * 654183 + s8 -= s17 * 997805 + s9 += s17 * 136657 + s10 -= s17 * 683901 + s17 = 0 + + s4 += s16 * 666643 + s5 += s16 * 470296 + s6 += s16 * 654183 + s7 -= s16 * 997805 + s8 += s16 * 136657 + s9 -= s16 * 683901 + s16 = 0 + + s3 += s15 * 666643 + s4 += s15 * 470296 + s5 += s15 * 654183 + s6 -= s15 * 997805 + s7 += s15 * 136657 + s8 -= s15 * 683901 + s15 = 0 + + s2 += s14 * 666643 + s3 += s14 * 470296 + s4 += s14 * 654183 + s5 -= s14 * 997805 + s6 += s14 * 136657 + s7 -= s14 * 683901 + s14 = 0 + + s1 += s13 * 666643 + s2 += s13 * 470296 + s3 += s13 * 654183 + s4 -= s13 * 997805 + s5 += s13 * 136657 + s6 -= s13 * 683901 + s13 = 0 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = (s0 + (1 << 20)) >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[2] = (s2 + (1 << 20)) >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[4] = (s4 + (1 << 20)) >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + carry[1] = (s1 + (1 << 20)) >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[3] = (s3 + (1 << 20)) >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[5] = (s5 + (1 << 20)) >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[11] = s11 >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 
+ s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + s[0] = byte(s0 >> 0) + s[1] = byte(s0 >> 8) + s[2] = byte((s0 >> 16) | (s1 << 5)) + s[3] = byte(s1 >> 3) + s[4] = byte(s1 >> 11) + s[5] = byte((s1 >> 19) | (s2 << 2)) + s[6] = byte(s2 >> 6) + s[7] = byte((s2 >> 14) | (s3 << 7)) + s[8] = byte(s3 >> 1) + s[9] = byte(s3 >> 9) + s[10] = byte((s3 >> 17) | (s4 << 4)) + s[11] = byte(s4 >> 4) + s[12] = byte(s4 >> 12) + s[13] = byte((s4 >> 20) | (s5 << 1)) + s[14] = byte(s5 >> 7) + s[15] = byte((s5 >> 15) | (s6 << 6)) + s[16] = byte(s6 >> 2) + s[17] = byte(s6 >> 10) + s[18] = byte((s6 >> 18) | (s7 << 3)) + s[19] = byte(s7 >> 5) + s[20] = byte(s7 >> 13) + s[21] = byte(s8 >> 0) + s[22] = byte(s8 >> 8) + s[23] = byte((s8 >> 16) | (s9 << 5)) + s[24] = byte(s9 >> 3) + s[25] = byte(s9 >> 11) + s[26] = byte((s9 >> 19) | (s10 << 2)) + s[27] = byte(s10 >> 6) + s[28] = byte((s10 >> 14) | (s11 << 7)) + s[29] = byte(s11 >> 1) + s[30] = byte(s11 >> 9) + s[31] = byte(s11 >> 17) +} + +// Input: +// s[0]+256*s[1]+...+256^63*s[63] = s +// +// Output: +// s[0]+256*s[1]+...+256^31*s[31] = s mod l +// where l = 2^252 + 27742317777372353535851937790883648493. +func scReduce(out *[32]byte, s *[64]byte) { + s0 := 2097151 & load3(s[:]) + s1 := 2097151 & (load4(s[2:]) >> 5) + s2 := 2097151 & (load3(s[5:]) >> 2) + s3 := 2097151 & (load4(s[7:]) >> 7) + s4 := 2097151 & (load4(s[10:]) >> 4) + s5 := 2097151 & (load3(s[13:]) >> 1) + s6 := 2097151 & (load4(s[15:]) >> 6) + s7 := 2097151 & (load3(s[18:]) >> 3) + s8 := 2097151 & load3(s[21:]) + s9 := 2097151 & (load4(s[23:]) >> 5) + s10 := 2097151 & (load3(s[26:]) >> 2) + s11 := 2097151 & (load4(s[28:]) >> 7) + s12 := 2097151 & (load4(s[31:]) >> 4) + s13 := 2097151 & (load3(s[34:]) >> 1) + s14 := 2097151 & (load4(s[36:]) >> 6) + s15 := 2097151 & (load3(s[39:]) >> 3) + s16 := 2097151 & load3(s[42:]) + s17 := 2097151 & (load4(s[44:]) >> 5) + s18 := 2097151 & (load3(s[47:]) >> 2) + s19 := 2097151 & (load4(s[49:]) >> 7) + s20 := 2097151 & (load4(s[52:]) >> 4) + s21 := 2097151 & (load3(s[55:]) >> 1) + s22 := 2097151 & (load4(s[57:]) >> 6) + s23 := (load4(s[60:]) >> 3) + + s11 += s23 * 666643 + s12 += s23 * 470296 + s13 += s23 * 654183 + s14 -= s23 * 997805 + s15 += s23 * 136657 + s16 -= s23 * 683901 + s23 = 0 + + s10 += s22 * 666643 + s11 += s22 * 470296 + s12 += s22 * 654183 + s13 -= s22 * 997805 + s14 += s22 * 136657 + s15 -= s22 * 683901 + s22 = 0 + + s9 += s21 * 666643 + s10 += s21 * 470296 + s11 += s21 * 654183 + s12 -= s21 * 997805 + s13 += s21 * 136657 + s14 -= s21 * 683901 + s21 = 0 + + s8 += s20 * 666643 + s9 += s20 * 470296 + s10 += s20 * 654183 + s11 -= s20 * 997805 + s12 += s20 * 136657 + s13 -= s20 * 683901 + s20 = 0 + + s7 += s19 * 666643 + s8 += s19 * 470296 + s9 += s19 * 654183 + s10 -= s19 * 997805 + s11 += s19 * 136657 + s12 -= s19 * 683901 + s19 = 0 + + s6 += s18 * 666643 + s7 += s18 * 470296 + s8 += s18 * 654183 + s9 -= s18 * 997805 + s10 += s18 * 136657 + s11 -= s18 * 683901 + s18 = 0 + + var carry [17]int64 + + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[12] = (s12 + (1 << 20)) >> 21 + s13 += carry[12] + s12 -= carry[12] << 21 + carry[14] = (s14 + (1 << 20)) >> 21 + s15 += carry[14] + s14 -= carry[14] << 21 + carry[16] = (s16 + (1 << 20)) >> 21 + s17 += carry[16] + s16 -= carry[16] << 21 + + carry[7] = (s7 + (1 
<< 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + carry[13] = (s13 + (1 << 20)) >> 21 + s14 += carry[13] + s13 -= carry[13] << 21 + carry[15] = (s15 + (1 << 20)) >> 21 + s16 += carry[15] + s15 -= carry[15] << 21 + + s5 += s17 * 666643 + s6 += s17 * 470296 + s7 += s17 * 654183 + s8 -= s17 * 997805 + s9 += s17 * 136657 + s10 -= s17 * 683901 + s17 = 0 + + s4 += s16 * 666643 + s5 += s16 * 470296 + s6 += s16 * 654183 + s7 -= s16 * 997805 + s8 += s16 * 136657 + s9 -= s16 * 683901 + s16 = 0 + + s3 += s15 * 666643 + s4 += s15 * 470296 + s5 += s15 * 654183 + s6 -= s15 * 997805 + s7 += s15 * 136657 + s8 -= s15 * 683901 + s15 = 0 + + s2 += s14 * 666643 + s3 += s14 * 470296 + s4 += s14 * 654183 + s5 -= s14 * 997805 + s6 += s14 * 136657 + s7 -= s14 * 683901 + s14 = 0 + + s1 += s13 * 666643 + s2 += s13 * 470296 + s3 += s13 * 654183 + s4 -= s13 * 997805 + s5 += s13 * 136657 + s6 -= s13 * 683901 + s13 = 0 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = (s0 + (1 << 20)) >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[2] = (s2 + (1 << 20)) >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[4] = (s4 + (1 << 20)) >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + carry[1] = (s1 + (1 << 20)) >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[3] = (s3 + (1 << 20)) >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[5] = (s5 + (1 << 20)) >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[11] = s11 >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] 
= s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + out[0] = byte(s0 >> 0) + out[1] = byte(s0 >> 8) + out[2] = byte((s0 >> 16) | (s1 << 5)) + out[3] = byte(s1 >> 3) + out[4] = byte(s1 >> 11) + out[5] = byte((s1 >> 19) | (s2 << 2)) + out[6] = byte(s2 >> 6) + out[7] = byte((s2 >> 14) | (s3 << 7)) + out[8] = byte(s3 >> 1) + out[9] = byte(s3 >> 9) + out[10] = byte((s3 >> 17) | (s4 << 4)) + out[11] = byte(s4 >> 4) + out[12] = byte(s4 >> 12) + out[13] = byte((s4 >> 20) | (s5 << 1)) + out[14] = byte(s5 >> 7) + out[15] = byte((s5 >> 15) | (s6 << 6)) + out[16] = byte(s6 >> 2) + out[17] = byte(s6 >> 10) + out[18] = byte((s6 >> 18) | (s7 << 3)) + out[19] = byte(s7 >> 5) + out[20] = byte(s7 >> 13) + out[21] = byte(s8 >> 0) + out[22] = byte(s8 >> 8) + out[23] = byte((s8 >> 16) | (s9 << 5)) + out[24] = byte(s9 >> 3) + out[25] = byte(s9 >> 11) + out[26] = byte((s9 >> 19) | (s10 << 2)) + out[27] = byte(s10 >> 6) + out[28] = byte((s10 >> 14) | (s11 << 7)) + out[29] = byte(s11 >> 1) + out[30] = byte(s11 >> 9) + out[31] = byte(s11 >> 17) +} + +// nonAdjacentForm computes a width-w non-adjacent form for this scalar. +// +// w must be between 2 and 8, or nonAdjacentForm will panic. +func (s *Scalar) nonAdjacentForm(w uint) [256]int8 { + // This implementation is adapted from the one + // in curve25519-dalek and is documented there: + // https://github.com/dalek-cryptography/curve25519-dalek/blob/f630041af28e9a405255f98a8a93adca18e4315b/src/scalar.rs#L800-L871 + if s.s[31] > 127 { + panic("scalar has high bit set illegally") + } + if w < 2 { + panic("w must be at least 2 by the definition of NAF") + } else if w > 8 { + panic("NAF digits must fit in int8") + } + + var naf [256]int8 + var digits [5]uint64 + + for i := 0; i < 4; i++ { + digits[i] = binary.LittleEndian.Uint64(s.s[i*8:]) + } + + width := uint64(1 << w) + windowMask := uint64(width - 1) + + pos := uint(0) + carry := uint64(0) + for pos < 256 { + indexU64 := pos / 64 + indexBit := pos % 64 + var bitBuf uint64 + if indexBit < 64-w { + // This window's bits are contained in a single u64 + bitBuf = digits[indexU64] >> indexBit + } else { + // Combine the current 64 bits with bits from the next 64 + bitBuf = (digits[indexU64] >> indexBit) | (digits[1+indexU64] << (64 - indexBit)) + } + + // Add carry into the current window + window := carry + (bitBuf & windowMask) + + if window&1 == 0 { + // If the window value is even, preserve the carry and continue. + // Why is the carry preserved? 
+ // If carry == 0 and window & 1 == 0, + // then the next carry should be 0 + // If carry == 1 and window & 1 == 0, + // then bit_buf & 1 == 1 so the next carry should be 1 + pos += 1 + continue + } + + if window < width/2 { + carry = 0 + naf[pos] = int8(window) + } else { + carry = 1 + naf[pos] = int8(window) - int8(width) + } + + pos += w + } + return naf +} + +func (s *Scalar) signedRadix16() [64]int8 { + if s.s[31] > 127 { + panic("scalar has high bit set illegally") + } + + var digits [64]int8 + + // Compute unsigned radix-16 digits: + for i := 0; i < 32; i++ { + digits[2*i] = int8(s.s[i] & 15) + digits[2*i+1] = int8((s.s[i] >> 4) & 15) + } + + // Recenter coefficients: + for i := 0; i < 63; i++ { + carry := (digits[i] + 8) >> 4 + digits[i] -= carry << 4 + digits[i+1] += carry + } + + return digits +} diff --git a/vendor/filippo.io/edwards25519/scalarmult.go b/vendor/filippo.io/edwards25519/scalarmult.go new file mode 100644 index 00000000..f7ca3cef --- /dev/null +++ b/vendor/filippo.io/edwards25519/scalarmult.go @@ -0,0 +1,214 @@ +// Copyright (c) 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package edwards25519 + +import "sync" + +// basepointTable is a set of 32 affineLookupTables, where table i is generated +// from 256i * basepoint. It is precomputed the first time it's used. +func basepointTable() *[32]affineLookupTable { + basepointTablePrecomp.initOnce.Do(func() { + p := NewGeneratorPoint() + for i := 0; i < 32; i++ { + basepointTablePrecomp.table[i].FromP3(p) + for j := 0; j < 8; j++ { + p.Add(p, p) + } + } + }) + return &basepointTablePrecomp.table +} + +var basepointTablePrecomp struct { + table [32]affineLookupTable + initOnce sync.Once +} + +// ScalarBaseMult sets v = x * B, where B is the canonical generator, and +// returns v. +// +// The scalar multiplication is done in constant time. +func (v *Point) ScalarBaseMult(x *Scalar) *Point { + basepointTable := basepointTable() + + // Write x = sum(x_i * 16^i) so x*B = sum( B*x_i*16^i ) + // as described in the Ed25519 paper + // + // Group even and odd coefficients + // x*B = x_0*16^0*B + x_2*16^2*B + ... + x_62*16^62*B + // + x_1*16^1*B + x_3*16^3*B + ... + x_63*16^63*B + // x*B = x_0*16^0*B + x_2*16^2*B + ... + x_62*16^62*B + // + 16*( x_1*16^0*B + x_3*16^2*B + ... + x_63*16^62*B) + // + // We use a lookup table for each i to get x_i*16^(2*i)*B + // and do four doublings to multiply by 16. + digits := x.signedRadix16() + + multiple := &affineCached{} + tmp1 := &projP1xP1{} + tmp2 := &projP2{} + + // Accumulate the odd components first + v.Set(NewIdentityPoint()) + for i := 1; i < 64; i += 2 { + basepointTable[i/2].SelectInto(multiple, digits[i]) + tmp1.AddAffine(v, multiple) + v.fromP1xP1(tmp1) + } + + // Multiply by 16 + tmp2.FromP3(v) // tmp2 = v in P2 coords + tmp1.Double(tmp2) // tmp1 = 2*v in P1xP1 coords + tmp2.FromP1xP1(tmp1) // tmp2 = 2*v in P2 coords + tmp1.Double(tmp2) // tmp1 = 4*v in P1xP1 coords + tmp2.FromP1xP1(tmp1) // tmp2 = 4*v in P2 coords + tmp1.Double(tmp2) // tmp1 = 8*v in P1xP1 coords + tmp2.FromP1xP1(tmp1) // tmp2 = 8*v in P2 coords + tmp1.Double(tmp2) // tmp1 = 16*v in P1xP1 coords + v.fromP1xP1(tmp1) // now v = 16*(odd components) + + // Accumulate the even components + for i := 0; i < 64; i += 2 { + basepointTable[i/2].SelectInto(multiple, digits[i]) + tmp1.AddAffine(v, multiple) + v.fromP1xP1(tmp1) + } + + return v +} + +// ScalarMult sets v = x * q, and returns v. 
+// +// The scalar multiplication is done in constant time. +func (v *Point) ScalarMult(x *Scalar, q *Point) *Point { + checkInitialized(q) + + var table projLookupTable + table.FromP3(q) + + // Write x = sum(x_i * 16^i) + // so x*Q = sum( Q*x_i*16^i ) + // = Q*x_0 + 16*(Q*x_1 + 16*( ... + Q*x_63) ... ) + // <------compute inside out--------- + // + // We use the lookup table to get the x_i*Q values + // and do four doublings to compute 16*Q + digits := x.signedRadix16() + + // Unwrap first loop iteration to save computing 16*identity + multiple := &projCached{} + tmp1 := &projP1xP1{} + tmp2 := &projP2{} + table.SelectInto(multiple, digits[63]) + + v.Set(NewIdentityPoint()) + tmp1.Add(v, multiple) // tmp1 = x_63*Q in P1xP1 coords + for i := 62; i >= 0; i-- { + tmp2.FromP1xP1(tmp1) // tmp2 = (prev) in P2 coords + tmp1.Double(tmp2) // tmp1 = 2*(prev) in P1xP1 coords + tmp2.FromP1xP1(tmp1) // tmp2 = 2*(prev) in P2 coords + tmp1.Double(tmp2) // tmp1 = 4*(prev) in P1xP1 coords + tmp2.FromP1xP1(tmp1) // tmp2 = 4*(prev) in P2 coords + tmp1.Double(tmp2) // tmp1 = 8*(prev) in P1xP1 coords + tmp2.FromP1xP1(tmp1) // tmp2 = 8*(prev) in P2 coords + tmp1.Double(tmp2) // tmp1 = 16*(prev) in P1xP1 coords + v.fromP1xP1(tmp1) // v = 16*(prev) in P3 coords + table.SelectInto(multiple, digits[i]) + tmp1.Add(v, multiple) // tmp1 = x_i*Q + 16*(prev) in P1xP1 coords + } + v.fromP1xP1(tmp1) + return v +} + +// basepointNafTable is the nafLookupTable8 for the basepoint. +// It is precomputed the first time it's used. +func basepointNafTable() *nafLookupTable8 { + basepointNafTablePrecomp.initOnce.Do(func() { + basepointNafTablePrecomp.table.FromP3(NewGeneratorPoint()) + }) + return &basepointNafTablePrecomp.table +} + +var basepointNafTablePrecomp struct { + table nafLookupTable8 + initOnce sync.Once +} + +// VarTimeDoubleScalarBaseMult sets v = a * A + b * B, where B is the canonical +// generator, and returns v. +// +// Execution time depends on the inputs. +func (v *Point) VarTimeDoubleScalarBaseMult(a *Scalar, A *Point, b *Scalar) *Point { + checkInitialized(A) + + // Similarly to the single variable-base approach, we compute + // digits and use them with a lookup table. However, because + // we are allowed to do variable-time operations, we don't + // need constant-time lookups or constant-time digit + // computations. + // + // So we use a non-adjacent form of some width w instead of + // radix 16. This is like a binary representation (one digit + // for each binary place) but we allow the digits to grow in + // magnitude up to 2^{w-1} so that the nonzero digits are as + // sparse as possible. Intuitively, this "condenses" the + // "mass" of the scalar onto sparse coefficients (meaning + // fewer additions). + + basepointNafTable := basepointNafTable() + var aTable nafLookupTable5 + aTable.FromP3(A) + // Because the basepoint is fixed, we can use a wider NAF + // corresponding to a bigger table. + aNaf := a.nonAdjacentForm(5) + bNaf := b.nonAdjacentForm(8) + + // Find the first nonzero coefficient. + i := 255 + for j := i; j >= 0; j-- { + if aNaf[j] != 0 || bNaf[j] != 0 { + break + } + } + + multA := &projCached{} + multB := &affineCached{} + tmp1 := &projP1xP1{} + tmp2 := &projP2{} + tmp2.Zero() + + // Move from high to low bits, doubling the accumulator + // at each iteration and checking whether there is a nonzero + // coefficient to look up a multiple of. + for ; i >= 0; i-- { + tmp1.Double(tmp2) + + // Only update v if we have a nonzero coeff to add in. 
+ if aNaf[i] > 0 { + v.fromP1xP1(tmp1) + aTable.SelectInto(multA, aNaf[i]) + tmp1.Add(v, multA) + } else if aNaf[i] < 0 { + v.fromP1xP1(tmp1) + aTable.SelectInto(multA, -aNaf[i]) + tmp1.Sub(v, multA) + } + + if bNaf[i] > 0 { + v.fromP1xP1(tmp1) + basepointNafTable.SelectInto(multB, bNaf[i]) + tmp1.AddAffine(v, multB) + } else if bNaf[i] < 0 { + v.fromP1xP1(tmp1) + basepointNafTable.SelectInto(multB, -bNaf[i]) + tmp1.SubAffine(v, multB) + } + + tmp2.FromP1xP1(tmp1) + } + + v.fromP2(tmp2) + return v +} diff --git a/vendor/filippo.io/edwards25519/tables.go b/vendor/filippo.io/edwards25519/tables.go new file mode 100644 index 00000000..beec956b --- /dev/null +++ b/vendor/filippo.io/edwards25519/tables.go @@ -0,0 +1,129 @@ +// Copyright (c) 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package edwards25519 + +import ( + "crypto/subtle" +) + +// A dynamic lookup table for variable-base, constant-time scalar muls. +type projLookupTable struct { + points [8]projCached +} + +// A precomputed lookup table for fixed-base, constant-time scalar muls. +type affineLookupTable struct { + points [8]affineCached +} + +// A dynamic lookup table for variable-base, variable-time scalar muls. +type nafLookupTable5 struct { + points [8]projCached +} + +// A precomputed lookup table for fixed-base, variable-time scalar muls. +type nafLookupTable8 struct { + points [64]affineCached +} + +// Constructors. + +// Builds a lookup table at runtime. Fast. +func (v *projLookupTable) FromP3(q *Point) { + // Goal: v.points[i] = (i+1)*Q, i.e., Q, 2Q, ..., 8Q + // This allows lookup of -8Q, ..., -Q, 0, Q, ..., 8Q + v.points[0].FromP3(q) + tmpP3 := Point{} + tmpP1xP1 := projP1xP1{} + for i := 0; i < 7; i++ { + // Compute (i+1)*Q as Q + i*Q and convert to a ProjCached + // This is needlessly complicated because the API has explicit + // recievers instead of creating stack objects and relying on RVO + v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.Add(q, &v.points[i]))) + } +} + +// This is not optimised for speed; fixed-base tables should be precomputed. +func (v *affineLookupTable) FromP3(q *Point) { + // Goal: v.points[i] = (i+1)*Q, i.e., Q, 2Q, ..., 8Q + // This allows lookup of -8Q, ..., -Q, 0, Q, ..., 8Q + v.points[0].FromP3(q) + tmpP3 := Point{} + tmpP1xP1 := projP1xP1{} + for i := 0; i < 7; i++ { + // Compute (i+1)*Q as Q + i*Q and convert to AffineCached + v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.AddAffine(q, &v.points[i]))) + } +} + +// Builds a lookup table at runtime. Fast. +func (v *nafLookupTable5) FromP3(q *Point) { + // Goal: v.points[i] = (2*i+1)*Q, i.e., Q, 3Q, 5Q, ..., 15Q + // This allows lookup of -15Q, ..., -3Q, -Q, 0, Q, 3Q, ..., 15Q + v.points[0].FromP3(q) + q2 := Point{} + q2.Add(q, q) + tmpP3 := Point{} + tmpP1xP1 := projP1xP1{} + for i := 0; i < 7; i++ { + v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.Add(&q2, &v.points[i]))) + } +} + +// This is not optimised for speed; fixed-base tables should be precomputed. +func (v *nafLookupTable8) FromP3(q *Point) { + v.points[0].FromP3(q) + q2 := Point{} + q2.Add(q, q) + tmpP3 := Point{} + tmpP1xP1 := projP1xP1{} + for i := 0; i < 63; i++ { + v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.AddAffine(&q2, &v.points[i]))) + } +} + +// Selectors. + +// Set dest to x*Q, where -8 <= x <= 8, in constant time. 
+func (v *projLookupTable) SelectInto(dest *projCached, x int8) { + // Compute xabs = |x| + xmask := x >> 7 + xabs := uint8((x + xmask) ^ xmask) + + dest.Zero() + for j := 1; j <= 8; j++ { + // Set dest = j*Q if |x| = j + cond := subtle.ConstantTimeByteEq(xabs, uint8(j)) + dest.Select(&v.points[j-1], dest, cond) + } + // Now dest = |x|*Q, conditionally negate to get x*Q + dest.CondNeg(int(xmask & 1)) +} + +// Set dest to x*Q, where -8 <= x <= 8, in constant time. +func (v *affineLookupTable) SelectInto(dest *affineCached, x int8) { + // Compute xabs = |x| + xmask := x >> 7 + xabs := uint8((x + xmask) ^ xmask) + + dest.Zero() + for j := 1; j <= 8; j++ { + // Set dest = j*Q if |x| = j + cond := subtle.ConstantTimeByteEq(xabs, uint8(j)) + dest.Select(&v.points[j-1], dest, cond) + } + // Now dest = |x|*Q, conditionally negate to get x*Q + dest.CondNeg(int(xmask & 1)) +} + +// Given odd x with 0 < x < 2^4, return x*Q (in variable time). +func (v *nafLookupTable5) SelectInto(dest *projCached, x int8) { + *dest = v.points[x/2] +} + +// Given odd x with 0 < x < 2^7, return x*Q (in variable time). +func (v *nafLookupTable8) SelectInto(dest *affineCached, x int8) { + *dest = v.points[x/2] +} |
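The generic field code above repeatedly relies on the identity a·2²⁵⁵ + b ≡ 19·a + b (mod 2²⁵⁵ − 19) when it folds the carry out of the top limb back into the lowest one (the `c4*19` terms and `carryPropagateGeneric`). The limb types are unexported, so the following is only a minimal math/big sketch of that identity, not code from the package:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	one := big.NewInt(1)
	// p = 2^255 - 19, the prime the 51-bit limb schedule works modulo.
	p := new(big.Int).Sub(new(big.Int).Lsh(one, 255), big.NewInt(19))

	// a plays the role of a carry popping out above bit 255, b the low part.
	a := big.NewInt(123456789)
	b := new(big.Int).Lsh(one, 200)

	// lhs = (a*2^255 + b) mod p
	lhs := new(big.Int).Lsh(a, 255)
	lhs.Add(lhs, b)
	lhs.Mod(lhs, p)

	// rhs = (19*a + b) mod p, the cheap substitution the carry chain applies.
	rhs := new(big.Int).Mul(a, big.NewInt(19))
	rhs.Add(rhs, b)
	rhs.Mod(rhs, p)

	fmt.Println("a*2^255 + b == 19*a + b (mod p):", lhs.Cmp(rhs) == 0)
}
```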
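scalar.go keeps every Scalar reduced modulo l and builds Add, Subtract, Negate and Multiply on top of a single scMulAdd primitive. Below is a small usage sketch of the exported API shown above (SetUniformBytes, SetCanonicalBytes, MultiplyAdd, Multiply, Add, Equal); the fixed byte buffers stand in for what would normally be CSPRNG or hash output:

```go
package main

import (
	"fmt"

	"filippo.io/edwards25519"
)

func main() {
	// 64 "uniform" bytes reduce to a Scalar mod l; real callers would use
	// crypto/rand or a 512-bit hash output instead of fixed buffers.
	var wideX, wideY [64]byte
	for i := range wideX {
		wideX[i] = byte(i + 1)
		wideY[i] = byte(3 * i)
	}
	x, err := edwards25519.NewScalar().SetUniformBytes(wideX[:])
	if err != nil {
		panic(err)
	}
	y, _ := edwards25519.NewScalar().SetUniformBytes(wideY[:])

	// The canonical encoding of 1, accepted because 1 < l.
	oneBytes := make([]byte, 32)
	oneBytes[0] = 1
	one, _ := edwards25519.NewScalar().SetCanonicalBytes(oneBytes)

	// s = x*y + x mod l, a single scMulAdd call under the hood.
	s := edwards25519.NewScalar().MultiplyAdd(x, y, x)

	// The same value computed as x*(y+1) mod l must match.
	t := edwards25519.NewScalar().Multiply(x, edwards25519.NewScalar().Add(y, one))

	fmt.Println("x*y + x == x*(y+1) mod l:", s.Equal(t) == 1)
}
```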
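nonAdjacentForm recodes a scalar into signed digits that are either zero or odd and bounded by 2^(w−1), so that at most one position in every w needs a table lookup in VarTimeDoubleScalarBaseMult. That method is unexported, so the sketch below re-derives the same digit rule for a plain uint64, purely as an illustration of the recoding (it is not the library's 256-bit routine), and checks that the digits re-sum to the input:

```go
package main

import "fmt"

// nafDigits returns a width-w non-adjacent form of x: every digit is zero or
// odd, lies in (-2^(w-1), 2^(w-1)), and the digits satisfy sum(d[i]*2^i) == x.
func nafDigits(x uint64, w uint) []int64 {
	width := int64(1) << w
	var digits []int64
	for x != 0 {
		var d int64
		if x&1 == 1 {
			d = int64(x & uint64(width-1)) // low w bits of the remaining value
			if d >= width/2 {
				d -= width // recenter into (-2^(w-1), 2^(w-1))
			}
			x -= uint64(d) // unsigned wraparound adds |d| when d is negative
		}
		digits = append(digits, d)
		x >>= 1
	}
	return digits
}

func main() {
	x := uint64(0xDEADBEEF)
	digits := nafDigits(x, 5)

	var sum uint64
	nonzero := 0
	for i, d := range digits {
		sum += uint64(d) << uint(i) // wraps correctly for negative digits
		if d != 0 {
			nonzero++
		}
	}
	fmt.Println("re-sums to x:", sum == x, "nonzero digits:", nonzero, "of", len(digits))
}
```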
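scalarmult.go pairs the constant-time ScalarBaseMult and ScalarMult with the variable-time VarTimeDoubleScalarBaseMult used for signature verification. Below is a rough consistency sketch using only exported API; Point.Equal lives in edwards25519.go (listed in the diffstat but not shown here), so its use is an assumption about that file:

```go
package main

import (
	"fmt"

	"filippo.io/edwards25519"
)

func main() {
	// Small scalars from canonical 32-byte little-endian encodings.
	aBytes := make([]byte, 32)
	aBytes[0] = 3
	bBytes := make([]byte, 32)
	bBytes[0] = 5
	a, _ := edwards25519.NewScalar().SetCanonicalBytes(aBytes)
	b, _ := edwards25519.NewScalar().SetCanonicalBytes(bBytes)

	B := edwards25519.NewGeneratorPoint()

	// Constant-time: the fixed-base path (precomputed affine tables) and the
	// variable-base path (runtime projective table) must agree on a*B.
	p1 := edwards25519.NewIdentityPoint().ScalarBaseMult(a)
	p2 := edwards25519.NewIdentityPoint().ScalarMult(a, B)
	fmt.Println("ScalarBaseMult == ScalarMult:", p1.Equal(p2) == 1)

	// Variable-time: with A set to the generator, a*A + b*B is just (a+b)*B.
	sum := edwards25519.NewScalar().Add(a, b)
	lhs := edwards25519.NewIdentityPoint().VarTimeDoubleScalarBaseMult(a, B, b)
	rhs := edwards25519.NewIdentityPoint().ScalarBaseMult(sum)
	fmt.Println("a*B + b*B == (a+b)*B:", lhs.Equal(rhs) == 1)
}
```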