Bump github.com/hashicorp/terraform-plugin-sdk/v2 from 2.26.1 to 2.27.0
Bumps [github.com/hashicorp/terraform-plugin-sdk/v2](https://github.com/hashicorp/terraform-plugin-sdk) from 2.26.1 to 2.27.0. - [Release notes](https://github.com/hashicorp/terraform-plugin-sdk/releases) - [Changelog](https://github.com/hashicorp/terraform-plugin-sdk/blob/main/CHANGELOG.md) - [Commits](https://github.com/hashicorp/terraform-plugin-sdk/compare/v2.26.1...v2.27.0) --- updated-dependencies: - dependency-name: github.com/hashicorp/terraform-plugin-sdk/v2 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] <support@github.com>
This commit is contained in:
205
vendor/github.com/cloudflare/circl/math/fp25519/fp.go
generated
vendored
Normal file
205
vendor/github.com/cloudflare/circl/math/fp25519/fp.go
generated
vendored
Normal file
@ -0,0 +1,205 @@
|
||||
// Package fp25519 provides prime field arithmetic over GF(2^255-19).
|
||||
package fp25519
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/cloudflare/circl/internal/conv"
|
||||
)
|
||||
|
||||
// Size in bytes of an element.
|
||||
const Size = 32
|
||||
|
||||
// Elt is a prime field element.
|
||||
type Elt [Size]byte
|
||||
|
||||
func (e Elt) String() string { return conv.BytesLe2Hex(e[:]) }
|
||||
|
||||
// p is the prime modulus 2^255-19.
|
||||
var p = Elt{
|
||||
0xed, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
|
||||
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
|
||||
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
|
||||
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f,
|
||||
}
|
||||
|
||||
// P returns the prime modulus 2^255-19.
|
||||
func P() Elt { return p }
|
||||
|
||||
// ToBytes stores in b the little-endian byte representation of x.
|
||||
func ToBytes(b []byte, x *Elt) error {
|
||||
if len(b) != Size {
|
||||
return errors.New("wrong size")
|
||||
}
|
||||
Modp(x)
|
||||
copy(b, x[:])
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsZero returns true if x is equal to 0.
|
||||
func IsZero(x *Elt) bool { Modp(x); return *x == Elt{} }
|
||||
|
||||
// SetOne assigns x=1.
|
||||
func SetOne(x *Elt) { *x = Elt{}; x[0] = 1 }
|
||||
|
||||
// Neg calculates z = -x.
|
||||
func Neg(z, x *Elt) { Sub(z, &p, x) }
|
||||
|
||||
// InvSqrt calculates z = sqrt(x/y) iff x/y is a quadratic-residue, which is
|
||||
// indicated by returning isQR = true. Otherwise, when x/y is a quadratic
|
||||
// non-residue, z will have an undetermined value and isQR = false.
|
||||
func InvSqrt(z, x, y *Elt) (isQR bool) {
|
||||
sqrtMinusOne := &Elt{
|
||||
0xb0, 0xa0, 0x0e, 0x4a, 0x27, 0x1b, 0xee, 0xc4,
|
||||
0x78, 0xe4, 0x2f, 0xad, 0x06, 0x18, 0x43, 0x2f,
|
||||
0xa7, 0xd7, 0xfb, 0x3d, 0x99, 0x00, 0x4d, 0x2b,
|
||||
0x0b, 0xdf, 0xc1, 0x4f, 0x80, 0x24, 0x83, 0x2b,
|
||||
}
|
||||
t0, t1, t2, t3 := &Elt{}, &Elt{}, &Elt{}, &Elt{}
|
||||
|
||||
Mul(t0, x, y) // t0 = u*v
|
||||
Sqr(t1, y) // t1 = v^2
|
||||
Mul(t2, t0, t1) // t2 = u*v^3
|
||||
Sqr(t0, t1) // t0 = v^4
|
||||
Mul(t1, t0, t2) // t1 = u*v^7
|
||||
|
||||
var Tab [4]*Elt
|
||||
Tab[0] = &Elt{}
|
||||
Tab[1] = &Elt{}
|
||||
Tab[2] = t3
|
||||
Tab[3] = t1
|
||||
|
||||
*Tab[0] = *t1
|
||||
Sqr(Tab[0], Tab[0])
|
||||
Sqr(Tab[1], Tab[0])
|
||||
Sqr(Tab[1], Tab[1])
|
||||
Mul(Tab[1], Tab[1], Tab[3])
|
||||
Mul(Tab[0], Tab[0], Tab[1])
|
||||
Sqr(Tab[0], Tab[0])
|
||||
Mul(Tab[0], Tab[0], Tab[1])
|
||||
Sqr(Tab[1], Tab[0])
|
||||
for i := 0; i < 4; i++ {
|
||||
Sqr(Tab[1], Tab[1])
|
||||
}
|
||||
Mul(Tab[1], Tab[1], Tab[0])
|
||||
Sqr(Tab[2], Tab[1])
|
||||
for i := 0; i < 4; i++ {
|
||||
Sqr(Tab[2], Tab[2])
|
||||
}
|
||||
Mul(Tab[2], Tab[2], Tab[0])
|
||||
Sqr(Tab[1], Tab[2])
|
||||
for i := 0; i < 14; i++ {
|
||||
Sqr(Tab[1], Tab[1])
|
||||
}
|
||||
Mul(Tab[1], Tab[1], Tab[2])
|
||||
Sqr(Tab[2], Tab[1])
|
||||
for i := 0; i < 29; i++ {
|
||||
Sqr(Tab[2], Tab[2])
|
||||
}
|
||||
Mul(Tab[2], Tab[2], Tab[1])
|
||||
Sqr(Tab[1], Tab[2])
|
||||
for i := 0; i < 59; i++ {
|
||||
Sqr(Tab[1], Tab[1])
|
||||
}
|
||||
Mul(Tab[1], Tab[1], Tab[2])
|
||||
for i := 0; i < 5; i++ {
|
||||
Sqr(Tab[1], Tab[1])
|
||||
}
|
||||
Mul(Tab[1], Tab[1], Tab[0])
|
||||
Sqr(Tab[2], Tab[1])
|
||||
for i := 0; i < 124; i++ {
|
||||
Sqr(Tab[2], Tab[2])
|
||||
}
|
||||
Mul(Tab[2], Tab[2], Tab[1])
|
||||
Sqr(Tab[2], Tab[2])
|
||||
Sqr(Tab[2], Tab[2])
|
||||
Mul(Tab[2], Tab[2], Tab[3])
|
||||
|
||||
Mul(z, t3, t2) // z = xy^(p+3)/8 = xy^3*(xy^7)^(p-5)/8
|
||||
// Checking whether y z^2 == x
|
||||
Sqr(t0, z) // t0 = z^2
|
||||
Mul(t0, t0, y) // t0 = yz^2
|
||||
Sub(t1, t0, x) // t1 = t0-u
|
||||
Add(t2, t0, x) // t2 = t0+u
|
||||
if IsZero(t1) {
|
||||
return true
|
||||
} else if IsZero(t2) {
|
||||
Mul(z, z, sqrtMinusOne) // z = z*sqrt(-1)
|
||||
return true
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Inv calculates z = 1/x mod p.
|
||||
func Inv(z, x *Elt) {
|
||||
x0, x1, x2 := &Elt{}, &Elt{}, &Elt{}
|
||||
Sqr(x1, x)
|
||||
Sqr(x0, x1)
|
||||
Sqr(x0, x0)
|
||||
Mul(x0, x0, x)
|
||||
Mul(z, x0, x1)
|
||||
Sqr(x1, z)
|
||||
Mul(x0, x0, x1)
|
||||
Sqr(x1, x0)
|
||||
for i := 0; i < 4; i++ {
|
||||
Sqr(x1, x1)
|
||||
}
|
||||
Mul(x0, x0, x1)
|
||||
Sqr(x1, x0)
|
||||
for i := 0; i < 9; i++ {
|
||||
Sqr(x1, x1)
|
||||
}
|
||||
Mul(x1, x1, x0)
|
||||
Sqr(x2, x1)
|
||||
for i := 0; i < 19; i++ {
|
||||
Sqr(x2, x2)
|
||||
}
|
||||
Mul(x2, x2, x1)
|
||||
for i := 0; i < 10; i++ {
|
||||
Sqr(x2, x2)
|
||||
}
|
||||
Mul(x2, x2, x0)
|
||||
Sqr(x0, x2)
|
||||
for i := 0; i < 49; i++ {
|
||||
Sqr(x0, x0)
|
||||
}
|
||||
Mul(x0, x0, x2)
|
||||
Sqr(x1, x0)
|
||||
for i := 0; i < 99; i++ {
|
||||
Sqr(x1, x1)
|
||||
}
|
||||
Mul(x1, x1, x0)
|
||||
for i := 0; i < 50; i++ {
|
||||
Sqr(x1, x1)
|
||||
}
|
||||
Mul(x1, x1, x2)
|
||||
for i := 0; i < 5; i++ {
|
||||
Sqr(x1, x1)
|
||||
}
|
||||
Mul(z, z, x1)
|
||||
}
|
||||
|
||||
// Cmov assigns y to x if n is 1.
|
||||
func Cmov(x, y *Elt, n uint) { cmov(x, y, n) }
|
||||
|
||||
// Cswap interchanges x and y if n is 1.
|
||||
func Cswap(x, y *Elt, n uint) { cswap(x, y, n) }
|
||||
|
||||
// Add calculates z = x+y mod p.
|
||||
func Add(z, x, y *Elt) { add(z, x, y) }
|
||||
|
||||
// Sub calculates z = x-y mod p.
|
||||
func Sub(z, x, y *Elt) { sub(z, x, y) }
|
||||
|
||||
// AddSub calculates (x,y) = (x+y mod p, x-y mod p).
|
||||
func AddSub(x, y *Elt) { addsub(x, y) }
|
||||
|
||||
// Mul calculates z = x*y mod p.
|
||||
func Mul(z, x, y *Elt) { mul(z, x, y) }
|
||||
|
||||
// Sqr calculates z = x^2 mod p.
|
||||
func Sqr(z, x *Elt) { sqr(z, x) }
|
||||
|
||||
// Modp ensures that z is between [0,p-1].
|
||||
func Modp(z *Elt) { modp(z) }
|
45
vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.go
generated
vendored
Normal file
45
vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.go
generated
vendored
Normal file
@ -0,0 +1,45 @@
|
||||
//go:build amd64 && !purego
|
||||
// +build amd64,!purego
|
||||
|
||||
package fp25519
|
||||
|
||||
import (
|
||||
"golang.org/x/sys/cpu"
|
||||
)
|
||||
|
||||
var hasBmi2Adx = cpu.X86.HasBMI2 && cpu.X86.HasADX
|
||||
|
||||
var _ = hasBmi2Adx
|
||||
|
||||
func cmov(x, y *Elt, n uint) { cmovAmd64(x, y, n) }
|
||||
func cswap(x, y *Elt, n uint) { cswapAmd64(x, y, n) }
|
||||
func add(z, x, y *Elt) { addAmd64(z, x, y) }
|
||||
func sub(z, x, y *Elt) { subAmd64(z, x, y) }
|
||||
func addsub(x, y *Elt) { addsubAmd64(x, y) }
|
||||
func mul(z, x, y *Elt) { mulAmd64(z, x, y) }
|
||||
func sqr(z, x *Elt) { sqrAmd64(z, x) }
|
||||
func modp(z *Elt) { modpAmd64(z) }
|
||||
|
||||
//go:noescape
|
||||
func cmovAmd64(x, y *Elt, n uint)
|
||||
|
||||
//go:noescape
|
||||
func cswapAmd64(x, y *Elt, n uint)
|
||||
|
||||
//go:noescape
|
||||
func addAmd64(z, x, y *Elt)
|
||||
|
||||
//go:noescape
|
||||
func subAmd64(z, x, y *Elt)
|
||||
|
||||
//go:noescape
|
||||
func addsubAmd64(x, y *Elt)
|
||||
|
||||
//go:noescape
|
||||
func mulAmd64(z, x, y *Elt)
|
||||
|
||||
//go:noescape
|
||||
func sqrAmd64(z, x *Elt)
|
||||
|
||||
//go:noescape
|
||||
func modpAmd64(z *Elt)
|
351
vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.h
generated
vendored
Normal file
351
vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.h
generated
vendored
Normal file
@ -0,0 +1,351 @@
|
||||
// This code was imported from https://github.com/armfazh/rfc7748_precomputed
|
||||
|
||||
// CHECK_BMI2ADX triggers bmi2adx if supported,
|
||||
// otherwise it fallbacks to legacy code.
|
||||
#define CHECK_BMI2ADX(label, legacy, bmi2adx) \
|
||||
CMPB ·hasBmi2Adx(SB), $0 \
|
||||
JE label \
|
||||
bmi2adx \
|
||||
RET \
|
||||
label: \
|
||||
legacy \
|
||||
RET
|
||||
|
||||
// cselect is a conditional move
|
||||
// if b=1: it copies y into x;
|
||||
// if b=0: x remains with the same value;
|
||||
// if b<> 0,1: undefined.
|
||||
// Uses: AX, DX, FLAGS
|
||||
// Instr: x86_64, cmov
|
||||
#define cselect(x,y,b) \
|
||||
TESTQ b, b \
|
||||
MOVQ 0+x, AX; MOVQ 0+y, DX; CMOVQNE DX, AX; MOVQ AX, 0+x; \
|
||||
MOVQ 8+x, AX; MOVQ 8+y, DX; CMOVQNE DX, AX; MOVQ AX, 8+x; \
|
||||
MOVQ 16+x, AX; MOVQ 16+y, DX; CMOVQNE DX, AX; MOVQ AX, 16+x; \
|
||||
MOVQ 24+x, AX; MOVQ 24+y, DX; CMOVQNE DX, AX; MOVQ AX, 24+x;
|
||||
|
||||
// cswap is a conditional swap
|
||||
// if b=1: x,y <- y,x;
|
||||
// if b=0: x,y remain with the same values;
|
||||
// if b<> 0,1: undefined.
|
||||
// Uses: AX, DX, R8, FLAGS
|
||||
// Instr: x86_64, cmov
|
||||
#define cswap(x,y,b) \
|
||||
TESTQ b, b \
|
||||
MOVQ 0+x, AX; MOVQ AX, R8; MOVQ 0+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 0+x; MOVQ DX, 0+y; \
|
||||
MOVQ 8+x, AX; MOVQ AX, R8; MOVQ 8+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 8+x; MOVQ DX, 8+y; \
|
||||
MOVQ 16+x, AX; MOVQ AX, R8; MOVQ 16+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 16+x; MOVQ DX, 16+y; \
|
||||
MOVQ 24+x, AX; MOVQ AX, R8; MOVQ 24+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 24+x; MOVQ DX, 24+y;
|
||||
|
||||
// additionLeg adds x and y and stores in z
|
||||
// Uses: AX, DX, R8-R11, FLAGS
|
||||
// Instr: x86_64, cmov
|
||||
#define additionLeg(z,x,y) \
|
||||
MOVL $38, AX; \
|
||||
MOVL $0, DX; \
|
||||
MOVQ 0+x, R8; ADDQ 0+y, R8; \
|
||||
MOVQ 8+x, R9; ADCQ 8+y, R9; \
|
||||
MOVQ 16+x, R10; ADCQ 16+y, R10; \
|
||||
MOVQ 24+x, R11; ADCQ 24+y, R11; \
|
||||
CMOVQCS AX, DX; \
|
||||
ADDQ DX, R8; \
|
||||
ADCQ $0, R9; MOVQ R9, 8+z; \
|
||||
ADCQ $0, R10; MOVQ R10, 16+z; \
|
||||
ADCQ $0, R11; MOVQ R11, 24+z; \
|
||||
MOVL $0, DX; \
|
||||
CMOVQCS AX, DX; \
|
||||
ADDQ DX, R8; MOVQ R8, 0+z;
|
||||
|
||||
// additionAdx adds x and y and stores in z
|
||||
// Uses: AX, DX, R8-R11, FLAGS
|
||||
// Instr: x86_64, cmov, adx
|
||||
#define additionAdx(z,x,y) \
|
||||
MOVL $38, AX; \
|
||||
XORL DX, DX; \
|
||||
MOVQ 0+x, R8; ADCXQ 0+y, R8; \
|
||||
MOVQ 8+x, R9; ADCXQ 8+y, R9; \
|
||||
MOVQ 16+x, R10; ADCXQ 16+y, R10; \
|
||||
MOVQ 24+x, R11; ADCXQ 24+y, R11; \
|
||||
CMOVQCS AX, DX ; \
|
||||
XORL AX, AX; \
|
||||
ADCXQ DX, R8; \
|
||||
ADCXQ AX, R9; MOVQ R9, 8+z; \
|
||||
ADCXQ AX, R10; MOVQ R10, 16+z; \
|
||||
ADCXQ AX, R11; MOVQ R11, 24+z; \
|
||||
MOVL $38, DX; \
|
||||
CMOVQCS DX, AX; \
|
||||
ADDQ AX, R8; MOVQ R8, 0+z;
|
||||
|
||||
// subtraction subtracts y from x and stores in z
|
||||
// Uses: AX, DX, R8-R11, FLAGS
|
||||
// Instr: x86_64, cmov
|
||||
#define subtraction(z,x,y) \
|
||||
MOVL $38, AX; \
|
||||
MOVQ 0+x, R8; SUBQ 0+y, R8; \
|
||||
MOVQ 8+x, R9; SBBQ 8+y, R9; \
|
||||
MOVQ 16+x, R10; SBBQ 16+y, R10; \
|
||||
MOVQ 24+x, R11; SBBQ 24+y, R11; \
|
||||
MOVL $0, DX; \
|
||||
CMOVQCS AX, DX; \
|
||||
SUBQ DX, R8; \
|
||||
SBBQ $0, R9; MOVQ R9, 8+z; \
|
||||
SBBQ $0, R10; MOVQ R10, 16+z; \
|
||||
SBBQ $0, R11; MOVQ R11, 24+z; \
|
||||
MOVL $0, DX; \
|
||||
CMOVQCS AX, DX; \
|
||||
SUBQ DX, R8; MOVQ R8, 0+z;
|
||||
|
||||
// integerMulAdx multiplies x and y and stores in z
|
||||
// Uses: AX, DX, R8-R15, FLAGS
|
||||
// Instr: x86_64, bmi2, adx
|
||||
#define integerMulAdx(z,x,y) \
|
||||
MOVL $0,R15; \
|
||||
MOVQ 0+y, DX; XORL AX, AX; \
|
||||
MULXQ 0+x, AX, R8; MOVQ AX, 0+z; \
|
||||
MULXQ 8+x, AX, R9; ADCXQ AX, R8; \
|
||||
MULXQ 16+x, AX, R10; ADCXQ AX, R9; \
|
||||
MULXQ 24+x, AX, R11; ADCXQ AX, R10; \
|
||||
MOVL $0, AX;;;;;;;;; ADCXQ AX, R11; \
|
||||
MOVQ 8+y, DX; XORL AX, AX; \
|
||||
MULXQ 0+x, AX, R12; ADCXQ R8, AX; MOVQ AX, 8+z; \
|
||||
MULXQ 8+x, AX, R13; ADCXQ R9, R12; ADOXQ AX, R12; \
|
||||
MULXQ 16+x, AX, R14; ADCXQ R10, R13; ADOXQ AX, R13; \
|
||||
MULXQ 24+x, AX, R15; ADCXQ R11, R14; ADOXQ AX, R14; \
|
||||
MOVL $0, AX;;;;;;;;; ADCXQ AX, R15; ADOXQ AX, R15; \
|
||||
MOVQ 16+y, DX; XORL AX, AX; \
|
||||
MULXQ 0+x, AX, R8; ADCXQ R12, AX; MOVQ AX, 16+z; \
|
||||
MULXQ 8+x, AX, R9; ADCXQ R13, R8; ADOXQ AX, R8; \
|
||||
MULXQ 16+x, AX, R10; ADCXQ R14, R9; ADOXQ AX, R9; \
|
||||
MULXQ 24+x, AX, R11; ADCXQ R15, R10; ADOXQ AX, R10; \
|
||||
MOVL $0, AX;;;;;;;;; ADCXQ AX, R11; ADOXQ AX, R11; \
|
||||
MOVQ 24+y, DX; XORL AX, AX; \
|
||||
MULXQ 0+x, AX, R12; ADCXQ R8, AX; MOVQ AX, 24+z; \
|
||||
MULXQ 8+x, AX, R13; ADCXQ R9, R12; ADOXQ AX, R12; MOVQ R12, 32+z; \
|
||||
MULXQ 16+x, AX, R14; ADCXQ R10, R13; ADOXQ AX, R13; MOVQ R13, 40+z; \
|
||||
MULXQ 24+x, AX, R15; ADCXQ R11, R14; ADOXQ AX, R14; MOVQ R14, 48+z; \
|
||||
MOVL $0, AX;;;;;;;;; ADCXQ AX, R15; ADOXQ AX, R15; MOVQ R15, 56+z;
|
||||
|
||||
// integerMulLeg multiplies x and y and stores in z
|
||||
// Uses: AX, DX, R8-R15, FLAGS
|
||||
// Instr: x86_64
|
||||
#define integerMulLeg(z,x,y) \
|
||||
MOVQ 0+y, R8; \
|
||||
MOVQ 0+x, AX; MULQ R8; MOVQ AX, 0+z; MOVQ DX, R15; \
|
||||
MOVQ 8+x, AX; MULQ R8; MOVQ AX, R13; MOVQ DX, R10; \
|
||||
MOVQ 16+x, AX; MULQ R8; MOVQ AX, R14; MOVQ DX, R11; \
|
||||
MOVQ 24+x, AX; MULQ R8; \
|
||||
ADDQ R13, R15; \
|
||||
ADCQ R14, R10; MOVQ R10, 16+z; \
|
||||
ADCQ AX, R11; MOVQ R11, 24+z; \
|
||||
ADCQ $0, DX; MOVQ DX, 32+z; \
|
||||
MOVQ 8+y, R8; \
|
||||
MOVQ 0+x, AX; MULQ R8; MOVQ AX, R12; MOVQ DX, R9; \
|
||||
MOVQ 8+x, AX; MULQ R8; MOVQ AX, R13; MOVQ DX, R10; \
|
||||
MOVQ 16+x, AX; MULQ R8; MOVQ AX, R14; MOVQ DX, R11; \
|
||||
MOVQ 24+x, AX; MULQ R8; \
|
||||
ADDQ R12, R15; MOVQ R15, 8+z; \
|
||||
ADCQ R13, R9; \
|
||||
ADCQ R14, R10; \
|
||||
ADCQ AX, R11; \
|
||||
ADCQ $0, DX; \
|
||||
ADCQ 16+z, R9; MOVQ R9, R15; \
|
||||
ADCQ 24+z, R10; MOVQ R10, 24+z; \
|
||||
ADCQ 32+z, R11; MOVQ R11, 32+z; \
|
||||
ADCQ $0, DX; MOVQ DX, 40+z; \
|
||||
MOVQ 16+y, R8; \
|
||||
MOVQ 0+x, AX; MULQ R8; MOVQ AX, R12; MOVQ DX, R9; \
|
||||
MOVQ 8+x, AX; MULQ R8; MOVQ AX, R13; MOVQ DX, R10; \
|
||||
MOVQ 16+x, AX; MULQ R8; MOVQ AX, R14; MOVQ DX, R11; \
|
||||
MOVQ 24+x, AX; MULQ R8; \
|
||||
ADDQ R12, R15; MOVQ R15, 16+z; \
|
||||
ADCQ R13, R9; \
|
||||
ADCQ R14, R10; \
|
||||
ADCQ AX, R11; \
|
||||
ADCQ $0, DX; \
|
||||
ADCQ 24+z, R9; MOVQ R9, R15; \
|
||||
ADCQ 32+z, R10; MOVQ R10, 32+z; \
|
||||
ADCQ 40+z, R11; MOVQ R11, 40+z; \
|
||||
ADCQ $0, DX; MOVQ DX, 48+z; \
|
||||
MOVQ 24+y, R8; \
|
||||
MOVQ 0+x, AX; MULQ R8; MOVQ AX, R12; MOVQ DX, R9; \
|
||||
MOVQ 8+x, AX; MULQ R8; MOVQ AX, R13; MOVQ DX, R10; \
|
||||
MOVQ 16+x, AX; MULQ R8; MOVQ AX, R14; MOVQ DX, R11; \
|
||||
MOVQ 24+x, AX; MULQ R8; \
|
||||
ADDQ R12, R15; MOVQ R15, 24+z; \
|
||||
ADCQ R13, R9; \
|
||||
ADCQ R14, R10; \
|
||||
ADCQ AX, R11; \
|
||||
ADCQ $0, DX; \
|
||||
ADCQ 32+z, R9; MOVQ R9, 32+z; \
|
||||
ADCQ 40+z, R10; MOVQ R10, 40+z; \
|
||||
ADCQ 48+z, R11; MOVQ R11, 48+z; \
|
||||
ADCQ $0, DX; MOVQ DX, 56+z;
|
||||
|
||||
// integerSqrLeg squares x and stores in z
|
||||
// Uses: AX, CX, DX, R8-R15, FLAGS
|
||||
// Instr: x86_64
|
||||
#define integerSqrLeg(z,x) \
|
||||
MOVQ 0+x, R8; \
|
||||
MOVQ 8+x, AX; MULQ R8; MOVQ AX, R9; MOVQ DX, R10; /* A[0]*A[1] */ \
|
||||
MOVQ 16+x, AX; MULQ R8; MOVQ AX, R14; MOVQ DX, R11; /* A[0]*A[2] */ \
|
||||
MOVQ 24+x, AX; MULQ R8; MOVQ AX, R15; MOVQ DX, R12; /* A[0]*A[3] */ \
|
||||
MOVQ 24+x, R8; \
|
||||
MOVQ 8+x, AX; MULQ R8; MOVQ AX, CX; MOVQ DX, R13; /* A[3]*A[1] */ \
|
||||
MOVQ 16+x, AX; MULQ R8; /* A[3]*A[2] */ \
|
||||
\
|
||||
ADDQ R14, R10;\
|
||||
ADCQ R15, R11; MOVL $0, R15;\
|
||||
ADCQ CX, R12;\
|
||||
ADCQ AX, R13;\
|
||||
ADCQ $0, DX; MOVQ DX, R14;\
|
||||
MOVQ 8+x, AX; MULQ 16+x;\
|
||||
\
|
||||
ADDQ AX, R11;\
|
||||
ADCQ DX, R12;\
|
||||
ADCQ $0, R13;\
|
||||
ADCQ $0, R14;\
|
||||
ADCQ $0, R15;\
|
||||
\
|
||||
SHLQ $1, R14, R15; MOVQ R15, 56+z;\
|
||||
SHLQ $1, R13, R14; MOVQ R14, 48+z;\
|
||||
SHLQ $1, R12, R13; MOVQ R13, 40+z;\
|
||||
SHLQ $1, R11, R12; MOVQ R12, 32+z;\
|
||||
SHLQ $1, R10, R11; MOVQ R11, 24+z;\
|
||||
SHLQ $1, R9, R10; MOVQ R10, 16+z;\
|
||||
SHLQ $1, R9; MOVQ R9, 8+z;\
|
||||
\
|
||||
MOVQ 0+x,AX; MULQ AX; MOVQ AX, 0+z; MOVQ DX, R9;\
|
||||
MOVQ 8+x,AX; MULQ AX; MOVQ AX, R10; MOVQ DX, R11;\
|
||||
MOVQ 16+x,AX; MULQ AX; MOVQ AX, R12; MOVQ DX, R13;\
|
||||
MOVQ 24+x,AX; MULQ AX; MOVQ AX, R14; MOVQ DX, R15;\
|
||||
\
|
||||
ADDQ 8+z, R9; MOVQ R9, 8+z;\
|
||||
ADCQ 16+z, R10; MOVQ R10, 16+z;\
|
||||
ADCQ 24+z, R11; MOVQ R11, 24+z;\
|
||||
ADCQ 32+z, R12; MOVQ R12, 32+z;\
|
||||
ADCQ 40+z, R13; MOVQ R13, 40+z;\
|
||||
ADCQ 48+z, R14; MOVQ R14, 48+z;\
|
||||
ADCQ 56+z, R15; MOVQ R15, 56+z;
|
||||
|
||||
// integerSqrAdx squares x and stores in z
|
||||
// Uses: AX, CX, DX, R8-R15, FLAGS
|
||||
// Instr: x86_64, bmi2, adx
|
||||
#define integerSqrAdx(z,x) \
|
||||
MOVQ 0+x, DX; /* A[0] */ \
|
||||
MULXQ 8+x, R8, R14; /* A[1]*A[0] */ XORL R15, R15; \
|
||||
MULXQ 16+x, R9, R10; /* A[2]*A[0] */ ADCXQ R14, R9; \
|
||||
MULXQ 24+x, AX, CX; /* A[3]*A[0] */ ADCXQ AX, R10; \
|
||||
MOVQ 24+x, DX; /* A[3] */ \
|
||||
MULXQ 8+x, R11, R12; /* A[1]*A[3] */ ADCXQ CX, R11; \
|
||||
MULXQ 16+x, AX, R13; /* A[2]*A[3] */ ADCXQ AX, R12; \
|
||||
MOVQ 8+x, DX; /* A[1] */ ADCXQ R15, R13; \
|
||||
MULXQ 16+x, AX, CX; /* A[2]*A[1] */ MOVL $0, R14; \
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ADCXQ R15, R14; \
|
||||
XORL R15, R15; \
|
||||
ADOXQ AX, R10; ADCXQ R8, R8; \
|
||||
ADOXQ CX, R11; ADCXQ R9, R9; \
|
||||
ADOXQ R15, R12; ADCXQ R10, R10; \
|
||||
ADOXQ R15, R13; ADCXQ R11, R11; \
|
||||
ADOXQ R15, R14; ADCXQ R12, R12; \
|
||||
;;;;;;;;;;;;;;; ADCXQ R13, R13; \
|
||||
;;;;;;;;;;;;;;; ADCXQ R14, R14; \
|
||||
MOVQ 0+x, DX; MULXQ DX, AX, CX; /* A[0]^2 */ \
|
||||
;;;;;;;;;;;;;;; MOVQ AX, 0+z; \
|
||||
ADDQ CX, R8; MOVQ R8, 8+z; \
|
||||
MOVQ 8+x, DX; MULXQ DX, AX, CX; /* A[1]^2 */ \
|
||||
ADCQ AX, R9; MOVQ R9, 16+z; \
|
||||
ADCQ CX, R10; MOVQ R10, 24+z; \
|
||||
MOVQ 16+x, DX; MULXQ DX, AX, CX; /* A[2]^2 */ \
|
||||
ADCQ AX, R11; MOVQ R11, 32+z; \
|
||||
ADCQ CX, R12; MOVQ R12, 40+z; \
|
||||
MOVQ 24+x, DX; MULXQ DX, AX, CX; /* A[3]^2 */ \
|
||||
ADCQ AX, R13; MOVQ R13, 48+z; \
|
||||
ADCQ CX, R14; MOVQ R14, 56+z;
|
||||
|
||||
// reduceFromDouble finds z congruent to x modulo p such that 0<z<2^256
|
||||
// Uses: AX, DX, R8-R13, FLAGS
|
||||
// Instr: x86_64
|
||||
#define reduceFromDoubleLeg(z,x) \
|
||||
/* 2*C = 38 = 2^256 */ \
|
||||
MOVL $38, AX; MULQ 32+x; MOVQ AX, R8; MOVQ DX, R9; /* C*C[4] */ \
|
||||
MOVL $38, AX; MULQ 40+x; MOVQ AX, R12; MOVQ DX, R10; /* C*C[5] */ \
|
||||
MOVL $38, AX; MULQ 48+x; MOVQ AX, R13; MOVQ DX, R11; /* C*C[6] */ \
|
||||
MOVL $38, AX; MULQ 56+x; /* C*C[7] */ \
|
||||
ADDQ R12, R9; \
|
||||
ADCQ R13, R10; \
|
||||
ADCQ AX, R11; \
|
||||
ADCQ $0, DX; \
|
||||
ADDQ 0+x, R8; \
|
||||
ADCQ 8+x, R9; \
|
||||
ADCQ 16+x, R10; \
|
||||
ADCQ 24+x, R11; \
|
||||
ADCQ $0, DX; \
|
||||
MOVL $38, AX; \
|
||||
IMULQ AX, DX; /* C*C[4], CF=0, OF=0 */ \
|
||||
ADDQ DX, R8; \
|
||||
ADCQ $0, R9; MOVQ R9, 8+z; \
|
||||
ADCQ $0, R10; MOVQ R10, 16+z; \
|
||||
ADCQ $0, R11; MOVQ R11, 24+z; \
|
||||
MOVL $0, DX; \
|
||||
CMOVQCS AX, DX; \
|
||||
ADDQ DX, R8; MOVQ R8, 0+z;
|
||||
|
||||
// reduceFromDoubleAdx finds z congruent to x modulo p such that 0<z<2^256
|
||||
// Uses: AX, DX, R8-R13, FLAGS
|
||||
// Instr: x86_64, bmi2, adx
|
||||
#define reduceFromDoubleAdx(z,x) \
|
||||
MOVL $38, DX; /* 2*C = 38 = 2^256 */ \
|
||||
MULXQ 32+x, R8, R10; /* C*C[4] */ XORL AX, AX; ADOXQ 0+x, R8; \
|
||||
MULXQ 40+x, R9, R11; /* C*C[5] */ ADCXQ R10, R9; ADOXQ 8+x, R9; \
|
||||
MULXQ 48+x, R10, R13; /* C*C[6] */ ADCXQ R11, R10; ADOXQ 16+x, R10; \
|
||||
MULXQ 56+x, R11, R12; /* C*C[7] */ ADCXQ R13, R11; ADOXQ 24+x, R11; \
|
||||
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ADCXQ AX, R12; ADOXQ AX, R12; \
|
||||
IMULQ DX, R12; /* C*C[4], CF=0, OF=0 */ \
|
||||
ADCXQ R12, R8; \
|
||||
ADCXQ AX, R9; MOVQ R9, 8+z; \
|
||||
ADCXQ AX, R10; MOVQ R10, 16+z; \
|
||||
ADCXQ AX, R11; MOVQ R11, 24+z; \
|
||||
MOVL $0, R12; \
|
||||
CMOVQCS DX, R12; \
|
||||
ADDQ R12, R8; MOVQ R8, 0+z;
|
||||
|
||||
// addSub calculates two operations: x,y = x+y,x-y
|
||||
// Uses: AX, DX, R8-R15, FLAGS
|
||||
#define addSub(x,y) \
|
||||
MOVL $38, AX; \
|
||||
XORL DX, DX; \
|
||||
MOVQ 0+x, R8; MOVQ R8, R12; ADDQ 0+y, R8; \
|
||||
MOVQ 8+x, R9; MOVQ R9, R13; ADCQ 8+y, R9; \
|
||||
MOVQ 16+x, R10; MOVQ R10, R14; ADCQ 16+y, R10; \
|
||||
MOVQ 24+x, R11; MOVQ R11, R15; ADCQ 24+y, R11; \
|
||||
CMOVQCS AX, DX; \
|
||||
XORL AX, AX; \
|
||||
ADDQ DX, R8; \
|
||||
ADCQ $0, R9; \
|
||||
ADCQ $0, R10; \
|
||||
ADCQ $0, R11; \
|
||||
MOVL $38, DX; \
|
||||
CMOVQCS DX, AX; \
|
||||
ADDQ AX, R8; \
|
||||
MOVL $38, AX; \
|
||||
SUBQ 0+y, R12; \
|
||||
SBBQ 8+y, R13; \
|
||||
SBBQ 16+y, R14; \
|
||||
SBBQ 24+y, R15; \
|
||||
MOVL $0, DX; \
|
||||
CMOVQCS AX, DX; \
|
||||
SUBQ DX, R12; \
|
||||
SBBQ $0, R13; \
|
||||
SBBQ $0, R14; \
|
||||
SBBQ $0, R15; \
|
||||
MOVL $0, DX; \
|
||||
CMOVQCS AX, DX; \
|
||||
SUBQ DX, R12; \
|
||||
MOVQ R8, 0+x; \
|
||||
MOVQ R9, 8+x; \
|
||||
MOVQ R10, 16+x; \
|
||||
MOVQ R11, 24+x; \
|
||||
MOVQ R12, 0+y; \
|
||||
MOVQ R13, 8+y; \
|
||||
MOVQ R14, 16+y; \
|
||||
MOVQ R15, 24+y;
|
111
vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.s
generated
vendored
Normal file
111
vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.s
generated
vendored
Normal file
@ -0,0 +1,111 @@
|
||||
// +build amd64
|
||||
|
||||
#include "textflag.h"
|
||||
#include "fp_amd64.h"
|
||||
|
||||
// func cmovAmd64(x, y *Elt, n uint)
|
||||
TEXT ·cmovAmd64(SB),NOSPLIT,$0-24
|
||||
MOVQ x+0(FP), DI
|
||||
MOVQ y+8(FP), SI
|
||||
MOVQ n+16(FP), BX
|
||||
cselect(0(DI),0(SI),BX)
|
||||
RET
|
||||
|
||||
// func cswapAmd64(x, y *Elt, n uint)
|
||||
TEXT ·cswapAmd64(SB),NOSPLIT,$0-24
|
||||
MOVQ x+0(FP), DI
|
||||
MOVQ y+8(FP), SI
|
||||
MOVQ n+16(FP), BX
|
||||
cswap(0(DI),0(SI),BX)
|
||||
RET
|
||||
|
||||
// func subAmd64(z, x, y *Elt)
|
||||
TEXT ·subAmd64(SB),NOSPLIT,$0-24
|
||||
MOVQ z+0(FP), DI
|
||||
MOVQ x+8(FP), SI
|
||||
MOVQ y+16(FP), BX
|
||||
subtraction(0(DI),0(SI),0(BX))
|
||||
RET
|
||||
|
||||
// func addsubAmd64(x, y *Elt)
|
||||
TEXT ·addsubAmd64(SB),NOSPLIT,$0-16
|
||||
MOVQ x+0(FP), DI
|
||||
MOVQ y+8(FP), SI
|
||||
addSub(0(DI),0(SI))
|
||||
RET
|
||||
|
||||
#define addLegacy \
|
||||
additionLeg(0(DI),0(SI),0(BX))
|
||||
#define addBmi2Adx \
|
||||
additionAdx(0(DI),0(SI),0(BX))
|
||||
|
||||
#define mulLegacy \
|
||||
integerMulLeg(0(SP),0(SI),0(BX)) \
|
||||
reduceFromDoubleLeg(0(DI),0(SP))
|
||||
#define mulBmi2Adx \
|
||||
integerMulAdx(0(SP),0(SI),0(BX)) \
|
||||
reduceFromDoubleAdx(0(DI),0(SP))
|
||||
|
||||
#define sqrLegacy \
|
||||
integerSqrLeg(0(SP),0(SI)) \
|
||||
reduceFromDoubleLeg(0(DI),0(SP))
|
||||
#define sqrBmi2Adx \
|
||||
integerSqrAdx(0(SP),0(SI)) \
|
||||
reduceFromDoubleAdx(0(DI),0(SP))
|
||||
|
||||
// func addAmd64(z, x, y *Elt)
|
||||
TEXT ·addAmd64(SB),NOSPLIT,$0-24
|
||||
MOVQ z+0(FP), DI
|
||||
MOVQ x+8(FP), SI
|
||||
MOVQ y+16(FP), BX
|
||||
CHECK_BMI2ADX(LADD, addLegacy, addBmi2Adx)
|
||||
|
||||
// func mulAmd64(z, x, y *Elt)
|
||||
TEXT ·mulAmd64(SB),NOSPLIT,$64-24
|
||||
MOVQ z+0(FP), DI
|
||||
MOVQ x+8(FP), SI
|
||||
MOVQ y+16(FP), BX
|
||||
CHECK_BMI2ADX(LMUL, mulLegacy, mulBmi2Adx)
|
||||
|
||||
// func sqrAmd64(z, x *Elt)
|
||||
TEXT ·sqrAmd64(SB),NOSPLIT,$64-16
|
||||
MOVQ z+0(FP), DI
|
||||
MOVQ x+8(FP), SI
|
||||
CHECK_BMI2ADX(LSQR, sqrLegacy, sqrBmi2Adx)
|
||||
|
||||
// func modpAmd64(z *Elt)
|
||||
TEXT ·modpAmd64(SB),NOSPLIT,$0-8
|
||||
MOVQ z+0(FP), DI
|
||||
|
||||
MOVQ (DI), R8
|
||||
MOVQ 8(DI), R9
|
||||
MOVQ 16(DI), R10
|
||||
MOVQ 24(DI), R11
|
||||
|
||||
MOVL $19, AX
|
||||
MOVL $38, CX
|
||||
|
||||
BTRQ $63, R11 // PUT BIT 255 IN CARRY FLAG AND CLEAR
|
||||
CMOVLCC AX, CX // C[255] ? 38 : 19
|
||||
|
||||
// ADD EITHER 19 OR 38 TO C
|
||||
ADDQ CX, R8
|
||||
ADCQ $0, R9
|
||||
ADCQ $0, R10
|
||||
ADCQ $0, R11
|
||||
|
||||
// TEST FOR BIT 255 AGAIN; ONLY TRIGGERED ON OVERFLOW MODULO 2^255-19
|
||||
MOVL $0, CX
|
||||
CMOVLPL AX, CX // C[255] ? 0 : 19
|
||||
BTRQ $63, R11 // CLEAR BIT 255
|
||||
|
||||
// SUBTRACT 19 IF NECESSARY
|
||||
SUBQ CX, R8
|
||||
MOVQ R8, (DI)
|
||||
SBBQ $0, R9
|
||||
MOVQ R9, 8(DI)
|
||||
SBBQ $0, R10
|
||||
MOVQ R10, 16(DI)
|
||||
SBBQ $0, R11
|
||||
MOVQ R11, 24(DI)
|
||||
RET
|
317
vendor/github.com/cloudflare/circl/math/fp25519/fp_generic.go
generated
vendored
Normal file
317
vendor/github.com/cloudflare/circl/math/fp25519/fp_generic.go
generated
vendored
Normal file
@ -0,0 +1,317 @@
|
||||
package fp25519
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"math/bits"
|
||||
)
|
||||
|
||||
func cmovGeneric(x, y *Elt, n uint) {
|
||||
m := -uint64(n & 0x1)
|
||||
x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8])
|
||||
x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8])
|
||||
x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8])
|
||||
x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8])
|
||||
|
||||
y0 := binary.LittleEndian.Uint64(y[0*8 : 1*8])
|
||||
y1 := binary.LittleEndian.Uint64(y[1*8 : 2*8])
|
||||
y2 := binary.LittleEndian.Uint64(y[2*8 : 3*8])
|
||||
y3 := binary.LittleEndian.Uint64(y[3*8 : 4*8])
|
||||
|
||||
x0 = (x0 &^ m) | (y0 & m)
|
||||
x1 = (x1 &^ m) | (y1 & m)
|
||||
x2 = (x2 &^ m) | (y2 & m)
|
||||
x3 = (x3 &^ m) | (y3 & m)
|
||||
|
||||
binary.LittleEndian.PutUint64(x[0*8:1*8], x0)
|
||||
binary.LittleEndian.PutUint64(x[1*8:2*8], x1)
|
||||
binary.LittleEndian.PutUint64(x[2*8:3*8], x2)
|
||||
binary.LittleEndian.PutUint64(x[3*8:4*8], x3)
|
||||
}
|
||||
|
||||
func cswapGeneric(x, y *Elt, n uint) {
|
||||
m := -uint64(n & 0x1)
|
||||
x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8])
|
||||
x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8])
|
||||
x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8])
|
||||
x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8])
|
||||
|
||||
y0 := binary.LittleEndian.Uint64(y[0*8 : 1*8])
|
||||
y1 := binary.LittleEndian.Uint64(y[1*8 : 2*8])
|
||||
y2 := binary.LittleEndian.Uint64(y[2*8 : 3*8])
|
||||
y3 := binary.LittleEndian.Uint64(y[3*8 : 4*8])
|
||||
|
||||
t0 := m & (x0 ^ y0)
|
||||
t1 := m & (x1 ^ y1)
|
||||
t2 := m & (x2 ^ y2)
|
||||
t3 := m & (x3 ^ y3)
|
||||
x0 ^= t0
|
||||
x1 ^= t1
|
||||
x2 ^= t2
|
||||
x3 ^= t3
|
||||
y0 ^= t0
|
||||
y1 ^= t1
|
||||
y2 ^= t2
|
||||
y3 ^= t3
|
||||
|
||||
binary.LittleEndian.PutUint64(x[0*8:1*8], x0)
|
||||
binary.LittleEndian.PutUint64(x[1*8:2*8], x1)
|
||||
binary.LittleEndian.PutUint64(x[2*8:3*8], x2)
|
||||
binary.LittleEndian.PutUint64(x[3*8:4*8], x3)
|
||||
|
||||
binary.LittleEndian.PutUint64(y[0*8:1*8], y0)
|
||||
binary.LittleEndian.PutUint64(y[1*8:2*8], y1)
|
||||
binary.LittleEndian.PutUint64(y[2*8:3*8], y2)
|
||||
binary.LittleEndian.PutUint64(y[3*8:4*8], y3)
|
||||
}
|
||||
|
||||
func addGeneric(z, x, y *Elt) {
|
||||
x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8])
|
||||
x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8])
|
||||
x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8])
|
||||
x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8])
|
||||
|
||||
y0 := binary.LittleEndian.Uint64(y[0*8 : 1*8])
|
||||
y1 := binary.LittleEndian.Uint64(y[1*8 : 2*8])
|
||||
y2 := binary.LittleEndian.Uint64(y[2*8 : 3*8])
|
||||
y3 := binary.LittleEndian.Uint64(y[3*8 : 4*8])
|
||||
|
||||
z0, c0 := bits.Add64(x0, y0, 0)
|
||||
z1, c1 := bits.Add64(x1, y1, c0)
|
||||
z2, c2 := bits.Add64(x2, y2, c1)
|
||||
z3, c3 := bits.Add64(x3, y3, c2)
|
||||
|
||||
z0, c0 = bits.Add64(z0, (-c3)&38, 0)
|
||||
z1, c1 = bits.Add64(z1, 0, c0)
|
||||
z2, c2 = bits.Add64(z2, 0, c1)
|
||||
z3, c3 = bits.Add64(z3, 0, c2)
|
||||
z0, _ = bits.Add64(z0, (-c3)&38, 0)
|
||||
|
||||
binary.LittleEndian.PutUint64(z[0*8:1*8], z0)
|
||||
binary.LittleEndian.PutUint64(z[1*8:2*8], z1)
|
||||
binary.LittleEndian.PutUint64(z[2*8:3*8], z2)
|
||||
binary.LittleEndian.PutUint64(z[3*8:4*8], z3)
|
||||
}
|
||||
|
||||
func subGeneric(z, x, y *Elt) {
|
||||
x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8])
|
||||
x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8])
|
||||
x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8])
|
||||
x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8])
|
||||
|
||||
y0 := binary.LittleEndian.Uint64(y[0*8 : 1*8])
|
||||
y1 := binary.LittleEndian.Uint64(y[1*8 : 2*8])
|
||||
y2 := binary.LittleEndian.Uint64(y[2*8 : 3*8])
|
||||
y3 := binary.LittleEndian.Uint64(y[3*8 : 4*8])
|
||||
|
||||
z0, c0 := bits.Sub64(x0, y0, 0)
|
||||
z1, c1 := bits.Sub64(x1, y1, c0)
|
||||
z2, c2 := bits.Sub64(x2, y2, c1)
|
||||
z3, c3 := bits.Sub64(x3, y3, c2)
|
||||
|
||||
z0, c0 = bits.Sub64(z0, (-c3)&38, 0)
|
||||
z1, c1 = bits.Sub64(z1, 0, c0)
|
||||
z2, c2 = bits.Sub64(z2, 0, c1)
|
||||
z3, c3 = bits.Sub64(z3, 0, c2)
|
||||
z0, _ = bits.Sub64(z0, (-c3)&38, 0)
|
||||
|
||||
binary.LittleEndian.PutUint64(z[0*8:1*8], z0)
|
||||
binary.LittleEndian.PutUint64(z[1*8:2*8], z1)
|
||||
binary.LittleEndian.PutUint64(z[2*8:3*8], z2)
|
||||
binary.LittleEndian.PutUint64(z[3*8:4*8], z3)
|
||||
}
|
||||
|
||||
func addsubGeneric(x, y *Elt) {
|
||||
z := &Elt{}
|
||||
addGeneric(z, x, y)
|
||||
subGeneric(y, x, y)
|
||||
*x = *z
|
||||
}
|
||||
|
||||
func mulGeneric(z, x, y *Elt) {
|
||||
x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8])
|
||||
x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8])
|
||||
x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8])
|
||||
x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8])
|
||||
|
||||
y0 := binary.LittleEndian.Uint64(y[0*8 : 1*8])
|
||||
y1 := binary.LittleEndian.Uint64(y[1*8 : 2*8])
|
||||
y2 := binary.LittleEndian.Uint64(y[2*8 : 3*8])
|
||||
y3 := binary.LittleEndian.Uint64(y[3*8 : 4*8])
|
||||
|
||||
yi := y0
|
||||
h0, l0 := bits.Mul64(x0, yi)
|
||||
h1, l1 := bits.Mul64(x1, yi)
|
||||
h2, l2 := bits.Mul64(x2, yi)
|
||||
h3, l3 := bits.Mul64(x3, yi)
|
||||
|
||||
z0 := l0
|
||||
a0, c0 := bits.Add64(h0, l1, 0)
|
||||
a1, c1 := bits.Add64(h1, l2, c0)
|
||||
a2, c2 := bits.Add64(h2, l3, c1)
|
||||
a3, _ := bits.Add64(h3, 0, c2)
|
||||
|
||||
yi = y1
|
||||
h0, l0 = bits.Mul64(x0, yi)
|
||||
h1, l1 = bits.Mul64(x1, yi)
|
||||
h2, l2 = bits.Mul64(x2, yi)
|
||||
h3, l3 = bits.Mul64(x3, yi)
|
||||
|
||||
z1, c0 := bits.Add64(a0, l0, 0)
|
||||
h0, c1 = bits.Add64(h0, l1, c0)
|
||||
h1, c2 = bits.Add64(h1, l2, c1)
|
||||
h2, c3 := bits.Add64(h2, l3, c2)
|
||||
h3, _ = bits.Add64(h3, 0, c3)
|
||||
|
||||
a0, c0 = bits.Add64(a1, h0, 0)
|
||||
a1, c1 = bits.Add64(a2, h1, c0)
|
||||
a2, c2 = bits.Add64(a3, h2, c1)
|
||||
a3, _ = bits.Add64(0, h3, c2)
|
||||
|
||||
yi = y2
|
||||
h0, l0 = bits.Mul64(x0, yi)
|
||||
h1, l1 = bits.Mul64(x1, yi)
|
||||
h2, l2 = bits.Mul64(x2, yi)
|
||||
h3, l3 = bits.Mul64(x3, yi)
|
||||
|
||||
z2, c0 := bits.Add64(a0, l0, 0)
|
||||
h0, c1 = bits.Add64(h0, l1, c0)
|
||||
h1, c2 = bits.Add64(h1, l2, c1)
|
||||
h2, c3 = bits.Add64(h2, l3, c2)
|
||||
h3, _ = bits.Add64(h3, 0, c3)
|
||||
|
||||
a0, c0 = bits.Add64(a1, h0, 0)
|
||||
a1, c1 = bits.Add64(a2, h1, c0)
|
||||
a2, c2 = bits.Add64(a3, h2, c1)
|
||||
a3, _ = bits.Add64(0, h3, c2)
|
||||
|
||||
yi = y3
|
||||
h0, l0 = bits.Mul64(x0, yi)
|
||||
h1, l1 = bits.Mul64(x1, yi)
|
||||
h2, l2 = bits.Mul64(x2, yi)
|
||||
h3, l3 = bits.Mul64(x3, yi)
|
||||
|
||||
z3, c0 := bits.Add64(a0, l0, 0)
|
||||
h0, c1 = bits.Add64(h0, l1, c0)
|
||||
h1, c2 = bits.Add64(h1, l2, c1)
|
||||
h2, c3 = bits.Add64(h2, l3, c2)
|
||||
h3, _ = bits.Add64(h3, 0, c3)
|
||||
|
||||
z4, c0 := bits.Add64(a1, h0, 0)
|
||||
z5, c1 := bits.Add64(a2, h1, c0)
|
||||
z6, c2 := bits.Add64(a3, h2, c1)
|
||||
z7, _ := bits.Add64(0, h3, c2)
|
||||
|
||||
red64(z, z0, z1, z2, z3, z4, z5, z6, z7)
|
||||
}
|
||||
|
||||
// sqrGeneric computes z = x*x mod 2^255-19 using portable 64-bit
// arithmetic. x is read as four little-endian 64-bit limbs; the full
// 512-bit square is accumulated and then folded modulo p by red64.
func sqrGeneric(z, x *Elt) {
	// Load x as limbs x0 (least significant) .. x3 (most significant).
	x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8])
	x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8])
	x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8])
	x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8])

	// Off-diagonal products x_i*x_j (i != j). Each appears twice in the
	// square, so the accumulated sum is doubled further below.
	h0, a0 := bits.Mul64(x0, x1)
	h1, l1 := bits.Mul64(x0, x2)
	h2, l2 := bits.Mul64(x0, x3)
	h3, l3 := bits.Mul64(x3, x1)
	h4, l4 := bits.Mul64(x3, x2)
	h, l := bits.Mul64(x1, x2)

	// First carry chain: align the cross products into accumulator
	// limbs a0..a5 (a0 set above from x0*x1's low word).
	a1, c0 := bits.Add64(l1, h0, 0)
	a2, c1 := bits.Add64(l2, h1, c0)
	a3, c2 := bits.Add64(l3, h2, c1)
	a4, c3 := bits.Add64(l4, h3, c2)
	a5, _ := bits.Add64(h4, 0, c3)

	// Fold in the remaining cross product x1*x2 (low word l at limb 2,
	// high word h at limb 3) and propagate carries up to a new top
	// limb a6.
	a2, c0 = bits.Add64(a2, l, 0)
	a3, c1 = bits.Add64(a3, h, c0)
	a4, c2 = bits.Add64(a4, 0, c1)
	a5, c3 = bits.Add64(a5, 0, c2)
	a6, _ := bits.Add64(0, 0, c3)

	// Double the cross-product sum (each x_i*x_j with i != j occurs
	// twice in (x0+x1+x2+x3)^2).
	a0, c0 = bits.Add64(a0, a0, 0)
	a1, c1 = bits.Add64(a1, a1, c0)
	a2, c2 = bits.Add64(a2, a2, c1)
	a3, c3 = bits.Add64(a3, a3, c2)
	a4, c4 := bits.Add64(a4, a4, c3)
	a5, c5 := bits.Add64(a5, a5, c4)
	a6, _ = bits.Add64(a6, a6, c5)

	// Diagonal terms x_i^2; b0..b7 hold the 512-bit result skeleton.
	b1, b0 := bits.Mul64(x0, x0)
	b3, b2 := bits.Mul64(x1, x1)
	b5, b4 := bits.Mul64(x2, x2)
	b7, b6 := bits.Mul64(x3, x3)

	// Add the doubled cross products (which start at limb 1) onto the
	// diagonal. The final carry out of b7 cannot occur for inputs of
	// this size, so it is discarded.
	b1, c0 = bits.Add64(b1, a0, 0)
	b2, c1 = bits.Add64(b2, a1, c0)
	b3, c2 = bits.Add64(b3, a2, c1)
	b4, c3 = bits.Add64(b4, a3, c2)
	b5, c4 = bits.Add64(b5, a4, c3)
	b6, c5 = bits.Add64(b6, a5, c4)
	b7, _ = bits.Add64(b7, a6, c5)

	// Reduce the 512-bit product b0..b7 modulo 2^255-19 into z.
	red64(z, b0, b1, b2, b3, b4, b5, b6, b7)
}
|
||||
|
||||
// modpGeneric reduces x modulo p = 2^255-19 in place, branch-free
// (constant time). It folds bit 255 (2^255 ≡ 19 mod p), then performs
// a conditional subtraction of 19 so the result is fully reduced.
func modpGeneric(x *Elt) {
	x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8])
	x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8])
	x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8])
	x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8])

	// cx = bit255(x) ? 38 : 19. The extra 19 when bit 255 is set
	// accounts for 2^255 ≡ 19 (mod p); the unconditional 19 is undone
	// below unless the addition overflows past p.
	cx := uint64(19) << (x3 >> 63)
	// Clear bit 255 (its contribution is now carried in cx).
	x3 &^= 1 << 63

	x0, c0 := bits.Add64(x0, cx, 0)
	x1, c1 := bits.Add64(x1, 0, c0)
	x2, c2 := bits.Add64(x2, 0, c1)
	x3, _ = bits.Add64(x3, 0, c2)

	// Test bit 255 again; it is set only when the value overflowed
	// modulo 2^255-19 (i.e. the +19 above was the needed reduction).
	// cx = bit255 ? 0 : 19 — keep the +19 on overflow, undo otherwise.
	cx = uint64(19) &^ (-(x3 >> 63))
	// Clear bit 255 (equivalent to subtracting 2^255 ≡ 19 + p).
	x3 &^= 1 << 63

	x0, c0 = bits.Sub64(x0, cx, 0)
	x1, c1 = bits.Sub64(x1, 0, c0)
	x2, c2 = bits.Sub64(x2, 0, c1)
	x3, _ = bits.Sub64(x3, 0, c2)

	// Store the reduced limbs back in little-endian order.
	binary.LittleEndian.PutUint64(x[0*8:1*8], x0)
	binary.LittleEndian.PutUint64(x[1*8:2*8], x1)
	binary.LittleEndian.PutUint64(x[2*8:3*8], x2)
	binary.LittleEndian.PutUint64(x[3*8:4*8], x3)
}
|
||||
|
||||
// red64 reduces the 512-bit value x0..x7 (x0 least significant limb)
// modulo p = 2^255-19 and stores the 256-bit result in z, using the
// identity 2^256 ≡ 38 (mod p): the high four limbs are multiplied by
// 38 and folded into the low four. The result fits in 256 bits but is
// not necessarily canonical (< p); callers use Modp for that.
func red64(z *Elt, x0, x1, x2, x3, x4, x5, x6, x7 uint64) {
	// Multiply each high limb by 38.
	h0, l0 := bits.Mul64(x4, 38)
	h1, l1 := bits.Mul64(x5, 38)
	h2, l2 := bits.Mul64(x6, 38)
	h3, l3 := bits.Mul64(x7, 38)

	// Combine the high/low words of the 38-multiples into l0..l4.
	l1, c0 := bits.Add64(h0, l1, 0)
	l2, c1 := bits.Add64(h1, l2, c0)
	l3, c2 := bits.Add64(h2, l3, c1)
	l4, _ := bits.Add64(h3, 0, c2)

	// Add the low half of the input; l4 collects the overflow limb.
	l0, c0 = bits.Add64(l0, x0, 0)
	l1, c1 = bits.Add64(l1, x1, c0)
	l2, c2 = bits.Add64(l2, x2, c1)
	l3, c3 := bits.Add64(l3, x3, c2)
	l4, _ = bits.Add64(l4, 0, c3)

	// Fold the overflow limb again: l4 is small, so l4*38 fits in the
	// low word (the high word is zero and is discarded).
	_, l4 = bits.Mul64(l4, 38)
	l0, c0 = bits.Add64(l0, l4, 0)
	z1, c1 := bits.Add64(l1, 0, c0)
	z2, c2 := bits.Add64(l2, 0, c1)
	z3, c3 := bits.Add64(l3, 0, c2)
	// A final carry out of z3 is absorbed by adding 38 once more to z0;
	// (-c3)&38 is 38 when c3 == 1 and 0 otherwise (branch-free).
	z0, _ := bits.Add64(l0, (-c3)&38, 0)

	binary.LittleEndian.PutUint64(z[0*8:1*8], z0)
	binary.LittleEndian.PutUint64(z[1*8:2*8], z1)
	binary.LittleEndian.PutUint64(z[2*8:3*8], z2)
	binary.LittleEndian.PutUint64(z[3*8:4*8], z3)
}
|
13
vendor/github.com/cloudflare/circl/math/fp25519/fp_noasm.go
generated
vendored
Normal file
13
vendor/github.com/cloudflare/circl/math/fp25519/fp_noasm.go
generated
vendored
Normal file
@ -0,0 +1,13 @@
|
||||
//go:build !amd64 || purego
// +build !amd64 purego

package fp25519

// This file selects the portable implementations when no assembly
// backend applies: on non-amd64 targets, or when building with the
// purego tag. Each wrapper simply delegates to the *Generic routine
// implementing the same field operation.

func cmov(x, y *Elt, n uint)  { cmovGeneric(x, y, n) }  // conditional move: x = y if n != 0
func cswap(x, y *Elt, n uint) { cswapGeneric(x, y, n) } // conditional swap of x and y if n != 0
func add(z, x, y *Elt)        { addGeneric(z, x, y) }   // z = x + y (mod p)
func sub(z, x, y *Elt)        { subGeneric(z, x, y) }   // z = x - y (mod p)
func addsub(x, y *Elt)        { addsubGeneric(x, y) }   // (x, y) = (x+y, x-y) (mod p)
func mul(z, x, y *Elt)        { mulGeneric(z, x, y) }   // z = x * y (mod p)
func sqr(z, x *Elt)           { sqrGeneric(z, x) }      // z = x * x (mod p)
func modp(z *Elt)             { modpGeneric(z) }        // reduce z to canonical form (< p)
|
Reference in New Issue
Block a user