Bump github.com/hashicorp/terraform-plugin-sdk/v2 from 2.26.1 to 2.27.0

Bumps [github.com/hashicorp/terraform-plugin-sdk/v2](https://github.com/hashicorp/terraform-plugin-sdk) from 2.26.1 to 2.27.0.
- [Release notes](https://github.com/hashicorp/terraform-plugin-sdk/releases)
- [Changelog](https://github.com/hashicorp/terraform-plugin-sdk/blob/main/CHANGELOG.md)
- [Commits](https://github.com/hashicorp/terraform-plugin-sdk/compare/v2.26.1...v2.27.0)

---
updated-dependencies:
- dependency-name: github.com/hashicorp/terraform-plugin-sdk/v2
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
This commit is contained in:
dependabot[bot] (committed by GitHub), 2023-07-03 20:21:30 +00:00
parent b2403e2569
commit 910ccdb092
722 changed files with 31260 additions and 8125 deletions
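
For reference, a minor-version bump like this one is normally realized by updating go.mod and re-vendoring; dependabot automates the equivalent of:

go get github.com/hashicorp/terraform-plugin-sdk/v2@v2.27.0
go mod tidy
go mod vendor

The vendored files below, from github.com/cloudflare/circl (pulled in transitively), are part of that re-vendoring.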

vendor/github.com/cloudflare/circl/math/fp448/fp.go generated vendored Normal file

@@ -0,0 +1,164 @@
// Package fp448 provides prime field arithmetic over GF(2^448-2^224-1).
package fp448
import (
"errors"
"github.com/cloudflare/circl/internal/conv"
)
// Size in bytes of an element.
const Size = 56
// Elt is a prime field element.
type Elt [Size]byte
func (e Elt) String() string { return conv.BytesLe2Hex(e[:]) }
// p is the prime modulus 2^448-2^224-1.
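// In little-endian order: bytes 0..27 are 0xff (the low 2^224-1 part), byte 28
// is 0xfe (the borrow from subtracting 2^224), and bytes 29..55 are 0xff.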
var p = Elt{
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
}
// P returns the prime modulus 2^448-2^224-1.
func P() Elt { return p }
// ToBytes stores in b the little-endian byte representation of x.
func ToBytes(b []byte, x *Elt) error {
if len(b) != Size {
return errors.New("wrong size")
}
Modp(x)
copy(b, x[:])
return nil
}
// IsZero returns true if x is equal to 0.
func IsZero(x *Elt) bool { Modp(x); return *x == Elt{} }
// IsOne returns true if x is equal to 1.
func IsOne(x *Elt) bool { Modp(x); return *x == Elt{1} }
// SetOne assigns x=1.
func SetOne(x *Elt) { *x = Elt{1} }
// One returns the 1 element.
func One() (x Elt) { x = Elt{1}; return }
// Neg calculates z = -x.
func Neg(z, x *Elt) { Sub(z, &p, x) }
// Modp reduces z to the range [0, p-1].
func Modp(z *Elt) { Sub(z, z, &p) }
// InvSqrt calculates z = sqrt(x/y) if x/y is a quadratic residue; in that
// case isQR = true. Otherwise, x/y is a quadratic non-residue, so isQR =
// false and z = sqrt(-x/y).
func InvSqrt(z, x, y *Elt) (isQR bool) {
// First note that x^(2(k+1)) = x^((p-1)/2) * x = legendre(x) * x, where k = (p-3)/4,
// so that's x if x is a quadratic residue and -x otherwise.
// Next, y^(6k+3) = y^(4k+2) * y^(2k+1) = y^(p-1) * y^((p-1)/2) = legendre(y).
// So the z we compute satisfies z^2 y = x^(2(k+1)) y^(6k+3) = legendre(x)*legendre(y).
// Thus if x and y are quadratic residues, then z is indeed sqrt(x/y).
t0, t1 := &Elt{}, &Elt{}
Mul(t0, x, y) // x*y
Sqr(t1, y) // y^2
Mul(t1, t0, t1) // x*y^3
powPminus3div4(z, t1) // (x*y^3)^k
Mul(z, z, t0) // z = x*y*(x*y^3)^k = x^(k+1) * y^(3k+1)
// Check if x/y is a quadratic residue
Sqr(t0, z) // z^2
Mul(t0, t0, y) // y*z^2
Sub(t0, t0, x) // y*z^2-x
return IsZero(t0)
}
// Inv calculates z = 1/x mod p.
func Inv(z, x *Elt) {
// Calculates z = x^(4k+1) = x^(p-3+1) = x^(p-2) = x^-1, where k = (p-3)/4.
t := &Elt{}
powPminus3div4(t, x) // t = x^k
Sqr(t, t) // t = x^2k
Sqr(t, t) // t = x^4k
Mul(z, t, x) // z = x^(4k+1)
}
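// Illustrative property test for Inv (hypothetical, not part of the vendored
// file; it would live in an in-package _test.go importing crypto/rand and
// testing): for a random x != 0, x * Inv(x) must equal 1 mod p.
//
//	func TestInvProperty(t *testing.T) {
//		var x, xInv, prod Elt
//		if _, err := rand.Read(x[:]); err != nil {
//			t.Fatal(err)
//		}
//		Modp(&x)        // reduce: any 448-bit value is below 2p
//		if IsZero(&x) { // zero has no inverse; vanishingly unlikely
//			SetOne(&x)
//		}
//		Inv(&xInv, &x)
//		Mul(&prod, &x, &xInv)
//		if !IsOne(&prod) {
//			t.Errorf("x * x^-1 != 1 for x = %v", x)
//		}
//	}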
// powPminus3div4 calculates z = x^k mod p, where k = (p-3)/4.
func powPminus3div4(z, x *Elt) {
x0, x1 := &Elt{}, &Elt{}
Sqr(z, x)
Mul(z, z, x)
Sqr(x0, z)
Mul(x0, x0, x)
Sqr(z, x0)
Sqr(z, z)
Sqr(z, z)
Mul(z, z, x0)
Sqr(x1, z)
for i := 0; i < 5; i++ {
Sqr(x1, x1)
}
Mul(x1, x1, z)
Sqr(z, x1)
for i := 0; i < 11; i++ {
Sqr(z, z)
}
Mul(z, z, x1)
Sqr(z, z)
Sqr(z, z)
Sqr(z, z)
Mul(z, z, x0)
Sqr(x1, z)
for i := 0; i < 26; i++ {
Sqr(x1, x1)
}
Mul(x1, x1, z)
Sqr(z, x1)
for i := 0; i < 53; i++ {
Sqr(z, z)
}
Mul(z, z, x1)
Sqr(z, z)
Sqr(z, z)
Sqr(z, z)
Mul(z, z, x0)
Sqr(x1, z)
for i := 0; i < 110; i++ {
Sqr(x1, x1)
}
Mul(x1, x1, z)
Sqr(z, x1)
Mul(z, z, x)
for i := 0; i < 223; i++ {
Sqr(z, z)
}
Mul(z, z, x1)
}
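// The chain above computes x^k for k = (p-3)/4 = 2^446 - 2^222 - 1; the
// squaring loops build successively longer runs of one-bits in the exponent.
// A quick cross-check of that closed form with math/big (illustrative only,
// not part of the vendored file):
//
//	one := big.NewInt(1)
//	p := new(big.Int).Lsh(one, 448)      // 2^448
//	p.Sub(p, new(big.Int).Lsh(one, 224)) // 2^448 - 2^224
//	p.Sub(p, one)                        // p = 2^448 - 2^224 - 1
//	k := new(big.Int).Rsh(new(big.Int).Sub(p, big.NewInt(3)), 2)
//	want := new(big.Int).Lsh(one, 446)
//	want.Sub(want, new(big.Int).Lsh(one, 222))
//	want.Sub(want, one)
//	fmt.Println(k.Cmp(want) == 0) // prints true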
// Cmov assigns y to x if n is 1.
func Cmov(x, y *Elt, n uint) { cmov(x, y, n) }
// Cswap interchanges x and y if n is 1.
func Cswap(x, y *Elt, n uint) { cswap(x, y, n) }
// Add calculates z = x+y mod p.
func Add(z, x, y *Elt) { add(z, x, y) }
// Sub calculates z = x-y mod p.
func Sub(z, x, y *Elt) { sub(z, x, y) }
// AddSub calculates (x,y) = (x+y mod p, x-y mod p).
func AddSub(x, y *Elt) { addsub(x, y) }
// Mul calculates z = x*y mod p.
func Mul(z, x, y *Elt) { mul(z, x, y) }
// Sqr calculates z = x^2 mod p.
func Sqr(z, x *Elt) { sqr(z, x) }
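
A minimal usage sketch of this package from an importing program (hypothetical; it relies only on the identifiers declared above):

package main

import (
	"fmt"

	"github.com/cloudflare/circl/math/fp448"
)

func main() {
	x := fp448.One()
	var two fp448.Elt
	fp448.Add(&two, &x, &x) // two = 1 + 1 mod p
	var buf [fp448.Size]byte
	if err := fp448.ToBytes(buf[:], &two); err != nil { // little-endian bytes
		panic(err)
	}
	fmt.Println(two) // Elt.String renders the little-endian hex form
}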


@@ -0,0 +1,43 @@
//go:build amd64 && !purego
// +build amd64,!purego
package fp448
import (
"golang.org/x/sys/cpu"
)
var hasBmi2Adx = cpu.X86.HasBMI2 && cpu.X86.HasADX
var _ = hasBmi2Adx
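// hasBmi2Adx is read from assembly (CMPB ·hasBmi2Adx(SB), $0) to select the
// BMI2/ADX fast path at run time; the blank assignment keeps the symbol
// referenced on the Go side.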
func cmov(x, y *Elt, n uint) { cmovAmd64(x, y, n) }
func cswap(x, y *Elt, n uint) { cswapAmd64(x, y, n) }
func add(z, x, y *Elt) { addAmd64(z, x, y) }
func sub(z, x, y *Elt) { subAmd64(z, x, y) }
func addsub(x, y *Elt) { addsubAmd64(x, y) }
func mul(z, x, y *Elt) { mulAmd64(z, x, y) }
func sqr(z, x *Elt) { sqrAmd64(z, x) }
/* Functions defined in fp_amd64.s */
//go:noescape
func cmovAmd64(x, y *Elt, n uint)
//go:noescape
func cswapAmd64(x, y *Elt, n uint)
//go:noescape
func addAmd64(z, x, y *Elt)
//go:noescape
func subAmd64(z, x, y *Elt)
//go:noescape
func addsubAmd64(x, y *Elt)
//go:noescape
func mulAmd64(z, x, y *Elt)
//go:noescape
func sqrAmd64(z, x *Elt)

vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.h generated vendored Normal file

@@ -0,0 +1,591 @@
// This code was imported from https://github.com/armfazh/rfc7748_precomputed
// CHECK_BMI2ADX triggers bmi2adx if supported,
// otherwise it falls back to legacy code.
#define CHECK_BMI2ADX(label, legacy, bmi2adx) \
CMPB ·hasBmi2Adx(SB), $0 \
JE label \
bmi2adx \
RET \
label: \
legacy \
RET
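// In Go terms, CHECK_BMI2ADX(label, legacy, bmi2adx) is roughly the dispatch
//   if hasBmi2Adx { bmi2adx(); return }; label: legacy(); return
// (illustrative pseudocode; both RETs are part of the macro expansion).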
// cselect is a conditional move:
// if b=1: it copies y into x;
// if b=0: x retains its value;
// if b is neither 0 nor 1: the result is undefined.
// Uses: AX, DX, FLAGS
// Instr: x86_64, cmov
#define cselect(x,y,b) \
TESTQ b, b \
MOVQ 0+x, AX; MOVQ 0+y, DX; CMOVQNE DX, AX; MOVQ AX, 0+x; \
MOVQ 8+x, AX; MOVQ 8+y, DX; CMOVQNE DX, AX; MOVQ AX, 8+x; \
MOVQ 16+x, AX; MOVQ 16+y, DX; CMOVQNE DX, AX; MOVQ AX, 16+x; \
MOVQ 24+x, AX; MOVQ 24+y, DX; CMOVQNE DX, AX; MOVQ AX, 24+x; \
MOVQ 32+x, AX; MOVQ 32+y, DX; CMOVQNE DX, AX; MOVQ AX, 32+x; \
MOVQ 40+x, AX; MOVQ 40+y, DX; CMOVQNE DX, AX; MOVQ AX, 40+x; \
MOVQ 48+x, AX; MOVQ 48+y, DX; CMOVQNE DX, AX; MOVQ AX, 48+x;
// cswap is a conditional swap:
// if b=1: x,y <- y,x;
// if b=0: x,y retain their values;
// if b is neither 0 nor 1: the result is undefined.
// Uses: AX, DX, R8, FLAGS
// Instr: x86_64, cmov
#define cswap(x,y,b) \
TESTQ b, b \
MOVQ 0+x, AX; MOVQ AX, R8; MOVQ 0+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 0+x; MOVQ DX, 0+y; \
MOVQ 8+x, AX; MOVQ AX, R8; MOVQ 8+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 8+x; MOVQ DX, 8+y; \
MOVQ 16+x, AX; MOVQ AX, R8; MOVQ 16+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 16+x; MOVQ DX, 16+y; \
MOVQ 24+x, AX; MOVQ AX, R8; MOVQ 24+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 24+x; MOVQ DX, 24+y; \
MOVQ 32+x, AX; MOVQ AX, R8; MOVQ 32+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 32+x; MOVQ DX, 32+y; \
MOVQ 40+x, AX; MOVQ AX, R8; MOVQ 40+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 40+x; MOVQ DX, 40+y; \
MOVQ 48+x, AX; MOVQ AX, R8; MOVQ 48+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 48+x; MOVQ DX, 48+y;
// additionLeg adds x and y and stores in z
// Uses: AX, DX, R8-R14, FLAGS
// Instr: x86_64
#define additionLeg(z,x,y) \
MOVQ 0+x, R8; ADDQ 0+y, R8; \
MOVQ 8+x, R9; ADCQ 8+y, R9; \
MOVQ 16+x, R10; ADCQ 16+y, R10; \
MOVQ 24+x, R11; ADCQ 24+y, R11; \
MOVQ 32+x, R12; ADCQ 32+y, R12; \
MOVQ 40+x, R13; ADCQ 40+y, R13; \
MOVQ 48+x, R14; ADCQ 48+y, R14; \
MOVQ $0, AX; ADCQ $0, AX; \
MOVQ AX, DX; \
SHLQ $32, DX; \
ADDQ AX, R8; MOVQ $0, AX; \
ADCQ $0, R9; \
ADCQ $0, R10; \
ADCQ DX, R11; \
ADCQ $0, R12; \
ADCQ $0, R13; \
ADCQ $0, R14; \
ADCQ $0, AX; \
MOVQ AX, DX; \
SHLQ $32, DX; \
ADDQ AX, R8; MOVQ R8, 0+z; \
ADCQ $0, R9; MOVQ R9, 8+z; \
ADCQ $0, R10; MOVQ R10, 16+z; \
ADCQ DX, R11; MOVQ R11, 24+z; \
ADCQ $0, R12; MOVQ R12, 32+z; \
ADCQ $0, R13; MOVQ R13, 40+z; \
ADCQ $0, R14; MOVQ R14, 48+z;
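// Note on additionLeg: the carry out of bit 447 is folded back using
// 2^448 = 2^224 + 1 (mod p): AX is added at bit 0 and DX = AX<<32 into the
// limb holding bits 192..255, i.e. at bit 224. The fold runs twice because
// the first pass can itself produce a new carry.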
// additionAdx adds x and y and stores in z
// Uses: AX, DX, R8-R15, FLAGS
// Instr: x86_64, adx
#define additionAdx(z,x,y) \
MOVL $32, R15; \
XORL DX, DX; \
MOVQ 0+x, R8; ADCXQ 0+y, R8; \
MOVQ 8+x, R9; ADCXQ 8+y, R9; \
MOVQ 16+x, R10; ADCXQ 16+y, R10; \
MOVQ 24+x, R11; ADCXQ 24+y, R11; \
MOVQ 32+x, R12; ADCXQ 32+y, R12; \
MOVQ 40+x, R13; ADCXQ 40+y, R13; \
MOVQ 48+x, R14; ADCXQ 48+y, R14; \
;;;;;;;;;;;;;;; ADCXQ DX, DX; \
XORL AX, AX; \
ADCXQ DX, R8; SHLXQ R15, DX, DX; \
ADCXQ AX, R9; \
ADCXQ AX, R10; \
ADCXQ DX, R11; \
ADCXQ AX, R12; \
ADCXQ AX, R13; \
ADCXQ AX, R14; \
ADCXQ AX, AX; \
XORL DX, DX; \
ADCXQ AX, R8; MOVQ R8, 0+z; SHLXQ R15, AX, AX; \
ADCXQ DX, R9; MOVQ R9, 8+z; \
ADCXQ DX, R10; MOVQ R10, 16+z; \
ADCXQ AX, R11; MOVQ R11, 24+z; \
ADCXQ DX, R12; MOVQ R12, 32+z; \
ADCXQ DX, R13; MOVQ R13, 40+z; \
ADCXQ DX, R14; MOVQ R14, 48+z;
// subtraction subtracts y from x and stores in z
// Uses: AX, DX, R8-R14, FLAGS
// Instr: x86_64
#define subtraction(z,x,y) \
MOVQ 0+x, R8; SUBQ 0+y, R8; \
MOVQ 8+x, R9; SBBQ 8+y, R9; \
MOVQ 16+x, R10; SBBQ 16+y, R10; \
MOVQ 24+x, R11; SBBQ 24+y, R11; \
MOVQ 32+x, R12; SBBQ 32+y, R12; \
MOVQ 40+x, R13; SBBQ 40+y, R13; \
MOVQ 48+x, R14; SBBQ 48+y, R14; \
MOVQ $0, AX; SETCS AX; \
MOVQ AX, DX; \
SHLQ $32, DX; \
SUBQ AX, R8; MOVQ $0, AX; \
SBBQ $0, R9; \
SBBQ $0, R10; \
SBBQ DX, R11; \
SBBQ $0, R12; \
SBBQ $0, R13; \
SBBQ $0, R14; \
SETCS AX; \
MOVQ AX, DX; \
SHLQ $32, DX; \
SUBQ AX, R8; MOVQ R8, 0+z; \
SBBQ $0, R9; MOVQ R9, 8+z; \
SBBQ $0, R10; MOVQ R10, 16+z; \
SBBQ DX, R11; MOVQ R11, 24+z; \
SBBQ $0, R12; MOVQ R12, 32+z; \
SBBQ $0, R13; MOVQ R13, 40+z; \
SBBQ $0, R14; MOVQ R14, 48+z;
// maddBmi2Adx multiplies x and y and accumulates in z
// Uses: AX, DX, R15, FLAGS
// Instr: x86_64, bmi2, adx
#define maddBmi2Adx(z,x,y,i,r0,r1,r2,r3,r4,r5,r6) \
MOVQ i+y, DX; XORL AX, AX; \
MULXQ 0+x, AX, R8; ADOXQ AX, r0; ADCXQ R8, r1; MOVQ r0,i+z; \
MULXQ 8+x, AX, r0; ADOXQ AX, r1; ADCXQ r0, r2; MOVQ $0, R8; \
MULXQ 16+x, AX, r0; ADOXQ AX, r2; ADCXQ r0, r3; \
MULXQ 24+x, AX, r0; ADOXQ AX, r3; ADCXQ r0, r4; \
MULXQ 32+x, AX, r0; ADOXQ AX, r4; ADCXQ r0, r5; \
MULXQ 40+x, AX, r0; ADOXQ AX, r5; ADCXQ r0, r6; \
MULXQ 48+x, AX, r0; ADOXQ AX, r6; ADCXQ R8, r0; \
;;;;;;;;;;;;;;;;;;; ADOXQ R8, r0;
// integerMulAdx multiplies x and y and stores in z
// Uses: AX, DX, R8-R15, FLAGS
// Instr: x86_64, bmi2, adx
#define integerMulAdx(z,x,y) \
MOVL $0,R15; \
MOVQ 0+y, DX; XORL AX, AX; MOVQ $0, R8; \
MULXQ 0+x, AX, R9; MOVQ AX, 0+z; \
MULXQ 8+x, AX, R10; ADCXQ AX, R9; \
MULXQ 16+x, AX, R11; ADCXQ AX, R10; \
MULXQ 24+x, AX, R12; ADCXQ AX, R11; \
MULXQ 32+x, AX, R13; ADCXQ AX, R12; \
MULXQ 40+x, AX, R14; ADCXQ AX, R13; \
MULXQ 48+x, AX, R15; ADCXQ AX, R14; \
;;;;;;;;;;;;;;;;;;;; ADCXQ R8, R15; \
maddBmi2Adx(z,x,y, 8, R9,R10,R11,R12,R13,R14,R15) \
maddBmi2Adx(z,x,y,16,R10,R11,R12,R13,R14,R15, R9) \
maddBmi2Adx(z,x,y,24,R11,R12,R13,R14,R15, R9,R10) \
maddBmi2Adx(z,x,y,32,R12,R13,R14,R15, R9,R10,R11) \
maddBmi2Adx(z,x,y,40,R13,R14,R15, R9,R10,R11,R12) \
maddBmi2Adx(z,x,y,48,R14,R15, R9,R10,R11,R12,R13) \
MOVQ R15, 56+z; \
MOVQ R9, 64+z; \
MOVQ R10, 72+z; \
MOVQ R11, 80+z; \
MOVQ R12, 88+z; \
MOVQ R13, 96+z; \
MOVQ R14, 104+z;
// maddLegacy multiplies x and y and accumulates in z
// Uses: AX, DX, R15, FLAGS
// Instr: x86_64
#define maddLegacy(z,x,y,i) \
MOVQ i+y, R15; \
MOVQ 0+x, AX; MULQ R15; MOVQ AX, R8; ;;;;;;;;;;;; MOVQ DX, R9; \
MOVQ 8+x, AX; MULQ R15; ADDQ AX, R9; ADCQ $0, DX; MOVQ DX, R10; \
MOVQ 16+x, AX; MULQ R15; ADDQ AX, R10; ADCQ $0, DX; MOVQ DX, R11; \
MOVQ 24+x, AX; MULQ R15; ADDQ AX, R11; ADCQ $0, DX; MOVQ DX, R12; \
MOVQ 32+x, AX; MULQ R15; ADDQ AX, R12; ADCQ $0, DX; MOVQ DX, R13; \
MOVQ 40+x, AX; MULQ R15; ADDQ AX, R13; ADCQ $0, DX; MOVQ DX, R14; \
MOVQ 48+x, AX; MULQ R15; ADDQ AX, R14; ADCQ $0, DX; \
ADDQ 0+i+z, R8; MOVQ R8, 0+i+z; \
ADCQ 8+i+z, R9; MOVQ R9, 8+i+z; \
ADCQ 16+i+z, R10; MOVQ R10, 16+i+z; \
ADCQ 24+i+z, R11; MOVQ R11, 24+i+z; \
ADCQ 32+i+z, R12; MOVQ R12, 32+i+z; \
ADCQ 40+i+z, R13; MOVQ R13, 40+i+z; \
ADCQ 48+i+z, R14; MOVQ R14, 48+i+z; \
ADCQ $0, DX; MOVQ DX, 56+i+z;
// integerMulLeg multiplies x and y and stores in z
// Uses: AX, DX, R8-R15, FLAGS
// Instr: x86_64
#define integerMulLeg(z,x,y) \
MOVQ 0+y, R15; \
MOVQ 0+x, AX; MULQ R15; MOVQ AX, 0+z; ;;;;;;;;;;;; MOVQ DX, R8; \
MOVQ 8+x, AX; MULQ R15; ADDQ AX, R8; ADCQ $0, DX; MOVQ DX, R9; MOVQ R8, 8+z; \
MOVQ 16+x, AX; MULQ R15; ADDQ AX, R9; ADCQ $0, DX; MOVQ DX, R10; MOVQ R9, 16+z; \
MOVQ 24+x, AX; MULQ R15; ADDQ AX, R10; ADCQ $0, DX; MOVQ DX, R11; MOVQ R10, 24+z; \
MOVQ 32+x, AX; MULQ R15; ADDQ AX, R11; ADCQ $0, DX; MOVQ DX, R12; MOVQ R11, 32+z; \
MOVQ 40+x, AX; MULQ R15; ADDQ AX, R12; ADCQ $0, DX; MOVQ DX, R13; MOVQ R12, 40+z; \
MOVQ 48+x, AX; MULQ R15; ADDQ AX, R13; ADCQ $0, DX; MOVQ DX,56+z; MOVQ R13, 48+z; \
maddLegacy(z,x,y, 8) \
maddLegacy(z,x,y,16) \
maddLegacy(z,x,y,24) \
maddLegacy(z,x,y,32) \
maddLegacy(z,x,y,40) \
maddLegacy(z,x,y,48)
// integerSqrLeg squares x and stores in z
// Uses: AX, CX, DX, R8-R15, FLAGS
// Instr: x86_64
#define integerSqrLeg(z,x) \
XORL R15, R15; \
MOVQ 0+x, CX; \
MOVQ CX, AX; MULQ CX; MOVQ AX, 0+z; MOVQ DX, R8; \
ADDQ CX, CX; ADCQ $0, R15; \
MOVQ 8+x, AX; MULQ CX; ADDQ AX, R8; ADCQ $0, DX; MOVQ DX, R9; MOVQ R8, 8+z; \
MOVQ 16+x, AX; MULQ CX; ADDQ AX, R9; ADCQ $0, DX; MOVQ DX, R10; \
MOVQ 24+x, AX; MULQ CX; ADDQ AX, R10; ADCQ $0, DX; MOVQ DX, R11; \
MOVQ 32+x, AX; MULQ CX; ADDQ AX, R11; ADCQ $0, DX; MOVQ DX, R12; \
MOVQ 40+x, AX; MULQ CX; ADDQ AX, R12; ADCQ $0, DX; MOVQ DX, R13; \
MOVQ 48+x, AX; MULQ CX; ADDQ AX, R13; ADCQ $0, DX; MOVQ DX, R14; \
\
MOVQ 8+x, CX; \
MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \
;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R9; ADCQ $0, DX; MOVQ R9,16+z; \
MOVQ R15, AX; NEGQ AX; ANDQ 8+x, AX; ADDQ AX, DX; ADCQ $0, R11; MOVQ DX, R8; \
ADDQ 8+x, CX; ADCQ $0, R15; \
MOVQ 16+x, AX; MULQ CX; ADDQ AX, R10; ADCQ $0, DX; ADDQ R8, R10; ADCQ $0, DX; MOVQ DX, R8; MOVQ R10, 24+z; \
MOVQ 24+x, AX; MULQ CX; ADDQ AX, R11; ADCQ $0, DX; ADDQ R8, R11; ADCQ $0, DX; MOVQ DX, R8; \
MOVQ 32+x, AX; MULQ CX; ADDQ AX, R12; ADCQ $0, DX; ADDQ R8, R12; ADCQ $0, DX; MOVQ DX, R8; \
MOVQ 40+x, AX; MULQ CX; ADDQ AX, R13; ADCQ $0, DX; ADDQ R8, R13; ADCQ $0, DX; MOVQ DX, R8; \
MOVQ 48+x, AX; MULQ CX; ADDQ AX, R14; ADCQ $0, DX; ADDQ R8, R14; ADCQ $0, DX; MOVQ DX, R9; \
\
MOVQ 16+x, CX; \
MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \
;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R11; ADCQ $0, DX; MOVQ R11, 32+z; \
MOVQ R15, AX; NEGQ AX; ANDQ 16+x,AX; ADDQ AX, DX; ADCQ $0, R13; MOVQ DX, R8; \
ADDQ 16+x, CX; ADCQ $0, R15; \
MOVQ 24+x, AX; MULQ CX; ADDQ AX, R12; ADCQ $0, DX; ADDQ R8, R12; ADCQ $0, DX; MOVQ DX, R8; MOVQ R12, 40+z; \
MOVQ 32+x, AX; MULQ CX; ADDQ AX, R13; ADCQ $0, DX; ADDQ R8, R13; ADCQ $0, DX; MOVQ DX, R8; \
MOVQ 40+x, AX; MULQ CX; ADDQ AX, R14; ADCQ $0, DX; ADDQ R8, R14; ADCQ $0, DX; MOVQ DX, R8; \
MOVQ 48+x, AX; MULQ CX; ADDQ AX, R9; ADCQ $0, DX; ADDQ R8, R9; ADCQ $0, DX; MOVQ DX,R10; \
\
MOVQ 24+x, CX; \
MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \
;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R13; ADCQ $0, DX; MOVQ R13, 48+z; \
MOVQ R15, AX; NEGQ AX; ANDQ 24+x,AX; ADDQ AX, DX; ADCQ $0, R9; MOVQ DX, R8; \
ADDQ 24+x, CX; ADCQ $0, R15; \
MOVQ 32+x, AX; MULQ CX; ADDQ AX, R14; ADCQ $0, DX; ADDQ R8, R14; ADCQ $0, DX; MOVQ DX, R8; MOVQ R14, 56+z; \
MOVQ 40+x, AX; MULQ CX; ADDQ AX, R9; ADCQ $0, DX; ADDQ R8, R9; ADCQ $0, DX; MOVQ DX, R8; \
MOVQ 48+x, AX; MULQ CX; ADDQ AX, R10; ADCQ $0, DX; ADDQ R8, R10; ADCQ $0, DX; MOVQ DX,R11; \
\
MOVQ 32+x, CX; \
MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \
;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R9; ADCQ $0, DX; MOVQ R9, 64+z; \
MOVQ R15, AX; NEGQ AX; ANDQ 32+x,AX; ADDQ AX, DX; ADCQ $0, R11; MOVQ DX, R8; \
ADDQ 32+x, CX; ADCQ $0, R15; \
MOVQ 40+x, AX; MULQ CX; ADDQ AX, R10; ADCQ $0, DX; ADDQ R8, R10; ADCQ $0, DX; MOVQ DX, R8; MOVQ R10, 72+z; \
MOVQ 48+x, AX; MULQ CX; ADDQ AX, R11; ADCQ $0, DX; ADDQ R8, R11; ADCQ $0, DX; MOVQ DX,R12; \
\
XORL R13, R13; \
XORL R14, R14; \
MOVQ 40+x, CX; \
MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \
;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R11; ADCQ $0, DX; MOVQ R11, 80+z; \
MOVQ R15, AX; NEGQ AX; ANDQ 40+x,AX; ADDQ AX, DX; ADCQ $0, R13; MOVQ DX, R8; \
ADDQ 40+x, CX; ADCQ $0, R15; \
MOVQ 48+x, AX; MULQ CX; ADDQ AX, R12; ADCQ $0, DX; ADDQ R8, R12; ADCQ $0, DX; MOVQ DX, R8; MOVQ R12, 88+z; \
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ADDQ R8, R13; ADCQ $0,R14; \
\
XORL R9, R9; \
MOVQ 48+x, CX; \
MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \
;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R13; ADCQ $0, DX; MOVQ R13, 96+z; \
MOVQ R15, AX; NEGQ AX; ANDQ 48+x,AX; ADDQ AX, DX; ADCQ $0, R9; MOVQ DX, R8; \
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ADDQ R8,R14; ADCQ $0, R9; MOVQ R14, 104+z;
// integerSqrAdx squares x and stores in z
// Uses: AX, CX, DX, R8-R15, FLAGS
// Instr: x86_64, bmi2, adx
#define integerSqrAdx(z,x) \
XORL R15, R15; \
MOVQ 0+x, DX; \
;;;;;;;;;;;;;; MULXQ DX, AX, R8; MOVQ AX, 0+z; \
ADDQ DX, DX; ADCQ $0, R15; CLC; \
MULXQ 8+x, AX, R9; ADCXQ AX, R8; MOVQ R8, 8+z; \
MULXQ 16+x, AX, R10; ADCXQ AX, R9; MOVQ $0, R8;\
MULXQ 24+x, AX, R11; ADCXQ AX, R10; \
MULXQ 32+x, AX, R12; ADCXQ AX, R11; \
MULXQ 40+x, AX, R13; ADCXQ AX, R12; \
MULXQ 48+x, AX, R14; ADCXQ AX, R13; \
;;;;;;;;;;;;;;;;;;;; ADCXQ R8, R14; \
\
MOVQ 8+x, DX; \
MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \
MULXQ AX, AX, CX; \
MOVQ R15, R8; NEGQ R8; ANDQ 8+x, R8; \
ADDQ AX, R9; MOVQ R9, 16+z; \
ADCQ CX, R8; \
ADCQ $0, R11; \
ADDQ 8+x, DX; \
ADCQ $0, R15; \
XORL R9, R9; ;;;;;;;;;;;;;;;;;;;;; ADOXQ R8, R10; \
MULXQ 16+x, AX, CX; ADCXQ AX, R10; ADOXQ CX, R11; MOVQ R10, 24+z; \
MULXQ 24+x, AX, CX; ADCXQ AX, R11; ADOXQ CX, R12; MOVQ $0, R10; \
MULXQ 32+x, AX, CX; ADCXQ AX, R12; ADOXQ CX, R13; \
MULXQ 40+x, AX, CX; ADCXQ AX, R13; ADOXQ CX, R14; \
MULXQ 48+x, AX, CX; ADCXQ AX, R14; ADOXQ CX, R9; \
;;;;;;;;;;;;;;;;;;; ADCXQ R10, R9; \
\
MOVQ 16+x, DX; \
MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \
MULXQ AX, AX, CX; \
MOVQ R15, R8; NEGQ R8; ANDQ 16+x, R8; \
ADDQ AX, R11; MOVQ R11, 32+z; \
ADCQ CX, R8; \
ADCQ $0, R13; \
ADDQ 16+x, DX; \
ADCQ $0, R15; \
XORL R11, R11; ;;;;;;;;;;;;;;;;;;; ADOXQ R8, R12; \
MULXQ 24+x, AX, CX; ADCXQ AX, R12; ADOXQ CX, R13; MOVQ R12, 40+z; \
MULXQ 32+x, AX, CX; ADCXQ AX, R13; ADOXQ CX, R14; MOVQ $0, R12; \
MULXQ 40+x, AX, CX; ADCXQ AX, R14; ADOXQ CX, R9; \
MULXQ 48+x, AX, CX; ADCXQ AX, R9; ADOXQ CX, R10; \
;;;;;;;;;;;;;;;;;;; ADCXQ R11,R10; \
\
MOVQ 24+x, DX; \
MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \
MULXQ AX, AX, CX; \
MOVQ R15, R8; NEGQ R8; ANDQ 24+x, R8; \
ADDQ AX, R13; MOVQ R13, 48+z; \
ADCQ CX, R8; \
ADCQ $0, R9; \
ADDQ 24+x, DX; \
ADCQ $0, R15; \
XORL R13, R13; ;;;;;;;;;;;;;;;;;;; ADOXQ R8, R14; \
MULXQ 32+x, AX, CX; ADCXQ AX, R14; ADOXQ CX, R9; MOVQ R14, 56+z; \
MULXQ 40+x, AX, CX; ADCXQ AX, R9; ADOXQ CX, R10; MOVQ $0, R14; \
MULXQ 48+x, AX, CX; ADCXQ AX, R10; ADOXQ CX, R11; \
;;;;;;;;;;;;;;;;;;; ADCXQ R12,R11; \
\
MOVQ 32+x, DX; \
MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \
MULXQ AX, AX, CX; \
MOVQ R15, R8; NEGQ R8; ANDQ 32+x, R8; \
ADDQ AX, R9; MOVQ R9, 64+z; \
ADCQ CX, R8; \
ADCQ $0, R11; \
ADDQ 32+x, DX; \
ADCQ $0, R15; \
XORL R9, R9; ;;;;;;;;;;;;;;;;;;;;; ADOXQ R8, R10; \
MULXQ 40+x, AX, CX; ADCXQ AX, R10; ADOXQ CX, R11; MOVQ R10, 72+z; \
MULXQ 48+x, AX, CX; ADCXQ AX, R11; ADOXQ CX, R12; \
;;;;;;;;;;;;;;;;;;; ADCXQ R13,R12; \
\
MOVQ 40+x, DX; \
MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \
MULXQ AX, AX, CX; \
MOVQ R15, R8; NEGQ R8; ANDQ 40+x, R8; \
ADDQ AX, R11; MOVQ R11, 80+z; \
ADCQ CX, R8; \
ADCQ $0, R13; \
ADDQ 40+x, DX; \
ADCQ $0, R15; \
XORL R11, R11; ;;;;;;;;;;;;;;;;;;; ADOXQ R8, R12; \
MULXQ 48+x, AX, CX; ADCXQ AX, R12; ADOXQ CX, R13; MOVQ R12, 88+z; \
;;;;;;;;;;;;;;;;;;; ADCXQ R14,R13; \
\
MOVQ 48+x, DX; \
MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \
MULXQ AX, AX, CX; \
MOVQ R15, R8; NEGQ R8; ANDQ 48+x, R8; \
XORL R10, R10; ;;;;;;;;;;;;;; ADOXQ CX, R14; \
;;;;;;;;;;;;;; ADCXQ AX, R13; ;;;;;;;;;;;;;; MOVQ R13, 96+z; \
;;;;;;;;;;;;;; ADCXQ R8, R14; MOVQ R14, 104+z;
// reduceFromDoubleLeg finds z congruent to x modulo p such that z < 2^448, and stores it in z
// Uses: AX, R8-R15, FLAGS
// Instr: x86_64
#define reduceFromDoubleLeg(z,x) \
/* ( ,2C13,2C12,2C11,2C10|C10,C9,C8, C7) + (C6,...,C0) */ \
/* (r14, r13, r12, r11, r10,r9,r8,r15) */ \
MOVQ 80+x,AX; MOVQ AX,R10; \
MOVQ $0xFFFFFFFF00000000, R8; \
ANDQ R8,R10; \
\
MOVQ $0,R14; \
MOVQ 104+x,R13; SHLQ $1,R13,R14; \
MOVQ 96+x,R12; SHLQ $1,R12,R13; \
MOVQ 88+x,R11; SHLQ $1,R11,R12; \
MOVQ 72+x, R9; SHLQ $1,R10,R11; \
MOVQ 64+x, R8; SHLQ $1,R10; \
MOVQ $0xFFFFFFFF,R15; ANDQ R15,AX; ORQ AX,R10; \
MOVQ 56+x,R15; \
\
ADDQ 0+x,R15; MOVQ R15, 0+z; MOVQ 56+x,R15; \
ADCQ 8+x, R8; MOVQ R8, 8+z; MOVQ 64+x, R8; \
ADCQ 16+x, R9; MOVQ R9,16+z; MOVQ 72+x, R9; \
ADCQ 24+x,R10; MOVQ R10,24+z; MOVQ 80+x,R10; \
ADCQ 32+x,R11; MOVQ R11,32+z; MOVQ 88+x,R11; \
ADCQ 40+x,R12; MOVQ R12,40+z; MOVQ 96+x,R12; \
ADCQ 48+x,R13; MOVQ R13,48+z; MOVQ 104+x,R13; \
ADCQ $0,R14; \
/* (c10c9,c9c8,c8c7,c7c13,c13c12,c12c11,c11c10) + (c6,...,c0) */ \
/* ( r9, r8, r15, r13, r12, r11, r10) */ \
MOVQ R10, AX; \
SHRQ $32,R11,R10; \
SHRQ $32,R12,R11; \
SHRQ $32,R13,R12; \
SHRQ $32,R15,R13; \
SHRQ $32, R8,R15; \
SHRQ $32, R9, R8; \
SHRQ $32, AX, R9; \
\
ADDQ 0+z,R10; \
ADCQ 8+z,R11; \
ADCQ 16+z,R12; \
ADCQ 24+z,R13; \
ADCQ 32+z,R15; \
ADCQ 40+z, R8; \
ADCQ 48+z, R9; \
ADCQ $0,R14; \
/* ( c7) + (c6,...,c0) */ \
/* (r14) */ \
MOVQ R14, AX; SHLQ $32, AX; \
ADDQ R14,R10; MOVQ $0,R14; \
ADCQ $0,R11; \
ADCQ $0,R12; \
ADCQ AX,R13; \
ADCQ $0,R15; \
ADCQ $0, R8; \
ADCQ $0, R9; \
ADCQ $0,R14; \
/* ( c7) + (c6,...,c0) */ \
/* (r14) */ \
MOVQ R14, AX; SHLQ $32,AX; \
ADDQ R14,R10; MOVQ R10, 0+z; \
ADCQ $0,R11; MOVQ R11, 8+z; \
ADCQ $0,R12; MOVQ R12,16+z; \
ADCQ AX,R13; MOVQ R13,24+z; \
ADCQ $0,R15; MOVQ R15,32+z; \
ADCQ $0, R8; MOVQ R8,40+z; \
ADCQ $0, R9; MOVQ R9,48+z;
// reduceFromDoubleAdx finds z congruent to x modulo p such that z < 2^448, and stores it in z
// Uses: AX, R8-R15, FLAGS
// Instr: x86_64, adx
#define reduceFromDoubleAdx(z,x) \
/* ( ,2C13,2C12,2C11,2C10|C10,C9,C8, C7) + (C6,...,C0) */ \
/* (r14, r13, r12, r11, r10,r9,r8,r15) */ \
MOVQ 80+x,AX; MOVQ AX,R10; \
MOVQ $0xFFFFFFFF00000000, R8; \
ANDQ R8,R10; \
\
MOVQ $0,R14; \
MOVQ 104+x,R13; SHLQ $1,R13,R14; \
MOVQ 96+x,R12; SHLQ $1,R12,R13; \
MOVQ 88+x,R11; SHLQ $1,R11,R12; \
MOVQ 72+x, R9; SHLQ $1,R10,R11; \
MOVQ 64+x, R8; SHLQ $1,R10; \
MOVQ $0xFFFFFFFF,R15; ANDQ R15,AX; ORQ AX,R10; \
MOVQ 56+x,R15; \
\
XORL AX,AX; \
ADCXQ 0+x,R15; MOVQ R15, 0+z; MOVQ 56+x,R15; \
ADCXQ 8+x, R8; MOVQ R8, 8+z; MOVQ 64+x, R8; \
ADCXQ 16+x, R9; MOVQ R9,16+z; MOVQ 72+x, R9; \
ADCXQ 24+x,R10; MOVQ R10,24+z; MOVQ 80+x,R10; \
ADCXQ 32+x,R11; MOVQ R11,32+z; MOVQ 88+x,R11; \
ADCXQ 40+x,R12; MOVQ R12,40+z; MOVQ 96+x,R12; \
ADCXQ 48+x,R13; MOVQ R13,48+z; MOVQ 104+x,R13; \
ADCXQ AX,R14; \
/* (c10c9,c9c8,c8c7,c7c13,c13c12,c12c11,c11c10) + (c6,...,c0) */ \
/* ( r9, r8, r15, r13, r12, r11, r10) */ \
MOVQ R10, AX; \
SHRQ $32,R11,R10; \
SHRQ $32,R12,R11; \
SHRQ $32,R13,R12; \
SHRQ $32,R15,R13; \
SHRQ $32, R8,R15; \
SHRQ $32, R9, R8; \
SHRQ $32, AX, R9; \
\
XORL AX,AX; \
ADCXQ 0+z,R10; \
ADCXQ 8+z,R11; \
ADCXQ 16+z,R12; \
ADCXQ 24+z,R13; \
ADCXQ 32+z,R15; \
ADCXQ 40+z, R8; \
ADCXQ 48+z, R9; \
ADCXQ AX,R14; \
/* ( c7) + (c6,...,c0) */ \
/* (r14) */ \
MOVQ R14, AX; SHLQ $32, AX; \
CLC; \
ADCXQ R14,R10; MOVQ $0,R14; \
ADCXQ R14,R11; \
ADCXQ R14,R12; \
ADCXQ AX,R13; \
ADCXQ R14,R15; \
ADCXQ R14, R8; \
ADCXQ R14, R9; \
ADCXQ R14,R14; \
/* ( c7) + (c6,...,c0) */ \
/* (r14) */ \
MOVQ R14, AX; SHLQ $32, AX; \
CLC; \
ADCXQ R14,R10; MOVQ R10, 0+z; MOVQ $0,R14; \
ADCXQ R14,R11; MOVQ R11, 8+z; \
ADCXQ R14,R12; MOVQ R12,16+z; \
ADCXQ AX,R13; MOVQ R13,24+z; \
ADCXQ R14,R15; MOVQ R15,32+z; \
ADCXQ R14, R8; MOVQ R8,40+z; \
ADCXQ R14, R9; MOVQ R9,48+z;
// addSub calculates two operations: x,y = x+y,x-y
// Uses: AX, DX, R8-R15, FLAGS
#define addSub(x,y) \
MOVQ 0+x, R8; ADDQ 0+y, R8; \
MOVQ 8+x, R9; ADCQ 8+y, R9; \
MOVQ 16+x, R10; ADCQ 16+y, R10; \
MOVQ 24+x, R11; ADCQ 24+y, R11; \
MOVQ 32+x, R12; ADCQ 32+y, R12; \
MOVQ 40+x, R13; ADCQ 40+y, R13; \
MOVQ 48+x, R14; ADCQ 48+y, R14; \
MOVQ $0, AX; ADCQ $0, AX; \
MOVQ AX, DX; \
SHLQ $32, DX; \
ADDQ AX, R8; MOVQ $0, AX; \
ADCQ $0, R9; \
ADCQ $0, R10; \
ADCQ DX, R11; \
ADCQ $0, R12; \
ADCQ $0, R13; \
ADCQ $0, R14; \
ADCQ $0, AX; \
MOVQ AX, DX; \
SHLQ $32, DX; \
ADDQ AX, R8; MOVQ 0+x,AX; MOVQ R8, 0+x; MOVQ AX, R8; \
ADCQ $0, R9; MOVQ 8+x,AX; MOVQ R9, 8+x; MOVQ AX, R9; \
ADCQ $0, R10; MOVQ 16+x,AX; MOVQ R10, 16+x; MOVQ AX, R10; \
ADCQ DX, R11; MOVQ 24+x,AX; MOVQ R11, 24+x; MOVQ AX, R11; \
ADCQ $0, R12; MOVQ 32+x,AX; MOVQ R12, 32+x; MOVQ AX, R12; \
ADCQ $0, R13; MOVQ 40+x,AX; MOVQ R13, 40+x; MOVQ AX, R13; \
ADCQ $0, R14; MOVQ 48+x,AX; MOVQ R14, 48+x; MOVQ AX, R14; \
SUBQ 0+y, R8; \
SBBQ 8+y, R9; \
SBBQ 16+y, R10; \
SBBQ 24+y, R11; \
SBBQ 32+y, R12; \
SBBQ 40+y, R13; \
SBBQ 48+y, R14; \
MOVQ $0, AX; SETCS AX; \
MOVQ AX, DX; \
SHLQ $32, DX; \
SUBQ AX, R8; MOVQ $0, AX; \
SBBQ $0, R9; \
SBBQ $0, R10; \
SBBQ DX, R11; \
SBBQ $0, R12; \
SBBQ $0, R13; \
SBBQ $0, R14; \
SETCS AX; \
MOVQ AX, DX; \
SHLQ $32, DX; \
SUBQ AX, R8; MOVQ R8, 0+y; \
SBBQ $0, R9; MOVQ R9, 8+y; \
SBBQ $0, R10; MOVQ R10, 16+y; \
SBBQ DX, R11; MOVQ R11, 24+y; \
SBBQ $0, R12; MOVQ R12, 32+y; \
SBBQ $0, R13; MOVQ R13, 40+y; \
SBBQ $0, R14; MOVQ R14, 48+y;

vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.s generated vendored Normal file

@@ -0,0 +1,74 @@
//go:build amd64 && !purego
// +build amd64,!purego
#include "textflag.h"
#include "fp_amd64.h"
// func cmovAmd64(x, y *Elt, n uint)
TEXT ·cmovAmd64(SB),NOSPLIT,$0-24
MOVQ x+0(FP), DI
MOVQ y+8(FP), SI
MOVQ n+16(FP), BX
cselect(0(DI),0(SI),BX)
RET
// func cswapAmd64(x, y *Elt, n uint)
TEXT ·cswapAmd64(SB),NOSPLIT,$0-24
MOVQ x+0(FP), DI
MOVQ y+8(FP), SI
MOVQ n+16(FP), BX
cswap(0(DI),0(SI),BX)
RET
// func subAmd64(z, x, y *Elt)
TEXT ·subAmd64(SB),NOSPLIT,$0-24
MOVQ z+0(FP), DI
MOVQ x+8(FP), SI
MOVQ y+16(FP), BX
subtraction(0(DI),0(SI),0(BX))
RET
// func addsubAmd64(x, y *Elt)
TEXT ·addsubAmd64(SB),NOSPLIT,$0-16
MOVQ x+0(FP), DI
MOVQ y+8(FP), SI
addSub(0(DI),0(SI))
RET
#define addLegacy \
additionLeg(0(DI),0(SI),0(BX))
#define addBmi2Adx \
additionAdx(0(DI),0(SI),0(BX))
#define mulLegacy \
integerMulLeg(0(SP),0(SI),0(BX)) \
reduceFromDoubleLeg(0(DI),0(SP))
#define mulBmi2Adx \
integerMulAdx(0(SP),0(SI),0(BX)) \
reduceFromDoubleAdx(0(DI),0(SP))
#define sqrLegacy \
integerSqrLeg(0(SP),0(SI)) \
reduceFromDoubleLeg(0(DI),0(SP))
#define sqrBmi2Adx \
integerSqrAdx(0(SP),0(SI)) \
reduceFromDoubleAdx(0(DI),0(SP))
// func addAmd64(z, x, y *Elt)
TEXT ·addAmd64(SB),NOSPLIT,$0-24
MOVQ z+0(FP), DI
MOVQ x+8(FP), SI
MOVQ y+16(FP), BX
CHECK_BMI2ADX(LADD, addLegacy, addBmi2Adx)
// func mulAmd64(z, x, y *Elt)
TEXT ·mulAmd64(SB),NOSPLIT,$112-24
MOVQ z+0(FP), DI
MOVQ x+8(FP), SI
MOVQ y+16(FP), BX
CHECK_BMI2ADX(LMUL, mulLegacy, mulBmi2Adx)
// func sqrAmd64(z, x *Elt)
TEXT ·sqrAmd64(SB),NOSPLIT,$112-16
MOVQ z+0(FP), DI
MOVQ x+8(FP), SI
CHECK_BMI2ADX(LSQR, sqrLegacy, sqrBmi2Adx)
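// Note: CHECK_BMI2ADX expands to a RET on both the fast and the legacy path,
// so addAmd64, mulAmd64, and sqrAmd64 deliberately end without a RET of
// their own.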


@@ -0,0 +1,339 @@
package fp448
import (
"encoding/binary"
"math/bits"
)
func cmovGeneric(x, y *Elt, n uint) {
m := -uint64(n & 0x1)
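// m is all-ones when the low bit of n is set, all-zeros otherwise;
// (x &^ m) | (y & m) then selects y or x without a data-dependent branch
// (the same masking trick drives cswapGeneric below).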
x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8])
x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8])
x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8])
x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8])
x4 := binary.LittleEndian.Uint64(x[4*8 : 5*8])
x5 := binary.LittleEndian.Uint64(x[5*8 : 6*8])
x6 := binary.LittleEndian.Uint64(x[6*8 : 7*8])
y0 := binary.LittleEndian.Uint64(y[0*8 : 1*8])
y1 := binary.LittleEndian.Uint64(y[1*8 : 2*8])
y2 := binary.LittleEndian.Uint64(y[2*8 : 3*8])
y3 := binary.LittleEndian.Uint64(y[3*8 : 4*8])
y4 := binary.LittleEndian.Uint64(y[4*8 : 5*8])
y5 := binary.LittleEndian.Uint64(y[5*8 : 6*8])
y6 := binary.LittleEndian.Uint64(y[6*8 : 7*8])
x0 = (x0 &^ m) | (y0 & m)
x1 = (x1 &^ m) | (y1 & m)
x2 = (x2 &^ m) | (y2 & m)
x3 = (x3 &^ m) | (y3 & m)
x4 = (x4 &^ m) | (y4 & m)
x5 = (x5 &^ m) | (y5 & m)
x6 = (x6 &^ m) | (y6 & m)
binary.LittleEndian.PutUint64(x[0*8:1*8], x0)
binary.LittleEndian.PutUint64(x[1*8:2*8], x1)
binary.LittleEndian.PutUint64(x[2*8:3*8], x2)
binary.LittleEndian.PutUint64(x[3*8:4*8], x3)
binary.LittleEndian.PutUint64(x[4*8:5*8], x4)
binary.LittleEndian.PutUint64(x[5*8:6*8], x5)
binary.LittleEndian.PutUint64(x[6*8:7*8], x6)
}
func cswapGeneric(x, y *Elt, n uint) {
m := -uint64(n & 0x1)
x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8])
x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8])
x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8])
x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8])
x4 := binary.LittleEndian.Uint64(x[4*8 : 5*8])
x5 := binary.LittleEndian.Uint64(x[5*8 : 6*8])
x6 := binary.LittleEndian.Uint64(x[6*8 : 7*8])
y0 := binary.LittleEndian.Uint64(y[0*8 : 1*8])
y1 := binary.LittleEndian.Uint64(y[1*8 : 2*8])
y2 := binary.LittleEndian.Uint64(y[2*8 : 3*8])
y3 := binary.LittleEndian.Uint64(y[3*8 : 4*8])
y4 := binary.LittleEndian.Uint64(y[4*8 : 5*8])
y5 := binary.LittleEndian.Uint64(y[5*8 : 6*8])
y6 := binary.LittleEndian.Uint64(y[6*8 : 7*8])
t0 := m & (x0 ^ y0)
t1 := m & (x1 ^ y1)
t2 := m & (x2 ^ y2)
t3 := m & (x3 ^ y3)
t4 := m & (x4 ^ y4)
t5 := m & (x5 ^ y5)
t6 := m & (x6 ^ y6)
x0 ^= t0
x1 ^= t1
x2 ^= t2
x3 ^= t3
x4 ^= t4
x5 ^= t5
x6 ^= t6
y0 ^= t0
y1 ^= t1
y2 ^= t2
y3 ^= t3
y4 ^= t4
y5 ^= t5
y6 ^= t6
binary.LittleEndian.PutUint64(x[0*8:1*8], x0)
binary.LittleEndian.PutUint64(x[1*8:2*8], x1)
binary.LittleEndian.PutUint64(x[2*8:3*8], x2)
binary.LittleEndian.PutUint64(x[3*8:4*8], x3)
binary.LittleEndian.PutUint64(x[4*8:5*8], x4)
binary.LittleEndian.PutUint64(x[5*8:6*8], x5)
binary.LittleEndian.PutUint64(x[6*8:7*8], x6)
binary.LittleEndian.PutUint64(y[0*8:1*8], y0)
binary.LittleEndian.PutUint64(y[1*8:2*8], y1)
binary.LittleEndian.PutUint64(y[2*8:3*8], y2)
binary.LittleEndian.PutUint64(y[3*8:4*8], y3)
binary.LittleEndian.PutUint64(y[4*8:5*8], y4)
binary.LittleEndian.PutUint64(y[5*8:6*8], y5)
binary.LittleEndian.PutUint64(y[6*8:7*8], y6)
}
func addGeneric(z, x, y *Elt) {
x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8])
x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8])
x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8])
x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8])
x4 := binary.LittleEndian.Uint64(x[4*8 : 5*8])
x5 := binary.LittleEndian.Uint64(x[5*8 : 6*8])
x6 := binary.LittleEndian.Uint64(x[6*8 : 7*8])
y0 := binary.LittleEndian.Uint64(y[0*8 : 1*8])
y1 := binary.LittleEndian.Uint64(y[1*8 : 2*8])
y2 := binary.LittleEndian.Uint64(y[2*8 : 3*8])
y3 := binary.LittleEndian.Uint64(y[3*8 : 4*8])
y4 := binary.LittleEndian.Uint64(y[4*8 : 5*8])
y5 := binary.LittleEndian.Uint64(y[5*8 : 6*8])
y6 := binary.LittleEndian.Uint64(y[6*8 : 7*8])
z0, c0 := bits.Add64(x0, y0, 0)
z1, c1 := bits.Add64(x1, y1, c0)
z2, c2 := bits.Add64(x2, y2, c1)
z3, c3 := bits.Add64(x3, y3, c2)
z4, c4 := bits.Add64(x4, y4, c3)
z5, c5 := bits.Add64(x5, y5, c4)
z6, z7 := bits.Add64(x6, y6, c5)
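// Fold the carry z7 back in using 2^448 = 2^224 + 1 (mod p): z7 is added at
// bit 0 and z7<<32 lands in limb 3, i.e. at bit 224. Two passes, because the
// first fold can itself carry out of the top limb (subGeneric mirrors this
// with borrows).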
z0, c0 = bits.Add64(z0, z7, 0)
z1, c1 = bits.Add64(z1, 0, c0)
z2, c2 = bits.Add64(z2, 0, c1)
z3, c3 = bits.Add64(z3, z7<<32, c2)
z4, c4 = bits.Add64(z4, 0, c3)
z5, c5 = bits.Add64(z5, 0, c4)
z6, z7 = bits.Add64(z6, 0, c5)
z0, c0 = bits.Add64(z0, z7, 0)
z1, c1 = bits.Add64(z1, 0, c0)
z2, c2 = bits.Add64(z2, 0, c1)
z3, c3 = bits.Add64(z3, z7<<32, c2)
z4, c4 = bits.Add64(z4, 0, c3)
z5, c5 = bits.Add64(z5, 0, c4)
z6, _ = bits.Add64(z6, 0, c5)
binary.LittleEndian.PutUint64(z[0*8:1*8], z0)
binary.LittleEndian.PutUint64(z[1*8:2*8], z1)
binary.LittleEndian.PutUint64(z[2*8:3*8], z2)
binary.LittleEndian.PutUint64(z[3*8:4*8], z3)
binary.LittleEndian.PutUint64(z[4*8:5*8], z4)
binary.LittleEndian.PutUint64(z[5*8:6*8], z5)
binary.LittleEndian.PutUint64(z[6*8:7*8], z6)
}
func subGeneric(z, x, y *Elt) {
x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8])
x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8])
x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8])
x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8])
x4 := binary.LittleEndian.Uint64(x[4*8 : 5*8])
x5 := binary.LittleEndian.Uint64(x[5*8 : 6*8])
x6 := binary.LittleEndian.Uint64(x[6*8 : 7*8])
y0 := binary.LittleEndian.Uint64(y[0*8 : 1*8])
y1 := binary.LittleEndian.Uint64(y[1*8 : 2*8])
y2 := binary.LittleEndian.Uint64(y[2*8 : 3*8])
y3 := binary.LittleEndian.Uint64(y[3*8 : 4*8])
y4 := binary.LittleEndian.Uint64(y[4*8 : 5*8])
y5 := binary.LittleEndian.Uint64(y[5*8 : 6*8])
y6 := binary.LittleEndian.Uint64(y[6*8 : 7*8])
z0, c0 := bits.Sub64(x0, y0, 0)
z1, c1 := bits.Sub64(x1, y1, c0)
z2, c2 := bits.Sub64(x2, y2, c1)
z3, c3 := bits.Sub64(x3, y3, c2)
z4, c4 := bits.Sub64(x4, y4, c3)
z5, c5 := bits.Sub64(x5, y5, c4)
z6, z7 := bits.Sub64(x6, y6, c5)
z0, c0 = bits.Sub64(z0, z7, 0)
z1, c1 = bits.Sub64(z1, 0, c0)
z2, c2 = bits.Sub64(z2, 0, c1)
z3, c3 = bits.Sub64(z3, z7<<32, c2)
z4, c4 = bits.Sub64(z4, 0, c3)
z5, c5 = bits.Sub64(z5, 0, c4)
z6, z7 = bits.Sub64(z6, 0, c5)
z0, c0 = bits.Sub64(z0, z7, 0)
z1, c1 = bits.Sub64(z1, 0, c0)
z2, c2 = bits.Sub64(z2, 0, c1)
z3, c3 = bits.Sub64(z3, z7<<32, c2)
z4, c4 = bits.Sub64(z4, 0, c3)
z5, c5 = bits.Sub64(z5, 0, c4)
z6, _ = bits.Sub64(z6, 0, c5)
binary.LittleEndian.PutUint64(z[0*8:1*8], z0)
binary.LittleEndian.PutUint64(z[1*8:2*8], z1)
binary.LittleEndian.PutUint64(z[2*8:3*8], z2)
binary.LittleEndian.PutUint64(z[3*8:4*8], z3)
binary.LittleEndian.PutUint64(z[4*8:5*8], z4)
binary.LittleEndian.PutUint64(z[5*8:6*8], z5)
binary.LittleEndian.PutUint64(z[6*8:7*8], z6)
}
func addsubGeneric(x, y *Elt) {
z := &Elt{}
addGeneric(z, x, y)
subGeneric(y, x, y)
*x = *z
}
func mulGeneric(z, x, y *Elt) {
x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8])
x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8])
x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8])
x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8])
x4 := binary.LittleEndian.Uint64(x[4*8 : 5*8])
x5 := binary.LittleEndian.Uint64(x[5*8 : 6*8])
x6 := binary.LittleEndian.Uint64(x[6*8 : 7*8])
y0 := binary.LittleEndian.Uint64(y[0*8 : 1*8])
y1 := binary.LittleEndian.Uint64(y[1*8 : 2*8])
y2 := binary.LittleEndian.Uint64(y[2*8 : 3*8])
y3 := binary.LittleEndian.Uint64(y[3*8 : 4*8])
y4 := binary.LittleEndian.Uint64(y[4*8 : 5*8])
y5 := binary.LittleEndian.Uint64(y[5*8 : 6*8])
y6 := binary.LittleEndian.Uint64(y[6*8 : 7*8])
yy := [7]uint64{y0, y1, y2, y3, y4, y5, y6}
zz := [7]uint64{}
yi := yy[0]
h0, l0 := bits.Mul64(x0, yi)
h1, l1 := bits.Mul64(x1, yi)
h2, l2 := bits.Mul64(x2, yi)
h3, l3 := bits.Mul64(x3, yi)
h4, l4 := bits.Mul64(x4, yi)
h5, l5 := bits.Mul64(x5, yi)
h6, l6 := bits.Mul64(x6, yi)
zz[0] = l0
a0, c0 := bits.Add64(h0, l1, 0)
a1, c1 := bits.Add64(h1, l2, c0)
a2, c2 := bits.Add64(h2, l3, c1)
a3, c3 := bits.Add64(h3, l4, c2)
a4, c4 := bits.Add64(h4, l5, c3)
a5, c5 := bits.Add64(h5, l6, c4)
a6, _ := bits.Add64(h6, 0, c5)
for i := 1; i < 7; i++ {
yi = yy[i]
h0, l0 = bits.Mul64(x0, yi)
h1, l1 = bits.Mul64(x1, yi)
h2, l2 = bits.Mul64(x2, yi)
h3, l3 = bits.Mul64(x3, yi)
h4, l4 = bits.Mul64(x4, yi)
h5, l5 = bits.Mul64(x5, yi)
h6, l6 = bits.Mul64(x6, yi)
zz[i], c0 = bits.Add64(a0, l0, 0)
a0, c1 = bits.Add64(a1, l1, c0)
a1, c2 = bits.Add64(a2, l2, c1)
a2, c3 = bits.Add64(a3, l3, c2)
a3, c4 = bits.Add64(a4, l4, c3)
a4, c5 = bits.Add64(a5, l5, c4)
a5, a6 = bits.Add64(a6, l6, c5)
a0, c0 = bits.Add64(a0, h0, 0)
a1, c1 = bits.Add64(a1, h1, c0)
a2, c2 = bits.Add64(a2, h2, c1)
a3, c3 = bits.Add64(a3, h3, c2)
a4, c4 = bits.Add64(a4, h4, c3)
a5, c5 = bits.Add64(a5, h5, c4)
a6, _ = bits.Add64(a6, h6, c5)
}
red64(z, &zz, &[7]uint64{a0, a1, a2, a3, a4, a5, a6})
}
func sqrGeneric(z, x *Elt) { mulGeneric(z, x, x) }
func red64(z *Elt, l, h *[7]uint64) {
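// red64 reduces the 14-limb value (low half l = C0..C6, high half h = C7..C13)
// using 2^448 = 2^224 + 1 (mod p): l + h*2^448 = l + h + (h << 224) (mod p).
// Since 224 bits is 3.5 limbs, the h<<224 term surfaces below as limbs
// recombined at 32-bit offsets.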
/* (2C13, 2C12, 2C11, 2C10|C10, C9, C8, C7) + (C6,...,C0) */
h0 := h[0]
h1 := h[1]
h2 := h[2]
h3 := ((h[3] & (0xFFFFFFFF << 32)) << 1) | (h[3] & 0xFFFFFFFF)
h4 := (h[3] >> 63) | (h[4] << 1)
h5 := (h[4] >> 63) | (h[5] << 1)
h6 := (h[5] >> 63) | (h[6] << 1)
h7 := (h[6] >> 63)
l0, c0 := bits.Add64(h0, l[0], 0)
l1, c1 := bits.Add64(h1, l[1], c0)
l2, c2 := bits.Add64(h2, l[2], c1)
l3, c3 := bits.Add64(h3, l[3], c2)
l4, c4 := bits.Add64(h4, l[4], c3)
l5, c5 := bits.Add64(h5, l[5], c4)
l6, c6 := bits.Add64(h6, l[6], c5)
l7, _ := bits.Add64(h7, 0, c6)
/* (C10C9, C9C8,C8C7,C7C13,C13C12,C12C11,C11C10) + (C6,...,C0) */
h0 = (h[3] >> 32) | (h[4] << 32)
h1 = (h[4] >> 32) | (h[5] << 32)
h2 = (h[5] >> 32) | (h[6] << 32)
h3 = (h[6] >> 32) | (h[0] << 32)
h4 = (h[0] >> 32) | (h[1] << 32)
h5 = (h[1] >> 32) | (h[2] << 32)
h6 = (h[2] >> 32) | (h[3] << 32)
l0, c0 = bits.Add64(l0, h0, 0)
l1, c1 = bits.Add64(l1, h1, c0)
l2, c2 = bits.Add64(l2, h2, c1)
l3, c3 = bits.Add64(l3, h3, c2)
l4, c4 = bits.Add64(l4, h4, c3)
l5, c5 = bits.Add64(l5, h5, c4)
l6, c6 = bits.Add64(l6, h6, c5)
l7, _ = bits.Add64(l7, 0, c6)
/* (C7) + (C6,...,C0) */
l0, c0 = bits.Add64(l0, l7, 0)
l1, c1 = bits.Add64(l1, 0, c0)
l2, c2 = bits.Add64(l2, 0, c1)
l3, c3 = bits.Add64(l3, l7<<32, c2)
l4, c4 = bits.Add64(l4, 0, c3)
l5, c5 = bits.Add64(l5, 0, c4)
l6, l7 = bits.Add64(l6, 0, c5)
/* (C7) + (C6,...,C0) */
l0, c0 = bits.Add64(l0, l7, 0)
l1, c1 = bits.Add64(l1, 0, c0)
l2, c2 = bits.Add64(l2, 0, c1)
l3, c3 = bits.Add64(l3, l7<<32, c2)
l4, c4 = bits.Add64(l4, 0, c3)
l5, c5 = bits.Add64(l5, 0, c4)
l6, _ = bits.Add64(l6, 0, c5)
binary.LittleEndian.PutUint64(z[0*8:1*8], l0)
binary.LittleEndian.PutUint64(z[1*8:2*8], l1)
binary.LittleEndian.PutUint64(z[2*8:3*8], l2)
binary.LittleEndian.PutUint64(z[3*8:4*8], l3)
binary.LittleEndian.PutUint64(z[4*8:5*8], l4)
binary.LittleEndian.PutUint64(z[5*8:6*8], l5)
binary.LittleEndian.PutUint64(z[6*8:7*8], l6)
}


@@ -0,0 +1,12 @@
//go:build !amd64 || purego
// +build !amd64 purego
package fp448
func cmov(x, y *Elt, n uint) { cmovGeneric(x, y, n) }
func cswap(x, y *Elt, n uint) { cswapGeneric(x, y, n) }
func add(z, x, y *Elt) { addGeneric(z, x, y) }
func sub(z, x, y *Elt) { subGeneric(z, x, y) }
func addsub(x, y *Elt) { addsubGeneric(x, y) }
func mul(z, x, y *Elt) { mulGeneric(z, x, y) }
func sqr(z, x *Elt) { sqrGeneric(z, x) }


@@ -0,0 +1,75 @@
//go:build gofuzz
// +build gofuzz
// How to run the fuzzer:
//
// $ go get -u github.com/dvyukov/go-fuzz/go-fuzz
// $ go get -u github.com/dvyukov/go-fuzz/go-fuzz-build
// $ go-fuzz-build -libfuzzer -func FuzzReduction -o lib.a
// $ clang -fsanitize=fuzzer lib.a -o fu.exe
// $ ./fu.exe
package fp448
import (
"encoding/binary"
"fmt"
"math/big"
"github.com/cloudflare/circl/internal/conv"
)
// FuzzReduction is a fuzzer target for the red64 function, which reduces t
// (112 bytes) to a number t' (56 bytes) congruent with t modulo p.
func FuzzReduction(data []byte) int {
if len(data) != 2*Size {
return -1
}
var got, want Elt
var lo, hi [7]uint64
a := data[:Size]
b := data[Size:]
lo[0] = binary.LittleEndian.Uint64(a[0*8 : 1*8])
lo[1] = binary.LittleEndian.Uint64(a[1*8 : 2*8])
lo[2] = binary.LittleEndian.Uint64(a[2*8 : 3*8])
lo[3] = binary.LittleEndian.Uint64(a[3*8 : 4*8])
lo[4] = binary.LittleEndian.Uint64(a[4*8 : 5*8])
lo[5] = binary.LittleEndian.Uint64(a[5*8 : 6*8])
lo[6] = binary.LittleEndian.Uint64(a[6*8 : 7*8])
hi[0] = binary.LittleEndian.Uint64(b[0*8 : 1*8])
hi[1] = binary.LittleEndian.Uint64(b[1*8 : 2*8])
hi[2] = binary.LittleEndian.Uint64(b[2*8 : 3*8])
hi[3] = binary.LittleEndian.Uint64(b[3*8 : 4*8])
hi[4] = binary.LittleEndian.Uint64(b[4*8 : 5*8])
hi[5] = binary.LittleEndian.Uint64(b[5*8 : 6*8])
hi[6] = binary.LittleEndian.Uint64(b[6*8 : 7*8])
red64(&got, &lo, &hi)
t := conv.BytesLe2BigInt(data[:2*Size])
two448 := big.NewInt(1)
two448.Lsh(two448, 448) // 2^448
mask448 := big.NewInt(1)
mask448.Sub(two448, mask448) // 2^448-1
two224plus1 := big.NewInt(1)
two224plus1.Lsh(two224plus1, 224)
two224plus1.Add(two224plus1, big.NewInt(1)) // 2^224+1
var loBig, hiBig big.Int
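// Reference reduction: while t >= 2^448, rewrite t = hi*2^448 + lo as
// t = hi*(2^224+1) + lo, which preserves t mod p; the check below expects
// red64 to produce exactly this representative below 2^448.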
for t.Cmp(two448) >= 0 {
loBig.And(t, mask448)
hiBig.Rsh(t, 448)
t.Mul(&hiBig, two224plus1)
t.Add(t, &loBig)
}
conv.BigInt2BytesLe(want[:], t)
if got != want {
fmt.Printf("in: %v\n", conv.BytesLe2BigInt(data[:2*Size]))
fmt.Printf("got: %v\n", got)
fmt.Printf("want: %v\n", want)
panic("error found")
}
return 1
}