import "fmt"
//
// Parse out optional arguments for sign and verify.
//
// augSingle []byte - augmentation bytes for aggregate verify (default: nil)
// aug [][]byte - augmentation bytes for signing (default: nil)
// useHash bool - hash (true, default) vs. encode (false) to the curve
//
// ok is false when an argument of any other type is encountered.
//
func parseOpts(optional ...interface{}) (augSingle []byte, aug [][]byte,
	useHash bool, ok bool) {
	useHash = true // hash (true), encode (false)

	for _, opt := range optional {
		switch val := opt.(type) {
		case bool:
			useHash = val
		case []byte:
			augSingle = val
		case [][]byte:
			aug = val
		default:
			// Unrecognized option type: signal failure to the caller.
			return nil, nil, useHash, false
		}
	}
	return augSingle, aug, useHash, true
}
//
|
|
// These methods are inefficient because of cgo call overhead. For this
|
|
// reason they should be used primarily for prototyping with a goal to
|
|
// formulate interfaces that would process multiple scalars per cgo call.
|
|
//
|
|
func (a *Scalar) MulAssign(b *Scalar) (*Scalar, bool) {
|
|
return a, bool(C.blst_sk_mul_n_check(&a.cgo, &a.cgo, &b.cgo))
|
|
}
|
|
|
|
func (a *Scalar) Mul(b *Scalar) (*Scalar, bool) {
|
|
var ret Scalar
|
|
return &ret, bool(C.blst_sk_mul_n_check(&ret.cgo, &a.cgo, &b.cgo))
|
|
}
|
|
|
|
func (a *Scalar) AddAssign(b *Scalar) (*Scalar, bool) {
|
|
return a, bool(C.blst_sk_add_n_check(&a.cgo, &a.cgo, &b.cgo))
|
|
}
|
|
|
|
func (a *Scalar) Add(b *Scalar) (*Scalar, bool) {
|
|
var ret Scalar
|
|
return &ret, bool(C.blst_sk_add_n_check(&ret.cgo, &a.cgo, &b.cgo))
|
|
}
|
|
|
|
func (a *Scalar) SubAssign(b *Scalar) (*Scalar, bool) {
|
|
return a, bool(C.blst_sk_sub_n_check(&a.cgo, &a.cgo, &b.cgo))
|
|
}
|
|
|
|
func (a *Scalar) Sub(b *Scalar) (*Scalar, bool) {
|
|
var ret Scalar
|
|
return &ret, bool(C.blst_sk_sub_n_check(&ret.cgo, &a.cgo, &b.cgo))
|
|
}
|
|
|
|
func (a *Scalar) Inverse() *Scalar {
|
|
var ret Scalar
|
|
C.blst_sk_inverse(&ret.cgo, &a.cgo)
|
|
return &ret
|
|
}
|
|
|
|
//
|
|
// Serialization/Deserialization.
|
|
//
|
|
|
|
// Scalar serdes
|
|
func (s *Scalar) Serialize() []byte {
|
|
var out [BLST_SCALAR_BYTES]byte
|
|
C.blst_bendian_from_scalar((*C.byte)(&out[0]), &s.cgo)
|
|
return out[:]
|
|
}
|
|
|
|
func (s *Scalar) Deserialize(in []byte) *Scalar {
|
|
if len(in) != BLST_SCALAR_BYTES ||
|
|
!C.go_scalar_from_bendian(&s.cgo, (*C.byte)(&in[0])) {
|
|
return nil
|
|
}
|
|
return s
|
|
}
|
|
|
|
func (s *Scalar) Valid() bool {
|
|
return bool(C.blst_sk_check(&s.cgo))
|
|
}
|
|
|
|
func (s *Scalar) HashTo(msg []byte, dst []byte) bool {
|
|
ret := HashToScalar(msg, dst)
|
|
if ret != nil {
|
|
*s = *ret
|
|
return true
|
|
}
|
|
return false
|
|
}
|
|
|
|
func HashToScalar(msg []byte, dst []byte) *Scalar {
|
|
var ret Scalar
|
|
|
|
if C.go_hash_to_scalar(&ret.cgo, ptrOrNil(msg), C.size_t(len(msg)),
|
|
ptrOrNil(dst), C.size_t(len(dst))) {
|
|
return &ret
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
//
|
|
// LEndian
|
|
//
|
|
|
|
func (fr *Scalar) ToLEndian() []byte {
|
|
var arr [BLST_SCALAR_BYTES]byte
|
|
C.blst_lendian_from_scalar((*C.byte)(&arr[0]), &fr.cgo)
|
|
return arr[:]
|
|
}
|
|
|
|
func (fp *Fp) ToLEndian() []byte {
|
|
var arr [BLST_FP_BYTES]byte
|
|
C.blst_lendian_from_fp((*C.byte)(&arr[0]), &fp.cgo)
|
|
return arr[:]
|
|
}
|
|
|
|
func (fr *Scalar) FromLEndian(arr []byte) *Scalar {
|
|
nbytes := len(arr)
|
|
if nbytes < BLST_SCALAR_BYTES ||
|
|
!C.blst_scalar_from_le_bytes(&fr.cgo, (*C.byte)(&arr[0]), C.size_t(nbytes)) {
|
|
return nil
|
|
}
|
|
return fr
|
|
}
|
|
|
|
func (fp *Fp) FromLEndian(arr []byte) *Fp {
|
|
if len(arr) != BLST_FP_BYTES {
|
|
return nil
|
|
}
|
|
C.blst_fp_from_lendian(&fp.cgo, (*C.byte)(&arr[0]))
|
|
return fp
|
|
}
|
|
|
|
//
|
|
// BEndian
|
|
//
|
|
|
|
func (fr *Scalar) ToBEndian() []byte {
|
|
var arr [BLST_SCALAR_BYTES]byte
|
|
C.blst_bendian_from_scalar((*C.byte)(&arr[0]), &fr.cgo)
|
|
return arr[:]
|
|
}
|
|
|
|
func (fp *Fp) ToBEndian() []byte {
|
|
var arr [BLST_FP_BYTES]byte
|
|
C.blst_bendian_from_fp((*C.byte)(&arr[0]), &fp.cgo)
|
|
return arr[:]
|
|
}
|
|
|
|
func (fr *Scalar) FromBEndian(arr []byte) *Scalar {
|
|
nbytes := len(arr)
|
|
if nbytes < BLST_SCALAR_BYTES ||
|
|
!C.blst_scalar_from_be_bytes(&fr.cgo, (*C.byte)(&arr[0]), C.size_t(nbytes)) {
|
|
return nil
|
|
}
|
|
return fr
|
|
}
|
|
|
|
func (fp *Fp) FromBEndian(arr []byte) *Fp {
|
|
if len(arr) != BLST_FP_BYTES {
|
|
return nil
|
|
}
|
|
C.blst_fp_from_bendian(&fp.cgo, (*C.byte)(&arr[0]))
|
|
return fp
|
|
}
|
|
|
|
//
// Printing
//

// PrintBytes writes "<name> = <hex(val)>" followed by a newline to stdout.
func PrintBytes(val []byte, name string) {
	line := fmt.Sprintf("%s = %02x\n", name, val)
	fmt.Print(line)
}
func (s *Scalar) Print(name string) {
|
|
arr := s.ToBEndian()
|
|
PrintBytes(arr, name)
|
|
}
|
|
|
|
func (p *P1Affine) Print(name string) {
|
|
fmt.Printf("%s:\n", name)
|
|
x := Fp{p.cgo.x}
|
|
arr := x.ToBEndian()
|
|
PrintBytes(arr, " x")
|
|
y := Fp{p.cgo.y}
|
|
arr = y.ToBEndian()
|
|
PrintBytes(arr, " y")
|
|
}
|
|
|
|
func (p *P1) Print(name string) {
|
|
fmt.Printf("%s:\n", name)
|
|
aff := p.ToAffine()
|
|
aff.Print(name)
|
|
}
|
|
|
|
func (f *Fp2) Print(name string) {
|
|
fmt.Printf("%s:\n", name)
|
|
var arr [BLST_FP_BYTES]byte
|
|
C.blst_bendian_from_fp((*C.byte)(&arr[0]), &f.cgo.fp[0])
|
|
PrintBytes(arr[:], " 0")
|
|
C.blst_bendian_from_fp((*C.byte)(&arr[0]), &f.cgo.fp[1])
|
|
PrintBytes(arr[:], " 1")
|
|
}
|
|
|
|
func (p *P2Affine) Print(name string) {
|
|
fmt.Printf("%s:\n", name)
|
|
x := Fp2{p.cgo.x}
|
|
x.Print(" x")
|
|
y := Fp2{p.cgo.y}
|
|
y.Print(" y")
|
|
}
|
|
|
|
func (p *P2) Print(name string) {
|
|
fmt.Printf("%s:\n", name)
|
|
aff := p.ToAffine()
|
|
aff.Print(name)
|
|
}
|
|
|
|
//
|
|
// Equality
|
|
//
|
|
|
|
func (s1 *Scalar) Equals(s2 *Scalar) bool {
|
|
return *s1 == *s2;
|
|
}
|
|
|
|
func (e1 *Fp) Equals(e2 *Fp) bool {
|
|
return *e1 == *e2;
|
|
}
|
|
|
|
func (e1 *Fp2) Equals(e2 *Fp2) bool {
|
|
return *e1 == *e2;
|
|
}
|
|
|
|
func (e1 *P1Affine) Equals(e2 *P1Affine) bool {
|
|
return bool(C.blst_p1_affine_is_equal(&e1.cgo, &e2.cgo))
|
|
}
|
|
|
|
func (pt *P1Affine) asPtr() *C.blst_p1_affine {
|
|
if (pt != nil) {
|
|
return &pt.cgo
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
func (e1 *P1) Equals(e2 *P1) bool {
|
|
return bool(C.blst_p1_is_equal(&e1.cgo, &e2.cgo))
|
|
}
|
|
|
|
func (e1 *P2Affine) Equals(e2 *P2Affine) bool {
|
|
return bool(C.blst_p2_affine_is_equal(&e1.cgo, &e2.cgo))
|
|
}
|
|
|
|
func (pt *P2Affine) asPtr() *C.blst_p2_affine {
|
|
if (pt != nil) {
|
|
return &pt.cgo
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
func (e1 *P2) Equals(e2 *P2) bool {
|
|
return bool(C.blst_p2_is_equal(&e1.cgo, &e2.cgo))
|
|
}
|
|
|
|
// private thunk for testing
|
|
|
|
func expandMessageXmd(msg []byte, dst []byte, len_in_bytes int) []byte {
|
|
ret := make([]byte, len_in_bytes)
|
|
|
|
C.blst_expand_message_xmd((*C.byte)(&ret[0]), C.size_t(len(ret)),
|
|
ptrOrNil(msg), C.size_t(len(msg)),
|
|
ptrOrNil(dst), C.size_t(len(dst)))
|
|
return ret
|
|
}
|
|
|
|
// breakdown chooses a work split for a multi-scalar multiplication over
// nbits-wide scalars on ncpus threads: nx vertical slices, ny horizontal
// slices, each wnd bits wide. window is the caller's suggested window
// size. The final two lines rebalance wnd so that ny windows cover nbits.
// (gofmt cleanup: removed stray C-style semicolons and normalized spacing;
// logic unchanged.)
func breakdown(nbits, window, ncpus int) (nx int, ny int, wnd int) {
	if nbits > window*ncpus { //nolint:nestif
		// More bits than one window per CPU: keep a single column and
		// pick a window so every CPU gets a comparable share of rows.
		nx = 1
		wnd = bits.Len(uint(ncpus) / 4)
		if (window + wnd) > 18 {
			wnd = window - wnd
		} else {
			wnd = (nbits/window + ncpus - 1) / ncpus
			if (nbits/(window+1)+ncpus-1)/ncpus < wnd {
				wnd = window + 1
			} else {
				wnd = window
			}
		}
	} else {
		// Few bits per CPU: widen nx until the grid saturates ncpus,
		// then step back one.
		nx = 2
		wnd = window - 2
		for (nbits/wnd+1)*nx < ncpus {
			nx += 1
			wnd = window - bits.Len(3*uint(nx)/2)
		}
		nx -= 1
		wnd = window - bits.Len(3*uint(nx)/2)
	}
	// Rebalance: fix the row count, then recompute the window width.
	ny = nbits/wnd + 1
	wnd = nbits/ny + 1

	return nx, ny, wnd
}
// pippenger_window_size picks a Pippenger bucket-window width (in bits)
// from the point count: wider windows for larger inputs, never below 2.
func pippenger_window_size(npoints int) int {
	wbits := bits.Len(uint(npoints))

	switch {
	case wbits > 13:
		return wbits - 4
	case wbits > 5:
		return wbits - 3
	default:
		return 2
	}
}