// mev-beta/vendor/github.com/supranational/blst/bindings/go/blst_minpk.tgo

import (
    "runtime"
    "sync"
    "sync/atomic"
)

//
// PublicKey
//

func (pk *P1Affine) From(s *Scalar) *P1Affine {
    C.blst_sk_to_pk2_in_g1(nil, &pk.cgo, &s.cgo)
    return pk
}
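
// Illustrative sketch (not part of this file): deriving a public key from a
// fresh secret key. KeyGen comes from these bindings; the zeroed ikm below is
// a placeholder assumption and must be proper randomness in practice.
//
//    var ikm [32]byte // assumption: fill from a CSPRNG
//    sk := KeyGen(ikm[:])
//    pk := new(P1Affine).From(sk)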

func (pk *P1Affine) KeyValidate() bool {
    return bool(C.go_p1_affine_validate(&pk.cgo, true))
}

// sigInfcheck, a check for infinity, is a way to avoid going into
// resource-consuming verification. Passing 'false' is always
// cryptographically safe, but an application might want to guard
// against obviously bogus individual[!] signatures.
func (sig *P2Affine) SigValidate(sigInfcheck bool) bool {
    return bool(C.go_p2_affine_validate(&sig.cgo, C.bool(sigInfcheck)))
}
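
// Illustrative sketch: typical pre-verification hygiene on deserialized
// inputs. Rejecting the infinity signature here is an application policy
// choice, per the comment above; the error handling is hypothetical.
//
//    if !pk.KeyValidate() || !sig.SigValidate(true) {
//        return errors.New("invalid key or signature")
//    }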

//
// Sign
//

func (sig *P2Affine) Sign(sk *SecretKey, msg []byte, dst []byte,
    optional ...interface{}) *P2Affine {
    augSingle, aug, useHash, ok := parseOpts(optional...)
    if !ok || len(aug) != 0 {
        return nil
    }

    var q *P2
    if useHash {
        q = HashToG2(msg, dst, augSingle)
    } else {
        q = EncodeToG2(msg, dst, augSingle)
    }
    C.blst_sign_pk2_in_g1(nil, &sig.cgo, &q.cgo, &sk.cgo)
    return sig
}
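
// Illustrative sketch: signing a message under the standard minimal-pubkey-
// size proof-of-possession ciphersuite tag. msg and sk are assumed to exist.
//
//    dst := []byte("BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_POP_")
//    sig := new(P2Affine).Sign(sk, msg, dst)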

//
// Signature
//

// Functions to return a signature and public key+augmentation tuple.
// This enables point decompression (if needed) to happen in parallel.
type sigGetterP2 func() *P2Affine
type pkGetterP1 func(i uint32, temp *P1Affine) (*P1Affine, []byte)

// Single verify with decompressed pk
func (sig *P2Affine) Verify(sigGroupcheck bool, pk *P1Affine, pkValidate bool,
    msg Message, dst []byte,
    optional ...interface{}) bool { // useHash bool, aug []byte
    aug, _, useHash, ok := parseOpts(optional...)
    if !ok {
        return false
    }
    return sig.AggregateVerify(sigGroupcheck, []*P1Affine{pk}, pkValidate,
        []Message{msg}, dst, useHash, [][]byte{aug})
}

// Single verify with compressed pk
// Uses a dummy signature to get the correct type
func (dummy *P2Affine) VerifyCompressed(sig []byte, sigGroupcheck bool,
    pk []byte, pkValidate bool, msg Message, dst []byte,
    optional ...bool) bool { // useHash bool, usePksAsAugs bool
    return dummy.AggregateVerifyCompressed(sig, sigGroupcheck,
        [][]byte{pk}, pkValidate,
        []Message{msg}, dst, optional...)
}
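
// Illustrative sketch: verifying a single signature, from decompressed points
// and from compressed byte slices. sigBytes and pkBytes are assumed to be
// 96- and 48-byte compressed encodings respectively.
//
//    ok := sig.Verify(true, pk, true, msg, dst)
//
//    var dummy P2Affine
//    ok = dummy.VerifyCompressed(sigBytes, true, pkBytes, true, msg, dst)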

// Aggregate verify with uncompressed signature and public keys
// Note that checking message uniqueness, if required, is left to the user.
// Not all signature schemes require it and this keeps the binding minimal
// and fast. Refer to the Uniq function for one method of performing this
// check.
func (sig *P2Affine) AggregateVerify(sigGroupcheck bool,
    pks []*P1Affine, pksVerify bool, msgs []Message, dst []byte,
    optional ...interface{}) bool { // useHash bool, augs [][]byte

    // sanity checks and argument parsing
    n := len(pks)
    if n == 0 || len(msgs) != n {
        return false
    }
    _, augs, useHash, ok := parseOpts(optional...)
    useAugs := len(augs) != 0
    if !ok || (useAugs && len(augs) != n) {
        return false
    }

    sigFn := func() *P2Affine {
        return sig
    }

    pkFn := func(i uint32, _ *P1Affine) (*P1Affine, []byte) {
        if useAugs {
            return pks[i], augs[i]
        }
        return pks[i], nil
    }

    return coreAggregateVerifyPkInG1(sigFn, sigGroupcheck, pkFn, pksVerify,
        msgs, dst, useHash)
}
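
// Illustrative sketch: aggregating two signatures over distinct messages and
// verifying the aggregate. P2Aggregate is defined later in this file; sig1,
// sig2, pk1, pk2, msg1, and msg2 are assumed to exist.
//
//    var agg P2Aggregate
//    if !agg.Aggregate([]*P2Affine{sig1, sig2}, true) {
//        // handle aggregation failure
//    }
//    asig := agg.ToAffine()
//    ok := asig.AggregateVerify(true, []*P1Affine{pk1, pk2}, true,
//        []Message{msg1, msg2}, dst)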

// Aggregate verify with compressed signature and public keys
// Uses a dummy signature to get the correct type
func (*P2Affine) AggregateVerifyCompressed(sig []byte, sigGroupcheck bool,
    pks [][]byte, pksVerify bool, msgs []Message, dst []byte,
    optional ...bool) bool { // useHash bool, usePksAsAugs bool

    // sanity checks and argument parsing
    if len(pks) != len(msgs) {
        return false
    }
    useHash := true
    if len(optional) > 0 {
        useHash = optional[0]
    }
    usePksAsAugs := false
    if len(optional) > 1 {
        usePksAsAugs = optional[1]
    }

    sigFn := func() *P2Affine {
        sigP := new(P2Affine)
        if sigP.Uncompress(sig) == nil {
            return nil
        }
        return sigP
    }
    pkFn := func(i uint32, pk *P1Affine) (*P1Affine, []byte) {
        bytes := pks[i]
        if len(bytes) == BLST_P1_SERIALIZE_BYTES && (bytes[0]&0x80) == 0 {
            // Not compressed
            if pk.Deserialize(bytes) == nil {
                return nil, nil
            }
        } else if len(bytes) == BLST_P1_COMPRESS_BYTES && (bytes[0]&0x80) != 0 {
            if pk.Uncompress(bytes) == nil {
                return nil, nil
            }
        } else {
            return nil, nil
        }
        if usePksAsAugs {
            return pk, bytes
        }
        return pk, nil
    }
    return coreAggregateVerifyPkInG1(sigFn, sigGroupcheck, pkFn, pksVerify,
        msgs, dst, useHash)
}

func coreAggregateVerifyPkInG1(sigFn sigGetterP2, sigGroupcheck bool,
    pkFn pkGetterP1, pkValidate bool, msgs []Message, dst []byte,
    optional ...bool) bool { // useHash

    n := len(msgs)
    if n == 0 {
        return false
    }

    useHash := true
    if len(optional) > 0 {
        useHash = optional[0]
    }

    numCores := runtime.GOMAXPROCS(0)
    numThreads := numThreads(n)

    // Each thread will determine the next message to process by atomically
    // incrementing curItem, process the corresponding pk,msg[,aug] tuple,
    // and repeat until n is exceeded. The resulting accumulations will be
    // fed into the msgsCh channel.
    msgsCh := make(chan Pairing, numThreads)
    valid := int32(1)
    curItem := uint32(0)
    mutex := sync.Mutex{}

    mutex.Lock()
    for tid := 0; tid < numThreads; tid++ {
        go func() {
            pairing := PairingCtx(useHash, dst)
            var temp P1Affine
            for atomic.LoadInt32(&valid) > 0 {
                // Get a work item
                work := atomic.AddUint32(&curItem, 1) - 1
                if work >= uint32(n) {
                    break
                } else if work == 0 && maxProcs == numCores-1 &&
                    numThreads == maxProcs {
                    // Avoid consuming all cores by waiting until the
                    // main thread has completed its Miller loop before
                    // proceeding.
                    mutex.Lock()
                    mutex.Unlock() //nolint:staticcheck
                }

                // Pull the public key and augmentation blob
                curPk, aug := pkFn(work, &temp)
                if curPk == nil {
                    atomic.StoreInt32(&valid, 0)
                    break
                }

                // Pair and accumulate
                ret := PairingAggregatePkInG1(pairing, curPk, pkValidate,
                    nil, false, msgs[work], aug)
                if ret != C.BLST_SUCCESS {
                    atomic.StoreInt32(&valid, 0)
                    break
                }

                // application might have some async work to do
                runtime.Gosched()
            }
            if atomic.LoadInt32(&valid) > 0 {
                PairingCommit(pairing)
                msgsCh <- pairing
            } else {
                msgsCh <- nil
            }
        }()
    }

    // Uncompress and check the signature
    var gtsig Fp12
    sig := sigFn()
    if sig == nil {
        atomic.StoreInt32(&valid, 0)
    }
    if atomic.LoadInt32(&valid) > 0 && sigGroupcheck &&
        !sig.SigValidate(false) {
        atomic.StoreInt32(&valid, 0)
    }
    if atomic.LoadInt32(&valid) > 0 {
        C.blst_aggregated_in_g2(&gtsig.cgo, &sig.cgo)
    }
    mutex.Unlock()

    // Accumulate the thread results
    var pairings Pairing
    for i := 0; i < numThreads; i++ {
        msg := <-msgsCh
        if msg != nil {
            if pairings == nil {
                pairings = msg
            } else {
                ret := PairingMerge(pairings, msg)
                if ret != C.BLST_SUCCESS {
                    atomic.StoreInt32(&valid, 0)
                }
            }
        }
    }
    if atomic.LoadInt32(&valid) == 0 || pairings == nil {
        return false
    }

    return PairingFinalVerify(pairings, &gtsig)
}
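
// Illustrative sketch of the work-distribution pattern used above, in
// isolation: an atomic counter hands out item indices to a fixed pool of
// goroutines, and a shared flag aborts all workers on the first failure.
// processItem, numWorkers, and n are hypothetical stand-ins.
//
//    var curItem uint32
//    valid := int32(1)
//    for tid := 0; tid < numWorkers; tid++ {
//        go func() {
//            for atomic.LoadInt32(&valid) > 0 {
//                work := atomic.AddUint32(&curItem, 1) - 1
//                if work >= uint32(n) {
//                    break
//                }
//                if !processItem(work) {
//                    atomic.StoreInt32(&valid, 0)
//                }
//            }
//        }()
//    }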

// CoreVerifyPkInG1 multiplexes between the parallel Go path and a single
// C call: when the process is not already saturated with goroutines, the
// parallel path above is used; otherwise verification is done inline.
func CoreVerifyPkInG1(pk *P1Affine, sig *P2Affine, hash_or_encode bool,
    msg Message, dst []byte, optional ...[]byte) int {

    var aug []byte
    if len(optional) > 0 {
        aug = optional[0]
    }

    if runtime.NumGoroutine() < maxProcs {
        sigFn := func() *P2Affine {
            return sig
        }
        pkFn := func(_ uint32, _ *P1Affine) (*P1Affine, []byte) {
            return pk, aug
        }
        if !coreAggregateVerifyPkInG1(sigFn, true, pkFn, true, []Message{msg},
            dst, hash_or_encode) {
            return C.BLST_VERIFY_FAIL
        }
        return C.BLST_SUCCESS
    }

    return int(C.blst_core_verify_pk_in_g1(&pk.cgo, &sig.cgo,
        C.bool(hash_or_encode),
        ptrOrNil(msg), C.size_t(len(msg)),
        ptrOrNil(dst), C.size_t(len(dst)),
        ptrOrNil(aug), C.size_t(len(aug))))
}

// pks are assumed to be verified for proof of possession,
// which implies that they are already group-checked
func (sig *P2Affine) FastAggregateVerify(sigGroupcheck bool,
    pks []*P1Affine, msg Message, dst []byte,
    optional ...interface{}) bool { // pass-through to Verify

    n := len(pks)

    // TODO: return value for length zero?
    if n == 0 {
        return false
    }

    aggregator := new(P1Aggregate)
    if !aggregator.Aggregate(pks, false) {
        return false
    }
    pkAff := aggregator.ToAffine()

    // Verify
    return sig.Verify(sigGroupcheck, pkAff, false, msg, dst, optional...)
}
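
// Illustrative sketch: fast aggregate verification of one message signed by
// several proof-of-possession-verified keys. agg is assumed to be a
// P2Aggregate over the individual signatures; pks, msg, and dst exist.
//
//    asig := agg.ToAffine()
//    ok := asig.FastAggregateVerify(true, pks, msg, dst)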

func (*P2Affine) MultipleAggregateVerify(sigs []*P2Affine,
    sigsGroupcheck bool, pks []*P1Affine, pksVerify bool,
    msgs []Message, dst []byte, randFn func(*Scalar), randBits int,
    optional ...interface{}) bool { // useHash

    // Sanity checks and argument parsing
    n := len(pks)
    if n == 0 || len(msgs) != n || len(sigs) != n {
        return false
    }
    _, augs, useHash, ok := parseOpts(optional...)
    useAugs := len(augs) != 0
    if !ok || (useAugs && len(augs) != n) {
        return false
    }

    paramsFn :=
        func(work uint32, _ *P2Affine, _ *P1Affine, rand *Scalar) (
            *P2Affine, *P1Affine, *Scalar, []byte) {
            randFn(rand)
            var aug []byte
            if useAugs {
                aug = augs[work]
            }
            return sigs[work], pks[work], rand, aug
        }

    return multipleAggregateVerifyPkInG1(paramsFn, sigsGroupcheck, pksVerify,
        msgs, dst, randBits, useHash)
}
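
// Illustrative sketch: randomized batch verification. Each (pk, msg, sig)
// triple is weighted by a random scalar so invalid combinations cannot
// cancel out. Drawing from crypto/rand via Scalar.FromBEndian and the 64-bit
// randBits choice follow common usage and are assumptions here.
//
//    randFn := func(s *Scalar) {
//        var rb [BLST_SCALAR_BYTES]byte
//        _, _ = rand.Read(rb[:]) // crypto/rand
//        s.FromBEndian(rb[:])
//    }
//    var dummy P2Affine
//    ok := dummy.MultipleAggregateVerify(sigs, true, pks, true,
//        msgs, dst, randFn, 64)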

type mulAggGetterPkInG1 func(work uint32, sig *P2Affine, pk *P1Affine,
    rand *Scalar) (*P2Affine, *P1Affine, *Scalar, []byte)

func multipleAggregateVerifyPkInG1(paramsFn mulAggGetterPkInG1,
    sigsGroupcheck bool, pksVerify bool, msgs []Message,
    dst []byte, randBits int,
    optional ...bool) bool { // useHash

    n := len(msgs)
    if n == 0 {
        return false
    }

    useHash := true
    if len(optional) > 0 {
        useHash = optional[0]
    }

    numThreads := numThreads(n)

    // Each thread will determine the next message to process by atomically
    // incrementing curItem, process the corresponding pk,msg[,aug] tuple,
    // and repeat until n is exceeded. The resulting accumulations will be
    // fed into the msgsCh channel.
    msgsCh := make(chan Pairing, numThreads)
    valid := int32(1)
    curItem := uint32(0)

    for tid := 0; tid < numThreads; tid++ {
        go func() {
            pairing := PairingCtx(useHash, dst)
            var tempRand Scalar
            var tempPk P1Affine
            var tempSig P2Affine
            for atomic.LoadInt32(&valid) > 0 {
                // Get a work item
                work := atomic.AddUint32(&curItem, 1) - 1
                if work >= uint32(n) {
                    break
                }

                curSig, curPk, curRand, aug := paramsFn(work, &tempSig,
                    &tempPk, &tempRand)

                if PairingMulNAggregatePkInG1(pairing, curPk, pksVerify,
                    curSig, sigsGroupcheck, curRand,
                    randBits, msgs[work], aug) !=
                    C.BLST_SUCCESS {
                    atomic.StoreInt32(&valid, 0)
                    break
                }

                // application might have some async work to do
                runtime.Gosched()
            }
            if atomic.LoadInt32(&valid) > 0 {
                PairingCommit(pairing)
                msgsCh <- pairing
            } else {
                msgsCh <- nil
            }
        }()
    }

    // Accumulate the thread results
    var pairings Pairing
    for i := 0; i < numThreads; i++ {
        msg := <-msgsCh
        if msg != nil {
            if pairings == nil {
                pairings = msg
            } else {
                ret := PairingMerge(pairings, msg)
                if ret != C.BLST_SUCCESS {
                    atomic.StoreInt32(&valid, 0)
                }
            }
        }
    }
    if atomic.LoadInt32(&valid) == 0 || pairings == nil {
        return false
    }

    return PairingFinalVerify(pairings, nil)
}

//
// Aggregate P2
//

type aggGetterP2 func(i uint32, temp *P2Affine) *P2Affine
type P2Aggregate struct {
    v *P2
}

// Aggregate uncompressed elements
func (agg *P2Aggregate) Aggregate(elmts []*P2Affine,
    groupcheck bool) bool {
    if len(elmts) == 0 {
        return true
    }
    getter := func(i uint32, _ *P2Affine) *P2Affine { return elmts[i] }
    return agg.coreAggregate(getter, groupcheck, len(elmts))
}
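
// Illustrative sketch: folding a slice of individual signatures into one
// aggregate point. Group-checking on aggregation can be skipped when the
// inputs were already validated elsewhere; sigs is assumed to exist.
//
//    var agg P2Aggregate
//    if agg.Aggregate(sigs, true) {
//        asig := agg.ToAffine()
//        _ = asig.Compress() // 96-byte wire encoding
//    }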

func (agg *P2Aggregate) AggregateWithRandomness(pointsIf interface{},
    scalarsIf interface{}, nbits int, groupcheck bool) bool {
    if groupcheck && !P2AffinesValidate(pointsIf) {
        return false
    }
    agg.v = P2AffinesMult(pointsIf, scalarsIf, nbits)
    return true
}

// Aggregate compressed elements
func (agg *P2Aggregate) AggregateCompressed(elmts [][]byte,
    groupcheck bool) bool {
    if len(elmts) == 0 {
        return true
    }
    getter := func(i uint32, p *P2Affine) *P2Affine {
        bytes := elmts[i]
        if p.Uncompress(bytes) == nil {
            return nil
        }
        return p
    }
    return agg.coreAggregate(getter, groupcheck, len(elmts))
}

func (agg *P2Aggregate) AddAggregate(other *P2Aggregate) {
    if other.v == nil {
        // do nothing
    } else if agg.v == nil {
        agg.v = other.v
    } else {
        C.blst_p2_add_or_double(&agg.v.cgo, &agg.v.cgo, &other.v.cgo)
    }
}

func (agg *P2Aggregate) Add(elmt *P2Affine, groupcheck bool) bool {
    if groupcheck && !bool(C.blst_p2_affine_in_g2(&elmt.cgo)) {
        return false
    }
    if agg.v == nil {
        agg.v = new(P2)
        C.blst_p2_from_affine(&agg.v.cgo, &elmt.cgo)
    } else {
        C.blst_p2_add_or_double_affine(&agg.v.cgo, &agg.v.cgo, &elmt.cgo)
    }
    return true
}

func (agg *P2Aggregate) ToAffine() *P2Affine {
    if agg.v == nil {
        return new(P2Affine)
    }
    return agg.v.ToAffine()
}
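
// Illustrative sketch: building an aggregate incrementally as signatures
// arrive, then merging partial aggregates from independent workers. incoming,
// left, and right are hypothetical.
//
//    var agg P2Aggregate
//    for _, s := range incoming { // incoming []*P2Affine, assumed
//        if !agg.Add(s, true) {
//            break // point failed the G2 group check
//        }
//    }
//    left.AddAggregate(&right)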

func (agg *P2Aggregate) coreAggregate(getter aggGetterP2, groupcheck bool,
    n int) bool {
    if n == 0 {
        return true
    }
    // Operations are considered short enough that we don't bother
    // keeping one core free...
    numThreads := runtime.GOMAXPROCS(0)
    if numThreads > n {
        numThreads = n
    }

    valid := int32(1)
    type result struct {
        agg   *P2
        empty bool
    }
    msgs := make(chan result, numThreads)
    curItem := uint32(0)
    for tid := 0; tid < numThreads; tid++ {
        go func() {
            first := true
            var agg P2
            var temp P2Affine
            for atomic.LoadInt32(&valid) > 0 {
                // Get a work item
                work := atomic.AddUint32(&curItem, 1) - 1
                if work >= uint32(n) {
                    break
                }

                // Fetch and, if requested, group-check the element
                curElmt := getter(work, &temp)
                if curElmt == nil {
                    atomic.StoreInt32(&valid, 0)
                    break
                }
                if groupcheck && !bool(C.blst_p2_affine_in_g2(&curElmt.cgo)) {
                    atomic.StoreInt32(&valid, 0)
                    break
                }
                if first {
                    C.blst_p2_from_affine(&agg.cgo, &curElmt.cgo)
                    first = false
                } else {
                    C.blst_p2_add_or_double_affine(&agg.cgo, &agg.cgo,
                        &curElmt.cgo)
                }

                // application might have some async work to do
                runtime.Gosched()
            }

            if first {
                msgs <- result{nil, true}
            } else if atomic.LoadInt32(&valid) > 0 {
                msgs <- result{&agg, false}
            } else {
                msgs <- result{nil, false}
            }
        }()
    }

    // Accumulate the thread results
    first := agg.v == nil
    validLocal := true
    for i := 0; i < numThreads; i++ {
        msg := <-msgs
        if !validLocal || msg.empty {
            // do nothing
        } else if msg.agg == nil {
            validLocal = false
            // This should be unnecessary but seems safer
            atomic.StoreInt32(&valid, 0)
        } else {
            if first {
                agg.v = msg.agg
                first = false
            } else {
                C.blst_p2_add_or_double(&agg.v.cgo, &agg.v.cgo, &msg.agg.cgo)
            }
        }
    }
    if atomic.LoadInt32(&valid) == 0 {
        agg.v = nil
        return false
    }

    return true
}