/*
 * Copyright Supranational LLC
 * Licensed under the Apache License, Version 2.0, see LICENSE for details.
 * SPDX-License-Identifier: Apache-2.0
 */

package blst

// #cgo CFLAGS: -I${SRCDIR}/.. -I${SRCDIR}/../../build -I${SRCDIR}/../../src -D__BLST_CGO__ -fno-builtin-memcpy -fno-builtin-memset
// #cgo amd64 CFLAGS: -D__ADX__ -mno-avx
// // no-asm 64-bit platforms from https://go.dev/doc/install/source
// #cgo loong64 mips64 mips64le ppc64 ppc64le riscv64 s390x CFLAGS: -D__BLST_NO_ASM__
//
// #include "blst.h"
//
// #if defined(__x86_64__) && (defined(__unix__) || defined(__APPLE__))
// # include <signal.h>
// # include <unistd.h>
// static void handler(int signum)
// {   ssize_t n = write(2, "Caught SIGILL in blst_cgo_init, "
//                          "consult <blst>/bindings/go/README.md.\n", 70);
//     _exit(128+SIGILL);
//     (void)n;
// }
// __attribute__((constructor)) static void blst_cgo_init()
// {   blst_fp temp = { 0 };
//     struct sigaction act = { handler }, oact;
//     sigaction(SIGILL, &act, &oact);
//     blst_fp_sqr(&temp, &temp);
//     sigaction(SIGILL, &oact, NULL);
// }
// #endif
//
// static void go_pairing_init(blst_pairing *new_ctx, bool hash_or_encode,
//                             const byte *DST, size_t DST_len)
// {   if (DST != NULL) {
//         byte *dst = (byte*)new_ctx + blst_pairing_sizeof();
//         for (size_t i = 0; i < DST_len; i++)
//             dst[i] = DST[i];
//         DST = dst;
//     }
//     blst_pairing_init(new_ctx, hash_or_encode, DST, DST_len);
// }
// static void go_pairing_as_fp12(blst_fp12 *pt, blst_pairing *ctx)
// {   *pt = *blst_pairing_as_fp12(ctx);   }
//
// static void go_p1slice_to_affine(blst_p1_affine dst[],
//                                  const blst_p1 points[], size_t npoints)
// {   const blst_p1 *ppoints[2] = { points, NULL };
//     blst_p1s_to_affine(dst, ppoints, npoints);
// }
// static void go_p1slice_add(blst_p1 *dst, const blst_p1_affine points[],
//                            size_t npoints)
// {   const blst_p1_affine *ppoints[2] = { points, NULL };
//     blst_p1s_add(dst, ppoints, npoints);
// }
// static void go_p2slice_to_affine(blst_p2_affine dst[],
//                                  const blst_p2 points[], size_t npoints)
// {   const blst_p2 *ppoints[2] = { points, NULL };
//     blst_p2s_to_affine(dst, ppoints, npoints);
// }
// static void go_p2slice_add(blst_p2 *dst, const blst_p2_affine points[],
//                            size_t npoints)
// {   const blst_p2_affine *ppoints[2] = { points, NULL };
//     blst_p2s_add(dst, ppoints, npoints);
// }
//
// static void go_p1_mult_n_acc(blst_p1 *acc, const blst_fp *x, bool affine,
//                              const byte *scalar, size_t nbits)
// {   blst_p1 m[1];
//     const void *p = x;
//     if (p == NULL)
//         p = blst_p1_generator();
//     else if (affine)
//         blst_p1_from_affine(m, p), p = m;
//     blst_p1_mult(m, p, scalar, nbits);
//     blst_p1_add_or_double(acc, acc, m);
// }
// static void go_p2_mult_n_acc(blst_p2 *acc, const blst_fp2 *x, bool affine,
//                              const byte *scalar, size_t nbits)
// {   blst_p2 m[1];
//     const void *p = x;
//     if (p == NULL)
//         p = blst_p2_generator();
//     else if (affine)
//         blst_p2_from_affine(m, p), p = m;
//     blst_p2_mult(m, p, scalar, nbits);
//     blst_p2_add_or_double(acc, acc, m);
// }
//
// static void go_p1_sub_assign(blst_p1 *a, const blst_fp *x, bool affine)
// {   blst_p1 minus_b;
//     if (affine)
//         blst_p1_from_affine(&minus_b, (const blst_p1_affine*)x);
//     else
//         minus_b = *(const blst_p1*)x;
//     blst_p1_cneg(&minus_b, 1);
//     blst_p1_add_or_double(a, a, &minus_b);
// }
//
// static void go_p2_sub_assign(blst_p2 *a, const blst_fp2 *x, bool affine)
// {   blst_p2 minus_b;
//     if (affine)
//         blst_p2_from_affine(&minus_b, (const blst_p2_affine*)x);
//     else
//         minus_b = *(const blst_p2*)x;
//     blst_p2_cneg(&minus_b, 1);
//     blst_p2_add_or_double(a, a, &minus_b);
// }
//
// static bool go_scalar_from_bendian(blst_scalar *ret, const byte *in)
// {   blst_scalar_from_bendian(ret, in);
//     return blst_sk_check(ret);
// }
// static bool go_hash_to_scalar(blst_scalar *ret,
//                               const byte *msg, size_t msg_len,
//                               const byte *DST, size_t DST_len)
// {   byte elem[48];
//     blst_expand_message_xmd(elem, sizeof(elem), msg, msg_len, DST, DST_len);
//     return blst_scalar_from_be_bytes(ret, elem, sizeof(elem));
// }
// static void go_miller_loop_n(blst_fp12 *dst, const blst_p2_affine Q[],
//                              const blst_p1_affine P[],
//                              size_t npoints, bool acc)
// {   const blst_p2_affine *Qs[2] = { Q, NULL };
//     const blst_p1_affine *Ps[2] = { P, NULL };
//     if (acc) {
//         blst_fp12 tmp;
//         blst_miller_loop_n(&tmp, Qs, Ps, npoints);
//         blst_fp12_mul(dst, dst, &tmp);
//     } else {
//         blst_miller_loop_n(dst, Qs, Ps, npoints);
//     }
// }
// static void go_fp12slice_mul(blst_fp12 *dst, const blst_fp12 in[], size_t n)
// {   size_t i;
//     blst_fp12_mul(dst, &in[0], &in[1]);
//     for (i = 2; i < n; i++)
//         blst_fp12_mul(dst, dst, &in[i]);
// }
// static bool go_p1_affine_validate(const blst_p1_affine *p, bool infcheck)
// {   if (infcheck && blst_p1_affine_is_inf(p))
//         return 0;
//     return blst_p1_affine_in_g1(p);
// }
// static bool go_p2_affine_validate(const blst_p2_affine *p, bool infcheck)
// {   if (infcheck && blst_p2_affine_is_inf(p))
//         return 0;
//     return blst_p2_affine_in_g2(p);
// }
import "C"
import (
    "fmt"
    "runtime"
    "sync/atomic"
)

const BLST_SCALAR_BYTES = 256 / 8
const BLST_FP_BYTES = 384 / 8
const BLST_P1_COMPRESS_BYTES = BLST_FP_BYTES
const BLST_P1_SERIALIZE_BYTES = BLST_FP_BYTES * 2
const BLST_P2_COMPRESS_BYTES = BLST_FP_BYTES * 2
const BLST_P2_SERIALIZE_BYTES = BLST_FP_BYTES * 4

type Scalar struct{ cgo C.blst_scalar }
type Fp struct{ cgo C.blst_fp }
type Fp2 struct{ cgo C.blst_fp2 }
type Fp6 = C.blst_fp6
type Fp12 struct{ cgo C.blst_fp12 }
type P1 struct{ cgo C.blst_p1 }
type P2 struct{ cgo C.blst_p2 }
type P1Affine struct{ cgo C.blst_p1_affine }
type P2Affine struct{ cgo C.blst_p2_affine }
type Message = []byte
type Pairing = []C.blst_pairing
type SecretKey = Scalar
type P1s []P1
type P2s []P2
type P1Affines []P1Affine
type P2Affines []P2Affine

//
// Configuration
//

var maxProcs = initMaxProcs()

func initMaxProcs() int {
    maxProcs := runtime.GOMAXPROCS(0)
    var version float32
    _, err := fmt.Sscanf(runtime.Version(), "go%f", &version)
    if err != nil || version < 1.14 {
        // be cooperative and leave one processor for the application
        maxProcs -= 1
    }
    if maxProcs <= 0 {
        maxProcs = 1
    }
    return maxProcs
}

func SetMaxProcs(procs int) {
    if procs <= 0 {
        procs = 1
    }
    maxProcs = procs
}

func numThreads(maxThreads int) int {
    numThreads := maxProcs

    // take into consideration the possibility that the application reduced
    // GOMAXPROCS after |maxProcs| was initialized
    numProcs := runtime.GOMAXPROCS(0)
    if maxProcs > numProcs {
        numThreads = numProcs
    }

    if maxThreads > 0 && numThreads > maxThreads {
        return maxThreads
    }

    return numThreads
}

var cgo_pairingSizeOf = C.blst_pairing_sizeof()
var cgo_p1Generator = P1{*C.blst_p1_generator()}
var cgo_p2Generator = P2{*C.blst_p2_generator()}
var cgo_fp12One = Fp12{*C.blst_fp12_one()}

//
// Secret key
//

func (sk *SecretKey) Zeroize() {
    var zero SecretKey
    *sk = zero
}

func KeyGen(ikm []byte, optional ...[]byte) *SecretKey {
    var sk SecretKey
    var info []byte
    if len(optional) > 0 {
        info = optional[0]
    }
    if len(ikm) < 32 {
        return nil
    }
    C.blst_keygen(&sk.cgo, (*C.byte)(&ikm[0]), C.size_t(len(ikm)),
        ptrOrNil(info), C.size_t(len(info)))

    // Postponing secret key zeroing till garbage collection can be too
    // late to be effective, but every little bit helps...
    runtime.SetFinalizer(&sk, func(sk *SecretKey) { sk.Zeroize() })
    return &sk
}

func KeyGenV3(ikm []byte, optional ...[]byte) *SecretKey {
    if len(ikm) < 32 {
        return nil
    }
    var sk SecretKey
    var info []byte
    if len(optional) > 0 {
        info = optional[0]
    }
    C.blst_keygen_v3(&sk.cgo, (*C.byte)(&ikm[0]), C.size_t(len(ikm)),
        ptrOrNil(info), C.size_t(len(info)))

    // Postponing secret key zeroing till garbage collection can be too
    // late to be effective, but every little bit helps...
    runtime.SetFinalizer(&sk, func(sk *SecretKey) { sk.Zeroize() })
    return &sk
}

func KeyGenV45(ikm []byte, salt []byte, optional ...[]byte) *SecretKey {
    if len(ikm) < 32 {
        return nil
    }
    var sk SecretKey
    var info []byte
    if len(optional) > 0 {
        info = optional[0]
    }
    C.blst_keygen_v4_5(&sk.cgo, (*C.byte)(&ikm[0]), C.size_t(len(ikm)),
        (*C.byte)(&salt[0]), C.size_t(len(salt)),
        ptrOrNil(info), C.size_t(len(info)))

    // Postponing secret key zeroing till garbage collection can be too
    // late to be effective, but every little bit helps...
    runtime.SetFinalizer(&sk, func(sk *SecretKey) { sk.Zeroize() })
    return &sk
}

func KeyGenV5(ikm []byte, salt []byte, optional ...[]byte) *SecretKey {
    if len(ikm) < 32 {
        return nil
    }
    var sk SecretKey
    var info []byte
    if len(optional) > 0 {
        info = optional[0]
    }
    C.blst_keygen_v5(&sk.cgo, (*C.byte)(&ikm[0]), C.size_t(len(ikm)),
        (*C.byte)(&salt[0]), C.size_t(len(salt)),
        ptrOrNil(info), C.size_t(len(info)))

    // Postponing secret key zeroing till garbage collection can be too
    // late to be effective, but every little bit helps...
    runtime.SetFinalizer(&sk, func(sk *SecretKey) { sk.Zeroize() })
    return &sk
}

func DeriveMasterEip2333(ikm []byte) *SecretKey {
    if len(ikm) < 32 {
        return nil
    }
    var sk SecretKey
    C.blst_derive_master_eip2333(&sk.cgo, (*C.byte)(&ikm[0]),
        C.size_t(len(ikm)))

    // Postponing secret key zeroing till garbage collection can be too
    // late to be effective, but every little bit helps...
    runtime.SetFinalizer(&sk, func(sk *SecretKey) { sk.Zeroize() })
    return &sk
}

func (master *SecretKey) DeriveChildEip2333(child_index uint32) *SecretKey {
    var sk SecretKey
    C.blst_derive_child_eip2333(&sk.cgo, &master.cgo, C.uint(child_index))

    // Postponing secret key zeroing till garbage collection can be too
    // late to be effective, but every little bit helps...
    runtime.SetFinalizer(&sk, func(sk *SecretKey) { sk.Zeroize() })
    return &sk
}
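
// deriveChildKeySketch is an illustrative sketch, not part of the upstream
// blst API: it chains the EIP-2333 helpers above to walk a caller-supplied
// derivation path starting from input key material. The function name and
// the plain []uint32 path are assumptions made for this example; real
// wallets typically follow EIP-2334 for the path layout, and the IKM must
// come from a cryptographically secure source.
func deriveChildKeySketch(ikm []byte, path []uint32) *SecretKey {
    // DeriveMasterEip2333 returns nil for IKM shorter than 32 bytes.
    sk := DeriveMasterEip2333(ikm)
    if sk == nil {
        return nil
    }
    // Derive the child at each index in turn from the current key.
    for _, index := range path {
        sk = sk.DeriveChildEip2333(index)
    }
    return sk
}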
//
// Pairing
//

func pairingSizeOf(DST_len C.size_t) int {
    return int((cgo_pairingSizeOf + DST_len + 7) / 8)
}

func PairingCtx(hash_or_encode bool, DST []byte) Pairing {
    DST_len := C.size_t(len(DST))
    ctx := make([]C.blst_pairing, pairingSizeOf(DST_len))
    C.go_pairing_init(&ctx[0], C.bool(hash_or_encode), ptrOrNil(DST), DST_len)
    return ctx
}

func PairingCommit(ctx Pairing) {
    C.blst_pairing_commit(&ctx[0])
}

func PairingMerge(ctx Pairing, ctx1 Pairing) int {
    r := C.blst_pairing_merge(&ctx[0], &ctx1[0])
    return int(r)
}

func PairingFinalVerify(ctx Pairing, optional ...*Fp12) bool {
    var gtsig *Fp12
    if len(optional) > 0 {
        gtsig = optional[0]
    }
    return bool(C.blst_pairing_finalverify(&ctx[0], gtsig.asPtr()))
}

func PairingRawAggregate(ctx Pairing, q *P2Affine, p *P1Affine) {
    C.blst_pairing_raw_aggregate(&ctx[0], &q.cgo, &p.cgo)
}

func PairingAsFp12(ctx Pairing) *Fp12 {
    var pt Fp12
    C.go_pairing_as_fp12(&pt.cgo, &ctx[0])
    return &pt
}
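
// pairingProductIsOneSketch is an illustrative sketch, not part of the
// upstream blst API: it raw-aggregates the supplied (P1Affine, P2Affine)
// pairs into a fresh context and asks whether the product of their pairings
// is the identity in GT. Passing nil as the DST is an assumption that is
// harmless here because raw aggregation never hashes a message; the points
// are assumed to have been validated by the caller.
func pairingProductIsOneSketch(ps []P1Affine, qs []P2Affine) bool {
    if len(ps) == 0 || len(ps) != len(qs) {
        return false
    }
    ctx := PairingCtx(true, nil)
    for i := range ps {
        PairingRawAggregate(ctx, &qs[i], &ps[i])
    }
    // Commit folds the accumulated Miller-loop state before the final check.
    PairingCommit(ctx)
    // With no Fp12 argument, PairingFinalVerify tests the product against one.
    return PairingFinalVerify(ctx)
}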
func Fp12One() Fp12 {
    return cgo_fp12One
}

func Fp12FinalVerify(pt1 *Fp12, pt2 *Fp12) bool {
    return bool(C.blst_fp12_finalverify(&pt1.cgo, &pt2.cgo))
}

func Fp12MillerLoop(q *P2Affine, p *P1Affine) *Fp12 {
    var pt Fp12
    C.blst_miller_loop(&pt.cgo, &q.cgo, &p.cgo)
    return &pt
}

func Fp12MillerLoopN(qs []P2Affine, ps []P1Affine) *Fp12 {
    if len(qs) != len(ps) || len(qs) == 0 {
        panic("inputs' lengths mismatch")
    }

    nElems := uint32(len(qs))
    nThreads := uint32(maxProcs)

    if nThreads == 1 || nElems == 1 {
        var pt Fp12
        C.go_miller_loop_n(&pt.cgo, &qs[0].cgo, &ps[0].cgo,
            C.size_t(nElems), false)
        return &pt
    }

    stride := (nElems + nThreads - 1) / nThreads
    if stride > 16 {
        stride = 16
    }

    strides := (nElems + stride - 1) / stride
    if nThreads > strides {
        nThreads = strides
    }

    msgsCh := make(chan Fp12, nThreads)
    curElem := uint32(0)

    for tid := uint32(0); tid < nThreads; tid++ {
        go func() {
            acc := Fp12One()
            first := true
            for {
                work := atomic.AddUint32(&curElem, stride) - stride
                if work >= nElems {
                    break
                }
                n := nElems - work
                if n > stride {
                    n = stride
                }
                C.go_miller_loop_n(&acc.cgo, &qs[work].cgo, &ps[work].cgo,
                    C.size_t(n), C.bool(!first))
                first = false
            }
            msgsCh <- acc
        }()
    }

    ret := make([]Fp12, nThreads)
    for i := range ret {
        ret[i] = <-msgsCh
    }

    var pt Fp12
    C.go_fp12slice_mul(&pt.cgo, &ret[0].cgo, C.size_t(nThreads))
    return &pt
}

func (pt *Fp12) MulAssign(p *Fp12) {
    C.blst_fp12_mul(&pt.cgo, &pt.cgo, &p.cgo)
}

func (pt *Fp12) FinalExp() {
    C.blst_final_exp(&pt.cgo, &pt.cgo)
}

func (pt *Fp12) InGroup() bool {
    return bool(C.blst_fp12_in_group(&pt.cgo))
}

func (pt *Fp12) ToBendian() []byte {
    var out [BLST_FP_BYTES * 12]byte
    C.blst_bendian_from_fp12((*C.byte)(&out[0]), &pt.cgo)
    return out[:]
}

func (pt1 *Fp12) Equals(pt2 *Fp12) bool {
    return *pt1 == *pt2
}

func (pt *Fp12) asPtr() *C.blst_fp12 {
    if pt != nil {
        return &pt.cgo
    }
    return nil
}

func ptrOrNil(bytes []byte) *C.byte {
    var ptr *C.byte
    if len(bytes) > 0 {
        ptr = (*C.byte)(&bytes[0])
    }
    return ptr
}
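
// pairingsEqualSketch is an illustrative sketch, not part of the upstream
// blst API: it compares e(p1, q1) with e(p2, q2) by running the two Miller
// loops above and letting Fp12FinalVerify perform the final exponentiation
// on both sides. The inputs are assumed to be valid points in their
// respective subgroups; no validation is performed here.
func pairingsEqualSketch(p1 *P1Affine, q1 *P2Affine,
    p2 *P1Affine, q2 *P2Affine) bool {
    e1 := Fp12MillerLoop(q1, p1)
    e2 := Fp12MillerLoop(q2, p2)
    return Fp12FinalVerify(e1, e2)
}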