feature: multiple big int keys and multi values support (#35)

Support for working with BigInts.

---------

Co-authored-by: Guido Iribarren <git@guidoi.com.ar>
This commit is contained in:
Lucas Menéndez
2025-04-14 10:56:42 +02:00
committed by GitHub
parent a7c0c5f8c3
commit 84e7901031
25 changed files with 4354 additions and 1217 deletions

View File

@@ -41,13 +41,17 @@ func debugTime(descr string, time1, time2 time.Duration) {
func testInit(c *qt.C, n int) (*Tree, *Tree) {
database1 := memdb.New()
tree1, err := NewTree(Config{Database: database1, MaxLevels: 256,
HashFunction: HashFunctionPoseidon})
tree1, err := NewTree(Config{
Database: database1, MaxLevels: 256,
HashFunction: HashFunctionPoseidon,
})
c.Assert(err, qt.IsNil)
database2 := memdb.New()
tree2, err := NewTree(Config{Database: database2, MaxLevels: 256,
HashFunction: HashFunctionPoseidon})
tree2, err := NewTree(Config{
Database: database2, MaxLevels: 256,
HashFunction: HashFunctionPoseidon,
})
c.Assert(err, qt.IsNil)
bLen := HashFunctionPoseidon.Len()
@@ -73,10 +77,12 @@ func TestAddBatchTreeEmpty(t *testing.T) {
database, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree, err := NewTree(Config{database, 256, DefaultThresholdNLeafs,
HashFunctionPoseidon})
tree, err := NewTree(Config{
database, 256, DefaultThresholdNLeafs,
HashFunctionPoseidon,
})
c.Assert(err, qt.IsNil)
defer tree.db.Close() //nolint:errcheck
defer tree.treedb.Close() //nolint:errcheck
bLen := 32
var keys, values [][]byte
@@ -97,10 +103,12 @@ func TestAddBatchTreeEmpty(t *testing.T) {
database2, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree2, err := NewTree(Config{database2, 256, DefaultThresholdNLeafs,
HashFunctionPoseidon})
tree2, err := NewTree(Config{
database2, 256, DefaultThresholdNLeafs,
HashFunctionPoseidon,
})
c.Assert(err, qt.IsNil)
defer tree2.db.Close() //nolint:errcheck
defer tree2.treedb.Close() //nolint:errcheck
tree2.dbgInit()
start = time.Now()
@@ -125,10 +133,12 @@ func TestAddBatchTreeEmptyNotPowerOf2(t *testing.T) {
database, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree, err := NewTree(Config{database, 256, DefaultThresholdNLeafs,
HashFunctionPoseidon})
tree, err := NewTree(Config{
database, 256, DefaultThresholdNLeafs,
HashFunctionPoseidon,
})
c.Assert(err, qt.IsNil)
defer tree.db.Close() //nolint:errcheck
defer tree.treedb.Close() //nolint:errcheck
bLen := 32
for i := 0; i < nLeafs; i++ {
@@ -141,10 +151,12 @@ func TestAddBatchTreeEmptyNotPowerOf2(t *testing.T) {
database2, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree2, err := NewTree(Config{database2, 256, DefaultThresholdNLeafs,
HashFunctionPoseidon})
tree2, err := NewTree(Config{
database2, 256, DefaultThresholdNLeafs,
HashFunctionPoseidon,
})
c.Assert(err, qt.IsNil)
defer tree2.db.Close() //nolint:errcheck
defer tree2.treedb.Close() //nolint:errcheck
var keys, values [][]byte
for i := 0; i < nLeafs; i++ {
@@ -174,17 +186,21 @@ func TestAddBatchTestVector1(t *testing.T) {
c := qt.New(t)
database1, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree1, err := NewTree(Config{database1, 256, DefaultThresholdNLeafs,
HashFunctionBlake2b})
tree1, err := NewTree(Config{
database1, 256, DefaultThresholdNLeafs,
HashFunctionBlake2b,
})
c.Assert(err, qt.IsNil)
defer tree1.db.Close() //nolint:errcheck
defer tree1.treedb.Close() //nolint:errcheck
database2, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree2, err := NewTree(Config{database2, 256, DefaultThresholdNLeafs,
HashFunctionBlake2b})
tree2, err := NewTree(Config{
database2, 256, DefaultThresholdNLeafs,
HashFunctionBlake2b,
})
c.Assert(err, qt.IsNil)
defer tree2.db.Close() //nolint:errcheck
defer tree2.treedb.Close() //nolint:errcheck
// leafs in 2nd level subtrees: [ 6, 0, 1, 1]
testvectorKeys := []string{
@@ -216,17 +232,21 @@ func TestAddBatchTestVector1(t *testing.T) {
// 2nd test vectors
database1, err = pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree1, err = NewTree(Config{database1, 256, DefaultThresholdNLeafs,
HashFunctionBlake2b})
tree1, err = NewTree(Config{
database1, 256, DefaultThresholdNLeafs,
HashFunctionBlake2b,
})
c.Assert(err, qt.IsNil)
defer tree1.db.Close() //nolint:errcheck
defer tree1.treedb.Close() //nolint:errcheck
database2, err = pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree2, err = NewTree(Config{database2, 256, DefaultThresholdNLeafs,
HashFunctionBlake2b})
tree2, err = NewTree(Config{
database2, 256, DefaultThresholdNLeafs,
HashFunctionBlake2b,
})
c.Assert(err, qt.IsNil)
defer tree2.db.Close() //nolint:errcheck
defer tree2.treedb.Close() //nolint:errcheck
testvectorKeys = []string{
"1c7c2265e368314ca58ed2e1f33a326f1220e234a566d55c3605439dbe411642",
@@ -266,17 +286,21 @@ func TestAddBatchTestVector2(t *testing.T) {
database, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree1, err := NewTree(Config{database, 256, DefaultThresholdNLeafs,
HashFunctionPoseidon})
tree1, err := NewTree(Config{
database, 256, DefaultThresholdNLeafs,
HashFunctionPoseidon,
})
c.Assert(err, qt.IsNil)
defer tree1.db.Close() //nolint:errcheck
defer tree1.treedb.Close() //nolint:errcheck
database2, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree2, err := NewTree(Config{database2, 256, DefaultThresholdNLeafs,
HashFunctionPoseidon})
tree2, err := NewTree(Config{
database2, 256, DefaultThresholdNLeafs,
HashFunctionPoseidon,
})
c.Assert(err, qt.IsNil)
defer tree2.db.Close() //nolint:errcheck
defer tree2.treedb.Close() //nolint:errcheck
bLen := tree1.HashFunction().Len()
var keys, values [][]byte
@@ -313,17 +337,21 @@ func TestAddBatchTestVector3(t *testing.T) {
database, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree1, err := NewTree(Config{database, 256, DefaultThresholdNLeafs,
HashFunctionPoseidon})
tree1, err := NewTree(Config{
database, 256, DefaultThresholdNLeafs,
HashFunctionPoseidon,
})
c.Assert(err, qt.IsNil)
defer tree1.db.Close() //nolint:errcheck
defer tree1.treedb.Close() //nolint:errcheck
database2, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree2, err := NewTree(Config{database2, 256, DefaultThresholdNLeafs,
HashFunctionPoseidon})
tree2, err := NewTree(Config{
database2, 256, DefaultThresholdNLeafs,
HashFunctionPoseidon,
})
c.Assert(err, qt.IsNil)
defer tree2.db.Close() //nolint:errcheck
defer tree2.treedb.Close() //nolint:errcheck
bLen := tree1.HashFunction().Len()
var keys, values [][]byte
@@ -364,17 +392,21 @@ func TestAddBatchTreeEmptyRandomKeys(t *testing.T) {
database1, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree1, err := NewTree(Config{database1, 256, DefaultThresholdNLeafs,
HashFunctionBlake2b})
tree1, err := NewTree(Config{
database1, 256, DefaultThresholdNLeafs,
HashFunctionBlake2b,
})
c.Assert(err, qt.IsNil)
defer tree1.db.Close() //nolint:errcheck
defer tree1.treedb.Close() //nolint:errcheck
database2, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree2, err := NewTree(Config{database2, 256, DefaultThresholdNLeafs,
HashFunctionBlake2b})
tree2, err := NewTree(Config{
database2, 256, DefaultThresholdNLeafs,
HashFunctionBlake2b,
})
c.Assert(err, qt.IsNil)
defer tree2.db.Close() //nolint:errcheck
defer tree2.treedb.Close() //nolint:errcheck
var keys, values [][]byte
for i := 0; i < nLeafs; i++ {
@@ -716,10 +748,12 @@ func TestAddBatchNotEmptyUnbalanced(t *testing.T) {
database2, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree2, err := NewTree(Config{database2, 256, DefaultThresholdNLeafs,
HashFunctionPoseidon})
tree2, err := NewTree(Config{
database2, 256, DefaultThresholdNLeafs,
HashFunctionPoseidon,
})
c.Assert(err, qt.IsNil)
defer tree2.db.Close() //nolint:errcheck
defer tree2.treedb.Close() //nolint:errcheck
tree2.dbgInit()
var keys, values [][]byte
@@ -794,10 +828,12 @@ func benchAdd(t *testing.T, ks, vs [][]byte) {
database, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree, err := NewTree(Config{database, 256, DefaultThresholdNLeafs,
HashFunctionBlake2b})
tree, err := NewTree(Config{
database, 256, DefaultThresholdNLeafs,
HashFunctionBlake2b,
})
c.Assert(err, qt.IsNil)
defer tree.db.Close() //nolint:errcheck
defer tree.treedb.Close() //nolint:errcheck
start := time.Now()
for i := 0; i < len(ks); i++ {
@@ -815,10 +851,12 @@ func benchAddBatch(t *testing.T, ks, vs [][]byte) {
database, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree, err := NewTree(Config{database, 256, DefaultThresholdNLeafs,
HashFunctionBlake2b})
tree, err := NewTree(Config{
database, 256, DefaultThresholdNLeafs,
HashFunctionBlake2b,
})
c.Assert(err, qt.IsNil)
defer tree.db.Close() //nolint:errcheck
defer tree.treedb.Close() //nolint:errcheck
tree.dbgInit()
@@ -849,10 +887,12 @@ func TestDbgStats(t *testing.T) {
// 1
database1, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree1, err := NewTree(Config{database1, 256, DefaultThresholdNLeafs,
HashFunctionBlake2b})
tree1, err := NewTree(Config{
database1, 256, DefaultThresholdNLeafs,
HashFunctionBlake2b,
})
c.Assert(err, qt.IsNil)
defer tree1.db.Close() //nolint:errcheck
defer tree1.treedb.Close() //nolint:errcheck
tree1.dbgInit()
@@ -864,10 +904,12 @@ func TestDbgStats(t *testing.T) {
// 2
database2, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree2, err := NewTree(Config{database2, 256, DefaultThresholdNLeafs,
HashFunctionBlake2b})
tree2, err := NewTree(Config{
database2, 256, DefaultThresholdNLeafs,
HashFunctionBlake2b,
})
c.Assert(err, qt.IsNil)
defer tree2.db.Close() //nolint:errcheck
defer tree2.treedb.Close() //nolint:errcheck
tree2.dbgInit()
@@ -878,10 +920,12 @@ func TestDbgStats(t *testing.T) {
// 3
database3, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree3, err := NewTree(Config{database3, 256, DefaultThresholdNLeafs,
HashFunctionBlake2b})
tree3, err := NewTree(Config{
database3, 256, DefaultThresholdNLeafs,
HashFunctionBlake2b,
})
c.Assert(err, qt.IsNil)
defer tree3.db.Close() //nolint:errcheck
defer tree3.treedb.Close() //nolint:errcheck
tree3.dbgInit()
@@ -913,10 +957,12 @@ func TestLoadVT(t *testing.T) {
nLeafs := 1024
database := memdb.New()
tree, err := NewTree(Config{database, 256, DefaultThresholdNLeafs,
HashFunctionPoseidon})
tree, err := NewTree(Config{
database, 256, DefaultThresholdNLeafs,
HashFunctionPoseidon,
})
c.Assert(err, qt.IsNil)
defer tree.db.Close() //nolint:errcheck
defer tree.treedb.Close() //nolint:errcheck
var keys, values [][]byte
for i := 0; i < nLeafs; i++ {
@@ -948,10 +994,12 @@ func TestAddKeysWithEmptyValues(t *testing.T) {
database, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree, err := NewTree(Config{database, 256, DefaultThresholdNLeafs,
HashFunctionPoseidon})
tree, err := NewTree(Config{
database, 256, DefaultThresholdNLeafs,
HashFunctionPoseidon,
})
c.Assert(err, qt.IsNil)
defer tree.db.Close() //nolint:errcheck
defer tree.treedb.Close() //nolint:errcheck
bLen := 32
var keys, values [][]byte
@@ -970,10 +1018,12 @@ func TestAddKeysWithEmptyValues(t *testing.T) {
database2, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree2, err := NewTree(Config{database2, 256, DefaultThresholdNLeafs,
HashFunctionPoseidon})
tree2, err := NewTree(Config{
database2, 256, DefaultThresholdNLeafs,
HashFunctionPoseidon,
})
c.Assert(err, qt.IsNil)
defer tree2.db.Close() //nolint:errcheck
defer tree2.treedb.Close() //nolint:errcheck
tree2.dbgInit()
invalids, err := tree2.AddBatch(keys, values)
@@ -985,10 +1035,12 @@ func TestAddKeysWithEmptyValues(t *testing.T) {
// use tree3 to add nil value array
database3, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree3, err := NewTree(Config{database3, 256, DefaultThresholdNLeafs,
HashFunctionPoseidon})
tree3, err := NewTree(Config{
database3, 256, DefaultThresholdNLeafs,
HashFunctionPoseidon,
})
c.Assert(err, qt.IsNil)
defer tree3.db.Close() //nolint:errcheck
defer tree3.treedb.Close() //nolint:errcheck
invalids, err = tree3.AddBatch(keys, nil)
c.Assert(err, qt.IsNil)
@@ -1014,8 +1066,10 @@ func TestAddKeysWithEmptyValues(t *testing.T) {
c.Check(verif, qt.IsTrue)
// check with array with 32 zeroes
e32 := []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
e32 := []byte{
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
}
c.Assert(len(e32), qt.Equals, 32)
verif, err = CheckProof(tree.hashFunction, keys[9], e32, root, siblings)
c.Assert(err, qt.IsNil)
@@ -1035,24 +1089,30 @@ func TestAddBatchThresholdInDisk(t *testing.T) {
database1, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree1, err := NewTree(Config{database1, 256, testThresholdNLeafs,
HashFunctionBlake2b})
tree1, err := NewTree(Config{
database1, 256, testThresholdNLeafs,
HashFunctionBlake2b,
})
c.Assert(err, qt.IsNil)
defer tree1.db.Close() //nolint:errcheck
defer tree1.treedb.Close() //nolint:errcheck
database2, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree2, err := NewTree(Config{database2, 256, testThresholdNLeafs,
HashFunctionBlake2b})
tree2, err := NewTree(Config{
database2, 256, testThresholdNLeafs,
HashFunctionBlake2b,
})
c.Assert(err, qt.IsNil)
defer tree2.db.Close() //nolint:errcheck
defer tree2.treedb.Close() //nolint:errcheck
database3, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree3, err := NewTree(Config{database3, 256, testThresholdNLeafs,
HashFunctionBlake2b})
tree3, err := NewTree(Config{
database3, 256, testThresholdNLeafs,
HashFunctionBlake2b,
})
c.Assert(err, qt.IsNil)
defer tree3.db.Close() //nolint:errcheck
defer tree3.treedb.Close() //nolint:errcheck
var keys, values [][]byte
for i := 0; i < 3*testThresholdNLeafs; i++ {
@@ -1078,7 +1138,7 @@ func TestAddBatchThresholdInDisk(t *testing.T) {
checkRoots(c, tree1, tree2)
// call directly the tree3.addBatchInDisk to ensure that is tested
wTx := tree3.db.WriteTx()
wTx := tree3.treedb.WriteTx()
defer wTx.Discard()
invalids, err = tree3.addBatchInDisk(wTx, keys, values)
c.Assert(err, qt.IsNil)
@@ -1103,14 +1163,18 @@ func TestAddBatchThresholdInDisk(t *testing.T) {
func initTestUpFromSubRoots(c *qt.C) (*Tree, *Tree) {
database1, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree1, err := NewTree(Config{database1, 256, DefaultThresholdNLeafs,
HashFunctionBlake2b})
tree1, err := NewTree(Config{
database1, 256, DefaultThresholdNLeafs,
HashFunctionBlake2b,
})
c.Assert(err, qt.IsNil)
database2, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree2, err := NewTree(Config{database2, 256, DefaultThresholdNLeafs,
HashFunctionBlake2b})
tree2, err := NewTree(Config{
database2, 256, DefaultThresholdNLeafs,
HashFunctionBlake2b,
})
c.Assert(err, qt.IsNil)
return tree1, tree2
}
@@ -1127,7 +1191,7 @@ func testUpFromSubRoots(c *qt.C, tree1, tree2 *Tree, preSubRoots [][]byte) {
root1, err := tree1.Root()
c.Assert(err, qt.IsNil)
wTx := tree2.db.WriteTx()
wTx := tree2.treedb.WriteTx()
subRoots := make([][]byte, len(preSubRoots))
for i := 0; i < len(preSubRoots); i++ {
if preSubRoots[i] == nil || bytes.Equal(preSubRoots[i], tree1.emptyHash) {
@@ -1159,8 +1223,8 @@ func testUpFromSubRoots(c *qt.C, tree1, tree2 *Tree, preSubRoots [][]byte) {
func testUpFromSubRootsWithEmpties(c *qt.C, preSubRoots [][]byte, indexEmpties []int) {
tree1, tree2 := initTestUpFromSubRoots(c)
defer tree1.db.Close() //nolint:errcheck
defer tree2.db.Close() //nolint:errcheck
defer tree1.treedb.Close() //nolint:errcheck
defer tree2.treedb.Close() //nolint:errcheck
testPreSubRoots := make([][]byte, len(preSubRoots))
copy(testPreSubRoots[:], preSubRoots[:])

View File

@@ -20,7 +20,7 @@ type CircomVerifierProof struct {
// MarshalJSON implements the JSON marshaler
func (cvp CircomVerifierProof) MarshalJSON() ([]byte, error) {
m := make(map[string]interface{})
m := make(map[string]any)
m["root"] = BytesToBigInt(cvp.Root).String()
m["siblings"] = siblingsToStringArray(cvp.Siblings)
@@ -40,7 +40,7 @@ func (cvp CircomVerifierProof) MarshalJSON() ([]byte, error) {
func siblingsToStringArray(s [][]byte) []string {
var r []string
for i := 0; i < len(s); i++ {
for i := range s {
r = append(r, BytesToBigInt(s[i]).String())
}
return r

View File

@@ -14,10 +14,12 @@ func TestCircomVerifierProof(t *testing.T) {
c := qt.New(t)
database, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree, err := NewTree(Config{Database: database, MaxLevels: 4,
HashFunction: HashFunctionPoseidon})
tree, err := NewTree(Config{
Database: database, MaxLevels: 4,
HashFunction: HashFunctionPoseidon,
})
c.Assert(err, qt.IsNil)
defer tree.db.Close() //nolint:errcheck
defer tree.treedb.Close() //nolint:errcheck
testVector := [][]int64{
{1, 11},

8
ff.go
View File

@@ -12,11 +12,5 @@ var (
// BigToFF function returns the finite field representation of the big.Int
// provided. It uses the curve scalar field to represent the provided number.
func BigToFF(baseField, iv *big.Int) *big.Int {
z := big.NewInt(0)
if c := iv.Cmp(baseField); c == 0 {
return z
} else if c != 1 && iv.Cmp(z) != -1 {
return iv
}
return z.Mod(iv, baseField)
return new(big.Int).Mod(iv, baseField)
}

70
gnark.go Normal file
View File

@@ -0,0 +1,70 @@
package arbo
import "math/big"
// GnarkVerifierProof is a struct that contains all the information needed to
// verify a proof in a gnark circuit. The attributes are all big.Int, so they
// can be used as frontend.Variable's in the gnark circuit. The endianess of
// root, siblings and value has been changed to Little-Endian to match the
// gnark arbo verifier.
type GnarkVerifierProof struct {
Root *big.Int
Siblings []*big.Int
OldKey *big.Int
IsOld0 *big.Int
OldValue *big.Int
Key *big.Int
Value *big.Int
Fnc *big.Int
}
// GenerateGnarkVerifierProof generates a GnarkVerifierProof for a given key
// in the Tree. Every attribute is a big.Int, so it can be used in the gnark
// circuit as frontend.Variable's. The endianness of root, siblings and value
// has been changed to Little-Endian to match the gnark arbo verifier.
func (t *Tree) GenerateGnarkVerifierProof(k []byte) (*GnarkVerifierProof, error) {
// generate the arbo proof
oldKey, value, siblings, existence, err := t.GenProof(k)
if err != nil && err != ErrKeyNotFound {
return nil, err
}
// get the root of the tree
root, err := t.Root()
if err != nil {
return nil, err
}
// unpack the siblings
unpackedSiblings, err := UnpackSiblings(t.hashFunction, siblings)
if err != nil {
return nil, err
}
// convert the siblings to big.Int swapping the endianness
bigSiblings := make([]*big.Int, len(unpackedSiblings))
for i := range bigSiblings {
bigSiblings[i] = BytesToBigInt(unpackedSiblings[i])
}
// initialize the GnarkVerifierProof
gp := GnarkVerifierProof{
Root: BytesToBigInt(root),
Key: BytesToBigInt(k),
Value: BytesToBigInt(value),
Siblings: bigSiblings,
OldKey: big.NewInt(0),
OldValue: big.NewInt(0),
IsOld0: big.NewInt(0),
Fnc: big.NewInt(0), // inclusion
}
// if the arbo proof is for a non-existing key, set the old key and value
// to the key and value of the proof
if !existence {
gp.OldKey = BytesToBigInt(oldKey)
gp.OldValue = BytesToBigInt(value)
gp.Fnc = big.NewInt(1) // exclusion
}
// set the IsOld0 attribute to 1 if there is no old key
if len(oldKey) == 0 {
gp.IsOld0 = big.NewInt(1)
}
return &gp, nil
}

15
go.mod
View File

@@ -1,22 +1,21 @@
module github.com/vocdoni/arbo
go 1.23.2
go 1.24
toolchain go1.23.4
require (
github.com/consensys/gnark-crypto v0.14.1-0.20241213223322-afee1955665f
github.com/consensys/gnark-crypto v0.16.1-0.20250217214835-5ed804970f85
github.com/frankban/quicktest v1.14.6
github.com/iden3/go-iden3-crypto v0.0.17
github.com/vocdoni/vocdoni-z-sandbox v0.0.0-20241212172703-15f6d0594b8e
go.vocdoni.io/dvote v1.10.2-0.20241024102542-c1ce6d744bc5
golang.org/x/crypto v0.28.0
golang.org/x/crypto v0.33.0
)
require (
github.com/DataDog/zstd v1.5.2 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.14.3 // indirect
github.com/bits-and-blooms/bitset v1.20.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/cockroachdb/errors v1.11.3 // indirect
github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce // indirect
@@ -24,7 +23,7 @@ require (
github.com/cockroachdb/pebble v1.1.2 // indirect
github.com/cockroachdb/redact v1.1.5 // indirect
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect
github.com/consensys/bavard v0.1.24 // indirect
github.com/consensys/bavard v0.1.29 // indirect
github.com/getsentry/sentry-go v0.27.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
@@ -41,8 +40,8 @@ require (
github.com/prometheus/procfs v0.15.1 // indirect
github.com/rogpeppe/go-internal v1.12.0 // indirect
golang.org/x/exp v0.0.0-20240823005443-9b4947da3948 // indirect
golang.org/x/sys v0.26.0 // indirect
golang.org/x/text v0.19.0 // indirect
golang.org/x/sys v0.30.0 // indirect
golang.org/x/text v0.22.0 // indirect
google.golang.org/protobuf v1.34.2 // indirect
rsc.io/tmplfunc v0.0.3 // indirect
)

66
go.sum
View File

@@ -2,8 +2,8 @@ github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8=
github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bits-and-blooms/bitset v1.14.3 h1:Gd2c8lSNf9pKXom5JtD7AaKO8o7fGQ2LtFj1436qilA=
github.com/bits-and-blooms/bitset v1.14.3/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
github.com/bits-and-blooms/bitset v1.20.0 h1:2F+rfL86jE2d/bmw7OhqUg2Sj/1rURkBn3MdfoPyRVU=
github.com/bits-and-blooms/bitset v1.20.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4=
@@ -20,10 +20,12 @@ github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwP
github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo=
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ=
github.com/consensys/bavard v0.1.24 h1:Lfe+bjYbpaoT7K5JTFoMi5wo9V4REGLvQQbHmatoN2I=
github.com/consensys/bavard v0.1.24/go.mod h1:k/zVjHHC4B+PQy1Pg7fgvG3ALicQw540Crag8qx+dZs=
github.com/consensys/gnark-crypto v0.14.1-0.20241213223322-afee1955665f h1:fSN/SblYo81hUUlrwfzMZ240Cxuxe0TkF7uQelxZgjI=
github.com/consensys/gnark-crypto v0.14.1-0.20241213223322-afee1955665f/go.mod h1:ePFa23CZLMRMHxQpY5nMaiAZ3yuEIayaB8ElEvlwLEs=
github.com/cometbft/cometbft v1.0.0-alpha.1 h1:M0q0RsNYhAwCANXLkJCEJnyf8fBR8O94InkELElGv0E=
github.com/cometbft/cometbft v1.0.0-alpha.1/go.mod h1:fwVpJigzDw2UnFchb0fIq7svrLmHcn5AfpMzob/xquI=
github.com/consensys/bavard v0.1.29 h1:fobxIYksIQ+ZSrTJUuQgu+HIJwclrAPcdXqd7H2hh1k=
github.com/consensys/bavard v0.1.29/go.mod h1:k/zVjHHC4B+PQy1Pg7fgvG3ALicQw540Crag8qx+dZs=
github.com/consensys/gnark-crypto v0.16.1-0.20250217214835-5ed804970f85 h1:3ht4gGH3smFGVLFhpFTKvDbEdagC6eSaPXnHjCQGh94=
github.com/consensys/gnark-crypto v0.16.1-0.20250217214835-5ed804970f85/go.mod h1:A2URlMHUT81ifJ0UlLzSlm7TmnE3t7VxEThApdMukJw=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -33,6 +35,10 @@ github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK
github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY=
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4=
github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
@@ -53,9 +59,17 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leanovate/gopter v0.2.11 h1:vRjThO1EKPb/1NsDXuDrzldR28RLkBflWYcU9CvzWu4=
github.com/leanovate/gopter v0.2.11/go.mod h1:aK3tzZP/C+p1m3SPRE4SYZFGP7jjkuSI4f7Xvpt0S9c=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mfridman/interpolate v0.0.2 h1:pnuTK7MQIxxFz1Gr+rjSIx9u7qVjf5VOoM/u6BbAxPY=
github.com/mfridman/interpolate v0.0.2/go.mod h1:p+7uk6oE07mpE/Ik1b8EckO0O4ZXiGAfshKBWLUM9Xg=
github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY=
github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU=
github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU=
github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe h1:iruDEfMl2E6fbMZ9s0scYfZQ84/6SPL6zC8ACM2oIL0=
github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=
@@ -65,6 +79,8 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pressly/goose/v3 v3.21.1 h1:5SSAKKWej8LVVzNLuT6KIvP1eFDuPvxa+B6H0w78buQ=
github.com/pressly/goose/v3 v3.21.1/go.mod h1:sqthmzV8PitchEkjecFJII//l43dLOCzfWh8pHEe+vE=
github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
@@ -76,19 +92,37 @@ github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoG
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8=
github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
github.com/sethvargo/go-retry v0.2.4 h1:T+jHEQy/zKJf5s95UkguisicE0zuF9y7+/vgz08Ocec=
github.com/sethvargo/go-retry v0.2.4/go.mod h1:1afjQuvh7s4gflMObvjLPaWgluLLyhA1wmVZ6KLpICw=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a h1:1ur3QoCqvE5fl+nylMaIr9PVV1w343YRDtsy+Rwu7XI=
github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48=
github.com/vocdoni/vocdoni-z-sandbox v0.0.0-20241212172703-15f6d0594b8e h1:sogZy0UOcDvg4mAeU0vXFDNgymLd4WqwfQImlEdevqw=
github.com/vocdoni/vocdoni-z-sandbox v0.0.0-20241212172703-15f6d0594b8e/go.mod h1:8GkK2SCGHZ8d8SE4AsWWnVnUQxI8vx75KJDCv2yrZlY=
github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY=
github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4=
github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8=
github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM=
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d h1:splanxYIlg+5LfHAM6xpdFEAYOk8iySO56hMFq6uLyA=
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.mongodb.org/mongo-driver v1.12.1 h1:nLkghSU8fQNaK7oUmDhQFsnrtcoNy7Z6LVFKsEecqgE=
go.mongodb.org/mongo-driver v1.12.1/go.mod h1:/rGBTebI3XYboVmgz+Wv3Bcbl3aD0QF9zl6kDDw18rQ=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.vocdoni.io/dvote v1.10.2-0.20241024102542-c1ce6d744bc5 h1:22esW3YedMfoEOx0Chc2qhrooMBDRHsasJfGytESxo4=
go.vocdoni.io/dvote v1.10.2-0.20241024102542-c1ce6d744bc5/go.mod h1:k5uY7me8lgMjWcXfnYQbsfZgPzGUQM6ldK1VbvKVgFQ=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw=
golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U=
golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus=
golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M=
golang.org/x/exp v0.0.0-20240823005443-9b4947da3948 h1:kx6Ds3MlpiUHKj7syVnbp57++8WpuKPcR5yjLBjvLEA=
golang.org/x/exp v0.0.0-20240823005443-9b4947da3948/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
@@ -100,17 +134,17 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=
golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=

6
go.work Normal file
View File

@@ -0,0 +1,6 @@
go 1.24.0
use (
.
./testvectors/gnark
)

1290
go.work.sum Normal file

File diff suppressed because it is too large Load Diff

117
hash.go
View File

@@ -6,9 +6,9 @@ import (
fr_bls12377 "github.com/consensys/gnark-crypto/ecc/bls12-377/fr"
mimc_bls12_377 "github.com/consensys/gnark-crypto/ecc/bls12-377/fr/mimc"
fr_bn254 "github.com/consensys/gnark-crypto/ecc/bn254/fr"
mimc_bn254 "github.com/consensys/gnark-crypto/ecc/bn254/fr/mimc"
"github.com/consensys/gnark-crypto/hash"
"github.com/iden3/go-iden3-crypto/mimc7"
"github.com/iden3/go-iden3-crypto/poseidon"
multiposeidon "github.com/vocdoni/vocdoni-z-sandbox/crypto/hash/poseidon"
"golang.org/x/crypto/blake2b"
@@ -31,6 +31,8 @@ var (
// TypeHashMiMC_BN254 represents the label for the HashFunction of MiMC
// over BN254 curve
TypeHashMiMC_BN254 = []byte("mimc_bn254")
// TypeHashMimc7 represents the label for the HashFunction of Mimc7
TypeHashMimc7 = []byte("mimc7")
// HashFunctionSha256 contains the HashSha256 struct which implements
// the HashFunction interface
@@ -50,6 +52,9 @@ var (
// HashFunctionMiMC_BN254 contains the HashMiMC_BN254 struct which
// implements the HashFunction interface
HashFunctionMiMC_BN254 HashMiMC_BN254
// HashFunctionMimc7 contains the HashMiMC7 struct which implements the
// HashFunction interface
HashFunctionMimc7 HashMiMC7
)
// Once Generics are at Go, this will be updated (August 2021
@@ -61,6 +66,8 @@ type HashFunction interface {
Type() []byte
Len() int
Hash(...[]byte) ([]byte, error)
SafeValue([]byte) []byte
SafeBigInt(*big.Int) []byte
}
// HashSha256 implements the HashFunction interface for the Sha256 hash
@@ -86,6 +93,14 @@ func (f HashSha256) Hash(b ...[]byte) ([]byte, error) {
return h[:], nil
}
// SafeValue returns the byte slice unchanged: unlike the SNARK-friendly
// hashes in this file, Sha256 applies no field reduction to its values.
func (f HashSha256) SafeValue(b []byte) []byte {
	return b
}
// SafeBigInt returns the big-endian byte representation of b (big.Int.Bytes)
// without any field reduction.
func (f HashSha256) SafeBigInt(b *big.Int) []byte {
	return b.Bytes()
}
// HashPoseidon implements the HashFunction interface for the Poseidon hash
type HashPoseidon struct{}
@@ -116,6 +131,14 @@ func (f HashPoseidon) Hash(b ...[]byte) ([]byte, error) {
return hB, nil
}
// SafeValue interprets b as a big-endian big.Int and delegates to
// SafeBigInt to reduce it into the BN254 base field.
func (f HashPoseidon) SafeValue(b []byte) []byte {
	return f.SafeBigInt(new(big.Int).SetBytes(b))
}
// SafeBigInt reduces b into the BN254 base field and returns its
// big-endian byte representation.
func (f HashPoseidon) SafeBigInt(b *big.Int) []byte {
	return BigToFF(BN254BaseField, b).Bytes()
}
// HashMultiPoseidon implements the HashFunction interface for the MultiPoseidon hash
type HashMultiPoseidon struct{}
@@ -154,6 +177,27 @@ func (f HashMultiPoseidon) Hash(b ...[]byte) ([]byte, error) {
return BigIntToBytes(f.Len(), h), nil
}
// SafeValue returns a valid value of the byte slice provided for the
// current hash function. The slice is expected to be a little-endian
// representation of a big.Int; it is decoded with BytesToBigInt and
// delegated to SafeBigInt to obtain the valid value.
func (f HashMultiPoseidon) SafeValue(b []byte) []byte {
	asBigInt := BytesToBigInt(b)
	return f.SafeBigInt(asBigInt)
}
// SafeBigInt returns a valid value of the big.Int provided for the
// current hash function. A value is valid for MultiPoseidon when it lies
// in the BN254 base field, so the input is reduced first (BigToFF) and
// then serialized to Len() bytes in little-endian order. If the reduced
// value is zero, ExplicitZero normalizes the result to a single zero
// byte.
func (f HashMultiPoseidon) SafeBigInt(b *big.Int) []byte {
	reduced := BigToFF(BN254BaseField, b)
	leBytes := BigIntToBytes(f.Len(), reduced)
	return ExplicitZero(leBytes)
}
// HashBlake2b implements the HashFunction interface for the Blake2b hash
type HashBlake2b struct{}
@@ -181,6 +225,14 @@ func (f HashBlake2b) Hash(b ...[]byte) ([]byte, error) {
return hasher.Sum(nil), nil
}
// SafeValue returns the byte slice unchanged: Blake2b applies no field
// reduction to its values.
func (f HashBlake2b) SafeValue(b []byte) []byte {
	return b
}
// SafeBigInt returns the big-endian byte representation of b
// (big.Int.Bytes) without any field reduction.
func (f HashBlake2b) SafeBigInt(b *big.Int) []byte {
	return b.Bytes()
}
// HashMiMC_BLS12_377 implements the HashFunction interface for the MiMC hash
// over the BLS12-377 curve
type HashMiMC_BLS12_377 struct{}
@@ -202,6 +254,14 @@ func (f HashMiMC_BLS12_377) Hash(b ...[]byte) ([]byte, error) {
return hashMiMCbyChunks(h, q, b...)
}
// SafeValue interprets b as a big-endian big.Int and delegates to
// SafeBigInt to reduce it into the BLS12-377 base field.
func (f HashMiMC_BLS12_377) SafeValue(b []byte) []byte {
	return f.SafeBigInt(new(big.Int).SetBytes(b))
}
// SafeBigInt reduces b into the BLS12-377 base field and returns its
// big-endian byte representation.
func (f HashMiMC_BLS12_377) SafeBigInt(b *big.Int) []byte {
	return BigToFF(BLS12377BaseField, b).Bytes()
}
// HashMiMC_BN254 implements the HashFunction interface for the MiMC hash
// over the BN254 curve
type HashMiMC_BN254 struct{}
@@ -218,9 +278,28 @@ func (f HashMiMC_BN254) Len() int {
// Hash implements the hash method for the HashFunction HashMiMC_BN254
func (f HashMiMC_BN254) Hash(b ...[]byte) ([]byte, error) {
q := fr_bn254.Modulus()
// q := fr_bn254.Modulus()
// h := mimc_bn254.NewMiMC()
// return hashMiMCbyChunks(h, q, b...)
h := mimc_bn254.NewMiMC()
return hashMiMCbyChunks(h, q, b...)
var fullBytes []byte
for _, input := range b {
fullBytes = append(fullBytes, input...)
}
for start := 0; start < len(fullBytes); start += h.BlockSize() {
end := min(start+h.BlockSize(), len(fullBytes))
chunk := fullBytes[start:end]
h.Write(chunk)
}
return h.Sum(nil), nil
}
// SafeValue interprets b as a big-endian big.Int and delegates to
// SafeBigInt to reduce it into the BN254 base field.
func (f HashMiMC_BN254) SafeValue(b []byte) []byte {
	return f.SafeBigInt(new(big.Int).SetBytes(b))
}
// SafeBigInt reduces b into the BN254 base field and returns its
// big-endian byte representation.
func (f HashMiMC_BN254) SafeBigInt(b *big.Int) []byte {
	return BigToFF(BN254BaseField, b).Bytes()
}
// hashMiMCbyChunks is a helper function to hash by chunks using the MiMC hash.
@@ -268,3 +347,35 @@ func hashMiMCbyChunks(h hash.StateStorer, q *big.Int, b ...[]byte) ([]byte, erro
}
return SwapEndianness(h.Sum(nil)), nil
}
type HashMiMC7 struct{}
// Type returns the label identifying the MiMC7 hash function.
func (f HashMiMC7) Type() []byte {
	return TypeHashMimc7
}
// Len returns the length of the MiMC7 hash output in bytes.
func (f HashMiMC7) Len() int {
	return 32 //nolint:gomnd
}
// Hash implements the hash method for the HashFunction HashMiMC7. Each
// input byte slice is decoded with BytesToBigInt into a field element and
// the resulting elements are hashed with MiMC7. The digest is serialized
// to Len() bytes with BigIntToBytes.
func (f HashMiMC7) Hash(b ...[]byte) ([]byte, error) {
	// pre-size: exactly one field element per input slice
	toHash := make([]*big.Int, 0, len(b))
	for _, input := range b {
		toHash = append(toHash, BytesToBigInt(input))
	}
	h, err := mimc7.Hash(toHash, nil)
	if err != nil {
		return nil, err
	}
	return BigIntToBytes(f.Len(), h), nil
}
// SafeValue returns a valid value for the byte slice provided: the slice
// is decoded with BytesToBigInt and delegated to SafeBigInt.
func (f HashMiMC7) SafeValue(b []byte) []byte {
	return f.SafeBigInt(BytesToBigInt(b))
}
// SafeBigInt returns a valid value for the big.Int provided: the value is
// reduced into the BN254 base field, serialized to Len() bytes, and a
// zero result is normalized by ExplicitZero to a single zero byte.
func (f HashMiMC7) SafeBigInt(b *big.Int) []byte {
	reduced := BigToFF(BN254BaseField, b)
	return ExplicitZero(BigIntToBytes(f.Len(), reduced))
}

View File

@@ -1,7 +1,6 @@
package arbo
import (
"crypto/rand"
"encoding/hex"
"math/big"
"testing"
@@ -68,24 +67,3 @@ func TestHashMiMC(t *testing.T) {
qt.Equals,
"f881f34991492d823e02565c778b824bac5eacef6340b70ee90a8966a2e63900")
}
func TestHashMoreThan32BytesMiMC(t *testing.T) {
c := qt.New(t)
// create a random 257 bytes
b := make([]byte, 257)
_, err := rand.Read(b)
c.Assert(err, qt.IsNil)
// MiMC hash bn254
mimcbn254 := &HashMiMC_BN254{}
h, err := mimcbn254.Hash(b)
c.Assert(err, qt.IsNil)
c.Assert(len(h), qt.Equals, 32)
// MiMC hash bls12377
mimcbls12377 := &HashMiMC_BLS12_377{}
h, err = mimcbls12377.Hash(b)
c.Assert(err, qt.IsNil)
c.Assert(len(h), qt.Equals, 32)
}

View File

@@ -40,7 +40,7 @@ func checkRoots(c *qt.C, tree1, tree2 *Tree) {
func storeTree(c *qt.C, tree *Tree, path string) {
dump, err := tree.Dump(nil)
c.Assert(err, qt.IsNil)
err = os.WriteFile(path+"-"+time.Now().String()+".debug", dump, 0600)
err = os.WriteFile(path+"-"+time.Now().String()+".debug", dump, 0o600)
c.Assert(err, qt.IsNil)
}
@@ -83,20 +83,24 @@ func importDumpLoopAdd(tree *Tree, b []byte) error {
}
func TestReadTreeDBG(t *testing.T) {
t.Skip() // test just for debugging purposes, disabled by default
t.Skip("test just for debugging purposes, disabled by default")
c := qt.New(t)
database1, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree1, err := NewTree(Config{Database: database1, MaxLevels: 100,
HashFunction: HashFunctionBlake2b})
tree1, err := NewTree(Config{
Database: database1, MaxLevels: 100,
HashFunction: HashFunctionBlake2b,
})
c.Assert(err, qt.IsNil)
database2, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree2, err := NewTree(Config{Database: database2, MaxLevels: 100,
HashFunction: HashFunctionBlake2b})
tree2, err := NewTree(Config{
Database: database2, MaxLevels: 100,
HashFunction: HashFunctionBlake2b,
})
c.Assert(err, qt.IsNil)
// tree1 is generated by a loop of .Add

View File

@@ -1,6 +1,7 @@
package main
import (
"crypto/rand"
"encoding/json"
"math/big"
"os"
@@ -8,16 +9,16 @@ import (
qt "github.com/frankban/quicktest"
"github.com/vocdoni/arbo"
"go.vocdoni.io/dvote/db"
"go.vocdoni.io/dvote/db/pebbledb"
"github.com/vocdoni/arbo/memdb"
)
func TestGenerator(t *testing.T) {
c := qt.New(t)
database, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree, err := arbo.NewTree(arbo.Config{Database: database, MaxLevels: 4,
HashFunction: arbo.HashFunctionPoseidon})
tree, err := arbo.NewTree(arbo.Config{
Database: memdb.New(),
MaxLevels: 160,
HashFunction: arbo.HashFunctionPoseidon,
})
c.Assert(err, qt.IsNil)
testVector := [][]int64{
@@ -27,7 +28,7 @@ func TestGenerator(t *testing.T) {
{4, 44},
}
bLen := 1
for i := 0; i < len(testVector); i++ {
for i := range testVector {
k := arbo.BigIntToBytes(bLen, big.NewInt(testVector[i][0]))
v := arbo.BigIntToBytes(bLen, big.NewInt(testVector[i][1]))
if err := tree.Add(k, v); err != nil {
@@ -42,7 +43,7 @@ func TestGenerator(t *testing.T) {
jCvp, err := json.Marshal(cvp)
c.Assert(err, qt.IsNil)
// store the data into a file that will be used at the circom test
err = os.WriteFile("go-smt-verifier-inputs.json", jCvp, 0600)
err = os.WriteFile("go-smt-verifier-inputs.json", jCvp, 0o600)
c.Assert(err, qt.IsNil)
// proof of non-existence
@@ -52,6 +53,34 @@ func TestGenerator(t *testing.T) {
jCvp, err = json.Marshal(cvp)
c.Assert(err, qt.IsNil)
// store the data into a file that will be used at the circom test
err = os.WriteFile("go-smt-verifier-non-existence-inputs.json", jCvp, 0600)
err = os.WriteFile("go-smt-verifier-non-existence-inputs.json", jCvp, 0o600)
c.Assert(err, qt.IsNil)
// create a new tree with big.Int keys
bigtree, err := arbo.NewTree(arbo.Config{
Database: memdb.New(),
MaxLevels: 160,
HashFunction: arbo.HashFunctionPoseidon,
})
c.Assert(err, qt.IsNil)
// add 100 elements to the tree
var bk *big.Int
for i := range 100 {
k, err := rand.Int(rand.Reader, big.NewInt(100_000_000_000))
c.Assert(err, qt.IsNil)
v := new(big.Int).Mul(k, big.NewInt(2))
c.Assert(bigtree.AddBigInt(k, v), qt.IsNil)
if i == 0 {
bk = k
}
}
// generate a proof of existence for the first key
cvp, err = tree.GenerateCircomVerifierProofBigInt(bk)
c.Assert(err, qt.IsNil)
jCvp, err = json.Marshal(cvp)
c.Assert(err, qt.IsNil)
// store the data into a file that will be used at the circom test
err = os.WriteFile("go-smt-verifier-big-inputs.json", jCvp, 0o600)
c.Assert(err, qt.IsNil)
}

File diff suppressed because it is too large Load Diff

View File

@@ -14,7 +14,7 @@ describe("merkletreetree circom-proof-verifier", function () {
before( async() => {
const circuitCode = `
include "smt-proof-verifier_test.circom";
component main = SMTVerifierTest(4);
component main = SMTVerifierTest(160);
`;
fs.writeFileSync(circuitPath, circuitCode, "utf8");
@@ -80,6 +80,17 @@ describe("merkletreetree circom-proof-verifier", function () {
const witness = await circuit.calculateWitness(inputsVerifier);
await circuit.checkConstraints(witness);
});
it("Test smt-verifier proof of existence go big inputs", async () => {
// fromGo is a json CircomVerifierProof generated from Go code using
// https://github.com/vocdoni/arbo
let rawdata = fs.readFileSync('go-data-generator/go-smt-verifier-big-inputs.json');
let fromGo = JSON.parse(rawdata);
inputsVerifier=fromGo;
// console.log("smtverifier js inputs:\n", inputsVerifier);
const witness = await circuit.calculateWitness(inputsVerifier);
await circuit.checkConstraints(witness);
});
it("Test smt-verifier proof of non-existence go inputs", async () => {
// fromGo is a json CircomVerifierProof generated from Go code using
// https://github.com/vocdoni/arbo

View File

@@ -0,0 +1,133 @@
package testgnark
import (
"crypto/rand"
"math/big"
"testing"
"github.com/consensys/gnark-crypto/ecc"
"github.com/consensys/gnark/backend"
"github.com/consensys/gnark/frontend"
"github.com/consensys/gnark/test"
qt "github.com/frankban/quicktest"
"github.com/vocdoni/arbo"
"github.com/vocdoni/arbo/memdb"
"github.com/vocdoni/gnark-crypto-primitives/hash/bn254/poseidon"
garbo "github.com/vocdoni/gnark-crypto-primitives/tree/arbo"
gsmt "github.com/vocdoni/gnark-crypto-primitives/tree/smt"
)
const nLevels = 160
// testCircuitArbo is a gnark circuit that verifies an arbo inclusion
// proof with a fixed depth of nLevels siblings.
type testCircuitArbo struct {
	Root     frontend.Variable
	Key      frontend.Variable
	Value    frontend.Variable
	Siblings [nLevels]frontend.Variable
}
// Define wires the arbo inclusion-proof check into the circuit using the
// MultiPoseidon hash, returning whatever error the checker reports.
func (circuit *testCircuitArbo) Define(api frontend.API) error {
	err := garbo.CheckInclusionProof(api, poseidon.MultiHash,
		circuit.Key, circuit.Value, circuit.Root, circuit.Siblings[:])
	return err
}
// TestGnarkArboVerifier builds a MultiPoseidon arbo tree with 100 random
// big.Int keys (value = 2*key), generates an inclusion proof for the
// first key and checks that the gnark arbo verifier circuit accepts it on
// BN254 with the Groth16 backend.
func TestGnarkArboVerifier(t *testing.T) {
	c := qt.New(t)
	tree, err := arbo.NewTree(arbo.Config{
		Database:     memdb.New(),
		MaxLevels:    nLevels,
		HashFunction: arbo.HashFunctionMultiPoseidon,
	})
	c.Assert(err, qt.IsNil)
	// generate 100 random keys below maxKey, each with value = 2*key
	maxKey, ok := new(big.Int).SetString("10000000000000000000000000", 10)
	c.Assert(ok, qt.IsTrue)
	keys := make([]*big.Int, 0, 100)
	values := make([][]*big.Int, 0, 100)
	for range 100 {
		k, err := rand.Int(rand.Reader, maxKey)
		c.Assert(err, qt.IsNil)
		keys = append(keys, k)
		values = append(values, []*big.Int{new(big.Int).Mul(k, big.NewInt(2))})
	}
	_, err = tree.AddBatchBigInt(keys, values)
	c.Assert(err, qt.IsNil)
	proof, err := tree.GenerateGnarkVerifierProofBigInt(keys[0])
	c.Assert(err, qt.IsNil)
	// pad the proof siblings up to the circuit's fixed depth with zeros
	var paddedSiblings [nLevels]frontend.Variable
	for i := range paddedSiblings {
		if i < len(proof.Siblings) {
			paddedSiblings[i] = proof.Siblings[i]
		} else {
			paddedSiblings[i] = 0
		}
	}
	assert := test.NewAssert(t)
	assert.SolvingSucceeded(&testCircuitArbo{}, &testCircuitArbo{
		Root:     proof.Root,
		Key:      proof.Key,
		Value:    proof.Value,
		Siblings: paddedSiblings,
	}, test.WithCurves(ecc.BN254), test.WithBackends(backend.GROTH16))
}
// testCircuitSMT is a gnark circuit that verifies an smt inclusion proof
// with a fixed depth of nLevels siblings.
type testCircuitSMT struct {
	Root     frontend.Variable
	Key      frontend.Variable
	Value    frontend.Variable
	Siblings [nLevels]frontend.Variable
}
// Define wires the smt inclusion verifier into the circuit using the
// MultiPoseidon hash. InclusionVerifier reports no error, so Define
// always returns nil.
func (circuit *testCircuitSMT) Define(api frontend.API) error {
	gsmt.InclusionVerifier(api, poseidon.MultiHash,
		circuit.Root, circuit.Siblings[:],
		circuit.Key, circuit.Value)
	return nil
}
// TestGnarkSMTVerifier builds a MultiPoseidon arbo tree with 100 random
// big.Int keys (value = 2*key), generates an inclusion proof for the
// first key and checks that the gnark smt verifier circuit accepts it on
// BN254 with the Groth16 backend.
func TestGnarkSMTVerifier(t *testing.T) {
	c := qt.New(t)
	tree, err := arbo.NewTree(arbo.Config{
		Database:     memdb.New(),
		MaxLevels:    nLevels,
		HashFunction: arbo.HashFunctionMultiPoseidon,
	})
	c.Assert(err, qt.IsNil)
	// generate 100 random keys below maxKey, each with value = 2*key
	maxKey, ok := new(big.Int).SetString("10000000000000000000000000", 10)
	c.Assert(ok, qt.IsTrue)
	keys := make([]*big.Int, 0, 100)
	values := make([][]*big.Int, 0, 100)
	for range 100 {
		k, err := rand.Int(rand.Reader, maxKey)
		c.Assert(err, qt.IsNil)
		keys = append(keys, k)
		values = append(values, []*big.Int{new(big.Int).Mul(k, big.NewInt(2))})
	}
	_, err = tree.AddBatchBigInt(keys, values)
	c.Assert(err, qt.IsNil)
	proof, err := tree.GenerateGnarkVerifierProofBigInt(keys[0])
	c.Assert(err, qt.IsNil)
	// pad the proof siblings up to the circuit's fixed depth with zeros
	var paddedSiblings [nLevels]frontend.Variable
	for i := range paddedSiblings {
		if i < len(proof.Siblings) {
			paddedSiblings[i] = proof.Siblings[i]
		} else {
			paddedSiblings[i] = 0
		}
	}
	assert := test.NewAssert(t)
	assert.SolvingSucceeded(&testCircuitSMT{}, &testCircuitSMT{
		Root:     proof.Root,
		Key:      proof.Key,
		Value:    proof.Value,
		Siblings: paddedSiblings,
	}, test.WithCurves(ecc.BN254), test.WithBackends(backend.GROTH16))
}

44
testvectors/gnark/go.mod Normal file
View File

@@ -0,0 +1,44 @@
module testgnark
go 1.24.0
require (
github.com/consensys/gnark v0.12.1-0.20250320152310-b51a3d4535cb
github.com/consensys/gnark-crypto v0.16.1-0.20250217214835-5ed804970f85
github.com/frankban/quicktest v1.14.6
github.com/vocdoni/arbo v0.0.0-20241216103934-e64315269b49
github.com/vocdoni/gnark-crypto-primitives v0.0.2-0.20250328120724-f9a3af9a3ace
)
require (
github.com/bits-and-blooms/bitset v1.20.0 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/consensys/bavard v0.1.29 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/pprof v0.0.0-20241101162523-b92577c0c142 // indirect
github.com/iden3/go-iden3-crypto v0.0.17 // indirect
github.com/ingonyama-zk/icicle/v3 v3.1.1-0.20241118092657-fccdb2f0921b // indirect
github.com/klauspost/cpuid/v2 v2.2.7 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mmcloughlin/addchain v0.4.0 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/rogpeppe/go-internal v1.12.0 // indirect
github.com/ronanh/intcomp v1.1.0 // indirect
github.com/rs/zerolog v1.33.0 // indirect
github.com/stretchr/testify v1.10.0 // indirect
github.com/vocdoni/vocdoni-z-sandbox v0.0.0-20241216104229-fa0b063e636c // indirect
github.com/x448/float16 v0.8.4 // indirect
go.vocdoni.io/dvote v1.10.2-0.20241024102542-c1ce6d744bc5 // indirect
golang.org/x/crypto v0.33.0 // indirect
golang.org/x/exp v0.0.0-20240823005443-9b4947da3948 // indirect
golang.org/x/sync v0.11.0 // indirect
golang.org/x/sys v0.30.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
lukechampine.com/blake3 v1.3.0 // indirect
rsc.io/tmplfunc v0.0.3 // indirect
)

171
testvectors/gnark/go.sum Normal file
View File

@@ -0,0 +1,171 @@
github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8=
github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bits-and-blooms/bitset v1.20.0 h1:2F+rfL86jE2d/bmw7OhqUg2Sj/1rURkBn3MdfoPyRVU=
github.com/bits-and-blooms/bitset v1.20.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cockroachdb/errors v1.11.3 h1:5bA+k2Y6r+oz/6Z/RFlNeVCesGARKuC6YymtcDrbC/I=
github.com/cockroachdb/errors v1.11.3/go.mod h1:m4UIW4CDjx+R5cybPsNrRbreomiFqt8o1h1wUVazSd8=
github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce h1:giXvy4KSc/6g/esnpM7Geqxka4WSqI1SZc7sMJFd3y4=
github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce/go.mod h1:9/y3cnZ5GKakj/H4y9r9GTjCvAFta7KLgSHPJJYc52M=
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE=
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
github.com/cockroachdb/pebble v1.1.2 h1:CUh2IPtR4swHlEj48Rhfzw6l/d0qA31fItcIszQVIsA=
github.com/cockroachdb/pebble v1.1.2/go.mod h1:4exszw1r40423ZsmkG/09AFEG83I0uDgfujJdbL6kYU=
github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30=
github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo=
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ=
github.com/cometbft/cometbft v1.0.0-alpha.1 h1:M0q0RsNYhAwCANXLkJCEJnyf8fBR8O94InkELElGv0E=
github.com/cometbft/cometbft v1.0.0-alpha.1/go.mod h1:fwVpJigzDw2UnFchb0fIq7svrLmHcn5AfpMzob/xquI=
github.com/consensys/bavard v0.1.29 h1:fobxIYksIQ+ZSrTJUuQgu+HIJwclrAPcdXqd7H2hh1k=
github.com/consensys/bavard v0.1.29/go.mod h1:k/zVjHHC4B+PQy1Pg7fgvG3ALicQw540Crag8qx+dZs=
github.com/consensys/gnark v0.12.1-0.20250320152310-b51a3d4535cb h1:QPrQjK1hd7Y6ELAnUkmU1J/GOAHJ+UwCQxzqhRkBADY=
github.com/consensys/gnark v0.12.1-0.20250320152310-b51a3d4535cb/go.mod h1:x171T3JQ7aJs2SrrAmfu/qVvVltMhlLfnS5gS0owjtI=
github.com/consensys/gnark-crypto v0.16.1-0.20250217214835-5ed804970f85 h1:3ht4gGH3smFGVLFhpFTKvDbEdagC6eSaPXnHjCQGh94=
github.com/consensys/gnark-crypto v0.16.1-0.20250217214835-5ed804970f85/go.mod h1:A2URlMHUT81ifJ0UlLzSlm7TmnE3t7VxEThApdMukJw=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
github.com/ethereum/go-ethereum v1.14.12 h1:8hl57x77HSUo+cXExrURjU/w1VhL+ShCTJrTwcCQSe4=
github.com/ethereum/go-ethereum v1.14.12/go.mod h1:RAC2gVMWJ6FkxSPESfbshrcKpIokgQKsVKmAuqdekDY=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/getsentry/sentry-go v0.27.0 h1:Pv98CIbtB3LkMWmXi4Joa5OOcwbmnX88sF5qbK3r3Ps=
github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY=
github.com/glendc/go-external-ip v0.1.0 h1:iX3xQ2Q26atAmLTbd++nUce2P5ht5P4uD4V7caSY/xg=
github.com/glendc/go-external-ip v0.1.0/go.mod h1:CNx312s2FLAJoWNdJWZ2Fpf5O4oLsMFwuYviHjS4uJE=
github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4=
github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/pprof v0.0.0-20241101162523-b92577c0c142 h1:sAGdeJj0bnMgUNVeUpp6AYlVdCt3/GdI3pGRqsNSQLs=
github.com/google/pprof v0.0.0-20241101162523-b92577c0c142/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
github.com/holiman/uint256 v1.3.1 h1:JfTzmih28bittyHM8z360dCjIA9dbPIBlcTI6lmctQs=
github.com/holiman/uint256 v1.3.1/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
github.com/iden3/go-iden3-crypto v0.0.17 h1:NdkceRLJo/pI4UpcjVah4lN/a3yzxRUGXqxbWcYh9mY=
github.com/iden3/go-iden3-crypto v0.0.17/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E=
github.com/ingonyama-zk/icicle/v3 v3.1.1-0.20241118092657-fccdb2f0921b h1:AvQTK7l0PTHODD06PVQX1Tn2o29sRIaKIDOvTJmKurY=
github.com/ingonyama-zk/icicle/v3 v3.1.1-0.20241118092657-fccdb2f0921b/go.mod h1:e0JHb27/P6WorCJS3YolbY5XffS4PGBuoW38OthLkDs=
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leanovate/gopter v0.2.11 h1:vRjThO1EKPb/1NsDXuDrzldR28RLkBflWYcU9CvzWu4=
github.com/leanovate/gopter v0.2.11/go.mod h1:aK3tzZP/C+p1m3SPRE4SYZFGP7jjkuSI4f7Xvpt0S9c=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mfridman/interpolate v0.0.2 h1:pnuTK7MQIxxFz1Gr+rjSIx9u7qVjf5VOoM/u6BbAxPY=
github.com/mfridman/interpolate v0.0.2/go.mod h1:p+7uk6oE07mpE/Ik1b8EckO0O4ZXiGAfshKBWLUM9Xg=
github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY=
github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU=
github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU=
github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe h1:iruDEfMl2E6fbMZ9s0scYfZQ84/6SPL6zC8ACM2oIL0=
github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pressly/goose/v3 v3.21.1 h1:5SSAKKWej8LVVzNLuT6KIvP1eFDuPvxa+B6H0w78buQ=
github.com/pressly/goose/v3 v3.21.1/go.mod h1:sqthmzV8PitchEkjecFJII//l43dLOCzfWh8pHEe+vE=
github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/ronanh/intcomp v1.1.0 h1:i54kxmpmSoOZFcWPMWryuakN0vLxLswASsGa07zkvLU=
github.com/ronanh/intcomp v1.1.0/go.mod h1:7FOLy3P3Zj3er/kVrU/pl+Ql7JFZj7bwliMGketo0IU=
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8=
github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
github.com/sethvargo/go-retry v0.2.4 h1:T+jHEQy/zKJf5s95UkguisicE0zuF9y7+/vgz08Ocec=
github.com/sethvargo/go-retry v0.2.4/go.mod h1:1afjQuvh7s4gflMObvjLPaWgluLLyhA1wmVZ6KLpICw=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a h1:1ur3QoCqvE5fl+nylMaIr9PVV1w343YRDtsy+Rwu7XI=
github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48=
github.com/vocdoni/arbo v0.0.0-20241216103934-e64315269b49 h1:GMyepEuxLflqhdDHts/eUMtVkbrCI5mJc8RVdlwZBoA=
github.com/vocdoni/arbo v0.0.0-20241216103934-e64315269b49/go.mod h1:wXxPP+5vkT5t54lrKz6bCXKIyv8aRplKq8uCFb2wgy4=
github.com/vocdoni/gnark-crypto-primitives v0.0.2-0.20250328120724-f9a3af9a3ace h1:ElTiK0ztqX/MAeFJXjjWNUyXrKXyHrt295rP6O2jf1w=
github.com/vocdoni/gnark-crypto-primitives v0.0.2-0.20250328120724-f9a3af9a3ace/go.mod h1:L3OY0etasbmuSwOnAdNq7JnPuR7jY4A/wtFIlWvPhiU=
github.com/vocdoni/vocdoni-z-sandbox v0.0.0-20241216104229-fa0b063e636c h1:0KJ3ufAYWGpdXgdlnLjFblW98MK+KfdYUiXzKPtk9PA=
github.com/vocdoni/vocdoni-z-sandbox v0.0.0-20241216104229-fa0b063e636c/go.mod h1:mFXFbumAbxySlviwrGiclFPRiSDIs4WzXnQQqPyNX9k=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY=
github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4=
github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8=
github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM=
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d h1:splanxYIlg+5LfHAM6xpdFEAYOk8iySO56hMFq6uLyA=
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
go.mongodb.org/mongo-driver v1.12.1 h1:nLkghSU8fQNaK7oUmDhQFsnrtcoNy7Z6LVFKsEecqgE=
go.mongodb.org/mongo-driver v1.12.1/go.mod h1:/rGBTebI3XYboVmgz+Wv3Bcbl3aD0QF9zl6kDDw18rQ=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.vocdoni.io/dvote v1.10.2-0.20241024102542-c1ce6d744bc5 h1:22esW3YedMfoEOx0Chc2qhrooMBDRHsasJfGytESxo4=
go.vocdoni.io/dvote v1.10.2-0.20241024102542-c1ce6d744bc5/go.mod h1:k5uY7me8lgMjWcXfnYQbsfZgPzGUQM6ldK1VbvKVgFQ=
golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus=
golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M=
golang.org/x/exp v0.0.0-20240823005443-9b4947da3948 h1:kx6Ds3MlpiUHKj7syVnbp57++8WpuKPcR5yjLBjvLEA=
golang.org/x/exp v0.0.0-20240823005443-9b4947da3948/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ=
golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=
golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
lukechampine.com/blake3 v1.3.0 h1:sJ3XhFINmHSrYCgl958hscfIa3bw8x4DqMP3u1YvoYE=
lukechampine.com/blake3 v1.3.0/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k=
rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU=
rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA=

167
tree.go
View File

@@ -19,9 +19,11 @@ import (
"io"
"math"
"runtime"
"slices"
"sync"
"go.vocdoni.io/dvote/db"
"go.vocdoni.io/dvote/db/prefixeddb"
)
const (
@@ -55,6 +57,9 @@ var (
// in disk.
DefaultThresholdNLeafs = 65536
dbTreePrefix = []byte("treedb")
dbValuesPrefix = []byte("valuesdb")
dbKeyRoot = []byte("root")
dbKeyNLeafs = []byte("nleafs")
emptyValue = []byte{0}
@@ -88,7 +93,10 @@ var (
type Tree struct {
sync.Mutex
db db.Database
treedb db.Database
valuesdb db.Database
valuesdbMu sync.RWMutex
maxLevels int
// thresholdNLeafs defines the threshold number of leafs in the tree
// that determines if AddBatch will work in memory or in disk. It is
@@ -116,7 +124,7 @@ type Config struct {
// NewTree returns a new Tree, if there is a Tree still in the given database, it
// will load it.
func NewTree(cfg Config) (*Tree, error) {
wTx := cfg.Database.WriteTx()
wTx := prefixeddb.NewPrefixedWriteTx(cfg.Database.WriteTx(), dbTreePrefix)
defer wTx.Discard()
t, err := NewTreeWithTx(wTx, cfg)
@@ -138,12 +146,16 @@ func NewTreeWithTx(wTx db.WriteTx, cfg Config) (*Tree, error) {
if cfg.ThresholdNLeafs == 0 {
cfg.ThresholdNLeafs = DefaultThresholdNLeafs
}
t := Tree{db: cfg.Database, maxLevels: cfg.MaxLevels,
thresholdNLeafs: cfg.ThresholdNLeafs, hashFunction: cfg.HashFunction}
t.emptyHash = make([]byte, t.hashFunction.Len()) // empty
t := Tree{
treedb: prefixeddb.NewPrefixedDatabase(cfg.Database, dbTreePrefix),
valuesdb: prefixeddb.NewPrefixedDatabase(cfg.Database, dbValuesPrefix),
maxLevels: cfg.MaxLevels,
thresholdNLeafs: cfg.ThresholdNLeafs,
hashFunction: cfg.HashFunction,
emptyHash: make([]byte, cfg.HashFunction.Len()), // empty
}
_, err := wTx.Get(dbKeyRoot)
if err == db.ErrKeyNotFound {
if _, err := wTx.Get(dbKeyRoot); err == db.ErrKeyNotFound {
// store new root 0 (empty)
if err = wTx.Set(dbKeyRoot, t.emptyHash); err != nil {
return nil, err
@@ -160,7 +172,7 @@ func NewTreeWithTx(wTx db.WriteTx, cfg Config) (*Tree, error) {
// Root returns the root of the Tree
func (t *Tree) Root() ([]byte, error) {
return t.RootWithTx(t.db)
return t.RootWithTx(t.treedb)
}
// RootWithTx returns the root of the Tree using the given db.ReadTx
@@ -184,6 +196,13 @@ func (t *Tree) HashFunction() HashFunction {
return t.hashFunction
}
// MaxKeyLen returns the maximum length of the key in bytes for the current
// tree. It is calculated as the minimum between the length of the hash
// function used and the number of levels in the tree divided by 8.
func (t *Tree) MaxKeyLen() int {
return MaxKeyLen(t.maxLevels, t.HashFunction().Len())
}
// editable returns true if the tree is editable, and false when is not
// editable (because is a snapshot tree)
func (t *Tree) editable() bool {
@@ -201,7 +220,7 @@ type Invalid struct {
// the indexes of the keys failed to add. Supports empty values as input
// parameters, which is equivalent to 0 valued byte array.
func (t *Tree) AddBatch(keys, values [][]byte) ([]Invalid, error) {
wTx := t.db.WriteTx()
wTx := t.treedb.WriteTx()
defer wTx.Discard()
invalids, err := t.AddBatchWithTx(wTx, keys, values)
@@ -221,7 +240,6 @@ func (t *Tree) AddBatchWithTx(wTx db.WriteTx, keys, values [][]byte) ([]Invalid,
if !t.editable() {
return nil, ErrSnapshotNotEditable
}
e := []byte{}
// equal the number of keys & values
if len(keys) > len(values) {
@@ -233,7 +251,6 @@ func (t *Tree) AddBatchWithTx(wTx db.WriteTx, keys, values [][]byte) ([]Invalid,
// crop extra values
values = values[:len(keys)]
}
nLeafs, err := t.GetNLeafsWithTx(wTx)
if err != nil {
return nil, err
@@ -248,26 +265,25 @@ func (t *Tree) addBatchInDisk(wTx db.WriteTx, keys, values [][]byte) ([]Invalid,
nCPU := flp2(runtime.NumCPU())
if nCPU == 1 || len(keys) < nCPU {
var invalids []Invalid
for i := 0; i < len(keys); i++ {
for i := range keys {
if err := t.addWithTx(wTx, keys[i], values[i]); err != nil {
invalids = append(invalids, Invalid{i, err})
}
}
return invalids, nil
}
// split keys and values in buckets to add them in parallel by CPU
kvs, invalids, err := keysValuesToKvs(t.maxLevels, keys, values)
if err != nil {
return nil, err
}
buckets := splitInBuckets(kvs, nCPU)
// get the root to start adding the keys
root, err := t.RootWithTx(wTx)
if err != nil {
return nil, err
}
// get the subRoots at level l+1
l := int(math.Log2(float64(nCPU)))
subRoots, err := t.getSubRootsAtLevel(wTx, root, l+1)
if err != nil {
@@ -277,12 +293,12 @@ func (t *Tree) addBatchInDisk(wTx db.WriteTx, keys, values [][]byte) ([]Invalid,
// Already populated Tree but Unbalanced.
// add one key at each bucket, and then continue with the flow
for i := 0; i < len(buckets); i++ {
for i := range buckets {
// add one leaf of the bucket, if there is an error when
// adding the k-v, try to add the next one of the bucket
// (until one is added)
inserted := -1
for j := 0; j < len(buckets[i]); j++ {
for j := range buckets[i] {
if newRoot, err := t.add(wTx, root, 0,
buckets[i][j].k, buckets[i][j].v); err == nil {
inserted = j
@@ -290,10 +306,9 @@ func (t *Tree) addBatchInDisk(wTx db.WriteTx, keys, values [][]byte) ([]Invalid,
break
}
}
// remove the inserted element from buckets[i]
if inserted != -1 {
buckets[i] = append(buckets[i][:inserted], buckets[i][inserted+1:]...)
buckets[i] = slices.Delete(buckets[i], inserted, inserted+1)
}
}
subRoots, err = t.getSubRootsAtLevel(wTx, root, l+1)
@@ -311,8 +326,8 @@ func (t *Tree) addBatchInDisk(wTx db.WriteTx, keys, values [][]byte) ([]Invalid,
invalidsInBucket := make([][]Invalid, nCPU)
txs := make([]db.WriteTx, nCPU)
for i := 0; i < nCPU; i++ {
txs[i] = t.db.WriteTx()
for i := range nCPU {
txs[i] = t.treedb.WriteTx()
err := txs[i].Apply(wTx)
if err != nil {
return nil, err
@@ -321,12 +336,12 @@ func (t *Tree) addBatchInDisk(wTx db.WriteTx, keys, values [][]byte) ([]Invalid,
var wg sync.WaitGroup
wg.Add(nCPU)
for i := 0; i < nCPU; i++ {
for i := range nCPU {
go func(cpu int) {
// use different wTx for each cpu, after once all
// are done, iter over the cpuWTxs and copy their
// content into the main wTx
for j := 0; j < len(buckets[cpu]); j++ {
for j := range buckets[cpu] {
newSubRoot, err := t.add(txs[cpu], subRoots[cpu],
l, buckets[cpu][j].k, buckets[cpu][j].v)
if err != nil {
@@ -341,33 +356,27 @@ func (t *Tree) addBatchInDisk(wTx db.WriteTx, keys, values [][]byte) ([]Invalid,
}(i)
}
wg.Wait()
for i := 0; i < nCPU; i++ {
for i := range nCPU {
if err := wTx.Apply(txs[i]); err != nil {
return nil, err
}
txs[i].Discard()
}
for i := 0; i < len(invalidsInBucket); i++ {
for i := range invalidsInBucket {
invalids = append(invalids, invalidsInBucket[i]...)
}
newRoot, err := t.upFromSubRoots(wTx, subRoots)
if err != nil {
return nil, err
}
// update dbKeyNLeafs
if err := t.SetRootWithTx(wTx, newRoot); err != nil {
return nil, err
}
// update nLeafs
if err := t.incNLeafs(wTx, len(keys)-len(invalids)); err != nil {
return nil, err
}
return invalids, nil
}
@@ -412,7 +421,6 @@ func (t *Tree) upFromSubRoots(wTx db.WriteTx, subRoots [][]byte) ([]byte, error)
newSubRoots = append(newSubRoots, subRoots[i+1])
continue
}
k, v, err := t.newIntermediate(subRoots[i], subRoots[i+1])
if err != nil {
return nil, err
@@ -430,7 +438,6 @@ func (t *Tree) upFromSubRoots(wTx db.WriteTx, subRoots [][]byte) ([]byte, error)
func (t *Tree) getSubRootsAtLevel(rTx db.Reader, root []byte, l int) ([][]byte, error) {
// go at level l and return each node key, where each node key is the
// subRoot of the subTree that starts there
var subRoots [][]byte
err := t.iterWithStop(rTx, root, 0, func(currLvl int, k, v []byte) bool {
if currLvl == l && !bytes.Equal(k, t.emptyHash) {
@@ -441,7 +448,6 @@ func (t *Tree) getSubRootsAtLevel(rTx db.Reader, root []byte, l int) ([][]byte,
}
return false
})
return subRoots, err
}
@@ -450,12 +456,10 @@ func (t *Tree) addBatchInMemory(wTx db.WriteTx, keys, values [][]byte) ([]Invali
if err != nil {
return nil, err
}
invalids, err := vt.addBatch(keys, values)
if err != nil {
return nil, err
}
// once the VirtualTree is build, compute the hashes
pairs, err := vt.computeHashes()
if err != nil {
@@ -464,26 +468,22 @@ func (t *Tree) addBatchInMemory(wTx db.WriteTx, keys, values [][]byte) ([]Invali
// nothing stored in the db and the error is returned
return nil, err
}
// store pairs in db
for i := 0; i < len(pairs); i++ {
for i := range pairs {
if err := wTx.Set(pairs[i][0], pairs[i][1]); err != nil {
return nil, err
}
}
// store root (from the vt) to db
if vt.root != nil {
if err := wTx.Set(dbKeyRoot, vt.root.h); err != nil {
return nil, err
}
}
// update nLeafs
if err := t.incNLeafs(wTx, len(keys)-len(invalids)); err != nil {
return nil, err
}
return invalids, nil
}
@@ -493,7 +493,7 @@ func (t *Tree) loadVT() (vt, error) {
vt := newVT(t.maxLevels, t.hashFunction)
vt.params.dbg = t.dbg
var callbackErr error
err := t.IterateWithStopWithTx(t.db, nil, func(_ int, k, v []byte) bool {
err := t.IterateWithStopWithTx(t.treedb, nil, func(_ int, k, v []byte) bool {
if v[0] != PrefixValueLeaf {
return false
}
@@ -515,7 +515,7 @@ func (t *Tree) loadVT() (vt, error) {
// *big.Int, is expected that are represented by a Little-Endian byte array
// (for circom compatibility).
func (t *Tree) Add(k, v []byte) error {
wTx := t.db.WriteTx()
wTx := t.treedb.WriteTx()
defer wTx.Discard()
if err := t.AddWithTx(wTx, k, v); err != nil {
@@ -561,13 +561,19 @@ func (t *Tree) addWithTx(wTx db.WriteTx, k, v []byte) error {
return nil
}
// keyLenByLevels returns the key length in bytes that can be used in the tree
// with maxLevels levels. The key length is calculated as the ceil(maxLevels/8).
func keyLenByLevels(maxLevels int) int {
return int(math.Ceil(float64(maxLevels) / float64(8)))
}
// keyPathFromKey returns the keyPath and checks that the key is not bigger
// than maximum key length for the tree maxLevels size.
// This is because if the key bits length is bigger than the maxLevels of the
// tree, two different keys that their difference is at the end, will collision
// in the same leaf of the tree (at the max depth).
func keyPathFromKey(maxLevels int, k []byte) ([]byte, error) {
maxKeyLen := int(math.Ceil(float64(maxLevels) / float64(8))) //nolint:gomnd
maxKeyLen := keyLenByLevels(maxLevels) //nolint:gomnd
if len(k) > maxKeyLen {
return nil, fmt.Errorf("len(k) can not be bigger than ceil(maxLevels/8), where"+
" len(k): %d, maxLevels: %d, max key len=ceil(maxLevels/8): %d. Might need"+
@@ -639,7 +645,8 @@ func (t *Tree) add(wTx db.WriteTx, root []byte, fromLvl int, k, v []byte) ([]byt
// down goes down to the leaf recursively
func (t *Tree) down(rTx db.Reader, newKey, currKey []byte, siblings [][]byte,
path []bool, currLvl int, getLeaf bool) (
[]byte, []byte, [][]byte, error) {
[]byte, []byte, [][]byte, error,
) {
if currLvl > t.maxLevels {
return nil, nil, nil, ErrMaxLevel
}
@@ -713,15 +720,14 @@ func (t *Tree) down(rTx db.Reader, newKey, currKey []byte, siblings [][]byte,
// downVirtually is used when in a leaf already exists, and a new leaf which
// shares the path until the existing leaf is being added
func (t *Tree) downVirtually(siblings [][]byte, oldKey, newKey []byte, oldPath,
newPath []bool, currLvl int) ([][]byte, error) {
newPath []bool, currLvl int,
) ([][]byte, error) {
var err error
if currLvl > t.maxLevels-1 {
return nil, ErrMaxVirtualLevel
}
if oldPath[currLvl] == newPath[currLvl] {
siblings = append(siblings, t.emptyHash)
siblings, err = t.downVirtually(siblings, oldKey, newKey, oldPath, newPath, currLvl+1)
if err != nil {
return nil, err
@@ -730,13 +736,13 @@ func (t *Tree) downVirtually(siblings [][]byte, oldKey, newKey []byte, oldPath,
}
// reached the divergence
siblings = append(siblings, oldKey)
return siblings, nil
}
// up goes up recursively updating the intermediate nodes
func (t *Tree) up(wTx db.WriteTx, key []byte, siblings [][]byte, path []bool,
currLvl, toLvl int) ([]byte, error) {
currLvl, toLvl int,
) ([]byte, error) {
var k, v []byte
var err error
if path[currLvl+toLvl] {
@@ -754,12 +760,10 @@ func (t *Tree) up(wTx db.WriteTx, key []byte, siblings [][]byte, path []bool,
if err = wTx.Set(k, v); err != nil {
return nil, err
}
if currLvl == 0 {
// reached the root
return k, nil
}
return t.up(wTx, k, siblings, path, currLvl-1, toLvl)
}
@@ -795,7 +799,6 @@ func ReadLeafValue(b []byte) ([]byte, []byte) {
if len(b) < PrefixValueLen {
return []byte{}, []byte{}
}
kLen := b[1]
if len(b) < PrefixValueLen+int(kLen) {
return []byte{}, []byte{}
@@ -825,12 +828,10 @@ func newIntermediate(hashFunc HashFunction, l, r []byte) ([]byte, []byte, error)
b[1] = byte(len(l))
copy(b[PrefixValueLen:PrefixValueLen+hashFunc.Len()], l)
copy(b[PrefixValueLen+hashFunc.Len():], r)
key, err := hashFunc.Hash(l, r)
if err != nil {
return nil, nil, err
}
return key, b, nil
}
@@ -839,7 +840,6 @@ func ReadIntermediateChilds(b []byte) ([]byte, []byte) {
if len(b) < PrefixValueLen {
return []byte{}, []byte{}
}
lLen := b[1]
if len(b) < PrefixValueLen+int(lLen) {
return []byte{}, []byte{}
@@ -851,7 +851,7 @@ func ReadIntermediateChilds(b []byte) ([]byte, []byte) {
func getPath(numLevels int, k []byte) []bool {
path := make([]bool, numLevels)
for n := 0; n < numLevels; n++ {
for n := range numLevels {
path[n] = k[n/8]&(1<<(n%8)) != 0
}
return path
@@ -860,9 +860,8 @@ func getPath(numLevels int, k []byte) []bool {
// Update updates the value for a given existing key. If the given key does not
// exist, returns an error.
func (t *Tree) Update(k, v []byte) error {
wTx := t.db.WriteTx()
wTx := t.treedb.WriteTx()
defer wTx.Discard()
if err := t.UpdateWithTx(wTx, k, v); err != nil {
return err
}
@@ -875,7 +874,6 @@ func (t *Tree) Update(k, v []byte) error {
func (t *Tree) UpdateWithTx(wTx db.WriteTx, k, v []byte) error {
t.Lock()
defer t.Unlock()
if !t.editable() {
return ErrSnapshotNotEditable
}
@@ -929,13 +927,17 @@ func (t *Tree) UpdateWithTx(wTx db.WriteTx, k, v []byte) error {
// GenProof generates a MerkleTree proof for the given key. The leaf value is
// returned, together with the packed siblings of the proof, and a boolean
// parameter that indicates if the proof is of existence (true) or not (false).
func (t *Tree) GenProof(k []byte) ([]byte, []byte, []byte, bool, error) {
return t.GenProofWithTx(t.db, k)
func (t *Tree) GenProof(k []byte) (
leafKey []byte, leafValue []byte, siblings []byte, existence bool, err error,
) {
return t.GenProofWithTx(t.treedb, k)
}
// GenProofWithTx does the same than the GenProof method, but allowing to pass
// the db.ReadTx that is used.
func (t *Tree) GenProofWithTx(rTx db.Reader, k []byte) ([]byte, []byte, []byte, bool, error) {
func (t *Tree) GenProofWithTx(rTx db.Reader, k []byte) (
leafKey []byte, leafValue []byte, siblings []byte, existence bool, err error,
) {
keyPath, err := keyPathFromKey(t.maxLevels, k)
if err != nil {
return nil, nil, nil, false, err
@@ -948,24 +950,23 @@ func (t *Tree) GenProofWithTx(rTx db.Reader, k []byte) ([]byte, []byte, []byte,
}
// go down to the leaf
var siblings [][]byte
_, value, siblings, err := t.down(rTx, k, root, siblings, path, 0, true)
var unpacked [][]byte
_, value, unpacked, err := t.down(rTx, k, root, unpacked, path, 0, true)
if err != nil {
return nil, nil, nil, false, err
}
s, err := PackSiblings(t.hashFunction, siblings)
if err != nil {
if siblings, err = PackSiblings(t.hashFunction, unpacked); err != nil {
return nil, nil, nil, false, err
}
leafK, leafV := ReadLeafValue(value)
if !bytes.Equal(k, leafK) {
// key not in tree, proof of non-existence
return leafK, leafV, s, false, nil
return leafK, leafV, siblings, false, nil
}
return leafK, leafV, s, true, nil
return leafK, leafV, siblings, true, nil
}
// PackSiblings packs the siblings into a byte array.
@@ -1062,7 +1063,7 @@ func bytesToBitmap(b []byte) []bool {
// will be placed the data found in the tree in the leaf that was on the path
// going to the input key.
func (t *Tree) Get(k []byte) ([]byte, []byte, error) {
return t.GetWithTx(t.db, k)
return t.GetWithTx(t.treedb, k)
}
// GetWithTx does the same than the Get method, but allowing to pass the
@@ -1151,7 +1152,7 @@ func (t *Tree) setNLeafs(wTx db.WriteTx, nLeafs int) error {
// GetNLeafs returns the number of Leafs of the Tree.
func (t *Tree) GetNLeafs() (int, error) {
return t.GetNLeafsWithTx(t.db)
return t.GetNLeafsWithTx(t.treedb)
}
// GetNLeafsWithTx does the same than the GetNLeafs method, but allowing to
@@ -1167,7 +1168,7 @@ func (t *Tree) GetNLeafsWithTx(rTx db.Reader) (int, error) {
// SetRoot sets the root to the given root
func (t *Tree) SetRoot(root []byte) error {
wTx := t.db.WriteTx()
wTx := t.treedb.WriteTx()
defer wTx.Discard()
if err := t.SetRootWithTx(wTx, root); err != nil {
@@ -1209,7 +1210,7 @@ func (t *Tree) Snapshot(fromRoot []byte) (*Tree, error) {
return nil, err
}
}
rTx := t.db
rTx := t.treedb
// check that the root exists in the db
if !bytes.Equal(fromRoot, t.emptyHash) {
if _, err := rTx.Get(fromRoot); err == ErrKeyNotFound {
@@ -1222,7 +1223,7 @@ func (t *Tree) Snapshot(fromRoot []byte) (*Tree, error) {
}
return &Tree{
db: t.db,
treedb: t.treedb,
maxLevels: t.maxLevels,
snapshotRoot: fromRoot,
emptyHash: t.emptyHash,
@@ -1234,7 +1235,7 @@ func (t *Tree) Snapshot(fromRoot []byte) (*Tree, error) {
// Iterate iterates through the full Tree, executing the given function on each
// node of the Tree.
func (t *Tree) Iterate(fromRoot []byte, f func([]byte, []byte)) error {
return t.IterateWithTx(t.db, fromRoot, f)
return t.IterateWithTx(t.treedb, fromRoot, f)
}
// IterateWithTx does the same than the Iterate method, but allowing to pass
@@ -1258,18 +1259,19 @@ func (t *Tree) IterateWithStop(fromRoot []byte, f func(int, []byte, []byte) bool
// allow to define which root to use
if fromRoot == nil {
var err error
fromRoot, err = t.RootWithTx(t.db)
fromRoot, err = t.RootWithTx(t.treedb)
if err != nil {
return err
}
}
return t.iterWithStop(t.db, fromRoot, 0, f)
return t.iterWithStop(t.treedb, fromRoot, 0, f)
}
// IterateWithStopWithTx does the same than the IterateWithStop method, but
// allowing to pass the db.ReadTx that is used.
func (t *Tree) IterateWithStopWithTx(rTx db.Reader, fromRoot []byte,
f func(int, []byte, []byte) bool) error {
f func(int, []byte, []byte) bool,
) error {
// allow to define which root to use
if fromRoot == nil {
var err error
@@ -1282,7 +1284,8 @@ func (t *Tree) IterateWithStopWithTx(rTx db.Reader, fromRoot []byte,
}
func (t *Tree) iterWithStop(rTx db.Reader, k []byte, currLevel int,
f func(int, []byte, []byte) bool) error {
f func(int, []byte, []byte) bool,
) error {
var v []byte
var err error
if bytes.Equal(k, t.emptyHash) {
@@ -1470,14 +1473,14 @@ node [fontname=Monospace,fontsize=10,shape=box]
}
if fromRoot == nil {
var err error
fromRoot, err = t.RootWithTx(t.db)
fromRoot, err = t.RootWithTx(t.treedb)
if err != nil {
return err
}
}
nEmpties := 0
err := t.iterWithStop(t.db, fromRoot, 0, func(currLvl int, k, v []byte) bool {
err := t.iterWithStop(t.treedb, fromRoot, 0, func(currLvl int, k, v []byte) bool {
if currLvl == untilLvl {
return true // to stop the iter from going down
}

280
tree_big.go Normal file
View File

@@ -0,0 +1,280 @@
package arbo
import (
"bytes"
"fmt"
"math/big"
"slices"
)
// AddBatchBigInt adds a batch of key-value pairs to the tree. It converts the
// big.Int keys and the slices of big.Int values into bytes and adds them to
// the tree. It locks the tree to prevent concurrent writes to the valuesdb and
// creates a transaction to store the full values in the valuesdb. It returns
// a slice of Invalid items (the keys rejected by the tree as well as the keys
// whose serialized values failed to be stored) and an error if something
// fails.
func (t *Tree) AddBatchBigInt(keys []*big.Int, bigintsBatch [][]*big.Int) ([]Invalid, error) {
	if len(keys) != len(bigintsBatch) {
		return nil, fmt.Errorf("the number of keys and values mismatch")
	}
	// convert each key-value tuple into bytes
	var err error
	bKeys := make([][]byte, len(keys))
	bValues := make([][]byte, len(keys))
	serializedBigIntsBatch := make([][]byte, len(keys))
	for i := range keys {
		bKeys[i], bValues[i], serializedBigIntsBatch[i], err = bigIntsToLeaf(t.HashFunction(), t.MaxKeyLen(), keys[i], bigintsBatch[i])
		if err != nil {
			return nil, err
		}
	}
	// acquire lock to make an atomic update to treedb and valuesdb
	t.valuesdbMu.Lock()
	defer t.valuesdbMu.Unlock()
	// add the keys and leaf values in batch; keep the returned invalids so
	// the caller learns which keys the tree rejected (previously they were
	// silently discarded whenever err was nil)
	invalids, err := t.AddBatch(bKeys, bValues)
	if err != nil {
		return invalids, err
	}
	// store the serialized values in a single valuesdb transaction; values of
	// keys rejected by AddBatch are still written but stay unreachable,
	// because reads always go through the tree first
	wTx := t.valuesdb.WriteTx()
	defer wTx.Discard()
	for i := range bKeys {
		if err := wTx.Set(bKeys[i], serializedBigIntsBatch[i]); err != nil {
			invalids = append(invalids, Invalid{i, err})
		}
	}
	return invalids, wTx.Commit()
}
// AddBigInt inserts a new leaf for the given big.Int key. The key and the
// variadic big.Int values are encoded to bytes, the hashed value is added to
// the tree and the reversible serialization of the values is persisted in the
// valuesdb. The tree lock is held so treedb and valuesdb are updated
// atomically. It returns an error if any step fails.
func (t *Tree) AddBigInt(key *big.Int, bigints ...*big.Int) error {
	if key == nil {
		return fmt.Errorf("key cannot be nil")
	}
	// encode key, leaf value (hash) and reversible serialization in one go
	leafKey, leafValue, serialized, err := bigIntsToLeaf(t.HashFunction(), t.MaxKeyLen(), key, bigints)
	if err != nil {
		return err
	}
	// hold the lock so the tree insert and the values write are atomic
	t.valuesdbMu.Lock()
	defer t.valuesdbMu.Unlock()
	if err := t.Add(leafKey, leafValue); err != nil {
		return fmt.Errorf("raw key cannot be added: %w", err)
	}
	// persist the serialized values under the same byte key
	wTx := t.valuesdb.WriteTx()
	defer wTx.Discard()
	if err := wTx.Set(leafKey, serialized); err != nil {
		return fmt.Errorf("serializedBigInts cannot be stored: %w", err)
	}
	return wTx.Commit()
}
// UpdateBigInt replaces the values stored under an existing big.Int key. The
// key and values are encoded to bytes, the leaf is updated in the tree and
// the reversible serialization of the values is rewritten in the valuesdb,
// all under the tree lock so both databases stay consistent. It returns an
// error if the key does not exist or any step fails.
func (t *Tree) UpdateBigInt(key *big.Int, bigints ...*big.Int) error {
	if key == nil {
		return fmt.Errorf("key cannot be nil")
	}
	// encode key, leaf value (hash) and reversible serialization
	leafKey, leafValue, serialized, err := bigIntsToLeaf(t.HashFunction(), t.MaxKeyLen(), key, bigints)
	if err != nil {
		return err
	}
	// hold the lock so the tree update and the values write are atomic
	t.valuesdbMu.Lock()
	defer t.valuesdbMu.Unlock()
	if err := t.Update(leafKey, leafValue); err != nil {
		return err
	}
	// rewrite the serialized values under the same byte key
	wTx := t.valuesdb.WriteTx()
	defer wTx.Discard()
	if err := wTx.Set(leafKey, serialized); err != nil {
		return err
	}
	return wTx.Commit()
}
// GetBigInt looks up the leaf stored under the given big.Int key. The key is
// encoded to bytes, the leaf value is fetched from the tree, and the
// reversible serialization of the values is loaded from the valuesdb and
// decoded back into big.Ints. It returns the original key and values, or an
// error if the lookup or the consistency check fails.
func (t *Tree) GetBigInt(k *big.Int) (
	key *big.Int, bigints []*big.Int, err error,
) {
	// hold the read lock so in-flight atomic treedb/valuesdb updates finish
	t.valuesdbMu.RLock()
	defer t.valuesdbMu.RUnlock()
	if k == nil {
		return nil, nil, fmt.Errorf("key cannot be nil")
	}
	encKey := bigIntToLeafKey(k, t.MaxKeyLen())
	// fetch the leaf value (the hash of the values) from the tree
	_, leafValue, err := t.Get(encKey)
	if err != nil {
		return nil, nil, err
	}
	// fetch the reversible serialization from the valuesdb
	serialized, err := t.valuesdb.Get(encKey)
	if err != nil {
		return nil, nil, err
	}
	return t.leafToBigInts(ExplicitZero(encKey), leafValue, serialized)
}
// GenProofBigInts generates a MerkleTree proof for a big.Int key. The key is
// encoded to bytes and the proof is generated over it. It returns the leaf
// key, the leaf value, the packed siblings and whether the proof is of
// existence, or an error if something fails.
func (t *Tree) GenProofBigInts(key *big.Int) (
	leafKey []byte, leafValue []byte, siblings []byte, existence bool, err error,
) {
	if key == nil {
		return nil, nil, nil, false, fmt.Errorf("key cannot be nil")
	}
	return t.GenProof(bigIntToLeafKey(key, t.MaxKeyLen()))
}
// GenerateCircomVerifierProofBigInt generates a CircomVerifierProof for a
// big.Int key. The key is encoded to bytes and the proof is generated over
// it. It returns the CircomVerifierProof or an error if something fails.
func (t *Tree) GenerateCircomVerifierProofBigInt(k *big.Int) (*CircomVerifierProof, error) {
	if k == nil {
		return nil, fmt.Errorf("key cannot be nil")
	}
	return t.GenerateCircomVerifierProof(bigIntToLeafKey(k, t.MaxKeyLen()))
}
// GenerateGnarkVerifierProofBigInt generates a GnarkVerifierProof for a
// big.Int key. The key is encoded to bytes and the proof is generated over
// it. It returns the GnarkVerifierProof or an error if something fails.
func (t *Tree) GenerateGnarkVerifierProofBigInt(k *big.Int) (*GnarkVerifierProof, error) {
	if k == nil {
		return nil, fmt.Errorf("key cannot be nil")
	}
	return t.GenerateGnarkVerifierProof(bigIntToLeafKey(k, t.MaxKeyLen()))
}
// leafToBigInts decodes a leaf back into its big.Int key and values. The
// serialized values are deserialized, re-hashed, and compared against the
// leaf value stored in the tree to verify that the valuesdb entry matches
// the leaf. It returns the original key and values, or an error if the
// hashes differ.
func (t *Tree) leafToBigInts(bkey, value, serializedBigInts []byte) (
	key *big.Int, bigints []*big.Int, err error,
) {
	// reverse the serialization to recover the original values
	bigints = deserializeBigInts(serializedBigInts)
	// recompute the leaf value from the recovered values
	recomputed, err := HashBigInts(t.HashFunction(), bigints...)
	if err != nil {
		return nil, nil, err
	}
	// the recomputed hash must match the leaf value stored in the tree
	if !bytes.Equal(recomputed, value) {
		return nil, nil, fmt.Errorf("LeafToBigInt: bigintsHash != value")
	}
	return leafKeyToBigInt(bkey), bigints, nil
}
// leafKeyToBigInt decodes the bytes of a leaf key back into a big.Int,
// assuming the Little-Endian encoding used by bigIntToLeafKey.
func leafKeyToBigInt(key []byte) *big.Int {
	return BytesToBigInt(key)
}
// bigIntToLeafKey encodes a big.Int key as Little-Endian bytes padded to
// maxLen, ready to be used as a tree leaf key.
func bigIntToLeafKey(key *big.Int, maxLen int) []byte {
	return BigIntToBytes(maxLen, key)
}
// serializeBigInts converts a slice of big.Int values into the bytes of the
// encoded in a reversible way. It concatenates the bytes of the
// values with the length of each value at the beginning of each value.
func serializeBigInts(bigints []*big.Int) ([]byte, error) {
serializedBigInts := []byte{}
for _, bi := range bigints {
if bi == nil {
return nil, fmt.Errorf("value cannot be nil")
}
biBytes := bi.Bytes()
if len(biBytes) > 255 {
return nil, fmt.Errorf("value byte length cannot exceed 255")
}
val := append([]byte{byte(len(biBytes))}, biBytes...)
serializedBigInts = append(serializedBigInts, val...)
}
return serializedBigInts, nil
}
// deserializeBigInts reverses serializeBigInts: it walks the byte slice
// reading a one-byte length prefix followed by that many value bytes for
// each entry, rebuilding the slice of big.Int values. The input is assumed
// well-formed (as produced by serializeBigInts); a truncated buffer makes
// the slice expression panic, exactly as in the original implementation.
func deserializeBigInts(serializedBigInts []byte) []*big.Int {
	bigints := []*big.Int{}
	// defensive copy of the input, as the original code did
	data := slices.Clone(serializedBigInts)
	for off := 0; off < len(data); {
		n := int(data[off])
		bigints = append(bigints, new(big.Int).SetBytes(data[off+1:off+1+n]))
		off += 1 + n
	}
	return bigints
}
// bigIntsToLeaf encodes a key and its values into the three byte slices the
// tree needs: the padded leaf key, the leaf value (the hash of the values,
// used to build the tree) and the reversible serialization of the values
// that is kept in the valuesdb.
func bigIntsToLeaf(hFn HashFunction, keyLen int, key *big.Int, bigints []*big.Int) (
	bKey []byte, bValue []byte, serializedBigInts []byte, err error,
) {
	if key == nil {
		return nil, nil, nil, fmt.Errorf("key cannot be nil")
	}
	// padded Little-Endian encoding of the key
	bKey = bigIntToLeafKey(key, keyLen)
	// reversible encoding of the values (stored in valuesdb)
	if serializedBigInts, err = serializeBigInts(bigints); err != nil {
		return nil, nil, nil, err
	}
	// hash of the values, used as the leaf value in the tree
	if bValue, err = HashBigInts(hFn, bigints...); err != nil {
		return nil, nil, nil, err
	}
	return bKey, bValue, serializedBigInts, nil
}
// HashBigInts hashes the given big.Int values using the provided hash
// function; the resulting hash can be used as a leaf value in the tree. Each
// value is first passed through hFn.SafeBigInt — presumably encoding it to
// the hash's byte representation; a nil result (e.g. for a nil input value)
// yields an error.
func HashBigInts(hFn HashFunction, values ...*big.Int) ([]byte, error) {
	// allocate with length 0 and capacity len(values): the original code used
	// make([][]byte, len(values)) followed by append, which prepended
	// len(values) nil chunks to the hash input
	chunks := make([][]byte, 0, len(values))
	for _, v := range values {
		value := hFn.SafeBigInt(v)
		if value == nil {
			return nil, fmt.Errorf("value cannot be nil")
		}
		chunks = append(chunks, value)
	}
	return hFn.Hash(chunks...)
}

267
tree_big_test.go Normal file
View File

@@ -0,0 +1,267 @@
package arbo
import (
"crypto/rand"
"math/big"
"testing"
qt "github.com/frankban/quicktest"
"github.com/vocdoni/arbo/memdb"
)
// TestGenCheckProofBigInt builds a tree with random big.Int keys, re-adds the
// same entries in batch, and verifies Merkle proofs for a sample of keys.
func TestGenCheckProofBigInt(t *testing.T) {
	c := qt.New(t)
	tree, err := NewTree(Config{
		Database:     memdb.New(),
		MaxLevels:    160,
		HashFunction: HashFunctionMimc7,
	})
	c.Assert(err, qt.IsNil)
	defer tree.treedb.Close()   //nolint:errcheck
	defer tree.valuesdb.Close() //nolint:errcheck
	// insert 1000 random keys one by one, each with a single value derived
	// from the key (v = 2k)
	keys := []*big.Int{}
	values := [][]*big.Int{}
	for range 1000 {
		k, err := rand.Int(rand.Reader, big.NewInt(100_000_000_000))
		c.Assert(err, qt.IsNil)
		v := new(big.Int).Mul(k, big.NewInt(2))
		values = append(values, []*big.Int{v})
		c.Assert(tree.AddBigInt(k, v), qt.IsNil)
		keys = append(keys, k)
	}
	// re-add the same entries in batch; the keys already exist, and the
	// invalids return is ignored here — presumably this exercises that
	// AddBatchBigInt tolerates duplicates without erroring; TODO confirm intent
	_, err = tree.AddBatchBigInt(keys, values)
	c.Assert(err, qt.IsNil)
	// validate 20 random keys
	for range 20 {
		// pick a random index into keys
		i, err := rand.Int(rand.Reader, big.NewInt(int64(len(keys))))
		c.Assert(err, qt.IsNil)
		k := keys[i.Int64()]
		// proof of existence must be generated for a stored key
		kAux, vAux, siblings, existence, err := tree.GenProofBigInts(k)
		c.Assert(err, qt.IsNil)
		c.Assert(existence, qt.IsTrue)
		root, err := tree.Root()
		c.Assert(err, qt.IsNil)
		// the generated proof must verify against the current root
		verif, err := CheckProof(tree.hashFunction, kAux, vAux, root, siblings)
		c.Assert(err, qt.IsNil)
		c.Check(verif, qt.IsTrue)
	}
}
// TestAddGetBigInt checks the AddBigInt/GetBigInt round trip with multiple
// values per key, plus the error paths: missing key, nil key, duplicate key.
func TestAddGetBigInt(t *testing.T) {
	c := qt.New(t)
	tree, err := NewTree(Config{
		Database:     memdb.New(),
		MaxLevels:    256,
		HashFunction: HashFunctionPoseidon,
	})
	c.Assert(err, qt.IsNil)
	defer tree.treedb.Close()   //nolint:errcheck
	defer tree.valuesdb.Close() //nolint:errcheck
	// Add multiple key-value pairs with large random big ints
	keys := make([]*big.Int, 100)
	values := make([][]*big.Int, 100)
	for i := range 100 {
		// random keys/values are drawn below 2^25
		k, err := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 25))
		c.Assert(err, qt.IsNil)
		keys[i] = k
		// Create multiple random values for each key
		v1, err := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 25))
		c.Assert(err, qt.IsNil)
		v2, err := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 25))
		c.Assert(err, qt.IsNil)
		values[i] = []*big.Int{v1, v2}
		c.Assert(tree.AddBigInt(k, v1, v2), qt.IsNil)
		// Verify retrieval: the original key and both values must round-trip
		retrievedK, retrievedVs, err := tree.GetBigInt(k)
		c.Assert(err, qt.IsNil)
		c.Check(retrievedK.Cmp(k), qt.Equals, 0)
		c.Assert(len(retrievedVs), qt.Equals, 2)
		c.Check(retrievedVs[0].Cmp(v1), qt.Equals, 0)
		c.Check(retrievedVs[1].Cmp(v2), qt.Equals, 0)
	}
	// Test non-existent key: lookup must fail
	nonExistentKey, err := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 25))
	c.Assert(err, qt.IsNil)
	_, _, err = tree.GetBigInt(nonExistentKey)
	c.Check(err, qt.IsNotNil)
	// Test nil key: lookup must fail
	_, _, err = tree.GetBigInt(nil)
	c.Check(err, qt.IsNotNil)
	// Test adding duplicate key: insert must fail
	err = tree.AddBigInt(keys[0], values[0]...)
	c.Check(err, qt.IsNotNil)
}
// TestUpdateBigInt adds entries, updates half of them with fresh values and
// verifies the new values round-trip; also checks the error paths for a
// non-existent key and a nil key.
func TestUpdateBigInt(t *testing.T) {
	c := qt.New(t)
	tree, err := NewTree(Config{
		Database:     memdb.New(),
		MaxLevels:    256,
		HashFunction: HashFunctionPoseidon,
	})
	c.Assert(err, qt.IsNil)
	defer tree.treedb.Close()   //nolint:errcheck
	defer tree.valuesdb.Close() //nolint:errcheck
	// Store keys for later updates
	keys := make([]*big.Int, 50)
	values := make([][]*big.Int, 50)
	// Add entries with large random big ints (below 2^25)
	for i := range 50 {
		k, err := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 25))
		c.Assert(err, qt.IsNil)
		keys[i] = k
		v1, err := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 25))
		c.Assert(err, qt.IsNil)
		v2, err := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 25))
		c.Assert(err, qt.IsNil)
		values[i] = []*big.Int{v1, v2}
		c.Assert(tree.AddBigInt(k, v1, v2), qt.IsNil)
	}
	// Update the first half of the entries with new random values
	for i := range 25 {
		k := keys[i]
		newV1, err := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 25))
		c.Assert(err, qt.IsNil)
		newV2, err := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 25))
		c.Assert(err, qt.IsNil)
		c.Assert(tree.UpdateBigInt(k, newV1, newV2), qt.IsNil)
		// Verify update: the stored values must now be the new ones
		_, retrievedVs, err := tree.GetBigInt(k)
		c.Assert(err, qt.IsNil)
		c.Assert(len(retrievedVs), qt.Equals, 2)
		c.Check(retrievedVs[0].Cmp(newV1), qt.Equals, 0)
		c.Check(retrievedVs[1].Cmp(newV2), qt.Equals, 0)
	}
	// Test updating non-existent key: must fail
	nonExistentKey, err := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 25))
	c.Assert(err, qt.IsNil)
	err = tree.UpdateBigInt(nonExistentKey, big.NewInt(1))
	c.Check(err, qt.IsNotNil)
	// Test updating with nil key: must fail
	err = tree.UpdateBigInt(nil, big.NewInt(1))
	c.Check(err, qt.IsNotNil)
}
// TestAddBatchBigInt inserts a batch of random big-int key/value pairs with
// AddBatchBigInt, verifies a sample of the stored entries, and checks the
// error/edge cases: mismatched key/value lengths, an empty batch and nil
// inputs.
func TestAddBatchBigInt(t *testing.T) {
	c := qt.New(t)
	tree, err := NewTree(Config{
		Database:     memdb.New(),
		MaxLevels:    256,
		HashFunction: HashFunctionPoseidon,
	})
	c.Assert(err, qt.IsNil)
	defer tree.treedb.Close()   //nolint:errcheck
	defer tree.valuesdb.Close() //nolint:errcheck
	// Prepare batch data with large random big ints
	batchSize := 1000
	keys := make([]*big.Int, batchSize)
	values := make([][]*big.Int, batchSize)
	for i := range batchSize {
		k, err := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 25))
		c.Assert(err, qt.IsNil)
		keys[i] = k
		// Create multiple random values for each key
		v1, err := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 25))
		c.Assert(err, qt.IsNil)
		v2, err := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 25))
		c.Assert(err, qt.IsNil)
		values[i] = []*big.Int{v1, v2}
	}
	// Add batch
	invalids, err := tree.AddBatchBigInt(keys, values)
	c.Assert(err, qt.IsNil)
	c.Check(len(invalids), qt.Equals, 0)
	// Verify the first 50 entries. (The previous `i % batchSize` indexing
	// was a no-op since i < batchSize; this loop deterministically samples
	// the head of the batch.)
	for i := range 50 {
		_, retrievedVs, err := tree.GetBigInt(keys[i])
		c.Assert(err, qt.IsNil)
		c.Assert(len(retrievedVs), qt.Equals, 2)
		c.Check(retrievedVs[0].Cmp(values[i][0]), qt.Equals, 0)
		c.Check(retrievedVs[1].Cmp(values[i][1]), qt.Equals, 0)
	}
	// Mismatched key/value lengths must be rejected
	_, err = tree.AddBatchBigInt(keys[:10], values[:5])
	c.Check(err, qt.IsNotNil)
	// An empty batch is a no-op with no invalids
	invalids, err = tree.AddBatchBigInt([]*big.Int{}, [][]*big.Int{})
	c.Assert(err, qt.IsNil)
	c.Check(len(invalids), qt.Equals, 0)
	// Nil slices behave like an empty batch
	invalids, err = tree.AddBatchBigInt(nil, nil)
	c.Assert(err, qt.IsNil)
	c.Check(len(invalids), qt.Equals, 0)
}
func BenchmarkAddBatchBigInt(b *testing.B) {
// Prepare batch data with large random big ints
batchSize := 1000
keys := make([]*big.Int, batchSize)
values := make([][]*big.Int, batchSize)
for i := range batchSize {
keys[i], _ = rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 25))
v1, _ := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 25))
v2, _ := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 25))
values[i] = []*big.Int{v1, v2}
}
b.Run("Poseidon", func(b *testing.B) {
benchmarkAddBatchBigInt(b, HashFunctionPoseidon, keys, values)
})
b.Run("Sha256", func(b *testing.B) {
benchmarkAddBatchBigInt(b, HashFunctionSha256, keys, values)
})
}
// benchmarkAddBatchBigInt runs one AddBatchBigInt call per benchmark
// iteration against a fresh in-memory tree built with the given hash
// function; tree creation and teardown are intentionally included in the
// timed region, matching the other batch benchmarks in this file.
func benchmarkAddBatchBigInt(b *testing.B, hashFunc HashFunction, keys []*big.Int, values [][]*big.Int) {
	c := qt.New(b)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		tree, err := NewTree(Config{
			Database:     memdb.New(),
			MaxLevels:    140,
			HashFunction: hashFunc,
		})
		c.Assert(err, qt.IsNil)
		if _, err := tree.AddBatchBigInt(keys, values); err != nil {
			b.Fatal(err)
		}
		tree.treedb.Close()   //nolint:errcheck
		tree.valuesdb.Close() //nolint:errcheck
	}
}

View File

@@ -73,10 +73,12 @@ func TestAddTestVectors(t *testing.T) {
func testAdd(c *qt.C, hashFunc HashFunction, testVectors []string) {
database, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree, err := NewTree(Config{Database: database, MaxLevels: 256,
HashFunction: hashFunc})
tree, err := NewTree(Config{
Database: database, MaxLevels: 256,
HashFunction: hashFunc,
})
c.Assert(err, qt.IsNil)
defer tree.db.Close() //nolint:errcheck
defer tree.treedb.Close() //nolint:errcheck
root, err := tree.Root()
c.Assert(err, qt.IsNil)
@@ -106,10 +108,12 @@ func TestAddBatch(t *testing.T) {
c := qt.New(t)
database, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree, err := NewTree(Config{Database: database, MaxLevels: 256,
HashFunction: HashFunctionPoseidon})
tree, err := NewTree(Config{
Database: database, MaxLevels: 256,
HashFunction: HashFunctionPoseidon,
})
c.Assert(err, qt.IsNil)
defer tree.db.Close() //nolint:errcheck
defer tree.treedb.Close() //nolint:errcheck
bLen := 32
for i := 0; i < 1000; i++ {
@@ -125,10 +129,12 @@ func TestAddBatch(t *testing.T) {
database, err = pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree2, err := NewTree(Config{Database: database, MaxLevels: 256,
HashFunction: HashFunctionPoseidon})
tree2, err := NewTree(Config{
Database: database, MaxLevels: 256,
HashFunction: HashFunctionPoseidon,
})
c.Assert(err, qt.IsNil)
defer tree2.db.Close() //nolint:errcheck
defer tree2.treedb.Close() //nolint:errcheck
var keys, values [][]byte
for i := 0; i < 1000; i++ {
@@ -149,10 +155,12 @@ func TestAddDifferentOrder(t *testing.T) {
c := qt.New(t)
database1, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree1, err := NewTree(Config{Database: database1, MaxLevels: 256,
HashFunction: HashFunctionPoseidon})
tree1, err := NewTree(Config{
Database: database1, MaxLevels: 256,
HashFunction: HashFunctionPoseidon,
})
c.Assert(err, qt.IsNil)
defer tree1.db.Close() //nolint:errcheck
defer tree1.treedb.Close() //nolint:errcheck
bLen := 32
for i := 0; i < 16; i++ {
@@ -165,10 +173,12 @@ func TestAddDifferentOrder(t *testing.T) {
database2, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree2, err := NewTree(Config{Database: database2, MaxLevels: 256,
HashFunction: HashFunctionPoseidon})
tree2, err := NewTree(Config{
Database: database2, MaxLevels: 256,
HashFunction: HashFunctionPoseidon,
})
c.Assert(err, qt.IsNil)
defer tree2.db.Close() //nolint:errcheck
defer tree2.treedb.Close() //nolint:errcheck
for i := 16 - 1; i >= 0; i-- {
k := BigIntToBytes(bLen, big.NewInt(int64(i)))
@@ -191,10 +201,12 @@ func TestAddRepeatedIndex(t *testing.T) {
c := qt.New(t)
database, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree, err := NewTree(Config{Database: database, MaxLevels: 256,
HashFunction: HashFunctionPoseidon})
tree, err := NewTree(Config{
Database: database, MaxLevels: 256,
HashFunction: HashFunctionPoseidon,
})
c.Assert(err, qt.IsNil)
defer tree.db.Close() //nolint:errcheck
defer tree.treedb.Close() //nolint:errcheck
bLen := 32
k := BigIntToBytes(bLen, big.NewInt(int64(3)))
@@ -210,10 +222,12 @@ func TestUpdate(t *testing.T) {
c := qt.New(t)
database, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree, err := NewTree(Config{Database: database, MaxLevels: 256,
HashFunction: HashFunctionPoseidon})
tree, err := NewTree(Config{
Database: database, MaxLevels: 256,
HashFunction: HashFunctionPoseidon,
})
c.Assert(err, qt.IsNil)
defer tree.db.Close() //nolint:errcheck
defer tree.treedb.Close() //nolint:errcheck
bLen := 32
k := BigIntToBytes(bLen, big.NewInt(int64(20)))
@@ -264,10 +278,12 @@ func TestAux(t *testing.T) { // TODO split in proper tests
c := qt.New(t)
database, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree, err := NewTree(Config{Database: database, MaxLevels: 256,
HashFunction: HashFunctionPoseidon})
tree, err := NewTree(Config{
Database: database, MaxLevels: 256,
HashFunction: HashFunctionPoseidon,
})
c.Assert(err, qt.IsNil)
defer tree.db.Close() //nolint:errcheck
defer tree.treedb.Close() //nolint:errcheck
bLen := 32
k := BigIntToBytes(bLen, big.NewInt(int64(1)))
@@ -304,10 +320,12 @@ func TestGet(t *testing.T) {
c := qt.New(t)
database, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree, err := NewTree(Config{Database: database, MaxLevels: 256,
HashFunction: HashFunctionPoseidon})
tree, err := NewTree(Config{
Database: database, MaxLevels: 256,
HashFunction: HashFunctionPoseidon,
})
c.Assert(err, qt.IsNil)
defer tree.db.Close() //nolint:errcheck
defer tree.treedb.Close() //nolint:errcheck
bLen := 32
for i := 0; i < 10; i++ {
@@ -330,17 +348,21 @@ func TestBitmapBytes(t *testing.T) {
b := []byte{15}
bits := bytesToBitmap(b)
c.Assert(bits, qt.DeepEquals, []bool{true, true, true, true,
false, false, false, false})
c.Assert(bits, qt.DeepEquals, []bool{
true, true, true, true,
false, false, false, false,
})
b2 := bitmapToBytes(bits)
c.Assert(b2, qt.DeepEquals, b)
b = []byte{0, 15, 50}
bits = bytesToBitmap(b)
c.Assert(bits, qt.DeepEquals, []bool{false, false, false,
c.Assert(bits, qt.DeepEquals, []bool{
false, false, false,
false, false, false, false, false, true, true, true, true,
false, false, false, false, false, true, false, false, true,
true, false, false})
true, false, false,
})
b2 = bitmapToBytes(bits)
c.Assert(b2, qt.DeepEquals, b)
@@ -429,13 +451,15 @@ func TestGenProofAndVerify(t *testing.T) {
c := qt.New(t)
database, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree, err := NewTree(Config{Database: database, MaxLevels: 256,
HashFunction: HashFunctionPoseidon})
tree, err := NewTree(Config{
Database: database, MaxLevels: 256,
HashFunction: HashFunctionPoseidon,
})
c.Assert(err, qt.IsNil)
defer tree.db.Close() //nolint:errcheck
defer tree.treedb.Close() //nolint:errcheck
bLen := 32
for i := 0; i < 10; i++ {
for i := range 1000 {
k := BigIntToBytes(bLen, big.NewInt(int64(i)))
v := BigIntToBytes(bLen, big.NewInt(int64(i*2)))
if err := tree.Add(k, v); err != nil {
@@ -470,10 +494,12 @@ func testDumpAndImportDump(t *testing.T, inFile bool) {
c := qt.New(t)
database1, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree1, err := NewTree(Config{Database: database1, MaxLevels: 256,
HashFunction: HashFunctionPoseidon})
tree1, err := NewTree(Config{
Database: database1, MaxLevels: 256,
HashFunction: HashFunctionPoseidon,
})
c.Assert(err, qt.IsNil)
defer tree1.db.Close() //nolint:errcheck
defer tree1.treedb.Close() //nolint:errcheck
bLen := 32
for i := 0; i < 16; i++ {
@@ -499,10 +525,12 @@ func testDumpAndImportDump(t *testing.T, inFile bool) {
database2, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree2, err := NewTree(Config{Database: database2, MaxLevels: 256,
HashFunction: HashFunctionPoseidon})
tree2, err := NewTree(Config{
Database: database2, MaxLevels: 256,
HashFunction: HashFunctionPoseidon,
})
c.Assert(err, qt.IsNil)
defer tree2.db.Close() //nolint:errcheck
defer tree2.treedb.Close() //nolint:errcheck
if inFile {
f, err := os.Open(filepath.Clean(fileName))
@@ -527,10 +555,12 @@ func TestRWMutex(t *testing.T) {
c := qt.New(t)
database, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree, err := NewTree(Config{Database: database, MaxLevels: 256,
HashFunction: HashFunctionPoseidon})
tree, err := NewTree(Config{
Database: database, MaxLevels: 256,
HashFunction: HashFunctionPoseidon,
})
c.Assert(err, qt.IsNil)
defer tree.db.Close() //nolint:errcheck
defer tree.treedb.Close() //nolint:errcheck
bLen := 32
var keys, values [][]byte
@@ -611,14 +641,18 @@ func TestAddBatchFullyUsed(t *testing.T) {
database1, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree1, err := NewTree(Config{Database: database1, MaxLevels: 4,
HashFunction: HashFunctionPoseidon})
tree1, err := NewTree(Config{
Database: database1, MaxLevels: 4,
HashFunction: HashFunctionPoseidon,
})
c.Assert(err, qt.IsNil)
database2, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree2, err := NewTree(Config{Database: database2, MaxLevels: 4,
HashFunction: HashFunctionPoseidon})
tree2, err := NewTree(Config{
Database: database2, MaxLevels: 4,
HashFunction: HashFunctionPoseidon,
})
c.Assert(err, qt.IsNil)
var keys, values [][]byte
@@ -672,8 +706,10 @@ func TestSetRoot(t *testing.T) {
c := qt.New(t)
database, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree, err := NewTree(Config{Database: database, MaxLevels: 256,
HashFunction: HashFunctionPoseidon})
tree, err := NewTree(Config{
Database: database, MaxLevels: 256,
HashFunction: HashFunctionPoseidon,
})
c.Assert(err, qt.IsNil)
expectedRoot := "13742386369878513332697380582061714160370929283209286127733983161245560237407"
@@ -729,8 +765,10 @@ func TestSnapshot(t *testing.T) {
c := qt.New(t)
database, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree, err := NewTree(Config{Database: database, MaxLevels: 256,
HashFunction: HashFunctionPoseidon})
tree, err := NewTree(Config{
Database: database, MaxLevels: 256,
HashFunction: HashFunctionPoseidon,
})
c.Assert(err, qt.IsNil)
// fill the tree
@@ -780,10 +818,12 @@ func TestGetFromSnapshotExpectArboErrKeyNotFound(t *testing.T) {
database, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree, err := NewTree(Config{Database: database, MaxLevels: 256,
HashFunction: HashFunctionPoseidon})
tree, err := NewTree(Config{
Database: database, MaxLevels: 256,
HashFunction: HashFunctionPoseidon,
})
c.Assert(err, qt.IsNil)
defer tree.db.Close() //nolint:errcheck
defer tree.treedb.Close() //nolint:errcheck
bLen := 32
k := BigIntToBytes(bLen, big.NewInt(int64(3)))
@@ -803,8 +843,10 @@ func TestKeyLen(t *testing.T) {
c.Assert(err, qt.IsNil)
// maxLevels is 100, keyPath length = ceil(maxLevels/8) = 13
maxLevels := 100
tree, err := NewTree(Config{Database: database, MaxLevels: maxLevels,
HashFunction: HashFunctionBlake2b})
tree, err := NewTree(Config{
Database: database, MaxLevels: maxLevels,
HashFunction: HashFunctionBlake2b,
})
c.Assert(err, qt.IsNil)
// expect no errors when adding a key of only 4 bytes (when the
@@ -836,8 +878,10 @@ func TestKeyLen(t *testing.T) {
maxLevels = 32
database, err = pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree, err = NewTree(Config{Database: database, MaxLevels: maxLevels,
HashFunction: HashFunctionBlake2b})
tree, err = NewTree(Config{
Database: database, MaxLevels: maxLevels,
HashFunction: HashFunctionBlake2b,
})
c.Assert(err, qt.IsNil)
maxKeyLen := int(math.Ceil(float64(maxLevels) / float64(8))) //nolint:gomnd
@@ -907,8 +951,10 @@ func TestKeyLenBiggerThan32(t *testing.T) {
maxLevels := 264
database, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree, err := NewTree(Config{Database: database, MaxLevels: maxLevels,
HashFunction: HashFunctionBlake2b})
tree, err := NewTree(Config{
Database: database, MaxLevels: maxLevels,
HashFunction: HashFunctionBlake2b,
})
c.Assert(err, qt.IsNil)
bLen := 33
@@ -950,10 +996,12 @@ func benchmarkAdd(b *testing.B, hashFunc HashFunction, ks, vs [][]byte) {
c := qt.New(b)
database, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree, err := NewTree(Config{Database: database, MaxLevels: 140,
HashFunction: hashFunc})
tree, err := NewTree(Config{
Database: database, MaxLevels: 140,
HashFunction: hashFunc,
})
c.Assert(err, qt.IsNil)
defer tree.db.Close() //nolint:errcheck
defer tree.treedb.Close() //nolint:errcheck
for i := 0; i < len(ks); i++ {
if err := tree.Add(ks[i], vs[i]); err != nil {

View File

@@ -1,6 +1,7 @@
package arbo
import (
"math"
"math/big"
)
@@ -17,12 +18,30 @@ func SwapEndianness(b []byte) []byte {
func BigIntToBytes(blen int, bi *big.Int) []byte {
// TODO make the length depending on the tree.hashFunction.Len()
b := make([]byte, blen)
copy(b[:], SwapEndianness(bi.Bytes()))
copy(b[:], ExplicitZero(SwapEndianness(bi.Bytes())))
return b[:]
}
// BytesToBigInt converts a byte array in Little-Endian representation into
// *big.Int
func BytesToBigInt(b []byte) *big.Int {
return new(big.Int).SetBytes(SwapEndianness(b))
return new(big.Int).SetBytes(ExplicitZero(SwapEndianness(b)))
}
// ExplicitZero returns a byte slice with a single zero byte if the input slice
// is empty. This is useful for ensuring that a zero value is always returned
// instead of a nil slice, for example for big.Int zero values.
func ExplicitZero(b []byte) []byte {
if len(b) == 0 {
return []byte{0}
}
return b
}
// MaxKeyLen returns the maximum length of the key in bytes. It is calculated
// as the minimum between the length of the hash function provided and the
// number of levels in a tree provided divided by 8. This is used to limit the
// size of the keys in a tree.
func MaxKeyLen(levels, hashLen int) int {
return min(int(math.Ceil(float64(levels)/float64(8))), hashLen)
}

5
vt.go
View File

@@ -434,7 +434,7 @@ func (n *node) add(p *params, currLvl int, leaf *node) error {
switch t {
case vtMid:
if leaf.path[currLvl] {
//right
// right
if n.r == nil {
// empty sub-node, add the leaf here
n.r = leaf
@@ -566,7 +566,8 @@ func flp2(n int) int {
// computeHashes computes the hashes under the node from which is called the
// method. Returns an array of key-values to store in the db
func (n *node) computeHashes(currLvl, maxLvl int, p *params, pairs [][2][]byte) (
[][2][]byte, error) {
[][2][]byte, error,
) {
if n == nil || currLvl >= maxLvl {
// no need to compute any hash
return pairs, nil

View File

@@ -19,8 +19,10 @@ func testVirtualTree(c *qt.C, maxLevels int, keys, values [][]byte) {
// normal tree, to have an expected root value
database, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree, err := NewTree(Config{Database: database, MaxLevels: maxLevels,
HashFunction: HashFunctionSha256})
tree, err := NewTree(Config{
Database: database, MaxLevels: maxLevels,
HashFunction: HashFunctionSha256,
})
c.Assert(err, qt.IsNil)
for i := 0; i < len(keys); i++ {
err := tree.Add(keys[i], values[i])
@@ -125,8 +127,10 @@ func TestVirtualTreeAddBatch(t *testing.T) {
// normal tree, to have an expected root value
database, err := pebbledb.New(db.Options{Path: c.TempDir()})
c.Assert(err, qt.IsNil)
tree, err := NewTree(Config{Database: database, MaxLevels: maxLevels,
HashFunction: HashFunctionBlake2b})
tree, err := NewTree(Config{
Database: database, MaxLevels: maxLevels,
HashFunction: HashFunctionBlake2b,
})
c.Assert(err, qt.IsNil)
for i := 0; i < len(keys); i++ {
err := tree.Add(keys[i], values[i])