Fix G115 staticcheck fails on main (#3820)

* fix nosec g115
* fix gofmt
* fix all staticcheck errors
* delegate more range checking to util funcs
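For reference, a minimal sketch of the call-site pattern (packIndex is a hypothetical example; utils.ToUint16 is one of the checked helpers added to prover/utils in this change):

    package example

    import (
        "encoding/binary"

        "github.com/consensys/zkevm-monorepo/prover/utils"
    )

    // packIndex illustrates the pattern applied throughout: instead of an
    // unchecked narrowing conversion (flagged by gosec as G115), the range
    // check is delegated to a helper that panics on overflow.
    func packIndex(i int) [2]byte {
        var tmp [2]byte
        // before: binary.BigEndian.PutUint16(tmp[:], uint16(i+1))
        binary.BigEndian.PutUint16(tmp[:], utils.ToUint16(i+1))
        return tmp
    }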

---------

Co-authored-by: Arya Tabaie <15056835+Tabaie@users.noreply.github.com>
Arya Tabaie authored on 2024-08-22 07:05:48 -05:00, committed by GitHub
parent f29199e7bf
commit 7629c25e09
49 changed files with 181 additions and 97 deletions

View File

@@ -270,7 +270,7 @@ func PackOffsets(unpacked []bool) []byte {
if b {
// @alex: issue #2261 requires the prover to start counting from 1
// and not from zero for the offsets.
binary.BigEndian.PutUint16(tmp[:], uint16(i+1))
binary.BigEndian.PutUint16(tmp[:], utils.ToUint16(i+1)) // #nosec G115 -- Check above precludes overflowing
resWrite.Write(tmp[:])
}
}

View File

@@ -86,7 +86,7 @@ func makePiProof(cfg *config.Config, cf *CollectedFields) (plonk.Proof, error) {
LastFinalizedL1RollingHashMessageNumber: cf.LastFinalizedL1RollingHashMessageNumber,
L1RollingHashMessageNumber: cf.L1RollingHashMessageNumber,
L2MsgRootHashes: cf.L2MsgRootHashes,
L2MsgMerkleTreeDepth: int(cf.L2MsgTreeDepth),
L2MsgMerkleTreeDepth: utils.ToInt(cf.L2MsgTreeDepth),
},
})
if err != nil {

View File

@@ -1,6 +1,7 @@
package bridge
import (
"github.com/consensys/zkevm-monorepo/prover/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
@@ -20,7 +21,7 @@ func BatchReceptionIndex(logs []types.Log, l2BridgeAddress common.Address) []uin
}
// Push the txIndex
res = append(res, uint16(log.TxIndex))
res = append(res, utils.ToUint16(log.TxIndex))
}
return res
}

View File

@@ -87,7 +87,7 @@ func CraftProverOutput(
inspectStateManagerTraces(req, &rsp)
// Value of the first blocks
rsp.FirstBlockNumber = int(blocks[0].NumberU64())
rsp.FirstBlockNumber = utils.ToInt(blocks[0].NumberU64())
// Set the public input as part of the response immediately so that we can
// easily debug issues during the proving.

View File

@@ -67,12 +67,12 @@ func (g *RandGen) TxRlp(numTxs int) ([]string, []uint16) {
rlpTxs[i] = g.AnyTypeTxRlp()
}
receptionPos := []uint16{}
var receptionPos []uint16
// overwrite one of the tx with a receipt confirmation one
txPos := g.Intn(numTxs)
rlpTxs[txPos] = g.MsgReceiptConfirmationTx()
receptionPos = append(receptionPos, uint16(txPos))
receptionPos = append(receptionPos, utils.ToUint16(txPos))
return rlpTxs, receptionPos
}

View File

@@ -143,7 +143,7 @@ func newElementFromVars(api frontend.API, x [2]frontend.Variable) *emulated.Elem
}
func bitReverse(n, logN int) int {
return int(bits.Reverse64(uint64(n)) >> (64 - logN))
return utils.ToInt(bits.Reverse64(uint64(n)) >> (64 - logN))
}
func bitReverseSlice[K interface{}](list []K) {
@@ -183,8 +183,8 @@ func packCrumbsEmulated(api frontend.API, words []frontend.Variable) []*emulated
}
}
nbLimbs := int(fieldParams.NbLimbs())
limbNbWords := int(fieldParams.BitsPerLimb()) / bitsPerWord
nbLimbs := int(fieldParams.NbLimbs()) // #nosec G115 -- Small number of limbs expected
limbNbWords := int(fieldParams.BitsPerLimb()) / bitsPerWord // #nosec G115 -- Expected to be 64
if uint(limbNbWords*bitsPerWord) != fieldParams.BitsPerLimb() {
panic("bitsPerWord must divide bitsPerLimb")
}

View File

@@ -3,6 +3,7 @@ package internal
import (
"errors"
"math/big"
"math/bits"
hint "github.com/consensys/gnark/constraint/solver"
"github.com/consensys/gnark/frontend"
@@ -50,6 +51,9 @@ func (r *RangeChecker) AssertLessThan(bound uint, c ...frontend.Variable) {
// IsLessThan returns a variable that is 1 if 0 ≤ c < bound, 0 otherwise
// TODO perf @Tabaie see if we can get away with a weaker contract, where the return value is 0 iff 0 ≤ c < bound
func (r *RangeChecker) IsLessThan(bound uint, c frontend.Variable) frontend.Variable {
if bound >= 1<<(bits.UintSize/2-1) {
panic("possible overflow")
}
switch bound {
case 1:
return r.api.IsZero(c)
@@ -58,9 +62,11 @@ func (r *RangeChecker) IsLessThan(bound uint, c frontend.Variable) frontend.Vari
if bound%2 != 0 {
panic("odd bounds not yet supported")
}
// #nosec G115 -- bound < MaxInt - 1 ⇒ -bound > MinInt
v := plonk.EvaluateExpression(r.api, c, c, -int(bound-1), 0, 1, 0) // toRead² - (bound-1)× toRead
res := v
for i := uint(1); i < bound/2; i++ {
// #nosec G115 -- i*(bound-i-1) < bound² ≤ MaxUint/4 < MaxInt; the conversion is safe
res = plonk.EvaluateExpression(r.api, res, v, int(i*(bound-i-1)), 0, 1, 0)
}
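For reference, the new guard makes both conversions above provably safe. Writing w for bits.UintSize, the panic enforces bound < 2^(w/2-1), so for every 1 ≤ i < bound/2:

    i*(bound-i-1) < bound² < 2^(w-2) < MaxInt

and bound-1 < 2^(w/2-1) ≤ MaxInt, hence -(bound-1) > MinInt.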

View File

@@ -22,12 +22,12 @@ func Decompress(api frontend.API, c []frontend.Variable, cLength frontend.Variab
api.AssertIsLessOrEqual(cLength, len(c)) // sanity check
// size-related "constants"
wordNbBits := int(level)
wordNbBits := int(level) // #nosec G115 -- Not expected to run on an architecture with byte-long words
shortBackRefType, longBackRefType, dictBackRefType := lzss.InitBackRefTypes(len(dict), level) // init the dictionary and backref types; only needed for the constants below
shortBrNbWords := int(shortBackRefType.NbBitsBackRef) / wordNbBits
longBrNbWords := int(longBackRefType.NbBitsBackRef) / wordNbBits
dictBrNbWords := int(dictBackRefType.NbBitsBackRef) / wordNbBits
byteNbWords := uint(8 / wordNbBits)
shortBrNbWords := int(shortBackRefType.NbBitsBackRef) / wordNbBits // #nosec G115 -- Not expected to run on an architecture with byte-long words
longBrNbWords := int(longBackRefType.NbBitsBackRef) / wordNbBits // #nosec G115 -- Not expected to run on an architecture with byte-long words
dictBrNbWords := int(dictBackRefType.NbBitsBackRef) / wordNbBits // #nosec G115 -- Not expected to run on an architecture with byte-long words
byteNbWords := uint(8 / wordNbBits) // #nosec G115 -- Guaranteed to be positive
// check header: version and compression level
const (
@@ -133,14 +133,14 @@ func initAddrTable(api frontend.API, bytes, c []frontend.Variable, wordNbBits in
}
}
readers := make([]*compress.NumReader, len(backrefs))
delimAndLenNbWords := int(8+backrefs[0].NbBitsLength) / wordNbBits
delimAndLenNbWords := int(8+backrefs[0].NbBitsLength) / wordNbBits // #nosec G115 -- not a problem on any architecture with word size > 8 bits
for i := range backrefs {
var readerC []frontend.Variable
if len(c) >= delimAndLenNbWords {
readerC = c[delimAndLenNbWords:]
}
readers[i] = compress.NewNumReader(api, readerC, int(backrefs[i].NbBitsAddress), wordNbBits)
readers[i] = compress.NewNumReader(api, readerC, int(backrefs[i].NbBitsAddress), wordNbBits) // #nosec G115 -- not a problem on any architecture with word size > 8 bits
}
res := logderivlookup.New(api)

View File

@@ -79,6 +79,7 @@ func randomizeInts(slices ...[]int) {
}
neg := 1 - 2*int(buff[0]>>7)
buff[0] &= 127
// #nosec G115 -- sign bit is set to be zero above
slice[i] = int(binary.BigEndian.Uint64(buff[:])) * neg
}
}

View File

@@ -12,6 +12,7 @@ import (
"github.com/consensys/zkevm-monorepo/prover/utils"
"github.com/stretchr/testify/assert"
"golang.org/x/exp/constraints"
"math"
"math/big"
"os"
"strings"
@@ -187,7 +188,10 @@ func RandIntN(n int) int {
if err != nil {
panic(err)
}
return int(binary.BigEndian.Uint64(b[:]) % uint64(n))
if n > math.MaxInt {
panic("RandIntN: n too large")
}
return int(binary.BigEndian.Uint64(b[:]) % uint64(n)) // #nosec G115 -- Above check precludes an overflow
}
func RandIntSliceN(length, n int) []int {

View File

@@ -445,7 +445,7 @@ func checksumSubSlicesHint(_ *big.Int, ins, outs []*big.Int) error {
subLastPoints := ins[:len(outs)]
slice := ins[len(outs):]
sliceAt := func(i uint64) []byte {
sliceAt := func(i int64) []byte {
res := slice[i].Bytes()
if len(res) == 0 {
return []byte{0} // the mimc hash impl ignores empty input
@@ -455,12 +455,12 @@ func checksumSubSlicesHint(_ *big.Int, ins, outs []*big.Int) error {
hsh := hash.MIMC_BLS12_377.New()
var (
first uint64
first int64
i int
)
for ; i < len(outs); i++ {
last := subLastPoints[i].Uint64()
if int(last) >= len(slice) {
last := subLastPoints[i].Int64()
if last >= int64(len(slice)) {
break
}
@@ -795,8 +795,8 @@ func partitionSliceHint(_ *big.Int, ins, outs []*big.Int) error {
}
for i := range s {
b := int(indicators[i].Uint64())
if b < 0 || b >= len(subs) || !indicators[i].IsUint64() {
b := indicators[i].Int64()
if b < 0 || b >= int64(len(subs)) || !indicators[i].IsUint64() {
return errors.New("indicator out of range")
}
subs[b][0] = s[i]

View File

@@ -5,6 +5,7 @@ import (
"encoding/json"
"errors"
"fmt"
"math"
"net/http"
"os"
"runtime"
@@ -66,7 +67,10 @@ func fetchAndInspect(cmd *cobra.Command, args []string) error {
for {
<-time.Tick(tickTime)
processedRangeCount := atomic.LoadUint64(&processedRangeCount)
processedBlockCount := blockRange * int(processedRangeCount)
if blockRange < 0 || processedRangeCount > uint64(math.MaxInt/blockRange) { // #nosec G115 -- Checked for overflow
panic("overflow")
}
processedBlockCount := blockRange * int(processedRangeCount) // #nosec G115 -- Checked for overflow
totalBlockToProcess := numRangeArgs * blockRange
logrus.Infof("processed %v blocks of %v to process", processedBlockCount, totalBlockToProcess)

View File

@@ -55,7 +55,7 @@ func NewConfigFromFile(path string) (*Config, error) {
}
// Set the logging level
logrus.SetLevel(logrus.Level(cfg.LogLevel))
logrus.SetLevel(logrus.Level(cfg.LogLevel)) // #nosec G115 -- overflow not possible (uint8 -> uint32)
// Extract the Layer2.MsgSvcContract address from the string
addr, err := common.NewMixedcaseAddressFromString(cfg.Layer2.MsgSvcContractStr)

View File

@@ -8,7 +8,7 @@ import (
)
// bytesAsBlockPtrUnsafe unsafely cast a slice into an array. The caller is
// responsible for checking the length of the slice is at least as large as a
// responsible for checking the length of the slice is at least as large as
// a block.
func bytesAsBlockPtrUnsafe(s []byte) *Block {
return (*Block)(unsafe.Pointer(&s[0]))
@@ -17,7 +17,7 @@ func bytesAsBlockPtrUnsafe(s []byte) *Block {
// castDigest casts a 4-uplets of uint64 into a Keccak digest
func castDigest(a0, a1, a2, a3 uint64) Digest {
resU64 := [4]uint64{a0, a1, a2, a3}
return *(*Digest)(unsafe.Pointer(&resU64[0]))
return *(*Digest)(unsafe.Pointer(&resU64[0])) // #nosec G115 -- TODO look into this. Seems impossible to overflow here
}
// cycShf is an alias for [bits.RotateLeft64]. The function performs a bit

View File

@@ -4,6 +4,7 @@ import (
"bytes"
"encoding/binary"
"io"
"math"
"runtime"
"sync"
@@ -120,7 +121,10 @@ func (s *Key) Hash(v []field.Element) []field.Element {
// unmarshal the result
var rlen [4]byte
binary.BigEndian.PutUint32(rlen[:], uint32(len(sum)/fr.Bytes))
if len(sum) > math.MaxUint32*fr.Bytes {
panic("slice too long")
}
binary.BigEndian.PutUint32(rlen[:], uint32(len(sum)/fr.Bytes)) // #nosec G115 -- Overflow checked
reader := io.MultiReader(bytes.NewReader(rlen[:]), bytes.NewReader(sum))
var result fr.Vector
_, err := result.ReadFrom(reader)

View File

@@ -110,7 +110,7 @@ func gnarkInterpolate(api frontend.API, p []frontend.Variable, z frontend.Variab
res = 0
lagranges := gnarkComputeLagrangeAtZ(api, z, gen, cardinality)
for i := 0; i < int(cardinality); i++ {
for i := uint64(0); i < cardinality; i++ {
tmp := api.Mul(lagranges[i], p[i])
res = api.Add(res, tmp)
}
@@ -147,7 +147,7 @@ func gnarkComputeLagrangeAtZ(api frontend.API, z frontend.Variable, gen fr.Eleme
var accOmega fr.Element
accOmega.SetOne()
for i := 1; i < int(cardinality); i++ {
for i := uint64(1); i < cardinality; i++ {
res[i] = api.Mul(res[i-1], gen) // res[i] <- ω * res[i-1]
res[i] = api.Mul(res[i], accZetaMinusOmegai) // res[i] <- res[i]*(ζ-ωⁱ⁻¹)
accOmega.Mul(&accOmega, &gen) // accOmega <- accOmega * ω

View File

@@ -43,7 +43,7 @@ func cosetID(r, numCoset int) (cosetID int) {
cosetID64 := uint64(maxDomain / r * numCoset)
cosetID64 = bits.Reverse64(cosetID64)
cosetID64 >>= 64 - field.RootOfUnityOrder
return int(cosetID64)
return utils.ToInt(cosetID64)
}
/*

View File

@@ -14,7 +14,7 @@ import (
// a and b are destroyed during the operation
func MultModXMinus1(domain *fft.Domain, res, a, b []field.Element) {
// All the item must be of the right size
if len(a) != len(b) || len(a) != len(res) || len(a) != int(domain.Cardinality) {
if len(a) != len(b) || len(a) != len(res) || uint64(len(a)) != domain.Cardinality {
panic(
fmt.Sprintf("All items should have the right size %v %v %v %v",
domain.Cardinality, len(res), len(a), len(b)),
@@ -37,7 +37,7 @@ func MultModXMinus1(domain *fft.Domain, res, a, b []field.Element) {
func MultModXnMinus1Precomputed(domain *fft.Domain, res, a, precomp []field.Element) {
// All the item must be of the right size
if len(a) != len(precomp) || len(a) != len(res) || len(a) != int(domain.Cardinality) {
if len(a) != len(precomp) || len(a) != len(res) || uint64(len(a)) != domain.Cardinality {
panic(
fmt.Sprintf("All items should have the right size %v %v %v %v",
domain.Cardinality, len(res), len(a), len(precomp)),

View File

@@ -32,7 +32,10 @@ func NewDomain(m int) *Domain {
// Generator = FinerGenerator^2 has order x
expo := uint64(1 << (maxOrderInt - order))
domain.Generator.Exp(field.RootOfUnity, big.NewInt(int64(expo))) // order x
var expoBig big.Int
expoBig.SetUint64(expo)
// order x
domain.Generator.Exp(field.RootOfUnity, &expoBig)
domain.GeneratorInv.Inverse(&domain.Generator)
domain.CardinalityInv.SetUint64(uint64(m)).Inverse(&domain.CardinalityInv)
@@ -53,7 +56,7 @@ Equipe the current domain with a custom coset obtained as explained in
the doc of `GetCoset`
*/
func (dom *Domain) WithCustomCoset(r, numcoset int) *Domain {
n := int(dom.Cardinality)
n := utils.ToInt(dom.Cardinality)
dom.CosetTable,
dom.CosetTableInv,
dom.CosetTableReversed,

View File

@@ -0,0 +1,13 @@
package field
// This file is NOT autogenerated
import "math"
func ToInt(e *Element) int {
n := e.Uint64()
if !e.IsUint64() || n > math.MaxInt {
panic("out of range")
}
return int(n) // #nosec G115 -- Checked for overflow
}

View File

@@ -150,7 +150,7 @@ func (ctx *compilationCtx) addRangeCheckConstraint() {
numRcR = smartvectors.Sum(rcRValue)
numRcO = smartvectors.Sum(rcOValue)
totalNumRangeCheckedValues = numRcL.Uint64() + numRcR.Uint64() + numRcO.Uint64()
totalNumRangeCheckedValuesPadded = utils.NextPowerOfTwo(int(totalNumRangeCheckedValues))
totalNumRangeCheckedValuesPadded = utils.NextPowerOfTwo(totalNumRangeCheckedValues)
)
if totalNumRangeCheckedValues == 0 {
@@ -169,7 +169,7 @@ func (ctx *compilationCtx) addRangeCheckConstraint() {
l = ctx.Columns.L[i]
r = ctx.Columns.R[i]
o = ctx.Columns.O[i]
rangeChecked = ctx.comp.InsertCommit(round, ctx.colIDf("RANGE_CHECKED_%v", i), totalNumRangeCheckedValuesPadded)
rangeChecked = ctx.comp.InsertCommit(round, ctx.colIDf("RANGE_CHECKED_%v", i), utils.ToInt(totalNumRangeCheckedValuesPadded))
)
ctx.Columns.RangeChecked[i] = rangeChecked

View File

@@ -49,6 +49,11 @@ func (p Aggregation) Sum(hsh hash.Hash) []byte {
hsh.Write(b[:])
}
writeUint := func(i uint) {
b := utils.FmtUint32Bytes(i)
hsh.Write(b[:])
}
hsh.Reset()
for _, hex := range p.L2MsgRootHashes {
@@ -60,14 +65,14 @@ func (p Aggregation) Sum(hsh hash.Hash) []byte {
hsh.Reset()
writeHex(p.ParentAggregationFinalShnarf)
writeHex(p.FinalShnarf)
writeInt(int(p.ParentAggregationLastBlockTimestamp))
writeInt(int(p.FinalTimestamp))
writeInt(int(p.LastFinalizedBlockNumber))
writeInt(int(p.FinalBlockNumber))
writeUint(p.ParentAggregationLastBlockTimestamp)
writeUint(p.FinalTimestamp)
writeUint(p.LastFinalizedBlockNumber)
writeUint(p.FinalBlockNumber)
writeHex(p.LastFinalizedL1RollingHash)
writeHex(p.L1RollingHash)
writeInt(int(p.LastFinalizedL1RollingHashMessageNumber))
writeInt(int(p.L1RollingHashMessageNumber))
writeUint(p.LastFinalizedL1RollingHashMessageNumber)
writeUint(p.L1RollingHashMessageNumber)
writeInt(p.L2MsgMerkleTreeDepth)
hsh.Write(l2Msgs)

View File

@@ -74,12 +74,12 @@ func (n *Node) addParent(p nodeID) {
// posInLevel returns the position in the level from a NodeID
func (i nodeID) posInLevel() int {
res := i & ((1 << 32) - 1)
return int(res)
return utils.ToInt(res)
}
// level returns the level from a NodeID
func (i nodeID) level() int {
return int(i >> 32)
return utils.ToInt(i >> 32)
}
// newNodeID returns the node id given its level and its position in a level

View File

@@ -21,7 +21,7 @@ func PartialChecksumBatchesPackedHint(maxNbBatches int) solver.Hint {
return errors.New("expected exactly maxNbBatches outputs")
}
nbBatches := int(ins[0].Uint64())
nbBatches := ins[0].Int64()
ends := utils.BigsToInts(ins[1 : 1+maxNbBatches])
in := append(utils.BigsToBytes(ins[1+maxNbBatches:]), make([]byte, 31)...) // pad with 31 bytes to avoid out of range panic TODO try removing this

View File

@@ -17,8 +17,10 @@ import (
// Encode the uint64 into an hexstring representing it as a u256 in bigendian form
func HexHashUint64(v ...uint64) string {
buffer := bytes.Buffer{}
var I big.Int
for i := range v {
bytes := big.NewInt(int64(v[i])).Bytes()
I.SetUint64(v[i])
bytes := I.Bytes()
bytes = append(make([]byte, 32-len(bytes)), bytes...)
buffer.Write(bytes)
}
@@ -33,6 +35,15 @@ func FmtInt32Bytes(v int) [32]byte {
return res
}
func FmtUint32Bytes(v uint) [32]byte {
var res [32]byte
var i big.Int
i.SetUint64(uint64(v))
b := i.Bytes()
copy(res[32-len(b):], b)
return res
}
// Format an integer as a 32 bytes hex string
func FmtIntHex32Bytes(v int) string {
bytes := FmtInt32Bytes(v)

View File

@@ -60,7 +60,8 @@ func ReadInt64On32Bytes(r io.Reader) (x, n_ int64, err error) {
if n < 0 {
panic("we are only reading 8 bits so this should not overflow")
}
return int64(xU64), 32, err
xU64 &= 0x7fffffffffffffff // TODO delete this if negative numbers are allowed
return int64(xU64), 32, err // #nosec G115 -- above line precludes overflowing
}
// Big int are assumed to fit on 32 bytes and are written as a single

View File

@@ -4,7 +4,9 @@ import (
"crypto/sha256"
"encoding/hex"
"fmt"
"golang.org/x/exp/constraints"
"io"
"math"
"math/big"
"reflect"
)
@@ -67,9 +69,9 @@ Taken from :
https://github.com/protolambda/zrnt/blob/v0.13.2/eth2/util/math/math_util.go#L58
The function panics if the input is more than 2**62 as this causes overflow
*/
func NextPowerOfTwo[T ~int](in T) T {
if in > 1<<62 {
panic("Input is too large")
func NextPowerOfTwo[T ~int64 | ~uint64 | ~uintptr | ~int | ~uint](in T) T {
if in < 0 || uint64(in) > 1<<62 {
panic("Input out of range")
}
v := in
v--
@@ -187,7 +189,36 @@ func BigsToBytes(ins []*big.Int) []byte {
func BigsToInts(ints []*big.Int) []int {
res := make([]int, len(ints))
for i := range ints {
res[i] = int(ints[i].Uint64())
u := ints[i].Uint64()
res[i] = int(u) // #nosec G115 - check below
if !ints[i].IsUint64() || uint64(res[i]) != u {
panic("overflow")
}
}
return res
}
// ToInt converts a uint, uint64 or int64 to an int, panicking on overflow.
// Due to its use of generics, it is inefficient to use in loops that run a "cryptographic" number of iterations. Use type-specific functions in such cases.
func ToInt[T ~uint | ~uint64 | ~int64](i T) int {
if i > math.MaxInt {
panic("overflow")
}
return int(i) // #nosec G115 -- Checked for overflow
}
// ToUint64 converts a signed integer into a uint64, panicking on negative values.
// Due to its use of generics, it is inefficient to use in loops that run a "cryptographic" number of iterations. Use type-specific functions in such cases.
func ToUint64[T constraints.Signed](i T) uint64 {
if i < 0 {
panic("negative")
}
return uint64(i)
}
func ToUint16[T ~int | ~uint](i T) uint16 {
if i < 0 || i > math.MaxUint16 {
panic("out of range")
}
return uint16(i) // #nosec G115 -- Checked for overflow
}
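A small usage sketch of the widened helpers (padToPow2 and nValues are illustrative names; the pattern mirrors the addRangeCheckConstraint call sites updated earlier in this diff):

    package example

    import "github.com/consensys/zkevm-monorepo/prover/utils"

    // padToPow2 rounds a count up to the next power of two and returns it as an
    // int; both helpers panic instead of silently overflowing.
    func padToPow2(nValues uint64) int {
        padded := utils.NextPowerOfTwo(nValues) // generic: now accepts uint64 directly
        return utils.ToInt(padded)              // checked narrowing, no inline #nosec needed
    }

As the doc comments note, the generic helpers are meant for cold paths; loops that run a "cryptographic" number of iterations should keep type-specific conversions.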

View File

@@ -261,10 +261,8 @@ func (imp *importation) Run(run *wizard.ProverRuntime) {
iab.IsNewHash.PushZero()
}
var (
indexInt = int(index[i].Uint64())
nBytesInt = int(nBytes[i].Uint64())
)
indexInt := field.ToInt(&index[i])
nBytesInt := field.ToInt(&nBytes[i])
currByteSize += nBytesInt

View File

@@ -150,8 +150,8 @@ func (h *hashBaseConversion) Run(
for i := range h.Inputs.LimbsHiB {
for row := 0; row < size; row++ {
limbsLo[i].PushInt(int(BaseBToUint4(v[i][row], keccakf.BaseB)))
limbsHi[i].PushInt(int(BaseBToUint4(w[i][row], keccakf.BaseB)))
limbsLo[i].PushInt(utils.ToInt(BaseBToUint4(v[i][row], keccakf.BaseB)))
limbsHi[i].PushInt(utils.ToInt(BaseBToUint4(w[i][row], keccakf.BaseB)))
}
limbsHi[i].PadAndAssign(run, field.Zero())

View File

@@ -2,6 +2,7 @@ package base_conversion
import (
"encoding/binary"
"math"
"github.com/consensys/zkevm-monorepo/prover/maths/common/smartvectors"
"github.com/consensys/zkevm-monorepo/prover/maths/field"
@@ -44,12 +45,12 @@ func NewLookupTables(comp *wizard.CompiledIOP) lookUpTables {
// convert slices of 16bits to keccak.BaseX (from uint16-BE to baseA_LE/baseB_LE)
func baseConversionKeccakBaseX() (uint16Col, baseACol, baseBCol smartvectors.SmartVector) {
var u, v, w []field.Element
for i := 0; i < Power16; i++ {
for i := 0; i < math.MaxUint16; i++ {
u = append(u, field.NewElement(uint64(i)))
bs := make([]byte, 2)
// from uint16-BE to baseA_LE/baseB_LE
binary.LittleEndian.PutUint16(bs, uint16(i))
binary.LittleEndian.PutUint16(bs, uint16(i)) // #nosec G115 -- Bounded by loop condition
v = append(v, bytesToBaseX(bs, &keccakf.BaseAFr))
w = append(w, bytesToBaseX(bs, &keccakf.BaseBFr))
}

View File

@@ -7,7 +7,7 @@ import (
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/symbolic"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/common"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/keccak/base_conversion.go"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/keccak/base_conversion"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/keccak/keccakf"
"github.com/consensys/zkevm-monorepo/prover/zkevm/prover/hash/packing/dedicated/spaghettifier"
)

View File

@@ -76,7 +76,7 @@ func valBaseXToBaseY(
realSize := IntExp(uint64(baseX), numChunkBaseX)
bxDirty := make([]field.Element, realSize)
byClean := make([]field.Element, realSize)
colSize := utils.NextPowerOfTwo(int(realSize))
colSize := utils.NextPowerOfTwo(realSize)
// Runtime assertion to protect the structure of the tables
if numChunkBaseX != 4 {
@@ -86,17 +86,17 @@ func valBaseXToBaseY(
}
for l3 := 0; l3 < baseX; l3++ {
d3 := l3 * int(IntExp(uint64(baseX), 3))
c3 := ((l3 >> cleanBit) & 1) * int(IntExp(uint64(baseY), 3))
d3 := l3 * utils.ToInt(IntExp(utils.ToUint64(baseX), 3))
c3 := ((l3 >> cleanBit) & 1) * utils.ToInt(IntExp(uint64(baseY), 3))
for l2 := 0; l2 < baseX; l2++ {
d2 := l2 * int(IntExp(uint64(baseX), 2))
c2 := ((l2 >> cleanBit) & 1) * int(IntExp(uint64(baseY), 2))
d2 := l2 * utils.ToInt(IntExp(uint64(baseX), 2))
c2 := ((l2 >> cleanBit) & 1) * utils.ToInt(IntExp(uint64(baseY), 2))
for l1 := 0; l1 < baseX; l1++ {
d1 := l1 * baseX
c1 := ((l1 >> cleanBit) & 1) * baseY
for l0 := 0; l0 < baseX; l0++ {
d0 := l0
c0 := ((l0 >> cleanBit) & 1)
c0 := (l0 >> cleanBit) & 1
// Coincidentally, dirty1 ranges from 0 to realSize in
// increasing order.
dirtyx := d3 + d2 + d1 + d0
@@ -110,8 +110,8 @@ func valBaseXToBaseY(
// Since, Wizard requires powers-of-two vector length we zero-pad them. Note that
// (0, 0) does constitute a valid entry in the mapping already.
return smartvectors.RightZeroPadded(bxDirty, colSize),
smartvectors.RightZeroPadded(byClean, colSize)
return smartvectors.RightZeroPadded(bxDirty, utils.ToInt(colSize)),
smartvectors.RightZeroPadded(byClean, utils.ToInt(colSize))
}
// Returns a lookup table for the round constant of keccak given a maximal

View File

@@ -14,13 +14,13 @@ func TestLookupsBaseAToBaseB(t *testing.T) {
baseADirty, baseBClean := valBaseXToBaseY(BaseA, BaseB, 0)
for i := uint64(0); i < BaseAPow4; i++ {
for i := 0; i < BaseAPow4; i++ {
// baseADirty is equal to i
dirtyA := baseADirty.Get(int(i))
dirtyA := baseADirty.Get(i)
assert.Equal(t, i, dirtyA.Uint64(), baseADirty, "base A dirty")
// cleanB is consistent with the declaration that dirty
cleanB := baseBClean.Get(int(i))
cleanB := baseBClean.Get(i)
assert.Equal(t, BaseXToU64(dirtyA, &BaseAFr), BaseXToU64(cleanB, &BaseBFr), "base B clean")
if t.Failed() {
@@ -33,13 +33,13 @@ func TestLookupsBaseBToBaseA(t *testing.T) {
baseBDirty, baseAClean := valBaseXToBaseY(BaseB, BaseA, 1)
for i := uint64(0); i < BaseBPow4; i++ {
for i := 0; i < BaseBPow4; i++ {
// baseBDirty is equal to i
dirtyB := baseBDirty.Get(int(i))
dirtyB := baseBDirty.Get(i)
assert.Equal(t, i, dirtyB.Uint64(), "base B dirty")
// cleanA is consistent with the declaration that dirty
cleanA := baseAClean.Get(int(i))
cleanA := baseAClean.Get(i)
assert.Equal(
t,
BaseXToU64(dirtyB, &BaseBFr, 1),

View File

@@ -275,7 +275,7 @@ func (c *piChiIota) assign(
pos := aIotaBaseBSliced[x][y][k][r].Uint64()
// Coincidentally, the values of base2Dirty are 0, 1, 3
// so we can use the bare value to perform the lookup.
lookedUp := base1Clean.Get(int(pos))
lookedUp := base1Clean.Get(utils.ToInt(pos))
// Sanity-check : are we getting the same value with the
// conversion
{

View File

@@ -142,7 +142,7 @@ func DecomposeFr(f field.Element, base int, nb int) (res []field.Element) {
// internal testing only.
func BaseXToU64(x field.Element, base *field.Element, optBitP0s ...int) (res uint64) {
res = 0
decomposedF := DecomposeFr(x, int(base.Uint64()), 64)
decomposedF := DecomposeFr(x, field.ToInt(base), 64)
bitPos := 0
if len(optBitP0s) > 0 {

View File

@@ -106,7 +106,7 @@ func (b *block) Assign(run *wizard.ProverRuntime) {
accNumLane[size-1] = isActive[size-1]
// accNumLanes[i] = accNumLane[i+1]*(1-isBlockComplete[i+1]) + isLaneActive[i]
for row := size - 2; row >= 0; row-- {
if int(accNumLane[row+1].Uint64()) == nbOfLanesPerBlock {
if field.ToInt(&accNumLane[row+1]) == nbOfLanesPerBlock {
accNumLane[row] = field.One()
} else {
accNumLane[row].Add(&isActive[row], &accNumLane[row+1])

View File

@@ -252,11 +252,11 @@ func (decomposed *decomposition) assignMainColumns(run *wizard.ProverRuntime) {
// i-th row of DecomposedLen
var lenRow []int
for j := 0; j < decomposed.nbSlices; j++ {
lenRow = append(lenRow, int(decomposedLen[j][i].Uint64()))
lenRow = append(lenRow, utils.ToInt(decomposedLen[j][i].Uint64()))
}
// populate DecomposedLimb
decomposedLimb := decomposeByLength(cleanLimbs[i], int(nByte[i].Uint64()), lenRow)
decomposedLimb := decomposeByLength(cleanLimbs[i], field.ToInt(&nByte[i]), lenRow)
for j := 0; j < decomposed.nbSlices; j++ {
decomposedLimbs[j][i] = decomposedLimb[j]
@@ -307,11 +307,11 @@ func cutUpToMax(nByte []field.Element, nbChunk, max int) (b [][]field.Element) {
}
s := 0
for j := 0; j < nbChunk; j++ {
s = s + int(a[j].Uint64())
s += field.ToInt(&a[j])
b[j] = append(b[j], a[j])
}
if s != int(nByte[i].Uint64()) {
if s != field.ToInt(&nByte[i]) {
utils.Panic("decomposition of nByte is not correct; nByte %v, s %v", nByte[i].Uint64(), s)
}

View File

@@ -110,7 +110,7 @@ func (ctx *cleaningCtx) assignCleanLimbs(run *wizard.ProverRuntime) {
for pos := 0; pos < len(limbs); pos++ {
// Extract the limb, which is left aligned to the 16-th byte
limbSerialized = limbs[pos].Bytes()
nbyte := int(nByte[pos].Uint64())
nbyte := field.ToInt(&nByte[pos])
res := limbSerialized[LEFT_ALIGNMENT : LEFT_ALIGNMENT+nbyte]
cleanLimbs.PushField(*(f.SetBytes(res)))
}

View File

@@ -133,14 +133,15 @@ func (lc *lengthConsistency) Run(run *wizard.ProverRuntime) {
// getZeroOnes receives n and outputs the pattern (0,..,0,1,..,1) such that exactly n elements are 1.
func getZeroOnes(n field.Element, max int) (a []field.Element) {
if n.Uint64() > uint64(max) {
utils.Panic("%v should be smaller than %v", n.Uint64(), max)
_n := field.ToInt(&n)
if _n > max {
utils.Panic("%v should be smaller than %v", _n, max)
}
for j := 0; j < max-int(n.Uint64()); j++ {
for j := 0; j < max-_n; j++ {
a = append(a, field.Zero())
}
for i := max - int(n.Uint64()); i < max; i++ {
for i := max - _n; i < max; i++ {
a = append(a, field.One())
}

View File

@@ -251,7 +251,7 @@ func (l *laneRepacking) getBlocks(run *wizard.ProverRuntime, inp PackingInput) (
var isFirstBlockOfHash []int
isFirstBlockOfHash = append(isFirstBlockOfHash, 1)
for pos := 0; pos < len(limbs); pos++ {
nbyte := int(nBytes[pos].Uint64())
nbyte := field.ToInt(&nBytes[pos])
s = s + nbyte
// Extract the limb, which is left aligned to the 16-th byte

View File

@@ -1200,7 +1200,7 @@ func AssignExecutionDataCollector(run *wizard.ProverRuntime,
vect := NewExecutionDataCollectorVectors(size)
fetchedAbsTxIdMax := rlp.AbsTxNumMax.GetColAssignmentAt(run, 0)
absTxIdMax := int(fetchedAbsTxIdMax.Uint64())
absTxIdMax := field.ToInt(&fetchedAbsTxIdMax)
absTxCt := 1
rlpCt := 0
@@ -1256,7 +1256,7 @@ func AssignExecutionDataCollector(run *wizard.ProverRuntime,
totalCt++
// iterate through transactions
for txIdInBlock := 1; txIdInBlock <= int(totalTxBlock); txIdInBlock++ {
for txIdInBlock := uint64(1); txIdInBlock <= totalTxBlock; txIdInBlock++ {
// load the sender address Hi
fetchedAddrHi := txnData.FromHi.GetColAssignmentAt(run, absTxCt-1)

View File

@@ -163,9 +163,9 @@ func AssignBlockTxnMetadata(run *wizard.ProverRuntime, btm BlockTxnMetadata, td
// set the absolute IDs, firstAbsTxId and lastAbsTxId for the block
firstAbsTxId[counter].SetInt64(ctAbsTxNum)
lastAbsTxId[counter].Set(&firstAbsTxId[counter])
integerNoOfTxBlock := int64(fetchTotalNoTxnBlock.Uint64())
integerNoOfTxBlock := int64(field.ToInt(&fetchTotalNoTxnBlock))
lastAbsTxId[counter].SetInt64(ctAbsTxNum + integerNoOfTxBlock - 1)
// increas ctAbsTxNum counter
// increase ctAbsTxNum counter
ctAbsTxNum += integerNoOfTxBlock
// set the counter
counter++

View File

@@ -585,14 +585,13 @@ func (stitcher *Stitcher) Finalize(sampleType int) *StateManagerVectors {
stateManagerVectors.LastAOCBlock = append(stateManagerVectors.LastAOCBlock, dummyVector...)
minDeplBlockFragment := make([]field.Element, len(stoHist.addressHI))
for index := range minDeplBlockFragment {
block := stoHist.blockNumber[index].Uint64()
minDeplBlockFragment[index].SetInt64(int64(accHist.minDeplBlock[int(block)]))
minDeplBlock := accHist.minDeplBlock[field.ToInt(&stoHist.blockNumber[index])]
minDeplBlockFragment[index].SetInt64(int64(minDeplBlock))
}
stateManagerVectors.MinDeploymentBlock = append(stateManagerVectors.MinDeploymentBlock, minDeplBlockFragment...)
maxDeplBlockFragment := make([]field.Element, len(stoHist.addressHI))
for index := range maxDeplBlockFragment {
block := stoHist.blockNumber[index].Uint64()
maxDeplBlockFragment[index].SetInt64(int64(accHist.maxDeplBlock[int(block)]))
maxDeplBlockFragment[index].SetInt64(int64(accHist.maxDeplBlock[field.ToInt(&stoHist.blockNumber[index])]))
}
stateManagerVectors.MaxDeploymentBlock = append(stateManagerVectors.MaxDeploymentBlock, maxDeplBlockFragment...)
}

View File

@@ -2,6 +2,7 @@ package statesummary
import (
"github.com/consensys/zkevm-monorepo/prover/protocol/wizard"
"github.com/consensys/zkevm-monorepo/prover/utils"
"github.com/consensys/zkevm-monorepo/prover/utils/types"
)
@@ -71,7 +72,7 @@ func (sr *ArithmetizationStorageParser) Process() {
mapKey := KeysAndBlock{
address: address.Bytes(),
storageKey: types.FullBytes32(keyBytes),
block: int(block),
block: utils.ToInt(block),
}
valueHI := sr.scp.ValueHINext.GetColAssignmentAt(sr.run, index)