Compare commits

6 Commits

Author SHA1 Message Date
Péter Garamvölgyi
2962fa4b0e batch proposer: only sleep if we failed to create batch (#388) 2023-03-22 22:16:31 +08:00

colin
5b7ee9e55c fix(batch proposer): propose up to propose batch limit (#383) 2023-03-22 20:39:25 +08:00
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>

maskpp
0b8a737090 fix(integration test): fix bug in integration test (#386) 2023-03-22 18:18:13 +08:00
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>

Lawliet-Chan
ceb406b68b feat(roller): add dump proof (#289) 2023-03-22 13:46:09 +08:00
Co-authored-by: xinran chen <lawliet@xinran-m1x.local>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>

HAOYUatHZ
1a29797ee1 fix(CI): temporarily disable integration test (#385) 2023-03-22 12:42:52 +08:00

Xi Lin
19f74075a1 fix(contracts): add missing payable in L2 ERC721/1155 gateway (#382) 2023-03-22 12:20:12 +08:00
17 changed files with 110 additions and 54 deletions

View File

@@ -40,6 +40,8 @@ type BatchProposerConfig struct {
BatchBlocksLimit uint64 `json:"batch_blocks_limit"`
// Commit tx calldata size limit in bytes, target to cap the gas use of commit tx at 2M gas
CommitTxCalldataSizeLimit uint64 `json:"commit_tx_calldata_size_limit"`
// Commit tx calldata min size limit in bytes
CommitTxCalldataMinSize uint64 `json:"commit_tx_calldata_min_size,omitempty"`
// The public input hash config
PublicInputConfig *types.PublicInputHashConfig `json:"public_input_config"`
}
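
The added `CommitTxCalldataMinSize` maps to an optional `commit_tx_calldata_min_size` JSON key (note the `omitempty` tag), so existing configs keep working. A minimal sketch of how it sits next to the existing limits, using a mirror struct and purely illustrative values (the real `BatchProposerConfig` has more fields):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Mirror of just the fields touched in this change; illustrative only.
type batchProposerConfig struct {
	BatchBlocksLimit          uint64 `json:"batch_blocks_limit"`
	CommitTxCalldataSizeLimit uint64 `json:"commit_tx_calldata_size_limit"`
	CommitTxCalldataMinSize   uint64 `json:"commit_tx_calldata_min_size,omitempty"`
}

func main() {
	raw := []byte(`{
		"batch_blocks_limit": 100,
		"commit_tx_calldata_size_limit": 120000,
		"commit_tx_calldata_min_size": 80000
	}`)
	var cfg batchProposerConfig
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	// If the key is omitted, CommitTxCalldataMinSize stays 0, so the new loop in
	// tryProposeBatch exits after its first pass (the pre-#383 behaviour).
	fmt.Printf("%+v\n", cfg)
}
```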

View File

@@ -80,6 +80,7 @@ type BatchProposer struct {
batchCommitTimeSec uint64
commitCalldataSizeLimit uint64
batchDataBufferSizeLimit uint64
commitCalldataMinSize uint64
proofGenerationFreq uint64
batchDataBuffer []*types.BatchData
@@ -102,6 +103,7 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, rela
batchBlocksLimit: cfg.BatchBlocksLimit,
batchCommitTimeSec: cfg.BatchCommitTimeSec,
commitCalldataSizeLimit: cfg.CommitTxCalldataSizeLimit,
commitCalldataMinSize: cfg.CommitTxCalldataMinSize,
batchDataBufferSizeLimit: 100*cfg.CommitTxCalldataSizeLimit + 1*1024*1024, // @todo: determine the value.
proofGenerationFreq: cfg.ProofGenerationFreq,
piCfg: cfg.PublicInputConfig,
@@ -217,7 +219,7 @@ func (p *BatchProposer) tryProposeBatch() {
p.mutex.Lock()
defer p.mutex.Unlock()
if p.getBatchDataBufferSize() < p.batchDataBufferSizeLimit {
for p.getBatchDataBufferSize() < p.batchDataBufferSizeLimit {
blocks, err := p.orm.GetUnbatchedL2Blocks(
map[string]interface{}{},
fmt.Sprintf("order by number ASC LIMIT %d", p.batchBlocksLimit),
@@ -227,7 +229,18 @@ func (p *BatchProposer) tryProposeBatch() {
return
}
p.proposeBatch(blocks)
batchCreated := p.proposeBatch(blocks)
// while size of batchDataBuffer < commitCalldataMinSize,
// proposer keeps fetching and proposing batches.
if p.getBatchDataBufferSize() >= p.commitCalldataMinSize {
return
}
if !batchCreated {
// wait for watcher to insert l2 traces.
time.Sleep(time.Second)
}
}
}
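
Read together with the next hunk (which makes `proposeBatch` report whether a batch was created), the rewritten loop keeps proposing until at least `commitCalldataMinSize` bytes of commit calldata are buffered, and sleeps only when nothing could be proposed. A toy, self-contained sketch of that control flow — helper names, sizes, and the early return after the sleep are made up for illustration:

```go
package main

import (
	"fmt"
	"time"
)

// Toy stand-ins for the proposer's state; the real code lives on *BatchProposer
// and reads unbatched blocks from the database.
const (
	bufferSizeLimit = 1 << 20 // batchDataBufferSizeLimit
	calldataMinSize = 3000    // commitCalldataMinSize
)

var (
	buffered uint64
	queue    = []uint64{1200, 900, 1500} // calldata sizes of pending batches
)

// proposeBatch consumes one pending batch and reports whether one was created.
func proposeBatch() bool {
	if len(queue) == 0 {
		return false
	}
	buffered += queue[0]
	queue = queue[1:]
	return true
}

// tryProposeBatch mirrors the new control flow: keep proposing until at least
// calldataMinSize of commit calldata is buffered; sleep only when no batch
// could be created (i.e. wait for the watcher to insert more L2 traces).
func tryProposeBatch() {
	for buffered < bufferSizeLimit {
		created := proposeBatch()
		if buffered >= calldataMinSize {
			return
		}
		if !created {
			time.Sleep(time.Second)
			return // toy version: stop here instead of retrying forever
		}
	}
}

func main() {
	tryProposeBatch()
	fmt.Println("buffered calldata bytes:", buffered)
}
```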
@@ -276,9 +289,9 @@ func (p *BatchProposer) tryCommitBatches() {
}
}
func (p *BatchProposer) proposeBatch(blocks []*types.BlockInfo) {
func (p *BatchProposer) proposeBatch(blocks []*types.BlockInfo) bool {
if len(blocks) == 0 {
return
return false
}
if blocks[0].GasUsed > p.batchGasThreshold {
@@ -291,7 +304,7 @@ func (p *BatchProposer) proposeBatch(blocks []*types.BlockInfo) {
bridgeL2BatchesGasCreatedRateMeter.Mark(int64(blocks[0].GasUsed))
bridgeL2BatchesCreatedRateMeter.Mark(1)
}
return
return true
}
if blocks[0].TxNum > p.batchTxNumThreshold {
@@ -304,7 +317,7 @@ func (p *BatchProposer) proposeBatch(blocks []*types.BlockInfo) {
bridgeL2BatchesGasCreatedRateMeter.Mark(int64(blocks[0].GasUsed))
bridgeL2BatchesCreatedRateMeter.Mark(1)
}
return
return true
}
var gasUsed, txNum uint64
@@ -324,7 +337,7 @@ func (p *BatchProposer) proposeBatch(blocks []*types.BlockInfo) {
// if it's not old enough we will skip proposing the batch,
// otherwise we will still propose a batch
if !reachThreshold && blocks[0].BlockTimestamp+p.batchTimeSec > uint64(time.Now().Unix()) {
return
return false
}
if err := p.createBatchForBlocks(blocks); err != nil {
@@ -334,6 +347,8 @@ func (p *BatchProposer) proposeBatch(blocks []*types.BlockInfo) {
bridgeL2BatchesGasCreatedRateMeter.Mark(int64(gasUsed))
bridgeL2BatchesCreatedRateMeter.Mark(int64(len(blocks)))
}
return true
}
func (p *BatchProposer) createBatchForBlocks(blocks []*types.BlockInfo) error {

View File

@@ -4,11 +4,11 @@ ${GOROOT}/bin/bin/gocover-cobertura < coverage.bridge.txt > coverage.bridge.xml
${GOROOT}/bin/bin/gocover-cobertura < coverage.db.txt > coverage.db.xml
${GOROOT}/bin/bin/gocover-cobertura < coverage.common.txt > coverage.common.xml
${GOROOT}/bin/bin/gocover-cobertura < coverage.coordinator.txt > coverage.coordinator.xml
${GOROOT}/bin/bin/gocover-cobertura < coverage.integration.txt > coverage.integration.xml
# ${GOROOT}/bin/bin/gocover-cobertura < coverage.integration.txt > coverage.integration.xml
npx cobertura-merge -o cobertura.xml \
package1=coverage.bridge.xml \
package2=coverage.db.xml \
package3=coverage.common.xml \
package4=coverage.coordinator.xml \
package5=coverage.integration.xml
package4=coverage.coordinator.xml
# package5=coverage.integration.xml

View File

@@ -46,7 +46,10 @@ func (c *Cmd) WaitExit() {
// Send interrupt signal.
c.mu.Lock()
_ = c.cmd.Process.Signal(os.Interrupt)
_, _ = c.cmd.Process.Wait()
// We should use `_ = c.cmd.Process.Wait()` here, but there are some bugs in the coordinator's
// graceful exit, so we use `Kill` as a temporary workaround. Since `WaitExit` is only used in
// integration tests, this doesn't really affect functionality.
_ = c.cmd.Process.Kill()
c.mu.Unlock()
}

View File

@@ -184,7 +184,7 @@ func newTestL1Docker(t *testing.T) ImgInstance {
assert.NoError(t, imgL1geth.Start())
// try 3 times to get chainID until is ok.
utils.TryTimes(3, func() bool {
utils.TryTimes(10, func() bool {
client, _ := ethclient.Dial(imgL1geth.Endpoint())
if client != nil {
if _, err := client.ChainID(context.Background()); err == nil {
@@ -203,7 +203,7 @@ func newTestL2Docker(t *testing.T) ImgInstance {
assert.NoError(t, imgL2geth.Start())
// try 3 times to get chainID until is ok.
utils.TryTimes(3, func() bool {
utils.TryTimes(10, func() bool {
client, _ := ethclient.Dial(imgL2geth.Endpoint())
if client != nil {
if _, err := client.ChainID(context.Background()); err == nil {
@@ -222,7 +222,7 @@ func newTestDBDocker(t *testing.T, driverName string) ImgInstance {
assert.NoError(t, imgDB.Start())
// try 5 times until the db is ready.
utils.TryTimes(5, func() bool {
utils.TryTimes(10, func() bool {
db, _ := sqlx.Open(driverName, imgDB.Endpoint())
if db != nil {
return db.Ping() == nil
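
These bumps simply give the containers more time to come up before the tests give up. For reference, a sketch of what a `TryTimes`-style retry helper does — the project's actual `utils.TryTimes` may differ in its retry interval and return value:

```go
package main

import (
	"fmt"
	"time"
)

// tryTimes calls f up to n times, stopping early once it returns true.
// Sketch only; the real helper's interval and semantics may differ.
func tryTimes(n int, f func() bool) bool {
	for i := 0; i < n; i++ {
		if f() {
			return true
		}
		time.Sleep(500 * time.Millisecond)
	}
	return false
}

func main() {
	attempts := 0
	ok := tryTimes(10, func() bool {
		attempts++
		return attempts >= 3 // e.g. "ChainID answers" or "db.Ping succeeds"
	})
	fmt.Println("ok:", ok, "after", attempts, "attempts")
}
```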

View File

@@ -45,7 +45,6 @@ func (i *ImgDB) Start() error {
if id != "" {
return fmt.Errorf("container already exist, name: %s", i.name)
}
i.cmd.RunCmd(true)
i.running = i.isOk()
if !i.running {
_ = i.Stop()
@@ -106,10 +105,12 @@ func (i *ImgDB) isOk() bool {
}
})
defer i.cmd.UnRegistFunc(keyword)
// Start cmd in parallel.
i.cmd.RunCmd(true)
select {
case <-okCh:
utils.TryTimes(3, func() bool {
utils.TryTimes(20, func() bool {
i.id = GetContainerID(i.name)
return i.id != ""
})

View File

@@ -48,7 +48,6 @@ func (i *ImgGeth) Start() error {
if id != "" {
return fmt.Errorf("container already exist, name: %s", i.name)
}
i.cmd.RunCmd(true)
i.running = i.isOk()
if !i.running {
_ = i.Stop()
@@ -85,10 +84,12 @@ func (i *ImgGeth) isOk() bool {
}
})
defer i.cmd.UnRegistFunc(keyword)
// Start cmd in parallel.
i.cmd.RunCmd(true)
select {
case <-okCh:
utils.TryTimes(3, func() bool {
utils.TryTimes(20, func() bool {
i.id = GetContainerID(i.name)
return i.id != ""
})
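
The same reordering is applied to both the DB and geth images: the readiness-keyword checker is registered first and `RunCmd(true)` is started only afterwards — presumably so early container output can't be missed — and the container-ID lookup now retries up to 20 times. A small self-contained sketch of that register-then-start pattern; the command, keyword, and timeout below are illustrative, not the project's actual values:

```go
package main

import (
	"bufio"
	"fmt"
	"os/exec"
	"strings"
	"time"
)

func main() {
	okCh := make(chan struct{}, 1)
	keyword := "ready to accept connections" // illustrative readiness marker

	cmd := exec.Command("sh", "-c", `echo "database system is ready to accept connections"`)
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		panic(err)
	}

	// 1. Register the output checker before the process starts.
	go func() {
		sc := bufio.NewScanner(stdout)
		for sc.Scan() {
			if strings.Contains(sc.Text(), keyword) {
				okCh <- struct{}{}
				return
			}
		}
	}()

	// 2. Start the command afterwards (the diff moves RunCmd(true) down for the same reason).
	if err := cmd.Start(); err != nil {
		panic(err)
	}

	select {
	case <-okCh:
		fmt.Println("container reported ready")
	case <-time.After(5 * time.Second):
		fmt.Println("timed out waiting for readiness")
	}
	_ = cmd.Wait()
}
```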

View File

@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "alpha-v2.3"
var tag = "alpha-v2.5"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {

View File

@@ -86,7 +86,7 @@ interface IL2ERC1155Gateway {
uint256 tokenId,
uint256 amount,
uint256 gasLimit
) external;
) external payable;
/// @notice Withdraw some ERC1155 NFT to caller's account on layer 1.
/// @param token The address of ERC1155 NFT in layer 2.
@@ -100,7 +100,7 @@ interface IL2ERC1155Gateway {
uint256 tokenId,
uint256 amount,
uint256 gasLimit
) external;
) external payable;
/// @notice Batch withdraw a list of ERC1155 NFT to caller's account on layer 1.
/// @param token The address of ERC1155 NFT in layer 2.
@@ -112,7 +112,7 @@ interface IL2ERC1155Gateway {
uint256[] memory tokenIds,
uint256[] memory amounts,
uint256 gasLimit
) external;
) external payable;
/// @notice Batch withdraw a list of ERC1155 NFT to caller's account on layer 1.
/// @param token The address of ERC1155 NFT in layer 2.
@@ -126,7 +126,7 @@ interface IL2ERC1155Gateway {
uint256[] memory tokenIds,
uint256[] memory amounts,
uint256 gasLimit
) external;
) external payable;
/// @notice Complete ERC1155 deposit from layer 1 to layer 2 and send NFT to recipient's account in layer 2.
/// @dev Requirements:

View File

@@ -76,7 +76,7 @@ interface IL2ERC721Gateway {
address token,
uint256 tokenId,
uint256 gasLimit
) external;
) external payable;
/// @notice Withdraw some ERC721 NFT to caller's account on layer 1.
/// @param token The address of ERC721 NFT in layer 2.
@@ -88,7 +88,7 @@ interface IL2ERC721Gateway {
address to,
uint256 tokenId,
uint256 gasLimit
) external;
) external payable;
/// @notice Batch withdraw a list of ERC721 NFT to caller's account on layer 1.
/// @param token The address of ERC721 NFT in layer 2.
@@ -98,7 +98,7 @@ interface IL2ERC721Gateway {
address token,
uint256[] memory tokenIds,
uint256 gasLimit
) external;
) external payable;
/// @notice Batch withdraw a list of ERC721 NFT to caller's account on layer 1.
/// @param token The address of ERC721 NFT in layer 2.
@@ -110,7 +110,7 @@ interface IL2ERC721Gateway {
address to,
uint256[] memory tokenIds,
uint256 gasLimit
) external;
) external payable;
/// @notice Complete ERC721 deposit from layer 1 to layer 2 and send NFT to recipient's account in layer 2.
/// @dev Requirements:

View File

@@ -57,7 +57,7 @@ contract L2ERC1155Gateway is OwnableUpgradeable, ERC1155HolderUpgradeable, Scrol
uint256 _tokenId,
uint256 _amount,
uint256 _gasLimit
) external override {
) external payable override {
_withdrawERC1155(_token, msg.sender, _tokenId, _amount, _gasLimit);
}
@@ -68,7 +68,7 @@ contract L2ERC1155Gateway is OwnableUpgradeable, ERC1155HolderUpgradeable, Scrol
uint256 _tokenId,
uint256 _amount,
uint256 _gasLimit
) external override {
) external payable override {
_withdrawERC1155(_token, _to, _tokenId, _amount, _gasLimit);
}
@@ -78,7 +78,7 @@ contract L2ERC1155Gateway is OwnableUpgradeable, ERC1155HolderUpgradeable, Scrol
uint256[] calldata _tokenIds,
uint256[] calldata _amounts,
uint256 _gasLimit
) external override {
) external payable override {
_batchWithdrawERC1155(_token, msg.sender, _tokenIds, _amounts, _gasLimit);
}
@@ -89,7 +89,7 @@ contract L2ERC1155Gateway is OwnableUpgradeable, ERC1155HolderUpgradeable, Scrol
uint256[] calldata _tokenIds,
uint256[] calldata _amounts,
uint256 _gasLimit
) external override {
) external payable override {
_batchWithdrawERC1155(_token, _to, _tokenIds, _amounts, _gasLimit);
}
@@ -173,7 +173,7 @@ contract L2ERC1155Gateway is OwnableUpgradeable, ERC1155HolderUpgradeable, Scrol
);
// 3. Send message to L2ScrollMessenger.
IL2ScrollMessenger(messenger).sendMessage(counterpart, msg.value, _message, _gasLimit);
IL2ScrollMessenger(messenger).sendMessage{value: msg.value}(counterpart, 0, _message, _gasLimit);
emit WithdrawERC1155(_l1Token, _token, msg.sender, _to, _tokenId, _amount);
}
@@ -216,7 +216,7 @@ contract L2ERC1155Gateway is OwnableUpgradeable, ERC1155HolderUpgradeable, Scrol
);
// 3. Send message to L2ScrollMessenger.
IL2ScrollMessenger(messenger).sendMessage{value: msg.value}(counterpart, msg.value, _message, _gasLimit);
IL2ScrollMessenger(messenger).sendMessage{value: msg.value}(counterpart, 0, _message, _gasLimit);
emit BatchWithdrawERC1155(_l1Token, _token, msg.sender, _to, _tokenIds, _amounts);
}

View File

@@ -56,7 +56,7 @@ contract L2ERC721Gateway is OwnableUpgradeable, ERC721HolderUpgradeable, ScrollG
address _token,
uint256 _tokenId,
uint256 _gasLimit
) external override {
) external payable override {
_withdrawERC721(_token, msg.sender, _tokenId, _gasLimit);
}
@@ -66,7 +66,7 @@ contract L2ERC721Gateway is OwnableUpgradeable, ERC721HolderUpgradeable, ScrollG
address _to,
uint256 _tokenId,
uint256 _gasLimit
) external override {
) external payable override {
_withdrawERC721(_token, _to, _tokenId, _gasLimit);
}
@@ -75,7 +75,7 @@ contract L2ERC721Gateway is OwnableUpgradeable, ERC721HolderUpgradeable, ScrollG
address _token,
uint256[] calldata _tokenIds,
uint256 _gasLimit
) external override {
) external payable override {
_batchWithdrawERC721(_token, msg.sender, _tokenIds, _gasLimit);
}
@@ -85,7 +85,7 @@ contract L2ERC721Gateway is OwnableUpgradeable, ERC721HolderUpgradeable, ScrollG
address _to,
uint256[] calldata _tokenIds,
uint256 _gasLimit
) external override {
) external payable override {
_batchWithdrawERC721(_token, _to, _tokenIds, _gasLimit);
}
@@ -166,7 +166,7 @@ contract L2ERC721Gateway is OwnableUpgradeable, ERC721HolderUpgradeable, ScrollG
);
// 3. Send message to L2ScrollMessenger.
IL2ScrollMessenger(messenger).sendMessage(counterpart, msg.value, _message, _gasLimit);
IL2ScrollMessenger(messenger).sendMessage{value: msg.value}(counterpart, 0, _message, _gasLimit);
emit WithdrawERC721(_l1Token, _token, msg.sender, _to, _tokenId);
}
@@ -205,7 +205,7 @@ contract L2ERC721Gateway is OwnableUpgradeable, ERC721HolderUpgradeable, ScrollG
);
// 3. Send message to L2ScrollMessenger.
IL2ScrollMessenger(messenger).sendMessage{value: msg.value}(counterpart, msg.value, _message, _gasLimit);
IL2ScrollMessenger(messenger).sendMessage{value: msg.value}(counterpart, 0, _message, _gasLimit);
emit BatchWithdrawERC721(_l1Token, _token, msg.sender, _to, _tokenIds);
}
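
With the interfaces and implementations now `payable`, a withdrawal transaction can carry ETH for the cross-domain fee; the gateway forwards it to the messenger via `{value: msg.value}` while passing 0 as the message's own value (previously the functions were not payable and `msg.value` was passed as the message value without being forwarded, which is what #382 fixes). A sketch of building such a call from Go with the go-ethereum `abi` package (the scroll-tech fork mirrors upstream here); the ABI fragment is hand-written and the address and amounts are placeholders:

```go
package main

import (
	"fmt"
	"math/big"
	"strings"

	"github.com/scroll-tech/go-ethereum/accounts/abi"
	"github.com/scroll-tech/go-ethereum/common"
)

// Hand-written ABI fragment for the now-payable withdrawERC721(address,uint256,uint256).
const gatewayABI = `[{"type":"function","name":"withdrawERC721","stateMutability":"payable",
"inputs":[{"name":"token","type":"address"},{"name":"tokenId","type":"uint256"},
{"name":"gasLimit","type":"uint256"}],"outputs":[]}]`

func main() {
	parsed, err := abi.JSON(strings.NewReader(gatewayABI))
	if err != nil {
		panic(err)
	}
	data, err := parsed.Pack("withdrawERC721",
		common.HexToAddress("0x0000000000000000000000000000000000001234"), // L2 NFT address (placeholder)
		big.NewInt(1),     // tokenId
		big.NewInt(40000), // gas limit for the L1 side
	)
	if err != nil {
		panic(err)
	}
	// Before #382 the function was not payable, so no ETH could be attached; now
	// the transaction's value field can carry the fee, which the gateway forwards
	// to the messenger via sendMessage{value: msg.value}.
	fmt.Printf("calldata: 0x%x (send with a non-zero tx value)\n", data)
}
```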

View File

@@ -22,6 +22,7 @@ type Config struct {
type ProverConfig struct {
ParamsPath string `json:"params_path"`
SeedPath string `json:"seed_path"`
DumpDir string `json:"dump_dir,omitempty"`
}
// NewConfig returns a new instance of Config.

View File

@@ -3,8 +3,6 @@
package prover
import (
"github.com/scroll-tech/go-ethereum/core/types"
"scroll-tech/common/message"
"scroll-tech/roller/config"
@@ -21,7 +19,7 @@ func NewProver(cfg *config.ProverConfig) (*Prover, error) {
}
// Prove call rust ffi to generate proof, if first failed, try again.
func (p *Prover) Prove(_ []*types.BlockTrace) (*message.AggProof, error) {
func (p *Prover) Prove(_ *message.TaskMsg) (*message.AggProof, error) {
return &message.AggProof{
Proof: []byte{},
Instance: []byte{},

View File

@@ -12,9 +12,10 @@ import "C" //nolint:typecheck
import (
"encoding/json"
"os"
"path/filepath"
"unsafe"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/common/message"
@@ -37,19 +38,38 @@ func NewProver(cfg *config.ProverConfig) (*Prover, error) {
}()
C.init_prover(paramsPathStr, seedPathStr)
if cfg.DumpDir != "" {
err := os.MkdirAll(cfg.DumpDir, os.ModePerm)
if err != nil {
return nil, err
}
log.Info("Enabled dump_proof", "dir", cfg.DumpDir)
}
return &Prover{cfg: cfg}, nil
}
// Prove call rust ffi to generate proof, if first failed, try again.
func (p *Prover) Prove(traces []*types.BlockTrace) (*message.AggProof, error) {
return p.prove(traces)
}
func (p *Prover) prove(traces []*types.BlockTrace) (*message.AggProof, error) {
tracesByt, err := json.Marshal(traces)
func (p *Prover) Prove(task *message.TaskMsg) (*message.AggProof, error) {
tracesByt, err := json.Marshal(task.Traces)
if err != nil {
return nil, err
}
proofByt := p.prove(tracesByt)
// dump proof
err = p.dumpProof(task.ID, proofByt)
if err != nil {
log.Error("Dump proof failed", "task-id", task.ID, "error", err)
}
zkProof := &message.AggProof{}
return zkProof, json.Unmarshal(proofByt, zkProof)
}
// Call cgo to generate proof.
func (p *Prover) prove(tracesByt []byte) []byte {
tracesStr := C.CString(string(tracesByt))
defer func() {
@@ -61,7 +81,19 @@ func (p *Prover) prove(traces []*types.BlockTrace) (*message.AggProof, error) {
log.Info("Finish creating agg proof!")
proof := C.GoString(cProof)
zkProof := &message.AggProof{}
err = json.Unmarshal([]byte(proof), zkProof)
return zkProof, err
return []byte(proof)
}
func (p *Prover) dumpProof(id string, proofByt []byte) error {
if p.cfg.DumpDir == "" {
return nil
}
path := filepath.Join(p.cfg.DumpDir, id)
f, err := os.Create(path)
if err != nil {
return err
}
log.Info("Saving proof", "task-id", id)
_, err = f.Write(proofByt)
return err
}
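
When `dump_dir` is set in the roller's prover config, the raw JSON returned by the FFI is written to `<dump_dir>/<task-id>` before it is unmarshalled into `message.AggProof`, so proofs can be inspected offline; dump failures are only logged and do not fail the proving. A small sketch of reading such a dump back — the directory and task ID are placeholders, and no field names of `AggProof` are assumed, only that the dump is a JSON document:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// Placeholders: point these at the configured dump_dir and a real task ID.
	dumpDir := "/tmp/roller-proofs"
	taskID := "some-task-id"

	raw, err := os.ReadFile(filepath.Join(dumpDir, taskID))
	if err != nil {
		fmt.Println("read dump:", err)
		return
	}
	// The file holds exactly what the FFI returned (the bytes later unmarshalled
	// into message.AggProof), so it should be a single JSON document.
	fmt.Printf("dumped %d bytes, valid JSON: %v\n", len(raw), json.Valid(raw))
}
```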

View File

@@ -12,6 +12,8 @@ import (
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/stretchr/testify/assert"
"scroll-tech/common/message"
"scroll-tech/roller/config"
"scroll-tech/roller/prover"
)
@@ -49,7 +51,8 @@ func TestFFI(t *testing.T) {
as.NoError(json.Unmarshal(byt, trace))
traces = append(traces, trace)
}
proof, err := prover.Prove(traces)
task := &message.TaskMsg{ID: "test", Traces: traces}
proof, err := prover.Prove(task)
as.NoError(err)
t.Log("prove success")

View File

@@ -222,7 +222,7 @@ func (r *Roller) prove() error {
// If FFI panic during Prove, the roller will restart and re-enter prove() function,
// the proof will not be submitted.
var proof *message.AggProof
proof, err = r.prover.Prove(traces)
proof, err = r.prover.Prove(task.Task)
if err != nil {
proofMsg = &message.ProofDetail{
Status: message.StatusProofError,