Compare commits


3 Commits

Author | SHA1 | Message | Date
Péter Garamvölgyi | f00c400993 | fix(chunk proposer): treat L2 tx gas check as soft limit (#602) | 2023-06-29 23:32:16 +08:00
Péter Garamvölgyi | bb6428848f | fix: improve import genesis logs (#601) | 2023-06-29 17:18:54 +02:00
HAOYUatHZ | df97200a41 | feat(roller): return concrete error for get traces error (#595) | 2023-06-29 19:09:40 +08:00
  Co-authored-by: Lawliet-Chan <1576710154@qq.com>
  Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
26 changed files with 487 additions and 49 deletions

View File

@@ -12,12 +12,13 @@ import (
 	"github.com/stretchr/testify/assert"
 	"gorm.io/gorm"
-	"scroll-tech/bridge/internal/controller/sender"
-	"scroll-tech/bridge/internal/orm"
-	bridgeUtils "scroll-tech/bridge/internal/utils"
 	"scroll-tech/common/types"
 	"scroll-tech/common/utils"
-	"scroll-tech/database/migrate"
+	"scroll-tech/bridge/internal/controller/sender"
+	"scroll-tech/bridge/internal/orm"
+	"scroll-tech/bridge/internal/orm/migrate"
+	bridgeUtils "scroll-tech/bridge/internal/utils"
 )

 var (

View File

@@ -236,7 +236,7 @@ func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte,
if err != nil {
return fmt.Errorf("failed to send import genesis batch tx to L1, error: %v", err)
}
log.Info("importGenesisBatch transaction sent", "contract", r.cfg.RollupContractAddress, "txHash", txHash, "batchHash", batchHash)
log.Info("importGenesisBatch transaction sent", "contract", r.cfg.RollupContractAddress, "txHash", txHash.String(), "batchHash", batchHash)
// wait for confirmation
// we assume that no other transactions are sent before initializeGenesis completes
@@ -247,11 +247,11 @@ func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte,
select {
// print progress
case <-ticker.C:
log.Info("Waiting for confirmation, pending count: %d", r.rollupSender.PendingCount())
log.Info("Waiting for confirmation", "pending count", r.rollupSender.PendingCount())
// timeout
case <-time.After(5 * time.Minute):
return fmt.Errorf("import genesis timeout after 5 minutes, original txHash: %v", txHash)
return fmt.Errorf("import genesis timeout after 5 minutes, original txHash: %v", txHash.String())
// handle confirmation
case confirmation := <-r.rollupSender.ConfirmChan():
@@ -261,7 +261,7 @@ func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte,
if !confirmation.IsSuccessful {
return fmt.Errorf("import genesis batch tx failed")
}
log.Info("Successfully committed genesis batch on L1", "txHash", confirmation.TxHash)
log.Info("Successfully committed genesis batch on L1", "txHash", confirmation.TxHash.String())
return nil
}
}

View File

@@ -12,14 +12,15 @@ import (
 	"github.com/stretchr/testify/assert"
 	"gorm.io/gorm"
-	"scroll-tech/bridge/internal/controller/sender"
-	"scroll-tech/bridge/internal/orm"
-	bridgeTypes "scroll-tech/bridge/internal/types"
-	bridgeUtils "scroll-tech/bridge/internal/utils"
 	"scroll-tech/common/types"
 	"scroll-tech/common/types/message"
 	"scroll-tech/common/utils"
-	"scroll-tech/database/migrate"
+	"scroll-tech/bridge/internal/controller/sender"
+	"scroll-tech/bridge/internal/orm"
+	"scroll-tech/bridge/internal/orm/migrate"
+	bridgeTypes "scroll-tech/bridge/internal/types"
+	bridgeUtils "scroll-tech/bridge/internal/utils"
 )

 func setupL2RelayerDB(t *testing.T) *gorm.DB {

View File

@@ -106,15 +106,6 @@ func (p *ChunkProposer) proposeChunk() (*bridgeTypes.Chunk, error) {
 		)
 	}
-	if totalTxGasUsed > p.maxTxGasPerChunk {
-		return nil, fmt.Errorf(
-			"the first block exceeds l2 tx gas limit; block number: %v, gas used: %v, max gas limit: %v",
-			firstBlock.Header.Number,
-			totalTxGasUsed,
-			p.maxTxGasPerChunk,
-		)
-	}
 	if totalL1CommitGas > p.maxL1CommitGasPerChunk {
 		return nil, fmt.Errorf(
 			"the first block exceeds l1 commit gas limit; block number: %v, commit gas: %v, max commit gas limit: %v",
@@ -133,6 +124,16 @@ func (p *ChunkProposer) proposeChunk() (*bridgeTypes.Chunk, error) {
 		)
 	}
+	// Check if the first block breaks any soft limits.
+	if totalTxGasUsed > p.maxTxGasPerChunk {
+		log.Warn(
+			"The first block in chunk exceeds l2 tx gas limit",
+			"block number", firstBlock.Header.Number,
+			"gas used", totalTxGasUsed,
+			"max gas limit", p.maxTxGasPerChunk,
+		)
+	}
+
 	for i, block := range blocks[1:] {
 		totalTxGasUsed += block.Header.GasUsed
 		totalL2TxNum += block.L2TxsNum()
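For context on #602: the only behavioural change in this hunk is that the per-chunk L2 tx gas check on the first block no longer aborts proposal. A minimal, hypothetical sketch of the resulting hard- vs soft-limit split (standalone names, not taken from the repo):

package main

import (
	"fmt"
	"log"
)

// checkFirstBlockLimits mirrors the post-#602 behaviour: the L1 commit gas
// check stays a hard limit, while the L2 tx gas check only logs a warning.
func checkFirstBlockLimits(totalTxGasUsed, totalL1CommitGas, maxTxGasPerChunk, maxL1CommitGasPerChunk uint64) error {
	if totalL1CommitGas > maxL1CommitGasPerChunk {
		// Hard limit: a first block that already exceeds the L1 commit gas budget
		// can never be committed, so chunk proposal must fail.
		return fmt.Errorf("first block exceeds l1 commit gas limit: %d > %d", totalL1CommitGas, maxL1CommitGasPerChunk)
	}
	if totalTxGasUsed > maxTxGasPerChunk {
		// Soft limit: warn and keep going, the chunk is still proposed.
		log.Printf("warn: first block exceeds l2 tx gas limit: %d > %d", totalTxGasUsed, maxTxGasPerChunk)
	}
	return nil
}

func main() {
	if err := checkFirstBlockLimits(12_000_000, 4_000_000, 10_000_000, 5_000_000); err != nil {
		log.Fatal(err)
	}
	fmt.Println("chunk proposed despite exceeding the L2 tx gas soft limit")
}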

View File

@@ -9,11 +9,12 @@ import (
 	"github.com/stretchr/testify/assert"
 	"gorm.io/gorm"
+	"scroll-tech/common/docker"
 	"scroll-tech/bridge/internal/config"
+	"scroll-tech/bridge/internal/orm/migrate"
 	bridgeTypes "scroll-tech/bridge/internal/types"
 	"scroll-tech/bridge/internal/utils"
-	"scroll-tech/common/docker"
-	"scroll-tech/database/migrate"
 )

 var (

View File

@@ -0,0 +1,61 @@
package migrate

import (
	"database/sql"
	"embed"
	"os"
	"strconv"

	"github.com/pressly/goose/v3"
)

//go:embed migrations/*.sql
var embedMigrations embed.FS

// MigrationsDir migration dir
const MigrationsDir string = "migrations"

func init() {
	goose.SetBaseFS(embedMigrations)
	goose.SetSequential(true)
	goose.SetTableName("scroll_migrations")

	verbose, _ := strconv.ParseBool(os.Getenv("LOG_SQL_MIGRATIONS"))
	goose.SetVerbose(verbose)
}

// Migrate migrate db
func Migrate(db *sql.DB) error {
	return goose.Up(db, MigrationsDir, goose.WithAllowMissing())
}

// Rollback rollback to the given version
func Rollback(db *sql.DB, version *int64) error {
	if version != nil {
		return goose.DownTo(db, MigrationsDir, *version)
	}
	return goose.Down(db, MigrationsDir)
}

// ResetDB clean and migrate db.
func ResetDB(db *sql.DB) error {
	if err := Rollback(db, new(int64)); err != nil {
		return err
	}
	return Migrate(db)
}

// Current get current version
func Current(db *sql.DB) (int64, error) {
	return goose.GetDBVersion(db)
}

// Status is normal or not
func Status(db *sql.DB) error {
	return goose.Version(db, MigrationsDir)
}

// Create a new migration folder
func Create(db *sql.DB, name, migrationType string) error {
	return goose.Create(db, MigrationsDir, name, migrationType)
}
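For readers unfamiliar with goose: the new bridge/internal/orm/migrate package above embeds its SQL files and exposes Migrate/Rollback/ResetDB/Current helpers around a plain *sql.DB. A minimal usage sketch under assumed conditions (the DSN and the main wrapper are illustrative; the bridge itself obtains its database handle from its own config):

package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq" // postgres driver, the same one the tests import

	"scroll-tech/bridge/internal/orm/migrate"
)

func main() {
	// Placeholder DSN; any reachable Postgres instance works.
	db, err := sql.Open("postgres", "postgres://postgres:postgres@localhost:5432/scroll?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Apply every embedded migration in migrations/*.sql, then report the schema version.
	if err := migrate.Migrate(db); err != nil {
		log.Fatal(err)
	}
	version, err := migrate.Current(db)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("schema is at version %d", version)
}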

View File

@@ -0,0 +1,86 @@
package migrate

import (
	"testing"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq"
	"github.com/stretchr/testify/assert"

	"scroll-tech/common/docker"
	"scroll-tech/database"
)

var (
	base *docker.App
	pgDB *sqlx.DB
)

func initEnv(t *testing.T) error {
	// Start db container.
	base.RunDBImage(t)

	// Create db orm handler.
	factory, err := database.NewOrmFactory(base.DBConfig)
	if err != nil {
		return err
	}
	pgDB = factory.GetDB()
	return nil
}

func TestMigrate(t *testing.T) {
	base = docker.NewDockerApp()

	if err := initEnv(t); err != nil {
		t.Fatal(err)
	}

	t.Run("testCurrent", testCurrent)
	t.Run("testStatus", testStatus)
	t.Run("testResetDB", testResetDB)
	t.Run("testMigrate", testMigrate)
	t.Run("testRollback", testRollback)

	t.Cleanup(func() {
		base.Free()
	})
}

func testCurrent(t *testing.T) {
	cur, err := Current(pgDB.DB)
	assert.NoError(t, err)
	assert.Equal(t, 0, int(cur))
}

func testStatus(t *testing.T) {
	status := Status(pgDB.DB)
	assert.NoError(t, status)
}

func testResetDB(t *testing.T) {
	assert.NoError(t, ResetDB(pgDB.DB))
	cur, err := Current(pgDB.DB)
	assert.NoError(t, err)
	// total number of tables.
	assert.Equal(t, 6, int(cur))
}

func testMigrate(t *testing.T) {
	assert.NoError(t, Migrate(pgDB.DB))
	cur, err := Current(pgDB.DB)
	assert.NoError(t, err)
	assert.Equal(t, true, cur > 0)
}

func testRollback(t *testing.T) {
	version, err := Current(pgDB.DB)
	assert.NoError(t, err)
	assert.Equal(t, true, version > 0)

	assert.NoError(t, Rollback(pgDB.DB, nil))

	cur, err := Current(pgDB.DB)
	assert.NoError(t, err)
	assert.Equal(t, true, cur+1 == version)
}

View File

@@ -10,12 +10,13 @@ import (
 	"github.com/stretchr/testify/assert"
 	"gorm.io/gorm"
-	"scroll-tech/bridge/internal/config"
-	bridgeTypes "scroll-tech/bridge/internal/types"
-	"scroll-tech/bridge/internal/utils"
 	"scroll-tech/common/docker"
 	"scroll-tech/common/types"
-	"scroll-tech/database/migrate"
+	"scroll-tech/bridge/internal/config"
+	"scroll-tech/bridge/internal/orm/migrate"
+	bridgeTypes "scroll-tech/bridge/internal/types"
+	"scroll-tech/bridge/internal/utils"
 )

 var (

View File

@@ -11,12 +11,13 @@ import (
 	"github.com/stretchr/testify/assert"
 	"gorm.io/gorm"
+	"scroll-tech/common/docker"
 	bcmd "scroll-tech/bridge/cmd"
 	"scroll-tech/bridge/internal/config"
+	"scroll-tech/bridge/internal/orm/migrate"
 	"scroll-tech/bridge/internal/utils"
 	"scroll-tech/bridge/mock_bridge"
-	"scroll-tech/common/docker"
-	"scroll-tech/database/migrate"
 )

 var (

View File

@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
-var tag = "v4.0.2"
+var tag = "v4.0.4"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {

View File

@@ -26,6 +26,7 @@ func init() {
 // Migrate migrate db
 func Migrate(db *sql.DB) error {
+	//return goose.Up(db, MIGRATIONS_DIR, goose.WithAllowMissing())
 	return goose.Up(db, MigrationsDir, goose.WithAllowMissing())
 }

View File

@@ -63,7 +63,7 @@ func testResetDB(t *testing.T) {
cur, err := Current(pgDB.DB)
assert.NoError(t, err)
// total number of tables.
-assert.Equal(t, 6, int(cur))
+assert.Equal(t, 7, int(cur))
}
func testMigrate(t *testing.T) {

View File

@@ -0,0 +1,38 @@
-- +goose Up
-- +goose StatementBegin
-- TODO: use foreign key for batch_id?
-- TODO: why tx_num is bigint?
create table block_trace
(
number BIGINT NOT NULL,
hash VARCHAR NOT NULL,
parent_hash VARCHAR NOT NULL,
trace JSON NOT NULL,
batch_hash VARCHAR DEFAULT NULL,
tx_num INTEGER NOT NULL,
gas_used BIGINT NOT NULL,
block_timestamp NUMERIC NOT NULL
);
create unique index block_trace_hash_uindex
on block_trace (hash);
create unique index block_trace_number_uindex
on block_trace (number);
create unique index block_trace_parent_uindex
on block_trace (number, parent_hash);
create unique index block_trace_parent_hash_uindex
on block_trace (hash, parent_hash);
create index block_trace_batch_hash_index
on block_trace (batch_hash);
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
drop table if exists block_trace;
-- +goose StatementEnd

View File

@@ -0,0 +1,50 @@
-- +goose Up
-- +goose StatementBegin
create table l1_message
(
queue_index BIGINT NOT NULL,
msg_hash VARCHAR NOT NULL,
height BIGINT NOT NULL,
gas_limit BIGINT NOT NULL,
sender VARCHAR NOT NULL,
target VARCHAR NOT NULL,
value VARCHAR NOT NULL,
calldata TEXT NOT NULL,
layer1_hash VARCHAR NOT NULL,
layer2_hash VARCHAR DEFAULT NULL,
status INTEGER DEFAULT 1,
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP
);
comment
on column l1_message.status is 'undefined, pending, submitted, confirmed, failed, expired, relay_failed';
create unique index l1_message_hash_uindex
on l1_message (msg_hash);
create unique index l1_message_nonce_uindex
on l1_message (queue_index);
create index l1_message_height_index
on l1_message (height);
CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$
BEGIN
NEW.updated_at = CURRENT_TIMESTAMP;
RETURN NEW;
END;
$$ language 'plpgsql';
CREATE TRIGGER update_timestamp BEFORE UPDATE
ON l1_message FOR EACH ROW EXECUTE PROCEDURE
update_timestamp();
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
drop table if exists l1_message;
-- +goose StatementEnd

View File

@@ -0,0 +1,50 @@
-- +goose Up
-- +goose StatementBegin
create table l2_message
(
nonce BIGINT NOT NULL,
msg_hash VARCHAR NOT NULL,
height BIGINT NOT NULL,
sender VARCHAR NOT NULL,
target VARCHAR NOT NULL,
value VARCHAR NOT NULL,
calldata TEXT NOT NULL,
layer2_hash VARCHAR NOT NULL,
layer1_hash VARCHAR DEFAULT NULL,
proof TEXT DEFAULT NULL,
status INTEGER DEFAULT 1,
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP
);
comment
on column l2_message.status is 'undefined, pending, submitted, confirmed, failed, expired, relay_failed';
create unique index l2_message_hash_uindex
on l2_message (msg_hash);
create unique index l2_message_nonce_uindex
on l2_message (nonce);
create index l2_message_height_index
on l2_message (height);
CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$
BEGIN
NEW.updated_at = CURRENT_TIMESTAMP;
RETURN NEW;
END;
$$ language 'plpgsql';
CREATE TRIGGER update_timestamp BEFORE UPDATE
ON l2_message FOR EACH ROW EXECUTE PROCEDURE
update_timestamp();
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
drop table if exists l2_message;
-- +goose StatementEnd

View File

@@ -0,0 +1,49 @@
-- +goose Up
-- +goose StatementBegin
create table block_batch
(
hash VARCHAR NOT NULL,
index BIGINT NOT NULL,
start_block_number BIGINT NOT NULL,
start_block_hash VARCHAR NOT NULL,
end_block_number BIGINT NOT NULL,
end_block_hash VARCHAR NOT NULL,
parent_hash VARCHAR NOT NULL,
state_root VARCHAR NOT NULL,
total_tx_num BIGINT NOT NULL,
total_l1_tx_num BIGINT NOT NULL,
total_l2_gas BIGINT NOT NULL,
proving_status INTEGER DEFAULT 1,
proof BYTEA DEFAULT NULL,
proof_time_sec INTEGER DEFAULT 0,
rollup_status INTEGER DEFAULT 1,
commit_tx_hash VARCHAR DEFAULT NULL,
finalize_tx_hash VARCHAR DEFAULT NULL,
oracle_status INTEGER DEFAULT 1,
oracle_tx_hash VARCHAR DEFAULT NULL,
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
prover_assigned_at TIMESTAMP(0) DEFAULT NULL,
proved_at TIMESTAMP(0) DEFAULT NULL,
committed_at TIMESTAMP(0) DEFAULT NULL,
finalized_at TIMESTAMP(0) DEFAULT NULL
);
comment
on column block_batch.proving_status is 'undefined, unassigned, skipped, assigned, proved, verified, failed';
comment
on column block_batch.rollup_status is 'undefined, pending, committing, committed, finalizing, finalized, finalization_skipped, commit_failed, finalize_failed';
comment
on column block_batch.oracle_status is 'undefined, pending, importing, imported, failed';
create unique index block_batch_hash_uindex
on block_batch (hash);
create unique index block_batch_index_uindex
on block_batch (index);
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
drop table if exists block_batch;
-- +goose StatementEnd

View File

@@ -0,0 +1,18 @@
-- +goose Up
-- +goose StatementBegin
create table session_info
(
hash VARCHAR NOT NULL,
rollers_info BYTEA NOT NULL
);
create unique index session_info_hash_uindex
on session_info (hash);
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
drop table if exists session_info;
-- +goose StatementEnd

View File

@@ -0,0 +1,33 @@
-- +goose Up
-- +goose StatementBegin
create table l1_block
(
number BIGINT NOT NULL,
hash VARCHAR NOT NULL,
header_rlp TEXT NOT NULL,
base_fee BIGINT NOT NULL,
block_status INTEGER DEFAULT 1,
import_tx_hash VARCHAR DEFAULT NULL,
oracle_status INTEGER DEFAULT 1,
oracle_tx_hash VARCHAR DEFAULT NULL
);
comment
on column l1_block.block_status is 'undefined, pending, importing, imported, failed';
comment
on column l1_block.oracle_status is 'undefined, pending, importing, imported, failed';
create unique index l1_block_hash_uindex
on l1_block (hash);
create unique index l1_block_number_uindex
on l1_block (number);
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
drop table if exists l1_block;
-- +goose StatementEnd

View File

@@ -0,0 +1,38 @@
-- +goose Up
-- +goose StatementBegin
create table agg_task
(
id VARCHAR NOT NULL,
start_batch_index BIGINT NOT NULL,
start_batch_hash VARCHAR NOT NULL,
end_batch_index BIGINT NOT NULL,
end_batch_hash VARCHAR NOT NULL,
proving_status SMALLINT DEFAULT 1,
proof BYTEA DEFAULT NULL,
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) DEFAULT CURRENT_TIMESTAMP
);
create unique index agg_task_hash_uindex
on agg_task (id);
CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$
BEGIN
NEW.updated_at = CURRENT_TIMESTAMP;
RETURN NEW;
END;
$$ language 'plpgsql';
CREATE TRIGGER update_timestamp BEFORE UPDATE
ON agg_task FOR EACH ROW EXECUTE PROCEDURE
update_timestamp();
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
drop table if exists agg_task;
-- +goose StatementEnd

View File

@@ -231,30 +231,38 @@ func (r *Roller) prove() error {
 		var traces []*types.BlockTrace
 		traces, err = r.getSortedTracesByHashes(task.Task.BlockHashes)
 		if err != nil {
-			return err
-		}
-
-		// If FFI panic during Prove, the roller will restart and re-enter prove() function,
-		// the proof will not be submitted.
-		var proof *message.AggProof
-		proof, err = r.prover.Prove(task.Task.ID, traces)
-		if err != nil {
 			proofMsg = &message.ProofDetail{
 				Status: message.StatusProofError,
-				Error:  err.Error(),
+				Error:  "get traces failed",
 				ID:     task.Task.ID,
 				Type:   task.Task.Type,
-				Proof:  &message.AggProof{},
+				Proof:  nil,
 			}
-			log.Error("prove block failed!", "task-id", task.Task.ID)
+			log.Error("get traces failed!", "task-id", task.Task.ID, "err", err)
 		} else {
-			proofMsg = &message.ProofDetail{
-				Status: message.StatusOk,
-				ID:     task.Task.ID,
-				Type:   task.Task.Type,
-				Proof:  proof,
-			}
-			log.Info("prove block successfully!", "task-id", task.Task.ID)
+			// If FFI panic during Prove, the roller will restart and re-enter prove() function,
+			// the proof will not be submitted.
+			var proof *message.AggProof
+			proof, err = r.prover.Prove(task.Task.ID, traces)
+			if err != nil {
+				proofMsg = &message.ProofDetail{
+					Status: message.StatusProofError,
+					Error:  err.Error(),
+					ID:     task.Task.ID,
+					Type:   task.Task.Type,
+					Proof:  nil,
+				}
+				log.Error("prove block failed!", "task-id", task.Task.ID)
+			} else {
+				proofMsg = &message.ProofDetail{
+					Status: message.StatusOk,
+					ID:     task.Task.ID,
+					Type:   task.Task.Type,
+					Proof:  proof,
+				}
+				log.Info("prove block successfully!", "task-id", task.Task.ID)
+			}
 		}
 	} else {
 		// when the roller has more than 3 times panic,
@@ -264,7 +272,7 @@ func (r *Roller) prove() error {
 			Error: "zk proving panic",
 			ID:    task.Task.ID,
 			Type:  task.Task.Type,
-			Proof: &message.AggProof{},
+			Proof: nil,
 		}
 	}
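Condensing the change in #595 above: a failure to fetch traces now produces its own ProofDetail with a concrete error string instead of bubbling a bare error out of prove(), and every failure path carries a nil Proof. A hypothetical, stripped-down view of that flow (the helper function and its parameters are illustrative; the field names come from the diff):

package roller

import (
	"scroll-tech/common/types"
	"scroll-tech/common/types/message"
)

// buildProofMsg is an illustrative condensation of the post-#595 flow in prove().
func buildProofMsg(taskID string, getTraces func() ([]*types.BlockTrace, error), prove func([]*types.BlockTrace) (*message.AggProof, error)) *message.ProofDetail {
	traces, err := getTraces()
	if err != nil {
		// Concrete error for the trace-fetching step; no proof is attached.
		return &message.ProofDetail{ID: taskID, Status: message.StatusProofError, Error: "get traces failed", Proof: nil}
	}
	proof, err := prove(traces)
	if err != nil {
		// Proving failed; report the prover's error and again attach no proof.
		return &message.ProofDetail{ID: taskID, Status: message.StatusProofError, Error: err.Error(), Proof: nil}
	}
	return &message.ProofDetail{ID: taskID, Status: message.StatusOk, Proof: proof}
}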