Mirror of https://github.com/scroll-tech/scroll.git (synced 2026-01-15 00:48:01 -05:00)

Compare commits: v4.3.91...test-v1.22 (7 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | d7a57235d3 |  |
|  | 91d21301ec |  |
|  | 4b32a44a70 |  |
|  | 55b400c5fb |  |
|  | 1b49091207 |  |
|  | 5b827c3c18 |  |
|  | 6b2eb80aa5 |  |
.github/workflows/docker.yml (vendored, 1 addition)
```diff
@@ -39,6 +39,7 @@ jobs:
           aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
       - name: Build and push
         uses: docker/build-push-action@v3
           platforms: linux/amd64,linux/arm64
         env:
           ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
+          REPOSITORY: event-watcher
```
```diff
@@ -61,8 +61,11 @@ func (t *TestcontainerApps) StartL1GethContainer() error {
 	req := testcontainers.ContainerRequest{
 		Image:        "scroll_l1geth",
 		ExposedPorts: []string{"8546/tcp", "8545/tcp"},
-		WaitingFor:   wait.ForHTTP("/").WithPort("8545").WithStartupTimeout(100 * time.Second),
-		Cmd:          []string{"--log.debug", "ANY"},
+		WaitingFor: wait.ForAll(
+			wait.ForListeningPort("8546").WithStartupTimeout(100*time.Second),
+			wait.ForListeningPort("8545").WithStartupTimeout(100*time.Second),
+		),
+		Cmd: []string{"--log.debug", "ANY"},
 	}
 	genericContainerReq := testcontainers.GenericContainerRequest{
 		ContainerRequest: req,
```
```diff
@@ -85,7 +88,10 @@ func (t *TestcontainerApps) StartL2GethContainer() error {
 	req := testcontainers.ContainerRequest{
 		Image:        "scroll_l2geth",
 		ExposedPorts: []string{"8546/tcp", "8545/tcp"},
-		WaitingFor:   wait.ForHTTP("/").WithPort("8545").WithStartupTimeout(100 * time.Second),
+		WaitingFor: wait.ForAll(
+			wait.ForListeningPort("8546").WithStartupTimeout(100*time.Second),
+			wait.ForListeningPort("8545").WithStartupTimeout(100*time.Second),
+		),
 	}
 	genericContainerReq := testcontainers.GenericContainerRequest{
 		ContainerRequest: req,
```
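The two hunks above replace the single `wait.ForHTTP("/")` probe with `wait.ForAll` over both exposed ports, so readiness now means the WebSocket (8546) and HTTP (8545) endpoints each accept TCP connections; a plain GET on `/` is not something a bare JSON-RPC server may reliably answer with 200. A minimal, self-contained sketch of the same strategy using testcontainers-go's public API (the image name and ports come from the diff; the `main` scaffolding is illustrative):

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/testcontainers/testcontainers-go"
	"github.com/testcontainers/testcontainers-go/wait"
)

func main() {
	ctx := context.Background()
	req := testcontainers.ContainerRequest{
		Image:        "scroll_l1geth",
		ExposedPorts: []string{"8546/tcp", "8545/tcp"},
		// Ready only when both the WS (8546) and HTTP (8545) ports accept
		// TCP connections, matching the strategy in the diff above.
		WaitingFor: wait.ForAll(
			wait.ForListeningPort("8546").WithStartupTimeout(100*time.Second),
			wait.ForListeningPort("8545").WithStartupTimeout(100*time.Second),
		),
	}
	container, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
		ContainerRequest: req,
		Started:          true,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = container.Terminate(ctx) }()
	log.Println("l1geth container is ready")
}
```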
```diff
@@ -5,7 +5,7 @@ import (
 	"runtime/debug"
 )
 
-var tag = "v4.3.91"
+var tag = "v4.3.93"
 
 var commit = func() string {
 	if info, ok := debug.ReadBuildInfo(); ok {
```
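This hunk bumps the tag from v4.3.91 to v4.3.93, and the `commit` helper it touches is truncated by the extraction. A typical completion, shown here as an illustrative assumption rather than the repository's verbatim code, reads `vcs.revision` from the toolchain-embedded build info:

```go
package main

import (
	"fmt"
	"runtime/debug"
)

var tag = "v4.3.93"

var commit = func() string {
	if info, ok := debug.ReadBuildInfo(); ok {
		for _, s := range info.Settings {
			// The Go toolchain embeds the VCS revision at build time.
			if s.Key == "vcs.revision" && len(s.Value) >= 8 {
				return s.Value[:8] // short commit hash
			}
		}
	}
	return "unknown"
}()

func main() {
	fmt.Printf("%s-%s\n", tag, commit)
}
```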
```diff
@@ -38,7 +38,7 @@ make lint
 
 ## Configure
 
-The coordinator behavior can be configured using [`config.json`](config.json). Check the code comments under `ProverManager` in [`config/config.go`](config/config.go) for more details.
+The coordinator behavior can be configured using [`conf/config.json`](conf/config.json). Check the code comments under `ProverManager` in [`internal/config/config.go`](internal/config/config.go) for more details.
 
 
 ## Start
```
```diff
@@ -24,6 +24,7 @@ type Batch struct {
 	// batch
 	Index           uint64 `json:"index" gorm:"column:index"`
 	Hash            string `json:"hash" gorm:"column:hash"`
+	DataHash        string `json:"data_hash" gorm:"column:data_hash"`
 	StartChunkIndex uint64 `json:"start_chunk_index" gorm:"column:start_chunk_index"`
 	StartChunkHash  string `json:"start_chunk_hash" gorm:"column:start_chunk_hash"`
 	EndChunkIndex   uint64 `json:"end_chunk_index" gorm:"column:end_chunk_index"`
```
```diff
@@ -54,6 +55,10 @@ type Batch struct {
 	OracleStatus int16  `json:"oracle_status" gorm:"column:oracle_status;default:1"`
 	OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"`
 
+	// blob
+	BlobDataProof []byte `json:"blob_data_proof" gorm:"column:blob_data_proof"`
+	BlobSize      uint64 `json:"blob_size" gorm:"column:blob_size"`
+
 	// metadata
 	CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
 	UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
```
```diff
@@ -248,6 +253,7 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
 	newBatch := Batch{
 		Index:           batch.Index,
 		Hash:            daBatch.Hash().Hex(),
+		DataHash:        daBatch.DataHash.Hex(),
 		StartChunkHash:  startDAChunkHash.Hex(),
 		StartChunkIndex: startChunkIndex,
 		EndChunkHash:    endDAChunkHash.Hex(),
```
```diff
@@ -262,6 +268,8 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
 		ActiveAttempts: 0,
 		RollupStatus:   int16(types.RollupPending),
 		OracleStatus:   int16(types.GasOraclePending),
+		BlobDataProof:  nil, // using mock value because this piece of codes is only used in unit tests
+		BlobSize:       0,   // using mock value because this piece of codes is only used in unit tests
 	}
 
 	db := o.db
```
```diff
@@ -48,6 +48,10 @@ type Chunk struct {
 	// batch
 	BatchHash string `json:"batch_hash" gorm:"column:batch_hash;default:NULL"`
 
+	// blob
+	CrcMax   uint64 `json:"crc_max" gorm:"column:crc_max"`
+	BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"`
+
 	// metadata
 	TotalL2TxGas uint64 `json:"total_l2_tx_gas" gorm:"column:total_l2_tx_gas"`
 	TotalL2TxNum uint64 `json:"total_l2_tx_num" gorm:"column:total_l2_tx_num"`
```
```diff
@@ -300,6 +304,8 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, dbTX ...
 		ProvingStatus:  int16(types.ProvingTaskUnassigned),
 		TotalAttempts:  0,
 		ActiveAttempts: 0,
+		CrcMax:         0, // using mock value because this piece of codes is only used in unit tests
+		BlobSize:       0, // using mock value because this piece of codes is only used in unit tests
 	}
 
 	db := o.db
```
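The new `crc_max` and `blob_size` columns bind to the struct through the `gorm:"column:..."` tags above, so reads and writes pick them up with no hand-written SQL. A minimal read-back sketch (hypothetical DSN and hash value, and a trimmed `Chunk` rather than the repo's full type):

```go
package main

import (
	"log"

	"gorm.io/driver/postgres"
	"gorm.io/gorm"
)

// Trimmed-down Chunk with only the columns this sketch touches; the tags
// mirror the ones added in the diff.
type Chunk struct {
	Hash     string `json:"hash" gorm:"column:hash"`
	CrcMax   uint64 `json:"crc_max" gorm:"column:crc_max"`
	BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"`
}

func main() {
	// Hypothetical DSN for illustration only.
	db, err := gorm.Open(postgres.Open("postgres://user:pass@localhost:5432/scroll"), &gorm.Config{})
	if err != nil {
		log.Fatal(err)
	}
	var c Chunk
	// Rows created before migration 00017 read back as 0 thanks to DEFAULT 0.
	if err := db.Table("chunk").Where("hash = ?", "0xabc").First(&c).Error; err != nil {
		log.Fatal(err)
	}
	log.Println("crc_max:", c.CrcMax, "blob_size:", c.BlobSize)
}
```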
```diff
@@ -59,20 +59,20 @@ func testResetDB(t *testing.T) {
 	cur, err := Current(pgDB)
 	assert.NoError(t, err)
 	// total number of tables.
-	assert.Equal(t, int64(16), cur)
+	assert.Equal(t, int64(17), cur)
 }
 
 func testMigrate(t *testing.T) {
 	assert.NoError(t, Migrate(pgDB))
 	cur, err := Current(pgDB)
 	assert.NoError(t, err)
-	assert.Equal(t, int64(16), cur)
+	assert.Equal(t, int64(17), cur)
 }
 
 func testRollback(t *testing.T) {
 	version, err := Current(pgDB)
 	assert.NoError(t, err)
-	assert.Equal(t, int64(16), version)
+	assert.Equal(t, int64(17), version)
 
 	assert.NoError(t, Rollback(pgDB, nil))
 
```
database/migrate/migrations/00017_add_blob_meta_data.sql (new file, 27 lines)
```diff
@@ -0,0 +1,27 @@
+-- +goose Up
+-- +goose StatementBegin
+
+ALTER TABLE chunk
+    ADD COLUMN crc_max INTEGER DEFAULT 0,
+    ADD COLUMN blob_size INTEGER DEFAULT 0;
+
+ALTER TABLE batch
+    ADD COLUMN data_hash VARCHAR DEFAULT '',
+    ADD COLUMN blob_data_proof BYTEA DEFAULT NULL,
+    ADD COLUMN blob_size INTEGER DEFAULT 0;
+
+-- +goose StatementEnd
+
+-- +goose Down
+-- +goose StatementBegin
+
+ALTER TABLE IF EXISTS batch
+    DROP COLUMN data_hash,
+    DROP COLUMN blob_data_proof,
+    DROP COLUMN blob_size;
+
+ALTER TABLE IF EXISTS chunk
+    DROP COLUMN crc_max,
+    DROP COLUMN blob_size;
+
+-- +goose StatementEnd
```
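The file is migration number 00017, which is why the `migrate_test` assertions earlier move from 16 to 17: goose records the highest applied version. A sketch of applying it programmatically, assuming `pressly/goose/v3` (consistent with the `-- +goose` directives) and a hypothetical DSN:

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq" // Postgres driver
	"github.com/pressly/goose/v3"
)

func main() {
	// Hypothetical DSN for illustration only.
	db, err := sql.Open("postgres", "postgres://user:pass@localhost:5432/scroll?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	if err := goose.SetDialect("postgres"); err != nil {
		log.Fatal(err)
	}
	// Applies every pending file in the directory, 00017_add_blob_meta_data.sql
	// included; goose's version table then reports 17.
	if err := goose.Up(db, "database/migrate/migrations"); err != nil {
		log.Fatal(err)
	}
}
```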
```diff
@@ -24,7 +24,7 @@ rollup_relayer: ## Builds the rollup_relayer bin
 test:
 	go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 $(PWD)/...
 
-lint: ## Lint the files - used for CI
+lint: mock_abi ## Lint the files - used for CI
 	GOBIN=$(PWD)/build/bin go run ../build/lint.go
 
 clean: ## Empty out the bin folder
```
```diff
@@ -76,7 +76,7 @@ func action(ctx *cli.Context) error {
 		}
 	})
 
-	log.Info("Start event-watcher successfully")
+	log.Info("Start event-watcher successfully", "version", version.Version)
 
 	// Catch CTRL-C to ensure a graceful shutdown.
 	interrupt := make(chan os.Signal, 1)
```
```diff
@@ -109,7 +109,7 @@ func action(ctx *cli.Context) error {
 	go utils.Loop(subCtx, 2*time.Second, l2relayer.ProcessGasPriceOracle)
 
 	// Finish start all message relayer functions
-	log.Info("Start gas-oracle successfully")
+	log.Info("Start gas-oracle successfully", "version", version.Version)
 
 	// Catch CTRL-C to ensure a graceful shutdown.
 	interrupt := make(chan os.Signal, 1)
```
```diff
@@ -115,7 +115,7 @@ func action(ctx *cli.Context) error {
 	go utils.Loop(subCtx, 15*time.Second, l2relayer.ProcessCommittedBatches)
 
 	// Finish start all rollup relayer functions.
-	log.Info("Start rollup-relayer successfully")
+	log.Info("Start rollup-relayer successfully", "version", version.Version)
 
 	// Catch CTRL-C to ensure a graceful shutdown.
 	interrupt := make(chan os.Signal, 1)
```
```diff
@@ -578,7 +578,7 @@ func (r *Layer2Relayer) finalizeBatch(dbBatch *orm.Batch, withProof bool) error
 		return err
 	}
 
-	log.Info("finalizeBatch in layer1", "with proof", withProof, "index", dbBatch.Index, "batch hash", dbBatch.Hash, "tx hash", txHash)
+	log.Info("finalizeBatch in layer1", "with proof", withProof, "index", dbBatch.Index, "batch hash", dbBatch.Hash, "tx hash", txHash.String())
 
 	// record and sync with db, @todo handle db error
 	if err := r.batchOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, dbBatch.Hash, txHash.String(), types.RollupFinalizing); err != nil {
```
```diff
@@ -25,6 +25,7 @@ type Batch struct {
 	// batch
 	Index           uint64 `json:"index" gorm:"column:index"`
 	Hash            string `json:"hash" gorm:"column:hash"`
+	DataHash        string `json:"data_hash" gorm:"column:data_hash"`
 	StartChunkIndex uint64 `json:"start_chunk_index" gorm:"column:start_chunk_index"`
 	StartChunkHash  string `json:"start_chunk_hash" gorm:"column:start_chunk_hash"`
 	EndChunkIndex   uint64 `json:"end_chunk_index" gorm:"column:end_chunk_index"`
```
```diff
@@ -53,6 +54,10 @@ type Batch struct {
 	OracleStatus int16  `json:"oracle_status" gorm:"column:oracle_status;default:1"`
 	OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"`
 
+	// blob
+	BlobDataProof []byte `json:"blob_data_proof" gorm:"column:blob_data_proof"`
+	BlobSize      uint64 `json:"blob_size" gorm:"column:blob_size"`
+
 	// metadata
 	TotalL1CommitGas          uint64 `json:"total_l1_commit_gas" gorm:"column:total_l1_commit_gas;default:0"`
 	TotalL1CommitCalldataSize uint64 `json:"total_l1_commit_calldata_size" gorm:"column:total_l1_commit_calldata_size;default:0"`
```
```diff
@@ -257,6 +262,7 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, codecVer
 	newBatch := Batch{
 		Index:           batch.Index,
 		Hash:            batchMeta.BatchHash.Hex(),
+		DataHash:        batchMeta.BatchDataHash.Hex(),
 		StartChunkHash:  batchMeta.StartChunkHash.Hex(),
 		StartChunkIndex: startChunkIndex,
 		EndChunkHash:    batchMeta.EndChunkHash.Hex(),
```
```diff
@@ -271,6 +277,8 @@
 		OracleStatus:              int16(types.GasOraclePending),
 		TotalL1CommitGas:          metrics.L1CommitGas,
 		TotalL1CommitCalldataSize: metrics.L1CommitCalldataSize,
+		BlobDataProof:             batchMeta.BatchBlobDataProof,
+		BlobSize:                  metrics.L1CommitBlobSize,
 	}
 
 	db := o.db
```
```diff
@@ -44,6 +44,10 @@ type Chunk struct {
 	// batch
 	BatchHash string `json:"batch_hash" gorm:"column:batch_hash;default:NULL"`
 
+	// blob
+	CrcMax   uint64 `json:"crc_max" gorm:"column:crc_max"`
+	BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"`
+
 	// metadata
 	TotalL2TxGas uint64 `json:"total_l2_tx_gas" gorm:"column:total_l2_tx_gas"`
 	TotalL2TxNum uint64 `json:"total_l2_tx_num" gorm:"column:total_l2_tx_num"`
```
```diff
@@ -212,6 +216,8 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, codecVer
 		ParentChunkStateRoot: parentChunkStateRoot,
 		WithdrawRoot:         chunk.Blocks[numBlocks-1].WithdrawRoot.Hex(),
 		ProvingStatus:        int16(types.ProvingTaskUnassigned),
+		CrcMax:               metrics.CrcMax,
+		BlobSize:             metrics.L1CommitBlobSize,
 	}
 
 	db := o.db
```
```diff
@@ -191,10 +191,12 @@ func GetChunkHash(chunk *encoding.Chunk, totalL1MessagePoppedBefore uint64, code
 
 // BatchMetadata represents the metadata of a batch.
 type BatchMetadata struct {
-	BatchHash      common.Hash
-	BatchBytes     []byte
-	StartChunkHash common.Hash
-	EndChunkHash   common.Hash
+	BatchHash          common.Hash
+	BatchDataHash      common.Hash
+	BatchBlobDataProof []byte
+	BatchBytes         []byte
+	StartChunkHash     common.Hash
+	EndChunkHash       common.Hash
 }
 
 // GetBatchMetadata retrieves the metadata of a batch.
```
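The reshaped `BatchMetadata` adds `BatchDataHash` and `BatchBlobDataProof` next to the existing fields. On the codecv0 path the proof is never assigned, so it stays a nil slice and is persisted as NULL in the new BYTEA column. A self-contained sketch of that zero-value behavior, with `common.Hash` swapped for `[32]byte` so it compiles standalone:

```go
package main

import "fmt"

// Mirrors the reshaped BatchMetadata from the diff, with common.Hash swapped
// for [32]byte so this sketch has no external dependencies.
type BatchMetadata struct {
	BatchHash          [32]byte
	BatchDataHash      [32]byte
	BatchBlobDataProof []byte // nil for codecv0; daBatch.BlobDataProof() for codecv1
	BatchBytes         []byte
}

func main() {
	meta := BatchMetadata{} // codecv0 path: proof never assigned
	// A nil []byte is what GORM persists as NULL in the new BYTEA column.
	fmt.Println(meta.BatchBlobDataProof == nil) // true
}
```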
```diff
@@ -212,9 +214,11 @@ func GetBatchMetadata(batch *encoding.Batch, codecVersion encoding.CodecVersion)
 		return nil, fmt.Errorf("failed to create codecv0 DA batch: %w", err)
 	}
 
+	// BatchBlobDataProof is left as empty for codecv0.
 	batchMeta := &BatchMetadata{
-		BatchHash:  daBatch.Hash(),
-		BatchBytes: daBatch.Encode(),
+		BatchHash:     daBatch.Hash(),
+		BatchDataHash: daBatch.DataHash,
+		BatchBytes:    daBatch.Encode(),
 	}
 
 	startDAChunk, err := codecv0.NewDAChunk(batch.Chunks[0], batch.TotalL1MessagePoppedBefore)
```
```diff
@@ -243,9 +247,16 @@
 		return nil, fmt.Errorf("failed to create codecv1 DA batch: %w", err)
 	}
 
+	blobDataProof, err := daBatch.BlobDataProof()
+	if err != nil {
+		return nil, fmt.Errorf("failed to get codecv1 blob data proof: %w", err)
+	}
+
 	batchMeta := &BatchMetadata{
-		BatchHash:  daBatch.Hash(),
-		BatchBytes: daBatch.Encode(),
+		BatchHash:          daBatch.Hash(),
+		BatchDataHash:      daBatch.DataHash,
+		BatchBlobDataProof: blobDataProof,
+		BatchBytes:         daBatch.Encode(),
 	}
 
 	startDAChunk, err := codecv1.NewDAChunk(batch.Chunks[0], batch.TotalL1MessagePoppedBefore)
```
```diff
@@ -21,6 +21,7 @@ type Batch struct {
 	// batch
 	Index           uint64 `json:"index" gorm:"column:index"`
 	Hash            string `json:"hash" gorm:"column:hash"`
+	DataHash        string `json:"data_hash" gorm:"column:data_hash"`
 	StartChunkIndex uint64 `json:"start_chunk_index" gorm:"column:start_chunk_index"`
 	StartChunkHash  string `json:"start_chunk_hash" gorm:"column:start_chunk_hash"`
 	EndChunkIndex   uint64 `json:"end_chunk_index" gorm:"column:end_chunk_index"`
```
```diff
@@ -49,6 +50,10 @@ type Batch struct {
 	OracleStatus int16  `json:"oracle_status" gorm:"column:oracle_status;default:1"`
 	OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"`
 
+	// blob
+	BlobDataProof []byte `json:"blob_data_proof" gorm:"column:blob_data_proof"`
+	BlobSize      uint64 `json:"blob_size" gorm:"column:blob_size"`
+
 	// metadata
 	CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
 	UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
```
```diff
@@ -151,6 +156,7 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
 	newBatch := Batch{
 		Index:           batch.Index,
 		Hash:            daBatch.Hash().Hex(),
+		DataHash:        daBatch.DataHash.Hex(),
 		StartChunkHash:  startDAChunkHash.Hex(),
 		StartChunkIndex: startChunkIndex,
 		EndChunkHash:    endDAChunkHash.Hex(),
```
```diff
@@ -163,6 +169,8 @@
 		ProvingStatus: int16(types.ProvingTaskUnassigned),
 		RollupStatus:  int16(types.RollupPending),
 		OracleStatus:  int16(types.GasOraclePending),
+		BlobDataProof: nil, // using mock value because this piece of codes is only used in unit tests
+		BlobSize:      0,   // using mock value because this piece of codes is only used in unit tests
 	}
 
 	db := o.db
```
```diff
@@ -43,6 +43,10 @@ type Chunk struct {
 	// batch
 	BatchHash string `json:"batch_hash" gorm:"column:batch_hash;default:NULL"`
 
+	// blob
+	CrcMax   uint64 `json:"crc_max" gorm:"column:crc_max"`
+	BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"`
+
 	// metadata
 	TotalL2TxGas uint64 `json:"total_l2_tx_gas" gorm:"column:total_l2_tx_gas"`
 	TotalL2TxNum uint64 `json:"total_l2_tx_num" gorm:"column:total_l2_tx_num"`
```
```diff
@@ -150,6 +154,8 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, dbTX ...
 		ParentChunkStateRoot: parentChunkStateRoot,
 		WithdrawRoot:         chunk.Blocks[numBlocks-1].WithdrawRoot.Hex(),
 		ProvingStatus:        int16(types.ProvingTaskUnassigned),
+		CrcMax:               0, // using mock value because this piece of codes is only used in unit tests
+		BlobSize:             0, // using mock value because this piece of codes is only used in unit tests
 	}
 
 	db := o.db
```