Compare commits

..

1 Commit

Author SHA1 Message Date
Mengran Lan
79f86c879e fix(coordinator): fix issue devnet's coordinator can not set genesis's curieBlock to 0 2024-07-18 18:19:15 +08:00
9 changed files with 28 additions and 79 deletions

View File

@@ -14,8 +14,6 @@ RUN go mod download -x
# Build gas_oracle
FROM base as builder
RUN apt update && apt install ca-certificates -y
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/rollup/cmd/gas_oracle/ && CGO_LDFLAGS="-ldl" go build -v -p 4 -o /bin/gas_oracle

View File

@@ -14,8 +14,6 @@ RUN go mod download -x
# Build rollup_relayer
FROM base as builder
RUN apt update && apt install ca-certificates -y
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/rollup/cmd/rollup_relayer/ && CGO_LDFLAGS="-ldl" go build -v -p 4 -o /bin/rollup_relayer

View File

@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "v4.4.27"
var tag = "v4.4.26"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {

View File

@@ -3,6 +3,7 @@ package provertask
import (
"context"
"encoding/json"
"errors"
"fmt"
"math"
"time"
@@ -190,7 +191,7 @@ func (bp *BatchProverTask) doAssignTaskWithinChunkRange(ctx *gin.Context, taskCt
func (bp *BatchProverTask) getChunkRangeByName(ctx *gin.Context, hardForkName string) (*chunkIndexRange, error) {
hardForkNumber, err := bp.getHardForkNumberByName(hardForkName)
if err != nil {
log.Error("batch assign failure because of the hard fork name don't exist", "fork name", hardForkName)
// log.Error("batch assign failure because of the hard fork name don't exist", "fork name", hardForkName)
return nil, err
}
@@ -246,10 +247,7 @@ func (bp *BatchProverTask) assignWithTwoCircuits(ctx *gin.Context, taskCtx *prov
for i := 0; i < 2; i++ {
hardForkNames[i] = bp.reverseVkMap[getTaskParameter.VKs[i]]
chunkRanges[i], err = bp.getChunkRangeByName(ctx, hardForkNames[i])
if err != nil {
return nil, err
}
if chunkRanges[i] != nil {
if err == nil && chunkRanges[i] != nil {
if chunkRange == nil {
chunkRange = chunkRanges[i]
} else {
@@ -258,7 +256,8 @@ func (bp *BatchProverTask) assignWithTwoCircuits(ctx *gin.Context, taskCtx *prov
}
}
if chunkRange == nil {
return nil, nil
log.Error("chunkRange empty")
return nil, errors.New("chunkRange empty")
}
var hardForkName string
getHardForkName := func(batch *orm.Batch) (string, error) {

View File

@@ -3,6 +3,7 @@ package provertask
import (
"context"
"encoding/json"
"errors"
"fmt"
"time"
@@ -181,21 +182,31 @@ func (cp *ChunkProverTask) assignWithTwoCircuits(ctx *gin.Context, taskCtx *prov
blockRanges [2]*blockRange
err error
)
var blockRange *blockRange
for i := 0; i < 2; i++ {
hardForkNames[i] = cp.reverseVkMap[getTaskParameter.VKs[i]]
blockRanges[i], err = cp.getBlockRangeByName(hardForkNames[i])
if err != nil {
return nil, err
if err == nil && blockRanges[i] != nil {
if blockRange == nil {
blockRange = blockRanges[i]
} else {
var err2 error
blockRange, err2 = blockRange.merge(*blockRanges[i])
if err2 != nil {
return nil, err2
}
}
}
}
blockRange, err := blockRanges[0].merge(*blockRanges[1])
if err != nil {
return nil, err
if blockRange == nil {
log.Error("blockRange empty")
return nil, errors.New("blockRange empty")
}
var hardForkName string
getHardForkName := func(chunk *orm.Chunk) (string, error) {
for i := 0; i < 2; i++ {
if blockRanges[i].contains(chunk.StartBlockNumber, chunk.EndBlockNumber) {
if blockRanges[i] != nil && blockRanges[i].contains(chunk.StartBlockNumber, chunk.EndBlockNumber) {
hardForkName = hardForkNames[i]
break
}
@@ -235,7 +246,7 @@ func (r *blockRange) contains(start, end uint64) bool {
func (cp *ChunkProverTask) getBlockRangeByName(hardForkName string) (*blockRange, error) {
hardForkNumber, err := cp.getHardForkNumberByName(hardForkName)
if err != nil {
log.Error("chunk assign failure because of the hard fork name don't exist", "fork name", hardForkName)
// log.Error("chunk assign failure because of the hard fork name don't exist", "fork name", hardForkName)
return nil, err
}

View File

@@ -240,8 +240,7 @@ func (p *BatchProposer) proposeBatch() error {
batch.Chunks = append(batch.Chunks, chunk)
metrics, calcErr := utils.CalculateBatchMetrics(&batch, codecVersion)
var compressErr *encoding.CompressedDataCompatibilityError
if errors.As(calcErr, &compressErr) {
if errors.Is(calcErr, &encoding.CompressedDataCompatibilityError{}) {
if i == 0 {
// The first chunk fails compressed data compatibility check, manual fix is needed.
return fmt.Errorf("the first chunk fails compressed data compatibility check; start block number: %v, end block number: %v", dbChunks[0].StartBlockNumber, dbChunks[0].EndBlockNumber)

View File

@@ -242,8 +242,7 @@ func (p *ChunkProposer) proposeChunk() error {
chunk.Blocks = append(chunk.Blocks, block)
metrics, calcErr := utils.CalculateChunkMetrics(&chunk, codecVersion)
var compressErr *encoding.CompressedDataCompatibilityError
if errors.As(calcErr, &compressErr) {
if errors.Is(calcErr, &encoding.CompressedDataCompatibilityError{}) {
if i == 0 {
// The first block fails compressed data compatibility check, manual fix is needed.
return fmt.Errorf("the first block fails compressed data compatibility check; block number: %v", block.Header.Number)

View File

@@ -92,8 +92,7 @@ func CalculateChunkMetrics(chunk *encoding.Chunk, codecVersion encoding.CodecVer
metrics.L1CommitUncompressedBatchBytesSize, metrics.L1CommitBlobSize, err = codecv2.EstimateChunkL1CommitBatchSizeAndBlobSize(chunk)
metrics.EstimateBlobSizeTime = time.Since(start)
if err != nil {
var compressErr *encoding.CompressedDataCompatibilityError
if errors.As(err, &compressErr) {
if errors.Is(err, &encoding.CompressedDataCompatibilityError{}) {
return nil, err
} else {
return nil, fmt.Errorf("failed to estimate codecv2 chunk L1 commit batch size and blob size: %w", err)
@@ -177,8 +176,7 @@ func CalculateBatchMetrics(batch *encoding.Batch, codecVersion encoding.CodecVer
metrics.L1CommitUncompressedBatchBytesSize, metrics.L1CommitBlobSize, err = codecv2.EstimateBatchL1CommitBatchSizeAndBlobSize(batch)
metrics.EstimateBlobSizeTime = time.Since(start)
if err != nil {
var compressErr *encoding.CompressedDataCompatibilityError
if errors.As(err, &compressErr) {
if errors.Is(err, &encoding.CompressedDataCompatibilityError{}) {
return nil, err
} else {
return nil, fmt.Errorf("failed to estimate codecv2 batch L1 commit batch size and blob size: %w", err)

View File

@@ -1,53 +0,0 @@
package utils
import (
"errors"
"math/big"
"testing"
"github.com/agiledragon/gomonkey/v2"
"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/da-codec/encoding/codecv2"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/stretchr/testify/assert"
)
// regression test
// TestCompressedDataCompatibilityErrorCatching verifies that
// CalculateChunkMetrics and CalculateBatchMetrics propagate a
// *encoding.CompressedDataCompatibilityError returned by the codecv2
// size/blob estimators, so that callers can still detect it via errors.As.
func TestCompressedDataCompatibilityErrorCatching(t *testing.T) {
// Minimal fixtures; field values are irrelevant to the error path under test.
block := &encoding.Block{
Header: &types.Header{
Number: big.NewInt(0),
},
RowConsumption: &types.RowConsumption{},
}
chunk := &encoding.Chunk{
Blocks: []*encoding.Block{block},
}
batch := &encoding.Batch{
Index: 0,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk},
}
// Monkey-patch the chunk estimator to return the typed compatibility error.
patchGuard1 := gomonkey.ApplyFunc(codecv2.EstimateChunkL1CommitBatchSizeAndBlobSize, func(b *encoding.Chunk) (uint64, uint64, error) {
return 0, 0, &encoding.CompressedDataCompatibilityError{Err: errors.New("test-error-1")}
})
defer patchGuard1.Reset()
var compressErr *encoding.CompressedDataCompatibilityError
_, err := CalculateChunkMetrics(chunk, encoding.CodecV2)
assert.Error(t, err)
// The returned error must still unwrap to the typed compatibility error.
assert.ErrorAs(t, err, &compressErr)
// Patch the batch estimator likewise and assert the same propagation.
patchGuard2 := gomonkey.ApplyFunc(codecv2.EstimateBatchL1CommitBatchSizeAndBlobSize, func(b *encoding.Batch) (uint64, uint64, error) {
return 0, 0, &encoding.CompressedDataCompatibilityError{Err: errors.New("test-error-2")}
})
defer patchGuard2.Reset()
_, err = CalculateBatchMetrics(batch, encoding.CodecV2)
assert.Error(t, err)
assert.ErrorAs(t, err, &compressErr)
}