Compare commits

..

3 Commits

Author  SHA1  Message  Date
Sebastien Baizet  0ed87916da  add ca-certificates to gas-oracle and rollup-relayer  2024-07-12 10:57:27 +02:00
sbaizet  4e3dc52db3  fix: add ca-certificates on go-rust-builder intermediate image (#1428)  2024-07-11 15:05:55 +08:00
colin  8471838cd4  fix(rollup-relayer): catch errors (#1427)  2024-07-10 22:56:16 +08:00
9 changed files with 79 additions and 28 deletions

View File

@@ -14,6 +14,8 @@ RUN go mod download -x
 # Build gas_oracle
 FROM base as builder
+RUN apt update && apt install ca-certificates -y
+
 RUN --mount=target=. \
     --mount=type=cache,target=/root/.cache/go-build \
     cd /src/rollup/cmd/gas_oracle/ && CGO_LDFLAGS="-ldl" go build -v -p 4 -o /bin/gas_oracle

View File

@@ -14,6 +14,8 @@ RUN go mod download -x
 # Build rollup_relayer
 FROM base as builder
+RUN apt update && apt install ca-certificates -y
+
 RUN --mount=target=. \
     --mount=type=cache,target=/root/.cache/go-build \
     cd /src/rollup/cmd/rollup_relayer/ && CGO_LDFLAGS="-ldl" go build -v -p 4 -o /bin/rollup_relayer
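
Note on the two Dockerfile hunks above: installing ca-certificates in the builder stage is presumably there so the gas_oracle and rollup_relayer binaries (or a runtime stage that copies the bundle from this stage) can verify TLS certificates when talking to HTTPS endpoints. A minimal Go sketch of the failure mode; the URL is purely illustrative:

package main

import (
    "fmt"
    "net/http"
)

func main() {
    // In an image without a CA bundle (no ca-certificates package, empty
    // /etc/ssl/certs), this fails with an error along the lines of
    // "x509: certificate signed by unknown authority".
    resp, err := http.Get("https://example.org")
    if err != nil {
        fmt.Println("TLS verification failed:", err)
        return
    }
    defer resp.Body.Close()
    fmt.Println("CA bundle present, status:", resp.Status)
}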

View File

@@ -5,7 +5,7 @@ import (
     "runtime/debug"
 )
 
-var tag = "v4.4.26"
+var tag = "v4.4.27"
 
 var commit = func() string {
     if info, ok := debug.ReadBuildInfo(); ok {
View File

@@ -3,7 +3,6 @@ package provertask
 import (
     "context"
     "encoding/json"
-    "errors"
     "fmt"
     "math"
     "time"
@@ -191,7 +190,7 @@ func (bp *BatchProverTask) doAssignTaskWithinChunkRange(ctx *gin.Context, taskCt
 func (bp *BatchProverTask) getChunkRangeByName(ctx *gin.Context, hardForkName string) (*chunkIndexRange, error) {
     hardForkNumber, err := bp.getHardForkNumberByName(hardForkName)
     if err != nil {
-        // log.Error("batch assign failure because of the hard fork name don't exist", "fork name", hardForkName)
+        log.Error("batch assign failure because of the hard fork name don't exist", "fork name", hardForkName)
         return nil, err
     }
@@ -247,7 +246,10 @@ func (bp *BatchProverTask) assignWithTwoCircuits(ctx *gin.Context, taskCtx *prov
     for i := 0; i < 2; i++ {
         hardForkNames[i] = bp.reverseVkMap[getTaskParameter.VKs[i]]
         chunkRanges[i], err = bp.getChunkRangeByName(ctx, hardForkNames[i])
-        if err == nil && chunkRanges[i] != nil {
+        if err != nil {
+            return nil, err
+        }
+        if chunkRanges[i] != nil {
             if chunkRange == nil {
                 chunkRange = chunkRanges[i]
             } else {
@@ -256,8 +258,7 @@ func (bp *BatchProverTask) assignWithTwoCircuits(ctx *gin.Context, taskCtx *prov
         }
     }
     if chunkRange == nil {
-        log.Error("chunkRange empty")
-        return nil, errors.New("chunkRange empty")
+        return nil, nil
     }
     var hardForkName string
     getHardForkName := func(batch *orm.Batch) (string, error) {
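
Stripped of the surrounding coordinator code, the two hunks above change two things: the old loop guard "if err == nil && chunkRanges[i] != nil" silently dropped lookup errors, and an empty merged range was reported as an error even though it only means there is nothing to assign. A minimal sketch of the fixed control flow, with hypothetical names (the chunkIndexRange fields, pickRange, lookups) standing in for the real helpers:

package main

import (
    "errors"
    "fmt"
)

// chunkIndexRange stands in for the coordinator's type of the same name;
// the fields are assumptions made for this sketch only.
type chunkIndexRange struct{ start, end uint64 }

// pickRange is a hypothetical reduction of the assignment loop above.
func pickRange(lookups []func() (*chunkIndexRange, error)) (*chunkIndexRange, error) {
    var merged *chunkIndexRange
    for _, lookup := range lookups {
        r, err := lookup()
        if err != nil {
            // Before the fix the guard read "if err == nil && r != nil", so a
            // lookup error was silently skipped; now it is surfaced to the caller.
            return nil, err
        }
        if r == nil {
            continue
        }
        if merged == nil {
            merged = &chunkIndexRange{start: r.start, end: r.end}
            continue
        }
        // Widen the merged range to cover both (simplified stand-in for merge()).
        if r.start < merged.start {
            merged.start = r.start
        }
        if r.end > merged.end {
            merged.end = r.end
        }
    }
    // A nil result now means "no matching chunk range, nothing to assign" and
    // is returned as (nil, nil) instead of being reported as an error.
    return merged, nil
}

func main() {
    _, err := pickRange([]func() (*chunkIndexRange, error){
        func() (*chunkIndexRange, error) { return nil, errors.New("unknown hard fork name") },
    })
    fmt.Println(err) // unknown hard fork name -- no longer swallowed by the loop
}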

View File

@@ -3,7 +3,6 @@ package provertask
 import (
     "context"
     "encoding/json"
-    "errors"
     "fmt"
     "time"
@@ -182,31 +181,21 @@ func (cp *ChunkProverTask) assignWithTwoCircuits(ctx *gin.Context, taskCtx *prov
         blockRanges   [2]*blockRange
         err           error
     )
-    var blockRange *blockRange
     for i := 0; i < 2; i++ {
         hardForkNames[i] = cp.reverseVkMap[getTaskParameter.VKs[i]]
         blockRanges[i], err = cp.getBlockRangeByName(hardForkNames[i])
-        if err == nil && blockRanges[i] != nil {
-            if blockRange == nil {
-                blockRange = blockRanges[i]
-            } else {
-                var err2 error
-                blockRange, err2 = blockRange.merge(*blockRanges[i])
-                if err2 != nil {
-                    return nil, err2
-                }
-            }
-        }
+        if err != nil {
+            return nil, err
+        }
     }
-    if blockRange == nil {
-        log.Error("blockRange empty")
-        return nil, errors.New("blockRange empty")
+    blockRange, err := blockRanges[0].merge(*blockRanges[1])
+    if err != nil {
+        return nil, err
     }
     var hardForkName string
     getHardForkName := func(chunk *orm.Chunk) (string, error) {
         for i := 0; i < 2; i++ {
-            if blockRanges[i] != nil && blockRanges[i].contains(chunk.StartBlockNumber, chunk.EndBlockNumber) {
+            if blockRanges[i].contains(chunk.StartBlockNumber, chunk.EndBlockNumber) {
                 hardForkName = hardForkNames[i]
                 break
             }
@@ -246,7 +235,7 @@ func (r *blockRange) contains(start, end uint64) bool {
 func (cp *ChunkProverTask) getBlockRangeByName(hardForkName string) (*blockRange, error) {
     hardForkNumber, err := cp.getHardForkNumberByName(hardForkName)
     if err != nil {
-        // log.Error("chunk assign failure because of the hard fork name don't exist", "fork name", hardForkName)
+        log.Error("chunk assign failure because of the hard fork name don't exist", "fork name", hardForkName)
         return nil, err
     }
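
The refactor above leans on two small blockRange methods: merge, which now always combines blockRanges[0] and blockRanges[1] and can fail, and contains(start, end uint64) bool (its signature is visible in the hunk header above), which the new getHardForkName lookup uses to recover the fork name for a chunk. A sketch of plausible semantics for them; the real definitions live elsewhere in the coordinator and the from/to fields here are assumptions:

package sketch

import "fmt"

// blockRange mirrors the receiver in "func (r *blockRange) contains(start, end uint64) bool";
// the fields are assumptions for this sketch.
type blockRange struct {
    from, to uint64 // inclusive block numbers
}

// contains reports whether [start, end] falls entirely inside the range.
func (r *blockRange) contains(start, end uint64) bool {
    return start >= r.from && end <= r.to
}

// merge combines two ranges into one covering range; the error return seen in
// the diff would then correspond to inputs that cannot form a single range.
func (r *blockRange) merge(other blockRange) (*blockRange, error) {
    if r.to+1 < other.from || other.to+1 < r.from {
        return nil, fmt.Errorf("ranges [%d, %d] and [%d, %d] are disjoint", r.from, r.to, other.from, other.to)
    }
    merged := &blockRange{from: r.from, to: r.to}
    if other.from < merged.from {
        merged.from = other.from
    }
    if other.to > merged.to {
        merged.to = other.to
    }
    return merged, nil
}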

View File

@@ -240,7 +240,8 @@ func (p *BatchProposer) proposeBatch() error {
         batch.Chunks = append(batch.Chunks, chunk)
         metrics, calcErr := utils.CalculateBatchMetrics(&batch, codecVersion)
-        if errors.Is(calcErr, &encoding.CompressedDataCompatibilityError{}) {
+        var compressErr *encoding.CompressedDataCompatibilityError
+        if errors.As(calcErr, &compressErr) {
             if i == 0 {
                 // The first chunk fails compressed data compatibility check, manual fix is needed.
                 return fmt.Errorf("the first chunk fails compressed data compatibility check; start block number: %v, end block number: %v", dbChunks[0].StartBlockNumber, dbChunks[0].EndBlockNumber)

View File

@@ -242,7 +242,8 @@ func (p *ChunkProposer) proposeChunk() error {
         chunk.Blocks = append(chunk.Blocks, block)
         metrics, calcErr := utils.CalculateChunkMetrics(&chunk, codecVersion)
-        if errors.Is(calcErr, &encoding.CompressedDataCompatibilityError{}) {
+        var compressErr *encoding.CompressedDataCompatibilityError
+        if errors.As(calcErr, &compressErr) {
             if i == 0 {
                 // The first block fails compressed data compatibility check, manual fix is needed.
                 return fmt.Errorf("the first block fails compressed data compatibility check; block number: %v", block.Header.Number)

View File

@@ -92,7 +92,8 @@ func CalculateChunkMetrics(chunk *encoding.Chunk, codecVersion encoding.CodecVer
         metrics.L1CommitUncompressedBatchBytesSize, metrics.L1CommitBlobSize, err = codecv2.EstimateChunkL1CommitBatchSizeAndBlobSize(chunk)
         metrics.EstimateBlobSizeTime = time.Since(start)
         if err != nil {
-            if errors.Is(err, &encoding.CompressedDataCompatibilityError{}) {
+            var compressErr *encoding.CompressedDataCompatibilityError
+            if errors.As(err, &compressErr) {
                 return nil, err
             } else {
                 return nil, fmt.Errorf("failed to estimate codecv2 chunk L1 commit batch size and blob size: %w", err)
@@ -176,7 +177,8 @@ func CalculateBatchMetrics(batch *encoding.Batch, codecVersion encoding.CodecVer
         metrics.L1CommitUncompressedBatchBytesSize, metrics.L1CommitBlobSize, err = codecv2.EstimateBatchL1CommitBatchSizeAndBlobSize(batch)
         metrics.EstimateBlobSizeTime = time.Since(start)
         if err != nil {
-            if errors.Is(err, &encoding.CompressedDataCompatibilityError{}) {
+            var compressErr *encoding.CompressedDataCompatibilityError
+            if errors.As(err, &compressErr) {
                 return nil, err
             } else {
                 return nil, fmt.Errorf("failed to estimate codecv2 batch L1 commit batch size and blob size: %w", err)

View File

@@ -0,0 +1,53 @@
+package utils
+
+import (
+    "errors"
+    "math/big"
+    "testing"
+
+    "github.com/agiledragon/gomonkey/v2"
+    "github.com/scroll-tech/da-codec/encoding"
+    "github.com/scroll-tech/da-codec/encoding/codecv2"
+    "github.com/scroll-tech/go-ethereum/common"
+    "github.com/scroll-tech/go-ethereum/core/types"
+    "github.com/stretchr/testify/assert"
+)
+
+// regression test
+func TestCompressedDataCompatibilityErrorCatching(t *testing.T) {
+    block := &encoding.Block{
+        Header: &types.Header{
+            Number: big.NewInt(0),
+        },
+        RowConsumption: &types.RowConsumption{},
+    }
+    chunk := &encoding.Chunk{
+        Blocks: []*encoding.Block{block},
+    }
+    batch := &encoding.Batch{
+        Index:                      0,
+        TotalL1MessagePoppedBefore: 0,
+        ParentBatchHash:            common.Hash{},
+        Chunks:                     []*encoding.Chunk{chunk},
+    }
+
+    patchGuard1 := gomonkey.ApplyFunc(codecv2.EstimateChunkL1CommitBatchSizeAndBlobSize, func(b *encoding.Chunk) (uint64, uint64, error) {
+        return 0, 0, &encoding.CompressedDataCompatibilityError{Err: errors.New("test-error-1")}
+    })
+    defer patchGuard1.Reset()
+
+    var compressErr *encoding.CompressedDataCompatibilityError
+
+    _, err := CalculateChunkMetrics(chunk, encoding.CodecV2)
+    assert.Error(t, err)
+    assert.ErrorAs(t, err, &compressErr)
+
+    patchGuard2 := gomonkey.ApplyFunc(codecv2.EstimateBatchL1CommitBatchSizeAndBlobSize, func(b *encoding.Batch) (uint64, uint64, error) {
+        return 0, 0, &encoding.CompressedDataCompatibilityError{Err: errors.New("test-error-2")}
+    })
+    defer patchGuard2.Reset()
+
+    _, err = CalculateBatchMetrics(batch, encoding.CodecV2)
+    assert.Error(t, err)
+    assert.ErrorAs(t, err, &compressErr)
+}
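
One practical note on the new regression test: gomonkey.ApplyFunc patches the target functions at runtime, and per the gomonkey documentation this only works reliably with compiler inlining disabled, so the test is normally run with something like go test -gcflags="all=-l" ./... (the exact invocation depends on the repository's build setup, which is not shown in this diff).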