Mirror of https://github.com/scroll-tech/scroll.git, synced 2026-01-11 23:18:07 -05:00

Compare commits: 13 Commits (v4.5.12-ba ... bump-versi)
| Author | SHA1 | Date |
|---|---|---|
| | b55c858980 | |
| | 5d6b5a89f4 | |
| | 4ee459a602 | |
| | 276385fd0a | |
| | 82fb15de3b | |
| | 5204ad50e0 | |
| | f824fb0efc | |
| | a55c7bdc77 | |
| | 47b1a037a9 | |
| | ae34020c34 | |
| | fa9fab6e98 | |
| | c4f869a33a | |
| | 0cee9a51e6 | |
.github/workflows/docker.yml (vendored, 45 lines changed)
@@ -99,6 +99,51 @@ jobs:
            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest

  blob_uploader:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      - name: Login to Docker Hub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v4
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: ${{ env.AWS_REGION }}
      - name: Login to Amazon ECR
        id: login-ecr
        uses: aws-actions/amazon-ecr-login@v2
      - name: check repo and create it if not exist
        env:
          REPOSITORY: blob-uploader
        run: |
          aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
      - name: Build and push
        uses: docker/build-push-action@v3
        env:
          ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
          REPOSITORY: blob-uploader
          IMAGE_TAG: ${{ github.ref_name }}
        with:
          context: .
          file: ./build/dockerfiles/blob_uploader.Dockerfile
          platforms: linux/amd64,linux/arm64
          push: true
          tags: |
            scrolltech/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
            scrolltech/${{ env.REPOSITORY }}:latest
            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest

  rollup-db-cli:
    runs-on: ubuntu-latest
    steps:
@@ -154,7 +154,7 @@ func (c *CrossMessage) GetL2UnclaimedWithdrawalsByAddress(ctx context.Context, s
	db := c.db.WithContext(ctx)
	db = db.Model(&CrossMessage{})
	db = db.Where("message_type = ?", btypes.MessageTypeL2SentMessage)
-	db = db.Where("tx_status = ?", types.TxStatusTypeSent)
+	db = db.Where("tx_status in (?)", []types.TxStatusType{types.TxStatusTypeSent, types.TxStatusTypeFailedRelayed, types.TxStatusTypeRelayTxReverted})
	db = db.Where("sender = ?", sender)
	db = db.Order("block_timestamp desc")
	db = db.Limit(500)
build/dockerfiles/blob_uploader.Dockerfile (new file, 30 lines)
@@ -0,0 +1,30 @@
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base

WORKDIR /src
COPY go.work* ./
COPY ./rollup/go.* ./rollup/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x

# Build blob_uploader
FROM base as builder

RUN --mount=target=. \
    --mount=type=cache,target=/root/.cache/go-build \
    cd /src/rollup/cmd/blob_uploader/ && CGO_LDFLAGS="-ldl" go build -v -p 4 -o /bin/blob_uploader

# Pull blob_uploader into a second stage deploy ubuntu container
FROM ubuntu:20.04

RUN apt update && apt install vim netcat-openbsd net-tools curl ca-certificates -y

ENV CGO_LDFLAGS="-ldl"

COPY --from=builder /bin/blob_uploader /bin/
WORKDIR /app
ENTRYPOINT ["blob_uploader"]
build/dockerfiles/blob_uploader.Dockerfile.dockerignore (new file, 5 lines)
@@ -0,0 +1,5 @@
assets/
docs/
l2geth/
rpc-gateway/
*target/*
@@ -41,7 +41,7 @@ func (g *gormLogger) Error(_ context.Context, msg string, data ...interface{}) {
func (g *gormLogger) Trace(_ context.Context, begin time.Time, fc func() (string, int64), err error) {
	elapsed := time.Since(begin)
	sql, rowsAffected := fc()
-	g.gethLogger.Debug("gorm", "line", utils.FileWithLineNum(), "cost", elapsed, "sql", sql, "rowsAffected", rowsAffected, "err", err)
+	g.gethLogger.Trace("gorm", "line", utils.FileWithLineNum(), "cost", elapsed, "sql", sql, "rowsAffected", rowsAffected, "err", err)
}

// InitDB init the db handler
@@ -1,10 +1,7 @@
#![allow(static_mut_refs)]

mod euclid;
mod euclidv2;

use anyhow::{bail, Result};
use euclid::EuclidVerifier;
use euclidv2::EuclidV2Verifier;
use serde::{Deserialize, Serialize};
use std::{cell::OnceCell, path::Path, rc::Rc};
@@ -42,26 +39,14 @@ pub struct VerifierConfig {
type HardForkName = String;

struct VerifierPair(HardForkName, Rc<Box<dyn ProofVerifier>>);

static mut VERIFIER_LOW: OnceCell<VerifierPair> = OnceCell::new();
static mut VERIFIER_HIGH: OnceCell<VerifierPair> = OnceCell::new();

pub fn init(config: VerifierConfig) {
    let verifier = EuclidVerifier::new(&config.high_version_circuit.assets_path);
    unsafe {
        VERIFIER_LOW
            .set(VerifierPair(
                "euclid".to_string(),
                Rc::new(Box::new(verifier)),
            ))
            .unwrap_unchecked();
    }

    let verifier = EuclidV2Verifier::new(&config.high_version_circuit.assets_path);
    unsafe {
        VERIFIER_HIGH
            .set(VerifierPair(
                "euclidV2".to_string(),
                config.high_version_circuit.fork_name,
                Rc::new(Box::new(verifier)),
            ))
            .unwrap_unchecked();
@@ -70,12 +55,6 @@ pub fn init(config: VerifierConfig) {

pub fn get_verifier(fork_name: &str) -> Result<Rc<Box<dyn ProofVerifier>>> {
    unsafe {
        if let Some(verifier) = VERIFIER_LOW.get() {
            if verifier.0 == fork_name {
                return Ok(verifier.1.clone());
            }
        }

        if let Some(verifier) = VERIFIER_HIGH.get() {
            if verifier.0 == fork_name {
                return Ok(verifier.1.clone());
@@ -1,65 +0,0 @@
use super::{ProofVerifier, TaskType, VKDump};

use anyhow::Result;

use crate::utils::panic_catch;
use euclid_prover::{BatchProof, BundleProof, ChunkProof};
use euclid_verifier::verifier::{BatchVerifier, BundleVerifierEuclidV1, ChunkVerifier};
use std::{fs::File, path::Path};

pub struct EuclidVerifier {
    chunk_verifier: ChunkVerifier,
    batch_verifier: BatchVerifier,
    bundle_verifier: BundleVerifierEuclidV1,
}

impl EuclidVerifier {
    pub fn new(assets_dir: &str) -> Self {
        let verifier_bin = Path::new(assets_dir).join("verifier.bin");
        let config = Path::new(assets_dir).join("root-verifier-vm-config");
        let exe = Path::new(assets_dir).join("root-verifier-committed-exe");

        Self {
            chunk_verifier: ChunkVerifier::setup(&config, &exe, &verifier_bin)
                .expect("Setting up chunk verifier"),
            batch_verifier: BatchVerifier::setup(&config, &exe, &verifier_bin)
                .expect("Setting up batch verifier"),
            bundle_verifier: BundleVerifierEuclidV1::setup(&config, &exe, &verifier_bin)
                .expect("Setting up bundle verifier"),
        }
    }
}

impl ProofVerifier for EuclidVerifier {
    fn verify(&self, task_type: super::TaskType, proof: Vec<u8>) -> Result<bool> {
        panic_catch(|| match task_type {
            TaskType::Chunk => {
                let proof = serde_json::from_slice::<ChunkProof>(proof.as_slice()).unwrap();
                self.chunk_verifier
                    .verify_proof(proof.proof.as_root_proof().unwrap())
            }
            TaskType::Batch => {
                let proof = serde_json::from_slice::<BatchProof>(proof.as_slice()).unwrap();
                self.batch_verifier
                    .verify_proof(proof.proof.as_root_proof().unwrap())
            }
            TaskType::Bundle => {
                let proof = serde_json::from_slice::<BundleProof>(proof.as_slice()).unwrap();
                self.bundle_verifier
                    .verify_proof_evm(&proof.proof.as_evm_proof().unwrap())
            }
        })
        .map_err(|err_str: String| anyhow::anyhow!(err_str))
    }

    fn dump_vk(&self, file: &Path) {
        let f = File::create(file).expect("Failed to open file to dump VK");

        let dump = VKDump {
            chunk_vk: base64::encode(self.chunk_verifier.get_app_vk()),
            batch_vk: base64::encode(self.batch_verifier.get_app_vk()),
            bundle_vk: base64::encode(self.bundle_verifier.get_app_vk()),
        };
        serde_json::to_writer(f, &dump).expect("Failed to dump VK");
    }
}
common/testdata/blobdata.json (vendored, new file, 4 lines)
File diff suppressed because one or more lines are too long
@@ -326,3 +326,53 @@ func (s TxStatus) String() string {
		return fmt.Sprintf("Unknown TxStatus (%d)", int32(s))
	}
}

// BlobUploadStatus represents the status of a blob upload
type BlobUploadStatus int

const (
	// BlobUploadStatusUndefined indicates an undefined status
	BlobUploadStatusUndefined BlobUploadStatus = iota
	// BlobUploadStatusPending indicates a pending upload status
	BlobUploadStatusPending
	// BlobUploadStatusUploaded indicates a successful upload status
	BlobUploadStatusUploaded
	// BlobUploadStatusFailed indicates a failed upload status
	BlobUploadStatusFailed
)

func (s BlobUploadStatus) String() string {
	switch s {
	case BlobUploadStatusPending:
		return "BlobUploadStatusPending"
	case BlobUploadStatusUploaded:
		return "BlobUploadStatusUploaded"
	case BlobUploadStatusFailed:
		return "BlobUploadStatusFailed"
	default:
		return fmt.Sprintf("Unknown BlobUploadStatus (%d)", int32(s))
	}
}

// BlobStoragePlatform represents the platform a blob upload to
type BlobStoragePlatform int

const (
	// BlobStoragePlatformUndefined indicates an undefined platform
	BlobStoragePlatformUndefined BlobStoragePlatform = iota
	// BlobStoragePlatformS3 represents AWS S3
	BlobStoragePlatformS3
	// BlobStoragePlatformArweave represents storage blockchain Arweave
	BlobStoragePlatformArweave
)

func (s BlobStoragePlatform) String() string {
	switch s {
	case BlobStoragePlatformS3:
		return "BlobStoragePlatformS3"
	case BlobStoragePlatformArweave:
		return "BlobStoragePlatformArweave"
	default:
		return fmt.Sprintf("Unknown BlobStoragePlatform (%d)", int32(s))
	}
}
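The new status and platform enums pair with the `blob_upload` table introduced by migration 00027 further down in this diff. As a rough illustration of how the two fit together, here is a minimal GORM model sketch; it is not part of the change, and the struct name, field types, and tags are assumptions, with only the column set taken from the migration.

```go
package orm

import (
	"time"

	"gorm.io/gorm"
)

// BlobUpload is a hypothetical GORM model for the blob_upload table added in
// migration 00027; field names and tags are assumptions for illustration.
type BlobUpload struct {
	BatchIndex uint64 `json:"batch_index" gorm:"column:batch_index"`
	BatchHash  string `json:"batch_hash" gorm:"column:batch_hash"`
	Platform   int16  `json:"platform" gorm:"column:platform"`
	Status     int16  `json:"status" gorm:"column:status"`

	CreatedAt time.Time      `json:"created_at" gorm:"column:created_at"`
	UpdatedAt time.Time      `json:"updated_at" gorm:"column:updated_at"`
	DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at"`
}

// TableName returns the name of the underlying table.
func (*BlobUpload) TableName() string {
	return "blob_upload"
}
```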
@@ -11,7 +11,6 @@ import (
)

const (
	EuclidFork = "euclid"
	EuclidV2Fork = "euclidV2"

	EuclidV2ForkNameForProver = "euclidv2"
@@ -46,7 +45,7 @@ const (

// ChunkTaskDetail is a type containing ChunkTask detail for chunk task.
type ChunkTaskDetail struct {
-	// use one of the string of EuclidFork / EuclidV2Fork
+	// use one of the string of "euclidv1" / "euclidv2"
	ForkName string `json:"fork_name"`
	BlockHashes []common.Hash `json:"block_hashes"`
	PrevMsgQueueHash common.Hash `json:"prev_msg_queue_hash"`
@@ -97,7 +96,7 @@ func (e *Byte48) UnmarshalJSON(input []byte) error {

// BatchTaskDetail is a type containing BatchTask detail.
type BatchTaskDetail struct {
-	// use one of the string of EuclidFork / EuclidV2Fork
+	// use one of the string of "euclidv1" / "euclidv2"
	ForkName string `json:"fork_name"`
	ChunkInfos []*ChunkInfo `json:"chunk_infos"`
	ChunkProofs []*OpenVMChunkProof `json:"chunk_proofs"`
@@ -110,7 +109,7 @@ type BatchTaskDetail struct {

// BundleTaskDetail consists of all the information required to describe the task to generate a proof for a bundle of batches.
type BundleTaskDetail struct {
-	// use one of the string of EuclidFork / EuclidV2Fork
+	// use one of the string of "euclidv1" / "euclidv2"
	ForkName string `json:"fork_name"`
	BatchProofs []*OpenVMBatchProof `json:"batch_proofs"`
	BundleInfo *OpenVMBundleInfo `json:"bundle_info,omitempty"`
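A quick sketch of what the lower-cased fork-name convention looks like on the wire; this is illustrative only, the hash value and import paths are assumptions, and only the field names come from the struct above.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/scroll-tech/go-ethereum/common"

	"scroll-tech/common/types/message"
)

func main() {
	// Illustrative only: a chunk task as a prover would now receive it,
	// carrying the lower-cased "euclidv2" fork name rather than "euclidV2".
	detail := message.ChunkTaskDetail{
		ForkName:         "euclidv2",
		BlockHashes:      []common.Hash{common.HexToHash("0x01")},
		PrevMsgQueueHash: common.Hash{},
	}
	payload, err := json.Marshal(&detail)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(payload))
}
```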
common/utils/blob.go (new file, 23 lines)
@@ -0,0 +1,23 @@
package utils

import (
	"crypto/sha256"
	"fmt"

	"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
)

// CalculateVersionedBlobHash calculate the kzg4844 versioned blob hash from a blob
func CalculateVersionedBlobHash(blob kzg4844.Blob) ([32]byte, error) {
	// calculate kzg4844 commitment from blob
	commit, err := kzg4844.BlobToCommitment(&blob)
	if err != nil {
		return [32]byte{}, fmt.Errorf("failed to get blob commitment, err: %w", err)
	}

	// calculate kzg4844 versioned blob hash from blob commitment
	hasher := sha256.New()
	vh := kzg4844.CalcBlobHashV1(hasher, &commit)

	return vh, nil
}
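For context, the value returned by `kzg4844.CalcBlobHashV1` is the EIP-4844 versioned blob hash: the SHA-256 of the KZG commitment with its first byte overwritten by the version prefix `0x01`. A small sketch of that equivalence follows; the helper is hypothetical and only meant to illustrate what the new utility computes.

```go
package utils

import (
	"crypto/sha256"

	"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
)

// versionedHashManually re-derives the EIP-4844 versioned hash by hand:
// sha256(commitment) with the leading byte replaced by the KZG version byte.
// For the same commitment it should match kzg4844.CalcBlobHashV1 above.
func versionedHashManually(commit kzg4844.Commitment) [32]byte {
	h := sha256.Sum256(commit[:])
	h[0] = 0x01 // VERSIONED_HASH_VERSION_KZG
	return h
}
```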
common/utils/blob_test.go (new file, 51 lines)
@@ -0,0 +1,51 @@
package utils

import (
	"encoding/hex"
	"encoding/json"
	"os"
	"testing"

	"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
)

type BlobData struct {
	VersionedBlobHash string `json:"versionedBlobHash"`
	BlobData string `json:"blobData"`
}

// TestCalculateVersionedBlobHash tests the CalculateVersionedBlobHash function
func TestCalculateVersionedBlobHash(t *testing.T) {
	// Read the test data
	data, err := os.ReadFile("../testdata/blobdata.json")
	if err != nil {
		t.Fatalf("Failed to read blobdata.json: %v", err)
	}

	var blobData BlobData
	if err := json.Unmarshal(data, &blobData); err != nil {
		t.Fatalf("Failed to parse blobdata.json: %v", err)
	}

	blobBytes, err := hex.DecodeString(blobData.BlobData)
	if err != nil {
		t.Fatalf("Failed to decode blob data: %v", err)
	}

	// Convert []byte to kzg4844.Blob
	var blob kzg4844.Blob
	copy(blob[:], blobBytes)

	// Calculate the hash
	calculatedHashBytes, err := CalculateVersionedBlobHash(blob)
	if err != nil {
		t.Fatalf("Failed to calculate versioned blob hash: %v", err)
	}

	calculatedHash := hex.EncodeToString(calculatedHashBytes[:])

	if calculatedHash != blobData.VersionedBlobHash {
		t.Fatalf("Hash mismatch: got %s, want %s", calculatedHash, blobData.VersionedBlobHash)
	}

}
@@ -5,7 +5,7 @@ import (
	"runtime/debug"
)

-var tag = "v4.5.12"
+var tag = "v4.5.24"

var commit = func() string {
	if info, ok := debug.ReadBuildInfo(); ok {
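The `commit` variable above is initialized from the Go toolchain's embedded build info; the closure body is truncated in this view. As a hedged sketch of the usual pattern (not necessarily the exact body used in this repository), the VCS revision can be read from the build settings like this:

```go
package version

import "runtime/debug"

// commitFromBuildInfo shows the common pattern for recovering the VCS
// revision stamped into the binary by the Go toolchain; the function name
// is hypothetical and the real closure above may differ in detail.
func commitFromBuildInfo() string {
	if info, ok := debug.ReadBuildInfo(); ok {
		for _, setting := range info.Settings {
			if setting.Key == "vcs.revision" {
				return setting.Value
			}
		}
	}
	return "unknown"
}
```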
@@ -9,7 +9,6 @@ import (
	"github.com/scroll-tech/go-ethereum/log"
	"gorm.io/gorm"

	"scroll-tech/common/types/message"
	"scroll-tech/common/version"

	"scroll-tech/coordinator/internal/config"
@@ -34,7 +33,6 @@ func NewLoginLogic(db *gorm.DB, cfg *config.Config, vf *verifier.Verifier) *Logi

	var highHardForks []string
	highHardForks = append(highHardForks, cfg.ProverManager.Verifier.HighVersionCircuit.ForkName)
	highHardForks = append(highHardForks, message.EuclidFork, message.EuclidV2Fork)
	proverVersionHardForkMap[cfg.ProverManager.Verifier.HighVersionCircuit.MinProverVersion] = highHardForks

	return &LoginLogic{
@@ -79,10 +79,6 @@ func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
		OpenVMVkMap: make(map[string]struct{}),
	}

	if err := v.loadOpenVMVks(message.EuclidFork); err != nil {
		return nil, err
	}

	if err := v.loadOpenVMVks(message.EuclidV2Fork); err != nil {
		return nil, err
	}
@@ -2,6 +2,7 @@ package types

import (
	"fmt"

	"scroll-tech/common/types/message"
)

@@ -59,20 +59,20 @@ func testResetDB(t *testing.T) {
	cur, err := Current(pgDB)
	assert.NoError(t, err)
	// total number of tables.
-	assert.Equal(t, int64(26), cur)
+	assert.Equal(t, int64(27), cur)
}

func testMigrate(t *testing.T) {
	assert.NoError(t, Migrate(pgDB))
	cur, err := Current(pgDB)
	assert.NoError(t, err)
-	assert.Equal(t, int64(26), cur)
+	assert.Equal(t, int64(27), cur)
}

func testRollback(t *testing.T) {
	version, err := Current(pgDB)
	assert.NoError(t, err)
-	assert.Equal(t, int64(26), version)
+	assert.Equal(t, int64(27), version)

	assert.NoError(t, Rollback(pgDB, nil))
database/migrate/migrations/00027_blob_upload.sql (new file, 32 lines)
@@ -0,0 +1,32 @@
-- +goose Up
-- +goose StatementBegin

CREATE TABLE blob_upload (
    batch_index BIGINT NOT NULL,
    batch_hash VARCHAR NOT NULL,

    platform SMALLINT NOT NULL,
    status SMALLINT NOT NULL,

    -- metadata
    created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    deleted_at TIMESTAMP(0) DEFAULT NULL
);

CREATE UNIQUE INDEX IF NOT EXISTS batch_index_batch_hash_platform_uindex
ON blob_upload(batch_index, batch_hash, platform) WHERE deleted_at IS NULL;

COMMENT ON COLUMN blob_upload.status IS 'undefined, pending, uploaded, failed';

CREATE INDEX IF NOT EXISTS idx_blob_upload_status_platform ON blob_upload(status, platform) WHERE deleted_at IS NULL;

CREATE INDEX IF NOT EXISTS idx_blob_upload_batch_index_batch_hash_status_platform
ON blob_upload(batch_index, batch_hash, status, platform) WHERE deleted_at IS NULL;

-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
DROP TABLE blob_upload;
-- +goose StatementEnd
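Putting the pieces together, a blob uploader would typically poll this table for rows that are still pending for a given storage platform. The following is a minimal sketch assuming the hypothetical `BlobUpload` model shown earlier and that the enums live in `scroll-tech/common/types`; it is not code from the change itself.

```go
package orm

import (
	"context"
	"fmt"

	"gorm.io/gorm"

	"scroll-tech/common/types"
)

// GetPendingBlobUploads is an illustrative query sketch: fetch up to limit
// blob_upload rows that are still pending for the given storage platform.
func GetPendingBlobUploads(ctx context.Context, db *gorm.DB, platform types.BlobStoragePlatform, limit int) ([]BlobUpload, error) {
	var uploads []BlobUpload
	err := db.WithContext(ctx).
		Model(&BlobUpload{}).
		Where("status = ?", types.BlobUploadStatusPending).
		Where("platform = ?", platform).
		Order("batch_index asc").
		Limit(limit).
		Find(&uploads).Error
	if err != nil {
		return nil, fmt.Errorf("failed to get pending blob uploads: %w", err)
	}
	return uploads, nil
}
```

The partial index on (status, platform) from the migration is what would keep this kind of poll cheap as the table grows.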
go.work.sum (70 lines changed)
@@ -555,12 +555,16 @@ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802 h1:1BDTz0u9nC3//pOC
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno=
|
||||
github.com/CloudyKit/jet/v6 v6.1.0/go.mod h1:d3ypHeIRNo2+XyqnGA8s+aphtcVpjP5hPwP/Lzo7Ro4=
|
||||
github.com/CloudyKit/jet/v6 v6.2.0/go.mod h1:d3ypHeIRNo2+XyqnGA8s+aphtcVpjP5hPwP/Lzo7Ro4=
|
||||
github.com/DATA-DOG/go-sqlmock v1.3.3 h1:CWUqKXe0s8A2z6qCgkP4Kru7wC11YoAnoupUKFDnH08=
|
||||
github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
|
||||
github.com/DataDog/datadog-go v3.2.0+incompatible h1:qSG2N4FghB1He/r2mFrWKCaL7dXCilEuNEeAn20fdD4=
|
||||
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
|
||||
github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
|
||||
github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo=
|
||||
github.com/Joker/jade v1.1.3/go.mod h1:T+2WLyt7VH6Lp0TRxQrUYEs64nRc83wkMQrfeIQKduM=
|
||||
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
|
||||
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
|
||||
github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
|
||||
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
|
||||
github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8=
|
||||
@@ -572,6 +576,7 @@ github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdko
|
||||
github.com/Shopify/goreferrer v0.0.0-20220729165902-8cddb4f5de06/go.mod h1:7erjKLwalezA0k99cWs5L11HWOAPNjdUZ6RxH1BXbbM=
|
||||
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8=
|
||||
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
|
||||
github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8=
|
||||
github.com/aclements/go-moremath v0.0.0-20210112150236-f10218a38794/go.mod h1:7e+I0LQFUI9AXWxOfsQROs9xPhoJtbsyWcjJqDd4KPY=
|
||||
github.com/aead/siphash v1.0.1 h1:FwHfE/T45KPKYuuSAKyyvE+oPWcaQ+CUmFW0bPlM+kg=
|
||||
github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo=
|
||||
@@ -654,6 +659,7 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB
|
||||
github.com/bitly/go-hostpool v0.1.0 h1:XKmsF6k5el6xHG3WPJ8U0Ku/ye7njX7W81Ng7O2ioR0=
|
||||
github.com/bitly/go-simplejson v0.5.0 h1:6IH+V8/tVMab511d5bn4M7EwGXZf9Hj6i2xSwkNEM+Y=
|
||||
github.com/bits-and-blooms/bitset v1.7.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
|
||||
github.com/bits-and-blooms/bitset v1.10.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
|
||||
github.com/bits-and-blooms/bitset v1.12.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
|
||||
github.com/bits-and-blooms/bitset v1.14.2/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
|
||||
github.com/bketelsen/crypt v0.0.4 h1:w/jqZtC9YD4DS/Vp9GhWfWcCpuAL58oTnLoI8vE9YHU=
|
||||
@@ -665,11 +671,9 @@ github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40 h1:y4B3+GPxKlrigF1ha
|
||||
github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c=
|
||||
github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
|
||||
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.2.0 h1:fzn1qaOt32TuLjFlkzYSsBC35Q3KUjT1SwPxiMSCF5k=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.2.0/go.mod h1:U7MHm051Al6XmscBQ0BoNydpOTsFAn707034b5nY8zU=
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U=
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
|
||||
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo=
|
||||
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d h1:yJzD/yFppdVCf6ApMkVy8cUxV0XrxdP9rVf6D87/Mng=
|
||||
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd h1:R/opQEbFEy9JGkIguV40SvRY1uliPX8ifOvi6ICsFCw=
|
||||
@@ -684,6 +688,7 @@ github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91
|
||||
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/logex v1.2.0/go.mod h1:9+9sk7u7pGNWYMkh0hdiL++6OeibzJccyQU4p4MedaY=
|
||||
@@ -714,7 +719,14 @@ github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XP
|
||||
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ=
|
||||
github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM=
|
||||
github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU=
|
||||
github.com/cockroachdb/errors v1.11.3/go.mod h1:m4UIW4CDjx+R5cybPsNrRbreomiFqt8o1h1wUVazSd8=
|
||||
github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce/go.mod h1:9/y3cnZ5GKakj/H4y9r9GTjCvAFta7KLgSHPJJYc52M=
|
||||
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs=
|
||||
github.com/cockroachdb/pebble v1.1.1/go.mod h1:4exszw1r40423ZsmkG/09AFEG83I0uDgfujJdbL6kYU=
|
||||
github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
|
||||
github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2/go.mod h1:8BT+cPK6xvFOcRlk0R8eg+OTkcqI6baNH4xAkpiYVvQ=
|
||||
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ=
|
||||
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
|
||||
github.com/compose-spec/compose-go v1.20.0 h1:h4ZKOst1EF/DwZp7dWkb+wbTVE4nEyT9Lc89to84Ol4=
|
||||
github.com/compose-spec/compose-go v1.20.0/go.mod h1:+MdqXV4RA7wdFsahh/Kb8U0pAJqkg7mr4PM9tFKU8RM=
|
||||
@@ -764,6 +776,7 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:ma
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/crate-crypto/go-ipa v0.0.0-20230601170251-1830d0757c80/go.mod h1:gzbVz57IDJgQ9rLQwfSk696JGWof8ftznEL9GoAv3NI=
|
||||
github.com/crate-crypto/go-ipa v0.0.0-20240223125850-b1e8a79f509c/go.mod h1:geZJZH3SzKCqnz5VT0q/DyIG/tvu/dZk+VIfXicupJs=
|
||||
github.com/crate-crypto/go-kzg-4844 v1.0.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc=
|
||||
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c h1:/ovYnF02fwL0kvspmy9AuyKg1JhdTRUgPw4nUxd9oZM=
|
||||
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
|
||||
@@ -775,9 +788,9 @@ github.com/dchest/blake512 v1.0.0 h1:oDFEQFIqFSeuA34xLtXZ/rWxCXdSjirjzPhey5EUvmA
|
||||
github.com/dchest/blake512 v1.0.0/go.mod h1:FV1x7xPPLWukZlpDpWQ88rF/SFwZ5qbskrzhLMB92JI=
|
||||
github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
|
||||
github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo=
|
||||
github.com/deckarep/golang-set/v2 v2.6.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs=
|
||||
github.com/deepmap/oapi-codegen v1.6.0 h1:w/d1ntwh91XI0b/8ja7+u5SvA4IFfM0UNNLmiDR1gg0=
|
||||
github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M=
|
||||
@@ -799,11 +812,13 @@ github.com/docker/cli-docs-tool v0.6.0/go.mod h1:zMjqTFCU361PRh8apiXzeAZ1Q/xupbI
|
||||
github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v24.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815 h1:bWDMxwH3px2JBh6AyO7hdCn/PkvCZXii8TGj7sbtEbQ=
|
||||
github.com/donovanhide/eventsource v0.0.0-20210830082556-c59027999da0/go.mod h1:56wL82FO0bfMU5RvfXoIwSOP2ggqqxT+tAfNEIyxuHw=
|
||||
github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48 h1:iZOop7pqsg+56twTopWgwCGxdB5SI2yDO8Ti7eTRliQ=
|
||||
github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk=
|
||||
github.com/dop251/goja v0.0.0-20211022113120-dc8c55024d06/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk=
|
||||
github.com/dop251/goja v0.0.0-20220405120441-9037c2b61cbf h1:Yt+4K30SdjOkRoRRm3vYNQgR+/ZIy0RmeUDZo7Y8zeQ=
|
||||
github.com/dop251/goja v0.0.0-20220405120441-9037c2b61cbf/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk=
|
||||
github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3/go.mod h1:QMWlm50DNe14hD7t24KEqZuUdC9sOTy8W6XbCU1mlw4=
|
||||
github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127/go.mod h1:QMWlm50DNe14hD7t24KEqZuUdC9sOTy8W6XbCU1mlw4=
|
||||
github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7 h1:tYwu/z8Y0NkkzGEh3z21mSWggMg4LwLRFucLS7TjARg=
|
||||
github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y=
|
||||
@@ -822,10 +837,12 @@ github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/Ir
|
||||
github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
|
||||
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5 h1:Yzb9+7DPaBjB8zlTR87/ElzFsnQfuHnVUVqpZZIcV5Y=
|
||||
github.com/ethereum/c-kzg-4844 v1.0.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
|
||||
github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4 h1:B2mpK+MNqgPqk2/KNi1LbqwtZDy5F7iy0mynQiBr8VA=
|
||||
github.com/ethereum/c-kzg-4844/bindings/go v0.0.0-20230126171313-363c7d7593b4/go.mod h1:y4GA2JbAUama1S4QwYjC2hefgGLU8Ul0GMtL/ADMF1c=
|
||||
github.com/ethereum/go-ethereum v1.10.26 h1:i/7d9RBBwiXCEuyduBQzJw/mKmnvzsN14jqBmytw72s=
|
||||
github.com/ethereum/go-ethereum v1.10.26/go.mod h1:EYFyF19u3ezGLD4RqOkLq+ZCXzYbLoNDdZlMt7kyKFg=
|
||||
github.com/ethereum/go-verkle v0.1.1-0.20240306133620-7d920df305f0/go.mod h1:D9AJLVXSyZQXJQVk8oh1EwjISE+sJTn2duYIZC0dy3w=
|
||||
github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
|
||||
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
|
||||
@@ -834,7 +851,9 @@ github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL
|
||||
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
|
||||
github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w=
|
||||
github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg=
|
||||
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
|
||||
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
|
||||
github.com/ferranbt/fastssz v0.1.2/go.mod h1:X5UPrE2u1UJjxHA8X54u04SBwdAQjG2sFtWs39YxyWs=
|
||||
github.com/fjl/gencodec v0.0.0-20220412091415-8bb9e558978c h1:CndMRAH4JIwxbW8KYq6Q+cGWcGHz0FjGR3QqcInWcW0=
|
||||
github.com/fjl/gencodec v0.0.0-20220412091415-8bb9e558978c/go.mod h1:AzA8Lj6YtixmJWL+wkKoBGsLWy9gFrAzi4g+5bCKwpY=
|
||||
github.com/fjl/gencodec v0.0.0-20230517082657-f9840df7b83e/go.mod h1:AzA8Lj6YtixmJWL+wkKoBGsLWy9gFrAzi4g+5bCKwpY=
|
||||
@@ -854,6 +873,7 @@ github.com/getkin/kin-openapi v0.61.0 h1:6awGqF5nG5zkVpMsAih1QH4VgzS8phTxECUWIFo
|
||||
github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
|
||||
github.com/getsentry/sentry-go v0.11.0 h1:qro8uttJGvNAMr5CLcFI9CHR0aDzXl0Vs3Pmw/oTPg8=
|
||||
github.com/getsentry/sentry-go v0.11.0/go.mod h1:KBQIxiZAetw62Cj8Ri964vAEWVdgfaUCn30Q3bCvANo=
|
||||
github.com/getsentry/sentry-go v0.27.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY=
|
||||
github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs=
|
||||
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
|
||||
github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd h1:r04MMPyLHj/QwZuMJ5+7tJcBr1AQjpiAK/rZWRrQT7o=
|
||||
@@ -862,6 +882,7 @@ github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31 h1:gclg6gY70GLy
|
||||
github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
|
||||
github.com/go-chi/chi/v5 v5.0.0 h1:DBPx88FjZJH3FsICfDAfIfnb7XxKIYVGG6lOPlhENAg=
|
||||
github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs=
|
||||
github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
|
||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1 h1:QbL/5oDUmRBzO9/Z7Seo6zf912W/a6Sr4Eu0G/3Jho0=
|
||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72 h1:b+9H1GAsx5RsjvDFLoS5zkNBzIQMuVKUYQDmxU3N5XE=
|
||||
@@ -873,6 +894,7 @@ github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
|
||||
github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
|
||||
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
||||
github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8=
|
||||
github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
||||
github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU=
|
||||
github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg=
|
||||
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||
@@ -913,6 +935,7 @@ github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+Licev
|
||||
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
|
||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219 h1:utua3L2IbQJmauC5IXdEA547bcoU5dozgQAfc8Onsg4=
|
||||
@@ -1053,7 +1076,10 @@ github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4
|
||||
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
|
||||
github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
|
||||
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
|
||||
github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc=
|
||||
github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw=
|
||||
github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
|
||||
github.com/holiman/uint256 v1.3.0/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
|
||||
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
|
||||
github.com/huin/goupnp v1.0.2/go.mod h1:0dxJBVBHqTMjIUMkESDTNgOOx/Mw5wYIfyFmdzSamkM=
|
||||
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150 h1:vlNjIqmUZ9CMAWsbURYl3a6wZbw7q5RHVvlXTNS/Bs8=
|
||||
@@ -1123,13 +1149,16 @@ github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5 h1:PJr+ZMXIecYc
|
||||
github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
|
||||
github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef h1:2jNeR4YUziVtswNP9sEFAI913cVrzH85T+8Q6LpYbT0=
|
||||
github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0=
|
||||
github.com/karalabe/hid v1.0.1-0.20240306101548-573246063e52/go.mod h1:qk1sX/IBgppQNcGCRoj90u6EGC056EBoIc1oEjCWla8=
|
||||
github.com/karalabe/usb v0.0.0-20211005121534-4c5740d64559 h1:0VWDXPNE0brOek1Q8bLfzKkvOzwbQE/snjGojlCr8CY=
|
||||
github.com/karalabe/usb v0.0.0-20211005121534-4c5740d64559/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
|
||||
github.com/karalabe/usb v0.0.2 h1:M6QQBNxF+CQ8OFvxrT90BA0qBOXymndZnk5q235mFc4=
|
||||
github.com/karalabe/usb v0.0.2/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
|
||||
github.com/kataras/blocks v0.0.7/go.mod h1:UJIU97CluDo0f+zEjbnbkeMRlvYORtmc1304EeyXf4I=
|
||||
github.com/kataras/golog v0.1.7/go.mod h1:jOSQ+C5fUqsNSwurB/oAHq1IFSb0KI3l6GMa7xB6dZA=
|
||||
github.com/kataras/golog v0.1.8/go.mod h1:rGPAin4hYROfk1qT9wZP6VY2rsb4zzc37QpdPjdkqVw=
|
||||
github.com/kataras/iris/v12 v12.2.0-beta5/go.mod h1:q26aoWJ0Knx/00iPKg5iizDK7oQQSPjbD8np0XDh6dc=
|
||||
github.com/kataras/iris/v12 v12.2.0/go.mod h1:BLzBpEunc41GbE68OUaQlqX4jzi791mx5HU04uPb90Y=
|
||||
github.com/kataras/pio v0.0.11/go.mod h1:38hH6SWH6m4DKSYmRhlrCJ5WItwWgCVrTNU62XZyUvI=
|
||||
github.com/kataras/sitemap v0.0.6/go.mod h1:dW4dOCNs896OR1HmG+dMLdT7JjDk7mYBzoIRwuj5jA4=
|
||||
github.com/kataras/tunnel v0.0.4/go.mod h1:9FkU4LaeifdMWqZu7o20ojmW4B7hdhv2CMLwfnHGpYw=
|
||||
@@ -1143,9 +1172,11 @@ github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23 h1:FOOIBWrEkLgmlgGfM
|
||||
github.com/klauspost/asmfmt v1.3.2 h1:4Ri7ox3EwapiOjCki+hw14RyKk201CN4rzyCJRFLpK4=
|
||||
github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE=
|
||||
github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
|
||||
github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
|
||||
github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
|
||||
github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5 h1:2U0HzY8BJ8hVwDKIzp7y4voR9CX/nvcfymLmg2UiOio=
|
||||
github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
||||
github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6 h1:KAZ1BW2TCmT6PRihDPpocIy1QTtsAsrx6TneU/4+CMg=
|
||||
github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
|
||||
github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada h1:3L+neHp83cTjegPdCiOxVOJtRIy7/8RldvMTsyPYH10=
|
||||
@@ -1164,9 +1195,11 @@ github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+
|
||||
github.com/labstack/echo/v4 v4.2.1 h1:LF5Iq7t/jrtUuSutNuiEWtB5eiHfZ5gSe2pcu5exjQw=
|
||||
github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg=
|
||||
github.com/labstack/echo/v4 v4.9.0/go.mod h1:xkCDAdFCIf8jsFQ5NnbK7oqaF/yU1A1X20Ltm0OvSks=
|
||||
github.com/labstack/echo/v4 v4.10.0/go.mod h1:S/T/5fy/GigaXnHTkh0ZGe4LpkkQysvRjFMSUTkDRNQ=
|
||||
github.com/labstack/gommon v0.3.0 h1:JEeO0bvc78PKdyHxloTKiF8BD5iGrH8T6MSeGvSgob0=
|
||||
github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
|
||||
github.com/labstack/gommon v0.3.1/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3MFxTMTM=
|
||||
github.com/labstack/gommon v0.4.0/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3MFxTMTM=
|
||||
github.com/lestrrat-go/backoff/v2 v2.0.8 h1:oNb5E5isby2kiro9AgdHLv5N5tint1AnDVVf2E2un5A=
|
||||
github.com/lestrrat-go/backoff/v2 v2.0.8/go.mod h1:rHP/q/r9aT27n24JQLa7JhSQZCKBBOiM/uP402WwN8Y=
|
||||
github.com/lestrrat-go/blackmagic v1.0.0 h1:XzdxDbuQTz0RZZEmdU7cnQxUtFUzgCSPq8RCz4BxIi4=
|
||||
@@ -1186,6 +1219,7 @@ github.com/lyft/protoc-gen-star/v2 v2.0.3 h1:/3+/2sWyXeMLzKd1bX+ixWKgEMsULrIivpD
|
||||
github.com/lyft/protoc-gen-star/v2 v2.0.3/go.mod h1:amey7yeodaJhXSbf/TlLvWiqQfLOSpEk//mLlc+axEk=
|
||||
github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
|
||||
github.com/mailgun/raymond/v2 v2.0.46/go.mod h1:lsgvL50kgt1ylcFJYZiULi5fjPBkkhNfj4KA0W54Z18=
|
||||
github.com/mailgun/raymond/v2 v2.0.48/go.mod h1:lsgvL50kgt1ylcFJYZiULi5fjPBkkhNfj4KA0W54Z18=
|
||||
github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd h1:HvFwW+cm9bCbZ/+vuGNq7CRWXql8c0y8nGeYpqmpvmk=
|
||||
github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ=
|
||||
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
@@ -1207,12 +1241,15 @@ github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOA
|
||||
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||
github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||
github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104 h1:d8RFOZ2IiFtFWBcKEHAFYJcPTf0wY5q0exFNJZVWa1U=
|
||||
github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
|
||||
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg=
|
||||
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k=
|
||||
github.com/microcosm-cc/bluemonday v1.0.21/go.mod h1:ytNkv4RrDrLJ2pqlsSI46O6IVXmZOBBD4SaJyDwwTkM=
|
||||
github.com/microcosm-cc/bluemonday v1.0.23/go.mod h1:mN70sk7UkkF8TUr2IGBpNN0jAgStuPzlK76QuruE/z4=
|
||||
github.com/microsoft/go-mssqldb v1.6.0 h1:mM3gYdVwEPFrlg/Dvr2DNVEgYFG7L42l+dGc67NNNpc=
|
||||
github.com/microsoft/go-mssqldb v1.6.0/go.mod h1:00mDtPbeQCRGC1HwOOR5K/gr30P1NcEG0vx6Kbv2aJU=
|
||||
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
|
||||
@@ -1265,6 +1302,7 @@ github.com/nats-io/nkeys v0.4.7 h1:RwNJbbIdYCoClSDNY7QVKZlyb/wfT6ugvFCiKy6vDvI=
|
||||
github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDmGD0nc=
|
||||
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
|
||||
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
|
||||
github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uYLpLIr5fm8diHn0JbqRycJi6w0Ms=
|
||||
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86 h1:D6paGObi5Wud7xg83MaEFyjxQB1W5bz5d0IFppr+ymk=
|
||||
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
|
||||
github.com/neelance/sourcemap v0.0.0-20200213170602-2833bce08e4c h1:bY6ktFuJkt+ZXkX0RChQch2FtHpWQLVS8Qo1YasiIVk=
|
||||
@@ -1296,6 +1334,7 @@ github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ=
|
||||
github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
|
||||
github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I=
|
||||
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
|
||||
github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 h1:Qj1ukM4GlMWXNdMBuXcXfz/Kw9s1qm0CLY32QxuSImI=
|
||||
github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4/go.mod h1:N6UoU20jOqggOuDwUaBQpluzLNDqif3kq9z2wpdYEfQ=
|
||||
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
|
||||
@@ -1316,15 +1355,24 @@ github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P
|
||||
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
|
||||
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
|
||||
github.com/prometheus/client_golang v1.12.0/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
|
||||
github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA=
|
||||
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.2.1-0.20210607210712-147c58e9608a/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
|
||||
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
|
||||
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
||||
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
|
||||
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
||||
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
|
||||
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/protolambda/bls12-381-util v0.0.0-20220416220906-d8552aa452c7/go.mod h1:IToEjHuttnUzwZI5KBSM/LOOW3qLbbrHOEfp3SbECGY=
|
||||
github.com/protolambda/bls12-381-util v0.1.0/go.mod h1:cdkysJTRpeFeuUVx/TXGDQNMTiRAalk1vQw3TYTHcE4=
|
||||
github.com/protolambda/messagediff v1.4.0/go.mod h1:LboJp0EwIbJsePYpzh5Op/9G1/4mIztMRYzzwR0dR2M=
|
||||
github.com/protolambda/zrnt v0.32.2/go.mod h1:A0fezkp9Tt3GBLATSPIbuY4ywYESyAuc/FFmPKg8Lqs=
|
||||
github.com/protolambda/ztyp v0.2.2/go.mod h1:9bYgKGqg3wJqT9ac1gI2hnVb0STQq7p/1lapqrqY1dU=
|
||||
github.com/prysmaticlabs/gohashtree v0.0.1-alpha.0.20220714111606-acbb2962fb48/go.mod h1:4pWaT30XoEx1j8KNJf3TV+E3mQkaufn7mf+jRNb/Fuk=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52 h1:RnWNS9Hlm8BIkjr6wx8li5abe0fr73jljLycdfemTp0=
|
||||
@@ -1357,8 +1405,6 @@ github.com/scroll-tech/da-codec v0.1.1-0.20241014152913-2703f226fb0b h1:5H6V6yba
|
||||
github.com/scroll-tech/da-codec v0.1.1-0.20241014152913-2703f226fb0b/go.mod h1:48uxaqVgpD8ulH8p+nrBtfeLHZ9tX82bVVdPNkW3rPE=
|
||||
github.com/scroll-tech/da-codec v0.1.3-0.20250227072756-a1482833595f h1:YYbhuUwjowqI4oyXtECRofck7Fyj18e1tcRjuQlZpJE=
|
||||
github.com/scroll-tech/da-codec v0.1.3-0.20250227072756-a1482833595f/go.mod h1:xECEHZLVzbdUn+tNbRJhRIjLGTOTmnFQuTgUTeVLX58=
|
||||
github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493 h1:Ioc01J0WEMxuwFvEPGJeBKXdf2KY4Yc3XbFky/IxLlI=
|
||||
github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240607130425-e2becce6a1a4/go.mod h1:byf/mZ8jLYUCnUePTicjJWn+RvKdxDn7buS6glTnMwQ=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240821074444-b3fa00861e5e/go.mod h1:swB5NSp8pKNDuYsTxfR08bHS6L56i119PBx8fxvV8Cs=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20241010064814-3d88e870ae22/go.mod h1:r9FwtxCtybMkTbWYCyBuevT9TW3zHmOTHqD082Uh+Oo=
|
||||
@@ -1371,6 +1417,7 @@ github.com/segmentio/kafka-go v0.2.0 h1:HtCSf6B4gN/87yc5qTl7WsxPKQIIGXLPPM1bMCPO
|
||||
github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
|
||||
github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
|
||||
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
||||
github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
|
||||
github.com/shurcooL/go v0.0.0-20200502201357-93f07166e636 h1:aSISeOcal5irEhJd1M+IrApc0PdcN7e7Aj4yuEnOrfQ=
|
||||
github.com/shurcooL/go v0.0.0-20200502201357-93f07166e636/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
|
||||
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 h1:bUGsEnyNbVPw06Bs80sCeARAlK8lhwqGyi6UT8ymuGk=
|
||||
@@ -1438,6 +1485,7 @@ github.com/valyala/fasthttp v1.40.0/go.mod h1:t/G+3rLek+CyY9bnIE+YlMRddxVAAGjhxn
|
||||
github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
|
||||
github.com/valyala/fasttemplate v1.2.1 h1:TVEnxayobAdVkhQfrfes2IzOB6o+z4roRkPF52WA1u4=
|
||||
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
|
||||
github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
|
||||
github.com/vektah/gqlparser/v2 v2.4.5 h1:C02NsyEsL4TXJB7ndonqTfuQOL4XPIu0aAWugdmTgmc=
|
||||
github.com/vektah/gqlparser/v2 v2.4.5/go.mod h1:flJWIR04IMQPGz+BXLrORkrARBxv/rtyIAFvd/MceW0=
|
||||
github.com/veraison/go-cose v1.0.0-rc.1 h1:4qA7dbFJGvt7gcqv5MCIyCQvN+NpHFPkW7do3EeDLb8=
|
||||
@@ -1550,6 +1598,7 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh
|
||||
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
|
||||
golang.org/x/crypto v0.2.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
|
||||
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
|
||||
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
|
||||
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
|
||||
@@ -1557,6 +1606,7 @@ golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq
|
||||
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
|
||||
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
||||
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
|
||||
golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
|
||||
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
|
||||
golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
|
||||
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
@@ -1571,6 +1621,7 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
|
||||
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||
golang.org/x/exp v0.0.0-20220426173459-3bcf042a4bf5/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE=
|
||||
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b h1:+qEpEAPhDZ1o0x3tHzZTQDArnOixOzGD9HUJfcg0mb4=
|
||||
@@ -1599,6 +1650,7 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@@ -1638,6 +1690,7 @@ golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
|
||||
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
|
||||
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
|
||||
golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
|
||||
golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
@@ -1659,7 +1712,6 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
|
||||
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
@@ -1719,6 +1771,7 @@ golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
@@ -1751,6 +1804,7 @@ golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxb
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.2.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
@@ -1801,8 +1855,10 @@ golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
|
||||
golang.org/x/tools v0.11.0/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8=
|
||||
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
|
||||
golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
|
||||
golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg=
|
||||
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=
|
||||
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
|
||||
golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU=
|
||||
@@ -1954,6 +2010,8 @@ gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 h1:OAj3g0cR6Dx/R07QgQe8wkA9RNj
|
||||
gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
|
||||
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6 h1:a6cXbcDDUkSBlpnkWV1bJ+vv3mOgQEltEJ2rPxroVu0=
|
||||
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns=
|
||||
gopkg.in/resty.v1 v1.12.0 h1:CuXP0Pjfw9rOuY6EP+UvtNvt5DSqHpIxILZKT/quCZI=
|
||||
|
||||
@@ -11,6 +11,7 @@ mock_abi:
rollup_bins: ## Builds the Rollup bins.
	go build -o $(PWD)/build/bin/gas_oracle ./cmd/gas_oracle/
	go build -o $(PWD)/build/bin/rollup_relayer ./cmd/rollup_relayer/
	go build -o $(PWD)/build/bin/blob_uploader ./cmd/blob_uploader/

gas_oracle: ## Builds the gas_oracle bin
	go build -o $(PWD)/build/bin/gas_oracle ./cmd/gas_oracle/
@@ -18,6 +19,9 @@ gas_oracle: ## Builds the gas_oracle bin
rollup_relayer: ## Builds the rollup_relayer bin
	go build -o $(PWD)/build/bin/rollup_relayer ./cmd/rollup_relayer/

blob_uploader: ## Builds the blob_uploader bin
	go build -o $(PWD)/build/bin/blob_uploader ./cmd/blob_uploader/

test:
	go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 $(PWD)/...
|
||||
|
||||
@@ -10,33 +10,6 @@ import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestPackCommitBatch(t *testing.T) {
|
||||
scrollChainABI, err := ScrollChainMetaData.GetAbi()
|
||||
assert.NoError(t, err)
|
||||
|
||||
version := uint8(1)
|
||||
var parentBatchHeader []byte
|
||||
var chunks [][]byte
|
||||
var skippedL1MessageBitmap []byte
|
||||
|
||||
_, err = scrollChainABI.Pack("commitBatch", version, parentBatchHeader, chunks, skippedL1MessageBitmap)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestPackCommitBatchWithBlobProof(t *testing.T) {
|
||||
scrollChainABI, err := ScrollChainMetaData.GetAbi()
|
||||
assert.NoError(t, err)
|
||||
|
||||
version := uint8(1)
|
||||
var parentBatchHeader []byte
|
||||
var chunks [][]byte
|
||||
var skippedL1MessageBitmap []byte
|
||||
var blobDataProof []byte
|
||||
|
||||
_, err = scrollChainABI.Pack("commitBatchWithBlobProof", version, parentBatchHeader, chunks, skippedL1MessageBitmap, blobDataProof)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestPackCommitBatches(t *testing.T) {
|
||||
scrollChainABI, err := ScrollChainMetaData.GetAbi()
|
||||
assert.NoError(t, err)
|
||||
@@ -49,58 +22,6 @@ func TestPackCommitBatches(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestPackFinalizeBatchWithProof(t *testing.T) {
|
||||
l1RollupABI, err := ScrollChainMetaData.GetAbi()
|
||||
assert.NoError(t, err)
|
||||
|
||||
var batchHeader []byte
|
||||
var prevStateRoot common.Hash
|
||||
var postStateRoot common.Hash
|
||||
var withdrawRoot common.Hash
|
||||
var aggrProof []byte
|
||||
|
||||
_, err = l1RollupABI.Pack("finalizeBatchWithProof", batchHeader, prevStateRoot, postStateRoot, withdrawRoot, aggrProof)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestPackFinalizeBatchWithProof4844(t *testing.T) {
|
||||
l1RollupABI, err := ScrollChainMetaData.GetAbi()
|
||||
assert.NoError(t, err)
|
||||
|
||||
var batchHeader []byte
|
||||
var prevStateRoot common.Hash
|
||||
var postStateRoot common.Hash
|
||||
var withdrawRoot common.Hash
|
||||
var blobDataProof []byte
|
||||
var aggrProof []byte
|
||||
|
||||
_, err = l1RollupABI.Pack("finalizeBatchWithProof4844", batchHeader, prevStateRoot, postStateRoot, withdrawRoot, blobDataProof, aggrProof)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestPackFinalizeBundleWithProof(t *testing.T) {
|
||||
l1RollupABI, err := ScrollChainMetaData.GetAbi()
|
||||
assert.NoError(t, err)
|
||||
|
||||
var batchHeader []byte
|
||||
var postStateRoot common.Hash
|
||||
var withdrawRoot common.Hash
|
||||
var aggrProof []byte
|
||||
|
||||
_, err = l1RollupABI.Pack("finalizeBundleWithProof", batchHeader, postStateRoot, withdrawRoot, aggrProof)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestPackFinalizeEuclidInitialBatch(t *testing.T) {
|
||||
l1RollupABI, err := ScrollChainMetaData.GetAbi()
|
||||
assert.NoError(t, err)
|
||||
|
||||
var postStateRoot common.Hash
|
||||
|
||||
_, err = l1RollupABI.Pack("finalizeEuclidInitialBatch", postStateRoot)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestPackFinalizeBundlePostEuclidV2(t *testing.T) {
|
||||
l1RollupABI, err := ScrollChainMetaData.GetAbi()
|
||||
assert.NoError(t, err)
|
||||
|
||||
94
rollup/cmd/blob_uploader/app/app.go
Normal file
@@ -0,0 +1,94 @@
|
||||
package app
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/signal"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/urfave/cli/v2"
|
||||
|
||||
"scroll-tech/common/database"
|
||||
"scroll-tech/common/observability"
|
||||
"scroll-tech/common/utils"
|
||||
"scroll-tech/common/version"
|
||||
|
||||
"scroll-tech/rollup/internal/config"
|
||||
"scroll-tech/rollup/internal/controller/blob_uploader"
|
||||
)
|
||||
|
||||
var app *cli.App
|
||||
|
||||
func init() {
|
||||
// Set up blob-uploader app info.
|
||||
app = cli.NewApp()
|
||||
app.Action = action
|
||||
app.Name = "blob-uploader"
|
||||
app.Usage = "The Scroll Blob Uploader"
|
||||
app.Version = version.Version
|
||||
app.Flags = append(app.Flags, utils.CommonFlags...)
|
||||
app.Commands = []*cli.Command{}
|
||||
app.Before = func(ctx *cli.Context) error {
|
||||
return utils.LogSetup(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
func action(ctx *cli.Context) error {
|
||||
// Load config file.
|
||||
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
|
||||
cfg, err := config.NewConfig(cfgFile)
|
||||
if err != nil {
|
||||
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
|
||||
}
|
||||
|
||||
subCtx, cancel := context.WithCancel(ctx.Context)
|
||||
// Init db connection
|
||||
db, err := database.InitDB(cfg.DBConfig)
|
||||
if err != nil {
|
||||
log.Crit("failed to init db connection", "err", err)
|
||||
}
|
||||
defer func() {
|
||||
cancel()
|
||||
if err = database.CloseDB(db); err != nil {
|
||||
log.Crit("failed to close db connection", "error", err)
|
||||
}
|
||||
}()
|
||||
|
||||
registry := prometheus.DefaultRegisterer
|
||||
observability.Server(ctx, db)
|
||||
|
||||
// sanity check config
|
||||
if cfg.L2Config.BlobUploaderConfig == nil {
|
||||
log.Crit("cfg.L2Config.BlobUploaderConfig must not be nil")
|
||||
}
|
||||
|
||||
blobUploader, err := blob_uploader.NewBlobUploader(ctx.Context, db, cfg.L2Config.BlobUploaderConfig, registry)
|
||||
if err != nil {
|
||||
log.Crit("failed to create l2 relayer", "config file", cfgFile, "error", err)
|
||||
}
|
||||
|
||||
go utils.Loop(subCtx, 2*time.Second, blobUploader.UploadBlobToS3)
|
||||
|
||||
// All blob-uploader functions have been started.
|
||||
log.Info("Start blob-uploader successfully", "version", version.Version)
|
||||
|
||||
// Catch CTRL-C to ensure a graceful shutdown.
|
||||
interrupt := make(chan os.Signal, 1)
|
||||
signal.Notify(interrupt, os.Interrupt)
|
||||
|
||||
// Wait until the interrupt signal is received from an OS signal.
|
||||
<-interrupt
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Run runs the blob uploader cmd instance.
|
||||
func Run() {
|
||||
if err := app.Run(os.Args); err != nil {
|
||||
_, _ = fmt.Fprintln(os.Stderr, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
7
rollup/cmd/blob_uploader/main.go
Normal file
@@ -0,0 +1,7 @@
package main

import "scroll-tech/rollup/cmd/blob_uploader/app"

func main() {
	app.Run()
}
|
||||
@@ -54,6 +54,9 @@ func action(ctx *cli.Context) error {
|
||||
}
|
||||
|
||||
minCodecVersion := encoding.CodecVersion(ctx.Uint(utils.MinCodecVersionFlag.Name))
|
||||
if minCodecVersion < encoding.CodecV7 {
|
||||
log.Crit("min codec version must be greater than or equal to CodecV7", "minCodecVersion", minCodecVersion)
|
||||
}
|
||||
|
||||
// sanity check config
|
||||
if cfg.L2Config.BatchProposerConfig.MaxChunksPerBatch <= 0 {
|
||||
|
||||
@@ -102,6 +102,10 @@ func action(ctx *cli.Context) error {
|
||||
}
|
||||
|
||||
minCodecVersion := encoding.CodecVersion(ctx.Uint(utils.MinCodecVersionFlag.Name))
|
||||
if minCodecVersion < encoding.CodecV7 {
|
||||
log.Crit("min codec version must be greater than or equal to CodecV7", "minCodecVersion", minCodecVersion)
|
||||
}
|
||||
|
||||
chunkProposer := watcher.NewChunkProposer(subCtx, cfg.L2Config.ChunkProposerConfig, minCodecVersion, genesis.Config, db, registry)
|
||||
batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, minCodecVersion, genesis.Config, db, registry)
|
||||
bundleProposer := watcher.NewBundleProposer(subCtx, cfg.L2Config.BundleProposerConfig, minCodecVersion, genesis.Config, db, registry)
|
||||
|
||||
@@ -88,33 +88,31 @@
|
||||
"private_key_signer_config": {
|
||||
"private_key": "1515151515151515151515151515151515151515151515151515151515151515"
|
||||
}
|
||||
},
|
||||
"l1_commit_gas_limit_multiplier": 1.2
|
||||
}
|
||||
},
|
||||
"chunk_proposer_config": {
|
||||
"propose_interval_milliseconds": 100,
|
||||
"max_block_num_per_chunk": 100,
|
||||
"max_tx_num_per_chunk": 100,
|
||||
"max_l2_gas_per_chunk": 20000000,
|
||||
"max_l1_commit_gas_per_chunk": 11234567,
|
||||
"max_l1_commit_calldata_size_per_chunk": 112345,
|
||||
"chunk_timeout_sec": 300,
|
||||
"max_row_consumption_per_chunk": 1048319,
|
||||
"gas_cost_increase_multiplier": 1.2,
|
||||
"max_uncompressed_batch_bytes_size": 634880
|
||||
"chunk_timeout_sec": 300
|
||||
},
|
||||
"batch_proposer_config": {
|
||||
"propose_interval_milliseconds": 1000,
|
||||
"max_l1_commit_gas_per_batch": 11234567,
|
||||
"max_l1_commit_calldata_size_per_batch": 112345,
|
||||
"batch_timeout_sec": 300,
|
||||
"gas_cost_increase_multiplier": 1.2,
|
||||
"max_uncompressed_batch_bytes_size": 634880,
|
||||
"max_chunks_per_batch": 12
|
||||
"max_chunks_per_batch": 45
|
||||
},
|
||||
"bundle_proposer_config": {
|
||||
"max_batch_num_per_bundle": 20,
|
||||
"bundle_timeout_sec": 36000
|
||||
},
|
||||
"blob_uploader_config": {
|
||||
"start_batch": 0,
|
||||
"aws_s3_config": {
|
||||
"bucket": "blob-data",
|
||||
"region": "us-west-2",
|
||||
"access_key": "ACCESSKEY",
|
||||
"secret_key": "SECRETKEY"
|
||||
}
|
||||
}
|
||||
},
|
||||
"db_config": {
|
||||
|
||||
@@ -27,7 +27,7 @@ services:
    command: [
      "--config", "/app/conf/proposer-tool-config.json",
      "--genesis", "/app/conf/proposer-tool-genesis.json",
      "--min-codec-version", "4",
      "--min-codec-version", "7",
      "--start-l2-block", "10000",
      "--log.debug", "--verbosity", "3"
    ]
|
||||
@@ -4,6 +4,10 @@ go 1.22
|
||||
|
||||
require (
|
||||
github.com/agiledragon/gomonkey/v2 v2.12.0
|
||||
github.com/aws/aws-sdk-go-v2 v1.36.3
|
||||
github.com/aws/aws-sdk-go-v2/config v1.29.14
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.67
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.80.0
|
||||
github.com/consensys/gnark-crypto v0.16.0
|
||||
github.com/crate-crypto/go-kzg-4844 v1.1.0
|
||||
github.com/gin-gonic/gin v1.9.1
|
||||
@@ -11,8 +15,8 @@ require (
|
||||
github.com/holiman/uint256 v1.3.2
|
||||
github.com/mitchellh/mapstructure v1.5.0
|
||||
github.com/prometheus/client_golang v1.16.0
|
||||
github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601
|
||||
github.com/scroll-tech/da-codec v0.1.3-0.20250519114140-bfa7133d4ad1
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20250610090337-00c5c2bd3e65
|
||||
github.com/smartystreets/goconvey v1.8.0
|
||||
github.com/spf13/viper v1.19.0
|
||||
github.com/stretchr/testify v1.10.0
|
||||
@@ -22,6 +26,20 @@ require (
|
||||
|
||||
require (
|
||||
github.com/VictoriaMetrics/fastcache v1.12.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.33.19 // indirect
|
||||
github.com/aws/smithy-go v1.22.2 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/bits-and-blooms/bitset v1.20.0 // indirect
|
||||
github.com/btcsuite/btcd v0.20.1-beta // indirect
|
||||
|
||||
@@ -8,6 +8,42 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
|
||||
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
|
||||
github.com/aws/aws-sdk-go-v2 v1.36.3 h1:mJoei2CxPutQVxaATCzDUjcZEjVRdpsiiXi2o38yqWM=
|
||||
github.com/aws/aws-sdk-go-v2 v1.36.3/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 h1:zAybnyUQXIZ5mok5Jqwlf58/TFE7uvd3IAsa1aF9cXs=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10/go.mod h1:qqvMj6gHLR/EXWZw4ZbqlPbQUyenf4h82UQUlKc+l14=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.29.14 h1:f+eEi/2cKCg9pqKBoAIwRGzVb70MRKqWX4dg1BDcSJM=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.29.14/go.mod h1:wVPHWcIFv3WO89w0rE10gzf17ZYy+UVS1Geq8Iei34g=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.67 h1:9KxtdcIA/5xPNQyZRgUSpYOE6j9Bc4+D7nZua0KGYOM=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.67/go.mod h1:p3C44m+cfnbv763s52gCqrjaqyPikj9Sg47kUVaNZQQ=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 h1:x793wxmUWVDhshP8WW2mlnXuFrO4cOd3HLBroh1paFw=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30/go.mod h1:Jpne2tDnYiFascUEs2AWHJL9Yp7A5ZVy3TNyxaAjD6M=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 h1:ZK5jHhnrioRkUNOc+hOgQKlUL5JeC3S6JgLxtQ+Rm0Q=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34/go.mod h1:p4VfIceZokChbA9FzMbRGz5OV+lekcVtHlPKEO0gSZY=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 h1:SZwFm17ZUNNg5Np0ioo/gq8Mn6u9w19Mri8DnJ15Jf0=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34/go.mod h1:dFZsC0BLo346mvKQLWmoJxT+Sjp+qcVR1tRVHQGOH9Q=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 h1:ZNTqv4nIdE/DiBfUUfXcLZ/Spcuz+RjeziUtNJackkM=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34/go.mod h1:zf7Vcd1ViW7cPqYWEHLHJkS50X0JS2IKz9Cgaj6ugrs=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 h1:eAh2A4b5IzM/lum78bZ590jy36+d/aFLgKF/4Vd1xPE=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3/go.mod h1:0yKJC/kb8sAnmlYa6Zs3QVYqaC8ug2AbnNChv5Ox3uA=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.2 h1:BCG7DCXEXpNCcpwCxg1oi9pkJWH2+eZzTn9MY56MbVw=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.2/go.mod h1:iu6FSzgt+M2/x3Dk8zhycdIcHjEFb36IS8HVUVFoMg0=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 h1:dM9/92u2F1JbDaGooxTq18wmmFzbJRfXfVfy96/1CXM=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15/go.mod h1:SwFBy2vjtA0vZbjjaFtfN045boopadnoVPhu4Fv66vY=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 h1:moLQUoVq91LiqT1nbvzDukyqAlCv89ZmwaHw/ZFlFZg=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15/go.mod h1:ZH34PJUc8ApjBIfgQCFvkWcUDBtl/WTD+uiYHjd8igA=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.80.0 h1:fV4XIU5sn/x8gjRouoJpDVHj+ExJaUk4prYF+eb6qTs=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.80.0/go.mod h1:qbn305Je/IofWBJ4bJz/Q7pDEtnnoInw/dGt71v6rHE=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 h1:1Gw+9ajCV1jogloEv1RRnvfRFia2cL6c9cuKV2Ps+G8=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.25.3/go.mod h1:qs4a9T5EMLl/Cajiw2TcbNt2UNo/Hqlyp+GiuG4CFDI=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 h1:hXmVKytPfTy5axZ+fYbR5d0cFmC3JvwLm5kM83luako=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1/go.mod h1:MlYRNmYu/fGPoxBQVvBYr9nyr948aY/WLUvwBMBJubs=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.33.19 h1:1XuUZ8mYJw9B6lzAkXhqHlJd/XvaX32evhproijJEZY=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.33.19/go.mod h1:cQnB8CUnxbMU82JvlqjKR2HBOm3fe9pWorWBza6MBJ4=
|
||||
github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ=
|
||||
github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
@@ -249,10 +285,10 @@ github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6ke
|
||||
github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
|
||||
github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
|
||||
github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
|
||||
github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493 h1:Ioc01J0WEMxuwFvEPGJeBKXdf2KY4Yc3XbFky/IxLlI=
|
||||
github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601 h1:NEsjCG6uSvLRBlsP3+x6PL1kM+Ojs3g8UGotIPgJSz8=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601/go.mod h1:OblWe1+QrZwdpwO0j/LY3BSGuKT3YPUFBDQQgvvfStQ=
|
||||
github.com/scroll-tech/da-codec v0.1.3-0.20250519114140-bfa7133d4ad1 h1:6aKqJSal+QVdB5HMWMs0JTbAIZ6/iAHJx9qizz0w9dU=
|
||||
github.com/scroll-tech/da-codec v0.1.3-0.20250519114140-bfa7133d4ad1/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20250610090337-00c5c2bd3e65 h1:idsnkl5rwVr7eNUB0HxdkKI0L3zbTm8XSGwMTB5ndws=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20250610090337-00c5c2bd3e65/go.mod h1:756YMENiSfx/5pCwKq3+uSTWjXuHTbiCB+TirJjsQT8=
|
||||
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
|
||||
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
|
||||
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
|
||||
|
||||
@@ -24,31 +24,23 @@ type L2Config struct {
|
||||
BatchProposerConfig *BatchProposerConfig `json:"batch_proposer_config"`
|
||||
// The bundle_proposer config
|
||||
BundleProposerConfig *BundleProposerConfig `json:"bundle_proposer_config"`
|
||||
// The blob_uploader config
|
||||
BlobUploaderConfig *BlobUploaderConfig `json:"blob_uploader_config"`
|
||||
}
|
||||
|
||||
// ChunkProposerConfig loads chunk_proposer configuration items.
|
||||
type ChunkProposerConfig struct {
|
||||
ProposeIntervalMilliseconds uint64 `json:"propose_interval_milliseconds"`
|
||||
MaxBlockNumPerChunk uint64 `json:"max_block_num_per_chunk"`
|
||||
MaxTxNumPerChunk uint64 `json:"max_tx_num_per_chunk"`
|
||||
MaxL2GasPerChunk uint64 `json:"max_l2_gas_per_chunk"`
|
||||
MaxL1CommitGasPerChunk uint64 `json:"max_l1_commit_gas_per_chunk"`
|
||||
MaxL1CommitCalldataSizePerChunk uint64 `json:"max_l1_commit_calldata_size_per_chunk"`
|
||||
ChunkTimeoutSec uint64 `json:"chunk_timeout_sec"`
|
||||
MaxRowConsumptionPerChunk uint64 `json:"max_row_consumption_per_chunk"`
|
||||
GasCostIncreaseMultiplier float64 `json:"gas_cost_increase_multiplier"`
|
||||
MaxUncompressedBatchBytesSize uint64 `json:"max_uncompressed_batch_bytes_size"`
|
||||
ProposeIntervalMilliseconds uint64 `json:"propose_interval_milliseconds"`
|
||||
MaxBlockNumPerChunk uint64 `json:"max_block_num_per_chunk"`
|
||||
MaxL2GasPerChunk uint64 `json:"max_l2_gas_per_chunk"`
|
||||
ChunkTimeoutSec uint64 `json:"chunk_timeout_sec"`
|
||||
}
|
||||
|
||||
// BatchProposerConfig loads batch_proposer configuration items.
|
||||
type BatchProposerConfig struct {
|
||||
ProposeIntervalMilliseconds uint64 `json:"propose_interval_milliseconds"`
|
||||
MaxL1CommitGasPerBatch uint64 `json:"max_l1_commit_gas_per_batch"`
|
||||
MaxL1CommitCalldataSizePerBatch uint64 `json:"max_l1_commit_calldata_size_per_batch"`
|
||||
BatchTimeoutSec uint64 `json:"batch_timeout_sec"`
|
||||
GasCostIncreaseMultiplier float64 `json:"gas_cost_increase_multiplier"`
|
||||
MaxUncompressedBatchBytesSize uint64 `json:"max_uncompressed_batch_bytes_size"`
|
||||
MaxChunksPerBatch int `json:"max_chunks_per_batch"`
|
||||
ProposeIntervalMilliseconds uint64 `json:"propose_interval_milliseconds"`
|
||||
BatchTimeoutSec uint64 `json:"batch_timeout_sec"`
|
||||
MaxChunksPerBatch int `json:"max_chunks_per_batch"`
|
||||
}
|
||||
|
||||
// BundleProposerConfig loads bundle_proposer configuration items.
|
||||
@@ -56,3 +48,17 @@ type BundleProposerConfig struct {
|
||||
MaxBatchNumPerBundle uint64 `json:"max_batch_num_per_bundle"`
|
||||
BundleTimeoutSec uint64 `json:"bundle_timeout_sec"`
|
||||
}
|
||||
|
||||
// BlobUploaderConfig loads blob_uploader configuration items.
type BlobUploaderConfig struct {
	StartBatch  uint64       `json:"start_batch"`
	AWSS3Config *AWSS3Config `json:"aws_s3_config"`
}

// AWSS3Config loads s3_uploader configuration items.
type AWSS3Config struct {
	Bucket    string `json:"bucket"`
	Region    string `json:"region"`
	AccessKey string `json:"access_key"`
	SecretKey string `json:"secret_key"`
}
|
||||
|
||||
@@ -65,8 +65,6 @@ type RelayerConfig struct {
|
||||
GasOracleConfig *GasOracleConfig `json:"gas_oracle_config"`
|
||||
// ChainMonitor config of monitoring service
|
||||
ChainMonitor *ChainMonitor `json:"chain_monitor"`
|
||||
// L1CommitGasLimitMultiplier multiplier for fallback gas limit in commitBatch txs
|
||||
L1CommitGasLimitMultiplier float64 `json:"l1_commit_gas_limit_multiplier,omitempty"`
|
||||
|
||||
// Configs of transaction signers (GasOracle, Commit, Finalize)
|
||||
GasOracleSenderSignerConfig *SignerConfig `json:"gas_oracle_sender_signer_config"`
|
||||
@@ -75,8 +73,6 @@ type RelayerConfig struct {
|
||||
|
||||
// Indicates if bypass features specific to testing environments are enabled.
|
||||
EnableTestEnvBypassFeatures bool `json:"enable_test_env_bypass_features"`
|
||||
// Sets rollup-relayer to stop fake finalizing at the fork boundary
|
||||
TestEnvBypassOnlyUntilForkBoundary bool `json:"test_env_bypass_only_until_fork_boundary"`
|
||||
// The timeout in seconds for finalizing a batch without proof, only used when EnableTestEnvBypassFeatures is true.
|
||||
FinalizeBatchWithoutProofTimeoutSec uint64 `json:"finalize_batch_without_proof_timeout_sec"`
|
||||
// The timeout in seconds for finalizing a bundle without proof, only used when EnableTestEnvBypassFeatures is true.
|
||||
|
||||
@@ -0,0 +1 @@
|
||||
package blob_uploader
|
||||
251
rollup/internal/controller/blob_uploader/blob_uploader.go
Normal file
@@ -0,0 +1,251 @@
|
||||
package blob_uploader
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/scroll-tech/da-codec/encoding"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/common/utils"
|
||||
|
||||
"scroll-tech/rollup/internal/config"
|
||||
"scroll-tech/rollup/internal/orm"
|
||||
)
|
||||
|
||||
// BlobUploader is responsible for uploading blobs to blob storage services.
|
||||
type BlobUploader struct {
|
||||
ctx context.Context
|
||||
|
||||
cfg *config.BlobUploaderConfig
|
||||
|
||||
s3Uploader *S3Uploader
|
||||
|
||||
blobUploadOrm *orm.BlobUpload
|
||||
batchOrm *orm.Batch
|
||||
chunkOrm *orm.Chunk
|
||||
l2BlockOrm *orm.L2Block
|
||||
|
||||
metrics *blobUploaderMetrics
|
||||
}
|
||||
|
||||
// NewBlobUploader will return a new instance of BlobUploader
|
||||
func NewBlobUploader(ctx context.Context, db *gorm.DB, cfg *config.BlobUploaderConfig, reg prometheus.Registerer) (*BlobUploader, error) {
|
||||
var s3Uploader *S3Uploader
|
||||
var err error
|
||||
if cfg.AWSS3Config != nil {
|
||||
s3Uploader, err = NewS3Uploader(cfg.AWSS3Config)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("new blob uploader failed, err: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
blobUploader := &BlobUploader{
|
||||
ctx: ctx,
|
||||
cfg: cfg,
|
||||
s3Uploader: s3Uploader,
|
||||
batchOrm: orm.NewBatch(db),
|
||||
chunkOrm: orm.NewChunk(db),
|
||||
l2BlockOrm: orm.NewL2Block(db),
|
||||
blobUploadOrm: orm.NewBlobUpload(db),
|
||||
}
|
||||
|
||||
blobUploader.metrics = initBlobUploaderMetrics(reg)
|
||||
|
||||
return blobUploader, nil
|
||||
}
|
||||
|
||||
func (b *BlobUploader) UploadBlobToS3() {
|
||||
// skip upload if s3 uploader is not configured
|
||||
if b.s3Uploader == nil {
|
||||
return
|
||||
}
|
||||
|
||||
// get the first un-uploaded batch from the database, in ascending order by batch index.
|
||||
dbBatch, err := b.GetFirstUnuploadedBatchByPlatform(b.ctx, b.cfg.StartBatch, types.BlobStoragePlatformS3)
|
||||
if err != nil {
|
||||
log.Error("Failed to fetch unuploaded batch", "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
// nothing to do if we don't have any pending batches
|
||||
if dbBatch == nil {
|
||||
log.Debug("no pending batches to upload")
|
||||
return
|
||||
}
|
||||
|
||||
// construct blob
|
||||
codecVersion := encoding.CodecVersion(dbBatch.CodecVersion)
|
||||
blob, err := b.constructBlobCodec(dbBatch)
|
||||
if err != nil {
|
||||
log.Error("failed to construct constructBlobCodec payload ", "codecVersion", codecVersion, "batch index", dbBatch.Index, "err", err)
|
||||
b.metrics.rollupBlobUploaderUploadToS3FailedTotal.Inc()
|
||||
if updateErr := b.blobUploadOrm.InsertOrUpdateBlobUpload(b.ctx, dbBatch.Index, dbBatch.Hash, types.BlobStoragePlatformS3, types.BlobUploadStatusFailed); updateErr != nil {
|
||||
log.Error("failed to update blob upload status to failed", "batch index", dbBatch.Index, "err", updateErr)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// calculate versioned blob hash
|
||||
versionedBlobHash, err := utils.CalculateVersionedBlobHash(*blob)
|
||||
if err != nil {
|
||||
log.Error("failed to calculate versioned blob hash", "batch index", dbBatch.Index, "err", err)
|
||||
b.metrics.rollupBlobUploaderUploadToS3FailedTotal.Inc()
|
||||
// update status to failed
|
||||
if updateErr := b.blobUploadOrm.InsertOrUpdateBlobUpload(b.ctx, dbBatch.Index, dbBatch.Hash, types.BlobStoragePlatformS3, types.BlobUploadStatusFailed); updateErr != nil {
|
||||
log.Error("failed to update blob upload status to failed", "batch index", dbBatch.Index, "err", updateErr)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// upload blob data to s3 bucket
|
||||
key := common.BytesToHash(versionedBlobHash[:]).Hex()
|
||||
err = b.s3Uploader.UploadData(b.ctx, blob[:], key)
|
||||
if err != nil {
|
||||
log.Error("failed to upload blob data to AWS S3", "batch index", dbBatch.Index, "versioned blob hash", key, "err", err)
|
||||
b.metrics.rollupBlobUploaderUploadToS3FailedTotal.Inc()
|
||||
// update status to failed
|
||||
if updateErr := b.blobUploadOrm.InsertOrUpdateBlobUpload(b.ctx, dbBatch.Index, dbBatch.Hash, types.BlobStoragePlatformS3, types.BlobUploadStatusFailed); updateErr != nil {
|
||||
log.Error("failed to update blob upload status to failed", "batch index", dbBatch.Index, "err", updateErr)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// update status to uploaded
|
||||
if err = b.blobUploadOrm.InsertOrUpdateBlobUpload(b.ctx, dbBatch.Index, dbBatch.Hash, types.BlobStoragePlatformS3, types.BlobUploadStatusUploaded); err != nil {
|
||||
log.Error("failed to update blob upload status to uploaded", "batch index", dbBatch.Index, "err", err)
|
||||
b.metrics.rollupBlobUploaderUploadToS3FailedTotal.Inc()
|
||||
return
|
||||
}
|
||||
|
||||
b.metrics.rollupBlobUploaderUploadToS3SuccessTotal.Inc()
|
||||
log.Info("Successfully uploaded blob to S3", "batch index", dbBatch.Index, "versioned blob hash", key)
|
||||
}
|
||||
|
||||
func (b *BlobUploader) constructBlobCodec(dbBatch *orm.Batch) (*kzg4844.Blob, error) {
|
||||
var dbChunks []*orm.Chunk
|
||||
|
||||
dbChunks, err := b.chunkOrm.GetChunksInRange(b.ctx, dbBatch.StartChunkIndex, dbBatch.EndChunkIndex)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get chunks in range: %v", err)
|
||||
}
|
||||
|
||||
// check codec version
|
||||
for _, dbChunk := range dbChunks {
|
||||
if dbBatch.CodecVersion != dbChunk.CodecVersion {
|
||||
return nil, fmt.Errorf("batch codec version is different from chunk codec version, batch index: %d, chunk index: %d, batch codec version: %d, chunk codec version: %d", dbBatch.Index, dbChunk.Index, dbBatch.CodecVersion, dbChunk.CodecVersion)
|
||||
}
|
||||
}
|
||||
|
||||
chunks := make([]*encoding.Chunk, len(dbChunks))
|
||||
var allBlocks []*encoding.Block // collect blocks for CodecV7
|
||||
for i, c := range dbChunks {
|
||||
blocks, getErr := b.l2BlockOrm.GetL2BlocksInRange(b.ctx, c.StartBlockNumber, c.EndBlockNumber)
|
||||
if getErr != nil {
|
||||
return nil, fmt.Errorf("failed to get blocks in range for batch %d: %w", dbBatch.Index, getErr)
|
||||
}
|
||||
chunks[i] = &encoding.Chunk{Blocks: blocks}
|
||||
allBlocks = append(allBlocks, blocks...)
|
||||
}
|
||||
|
||||
var encodingBatch *encoding.Batch
|
||||
codecVersion := encoding.CodecVersion(dbBatch.CodecVersion)
|
||||
switch codecVersion {
|
||||
case encoding.CodecV0:
|
||||
return nil, fmt.Errorf("codec version 0 doesn't support blob, batch index: %d", dbBatch.Index)
|
||||
case encoding.CodecV1, encoding.CodecV2, encoding.CodecV3, encoding.CodecV4, encoding.CodecV5, encoding.CodecV6:
|
||||
encodingBatch = &encoding.Batch{
|
||||
Index: dbBatch.Index,
|
||||
TotalL1MessagePoppedBefore: dbChunks[0].TotalL1MessagesPoppedBefore,
|
||||
ParentBatchHash: common.HexToHash(dbBatch.ParentBatchHash),
|
||||
Chunks: chunks,
|
||||
}
|
||||
|
||||
case encoding.CodecV7:
|
||||
encodingBatch = &encoding.Batch{
|
||||
Index: dbBatch.Index,
|
||||
ParentBatchHash: common.HexToHash(dbBatch.ParentBatchHash),
|
||||
Chunks: chunks,
|
||||
PrevL1MessageQueueHash: common.HexToHash(dbBatch.PrevL1MessageQueueHash),
|
||||
PostL1MessageQueueHash: common.HexToHash(dbBatch.PostL1MessageQueueHash),
|
||||
Blocks: allBlocks,
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported codec version, batch index: %d, batch codec version: %d", dbBatch.Index, codecVersion)
|
||||
}
|
||||
|
||||
codec, err := encoding.CodecFromVersion(codecVersion)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get codec from version %d, err: %w", dbBatch.CodecVersion, err)
|
||||
}
|
||||
|
||||
daBatch, err := codec.NewDABatch(encodingBatch)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create DA batch: %w", err)
|
||||
}
|
||||
|
||||
if daBatch.Blob() == nil {
|
||||
return nil, fmt.Errorf("codec version doesn't support blob, batch index: %d, batch codec version: %d", dbBatch.Index, dbBatch.CodecVersion)
|
||||
}
|
||||
|
||||
return daBatch.Blob(), nil
|
||||
}
|
||||
|
||||
// GetFirstUnuploadedBatchByPlatform retrieves the first batch that hasn't been uploaded to the corresponding blob storage service.
|
||||
// The batch must have a commit_tx_hash (committed).
|
||||
func (b *BlobUploader) GetFirstUnuploadedBatchByPlatform(ctx context.Context, startBatch uint64, platform types.BlobStoragePlatform) (*orm.Batch, error) {
|
||||
batchIndex, err := b.blobUploadOrm.GetNextBatchIndexToUploadByPlatform(ctx, startBatch, platform)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var batch *orm.Batch
|
||||
for {
|
||||
var err error
|
||||
batch, err = b.batchOrm.GetBatchByIndex(ctx, batchIndex)
|
||||
if err != nil {
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
log.Debug("got batch not proposed for blob uploading", "batch_index", batchIndex, "platform", platform.String())
|
||||
return nil, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Check whether the parent batch has been uploaded.
// If not, a batch revert happened and we need to fall back to uploading the previous batch.
// Skip the check if the parent batch is the genesis batch.
|
||||
if batchIndex <= 1 || batchIndex == startBatch {
|
||||
break
|
||||
}
|
||||
fields := map[string]interface{}{
|
||||
"batch_index = ?": batchIndex - 1,
|
||||
"batch_hash = ?": batch.ParentBatchHash,
|
||||
"platform = ?": platform,
|
||||
"status = ?": types.BlobUploadStatusUploaded,
|
||||
}
|
||||
blobUpload, err := b.blobUploadOrm.GetBlobUploads(ctx, fields, nil, 1)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(blobUpload) == 0 {
|
||||
batchIndex--
|
||||
continue
|
||||
}
|
||||
|
||||
break
|
||||
}
|
||||
|
||||
if len(batch.CommitTxHash) == 0 {
|
||||
log.Debug("got batch not committed for blob uploading", "batch_index", batchIndex, "platform", platform.String())
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return batch, nil
|
||||
}
|
||||
@@ -0,0 +1,34 @@
|
||||
package blob_uploader
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
)
|
||||
|
||||
type blobUploaderMetrics struct {
|
||||
rollupBlobUploaderUploadToS3SuccessTotal prometheus.Counter
|
||||
rollupBlobUploaderUploadToS3FailedTotal prometheus.Counter
|
||||
}
|
||||
|
||||
var (
|
||||
initBlobUploaderMetricsOnce sync.Once
|
||||
blobUploaderMetric *blobUploaderMetrics
|
||||
)
|
||||
|
||||
func initBlobUploaderMetrics(reg prometheus.Registerer) *blobUploaderMetrics {
|
||||
initBlobUploaderMetricsOnce.Do(func() {
|
||||
blobUploaderMetric = &blobUploaderMetrics{
|
||||
rollupBlobUploaderUploadToS3SuccessTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "rollup_blob_uploader_upload_to_s3_success_total",
|
||||
Help: "The total number of upload blob to S3 runs success total",
|
||||
}),
|
||||
rollupBlobUploaderUploadToS3FailedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "rollup_blob_uploader_upload_to_s3_failed_total",
|
||||
Help: "The total number of upload blob to S3 runs failed total",
|
||||
}),
|
||||
}
|
||||
})
|
||||
return blobUploaderMetric
|
||||
}
|
||||
66
rollup/internal/controller/blob_uploader/s3_sender.go
Normal file
@@ -0,0 +1,66 @@
|
||||
package blob_uploader
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"scroll-tech/rollup/internal/config"
|
||||
|
||||
"github.com/aws/aws-sdk-go-v2/aws"
|
||||
awsconfig "github.com/aws/aws-sdk-go-v2/config"
|
||||
"github.com/aws/aws-sdk-go-v2/credentials"
|
||||
"github.com/aws/aws-sdk-go-v2/service/s3"
|
||||
)
|
||||
|
||||
// S3Uploader is responsible for uploading data to AWS S3.
|
||||
type S3Uploader struct {
|
||||
client *s3.Client
|
||||
bucket string
|
||||
region string
|
||||
timeout time.Duration
|
||||
}
|
||||
|
||||
func NewS3Uploader(cfg *config.AWSS3Config) (*S3Uploader, error) {
|
||||
// load AWS config
|
||||
var opts []func(*awsconfig.LoadOptions) error
|
||||
opts = append(opts, awsconfig.WithRegion(cfg.Region))
|
||||
|
||||
// if AccessKey && SecretKey provided, use it
|
||||
if cfg.AccessKey != "" && cfg.SecretKey != "" {
|
||||
opts = append(opts, awsconfig.WithCredentialsProvider(
|
||||
credentials.NewStaticCredentialsProvider(
|
||||
cfg.AccessKey,
|
||||
cfg.SecretKey,
|
||||
"",
|
||||
)),
|
||||
)
|
||||
}
|
||||
|
||||
awsCfg, err := awsconfig.LoadDefaultConfig(context.Background(), opts...)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to load default config: %w", err)
|
||||
}
|
||||
|
||||
return &S3Uploader{
|
||||
client: s3.NewFromConfig(awsCfg),
|
||||
bucket: cfg.Bucket,
|
||||
region: cfg.Region,
|
||||
timeout: 30 * time.Second,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// UploadData uploads data to s3 bucket
|
||||
func (u *S3Uploader) UploadData(ctx context.Context, data []byte, objectKey string) error {
|
||||
uploadCtx, cancel := context.WithTimeout(ctx, u.timeout)
|
||||
defer cancel()
|
||||
|
||||
_, err := u.client.PutObject(uploadCtx, &s3.PutObjectInput{
|
||||
Bucket: aws.String(u.bucket),
|
||||
Key: aws.String(objectKey),
|
||||
Body: bytes.NewReader(data),
|
||||
ContentType: aws.String("application/octet-stream"),
|
||||
})
|
||||
return err
|
||||
}
|
||||
@@ -1,20 +1,11 @@
|
||||
package relayer
|
||||
|
||||
import "errors"
|
||||
|
||||
const (
|
||||
gasPriceDiffPrecision = 1000000
|
||||
|
||||
defaultGasPriceDiff = 50000 // 5%
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrExecutionRevertedMessageExpired error of Message expired
|
||||
ErrExecutionRevertedMessageExpired = errors.New("execution reverted: Message expired")
|
||||
// ErrExecutionRevertedAlreadySuccessExecuted error of Message was already successfully executed
|
||||
ErrExecutionRevertedAlreadySuccessExecuted = errors.New("execution reverted: Message was already successfully executed")
|
||||
)
|
||||
|
||||
// ServiceType defines the various types of services within the relayer.
|
||||
type ServiceType int
|
||||
|
||||
|
||||
@@ -179,13 +179,13 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
|
||||
return
|
||||
}
|
||||
|
||||
hash, err := r.gasOracleSender.SendTransaction(block.Hash, &r.cfg.GasPriceOracleContractAddress, data, nil, 0)
|
||||
txHash, _, err := r.gasOracleSender.SendTransaction(block.Hash, &r.cfg.GasPriceOracleContractAddress, data, nil)
|
||||
if err != nil {
|
||||
log.Error("Failed to send gas oracle update tx to layer2", "block.Hash", block.Hash, "block.Height", block.Number, "block.BaseFee", baseFee, "block.BlobBaseFee", blobBaseFee, "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
err = r.l1BlockOrm.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, block.Hash, types.GasOracleImporting, hash.String())
|
||||
err = r.l1BlockOrm.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, block.Hash, types.GasOracleImporting, txHash.String())
|
||||
if err != nil {
|
||||
log.Error("UpdateGasOracleStatusAndOracleTxHash failed", "block.Hash", block.Hash, "block.Height", block.Number, "err", err)
|
||||
return
|
||||
@@ -195,7 +195,7 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
|
||||
r.lastBlobBaseFee = blobBaseFee
|
||||
r.metrics.rollupL1RelayerLatestBaseFee.Set(float64(r.lastBaseFee))
|
||||
r.metrics.rollupL1RelayerLatestBlobBaseFee.Set(float64(r.lastBlobBaseFee))
|
||||
log.Info("Update l1 base fee", "txHash", hash.String(), "baseFee", baseFee, "blobBaseFee", blobBaseFee)
|
||||
log.Info("Update l1 base fee", "txHash", txHash.String(), "baseFee", baseFee, "blobBaseFee", blobBaseFee)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -146,13 +146,13 @@ func testL1RelayerProcessGasPriceOracle(t *testing.T) {
|
||||
|
||||
convey.Convey("send transaction failure", t, func() {
|
||||
targetErr := errors.New("send transaction failure")
|
||||
patchGuard.ApplyMethodFunc(l1Relayer.gasOracleSender, "SendTransaction", func(string, *common.Address, []byte, *kzg4844.Blob, uint64) (hash common.Hash, err error) {
|
||||
patchGuard.ApplyMethodFunc(l1Relayer.gasOracleSender, "SendTransaction", func(string, *common.Address, []byte, *kzg4844.Blob) (hash common.Hash, err error) {
|
||||
return common.Hash{}, targetErr
|
||||
})
|
||||
l1Relayer.ProcessGasPriceOracle()
|
||||
})
|
||||
|
||||
patchGuard.ApplyMethodFunc(l1Relayer.gasOracleSender, "SendTransaction", func(string, *common.Address, []byte, *kzg4844.Blob, uint64) (hash common.Hash, err error) {
|
||||
patchGuard.ApplyMethodFunc(l1Relayer.gasOracleSender, "SendTransaction", func(string, *common.Address, []byte, *kzg4844.Blob) (hash common.Hash, err error) {
|
||||
return common.Hash{}, nil
|
||||
})
|
||||
|
||||
|
||||
@@ -15,7 +15,6 @@ import (
|
||||
"github.com/scroll-tech/da-codec/encoding"
|
||||
"github.com/scroll-tech/go-ethereum/accounts/abi"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/crypto"
|
||||
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
@@ -105,10 +104,15 @@ type StrategyParams struct {
|
||||
}
|
||||
|
||||
// bestParams maps the 2h/5h/12h submission windows to their best-performing rules.
// Timeouts are in seconds: 2, 5 and 12 hours, plus the same windows extended by roughly
// 20 minutes to account for batch creation time, since the timeout is measured from block creation.
|
||||
var bestParams = map[uint64]StrategyParams{
|
||||
2 * 3600: {BaselineType: PctMin, BaselineParam: 0.10, Gamma: 0.4, Beta: 8, RelaxType: Exponential},
|
||||
5 * 3600: {BaselineType: PctMin, BaselineParam: 0.30, Gamma: 0.6, Beta: 20, RelaxType: Sigmoid},
|
||||
12 * 3600: {BaselineType: PctMin, BaselineParam: 0.50, Gamma: 0.5, Beta: 20, RelaxType: Sigmoid},
|
||||
7200: {BaselineType: PctMin, BaselineParam: 0.10, Gamma: 0.4, Beta: 8, RelaxType: Exponential},
|
||||
8400: {BaselineType: PctMin, BaselineParam: 0.10, Gamma: 0.4, Beta: 8, RelaxType: Exponential},
|
||||
18000: {BaselineType: PctMin, BaselineParam: 0.30, Gamma: 0.6, Beta: 20, RelaxType: Sigmoid},
|
||||
19200: {BaselineType: PctMin, BaselineParam: 0.30, Gamma: 0.6, Beta: 20, RelaxType: Sigmoid},
|
||||
42800: {BaselineType: PctMin, BaselineParam: 0.50, Gamma: 0.5, Beta: 20, RelaxType: Sigmoid},
|
||||
44400: {BaselineType: PctMin, BaselineParam: 0.50, Gamma: 0.5, Beta: 20, RelaxType: Sigmoid},
|
||||
}
|
||||
|
||||
// NewLayer2Relayer will return a new instance of Layer2RelayerClient
|
||||
@@ -148,6 +152,11 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
|
||||
return nil, fmt.Errorf("invalid service type for l2_relayer: %v", serviceType)
|
||||
}
|
||||
|
||||
strategy, ok := bestParams[uint64(cfg.BatchSubmission.TimeoutSec)]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid timeout for batch submission: %v", cfg.BatchSubmission.TimeoutSec)
|
||||
}
|
||||
|
||||
layer2Relayer := &Layer2Relayer{
|
||||
ctx: ctx,
|
||||
db: db,
|
||||
@@ -165,7 +174,7 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
|
||||
l1RollupABI: bridgeAbi.ScrollChainABI,
|
||||
|
||||
l2GasOracleABI: bridgeAbi.L2GasPriceOracleABI,
|
||||
batchStrategy: bestParams[uint64(cfg.BatchSubmission.TimeoutSec)],
|
||||
batchStrategy: strategy,
|
||||
cfg: cfg,
|
||||
chainCfg: chainCfg,
|
||||
}
|
||||
@@ -208,17 +217,10 @@ func (r *Layer2Relayer) initializeGenesis() error {
|
||||
|
||||
log.Info("retrieved L2 genesis header", "hash", genesis.Hash().String())
|
||||
|
||||
chunk := &encoding.Chunk{
|
||||
Blocks: []*encoding.Block{{
|
||||
Header: genesis,
|
||||
Transactions: nil,
|
||||
WithdrawRoot: common.Hash{},
|
||||
RowConsumption: &gethTypes.RowConsumption{},
|
||||
}},
|
||||
}
|
||||
chunk := &encoding.Chunk{Blocks: []*encoding.Block{{Header: genesis}}}
|
||||
|
||||
err = r.db.Transaction(func(dbTX *gorm.DB) error {
|
||||
if err = r.l2BlockOrm.InsertL2Blocks(r.ctx, chunk.Blocks); err != nil {
|
||||
if err = r.l2BlockOrm.InsertL2Blocks(r.ctx, chunk.Blocks, dbTX); err != nil {
|
||||
return fmt.Errorf("failed to insert genesis block: %v", err)
|
||||
}
|
||||
|
||||
@@ -279,11 +281,11 @@ func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte,
|
||||
}
|
||||
|
||||
// submit genesis batch to L1 rollup contract
|
||||
txHash, err := r.commitSender.SendTransaction(batchHash, &r.cfg.RollupContractAddress, calldata, nil, 0)
|
||||
txHash, _, err := r.commitSender.SendTransaction(batchHash, &r.cfg.RollupContractAddress, calldata, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to send import genesis batch tx to L1, error: %v", err)
|
||||
}
|
||||
log.Info("importGenesisBatch transaction sent", "contract", r.cfg.RollupContractAddress, "txHash", txHash.String(), "batchHash", batchHash)
|
||||
log.Info("importGenesisBatch transaction sent", "contract", r.cfg.RollupContractAddress, "txHash", txHash, "batchHash", batchHash)
|
||||
|
||||
// wait for confirmation
|
||||
// we assume that no other transactions are sent before initializeGenesis completes
|
||||
@@ -327,8 +329,15 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
|
||||
return
|
||||
}
|
||||
|
||||
// nothing to do if we don't have any pending batches
|
||||
if len(dbBatches) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// if the backlog outgrows the max size, force-submit enough of the oldest batches
|
||||
backlogCount, err := r.batchOrm.GetFailedAndPendingBatchesCount(r.ctx)
|
||||
r.metrics.rollupL2RelayerBacklogCounts.Set(float64(backlogCount))
|
||||
|
||||
if err != nil {
|
||||
log.Error("Failed to fetch pending L2 batches", "err", err)
|
||||
return
|
||||
@@ -336,9 +345,15 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
|
||||
|
||||
var forceSubmit bool
|
||||
|
||||
oldestBatchTimestamp := dbBatches[0].CreatedAt
|
||||
startChunk, err := r.chunkOrm.GetChunkByIndex(r.ctx, dbBatches[0].StartChunkIndex)
|
||||
if err != nil {
|
||||
log.Error("failed to get first chunk", "err", err, "batch index", dbBatches[0].Index, "chunk index", dbBatches[0].StartChunkIndex)
|
||||
return
|
||||
}
|
||||
oldestBlockTimestamp := time.Unix(int64(startChunk.StartBlockTime), 0)
|
||||
|
||||
// if the batch with the oldest index is too old, we force submit all batches that we have so far in the next step
|
||||
if r.cfg.BatchSubmission.TimeoutSec > 0 && time.Since(oldestBatchTimestamp) > time.Duration(r.cfg.BatchSubmission.TimeoutSec)*time.Second {
|
||||
if r.cfg.BatchSubmission.TimeoutSec > 0 && time.Since(oldestBlockTimestamp) > time.Duration(r.cfg.BatchSubmission.TimeoutSec)*time.Second {
|
||||
forceSubmit = true
|
||||
}
|
||||
|
||||
@@ -349,10 +364,12 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
|
||||
|
||||
if !forceSubmit {
|
||||
// check if we should skip submitting the batch based on the fee target
|
||||
skip, err := r.skipSubmitByFee(oldestBatchTimestamp)
|
||||
skip, err := r.skipSubmitByFee(oldestBlockTimestamp, r.metrics)
|
||||
// return if not hitting target price
|
||||
if skip {
|
||||
log.Debug("Skipping batch submission", "reason", err)
|
||||
log.Debug("Skipping batch submission", "first batch index", dbBatches[0].Index, "backlog count", backlogCount, "reason", err)
|
||||
log.Debug("first batch index", dbBatches[0].Index)
|
||||
log.Debug("backlog count", backlogCount)
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
@@ -362,12 +379,6 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
|
||||
|
||||
var batchesToSubmit []*dbBatchWithChunksAndParent
|
||||
for i, dbBatch := range dbBatches {
|
||||
if i == 0 && encoding.CodecVersion(dbBatch.CodecVersion) < encoding.CodecV7 {
|
||||
// if the first batch is not >= V7 then we need to submit batches one by one
|
||||
r.processPendingBatchesV4(dbBatches)
|
||||
return
|
||||
}
|
||||
|
||||
var dbChunks []*orm.Chunk
|
||||
var dbParentBatch *orm.Batch
|
||||
|
||||
@@ -441,7 +452,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
|
||||
}
|
||||
|
||||
if forceSubmit {
|
||||
log.Info("Forcing submission of batches due to timeout", "batch index", batchesToSubmit[0].Batch.Index, "created at", batchesToSubmit[0].Batch.CreatedAt)
|
||||
log.Info("Forcing submission of batches due to timeout", "batch index", batchesToSubmit[0].Batch.Index, "first block created at", oldestBlockTimestamp)
|
||||
}
|
||||
|
||||
// We have at least 1 batch to commit
|
||||
@@ -466,7 +477,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
|
||||
return
|
||||
}
|
||||
|
||||
txHash, err := r.commitSender.SendTransaction(r.contextIDFromBatches(batchesToSubmit), &r.cfg.RollupContractAddress, calldata, blobs, 0)
|
||||
txHash, blobBaseFee, err := r.commitSender.SendTransaction(r.contextIDFromBatches(batchesToSubmit), &r.cfg.RollupContractAddress, calldata, blobs)
|
||||
if err != nil {
|
||||
if errors.Is(err, sender.ErrTooManyPendingBlobTxs) {
|
||||
r.metrics.rollupL2RelayerProcessPendingBatchErrTooManyPendingBlobTxsTotal.Inc()
|
||||
@@ -506,6 +517,8 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
|
||||
r.metrics.rollupL2RelayerCommitThroughput.Add(float64(totalGasUsed))
|
||||
r.metrics.rollupL2RelayerProcessPendingBatchSuccessTotal.Add(float64(len(batchesToSubmit)))
|
||||
r.metrics.rollupL2RelayerProcessBatchesPerTxCount.Set(float64(len(batchesToSubmit)))
|
||||
r.metrics.rollupL2RelayerCommitLatency.Set(time.Since(oldestBlockTimestamp).Seconds())
|
||||
r.metrics.rollupL2RelayerCommitPrice.Set(float64(blobBaseFee))
|
||||
|
||||
log.Info("Sent the commitBatches tx to layer1", "batches count", len(batchesToSubmit), "start index", firstBatch.Index, "start hash", firstBatch.Hash, "end index", lastBatch.Index, "end hash", lastBatch.Hash, "tx hash", txHash.String())
|
||||
}
|
||||
@@ -534,117 +547,6 @@ type dbBatchWithChunksAndParent struct {
|
||||
ParentBatch *orm.Batch
|
||||
}
|
||||
|
||||
func (r *Layer2Relayer) processPendingBatchesV4(dbBatches []*orm.Batch) {
|
||||
for _, dbBatch := range dbBatches {
|
||||
r.metrics.rollupL2RelayerProcessPendingBatchTotal.Inc()
|
||||
|
||||
dbChunks, err := r.chunkOrm.GetChunksInRange(r.ctx, dbBatch.StartChunkIndex, dbBatch.EndChunkIndex)
|
||||
if err != nil {
|
||||
log.Error("failed to get chunks in range", "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
// check codec version
|
||||
for _, dbChunk := range dbChunks {
|
||||
if dbBatch.CodecVersion != dbChunk.CodecVersion {
|
||||
log.Error("batch codec version is different from chunk codec version", "batch index", dbBatch.Index, "chunk index", dbChunk.Index, "batch codec version", dbBatch.CodecVersion, "chunk codec version", dbChunk.CodecVersion)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
chunks := make([]*encoding.Chunk, len(dbChunks))
|
||||
for i, c := range dbChunks {
|
||||
blocks, getErr := r.l2BlockOrm.GetL2BlocksInRange(r.ctx, c.StartBlockNumber, c.EndBlockNumber)
|
||||
if getErr != nil {
|
||||
log.Error("failed to get blocks in range", "err", getErr)
|
||||
return
|
||||
}
|
||||
chunks[i] = &encoding.Chunk{Blocks: blocks}
|
||||
}
|
||||
|
||||
if dbBatch.Index == 0 {
|
||||
log.Error("invalid args: batch index is 0, should only happen in committing genesis batch")
|
||||
return
|
||||
}
|
||||
|
||||
dbParentBatch, getErr := r.batchOrm.GetBatchByIndex(r.ctx, dbBatch.Index-1)
|
||||
if getErr != nil {
|
||||
log.Error("failed to get parent batch header", "err", getErr)
|
||||
return
|
||||
}
|
||||
|
||||
if dbParentBatch.CodecVersion > dbBatch.CodecVersion {
|
||||
log.Error("parent batch codec version is greater than current batch codec version", "index", dbBatch.Index, "hash", dbBatch.Hash, "parent codec version", dbParentBatch.CodecVersion, "current codec version", dbBatch.CodecVersion)
|
||||
return
|
||||
}
|
||||
|
||||
var calldata []byte
|
||||
var blob *kzg4844.Blob
|
||||
codecVersion := encoding.CodecVersion(dbBatch.CodecVersion)
|
||||
switch codecVersion {
|
||||
case encoding.CodecV4, encoding.CodecV5, encoding.CodecV6:
|
||||
calldata, blob, err = r.constructCommitBatchPayloadCodecV4(dbBatch, dbParentBatch, dbChunks, chunks)
|
||||
if err != nil {
|
||||
log.Error("failed to construct commitBatchWithBlobProof payload for V4", "codecVersion", codecVersion, "index", dbBatch.Index, "err", err)
|
||||
return
|
||||
}
|
||||
default:
|
||||
log.Error("unsupported codec version in processPendingBatchesV4", "codecVersion", codecVersion)
|
||||
return
|
||||
}
|
||||
|
||||
// fallbackGasLimit is non-zero only in sending non-blob transactions.
|
||||
fallbackGasLimit := uint64(float64(dbBatch.TotalL1CommitGas) * r.cfg.L1CommitGasLimitMultiplier)
|
||||
if types.RollupStatus(dbBatch.RollupStatus) == types.RollupCommitFailed {
|
||||
// use eth_estimateGas if this batch has been committed and failed at least once.
|
||||
fallbackGasLimit = 0
|
||||
log.Warn("Batch commit previously failed, using eth_estimateGas for the re-submission", "hash", dbBatch.Hash)
|
||||
}
|
||||
|
||||
txHash, err := r.commitSender.SendTransaction(dbBatch.Hash, &r.cfg.RollupContractAddress, calldata, []*kzg4844.Blob{blob}, fallbackGasLimit)
|
||||
if err != nil {
|
||||
if errors.Is(err, sender.ErrTooManyPendingBlobTxs) {
|
||||
r.metrics.rollupL2RelayerProcessPendingBatchErrTooManyPendingBlobTxsTotal.Inc()
|
||||
log.Debug(
|
||||
"Skipped sending commitBatch tx to L1: too many pending blob txs",
|
||||
"maxPending", r.cfg.SenderConfig.MaxPendingBlobTxs,
|
||||
"err", err,
|
||||
)
|
||||
return
|
||||
}
|
||||
log.Error(
|
||||
"Failed to send commitBatch tx to layer1",
|
||||
"index", dbBatch.Index,
|
||||
"hash", dbBatch.Hash,
|
||||
"RollupContractAddress", r.cfg.RollupContractAddress,
|
||||
"err", err,
|
||||
"calldata", common.Bytes2Hex(calldata),
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
err = r.batchOrm.UpdateCommitTxHashAndRollupStatus(r.ctx, dbBatch.Hash, txHash.String(), types.RollupCommitting)
|
||||
if err != nil {
|
||||
log.Error("UpdateCommitTxHashAndRollupStatus failed", "hash", dbBatch.Hash, "index", dbBatch.Index, "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
var maxBlockHeight uint64
|
||||
var totalGasUsed uint64
|
||||
for _, dbChunk := range dbChunks {
|
||||
if dbChunk.EndBlockNumber > maxBlockHeight {
|
||||
maxBlockHeight = dbChunk.EndBlockNumber
|
||||
}
|
||||
totalGasUsed += dbChunk.TotalL2TxGas
|
||||
}
|
||||
r.metrics.rollupL2RelayerCommitBlockHeight.Set(float64(maxBlockHeight))
|
||||
r.metrics.rollupL2RelayerCommitThroughput.Add(float64(totalGasUsed))
|
||||
|
||||
r.metrics.rollupL2RelayerProcessPendingBatchSuccessTotal.Inc()
|
||||
log.Info("Sent the commitBatch tx to layer1", "batch index", dbBatch.Index, "batch hash", dbBatch.Hash, "tx hash", txHash.String())
|
||||
}
|
||||
}
|
||||
|
||||
// ProcessPendingBundles submits proof to layer 1 rollup contract
|
||||
func (r *Layer2Relayer) ProcessPendingBundles() {
|
||||
r.metrics.rollupL2RelayerProcessPendingBundlesTotal.Inc()
|
||||
@@ -679,33 +581,6 @@ func (r *Layer2Relayer) ProcessPendingBundles() {
|
||||
return
|
||||
}
|
||||
|
||||
lastFinalizedChunk, err := r.chunkOrm.GetChunkByIndex(r.ctx, lastBatch.EndChunkIndex)
|
||||
if err != nil {
|
||||
log.Error("failed to get last finalized chunk", "chunk index", lastBatch.EndChunkIndex)
|
||||
return
|
||||
}
|
||||
|
||||
firstUnfinalizedBatch, err := r.batchOrm.GetBatchByIndex(r.ctx, bundle.StartBatchIndex)
|
||||
if err != nil {
|
||||
log.Error("failed to get first unfinalized batch", "batch index", bundle.StartBatchIndex)
|
||||
return
|
||||
}
|
||||
|
||||
firstUnfinalizedChunk, err := r.chunkOrm.GetChunkByIndex(r.ctx, firstUnfinalizedBatch.StartChunkIndex)
|
||||
if err != nil {
log.Error("failed to get first unfinalized chunk", "chunk index", firstUnfinalizedBatch.StartChunkIndex)
return
}
|
||||
|
||||
if r.cfg.TestEnvBypassOnlyUntilForkBoundary {
|
||||
lastFork := encoding.GetHardforkName(r.chainCfg, lastFinalizedChunk.StartBlockNumber, lastFinalizedChunk.StartBlockTime)
|
||||
nextFork := encoding.GetHardforkName(r.chainCfg, firstUnfinalizedChunk.StartBlockNumber, firstUnfinalizedChunk.StartBlockTime)
|
||||
if lastFork != nextFork {
|
||||
log.Info("not fake finalizing past the fork boundary", "last fork", lastFork, "next fork", nextFork)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if err := r.finalizeBundle(bundle, false); err != nil {
|
||||
log.Error("failed to finalize timeout bundle without proof", "bundle index", bundle.Index, "start batch index", bundle.StartBatchIndex, "end batch index", bundle.EndBatchIndex, "err", err)
|
||||
return
|
||||
@@ -818,11 +693,6 @@ func (r *Layer2Relayer) finalizeBundle(bundle *orm.Bundle, withProof bool) error
|
||||
|
||||
var calldata []byte
|
||||
switch encoding.CodecVersion(bundle.CodecVersion) {
|
||||
case encoding.CodecV4, encoding.CodecV5, encoding.CodecV6:
|
||||
calldata, err = r.constructFinalizeBundlePayloadCodecV4(dbBatch, aggProof)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to construct finalizeBundle payload codecv4, bundle index: %v, last batch index: %v, err: %w", bundle.Index, dbBatch.Index, err)
|
||||
}
|
||||
case encoding.CodecV7:
|
||||
calldata, err = r.constructFinalizeBundlePayloadCodecV7(dbBatch, endChunk, aggProof)
|
||||
if err != nil {
|
||||
@@ -832,7 +702,7 @@ func (r *Layer2Relayer) finalizeBundle(bundle *orm.Bundle, withProof bool) error
|
||||
return fmt.Errorf("unsupported codec version in finalizeBundle, bundle index: %v, version: %d", bundle.Index, bundle.CodecVersion)
|
||||
}
|
||||
|
||||
txHash, err := r.finalizeSender.SendTransaction("finalizeBundle-"+bundle.Hash, &r.cfg.RollupContractAddress, calldata, nil, 0)
|
||||
txHash, _, err := r.finalizeSender.SendTransaction("finalizeBundle-"+bundle.Hash, &r.cfg.RollupContractAddress, calldata, nil)
|
||||
if err != nil {
|
||||
log.Error("finalizeBundle in layer1 failed", "with proof", withProof, "index", bundle.Index,
|
||||
"start batch index", bundle.StartBatchIndex, "end batch index", bundle.EndBatchIndex,
|
||||
@@ -1028,48 +898,6 @@ func (r *Layer2Relayer) handleL2RollupRelayerConfirmLoop(ctx context.Context) {
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Layer2Relayer) constructCommitBatchPayloadCodecV4(dbBatch *orm.Batch, dbParentBatch *orm.Batch, dbChunks []*orm.Chunk, chunks []*encoding.Chunk) ([]byte, *kzg4844.Blob, error) {
|
||||
batch := &encoding.Batch{
|
||||
Index: dbBatch.Index,
|
||||
TotalL1MessagePoppedBefore: dbChunks[0].TotalL1MessagesPoppedBefore,
|
||||
ParentBatchHash: common.HexToHash(dbParentBatch.Hash),
|
||||
Chunks: chunks,
|
||||
}
|
||||
|
||||
codec, err := encoding.CodecFromVersion(encoding.CodecVersion(dbBatch.CodecVersion))
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to get codec from version %d, err: %w", dbBatch.CodecVersion, err)
|
||||
}
|
||||
|
||||
daBatch, createErr := codec.NewDABatch(batch)
|
||||
if createErr != nil {
|
||||
return nil, nil, fmt.Errorf("failed to create DA batch: %w", createErr)
|
||||
}
|
||||
|
||||
encodedChunks := make([][]byte, len(dbChunks))
|
||||
for i, c := range dbChunks {
|
||||
daChunk, createErr := codec.NewDAChunk(chunks[i], c.TotalL1MessagesPoppedBefore)
|
||||
if createErr != nil {
|
||||
return nil, nil, fmt.Errorf("failed to create DA chunk: %w", createErr)
|
||||
}
|
||||
encodedChunks[i], err = daChunk.Encode()
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to encode DA chunk: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
blobDataProof, err := daBatch.BlobDataProofForPointEvaluation()
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to get blob data proof for point evaluation: %w", err)
|
||||
}
|
||||
|
||||
calldata, packErr := r.l1RollupABI.Pack("commitBatchWithBlobProof", daBatch.Version(), dbParentBatch.BatchHeader, encodedChunks, daBatch.SkippedL1MessageBitmap(), blobDataProof)
|
||||
if packErr != nil {
|
||||
return nil, nil, fmt.Errorf("failed to pack commitBatchWithBlobProof: %w", packErr)
|
||||
}
|
||||
return calldata, daBatch.Blob(), nil
|
||||
}
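// Usage sketch, mirroring the call sites shown earlier in processPendingBatchesV4:
//   calldata, blob, err := r.constructCommitBatchPayloadCodecV4(dbBatch, dbParentBatch, dbChunks, chunks)
//   txHash, err := r.commitSender.SendTransaction(dbBatch.Hash, &r.cfg.RollupContractAddress, calldata, []*kzg4844.Blob{blob}, fallbackGasLimit)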
|
||||
|
||||
func (r *Layer2Relayer) constructCommitBatchPayloadCodecV7(batchesToSubmit []*dbBatchWithChunksAndParent, firstBatch, lastBatch *orm.Batch) ([]byte, []*kzg4844.Blob, uint64, uint64, error) {
|
||||
var maxBlockHeight uint64
|
||||
var totalGasUsed uint64
|
||||
@@ -1127,34 +955,6 @@ func (r *Layer2Relayer) constructCommitBatchPayloadCodecV7(batchesToSubmit []*db
|
||||
return calldata, blobs, maxBlockHeight, totalGasUsed, nil
|
||||
}
|
||||
|
||||
func (r *Layer2Relayer) constructFinalizeBundlePayloadCodecV4(dbBatch *orm.Batch, aggProof *message.OpenVMBundleProof) ([]byte, error) {
|
||||
if aggProof != nil { // finalizeBundle with proof.
|
||||
calldata, packErr := r.l1RollupABI.Pack(
|
||||
"finalizeBundleWithProof",
|
||||
dbBatch.BatchHeader,
|
||||
common.HexToHash(dbBatch.StateRoot),
|
||||
common.HexToHash(dbBatch.WithdrawRoot),
|
||||
aggProof.Proof(),
|
||||
)
|
||||
if packErr != nil {
|
||||
return nil, fmt.Errorf("failed to pack finalizeBundleWithProof: %w", packErr)
|
||||
}
|
||||
return calldata, nil
|
||||
}
|
||||
|
||||
// finalizeBundle without proof.
|
||||
calldata, packErr := r.l1RollupABI.Pack(
|
||||
"finalizeBundle",
|
||||
dbBatch.BatchHeader,
|
||||
common.HexToHash(dbBatch.StateRoot),
|
||||
common.HexToHash(dbBatch.WithdrawRoot),
|
||||
)
|
||||
if packErr != nil {
|
||||
return nil, fmt.Errorf("failed to pack finalizeBundle: %w", packErr)
|
||||
}
|
||||
return calldata, nil
|
||||
}
|
||||
|
||||
func (r *Layer2Relayer) constructFinalizeBundlePayloadCodecV7(dbBatch *orm.Batch, endChunk *orm.Chunk, aggProof *message.OpenVMBundleProof) ([]byte, error) {
|
||||
if aggProof != nil { // finalizeBundle with proof.
|
||||
calldata, packErr := r.l1RollupABI.Pack(
|
||||
@@ -1301,7 +1101,7 @@ func calculateTargetPrice(windowSec uint64, strategy StrategyParams, firstTime t
// skipSubmitByFee returns (true, nil) when submission should be skipped right now
// because the blob fee is above target and the timeout window hasn't yet elapsed.
// Otherwise it returns (false, err).
func (r *Layer2Relayer) skipSubmitByFee(oldest time.Time) (bool, error) {
func (r *Layer2Relayer) skipSubmitByFee(oldest time.Time, metrics *l2RelayerMetrics) (bool, error) {
windowSec := uint64(r.cfg.BatchSubmission.TimeoutSec)

hist, err := r.fetchBlobFeeHistory(windowSec)
@@ -1316,6 +1116,11 @@ func (r *Layer2Relayer) skipSubmitByFee(oldest time.Time) (bool, error) {
target := calculateTargetPrice(windowSec, r.batchStrategy, oldest, hist)
current := hist[len(hist)-1]

currentFloat, _ := current.Float64()
targetFloat, _ := target.Float64()
metrics.rollupL2RelayerCurrentBlobPrice.Set(currentFloat)
metrics.rollupL2RelayerTargetBlobPrice.Set(targetFloat)

// if current fee > target and still inside the timeout window, skip
if current.Cmp(target) > 0 && time.Since(oldest) < time.Duration(windowSec)*time.Second {
return true, fmt.Errorf(
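// Worked example (hypothetical numbers): with a 3600s timeout window, a current
// blob fee of 40 gwei and a computed target of 30 gwei, a batch whose oldest block
// is only 20 minutes old is skipped; once the window elapses, or the current fee
// falls to the target or below, submission proceeds.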
@@ -26,6 +26,12 @@ type l2RelayerMetrics struct {
|
||||
|
||||
rollupL2RelayerCommitBlockHeight prometheus.Gauge
|
||||
rollupL2RelayerCommitThroughput prometheus.Counter
|
||||
|
||||
rollupL2RelayerCurrentBlobPrice prometheus.Gauge
|
||||
rollupL2RelayerTargetBlobPrice prometheus.Gauge
|
||||
rollupL2RelayerCommitLatency prometheus.Gauge
|
||||
rollupL2RelayerBacklogCounts prometheus.Gauge
|
||||
rollupL2RelayerCommitPrice prometheus.Gauge
|
||||
}
|
||||
|
||||
var (
|
||||
@@ -104,6 +110,26 @@ func initL2RelayerMetrics(reg prometheus.Registerer) *l2RelayerMetrics {
|
||||
Name: "rollup_l2_relayer_commit_throughput",
|
||||
Help: "The cumulative gas used in blocks committed by the L2 relayer",
|
||||
}),
|
||||
rollupL2RelayerTargetBlobPrice: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "rollup_l2_relayer_target_blob_price",
|
||||
Help: "The target blob price for the L2 relayer's submission strategy",
|
||||
}),
|
||||
rollupL2RelayerCurrentBlobPrice: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "rollup_l2_relayer_current_blob_price",
|
||||
Help: "The current blob price",
|
||||
}),
|
||||
rollupL2RelayerCommitLatency: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "rollup_l2_relayer_commit_latency",
|
||||
Help: "The latency of the commit measured from oldest blocktime",
|
||||
}),
|
||||
rollupL2RelayerBacklogCounts: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "rollup_l2_relayer_backlog_counts",
|
||||
Help: "The number of pending batches in the backlog",
|
||||
}),
|
||||
rollupL2RelayerCommitPrice: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "rollup_l2_relayer_commit_price",
|
||||
Help: "The commit price for the L2 relayer's submission strategy",
|
||||
}),
|
||||
}
|
||||
})
|
||||
return l2RelayerMetric
|
||||
|
||||
@@ -48,203 +48,186 @@ func testCreateNewRelayer(t *testing.T) {
|
||||
}
|
||||
|
||||
func testL2RelayerProcessPendingBatches(t *testing.T) {
|
||||
codecVersions := []encoding.CodecVersion{encoding.CodecV4}
|
||||
for _, codecVersion := range codecVersions {
|
||||
db := setupL2RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
db := setupL2RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
l2Cfg := cfg.L2Config
|
||||
var chainConfig *params.ChainConfig
|
||||
if codecVersion == encoding.CodecV4 {
|
||||
chainConfig = ¶ms.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}
|
||||
} else {
|
||||
assert.Fail(t, "unsupported codec version, expected CodecV4")
|
||||
}
|
||||
l2Cfg := cfg.L2Config
|
||||
chainConfig := ¶ms.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64), EuclidTime: new(uint64), EuclidV2Time: new(uint64)}
|
||||
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, ServiceTypeL2RollupRelayer, nil)
|
||||
assert.NoError(t, err)
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, ServiceTypeL2RollupRelayer, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
patchGuard := gomonkey.ApplyMethodFunc(l2Cli, "SendTransaction", func(_ context.Context, _ *gethTypes.Transaction) error {
|
||||
return nil
|
||||
})
|
||||
patchGuard := gomonkey.ApplyMethodFunc(l2Cli, "SendTransaction", func(_ context.Context, _ *gethTypes.Transaction) error {
|
||||
return nil
|
||||
})
|
||||
|
||||
l2BlockOrm := orm.NewL2Block(db)
|
||||
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
|
||||
assert.NoError(t, err)
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
_, err = chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion, rutils.ChunkMetrics{})
|
||||
assert.NoError(t, err)
|
||||
_, err = chunkOrm.InsertChunk(context.Background(), chunk2, codecVersion, rutils.ChunkMetrics{})
|
||||
assert.NoError(t, err)
|
||||
l2BlockOrm := orm.NewL2Block(db)
|
||||
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
|
||||
assert.NoError(t, err)
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
_, err = chunkOrm.InsertChunk(context.Background(), chunk1, encoding.CodecV7, rutils.ChunkMetrics{})
|
||||
assert.NoError(t, err)
|
||||
_, err = chunkOrm.InsertChunk(context.Background(), chunk2, encoding.CodecV7, rutils.ChunkMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
batch := &encoding.Batch{
|
||||
Index: 1,
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: common.Hash{},
|
||||
Chunks: []*encoding.Chunk{chunk1, chunk2},
|
||||
}
|
||||
|
||||
batchOrm := orm.NewBatch(db)
|
||||
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion, rutils.BatchMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
relayer.ProcessPendingBatches()
|
||||
|
||||
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{dbBatch.Hash})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(statuses))
|
||||
assert.Equal(t, types.RollupCommitting, statuses[0])
|
||||
relayer.StopSenders()
|
||||
patchGuard.Reset()
|
||||
batch := &encoding.Batch{
|
||||
Index: 1,
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: common.Hash{},
|
||||
Chunks: []*encoding.Chunk{chunk1, chunk2},
|
||||
Blocks: []*encoding.Block{block1, block2},
|
||||
}
|
||||
|
||||
batchOrm := orm.NewBatch(db)
|
||||
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV7, rutils.BatchMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
relayer.ProcessPendingBatches()
|
||||
|
||||
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{dbBatch.Hash})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(statuses))
|
||||
assert.Equal(t, types.RollupCommitting, statuses[0])
|
||||
relayer.StopSenders()
|
||||
patchGuard.Reset()
|
||||
}
|
||||
|
||||
func testL2RelayerProcessPendingBundles(t *testing.T) {
|
||||
codecVersions := []encoding.CodecVersion{encoding.CodecV4}
|
||||
for _, codecVersion := range codecVersions {
|
||||
db := setupL2RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
db := setupL2RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
l2Cfg := cfg.L2Config
|
||||
var chainConfig *params.ChainConfig
|
||||
if codecVersion == encoding.CodecV4 {
|
||||
chainConfig = ¶ms.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}
|
||||
}
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, ServiceTypeL2RollupRelayer, nil)
|
||||
assert.NoError(t, err)
|
||||
l2Cfg := cfg.L2Config
|
||||
chainConfig := ¶ms.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64), EuclidTime: new(uint64), EuclidV2Time: new(uint64)}
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, ServiceTypeL2RollupRelayer, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
batch := &encoding.Batch{
|
||||
Index: 1,
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: common.Hash{},
|
||||
Chunks: []*encoding.Chunk{chunk1, chunk2},
|
||||
}
|
||||
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
_, err = chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion, rutils.ChunkMetrics{})
|
||||
assert.NoError(t, err)
|
||||
_, err = chunkOrm.InsertChunk(context.Background(), chunk2, codecVersion, rutils.ChunkMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
batchOrm := orm.NewBatch(db)
|
||||
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion, rutils.BatchMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
bundleOrm := orm.NewBundle(db)
|
||||
bundle, err := bundleOrm.InsertBundle(context.Background(), []*orm.Batch{dbBatch}, codecVersion)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = bundleOrm.UpdateRollupStatus(context.Background(), bundle.Hash, types.RollupPending)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = bundleOrm.UpdateProvingStatus(context.Background(), dbBatch.Hash, types.ProvingTaskVerified)
|
||||
assert.NoError(t, err)
|
||||
|
||||
relayer.ProcessPendingBundles()
|
||||
|
||||
bundles, err := bundleOrm.GetBundles(context.Background(), map[string]interface{}{"hash": bundle.Hash}, nil, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(bundles))
|
||||
// no valid proof, rollup status remains the same
|
||||
assert.Equal(t, types.RollupPending, types.RollupStatus(bundles[0].RollupStatus))
|
||||
|
||||
patchGuard := gomonkey.ApplyMethodFunc((*message.OpenVMBundleProof)(nil), "SanityCheck", func() error {
|
||||
return nil
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
|
||||
proof := &message.OpenVMBundleProof{EvmProof: &message.OpenVMEvmProof{Instances: make([]byte, 384)}}
|
||||
err = bundleOrm.UpdateProofAndProvingStatusByHash(context.Background(), bundle.Hash, proof, types.ProvingTaskVerified, 600)
|
||||
assert.NoError(t, err)
|
||||
|
||||
relayer.ProcessPendingBundles()
|
||||
bundles, err = bundleOrm.GetBundles(context.Background(), map[string]interface{}{"hash": bundle.Hash}, nil, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(bundles))
|
||||
assert.Equal(t, types.RollupFinalizing, types.RollupStatus(bundles[0].RollupStatus))
|
||||
relayer.StopSenders()
|
||||
batch := &encoding.Batch{
|
||||
Index: 1,
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: common.Hash{},
|
||||
Chunks: []*encoding.Chunk{chunk1, chunk2},
|
||||
Blocks: []*encoding.Block{block1, block2},
|
||||
}
|
||||
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
_, err = chunkOrm.InsertChunk(context.Background(), chunk1, encoding.CodecV7, rutils.ChunkMetrics{})
|
||||
assert.NoError(t, err)
|
||||
_, err = chunkOrm.InsertChunk(context.Background(), chunk2, encoding.CodecV7, rutils.ChunkMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
batchOrm := orm.NewBatch(db)
|
||||
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV7, rutils.BatchMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
bundleOrm := orm.NewBundle(db)
|
||||
bundle, err := bundleOrm.InsertBundle(context.Background(), []*orm.Batch{dbBatch}, encoding.CodecV7)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = bundleOrm.UpdateRollupStatus(context.Background(), bundle.Hash, types.RollupPending)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = bundleOrm.UpdateProvingStatus(context.Background(), bundle.Hash, types.ProvingTaskVerified)
|
||||
assert.NoError(t, err)
|
||||
|
||||
relayer.ProcessPendingBundles()
|
||||
|
||||
bundles, err := bundleOrm.GetBundles(context.Background(), map[string]interface{}{"hash": bundle.Hash}, nil, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(bundles))
|
||||
// no valid proof, rollup status remains the same
|
||||
assert.Equal(t, types.RollupPending, types.RollupStatus(bundles[0].RollupStatus))
|
||||
|
||||
patchGuard := gomonkey.ApplyMethodFunc((*message.OpenVMBundleProof)(nil), "SanityCheck", func() error {
|
||||
return nil
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
|
||||
proof := &message.OpenVMBundleProof{EvmProof: &message.OpenVMEvmProof{Instances: make([]byte, 384)}}
|
||||
err = bundleOrm.UpdateProofAndProvingStatusByHash(context.Background(), bundle.Hash, proof, types.ProvingTaskVerified, 600)
|
||||
assert.NoError(t, err)
|
||||
|
||||
relayer.ProcessPendingBundles()
|
||||
bundles, err = bundleOrm.GetBundles(context.Background(), map[string]interface{}{"hash": bundle.Hash}, nil, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(bundles))
|
||||
assert.Equal(t, types.RollupFinalizing, types.RollupStatus(bundles[0].RollupStatus))
|
||||
relayer.StopSenders()
|
||||
}
|
||||
|
||||
func testL2RelayerFinalizeTimeoutBundles(t *testing.T) {
|
||||
codecVersions := []encoding.CodecVersion{encoding.CodecV4}
|
||||
for _, codecVersion := range codecVersions {
|
||||
db := setupL2RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
db := setupL2RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
l2Cfg := cfg.L2Config
|
||||
l2Cfg.RelayerConfig.EnableTestEnvBypassFeatures = true
|
||||
l2Cfg.RelayerConfig.FinalizeBundleWithoutProofTimeoutSec = 0
|
||||
var chainConfig *params.ChainConfig
|
||||
if codecVersion == encoding.CodecV4 {
|
||||
chainConfig = ¶ms.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}
|
||||
}
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, ServiceTypeL2RollupRelayer, nil)
|
||||
assert.NoError(t, err)
|
||||
l2Cfg := cfg.L2Config
|
||||
l2Cfg.RelayerConfig.EnableTestEnvBypassFeatures = true
|
||||
l2Cfg.RelayerConfig.FinalizeBundleWithoutProofTimeoutSec = 0
|
||||
chainConfig := ¶ms.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64), EuclidTime: new(uint64), EuclidV2Time: new(uint64)}
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, ServiceTypeL2RollupRelayer, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
l2BlockOrm := orm.NewL2Block(db)
|
||||
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
|
||||
assert.NoError(t, err)
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
chunkDB1, err := chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion, rutils.ChunkMetrics{})
|
||||
assert.NoError(t, err)
|
||||
chunkDB2, err := chunkOrm.InsertChunk(context.Background(), chunk2, codecVersion, rutils.ChunkMetrics{})
|
||||
assert.NoError(t, err)
|
||||
l2BlockOrm := orm.NewL2Block(db)
|
||||
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
|
||||
assert.NoError(t, err)
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
chunkDB1, err := chunkOrm.InsertChunk(context.Background(), chunk1, encoding.CodecV7, rutils.ChunkMetrics{})
|
||||
assert.NoError(t, err)
|
||||
chunkDB2, err := chunkOrm.InsertChunk(context.Background(), chunk2, encoding.CodecV7, rutils.ChunkMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
batch := &encoding.Batch{
|
||||
Index: 1,
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: common.Hash{},
|
||||
Chunks: []*encoding.Chunk{chunk1, chunk2},
|
||||
}
|
||||
|
||||
batchOrm := orm.NewBatch(db)
|
||||
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion, rutils.BatchMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = batchOrm.UpdateRollupStatus(context.Background(), dbBatch.Hash, types.RollupCommitted)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = chunkOrm.UpdateBatchHashInRange(context.Background(), chunkDB1.Index, chunkDB2.Index, dbBatch.Hash, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
bundleOrm := orm.NewBundle(db)
|
||||
bundle, err := bundleOrm.InsertBundle(context.Background(), []*orm.Batch{dbBatch}, codecVersion)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = batchOrm.UpdateBundleHashInRange(context.Background(), dbBatch.Index, dbBatch.Index, bundle.Hash, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Eventually(t, func() bool {
|
||||
relayer.ProcessPendingBundles()
|
||||
|
||||
bundleInDB, bundleErr := bundleOrm.GetBundles(context.Background(), map[string]interface{}{"hash": bundle.Hash}, nil, 0)
|
||||
if bundleErr != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
bundleStatus := len(bundleInDB) == 1 && types.RollupStatus(bundleInDB[0].RollupStatus) == types.RollupFinalizing &&
|
||||
types.ProvingStatus(bundleInDB[0].ProvingStatus) == types.ProvingTaskVerified
|
||||
|
||||
batchInDB, batchErr := batchOrm.GetBatches(context.Background(), map[string]interface{}{"hash": dbBatch.Hash}, nil, 0)
|
||||
if batchErr != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
batchStatus := len(batchInDB) == 1 && types.ProvingStatus(batchInDB[0].ProvingStatus) == types.ProvingTaskVerified
|
||||
|
||||
chunks, chunkErr := chunkOrm.GetChunksByBatchHash(context.Background(), dbBatch.Hash)
|
||||
if chunkErr != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
chunkStatus := len(chunks) == 2 && types.ProvingStatus(chunks[0].ProvingStatus) == types.ProvingTaskVerified &&
|
||||
types.ProvingStatus(chunks[1].ProvingStatus) == types.ProvingTaskVerified
|
||||
|
||||
return bundleStatus && batchStatus && chunkStatus
|
||||
}, 5*time.Second, 100*time.Millisecond, "Bundle or Batch or Chunk status did not update as expected")
|
||||
relayer.StopSenders()
|
||||
batch := &encoding.Batch{
|
||||
Index: 1,
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: common.Hash{},
|
||||
Chunks: []*encoding.Chunk{chunk1, chunk2},
|
||||
Blocks: []*encoding.Block{block1, block2},
|
||||
}
|
||||
|
||||
batchOrm := orm.NewBatch(db)
|
||||
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV7, rutils.BatchMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = batchOrm.UpdateRollupStatus(context.Background(), dbBatch.Hash, types.RollupCommitted)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = chunkOrm.UpdateBatchHashInRange(context.Background(), chunkDB1.Index, chunkDB2.Index, dbBatch.Hash, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
bundleOrm := orm.NewBundle(db)
|
||||
bundle, err := bundleOrm.InsertBundle(context.Background(), []*orm.Batch{dbBatch}, encoding.CodecV7)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = batchOrm.UpdateBundleHashInRange(context.Background(), dbBatch.Index, dbBatch.Index, bundle.Hash, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Eventually(t, func() bool {
|
||||
relayer.ProcessPendingBundles()
|
||||
|
||||
bundleInDB, bundleErr := bundleOrm.GetBundles(context.Background(), map[string]interface{}{"hash": bundle.Hash}, nil, 0)
|
||||
if bundleErr != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
bundleStatus := len(bundleInDB) == 1 && types.RollupStatus(bundleInDB[0].RollupStatus) == types.RollupFinalizing &&
|
||||
types.ProvingStatus(bundleInDB[0].ProvingStatus) == types.ProvingTaskVerified
|
||||
|
||||
batchInDB, batchErr := batchOrm.GetBatches(context.Background(), map[string]interface{}{"hash": dbBatch.Hash}, nil, 0)
|
||||
if batchErr != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
batchStatus := len(batchInDB) == 1 && types.ProvingStatus(batchInDB[0].ProvingStatus) == types.ProvingTaskVerified
|
||||
|
||||
chunks, chunkErr := chunkOrm.GetChunksByBatchHash(context.Background(), dbBatch.Hash)
|
||||
if chunkErr != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
chunkStatus := len(chunks) == 2 && types.ProvingStatus(chunks[0].ProvingStatus) == types.ProvingTaskVerified &&
|
||||
types.ProvingStatus(chunks[1].ProvingStatus) == types.ProvingTaskVerified
|
||||
|
||||
return bundleStatus && batchStatus && chunkStatus
|
||||
}, 5*time.Second, 100*time.Millisecond, "Bundle or Batch or Chunk status did not update as expected")
|
||||
relayer.StopSenders()
|
||||
}
|
||||
|
||||
func testL2RelayerCommitConfirm(t *testing.T) {
|
||||
@@ -269,6 +252,7 @@ func testL2RelayerCommitConfirm(t *testing.T) {
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: common.Hash{},
|
||||
Chunks: []*encoding.Chunk{chunk1, chunk2},
|
||||
Blocks: []*encoding.Block{block1, block2},
|
||||
}
|
||||
|
||||
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, rutils.BatchMetrics{})
|
||||
@@ -327,13 +311,14 @@ func testL2RelayerFinalizeBundleConfirm(t *testing.T) {
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: common.Hash{},
|
||||
Chunks: []*encoding.Chunk{chunk1, chunk2},
|
||||
Blocks: []*encoding.Block{block1, block2},
|
||||
}
|
||||
|
||||
dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, rutils.BatchMetrics{})
|
||||
assert.NoError(t, err)
|
||||
batchHashes[i] = dbBatch.Hash
|
||||
|
||||
bundle, err := bundleOrm.InsertBundle(context.Background(), []*orm.Batch{dbBatch}, encoding.CodecV4)
|
||||
bundle, err := bundleOrm.InsertBundle(context.Background(), []*orm.Batch{dbBatch}, encoding.CodecV7)
|
||||
assert.NoError(t, err)
|
||||
bundleHashes[i] = bundle.Hash
|
||||
|
||||
@@ -413,6 +398,7 @@ func testGetBatchStatusByIndex(t *testing.T) {
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: common.Hash{},
|
||||
Chunks: []*encoding.Chunk{chunk1, chunk2},
|
||||
Blocks: []*encoding.Block{block1, block2},
|
||||
}
|
||||
|
||||
batchOrm := orm.NewBatch(db)
|
||||
|
||||
@@ -7,11 +7,10 @@ import (
|
||||
"github.com/scroll-tech/go-ethereum"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
)
|
||||
|
||||
func (s *Sender) estimateLegacyGas(to *common.Address, data []byte, fallbackGasLimit uint64) (*FeeData, error) {
|
||||
func (s *Sender) estimateLegacyGas(to *common.Address, data []byte) (*FeeData, error) {
|
||||
gasPrice, err := s.client.SuggestGasPrice(s.ctx)
|
||||
if err != nil {
|
||||
log.Error("estimateLegacyGas SuggestGasPrice failure", "error", err)
|
||||
@@ -26,21 +25,17 @@ func (s *Sender) estimateLegacyGas(to *common.Address, data []byte, fallbackGasL
|
||||
gasLimit, _, err := s.estimateGasLimit(to, data, nil, gasPrice, nil, nil, nil)
|
||||
if err != nil {
|
||||
log.Error("estimateLegacyGas estimateGasLimit failure", "gas price", gasPrice, "from", s.transactionSigner.GetAddr().String(),
|
||||
"nonce", s.transactionSigner.GetNonce(), "to address", to.String(), "fallback gas limit", fallbackGasLimit, "error", err)
|
||||
if fallbackGasLimit == 0 {
|
||||
return nil, err
|
||||
}
|
||||
gasLimit = fallbackGasLimit
|
||||
} else {
|
||||
gasLimit = gasLimit * 12 / 10 // 20% extra gas to avoid out of gas error
|
||||
"nonce", s.transactionSigner.GetNonce(), "to address", to.String(), "error", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &FeeData{
|
||||
gasPrice: gasPrice,
|
||||
gasLimit: gasLimit,
|
||||
gasLimit: gasLimit * 12 / 10, // 20% extra gas to avoid out of gas error
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *Sender) estimateDynamicGas(to *common.Address, data []byte, baseFee uint64, fallbackGasLimit uint64) (*FeeData, error) {
|
||||
func (s *Sender) estimateDynamicGas(to *common.Address, data []byte, baseFee uint64) (*FeeData, error) {
|
||||
gasTipCap, err := s.client.SuggestGasTipCap(s.ctx)
|
||||
if err != nil {
|
||||
log.Error("estimateDynamicGas SuggestGasTipCap failure", "error", err)
|
||||
@@ -57,16 +52,12 @@ func (s *Sender) estimateDynamicGas(to *common.Address, data []byte, baseFee uin
|
||||
if err != nil {
|
||||
log.Error("estimateDynamicGas estimateGasLimit failure",
|
||||
"from", s.transactionSigner.GetAddr().String(), "nonce", s.transactionSigner.GetNonce(), "to address", to.String(),
|
||||
"fallback gas limit", fallbackGasLimit, "error", err)
|
||||
if fallbackGasLimit == 0 {
|
||||
return nil, err
|
||||
}
|
||||
gasLimit = fallbackGasLimit
|
||||
} else {
|
||||
gasLimit = gasLimit * 12 / 10 // 20% extra gas to avoid out of gas error
|
||||
"fallback gas limit", "error", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
feeData := &FeeData{
|
||||
gasLimit: gasLimit,
|
||||
gasLimit: gasLimit * 12 / 10, // 20% extra gas to avoid out of gas error
|
||||
gasTipCap: gasTipCap,
|
||||
gasFeeCap: gasFeeCap,
|
||||
}
|
||||
@@ -76,7 +67,7 @@ func (s *Sender) estimateDynamicGas(to *common.Address, data []byte, baseFee uin
|
||||
return feeData, nil
|
||||
}
|
||||
|
||||
func (s *Sender) estimateBlobGas(to *common.Address, data []byte, sidecar *gethTypes.BlobTxSidecar, baseFee, blobBaseFee uint64, fallbackGasLimit uint64) (*FeeData, error) {
|
||||
func (s *Sender) estimateBlobGas(to *common.Address, data []byte, sidecar *types.BlobTxSidecar, baseFee, blobBaseFee uint64) (*FeeData, error) {
|
||||
gasTipCap, err := s.client.SuggestGasTipCap(s.ctx)
|
||||
if err != nil {
|
||||
log.Error("estimateBlobGas SuggestGasTipCap failure", "error", err)
|
||||
@@ -93,17 +84,12 @@ func (s *Sender) estimateBlobGas(to *common.Address, data []byte, sidecar *gethT
|
||||
gasLimit, accessList, err := s.estimateGasLimit(to, data, sidecar, nil, gasTipCap, gasFeeCap, blobGasFeeCap)
|
||||
if err != nil {
|
||||
log.Error("estimateBlobGas estimateGasLimit failure",
|
||||
"from", s.transactionSigner.GetAddr().String(), "nonce", s.transactionSigner.GetNonce(), "to address", to.String(),
|
||||
"fallback gas limit", fallbackGasLimit, "error", err)
|
||||
if fallbackGasLimit == 0 {
|
||||
return nil, err
|
||||
}
|
||||
gasLimit = fallbackGasLimit
|
||||
} else {
|
||||
gasLimit = gasLimit * 12 / 10 // 20% extra gas to avoid out of gas error
|
||||
"from", s.transactionSigner.GetAddr().String(), "nonce", s.transactionSigner.GetNonce(), "to address", to.String(), "error", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
feeData := &FeeData{
|
||||
gasLimit: gasLimit,
|
||||
gasLimit: gasLimit * 12 / 10, // 20% extra gas to avoid out of gas error
|
||||
gasTipCap: gasTipCap,
|
||||
gasFeeCap: gasFeeCap,
|
||||
blobGasFeeCap: blobGasFeeCap,
|
||||
@@ -115,7 +101,7 @@ func (s *Sender) estimateBlobGas(to *common.Address, data []byte, sidecar *gethT
|
||||
return feeData, nil
|
||||
}
|
||||
|
||||
func (s *Sender) estimateGasLimit(to *common.Address, data []byte, sidecar *gethTypes.BlobTxSidecar, gasPrice, gasTipCap, gasFeeCap, blobGasFeeCap *big.Int) (uint64, *types.AccessList, error) {
|
||||
func (s *Sender) estimateGasLimit(to *common.Address, data []byte, sidecar *types.BlobTxSidecar, gasPrice, gasTipCap, gasFeeCap, blobGasFeeCap *big.Int) (uint64, *types.AccessList, error) {
|
||||
msg := ethereum.CallMsg{
|
||||
From: s.transactionSigner.GetAddr(),
|
||||
To: to,
|
||||
|
||||
@@ -156,22 +156,22 @@ func (s *Sender) SendConfirmation(cfm *Confirmation) {
|
||||
s.confirmCh <- cfm
|
||||
}
|
||||
|
||||
func (s *Sender) getFeeData(target *common.Address, data []byte, sidecar *gethTypes.BlobTxSidecar, baseFee, blobBaseFee uint64, fallbackGasLimit uint64) (*FeeData, error) {
|
||||
func (s *Sender) getFeeData(target *common.Address, data []byte, sidecar *gethTypes.BlobTxSidecar, baseFee, blobBaseFee uint64) (*FeeData, error) {
|
||||
switch s.config.TxType {
|
||||
case LegacyTxType:
|
||||
return s.estimateLegacyGas(target, data, fallbackGasLimit)
|
||||
return s.estimateLegacyGas(target, data)
|
||||
case DynamicFeeTxType:
|
||||
if sidecar == nil {
|
||||
return s.estimateDynamicGas(target, data, baseFee, fallbackGasLimit)
|
||||
return s.estimateDynamicGas(target, data, baseFee)
|
||||
}
|
||||
return s.estimateBlobGas(target, data, sidecar, baseFee, blobBaseFee, fallbackGasLimit)
|
||||
return s.estimateBlobGas(target, data, sidecar, baseFee, blobBaseFee)
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported transaction type: %s", s.config.TxType)
|
||||
}
|
||||
}
|
||||
|
||||
// SendTransaction sends a signed L2-to-L1 transaction.
|
||||
func (s *Sender) SendTransaction(contextID string, target *common.Address, data []byte, blobs []*kzg4844.Blob, fallbackGasLimit uint64) (common.Hash, error) {
|
||||
func (s *Sender) SendTransaction(contextID string, target *common.Address, data []byte, blobs []*kzg4844.Blob) (common.Hash, uint64, error) {
|
||||
s.metrics.sendTransactionTotal.WithLabelValues(s.service, s.name).Inc()
|
||||
var (
|
||||
feeData *FeeData
|
||||
@@ -190,37 +190,37 @@ func (s *Sender) SendTransaction(contextID string, target *common.Address, data
|
||||
numPendingTransactions, err = s.pendingTransactionOrm.GetCountPendingTransactionsBySenderType(s.ctx, s.senderType)
|
||||
if err != nil {
|
||||
log.Error("failed to count pending transactions", "err: %w", err)
|
||||
return common.Hash{}, fmt.Errorf("failed to count pending transactions, err: %w", err)
|
||||
return common.Hash{}, 0, fmt.Errorf("failed to count pending transactions, err: %w", err)
|
||||
}
|
||||
if numPendingTransactions >= s.config.MaxPendingBlobTxs {
|
||||
return common.Hash{}, ErrTooManyPendingBlobTxs
|
||||
return common.Hash{}, 0, ErrTooManyPendingBlobTxs
|
||||
}
|
||||
|
||||
}
|
||||
sidecar, err = makeSidecar(blobs)
|
||||
if err != nil {
|
||||
log.Error("failed to make sidecar for blob transaction", "error", err)
|
||||
return common.Hash{}, fmt.Errorf("failed to make sidecar for blob transaction, err: %w", err)
|
||||
return common.Hash{}, 0, fmt.Errorf("failed to make sidecar for blob transaction, err: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
blockNumber, baseFee, blobBaseFee, err := s.getBlockNumberAndBaseFeeAndBlobFee(s.ctx)
|
||||
if err != nil {
|
||||
log.Error("failed to get block number and base fee", "error", err)
|
||||
return common.Hash{}, fmt.Errorf("failed to get block number and base fee, err: %w", err)
|
||||
return common.Hash{}, 0, fmt.Errorf("failed to get block number and base fee, err: %w", err)
|
||||
}
|
||||
|
||||
if feeData, err = s.getFeeData(target, data, sidecar, baseFee, blobBaseFee, fallbackGasLimit); err != nil {
|
||||
if feeData, err = s.getFeeData(target, data, sidecar, baseFee, blobBaseFee); err != nil {
|
||||
s.metrics.sendTransactionFailureGetFee.WithLabelValues(s.service, s.name).Inc()
|
||||
log.Error("failed to get fee data", "from", s.transactionSigner.GetAddr().String(), "nonce", s.transactionSigner.GetNonce(), "fallback gas limit", fallbackGasLimit, "err", err)
|
||||
return common.Hash{}, fmt.Errorf("failed to get fee data, err: %w", err)
|
||||
log.Error("failed to get fee data", "from", s.transactionSigner.GetAddr().String(), "nonce", s.transactionSigner.GetNonce(), "err", err)
|
||||
return common.Hash{}, 0, fmt.Errorf("failed to get fee data, err: %w", err)
|
||||
}
|
||||
|
||||
signedTx, err := s.createTx(feeData, target, data, sidecar, s.transactionSigner.GetNonce())
|
||||
if err != nil {
|
||||
s.metrics.sendTransactionFailureSendTx.WithLabelValues(s.service, s.name).Inc()
|
||||
log.Error("failed to create signed tx (non-resubmit case)", "from", s.transactionSigner.GetAddr().String(), "nonce", s.transactionSigner.GetNonce(), "err", err)
|
||||
return common.Hash{}, fmt.Errorf("failed to create signed transaction, err: %w", err)
|
||||
return common.Hash{}, 0, fmt.Errorf("failed to create signed transaction, err: %w", err)
|
||||
}
|
||||
|
||||
// Insert the transaction into the pending transaction table.
|
||||
@@ -228,14 +228,14 @@ func (s *Sender) SendTransaction(contextID string, target *common.Address, data
|
||||
// This case will be handled by the checkPendingTransaction function.
|
||||
if err = s.pendingTransactionOrm.InsertPendingTransaction(s.ctx, contextID, s.getSenderMeta(), signedTx, blockNumber); err != nil {
|
||||
log.Error("failed to insert transaction", "from", s.transactionSigner.GetAddr().String(), "nonce", s.transactionSigner.GetNonce(), "err", err)
|
||||
return common.Hash{}, fmt.Errorf("failed to insert transaction, err: %w", err)
|
||||
return common.Hash{}, 0, fmt.Errorf("failed to insert transaction, err: %w", err)
|
||||
}
|
||||
|
||||
if err := s.client.SendTransaction(s.ctx, signedTx); err != nil {
|
||||
// Delete the transaction from the pending transaction table if it fails to send.
|
||||
if updateErr := s.pendingTransactionOrm.DeleteTransactionByTxHash(s.ctx, signedTx.Hash()); updateErr != nil {
|
||||
log.Error("failed to delete transaction", "tx hash", signedTx.Hash().String(), "from", s.transactionSigner.GetAddr().String(), "nonce", signedTx.Nonce(), "err", updateErr)
|
||||
return common.Hash{}, fmt.Errorf("failed to delete transaction, err: %w", updateErr)
|
||||
return common.Hash{}, 0, fmt.Errorf("failed to delete transaction, err: %w", updateErr)
|
||||
}
|
||||
|
||||
log.Error("failed to send tx", "tx hash", signedTx.Hash().String(), "from", s.transactionSigner.GetAddr().String(), "nonce", signedTx.Nonce(), "err", err)
|
||||
@@ -244,12 +244,12 @@ func (s *Sender) SendTransaction(contextID string, target *common.Address, data
|
||||
if strings.Contains(err.Error(), "nonce too low") {
|
||||
s.resetNonce(context.Background())
|
||||
}
|
||||
return common.Hash{}, fmt.Errorf("failed to send transaction, err: %w", err)
|
||||
return common.Hash{}, 0, fmt.Errorf("failed to send transaction, err: %w", err)
|
||||
}
|
||||
|
||||
s.transactionSigner.SetNonce(signedTx.Nonce() + 1)
|
||||
|
||||
return signedTx.Hash(), nil
|
||||
return signedTx.Hash(), blobBaseFee, nil
|
||||
}
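// With the changed signature, callers get the blob base fee observed at send time
// in addition to the tx hash, e.g. (as in the finalizeBundle call above):
//   txHash, blobBaseFee, err := s.SendTransaction("finalizeBundle-"+bundle.Hash, &r.cfg.RollupContractAddress, calldata, nil)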
|
||||
|
||||
func (s *Sender) createTx(feeData *FeeData, target *common.Address, data []byte, sidecar *gethTypes.BlobTxSidecar, nonce uint64) (*gethTypes.Transaction, error) {
|
||||
|
||||
@@ -21,7 +21,6 @@ import (
|
||||
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/crypto"
|
||||
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
"github.com/stretchr/testify/assert"
|
||||
@@ -34,7 +33,6 @@ import (
|
||||
|
||||
bridgeAbi "scroll-tech/rollup/abi"
|
||||
"scroll-tech/rollup/internal/config"
|
||||
"scroll-tech/rollup/internal/orm"
|
||||
"scroll-tech/rollup/mock_bridge"
|
||||
)
|
||||
|
||||
@@ -136,7 +134,6 @@ func TestSender(t *testing.T) {
|
||||
setupEnv(t)
|
||||
t.Run("test new sender", testNewSender)
|
||||
t.Run("test send and retrieve transaction", testSendAndRetrieveTransaction)
|
||||
t.Run("test fallback gas limit", testFallbackGasLimit)
|
||||
t.Run("test access list transaction gas limit", testAccessListTransactionGasLimit)
|
||||
t.Run("test resubmit zero gas price transaction", testResubmitZeroGasPriceTransaction)
|
||||
t.Run("test resubmit non-zero gas price transaction", testResubmitNonZeroGasPriceTransaction)
|
||||
@@ -190,7 +187,7 @@ func testSendAndRetrieveTransaction(t *testing.T) {
|
||||
if txBlob[i] != nil {
|
||||
blobs = []*kzg4844.Blob{txBlob[i]}
|
||||
}
|
||||
hash, err := s.SendTransaction("0", &common.Address{}, nil, blobs, 0)
|
||||
hash, _, err := s.SendTransaction("0", &common.Address{}, nil, blobs)
|
||||
assert.NoError(t, err)
|
||||
txs, err := s.pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), s.senderType, 1)
|
||||
assert.NoError(t, err)
|
||||
@@ -214,63 +211,6 @@ func testSendAndRetrieveTransaction(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func testFallbackGasLimit(t *testing.T) {
|
||||
for i, txType := range txTypes {
|
||||
sqlDB, err := db.DB()
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(sqlDB))
|
||||
|
||||
cfgCopy := *cfg.L2Config.RelayerConfig.SenderConfig
|
||||
cfgCopy.TxType = txType
|
||||
cfgCopy.Confirmations = rpc.LatestBlockNumber
|
||||
s, err := NewSender(context.Background(), &cfgCopy, signerConfig, "test", "test", types.SenderTypeUnknown, db, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
client, err := ethclient.Dial(cfgCopy.Endpoint)
|
||||
assert.NoError(t, err)
|
||||
|
||||
var blobs []*kzg4844.Blob
|
||||
if txBlob[i] != nil {
|
||||
blobs = []*kzg4844.Blob{txBlob[i]}
|
||||
}
|
||||
// FallbackGasLimit = 0
|
||||
txHash0, err := s.SendTransaction("0", &common.Address{}, nil, blobs, 0)
|
||||
assert.NoError(t, err)
|
||||
tx0, _, err := client.TransactionByHash(context.Background(), txHash0)
|
||||
assert.NoError(t, err)
|
||||
assert.Greater(t, tx0.Gas(), uint64(0))
|
||||
|
||||
assert.Eventually(t, func() bool {
|
||||
var txs []orm.PendingTransaction
|
||||
txs, err = s.pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), s.senderType, 100)
|
||||
assert.NoError(t, err)
|
||||
return len(txs) == 0
|
||||
}, 30*time.Second, time.Second)
|
||||
|
||||
// FallbackGasLimit = 100000
|
||||
patchGuard := gomonkey.ApplyPrivateMethod(s, "estimateGasLimit",
|
||||
func(contract *common.Address, data []byte, sidecar *gethTypes.BlobTxSidecar, gasPrice, gasTipCap, gasFeeCap, blobGasFeeCap *big.Int) (uint64, *gethTypes.AccessList, error) {
|
||||
return 0, nil, errors.New("estimateGasLimit error")
|
||||
},
|
||||
)
|
||||
|
||||
txHash1, err := s.SendTransaction("1", &common.Address{}, nil, blobs, 100000)
|
||||
assert.NoError(t, err)
|
||||
tx1, _, err := client.TransactionByHash(context.Background(), txHash1)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(100000), tx1.Gas())
|
||||
|
||||
assert.Eventually(t, func() bool {
|
||||
txs, err := s.pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), s.senderType, 100)
|
||||
assert.NoError(t, err)
|
||||
return len(txs) == 0
|
||||
}, 30*time.Second, time.Second)
|
||||
|
||||
s.Stop()
|
||||
patchGuard.Reset()
|
||||
}
|
||||
}
|
||||
|
||||
func testResubmitZeroGasPriceTransaction(t *testing.T) {
|
||||
for i, txType := range txTypes {
|
||||
if txBlob[i] != nil {
|
||||
@@ -605,10 +545,10 @@ func testResubmitNonceGappedTransaction(t *testing.T) {
|
||||
if txBlob[i] != nil {
|
||||
blobs = []*kzg4844.Blob{txBlob[i]}
|
||||
}
|
||||
_, err = s.SendTransaction("test-1", &common.Address{}, nil, blobs, 0)
|
||||
_, _, err = s.SendTransaction("test-1", &common.Address{}, nil, blobs)
|
||||
assert.NoError(t, err)
|
||||
|
||||
_, err = s.SendTransaction("test-2", &common.Address{}, nil, blobs, 0)
|
||||
_, _, err = s.SendTransaction("test-2", &common.Address{}, nil, blobs)
|
||||
assert.NoError(t, err)
|
||||
|
||||
s.checkPendingTransaction()
|
||||
@@ -649,7 +589,7 @@ func testCheckPendingTransactionTxConfirmed(t *testing.T) {
|
||||
return nil
|
||||
})
|
||||
|
||||
_, err = s.SendTransaction("test", &common.Address{}, nil, randBlobs(1), 0)
|
||||
_, _, err = s.SendTransaction("test", &common.Address{}, nil, randBlobs(1))
|
||||
assert.NoError(t, err)
|
||||
|
||||
txs, err := s.pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), s.senderType, 1)
|
||||
@@ -691,7 +631,7 @@ func testCheckPendingTransactionResubmitTxConfirmed(t *testing.T) {
|
||||
return nil
|
||||
})
|
||||
|
||||
originTxHash, err := s.SendTransaction("test", &common.Address{}, nil, randBlobs(1), 0)
|
||||
originTxHash, _, err := s.SendTransaction("test", &common.Address{}, nil, randBlobs(1))
|
||||
assert.NoError(t, err)
|
||||
|
||||
txs, err := s.pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), s.senderType, 1)
|
||||
@@ -751,7 +691,7 @@ func testCheckPendingTransactionReplacedTxConfirmed(t *testing.T) {
|
||||
return nil
|
||||
})
|
||||
|
||||
txHash, err := s.SendTransaction("test", &common.Address{}, nil, randBlobs(1), 0)
|
||||
txHash, _, err := s.SendTransaction("test", &common.Address{}, nil, randBlobs(1))
|
||||
assert.NoError(t, err)
|
||||
|
||||
txs, err := s.pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), s.senderType, 1)
|
||||
@@ -821,7 +761,7 @@ func testCheckPendingTransactionTxMultipleTimesWithOnlyOneTxPending(t *testing.T
|
||||
return nil
|
||||
})
|
||||
|
||||
_, err = s.SendTransaction("test", &common.Address{}, nil, randBlobs(1), 0)
|
||||
_, _, err = s.SendTransaction("test", &common.Address{}, nil, randBlobs(1))
|
||||
assert.NoError(t, err)
|
||||
|
||||
txs, err := s.pendingTransactionOrm.GetPendingOrReplacedTransactionsBySenderType(context.Background(), s.senderType, 1)
|
||||
@@ -895,7 +835,7 @@ func testBlobTransactionWithBlobhashOpContractCall(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
defer s.Stop()
|
||||
|
||||
_, err = s.SendTransaction("0", &testContractsAddress, data, blobs, 0)
|
||||
_, _, err = s.SendTransaction("0", &testContractsAddress, data, blobs)
|
||||
assert.NoError(t, err)
|
||||
|
||||
var txHash common.Hash
|
||||
@@ -953,10 +893,10 @@ func testSendBlobCarryingTxOverLimit(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
|
||||
for i := 0; i < int(cfgCopy.MaxPendingBlobTxs); i++ {
|
||||
_, err = s.SendTransaction("0", &common.Address{}, nil, randBlobs(1), 0)
|
||||
_, _, err = s.SendTransaction("0", &common.Address{}, nil, randBlobs(1))
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
_, err = s.SendTransaction("0", &common.Address{}, nil, randBlobs(1), 0)
|
||||
_, _, err = s.SendTransaction("0", &common.Address{}, nil, randBlobs(1))
|
||||
assert.ErrorIs(t, err, ErrTooManyPendingBlobTxs)
|
||||
s.Stop()
|
||||
}
|
||||
|
||||
@@ -29,12 +29,7 @@ type BatchProposer struct {
|
||||
chunkOrm *orm.Chunk
|
||||
l2BlockOrm *orm.L2Block
|
||||
|
||||
maxL1CommitGasPerBatch uint64
|
||||
maxL1CommitCalldataSizePerBatch uint64
|
||||
batchTimeoutSec uint64
|
||||
gasCostIncreaseMultiplier float64
|
||||
maxUncompressedBatchBytesSize uint64
|
||||
maxChunksPerBatch int
|
||||
cfg *config.BatchProposerConfig
|
||||
|
||||
replayMode bool
|
||||
minCodecVersion encoding.CodecVersion
|
||||
@@ -44,14 +39,10 @@ type BatchProposer struct {
|
||||
proposeBatchFailureTotal prometheus.Counter
|
||||
proposeBatchUpdateInfoTotal prometheus.Counter
|
||||
proposeBatchUpdateInfoFailureTotal prometheus.Counter
|
||||
totalL1CommitGas prometheus.Gauge
|
||||
totalL1CommitCalldataSize prometheus.Gauge
|
||||
totalL1CommitBlobSize prometheus.Gauge
|
||||
batchChunksNum prometheus.Gauge
|
||||
batchFirstBlockTimeoutReached prometheus.Counter
|
||||
batchChunksProposeNotEnoughTotal prometheus.Counter
|
||||
batchEstimateGasTime prometheus.Gauge
|
||||
batchEstimateCalldataSizeTime prometheus.Gauge
|
||||
batchEstimateBlobSizeTime prometheus.Gauge
|
||||
|
||||
// total number of times that batch proposer stops early due to compressed data compatibility breach
|
||||
@@ -63,29 +54,18 @@ type BatchProposer struct {
|
||||
|
||||
// NewBatchProposer creates a new BatchProposer instance.
|
||||
func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, minCodecVersion encoding.CodecVersion, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *BatchProposer {
|
||||
log.Info("new batch proposer",
|
||||
"maxL1CommitGasPerBatch", cfg.MaxL1CommitGasPerBatch,
|
||||
"maxL1CommitCalldataSizePerBatch", cfg.MaxL1CommitCalldataSizePerBatch,
|
||||
"batchTimeoutSec", cfg.BatchTimeoutSec,
|
||||
"gasCostIncreaseMultiplier", cfg.GasCostIncreaseMultiplier,
|
||||
"maxBlobSize", maxBlobSize,
|
||||
"maxUncompressedBatchBytesSize", cfg.MaxUncompressedBatchBytesSize)
|
||||
log.Info("new batch proposer", "batchTimeoutSec", cfg.BatchTimeoutSec, "maxBlobSize", maxBlobSize)
|
||||
|
||||
p := &BatchProposer{
|
||||
ctx: ctx,
|
||||
db: db,
|
||||
batchOrm: orm.NewBatch(db),
|
||||
chunkOrm: orm.NewChunk(db),
|
||||
l2BlockOrm: orm.NewL2Block(db),
|
||||
maxL1CommitGasPerBatch: cfg.MaxL1CommitGasPerBatch,
|
||||
maxL1CommitCalldataSizePerBatch: cfg.MaxL1CommitCalldataSizePerBatch,
|
||||
batchTimeoutSec: cfg.BatchTimeoutSec,
|
||||
gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,
|
||||
maxUncompressedBatchBytesSize: cfg.MaxUncompressedBatchBytesSize,
|
||||
maxChunksPerBatch: cfg.MaxChunksPerBatch,
|
||||
replayMode: false,
|
||||
minCodecVersion: minCodecVersion,
|
||||
chainCfg: chainCfg,
|
||||
ctx: ctx,
|
||||
db: db,
|
||||
batchOrm: orm.NewBatch(db),
|
||||
chunkOrm: orm.NewChunk(db),
|
||||
l2BlockOrm: orm.NewL2Block(db),
|
||||
cfg: cfg,
|
||||
replayMode: false,
|
||||
minCodecVersion: minCodecVersion,
|
||||
chainCfg: chainCfg,
|
||||
|
||||
batchProposerCircleTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "rollup_propose_batch_circle_total",
|
||||
@@ -107,14 +87,6 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, minC
|
||||
Name: "rollup_propose_batch_due_to_compressed_data_compatibility_breach_total",
|
||||
Help: "Total number of propose batch due to compressed data compatibility breach.",
|
||||
}),
|
||||
totalL1CommitGas: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "rollup_propose_batch_total_l1_commit_gas",
|
||||
Help: "The total l1 commit gas",
|
||||
}),
|
||||
totalL1CommitCalldataSize: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "rollup_propose_batch_total_l1_call_data_size",
|
||||
Help: "The total l1 call data size",
|
||||
}),
|
||||
totalL1CommitBlobSize: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "rollup_propose_batch_total_l1_commit_blob_size",
|
||||
Help: "The total l1 commit blob size",
|
||||
@@ -131,14 +103,6 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, minC
|
||||
Name: "rollup_propose_batch_chunks_propose_not_enough_total",
|
||||
Help: "Total number of batch chunk propose not enough",
|
||||
}),
|
||||
batchEstimateGasTime: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "rollup_propose_batch_estimate_gas_time",
|
||||
Help: "Time taken to estimate gas for the chunk.",
|
||||
}),
|
||||
batchEstimateCalldataSizeTime: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "rollup_propose_batch_estimate_calldata_size_time",
|
||||
Help: "Time taken to estimate calldata size for the chunk.",
|
||||
}),
|
||||
batchEstimateBlobSizeTime: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "rollup_propose_batch_estimate_blob_size_time",
|
||||
Help: "Time taken to estimate blob size for the chunk.",
|
||||
@@ -197,6 +161,7 @@ func (p *BatchProposer) updateDBBatchInfo(batch *encoding.Batch, codecVersion en
|
||||
}
|
||||
|
||||
batch.Chunks = batch.Chunks[:len(batch.Chunks)-1]
|
||||
batch.PostL1MessageQueueHash = batch.Chunks[len(batch.Chunks)-1].PostL1MessageQueueHash
|
||||
|
||||
log.Info("Batch not compatible with compressed data, removing last chunk", "batch index", batch.Index, "truncated chunk length", len(batch.Chunks))
|
||||
}
|
||||
@@ -277,7 +242,7 @@ func (p *BatchProposer) proposeBatch() error {
|
||||
}
|
||||
|
||||
// always take the minimum of the configured max chunks per batch and the codec's max chunks per batch
|
||||
maxChunksThisBatch := min(codec.MaxNumChunksPerBatch(), p.maxChunksPerBatch)
|
||||
maxChunksThisBatch := min(codec.MaxNumChunksPerBatch(), p.cfg.MaxChunksPerBatch)
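// e.g. with a codec limit of 45 chunks per batch and MaxChunksPerBatch configured
// as 30 (hypothetical values), maxChunksThisBatch is 30.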
|
||||
|
||||
// select at most maxChunkNumPerBatch chunks
|
||||
dbChunks, err := p.chunkOrm.GetChunksGEIndex(p.ctx, firstUnbatchedChunkIndex, maxChunksThisBatch)
|
||||
@@ -319,9 +284,7 @@ func (p *BatchProposer) proposeBatch() error {
|
||||
|
||||
for i, chunk := range daChunks {
|
||||
batch.Chunks = append(batch.Chunks, chunk)
|
||||
if codec.Version() >= encoding.CodecV7 {
|
||||
batch.Blocks = append(batch.Blocks, chunk.Blocks...)
|
||||
}
|
||||
batch.Blocks = append(batch.Blocks, chunk.Blocks...)
|
||||
batch.PostL1MessageQueueHash = common.HexToHash(dbChunks[i].PostL1MessageQueueHash)
|
||||
|
||||
metrics, calcErr := utils.CalculateBatchMetrics(&batch, codec.Version())
|
||||
@@ -331,33 +294,21 @@ func (p *BatchProposer) proposeBatch() error {
|
||||
|
||||
p.recordTimerBatchMetrics(metrics)
|
||||
|
||||
totalOverEstimateL1CommitGas := uint64(p.gasCostIncreaseMultiplier * float64(metrics.L1CommitGas))
|
||||
if metrics.L1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch || totalOverEstimateL1CommitGas > p.maxL1CommitGasPerBatch ||
|
||||
metrics.L1CommitBlobSize > maxBlobSize || metrics.L1CommitUncompressedBatchBytesSize > p.maxUncompressedBatchBytesSize {
|
||||
if metrics.L1CommitBlobSize > maxBlobSize {
|
||||
if i == 0 {
|
||||
// The first chunk exceeds hard limits, which indicates a bug in the chunk-proposer, manual fix is needed.
|
||||
return fmt.Errorf("the first chunk exceeds limits; start block number: %v, end block number: %v, limits: %+v, maxChunkNum: %v, maxL1CommitCalldataSize: %v, maxL1CommitGas: %v, maxBlobSize: %v, maxUncompressedBatchBytesSize: %v",
|
||||
dbChunks[0].StartBlockNumber, dbChunks[0].EndBlockNumber, metrics, maxChunksThisBatch, p.maxL1CommitCalldataSizePerBatch, p.maxL1CommitGasPerBatch, maxBlobSize, p.maxUncompressedBatchBytesSize)
|
||||
return fmt.Errorf("the first chunk exceeds limits; start block number: %v, end block number: %v, limits: %+v, maxChunkNum: %v, maxBlobSize: %v",
|
||||
dbChunks[0].StartBlockNumber, dbChunks[0].EndBlockNumber, metrics, maxChunksThisBatch, maxBlobSize)
|
||||
}
|
||||
|
||||
log.Debug("breaking limit condition in batching",
|
||||
"l1CommitCalldataSize", metrics.L1CommitCalldataSize,
|
||||
"maxL1CommitCalldataSize", p.maxL1CommitCalldataSizePerBatch,
|
||||
"l1CommitGas", metrics.L1CommitGas,
|
||||
"overEstimateL1CommitGas", totalOverEstimateL1CommitGas,
|
||||
"maxL1CommitGas", p.maxL1CommitGasPerBatch,
|
||||
"l1CommitBlobSize", metrics.L1CommitBlobSize,
|
||||
"maxBlobSize", maxBlobSize,
|
||||
"L1CommitUncompressedBatchBytesSize", metrics.L1CommitUncompressedBatchBytesSize,
|
||||
"maxUncompressedBatchBytesSize", p.maxUncompressedBatchBytesSize)
|
||||
"maxBlobSize", maxBlobSize)
|
||||
|
||||
lastChunk := batch.Chunks[len(batch.Chunks)-1]
|
||||
batch.Chunks = batch.Chunks[:len(batch.Chunks)-1]
|
||||
batch.PostL1MessageQueueHash = common.HexToHash(dbChunks[i-1].PostL1MessageQueueHash)
|
||||
|
||||
if codec.Version() >= encoding.CodecV7 {
|
||||
batch.Blocks = batch.Blocks[:len(batch.Blocks)-len(lastChunk.Blocks)]
|
||||
}
|
||||
batch.Blocks = batch.Blocks[:len(batch.Blocks)-len(lastChunk.Blocks)]
|
||||
|
||||
metrics, err = utils.CalculateBatchMetrics(&batch, codec.Version())
|
||||
if err != nil {
|
||||
@@ -374,7 +325,7 @@ func (p *BatchProposer) proposeBatch() error {
|
||||
return fmt.Errorf("failed to calculate batch metrics: %w", calcErr)
|
||||
}
|
||||
currentTimeSec := uint64(time.Now().Unix())
|
||||
if metrics.FirstBlockTimestamp+p.batchTimeoutSec < currentTimeSec || metrics.NumChunks == uint64(maxChunksThisBatch) {
|
||||
if metrics.FirstBlockTimestamp+p.cfg.BatchTimeoutSec < currentTimeSec || metrics.NumChunks == uint64(maxChunksThisBatch) {
|
||||
log.Info("reached maximum number of chunks in batch or first block timeout",
|
||||
"chunk count", metrics.NumChunks,
|
||||
"start block number", dbChunks[0].StartBlockNumber,
|
||||
@@ -410,17 +361,11 @@ func (p *BatchProposer) getDAChunks(dbChunks []*orm.Chunk) ([]*encoding.Chunk, e
|
||||
}
|
||||
|
||||
func (p *BatchProposer) recordAllBatchMetrics(metrics *utils.BatchMetrics) {
|
||||
p.totalL1CommitGas.Set(float64(metrics.L1CommitGas))
|
||||
p.totalL1CommitCalldataSize.Set(float64(metrics.L1CommitCalldataSize))
|
||||
p.batchChunksNum.Set(float64(metrics.NumChunks))
|
||||
p.totalL1CommitBlobSize.Set(float64(metrics.L1CommitBlobSize))
|
||||
p.batchEstimateGasTime.Set(float64(metrics.EstimateGasTime))
|
||||
p.batchEstimateCalldataSizeTime.Set(float64(metrics.EstimateCalldataSizeTime))
|
||||
p.batchEstimateBlobSizeTime.Set(float64(metrics.EstimateBlobSizeTime))
|
||||
}
|
||||
|
||||
func (p *BatchProposer) recordTimerBatchMetrics(metrics *utils.BatchMetrics) {
|
||||
p.batchEstimateGasTime.Set(float64(metrics.EstimateGasTime))
|
||||
p.batchEstimateCalldataSizeTime.Set(float64(metrics.EstimateCalldataSizeTime))
|
||||
p.batchEstimateBlobSizeTime.Set(float64(metrics.EstimateBlobSizeTime))
|
||||
}
|
||||
|
||||
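As a reading aid, a minimal standalone sketch of the two sealing triggers the updated proposeBatch keeps: the chunk-count cap taken as the minimum of the codec limit and cfg.MaxChunksPerBatch, and the first-block timeout check against cfg.BatchTimeoutSec. Identifiers such as shouldSealBatch, codecMaxChunks, and firstBlockTimestamp are illustrative stand-ins, not names from the diff.

package main

import (
	"fmt"
	"time"
)

// shouldSealBatch mirrors the simplified trigger logic: seal when the oldest
// block has waited longer than the configured timeout, or when the batch
// already holds the maximum number of chunks allowed for this codec/config.
func shouldSealBatch(firstBlockTimestamp, batchTimeoutSec uint64, numChunks, codecMaxChunks, cfgMaxChunks int) bool {
	maxChunksThisBatch := min(codecMaxChunks, cfgMaxChunks)
	currentTimeSec := uint64(time.Now().Unix())
	return firstBlockTimestamp+batchTimeoutSec < currentTimeSec || numChunks == maxChunksThisBatch
}

func main() {
	// Example: 2 chunks pending, cap of 45, first block 10 minutes old, 5-minute timeout.
	fmt.Println(shouldSealBatch(uint64(time.Now().Unix())-600, 300, 2, 45, 45)) // true (timeout reached)
}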
@@ -20,60 +20,24 @@ import (
"scroll-tech/rollup/internal/utils"
)

func testBatchProposerLimitsCodecV4(t *testing.T) {
func testBatchProposerLimitsCodecV7(t *testing.T) {
tests := []struct {
name string
maxL1CommitGas uint64
maxL1CommitCalldataSize uint64
batchTimeoutSec uint64
expectedBatchesLen int
expectedChunksInFirstBatch uint64 // only be checked when expectedBatchesLen > 0
}{
{
name: "NoLimitReached",
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
batchTimeoutSec: 1000000000000,
expectedBatchesLen: 0,
name: "NoLimitReached",
batchTimeoutSec: 1000000000000,
expectedBatchesLen: 0,
},
{
name: "Timeout",
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
batchTimeoutSec: 0,
expectedBatchesLen: 1,
expectedChunksInFirstBatch: 2,
},
{
name: "MaxL1CommitGasPerBatchIs0",
maxL1CommitGas: 0,
maxL1CommitCalldataSize: 1000000,
batchTimeoutSec: 1000000000000,
expectedBatchesLen: 0,
},
{
name: "MaxL1CommitCalldataSizePerBatchIs0",
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 0,
batchTimeoutSec: 1000000000000,
expectedBatchesLen: 0,
},
{
name: "MaxL1CommitGasPerBatchIsFirstChunk",
maxL1CommitGas: 249179,
maxL1CommitCalldataSize: 1000000,
batchTimeoutSec: 1000000000000,
expectedBatchesLen: 1,
expectedChunksInFirstBatch: 1,
},
{
name: "MaxL1CommitCalldataSizePerBatchIsFirstChunk",
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 60,
batchTimeoutSec: 1000000000000,
expectedBatchesLen: 1,
expectedChunksInFirstBatch: 1,
},
}

for _, tt := range tests {
@@ -86,7 +50,6 @@ func testBatchProposerLimitsCodecV4(t *testing.T) {
Header: &gethTypes.Header{
Number: big.NewInt(0),
},
RowConsumption: &gethTypes.RowConsumption{},
}
chunk := &encoding.Chunk{
Blocks: []*encoding.Block{block},
@@ -109,45 +72,32 @@ func testBatchProposerLimitsCodecV4(t *testing.T) {
assert.NoError(t, err)

cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: 1,
MaxTxNumPerChunk: 10000,
MaxL2GasPerChunk: 20000000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MaxRowConsumptionPerChunk: 1000000,
ChunkTimeoutSec: 300,
GasCostIncreaseMultiplier: 1.2,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, encoding.CodecV4, &params.ChainConfig{
MaxBlockNumPerChunk: 1,
MaxL2GasPerChunk: 20000000,
ChunkTimeoutSec: 300,
}, encoding.CodecV7, &params.ChainConfig{
LondonBlock: big.NewInt(0),
BernoulliBlock: big.NewInt(0),
CurieBlock: big.NewInt(0),
DarwinTime: new(uint64),
DarwinV2Time: new(uint64),
EuclidTime: new(uint64),
EuclidV2Time: new(uint64),
}, db, nil)
cp.TryProposeChunk() // chunk1 contains block1
cp.TryProposeChunk() // chunk2 contains block2

chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
assert.NoError(t, err)
assert.Equal(t, uint64(51124), chunks[0].TotalL1CommitGas)
assert.Equal(t, uint64(60), chunks[0].TotalL1CommitCalldataSize)
assert.Equal(t, uint64(51124), chunks[1].TotalL1CommitGas)
assert.Equal(t, uint64(60), chunks[1].TotalL1CommitCalldataSize)

bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxL1CommitGasPerBatch: tt.maxL1CommitGas,
MaxL1CommitCalldataSizePerBatch: tt.maxL1CommitCalldataSize,
BatchTimeoutSec: tt.batchTimeoutSec,
GasCostIncreaseMultiplier: 1.2,
MaxUncompressedBatchBytesSize: math.MaxUint64,
MaxChunksPerBatch: math.MaxInt32,
}, encoding.CodecV4, &params.ChainConfig{
MaxChunksPerBatch: math.MaxInt32,
BatchTimeoutSec: tt.batchTimeoutSec,
}, encoding.CodecV7, &params.ChainConfig{
LondonBlock: big.NewInt(0),
BernoulliBlock: big.NewInt(0),
CurieBlock: big.NewInt(0),
DarwinTime: new(uint64),
DarwinV2Time: new(uint64),
EuclidTime: new(uint64),
EuclidV2Time: new(uint64),
}, db, nil)
bp.TryProposeBatch()

@@ -173,7 +123,7 @@ func testBatchProposerLimitsCodecV4(t *testing.T) {
}
}

func testBatchCommitGasAndCalldataSizeEstimationCodecV4(t *testing.T) {
func testBatchProposerBlobSizeLimitCodecV7(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)

@@ -182,7 +132,6 @@ func testBatchCommitGasAndCalldataSizeEstimationCodecV4(t *testing.T) {
Header: &gethTypes.Header{
Number: big.NewInt(0),
},
RowConsumption: &gethTypes.RowConsumption{},
}
chunk := &encoding.Chunk{
Blocks: []*encoding.Block{block},
@@ -200,230 +149,104 @@ func testBatchCommitGasAndCalldataSizeEstimationCodecV4(t *testing.T) {
_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{})
assert.NoError(t, err)

l2BlockOrm := orm.NewL2Block(db)
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)
chainConfig := &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64), EuclidTime: new(uint64), EuclidV2Time: new(uint64)}

cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: 1,
MaxTxNumPerChunk: 10000,
MaxL2GasPerChunk: 20_000_000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MaxRowConsumptionPerChunk: 1000000,
ChunkTimeoutSec: 300,
GasCostIncreaseMultiplier: 1.2,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, encoding.CodecV4, &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}, db, nil)
cp.TryProposeChunk() // chunk1 contains block1
cp.TryProposeChunk() // chunk2 contains block2
MaxBlockNumPerChunk: math.MaxUint64,
MaxL2GasPerChunk: math.MaxUint64,
ChunkTimeoutSec: 0,
}, encoding.CodecV7, chainConfig, db, nil)

chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
assert.NoError(t, err)
assert.Equal(t, uint64(51124), chunks[0].TotalL1CommitGas)
assert.Equal(t, uint64(60), chunks[0].TotalL1CommitCalldataSize)
assert.Equal(t, uint64(51124), chunks[1].TotalL1CommitGas)
assert.Equal(t, uint64(60), chunks[1].TotalL1CommitCalldataSize)
blockHeight := uint64(0)
block = readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
for total := int64(0); total < 90; total++ {
for i := int64(0); i < 30; i++ {
blockHeight++
l2BlockOrm := orm.NewL2Block(db)
block.Header.Number = new(big.Int).SetUint64(blockHeight)
block.Header.Time = blockHeight
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block})
assert.NoError(t, err)
}
cp.TryProposeChunk()
}

bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxL1CommitGasPerBatch: 50000000000,
MaxL1CommitCalldataSizePerBatch: 1000000,
BatchTimeoutSec: 0,
GasCostIncreaseMultiplier: 1.2,
MaxUncompressedBatchBytesSize: math.MaxUint64,
MaxChunksPerBatch: math.MaxInt32,
}, encoding.CodecV4, &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}, db, nil)
MaxChunksPerBatch: math.MaxInt32,
BatchTimeoutSec: math.MaxUint32,
}, encoding.CodecV7, chainConfig, db, nil)

for i := 0; i < 2; i++ {
bp.TryProposeBatch()
}

batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
batches = batches[1:]
assert.NoError(t, err)

var expectedNumBatches int = 1
var numChunksMultiplier uint64 = 64
assert.Len(t, batches, expectedNumBatches)

for i, batch := range batches {
assert.Equal(t, numChunksMultiplier*(uint64(i)+1), batch.EndChunkIndex)
}
}

func testBatchProposerMaxChunkNumPerBatchLimitCodecV7(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)

// Add genesis batch.
block := &encoding.Block{
Header: &gethTypes.Header{
Number: big.NewInt(0),
},
}
chunk := &encoding.Chunk{
Blocks: []*encoding.Block{block},
}
chunkOrm := orm.NewChunk(db)
_, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, utils.ChunkMetrics{})
assert.NoError(t, err)
batch := &encoding.Batch{
Index: 0,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk},
}
batchOrm := orm.NewBatch(db)
_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{})
assert.NoError(t, err)

var expectedChunkNum uint64 = 45
chainConfig := &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64), EuclidTime: new(uint64), EuclidV2Time: new(uint64)}

cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: math.MaxUint64,
MaxL2GasPerChunk: math.MaxUint64,
ChunkTimeoutSec: 0,
}, encoding.CodecV7, chainConfig, db, nil)

block = readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
for blockHeight := uint64(1); blockHeight <= 60; blockHeight++ {
block.Header.Number = new(big.Int).SetUint64(blockHeight)
block.Header.Time = blockHeight
err = orm.NewL2Block(db).InsertL2Blocks(context.Background(), []*encoding.Block{block})
assert.NoError(t, err)
cp.TryProposeChunk()
}

bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxChunksPerBatch: 45,
BatchTimeoutSec: math.MaxUint32,
}, encoding.CodecV7, chainConfig, db, nil)
bp.TryProposeBatch()

batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
assert.NoError(t, err)
assert.Len(t, batches, 2)
batches = batches[1:]
assert.Equal(t, uint64(1), batches[0].StartChunkIndex)
assert.Equal(t, uint64(2), batches[0].EndChunkIndex)
assert.Equal(t, types.RollupPending, types.RollupStatus(batches[0].RollupStatus))
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(batches[0].ProvingStatus))
dbBatch := batches[1]

dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
assert.NoError(t, err)
assert.Len(t, dbChunks, 2)
for _, chunk := range dbChunks {
assert.Equal(t, batches[0].Hash, chunk.BatchHash)
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(chunk.ProvingStatus))
}

assert.Equal(t, uint64(209350), batches[0].TotalL1CommitGas)
assert.Equal(t, uint64(120), batches[0].TotalL1CommitCalldataSize)
}

func testBatchProposerBlobSizeLimitCodecV4(t *testing.T) {
codecVersions := []encoding.CodecVersion{encoding.CodecV4}
for _, codecVersion := range codecVersions {
db := setupDB(t)

// Add genesis batch.
block := &encoding.Block{
Header: &gethTypes.Header{
Number: big.NewInt(0),
},
RowConsumption: &gethTypes.RowConsumption{},
}
chunk := &encoding.Chunk{
Blocks: []*encoding.Block{block},
}
chunkOrm := orm.NewChunk(db)
_, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, utils.ChunkMetrics{})
assert.NoError(t, err)
batch := &encoding.Batch{
Index: 0,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk},
}
batchOrm := orm.NewBatch(db)
_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{})
assert.NoError(t, err)

var chainConfig *params.ChainConfig
if codecVersion == encoding.CodecV4 {
chainConfig = &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}
} else {
assert.Fail(t, "unsupported codec version, expected CodecV4")
}

cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: math.MaxUint64,
MaxTxNumPerChunk: math.MaxUint64,
MaxL2GasPerChunk: math.MaxUint64,
MaxL1CommitGasPerChunk: math.MaxUint64,
MaxL1CommitCalldataSizePerChunk: math.MaxUint64,
MaxRowConsumptionPerChunk: math.MaxUint64,
ChunkTimeoutSec: 0,
GasCostIncreaseMultiplier: 1,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, encoding.CodecV4, chainConfig, db, nil)

blockHeight := int64(0)
block = readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
for total := int64(0); total < 90; total++ {
for i := int64(0); i < 30; i++ {
blockHeight++
l2BlockOrm := orm.NewL2Block(db)
block.Header.Number = big.NewInt(blockHeight)
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block})
assert.NoError(t, err)
}
cp.TryProposeChunk()
}

bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxL1CommitGasPerBatch: math.MaxUint64,
MaxL1CommitCalldataSizePerBatch: math.MaxUint64,
BatchTimeoutSec: math.MaxUint32,
GasCostIncreaseMultiplier: 1,
MaxUncompressedBatchBytesSize: math.MaxUint64,
MaxChunksPerBatch: math.MaxInt32,
}, encoding.CodecV4, chainConfig, db, nil)

for i := 0; i < 2; i++ {
bp.TryProposeBatch()
}

batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
batches = batches[1:]
assert.NoError(t, err)

var expectedNumBatches int
var numChunksMultiplier uint64
if codecVersion == encoding.CodecV4 {
expectedNumBatches = 2
numChunksMultiplier = 45
} else {
assert.Fail(t, "unsupported codec version, expected CodecV4")
}
assert.Len(t, batches, expectedNumBatches)

for i, batch := range batches {
assert.Equal(t, numChunksMultiplier*(uint64(i)+1), batch.EndChunkIndex)
}
database.CloseDB(db)
}
}

func testBatchProposerMaxChunkNumPerBatchLimitCodecV4(t *testing.T) {
codecVersions := []encoding.CodecVersion{encoding.CodecV4}
for _, codecVersion := range codecVersions {
db := setupDB(t)

// Add genesis batch.
block := &encoding.Block{
Header: &gethTypes.Header{
Number: big.NewInt(0),
},
RowConsumption: &gethTypes.RowConsumption{},
}
chunk := &encoding.Chunk{
Blocks: []*encoding.Block{block},
}
chunkOrm := orm.NewChunk(db)
_, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, utils.ChunkMetrics{})
assert.NoError(t, err)
batch := &encoding.Batch{
Index: 0,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk},
}
batchOrm := orm.NewBatch(db)
_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{})
assert.NoError(t, err)

var expectedChunkNum uint64
var chainConfig *params.ChainConfig
if codecVersion == encoding.CodecV4 {
chainConfig = &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}
expectedChunkNum = 45
} else {
assert.Fail(t, "unsupported codec version, expected CodecV4")
}

cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: math.MaxUint64,
MaxTxNumPerChunk: math.MaxUint64,
MaxL2GasPerChunk: math.MaxUint64,
MaxL1CommitGasPerChunk: math.MaxUint64,
MaxL1CommitCalldataSizePerChunk: math.MaxUint64,
MaxRowConsumptionPerChunk: math.MaxUint64,
ChunkTimeoutSec: 0,
GasCostIncreaseMultiplier: 1,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, encoding.CodecV4, chainConfig, db, nil)

block = readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
for blockHeight := int64(1); blockHeight <= 60; blockHeight++ {
block.Header.Number = big.NewInt(blockHeight)
err = orm.NewL2Block(db).InsertL2Blocks(context.Background(), []*encoding.Block{block})
assert.NoError(t, err)
cp.TryProposeChunk()
}

bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxL1CommitGasPerBatch: math.MaxUint64,
MaxL1CommitCalldataSizePerBatch: math.MaxUint64,
BatchTimeoutSec: math.MaxUint32,
GasCostIncreaseMultiplier: 1,
MaxUncompressedBatchBytesSize: math.MaxUint64,
MaxChunksPerBatch: math.MaxInt32,
}, encoding.CodecV4, chainConfig, db, nil)
bp.TryProposeBatch()

batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
assert.NoError(t, err)
assert.Len(t, batches, 2)
dbBatch := batches[1]

assert.Equal(t, expectedChunkNum, dbBatch.EndChunkIndex)

database.CloseDB(db)
}
assert.Equal(t, expectedChunkNum, dbBatch.EndChunkIndex)
}

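For orientation, a hedged sketch of the slimmed-down proposer configurations the CodecV7 tests above construct: only the fields kept by this change (block, gas, and timeout limits for chunks; chunk-count and timeout limits for batches) are set. The struct definitions below are illustrative stand-ins; the real types live in scroll-tech/rollup/internal/config.

package main

import "fmt"

// Stand-in config structs mirroring the fields the CodecV7 tests still set;
// the real definitions live in rollup/internal/config.
type chunkProposerConfig struct {
	MaxBlockNumPerChunk uint64
	MaxL2GasPerChunk    uint64
	ChunkTimeoutSec     uint64
}

type batchProposerConfig struct {
	MaxChunksPerBatch int
	BatchTimeoutSec   uint64
}

func main() {
	cp := chunkProposerConfig{MaxBlockNumPerChunk: 1, MaxL2GasPerChunk: 20_000_000, ChunkTimeoutSec: 300}
	bp := batchProposerConfig{MaxChunksPerBatch: 45, BatchTimeoutSec: 300}
	fmt.Printf("%+v %+v\n", cp, bp)
}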
@@ -26,8 +26,7 @@ type BundleProposer struct {
batchOrm *orm.Batch
bundleOrm *orm.Bundle

maxBatchNumPerBundle uint64
bundleTimeoutSec uint64
cfg *config.BundleProposerConfig

minCodecVersion encoding.CodecVersion
chainCfg *params.ChainConfig
@@ -46,15 +45,14 @@ func NewBundleProposer(ctx context.Context, cfg *config.BundleProposerConfig, mi
log.Info("new bundle proposer", "bundleBatchesNum", cfg.MaxBatchNumPerBundle, "bundleTimeoutSec", cfg.BundleTimeoutSec)

p := &BundleProposer{
ctx: ctx,
db: db,
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
bundleOrm: orm.NewBundle(db),
maxBatchNumPerBundle: cfg.MaxBatchNumPerBundle,
bundleTimeoutSec: cfg.BundleTimeoutSec,
minCodecVersion: minCodecVersion,
chainCfg: chainCfg,
ctx: ctx,
db: db,
chunkOrm: orm.NewChunk(db),
batchOrm: orm.NewBatch(db),
bundleOrm: orm.NewBundle(db),
cfg: cfg,
minCodecVersion: minCodecVersion,
chainCfg: chainCfg,

bundleProposerCircleTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_propose_bundle_circle_total",
@@ -132,7 +130,7 @@ func (p *BundleProposer) proposeBundle() error {
}

// select at most maxBlocksThisChunk blocks
maxBatchesThisBundle := p.maxBatchNumPerBundle
maxBatchesThisBundle := p.cfg.MaxBatchNumPerBundle
batches, err := p.batchOrm.GetCommittedBatchesGEIndexGECodecVersion(p.ctx, firstUnbundledBatchIndex, p.minCodecVersion, int(maxBatchesThisBundle))
if err != nil {
return err
@@ -161,11 +159,6 @@ func (p *BundleProposer) proposeBundle() error {
return fmt.Errorf("unsupported codec version: %v, expected at least %v", codecVersion, p.minCodecVersion)
}

if codecVersion == encoding.CodecV5 {
maxBatchesThisBundle = 1
batches = batches[:maxBatchesThisBundle]
}

for i := 1; i < len(batches); i++ {
// Make sure that all batches have been committed.
if len(batches[i].CommitTxHash) == 0 {
@@ -198,8 +191,8 @@ func (p *BundleProposer) proposeBundle() error {
}

currentTimeSec := uint64(time.Now().Unix())
if firstChunk.StartBlockTime+p.bundleTimeoutSec < currentTimeSec {
log.Info("first block timeout", "batch count", len(batches), "start block number", firstChunk.StartBlockNumber, "start block timestamp", firstChunk.StartBlockTime, "bundle timeout", p.bundleTimeoutSec, "current time", currentTimeSec)
if firstChunk.StartBlockTime+p.cfg.BundleTimeoutSec < currentTimeSec {
log.Info("first block timeout", "batch count", len(batches), "start block number", firstChunk.StartBlockNumber, "start block timestamp", firstChunk.StartBlockTime, "bundle timeout", p.cfg.BundleTimeoutSec, "current time", currentTimeSec)

batches, err = p.allBatchesCommittedInSameTXIncluded(batches)
if err != nil {

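A small self-contained sketch of the bundle sealing condition after the config consolidation: the proposer compares the first chunk's start-block time plus cfg.BundleTimeoutSec against the current time. Variable and function names below are illustrative, not the actual identifiers.

package main

import (
	"fmt"
	"time"
)

// bundleTimedOut reports whether the oldest block covered by the candidate
// bundle has been waiting longer than the configured bundle timeout.
func bundleTimedOut(startBlockTime, bundleTimeoutSec uint64) bool {
	currentTimeSec := uint64(time.Now().Unix())
	return startBlockTime+bundleTimeoutSec < currentTimeSec
}

func main() {
	// Example: first block is 2 minutes old with a 1-minute bundle timeout.
	fmt.Println(bundleTimedOut(uint64(time.Now().Unix())-120, 60)) // true
}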
@@ -23,7 +23,7 @@ import (
"scroll-tech/rollup/internal/utils"
)

func testBundleProposerLimitsCodecV4(t *testing.T) {
func testBundleProposerLimitsCodecV7(t *testing.T) {
tests := []struct {
name string
maxBatchNumPerBundle uint64
@@ -69,7 +69,6 @@ func testBundleProposerLimitsCodecV4(t *testing.T) {
Header: &gethTypes.Header{
Number: big.NewInt(0),
},
RowConsumption: &gethTypes.RowConsumption{},
}
chunk := &encoding.Chunk{
Blocks: []*encoding.Block{block},
@@ -91,27 +90,18 @@ func testBundleProposerLimitsCodecV4(t *testing.T) {
err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)

chainConfig := &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}
chainConfig := &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64), EuclidTime: new(uint64), EuclidV2Time: new(uint64)}

cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: 1,
MaxTxNumPerChunk: math.MaxUint64,
MaxL2GasPerChunk: math.MaxUint64,
MaxL1CommitGasPerChunk: math.MaxUint64,
MaxL1CommitCalldataSizePerChunk: math.MaxUint64,
MaxRowConsumptionPerChunk: math.MaxUint64,
ChunkTimeoutSec: math.MaxUint32,
GasCostIncreaseMultiplier: 1,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, encoding.CodecV4, chainConfig, db, nil)
MaxBlockNumPerChunk: 1,
MaxL2GasPerChunk: math.MaxUint64,
ChunkTimeoutSec: math.MaxUint32,
}, encoding.CodecV7, chainConfig, db, nil)

bap := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxL1CommitGasPerBatch: math.MaxUint64,
MaxL1CommitCalldataSizePerBatch: math.MaxUint64,
BatchTimeoutSec: 0,
GasCostIncreaseMultiplier: 1,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, encoding.CodecV4, chainConfig, db, nil)
MaxChunksPerBatch: math.MaxInt32,
BatchTimeoutSec: 0,
}, encoding.CodecV7, chainConfig, db, nil)

cp.TryProposeChunk() // chunk1 contains block1
bap.TryProposeBatch() // batch1 contains chunk1
@@ -121,7 +111,7 @@ func testBundleProposerLimitsCodecV4(t *testing.T) {
bup := NewBundleProposer(context.Background(), &config.BundleProposerConfig{
MaxBatchNumPerBundle: tt.maxBatchNumPerBundle,
BundleTimeoutSec: tt.bundleTimeoutSec,
}, encoding.CodecV4, chainConfig, db, nil)
}, encoding.CodecV7, chainConfig, db, nil)

batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
require.NoError(t, err)

@@ -26,15 +26,7 @@ type ChunkProposer struct {
chunkOrm *orm.Chunk
l2BlockOrm *orm.L2Block

maxBlockNumPerChunk uint64
maxTxNumPerChunk uint64
maxL2GasPerChunk uint64
maxL1CommitGasPerChunk uint64
maxL1CommitCalldataSizePerChunk uint64
maxRowConsumptionPerChunk uint64
chunkTimeoutSec uint64
gasCostIncreaseMultiplier float64
maxUncompressedBatchBytesSize uint64
cfg *config.ChunkProposerConfig

replayMode bool
minCodecVersion encoding.CodecVersion
@@ -46,15 +38,10 @@ type ChunkProposer struct {
proposeChunkUpdateInfoFailureTotal prometheus.Counter
chunkTxNum prometheus.Gauge
chunkL2Gas prometheus.Gauge
chunkEstimateL1CommitGas prometheus.Gauge
totalL1CommitCalldataSize prometheus.Gauge
totalL1CommitBlobSize prometheus.Gauge
maxTxConsumption prometheus.Gauge
chunkBlocksNum prometheus.Gauge
chunkFirstBlockTimeoutReached prometheus.Counter
chunkBlocksProposeNotEnoughTotal prometheus.Counter
chunkEstimateGasTime prometheus.Gauge
chunkEstimateCalldataSizeTime prometheus.Gauge
chunkEstimateBlobSizeTime prometheus.Gauge

// total number of times that chunk proposer stops early due to compressed data compatibility breach
@@ -68,33 +55,19 @@ type ChunkProposer struct {
func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, minCodecVersion encoding.CodecVersion, chainCfg *params.ChainConfig, db *gorm.DB, reg prometheus.Registerer) *ChunkProposer {
log.Info("new chunk proposer",
"maxBlockNumPerChunk", cfg.MaxBlockNumPerChunk,
"maxTxNumPerChunk", cfg.MaxTxNumPerChunk,
"maxL2GasPerChunk", cfg.MaxL2GasPerChunk,
"maxL1CommitGasPerChunk", cfg.MaxL1CommitGasPerChunk,
"maxL1CommitCalldataSizePerChunk", cfg.MaxL1CommitCalldataSizePerChunk,
"maxRowConsumptionPerChunk", cfg.MaxRowConsumptionPerChunk,
"chunkTimeoutSec", cfg.ChunkTimeoutSec,
"gasCostIncreaseMultiplier", cfg.GasCostIncreaseMultiplier,
"maxBlobSize", maxBlobSize,
"maxUncompressedBatchBytesSize", cfg.MaxUncompressedBatchBytesSize)
"maxBlobSize", maxBlobSize)

p := &ChunkProposer{
ctx: ctx,
db: db,
chunkOrm: orm.NewChunk(db),
l2BlockOrm: orm.NewL2Block(db),
maxBlockNumPerChunk: cfg.MaxBlockNumPerChunk,
maxTxNumPerChunk: cfg.MaxTxNumPerChunk,
maxL2GasPerChunk: cfg.MaxL2GasPerChunk,
maxL1CommitGasPerChunk: cfg.MaxL1CommitGasPerChunk,
maxL1CommitCalldataSizePerChunk: cfg.MaxL1CommitCalldataSizePerChunk,
maxRowConsumptionPerChunk: cfg.MaxRowConsumptionPerChunk,
chunkTimeoutSec: cfg.ChunkTimeoutSec,
gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,
maxUncompressedBatchBytesSize: cfg.MaxUncompressedBatchBytesSize,
replayMode: false,
minCodecVersion: minCodecVersion,
chainCfg: chainCfg,
ctx: ctx,
db: db,
chunkOrm: orm.NewChunk(db),
l2BlockOrm: orm.NewL2Block(db),
cfg: cfg,
replayMode: false,
minCodecVersion: minCodecVersion,
chainCfg: chainCfg,

chunkProposerCircleTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_propose_chunk_circle_total",
@@ -124,22 +97,11 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, minC
Name: "rollup_propose_chunk_l2_gas",
Help: "The chunk l2 gas",
}),
chunkEstimateL1CommitGas: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_propose_chunk_estimate_l1_commit_gas",
Help: "The chunk estimate l1 commit gas",
}),
totalL1CommitCalldataSize: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_propose_chunk_total_l1_commit_call_data_size",
Help: "The total l1 commit call data size",
}),
totalL1CommitBlobSize: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_propose_chunk_total_l1_commit_blob_size",
Help: "The total l1 commit blob size",
}),
maxTxConsumption: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_propose_chunk_max_tx_consumption",
Help: "The max tx consumption",
}),

chunkBlocksNum: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_propose_chunk_chunk_block_number",
Help: "The number of blocks in the chunk",
@@ -152,14 +114,6 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, minC
Name: "rollup_propose_chunk_blocks_propose_not_enough_total",
Help: "Total number of chunk block propose not enough",
}),
chunkEstimateGasTime: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_propose_chunk_estimate_gas_time",
Help: "Time taken to estimate gas for the chunk.",
}),
chunkEstimateCalldataSizeTime: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_propose_chunk_estimate_calldata_size_time",
Help: "Time taken to estimate calldata size for the chunk.",
}),
chunkEstimateBlobSizeTime: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_propose_chunk_estimate_blob_size_time",
Help: "Time taken to estimate blob size for the chunk.",
@@ -196,7 +150,7 @@ func (p *ChunkProposer) TryProposeChunk() {
}

func (p *ChunkProposer) updateDBChunkInfo(chunk *encoding.Chunk, codecVersion encoding.CodecVersion, metrics *utils.ChunkMetrics) error {
if chunk == nil {
if chunk == nil || len(chunk.Blocks) == 0 {
return nil
}

@@ -221,6 +175,11 @@ func (p *ChunkProposer) updateDBChunkInfo(chunk *encoding.Chunk, codecVersion en
}

chunk.Blocks = chunk.Blocks[:len(chunk.Blocks)-1]
chunk.PostL1MessageQueueHash, err = encoding.MessageQueueV2ApplyL1MessagesFromBlocks(chunk.PrevL1MessageQueueHash, chunk.Blocks)
if err != nil {
log.Error("Failed to calculate last L1 message queue hash for block", "block number", chunk.Blocks[0].Header.Number, "err", err)
return err
}

log.Info("Chunk not compatible with compressed data, removing last block", "start block number", chunk.Blocks[0].Header.Number, "truncated block length", len(chunk.Blocks))
}
@@ -239,9 +198,7 @@ func (p *ChunkProposer) updateDBChunkInfo(chunk *encoding.Chunk, codecVersion en
p.recordAllChunkMetrics(metrics)
}

if len(chunk.Blocks) > 0 {
p.chunkProposeBlockHeight.Set(float64(chunk.Blocks[len(chunk.Blocks)-1].Header.Number.Uint64()))
}
p.chunkProposeBlockHeight.Set(float64(chunk.Blocks[len(chunk.Blocks)-1].Header.Number.Uint64()))
p.chunkProposeThroughput.Add(float64(chunk.TotalGasUsed()))

p.proposeChunkUpdateInfoTotal.Inc()
@@ -275,7 +232,7 @@ func (p *ChunkProposer) proposeChunk() error {
return err
}

maxBlocksThisChunk := p.maxBlockNumPerChunk
maxBlocksThisChunk := p.cfg.MaxBlockNumPerChunk

// select at most maxBlocksThisChunk blocks
blocks, err := p.l2BlockOrm.GetL2BlocksGEHeight(p.ctx, unchunkedBlockHeight, int(maxBlocksThisChunk))
@@ -305,53 +262,30 @@ func (p *ChunkProposer) proposeChunk() error {
return fmt.Errorf("unsupported codec version: %v, expected at least %v", codecVersion, p.minCodecVersion)
}

// Including Curie block in a sole chunk.
if p.chainCfg.CurieBlock != nil && blocks[0].Header.Number.Cmp(p.chainCfg.CurieBlock) == 0 {
chunk := encoding.Chunk{Blocks: blocks[:1]}
metrics, calcErr := utils.CalculateChunkMetrics(&chunk, codecVersion)
if calcErr != nil {
return fmt.Errorf("failed to calculate chunk metrics: %w", calcErr)
}
p.recordTimerChunkMetrics(metrics)
return p.updateDBChunkInfo(&chunk, codecVersion, metrics)
}

if proposed, err := p.tryProposeEuclidTransitionChunk(blocks); proposed || err != nil {
return err
}

var chunk encoding.Chunk
// From CodecV7 / EuclidV2 onwards we need to provide the PrevL1MessageQueueHash and PostL1MessageQueueHash.
// PrevL1MessageQueueHash of the first chunk in the fork needs to be the empty hash.
if codecVersion >= encoding.CodecV7 {
parentChunk, err := p.chunkOrm.GetLatestChunk(p.ctx)
if err != nil || parentChunk == nil {
return fmt.Errorf("failed to get parent chunk: %w", err)
}

chunk.PrevL1MessageQueueHash = common.HexToHash(parentChunk.PostL1MessageQueueHash)

// previous chunk is not CodecV7, this means this is the first chunk of the fork.
if encoding.CodecVersion(parentChunk.CodecVersion) < codecVersion {
chunk.PrevL1MessageQueueHash = common.Hash{}
}

chunk.PostL1MessageQueueHash = chunk.PrevL1MessageQueueHash
parentChunk, err := p.chunkOrm.GetLatestChunk(p.ctx)
if err != nil || parentChunk == nil {
return fmt.Errorf("failed to get parent chunk: %w", err)
}

chunk.PrevL1MessageQueueHash = common.HexToHash(parentChunk.PostL1MessageQueueHash)

// previous chunk is not CodecV7, this means this is the first chunk of the fork.
if encoding.CodecVersion(parentChunk.CodecVersion) < codecVersion {
chunk.PrevL1MessageQueueHash = common.Hash{}
}

chunk.PostL1MessageQueueHash = chunk.PrevL1MessageQueueHash

var previousPostL1MessageQueueHash common.Hash
chunk.Blocks = make([]*encoding.Block, 0, len(blocks))
for i, block := range blocks {
chunk.Blocks = append(chunk.Blocks, block)

// Compute rolling PostL1MessageQueueHash for the chunk. Each block's L1 messages are applied to the previous
// hash starting from the PrevL1MessageQueueHash for the chunk.
if codecVersion >= encoding.CodecV7 {
previousPostL1MessageQueueHash = chunk.PostL1MessageQueueHash
chunk.PostL1MessageQueueHash, err = encoding.MessageQueueV2ApplyL1MessagesFromBlocks(previousPostL1MessageQueueHash, []*encoding.Block{block})
if err != nil {
return fmt.Errorf("failed to calculate last L1 message queue hash for block %d: %w", block.Header.Number.Uint64(), err)
}
previousPostL1MessageQueueHash = chunk.PostL1MessageQueueHash
chunk.PostL1MessageQueueHash, err = encoding.MessageQueueV2ApplyL1MessagesFromBlocks(previousPostL1MessageQueueHash, []*encoding.Block{block})
if err != nil {
return fmt.Errorf("failed to calculate last L1 message queue hash for block %d: %w", block.Header.Number.Uint64(), err)
}

metrics, calcErr := utils.CalculateChunkMetrics(&chunk, codecVersion)
@@ -361,36 +295,17 @@ func (p *ChunkProposer) proposeChunk() error {

p.recordTimerChunkMetrics(metrics)

overEstimatedL1CommitGas := uint64(p.gasCostIncreaseMultiplier * float64(metrics.L1CommitGas))
if metrics.TxNum > p.maxTxNumPerChunk ||
metrics.L2Gas > p.maxL2GasPerChunk ||
metrics.L1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk ||
overEstimatedL1CommitGas > p.maxL1CommitGasPerChunk ||
metrics.CrcMax > p.maxRowConsumptionPerChunk ||
metrics.L1CommitBlobSize > maxBlobSize ||
metrics.L1CommitUncompressedBatchBytesSize > p.maxUncompressedBatchBytesSize {
if metrics.L2Gas > p.cfg.MaxL2GasPerChunk || metrics.L1CommitBlobSize > maxBlobSize {
if i == 0 {
// The first block exceeds hard limits, which indicates a bug in the sequencer, manual fix is needed.
return fmt.Errorf("the first block exceeds limits; block number: %v, limits: %+v, maxTxNum: %v, maxL1CommitCalldataSize: %v, maxL1CommitGas: %v, maxRowConsumption: %v, maxBlobSize: %v, maxUncompressedBatchBytesSize: %v",
block.Header.Number, metrics, p.maxTxNumPerChunk, p.maxL1CommitCalldataSizePerChunk, p.maxL1CommitGasPerChunk, p.maxRowConsumptionPerChunk, maxBlobSize, p.maxUncompressedBatchBytesSize)
return fmt.Errorf("the first block exceeds limits; block number: %v, limits: %+v, maxBlobSize: %v", block.Header.Number, metrics, maxBlobSize)
}

log.Debug("breaking limit condition in chunking",
"txNum", metrics.TxNum,
"maxTxNum", p.maxTxNumPerChunk,
"l2Gas", metrics.L2Gas,
"maxL2Gas", p.maxL2GasPerChunk,
"l1CommitCalldataSize", metrics.L1CommitCalldataSize,
"maxL1CommitCalldataSize", p.maxL1CommitCalldataSizePerChunk,
"l1CommitGas", metrics.L1CommitGas,
"overEstimatedL1CommitGas", overEstimatedL1CommitGas,
"maxL1CommitGas", p.maxL1CommitGasPerChunk,
"rowConsumption", metrics.CrcMax,
"maxRowConsumption", p.maxRowConsumptionPerChunk,
"maxL2Gas", p.cfg.MaxL2GasPerChunk,
"l1CommitBlobSize", metrics.L1CommitBlobSize,
"maxBlobSize", maxBlobSize,
"L1CommitUncompressedBatchBytesSize", metrics.L1CommitUncompressedBatchBytesSize,
"maxUncompressedBatchBytesSize", p.maxUncompressedBatchBytesSize)
"maxBlobSize", maxBlobSize)

chunk.Blocks = chunk.Blocks[:len(chunk.Blocks)-1]
chunk.PostL1MessageQueueHash = previousPostL1MessageQueueHash
@@ -411,7 +326,7 @@ func (p *ChunkProposer) proposeChunk() error {
}

currentTimeSec := uint64(time.Now().Unix())
if metrics.FirstBlockTimestamp+p.chunkTimeoutSec < currentTimeSec || metrics.NumBlocks == maxBlocksThisChunk {
if metrics.FirstBlockTimestamp+p.cfg.ChunkTimeoutSec < currentTimeSec || metrics.NumBlocks == maxBlocksThisChunk {
log.Info("reached maximum number of blocks in chunk or first block timeout",
"block count", len(chunk.Blocks),
"start block number", chunk.Blocks[0].Header.Number,
@@ -431,54 +346,12 @@ func (p *ChunkProposer) proposeChunk() error {

func (p *ChunkProposer) recordAllChunkMetrics(metrics *utils.ChunkMetrics) {
p.chunkTxNum.Set(float64(metrics.TxNum))
p.maxTxConsumption.Set(float64(metrics.CrcMax))
p.chunkBlocksNum.Set(float64(metrics.NumBlocks))
p.chunkL2Gas.Set(float64(metrics.L2Gas))
p.totalL1CommitCalldataSize.Set(float64(metrics.L1CommitCalldataSize))
p.chunkEstimateL1CommitGas.Set(float64(metrics.L1CommitGas))
p.totalL1CommitBlobSize.Set(float64(metrics.L1CommitBlobSize))
p.chunkEstimateGasTime.Set(float64(metrics.EstimateGasTime))
p.chunkEstimateCalldataSizeTime.Set(float64(metrics.EstimateCalldataSizeTime))
p.chunkEstimateBlobSizeTime.Set(float64(metrics.EstimateBlobSizeTime))
}

func (p *ChunkProposer) recordTimerChunkMetrics(metrics *utils.ChunkMetrics) {
p.chunkEstimateGasTime.Set(float64(metrics.EstimateGasTime))
p.chunkEstimateCalldataSizeTime.Set(float64(metrics.EstimateCalldataSizeTime))
p.chunkEstimateBlobSizeTime.Set(float64(metrics.EstimateBlobSizeTime))
}

func (p *ChunkProposer) tryProposeEuclidTransitionChunk(blocks []*encoding.Block) (bool, error) {
// If we are in replay mode, there is a corner case when StartL2Block is set as 0 in this check,
// it needs to get genesis block, but in mainnet db there is no genesis block, so we need to bypass this check.
if p.replayMode {
return false, nil
}

if !p.chainCfg.IsEuclid(blocks[0].Header.Time) {
return false, nil
}

prevBlocks, err := p.l2BlockOrm.GetL2BlocksGEHeight(p.ctx, blocks[0].Header.Number.Uint64()-1, 1)
if err != nil || len(prevBlocks) == 0 || prevBlocks[0].Header.Hash() != blocks[0].Header.ParentHash {
return false, fmt.Errorf("failed to get parent block: %w", err)
}

if p.chainCfg.IsEuclid(prevBlocks[0].Header.Time) {
// Parent is still Euclid, transition happened already
return false, nil
}

// blocks[0] is Euclid, but parent is not, propose a chunk with only blocks[0]
chunk := encoding.Chunk{Blocks: blocks[:1]}
codecVersion := encoding.CodecV5
metrics, calcErr := utils.CalculateChunkMetrics(&chunk, codecVersion)
if calcErr != nil {
return false, fmt.Errorf("failed to calculate chunk metrics: %w", calcErr)
}
p.recordTimerChunkMetrics(metrics)
if err := p.updateDBChunkInfo(&chunk, codecVersion, metrics); err != nil {
return false, err
}
return true, nil
}

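To illustrate the rolling message-queue hash the proposeChunk loop above maintains, here is a hedged sketch: each block's L1 messages are folded into the running hash, and the previous value is kept so the last block can be dropped again if a limit is hit. applyL1Messages below is a placeholder standing in for encoding.MessageQueueV2ApplyL1MessagesFromBlocks, not the real implementation.

package main

import (
	"crypto/sha256"
	"fmt"
)

// applyL1Messages is a placeholder for encoding.MessageQueueV2ApplyL1MessagesFromBlocks:
// it folds one block's worth of L1 messages into the running queue hash.
func applyL1Messages(prev [32]byte, blockMsgs []byte) [32]byte {
	return sha256.Sum256(append(prev[:], blockMsgs...))
}

func main() {
	var post [32]byte // PrevL1MessageQueueHash of the chunk (empty hash for the first CodecV7 chunk)
	blocks := [][]byte{[]byte("block1-msgs"), []byte("block2-msgs")}

	var previousPost [32]byte
	for _, msgs := range blocks {
		previousPost = post                // remember the hash before this block
		post = applyL1Messages(post, msgs) // roll this block's messages into the hash
		_ = previousPost                   // used to rewind if the block is dropped again
	}
	fmt.Printf("PostL1MessageQueueHash: %x\n", post)
}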
@@ -7,6 +7,7 @@ import (

"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/common/math"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/params"
"github.com/stretchr/testify/assert"

@@ -14,119 +15,44 @@ import (

"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/orm"
"scroll-tech/rollup/internal/utils"
)

func testChunkProposerLimitsCodecV4(t *testing.T) {
func testChunkProposerLimitsCodecV7(t *testing.T) {
tests := []struct {
name string
maxBlockNum uint64
maxTxNum uint64
maxL2Gas uint64
maxL1CommitGas uint64
maxL1CommitCalldataSize uint64
maxRowConsumption uint64
chunkTimeoutSec uint64
expectedChunksLen int
expectedBlocksInFirstChunk int // only be checked when expectedChunksLen > 0
}{
{
name: "NoLimitReached",
maxBlockNum: 100,
maxTxNum: 10000,
maxL2Gas: 20_000_000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 0,
name: "NoLimitReached",
maxBlockNum: 100,
maxL2Gas: 20_000_000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 0,
},
{
name: "Timeout",
maxBlockNum: 100,
maxTxNum: 10000,
maxL2Gas: 20_000_000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 0,
expectedChunksLen: 1,
expectedBlocksInFirstChunk: 2,
},
{
name: "MaxTxNumPerChunkIs0",
maxBlockNum: 10,
maxTxNum: 0,
maxL2Gas: 20_000_000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 0,
},
{
name: "MaxL2GasPerChunkIs0",
maxBlockNum: 10,
maxTxNum: 10,
maxL2Gas: 0,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 0,
},
{
name: "MaxL1CommitGasPerChunkIs0",
maxBlockNum: 10,
maxTxNum: 10000,
maxL2Gas: 20_000_000,
maxL1CommitGas: 0,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 0,
},
{
name: "MaxL1CommitCalldataSizePerChunkIs0",
maxBlockNum: 10,
maxTxNum: 10000,
maxL2Gas: 20_000_000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 0,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 0,
},
{
name: "MaxRowConsumptionPerChunkIs0",
maxBlockNum: 100,
maxTxNum: 10000,
maxL2Gas: 20_000_000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 0,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 0,
name: "MaxL2GasPerChunkIs0",
maxBlockNum: 10,
maxL2Gas: 0,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 0,
},
{
name: "MaxBlockNumPerChunkIs1",
maxBlockNum: 1,
maxTxNum: 10000,
maxL2Gas: 20_000_000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 1,
expectedBlocksInFirstChunk: 1,
},
{
name: "MaxTxNumPerChunkIsFirstBlock",
maxBlockNum: 10,
maxTxNum: 2,
maxL2Gas: 20_000_000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 1,
expectedBlocksInFirstChunk: 1,
@@ -136,47 +62,7 @@ func testChunkProposerLimitsCodecV4(t *testing.T) {
// with the first block it exceeds the maxL2GasPerChunk limit.
name: "MaxL2GasPerChunkIsSecondBlock",
maxBlockNum: 10,
maxTxNum: 10000,
maxL2Gas: 1_153_000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 1,
expectedBlocksInFirstChunk: 1,
},
{
name: "MaxL1CommitGasPerChunkIsFirstBlock",
maxBlockNum: 10,
maxTxNum: 10000,
maxL2Gas: 20_000_000,
maxL1CommitGas: 62500,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 1,
expectedBlocksInFirstChunk: 1,
},
{
name: "MaxL1CommitCalldataSizePerChunkIsFirstBlock",
maxBlockNum: 10,
maxTxNum: 10000,
maxL2Gas: 20_000_000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 60,
maxRowConsumption: 1000000,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 1,
expectedBlocksInFirstChunk: 1,
},
{
name: "MaxRowConsumptionPerChunkIs1",
maxBlockNum: 10,
maxTxNum: 10000,
maxL2Gas: 20_000_000,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
maxRowConsumption: 1,
chunkTimeoutSec: 1000000000000,
expectedChunksLen: 1,
expectedBlocksInFirstChunk: 1,
@@ -192,21 +78,19 @@ func testChunkProposerLimitsCodecV4(t *testing.T) {
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
assert.NoError(t, err)

// Add genesis chunk.
chunkOrm := orm.NewChunk(db)
_, err = chunkOrm.InsertChunk(context.Background(), &encoding.Chunk{Blocks: []*encoding.Block{{Header: &gethTypes.Header{Number: big.NewInt(0)}}}}, encoding.CodecV0, utils.ChunkMetrics{})
assert.NoError(t, err)

cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: tt.maxBlockNum,
MaxTxNumPerChunk: tt.maxTxNum,
MaxL2GasPerChunk: tt.maxL2Gas,
MaxL1CommitGasPerChunk: tt.maxL1CommitGas,
MaxL1CommitCalldataSizePerChunk: tt.maxL1CommitCalldataSize,
MaxRowConsumptionPerChunk: tt.maxRowConsumption,
ChunkTimeoutSec: tt.chunkTimeoutSec,
GasCostIncreaseMultiplier: 1.2,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, encoding.CodecV4, &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}, db, nil)
MaxBlockNumPerChunk: tt.maxBlockNum,
MaxL2GasPerChunk: tt.maxL2Gas,
ChunkTimeoutSec: tt.chunkTimeoutSec,
}, encoding.CodecV7, &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64), EuclidTime: new(uint64), EuclidV2Time: new(uint64)}, db, nil)
cp.TryProposeChunk()

chunkOrm := orm.NewChunk(db)
chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 1, 0)
assert.NoError(t, err)
assert.Len(t, chunks, tt.expectedChunksLen)

@@ -224,61 +108,48 @@ func testChunkProposerLimitsCodecV4(t *testing.T) {
}
}

func testChunkProposerBlobSizeLimitCodecV4(t *testing.T) {
codecVersions := []encoding.CodecVersion{encoding.CodecV4}
for _, codecVersion := range codecVersions {
db := setupDB(t)
block := readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
for i := int64(0); i < 510; i++ {
l2BlockOrm := orm.NewL2Block(db)
block.Header.Number = big.NewInt(i + 1)
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block})
assert.NoError(t, err)
}

var chainConfig *params.ChainConfig
if codecVersion == encoding.CodecV4 {
chainConfig = &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}
} else {
assert.Fail(t, "unsupported codec version, expected CodecV4")
}

cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: 255,
MaxTxNumPerChunk: math.MaxUint64,
MaxL2GasPerChunk: math.MaxUint64,
MaxL1CommitGasPerChunk: math.MaxUint64,
MaxL1CommitCalldataSizePerChunk: math.MaxUint64,
MaxRowConsumptionPerChunk: math.MaxUint64,
ChunkTimeoutSec: math.MaxUint32,
GasCostIncreaseMultiplier: 1,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, encoding.CodecV4, chainConfig, db, nil)

for i := 0; i < 2; i++ {
cp.TryProposeChunk()
}

chunkOrm := orm.NewChunk(db)
chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
func testChunkProposerBlobSizeLimitCodecV7(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)
block := readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
for i := uint64(0); i < 510; i++ {
l2BlockOrm := orm.NewL2Block(db)
block.Header.Number = new(big.Int).SetUint64(i + 1)
block.Header.Time = i + 1
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block})
assert.NoError(t, err)
}

var expectedNumChunks int = 2
var numBlocksMultiplier uint64
if codecVersion == encoding.CodecV4 {
numBlocksMultiplier = 255
} else {
assert.Fail(t, "unsupported codec version, expected CodecV4")
}
assert.Len(t, chunks, expectedNumChunks)
// Add genesis chunk.
chunkOrm := orm.NewChunk(db)
_, err := chunkOrm.InsertChunk(context.Background(), &encoding.Chunk{Blocks: []*encoding.Block{{Header: &gethTypes.Header{Number: big.NewInt(0)}}}}, encoding.CodecV0, utils.ChunkMetrics{})
assert.NoError(t, err)

for i, chunk := range chunks {
expected := numBlocksMultiplier * (uint64(i) + 1)
if expected > 2000 {
expected = 2000
}
assert.Equal(t, expected, chunk.EndBlockNumber)
chainConfig := &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64), EuclidTime: new(uint64), EuclidV2Time: new(uint64)}

cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: 255,
MaxL2GasPerChunk: math.MaxUint64,
ChunkTimeoutSec: math.MaxUint32,
}, encoding.CodecV7, chainConfig, db, nil)

for i := 0; i < 2; i++ {
cp.TryProposeChunk()
}

chunkOrm = orm.NewChunk(db)
chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 1, 0)
assert.NoError(t, err)

var expectedNumChunks int = 2
var numBlocksMultiplier uint64 = 255
assert.Len(t, chunks, expectedNumChunks)

for i, chunk := range chunks {
expected := numBlocksMultiplier * (uint64(i) + 1)
if expected > 2000 {
expected = 2000
}
database.CloseDB(db)
assert.Equal(t, expected, chunk.EndBlockNumber)
}
}

@@ -92,10 +92,6 @@ func (w *L2WatcherClient) getAndStoreBlocks(ctx context.Context, from, to uint64
if err != nil {
return fmt.Errorf("failed to GetBlockByNumberOrHash: %v. number: %v", err, number)
}
if block.RowConsumption == nil && !w.chainCfg.IsEuclid(block.Time()) {
w.metrics.fetchNilRowConsumptionBlockTotal.Inc()
return fmt.Errorf("fetched block does not contain RowConsumption. number: %v", number)
}

var count int
for _, tx := range block.Transactions() {
@@ -110,10 +106,9 @@ func (w *L2WatcherClient) getAndStoreBlocks(ctx context.Context, from, to uint64
return fmt.Errorf("failed to get withdrawRoot: %v. number: %v", err3, number)
}
blocks = append(blocks, &encoding.Block{
Header: block.Header(),
Transactions: encoding.TxsToTxsData(block.Transactions()),
WithdrawRoot: common.BytesToHash(withdrawRoot),
RowConsumption: block.RowConsumption,
Header: block.Header(),
Transactions: encoding.TxsToTxsData(block.Transactions()),
WithdrawRoot: common.BytesToHash(withdrawRoot),
})
}

@@ -123,11 +118,6 @@ func (w *L2WatcherClient) getAndStoreBlocks(ctx context.Context, from, to uint64
if codec == nil {
return fmt.Errorf("failed to retrieve codec for block number %v and time %v", block.Header.Number, block.Header.Time)
}
blockL1CommitCalldataSize, err := codec.EstimateBlockL1CommitCalldataSize(block)
if err != nil {
return fmt.Errorf("failed to estimate block L1 commit calldata size: %v", err)
}
w.metrics.rollupL2BlockL1CommitCalldataSize.Set(float64(blockL1CommitCalldataSize))
w.metrics.rollupL2WatcherSyncThroughput.Add(float64(block.Header.GasUsed))
}
if err := w.l2BlockOrm.InsertL2Blocks(w.ctx, blocks); err != nil {
@@ -8,11 +8,9 @@ import (
)

type l2WatcherMetrics struct {
fetchRunningMissingBlocksTotal prometheus.Counter
fetchRunningMissingBlocksHeight prometheus.Gauge
rollupL2BlocksFetchedGap prometheus.Gauge
rollupL2BlockL1CommitCalldataSize prometheus.Gauge
fetchNilRowConsumptionBlockTotal prometheus.Counter
fetchRunningMissingBlocksTotal prometheus.Counter
fetchRunningMissingBlocksHeight prometheus.Gauge
rollupL2BlocksFetchedGap prometheus.Gauge

rollupL2WatcherSyncThroughput prometheus.Counter
}
@@ -37,14 +35,6 @@ func initL2WatcherMetrics(reg prometheus.Registerer) *l2WatcherMetrics {
Name: "rollup_l2_watcher_blocks_fetched_gap",
Help: "The gap of l2 fetch",
}),
rollupL2BlockL1CommitCalldataSize: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_l2_block_l1_commit_calldata_size",
Help: "The l1 commitBatch calldata size of the l2 block",
}),
fetchNilRowConsumptionBlockTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_l2_watcher_fetch_nil_row_consumption_block_total",
Help: "The total number of occurrences where a fetched block has nil RowConsumption",
}),
rollupL2WatcherSyncThroughput: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_l2_watcher_sync_throughput",
Help: "The cumulative gas used in blocks that L2 watcher sync",
@@ -14,9 +14,10 @@ import (
"github.com/scroll-tech/go-ethereum/params"
"gorm.io/gorm"

"scroll-tech/database/migrate"

"scroll-tech/common/database"
"scroll-tech/common/utils"
"scroll-tech/database/migrate"

"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/orm"

@@ -101,17 +101,16 @@ func TestFunction(t *testing.T) {
t.Run("TestFetchRunningMissingBlocks", testFetchRunningMissingBlocks)

// Run chunk proposer test cases.
t.Run("TestChunkProposerLimitsCodecV4", testChunkProposerLimitsCodecV4)
t.Run("TestChunkProposerBlobSizeLimitCodecV4", testChunkProposerBlobSizeLimitCodecV4)
t.Run("TestChunkProposerLimitsCodecV7", testChunkProposerLimitsCodecV7)
t.Run("TestChunkProposerBlobSizeLimitCodecV7", testChunkProposerBlobSizeLimitCodecV7)

// Run batch proposer test cases.
t.Run("TestBatchProposerLimitsCodecV4", testBatchProposerLimitsCodecV4)
t.Run("TestBatchCommitGasAndCalldataSizeEstimationCodecV4", testBatchCommitGasAndCalldataSizeEstimationCodecV4)
t.Run("TestBatchProposerBlobSizeLimitCodecV4", testBatchProposerBlobSizeLimitCodecV4)
t.Run("TestBatchProposerMaxChunkNumPerBatchLimitCodecV4", testBatchProposerMaxChunkNumPerBatchLimitCodecV4)
t.Run("TestBatchProposerLimitsCodecV7", testBatchProposerLimitsCodecV7)
t.Run("TestBatchProposerBlobSizeLimitCodecV7", testBatchProposerBlobSizeLimitCodecV7)
t.Run("TestBatchProposerMaxChunkNumPerBatchLimitCodecV7", testBatchProposerMaxChunkNumPerBatchLimitCodecV7)

// Run bundle proposer test cases.
t.Run("TestBundleProposerLimitsCodecV4", testBundleProposerLimitsCodecV4)
t.Run("TestBundleProposerLimitsCodecV7", testBundleProposerLimitsCodecV7)
}

func readBlockFromJSON(t *testing.T, filename string) *encoding.Block {
@@ -300,30 +300,28 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, codecVer
}

newBatch := Batch{
Index: batch.Index,
Hash: batchMeta.BatchHash.Hex(),
DataHash: batchMeta.BatchDataHash.Hex(),
StartChunkHash: batchMeta.StartChunkHash.Hex(),
StartChunkIndex: startChunkIndex,
EndChunkHash: batchMeta.EndChunkHash.Hex(),
EndChunkIndex: startChunkIndex + numChunks - 1,
StateRoot: batch.StateRoot().Hex(),
WithdrawRoot: batch.WithdrawRoot().Hex(),
ParentBatchHash: batch.ParentBatchHash.Hex(),
BatchHeader: batchMeta.BatchBytes,
CodecVersion: int16(codecVersion),
PrevL1MessageQueueHash: batch.PrevL1MessageQueueHash.Hex(),
PostL1MessageQueueHash: batch.PostL1MessageQueueHash.Hex(),
EnableCompress: enableCompress,
BlobBytes: batchMeta.BlobBytes,
ChallengeDigest: batchMeta.ChallengeDigest.Hex(),
ChunkProofsStatus: int16(types.ChunkProofsStatusPending),
ProvingStatus: int16(types.ProvingTaskUnassigned),
RollupStatus: int16(types.RollupPending),
TotalL1CommitGas: metrics.L1CommitGas,
TotalL1CommitCalldataSize: metrics.L1CommitCalldataSize,
BlobDataProof: batchMeta.BatchBlobDataProof,
BlobSize: metrics.L1CommitBlobSize,
Index: batch.Index,
Hash: batchMeta.BatchHash.Hex(),
DataHash: batchMeta.BatchDataHash.Hex(),
StartChunkHash: batchMeta.StartChunkHash.Hex(),
StartChunkIndex: startChunkIndex,
EndChunkHash: batchMeta.EndChunkHash.Hex(),
EndChunkIndex: startChunkIndex + numChunks - 1,
StateRoot: batch.StateRoot().Hex(),
WithdrawRoot: batch.WithdrawRoot().Hex(),
ParentBatchHash: batch.ParentBatchHash.Hex(),
BatchHeader: batchMeta.BatchBytes,
CodecVersion: int16(codecVersion),
PrevL1MessageQueueHash: batch.PrevL1MessageQueueHash.Hex(),
PostL1MessageQueueHash: batch.PostL1MessageQueueHash.Hex(),
EnableCompress: enableCompress,
BlobBytes: batchMeta.BlobBytes,
ChallengeDigest: batchMeta.ChallengeDigest.Hex(),
ChunkProofsStatus: int16(types.ChunkProofsStatusPending),
ProvingStatus: int16(types.ProvingTaskUnassigned),
RollupStatus: int16(types.RollupPending),
BlobDataProof: batchMeta.BatchBlobDataProof,
BlobSize: metrics.L1CommitBlobSize,
}

db := o.db
123
rollup/internal/orm/blob_upload.go
Normal file
@@ -0,0 +1,123 @@
package orm

import (
"context"
"errors"
"fmt"
"time"

"gorm.io/gorm"

"scroll-tech/common/types"
)

// BlobUpload represents a blob upload record in the database.
type BlobUpload struct {
db *gorm.DB `gorm:"-"`

// blob upload
BatchIndex uint64 `json:"batch_index" gorm:"column:batch_index"`
BatchHash string `json:"batch_hash" gorm:"column:batch_hash"`
Platform int16 `json:"platform" gorm:"column:platform"`
Status int16 `json:"status" gorm:"column:status"`

// metadata
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
}

// NewBlobUpload creates a new BlobUpload database instance.
func NewBlobUpload(db *gorm.DB) *BlobUpload {
return &BlobUpload{db: db}
}

// TableName returns the table name for the BlobUpload model.
func (*BlobUpload) TableName() string {
return "blob_upload"
}

// GetNextBatchIndexToUploadByPlatform retrieves the next batch index that hasn't been uploaded to corresponding blob storage service
func (o *BlobUpload) GetNextBatchIndexToUploadByPlatform(ctx context.Context, startBatch uint64, platform types.BlobStoragePlatform) (uint64, error) {
db := o.db.WithContext(ctx)
db = db.Model(&BlobUpload{})
db = db.Where("platform = ? AND status = ?", platform, types.BlobUploadStatusUploaded)
db = db.Order("batch_index DESC")
db = db.Limit(1)

var blobUpload BlobUpload
var batchIndex uint64
if err := db.First(&blobUpload).Error; err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
batchIndex = startBatch
} else {
return 0, fmt.Errorf("BlobUpload.GetNextBatchIndexToUploadByPlatform error: %w", err)
}
} else {
batchIndex = blobUpload.BatchIndex + 1
}

return batchIndex, nil
}
// GetBlobUploads retrieves the selected blob uploads from the database.
func (o *BlobUpload) GetBlobUploads(ctx context.Context, fields map[string]interface{}, orderByList []string, limit int) ([]*BlobUpload, error) {
db := o.db.WithContext(ctx)
db = db.Model(&BlobUpload{})

for key, value := range fields {
db = db.Where(key, value)
}

for _, orderBy := range orderByList {
db = db.Order(orderBy)
}

if limit > 0 {
db = db.Limit(limit)
}

db = db.Order("batch_index ASC")

var blobUploads []*BlobUpload
if err := db.Find(&blobUploads).Error; err != nil {
return nil, fmt.Errorf("BlobUpload.GetBlobUploads error: %w", err)
}

return blobUploads, nil
}

// InsertOrUpdateBlobUpload inserts a new blob upload record or updates the existing one.
func (o *BlobUpload) InsertOrUpdateBlobUpload(ctx context.Context, batchIndex uint64, batchHash string, platform types.BlobStoragePlatform, status types.BlobUploadStatus, dbTX ...*gorm.DB) error {
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)

var existing BlobUpload
err := db.Where("batch_index = ? AND batch_hash = ? AND platform = ? AND deleted_at IS NULL",
batchIndex, batchHash, int16(platform),
).First(&existing).Error

if errors.Is(err, gorm.ErrRecordNotFound) {
newRecord := BlobUpload{
BatchIndex: batchIndex,
BatchHash: batchHash,
Platform: int16(platform),
Status: int16(status),
}
if err := db.Create(&newRecord).Error; err != nil {
return fmt.Errorf("BlobUpload.InsertOrUpdateBlobUpload insert error: %w, batch index: %v, batch_hash: %v, platform: %v", err, batchIndex, batchHash, platform)
}
return nil
} else if err != nil {
return fmt.Errorf("BlobUpload.InsertOrUpdateBlobUpload query error: %w, batch index: %v, batch_hash: %v, platform: %v", err, batchIndex, batchHash, platform)
}

if err := db.Model(&existing).Update("status", int16(status)).Error; err != nil {
return fmt.Errorf("BlobUpload.InsertOrUpdateBlobUpload update error: %w, batch index: %v, batch_hash: %v, platform: %v", err, batchIndex, batchHash, platform)
}

return nil
}
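A minimal sketch of how an uploader loop might drive this new ORM. The platform constant types.BlobStoragePlatformS3, the failure status types.BlobUploadStatusFailed, and the uploadBlob stub are assumptions for illustration; only NewBlobUpload, GetNextBatchIndexToUploadByPlatform, InsertOrUpdateBlobUpload and types.BlobUploadStatusUploaded come from the file above.

package main

import (
	"context"
	"log"

	"gorm.io/gorm"

	"scroll-tech/common/types"
	"scroll-tech/rollup/internal/orm"
)

// uploadNextBlob finds the next batch that has not been uploaded to the given
// platform, uploads it, and records the outcome in the blob_upload table.
func uploadNextBlob(ctx context.Context, db *gorm.DB, startBatch uint64) error {
	blobUploadOrm := orm.NewBlobUpload(db)

	// Next index = highest uploaded index for this platform + 1, or startBatch if none yet.
	next, err := blobUploadOrm.GetNextBatchIndexToUploadByPlatform(ctx, startBatch, types.BlobStoragePlatformS3)
	if err != nil {
		return err
	}

	// uploadBlob is a stand-in for the real storage-client call; a real caller would also
	// look up the batch hash for index `next` from the batch table instead of passing "".
	uploadBlob := func(ctx context.Context, batchIndex uint64) error { return nil }

	if err := uploadBlob(ctx, next); err != nil {
		// Record the failure so the batch is retried later; the status constant name is an assumption.
		return blobUploadOrm.InsertOrUpdateBlobUpload(ctx, next, "", types.BlobStoragePlatformS3, types.BlobUploadStatusFailed)
	}

	log.Printf("uploaded blob for batch %d", next)
	return blobUploadOrm.InsertOrUpdateBlobUpload(ctx, next, "", types.BlobStoragePlatformS3, types.BlobUploadStatusUploaded)
}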
@@ -50,14 +50,14 @@ type Chunk struct {
BatchHash string `json:"batch_hash" gorm:"column:batch_hash;default:NULL"`

// blob
CrcMax uint64 `json:"crc_max" gorm:"column:crc_max"`
CrcMax uint64 `json:"crc_max" gorm:"column:crc_max"` // deprecated
BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"`

// metadata
TotalL2TxGas uint64 `json:"total_l2_tx_gas" gorm:"column:total_l2_tx_gas"`
TotalL2TxNum uint64 `json:"total_l2_tx_num" gorm:"column:total_l2_tx_num"`
TotalL1CommitCalldataSize uint64 `json:"total_l1_commit_calldata_size" gorm:"column:total_l1_commit_calldata_size"`
TotalL1CommitGas uint64 `json:"total_l1_commit_gas" gorm:"column:total_l1_commit_gas"`
TotalL1CommitCalldataSize uint64 `json:"total_l1_commit_calldata_size" gorm:"column:total_l1_commit_calldata_size"` // deprecated
TotalL1CommitGas uint64 `json:"total_l1_commit_gas" gorm:"column:total_l1_commit_gas"` // deprecated
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
@@ -246,8 +246,6 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, codecVer
EndBlockHash: chunk.Blocks[numBlocks-1].Header.Hash().Hex(),
TotalL2TxGas: chunk.TotalGasUsed(),
TotalL2TxNum: chunk.NumL2Transactions(),
TotalL1CommitCalldataSize: metrics.L1CommitCalldataSize,
TotalL1CommitGas: metrics.L1CommitGas,
StartBlockTime: chunk.Blocks[0].Header.Time,
TotalL1MessagesPoppedBefore: totalL1MessagePoppedBefore,
TotalL1MessagesPoppedInChunk: chunk.NumL1Messages(totalL1MessagePoppedBefore),
@@ -260,7 +258,6 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, codecVer
CodecVersion: int16(codecVersion),
EnableCompress: enableCompress,
ProvingStatus: int16(types.ProvingTaskUnassigned),
CrcMax: metrics.CrcMax,
BlobSize: metrics.L1CommitBlobSize,
}
@@ -96,11 +96,6 @@ func (o *L2Block) GetL2BlocksGEHeight(ctx context.Context, height uint64, limit
}

block.WithdrawRoot = common.HexToHash(v.WithdrawRoot)

if err := json.Unmarshal([]byte(v.RowConsumption), &block.RowConsumption); err != nil {
return nil, fmt.Errorf("L2Block.GetL2BlocksGEHeight error: %w", err)
}

blocks = append(blocks, &block)
}

@@ -171,11 +166,6 @@ func (o *L2Block) GetL2BlocksInRange(ctx context.Context, startBlockNumber uint6
}

block.WithdrawRoot = common.HexToHash(v.WithdrawRoot)

if err := json.Unmarshal([]byte(v.RowConsumption), &block.RowConsumption); err != nil {
return nil, fmt.Errorf("L2Block.GetL2BlocksInRange error: %w, start block: %v, end block: %v", err, startBlockNumber, endBlockNumber)
}

blocks = append(blocks, &block)
}

@@ -183,7 +173,7 @@ func (o *L2Block) GetL2BlocksInRange(ctx context.Context, startBlockNumber uint6
}

// InsertL2Blocks inserts l2 blocks into the "l2_block" table.
func (o *L2Block) InsertL2Blocks(ctx context.Context, blocks []*encoding.Block) error {
func (o *L2Block) InsertL2Blocks(ctx context.Context, blocks []*encoding.Block, dbTX ...*gorm.DB) error {
var l2Blocks []L2Block
for _, block := range blocks {
header, err := json.Marshal(block.Header)
@@ -198,12 +188,6 @@ func (o *L2Block) InsertL2Blocks(ctx context.Context, blocks []*encoding.Block)
return fmt.Errorf("L2Block.InsertL2Blocks error: %w", err)
}

rc, err := json.Marshal(block.RowConsumption)
if err != nil {
log.Error("failed to marshal RowConsumption", "hash", block.Header.Hash().String(), "err", err)
return fmt.Errorf("L2Block.InsertL2Blocks error: %w", err)
}

l2Block := L2Block{
Number: block.Header.Number.Uint64(),
Hash: block.Header.Hash().String(),
@@ -214,13 +198,16 @@ func (o *L2Block) InsertL2Blocks(ctx context.Context, blocks []*encoding.Block)
TxNum: uint32(len(block.Transactions)),
GasUsed: block.Header.GasUsed,
BlockTimestamp: block.Header.Time,
RowConsumption: string(rc),
Header: string(header),
}
l2Blocks = append(l2Blocks, l2Block)
}

db := o.db.WithContext(ctx)
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&L2Block{})

if err := db.Create(&l2Blocks).Error; err != nil {
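With the added variadic dbTX parameter, InsertL2Blocks can participate in a caller-managed transaction. A short sketch, assuming the standard gorm Transaction helper and an illustrative second write inside the closure; only InsertL2Blocks and its new signature come from the diff above.

package main

import (
	"context"

	"github.com/scroll-tech/da-codec/encoding"
	"gorm.io/gorm"

	"scroll-tech/rollup/internal/orm"
)

// insertBlocksAtomically wraps the block insert in a database transaction so it
// commits or rolls back together with any other writes added inside the closure.
func insertBlocksAtomically(ctx context.Context, db *gorm.DB, blocks []*encoding.Block) error {
	l2BlockOrm := orm.NewL2Block(db)
	return db.Transaction(func(tx *gorm.DB) error {
		// Passing tx makes InsertL2Blocks run on the transaction instead of the default handle.
		if err := l2BlockOrm.InsertL2Blocks(ctx, blocks, tx); err != nil {
			return err // returning an error rolls the transaction back
		}
		// ... other writes that must land atomically with the blocks would go here ...
		return nil // returning nil commits
	})
}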
@@ -71,12 +71,14 @@ func setupEnv(t *testing.T) {
block1 = &encoding.Block{}
err = json.Unmarshal(templateBlockTrace, block1)
assert.NoError(t, err)
block1.RowConsumption = nil

templateBlockTrace, err = os.ReadFile("../../../common/testdata/blockTrace_03.json")
assert.NoError(t, err)
block2 = &encoding.Block{}
err = json.Unmarshal(templateBlockTrace, block2)
assert.NoError(t, err)
block2.RowConsumption = nil
}

func tearDownEnv(t *testing.T) {
@@ -13,19 +13,12 @@ type ChunkMetrics struct {
NumBlocks uint64
TxNum uint64
L2Gas uint64
CrcMax uint64
FirstBlockTimestamp uint64

L1CommitCalldataSize uint64
L1CommitGas uint64

L1CommitBlobSize uint64
L1CommitUncompressedBatchBytesSize uint64
L1CommitBlobSize uint64

// timing metrics
EstimateGasTime time.Duration
EstimateCalldataSizeTime time.Duration
EstimateBlobSizeTime time.Duration
EstimateBlobSizeTime time.Duration
}

// CalculateChunkMetrics calculates chunk metrics.
@@ -42,34 +35,13 @@ func CalculateChunkMetrics(chunk *encoding.Chunk, codecVersion encoding.CodecVer
}

var err error
metrics.CrcMax, err = chunk.CrcMax()
if err != nil {
return nil, fmt.Errorf("failed to get crc max, version: %v, err: %w", codecVersion, err)
}

codec, err := encoding.CodecFromVersion(codecVersion)
if err != nil {
return nil, fmt.Errorf("failed to get codec from version: %v, err: %w", codecVersion, err)
}

metrics.EstimateGasTime, err = measureTime(func() error {
metrics.L1CommitGas, err = codec.EstimateChunkL1CommitGas(chunk)
return err
})
if err != nil {
return nil, fmt.Errorf("failed to estimate chunk L1 commit gas, version: %v, err: %w", codecVersion, err)
}

metrics.EstimateCalldataSizeTime, err = measureTime(func() error {
metrics.L1CommitCalldataSize, err = codec.EstimateChunkL1CommitCalldataSize(chunk)
return err
})
if err != nil {
return nil, fmt.Errorf("failed to estimate chunk L1 commit calldata size, version: %v, err: %w", codecVersion, err)
}

metrics.EstimateBlobSizeTime, err = measureTime(func() error {
metrics.L1CommitUncompressedBatchBytesSize, metrics.L1CommitBlobSize, err = codec.EstimateChunkL1CommitBatchSizeAndBlobSize(chunk)
_, metrics.L1CommitBlobSize, err = codec.EstimateChunkL1CommitBatchSizeAndBlobSize(chunk)
return err
})
if err != nil {
@@ -84,16 +56,10 @@ type BatchMetrics struct {
NumChunks uint64
FirstBlockTimestamp uint64

L1CommitCalldataSize uint64
L1CommitGas uint64

L1CommitBlobSize uint64
L1CommitUncompressedBatchBytesSize uint64
L1CommitBlobSize uint64

// timing metrics
EstimateGasTime time.Duration
EstimateCalldataSizeTime time.Duration
EstimateBlobSizeTime time.Duration
EstimateBlobSizeTime time.Duration
}

// CalculateBatchMetrics calculates batch metrics.
@@ -108,24 +74,8 @@ func CalculateBatchMetrics(batch *encoding.Batch, codecVersion encoding.CodecVer
return nil, fmt.Errorf("failed to get codec from version: %v, err: %w", codecVersion, err)
}

metrics.EstimateGasTime, err = measureTime(func() error {
metrics.L1CommitGas, err = codec.EstimateBatchL1CommitGas(batch)
return err
})
if err != nil {
return nil, fmt.Errorf("failed to estimate batch L1 commit gas, version: %v, err: %w", codecVersion, err)
}

metrics.EstimateCalldataSizeTime, err = measureTime(func() error {
metrics.L1CommitCalldataSize, err = codec.EstimateBatchL1CommitCalldataSize(batch)
return err
})
if err != nil {
return nil, fmt.Errorf("failed to estimate batch L1 commit calldata size, version: %v, err: %w", codecVersion, err)
}

metrics.EstimateBlobSizeTime, err = measureTime(func() error {
metrics.L1CommitUncompressedBatchBytesSize, metrics.L1CommitBlobSize, err = codec.EstimateBatchL1CommitBatchSizeAndBlobSize(batch)
_, metrics.L1CommitBlobSize, err = codec.EstimateBatchL1CommitBatchSizeAndBlobSize(batch)
return err
})
if err != nil {
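The timing fields above are filled through a measureTime helper wrapped around each codec estimate. The repository's own implementation is not part of this diff; a sketch with the signature implied by these call sites would be:

package utils

import "time"

// measureTime runs fn and returns how long it took together with fn's error.
// Inferred from the call sites above; the actual implementation may differ.
func measureTime(fn func() error) (time.Duration, error) {
	start := time.Now()
	err := fn()
	return time.Since(start), err
}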
@@ -3,21 +3,11 @@
"endpoint": "https://rpc.scroll.io",
"chunk_proposer_config": {
"max_block_num_per_chunk": 100,
"max_tx_num_per_chunk": 100,
"max_l2_gas_per_chunk": 20000000,
"max_l1_commit_gas_per_chunk": 5000000,
"max_l1_commit_calldata_size_per_chunk": 123740,
"chunk_timeout_sec": 72000000000,
"max_row_consumption_per_chunk": 10000000000,
"gas_cost_increase_multiplier": 1.2,
"max_uncompressed_batch_bytes_size": 634693
"chunk_timeout_sec": 72000000000
},
"batch_proposer_config": {
"max_l1_commit_gas_per_batch": 5000000,
"max_l1_commit_calldata_size_per_batch": 123740,
"batch_timeout_sec": 72000000000,
"gas_cost_increase_multiplier": 1.2,
"max_uncompressed_batch_bytes_size": 634693,
"max_chunks_per_batch": 45
},
"bundle_proposer_config": {
@@ -208,7 +208,6 @@ func TestFunction(t *testing.T) {

// l1 rollup and watch rollup events
t.Run("TestCommitAndFinalizeGenesisBatch", testCommitAndFinalizeGenesisBatch)
t.Run("TestCommitBatchAndFinalizeBundleCodecV4V5V6", testCommitBatchAndFinalizeBundleCodecV4V5V6)
t.Run("TestCommitBatchAndFinalizeBundleCodecV7", testCommitBatchAndFinalizeBundleCodecV7)

// l1 gas oracle
@@ -66,9 +66,8 @@ func testImportL1GasPrice(t *testing.T) {
Difficulty: big.NewInt(0),
BaseFee: big.NewInt(0),
},
Transactions: nil,
WithdrawRoot: common.Hash{},
RowConsumption: &gethTypes.RowConsumption{},
Transactions: nil,
WithdrawRoot: common.Hash{},
},
},
}
@@ -141,9 +140,8 @@ func testImportDefaultL1GasPriceDueToL1GasPriceSpike(t *testing.T) {
Difficulty: big.NewInt(0),
BaseFee: big.NewInt(0),
},
Transactions: nil,
WithdrawRoot: common.Hash{},
RowConsumption: &gethTypes.RowConsumption{},
Transactions: nil,
WithdrawRoot: common.Hash{},
},
},
}
@@ -19,7 +19,7 @@ func testProcessStart(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)
rollupApp.RunApp(t, cutils.GasOracleApp, "--genesis", "../conf/genesis.json")
rollupApp.RunApp(t, cutils.RollupRelayerApp, "--genesis", "../conf/genesis.json", "--min-codec-version", "4")
rollupApp.RunApp(t, cutils.RollupRelayerApp, "--genesis", "../conf/genesis.json", "--min-codec-version", "7")

rollupApp.WaitExit()
}
@@ -36,7 +36,7 @@ func testProcessStartEnableMetrics(t *testing.T) {
port, err = rand.Int(rand.Reader, big.NewInt(10000))
assert.NoError(t, err)
svrPort = strconv.FormatInt(port.Int64()+30000, 10)
rollupApp.RunApp(t, cutils.RollupRelayerApp, "--metrics", "--metrics.addr", "localhost", "--metrics.port", svrPort, "--genesis", "../conf/genesis.json", "--min-codec-version", "4")
rollupApp.RunApp(t, cutils.RollupRelayerApp, "--metrics", "--metrics.addr", "localhost", "--metrics.port", svrPort, "--genesis", "../conf/genesis.json", "--min-codec-version", "7")

rollupApp.WaitExit()
}
@@ -8,7 +8,6 @@ import (
"testing"
"time"

"github.com/agiledragon/gomonkey/v2"
"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/common"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
@@ -19,7 +18,6 @@ import (

"scroll-tech/common/database"
"scroll-tech/common/types"
"scroll-tech/common/types/message"

"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/controller/relayer"
@@ -56,172 +54,6 @@ func testCommitAndFinalizeGenesisBatch(t *testing.T) {
assert.Equal(t, types.RollupFinalized, types.RollupStatus(batch.RollupStatus))
}

func testCommitBatchAndFinalizeBundleCodecV4V5V6(t *testing.T) {
db := setupDB(t)

prepareContracts(t)

euclidTime := uint64(3)
chainConfig := &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64), EuclidTime: &euclidTime}

// Create L2Relayer
l2Cfg := rollupApp.Config.L2Config
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, chainConfig, relayer.ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)

// add some blocks to db
var blocks []*encoding.Block
for i := int64(0); i < 10; i++ {
header := gethTypes.Header{
Number: big.NewInt(i + 1),
ParentHash: common.Hash{},
Difficulty: big.NewInt(0),
BaseFee: big.NewInt(0),
Root: common.HexToHash("0x1"),
Time: uint64(i),
}
blocks = append(blocks, &encoding.Block{
Header: &header,
Transactions: nil,
WithdrawRoot: common.HexToHash("0x2"),
RowConsumption: &gethTypes.RowConsumption{},
})
}

cp := watcher.NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: 100,
MaxTxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MaxRowConsumptionPerChunk: 1048319,
ChunkTimeoutSec: 300,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, encoding.CodecV4, chainConfig, db, nil)

bap := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxL1CommitGasPerBatch: 50000000000,
MaxL1CommitCalldataSizePerBatch: 1000000,
BatchTimeoutSec: 300,
MaxUncompressedBatchBytesSize: math.MaxUint64,
MaxChunksPerBatch: math.MaxInt32,
}, encoding.CodecV4, chainConfig, db, nil)

bup := watcher.NewBundleProposer(context.Background(), &config.BundleProposerConfig{
MaxBatchNumPerBundle: 1000000,
BundleTimeoutSec: 300,
}, encoding.CodecV4, chainConfig, db, nil)

l2BlockOrm := orm.NewL2Block(db)
batchOrm := orm.NewBatch(db)
bundleOrm := orm.NewBundle(db)

err = l2BlockOrm.InsertL2Blocks(context.Background(), blocks[:5])
assert.NoError(t, err)

cp.TryProposeChunk()
bap.TryProposeBatch()

err = l2BlockOrm.InsertL2Blocks(context.Background(), blocks[5:])
assert.NoError(t, err)

cp.TryProposeChunk()
bap.TryProposeBatch()

l2Relayer.ProcessPendingBatches()

// make sure that batches are committed before proposing bundles (as bundle proposing depends on batches being committed).
require.Eventually(t, func() bool {
batches, getErr := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, nil, 0)
assert.NoError(t, getErr)

assert.Len(t, batches, 3)
batches = batches[1:]
for _, batch := range batches {
if types.RollupCommitted != types.RollupStatus(batch.RollupStatus) {
return false
}
}

// make sure that batches 1 and 2 have been committed in separate transactions
return batches[0].CommitTxHash != batches[1].CommitTxHash
}, 30*time.Second, time.Second)

bup.TryProposeBundle() // The proposed bundle contains two batches when codec version is codecv3.

patchGuard1 := gomonkey.ApplyMethodFunc((*message.OpenVMBatchProof)(nil), "SanityCheck", func() error {
return nil
})
defer patchGuard1.Reset()

batchProof := &message.OpenVMBatchProof{}
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, nil, 0)
assert.NoError(t, err)
batches = batches[1:]
for _, batch := range batches {
err = batchOrm.UpdateProofByHash(context.Background(), batch.Hash, batchProof, 100)
assert.NoError(t, err)
err = batchOrm.UpdateProvingStatus(context.Background(), batch.Hash, types.ProvingTaskVerified)
assert.NoError(t, err)
}

patchGuard2 := gomonkey.ApplyMethodFunc((*message.OpenVMBundleProof)(nil), "SanityCheck", func() error {
return nil
})
defer patchGuard2.Reset()

bundleProof := &message.OpenVMBundleProof{EvmProof: &message.OpenVMEvmProof{Instances: make([]byte, 384)}}
bundles, err := bundleOrm.GetBundles(context.Background(), map[string]interface{}{}, nil, 0)
assert.NoError(t, err)
for _, bundle := range bundles {
err = bundleOrm.UpdateProofAndProvingStatusByHash(context.Background(), bundle.Hash, bundleProof, types.ProvingTaskVerified, 100)
assert.NoError(t, err)
}

assert.Eventually(t, func() bool {
l2Relayer.ProcessPendingBundles()

batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, nil, 0)
assert.NoError(t, err)
assert.Len(t, batches, 3)
batches = batches[1:]
for _, batch := range batches {
if types.RollupStatus(batch.RollupStatus) != types.RollupFinalized {
return false
}

assert.NotEmpty(t, batch.FinalizeTxHash)
receipt, getErr := l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.FinalizeTxHash))
assert.NoError(t, getErr)
assert.Equal(t, gethTypes.ReceiptStatusSuccessful, receipt.Status)
}

bundles, err := bundleOrm.GetBundles(context.Background(), map[string]interface{}{}, nil, 0)
assert.NoError(t, err)
assert.Len(t, bundles, 1)

bundle := bundles[0]
if types.RollupStatus(bundle.RollupStatus) != types.RollupFinalized {
return false
}
assert.NotEmpty(t, bundle.FinalizeTxHash)
receipt, err := l1Client.TransactionReceipt(context.Background(), common.HexToHash(bundle.FinalizeTxHash))
assert.NoError(t, err)
assert.Equal(t, gethTypes.ReceiptStatusSuccessful, receipt.Status)
batches, err = batchOrm.GetBatches(context.Background(), map[string]interface{}{"bundle_hash": bundle.Hash}, nil, 0)
assert.NoError(t, err)
assert.Len(t, batches, 2)
for _, batch := range batches {
assert.Equal(t, batch.RollupStatus, bundle.RollupStatus)
assert.Equal(t, bundle.FinalizeTxHash, batch.FinalizeTxHash)
}

return true
}, 30*time.Second, time.Second)

l2Relayer.StopSenders()
database.CloseDB(db)
}

func testCommitBatchAndFinalizeBundleCodecV7(t *testing.T) {
db := setupDB(t)

@@ -278,29 +110,22 @@ func testCommitBatchAndFinalizeBundleCodecV7(t *testing.T) {
}

blocks = append(blocks, &encoding.Block{
Header: &header,
Transactions: transactions,
WithdrawRoot: common.HexToHash("0x2"),
RowConsumption: &gethTypes.RowConsumption{},
Header: &header,
Transactions: transactions,
WithdrawRoot: common.HexToHash("0x2"),
})
parentHash = header.Hash()
}

cp := watcher.NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: 100,
MaxTxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MaxRowConsumptionPerChunk: 1048319,
ChunkTimeoutSec: 300,
MaxUncompressedBatchBytesSize: math.MaxUint64,
MaxBlockNumPerChunk: 100,
MaxL2GasPerChunk: math.MaxUint64,
ChunkTimeoutSec: 300,
}, encoding.CodecV7, chainConfig, db, nil)

bap := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxL1CommitGasPerBatch: 50000000000,
MaxL1CommitCalldataSizePerBatch: 1000000,
BatchTimeoutSec: 300,
MaxUncompressedBatchBytesSize: math.MaxUint64,
MaxChunksPerBatch: math.MaxInt32,
BatchTimeoutSec: 300,
}, encoding.CodecV7, chainConfig, db, nil)

bup := watcher.NewBundleProposer(context.Background(), &config.BundleProposerConfig{
16
zkvm-prover/Cargo.lock
generated
@@ -1547,7 +1547,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "117725a109d387c937a1533ce01b450cbde6b88abceea8473c4d7a85853cda3c"
dependencies = [
"lazy_static",
"windows-sys 0.59.0",
"windows-sys 0.48.0",
]

[[package]]
@@ -3623,7 +3623,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34"
dependencies = [
"cfg-if",
"windows-targets 0.52.6",
"windows-targets 0.48.5",
]

[[package]]
@@ -7607,7 +7607,7 @@ dependencies = [
[[package]]
name = "sbv-core"
version = "2.0.0"
source = "git+https://github.com/scroll-tech/stateless-block-verifier?branch=zkvm%2Feuclid-upgrade#7a70e412961921e8c7248e2675d0cfe9aa62a1aa"
source = "git+https://github.com/scroll-tech/stateless-block-verifier?branch=zkvm%2Feuclid-upgrade#2c46ab68c4d77bf27aa6c27bf7763bede4dab968"
dependencies = [
"reth-evm",
"reth-evm-ethereum",
@@ -7624,7 +7624,7 @@ dependencies = [
[[package]]
name = "sbv-helpers"
version = "2.0.0"
source = "git+https://github.com/scroll-tech/stateless-block-verifier?branch=zkvm%2Feuclid-upgrade#7a70e412961921e8c7248e2675d0cfe9aa62a1aa"
source = "git+https://github.com/scroll-tech/stateless-block-verifier?branch=zkvm%2Feuclid-upgrade#2c46ab68c4d77bf27aa6c27bf7763bede4dab968"
dependencies = [
"revm 19.4.0",
]
@@ -7632,7 +7632,7 @@ dependencies = [
[[package]]
name = "sbv-kv"
version = "2.0.0"
source = "git+https://github.com/scroll-tech/stateless-block-verifier?branch=zkvm%2Feuclid-upgrade#7a70e412961921e8c7248e2675d0cfe9aa62a1aa"
source = "git+https://github.com/scroll-tech/stateless-block-verifier?branch=zkvm%2Feuclid-upgrade#2c46ab68c4d77bf27aa6c27bf7763bede4dab968"
dependencies = [
"auto_impl",
"hashbrown 0.15.2",
@@ -7642,7 +7642,7 @@ dependencies = [
[[package]]
name = "sbv-primitives"
version = "2.0.0"
source = "git+https://github.com/scroll-tech/stateless-block-verifier?branch=zkvm%2Feuclid-upgrade#7a70e412961921e8c7248e2675d0cfe9aa62a1aa"
source = "git+https://github.com/scroll-tech/stateless-block-verifier?branch=zkvm%2Feuclid-upgrade#2c46ab68c4d77bf27aa6c27bf7763bede4dab968"
dependencies = [
"alloy-consensus",
"alloy-eips",
@@ -7673,7 +7673,7 @@ dependencies = [
[[package]]
name = "sbv-trie"
version = "2.0.0"
source = "git+https://github.com/scroll-tech/stateless-block-verifier?branch=zkvm%2Feuclid-upgrade#7a70e412961921e8c7248e2675d0cfe9aa62a1aa"
source = "git+https://github.com/scroll-tech/stateless-block-verifier?branch=zkvm%2Feuclid-upgrade#2c46ab68c4d77bf27aa6c27bf7763bede4dab968"
dependencies = [
"alloy-rlp",
"alloy-trie",
@@ -7688,7 +7688,7 @@ dependencies = [
[[package]]
name = "sbv-utils"
version = "2.0.0"
source = "git+https://github.com/scroll-tech/stateless-block-verifier?branch=zkvm%2Feuclid-upgrade#7a70e412961921e8c7248e2675d0cfe9aa62a1aa"
source = "git+https://github.com/scroll-tech/stateless-block-verifier?branch=zkvm%2Feuclid-upgrade#2c46ab68c4d77bf27aa6c27bf7763bede4dab968"
dependencies = [
"alloy-provider",
"alloy-transport",