Mirror of https://github.com/scroll-tech/scroll.git, synced 2026-01-11 23:18:07 -05:00

Compare commits: 16 commits
| SHA1 |
|---|
| da4f6818e3 |
| 200ca7c15b |
| 53cf26597d |
| f0f7341271 |
| b6af88c936 |
| de541a650a |
| d7a57235d3 |
| 91d21301ec |
| 4b32a44a70 |
| 55b400c5fb |
| 1b49091207 |
| 5b827c3c18 |
| 6b2eb80aa5 |
| 71f88b04f5 |
| bcd9764bcd |
| b4f8377a08 |
.github/workflows/common.yml (vendored), 2 changes:

```diff
@@ -29,7 +29,7 @@ jobs:
     steps:
       - uses: actions-rs/toolchain@v1
         with:
-          toolchain: nightly-2022-12-10
+          toolchain: nightly-2023-12-03
           override: true
           components: rustfmt, clippy
       - name: Install Go
```
.github/workflows/coordinator.yml (vendored), 8 changes:

```diff
@@ -33,13 +33,13 @@ jobs:
     steps:
       - uses: actions-rs/toolchain@v1
         with:
-          toolchain: nightly-2022-12-10
+          toolchain: nightly-2023-12-03
           override: true
           components: rustfmt, clippy
       - name: Install Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.20.x
+          go-version: 1.21.x
       - name: Checkout code
         uses: actions/checkout@v2
       - name: Lint
@@ -54,7 +54,7 @@ jobs:
       - name: Install Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.20.x
+          go-version: 1.21.x
       - name: Checkout code
         uses: actions/checkout@v2
       - name: Install goimports
@@ -95,7 +95,7 @@ jobs:
       - name: Install Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.20.x
+          go-version: 1.21.x
       - name: Checkout code
         uses: actions/checkout@v2
       - name: Install Solc
```
.github/workflows/docker.yml (vendored), 7 changes (each hunk adds a `platforms:` line to a build step):

```diff
@@ -46,6 +46,7 @@ jobs:
         with:
           context: .
           file: ./build/dockerfiles/event_watcher.Dockerfile
+          platforms: linux/amd64,linux/arm64
           push: true
           tags: |
             ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -90,6 +91,7 @@ jobs:
         with:
           context: .
           file: ./build/dockerfiles/gas_oracle.Dockerfile
+          platforms: linux/amd64,linux/arm64
           push: true
           tags: |
             ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -134,6 +136,7 @@ jobs:
         with:
           context: .
           file: ./build/dockerfiles/rollup_relayer.Dockerfile
+          platforms: linux/amd64,linux/arm64
           push: true
           tags: |
             ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -178,6 +181,7 @@ jobs:
         with:
           context: .
           file: ./build/dockerfiles/bridgehistoryapi-fetcher.Dockerfile
+          platforms: linux/amd64,linux/arm64
           push: true
           tags: |
             ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -222,6 +226,7 @@ jobs:
         with:
           context: .
           file: ./build/dockerfiles/bridgehistoryapi-api.Dockerfile
+          platforms: linux/amd64,linux/arm64
           push: true
           tags: |
             ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -266,6 +271,7 @@ jobs:
         with:
           context: .
           file: ./build/dockerfiles/coordinator-api.Dockerfile
+          platforms: linux/amd64
           push: true
           tags: |
             ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -310,6 +316,7 @@ jobs:
         with:
           context: .
           file: ./build/dockerfiles/coordinator-cron.Dockerfile
+          platforms: linux/amd64,linux/arm64
           push: true
           tags: |
             ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
```
.github/workflows/intermediate-docker.yml (vendored), 4 changes:

```diff
@@ -7,12 +7,12 @@ on:
         description: 'Go version'
         required: true
         type: string
-        default: '1.20'
+        default: '1.21'
       RUST_VERSION:
         description: 'Rust toolchain version'
         required: true
         type: string
-        default: 'nightly-2022-12-10'
+        default: 'nightly-2023-12-03'
       PYTHON_VERSION:
         description: 'Python version'
         required: false
```
.github/workflows/prover.yml (vendored), 2 changes:

```diff
@@ -50,7 +50,7 @@ jobs:
     steps:
       - uses: actions-rs/toolchain@v1
         with:
-          toolchain: nightly-2022-12-10
+          toolchain: nightly-2023-12-03
           override: true
           components: rustfmt, clippy
       - name: Install Go
```
Dockerfile and build-config changes (the per-file headers were not preserved in this view):

```diff
@@ -1,5 +1,5 @@
 # Download Go dependencies
-FROM golang:1.20-alpine3.16 as base
+FROM golang:1.21-alpine3.19 as base
 
 WORKDIR /src
 COPY go.mod* ./
```

```diff
@@ -1,5 +1,5 @@
 # Download Go dependencies
-FROM golang:1.20-alpine3.16 as base
+FROM golang:1.21-alpine3.19 as base
 
 WORKDIR /src
 COPY ./bridge-history-api/go.* ./
```

```diff
@@ -1,5 +1,5 @@
 # Download Go dependencies
-FROM golang:1.20-alpine3.16 as base
+FROM golang:1.21-alpine3.19 as base
 
 WORKDIR /src
 COPY go.mod* ./
```

```diff
@@ -1,5 +1,5 @@
 # Build libzkp dependency
-FROM scrolltech/go-rust-builder:go-1.20-rust-nightly-2022-12-10 as chef
+FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as chef
 WORKDIR app
 
 FROM chef as planner
@@ -17,7 +17,7 @@ RUN find ./ | grep libzktrie.so | xargs -I{} cp {} /app/target/release/
 
 
 # Download Go dependencies
-FROM scrolltech/go-rust-builder:go-1.20-rust-nightly-2022-12-10 as base
+FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
 WORKDIR /src
 COPY go.work* ./
 COPY ./rollup/go.* ./rollup/
```

```diff
@@ -1,5 +1,5 @@
 # Download Go dependencies
-FROM scrolltech/go-alpine-builder:1.20 as base
+FROM scrolltech/go-alpine-builder:1.21 as base
 
 WORKDIR /src
 COPY go.work* ./
```

```diff
@@ -1,5 +1,5 @@
 # Download Go dependencies
-FROM scrolltech/go-alpine-builder:1.20 as base
+FROM scrolltech/go-alpine-builder:1.21 as base
 
 WORKDIR /src
 COPY go.work* ./
```

```diff
@@ -1,5 +1,5 @@
 # Download Go dependencies
-FROM scrolltech/go-alpine-builder:1.20 as base
+FROM scrolltech/go-alpine-builder:1.21 as base
 
 WORKDIR /src
 COPY go.work* ./
```

```diff
@@ -1,5 +1,5 @@
 # Download Go dependencies
-FROM scrolltech/go-alpine-builder:1.20 as base
+FROM scrolltech/go-alpine-builder:1.21 as base
 
 WORKDIR /src
 COPY go.work* ./
```

```diff
@@ -1,8 +1,8 @@
 ifeq ($(GO_VERSION),)
-GO_VERSION=1.20
+GO_VERSION=1.21
 endif
 ifeq ($(RUST_VERSION),)
-RUST_VERSION=nightly-2022-12-10
+RUST_VERSION=nightly-2023-12-03
 endif
 ifeq ($(PYTHON_VERSION),)
 PYTHON_VERSION=3.10
```

```diff
@@ -1,6 +1,6 @@
 ARG CUDA_VERSION=11.7.1
-ARG GO_VERSION=1.20
-ARG RUST_VERSION=nightly-2022-12-10
+ARG GO_VERSION=1.21
+ARG RUST_VERSION=nightly-2023-12-03
 ARG CARGO_CHEF_TAG=0.1.41
 
 FROM nvidia/cuda:${CUDA_VERSION}-devel-ubuntu22.04
```

```diff
@@ -1,4 +1,4 @@
-ARG GO_VERSION=1.20
+ARG GO_VERSION=1.21
 
 FROM golang:${GO_VERSION}-alpine
```

```diff
@@ -1,5 +1,5 @@
-ARG GO_VERSION=1.20
-ARG RUST_VERSION=nightly-2022-12-10
+ARG GO_VERSION=1.21
+ARG RUST_VERSION=nightly-2023-12-03
 ARG CARGO_CHEF_TAG=0.1.41
 
 FROM golang:${GO_VERSION}-alpine
```

```diff
@@ -1,5 +1,5 @@
-ARG GO_VERSION=1.20
-ARG RUST_VERSION=nightly-2022-12-10
+ARG GO_VERSION=1.21
+ARG RUST_VERSION=nightly-2023-12-03
 ARG CARGO_CHEF_TAG=0.1.41
 
 FROM ubuntu:20.04
```

```diff
@@ -1,5 +1,5 @@
 ARG ALPINE_VERSION=3.15
-ARG RUST_VERSION=nightly-2022-12-10
+ARG RUST_VERSION=nightly-2023-12-03
 ARG CARGO_CHEF_TAG=0.1.41
 
 FROM alpine:${ALPINE_VERSION}
```

```diff
@@ -1,4 +1,4 @@
-ARG RUST_VERSION=nightly-2022-12-10
+ARG RUST_VERSION=nightly-2023-12-03
 ARG CARGO_CHEF_TAG=0.1.41
 
 FROM ubuntu:20.04
```

```diff
@@ -1,5 +1,5 @@
 # Download Go dependencies
-FROM scrolltech/go-alpine-builder:1.20 as base
+FROM scrolltech/go-alpine-builder:1.21 as base
 
 WORKDIR /src
 COPY go.work* ./
```
common/libzkp/impl/Cargo.lock (generated), 22 changes (every zkevm-circuits dependency is re-pinned from tag v0.10.2 to v0.10.3):

```diff
@@ -31,7 +31,7 @@ dependencies = [
 [[package]]
 name = "aggregator"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.2#31ca6a0068d2c21f6d179780823e47b54403dba8"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
 dependencies = [
  "ark-std 0.3.0",
  "c-kzg",
@@ -521,7 +521,7 @@ checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1"
 [[package]]
 name = "bus-mapping"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.2#31ca6a0068d2c21f6d179780823e47b54403dba8"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
 dependencies = [
  "eth-types",
  "ethers-core",
@@ -1139,7 +1139,7 @@ dependencies = [
 [[package]]
 name = "eth-types"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.2#31ca6a0068d2c21f6d179780823e47b54403dba8"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
 dependencies = [
  "base64 0.13.1",
  "ethers-core",
@@ -1293,7 +1293,7 @@ dependencies = [
 [[package]]
 name = "external-tracer"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.2#31ca6a0068d2c21f6d179780823e47b54403dba8"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
 dependencies = [
  "eth-types",
  "geth-utils",
@@ -1485,7 +1485,7 @@ dependencies = [
 [[package]]
 name = "gadgets"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.2#31ca6a0068d2c21f6d179780823e47b54403dba8"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
 dependencies = [
  "eth-types",
  "halo2_proofs",
@@ -1507,7 +1507,7 @@ dependencies = [
 [[package]]
 name = "geth-utils"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.2#31ca6a0068d2c21f6d179780823e47b54403dba8"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
 dependencies = [
  "env_logger 0.10.0",
  "gobuild",
@@ -2142,7 +2142,7 @@ dependencies = [
 [[package]]
 name = "keccak256"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.2#31ca6a0068d2c21f6d179780823e47b54403dba8"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
 dependencies = [
  "env_logger 0.10.0",
  "eth-types",
@@ -2292,7 +2292,7 @@ dependencies = [
 [[package]]
 name = "mock"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.2#31ca6a0068d2c21f6d179780823e47b54403dba8"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
 dependencies = [
  "eth-types",
  "ethers-core",
@@ -2307,7 +2307,7 @@ dependencies = [
 [[package]]
 name = "mpt-zktrie"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.2#31ca6a0068d2c21f6d179780823e47b54403dba8"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
 dependencies = [
  "eth-types",
  "halo2-mpt-circuits",
@@ -2769,7 +2769,7 @@ dependencies = [
 [[package]]
 name = "prover"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.2#31ca6a0068d2c21f6d179780823e47b54403dba8"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
 dependencies = [
  "aggregator",
  "anyhow",
@@ -4456,7 +4456,7 @@ dependencies = [
 [[package]]
 name = "zkevm-circuits"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.2#31ca6a0068d2c21f6d179780823e47b54403dba8"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.3#5776400eca902bf9a69306a07ea62ca6300dff76"
 dependencies = [
  "array-init",
  "bus-mapping",
```
```diff
@@ -25,7 +25,7 @@ bls12_381 = { git = "https://github.com/scroll-tech/bls12_381", branch = "feat/i
 
 [dependencies]
 halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" }
 snark-verifier-sdk = { git = "https://github.com/scroll-tech/snark-verifier", branch = "develop", default-features = false, features = ["loader_halo2", "loader_evm", "halo2-pse"] }
-prover = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.10.2", default-features = false, features = ["parallel_syn", "scroll", "shanghai"] }
+prover = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.10.3", default-features = false, features = ["parallel_syn", "scroll", "shanghai"] }
 
 base64 = "0.13.0"
 env_logger = "0.9.0"
```
```diff
@@ -61,8 +61,11 @@ func (t *TestcontainerApps) StartL1GethContainer() error {
 	req := testcontainers.ContainerRequest{
 		Image:        "scroll_l1geth",
 		ExposedPorts: []string{"8546/tcp", "8545/tcp"},
-		WaitingFor:   wait.ForHTTP("/").WithPort("8545").WithStartupTimeout(100 * time.Second),
-		Cmd:          []string{"--log.debug", "ANY"},
+		WaitingFor: wait.ForAll(
+			wait.ForListeningPort("8546").WithStartupTimeout(100*time.Second),
+			wait.ForListeningPort("8545").WithStartupTimeout(100*time.Second),
+		),
+		Cmd: []string{"--log.debug", "ANY"},
 	}
 	genericContainerReq := testcontainers.GenericContainerRequest{
 		ContainerRequest: req,
@@ -85,7 +88,10 @@ func (t *TestcontainerApps) StartL2GethContainer() error {
 	req := testcontainers.ContainerRequest{
 		Image:        "scroll_l2geth",
 		ExposedPorts: []string{"8546/tcp", "8545/tcp"},
-		WaitingFor:   wait.ForHTTP("/").WithPort("8545").WithStartupTimeout(100 * time.Second),
+		WaitingFor: wait.ForAll(
+			wait.ForListeningPort("8546").WithStartupTimeout(100*time.Second),
+			wait.ForListeningPort("8545").WithStartupTimeout(100*time.Second),
+		),
 	}
 	genericContainerReq := testcontainers.GenericContainerRequest{
 		ContainerRequest: req,
```
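For context, the change above swaps a single HTTP readiness probe on port 8545 for a composite TCP probe on both RPC ports. A minimal sketch of the new strategy in isolation, using the testcontainers-go `wait` package (the helper name is hypothetical; the request fields mirror the diff above):

```go
package testutil

import (
	"time"

	"github.com/testcontainers/testcontainers-go"
	"github.com/testcontainers/testcontainers-go/wait"
)

// gethContainerRequest builds a request that is considered ready only when
// both the WebSocket (8546) and HTTP (8545) RPC ports accept TCP
// connections, rather than when one HTTP probe on 8545 succeeds.
func gethContainerRequest(image string) testcontainers.ContainerRequest {
	return testcontainers.ContainerRequest{
		Image:        image,
		ExposedPorts: []string{"8546/tcp", "8545/tcp"},
		WaitingFor: wait.ForAll(
			wait.ForListeningPort("8546").WithStartupTimeout(100*time.Second),
			wait.ForListeningPort("8545").WithStartupTimeout(100*time.Second),
		),
	}
}
```

`wait.ForAll` only reports ready once every nested strategy succeeds, so a node whose HTTP endpoint comes up before its WebSocket endpoint no longer passes the readiness check early.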
```diff
@@ -227,18 +227,11 @@ func NewDABatch(batch *encoding.Batch) (*DABatch, error) {
 	}
 
 	// blob payload
-	blob, z, err := constructBlobPayload(batch.Chunks)
+	blob, blobVersionedHash, z, err := constructBlobPayload(batch.Chunks)
 	if err != nil {
 		return nil, err
 	}
 
-	// blob versioned hash
-	c, err := kzg4844.BlobToCommitment(*blob)
-	if err != nil {
-		return nil, fmt.Errorf("failed to create blob commitment")
-	}
-	blobVersionedHash := kzg4844.CalcBlobHashV1(sha256.New(), &c)
-
 	daBatch := DABatch{
 		Version:    CodecV1Version,
 		BatchIndex: batch.Index,
@@ -281,7 +274,7 @@ func computeBatchDataHash(chunks []*encoding.Chunk, totalL1MessagePoppedBefore uint64) (common.Hash, error) {
 }
 
 // constructBlobPayload constructs the 4844 blob payload.
-func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, *kzg4844.Point, error) {
+func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, common.Hash, *kzg4844.Point, error) {
 	// metadata consists of num_chunks (2 bytes) and chunki_size (4 bytes per chunk)
 	metadataLength := 2 + MaxNumChunks*4
 
@@ -289,8 +282,8 @@ func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, *kzg4844.Point, error) {
 	blobBytes := make([]byte, metadataLength)
 
 	// challenge digest preimage
-	// 1 hash for metadata and 1 for each chunk
-	challengePreimage := make([]byte, (1+MaxNumChunks)*32)
+	// 1 hash for metadata, 1 hash for each chunk, 1 hash for blob versioned hash
+	challengePreimage := make([]byte, (1+MaxNumChunks+1)*32)
 
 	// the chunk data hash used for calculating the challenge preimage
 	var chunkDataHash common.Hash
@@ -309,7 +302,7 @@ func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, common.Hash, *kzg4844.Point, error) {
 		// encode L2 txs into blob payload
 		rlpTxData, err := encoding.ConvertTxDataToRLPEncoding(tx)
 		if err != nil {
-			return nil, nil, err
+			return nil, common.Hash{}, nil, err
 		}
 		blobBytes = append(blobBytes, rlpTxData...)
 	}
@@ -341,9 +334,19 @@ func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, common.Hash, *kzg4844.Point, error) {
 	// convert raw data to BLSFieldElements
 	blob, err := makeBlobCanonical(blobBytes)
 	if err != nil {
-		return nil, nil, err
+		return nil, common.Hash{}, nil, err
 	}
 
+	// compute blob versioned hash
+	c, err := kzg4844.BlobToCommitment(*blob)
+	if err != nil {
+		return nil, common.Hash{}, nil, fmt.Errorf("failed to create blob commitment")
+	}
+	blobVersionedHash := kzg4844.CalcBlobHashV1(sha256.New(), &c)
+
+	// challenge: append blob versioned hash
+	copy(challengePreimage[(1+MaxNumChunks)*32:], blobVersionedHash[:])
+
 	// compute z = challenge_digest % BLS_MODULUS
 	challengeDigest := crypto.Keccak256Hash(challengePreimage)
 	pointBigInt := new(big.Int).Mod(new(big.Int).SetBytes(challengeDigest[:]), BLSModulus)
@@ -354,7 +357,7 @@ func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, common.Hash, *kzg4844.Point, error) {
 	start := 32 - len(pointBytes)
 	copy(z[start:], pointBytes)
 
-	return blob, &z, nil
+	return blob, blobVersionedHash, &z, nil
 }
 
 // makeBlobCanonical converts the raw blob data into the canonical blob representation of 4096 BLSFieldElements.
```
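To make the refactor's data flow explicit: `constructBlobPayload` now derives the KZG commitment and EIP-4844 versioned hash itself, appends the versioned hash as the final 32-byte word of the challenge preimage, and reduces the preimage's Keccak digest modulo the BLS modulus to obtain the evaluation point z. A condensed, self-contained sketch of that tail (the function name and parameters are illustrative; the kzg4844 and crypto calls are the ones used in the diff):

```go
package codecv1sketch

import (
	"crypto/sha256"
	"fmt"
	"math/big"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/crypto"
	"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
)

// versionedHashAndChallengePoint sketches the tail of the new
// constructBlobPayload: commit to the blob, derive its versioned hash,
// fold that hash into the last 32-byte slot of the challenge preimage,
// and reduce the Keccak digest into the field point z.
func versionedHashAndChallengePoint(blob *kzg4844.Blob, challengePreimage []byte, maxNumChunks int, blsModulus *big.Int) (common.Hash, kzg4844.Point, error) {
	c, err := kzg4844.BlobToCommitment(*blob)
	if err != nil {
		return common.Hash{}, kzg4844.Point{}, fmt.Errorf("failed to create blob commitment")
	}
	blobVersionedHash := kzg4844.CalcBlobHashV1(sha256.New(), &c)

	// the preimage is (1 + MaxNumChunks + 1) * 32 bytes long; the
	// versioned hash occupies the final slot
	copy(challengePreimage[(1+maxNumChunks)*32:], blobVersionedHash[:])

	// z = keccak256(challengePreimage) mod BLS_MODULUS, left-padded to 32 bytes
	challengeDigest := crypto.Keccak256Hash(challengePreimage)
	pointBigInt := new(big.Int).Mod(new(big.Int).SetBytes(challengeDigest[:]), blsModulus)
	pointBytes := pointBigInt.Bytes()

	var z kzg4844.Point
	copy(z[32-len(pointBytes):], pointBytes)
	return blobVersionedHash, z, nil
}
```

Because the versioned hash now feeds the challenge digest, every expected z and y value changes, which is what the bulk of the test updates below reflect.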
@@ -479,55 +479,55 @@ func TestCodecV1BatchChallenge(t *testing.T) {
|
||||
originalBatch := &encoding.Batch{Chunks: []*encoding.Chunk{chunk2}}
|
||||
batch, err := NewDABatch(originalBatch)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "06138a688f328d13cb9caf0e2046d65bbcf766eab00196fb05e43806c7b26b36", hex.EncodeToString(batch.z[:]))
|
||||
assert.Equal(t, "0d8e67f882c61159aa99b04ec4f6f3d90cb95cbfba6efd56cefc55ca15b290ef", hex.EncodeToString(batch.z[:]))
|
||||
|
||||
trace3 := readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
|
||||
chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{trace3}}
|
||||
originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk3}}
|
||||
batch, err = NewDABatch(originalBatch)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "1e3f41f46941b3d30bbc482942026b09224636ed63a160738d7ae57a00c99294", hex.EncodeToString(batch.z[:]))
|
||||
assert.Equal(t, "32da228f4945de828954675f9396debb169bbf336ba93f849a8fc7fee1bc9e58", hex.EncodeToString(batch.z[:]))
|
||||
|
||||
trace4 := readBlockFromJSON(t, "../../../testdata/blockTrace_04.json")
|
||||
chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{trace4}}
|
||||
originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk4}}
|
||||
batch, err = NewDABatch(originalBatch)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "37c3ab6ad48e99fc0ce8e9de5f9b2c9be832699b293e4243b85d4e42bad0db7a", hex.EncodeToString(batch.z[:]))
|
||||
assert.Equal(t, "09a37ab43d41bcae3000c090a341e4661a8dc705b3c93d01b9eda3a0b3f8d4a8", hex.EncodeToString(batch.z[:]))
|
||||
|
||||
trace5 := readBlockFromJSON(t, "../../../testdata/blockTrace_05.json")
|
||||
chunk5 := &encoding.Chunk{Blocks: []*encoding.Block{trace5}}
|
||||
originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk5}}
|
||||
batch, err = NewDABatch(originalBatch)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "1fa77f72d924ed6efdc399cf7a3de45fd3b50538d368d80d94840d30fdb606ec", hex.EncodeToString(batch.z[:]))
|
||||
assert.Equal(t, "17c71700d949f82963d3bd6af3994ecc383a3d58007f2f27702758fefa34a925", hex.EncodeToString(batch.z[:]))
|
||||
|
||||
trace6 := readBlockFromJSON(t, "../../../testdata/blockTrace_06.json")
|
||||
chunk6 := &encoding.Chunk{Blocks: []*encoding.Block{trace6}}
|
||||
originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk6}}
|
||||
batch, err = NewDABatch(originalBatch)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "1fa77f72d924ed6efdc399cf7a3de45fd3b50538d368d80d94840d30fdb606ec", hex.EncodeToString(batch.z[:]))
|
||||
assert.Equal(t, "17c71700d949f82963d3bd6af3994ecc383a3d58007f2f27702758fefa34a925", hex.EncodeToString(batch.z[:]))
|
||||
|
||||
trace7 := readBlockFromJSON(t, "../../../testdata/blockTrace_07.json")
|
||||
chunk7 := &encoding.Chunk{Blocks: []*encoding.Block{trace7}}
|
||||
originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk7}}
|
||||
batch, err = NewDABatch(originalBatch)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "1fa77f72d924ed6efdc399cf7a3de45fd3b50538d368d80d94840d30fdb606ec", hex.EncodeToString(batch.z[:]))
|
||||
assert.Equal(t, "17c71700d949f82963d3bd6af3994ecc383a3d58007f2f27702758fefa34a925", hex.EncodeToString(batch.z[:]))
|
||||
|
||||
// 15 chunks
|
||||
originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2}}
|
||||
batch, err = NewDABatch(originalBatch)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "0244c987922db21694e8eb0184c4a5e6f3785fb688224822f1f826874ed5aae2", hex.EncodeToString(batch.z[:]))
|
||||
assert.Equal(t, "55dac3baa818133cfdce0f97ddbb950e341399756d7b49bc34107dd65ecd3a4b", hex.EncodeToString(batch.z[:]))
|
||||
|
||||
chunk8 := &encoding.Chunk{Blocks: []*encoding.Block{trace2, trace3, trace4}}
|
||||
chunk9 := &encoding.Chunk{Blocks: []*encoding.Block{trace5}}
|
||||
originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk8, chunk9}}
|
||||
batch, err = NewDABatch(originalBatch)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "03523cd88a7227826e093305cbe4ce237e8df38e2157566fb3742cc39dbc9c43", hex.EncodeToString(batch.z[:]))
|
||||
assert.Equal(t, "0b14dce4abfdeb3a69a341f7db6b1e16162c20826e6d964a829e20f671030cab", hex.EncodeToString(batch.z[:]))
|
||||
}
|
||||
|
||||
func repeat(element byte, count int) string {
|
||||
@@ -547,31 +547,31 @@ func TestCodecV1BatchChallengeWithStandardTestCases(t *testing.T) {
|
||||
expectedy string
|
||||
}{
|
||||
// single empty chunk
|
||||
{chunks: [][]string{{}}, expectedz: "1fa77f72d924ed6efdc399cf7a3de45fd3b50538d368d80d94840d30fdb606ec", expectedy: "28bda8f1836f60a3879f4253c4f51b3e41a905449b60a83a594f9f2487e8df51"},
|
||||
{chunks: [][]string{{}}, expectedz: "17c71700d949f82963d3bd6af3994ecc383a3d58007f2f27702758fefa34a925", expectedy: "304817c2a9ec97b4cfdfc7a646f4bd5ac309e967465bb49059d397094e57cd08"},
|
||||
// single non-empty chunk
|
||||
{chunks: [][]string{{"0x010203"}}, expectedz: "30a9d6cfc2b87fb00d80e7fea28ebb9eff0bd526dbf1da32acfe8c5fd49632ff", expectedy: "723515444cb320fe437b9cea3b51293f5fbcb5913739ad35eab28b1863f7c312"},
|
||||
{chunks: [][]string{{"0x010203"}}, expectedz: "1c1d4bd5153f877d799853080aba243f2c186dd6d6064eaefacfe715c92b6354", expectedy: "24e80ed99526b0d15ba46f7ec682f517576ddae68d5131e5d351f8bae06ea7d3"},
|
||||
// multiple empty chunks
|
||||
{chunks: [][]string{{}, {}}, expectedz: "17772348f946a4e4adfcaf5c1690d078933b6b090ca9a52fab6c7e545b1007ae", expectedy: "05ba9abbc81a1c97f4cdaa683a7e0c731d9dfd88feef8f7b2fcfd79e593662b5"},
|
||||
{chunks: [][]string{{}, {}}, expectedz: "152c9ccfcc2884f9891f7adce2de110cf9f85bfd0e21f0933ae0636390a84d41", expectedy: "5f6f532676e25b49e2eae77513fbeca173a300b434c0a5e24fa554b68e27d582"},
|
||||
// multiple non-empty chunks
|
||||
{chunks: [][]string{{"0x010203"}, {"0x070809"}}, expectedz: "60376321eea0886c29bd97d95851c7b5fbdb064c8adfdadd7678617b32b3ebf2", expectedy: "50cfbcece01cadb4eade40649e17b140b31f96088097e38f020e31dfe6551604"},
|
||||
{chunks: [][]string{{"0x010203"}, {"0x070809"}}, expectedz: "62100f5381179ea7db7aa8fdedb0f7fc7b82730b75432d50ab41f80aeebe45a3", expectedy: "5b1f6e7a54907ddc06871853cf1f5d53bf2de0df7b61d0df84bc2c3fb80320cd"},
|
||||
// empty chunk followed by non-empty chunk
|
||||
{chunks: [][]string{{}, {"0x010203"}}, expectedz: "054539f03564eda9462d582703cde0788e4e27c311582ddfb19835358273a7ca", expectedy: "1fba03580b5908c4c66b48e79c10e7a34e4b27ed37a1a049b3e17e017cad5245"},
|
||||
{chunks: [][]string{{}, {"0x010203"}}, expectedz: "2d94d241c4a2a8d8f02845ca40cfba344f3b42384af2045a75c82e725a184232", expectedy: "302416c177e9e7fe40c3bc4315066c117e27d246b0a33ef68cdda6dd333c485c"},
|
||||
// non-empty chunk followed by empty chunk
|
||||
{chunks: [][]string{{"0x070809"}, {}}, expectedz: "0b82dceaa6ca4b5d704590c921accfd991b56b5ad0212e6a4e63e54915a2053b", expectedy: "2362f3a0c87f0ea11eb898ed608c7f09a42926a058d4c5d111a0f54cad10ebbd"},
|
||||
{chunks: [][]string{{"0x070809"}, {}}, expectedz: "7227567e3b1dbacb48a32bb85e4e99f73e4bd5620ea8cd4f5ac00a364c86af9c", expectedy: "2eb3dfd28362f35f562f779e749a555d2f1f87ddc716e95f04133d25189a391c"},
|
||||
// max number of chunks all empty
|
||||
{chunks: [][]string{{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}}, expectedz: "174cd3ba9b2ae8ab789ec0b5b8e0b27ee122256ec1756c383dbf2b5b96903f1b", expectedy: "225cab9658904181671eb7abc342ffc36a6836048b64a67f0fb758439da2567b"},
|
||||
{chunks: [][]string{{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}}, expectedz: "1128ac3e22ced6af85be4335e0d03a266946a7cade8047e7fc59d6c8be642321", expectedy: "2d9b16422ce17f328fd00c99349768f0cb0c8648115eb3bd9b7864617ba88059"},
|
||||
// max number of chunks all non-empty
|
||||
{chunks: [][]string{{"0x0a"}, {"0x0a0b"}, {"0x0a0b0c"}, {"0x0a0b0c0d"}, {"0x0a0b0c0d0e"}, {"0x0a0b0c0d0e0f"}, {"0x0a0b0c0d0e0f10"}, {"0x0a0b0c0d0e0f1011"}, {"0x0a0b0c0d0e0f101112"}, {"0x0a0b0c0d0e0f10111213"}, {"0x0a0b0c0d0e0f1011121314"}, {"0x0a0b0c0d0e0f101112131415"}, {"0x0a0b0c0d0e0f10111213141516"}, {"0x0a0b0c0d0e0f1011121314151617"}, {"0x0a0b0c0d0e0f101112131415161718"}}, expectedz: "1e93e961cdfb4bd26a5be48f23af4f1aa8c6bebe57a089d3250f8afb1e988bf8", expectedy: "24ed4791a70b28a6bad21c22d58f82a5ea5f9f9d2bcfc07428b494e9ae93de6e"},
|
||||
{chunks: [][]string{{"0x0a"}, {"0x0a0b"}, {"0x0a0b0c"}, {"0x0a0b0c0d"}, {"0x0a0b0c0d0e"}, {"0x0a0b0c0d0e0f"}, {"0x0a0b0c0d0e0f10"}, {"0x0a0b0c0d0e0f1011"}, {"0x0a0b0c0d0e0f101112"}, {"0x0a0b0c0d0e0f10111213"}, {"0x0a0b0c0d0e0f1011121314"}, {"0x0a0b0c0d0e0f101112131415"}, {"0x0a0b0c0d0e0f10111213141516"}, {"0x0a0b0c0d0e0f1011121314151617"}, {"0x0a0b0c0d0e0f101112131415161718"}}, expectedz: "1a4025a3d74e70b511007dd55a2e252478c48054c6383285e8a176f33d99853b", expectedy: "12071ac2571c11220432a27b8be549392892e9baf4c654748ca206def3843940"},
|
||||
// single chunk blob full
|
||||
{chunks: [][]string{{repeat(123, nRowsData)}}, expectedz: "61405cb0b114dfb4d611be84bedba0fcd2e55615e193e424f1cc7b1af0df3d31", expectedy: "58609bbca10e50489b630ecb5b9347378579ed784d6a10749fd505055d35c3c0"},
|
||||
{chunks: [][]string{{repeat(123, nRowsData)}}, expectedz: "72714cc4a0ca75cee2d543b1f958e3d3dd59ac7df0d9d5617d8117b65295a5f2", expectedy: "4ebb690362bcbc42321309c210c99f2ebdb53b3fcf7cf3b17b78f6cfd1203ed3"},
|
||||
// multiple chunks blob full
|
||||
{chunks: [][]string{{repeat(123, 1111)}, {repeat(231, nRowsData-1111)}}, expectedz: "22533c3ea99536b4b83a89835aa91e6f0d2fc3866c201e18d7ca4b3af92fad61", expectedy: "40d4b71492e1a06ee3c273ef9003c7cb05aed021208871e13fa33302fa0f4dcc"},
|
||||
{chunks: [][]string{{repeat(123, 1111)}, {repeat(231, nRowsData-1111)}}, expectedz: "70eb5b4db503e59413238eef451871c5d12f2bb96c8b96ceca012f4ca0114727", expectedy: "568d0aaf280ec83f9c81ed2d80ecbdf199bd72dafb8a350007d37ea82997e455"},
|
||||
// max number of chunks only last one non-empty not full blob
|
||||
{chunks: [][]string{{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {repeat(132, nRowsData-1111)}}, expectedz: "0e6525c0dd261e8f62342b1139062bb23bc2b8b460163364598fb29e82a4eed5", expectedy: "1db984d6deb5e84bc67d0755aa2da8fe687233147603b4ecba94d0c8463c3836"},
|
||||
{chunks: [][]string{{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {repeat(132, nRowsData-1111)}}, expectedz: "03db68ae16ee88489d52db19e6111b25630c5f23ad7cd14530aacf0cd231d476", expectedy: "24527d0b0e93b3dec0060c7b128975a8088b3104d3a297dc807ab43862a77a1a"},
|
||||
// max number of chunks only last one non-empty full blob
|
||||
{chunks: [][]string{{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {repeat(132, nRowsData)}}, expectedz: "3a638eac98f22f817b84e3d81ccaa3de080f83dc80a5823a3f19320ef3cb6fc8", expectedy: "73ab100278822144e2ed8c9d986e92f7a2662fd18a51bdf96ec55848578b227a"},
|
||||
{chunks: [][]string{{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {repeat(132, nRowsData)}}, expectedz: "677670193f73db499cede572bcb55677f0d2f13d690f9a820bd00bf584c3c241", expectedy: "1d85677f172dbdf4ad3094a17deeb1df4d7d2b7f35ecea44aebffa757811a268"},
|
||||
// max number of chunks but last is empty
|
||||
{chunks: [][]string{{repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {}}, expectedz: "02ef442d99f450559647a7823f1be0e148c75481cc5c703c02a116e8ac531fa8", expectedy: "31743538cfc3ac43d1378a5c497ebc9462c20b4cb4470e0e7a9f7342ea948333"},
|
||||
{chunks: [][]string{{repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {repeat(111, 100)}, {}}, expectedz: "22935042dfe7df771b02c1f5cababfe508869e8f6339dabe25a8a32e37728bb0", expectedy: "48ca66fb5a094401728c3a6a517ffbd72c4d4d9a8c907e2d2f1320812f4d856f"},
|
||||
} {
|
||||
chunks := []*encoding.Chunk{}
|
||||
|
||||
@@ -587,7 +587,7 @@ func TestCodecV1BatchChallengeWithStandardTestCases(t *testing.T) {
|
||||
chunks = append(chunks, chunk)
|
||||
}
|
||||
|
||||
b, z, err := constructBlobPayload(chunks)
|
||||
b, _, z, err := constructBlobPayload(chunks)
|
||||
assert.NoError(t, err)
|
||||
actualZ := hex.EncodeToString(z[:])
|
||||
assert.Equal(t, tc.expectedz, actualZ)
|
||||
```diff
@@ -608,7 +608,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
 	assert.NoError(t, err)
 	verifyData, err := batch.BlobDataProof()
 	assert.NoError(t, err)
-	assert.Equal(t, "06138a688f328d13cb9caf0e2046d65bbcf766eab00196fb05e43806c7b26b363d27683f7aab53cf071e2c8c8f3abfe750d206c048489450d120679cdc823f7db44a38af1f9a6c70cd3ccfbf71968f447aa566bbafb0bbc566fc9eeb42973484802635a1bbd8305d34a46693331bf607a30dad96431f70551dd950c1426131d73ccea6d050d38dea123aad90aa8c0b734c98e8e04bd8ea8f19b415f2d85156d8", hex.EncodeToString(verifyData))
+	assert.Equal(t, "0d8e67f882c61159aa99b04ec4f6f3d90cb95cbfba6efd56cefc55ca15b290ef423dc493f1dd7c9fbecdffa021ca4649b13e8d72231487034ec6b27e155ecfd7b44a38af1f9a6c70cd3ccfbf71968f447aa566bbafb0bbc566fc9eeb42973484802635a1bbd8305d34a46693331bf607b38542ec811c92d86ff6f3319de06ee60c42655278ccf874f3615f450de730895276828b73db03c553b0bc7e5474a5e0", hex.EncodeToString(verifyData))
 
 	trace3 := readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
 	chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{trace3}}
@@ -617,7 +617,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
 	assert.NoError(t, err)
 	verifyData, err = batch.BlobDataProof()
 	assert.NoError(t, err)
-	assert.Equal(t, "1e3f41f46941b3d30bbc482942026b09224636ed63a160738d7ae57a00c992946dc7e51a42a31f429bc1f321dcf020b9a661225259522dba186fcfe5dc012191b8aab265dc352e352807a298f7bb99d432c7cd543e63158cbdb8fbf99f3182a71af35ccbed2693c5e0bc5be38d565e86a0b3c76e33edb24eb07faeaa5d3f2b15a55df6ab99abf828b5803f5681dc634602eb7469ee0556563b2eccebf16ec822", hex.EncodeToString(verifyData))
+	assert.Equal(t, "32da228f4945de828954675f9396debb169bbf336ba93f849a8fc7fee1bc9e5821975f318babe50be728f9b52754d5ce2caa2ba82ba35b5888af1c5f28d23206b8aab265dc352e352807a298f7bb99d432c7cd543e63158cbdb8fbf99f3182a71af35ccbed2693c5e0bc5be38d565e868e0c6fe7bd39baa5ee6339cd334a18af7c680d24e825262499e83b31633b13a9ee89813fae8441630c82bc9dce3f1e07", hex.EncodeToString(verifyData))
 
 	trace4 := readBlockFromJSON(t, "../../../testdata/blockTrace_04.json")
 	chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{trace4}}
@@ -626,7 +626,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
 	assert.NoError(t, err)
 	verifyData, err = batch.BlobDataProof()
 	assert.NoError(t, err)
-	assert.Equal(t, "37c3ab6ad48e99fc0ce8e9de5f9b2c9be832699b293e4243b85d4e42bad0db7a24164e6ea8b7946ce5e40d2baa4f6aa0d030076f6074295288133c00e75dafa2afd4e1c55a17dbdf8390b5736158afe238d82f8b696669ba47015fcdfd4d1becd0ff7a47f8f379a4ac8d1741e2d676248f5ca4a9f0d9b7fa48f5f649dc84e928161fd99ad1bd9a9879b05d29c5f718bfb3b0a696a5f3ed50b5b8c6a9d530b3ee", hex.EncodeToString(verifyData))
+	assert.Equal(t, "09a37ab43d41bcae3000c090a341e4661a8dc705b3c93d01b9eda3a0b3f8d4a8088a01e54e3565d2e91ce6afbadf479330847d9106737875303ce17f17c48722afd4e1c55a17dbdf8390b5736158afe238d82f8b696669ba47015fcdfd4d1becd0ff7a47f8f379a4ac8d1741e2d67624aee03a0f7cdb7807bc7e0b9fb20bc299af2a35e38cda816708b40f2f18db491e14a0f5d9cfe2f4c12e4ca1a219484f17", hex.EncodeToString(verifyData))
 
 	trace5 := readBlockFromJSON(t, "../../../testdata/blockTrace_05.json")
 	chunk5 := &encoding.Chunk{Blocks: []*encoding.Block{trace5}}
@@ -635,7 +635,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
 	assert.NoError(t, err)
 	verifyData, err = batch.BlobDataProof()
 	assert.NoError(t, err)
-	assert.Equal(t, "1fa77f72d924ed6efdc399cf7a3de45fd3b50538d368d80d94840d30fdb606ec28bda8f1836f60a3879f4253c4f51b3e41a905449b60a83a594f9f2487e8df518f26f349339c68b33ce856aa2c05b8f89e7c23db0c00817550679998efcbd8f2464f9e1ea6c3172b0b750603d1e4ea389598d958507378f8212199c51c059f8c419fd809dcc7de5750f76220c9c54cd57ad18cb3c38c127559a133df250f66b7", hex.EncodeToString(verifyData))
+	assert.Equal(t, "17c71700d949f82963d3bd6af3994ecc383a3d58007f2f27702758fefa34a925304817c2a9ec97b4cfdfc7a646f4bd5ac309e967465bb49059d397094e57cd088f26f349339c68b33ce856aa2c05b8f89e7c23db0c00817550679998efcbd8f2464f9e1ea6c3172b0b750603d1e4ea38979341a25ec6b613f9f32b23fc0e1a11342bc84d4af0705c666e7813de790d0e63b0a9bc56dc484590728aaaafa6b7a4", hex.EncodeToString(verifyData))
 
 	trace6 := readBlockFromJSON(t, "../../../testdata/blockTrace_06.json")
 	chunk6 := &encoding.Chunk{Blocks: []*encoding.Block{trace6}}
@@ -644,7 +644,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
 	assert.NoError(t, err)
 	verifyData, err = batch.BlobDataProof()
 	assert.NoError(t, err)
-	assert.Equal(t, "1fa77f72d924ed6efdc399cf7a3de45fd3b50538d368d80d94840d30fdb606ec28bda8f1836f60a3879f4253c4f51b3e41a905449b60a83a594f9f2487e8df518f26f349339c68b33ce856aa2c05b8f89e7c23db0c00817550679998efcbd8f2464f9e1ea6c3172b0b750603d1e4ea389598d958507378f8212199c51c059f8c419fd809dcc7de5750f76220c9c54cd57ad18cb3c38c127559a133df250f66b7", hex.EncodeToString(verifyData))
+	assert.Equal(t, "17c71700d949f82963d3bd6af3994ecc383a3d58007f2f27702758fefa34a925304817c2a9ec97b4cfdfc7a646f4bd5ac309e967465bb49059d397094e57cd088f26f349339c68b33ce856aa2c05b8f89e7c23db0c00817550679998efcbd8f2464f9e1ea6c3172b0b750603d1e4ea38979341a25ec6b613f9f32b23fc0e1a11342bc84d4af0705c666e7813de790d0e63b0a9bc56dc484590728aaaafa6b7a4", hex.EncodeToString(verifyData))
 
 	trace7 := readBlockFromJSON(t, "../../../testdata/blockTrace_07.json")
 	chunk7 := &encoding.Chunk{Blocks: []*encoding.Block{trace7}}
@@ -653,7 +653,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
 	assert.NoError(t, err)
 	verifyData, err = batch.BlobDataProof()
 	assert.NoError(t, err)
-	assert.Equal(t, "1fa77f72d924ed6efdc399cf7a3de45fd3b50538d368d80d94840d30fdb606ec28bda8f1836f60a3879f4253c4f51b3e41a905449b60a83a594f9f2487e8df518f26f349339c68b33ce856aa2c05b8f89e7c23db0c00817550679998efcbd8f2464f9e1ea6c3172b0b750603d1e4ea389598d958507378f8212199c51c059f8c419fd809dcc7de5750f76220c9c54cd57ad18cb3c38c127559a133df250f66b7", hex.EncodeToString(verifyData))
+	assert.Equal(t, "17c71700d949f82963d3bd6af3994ecc383a3d58007f2f27702758fefa34a925304817c2a9ec97b4cfdfc7a646f4bd5ac309e967465bb49059d397094e57cd088f26f349339c68b33ce856aa2c05b8f89e7c23db0c00817550679998efcbd8f2464f9e1ea6c3172b0b750603d1e4ea38979341a25ec6b613f9f32b23fc0e1a11342bc84d4af0705c666e7813de790d0e63b0a9bc56dc484590728aaaafa6b7a4", hex.EncodeToString(verifyData))
 
 	// 15 chunks
 	originalBatch = &encoding.Batch{Chunks: []*encoding.Chunk{chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2, chunk2}}
@@ -661,7 +661,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
 	assert.NoError(t, err)
 	verifyData, err = batch.BlobDataProof()
 	assert.NoError(t, err)
-	assert.Equal(t, "0244c987922db21694e8eb0184c4a5e6f3785fb688224822f1f826874ed5aae2613ca15d051a539e3b239027f9bdbd03bd3c66c98afafb674e2a7441912cbe099743324c70e20042de6480f115b215fbba3472a8b994303a99576c1244aa4aec22fdfe6c74ec728aa28a9eb3812bc93291fbc65cfa558e4df12bcde442483d31072000c56f94fe012285bc5832eaee5fe1d47f1e8655539c4500f66207d8edc6", hex.EncodeToString(verifyData))
+	assert.Equal(t, "55dac3baa818133cfdce0f97ddbb950e341399756d7b49bc34107dd65ecd3a4b54d28f1479467d8b97fb99f5257d3e5d63a81cb2d60e3564fe6ec6066a311c119743324c70e20042de6480f115b215fbba3472a8b994303a99576c1244aa4aec22fdfe6c74ec728aa28a9eb3812bc932a0b603cc94be2007d4b3b17af06b4fb30caf0e574d5abcfc5654079e65154679afad75844396082a7200a4e82462aeed", hex.EncodeToString(verifyData))
 
 	chunk8 := &encoding.Chunk{Blocks: []*encoding.Block{trace2, trace3, trace4}}
 	chunk9 := &encoding.Chunk{Blocks: []*encoding.Block{trace5}}
@@ -670,7 +670,7 @@ func TestCodecV1BatchBlobDataProof(t *testing.T) {
 	assert.NoError(t, err)
 	verifyData, err = batch.BlobDataProof()
 	assert.NoError(t, err)
-	assert.Equal(t, "03523cd88a7227826e093305cbe4ce237e8df38e2157566fb3742cc39dbc9c4330b3863672052b3d6c6552d121b0b13f97659f49bbfb6d7fed6e4b7076e4a43383bee97f95fbf2d789a8e0fb365c26e141d6a31e43403b4a469d1723128f6d5de5c54e913e143feede32d0af9b6fd6fdae9cb71d402cfe8bc4d659f228c41f0b9d195c5074278a2346204cfaa336f5de2244a3d53e0effa2f49c81924720e84e", hex.EncodeToString(verifyData))
+	assert.Equal(t, "0b14dce4abfdeb3a69a341f7db6b1e16162c20826e6d964a829e20f671030cab35b73ddb4a78fc4a8540f1d8259512c46e606a701e7ef7742e38cc4562ef53b983bee97f95fbf2d789a8e0fb365c26e141d6a31e43403b4a469d1723128f6d5de5c54e913e143feede32d0af9b6fd6fda28e5610ca6b185d6ac30b53bd83d6366fccb1956daafa90ff6b504a966b119ebb45cb3f7085b7c1d622ee1ad27fcff9", hex.EncodeToString(verifyData))
 }
 
 func TestCodecV1BatchSkipBitmap(t *testing.T) {
```
common/types/message/auth_msg.go (new file, 91 lines):

```go
package message

import (
	"crypto/ecdsa"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/common/hexutil"
	"github.com/scroll-tech/go-ethereum/crypto"
	"github.com/scroll-tech/go-ethereum/rlp"
)

// AuthMsg is the first message exchanged from the Prover to the Sequencer.
// It effectively acts as a registration, and makes the Prover identification
// known to the Sequencer.
type AuthMsg struct {
	// Message fields
	Identity *Identity `json:"message"`
	// Prover signature
	Signature string `json:"signature"`
}

// Identity contains all the fields to be signed by the prover.
type Identity struct {
	// ProverName the prover name
	ProverName string `json:"prover_name"`
	// ProverVersion the prover version
	ProverVersion string `json:"prover_version"`
	// Challenge unique challenge generated by manager
	Challenge string `json:"challenge"`
	// HardForkName the hard fork name
	HardForkName string `json:"hard_fork_name"`
}

// SignWithKey signs the auth message with the private key and sets the signature on the message.
func (a *AuthMsg) SignWithKey(priv *ecdsa.PrivateKey) error {
	// Hash identity content
	hash, err := a.Identity.Hash()
	if err != nil {
		return err
	}

	// Sign register message
	sig, err := crypto.Sign(hash, priv)
	if err != nil {
		return err
	}
	a.Signature = hexutil.Encode(sig)

	return nil
}

// Verify verifies the message of auth.
func (a *AuthMsg) Verify() (bool, error) {
	hash, err := a.Identity.Hash()
	if err != nil {
		return false, err
	}
	sig := common.FromHex(a.Signature)

	pk, err := crypto.SigToPub(hash, sig)
	if err != nil {
		return false, err
	}
	return crypto.VerifySignature(crypto.CompressPubkey(pk), hash, sig[:len(sig)-1]), nil
}

// PublicKey returns the public key recovered from the signature.
func (a *AuthMsg) PublicKey() (string, error) {
	hash, err := a.Identity.Hash()
	if err != nil {
		return "", err
	}
	sig := common.FromHex(a.Signature)
	// recover public key
	pk, err := crypto.SigToPub(hash, sig)
	if err != nil {
		return "", err
	}
	return common.Bytes2Hex(crypto.CompressPubkey(pk)), nil
}

// Hash returns the hash of the auth message, which should be the message used
// to construct the Signature.
func (i *Identity) Hash() ([]byte, error) {
	byt, err := rlp.EncodeToBytes(i)
	if err != nil {
		return nil, err
	}
	hash := crypto.Keccak256Hash(byt)
	return hash[:], nil
}
```
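A hedged round-trip sketch of how the new type is used (the import path, key handling, and field values are illustrative, not taken from the repository):

```go
package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/crypto"

	"scroll-tech/common/types/message" // assumed module path
)

func main() {
	priv, err := crypto.GenerateKey()
	if err != nil {
		panic(err)
	}

	msg := message.AuthMsg{
		Identity: &message.Identity{
			ProverName:    "example-prover", // illustrative values
			ProverVersion: "v4.3.95",
			Challenge:     "challenge-from-coordinator",
			HardForkName:  "example-fork",
		},
	}

	// SignWithKey RLP-encodes the Identity, hashes it with Keccak-256,
	// and stores the hex-encoded secp256k1 signature on the message.
	if err := msg.SignWithKey(priv); err != nil {
		panic(err)
	}

	// Verify recovers the signer from the signature and checks it.
	ok, err := msg.Verify()
	fmt.Println("verified:", ok, err)

	// PublicKey returns the compressed key the coordinator uses to
	// identify the prover.
	pk, err := msg.PublicKey()
	fmt.Println("prover public key:", pk, err)
}
```

The presence of `HardForkName` is what distinguishes this message from `LegacyAuthMsg` below; the coordinator's login handler branches on it when recovering the public key.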
common/types/message/legacy_auth_msg.go (new file, 89 lines):

```go
package message

import (
	"crypto/ecdsa"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/common/hexutil"
	"github.com/scroll-tech/go-ethereum/crypto"
	"github.com/scroll-tech/go-ethereum/rlp"
)

// LegacyAuthMsg is the old auth message exchanged from the Prover to the Sequencer.
// It effectively acts as a registration, and makes the Prover identification
// known to the Sequencer.
type LegacyAuthMsg struct {
	// Message fields
	Identity *LegacyIdentity `json:"message"`
	// Prover signature
	Signature string `json:"signature"`
}

// LegacyIdentity contains all the fields to be signed by the prover.
type LegacyIdentity struct {
	// ProverName the prover name
	ProverName string `json:"prover_name"`
	// ProverVersion the prover version
	ProverVersion string `json:"prover_version"`
	// Challenge unique challenge generated by manager
	Challenge string `json:"challenge"`
}

// SignWithKey signs the auth message with the private key and sets the signature on the message.
func (a *LegacyAuthMsg) SignWithKey(priv *ecdsa.PrivateKey) error {
	// Hash identity content
	hash, err := a.Identity.Hash()
	if err != nil {
		return err
	}

	// Sign register message
	sig, err := crypto.Sign(hash, priv)
	if err != nil {
		return err
	}
	a.Signature = hexutil.Encode(sig)

	return nil
}

// Verify verifies the message of auth.
func (a *LegacyAuthMsg) Verify() (bool, error) {
	hash, err := a.Identity.Hash()
	if err != nil {
		return false, err
	}
	sig := common.FromHex(a.Signature)

	pk, err := crypto.SigToPub(hash, sig)
	if err != nil {
		return false, err
	}
	return crypto.VerifySignature(crypto.CompressPubkey(pk), hash, sig[:len(sig)-1]), nil
}

// PublicKey returns the public key recovered from the signature.
func (a *LegacyAuthMsg) PublicKey() (string, error) {
	hash, err := a.Identity.Hash()
	if err != nil {
		return "", err
	}
	sig := common.FromHex(a.Signature)
	// recover public key
	pk, err := crypto.SigToPub(hash, sig)
	if err != nil {
		return "", err
	}
	return common.Bytes2Hex(crypto.CompressPubkey(pk)), nil
}

// Hash returns the hash of the auth message, which should be the message used
// to construct the Signature.
func (i *LegacyIdentity) Hash() ([]byte, error) {
	byt, err := rlp.EncodeToBytes(i)
	if err != nil {
		return nil, err
	}
	hash := crypto.Keccak256Hash(byt)
	return hash[:], nil
}
```
The corresponding definitions are removed from the old message file:

```diff
@@ -58,28 +58,6 @@ const (
 	ProofTypeBatch
 )
 
-// AuthMsg is the first message exchanged from the Prover to the Sequencer.
-// It effectively acts as a registration, and makes the Prover identification
-// known to the Sequencer.
-type AuthMsg struct {
-	// Message fields
-	Identity *Identity `json:"message"`
-	// Prover signature
-	Signature string `json:"signature"`
-}
-
-// Identity contains all the fields to be signed by the prover.
-type Identity struct {
-	// ProverName the prover name
-	ProverName string `json:"prover_name"`
-	// ProverVersion the prover version
-	ProverVersion string `json:"prover_version"`
-	// Challenge unique challenge generated by manager
-	Challenge string `json:"challenge"`
-	// HardForkName the hard fork name
-	HardForkName string `json:"hard_fork_name"`
-}
-
 // GenerateToken generates token
 func GenerateToken() (string, error) {
 	b := make([]byte, 16)
@@ -89,65 +67,6 @@ func GenerateToken() (string, error) {
 	return hex.EncodeToString(b), nil
 }
 
-// SignWithKey auth message with private key and set public key in auth message's Identity
-func (a *AuthMsg) SignWithKey(priv *ecdsa.PrivateKey) error {
-	// Hash identity content
-	hash, err := a.Identity.Hash()
-	if err != nil {
-		return err
-	}
-
-	// Sign register message
-	sig, err := crypto.Sign(hash, priv)
-	if err != nil {
-		return err
-	}
-	a.Signature = hexutil.Encode(sig)
-
-	return nil
-}
-
-// Verify verifies the message of auth.
-func (a *AuthMsg) Verify() (bool, error) {
-	hash, err := a.Identity.Hash()
-	if err != nil {
-		return false, err
-	}
-	sig := common.FromHex(a.Signature)
-
-	pk, err := crypto.SigToPub(hash, sig)
-	if err != nil {
-		return false, err
-	}
-	return crypto.VerifySignature(crypto.CompressPubkey(pk), hash, sig[:len(sig)-1]), nil
-}
-
-// PublicKey return public key from signature
-func (a *AuthMsg) PublicKey() (string, error) {
-	hash, err := a.Identity.Hash()
-	if err != nil {
-		return "", err
-	}
-	sig := common.FromHex(a.Signature)
-	// recover public key
-	pk, err := crypto.SigToPub(hash, sig)
-	if err != nil {
-		return "", err
-	}
-	return common.Bytes2Hex(crypto.CompressPubkey(pk)), nil
-}
-
-// Hash returns the hash of the auth message, which should be the message used
-// to construct the Signature.
-func (i *Identity) Hash() ([]byte, error) {
-	byt, err := rlp.EncodeToBytes(i)
-	if err != nil {
-		return nil, err
-	}
-	hash := crypto.Keccak256Hash(byt)
-	return hash[:], nil
-}
-
 // ProofMsg is the data structure sent to the coordinator.
 type ProofMsg struct {
 	*ProofDetail `json:"zkProof"`
```
```diff
@@ -5,7 +5,7 @@ import (
 	"runtime/debug"
 )
 
-var tag = "v4.3.88"
+var tag = "v4.3.95"
 
 var commit = func() string {
 	if info, ok := debug.ReadBuildInfo(); ok {
```
```diff
@@ -5,7 +5,7 @@
   "license": "MIT",
   "scripts": {
     "test:hardhat": "npx hardhat test",
-    "test:forge": "forge test -vvv",
+    "test:forge": "forge test -vvv --evm-version cancun",
     "test": "yarn test:hardhat && yarn test:forge",
     "solhint": "./node_modules/.bin/solhint -f table 'src/**/*.sol'",
     "lint:sol": "./node_modules/.bin/prettier --write 'src/**/*.sol'",
```
```diff
@@ -477,7 +477,8 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
                 _postStateRoot,
                 _withdrawRoot,
                 _dataHash,
-                _blobDataProof[0:64]
+                _blobDataProof[0:64],
+                _blobVersionedHash
             )
         );
```
```diff
@@ -83,6 +83,8 @@ contract L2USDCGatewayTest is L2GatewayTestBase {
     }
 
     function testTransferUSDCRoles(address owner) external {
+        hevm.assume(owner != address(0));
+
         // non-whitelisted caller call, should revert
         hevm.expectRevert("only circle caller");
         gateway.transferUSDCRoles(owner);
```
```diff
@@ -38,7 +38,7 @@ make lint
 
 ## Configure
 
-The coordinator behavior can be configured using [`config.json`](config.json). Check the code comments under `ProverManager` in [`config/config.go`](config/config.go) for more details.
+The coordinator behavior can be configured using [`conf/config.json`](conf/config.json). Check the code comments under `ProverManager` in [`internal/config/config.go`](internal/config/config.go) for more details.
 
 
 ## Start
```
```diff
@@ -1,6 +1,6 @@
 module scroll-tech/coordinator
 
-go 1.20
+go 1.21
 
 require (
 	github.com/appleboy/gin-jwt/v2 v2.9.1
```
go.sum changes (the rendered view did not preserve which checksum lines were added; the hunk headers indicate one to two additions each):

```diff
@@ -59,6 +59,7 @@ github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
 github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
 github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
 github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
 github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
 github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs=
 github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
 github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
@@ -95,6 +96,7 @@ github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3
 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
 github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
 github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
 github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU=
 github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
@@ -117,11 +119,13 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN
 github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
 github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
 github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
 github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
 github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
 github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
 github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
 github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
@@ -144,6 +148,7 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ
 github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
 github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
 github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
 github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
 github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@@ -153,6 +158,7 @@ github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6
 github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
 github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
@@ -164,6 +170,7 @@ github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr
 github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
 github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
 github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
 github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
 github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
 github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
 github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
@@ -197,6 +204,7 @@ github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8
 github.com/supranational/blst v0.3.11-0.20230124161941-ca03e11a3ff2 h1:wh1wzwAhZBNiZO37uWS/nDaKiIwHz4mDo4pnA+fqTO0=
 github.com/supranational/blst v0.3.11-0.20230124161941-ca03e11a3ff2/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
 github.com/tidwall/gjson v1.14.3 h1:9jvXn7olKEHU1S9vwoMGliaT8jq1vJ7IH/n9zD9Dnlw=
 github.com/tidwall/gjson v1.14.3/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
 github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
```
@@ -53,18 +53,31 @@ func (a *AuthController) PayloadFunc(data interface{}) jwt.MapClaims {
 		return jwt.MapClaims{}
 	}
 
-	// recover the public key
-	authMsg := message.AuthMsg{
-		Identity: &message.Identity{
-			Challenge:     v.Message.Challenge,
-			ProverName:    v.Message.ProverName,
-			ProverVersion: v.Message.ProverVersion,
-			HardForkName:  v.Message.HardForkName,
-		},
-		Signature: v.Signature,
-	}
-
-	publicKey, err := authMsg.PublicKey()
+	var publicKey string
+	var err error
+	if v.Message.HardForkName != "" {
+		authMsg := message.AuthMsg{
+			Identity: &message.Identity{
+				Challenge:     v.Message.Challenge,
+				ProverName:    v.Message.ProverName,
+				ProverVersion: v.Message.ProverVersion,
+				HardForkName:  v.Message.HardForkName,
+			},
+			Signature: v.Signature,
+		}
+		publicKey, err = authMsg.PublicKey()
+	} else {
+		authMsg := message.LegacyAuthMsg{
+			Identity: &message.LegacyIdentity{
+				Challenge:     v.Message.Challenge,
+				ProverName:    v.Message.ProverName,
+				ProverVersion: v.Message.ProverVersion,
+			},
+			Signature: v.Signature,
+		}
+		publicKey, err = authMsg.PublicKey()
+	}
+
 	if err != nil {
 		return jwt.MapClaims{}
 	}
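The hunk above keeps login backward compatible: provers that send hard_fork_name are verified against the new AuthMsg layout, older provers fall back to LegacyAuthMsg, and in both branches the signer's public key is recovered from the signature. The recovery itself is ordinary secp256k1 public-key recovery; a minimal, self-contained sketch of that round trip using go-ethereum's crypto package (recoverSigner and the payload are illustrative, not the coordinator's actual types):

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/crypto"
)

// recoverSigner is an illustrative helper: hash the payload, then recover
// the signer's public key from the 65-byte [R || S || V] signature. This is
// the same recovery idea behind authMsg.PublicKey().
func recoverSigner(payload, sig []byte) (string, error) {
    hash := crypto.Keccak256(payload)
    pub, err := crypto.SigToPub(hash, sig)
    if err != nil {
        return "", err
    }
    return crypto.PubkeyToAddress(*pub).Hex(), nil
}

func main() {
    key, err := crypto.GenerateKey()
    if err != nil {
        panic(err)
    }
    payload := []byte(`{"challenge":"abc","prover_name":"p1"}`)
    sig, err := crypto.Sign(crypto.Keccak256(payload), key)
    if err != nil {
        panic(err)
    }
    signer, _ := recoverSigner(payload, sig)
    fmt.Println(signer == crypto.PubkeyToAddress(key.PublicKey).Hex()) // true
}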
@@ -30,8 +30,8 @@ func (c *Collector) cleanupChallenge() {
 				log.Error("manager context canceled with error", "error", c.ctx.Err())
 			}
 			return
-		case <-c.stopTimeoutChan:
-			log.Info("the coordinator run loop exit")
+		case <-c.stopCleanChallengeChan:
+			log.Info("the coordinator cleanupChallenge run loop exit")
 			return
 		}
 	}
@@ -23,7 +23,10 @@ type Collector struct {
 	db  *gorm.DB
 	ctx context.Context
 
-	stopTimeoutChan chan struct{}
+	stopChunkTimeoutChan       chan struct{}
+	stopBatchTimeoutChan       chan struct{}
+	stopBatchAllChunkReadyChan chan struct{}
+	stopCleanChallengeChan     chan struct{}
 
 	proverTaskOrm *orm.ProverTask
 	chunkOrm      *orm.Chunk
@@ -40,14 +43,17 @@ type Collector struct {
 // NewCollector create a collector to cron collect the data to send to prover
 func NewCollector(ctx context.Context, db *gorm.DB, cfg *config.Config, reg prometheus.Registerer) *Collector {
 	c := &Collector{
-		cfg:             cfg,
-		db:              db,
-		ctx:             ctx,
-		stopTimeoutChan: make(chan struct{}),
-		proverTaskOrm:   orm.NewProverTask(db),
-		chunkOrm:        orm.NewChunk(db),
-		batchOrm:        orm.NewBatch(db),
-		challenge:       orm.NewChallenge(db),
+		cfg:                        cfg,
+		db:                         db,
+		ctx:                        ctx,
+		stopChunkTimeoutChan:       make(chan struct{}),
+		stopBatchTimeoutChan:       make(chan struct{}),
+		stopBatchAllChunkReadyChan: make(chan struct{}),
+		stopCleanChallengeChan:     make(chan struct{}),
+		proverTaskOrm:              orm.NewProverTask(db),
+		chunkOrm:                   orm.NewChunk(db),
+		batchOrm:                   orm.NewBatch(db),
+		challenge:                  orm.NewChallenge(db),
 
 		timeoutBatchCheckerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
 			Name: "coordinator_batch_timeout_checker_run_total",
@@ -83,7 +89,10 @@ func NewCollector(ctx context.Context, db *gorm.DB, cfg *config.Config, reg prom
 
 // Stop all the collector
 func (c *Collector) Stop() {
-	c.stopTimeoutChan <- struct{}{}
+	c.stopChunkTimeoutChan <- struct{}{}
+	c.stopBatchTimeoutChan <- struct{}{}
+	c.stopBatchAllChunkReadyChan <- struct{}{}
+	c.stopCleanChallengeChan <- struct{}{}
 }
 
 // timeoutTask cron check the send task is timeout. if timeout reached, restore the
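The struct and Stop() changes above fix a real shutdown hazard: with one goroutine per cron loop, a single shared stopTimeoutChan cannot stop them all, because each unbuffered send wakes exactly one receiver. Giving every loop its own channel makes Stop deterministic. A standalone sketch of the pattern (names are illustrative):

package main

import (
    "fmt"
    "time"
)

// runLoop is a generic cron-style loop: it works on every tick and exits
// when its dedicated stop channel is signalled.
func runLoop(name string, stop <-chan struct{}) {
    ticker := time.NewTicker(100 * time.Millisecond)
    defer ticker.Stop()
    for {
        select {
        case <-ticker.C:
            // periodic work would go here
        case <-stop:
            fmt.Println("the", name, "run loop exit")
            return
        }
    }
}

func main() {
    stopChunk := make(chan struct{})
    stopBatch := make(chan struct{})
    go runLoop("timeoutChunkProofTask", stopChunk)
    go runLoop("timeoutBatchProofTask", stopBatch)

    time.Sleep(300 * time.Millisecond)
    // One send per loop: a single shared channel would stop only one of them.
    stopChunk <- struct{}{}
    stopBatch <- struct{}{}
    time.Sleep(50 * time.Millisecond)
}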
@@ -113,8 +122,8 @@ func (c *Collector) timeoutBatchProofTask() {
 				log.Error("manager context canceled with error", "error", c.ctx.Err())
 			}
 			return
-		case <-c.stopTimeoutChan:
-			log.Info("the coordinator run loop exit")
+		case <-c.stopBatchTimeoutChan:
+			log.Info("the coordinator timeoutBatchProofTask run loop exit")
 			return
 		}
 	}
@@ -146,8 +155,8 @@ func (c *Collector) timeoutChunkProofTask() {
 				log.Error("manager context canceled with error", "error", c.ctx.Err())
 			}
 			return
-		case <-c.stopTimeoutChan:
-			log.Info("the coordinator run loop exit")
+		case <-c.stopChunkTimeoutChan:
+			log.Info("the coordinator timeoutChunkProofTask run loop exit")
 			return
 		}
 	}
@@ -253,8 +262,8 @@ func (c *Collector) checkBatchAllChunkReady() {
 				log.Error("manager context canceled with error", "error", c.ctx.Err())
 			}
 			return
-		case <-c.stopTimeoutChan:
-			log.Info("the coordinator run loop exit")
+		case <-c.stopBatchAllChunkReadyChan:
+			log.Info("the coordinator checkBatchAllChunkReady run loop exit")
 			return
 		}
 	}
@@ -21,5 +21,5 @@ func NewLoginLogic(db *gorm.DB) *LoginLogic {
 
 // InsertChallengeString insert and check the challenge string is existed
 func (l *LoginLogic) InsertChallengeString(ctx *gin.Context, challenge string) error {
-	return l.challengeOrm.InsertChallenge(ctx, challenge)
+	return l.challengeOrm.InsertChallenge(ctx.Copy(), challenge)
 }
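The ctx.Copy() calls that recur throughout this commit follow gin's documented rule: a *gin.Context is recycled once its handler returns, so any use that might outlive the request should operate on a copy. A minimal sketch, modeled on the example in gin's own README:

package main

import (
    "log"
    "net/http"
    "time"

    "github.com/gin-gonic/gin"
)

func main() {
    r := gin.Default()
    r.GET("/assign", func(c *gin.Context) {
        // gin recycles c once this handler returns; Copy() returns a
        // snapshot that stays valid inside the goroutine.
        cCp := c.Copy()
        go func() {
            time.Sleep(100 * time.Millisecond)
            log.Println("done, path:", cCp.Request.URL.Path)
        }()
        c.JSON(http.StatusOK, gin.H{"status": "accepted"})
    })
    _ = r.Run(":8080")
}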
@@ -83,7 +83,7 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
 	var endChunkIndex uint64 = math.MaxInt64
 	fromBlockNum, toBlockNum := forks.BlockRange(hardForkNumber, bp.forkHeights)
 	if fromBlockNum != 0 {
-		startChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx, fromBlockNum)
+		startChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx.Copy(), fromBlockNum)
 		if chunkErr != nil {
 			log.Error("failed to get fork start chunk index", "forkName", taskCtx.HardForkName, "fromBlockNumber", fromBlockNum, "err", chunkErr)
 			return nil, ErrCoordinatorInternalFailure
@@ -94,7 +94,7 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
 		startChunkIndex = startChunk.Index
 	}
 	if toBlockNum != math.MaxInt64 {
-		toChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx, toBlockNum)
+		toChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx.Copy(), toBlockNum)
 		if chunkErr != nil {
 			log.Error("failed to get fork end chunk index", "forkName", taskCtx.HardForkName, "toBlockNumber", toBlockNum, "err", chunkErr)
 			return nil, ErrCoordinatorInternalFailure
@@ -112,7 +112,7 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
 	for i := 0; i < 5; i++ {
 		var getTaskError error
 		var tmpBatchTask *orm.Batch
-		tmpBatchTask, getTaskError = bp.batchOrm.GetAssignedBatch(ctx, startChunkIndex, endChunkIndex, maxActiveAttempts, maxTotalAttempts)
+		tmpBatchTask, getTaskError = bp.batchOrm.GetAssignedBatch(ctx.Copy(), startChunkIndex, endChunkIndex, maxActiveAttempts, maxTotalAttempts)
 		if getTaskError != nil {
 			log.Error("failed to get assigned batch proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
 			return nil, ErrCoordinatorInternalFailure
@@ -121,7 +121,7 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
 		// Why here need get again? In order to support a task can assign to multiple prover, need also assign `ProvingTaskAssigned`
 		// batch to prover. But use `proving_status in (1, 2)` will not use the postgres index. So need split the sql.
 		if tmpBatchTask == nil {
-			tmpBatchTask, getTaskError = bp.batchOrm.GetUnassignedBatch(ctx, startChunkIndex, endChunkIndex, maxActiveAttempts, maxTotalAttempts)
+			tmpBatchTask, getTaskError = bp.batchOrm.GetUnassignedBatch(ctx.Copy(), startChunkIndex, endChunkIndex, maxActiveAttempts, maxTotalAttempts)
 			if getTaskError != nil {
 				log.Error("failed to get unassigned batch proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
 				return nil, ErrCoordinatorInternalFailure
@@ -133,7 +133,7 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
 		return nil, nil
 	}
 
-	rowsAffected, updateAttemptsErr := bp.batchOrm.UpdateBatchAttempts(ctx, tmpBatchTask.Index, tmpBatchTask.ActiveAttempts, tmpBatchTask.TotalAttempts)
+	rowsAffected, updateAttemptsErr := bp.batchOrm.UpdateBatchAttempts(ctx.Copy(), tmpBatchTask.Index, tmpBatchTask.ActiveAttempts, tmpBatchTask.TotalAttempts)
 	if updateAttemptsErr != nil {
 		log.Error("failed to update batch attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
 		return nil, ErrCoordinatorInternalFailure
@@ -168,13 +168,13 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
 	}
 
 	// Store session info.
-	if err = bp.proverTaskOrm.InsertProverTask(ctx, &proverTask); err != nil {
+	if err = bp.proverTaskOrm.InsertProverTask(ctx.Copy(), &proverTask); err != nil {
 		bp.recoverActiveAttempts(ctx, batchTask)
 		log.Error("insert batch prover task info fail", "taskID", batchTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
 		return nil, ErrCoordinatorInternalFailure
 	}
 
-	taskMsg, err := bp.formatProverTask(ctx, &proverTask)
+	taskMsg, err := bp.formatProverTask(ctx.Copy(), &proverTask)
 	if err != nil {
 		bp.recoverActiveAttempts(ctx, batchTask)
 		log.Error("format prover task failure", "hash", batchTask.Hash, "err", err)
@@ -242,7 +242,7 @@ func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.Prove
 }
 
 func (bp *BatchProverTask) recoverActiveAttempts(ctx *gin.Context, batchTask *orm.Batch) {
-	if err := bp.chunkOrm.DecreaseActiveAttemptsByHash(ctx, batchTask.Hash); err != nil {
+	if err := bp.chunkOrm.DecreaseActiveAttemptsByHash(ctx.Copy(), batchTask.Hash); err != nil {
 		log.Error("failed to recover batch active attempts", "hash", batchTask.Hash, "error", err)
 	}
 }
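The in-code comment above explains the double lookup: a task may be handed to several provers, so already-assigned batches are considered too, but a single `proving_status IN (1, 2)` predicate would keep Postgres from using the per-status index, so the query is split. A schematic, hypothetical version of that split using database/sql (table and column names are illustrative, not the coordinator's ORM):

package main

import (
    "context"
    "database/sql"
    "errors"
    "fmt"
)

// Hypothetical status values matching the comment's `proving_status in (1, 2)`.
const (
    statusUnassigned = 1
    statusAssigned   = 2
)

// pickBatch issues one single-status query per step, so each can be served
// by an index on proving_status, instead of one IN (1, 2) query that may not be.
func pickBatch(ctx context.Context, db *sql.DB) (string, error) {
    for _, status := range []int{statusAssigned, statusUnassigned} {
        var hash string
        err := db.QueryRowContext(ctx,
            `SELECT hash FROM batch WHERE proving_status = $1 LIMIT 1`, status).Scan(&hash)
        if errors.Is(err, sql.ErrNoRows) {
            continue // fall through to the next status
        }
        if err != nil {
            return "", err
        }
        return hash, nil
    }
    return "", fmt.Errorf("no batch task available")
}

func main() {
    // sql.Open needs a registered driver (e.g. lib/pq); the DSN is a placeholder.
    db, err := sql.Open("postgres", "postgres://localhost/scroll?sslmode=disable")
    if err != nil {
        fmt.Println(err)
        return
    }
    fmt.Println(pickBatch(context.Background(), db))
}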
@@ -85,7 +85,7 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
 	for i := 0; i < 5; i++ {
 		var getTaskError error
 		var tmpChunkTask *orm.Chunk
-		tmpChunkTask, getTaskError = cp.chunkOrm.GetAssignedChunk(ctx, fromBlockNum, toBlockNum, maxActiveAttempts, maxTotalAttempts)
+		tmpChunkTask, getTaskError = cp.chunkOrm.GetAssignedChunk(ctx.Copy(), fromBlockNum, toBlockNum, maxActiveAttempts, maxTotalAttempts)
 		if getTaskError != nil {
 			log.Error("failed to get assigned chunk proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
 			return nil, ErrCoordinatorInternalFailure
@@ -94,7 +94,7 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
 		// Why here need get again? In order to support a task can assign to multiple prover, need also assign `ProvingTaskAssigned`
 		// chunk to prover. But use `proving_status in (1, 2)` will not use the postgres index. So need split the sql.
 		if tmpChunkTask == nil {
-			tmpChunkTask, getTaskError = cp.chunkOrm.GetUnassignedChunk(ctx, fromBlockNum, toBlockNum, maxActiveAttempts, maxTotalAttempts)
+			tmpChunkTask, getTaskError = cp.chunkOrm.GetUnassignedChunk(ctx.Copy(), fromBlockNum, toBlockNum, maxActiveAttempts, maxTotalAttempts)
 			if getTaskError != nil {
 				log.Error("failed to get unassigned chunk proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
 				return nil, ErrCoordinatorInternalFailure
@@ -106,7 +106,7 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
 		return nil, nil
 	}
 
-	rowsAffected, updateAttemptsErr := cp.chunkOrm.UpdateChunkAttempts(ctx, tmpChunkTask.Index, tmpChunkTask.ActiveAttempts, tmpChunkTask.TotalAttempts)
+	rowsAffected, updateAttemptsErr := cp.chunkOrm.UpdateChunkAttempts(ctx.Copy(), tmpChunkTask.Index, tmpChunkTask.ActiveAttempts, tmpChunkTask.TotalAttempts)
 	if updateAttemptsErr != nil {
 		log.Error("failed to update chunk attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
 		return nil, ErrCoordinatorInternalFailure
@@ -140,13 +140,13 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
 		AssignedAt: utils.NowUTC(),
 	}
 
-	if err = cp.proverTaskOrm.InsertProverTask(ctx, &proverTask); err != nil {
+	if err = cp.proverTaskOrm.InsertProverTask(ctx.Copy(), &proverTask); err != nil {
 		cp.recoverActiveAttempts(ctx, chunkTask)
 		log.Error("insert chunk prover task fail", "taskID", chunkTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
 		return nil, ErrCoordinatorInternalFailure
 	}
 
-	taskMsg, err := cp.formatProverTask(ctx, &proverTask)
+	taskMsg, err := cp.formatProverTask(ctx.Copy(), &proverTask)
 	if err != nil {
 		cp.recoverActiveAttempts(ctx, chunkTask)
 		log.Error("format prover task failure", "hash", chunkTask.Hash, "err", err)
@@ -100,7 +100,7 @@ func (b *BaseProverTask) checkParameter(ctx *gin.Context, getTaskParameter *coor
 		return nil, fmt.Errorf("incompatible vk. please check your params files or config files")
 	}
 
-	isBlocked, err := b.proverBlockListOrm.IsPublicKeyBlocked(ctx, publicKey.(string))
+	isBlocked, err := b.proverBlockListOrm.IsPublicKeyBlocked(ctx.Copy(), publicKey.(string))
 	if err != nil {
 		return nil, fmt.Errorf("failed to check whether the public key %s is blocked before assigning a chunk task, err: %w, proverName: %s, proverVersion: %s", publicKey, err, proverName, proverVersion)
 	}
@@ -108,7 +108,7 @@ func (b *BaseProverTask) checkParameter(ctx *gin.Context, getTaskParameter *coor
 		return nil, fmt.Errorf("public key %s is blocked from fetching tasks. ProverName: %s, ProverVersion: %s", publicKey, proverName, proverVersion)
 	}
 
-	isAssigned, err := b.proverTaskOrm.IsProverAssigned(ctx, publicKey.(string))
+	isAssigned, err := b.proverTaskOrm.IsProverAssigned(ctx.Copy(), publicKey.(string))
 	if err != nil {
 		return nil, fmt.Errorf("failed to check if prover %s is assigned a task, err: %w", publicKey.(string), err)
 	}
@@ -139,14 +139,14 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
 	var proverTask *orm.ProverTask
 	var err error
 	if proofParameter.UUID != "" {
-		proverTask, err = m.proverTaskOrm.GetProverTaskByUUIDAndPublicKey(ctx, proofParameter.UUID, pk)
+		proverTask, err = m.proverTaskOrm.GetProverTaskByUUIDAndPublicKey(ctx.Copy(), proofParameter.UUID, pk)
 		if proverTask == nil || err != nil {
 			log.Error("get none prover task for the proof", "uuid", proofParameter.UUID, "key", pk, "taskID", proofMsg.ID, "error", err)
 			return ErrValidatorFailureProverTaskEmpty
 		}
 	} else {
 		// TODO When prover all have upgrade, need delete this logic
-		proverTask, err = m.proverTaskOrm.GetAssignedProverTaskByTaskIDAndProver(ctx, proofMsg.Type, proofMsg.ID, pk, pv)
+		proverTask, err = m.proverTaskOrm.GetAssignedProverTaskByTaskIDAndProver(ctx.Copy(), proofMsg.Type, proofMsg.ID, pk, pv)
 		if proverTask == nil || err != nil {
 			log.Error("get none prover task for the proof", "key", pk, "taskID", proofMsg.ID, "error", err)
 			return ErrValidatorFailureProverTaskEmpty
@@ -159,7 +159,7 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
 	log.Info("handling zk proof", "proofID", proofMsg.ID, "proverName", proverTask.ProverName,
 		"proverPublicKey", pk, "proveType", proverTask.TaskType, "proofTime", proofTimeSec, "hardForkName", hardForkName)
 
-	if err = m.validator(ctx, proverTask, pk, proofMsg, proofParameter, hardForkName); err != nil {
+	if err = m.validator(ctx.Copy(), proverTask, pk, proofMsg, proofParameter, hardForkName); err != nil {
 		return err
 	}
 
@@ -175,7 +175,7 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
 	if verifyErr != nil || !success {
 		m.verifierFailureTotal.WithLabelValues(pv).Inc()
 
-		m.proofRecover(ctx, proverTask, types.ProverTaskFailureTypeVerifiedFailed, proofMsg)
+		m.proofRecover(ctx.Copy(), proverTask, types.ProverTaskFailureTypeVerifiedFailed, proofMsg)
 
 		log.Info("proof verified by coordinator failed", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
 			"prover pk", pk, "forkName", hardForkName, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", verifyErr)
@@ -191,10 +191,10 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
 	log.Info("proof verified and valid", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
 		"prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "forkName", hardForkName)
 
-	if err := m.closeProofTask(ctx, proverTask, proofMsg, proofTimeSec); err != nil {
+	if err := m.closeProofTask(ctx.Copy(), proverTask, proofMsg, proofTimeSec); err != nil {
 		m.proofSubmitFailure.Inc()
 
-		m.proofRecover(ctx, proverTask, types.ProverTaskFailureTypeServerError, proofMsg)
+		m.proofRecover(ctx.Copy(), proverTask, types.ProverTaskFailureTypeServerError, proofMsg)
 
 		return ErrCoordinatorInternalFailure
 	}
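The lookup above prefers the new UUID-keyed prover task and keeps the (taskID, prover) lookup only as a legacy fallback that the TODO marks for removal once all provers have upgraded. A self-contained sketch of that dispatch with stub lookups (ProverTask and both helpers are stand-ins, not the coordinator's ORM):

package main

import (
    "context"
    "errors"
    "fmt"
)

// ProverTask is a stand-in for the coordinator's orm.ProverTask.
type ProverTask struct{ UUID, TaskID, PublicKey string }

var tasks = []ProverTask{{UUID: "u-1", TaskID: "t-1", PublicKey: "pk-1"}}

func getByUUIDAndPublicKey(_ context.Context, uuid, pk string) (*ProverTask, error) {
    for i := range tasks {
        if tasks[i].UUID == uuid && tasks[i].PublicKey == pk {
            return &tasks[i], nil
        }
    }
    return nil, errors.New("prover task not found")
}

func getByTaskIDAndProver(_ context.Context, taskID, pk string) (*ProverTask, error) {
    for i := range tasks {
        if tasks[i].TaskID == taskID && tasks[i].PublicKey == pk {
            return &tasks[i], nil
        }
    }
    return nil, errors.New("prover task not found")
}

// findProverTask mirrors the dispatch in HandleZkProof: prefer the UUID
// lookup, keep the (taskID, prover) lookup only for not-yet-upgraded provers.
func findProverTask(ctx context.Context, uuid, taskID, pk string) (*ProverTask, error) {
    if uuid != "" {
        return getByUUIDAndPublicKey(ctx, uuid, pk)
    }
    return getByTaskIDAndProver(ctx, taskID, pk)
}

func main() {
    t, err := findProverTask(context.Background(), "u-1", "", "pk-1")
    fmt.Println(t, err)
    t, err = findProverTask(context.Background(), "", "t-1", "pk-1")
    fmt.Println(t, err)
}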
@@ -24,6 +24,7 @@ type Batch struct {
 	// batch
 	Index           uint64 `json:"index" gorm:"column:index"`
 	Hash            string `json:"hash" gorm:"column:hash"`
+	DataHash        string `json:"data_hash" gorm:"column:data_hash"`
 	StartChunkIndex uint64 `json:"start_chunk_index" gorm:"column:start_chunk_index"`
 	StartChunkHash  string `json:"start_chunk_hash" gorm:"column:start_chunk_hash"`
 	EndChunkIndex   uint64 `json:"end_chunk_index" gorm:"column:end_chunk_index"`
@@ -54,6 +55,10 @@ type Batch struct {
 	OracleStatus int16  `json:"oracle_status" gorm:"column:oracle_status;default:1"`
 	OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"`
 
+	// blob
+	BlobDataProof []byte `json:"blob_data_proof" gorm:"column:blob_data_proof"`
+	BlobSize      uint64 `json:"blob_size" gorm:"column:blob_size"`
+
 	// metadata
 	CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
 	UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
@@ -248,6 +253,7 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
 	newBatch := Batch{
 		Index:           batch.Index,
 		Hash:            daBatch.Hash().Hex(),
+		DataHash:        daBatch.DataHash.Hex(),
 		StartChunkHash:  startDAChunkHash.Hex(),
 		StartChunkIndex: startChunkIndex,
 		EndChunkHash:    endDAChunkHash.Hex(),
@@ -262,6 +268,8 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
 		ActiveAttempts: 0,
 		RollupStatus:   int16(types.RollupPending),
 		OracleStatus:   int16(types.GasOraclePending),
+		BlobDataProof:  nil, // using mock value because this piece of codes is only used in unit tests
+		BlobSize:       0,   // using mock value because this piece of codes is only used in unit tests
 	}
 
 	db := o.db
@@ -48,6 +48,10 @@ type Chunk struct {
 	// batch
 	BatchHash string `json:"batch_hash" gorm:"column:batch_hash;default:NULL"`
 
+	// blob
+	CrcMax   uint64 `json:"crc_max" gorm:"column:crc_max"`
+	BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"`
+
 	// metadata
 	TotalL2TxGas uint64 `json:"total_l2_tx_gas" gorm:"column:total_l2_tx_gas"`
 	TotalL2TxNum uint64 `json:"total_l2_tx_num" gorm:"column:total_l2_tx_num"`
@@ -300,6 +304,8 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, dbTX ...
 		ProvingStatus:  int16(types.ProvingTaskUnassigned),
 		TotalAttempts:  0,
 		ActiveAttempts: 0,
+		CrcMax:         0, // using mock value because this piece of codes is only used in unit tests
+		BlobSize:       0, // using mock value because this piece of codes is only used in unit tests
 	}
 
 	db := o.db
@@ -77,18 +77,31 @@ func (r *mockProver) challenge(t *testing.T) string {
 }
 
 func (r *mockProver) login(t *testing.T, challengeString string, forkName string) string {
-	authMsg := message.AuthMsg{
-		Identity: &message.Identity{
-			Challenge:     challengeString,
-			ProverName:    r.proverName,
-			ProverVersion: r.proverVersion,
-			HardForkName:  forkName,
-		},
-	}
-	assert.NoError(t, authMsg.SignWithKey(r.privKey))
-
-	body := fmt.Sprintf("{\"message\":{\"challenge\":\"%s\",\"prover_name\":\"%s\", \"prover_version\":\"%s\", \"hard_fork_name\":\"%s\"},\"signature\":\"%s\"}",
-		authMsg.Identity.Challenge, authMsg.Identity.ProverName, authMsg.Identity.ProverVersion, authMsg.Identity.HardForkName, authMsg.Signature)
+	var body string
+	if forkName != "" {
+		authMsg := message.AuthMsg{
+			Identity: &message.Identity{
+				Challenge:     challengeString,
+				ProverName:    r.proverName,
+				ProverVersion: r.proverVersion,
+				HardForkName:  forkName,
+			},
+		}
+		assert.NoError(t, authMsg.SignWithKey(r.privKey))
+		body = fmt.Sprintf("{\"message\":{\"challenge\":\"%s\",\"prover_name\":\"%s\", \"prover_version\":\"%s\", \"hard_fork_name\":\"%s\"},\"signature\":\"%s\"}",
+			authMsg.Identity.Challenge, authMsg.Identity.ProverName, authMsg.Identity.ProverVersion, authMsg.Identity.HardForkName, authMsg.Signature)
+	} else {
+		authMsg := message.LegacyAuthMsg{
+			Identity: &message.LegacyIdentity{
+				Challenge:     challengeString,
+				ProverName:    r.proverName,
+				ProverVersion: r.proverVersion,
+			},
+		}
+		assert.NoError(t, authMsg.SignWithKey(r.privKey))
+		body = fmt.Sprintf("{\"message\":{\"challenge\":\"%s\",\"prover_name\":\"%s\", \"prover_version\":\"%s\"},\"signature\":\"%s\"}",
+			authMsg.Identity.Challenge, authMsg.Identity.ProverName, authMsg.Identity.ProverVersion, authMsg.Signature)
+	}
 
 	var result ctypes.Response
 	client := resty.New()
@@ -59,20 +59,20 @@ func testResetDB(t *testing.T) {
 	cur, err := Current(pgDB)
 	assert.NoError(t, err)
 	// total number of tables.
-	assert.Equal(t, int64(16), cur)
+	assert.Equal(t, int64(18), cur)
 }
 
 func testMigrate(t *testing.T) {
 	assert.NoError(t, Migrate(pgDB))
 	cur, err := Current(pgDB)
 	assert.NoError(t, err)
-	assert.Equal(t, int64(16), cur)
+	assert.Equal(t, int64(18), cur)
 }
 
 func testRollback(t *testing.T) {
 	version, err := Current(pgDB)
 	assert.NoError(t, err)
-	assert.Equal(t, int64(16), version)
+	assert.Equal(t, int64(18), version)
 
 	assert.NoError(t, Rollback(pgDB, nil))
 
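The expected schema version moves from 16 to 18 because this change ships two new goose migrations: 00017 (blob metadata columns) and 00018 (prover_task indexes). Driving the same migrations from Go with the pressly/goose API reports the version these tests assert on; a hedged sketch (the DSN and directory are placeholders):

package main

import (
    "database/sql"
    "log"

    _ "github.com/lib/pq" // assumed Postgres driver
    "github.com/pressly/goose/v3"
)

func main() {
    db, err := sql.Open("postgres", "postgres://localhost/scroll_test?sslmode=disable")
    if err != nil {
        log.Fatal(err)
    }
    if err := goose.SetDialect("postgres"); err != nil {
        log.Fatal(err)
    }
    // Apply everything under ./migrations, then report the version that the
    // tests above pin to 18 once 00017 and 00018 are in place.
    if err := goose.Up(db, "migrations"); err != nil {
        log.Fatal(err)
    }
    v, err := goose.GetDBVersion(db)
    if err != nil {
        log.Fatal(err)
    }
    log.Println("schema version:", v) // expected: 18
}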
27 database/migrate/migrations/00017_add_blob_meta_data.sql Normal file
@@ -0,0 +1,27 @@
+-- +goose Up
+-- +goose StatementBegin
+
+ALTER TABLE chunk
+ADD COLUMN crc_max INTEGER DEFAULT 0,
+ADD COLUMN blob_size INTEGER DEFAULT 0;
+
+ALTER TABLE batch
+ADD COLUMN data_hash VARCHAR DEFAULT '',
+ADD COLUMN blob_data_proof BYTEA DEFAULT NULL,
+ADD COLUMN blob_size INTEGER DEFAULT 0;
+
+-- +goose StatementEnd
+
+-- +goose Down
+-- +goose StatementBegin
+
+ALTER TABLE IF EXISTS batch
+DROP COLUMN data_hash,
+DROP COLUMN blob_data_proof,
+DROP COLUMN blob_size;
+
+ALTER TABLE IF EXISTS chunk
+DROP COLUMN crc_max,
+DROP COLUMN blob_size;
+
+-- +goose StatementEnd
@@ -0,0 +1,18 @@
+-- +goose Up
+-- +goose StatementBegin
+
+create index if not exists idx_prover_task_created_at on prover_task(created_at) where deleted_at IS NULL;
+
+create index if not exists idx_prover_task_task_id on prover_task(task_id) where deleted_at IS NULL;
+
+-- +goose StatementEnd
+
+-- +goose Down
+-- +goose StatementBegin
+
+drop index if exists idx_prover_task_created_at;
+
+drop index if exists idx_prover_task_task_id;
+
+
+-- +goose StatementEnd
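Both indexes are partial (`where deleted_at IS NULL`), matching the soft-delete convention: hot queries only touch live rows, so the predicate keeps each index small while still covering those lookups. An illustrative query whose shape the planner can satisfy from idx_prover_task_task_id (driver registration and DSN are placeholders):

package main

import (
    "context"
    "database/sql"
    "fmt"
)

// liveTasksByTaskID filters on the exact predicate of the partial index
// (task_id plus deleted_at IS NULL), so the planner can use
// idx_prover_task_task_id instead of scanning soft-deleted history rows.
func liveTasksByTaskID(ctx context.Context, db *sql.DB, taskID string) (int, error) {
    var n int
    err := db.QueryRowContext(ctx,
        `SELECT count(*) FROM prover_task WHERE task_id = $1 AND deleted_at IS NULL`,
        taskID).Scan(&n)
    return n, err
}

func main() {
    // sql.Open needs a registered driver (e.g. lib/pq); the DSN is a placeholder.
    db, err := sql.Open("postgres", "postgres://localhost/scroll?sslmode=disable")
    if err != nil {
        fmt.Println(err)
        return
    }
    fmt.Println(liveTasksByTaskID(context.Background(), db, "task-1"))
}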
@@ -24,7 +24,7 @@ rollup_relayer: ## Builds the rollup_relayer bin
 test:
 	go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 $(PWD)/...
 
-lint: ## Lint the files - used for CI
+lint: mock_abi ## Lint the files - used for CI
 	GOBIN=$(PWD)/build/bin go run ../build/lint.go
 
 clean: ## Empty out the bin folder
@@ -76,7 +76,7 @@ func action(ctx *cli.Context) error {
 		}
 	})
 
-	log.Info("Start event-watcher successfully")
+	log.Info("Start event-watcher successfully", "version", version.Version)
 
 	// Catch CTRL-C to ensure a graceful shutdown.
 	interrupt := make(chan os.Signal, 1)
@@ -109,7 +109,7 @@ func action(ctx *cli.Context) error {
 	go utils.Loop(subCtx, 2*time.Second, l2relayer.ProcessGasPriceOracle)
 
 	// Finish start all message relayer functions
-	log.Info("Start gas-oracle successfully")
+	log.Info("Start gas-oracle successfully", "version", version.Version)
 
 	// Catch CTRL-C to ensure a graceful shutdown.
 	interrupt := make(chan os.Signal, 1)
@@ -115,7 +115,7 @@ func action(ctx *cli.Context) error {
 	go utils.Loop(subCtx, 15*time.Second, l2relayer.ProcessCommittedBatches)
 
 	// Finish start all rollup relayer functions.
-	log.Info("Start rollup-relayer successfully")
+	log.Info("Start rollup-relayer successfully", "version", version.Version)
 
 	// Catch CTRL-C to ensure a graceful shutdown.
 	interrupt := make(chan os.Signal, 1)
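All three daemons now log their build version at startup. The conventional Go wiring for such a version string is a package-level variable stamped at build time with -ldflags; the hunks above read it from a shared version package, while this sketch inlines the variable to stay self-contained:

package main

import "log"

// Version is the conventional hook for build-time stamping:
//
//	go build -ldflags "-X main.Version=v1.2.3"
//
// Without the flag it falls back to the default below.
var Version = "dev"

func main() {
    log.Printf("Start gas-oracle successfully, version=%s", Version)
}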
@@ -578,7 +578,7 @@ func (r *Layer2Relayer) finalizeBatch(dbBatch *orm.Batch, withProof bool) error
 		return err
 	}
 
-	log.Info("finalizeBatch in layer1", "with proof", withProof, "index", dbBatch.Index, "batch hash", dbBatch.Hash, "tx hash", txHash)
+	log.Info("finalizeBatch in layer1", "with proof", withProof, "index", dbBatch.Index, "batch hash", dbBatch.Hash, "tx hash", txHash.String())
 
 	// record and sync with db, @todo handle db error
 	if err := r.batchOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, dbBatch.Hash, txHash.String(), types.RollupFinalizing); err != nil {
@@ -25,6 +25,7 @@ type Batch struct {
 	// batch
 	Index           uint64 `json:"index" gorm:"column:index"`
 	Hash            string `json:"hash" gorm:"column:hash"`
+	DataHash        string `json:"data_hash" gorm:"column:data_hash"`
 	StartChunkIndex uint64 `json:"start_chunk_index" gorm:"column:start_chunk_index"`
 	StartChunkHash  string `json:"start_chunk_hash" gorm:"column:start_chunk_hash"`
 	EndChunkIndex   uint64 `json:"end_chunk_index" gorm:"column:end_chunk_index"`
@@ -53,6 +54,10 @@ type Batch struct {
 	OracleStatus int16  `json:"oracle_status" gorm:"column:oracle_status;default:1"`
 	OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"`
 
+	// blob
+	BlobDataProof []byte `json:"blob_data_proof" gorm:"column:blob_data_proof"`
+	BlobSize      uint64 `json:"blob_size" gorm:"column:blob_size"`
+
 	// metadata
 	TotalL1CommitGas          uint64 `json:"total_l1_commit_gas" gorm:"column:total_l1_commit_gas;default:0"`
 	TotalL1CommitCalldataSize uint64 `json:"total_l1_commit_calldata_size" gorm:"column:total_l1_commit_calldata_size;default:0"`
@@ -257,6 +262,7 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, codecVer
 	newBatch := Batch{
 		Index:           batch.Index,
 		Hash:            batchMeta.BatchHash.Hex(),
+		DataHash:        batchMeta.BatchDataHash.Hex(),
 		StartChunkHash:  batchMeta.StartChunkHash.Hex(),
 		StartChunkIndex: startChunkIndex,
 		EndChunkHash:    batchMeta.EndChunkHash.Hex(),
@@ -271,6 +277,8 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, codecVer
 		OracleStatus:              int16(types.GasOraclePending),
 		TotalL1CommitGas:          metrics.L1CommitGas,
 		TotalL1CommitCalldataSize: metrics.L1CommitCalldataSize,
+		BlobDataProof:             batchMeta.BatchBlobDataProof,
+		BlobSize:                  metrics.L1CommitBlobSize,
 	}
 
 	db := o.db
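For reference, this is how the new columns look as a cut-down gorm model: BYTEA maps to []byte and INTEGER to uint64, with tags naming the columns added by migration 00017. Purely illustrative (backed by an in-memory SQLite database here); the real Batch struct carries many more fields:

package main

import (
    "fmt"

    "gorm.io/driver/sqlite"
    "gorm.io/gorm"
)

// Batch is a trimmed stand-in showing only the blob-related columns.
type Batch struct {
    Hash          string `gorm:"column:hash;primaryKey"`
    DataHash      string `gorm:"column:data_hash"`
    BlobDataProof []byte `gorm:"column:blob_data_proof"`
    BlobSize      uint64 `gorm:"column:blob_size"`
}

func main() {
    db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{})
    if err != nil {
        panic(err)
    }
    if err := db.AutoMigrate(&Batch{}); err != nil {
        panic(err)
    }
    db.Create(&Batch{Hash: "0xabc", DataHash: "0xdef", BlobSize: 4096})

    var got Batch
    db.First(&got, "hash = ?", "0xabc")
    fmt.Println(got.DataHash, got.BlobSize, len(got.BlobDataProof))
}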
@@ -44,6 +44,10 @@ type Chunk struct {
 	// batch
 	BatchHash string `json:"batch_hash" gorm:"column:batch_hash;default:NULL"`
 
+	// blob
+	CrcMax   uint64 `json:"crc_max" gorm:"column:crc_max"`
+	BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"`
+
 	// metadata
 	TotalL2TxGas uint64 `json:"total_l2_tx_gas" gorm:"column:total_l2_tx_gas"`
 	TotalL2TxNum uint64 `json:"total_l2_tx_num" gorm:"column:total_l2_tx_num"`
@@ -212,6 +216,8 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, codecVer
 		ParentChunkStateRoot: parentChunkStateRoot,
 		WithdrawRoot:         chunk.Blocks[numBlocks-1].WithdrawRoot.Hex(),
 		ProvingStatus:        int16(types.ProvingTaskUnassigned),
+		CrcMax:               metrics.CrcMax,
+		BlobSize:             metrics.L1CommitBlobSize,
 	}
 
 	db := o.db
@@ -191,10 +191,12 @@ func GetChunkHash(chunk *encoding.Chunk, totalL1MessagePoppedBefore uint64, code
 
 // BatchMetadata represents the metadata of a batch.
 type BatchMetadata struct {
-	BatchHash      common.Hash
-	BatchBytes     []byte
-	StartChunkHash common.Hash
-	EndChunkHash   common.Hash
+	BatchHash          common.Hash
+	BatchDataHash      common.Hash
+	BatchBlobDataProof []byte
+	BatchBytes         []byte
+	StartChunkHash     common.Hash
+	EndChunkHash       common.Hash
 }
 
 // GetBatchMetadata retrieves the metadata of a batch.
@@ -212,9 +214,11 @@ func GetBatchMetadata(batch *encoding.Batch, codecVersion encoding.CodecVersion)
 		return nil, fmt.Errorf("failed to create codecv0 DA batch: %w", err)
 	}
 
+	// BatchBlobDataProof is left as empty for codecv0.
 	batchMeta := &BatchMetadata{
-		BatchHash:  daBatch.Hash(),
-		BatchBytes: daBatch.Encode(),
+		BatchHash:     daBatch.Hash(),
+		BatchDataHash: daBatch.DataHash,
+		BatchBytes:    daBatch.Encode(),
 	}
 
 	startDAChunk, err := codecv0.NewDAChunk(batch.Chunks[0], batch.TotalL1MessagePoppedBefore)
@@ -243,9 +247,16 @@ func GetBatchMetadata(batch *encoding.Batch, codecVersion encoding.CodecVersion)
 		return nil, fmt.Errorf("failed to create codecv1 DA batch: %w", err)
 	}
 
+	blobDataProof, err := daBatch.BlobDataProof()
+	if err != nil {
+		return nil, fmt.Errorf("failed to get codecv1 blob data proof: %w", err)
+	}
+
 	batchMeta := &BatchMetadata{
-		BatchHash:  daBatch.Hash(),
-		BatchBytes: daBatch.Encode(),
+		BatchHash:          daBatch.Hash(),
+		BatchDataHash:      daBatch.DataHash,
+		BatchBlobDataProof: blobDataProof,
+		BatchBytes:         daBatch.Encode(),
 	}
 
 	startDAChunk, err := codecv1.NewDAChunk(batch.Chunks[0], batch.TotalL1MessagePoppedBefore)
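The metadata flow after this change: codecv0 fills only the hash fields and leaves BatchBlobDataProof empty, while codecv1 must produce a blob proof and fails loudly if it cannot. A compact stand-alone sketch of that shape (daBatch here is a stub, not the codec types):

package main

import (
    "errors"
    "fmt"
)

// BatchMetadata mirrors only the fields this hunk touches.
type BatchMetadata struct {
    BatchHash          string
    BatchDataHash      string
    BatchBlobDataProof []byte
}

// daBatch is a stub standing in for the codecv0/codecv1 DA batch types.
type daBatch struct {
    hash, dataHash string
    blobProof      []byte
}

func (b *daBatch) BlobDataProof() ([]byte, error) {
    if b.blobProof == nil {
        return nil, errors.New("no blob proof for this codec")
    }
    return b.blobProof, nil
}

// buildMetadata sketches GetBatchMetadata's branching: codecv0 leaves the
// blob proof empty, codecv1 fetches it and propagates any failure.
func buildMetadata(codecVersion int, b *daBatch) (*BatchMetadata, error) {
    meta := &BatchMetadata{BatchHash: b.hash, BatchDataHash: b.dataHash}
    if codecVersion >= 1 {
        proof, err := b.BlobDataProof()
        if err != nil {
            return nil, fmt.Errorf("failed to get blob data proof: %w", err)
        }
        meta.BatchBlobDataProof = proof
    }
    return meta, nil
}

func main() {
    v0 := &daBatch{hash: "0xaa", dataHash: "0xbb"}
    v1 := &daBatch{hash: "0xcc", dataHash: "0xdd", blobProof: []byte{1, 2, 3}}
    m0, _ := buildMetadata(0, v0)
    m1, _ := buildMetadata(1, v1)
    fmt.Println(m0.BatchBlobDataProof, m1.BatchBlobDataProof)
}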
@@ -21,6 +21,7 @@ type Batch struct {
 	// batch
 	Index           uint64 `json:"index" gorm:"column:index"`
 	Hash            string `json:"hash" gorm:"column:hash"`
+	DataHash        string `json:"data_hash" gorm:"column:data_hash"`
 	StartChunkIndex uint64 `json:"start_chunk_index" gorm:"column:start_chunk_index"`
 	StartChunkHash  string `json:"start_chunk_hash" gorm:"column:start_chunk_hash"`
 	EndChunkIndex   uint64 `json:"end_chunk_index" gorm:"column:end_chunk_index"`
@@ -49,6 +50,10 @@ type Batch struct {
 	OracleStatus int16  `json:"oracle_status" gorm:"column:oracle_status;default:1"`
 	OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"`
 
+	// blob
+	BlobDataProof []byte `json:"blob_data_proof" gorm:"column:blob_data_proof"`
+	BlobSize      uint64 `json:"blob_size" gorm:"column:blob_size"`
+
 	// metadata
 	CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
 	UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
@@ -151,6 +156,7 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
 	newBatch := Batch{
 		Index:           batch.Index,
 		Hash:            daBatch.Hash().Hex(),
+		DataHash:        daBatch.DataHash.Hex(),
 		StartChunkHash:  startDAChunkHash.Hex(),
 		StartChunkIndex: startChunkIndex,
 		EndChunkHash:    endDAChunkHash.Hex(),
@@ -163,6 +169,8 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
 		ProvingStatus: int16(types.ProvingTaskUnassigned),
 		RollupStatus:  int16(types.RollupPending),
 		OracleStatus:  int16(types.GasOraclePending),
+		BlobDataProof: nil, // using mock value because this piece of codes is only used in unit tests
+		BlobSize:      0,   // using mock value because this piece of codes is only used in unit tests
 	}
 
 	db := o.db
@@ -43,6 +43,10 @@ type Chunk struct {
 	// batch
 	BatchHash string `json:"batch_hash" gorm:"column:batch_hash;default:NULL"`
 
+	// blob
+	CrcMax   uint64 `json:"crc_max" gorm:"column:crc_max"`
+	BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"`
+
 	// metadata
 	TotalL2TxGas uint64 `json:"total_l2_tx_gas" gorm:"column:total_l2_tx_gas"`
 	TotalL2TxNum uint64 `json:"total_l2_tx_num" gorm:"column:total_l2_tx_num"`
@@ -150,6 +154,8 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, dbTX ...
 		ParentChunkStateRoot: parentChunkStateRoot,
 		WithdrawRoot:         chunk.Blocks[numBlocks-1].WithdrawRoot.Hex(),
 		ProvingStatus:        int16(types.ProvingTaskUnassigned),
+		CrcMax:               0, // using mock value because this piece of codes is only used in unit tests
+		BlobSize:             0, // using mock value because this piece of codes is only used in unit tests
 	}
 
 	db := o.db