Compare commits


1 commit

Author: Péter Garamvölgyi | SHA1: a8207ebdf3 | Message: add calldata log for commitBatch | Date: 2023-08-07 03:03:53 +08:00
66 changed files with 434 additions and 1915 deletions

View File

@@ -43,8 +43,6 @@ jobs:
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
with:
version: 1.10.19
- name: Lint
working-directory: 'bridge'
run: |
@@ -94,8 +92,6 @@ jobs:
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
with:
version: 1.10.19
- name: Build prerequisites
run: |
make dev_docker

View File

@@ -88,8 +88,6 @@ jobs:
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
with:
version: 1.10.19
- name: Build prerequisites
run: |
make dev_docker

View File

@@ -104,8 +104,6 @@ jobs:
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
with:
version: 1.10.19
- name: Build prerequisites
run: |
make dev_docker

View File

@@ -81,8 +81,6 @@ jobs:
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
with:
version: 1.10.19
- name: Build prerequisites
run: |
make dev_docker

View File

@@ -6,7 +6,7 @@ on:
- v**
jobs:
event_watcher:
build-and-push:
runs-on: ubuntu-latest
steps:
- name: Checkout code
@@ -27,18 +27,6 @@ jobs:
tags: scrolltech/event-watcher:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
gas_oracle:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push gas_oracle docker
uses: docker/build-push-action@v2
with:
@@ -48,18 +36,6 @@ jobs:
tags: scrolltech/gas-oracle:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
msg_relayer:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push msg_relayer docker
uses: docker/build-push-action@v2
with:
@@ -69,18 +45,6 @@ jobs:
tags: scrolltech/msg-relayer:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
rollup_relayer:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push rollup_relayer docker
uses: docker/build-push-action@v2
with:
@@ -90,18 +54,6 @@ jobs:
tags: scrolltech/rollup-relayer:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
bridgehistoryapi-cross-msg-fetcher:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push bridgehistoryapi-cross-msg-fetcher docker
uses: docker/build-push-action@v2
with:
@@ -111,18 +63,6 @@ jobs:
tags: scrolltech/bridgehistoryapi-cross-msg-fetcher:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
bridgehistoryapi-server:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push bridgehistoryapi-server docker
uses: docker/build-push-action@v2
with:
@@ -132,18 +72,6 @@ jobs:
tags: scrolltech/bridgehistoryapi-server:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
coordinator:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push coordinator docker
uses: docker/build-push-action@v2
with:
@@ -153,24 +81,3 @@ jobs:
tags: scrolltech/coordinator:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
prover-stats-api:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push prover-stats-api docker
uses: docker/build-push-action@v2
with:
context: .
file: ./build/dockerfiles/prover-stats-api.Dockerfile
push: true
tags: scrolltech/prover-stats-api:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}

View File

@@ -33,8 +33,6 @@ jobs:
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
with:
version: 1.10.19
- name: Build prerequisites
run: |
make dev_docker

View File

@@ -110,7 +110,7 @@ func action(ctx *cli.Context) error {
go utils.Loop(subCtx, 2*time.Second, l2relayer.ProcessPendingBatches)
go utils.Loop(subCtx, 60*time.Second, l2relayer.ProcessCommittedBatches)
go utils.Loop(subCtx, 2*time.Second, l2relayer.ProcessCommittedBatches)
// Finish start all rollup relayer functions.
log.Info("Start rollup-relayer successfully")

View File

@@ -374,16 +374,20 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
// send transaction
txID := batch.Hash + "-commit"
log.Info(
"Sending commitBatch",
"batch.Index", batch.Index,
"batch.Hash", batch.Hash,
"RollupContractAddress", r.cfg.RollupContractAddress,
"calldata", common.Bytes2Hex(calldata),
"parentBatch.BatchHeader", common.Bytes2Hex(parentBatch.BatchHeader),
"batch.BatchHeader", common.Bytes2Hex(batch.BatchHeader),
)
txHash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), calldata, 0)
if err != nil {
log.Error(
"Failed to send commitBatch tx to layer1",
"index", batch.Index,
"hash", batch.Hash,
"RollupContractAddress", r.cfg.RollupContractAddress,
"calldata", common.Bytes2Hex(calldata),
"err", err,
)
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
log.Error("Failed to send commitBatch tx to layer1 ", "err", err)
}
return
}
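
This hunk is the change the commit message refers to: before sending `commitBatch`, the relayer now logs the hex-encoded calldata together with the parent and current batch headers. A self-contained sketch of that logging pattern, using go-ethereum's `log` and `common` packages (the surrounding `Layer2Relayer` types are omitted, and the sample values are placeholders):

```go
package main

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"
)

// logCommitBatch attaches the ABI calldata as a hex string to a structured
// key-value log entry, matching the style used in ProcessPendingBatches above.
func logCommitBatch(index uint64, hash string, rollupAddr common.Address, calldata []byte) {
	log.Info(
		"Sending commitBatch",
		"batch.Index", index,
		"batch.Hash", hash,
		"RollupContractAddress", rollupAddr,
		"calldata", common.Bytes2Hex(calldata),
	)
}

func main() {
	// Placeholder values for illustration only.
	logCommitBatch(42, "0xbatchhash", common.HexToAddress("0x0000000000000000000000000000000000000001"), []byte{0x86, 0xb0, 0x53, 0xa9})
}
```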
@@ -429,6 +433,7 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
return
case types.ProvingTaskVerified:
log.Info("Start to roll up zk proof", "hash", hash)
success := false
var parentBatchStateRoot string
if batch.Index > 0 {
@@ -442,14 +447,24 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
parentBatchStateRoot = parentBatch.StateRoot
}
defer func() {
// TODO: need to revisit this and have a more fine-grained error handling
if !success {
log.Info("Failed to upload the proof, change rollup status to RollupFinalizeFailed", "hash", hash)
if err = r.batchOrm.UpdateRollupStatus(r.ctx, hash, types.RollupFinalizeFailed); err != nil {
log.Warn("UpdateRollupStatus failed", "hash", hash, "err", err)
}
}
}()
aggProof, err := r.batchOrm.GetVerifiedProofByHash(r.ctx, hash)
if err != nil {
log.Error("get verified proof by hash failed", "hash", hash, "err", err)
log.Warn("get verified proof by hash failed", "hash", hash, "err", err)
return
}
if err = aggProof.SanityCheck(); err != nil {
log.Error("agg_proof sanity check fails", "hash", hash, "error", err)
log.Warn("agg_proof sanity check fails", "hash", hash, "error", err)
return
}
@@ -472,18 +487,8 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
finalizeTxHash := &txHash
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
// This can happen normally if we try to finalize 2 or more
// batches around the same time. The 2nd tx might fail since
// the client does not see the 1st tx's updates at this point.
// TODO: add more fine-grained error handling
log.Error(
"finalizeBatchWithProof in layer1 failed",
"index", batch.Index,
"hash", batch.Hash,
"RollupContractAddress", r.cfg.RollupContractAddress,
"calldata", common.Bytes2Hex(data),
"err", err,
)
log.Error("finalizeBatchWithProof in layer1 failed",
"index", batch.Index, "hash", batch.Hash, "err", err)
}
return
}
@@ -493,10 +498,11 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
// record and sync with db, @todo handle db error
err = r.batchOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, hash, finalizeTxHash.String(), types.RollupFinalizing)
if err != nil {
log.Error("UpdateFinalizeTxHashAndRollupStatus failed",
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed",
"index", batch.Index, "batch hash", batch.Hash,
"tx hash", finalizeTxHash.String(), "err", err)
}
success = true
r.processingFinalization.Store(txID, hash)
case types.ProvingTaskFailed:
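
The `success := false` flag plus `defer` seen above is a common Go pattern for rolling back state when a function exits early. A minimal sketch of the pattern; the status name and the `markFailed` callback stand in for the ORM call (`UpdateRollupStatus(..., RollupFinalizeFailed)`) used in the diff:

```go
package main

import "fmt"

// finalizeBatch mimics the success-flag + defer rollback above: any early
// return before the final line leaves success == false, so the deferred
// closure marks the batch as failed.
func finalizeBatch(hash string, haveProof bool, markFailed func(string)) {
	success := false
	defer func() {
		if !success {
			markFailed(hash)
		}
	}()

	if !haveProof {
		return // e.g. GetVerifiedProofByHash or SanityCheck failed
	}
	// ... build calldata, send finalizeBatchWithProof, record the tx hash ...
	success = true
}

func main() {
	finalizeBatch("0xbatch", false, func(h string) { fmt.Println("rollback:", h) })
}
```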

View File

@@ -90,9 +90,10 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
assert.NoError(t, err)
assert.Equal(t, 1, len(statuses))
// no valid proof, rollup status remains the same
assert.Equal(t, types.RollupCommitted, statuses[0])
assert.Equal(t, types.RollupFinalizeFailed, statuses[0])
err = batchOrm.UpdateRollupStatus(context.Background(), batch.Hash, types.RollupCommitted)
assert.NoError(t, err)
proof := &message.BatchProof{
Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
}

View File

@@ -37,7 +37,7 @@ COPY . .
RUN cp -r ./common/libzkp/interface ./coordinator/internal/logic/verifier/lib
COPY --from=zkp-builder /app/target/release/libzkp.so ./coordinator/internal/logic/verifier/lib/
COPY --from=zkp-builder /app/target/release/libzktrie.so ./coordinator/internal/logic/verifier/lib/
RUN cd ./coordinator && make coordinator_skip_libzkp && mv ./build/bin/coordinator /bin/coordinator && mv internal/logic/verifier/lib /bin/
RUN cd ./coordinator && go build -v -p 4 -o /bin/coordinator ./cmd && mv internal/logic/verifier/lib /bin/
# Pull coordinator into a second stage deploy alpine container
FROM ubuntu:20.04
@@ -46,6 +46,6 @@ ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/src/coordinator/internal/logic/verifier/li
RUN mkdir -p /src/coordinator/internal/logic/verifier/lib
COPY --from=builder /bin/lib /src/coordinator/internal/logic/verifier/lib
COPY --from=builder /bin/coordinator /bin/
RUN /bin/coordinator --version
ENTRYPOINT ["/bin/coordinator"]

View File

@@ -1,31 +0,0 @@
# Download Go dependencies
FROM scrolltech/go-alpine-builder:1.19 as base
WORKDIR /src
COPY go.work* ./
COPY ./bridge/go.* ./bridge/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./prover-stats-api/go.* ./prover-stats-api/
COPY ./prover/go.* ./prover/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
# Support mainland environment.
#ENV GOPROXY="https://goproxy.cn,direct"
RUN go mod download -x
# Build prover-stats-api
FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/prover-stats-api/cmd/ && go build -v -p 4 -o /bin/prover-stats-api
# Pull prover-stats-api into a second stage deploy alpine container \
FROM alpine:latest
COPY --from=builder /bin/prover-stats-api /bin/
ENTRYPOINT ["prover-stats-api"]

View File

@@ -32,7 +32,7 @@ dependencies = [
[[package]]
name = "aggregator"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.12#554bdcf00334bab45670b8daa72d687a7ff2b919"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.3#2c8c749b3e4a61e89028289f4ff93157c5671d7b"
dependencies = [
"ark-std",
"env_logger 0.10.0",
@@ -432,7 +432,7 @@ checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1"
[[package]]
name = "bus-mapping"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.12#554bdcf00334bab45670b8daa72d687a7ff2b919"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.3#2c8c749b3e4a61e89028289f4ff93157c5671d7b"
dependencies = [
"eth-types",
"ethers-core",
@@ -630,12 +630,9 @@ checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e"
[[package]]
name = "convert_case"
version = "0.6.0"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec182b0ca2f35d8fc196cf3404988fd8b8c739a4d270ff118a398feb0cbec1ca"
dependencies = [
"unicode-segmentation",
]
checksum = "fb4a24b1aaf0fd0ce8b45161144d6f42cd91677fd5940fd431183eb023b3a2b8"
[[package]]
name = "core-foundation-sys"
@@ -1048,7 +1045,7 @@ dependencies = [
[[package]]
name = "eth-types"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.12#554bdcf00334bab45670b8daa72d687a7ff2b919"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.3#2c8c749b3e4a61e89028289f4ff93157c5671d7b"
dependencies = [
"ethers-core",
"ethers-signers",
@@ -1141,12 +1138,13 @@ dependencies = [
[[package]]
name = "ethers-core"
version = "0.17.0"
source = "git+https://github.com/scroll-tech/ethers-rs.git?branch=v0.17.0#739ec9a0df8daf536937739c87e85612bd73212f"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ebdd63c828f58aa067f40f9adcbea5e114fb1f90144b3a1e2858e0c9b1ff4e8"
dependencies = [
"arrayvec",
"bytes",
"chrono",
"convert_case 0.6.0",
"convert_case 0.5.0",
"elliptic-curve",
"ethabi",
"fastrlp",
@@ -1225,7 +1223,7 @@ dependencies = [
[[package]]
name = "external-tracer"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.12#554bdcf00334bab45670b8daa72d687a7ff2b919"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.3#2c8c749b3e4a61e89028289f4ff93157c5671d7b"
dependencies = [
"eth-types",
"geth-utils",
@@ -1438,7 +1436,7 @@ dependencies = [
[[package]]
name = "gadgets"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.12#554bdcf00334bab45670b8daa72d687a7ff2b919"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.3#2c8c749b3e4a61e89028289f4ff93157c5671d7b"
dependencies = [
"digest 0.7.6",
"eth-types",
@@ -1478,7 +1476,7 @@ dependencies = [
[[package]]
name = "geth-utils"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.12#554bdcf00334bab45670b8daa72d687a7ff2b919"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.3#2c8c749b3e4a61e89028289f4ff93157c5671d7b"
dependencies = [
"env_logger 0.9.3",
"gobuild 0.1.0-alpha.2 (git+https://github.com/scroll-tech/gobuild.git)",
@@ -2076,7 +2074,7 @@ dependencies = [
[[package]]
name = "keccak256"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.12#554bdcf00334bab45670b8daa72d687a7ff2b919"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.3#2c8c749b3e4a61e89028289f4ff93157c5671d7b"
dependencies = [
"env_logger 0.9.3",
"eth-types",
@@ -2263,7 +2261,7 @@ dependencies = [
[[package]]
name = "mock"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.12#554bdcf00334bab45670b8daa72d687a7ff2b919"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.3#2c8c749b3e4a61e89028289f4ff93157c5671d7b"
dependencies = [
"eth-types",
"ethers-core",
@@ -2278,7 +2276,7 @@ dependencies = [
[[package]]
name = "mpt-zktrie"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.12#554bdcf00334bab45670b8daa72d687a7ff2b919"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.3#2c8c749b3e4a61e89028289f4ff93157c5671d7b"
dependencies = [
"bus-mapping",
"eth-types",
@@ -2754,7 +2752,7 @@ dependencies = [
[[package]]
name = "prover"
version = "0.4.0"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.5.12#ef945901bee26c4b79be8af60e259443392368fa"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.5.3#337089ac40bac756d88b9ae30a3be1f82538b216"
dependencies = [
"aggregator",
"anyhow",
@@ -4039,7 +4037,7 @@ checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba"
[[package]]
name = "types"
version = "0.4.0"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.5.12#ef945901bee26c4b79be8af60e259443392368fa"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.5.3#337089ac40bac756d88b9ae30a3be1f82538b216"
dependencies = [
"base64 0.13.1",
"blake2",
@@ -4084,12 +4082,6 @@ dependencies = [
"tinyvec",
]
[[package]]
name = "unicode-segmentation"
version = "1.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36"
[[package]]
name = "unicode-xid"
version = "0.2.4"
@@ -4490,7 +4482,7 @@ checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9"
[[package]]
name = "zkevm-circuits"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.12#554bdcf00334bab45670b8daa72d687a7ff2b919"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.3#2c8c749b3e4a61e89028289f4ff93157c5671d7b"
dependencies = [
"array-init",
"bus-mapping",

View File

@@ -7,8 +7,6 @@ edition = "2021"
[lib]
crate-type = ["cdylib"]
[patch.crates-io]
ethers-core = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v0.17.0" }
[patch."https://github.com/privacy-scaling-explorations/halo2.git"]
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "develop" }
[patch."https://github.com/privacy-scaling-explorations/poseidon.git"]
@@ -20,8 +18,8 @@ maingate = { git = "https://github.com/scroll-tech/halo2wrong", branch = "halo2-
halo2curves = { git = "https://github.com/scroll-tech/halo2curves.git", branch = "0.3.1-derive-serde" }
[dependencies]
prover = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.5.12" }
types = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.5.12" }
prover = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.5.3" }
types = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.5.3" }
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "develop" }
log = "0.4"

View File

@@ -169,7 +169,7 @@ func TestBatchHeaderEncode(t *testing.T) {
assert.NotNil(t, batchHeader)
bytes = batchHeader.Encode()
assert.Equal(t, 121, len(bytes))
assert.Equal(t, "010000000000000001000000000000000b000000000000000b34f419ce7e882295bdb5aec6cce56ffa788a5fed4744d7fbd77e4acbf409f1ca4136709aabc8a23aa17fbcc833da2f7857d3c2884feec9aae73429c135f9498500000000000000000000000000000000000000000000000000000000000003ff", common.Bytes2Hex(bytes))
assert.Equal(t, "010000000000000001000000000000000b000000000000000b457a9e90e8e51ba2de2f66c6b589540b88cf594dac7fa7d04b99cdcfecf24e384136709aabc8a23aa17fbcc833da2f7857d3c2884feec9aae73429c135f9498500000000000000000000000000000000000000000000000000000000000003ff", common.Bytes2Hex(bytes))
}
func TestBatchHeaderHash(t *testing.T) {
@@ -230,7 +230,7 @@ func TestBatchHeaderHash(t *testing.T) {
assert.NoError(t, err)
assert.NotNil(t, batchHeader)
hash = batchHeader.Hash()
assert.Equal(t, "1c3007880f0eafe74572ede7d164ff1ee5376e9ac9bff6f7fb837b2630cddc9a", common.Bytes2Hex(hash.Bytes()))
assert.Equal(t, "0ec9547c6645d5f0c1254e121f49e93f54525cfda5bfb2236440fb3470f48902", common.Bytes2Hex(hash.Bytes()))
}
func TestBatchHeaderDecode(t *testing.T) {
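
The expected hex above is a 121-byte batch header, and its length decomposes as 1 + 8 + 8 + 8 + 32 + 32 + 32 bytes. A hedged sketch of how such a header could be split into fields; the field names are best guesses for illustration and are not taken from the codec itself:

```go
package main

import (
	"encoding/binary"
	"encoding/hex"
	"fmt"
)

// splitBatchHeader slices a 121-byte header according to the layout implied
// by the test vector: a version byte, three uint64 counters, two 32-byte
// hashes, and a 32-byte skipped-message bitmap. Names are assumptions.
func splitBatchHeader(b []byte) {
	fmt.Println("version:              ", b[0])
	fmt.Println("batchIndex:           ", binary.BigEndian.Uint64(b[1:9]))
	fmt.Println("l1MessagePopped:      ", binary.BigEndian.Uint64(b[9:17]))
	fmt.Println("totalL1MessagePopped: ", binary.BigEndian.Uint64(b[17:25]))
	fmt.Println("dataHash:             ", hex.EncodeToString(b[25:57]))
	fmt.Println("parentBatchHash:      ", hex.EncodeToString(b[57:89]))
	fmt.Println("skippedL1MessageBitmap:", hex.EncodeToString(b[89:121]))
}

func main() {
	h, err := hex.DecodeString("010000000000000001000000000000000b000000000000000b457a9e90e8e51ba2de2f66c6b589540b88cf594dac7fa7d04b99cdcfecf24e384136709aabc8a23aa17fbcc833da2f7857d3c2884feec9aae73429c135f9498500000000000000000000000000000000000000000000000000000000000003ff")
	if err != nil {
		panic(err)
	}
	splitBatchHeader(h)
}
```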

View File

@@ -36,17 +36,6 @@ func (w *WrappedBlock) NumL1Messages(totalL1MessagePoppedBefore uint64) uint64 {
return *lastQueueIndex - totalL1MessagePoppedBefore + 1
}
// NumL2Transactions returns the number of L2 transactions in this block.
func (w *WrappedBlock) NumL2Transactions() uint64 {
var count uint64
for _, txData := range w.Transactions {
if txData.Type != types.L1MessageTxType {
count++
}
}
return count
}
// Encode encodes the WrappedBlock into RollupV2 BlockContext Encoding.
func (w *WrappedBlock) Encode(totalL1MessagePoppedBefore uint64) ([]byte, error) {
bytes := make([]byte, 60)
@@ -54,25 +43,20 @@ func (w *WrappedBlock) Encode(totalL1MessagePoppedBefore uint64) ([]byte, error)
if !w.Header.Number.IsUint64() {
return nil, errors.New("block number is not uint64")
}
if len(w.Transactions) > math.MaxUint16 {
return nil, errors.New("number of transactions exceeds max uint16")
}
// note: numL1Messages includes skipped messages
numL1Messages := w.NumL1Messages(totalL1MessagePoppedBefore)
if numL1Messages > math.MaxUint16 {
return nil, errors.New("number of L1 messages exceeds max uint16")
}
// note: numTransactions includes skipped messages
numL2Transactions := w.NumL2Transactions()
numTransactions := numL1Messages + numL2Transactions
if numTransactions > math.MaxUint16 {
return nil, errors.New("number of transactions exceeds max uint16")
}
binary.BigEndian.PutUint64(bytes[0:], w.Header.Number.Uint64())
binary.BigEndian.PutUint64(bytes[8:], w.Header.Time)
// TODO: [16:47] Currently, baseFee is 0, because we disable EIP-1559.
binary.BigEndian.PutUint64(bytes[48:], w.Header.GasLimit)
binary.BigEndian.PutUint16(bytes[56:], uint16(numTransactions))
binary.BigEndian.PutUint16(bytes[56:], uint16(len(w.Transactions)))
binary.BigEndian.PutUint16(bytes[58:], uint16(numL1Messages))
return bytes, nil
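
For clarity, the 60-byte block context written by `Encode` places its fields at the offsets used in the `PutUint64`/`PutUint16` calls above. A standalone sketch that reproduces the layout (sample values taken from the test vectors in this diff):

```go
package main

import (
	"encoding/binary"
	"encoding/hex"
	"fmt"
)

// encodeBlockContext mirrors the offsets in WrappedBlock.Encode:
//   [0:8)   block number     (big-endian uint64)
//   [8:16)  timestamp        (big-endian uint64)
//   [16:48) base fee         (32 bytes, currently zero; EIP-1559 disabled)
//   [48:56) gas limit        (big-endian uint64)
//   [56:58) numTransactions  (big-endian uint16, now len(w.Transactions))
//   [58:60) numL1Messages    (big-endian uint16)
func encodeBlockContext(number, timestamp, gasLimit uint64, numTxs, numL1Msgs uint16) []byte {
	buf := make([]byte, 60)
	binary.BigEndian.PutUint64(buf[0:], number)
	binary.BigEndian.PutUint64(buf[8:], timestamp)
	// buf[16:48] stays zero: base fee placeholder
	binary.BigEndian.PutUint64(buf[48:], gasLimit)
	binary.BigEndian.PutUint16(buf[56:], numTxs)
	binary.BigEndian.PutUint16(buf[58:], numL1Msgs)
	return buf
}

func main() {
	fmt.Println(hex.EncodeToString(encodeBlockContext(13, 0x646b6e13, 8000000, 2, 11)))
}
```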

View File

@@ -65,10 +65,9 @@ func TestChunkEncode(t *testing.T) {
hexString = hex.EncodeToString(bytes)
assert.NoError(t, err)
assert.Equal(t, 97, len(bytes))
assert.Equal(t, "01000000000000000d00000000646b6e13000000000000000000000000000000000000000000000000000000000000000000000000007a1200000c000b00000020df0b80825dc0941a258d17bf244c4df02d40343a7626a9d321e1058080808080", hexString)
assert.Equal(t, "01000000000000000d00000000646b6e13000000000000000000000000000000000000000000000000000000000000000000000000007a12000002000b00000020df0b80825dc0941a258d17bf244c4df02d40343a7626a9d321e1058080808080", hexString)
// Test case 5: when the chunk contains two blocks each with 1 L1MsgTx
// TODO: revise this test, we cannot reuse the same L1MsgTx twice
chunk = &Chunk{
Blocks: []*WrappedBlock{
wrappedBlock2,
@@ -79,7 +78,7 @@ func TestChunkEncode(t *testing.T) {
hexString = hex.EncodeToString(bytes)
assert.NoError(t, err)
assert.Equal(t, 193, len(bytes))
assert.Equal(t, "02000000000000000d00000000646b6e13000000000000000000000000000000000000000000000000000000000000000000000000007a1200000c000b000000000000000d00000000646b6e13000000000000000000000000000000000000000000000000000000000000000000000000007a12000001000000000020df0b80825dc0941a258d17bf244c4df02d40343a7626a9d321e105808080808000000020df0b80825dc0941a258d17bf244c4df02d40343a7626a9d321e1058080808080", hexString)
assert.Equal(t, "02000000000000000d00000000646b6e13000000000000000000000000000000000000000000000000000000000000000000000000007a12000002000b000000000000000d00000000646b6e13000000000000000000000000000000000000000000000000000000000000000000000000007a12000002000000000020df0b80825dc0941a258d17bf244c4df02d40343a7626a9d321e105808080808000000020df0b80825dc0941a258d17bf244c4df02d40343a7626a9d321e1058080808080", hexString)
}
func TestChunkHash(t *testing.T) {
@@ -134,5 +133,5 @@ func TestChunkHash(t *testing.T) {
}
hash, err = chunk.Hash(0)
assert.NoError(t, err)
assert.Equal(t, "0x2eb7dd63bf8fc29a0f8c10d16c2ae6f9da446907c79d50f5c164d30dc8526b60", hash.Hex())
assert.Equal(t, "0x42967825696a129e7a83f082097aca982747480956dcaa448c9296e795c9a91a", hash.Hex())
}
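
The byte counts asserted above line up with a simple scheme: one byte for the block count, 60 bytes per block context, and each L2 transaction serialized as a 4-byte big-endian length followed by its payload (97 = 1 + 60 + 4 + 32; 193 = 1 + 2*60 + 2*(4 + 32)). A hedged sketch that reproduces those lengths; the real `Chunk.Encode` may handle additional cases:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// encodeChunk concatenates a one-byte block count, the 60-byte block
// contexts, and length-prefixed L2 transaction payloads. This illustrates
// the sizes asserted in the test; it is not the canonical codec.
func encodeChunk(blockContexts, l2Txs [][]byte) []byte {
	out := []byte{byte(len(blockContexts))}
	for _, bc := range blockContexts {
		out = append(out, bc...)
	}
	for _, tx := range l2Txs {
		var size [4]byte
		binary.BigEndian.PutUint32(size[:], uint32(len(tx)))
		out = append(out, size[:]...)
		out = append(out, tx...)
	}
	return out
}

func main() {
	ctx := make([]byte, 60)
	tx := make([]byte, 32)
	fmt.Println(len(encodeChunk([][]byte{ctx}, [][]byte{tx})))          // 97
	fmt.Println(len(encodeChunk([][]byte{ctx, ctx}, [][]byte{tx, tx}))) // 193
}
```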

View File

@@ -126,7 +126,7 @@ const (
ProvingTaskUnassigned
// ProvingTaskAssigned : proving_task is assigned to be proved
ProvingTaskAssigned
// ProvingTaskProved DEPRECATED: proof has been returned by prover
// ProvingTaskProved : proof has been returned by prover
ProvingTaskProved
// ProvingTaskVerified : proof is valid
ProvingTaskVerified

View File

@@ -20,17 +20,12 @@ var (
MessageRelayerApp MockAppName = "message-relayer-test"
// RollupRelayerApp the name of mock rollup-relayer app.
RollupRelayerApp MockAppName = "rollup-relayer-test"
// DBCliApp the name of mock database app.
DBCliApp MockAppName = "db_cli-test"
// CoordinatorApp the name of mock coordinator app.
CoordinatorApp MockAppName = "coordinator-test"
// ChunkProverApp the name of mock chunk prover app.
ChunkProverApp MockAppName = "chunkProver-test"
// BatchProverApp the name of mock batch prover app.
BatchProverApp MockAppName = "batchProver-test"
// DBCliApp the name of mock database app.
DBCliApp MockAppName = "db_cli-test"
// ProverApp the name of mock prover app.
ProverApp MockAppName = "prover-test"
)
// RegisterSimulation register initializer function for integration-test.

View File

@@ -3,10 +3,9 @@ package version
import (
"fmt"
"runtime/debug"
"strings"
)
var tag = "v4.1.31"
var tag = "v4.1.13"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {
@@ -20,29 +19,11 @@ var commit = func() string {
}
}
}
// Set default value for integration test.
return "000000"
return ""
}()
// ZkVersion is commit-id of common/libzkp/impl/cargo.lock/scroll-prover and halo2, contacted by a "-"
// The default `000000-000000` is set for integration test, and will be overwritten by coordinator's & prover's actual compilations (see their Makefiles).
var ZkVersion = "000000-000000"
// ZkVersion is commit-id of common/libzkp/impl/cargo.lock/scroll-prover
var ZkVersion string
// Version denote the version of scroll protocol, including the l2geth, relayer, coordinator, prover, contracts and etc.
var Version = fmt.Sprintf("%s-%s-%s", tag, commit, ZkVersion)
// CheckScrollProverVersion check the "scroll-prover" version, if it's different from the local one, return false
func CheckScrollProverVersion(proverVersion string) bool {
// note the the version is in fact in the format of "tag-commit-scroll_prover-halo2",
// so split-by-'-' length should be 4
remote := strings.Split(proverVersion, "-")
if len(remote) != 4 {
return false
}
local := strings.Split(Version, "-")
if len(local) != 4 {
return false
}
// compare the `scroll_prover` version
return remote[2] == local[2]
}

View File

@@ -1,16 +1,16 @@
{
"ProxyAdmin": "0x95ec888F34b23063aeaB47c8E229B3a25DBF8e4F",
"ProxyAdmin": null,
"ZKRollup": {
"implementation": null,
"proxy": null
},
"L1ScrollMessenger": {
"implementation": "0xC71532468A74084cfd824c0445E96f9A2dc3Bd7E",
"proxy": "0x50c7d3e7f7c656493D1D76aaa1a836CedfCBB16A"
"implementation": null,
"proxy": null
},
"L1GatewayRouter": {
"implementation": "0x5431937CF9cb638df5e3587Ae0a2F62130CEE27e",
"proxy": "0x13FBE0D0e5552b8c9c4AE9e2435F38f37355998a"
"implementation": null,
"proxy": null
},
"L1StandardERC20Gateway": {
"implementation": null,
@@ -19,9 +19,5 @@
"L1WETHGateway": {
"implementation": null,
"proxy": null
},
"L1USDCGateway": {
"implementation": "0x2Cf090069Bc47CA931f350fe1D0a160dba4c7C53",
"proxy": "0xeED47C513265cefe6846Dd51B624F1102A9a89d3"
}
}

View File

@@ -1,16 +1,13 @@
{
"ProxyAdmin": "0x95ec888F34b23063aeaB47c8E229B3a25DBF8e4F",
"ProxyAdmin": null,
"WETH": null,
"Whitelist": null,
"ScrollStandardERC20": null,
"ScrollStandardERC20Factory": null,
"L2ScrollMessenger": {
"implementation": "0x45BA70424D61e6A0D2A5FF9093927350471A2728",
"proxy": "0xBa50f5340FB9F3Bd074bD638c9BE13eCB36E603d"
},
"L2ScrollMessenger": null,
"L2GatewayRouter": {
"implementation": "0x0378D0F56f13f018b8d4803f09349781e143453e",
"proxy": "0x9aD3c5617eCAa556d6E166787A97081907171230"
"implementation": null,
"proxy": null
},
"L2StandardERC20Gateway": {
"implementation": null,
@@ -19,9 +16,5 @@
"L2WETHGateway": {
"implementation": null,
"proxy": null
},
"L2USDCGateway": {
"implementation": "0xCad8ba59173F3d6b3d15E8eeE2CB5C8b43e50fC0",
"proxy": "0x78a5dacf40E26c21A69fA3D701F9e75f19FD7113"
}
}

View File

@@ -60,10 +60,14 @@ const config: HardhatUserConfig = {
},
l1geth: {
url: SCROLL_L1_RPC,
gasPrice: 20000000000,
gasMultiplier: 1.1,
accounts: [L1_DEPLOYER_PRIVATE_KEY],
},
l2geth: {
url: SCROLL_L2_RPC,
gasPrice: 20000000000,
gasMultiplier: 1.1,
accounts: [L2_DEPLOYER_PRIVATE_KEY],
},
},
@@ -79,16 +83,6 @@ const config: HardhatUserConfig = {
},
etherscan: {
apiKey: process.env.ETHERSCAN_API_KEY,
customChains: [
{
network: "l2geth",
chainId: 534351,
urls: {
apiURL: "https://sepolia-blockscout.scroll.io/api",
browserURL: "https://sepolia-blockscout.scroll.io",
},
},
],
},
dodoc: {
runOnCompile: true,

View File

@@ -21,7 +21,7 @@ async function main() {
if (!addressFile.get(`${contractName}.implementation`)) {
console.log(`>> Deploy ${contractName} implementation`);
const ContractImpl = await ethers.getContractFactory(contractName, deployer);
const impl = await ContractImpl.deploy(process.env.L1_USDC_ADDR, process.env.L2_USDC_ADDR);
const impl = await ContractImpl.deploy();
console.log(`>> waiting for transaction: ${impl.deployTransaction.hash}`);
await impl.deployed();
console.log(`${contractName} implementation deployed at ${impl.address}`);

View File

@@ -22,7 +22,6 @@ contract InitializeL1BridgeContracts is Script {
uint256 CHAIN_ID_L2 = vm.envUint("CHAIN_ID_L2");
uint256 MAX_L2_TX_IN_CHUNK = vm.envUint("MAX_L2_TX_IN_CHUNK");
uint256 MAX_L1_MESSAGE_GAS_LIMIT = vm.envUint("MAX_L1_MESSAGE_GAS_LIMIT");
address L1_ROLLUP_OPERATOR_ADDR = vm.envAddress("L1_ROLLUP_OPERATOR_ADDR");
address L1_FEE_VAULT_ADDR = vm.envAddress("L1_FEE_VAULT_ADDR");
address L1_WETH_ADDR = vm.envAddress("L1_WETH_ADDR");
@@ -85,7 +84,7 @@ contract InitializeL1BridgeContracts is Script {
L1_SCROLL_CHAIN_PROXY_ADDR,
L1_ENFORCED_TX_GATEWAY_PROXY_ADDR,
L2_GAS_PRICE_ORACLE_PROXY_ADDR,
MAX_L1_MESSAGE_GAS_LIMIT
10000000
);
// initialize L1ScrollMessenger

View File

@@ -25,18 +25,18 @@ async function main() {
const L2StandardERC20Impl = process.env.L2_SCROLL_STANDARD_ERC20_ADDR!;
const L2StandardERC20FactoryAddress = process.env.L2_SCROLL_STANDARD_ERC20_FACTORY_ADDR!;
if ((await L1StandardERC20Gateway.counterpart()) === constants.AddressZero) {
const tx = await L1StandardERC20Gateway.initialize(
L2StandardERC20GatewayAddress,
L1GatewayRouterAddress,
L1ScrollMessengerAddress,
L2StandardERC20Impl,
L2StandardERC20FactoryAddress
);
console.log("initialize L1StandardERC20Gateway, hash:", tx.hash);
const receipt = await tx.wait();
console.log(`✅ Done, gas used: ${receipt.gasUsed}`);
}
// if ((await L1StandardERC20Gateway.counterpart()) === constants.AddressZero) {
const tx = await L1StandardERC20Gateway.initialize(
L2StandardERC20GatewayAddress,
L1GatewayRouterAddress,
L1ScrollMessengerAddress,
L2StandardERC20Impl,
L2StandardERC20FactoryAddress
);
console.log("initialize L1StandardERC20Gateway, hash:", tx.hash);
const receipt = await tx.wait();
console.log(`✅ Done, gas used: ${receipt.gasUsed}`);
// }
}
// We recommend this pattern to be able to use async/await everywhere

View File

@@ -23,16 +23,16 @@ async function main() {
const L1ScrollMessengerAddress = addressFile.get("L1ScrollMessenger.proxy");
const L2GatewayRouterAddress = process.env.L2_GATEWAY_ROUTER_PROXY_ADDR!;
if ((await L1GatewayRouter.counterpart()) === constants.AddressZero) {
const tx = await L1GatewayRouter.initialize(
L1StandardERC20GatewayAddress,
L2GatewayRouterAddress,
L1ScrollMessengerAddress
);
console.log("initialize L1StandardERC20Gateway, hash:", tx.hash);
const receipt = await tx.wait();
console.log(`✅ Done, gas used: ${receipt.gasUsed}`);
}
// if ((await L1GatewayRouter.counterpart()) === constants.AddressZero) {
const tx = await L1GatewayRouter.initialize(
L1StandardERC20GatewayAddress,
L2GatewayRouterAddress,
L1ScrollMessengerAddress
);
console.log("initialize L1StandardERC20Gateway, hash:", tx.hash);
const receipt = await tx.wait();
console.log(`✅ Done, gas used: ${receipt.gasUsed}`);
// }
}
// We recommend this pattern to be able to use async/await everywhere

View File

@@ -21,12 +21,12 @@ async function main() {
const ZKRollupAddress = addressFile.get("ZKRollup.proxy");
if ((await L1ScrollMessenger.rollup()) === constants.AddressZero) {
const tx = await L1ScrollMessenger.initialize(ZKRollupAddress);
console.log("initialize L1StandardERC20Gateway, hash:", tx.hash);
const receipt = await tx.wait();
console.log(`✅ Done, gas used: ${receipt.gasUsed}`);
}
// if ((await L1ScrollMessenger.rollup()) === constants.AddressZero) {
const tx = await L1ScrollMessenger.initialize(ZKRollupAddress);
console.log("initialize L1StandardERC20Gateway, hash:", tx.hash);
const receipt = await tx.wait();
console.log(`✅ Done, gas used: ${receipt.gasUsed}`);
// }
}
// We recommend this pattern to be able to use async/await everywhere

View File

@@ -1,35 +0,0 @@
/* eslint-disable node/no-missing-import */
import * as dotenv from "dotenv";
import { constants } from "ethers";
import * as hre from "hardhat";
import { ethers } from "hardhat";
import { selectAddressFile } from "./utils";
dotenv.config();
async function main() {
const addressFile = selectAddressFile(hre.network.name);
const [deployer] = await ethers.getSigners();
const L1USDCGateway = await ethers.getContractAt("L1USDCGateway", addressFile.get("L1USDCGateway.proxy"), deployer);
const L1GatewayRouterAddress = addressFile.get("L1GatewayRouter.proxy");
const L1ScrollMessengerAddress = addressFile.get("L1ScrollMessenger.proxy");
const L2USDCGatewayAddress = process.env.L2_USDC_GATEWAY_PROXY_ADDR!;
if ((await L1USDCGateway.counterpart()) === constants.AddressZero) {
const tx = await L1USDCGateway.initialize(L2USDCGatewayAddress, L1GatewayRouterAddress, L1ScrollMessengerAddress);
console.log("initialize L1USDCGateway, hash:", tx.hash);
const receipt = await tx.wait();
console.log(`✅ Done, gas used: ${receipt.gasUsed}`);
}
}
// We recommend this pattern to be able to use async/await everywhere
// and properly handle errors.
main().catch((error) => {
console.error(error);
process.exitCode = 1;
});

View File

@@ -24,17 +24,17 @@ async function main() {
const L2StandardERC20FactoryAddress = addressFile.get("ScrollStandardERC20Factory");
const L1StandardERC20GatewayAddress = process.env.L1_STANDARD_ERC20_GATEWAY_PROXY_ADDR!;
if ((await L2StandardERC20Gateway.counterpart()) === constants.AddressZero) {
const tx = await L2StandardERC20Gateway.initialize(
L1StandardERC20GatewayAddress,
L2GatewayRouterAddress,
L2ScrollMessengerAddress,
L2StandardERC20FactoryAddress
);
console.log("initialize L2StandardERC20Gateway, hash:", tx.hash);
const receipt = await tx.wait();
console.log(`✅ Done, gas used: ${receipt.gasUsed}`);
}
// if ((await L2StandardERC20Gateway.counterpart()) === constants.AddressZero) {
const tx = await L2StandardERC20Gateway.initialize(
L1StandardERC20GatewayAddress,
L2GatewayRouterAddress,
L2ScrollMessengerAddress,
L2StandardERC20FactoryAddress
);
console.log("initialize L2StandardERC20Gateway, hash:", tx.hash);
const receipt = await tx.wait();
console.log(`✅ Done, gas used: ${receipt.gasUsed}`);
// }
}
// We recommend this pattern to be able to use async/await everywhere

View File

@@ -23,16 +23,16 @@ async function main() {
const L2ScrollMessengerAddress = addressFile.get("L2ScrollMessenger");
const L1GatewayRouterAddress = process.env.L1_GATEWAY_ROUTER_PROXY_ADDR!;
if ((await L2GatewayRouter.counterpart()) === constants.AddressZero) {
const tx = await L2GatewayRouter.initialize(
L2StandardERC20GatewayAddress,
L1GatewayRouterAddress,
L2ScrollMessengerAddress
);
console.log("initialize L1StandardERC20Gateway, hash:", tx.hash);
const receipt = await tx.wait();
console.log(`✅ Done, gas used: ${receipt.gasUsed}`);
}
// if ((await L2GatewayRouter.counterpart()) === constants.AddressZero) {
const tx = await L2GatewayRouter.initialize(
L2StandardERC20GatewayAddress,
L1GatewayRouterAddress,
L2ScrollMessengerAddress
);
console.log("initialize L1StandardERC20Gateway, hash:", tx.hash);
const receipt = await tx.wait();
console.log(`✅ Done, gas used: ${receipt.gasUsed}`);
// }
}
// We recommend this pattern to be able to use async/await everywhere

View File

@@ -1,35 +0,0 @@
/* eslint-disable node/no-missing-import */
import * as dotenv from "dotenv";
import { constants } from "ethers";
import * as hre from "hardhat";
import { ethers } from "hardhat";
import { selectAddressFile } from "./utils";
dotenv.config();
async function main() {
const addressFile = selectAddressFile(hre.network.name);
const [deployer] = await ethers.getSigners();
const L2USDCGateway = await ethers.getContractAt("L2USDCGateway", addressFile.get("L2USDCGateway.proxy"), deployer);
const L2GatewayRouterAddress = addressFile.get("L2GatewayRouter.proxy");
const L2ScrollMessengerAddress = addressFile.get("L2ScrollMessenger.proxy");
const L1USDCGatewayAddress = process.env.L1_USDC_GATEWAY_PROXY_ADDR!;
if ((await L2USDCGateway.counterpart()) === constants.AddressZero) {
const tx = await L2USDCGateway.initialize(L1USDCGatewayAddress, L2GatewayRouterAddress, L2ScrollMessengerAddress);
console.log("initialize L2USDCGateway, hash:", tx.hash);
const receipt = await tx.wait();
console.log(`✅ Done, gas used: ${receipt.gasUsed}`);
}
}
// We recommend this pattern to be able to use async/await everywhere
// and properly handle errors.
main().catch((error) => {
console.error(error);
process.exitCode = 1;
});

View File

@@ -68,7 +68,7 @@ abstract contract L1ERC20Gateway is IL1ERC20Gateway, IMessageDropCallback, Scrol
address _to,
uint256 _amount,
bytes calldata _data
) external payable virtual override onlyCallByCounterpart nonReentrant {
) external payable override onlyCallByCounterpart nonReentrant {
_beforeFinalizeWithdrawERC20(_l1Token, _l2Token, _from, _to, _amount, _data);
// @note can possible trigger reentrant call to this contract or messenger,

View File

@@ -2,163 +2,10 @@
pragma solidity =0.8.16;
import {OwnableUpgradeable} from "@openzeppelin/contracts-upgradeable/access/OwnableUpgradeable.sol";
import {IERC20Upgradeable} from "@openzeppelin/contracts-upgradeable/token/ERC20/IERC20Upgradeable.sol";
import {SafeERC20Upgradeable} from "@openzeppelin/contracts-upgradeable/token/ERC20/utils/SafeERC20Upgradeable.sol";
import {L1CustomERC20Gateway} from "../L1CustomERC20Gateway.sol";
import {IFiatToken} from "../../../interfaces/IFiatToken.sol";
import {IUSDCBurnableSourceBridge} from "../../../interfaces/IUSDCBurnableSourceBridge.sol";
import {IL2ERC20Gateway} from "../../../L2/gateways/IL2ERC20Gateway.sol";
import {IL1ScrollMessenger} from "../../IL1ScrollMessenger.sol";
import {IL1ERC20Gateway} from "../IL1ERC20Gateway.sol";
// solhint-disable no-empty-blocks
import {ScrollGatewayBase} from "../../../libraries/gateway/ScrollGatewayBase.sol";
import {L1ERC20Gateway} from "../L1ERC20Gateway.sol";
contract L1USDCGateway is L1CustomERC20Gateway {
/// @title L1USDCGateway
/// @notice The `L1USDCGateway` contract is used to deposit `USDC` token in layer 1 and
/// finalize withdraw `USDC` from layer 2, before USDC become native in layer 2.
contract L1USDCGateway is L1ERC20Gateway, IUSDCBurnableSourceBridge {
using SafeERC20Upgradeable for IERC20Upgradeable;
/*************
* Constants *
*************/
/// @notice The address of L1 USDC address.
// solhint-disable-next-line var-name-mixedcase
address public immutable l1USDC;
/// @notice The address of L2 USDC address.
address public immutable l2USDC;
/*************
* Variables *
*************/
address public circleCaller;
bool public depositPaused;
bool public withdrawPaused;
/***************
* Constructor *
***************/
constructor(address _l1USDC, address _l2USDC) {
_disableInitializers();
l1USDC = _l1USDC;
l2USDC = _l2USDC;
}
/// @notice Initialize the storage of L1WETHGateway.
/// @param _counterpart The address of L2ETHGateway in L2.
/// @param _router The address of L1GatewayRouter.
/// @param _messenger The address of L1ScrollMessenger.
function initialize(
address _counterpart,
address _router,
address _messenger
) external initializer {
require(_router != address(0), "zero router address");
ScrollGatewayBase._initialize(_counterpart, _router, _messenger);
}
/*************************
* Public View Functions *
*************************/
/// @inheritdoc IL1ERC20Gateway
function getL2ERC20Address(address) public view override returns (address) {
return l2USDC;
}
/*******************************
* Public Restricted Functions *
*******************************/
/// @inheritdoc IUSDCBurnableSourceBridge
function burnAllLockedUSDC() external override {
require(msg.sender == circleCaller, "only circle caller");
uint256 _balance = IERC20Upgradeable(l1USDC).balanceOf(address(this));
require(IFiatToken(l1USDC).burn(_balance), "burn USDC failed");
}
/// @notice Update the Circle EOA address.
/// @param _caller The address to update.
function updateCircleCaller(address _caller) external onlyOwner {
circleCaller = _caller;
}
/// @notice Change the deposit pause status of this contract.
/// @param _paused The new status, `true` means paused and `false` means not paused.
function pauseDeposit(bool _paused) external onlyOwner {
depositPaused = _paused;
}
/// @notice Change the withdraw pause status of this contract.
/// @param _paused The new status, `true` means paused and `false` means not paused.
function pauseWithdraw(bool _paused) external onlyOwner {
withdrawPaused = _paused;
}
/**********************
* Internal Functions *
**********************/
/// @inheritdoc L1ERC20Gateway
function _beforeFinalizeWithdrawERC20(
address _l1Token,
address _l2Token,
address,
address,
uint256,
bytes calldata
) internal virtual override {
require(msg.value == 0, "nonzero msg.value");
require(_l1Token == l1USDC, "l1 token not USDC");
require(_l2Token == l2USDC, "l2 token not USDC");
require(!withdrawPaused, "withdraw paused");
}
/// @inheritdoc L1ERC20Gateway
function _beforeDropMessage(
address,
address,
uint256
) internal virtual override {
require(msg.value == 0, "nonzero msg.value");
}
/// @inheritdoc L1ERC20Gateway
function _deposit(
address _token,
address _to,
uint256 _amount,
bytes memory _data,
uint256 _gasLimit
) internal virtual override nonReentrant {
require(_amount > 0, "deposit zero amount");
require(_token == l1USDC, "only USDC is allowed");
require(!depositPaused, "deposit paused");
// 1. Transfer token into this contract.
address _from;
(_from, _amount, _data) = _transferERC20In(_token, _amount, _data);
require(_data.length == 0, "call is not allowed");
// 2. Generate message passed to L2USDCGateway.
bytes memory _message = abi.encodeCall(
IL2ERC20Gateway.finalizeDepositERC20,
(_token, l2USDC, _from, _to, _amount, _data)
);
// 3. Send message to L1ScrollMessenger.
IL1ScrollMessenger(messenger).sendMessage{value: msg.value}(counterpart, 0, _message, _gasLimit, _from);
emit DepositERC20(_token, l2USDC, _from, _to, _amount, _data);
}
}

View File

@@ -1,184 +0,0 @@
// SPDX-License-Identifier: MIT
pragma solidity =0.8.16;
import {OwnableUpgradeable} from "@openzeppelin/contracts-upgradeable/access/OwnableUpgradeable.sol";
import {IERC20Upgradeable} from "@openzeppelin/contracts-upgradeable/token/ERC20/IERC20Upgradeable.sol";
import {SafeERC20Upgradeable} from "@openzeppelin/contracts-upgradeable/token/ERC20/utils/SafeERC20Upgradeable.sol";
import {ITokenMessenger} from "../../../interfaces/ITokenMessenger.sol";
import {IL2ERC20Gateway} from "../../../L2/gateways/IL2ERC20Gateway.sol";
import {IL1ScrollMessenger} from "../../IL1ScrollMessenger.sol";
import {IL1ERC20Gateway} from "../IL1ERC20Gateway.sol";
import {CCTPGatewayBase} from "../../../libraries/gateway/CCTPGatewayBase.sol";
import {ScrollGatewayBase} from "../../../libraries/gateway/ScrollGatewayBase.sol";
import {L1ERC20Gateway} from "../L1ERC20Gateway.sol";
/// @title L1USDCGatewayCCTP
/// @notice The `L1USDCGateway` contract is used to deposit `USDC` token in layer 1 and
/// finalize withdraw `USDC` from layer 2, after USDC become native in layer 2.
contract L1USDCGatewayCCTP is CCTPGatewayBase, L1ERC20Gateway {
using SafeERC20Upgradeable for IERC20Upgradeable;
/***************
* Constructor *
***************/
constructor(
address _l1USDC,
address _l2USDC,
uint32 _destinationDomain
) CCTPGatewayBase(_l1USDC, _l2USDC, _destinationDomain) {}
/// @notice Initialize the storage of L1USDCGatewayCCTP.
/// @param _counterpart The address of L2USDCGatewayCCTP in L2.
/// @param _router The address of L1GatewayRouter.
/// @param _messenger The address of L1ScrollMessenger.
/// @param _cctpMessenger The address of TokenMessenger in local domain.
/// @param _cctpTransmitter The address of MessageTransmitter in local domain.
function initialize(
address _counterpart,
address _router,
address _messenger,
address _cctpMessenger,
address _cctpTransmitter
) external initializer {
require(_router != address(0), "zero router address");
ScrollGatewayBase._initialize(_counterpart, _router, _messenger);
CCTPGatewayBase._initialize(_cctpMessenger, _cctpTransmitter);
OwnableUpgradeable.__Ownable_init();
}
/*************************
* Public View Functions *
*************************/
/// @inheritdoc IL1ERC20Gateway
function getL2ERC20Address(address) public view override returns (address) {
return l2USDC;
}
/*****************************
* Public Mutating Functions *
*****************************/
/// @notice Relay cross chain message and claim USDC that has been cross chained.
/// @dev The `_scrollMessage` is actually encoded calldata for `L1ScrollMessenger.relayMessageWithProof`.
///
/// @dev This helper function is aimed to claim USDC in single transaction.
/// Normally, an user should call `L1ScrollMessenger.relayMessageWithProof` first,
/// then `L1USDCGatewayCCTP.claimUSDC`.
///
/// @param _nonce The nonce of the message from CCTP.
/// @param _cctpMessage The message passed to MessageTransmitter contract in CCTP.
/// @param _cctpSignature The message passed to MessageTransmitter contract in CCTP.
/// @param _scrollMessage The message passed to L1ScrollMessenger contract.
function relayAndClaimUSDC(
uint256 _nonce,
bytes calldata _cctpMessage,
bytes calldata _cctpSignature,
bytes calldata _scrollMessage
) external {
require(status[_nonce] == CCTPMessageStatus.None, "message relayed");
// call messenger to set `status[_nonce]` to `CCTPMessageStatus.Pending`.
(bool _success, ) = messenger.call(_scrollMessage);
require(_success, "call messenger failed");
claimUSDC(_nonce, _cctpMessage, _cctpSignature);
}
/// @inheritdoc IL1ERC20Gateway
/// @dev The function will not mint the USDC, users need to call `claimUSDC` after this function is done.
function finalizeWithdrawERC20(
address _l1Token,
address _l2Token,
address _from,
address _to,
uint256 _amount,
bytes memory _data
) external payable override onlyCallByCounterpart {
require(msg.value == 0, "nonzero msg.value");
require(_l1Token == l1USDC, "l1 token not USDC");
require(_l2Token == l2USDC, "l2 token not USDC");
uint256 _nonce;
(_nonce, _data) = abi.decode(_data, (uint256, bytes));
require(status[_nonce] == CCTPMessageStatus.None, "message relayed");
status[_nonce] = CCTPMessageStatus.Pending;
emit FinalizeWithdrawERC20(_l1Token, _l2Token, _from, _to, _amount, _data);
}
/*******************************
* Public Restricted Functions *
*******************************/
/// @notice Update the CCTP contract addresses.
/// @param _messenger The address of TokenMessenger in local domain.
/// @param _transmitter The address of MessageTransmitter in local domain.
function updateCCTPContracts(address _messenger, address _transmitter) external onlyOwner {
cctpMessenger = _messenger;
cctpTransmitter = _transmitter;
}
/**********************
* Internal Functions *
**********************/
/// @inheritdoc L1ERC20Gateway
function _beforeFinalizeWithdrawERC20(
address,
address,
address,
address,
uint256,
bytes calldata
) internal virtual override {}
/// @inheritdoc L1ERC20Gateway
function _beforeDropMessage(
address,
address,
uint256
) internal virtual override {
require(msg.value == 0, "nonzero msg.value");
}
/// @inheritdoc L1ERC20Gateway
function _deposit(
address _token,
address _to,
uint256 _amount,
bytes memory _data,
uint256 _gasLimit
) internal virtual override nonReentrant {
require(_amount > 0, "deposit zero amount");
require(_token == l1USDC, "only USDC is allowed");
// 1. Extract real sender if this call is from L1GatewayRouter.
address _from;
(_from, _amount, _data) = _transferERC20In(_token, _amount, _data);
// 2. Burn token through CCTP TokenMessenger
uint256 _nonce = ITokenMessenger(cctpMessenger).depositForBurnWithCaller(
_amount,
destinationDomain,
bytes32(uint256(uint160(_to))),
address(this),
bytes32(uint256(uint160(counterpart)))
);
// 3. Generate message passed to L2USDCGatewayCCTP.
bytes memory _message = abi.encodeCall(
IL2ERC20Gateway.finalizeDepositERC20,
(_token, l2USDC, _from, _to, _amount, abi.encode(_nonce, _data))
);
// 4. Send message to L1ScrollMessenger.
IL1ScrollMessenger(messenger).sendMessage{value: msg.value}(counterpart, 0, _message, _gasLimit);
emit DepositERC20(_token, l2USDC, _from, _to, _amount, _data);
}
}

View File

@@ -492,7 +492,6 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
// concatenate l2 transaction hashes
uint256 _numTransactionsInBlock = ChunkCodec.numTransactions(blockPtr);
require(_numTransactionsInBlock >= _numL1MessagesInBlock, "num txs less than num L1 msgs");
for (uint256 j = _numL1MessagesInBlock; j < _numTransactionsInBlock; j++) {
bytes32 txHash;
(txHash, l2TxPtr) = ChunkCodec.loadL2TxHash(l2TxPtr);

View File

@@ -6,7 +6,6 @@ import {IERC20Upgradeable} from "@openzeppelin/contracts-upgradeable/token/ERC20
import {SafeERC20Upgradeable} from "@openzeppelin/contracts-upgradeable/token/ERC20/utils/SafeERC20Upgradeable.sol";
import {IFiatToken} from "../../../interfaces/IFiatToken.sol";
import {IUSDCDestinationBridge} from "../../../interfaces/IUSDCDestinationBridge.sol";
import {IL1ERC20Gateway} from "../../../L1/gateways/IL1ERC20Gateway.sol";
import {IL2ScrollMessenger} from "../../IL2ScrollMessenger.sol";
import {IL2ERC20Gateway} from "../IL2ERC20Gateway.sol";
@@ -17,7 +16,7 @@ import {L2ERC20Gateway} from "../L2ERC20Gateway.sol";
/// @title L2USDCGateway
/// @notice The `L2USDCGateway` contract is used to withdraw `USDC` token on layer 2 and
/// finalize deposit `USDC` from layer 1.
contract L2USDCGateway is L2ERC20Gateway, IUSDCDestinationBridge {
contract L2USDCGateway is L2ERC20Gateway {
using SafeERC20Upgradeable for IERC20Upgradeable;
/*************
@@ -34,8 +33,6 @@ contract L2USDCGateway is L2ERC20Gateway, IUSDCDestinationBridge {
* Variables *
*************/
address public circleCaller;
bool public depositPaused;
bool public withdrawPaused;
@@ -94,8 +91,7 @@ contract L2USDCGateway is L2ERC20Gateway, IUSDCDestinationBridge {
require(IFiatToken(_l2Token).mint(_to, _amount), "mint USDC failed");
// disable call for USDC
// _doCallback(_to, _data);
_doCallback(_to, _data);
emit FinalizeDepositERC20(_l1Token, _l2Token, _from, _to, _amount, _data);
}
@@ -104,19 +100,6 @@ contract L2USDCGateway is L2ERC20Gateway, IUSDCDestinationBridge {
* Public Restricted Functions *
*******************************/
/// @inheritdoc IUSDCDestinationBridge
function transferUSDCRoles(address _owner) external {
require(msg.sender == circleCaller, "only circle caller");
_transferOwnership(_owner);
}
/// @notice Update the Circle EOA address.
/// @param _caller The address to update.
function updateCircleCaller(address _caller) external onlyOwner {
circleCaller = _caller;
}
/// @notice Change the deposit pause status of this contract.
/// @param _paused The new status, `true` means paused and `false` means not paused.
function pauseDeposit(bool _paused) external onlyOwner {
@@ -150,7 +133,6 @@ contract L2USDCGateway is L2ERC20Gateway, IUSDCDestinationBridge {
if (router == msg.sender) {
(_from, _data) = abi.decode(_data, (address, bytes));
}
require(_data.length == 0, "call is not allowed");
// 2. Transfer token into this contract.
IERC20Upgradeable(_token).safeTransferFrom(_from, address(this), _amount);

View File

@@ -1,156 +0,0 @@
// SPDX-License-Identifier: MIT
pragma solidity =0.8.16;
import {OwnableUpgradeable} from "@openzeppelin/contracts-upgradeable/access/OwnableUpgradeable.sol";
import {IERC20Upgradeable} from "@openzeppelin/contracts-upgradeable/token/ERC20/IERC20Upgradeable.sol";
import {SafeERC20Upgradeable} from "@openzeppelin/contracts-upgradeable/token/ERC20/utils/SafeERC20Upgradeable.sol";
import {ITokenMessenger} from "../../../interfaces/ITokenMessenger.sol";
import {IL1ERC20Gateway} from "../../../L1/gateways/IL1ERC20Gateway.sol";
import {IL2ScrollMessenger} from "../../IL2ScrollMessenger.sol";
import {IL2ERC20Gateway} from "../IL2ERC20Gateway.sol";
import {CCTPGatewayBase} from "../../../libraries/gateway/CCTPGatewayBase.sol";
import {ScrollGatewayBase} from "../../../libraries/gateway/ScrollGatewayBase.sol";
import {L2ERC20Gateway} from "../L2ERC20Gateway.sol";
/// @title L2USDCGatewayCCTP
/// @notice The `L2USDCGatewayCCTP` contract is used to withdraw `USDC` token in layer 2 and
/// finalize deposit `USDC` from layer 1.
contract L2USDCGatewayCCTP is CCTPGatewayBase, L2ERC20Gateway {
using SafeERC20Upgradeable for IERC20Upgradeable;
/***************
* Constructor *
***************/
constructor(
address _l1USDC,
address _l2USDC,
uint32 _destinationDomain
) CCTPGatewayBase(_l1USDC, _l2USDC, _destinationDomain) {}
/// @notice Initialize the storage of L2USDCGatewayCCTP.
/// @param _counterpart The address of L1USDCGatewayCCTP in L1.
/// @param _router The address of L2GatewayRouter.
/// @param _messenger The address of L2ScrollMessenger.
/// @param _cctpMessenger The address of TokenMessenger in local domain.
/// @param _cctpTransmitter The address of MessageTransmitter in local domain.
function initialize(
address _counterpart,
address _router,
address _messenger,
address _cctpMessenger,
address _cctpTransmitter
) external initializer {
require(_router != address(0), "zero router address");
ScrollGatewayBase._initialize(_counterpart, _router, _messenger);
CCTPGatewayBase._initialize(_cctpMessenger, _cctpTransmitter);
OwnableUpgradeable.__Ownable_init();
}
/*************************
* Public View Functions *
*************************/
/// @inheritdoc IL2ERC20Gateway
function getL1ERC20Address(address) external view override returns (address) {
return l1USDC;
}
/// @inheritdoc IL2ERC20Gateway
function getL2ERC20Address(address) public view override returns (address) {
return l2USDC;
}
/*****************************
* Public Mutating Functions *
*****************************/
/// @inheritdoc IL2ERC20Gateway
/// @dev The function will not mint the USDC, users need to call `claimUSDC` after this function is done.
function finalizeDepositERC20(
address _l1Token,
address _l2Token,
address _from,
address _to,
uint256 _amount,
bytes memory _data
) external payable override onlyCallByCounterpart {
require(msg.value == 0, "nonzero msg.value");
require(_l1Token == l1USDC, "l1 token not USDC");
require(_l2Token == l2USDC, "l2 token not USDC");
uint256 _nonce;
(_nonce, _data) = abi.decode(_data, (uint256, bytes));
require(status[_nonce] == CCTPMessageStatus.None, "message relayed");
status[_nonce] = CCTPMessageStatus.Pending;
emit FinalizeDepositERC20(_l1Token, _l2Token, _from, _to, _amount, _data);
}
/*******************************
* Public Restricted Functions *
*******************************/
/// @notice Update the CCTP contract addresses.
/// @param _messenger The address of TokenMessenger in local domain.
/// @param _transmitter The address of MessageTransmitter in local domain.
function updateCCTPContracts(address _messenger, address _transmitter) external onlyOwner {
cctpMessenger = _messenger;
cctpTransmitter = _transmitter;
}
/**********************
* Internal Functions *
**********************/
/// @inheritdoc L2ERC20Gateway
function _withdraw(
address _token,
address _to,
uint256 _amount,
bytes memory _data,
uint256 _gasLimit
) internal virtual override {
require(_amount > 0, "withdraw zero amount");
require(_token == l2USDC, "only USDC is allowed");
// 1. Extract real sender if this call is from L1GatewayRouter.
address _from = msg.sender;
if (router == msg.sender) {
(_from, _data) = abi.decode(_data, (address, bytes));
}
// 2. Transfer token into this contract.
IERC20Upgradeable(_token).safeTransferFrom(_from, address(this), _amount);
// 3. Burn token through CCTP TokenMessenger
uint256 _nonce = ITokenMessenger(cctpMessenger).depositForBurnWithCaller(
_amount,
destinationDomain,
bytes32(uint256(uint160(_to))),
address(this),
bytes32(uint256(uint160(counterpart)))
);
// 4. Generate the message passed to L1USDCGatewayCCTP.
address _l1USDC = l1USDC;
bytes memory _message = abi.encodeWithSelector(
IL1ERC20Gateway.finalizeWithdrawERC20.selector,
_l1USDC,
_token,
_from,
_to,
_amount,
abi.encode(_nonce, _data)
);
// 5. Send the message through L2ScrollMessenger.
IL2ScrollMessenger(messenger).sendMessage{value: msg.value}(counterpart, 0, _message, _gasLimit);
emit WithdrawERC20(_l1USDC, _token, _from, _to, _amount, _data);
}
}
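The gateway above never custodies withdrawn USDC: `_withdraw` pulls the tokens from the sender, burns them through CCTP, and then sends the Scroll message carrying the CCTP nonce to L1. For orientation, here is a minimal, hypothetical sketch of how a contract on L2 might start such a withdrawal by calling the gateway directly. The trimmed interface only mirrors the `withdrawERC20AndCall` signature exercised by the test files later in this diff, and the approval step is needed because the gateway transfers the tokens from the caller before burning them.

```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.16;

// Trimmed, assumed interfaces -- only the calls used in this sketch.
interface IERC20Minimal {
    function approve(address spender, uint256 amount) external returns (bool);
}

interface IL2USDCGatewayLike {
    function withdrawERC20AndCall(
        address token,
        address to,
        uint256 amount,
        bytes calldata data,
        uint256 gasLimit
    ) external payable;
}

/// @notice Illustrative caller that bridges its own L2 USDC back to L1.
contract UsdcWithdrawer {
    address public immutable l2USDC;
    IL2USDCGatewayLike public immutable gateway;

    constructor(address _l2USDC, address _gateway) {
        l2USDC = _l2USDC;
        gateway = IL2USDCGatewayLike(_gateway);
    }

    /// @param to       Recipient address on L1.
    /// @param amount   Amount of L2 USDC held by this contract to withdraw.
    /// @param gasLimit Gas limit forwarded for the L1 finalization call.
    function withdrawToL1(address to, uint256 amount, uint256 gasLimit) external payable {
        // The gateway pulls tokens with transferFrom, so approve it first.
        IERC20Minimal(l2USDC).approve(address(gateway), amount);
        // msg.value covers the cross-domain message fee, if any.
        gateway.withdrawERC20AndCall{value: msg.value}(l2USDC, to, amount, new bytes(0), gasLimit);
    }
}
```

The L1 side completes only after both the Scroll message and Circle's attestation are relayed, mirroring the two-step claim flow defined in `CCTPGatewayBase` further down.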

View File

@@ -1,6 +1,6 @@
A library for interacting with Scroll contracts.
This library includes contracts and interfaces needed to interact with the Scroll Smart Contracts deployed on both Layer 1 and Layer 2. This includes depositing and withdrawing ETH, ERC20 tokens, and NFTs, as well as sending arbitrary messages.
This library includes contracts and interfaces needed to interact with the Scroll Smart Contracts deployed on both Layer 1 and Layer 2. This includes depositing and withdrawing ETH, ERC20 tokens, and NFTs, as well as sending arbitrary messages.
# Overview
@@ -21,11 +21,10 @@ pragma solidity 0.8.20;
import "@scroll-tech/contracts/L1/gateways/IL1ETHGateway.sol";
contract MyContract {
function bridgeETH(address scrollBridge, uint256 gasLimit) public payable {
IL1ETHGateway(scrollBridge).depositETH(msg.sender, msg.value, gasLimit);
}
function bridgeETH(address scrollBridge, uint gasLimit) public payable {
IL1ETHGateway(scrollBridge).depositETH(msg.sender, msg.value, gasLimit);
}
}
```
Visit the Bridge Documentation for API reference, architecture overview and guides with code examples.
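The same pattern extends to ERC20 deposits. The sketch below is a hypothetical companion example (not taken from the README): it mirrors the `depositERC20(token, recipient, amount, gasLimit)` overload exercised by the gateway tests further down in this diff, trims the interfaces to the two calls it needs, and assumes the calling contract already holds the tokens, since the gateway pulls them with `transferFrom`.

```solidity
// SPDX-License-Identifier: MIT
pragma solidity 0.8.20;

// Trimmed, assumed interfaces -- only the calls used in this sketch.
interface IERC20Minimal {
    function approve(address spender, uint256 amount) external returns (bool);
}

interface IL1ERC20GatewayLike {
    function depositERC20(
        address token,
        address to,
        uint256 amount,
        uint256 gasLimit
    ) external payable;
}

contract MyTokenBridger {
    /// @notice Deposits `amount` of `token` held by this contract to `recipient` on L2.
    /// @dev msg.value pays the L2 execution fee for the given gas limit.
    function bridgeERC20(
        address gateway,
        address token,
        address recipient,
        uint256 amount,
        uint256 gasLimit
    ) public payable {
        IERC20Minimal(token).approve(gateway, amount);
        IL1ERC20GatewayLike(gateway).depositERC20{value: msg.value}(token, recipient, amount, gasLimit);
    }
}
```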

View File

@@ -1,16 +0,0 @@
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.16;
interface IMessageTransmitter {
function usedNonces(bytes32 _sourceAndNonce) external view returns (uint256);
/**
* @notice Receives an incoming message, validating the header and passing
* the body to application-specific handler.
* @param message The message raw bytes
* @param signature The message signature
* @return success bool, true if successful
*/
function receiveMessage(bytes calldata message, bytes calldata signature) external returns (bool success);
}
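As a minimal sketch of how an integrator is expected to use this interface (the transmitter address and Circle's off-chain attestation are assumed to be supplied by the caller, and the import path is assumed), the helper below simply forwards the raw message bytes and the attestation to `receiveMessage` and treats a `false` return as failure, which is the same pattern `claimUSDC` in `CCTPGatewayBase` follows further down.

```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.16;

import {IMessageTransmitter} from "./IMessageTransmitter.sol"; // assumed local path

/// @notice Illustrative helper that relays an attested CCTP message on the local domain.
contract CctpMessageRelayer {
    IMessageTransmitter public immutable transmitter;

    constructor(address _transmitter) {
        transmitter = IMessageTransmitter(_transmitter);
    }

    /// @param message   Raw CCTP message bytes emitted on the source domain.
    /// @param signature Circle's attestation over `message`.
    function relay(bytes calldata message, bytes calldata signature) external {
        // receiveMessage reverts or returns false on failure; treat both as errors.
        require(transmitter.receiveMessage(message, signature), "receiveMessage failed");
    }
}
```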

View File

@@ -1,63 +0,0 @@
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.16;
interface ITokenMessenger {
/**
* @notice Deposits and burns tokens from sender to be minted on destination domain. The mint
* on the destination domain must be called by `destinationCaller`.
* WARNING: if the `destinationCaller` does not represent a valid address as bytes32, then it will not be possible
* to broadcast the message on the destination domain. This is an advanced feature, and the standard
* depositForBurn() should be preferred for use cases where a specific destination caller is not required.
* Emits a `DepositForBurn` event.
* @dev reverts if:
* - given destinationCaller is zero address
* - given burnToken is not supported
* - given destinationDomain has no TokenMessenger registered
* - transferFrom() reverts. For example, if sender's burnToken balance or approved allowance
* to this contract is less than `amount`.
* - burn() reverts. For example, if `amount` is 0.
* - MessageTransmitter returns false or reverts.
* @param amount amount of tokens to burn
* @param destinationDomain destination domain
* @param mintRecipient address of mint recipient on destination domain
* @param burnToken address of contract to burn deposited tokens, on local domain
* @param destinationCaller caller on the destination domain, as bytes32
* @return nonce unique nonce reserved by message
*/
function depositForBurnWithCaller(
uint256 amount,
uint32 destinationDomain,
bytes32 mintRecipient,
address burnToken,
bytes32 destinationCaller
) external returns (uint64 nonce);
/**
* @notice Replace a BurnMessage to change the mint recipient and/or
* destination caller. Allows the sender of a previous BurnMessage
* (created by depositForBurn or depositForBurnWithCaller)
* to send a new BurnMessage to replace the original.
* The new BurnMessage will reuse the amount and burn token of the original,
* without requiring a new deposit.
* @dev The new message will reuse the original message's nonce. For a
* given nonce, all replacement message(s) and the original message are
* valid to broadcast on the destination domain, until the first message
* at the nonce confirms, at which point all others are invalidated.
* Note: The msg.sender of the replaced message must be the same as the
* msg.sender of the original message.
* @param originalMessage original message bytes (to replace)
* @param originalAttestation original attestation bytes
* @param newDestinationCaller the new destination caller, which may be the
* same as the original destination caller, a new destination caller, or an empty
* destination caller (bytes32(0), indicating that any destination caller is valid.)
* @param newMintRecipient the new mint recipient, which may be the same as the
* original mint recipient, or different.
*/
function replaceDepositForBurn(
bytes calldata originalMessage,
bytes calldata originalAttestation,
bytes32 newDestinationCaller,
bytes32 newMintRecipient
) external;
}
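For reference, the sketch below shows the calling convention the gateway's `_withdraw` relies on, with assumed names and trimmed interfaces: approve the TokenMessenger, left-pad the 20-byte recipient and destination-caller addresses into `bytes32`, and keep the returned `uint64` nonce, which is what gateways like the one above track to pair the Scroll message with the eventual CCTP mint.

```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.16;

import {ITokenMessenger} from "./ITokenMessenger.sol"; // assumed local path

interface IERC20Minimal {
    function approve(address spender, uint256 amount) external returns (bool);
}

/// @notice Illustrative CCTP burn: converts addresses to bytes32 and records the nonce.
contract CctpBurnExample {
    event Burned(uint64 nonce);

    /// @dev Assumes this contract already holds `amount` of `usdc`.
    function burnTo(
        address messenger,
        address usdc,
        uint32 destinationDomain,
        address recipient,
        address destinationCaller,
        uint256 amount
    ) external {
        // CCTP addresses recipients and callers as bytes32; left-pad the 20-byte address.
        bytes32 recipient32 = bytes32(uint256(uint160(recipient)));
        bytes32 caller32 = bytes32(uint256(uint160(destinationCaller)));

        IERC20Minimal(usdc).approve(messenger, amount);
        uint64 nonce = ITokenMessenger(messenger).depositForBurnWithCaller(
            amount,
            destinationDomain,
            recipient32,
            usdc,
            caller32
        );
        emit Burned(nonce);
    }
}
```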

View File

@@ -1,12 +0,0 @@
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.16;
// Implement this on the source chain (Ethereum).
interface IUSDCBurnableSourceBridge {
/**
* @notice Called by Circle, this executes a burn on the source
* chain.
*/
function burnAllLockedUSDC() external;
}

View File

@@ -1,11 +0,0 @@
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.16;
// Implement this on the destination chain (Scroll).
interface IUSDCDestinationBridge {
/**
* @notice Called by Circle, this transfers FiatToken roles to the designated owner.
*/
function transferUSDCRoles(address owner) external;
}
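These two single-function interfaces are the hooks for Circle's bridged-to-native USDC upgrade path. A destination-side implementation could look roughly like the hypothetical stub below; the access guard and the `transferOwnership` call on a FiatToken-style owner interface are illustrative assumptions, not the gateway's actual implementation.

```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.16;

// Hypothetical owner interface of a FiatToken-style USDC implementation.
interface IUSDCOwnable {
    function transferOwnership(address newOwner) external;
}

/// @notice Illustrative destination-side stub of IUSDCDestinationBridge.
contract DestinationBridgeStub {
    address public immutable usdc;
    address public immutable circleCaller;

    constructor(address _usdc, address _circleCaller) {
        usdc = _usdc;
        circleCaller = _circleCaller;
    }

    /// @notice Called by Circle to hand USDC roles over to `owner`.
    function transferUSDCRoles(address owner) external {
        require(msg.sender == circleCaller, "only circle caller");
        // A production gateway would also hand over pauser/blacklister/master-minter
        // roles; this stub only illustrates the ownership transfer.
        IUSDCOwnable(usdc).transferOwnership(owner);
    }
}
```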

View File

@@ -1,95 +0,0 @@
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;
import {IMessageTransmitter} from "../../interfaces/IMessageTransmitter.sol";
import {ScrollGatewayBase} from "./ScrollGatewayBase.sol";
abstract contract CCTPGatewayBase is ScrollGatewayBase {
/*********
* Enums *
*********/
enum CCTPMessageStatus {
None,
Pending,
Relayed
}
/*************
* Constants *
*************/
/// @notice The address of the USDC token on layer 1.
address public immutable l1USDC;
/// @notice The address of the USDC token on layer 2.
address public immutable l2USDC;
/// @notice The CCTP domain of the destination chain.
uint32 public immutable destinationDomain;
/*************
* Variables *
*************/
/// @notice The address of TokenMessenger in local domain.
address public cctpMessenger;
/// @notice The address of MessageTransmitter in local domain.
address public cctpTransmitter;
/// @notice Mapping from the CCTP nonce of an incoming message to its relay status.
mapping(uint256 => CCTPMessageStatus) public status;
/***************
* Constructor *
***************/
constructor(
address _l1USDC,
address _l2USDC,
uint32 _destinationDomain
) {
l1USDC = _l1USDC;
l2USDC = _l2USDC;
destinationDomain = _destinationDomain;
}
function _initialize(address _cctpMessenger, address _cctpTransmitter) internal {
cctpMessenger = _cctpMessenger;
cctpTransmitter = _cctpTransmitter;
}
/*****************************
* Public Mutating Functions *
*****************************/
/// @notice Claim USDC that has been bridged from the other domain via CCTP.
/// @param _nonce The nonce of the message from CCTP.
/// @param _cctpMessage The raw message bytes passed to the MessageTransmitter contract in CCTP.
/// @param _cctpSignature The attestation signature passed to the MessageTransmitter contract in CCTP.
function claimUSDC(
uint256 _nonce,
bytes calldata _cctpMessage,
bytes calldata _cctpSignature
) public {
// Check that `_nonce` matches the nonce encoded in `_cctpMessage`.
// According to the encoding of `_cctpMessage`, the nonce is a uint64 stored at bytes [12, 20).
// See here: https://github.com/circlefin/evm-cctp-contracts/blob/master/src/messages/Message.sol#L29
uint256 _expectedMessageNonce;
assembly {
_expectedMessageNonce := and(shr(96, calldataload(_cctpMessage.offset)), 0xffffffffffffffff)
}
require(_expectedMessageNonce == _nonce, "nonce mismatch");
require(status[_nonce] == CCTPMessageStatus.Pending, "message not relayed");
// call transmitter to mint USDC
bool _success = IMessageTransmitter(cctpTransmitter).receiveMessage(_cctpMessage, _cctpSignature);
require(_success, "call transmitter failed");
status[_nonce] = CCTPMessageStatus.Relayed;
}
}
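The assembly in `claimUSDC` is just a calldata-level read of the CCTP message header. Assuming the layout referenced in the linked `Message.sol` (4-byte version, 4-byte source domain, 4-byte destination domain, then an 8-byte nonce starting at byte 12), the pure-Solidity restatement below extracts the same big-endian `uint64`; a relayer would compare it against the pending nonce and then call `claimUSDC(nonce, message, attestation)` once Circle's attestation is available.

```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.16;

/// @notice Illustrative, non-assembly restatement of the nonce extraction in claimUSDC.
library CctpMessageLib {
    /// @dev Assumes the 8-byte nonce occupies bytes [12, 20) of the CCTP message header.
    function readNonce(bytes calldata message) internal pure returns (uint64 nonce) {
        require(message.length >= 20, "message too short");
        // Fold the 8 nonce bytes into a big-endian uint64, matching
        // `and(shr(96, calldataload(message.offset)), 0xffffffffffffffff)`.
        for (uint256 i = 12; i < 20; i++) {
            nonce = (nonce << 8) | uint64(uint8(message[i]));
        }
    }
}
```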

View File

@@ -1,521 +0,0 @@
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;
import {MockERC20} from "solmate/test/utils/mocks/MockERC20.sol";
import {ERC1967Proxy} from "@openzeppelin/contracts/proxy/ERC1967/ERC1967Proxy.sol";
import {L1GatewayRouter} from "../L1/gateways/L1GatewayRouter.sol";
import {IL1ERC20Gateway, L1USDCGateway} from "../L1/gateways/usdc/L1USDCGateway.sol";
import {IL1ScrollMessenger} from "../L1/IL1ScrollMessenger.sol";
import {IL2ERC20Gateway, L2USDCGateway} from "../L2/gateways/usdc/L2USDCGateway.sol";
import {AddressAliasHelper} from "../libraries/common/AddressAliasHelper.sol";
import {L1GatewayTestBase} from "./L1GatewayTestBase.t.sol";
import {MockScrollMessenger} from "./mocks/MockScrollMessenger.sol";
import {MockGatewayRecipient} from "./mocks/MockGatewayRecipient.sol";
contract L1USDCGatewayTest is L1GatewayTestBase {
// from L1USDCGateway
event FinalizeWithdrawERC20(
address indexed _l1Token,
address indexed _l2Token,
address indexed _from,
address _to,
uint256 _amount,
bytes _data
);
event DepositERC20(
address indexed _l1Token,
address indexed _l2Token,
address indexed _from,
address _to,
uint256 _amount,
bytes _data
);
MockERC20 private l1USDC;
MockERC20 private l2USDC;
L1USDCGateway private gateway;
L1GatewayRouter private router;
L2USDCGateway private counterpartGateway;
function setUp() public {
setUpBase();
// Deploy tokens
l1USDC = new MockERC20("USDC", "USDC", 6);
l2USDC = new MockERC20("USDC", "USDC", 6);
// Deploy L1 contracts
gateway = _deployGateway();
router = L1GatewayRouter(address(new ERC1967Proxy(address(new L1GatewayRouter()), new bytes(0))));
// Deploy L2 contracts
counterpartGateway = new L2USDCGateway(address(l1USDC), address(l2USDC));
// Initialize L1 contracts
gateway.initialize(address(counterpartGateway), address(router), address(l1Messenger));
router.initialize(address(0), address(gateway));
// Prepare token balances
l1USDC.mint(address(this), type(uint128).max);
l1USDC.approve(address(gateway), type(uint256).max);
l1USDC.approve(address(router), type(uint256).max);
}
function testInitialized() public {
assertEq(address(counterpartGateway), gateway.counterpart());
assertEq(address(router), gateway.router());
assertEq(address(l1Messenger), gateway.messenger());
assertEq(address(l1USDC), gateway.l1USDC());
assertEq(address(l2USDC), gateway.l2USDC());
assertEq(address(l2USDC), gateway.getL2ERC20Address(address(l1USDC)));
hevm.expectRevert("Initializable: contract is already initialized");
gateway.initialize(address(counterpartGateway), address(router), address(l1Messenger));
}
function testDepositPaused() public {
// non-owner call pause, should revert
hevm.startPrank(address(1));
hevm.expectRevert("Ownable: caller is not the owner");
gateway.pauseDeposit(false);
hevm.expectRevert("Ownable: caller is not the owner");
gateway.pauseDeposit(true);
hevm.stopPrank();
// pause deposit
gateway.pauseDeposit(true);
// deposit paused, should revert
hevm.expectRevert("deposit paused");
gateway.depositERC20(address(l1USDC), 1, 0);
hevm.expectRevert("deposit paused");
gateway.depositERC20(address(l1USDC), address(this), 1, 0);
hevm.expectRevert("deposit paused");
gateway.depositERC20AndCall(address(l1USDC), address(this), 1, new bytes(0), 0);
}
function testPauseWithdraw() public {
// non-owner call pause, should revert
hevm.startPrank(address(1));
hevm.expectRevert("Ownable: caller is not the owner");
gateway.pauseWithdraw(false);
hevm.expectRevert("Ownable: caller is not the owner");
gateway.pauseWithdraw(true);
hevm.stopPrank();
}
function testDepositERC20(
uint256 amount,
uint256 gasLimit,
uint256 feePerGas
) public {
_depositERC20(false, amount, gasLimit, feePerGas);
}
function testDepositERC20WithRecipient(
uint256 amount,
address recipient,
uint256 gasLimit,
uint256 feePerGas
) public {
_depositERC20WithRecipient(false, amount, recipient, gasLimit, feePerGas);
}
function testRouterDepositERC20(
uint256 amount,
uint256 gasLimit,
uint256 feePerGas
) public {
_depositERC20(true, amount, gasLimit, feePerGas);
}
function testRouterDepositERC20WithRecipient(
uint256 amount,
address recipient,
uint256 gasLimit,
uint256 feePerGas
) public {
_depositERC20WithRecipient(true, amount, recipient, gasLimit, feePerGas);
}
function testFinalizeWithdrawERC20FailedMocking(
address sender,
address recipient,
uint256 amount,
bytes memory dataToCall
) public {
amount = bound(amount, 1, 100000);
// revert when caller is not messenger
hevm.expectRevert("only messenger can call");
gateway.finalizeWithdrawERC20(address(l1USDC), address(l2USDC), sender, recipient, amount, dataToCall);
MockScrollMessenger mockMessenger = new MockScrollMessenger();
gateway = _deployGateway();
gateway.initialize(address(counterpartGateway), address(router), address(mockMessenger));
// only call by counterpart
hevm.expectRevert("only call by counterpart");
mockMessenger.callTarget(
address(gateway),
abi.encodeWithSelector(
gateway.finalizeWithdrawERC20.selector,
address(l1USDC),
address(l2USDC),
sender,
recipient,
amount,
dataToCall
)
);
mockMessenger.setXDomainMessageSender(address(counterpartGateway));
// nonzero msg.value
hevm.expectRevert("nonzero msg.value");
mockMessenger.callTarget{value: 1}(
address(gateway),
abi.encodeWithSelector(
gateway.finalizeWithdrawERC20.selector,
address(l1USDC),
address(l2USDC),
sender,
recipient,
amount,
dataToCall
)
);
// l1 token not USDC
hevm.expectRevert("l1 token not USDC");
mockMessenger.callTarget(
address(gateway),
abi.encodeWithSelector(
gateway.finalizeWithdrawERC20.selector,
address(l2USDC),
address(l2USDC),
sender,
recipient,
amount,
dataToCall
)
);
// l2 token not USDC
hevm.expectRevert("l2 token not USDC");
mockMessenger.callTarget(
address(gateway),
abi.encodeWithSelector(
gateway.finalizeWithdrawERC20.selector,
address(l1USDC),
address(l1USDC),
sender,
recipient,
amount,
dataToCall
)
);
// withdraw paused
gateway.pauseWithdraw(true);
hevm.expectRevert("withdraw paused");
mockMessenger.callTarget(
address(gateway),
abi.encodeWithSelector(
gateway.finalizeWithdrawERC20.selector,
address(l1USDC),
address(l2USDC),
sender,
recipient,
amount,
dataToCall
)
);
}
function testFinalizeWithdrawERC20Failed(
address sender,
address recipient,
uint256 amount,
bytes memory dataToCall
) public {
// blacklist some addresses
hevm.assume(recipient != address(0));
hevm.assume(recipient != address(gateway));
amount = bound(amount, 1, l1USDC.balanceOf(address(this)));
// deposit some USDC to L1ScrollMessenger
gateway.depositERC20(address(l1USDC), amount, 0);
// do finalize withdraw usdc
bytes memory message = abi.encodeWithSelector(
IL1ERC20Gateway.finalizeWithdrawERC20.selector,
address(l1USDC),
address(l2USDC),
sender,
recipient,
amount,
dataToCall
);
bytes memory xDomainCalldata = abi.encodeWithSignature(
"relayMessage(address,address,uint256,uint256,bytes)",
address(uint160(address(counterpartGateway)) + 1),
address(gateway),
0,
0,
message
);
prepareL2MessageRoot(keccak256(xDomainCalldata));
IL1ScrollMessenger.L2MessageProof memory proof;
proof.batchIndex = rollup.lastFinalizedBatchIndex();
// counterpart is not L2USDCGateway
// emit FailedRelayedMessage from L1ScrollMessenger
hevm.expectEmit(true, false, false, true);
emit FailedRelayedMessage(keccak256(xDomainCalldata));
uint256 gatewayBalance = l1USDC.balanceOf(address(gateway));
uint256 recipientBalance = l1USDC.balanceOf(recipient);
assertBoolEq(false, l1Messenger.isL2MessageExecuted(keccak256(xDomainCalldata)));
l1Messenger.relayMessageWithProof(
address(uint160(address(counterpartGateway)) + 1),
address(gateway),
0,
0,
message,
proof
);
assertEq(gatewayBalance, l1USDC.balanceOf(address(gateway)));
assertEq(recipientBalance, l1USDC.balanceOf(recipient));
assertBoolEq(false, l1Messenger.isL2MessageExecuted(keccak256(xDomainCalldata)));
}
function testFinalizeWithdrawERC20(
address sender,
uint256 amount,
bytes memory dataToCall
) public {
MockGatewayRecipient recipient = new MockGatewayRecipient();
amount = bound(amount, 1, l1USDC.balanceOf(address(this)));
// deposit some USDC to gateway
gateway.depositERC20(address(l1USDC), amount, 0);
// do finalize withdraw usdc
bytes memory message = abi.encodeWithSelector(
IL1ERC20Gateway.finalizeWithdrawERC20.selector,
address(l1USDC),
address(l2USDC),
sender,
address(recipient),
amount,
dataToCall
);
bytes memory xDomainCalldata = abi.encodeWithSignature(
"relayMessage(address,address,uint256,uint256,bytes)",
address(counterpartGateway),
address(gateway),
0,
0,
message
);
prepareL2MessageRoot(keccak256(xDomainCalldata));
IL1ScrollMessenger.L2MessageProof memory proof;
proof.batchIndex = rollup.lastFinalizedBatchIndex();
// emit FinalizeWithdrawERC20 from L1USDCGateway
{
hevm.expectEmit(true, true, true, true);
emit FinalizeWithdrawERC20(
address(l1USDC),
address(l2USDC),
sender,
address(recipient),
amount,
dataToCall
);
}
// emit RelayedMessage from L1ScrollMessenger
{
hevm.expectEmit(true, false, false, true);
emit RelayedMessage(keccak256(xDomainCalldata));
}
uint256 gatewayBalance = l1USDC.balanceOf(address(gateway));
uint256 recipientBalance = l1USDC.balanceOf(address(recipient));
assertBoolEq(false, l1Messenger.isL2MessageExecuted(keccak256(xDomainCalldata)));
l1Messenger.relayMessageWithProof(address(counterpartGateway), address(gateway), 0, 0, message, proof);
assertEq(gatewayBalance - amount, l1USDC.balanceOf(address(gateway)));
assertEq(recipientBalance + amount, l1USDC.balanceOf(address(recipient)));
assertBoolEq(true, l1Messenger.isL2MessageExecuted(keccak256(xDomainCalldata)));
}
function _depositERC20(
bool useRouter,
uint256 amount,
uint256 gasLimit,
uint256 feePerGas
) private {
amount = bound(amount, 0, l1USDC.balanceOf(address(this)));
gasLimit = bound(gasLimit, 0, 1000000);
feePerGas = bound(feePerGas, 0, 1000);
gasOracle.setL2BaseFee(feePerGas);
uint256 feeToPay = feePerGas * gasLimit;
bytes memory message = abi.encodeWithSelector(
IL2ERC20Gateway.finalizeDepositERC20.selector,
address(l1USDC),
address(l2USDC),
address(this),
address(this),
amount,
new bytes(0)
);
bytes memory xDomainCalldata = abi.encodeWithSignature(
"relayMessage(address,address,uint256,uint256,bytes)",
address(gateway),
address(counterpartGateway),
0,
0,
message
);
if (amount == 0) {
hevm.expectRevert("deposit zero amount");
if (useRouter) {
router.depositERC20{value: feeToPay + extraValue}(address(l1USDC), amount, gasLimit);
} else {
gateway.depositERC20{value: feeToPay + extraValue}(address(l1USDC), amount, gasLimit);
}
} else {
// token is not l1USDC
hevm.expectRevert("only USDC is allowed");
gateway.depositERC20(address(l2USDC), amount, gasLimit);
// emit QueueTransaction from L1MessageQueue
{
hevm.expectEmit(true, true, false, true);
address sender = AddressAliasHelper.applyL1ToL2Alias(address(l1Messenger));
emit QueueTransaction(sender, address(l2Messenger), 0, 0, gasLimit, xDomainCalldata);
}
// emit SentMessage from L1ScrollMessenger
{
hevm.expectEmit(true, true, false, true);
emit SentMessage(address(gateway), address(counterpartGateway), 0, 0, gasLimit, message);
}
// emit DepositERC20 from L1USDCGateway
hevm.expectEmit(true, true, true, true);
emit DepositERC20(address(l1USDC), address(l2USDC), address(this), address(this), amount, new bytes(0));
uint256 gatewayBalance = l1USDC.balanceOf(address(gateway));
uint256 feeVaultBalance = address(feeVault).balance;
assertBoolEq(false, l1Messenger.isL1MessageSent(keccak256(xDomainCalldata)));
if (useRouter) {
router.depositERC20{value: feeToPay + extraValue}(address(l1USDC), amount, gasLimit);
} else {
gateway.depositERC20{value: feeToPay + extraValue}(address(l1USDC), amount, gasLimit);
}
assertEq(amount + gatewayBalance, l1USDC.balanceOf(address(gateway)));
assertEq(feeToPay + feeVaultBalance, address(feeVault).balance);
assertBoolEq(true, l1Messenger.isL1MessageSent(keccak256(xDomainCalldata)));
}
}
function _depositERC20WithRecipient(
bool useRouter,
uint256 amount,
address recipient,
uint256 gasLimit,
uint256 feePerGas
) private {
amount = bound(amount, 0, l1USDC.balanceOf(address(this)));
gasLimit = bound(gasLimit, 0, 1000000);
feePerGas = bound(feePerGas, 0, 1000);
gasOracle.setL2BaseFee(feePerGas);
uint256 feeToPay = feePerGas * gasLimit;
bytes memory message = abi.encodeWithSelector(
IL2ERC20Gateway.finalizeDepositERC20.selector,
address(l1USDC),
address(l2USDC),
address(this),
recipient,
amount,
new bytes(0)
);
bytes memory xDomainCalldata = abi.encodeWithSignature(
"relayMessage(address,address,uint256,uint256,bytes)",
address(gateway),
address(counterpartGateway),
0,
0,
message
);
if (amount == 0) {
hevm.expectRevert("deposit zero amount");
if (useRouter) {
router.depositERC20{value: feeToPay + extraValue}(address(l1USDC), recipient, amount, gasLimit);
} else {
gateway.depositERC20{value: feeToPay + extraValue}(address(l1USDC), recipient, amount, gasLimit);
}
} else {
// token is not l1USDC
hevm.expectRevert("only USDC is allowed");
gateway.depositERC20(address(l2USDC), recipient, amount, gasLimit);
// emit QueueTransaction from L1MessageQueue
{
hevm.expectEmit(true, true, false, true);
address sender = AddressAliasHelper.applyL1ToL2Alias(address(l1Messenger));
emit QueueTransaction(sender, address(l2Messenger), 0, 0, gasLimit, xDomainCalldata);
}
// emit SentMessage from L1ScrollMessenger
{
hevm.expectEmit(true, true, false, true);
emit SentMessage(address(gateway), address(counterpartGateway), 0, 0, gasLimit, message);
}
// emit DepositERC20 from L1USDCGateway
hevm.expectEmit(true, true, true, true);
emit DepositERC20(address(l1USDC), address(l2USDC), address(this), recipient, amount, new bytes(0));
uint256 gatewayBalance = l1USDC.balanceOf(address(gateway));
uint256 feeVaultBalance = address(feeVault).balance;
assertBoolEq(false, l1Messenger.isL1MessageSent(keccak256(xDomainCalldata)));
if (useRouter) {
router.depositERC20{value: feeToPay + extraValue}(address(l1USDC), recipient, amount, gasLimit);
} else {
gateway.depositERC20{value: feeToPay + extraValue}(address(l1USDC), recipient, amount, gasLimit);
}
assertEq(amount + gatewayBalance, l1USDC.balanceOf(address(gateway)));
assertEq(feeToPay + feeVaultBalance, address(feeVault).balance);
assertBoolEq(true, l1Messenger.isL1MessageSent(keccak256(xDomainCalldata)));
}
}
function _deployGateway() internal returns (L1USDCGateway) {
return
L1USDCGateway(
payable(new ERC1967Proxy(address(new L1USDCGateway(address(l1USDC), address(l2USDC))), new bytes(0)))
);
}
}

View File

@@ -56,7 +56,7 @@ contract L2USDCGatewayTest is L2GatewayTestBase {
router = L2GatewayRouter(address(new ERC1967Proxy(address(new L2GatewayRouter()), new bytes(0))));
// Deploy L1 contracts
counterpartGateway = new L1USDCGateway(address(l1USDC), address(l2USDC));
counterpartGateway = new L1USDCGateway();
// Initialize L2 contracts
gateway.initialize(address(counterpartGateway), address(router), address(l2Messenger));
@@ -128,6 +128,16 @@ contract L2USDCGatewayTest is L2GatewayTestBase {
_withdrawERC20WithRecipient(false, amount, recipient, gasLimit, feePerGas);
}
function testWithdrawERC20WithRecipientAndCalldata(
uint256 amount,
address recipient,
bytes memory dataToCall,
uint256 gasLimit,
uint256 feePerGas
) public {
_withdrawERC20WithRecipientAndCalldata(false, amount, recipient, dataToCall, gasLimit, feePerGas);
}
function testRouterWithdrawERC20(
uint256 amount,
uint256 gasLimit,
@@ -145,6 +155,16 @@ contract L2USDCGatewayTest is L2GatewayTestBase {
_withdrawERC20WithRecipient(true, amount, recipient, gasLimit, feePerGas);
}
function testRouterWithdrawERC20WithRecipientAndCalldata(
uint256 amount,
address recipient,
bytes memory dataToCall,
uint256 gasLimit,
uint256 feePerGas
) public {
_withdrawERC20WithRecipientAndCalldata(true, amount, recipient, dataToCall, gasLimit, feePerGas);
}
function testFinalizeDepositERC20FailedMocking(
address sender,
address recipient,
@@ -336,6 +356,7 @@ contract L2USDCGatewayTest is L2GatewayTestBase {
) private {
amount = bound(amount, 0, l2USDC.balanceOf(address(this)));
gasLimit = bound(gasLimit, 21000, 1000000);
feePerGas = bound(feePerGas, 0, 1000);
feePerGas = 0;
setL1BaseFee(feePerGas);
@@ -412,6 +433,7 @@ contract L2USDCGatewayTest is L2GatewayTestBase {
) private {
amount = bound(amount, 0, l2USDC.balanceOf(address(this)));
gasLimit = bound(gasLimit, 21000, 1000000);
feePerGas = bound(feePerGas, 0, 1000);
feePerGas = 0;
setL1BaseFee(feePerGas);
@@ -479,6 +501,85 @@ contract L2USDCGatewayTest is L2GatewayTestBase {
}
}
function _withdrawERC20WithRecipientAndCalldata(
bool useRouter,
uint256 amount,
address recipient,
bytes memory dataToCall,
uint256 gasLimit,
uint256 feePerGas
) private {
amount = bound(amount, 0, l2USDC.balanceOf(address(this)));
gasLimit = bound(gasLimit, 21000, 1000000);
feePerGas = bound(feePerGas, 0, 1000);
// we don't charge a fee for now.
feePerGas = 0;
setL1BaseFee(feePerGas);
uint256 feeToPay = feePerGas * gasLimit;
bytes memory message = abi.encodeWithSelector(
IL1ERC20Gateway.finalizeWithdrawERC20.selector,
address(l1USDC),
address(l2USDC),
address(this),
recipient,
amount,
dataToCall
);
bytes memory xDomainCalldata = abi.encodeWithSignature(
"relayMessage(address,address,uint256,uint256,bytes)",
address(gateway),
address(counterpartGateway),
0,
0,
message
);
if (amount == 0) {
hevm.expectRevert("withdraw zero amount");
if (useRouter) {
router.withdrawERC20AndCall{value: feeToPay}(address(l2USDC), recipient, amount, dataToCall, gasLimit);
} else {
gateway.withdrawERC20AndCall{value: feeToPay}(address(l2USDC), recipient, amount, dataToCall, gasLimit);
}
} else {
// token is not l2USDC
hevm.expectRevert("only USDC is allowed");
gateway.withdrawERC20AndCall(address(l1USDC), recipient, amount, dataToCall, gasLimit);
// emit AppendMessage from L2MessageQueue
{
hevm.expectEmit(false, false, false, true);
emit AppendMessage(0, keccak256(xDomainCalldata));
}
// emit SentMessage from L2ScrollMessenger
{
hevm.expectEmit(true, true, false, true);
emit SentMessage(address(gateway), address(counterpartGateway), 0, 0, gasLimit, message);
}
// emit WithdrawERC20 from L2USDCGateway
hevm.expectEmit(true, true, true, true);
emit WithdrawERC20(address(l1USDC), address(l2USDC), address(this), recipient, amount, dataToCall);
uint256 senderBalance = l2USDC.balanceOf(address(this));
uint256 gatewayBalance = l2USDC.balanceOf(address(gateway));
uint256 feeVaultBalance = address(feeVault).balance;
assertBoolEq(false, l2Messenger.isL2MessageSent(keccak256(xDomainCalldata)));
if (useRouter) {
router.withdrawERC20AndCall{value: feeToPay}(address(l2USDC), recipient, amount, dataToCall, gasLimit);
} else {
gateway.withdrawERC20AndCall{value: feeToPay}(address(l2USDC), recipient, amount, dataToCall, gasLimit);
}
assertEq(senderBalance - amount, l2USDC.balanceOf(address(this)));
assertEq(gatewayBalance, l2USDC.balanceOf(address(gateway)));
assertEq(feeToPay + feeVaultBalance, address(feeVault).balance);
assertBoolEq(true, l2Messenger.isL2MessageSent(keccak256(xDomainCalldata)));
}
}
function _deployGateway() internal returns (L2USDCGateway) {
return
L2USDCGateway(

View File

@@ -410,7 +410,7 @@ contract L2WETHGatewayTest is L2GatewayTestBase {
uint256 gasLimit,
uint256 feePerGas
) private {
amount = bound(amount, 0, l2weth.balanceOf(address(this)));
amount = bound(amount, 0, l1weth.balanceOf(address(this)));
gasLimit = bound(gasLimit, 21000, 1000000);
feePerGas = 0;
@@ -485,7 +485,7 @@ contract L2WETHGatewayTest is L2GatewayTestBase {
uint256 gasLimit,
uint256 feePerGas
) private {
amount = bound(amount, 0, l2weth.balanceOf(address(this)));
amount = bound(amount, 0, l1weth.balanceOf(address(this)));
gasLimit = bound(gasLimit, 21000, 1000000);
feePerGas = 0;

View File

@@ -106,17 +106,6 @@ contract ScrollChainTest is DSTestPlus {
hevm.expectRevert("invalid chunk length");
rollup.commitBatch(0, batchHeader0, chunks, new bytes(0));
// num txs less than num L1 msgs, revert
chunk0 = new bytes(1 + 60);
bytes memory bitmap = new bytes(32);
chunk0[0] = bytes1(uint8(1)); // one block in this chunk
chunk0[58] = bytes1(uint8(1)); // numTransactions = 1
chunk0[60] = bytes1(uint8(3)); // numL1Messages = 3
bitmap[31] = bytes1(uint8(7));
chunks[0] = chunk0;
hevm.expectRevert("num txs less than num L1 msgs");
rollup.commitBatch(0, batchHeader0, chunks, bitmap);
// incomplete l2 transaction data, revert
chunk0 = new bytes(1 + 60 + 1);
chunk0[0] = bytes1(uint8(1)); // one block in this chunk

View File

@@ -1,4 +1,4 @@
.PHONY: lint docker clean coordinator coordinator_skip_libzkp mock_coordinator
.PHONY: lint docker clean coordinator mock_coordinator
IMAGE_NAME=coordinator
IMAGE_VERSION=latest
@@ -25,9 +25,6 @@ libzkp:
coordinator: libzkp ## Builds the Coordinator instance.
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator ./cmd
coordinator_skip_libzkp:
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator ./cmd
mock_coordinator: ## Builds the mocked Coordinator instance.
go build -tags="mock_prover mock_verifier" -o $(PWD)/build/bin/coordinator ./cmd

View File

@@ -13,7 +13,6 @@ import (
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
"scroll-tech/common/version"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/orm"
@@ -43,20 +42,12 @@ func NewBatchProverTask(cfg *config.Config, db *gorm.DB) *BatchProverTask {
func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
publicKey, publicKeyExist := ctx.Get(coordinatorType.PublicKey)
if !publicKeyExist {
return nil, fmt.Errorf("get public key from context failed")
return nil, fmt.Errorf("get public key from contex failed")
}
proverName, proverNameExist := ctx.Get(coordinatorType.ProverName)
if !proverNameExist {
return nil, fmt.Errorf("get prover name from context failed")
}
proverVersion, proverVersionExist := ctx.Get(coordinatorType.ProverVersion)
if !proverVersionExist {
return nil, fmt.Errorf("get prover version from context failed")
}
if !version.CheckScrollProverVersion(proverVersion.(string)) {
return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", proverVersion.(string), version.Version)
return nil, fmt.Errorf("get prover name from contex failed")
}
batchTasks, err := bp.batchOrm.UpdateUnassignedBatchReturning(ctx, 1)
@@ -73,7 +64,7 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
}
batchTask := batchTasks[0]
log.Info("start batch proof generation session", "id", batchTask.Hash, "public key", publicKey, "prover name", proverName)
log.Info("start batch proof generation session", "id", batchTask.Hash)
if !bp.checkAttemptsExceeded(batchTask.Hash, message.ProofTypeBatch) {
return nil, fmt.Errorf("the batch task id:%s check attempts have reach the maximum", batchTask.Hash)
@@ -84,7 +75,6 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
ProverPublicKey: publicKey.(string),
TaskType: int16(message.ProofTypeBatch),
ProverName: proverName.(string),
ProverVersion: proverVersion.(string),
ProvingStatus: int16(types.ProverAssigned),
FailureType: int16(types.ProverTaskFailureTypeUndefined),
// here why need use UTC time. see scroll/common/databased/db.go

View File

@@ -13,7 +13,6 @@ import (
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
"scroll-tech/common/version"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/orm"
@@ -43,20 +42,12 @@ func NewChunkProverTask(cfg *config.Config, db *gorm.DB) *ChunkProverTask {
func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
publicKey, publicKeyExist := ctx.Get(coordinatorType.PublicKey)
if !publicKeyExist {
return nil, fmt.Errorf("get public key from context failed")
return nil, fmt.Errorf("get public key from contex failed")
}
proverName, proverNameExist := ctx.Get(coordinatorType.ProverName)
if !proverNameExist {
return nil, fmt.Errorf("get prover name from context failed")
}
proverVersion, proverVersionExist := ctx.Get(coordinatorType.ProverVersion)
if !proverVersionExist {
return nil, fmt.Errorf("get prover version from context failed")
}
if !version.CheckScrollProverVersion(proverVersion.(string)) {
return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", version.Version, proverVersion.(string))
return nil, fmt.Errorf("get prover name from contex failed")
}
// load and send chunk tasks
@@ -75,7 +66,7 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
chunkTask := chunkTasks[0]
log.Info("start chunk generation session", "id", chunkTask.Hash, "public key", publicKey, "prover name", proverName)
log.Info("start chunk generation session", "id", chunkTask.Hash)
if !cp.checkAttemptsExceeded(chunkTask.Hash, message.ProofTypeChunk) {
return nil, fmt.Errorf("chunk proof hash id:%s check attempts have reach the maximum", chunkTask.Hash)
@@ -86,7 +77,6 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
ProverPublicKey: publicKey.(string),
TaskType: int16(message.ProofTypeChunk),
ProverName: proverName.(string),
ProverVersion: proverVersion.(string),
ProvingStatus: int16(types.ProverAssigned),
FailureType: int16(types.ProverTaskFailureTypeUndefined),
// here why need use UTC time. see scroll/common/databased/db.go

View File

@@ -50,7 +50,7 @@ func (b *BaseProverTask) checkAttemptsExceeded(hash string, taskType message.Pro
if len(proverTasks) >= int(b.cfg.ProverManager.SessionAttempts) {
coordinatorSessionsTimeoutTotalCounter.Inc(1)
log.Warn("proof generation prover task reach the max attempts", "hash", hash)
log.Warn("proof generation prover task %s ended because reach the max attempts", hash)
transErr := b.db.Transaction(func(tx *gorm.DB) error {
switch message.ProofType(proverTasks[0].TaskType) {

View File

@@ -2,7 +2,6 @@ package submitproof
import (
"context"
"encoding/json"
"errors"
"fmt"
"time"
@@ -35,12 +34,8 @@ var (
ErrValidatorFailureProofMsgStatusNotOk = errors.New("validator failure proof msg status not ok")
// ErrValidatorFailureProverTaskEmpty get none prover task
ErrValidatorFailureProverTaskEmpty = errors.New("validator failure get none prover task for the proof")
// ErrValidatorFailureProverTaskCannotSubmitTwice prove task can not submit proof twice
ErrValidatorFailureProverTaskCannotSubmitTwice = errors.New("validator failure prove task cannot submit proof twice")
// ErrValidatorFailureProofTimeout the submit proof is timeout
ErrValidatorFailureProofTimeout = errors.New("validator failure submit proof timeout")
// ErrValidatorFailureTaskHaveVerifiedSuccess have proved success and verified success
ErrValidatorFailureTaskHaveVerifiedSuccess = errors.New("validator failure chunk/batch have proved and verified success")
// ErrValidatorFailureProverInfoHasProofValid proof is vaild
ErrValidatorFailureProverInfoHasProofValid = errors.New("validator failure prover task info has proof valid")
)
// ProofReceiverLogic the proof receiver logic
@@ -79,7 +74,7 @@ func NewSubmitProofReceiverLogic(cfg *config.ProverManager, db *gorm.DB) *ProofR
func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.ProofMsg) error {
pk := ctx.GetString(coordinatorType.PublicKey)
if len(pk) == 0 {
return fmt.Errorf("get public key from context failed")
return fmt.Errorf("get public key from contex failed")
}
proverTask, err := m.proverTaskOrm.GetProverTaskByTaskIDAndPubKey(ctx, proofMsg.ID, pk)
@@ -88,15 +83,47 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
return ErrValidatorFailureProverTaskEmpty
}
if err = m.validator(proverTask, pk, proofMsg); err != nil {
if errors.Is(err, ErrValidatorFailureProofMsgStatusNotOk) {
m.proofFailure(ctx, proofMsg.ID, pk, proofMsg.Type)
}
return err
}
proofTime := time.Since(proverTask.CreatedAt)
proofTimeSec := uint64(proofTime.Seconds())
log.Info("handling zk proof", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
"prover pk", pk, "prove type", proverTask.TaskType, "proof time", proofTimeSec)
if err = m.validator(ctx, proverTask, pk, proofMsg); err != nil {
return err
// store proof content
var storeProofErr error
switch proofMsg.Type {
case message.ProofTypeChunk:
storeProofErr = m.db.Transaction(func(tx *gorm.DB) error {
if dbErr := m.chunkOrm.UpdateProofByHash(ctx, proofMsg.ID, proofMsg.ChunkProof, proofTimeSec, tx); dbErr != nil {
return fmt.Errorf("failed to store chunk proof into db, err:%w", dbErr)
}
if dbErr := m.chunkOrm.UpdateProvingStatus(ctx, proofMsg.ID, types.ProvingTaskProved, tx); dbErr != nil {
return fmt.Errorf("failed to update chunk task status as proved, error:%w", dbErr)
}
return nil
})
case message.ProofTypeBatch:
storeProofErr = m.db.Transaction(func(tx *gorm.DB) error {
if dbErr := m.batchOrm.UpdateProofByHash(ctx, proofMsg.ID, proofMsg.BatchProof, proofTimeSec, tx); dbErr != nil {
return fmt.Errorf("failed to store batch proof into db, error:%w", dbErr)
}
if dbErr := m.batchOrm.UpdateProvingStatus(ctx, proofMsg.ID, types.ProvingTaskProved, tx); dbErr != nil {
return fmt.Errorf("failed to update batch task status as proved, error:%w", dbErr)
}
return nil
})
}
if storeProofErr != nil {
m.proofFailure(ctx, proofMsg.ID, pk, proofMsg.Type)
log.Error("failed to store basic proof into db", "error", storeProofErr)
return storeProofErr
}
coordinatorProofsReceivedTotalCounter.Inc(1)
var success bool
var verifyErr error
@@ -107,7 +134,7 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
}
if verifyErr != nil || !success {
m.proofFailure(ctx, proofMsg.ID, pk, proofMsg)
m.proofFailure(ctx, proofMsg.ID, pk, proofMsg.Type)
coordinatorProofsVerifiedFailedTimeTimer.Update(proofTime)
log.Info("proof verified by coordinator failed", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
@@ -122,10 +149,8 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
log.Info("proof verified and valid", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
"prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec)
coordinatorProofsReceivedTotalCounter.Inc(1)
if err := m.closeProofTask(ctx, proofMsg.ID, pk, proofMsg, proofTimeSec); err != nil {
m.proofRecover(ctx, proofMsg.ID, pk, proofMsg)
if err := m.closeProofTask(ctx, proofMsg.ID, pk, proofMsg); err != nil {
m.proofRecover(ctx, proofMsg.ID, pk, proofMsg.Type)
return err
}
@@ -153,78 +178,49 @@ func (m *ProofReceiverLogic) checkAreAllChunkProofsReady(ctx context.Context, ch
return nil
}
func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.ProverTask, pk string, proofMsg *message.ProofMsg) error {
func (m *ProofReceiverLogic) validator(proverTask *orm.ProverTask, pk string, proofMsg *message.ProofMsg) error {
// Ensure this prover is eligible to participate in the prover task.
if types.ProverProveStatus(proverTask.ProvingStatus) == types.ProverProofValid {
// In order to prevent DoS attacks, it is forbidden to repeatedly submit valid proofs.
// TODO: Defend invalid proof resubmissions by one of the following two methods:
// (i) slash the prover for each submission of invalid proof
// (ii) set the maximum failure retry times
log.Warn("the prover task cannot submit twice", "hash", proofMsg.ID, "prover pk", proverTask.ProverPublicKey,
"prover name", proverTask.ProverName, "proof type", proverTask.TaskType)
return ErrValidatorFailureProverTaskCannotSubmitTwice
log.Warn("prover has already submitted valid proof in proof session", "prover name", proverTask.ProverName,
"prover pk", proverTask.ProverPublicKey, "proof type", proverTask.TaskType, "proof id", proofMsg.ProofDetail.ID)
return ErrValidatorFailureProverInfoHasProofValid
}
proofTime := time.Since(proverTask.CreatedAt)
proofTimeSec := uint64(proofTime.Seconds())
log.Info("handling zk proof", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
"prover pk", pk, "prove type", proverTask.TaskType, "proof time", proofTimeSec)
if proofMsg.Status != message.StatusOk {
coordinatorProofsGeneratedFailedTimeTimer.Update(proofTime)
log.Info("proof generated by prover failed", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
"prover pk", pk, "prove type", proofMsg.Type, "error", proofMsg.Error)
if updateErr := m.proverTaskOrm.UpdateProverTaskProvingStatus(ctx, proofMsg.Type, proofMsg.ID, pk, types.ProverProofInvalid); updateErr != nil {
log.Error("proof generated by prover failed update prover task proving status failure", "proof id", proofMsg.ID,
"prover name", proverTask.ProverName, "prover pk", pk, "prove type", proofMsg.Type, "error", proofMsg.Error)
}
"prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", proofMsg.Error)
return ErrValidatorFailureProofMsgStatusNotOk
}
// if prover task FailureType is SessionInfoFailureTimeout, the submit proof is timeout, need skip it
if types.ProverTaskFailureType(proverTask.FailureType) == types.ProverTaskFailureTypeTimeout {
log.Info("proof submit proof have timeout, skip this submit proof", "hash", proofMsg.ID, "proof type", proverTask.TaskType,
"prover name", proverTask.ProverName, "prover public key", pk, "proof time", proofTimeSec)
return ErrValidatorFailureProofTimeout
}
// store the proof to prover task
if updateTaskProofErr := m.updateProverTaskProof(ctx, pk, proofMsg); updateTaskProofErr != nil {
log.Warn("update prover task proof failure", "hash", proofMsg.ID, "public key", pk,
"prover name", proverTask.ProverName, "error", updateTaskProofErr)
}
// if the batch/chunk have proved and verifier success, need skip this submit proof
if m.checkIsTaskSuccess(ctx, proofMsg.ID, proofMsg.Type) {
log.Info("the prove task have proved and verifier success, skip this submit proof", "hash", proofMsg.ID,
"proof type", proverTask.TaskType, "prover name", proverTask.ProverName, "prover public key", pk)
return ErrValidatorFailureTaskHaveVerifiedSuccess
}
return nil
}
func (m *ProofReceiverLogic) proofFailure(ctx context.Context, hash string, pubKey string, proofMsg *message.ProofMsg) {
log.Info("proof failure update proof status", "hash", hash, "public key", pubKey,
"proof type", proofMsg.Type.String(), "status", types.ProvingTaskFailed.String())
if err := m.updateProofStatus(ctx, hash, pubKey, proofMsg, types.ProvingTaskFailed, 0); err != nil {
func (m *ProofReceiverLogic) proofFailure(ctx context.Context, hash string, pubKey string, proofMsgType message.ProofType) {
if err := m.updateProofStatus(ctx, hash, pubKey, proofMsgType, types.ProvingTaskFailed); err != nil {
log.Error("failed to updated proof status ProvingTaskFailed", "hash", hash, "pubKey", pubKey, "error", err)
}
coordinatorSessionsFailedTotalCounter.Inc(1)
}
func (m *ProofReceiverLogic) proofRecover(ctx context.Context, hash string, pubKey string, proofMsg *message.ProofMsg) {
log.Info("proof recover update proof status", "hash", hash, "public key", pubKey,
"proof type", proofMsg.Type.String(), "status", types.ProvingTaskUnassigned.String())
if err := m.updateProofStatus(ctx, hash, pubKey, proofMsg, types.ProvingTaskUnassigned, 0); err != nil {
func (m *ProofReceiverLogic) proofRecover(ctx context.Context, hash string, pubKey string, proofMsgType message.ProofType) {
if err := m.updateProofStatus(ctx, hash, pubKey, proofMsgType, types.ProvingTaskUnassigned); err != nil {
log.Error("failed to updated proof status ProvingTaskUnassigned", "hash", hash, "pubKey", pubKey, "error", err)
}
}
func (m *ProofReceiverLogic) closeProofTask(ctx context.Context, hash string, pubKey string, proofMsg *message.ProofMsg, proofTimeSec uint64) error {
log.Info("proof close task update proof status", "hash", hash, "public key", pubKey,
"proof type", proofMsg.Type.String(), "status", types.ProvingTaskVerified.String())
if err := m.updateProofStatus(ctx, hash, pubKey, proofMsg, types.ProvingTaskVerified, proofTimeSec); err != nil {
func (m *ProofReceiverLogic) closeProofTask(ctx context.Context, hash string, pubKey string, proofMsg *message.ProofMsg) error {
if err := m.updateProofStatus(ctx, hash, pubKey, proofMsg.Type, types.ProvingTaskVerified); err != nil {
log.Error("failed to updated proof status ProvingTaskVerified", "hash", hash, "pubKey", pubKey, "error", err)
return err
}
@@ -232,7 +228,13 @@ func (m *ProofReceiverLogic) closeProofTask(ctx context.Context, hash string, pu
}
// UpdateProofStatus update the chunk/batch task and session info status
func (m *ProofReceiverLogic) updateProofStatus(ctx context.Context, hash string, proverPublicKey string, proofMsg *message.ProofMsg, status types.ProvingStatus, proofTimeSec uint64) error {
func (m *ProofReceiverLogic) updateProofStatus(ctx context.Context, hash string, proverPublicKey string, proofMsgType message.ProofType, status types.ProvingStatus) error {
// if the prover task failure type is SessionInfoFailureTimeout,
// just skip update the status because the proof result come too late.
if m.checkIsTimeoutFailure(ctx, hash, proverPublicKey) {
return nil
}
var proverTaskStatus types.ProverProveStatus
switch status {
case types.ProvingTaskFailed, types.ProvingTaskUnassigned:
@@ -242,31 +244,16 @@ func (m *ProofReceiverLogic) updateProofStatus(ctx context.Context, hash string,
}
err := m.db.Transaction(func(tx *gorm.DB) error {
if updateErr := m.proverTaskOrm.UpdateProverTaskProvingStatus(ctx, proofMsg.Type, hash, proverPublicKey, proverTaskStatus, tx); updateErr != nil {
if updateErr := m.proverTaskOrm.UpdateProverTaskProvingStatus(ctx, proofMsgType, hash, proverPublicKey, proverTaskStatus, tx); updateErr != nil {
return updateErr
}
// if the block batch has proof verified, so the failed status not update block batch proving status
if status == types.ProvingTaskFailed && m.checkIsTaskSuccess(ctx, hash, proofMsg.Type) {
log.Info("update proof status ProvingTaskFailed skip because other prover have prove success", "hash", hash, "public key", proverPublicKey)
if status == types.ProvingTaskFailed && m.checkIsTaskSuccess(ctx, hash, proofMsgType) {
return nil
}
if status == types.ProvingTaskVerified {
var storeProofErr error
switch proofMsg.Type {
case message.ProofTypeChunk:
storeProofErr = m.chunkOrm.UpdateProofByHash(ctx, proofMsg.ID, proofMsg.ChunkProof, proofTimeSec, tx)
case message.ProofTypeBatch:
storeProofErr = m.batchOrm.UpdateProofByHash(ctx, proofMsg.ID, proofMsg.BatchProof, proofTimeSec, tx)
}
if storeProofErr != nil {
log.Error("failed to store chunk/batch proof into db", "hash", hash, "public key", proverPublicKey, "error", storeProofErr)
return storeProofErr
}
}
switch proofMsg.Type {
switch proofMsgType {
case message.ProofTypeChunk:
if err := m.chunkOrm.UpdateProvingStatus(ctx, hash, status, tx); err != nil {
log.Error("failed to update chunk proving_status as failed", "msg.ID", hash, "error", err)
@@ -285,7 +272,7 @@ func (m *ProofReceiverLogic) updateProofStatus(ctx context.Context, hash string,
return err
}
if status == types.ProvingTaskVerified && proofMsg.Type == message.ProofTypeChunk {
if status == types.ProvingTaskVerified && proofMsgType == message.ProofTypeChunk {
if checkReadyErr := m.checkAreAllChunkProofsReady(ctx, hash); checkReadyErr != nil {
log.Error("failed to check are all chunk proofs ready", "error", checkReadyErr)
return checkReadyErr
@@ -315,19 +302,14 @@ func (m *ProofReceiverLogic) checkIsTaskSuccess(ctx context.Context, hash string
return provingStatus == types.ProvingTaskVerified
}
func (m *ProofReceiverLogic) updateProverTaskProof(ctx context.Context, pk string, proofMsg *message.ProofMsg) error {
// store the proof to prover task
var proofBytes []byte
var marshalErr error
switch proofMsg.Type {
case message.ProofTypeChunk:
proofBytes, marshalErr = json.Marshal(proofMsg.ChunkProof)
case message.ProofTypeBatch:
proofBytes, marshalErr = json.Marshal(proofMsg.BatchProof)
func (m *ProofReceiverLogic) checkIsTimeoutFailure(ctx context.Context, hash, proverPublicKey string) bool {
proverTask, err := m.proverTaskOrm.GetProverTaskByTaskIDAndPubKey(ctx, hash, proverPublicKey)
if err != nil {
return false
}
if len(proofBytes) == 0 || marshalErr != nil {
return fmt.Errorf("updateProverTaskProof marshal proof error:%w", marshalErr)
if types.ProverTaskFailureType(proverTask.FailureType) == types.ProverTaskFailureTypeTimeout {
return true
}
return m.proverTaskOrm.UpdateProverTaskProof(ctx, proofMsg.Type, proofMsg.ID, pk, proofBytes)
return false
}

View File

@@ -294,7 +294,6 @@ func (o *Batch) UpdateUnassignedBatchReturning(ctx context.Context, limit int) (
var batches []*Batch
db = db.Model(&batches).Clauses(clause.Returning{})
db = db.Where("index = (?)", subQueryDB)
db = db.Where("proving_status = ?", types.ProvingTaskUnassigned)
if err := db.Update("proving_status", types.ProvingTaskAssigned).Error; err != nil {
return nil, fmt.Errorf("Batch.UpdateUnassignedBatchReturning error: %w", err)
}

View File

@@ -364,7 +364,6 @@ func (o *Chunk) UpdateUnassignedChunkReturning(ctx context.Context, height, limi
var chunks []*Chunk
db = db.Model(&chunks).Clauses(clause.Returning{})
db = db.Where("index = (?)", subQueryDB)
db = db.Where("proving_status = ?", types.ProvingTaskUnassigned)
if err := db.Update("proving_status", types.ProvingTaskAssigned).Error; err != nil {
return nil, fmt.Errorf("Chunk.UpdateUnassignedBatchReturning error: %w", err)
}

View File

@@ -22,7 +22,6 @@ type ProverTask struct {
// prover
ProverPublicKey string `json:"prover_public_key" gorm:"column:prover_public_key"`
ProverName string `json:"prover_name" gorm:"column:prover_name"`
ProverVersion string `json:"prover_version" gorm:"column:prover_version"`
// task
TaskID string `json:"task_id" gorm:"column:task_id"`
@@ -112,20 +111,6 @@ func (o *ProverTask) GetProverTaskByTaskIDAndPubKey(ctx context.Context, taskID,
return &proverTask, nil
}
// GetProvingStatusByTaskID retrieves the proving status of a prover task
func (o *ProverTask) GetProvingStatusByTaskID(ctx context.Context, taskID string) (types.ProverProveStatus, error) {
db := o.db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Select("proving_status")
db = db.Where("task_id = ?", taskID)
var proverTask ProverTask
if err := db.Find(&proverTask).Error; err != nil {
return types.ProverProofInvalid, fmt.Errorf("ProverTask.GetProvingStatusByTaskID error: %w, taskID: %v", err, taskID)
}
return types.ProverProveStatus(proverTask.ProvingStatus), nil
}
// GetAssignedProverTasks get the assigned prover task
func (o *ProverTask) GetAssignedProverTasks(ctx context.Context, limit int) ([]ProverTask, error) {
db := o.db.WithContext(ctx)
@@ -151,7 +136,7 @@ func (o *ProverTask) SetProverTask(ctx context.Context, proverTask *ProverTask,
db = db.Model(&ProverTask{})
db = db.Clauses(clause.OnConflict{
Columns: []clause.Column{{Name: "task_type"}, {Name: "task_id"}, {Name: "prover_public_key"}},
DoUpdates: clause.AssignmentColumns([]string{"prover_version", "proving_status", "failure_type", "assigned_at"}),
DoUpdates: clause.AssignmentColumns([]string{"proving_status", "failure_type", "assigned_at"}),
})
if err := db.Create(&proverTask).Error; err != nil {
@@ -160,19 +145,6 @@ func (o *ProverTask) SetProverTask(ctx context.Context, proverTask *ProverTask,
return nil
}
// UpdateProverTaskProof update the prover task's proof
func (o *ProverTask) UpdateProverTaskProof(ctx context.Context, proofType message.ProofType, taskID string, pk string, proof []byte) error {
db := o.db
db = db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("task_type = ? AND task_id = ? AND prover_public_key = ?", int(proofType), taskID, pk)
if err := db.Update("proof", proof).Error; err != nil {
return fmt.Errorf("ProverTask.UpdateProverTaskProof error: %w, proof type: %v, taskID: %v, prover public key: %v", err, proofType.String(), taskID, pk)
}
return nil
}
// UpdateProverTaskProvingStatus updates the proving_status of a specific ProverTask record.
func (o *ProverTask) UpdateProverTaskProvingStatus(ctx context.Context, proofType message.ProofType, taskID string, pk string, status types.ProverProveStatus, dbTX ...*gorm.DB) error {
db := o.db

View File

@@ -23,7 +23,6 @@ import (
"scroll-tech/common/docker"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/version"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/controller/api"
@@ -38,11 +37,10 @@ var (
base *docker.App
db *gorm.DB
l2BlockOrm *orm.L2Block
chunkOrm *orm.Chunk
batchOrm *orm.Batch
proverTaskOrm *orm.ProverTask
db *gorm.DB
l2BlockOrm *orm.L2Block
chunkOrm *orm.Chunk
batchOrm *orm.Batch
wrappedBlock1 *types.WrappedBlock
wrappedBlock2 *types.WrappedBlock
@@ -109,8 +107,6 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
}
func setEnv(t *testing.T) {
version.Version = "v1.2.3-aaa-bbb-ccc"
base = docker.NewDockerApp()
base.RunDBImage(t)
@@ -131,7 +127,6 @@ func setEnv(t *testing.T) {
batchOrm = orm.NewBatch(db)
chunkOrm = orm.NewChunk(db)
l2BlockOrm = orm.NewL2Block(db)
proverTaskOrm = orm.NewProverTask(db)
templateBlockTrace, err := os.ReadFile("../../common/testdata/blockTrace_02.json")
assert.NoError(t, err)
@@ -367,12 +362,8 @@ func testProofGeneratedFailed(t *testing.T) {
tickStop = time.Tick(time.Minute)
)
var (
chunkProofStatus types.ProvingStatus
batchProofStatus types.ProvingStatus
chunkProverTaskProvingStatus types.ProverProveStatus
batchProverTaskProvingStatus types.ProverProveStatus
)
var chunkProofStatus types.ProvingStatus
var batchProofStatus types.ProvingStatus
for {
select {
@@ -381,15 +372,7 @@ func testProofGeneratedFailed(t *testing.T) {
assert.NoError(t, err)
batchProofStatus, err = batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash)
assert.NoError(t, err)
if chunkProofStatus == types.ProvingTaskAssigned && batchProofStatus == types.ProvingTaskAssigned {
return
}
chunkProverTaskProvingStatus, err = proverTaskOrm.GetProvingStatusByTaskID(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
batchProverTaskProvingStatus, err = proverTaskOrm.GetProvingStatusByTaskID(context.Background(), batch.Hash)
assert.NoError(t, err)
if chunkProverTaskProvingStatus == types.ProverProofInvalid && batchProverTaskProvingStatus == types.ProverProofInvalid {
if chunkProofStatus == types.ProvingTaskFailed && batchProofStatus == types.ProvingTaskFailed {
return
}
case <-tickStop:

View File

@@ -14,7 +14,6 @@ import (
ctypes "scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/version"
"scroll-tech/coordinator/internal/logic/verifier"
"scroll-tech/coordinator/internal/types"
@@ -79,7 +78,7 @@ func (r *mockProver) login(t *testing.T, challengeString string) string {
Identity: &message.Identity{
Challenge: challengeString,
ProverName: "test",
ProverVersion: version.Version,
ProverVersion: "v1.0.0",
},
}
assert.NoError(t, authMsg.SignWithKey(r.privKey))
@@ -163,16 +162,11 @@ func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType) *t
}
func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSchema, proofStatus proofStatus, errCode int) {
proofMsgStatus := message.StatusOk
if proofStatus == generatedFailed {
proofMsgStatus = message.StatusProofError
}
proof := &message.ProofMsg{
ProofDetail: &message.ProofDetail{
ID: proverTaskSchema.TaskID,
Type: message.ProofType(proverTaskSchema.TaskType),
Status: proofMsgStatus,
Status: message.RespStatus(proofStatus),
ChunkProof: &message.ChunkProof{},
BatchProof: &message.BatchProof{},
},

View File

@@ -8,7 +8,6 @@ create table prover_task
-- prover
prover_public_key VARCHAR NOT NULL,
prover_name VARCHAR NOT NULL,
prover_version VARCHAR NOT NULL,
-- task
task_id VARCHAR NOT NULL,

View File

@@ -1,8 +1,4 @@
.PHONY: clean build test docker
IMAGE_NAME=prover-stats-api
IMAGE_VERSION=latest
REPO_ROOT_DIR=./..
.PHONY: clean build test
build:
GOBIN=$(PWD)/build/bin go build -o $(PWD)/build/bin/prover-stats-api ./cmd
@@ -18,6 +14,3 @@ test:
lint: ## Lint the files - used for CI
GOBIN=$(PWD)/build/bin go run ../build/lint.go
docker:
DOCKER_BUILDKIT=1 docker build -t scrolltech/${IMAGE_NAME}:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/prover-stats-api.Dockerfile

View File

@@ -20,7 +20,6 @@ type ProverTask struct {
TaskID string `json:"task_id" gorm:"column:task_id"`
ProverPublicKey string `json:"prover_public_key" gorm:"column:prover_public_key"`
ProverName string `json:"prover_name" gorm:"column:prover_name"`
ProverVersion string `json:"prover_version" gorm:"column:prover_version"`
TaskType int16 `json:"task_type" gorm:"column:task_type;default:0"`
ProvingStatus int16 `json:"proving_status" gorm:"column:proving_status;default:0"`
FailureType int16 `json:"failure_type" gorm:"column:failure_type;default:0"`
@@ -95,7 +94,7 @@ func (o *ProverTask) SetProverTask(ctx context.Context, proverTask *ProverTask,
db = db.Model(&ProverTask{})
db = db.Clauses(clause.OnConflict{
Columns: []clause.Column{{Name: "task_type"}, {Name: "task_id"}, {Name: "prover_public_key"}},
DoUpdates: clause.AssignmentColumns([]string{"prover_version", "proving_status", "failure_type", "assigned_at"}),
DoUpdates: clause.AssignmentColumns([]string{"proving_status", "failure_type", "assigned_at"}),
})
if err := db.Create(&proverTask).Error; err != nil {
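For reference, the OnConflict clause shown above turns Create into an upsert keyed on (task_type, task_id, prover_public_key), overwriting only the listed columns when a row already exists. A minimal, self-contained sketch of the same GORM pattern follows; the trimmed-down proverTask model and the in-memory SQLite database are assumptions for illustration only, not the repository's actual schema:

package main

import (
	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
	"gorm.io/gorm/clause"
)

// proverTask is a hypothetical, trimmed-down stand-in for the ORM model,
// kept only to illustrate the composite-key upsert.
type proverTask struct {
	TaskType        int16  `gorm:"column:task_type;primaryKey"`
	TaskID          string `gorm:"column:task_id;primaryKey"`
	ProverPublicKey string `gorm:"column:prover_public_key;primaryKey"`
	ProvingStatus   int16  `gorm:"column:proving_status"`
}

// upsertProverTask inserts the row, or on a (task_type, task_id,
// prover_public_key) conflict updates only proving_status.
func upsertProverTask(db *gorm.DB, task *proverTask) error {
	return db.Clauses(clause.OnConflict{
		Columns:   []clause.Column{{Name: "task_type"}, {Name: "task_id"}, {Name: "prover_public_key"}},
		DoUpdates: clause.AssignmentColumns([]string{"proving_status"}),
	}).Create(task).Error
}

func main() {
	db, err := gorm.Open(sqlite.Open("file::memory:?cache=shared"), &gorm.Config{})
	if err != nil {
		panic(err)
	}
	if err := db.AutoMigrate(&proverTask{}); err != nil {
		panic(err)
	}
	task := &proverTask{TaskType: 1, TaskID: "chunk-1", ProverPublicKey: "pk", ProvingStatus: 1}
	_ = upsertProverTask(db, task) // first call inserts
	task.ProvingStatus = 2
	_ = upsertProverTask(db, task) // second call only updates proving_status
}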

View File

@@ -1,21 +1,21 @@
.PHONY: lint docker clean prover mock-prover
ifeq (4.3,$(firstword $(sort $(MAKE_VERSION) 4.3)))
ZKEVM_VERSION=$(shell grep -m 1 "scroll-prover" ../common/libzkp/impl/Cargo.lock | cut -d "#" -f2 | cut -c-7)
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ../common/libzkp/impl/Cargo.lock | cut -d "#" -f2 | cut -c-7)
ZKEVM_VERSION=$(shell grep -m 1 "scroll-prover" ../common/libzkp/impl/Cargo.lock | cut -d "#" -f2 | cut -c-7)
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ../common/libzkp/impl/Cargo.lock | cut -d "#" -f2 | cut -c-7)
else
ZKEVM_VERSION=$(shell grep -m 1 "scroll-prover" ../common/libzkp/impl/Cargo.lock | cut -d "\#" -f2 | cut -c-7)
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ../common/libzkp/impl/Cargo.lock | cut -d "\#" -f2 | cut -c-7)
ZKEVM_VERSION=$(shell grep -m 1 "scroll-prover" ../common/libzkp/impl/Cargo.lock | cut -d "\#" -f2 | cut -c-7)
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ../common/libzkp/impl/Cargo.lock | cut -d "\#" -f2 | cut -c-7)
endif
HALO2_GPU_VERSION=$(shell ./print_halo2gpu_version.sh | sed -n '2p')
ifeq (${HALO2_GPU_VERSION},)
# use halo2_proofs with CPU
ZK_VERSION=${ZKEVM_VERSION}-${HALO2_VERSION}
ZK_VERSION=${ZKEVM_VERSION}-${HALO2_VERSION}
else
# use halo2_gpu
ZK_VERSION=${ZKEVM_VERSION}-${HALO2_GPU_VERSION}
ZK_VERSION=${ZKEVM_VERSION}-${HALO2_GPU_VERSION}
endif
libzkp:

View File

@@ -31,8 +31,7 @@ func init() {
}
// Register `prover-test` app for integration-test.
utils.RegisterSimulation(app, utils.ChunkProverApp)
utils.RegisterSimulation(app, utils.BatchProverApp)
utils.RegisterSimulation(app, utils.ProverApp)
}
func action(ctx *cli.Context) error {

View File

@@ -6,21 +6,11 @@ import (
"time"
"scroll-tech/common/cmd"
"scroll-tech/common/utils"
"scroll-tech/common/version"
)
func TestRunChunkProver(t *testing.T) {
prover := cmd.NewCmd(string(utils.ChunkProverApp), "--version")
defer prover.WaitExit()
// wait result
prover.ExpectWithTimeout(t, true, time.Second*3, fmt.Sprintf("prover version %s", version.Version))
prover.RunApp(nil)
}
func TestRunBatchProver(t *testing.T) {
prover := cmd.NewCmd(string(utils.BatchProverApp), "--version")
func TestRunProver(t *testing.T) {
prover := cmd.NewCmd("prover-test", "--version")
defer prover.WaitExit()
// wait result

View File

@@ -4,12 +4,15 @@ import (
"encoding/json"
"fmt"
"os"
"sync"
"testing"
"time"
"github.com/google/uuid"
"github.com/scroll-tech/go-ethereum/rpc"
"golang.org/x/sync/errgroup"
"scroll-tech/prover/config"
proverConfig "scroll-tech/prover/config"
"scroll-tech/common/cmd"
"scroll-tech/common/docker"
@@ -28,7 +31,7 @@ func getIndex() int {
// ProverApp prover-test client manager.
type ProverApp struct {
Config *config.Config
Config *proverConfig.Config
base *docker.App
@@ -38,30 +41,23 @@ type ProverApp struct {
index int
name string
args []string
docker.AppAPI
}
// NewProverApp returns a new proverApp manager.
func NewProverApp(base *docker.App, mockName utils.MockAppName, file string, httpURL string) *ProverApp {
var proofType message.ProofType
switch mockName {
case utils.ChunkProverApp:
proofType = message.ProofTypeChunk
case utils.BatchProverApp:
proofType = message.ProofTypeBatch
default:
return nil
}
name := string(mockName)
proverFile := fmt.Sprintf("/tmp/%d_%s-config.json", base.Timestamp, name)
func NewProverApp(base *docker.App, file string, httpURL string, proofType message.ProofType) *ProverApp {
uuid := uuid.New().String()
proverFile := fmt.Sprintf("/tmp/%s_%d_prover-config.json", uuid, base.Timestamp)
proverApp := &ProverApp{
base: base,
originFile: file,
proverFile: proverFile,
bboltDB: fmt.Sprintf("/tmp/%d_%s_bbolt_db", base.Timestamp, name),
bboltDB: fmt.Sprintf("/tmp/%s_%d_bbolt_db", uuid, base.Timestamp),
index: getIndex(),
name: name,
AppAPI: cmd.NewCmd(name, []string{"--log.debug", "--config", proverFile}...),
name: string(utils.ProverApp),
args: []string{"--log.debug", "--config", proverFile},
}
if err := proverApp.MockConfig(true, httpURL, proofType); err != nil {
panic(err)
@@ -70,10 +66,18 @@ func NewProverApp(base *docker.App, mockName utils.MockAppName, file string, htt
}
// RunApp runs the prover-test child process with the given parameters.
func (r *ProverApp) RunApp(t *testing.T) {
func (r *ProverApp) RunApp(t *testing.T, args ...string) {
r.AppAPI = cmd.NewCmd(r.name, append(r.args, args...)...)
r.AppAPI.RunApp(func() bool { return r.AppAPI.WaitResult(t, time.Second*40, "prover start successfully") })
}
// RunAppWithExpectedResult runs the prover-test child process with multiple parameters,
// and checks for a specific expected result in the output.
func (r *ProverApp) RunAppWithExpectedResult(t *testing.T, expectedResult string, args ...string) {
r.AppAPI = cmd.NewCmd(r.name, append(r.args, args...)...)
r.AppAPI.RunApp(func() bool { return r.AppAPI.WaitResult(t, time.Second*40, expectedResult) })
}
// Free stops and releases prover-test.
func (r *ProverApp) Free() {
if !utils.IsNil(r.AppAPI) {
@@ -86,7 +90,7 @@ func (r *ProverApp) Free() {
// MockConfig creates a new prover config.
func (r *ProverApp) MockConfig(store bool, httpURL string, proofType message.ProofType) error {
cfg, err := config.NewConfig(r.originFile)
cfg, err := proverConfig.NewConfig(r.originFile)
if err != nil {
return err
}
@@ -119,3 +123,47 @@ func (r *ProverApp) MockConfig(store bool, httpURL string, proofType message.Pro
}
return os.WriteFile(r.proverFile, data, 0600)
}
// ProverApps proverApp list.
type ProverApps []*ProverApp
// RunApps starts all the proverApps.
func (r ProverApps) RunApps(t *testing.T, args ...string) {
var eg errgroup.Group
for i := range r {
i := i
eg.Go(func() error {
r[i].RunApp(t, args...)
return nil
})
}
_ = eg.Wait()
}
// Free releases proverApps.
func (r ProverApps) Free() {
var wg sync.WaitGroup
wg.Add(len(r))
for i := range r {
i := i
go func() {
r[i].Free()
wg.Done()
}()
}
wg.Wait()
}
// WaitExit waits until all proverApps have stopped.
func (r ProverApps) WaitExit() {
var wg sync.WaitGroup
wg.Add(len(r))
for i := range r {
i := i
go func() {
r[i].WaitExit()
wg.Done()
}()
}
wg.Wait()
}
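A short sketch of how the new helpers are meant to compose in an integration test; the config path and coordinator URL are placeholders, and the surrounding package's imports (testing, docker, message) are assumed rather than shown:

// runProvers is a hypothetical helper mirroring the integration tests
// elsewhere in this change; it is not part of the repository.
func runProvers(t *testing.T, base *docker.App, coordinatorURL string) {
	provers := ProverApps{
		NewProverApp(base, "../../prover/config.json", coordinatorURL, message.ProofTypeChunk),
		NewProverApp(base, "../../prover/config.json", coordinatorURL, message.ProofTypeBatch),
	}
	// Start every prover in parallel and block until each one logs
	// "prover start successfully".
	provers.RunApps(t)
	// Wait for the child processes to exit, then release their resources.
	provers.WaitExit()
	provers.Free()
}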

View File

@@ -4,10 +4,12 @@ go 1.19
require (
github.com/go-resty/resty/v2 v2.7.0
github.com/google/uuid v1.3.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20230804022247-26eeb40ea3ca
github.com/stretchr/testify v1.8.3
github.com/urfave/cli/v2 v2.25.7
go.etcd.io/bbolt v1.3.7
golang.org/x/sync v0.3.0
)
require (
@@ -19,7 +21,6 @@ require (
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/holiman/uint256 v1.2.3 // indirect
github.com/huin/goupnp v1.0.3 // indirect
@@ -44,7 +45,6 @@ require (
github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/crypto v0.11.0 // indirect
golang.org/x/net v0.12.0 // indirect
golang.org/x/sync v0.3.0 // indirect
golang.org/x/sys v0.10.0 // indirect
golang.org/x/time v0.3.0 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect

View File

@@ -22,8 +22,8 @@ import (
"scroll-tech/common/database"
"scroll-tech/common/docker"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
"scroll-tech/common/version"
bcmd "scroll-tech/bridge/cmd"
)
@@ -40,8 +40,8 @@ func TestMain(m *testing.M) {
base = docker.NewDockerApp()
bridgeApp = bcmd.NewBridgeApp(base, "../../bridge/conf/config.json")
coordinatorApp = capp.NewCoordinatorApp(base, "../../coordinator/conf/config.json")
chunkProverApp = rapp.NewProverApp(base, utils.ChunkProverApp, "../../prover/config.json", coordinatorApp.HTTPEndpoint())
batchProverApp = rapp.NewProverApp(base, utils.BatchProverApp, "../../prover/config.json", coordinatorApp.HTTPEndpoint())
chunkProverApp = rapp.NewProverApp(base, "../../prover/config.json", coordinatorApp.HTTPEndpoint(), message.ProofTypeChunk)
batchProverApp = rapp.NewProverApp(base, "../../prover/config.json", coordinatorApp.HTTPEndpoint(), message.ProofTypeBatch)
m.Run()
bridgeApp.Free()
coordinatorApp.Free()
@@ -112,21 +112,17 @@ func TestCoordinatorProverInteraction(t *testing.T) {
assert.NoError(t, err)
err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 0, batch.Hash)
assert.NoError(t, err)
t.Log(version.Version)
// Run coordinator app.
coordinatorApp.RunApp(t)
// Run prover app.
chunkProverApp.ExpectWithTimeout(t, true, time.Second*40, "proof submitted successfully") // chunk prover login -> get task -> submit proof.
batchProverApp.ExpectWithTimeout(t, true, time.Second*40, "proof submitted successfully") // batch prover login -> get task -> submit proof.
chunkProverApp.RunAppWithExpectedResult(t, "proof submitted successfully") // chunk prover login -> get task -> submit proof.
batchProverApp.RunAppWithExpectedResult(t, "proof submitted successfully") // batch prover login -> get task -> submit proof.
// All tasks have been proven, so the coordinator should not return any task.
chunkProverApp.ExpectWithTimeout(t, true, 60*time.Second, "get empty prover task")
batchProverApp.ExpectWithTimeout(t, true, 60*time.Second, "get empty prover task")
chunkProverApp.RunApp(t)
batchProverApp.RunApp(t)
chunkProverApp.ExpectWithTimeout(t, false, 60*time.Second, "get empty prover task")
batchProverApp.ExpectWithTimeout(t, false, 60*time.Second, "get empty prover task")
// Free apps.
chunkProverApp.WaitExit()
@@ -143,12 +139,10 @@ func TestProverReLogin(t *testing.T) {
// Run coordinator app.
coordinatorApp.RunApp(t) // login timeout: 1 sec
chunkProverApp.RunApp(t)
batchProverApp.RunApp(t)
// Run prover app.
chunkProverApp.WaitResult(t, time.Second*40, "re-login success") // chunk prover login.
batchProverApp.WaitResult(t, time.Second*40, "re-login success") // batch prover login.
chunkProverApp.RunAppWithExpectedResult(t, "re-login success") // chunk prover login.
batchProverApp.RunAppWithExpectedResult(t, "re-login success") // batch prover login.
// Free apps.
chunkProverApp.WaitExit()