Mirror of https://github.com/scroll-tech/scroll.git (synced 2026-01-13 07:57:58 -05:00)

Compare commits (29 commits)
Commits in this comparison:

- 7118746e34
- f180a40ed9
- fc577a535e
- 8b018327f2
- 98cb624951
- 9dba3f8f41
- fb44382297
- 2a745ad7a9
- 7270a2a9bc
- 20fafd9ca4
- 870565a18f
- 0319b11a57
- 6d4374a5cc
- 7ca897d887
- f414523045
- 99ba4acdc0
- 9caa3d491e
- 81589c55f5
- d234156475
- 62f7cbad46
- 186e76852b
- 3b045817dd
- e7dc628563
- d743f2ce96
- 798179ee6d
- b706cb69d3
- aa24cdd1db
- 4398a36ee2
- 9a27499c03
.github/pull_request_template.md (vendored, 14 lines changed)

```diff
@@ -1,9 +1,9 @@
-## 1. Purpose or design rationale of this PR
+### Purpose or design rationale of this PR
 
-...
+*Describe your change. Make sure to answer these three questions: What does this PR do? Why does it do it? How does it do it?*
 
 
-## 2. PR title
+### PR title
 
 Your PR title must follow [conventional commits](https://www.conventionalcommits.org/en/v1.0.0/#summary) (as we are doing squash merge for each PR), so it must start with one of the following [types](https://github.com/angular/angular/blob/22b96b9/CONTRIBUTING.md#type):
@@ -18,17 +18,17 @@ Your PR title must follow [conventional commits](https://www.conventionalcommits
 - [ ] test: Adding missing tests or correcting existing tests
 
 
-## 3. Deployment tag versioning
+### Deployment tag versioning
 
 Has `tag` in `common/version.go` been updated?
 
-- [ ] This PR doesn't involve a new deployment, git tag, docker image tag
+- [ ] No, this PR doesn't involve a new deployment, git tag, docker image tag
 - [ ] Yes
 
 
-## 4. Breaking change label
+### Breaking change label
 
 Does this PR have the `breaking-change` label?
 
-- [ ] This PR is not a breaking change
+- [ ] No, this PR is not a breaking change
 - [ ] Yes
```
.github/workflows/bridge.yml (vendored, 90 lines changed)

```diff
@@ -9,6 +9,8 @@ on:
       - alpha
     paths:
       - 'bridge/**'
+      - 'common/**'
+      - 'database/**'
       - '.github/workflows/bridge.yml'
   pull_request:
     types:
@@ -18,14 +20,61 @@ on:
       - ready_for_review
     paths:
       - 'bridge/**'
+      - 'common/**'
+      - 'database/**'
       - '.github/workflows/bridge.yml'
 
-defaults:
-  run:
-    working-directory: 'bridge'
-
 jobs:
   check:
     if: github.event.pull_request.draft == false
     runs-on: ubuntu-latest
     steps:
+      - name: Install Go
+        uses: actions/setup-go@v2
+        with:
+          go-version: 1.19.x
+      - name: Checkout code
+        uses: actions/checkout@v2
+      - name: Install Solc
+        uses: supplypike/setup-bin@v3
+        with:
+          uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.16/solc-static-linux'
+          name: 'solc'
+          version: '0.8.16'
+      - name: Install Geth Tools
+        uses: gacts/install-geth-tools@v1
+      - name: Lint
+        working-directory: 'bridge'
+        run: |
+          rm -rf $HOME/.cache/golangci-lint
+          make mock_abi
+          make lint
+  goimports-lint:
+    if: github.event.pull_request.draft == false
+    runs-on: ubuntu-latest
+    steps:
+      - name: Install Go
+        uses: actions/setup-go@v2
+        with:
+          go-version: 1.19.x
+      - name: Checkout code
+        uses: actions/checkout@v2
+      - name: Install goimports
+        run: go install golang.org/x/tools/cmd/goimports
+      - name: Run goimports lint
+        run: goimports -local scroll-tech/bridge/ -w .
+        working-directory: 'bridge'
+      - name: Run go mod tidy
+        run: go mod tidy
+        working-directory: 'bridge'
+      # If there are any diffs from goimports or go mod tidy, fail.
+      - name: Verify no changes from goimports and go mod tidy
+        working-directory: 'bridge'
+        run: |
+          if [ -n "$(git status --porcelain)" ]; then
+            exit 1
+          fi
+  tests:
+    if: github.event.pull_request.draft == false
+    runs-on: ubuntu-latest
     steps:
@@ -43,31 +92,18 @@ jobs:
           version: '0.8.16'
       - name: Install Geth Tools
         uses: gacts/install-geth-tools@v1
-      - name: Lint
-        run: |
-          rm -rf $HOME/.cache/golangci-lint
-          make mock_abi
-          make lint
-  goimports-lint:
-    if: github.event.pull_request.draft == false
-    runs-on: ubuntu-latest
-    steps:
-      - name: Install Go
-        uses: actions/setup-go@v2
-        with:
-          go-version: 1.18.x
-      - name: Checkout code
-        uses: actions/checkout@v2
-      - name: Install goimports
-        run: go install golang.org/x/tools/cmd/goimports
-      - run: goimports -local scroll-tech/bridge/ -w .
-      - run: go mod tidy
-      # If there are any diffs from goimports or go mod tidy, fail.
-      - name: Verify no changes from goimports and go mod tidy
-        run: |
-          if [ -n "$(git status --porcelain)" ]; then
-            exit 1
-          fi
+      - name: Build prerequisites
+        run: |
+          make dev_docker
+          make -C bridge mock_abi
+      - name: Build bridge binaries
+        working-directory: 'bridge'
+        run: |
+          make bridge_bins
+      - name: Test bridge packages
+        working-directory: 'bridge'
+        run: |
+          go test -v -race -gcflags="-l" -ldflags="-s=false" -covermode=atomic ./...
 #  docker-build:
 #    if: github.event.pull_request.draft == false
 #    runs-on: ubuntu-latest
```
.github/workflows/bridge_history_api.yml (vendored, 9 lines changed)

```diff
@@ -1,4 +1,4 @@
-name: BridgeHistoryApi
+name: BridgeHistoryAPI
 
 on:
   push:
@@ -32,7 +32,7 @@ jobs:
 #      - name: Install Go
 #        uses: actions/setup-go@v2
 #        with:
-#          go-version: 1.20.x
+#          go-version: 1.19.x
 #      - name: Checkout code
 #        uses: actions/checkout@v2
 #      - name: Lint
@@ -46,7 +46,7 @@ jobs:
       - name: Install Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.20.x
+          go-version: 1.19.x
       - name: Checkout code
         uses: actions/checkout@v2
       - name: Test
@@ -60,7 +60,7 @@ jobs:
       - name: Install Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.20.x
+          go-version: 1.19.x
       - name: Checkout code
         uses: actions/checkout@v2
       - name: Install goimports
@@ -73,4 +73,3 @@ jobs:
           if [ -n "$(git status --porcelain)" ]; then
             exit 1
           fi
-
```
.github/workflows/common.yml (vendored, 43 lines changed)

```diff
@@ -20,10 +20,6 @@ on:
       - 'common/**'
       - '.github/workflows/common.yml'
 
-defaults:
-  run:
-    working-directory: 'common'
-
 jobs:
   check:
     if: github.event.pull_request.draft == false
@@ -37,7 +33,7 @@ jobs:
       - name: Install Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.18.x
+          go-version: 1.19.x
       - name: Checkout code
         uses: actions/checkout@v2
       - name: Cache cargo
@@ -45,6 +41,7 @@ jobs:
         with:
           workspaces: "common/libzkp/impl -> target"
       - name: Lint
+        working-directory: 'common'
         run: |
           rm -rf $HOME/.cache/golangci-lint
           make lint
@@ -55,16 +52,46 @@ jobs:
       - name: Install Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.18.x
+          go-version: 1.19.x
       - name: Checkout code
         uses: actions/checkout@v2
       - name: Install goimports
         run: go install golang.org/x/tools/cmd/goimports
-      - run: goimports -local scroll-tech/common/ -w .
-      - run: go mod tidy
+      - name: Run goimports lint
+        working-directory: 'common'
+        run: goimports -local scroll-tech/common/ -w .
+      - name: Run go mod tidy
+        working-directory: 'common'
+        run: go mod tidy
       # If there are any diffs from goimports or go mod tidy, fail.
       - name: Verify no changes from goimports and go mod tidy
+        working-directory: 'common'
         run: |
           if [ -n "$(git status --porcelain)" ]; then
             exit 1
           fi
+  tests:
+    if: github.event.pull_request.draft == false
+    runs-on: ubuntu-latest
+    steps:
+      - name: Install Go
+        uses: actions/setup-go@v2
+        with:
+          go-version: 1.18.x
+      - name: Checkout code
+        uses: actions/checkout@v2
+      - name: Install Solc
+        uses: supplypike/setup-bin@v3
+        with:
+          uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.16/solc-static-linux'
+          name: 'solc'
+          version: '0.8.16'
+      - name: Install Geth Tools
+        uses: gacts/install-geth-tools@v1
+      - name: Build prerequisites
+        run: |
+          make dev_docker
+      - name: Test common packages
+        working-directory: 'common'
+        run: |
+          go test -v -race -gcflags="-l" -ldflags="-s=false" -covermode=atomic ./...
```
.github/workflows/coordinator.yml (vendored, 47 lines changed)

```diff
@@ -9,6 +9,8 @@ on:
       - alpha
     paths:
       - 'coordinator/**'
+      - 'common/**'
+      - 'database/**'
       - '.github/workflows/coordinator.yml'
   pull_request:
     types:
@@ -18,12 +20,10 @@ on:
       - ready_for_review
     paths:
       - 'coordinator/**'
+      - 'common/**'
+      - 'database/**'
       - '.github/workflows/coordinator.yml'
 
-defaults:
-  run:
-    working-directory: 'coordinator'
-
 jobs:
   check:
     if: github.event.pull_request.draft == false
@@ -37,10 +37,11 @@ jobs:
       - name: Install Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.18.x
+          go-version: 1.19.x
       - name: Checkout code
         uses: actions/checkout@v2
       - name: Lint
+        working-directory: 'coordinator'
         run: |
           rm -rf $HOME/.cache/golangci-lint
           make lint
@@ -51,15 +52,20 @@ jobs:
       - name: Install Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.18.x
+          go-version: 1.19.x
       - name: Checkout code
         uses: actions/checkout@v2
       - name: Install goimports
         run: go install golang.org/x/tools/cmd/goimports
-      - run: goimports -local scroll-tech/coordinator/ -w .
-      - run: go mod tidy
+      - name: Run goimports lint
+        working-directory: 'coordinator'
+        run: goimports -local scroll-tech/coordinator/ -w .
+      - name: Run go mod tidy
+        working-directory: 'coordinator'
+        run: go mod tidy
       # If there are any diffs from goimports or go mod tidy, fail.
       - name: Verify no changes from goimports and go mod tidy
+        working-directory: 'coordinator'
         run: |
           if [ -n "$(git status --porcelain)" ]; then
             exit 1
@@ -80,3 +86,28 @@ jobs:
 #          push: false
 #          # cache-from: type=gha,scope=${{ github.workflow }}
 #          # cache-to: type=gha,scope=${{ github.workflow }}
+  tests:
+    if: github.event.pull_request.draft == false
+    runs-on: ubuntu-latest
+    steps:
+      - name: Install Go
+        uses: actions/setup-go@v2
+        with:
+          go-version: 1.18.x
+      - name: Checkout code
+        uses: actions/checkout@v2
+      - name: Install Solc
+        uses: supplypike/setup-bin@v3
+        with:
+          uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.16/solc-static-linux'
+          name: 'solc'
+          version: '0.8.16'
+      - name: Install Geth Tools
+        uses: gacts/install-geth-tools@v1
+      - name: Build prerequisites
+        run: |
+          make dev_docker
+      - name: Test coordinator packages
+        working-directory: 'coordinator'
+        run: |
+          go test -v -race -gcflags="-l" -ldflags="-s=false" -covermode=atomic -tags mock_verifier ./...
```
.github/workflows/database.yml (vendored, 45 lines changed)

```diff
@@ -9,6 +9,7 @@ on:
       - alpha
     paths:
       - 'database/**'
+      - 'common/**'
       - '.github/workflows/database.yml'
   pull_request:
     types:
@@ -18,12 +19,9 @@ on:
       - ready_for_review
     paths:
       - 'database/**'
+      - 'common/**'
       - '.github/workflows/database.yml'
 
-defaults:
-  run:
-    working-directory: 'database'
-
 jobs:
   check:
     if: github.event.pull_request.draft == false
@@ -32,10 +30,11 @@ jobs:
       - name: Install Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.18.x
+          go-version: 1.19.x
       - name: Checkout code
         uses: actions/checkout@v2
       - name: Lint
+        working-directory: 'database'
         run: |
           rm -rf $HOME/.cache/golangci-lint
           make lint
@@ -46,16 +45,46 @@ jobs:
       - name: Install Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.18.x
+          go-version: 1.19.x
       - name: Checkout code
         uses: actions/checkout@v2
       - name: Install goimports
         run: go install golang.org/x/tools/cmd/goimports
-      - run: goimports -local scroll-tech/database/ -w .
-      - run: go mod tidy
+      - name: Run goimports lint
+        working-directory: 'database'
+        run: goimports -local scroll-tech/database/ -w .
+      - name: Run go mod tidy
+        working-directory: 'database'
+        run: go mod tidy
       # If there are any diffs from goimports or go mod tidy, fail.
       - name: Verify no changes from goimports and go mod tidy
+        working-directory: 'database'
         run: |
           if [ -n "$(git status --porcelain)" ]; then
             exit 1
           fi
+  tests:
+    if: github.event.pull_request.draft == false
+    runs-on: ubuntu-latest
+    steps:
+      - name: Install Go
+        uses: actions/setup-go@v2
+        with:
+          go-version: 1.18.x
+      - name: Checkout code
+        uses: actions/checkout@v2
+      - name: Install Solc
+        uses: supplypike/setup-bin@v3
+        with:
+          uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.16/solc-static-linux'
+          name: 'solc'
+          version: '0.8.16'
+      - name: Install Geth Tools
+        uses: gacts/install-geth-tools@v1
+      - name: Build prerequisites
+        run: |
+          make dev_docker
+      - name: Test database packages
+        working-directory: 'database'
+        run: |
+          go test -v -race -gcflags="-l" -ldflags="-s=false" -covermode=atomic ./...
```
.github/workflows/integration.yaml (vendored, new file, 43 lines)

```yaml
name: Integration

on:
  push:
    branches:
      - main
      - staging
      - develop
      - alpha
  pull_request:
    types:
      - opened
      - reopened
      - synchronize
      - ready_for_review

jobs:
  tests:
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    steps:
      - name: Install Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.18.x
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Install Solc
        uses: supplypike/setup-bin@v3
        with:
          uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.16/solc-static-linux'
          name: 'solc'
          version: '0.8.16'
      - name: Install Geth Tools
        uses: gacts/install-geth-tools@v1
      - name: Build prerequisites
        run: |
          make dev_docker
          make -C bridge mock_abi
          make -C common/bytecode all
      - name: Run integration tests
        run: |
          go test -v -tags="mock_prover mock_verifier" -p 1 scroll-tech/integration-test/...
```
.github/workflows/intermediate-docker.yml (vendored, new file, 59 lines)

```yaml
name: Intermediate Docker

on:
  workflow_dispatch:
    inputs:
      GO_VERSION:
        description: 'Go version'
        required: true
        type: string
        default: '1.19'
      RUST_VERSION:
        description: 'Rust toolchain version'
        required: true
        type: string
        default: 'nightly-2022-12-10'
      PYTHON_VERSION:
        description: 'Python version'
        required: false
        type: string
        default: '3.10'
      CUDA_VERSION:
        description: 'Cuda version'
        required: false
        type: string
        default: '11.7.1'

defaults:
  run:
    working-directory: 'build/dockerfiles/intermediate'

jobs:
  build-and-push:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      - name: Login to Docker Hub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Build
        run: |
          make all
        env:
          GO_VERSION: ${{ inputs.GO_VERSION }}
          RUST_VERSION: ${{ inputs.RUST_VERSION }}
          PYTHON_VERSION: ${{ inputs.PYTHON_VERSION }}
          CUDA_VERSION: ${{ inputs.CUDA_VERSION }}
      - name: Publish
        run: |
          make publish
        env:
          GO_VERSION: ${{ inputs.GO_VERSION }}
          RUST_VERSION: ${{ inputs.RUST_VERSION }}
          PYTHON_VERSION: ${{ inputs.PYTHON_VERSION }}
          CUDA_VERSION: ${{ inputs.CUDA_VERSION }}
```
.github/workflows/roller.yml (vendored, 6 lines changed)

```diff
@@ -37,7 +37,7 @@ jobs:
       - name: Install Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.18.x
+          go-version: 1.19.x
       - name: Checkout code
         uses: actions/checkout@v2
       - name: Cache cargo
@@ -55,7 +55,7 @@ jobs:
       - name: Install Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.18.x
+          go-version: 1.19.x
       - name: Checkout code
         uses: actions/checkout@v2
       - name: Lint
@@ -69,7 +69,7 @@ jobs:
       - name: Install Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.18.x
+          go-version: 1.19.x
       - name: Checkout code
         uses: actions/checkout@v2
       - name: Install goimports
```
Jenkinsfile (vendored, 7 lines changed)

```diff
@@ -7,10 +7,11 @@ pipeline {
         timeout (20)
     }
     tools {
-        go 'go-1.18'
+        nodejs "nodejs"
+        go 'go-1.19'
     }
     environment {
         GOBIN = '/home/ubuntu/go/bin/'
         GO111MODULE = 'on'
         PATH="/home/ubuntu/.cargo/bin:$PATH"
         LD_LIBRARY_PATH="$LD_LIBRARY_PATH:./coordinator/verifier/lib"
@@ -81,7 +82,7 @@ pipeline {
         }
         stage('Compare Coverage') {
             steps {
-                sh "./build/post-test-report-coverage.sh"
+                sh './build/post-test-report-coverage.sh'
                 script {
                     currentBuild.result = 'SUCCESS'
                 }
@@ -96,4 +97,4 @@ pipeline {
                 slackSend(message: "${JOB_BASE_NAME} ${GIT_COMMIT} #${BUILD_NUMBER} deploy ${currentBuild.result}")
             }
         }
     }
 }
```
README.md

```diff
@@ -1,9 +1,7 @@
 # Scroll Monorepo
 
 [contracts](https://github.com/scroll-tech/scroll/actions/workflows/contracts.yaml) [bridge](https://github.com/scroll-tech/scroll/actions/workflows/bridge.yml) [coordinator](https://github.com/scroll-tech/scroll/actions/workflows/coordinator.yml) [database](https://github.com/scroll-tech/scroll/actions/workflows/database.yml) [common](https://github.com/scroll-tech/scroll/actions/workflows/common.yml) [roller](https://github.com/scroll-tech/scroll/actions/workflows/roller.yml)
 
 ## Prerequisites
-+ Go 1.18
++ Go 1.19
 + Rust (for version, see [rust-toolchain](./common/libzkp/impl/rust-toolchain))
 + Hardhat / Foundry
 + Docker
```
bridge-history-api service entrypoint (Go)

```diff
@@ -78,7 +78,7 @@ func action(ctx *cli.Context) error {
 	mvc.Configure(bridgeApp.Party("/api/txsbyhashes"), setupQueryByHashHandler)
 
 	// TODO: make debug mode configurable
-	err = bridgeApp.Listen(":8080", iris.WithLogLevel("debug"))
+	err = bridgeApp.Listen(cfg.Server.HostPort, iris.WithLogLevel("debug"))
 	if err != nil {
 		log.Crit("can not start server", "err", err)
 	}
@@ -98,6 +98,14 @@ func action(ctx *cli.Context) error {
 	go l2crossMsgFetcher.Start()
 	defer l2crossMsgFetcher.Stop()
 
+	l1BlocktimeFetcher := cross_msg.NewBlocktimestampFetcher(subCtx, uint(cfg.L1.Confirmation), int(cfg.L1.BlockTime), l1client, db.UpdateL1Blocktimestamp, db.GetL1EarliestNoBlocktimestampHeight)
+	go l1BlocktimeFetcher.Start()
+	defer l1BlocktimeFetcher.Stop()
+
+	l2BlocktimeFetcher := cross_msg.NewBlocktimestampFetcher(subCtx, uint(cfg.L2.Confirmation), int(cfg.L2.BlockTime), l2client, db.UpdateL2Blocktimestamp, db.GetL2EarliestNoBlocktimestampHeight)
+	go l2BlocktimeFetcher.Start()
+	defer l2BlocktimeFetcher.Stop()
+
 	// Catch CTRL-C to ensure a graceful shutdown.
 	interrupt := make(chan os.Signal, 1)
 	signal.Notify(interrupt, os.Interrupt)
```
bridge-history-api configuration (JSON)

```diff
@@ -2,7 +2,7 @@
     "l1": {
         "confirmation": 64,
         "endpoint": "https://rpc.ankr.com/eth_goerli",
-        "startHeight": 8890194,
+        "startHeight": 9890194,
         "blockTime": 10,
         "MessengerAddr": "0x5260e38080BFe97e6C4925d9209eCc5f964373b6",
         "ETHGatewayAddr": "0x429b73A21cF3BF1f3E696a21A95408161daF311f",
@@ -16,7 +16,7 @@
         "confirmation": 1,
         "endpoint": "https://alpha-rpc.scroll.io/l2",
         "blockTime": 3,
-        "startHeight": 1600068,
+        "startHeight": 1900068,
         "CustomERC20GatewayAddr": "0xa07Cb742657294C339fB4d5d6CdF3fdBeE8C1c68",
         "ERC721GatewayAddr": "0x8Fee20e0C0Ef16f2898a8073531a857D11b9C700",
         "StandardERC20Gateway": "0xB878F37BB278bf0e4974856fFe86f5e6F66BD725",
@@ -30,5 +30,8 @@
         "driverName": "postgres",
         "maxOpenNum": 200,
         "maxIdleNum": 20
-    }
+    },
+    "server": {
+        "hostPort": "0.0.0.0:20006"
+    }
 }
```
bridge-history-api config (Go)

```diff
@@ -30,6 +30,10 @@ type LayerConfig struct {
 	CustomERC20GatewayAddr string `json:"CustomERC20GatewayAddr"`
 }
 
+type ServerConfig struct {
+	HostPort string `json:"hostPort"`
+}
+
 // Config is the configuration of the bridge history backend
 type Config struct {
 	// chain config
@@ -37,7 +41,8 @@ type Config struct {
 	L2 *LayerConfig `json:"l2"`
 
 	// data source name
-	DB *DBConfig `json:"db"`
+	DB     *DBConfig     `json:"db"`
+	Server *ServerConfig `json:"server"`
 }
 
 // NewConfig returns a new instance of Config.
```
bridge-history-api/cross_msg/block_timestamp_fetcher.go (new file, 79 lines)

```go
package cross_msg

import (
	"context"
	"math/big"
	"time"

	"github.com/ethereum/go-ethereum/ethclient"
	"github.com/ethereum/go-ethereum/log"
)

type GetEarliestNoBlocktimestampHeightFunc func() (uint64, error)
type UpdateBlocktimestampFunc func(height uint64, timestamp time.Time) error

type BlocktimestampFetcher struct {
	ctx                                   context.Context
	confirmation                          uint
	blockTimeInSec                        int
	client                                *ethclient.Client
	updateBlocktimestampFunc              UpdateBlocktimestampFunc
	getEarliestNoBlocktimestampHeightFunc GetEarliestNoBlocktimestampHeightFunc
}

func NewBlocktimestampFetcher(ctx context.Context, confirmation uint, blockTimeInSec int, client *ethclient.Client, updateBlocktimestampFunc UpdateBlocktimestampFunc, getEarliestNoBlocktimestampHeightFunc GetEarliestNoBlocktimestampHeightFunc) *BlocktimestampFetcher {
	return &BlocktimestampFetcher{
		ctx:                                   ctx,
		confirmation:                          confirmation,
		blockTimeInSec:                        blockTimeInSec,
		client:                                client,
		getEarliestNoBlocktimestampHeightFunc: getEarliestNoBlocktimestampHeightFunc,
		updateBlocktimestampFunc:              updateBlocktimestampFunc,
	}
}

func (b *BlocktimestampFetcher) Start() {
	go func() {
		tick := time.NewTicker(time.Duration(b.blockTimeInSec) * time.Second)
		for {
			select {
			case <-b.ctx.Done():
				tick.Stop()
				return
			case <-tick.C:
				number, err := b.client.BlockNumber(b.ctx)
				if err != nil {
					log.Error("Can not get latest block number", "err", err)
					continue
				}
				startHeight, err := b.getEarliestNoBlocktimestampHeightFunc()
				if err != nil {
					log.Error("Can not get latest record without block timestamp", "err", err)
					continue
				}
				for height := startHeight; number >= height+uint64(b.confirmation) && height > 0; {
					block, err := b.client.HeaderByNumber(b.ctx, new(big.Int).SetUint64(height))
					if err != nil {
						log.Error("Can not get block by number", "err", err)
						break
					}
					err = b.updateBlocktimestampFunc(height, time.Unix(int64(block.Time), 0))
					if err != nil {
						log.Error("Can not update blocktimestamp into DB", "err", err)
						break
					}
					height, err = b.getEarliestNoBlocktimestampHeightFunc()
					if err != nil {
						log.Error("Can not get latest record without block timestamp", "err", err)
						break
					}
				}
			}
		}
	}()
}

func (b *BlocktimestampFetcher) Stop() {
	log.Info("BlocktimestampFetcher Stop")
	b.ctx.Done()
}
```
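For reference, a minimal sketch of how this fetcher gets wired up, mirroring the entrypoint change above. The endpoint literal and the stubbed callbacks are placeholders for illustration (the real service uses its config values and the ORM methods added in this change), not part of this diff:

```go
package main

import (
	"context"
	"time"

	"github.com/ethereum/go-ethereum/ethclient"

	"bridge-history-api/cross_msg"
)

func main() {
	ctx := context.Background()
	// Placeholder endpoint; the service reads the real one from its config.
	client, err := ethclient.Dial("https://rpc.ankr.com/eth_goerli")
	if err != nil {
		panic(err)
	}
	// Stubs standing in for db.UpdateL1Blocktimestamp and
	// db.GetL1EarliestNoBlocktimestampHeight from this change.
	update := func(height uint64, ts time.Time) error { return nil }
	earliest := func() (uint64, error) { return 0, nil }

	// confirmation=64 and blockTime=10 match the L1 values in the config above.
	fetcher := cross_msg.NewBlocktimestampFetcher(ctx, 64, 10, client, update, earliest)
	go fetcher.Start()
	defer fetcher.Stop()
	time.Sleep(time.Minute) // stand-in for the real service's signal handling
}
```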
bridge-history-api/cross_msg test imports (Go)

```diff
@@ -1,7 +1,6 @@
 package cross_msg_test
 
 import (
-	"bridge-history-api/cross_msg"
 	"crypto/rand"
 	"math/big"
 	"testing"
@@ -9,6 +8,8 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/stretchr/testify/assert"
+
+	"bridge-history-api/cross_msg"
 )
 
 func TestMergeIntoList(t *testing.T) {
```
bridge-history-api/cross_msg/withdraw_trie.go (new file, 190 lines)

```go
package cross_msg

import (
	"github.com/ethereum/go-ethereum/common"

	"bridge-history-api/utils"
)

// MaxHeight is the maximum possible height of the withdraw trie
const MaxHeight = 40

// WithdrawTrie is an append-only merkle trie
type WithdrawTrie struct {
	// used to rebuild the merkle tree
	NextMessageNonce uint64

	height int // current height of withdraw trie

	branches []common.Hash
	zeroes   []common.Hash
}

// NewWithdrawTrie will return a new instance of WithdrawTrie
func NewWithdrawTrie() *WithdrawTrie {
	zeroes := make([]common.Hash, MaxHeight)
	branches := make([]common.Hash, MaxHeight)

	zeroes[0] = common.Hash{}
	for i := 1; i < MaxHeight; i++ {
		zeroes[i] = utils.Keccak2(zeroes[i-1], zeroes[i-1])
	}

	return &WithdrawTrie{
		zeroes:           zeroes,
		branches:         branches,
		height:           -1,
		NextMessageNonce: 0,
	}
}

// Initialize will initialize the merkle trie with the right-most leaf node
func (w *WithdrawTrie) Initialize(currentMessageNonce uint64, msgHash common.Hash, proofBytes []byte) {
	proof := DecodeBytesToMerkleProof(proofBytes)
	branches := RecoverBranchFromProof(proof, currentMessageNonce, msgHash)

	w.height = len(proof)
	w.branches = branches
	w.NextMessageNonce = currentMessageNonce + 1
}

// AppendMessages appends a list of new messages as leaf nodes to the right-most side of the tree and returns the proofs for all messages.
func (w *WithdrawTrie) AppendMessages(hashes []common.Hash) [][]byte {
	length := len(hashes)
	if length == 0 {
		return make([][]byte, 0)
	}

	cache := make([]map[uint64]common.Hash, MaxHeight)
	for h := 0; h < MaxHeight; h++ {
		cache[h] = make(map[uint64]common.Hash)
	}

	// cache all branches that will be used later.
	if w.NextMessageNonce != 0 {
		index := w.NextMessageNonce
		for h := 0; h <= w.height; h++ {
			if index%2 == 1 {
				// right child, `w.branches[h]` is the corresponding left child
				// the index of left child should be `index ^ 1`.
				cache[h][index^1] = w.branches[h]
			}
			index >>= 1
		}
	}
	// cache all new leaves
	for i := 0; i < length; i++ {
		cache[0][w.NextMessageNonce+uint64(i)] = hashes[i]
	}

	// build withdraw trie with new hashes
	minIndex := w.NextMessageNonce
	maxIndex := w.NextMessageNonce + uint64(length) - 1
	for h := 0; maxIndex > 0; h++ {
		if minIndex%2 == 1 {
			minIndex--
		}
		if maxIndex%2 == 0 {
			cache[h][maxIndex^1] = w.zeroes[h]
		}
		for i := minIndex; i <= maxIndex; i += 2 {
			cache[h+1][i>>1] = utils.Keccak2(cache[h][i], cache[h][i^1])
		}
		minIndex >>= 1
		maxIndex >>= 1
	}

	// update branches using hashes one by one
	for i := 0; i < length; i++ {
		proof := UpdateBranchWithNewMessage(w.zeroes, w.branches, w.NextMessageNonce, hashes[i])
		w.NextMessageNonce++
		w.height = len(proof)
	}

	proofs := make([][]byte, length)
	// retrieve merkle proof from cache
	for i := 0; i < length; i++ {
		index := w.NextMessageNonce + uint64(i) - uint64(length)
		var merkleProof []common.Hash
		for h := 0; h < w.height; h++ {
			merkleProof = append(merkleProof, cache[h][index^1])
			index >>= 1
		}
		proofs[i] = EncodeMerkleProofToBytes(merkleProof)
	}

	return proofs
}

// MessageRoot returns the current root hash of the withdraw trie.
func (w *WithdrawTrie) MessageRoot() common.Hash {
	if w.height == -1 {
		return common.Hash{}
	}
	return w.branches[w.height]
}

// DecodeBytesToMerkleProof converts a byte array into a bytes32 array. The caller should make sure the length is matched.
func DecodeBytesToMerkleProof(proofBytes []byte) []common.Hash {
	proof := make([]common.Hash, len(proofBytes)/32)
	for i := 0; i < len(proofBytes); i += 32 {
		proof[i/32] = common.BytesToHash(proofBytes[i : i+32])
	}
	return proof
}

// EncodeMerkleProofToBytes converts a bytes32 array into a byte array by concatenation.
func EncodeMerkleProofToBytes(proof []common.Hash) []byte {
	var proofBytes []byte
	for i := 0; i < len(proof); i++ {
		proofBytes = append(proofBytes, proof[i][:]...)
	}
	return proofBytes
}

// UpdateBranchWithNewMessage updates the branches to the latest state with a new message and returns the merkle proof for the message.
func UpdateBranchWithNewMessage(zeroes []common.Hash, branches []common.Hash, index uint64, msgHash common.Hash) []common.Hash {
	root := msgHash
	var merkleProof []common.Hash
	var height uint64
	for height = 0; index > 0; height++ {
		if index%2 == 0 {
			// it may be used in next round.
			branches[height] = root
			merkleProof = append(merkleProof, zeroes[height])
			// it's a left child, the right child must be null
			root = utils.Keccak2(root, zeroes[height])
		} else {
			// it's a right child, use previously computed hash
			root = utils.Keccak2(branches[height], root)
			merkleProof = append(merkleProof, branches[height])
		}
		index >>= 1
	}
	branches[height] = root
	return merkleProof
}

// RecoverBranchFromProof will recover the latest branches from a merkle proof and message hash
func RecoverBranchFromProof(proof []common.Hash, index uint64, msgHash common.Hash) []common.Hash {
	branches := make([]common.Hash, 64)
	root := msgHash
	var height uint64
	for height = 0; index > 0; height++ {
		if index%2 == 0 {
			branches[height] = root
			// it's a left child, the right child must be null
			root = utils.Keccak2(root, proof[height])
		} else {
			// it's a right child, use previously computed hash
			branches[height] = proof[height]
			root = utils.Keccak2(proof[height], root)
		}
		index >>= 1
	}
	branches[height] = root
	for height++; height < 64; height++ {
		branches[height] = common.Hash{}
	}
	return branches
}
```
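`utils.Keccak2` is used throughout but is not part of this diff; a minimal sketch of what it presumably computes, namely keccak256 over the concatenation of two 32-byte words, the standard Merkle pairing hash:

```go
package utils

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

// Keccak2 is assumed to hash the concatenation of two 32-byte words with
// keccak256; this sketch is an assumption, not part of the diff above.
func Keccak2(a, b common.Hash) common.Hash {
	return common.BytesToHash(crypto.Keccak256(a.Bytes(), b.Bytes()))
}
```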
bridge-history-api/cross_msg/withdraw_trie_test.go (new file, 213 lines)

```go
package cross_msg_test

import (
	"math/big"
	"testing"

	"github.com/ethereum/go-ethereum/common"
	"github.com/stretchr/testify/assert"

	"bridge-history-api/cross_msg"
	"bridge-history-api/utils"
)

func TestUpdateBranchWithNewMessage(t *testing.T) {
	zeroes := make([]common.Hash, 64)
	branches := make([]common.Hash, 64)
	zeroes[0] = common.Hash{}
	for i := 1; i < 64; i++ {
		zeroes[i] = utils.Keccak2(zeroes[i-1], zeroes[i-1])
	}

	cross_msg.UpdateBranchWithNewMessage(zeroes, branches, 0, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"))
	if branches[0] != common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001") {
		t.Fatalf("Invalid root, want %s, got %s", "0x0000000000000000000000000000000000000000000000000000000000000001", branches[0].Hex())
	}

	cross_msg.UpdateBranchWithNewMessage(zeroes, branches, 1, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000002"))
	if branches[1] != common.HexToHash("0xe90b7bceb6e7df5418fb78d8ee546e97c83a08bbccc01a0644d599ccd2a7c2e0") {
		t.Fatalf("Invalid root, want %s, got %s", "0xe90b7bceb6e7df5418fb78d8ee546e97c83a08bbccc01a0644d599ccd2a7c2e0", branches[1].Hex())
	}

	cross_msg.UpdateBranchWithNewMessage(zeroes, branches, 2, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000003"))
	if branches[2] != common.HexToHash("0x222ff5e0b5877792c2bc1670e2ccd0c2c97cd7bb1672a57d598db05092d3d72c") {
		t.Fatalf("Invalid root, want %s, got %s", "0x222ff5e0b5877792c2bc1670e2ccd0c2c97cd7bb1672a57d598db05092d3d72c", branches[2].Hex())
	}

	cross_msg.UpdateBranchWithNewMessage(zeroes, branches, 3, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000004"))
	if branches[2] != common.HexToHash("0xa9bb8c3f1f12e9aa903a50c47f314b57610a3ab32f2d463293f58836def38d36") {
		t.Fatalf("Invalid root, want %s, got %s", "0xa9bb8c3f1f12e9aa903a50c47f314b57610a3ab32f2d463293f58836def38d36", branches[2].Hex())
	}
}

func TestDecodeEncodeMerkleProof(t *testing.T) {
	proof := cross_msg.DecodeBytesToMerkleProof(common.Hex2Bytes("2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49012ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49022ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49032ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4904"))
	if len(proof) != 4 {
		t.Fatalf("proof length mismatch, want %d, got %d", 4, len(proof))
	}
	if proof[0] != common.HexToHash("0x2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4901") {
		t.Fatalf("proof[0] mismatch, want %s, got %s", "0x2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4901", proof[0].Hex())
	}
	if proof[1] != common.HexToHash("0x2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4902") {
		t.Fatalf("proof[1] mismatch, want %s, got %s", "0x2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4902", proof[1].Hex())
	}
	if proof[2] != common.HexToHash("0x2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4903") {
		t.Fatalf("proof[2] mismatch, want %s, got %s", "0x2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4903", proof[2].Hex())
	}
	if proof[3] != common.HexToHash("0x2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4904") {
		t.Fatalf("proof[3] mismatch, want %s, got %s", "0x2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4904", proof[3].Hex())
	}

	bytes := cross_msg.EncodeMerkleProofToBytes(proof)
	if common.Bytes2Hex(bytes) != "2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49012ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49022ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49032ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4904" {
		t.Fatalf("wrong encoded bytes")
	}
}

func TestRecoverBranchFromProof(t *testing.T) {
	zeroes := make([]common.Hash, 64)
	branches := make([]common.Hash, 64)
	zeroes[0] = common.Hash{}
	for i := 1; i < 64; i++ {
		zeroes[i] = utils.Keccak2(zeroes[i-1], zeroes[i-1])
	}

	proof := cross_msg.UpdateBranchWithNewMessage(zeroes, branches, 0, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"))
	tmpBranches := cross_msg.RecoverBranchFromProof(proof, 0, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"))
	for i := 0; i < 64; i++ {
		if tmpBranches[i] != branches[i] {
			t.Fatalf("Invalid branch, want %s, got %s", branches[i].Hex(), tmpBranches[i].Hex())
		}
	}

	proof = cross_msg.UpdateBranchWithNewMessage(zeroes, branches, 1, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000002"))
	tmpBranches = cross_msg.RecoverBranchFromProof(proof, 1, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000002"))
	for i := 0; i < 64; i++ {
		if tmpBranches[i] != branches[i] {
			t.Fatalf("Invalid branch, want %s, got %s", branches[i].Hex(), tmpBranches[i].Hex())
		}
	}

	proof = cross_msg.UpdateBranchWithNewMessage(zeroes, branches, 2, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000003"))
	tmpBranches = cross_msg.RecoverBranchFromProof(proof, 2, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000003"))
	for i := 0; i < 64; i++ {
		if tmpBranches[i] != branches[i] {
			t.Fatalf("Invalid branch, want %s, got %s", branches[i].Hex(), tmpBranches[i].Hex())
		}
	}

	proof = cross_msg.UpdateBranchWithNewMessage(zeroes, branches, 3, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000004"))
	tmpBranches = cross_msg.RecoverBranchFromProof(proof, 3, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000004"))
	for i := 0; i < 64; i++ {
		if tmpBranches[i] != branches[i] {
			t.Fatalf("Invalid branch, want %s, got %s", branches[i].Hex(), tmpBranches[i].Hex())
		}
	}
}

func TestWithdrawTrieOneByOne(t *testing.T) {
	for initial := 0; initial < 128; initial++ {
		withdrawTrie := cross_msg.NewWithdrawTrie()
		var hashes []common.Hash
		for i := 0; i < initial; i++ {
			hash := common.BigToHash(big.NewInt(int64(i + 1)))
			hashes = append(hashes, hash)
			withdrawTrie.AppendMessages([]common.Hash{
				hash,
			})
		}

		for i := initial; i < 128; i++ {
			hash := common.BigToHash(big.NewInt(int64(i + 1)))
			hashes = append(hashes, hash)
			expectedRoot := computeMerkleRoot(hashes)
			proofBytes := withdrawTrie.AppendMessages([]common.Hash{
				hash,
			})
			assert.Equal(t, withdrawTrie.NextMessageNonce, uint64(i+1))
			assert.Equal(t, expectedRoot.String(), withdrawTrie.MessageRoot().String())
			proof := cross_msg.DecodeBytesToMerkleProof(proofBytes[0])
			verifiedRoot := verifyMerkleProof(uint64(i), hash, proof)
			assert.Equal(t, expectedRoot.String(), verifiedRoot.String())
		}
	}
}

func TestWithdrawTrieMultiple(t *testing.T) {
	var expectedRoots []common.Hash

	{
		var hashes []common.Hash
		for i := 0; i < 128; i++ {
			hash := common.BigToHash(big.NewInt(int64(i + 1)))
			hashes = append(hashes, hash)
			expectedRoots = append(expectedRoots, computeMerkleRoot(hashes))
		}
	}

	for initial := 0; initial < 100; initial++ {
		var hashes []common.Hash
		for i := 0; i < initial; i++ {
			hash := common.BigToHash(big.NewInt(int64(i + 1)))
			hashes = append(hashes, hash)
		}

		for finish := initial; finish < 100; finish++ {
			withdrawTrie := cross_msg.NewWithdrawTrie()
			withdrawTrie.AppendMessages(hashes)

			var newHashes []common.Hash
			for i := initial; i <= finish; i++ {
				hash := common.BigToHash(big.NewInt(int64(i + 1)))
				newHashes = append(newHashes, hash)
			}
			proofBytes := withdrawTrie.AppendMessages(newHashes)
			assert.Equal(t, withdrawTrie.NextMessageNonce, uint64(finish+1))
			assert.Equal(t, expectedRoots[finish].String(), withdrawTrie.MessageRoot().String())

			for i := initial; i <= finish; i++ {
				hash := common.BigToHash(big.NewInt(int64(i + 1)))
				proof := cross_msg.DecodeBytesToMerkleProof(proofBytes[i-initial])
				verifiedRoot := verifyMerkleProof(uint64(i), hash, proof)
				assert.Equal(t, expectedRoots[finish].String(), verifiedRoot.String())
			}
		}
	}
}

func verifyMerkleProof(index uint64, leaf common.Hash, proof []common.Hash) common.Hash {
	root := leaf
	for _, h := range proof {
		if index%2 == 0 {
			root = utils.Keccak2(root, h)
		} else {
			root = utils.Keccak2(h, root)
		}
		index >>= 1
	}
	return root
}

func computeMerkleRoot(hashes []common.Hash) common.Hash {
	if len(hashes) == 0 {
		return common.Hash{}
	}

	zeroHash := common.Hash{}
	for {
		if len(hashes) == 1 {
			break
		}
		var newHashes []common.Hash
		for i := 0; i < len(hashes); i += 2 {
			if i+1 < len(hashes) {
				newHashes = append(newHashes, utils.Keccak2(hashes[i], hashes[i+1]))
			} else {
				newHashes = append(newHashes, utils.Keccak2(hashes[i], zeroHash))
			}
		}
		hashes = newHashes
		zeroHash = utils.Keccak2(zeroHash, zeroHash)
	}
	return hashes[0]
}
```
bridge-history-api database migration (SQL)

```diff
@@ -2,23 +2,24 @@
 -- +goose StatementBegin
 create table cross_message
 (
-    id BIGSERIAL PRIMARY KEY,
-    msg_hash VARCHAR NOT NULL DEFAULT '',
-    height BIGINT NOT NULL,
-    sender VARCHAR NOT NULL,
-    target VARCHAR NOT NULL,
-    amount VARCHAR NOT NULL,
-    layer1_hash VARCHAR NOT NULL DEFAULT '',
-    layer2_hash VARCHAR NOT NULL DEFAULT '',
-    layer1_token VARCHAR NOT NULL DEFAULT '',
-    layer2_token VARCHAR NOT NULL DEFAULT '',
-    token_id BIGINT NOT NULL DEFAULT 0,
-    asset SMALLINT NOT NULL,
-    msg_type SMALLINT NOT NULL,
-    is_deleted BOOLEAN NOT NULL DEFAULT FALSE,
-    created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
-    updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
-    deleted_at TIMESTAMP(0) DEFAULT NULL
+    id              BIGSERIAL    PRIMARY KEY,
+    msg_hash        VARCHAR      NOT NULL DEFAULT '',
+    height          BIGINT       NOT NULL,
+    sender          VARCHAR      NOT NULL,
+    target          VARCHAR      NOT NULL,
+    amount          VARCHAR      NOT NULL,
+    layer1_hash     VARCHAR      NOT NULL DEFAULT '',
+    layer2_hash     VARCHAR      NOT NULL DEFAULT '',
+    layer1_token    VARCHAR      NOT NULL DEFAULT '',
+    layer2_token    VARCHAR      NOT NULL DEFAULT '',
+    token_id        BIGINT       NOT NULL DEFAULT 0,
+    asset           SMALLINT     NOT NULL,
+    msg_type        SMALLINT     NOT NULL,
+    is_deleted      BOOLEAN      NOT NULL DEFAULT FALSE,
+    block_timestamp TIMESTAMP(0) DEFAULT NULL,
+    created_at      TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
+    updated_at      TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
+    deleted_at      TIMESTAMP(0) DEFAULT NULL
 );
 
 comment
```
bridge-history-api ORM types and interfaces (Go)

```diff
@@ -54,6 +54,7 @@ type CrossMsg struct {
 	Asset     int        `json:"asset" db:"asset"`
 	MsgType   int        `json:"msg_type" db:"msg_type"`
 	IsDeleted bool       `json:"is_deleted" db:"is_deleted"`
+	Timestamp *time.Time `json:"timestamp" db:"block_timestamp"`
 	CreatedAt *time.Time `json:"created_at" db:"created_at"`
 	UpdatedAt *time.Time `json:"updated_at" db:"updated_at"`
 	DeletedAt *time.Time `json:"deleted_at" db:"deleted_at"`
@@ -76,6 +77,8 @@ type L1CrossMsgOrm interface {
 	UpdateL1CrossMsgHash(ctx context.Context, l1Hash, msgHash common.Hash) error
 	GetLatestL1ProcessedHeight() (int64, error)
 	DeleteL1CrossMsgAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error
+	UpdateL1Blocktimestamp(height uint64, timestamp time.Time) error
+	GetL1EarliestNoBlocktimestampHeight() (uint64, error)
 }
 
 // L2CrossMsgOrm provides operations on l2_cross_message table
@@ -88,6 +91,8 @@ type L2CrossMsgOrm interface {
 	UpdateL2CrossMsgHash(ctx context.Context, l2Hash, msgHash common.Hash) error
 	GetLatestL2ProcessedHeight() (int64, error)
 	DeleteL2CrossMsgFromHeightDBTx(dbTx *sqlx.Tx, height int64) error
+	UpdateL2Blocktimestamp(height uint64, timestamp time.Time) error
+	GetL2EarliestNoBlocktimestampHeight() (uint64, error)
 }
 
 type RelayedMsgOrm interface {
```
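A small compile-check sketch (not part of the diff) of why these signatures were chosen: the new ORM methods match the fetcher's function types exactly, so the entrypoint can pass them straight through without adapters. The `orm` import path here is assumed for illustration:

```go
package wiring

import (
	"bridge-history-api/cross_msg"
	"bridge-history-api/db/orm" // assumed import path for the ORM package
)

// Method values of the interface satisfy the fetcher's callback types
// directly, which is what lets the entrypoint pass db.UpdateL1Blocktimestamp
// and db.GetL1EarliestNoBlocktimestampHeight as-is.
func wire(l1 orm.L1CrossMsgOrm) (cross_msg.UpdateBlocktimestampFunc, cross_msg.GetEarliestNoBlocktimestampHeightFunc) {
	return l1.UpdateL1Blocktimestamp, l1.GetL1EarliestNoBlocktimestampHeight
}
```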
bridge-history-api L1 cross-message ORM (Go)

```diff
@@ -4,6 +4,7 @@ import (
 	"context"
 	"database/sql"
 	"errors"
+	"time"
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/log"
@@ -118,3 +119,22 @@ func (l *l1CrossMsgOrm) DeleteL1CrossMsgAfterHeightDBTx(dbTx *sqlx.Tx, height in
 	}
 	return nil
 }
+
+func (l *l1CrossMsgOrm) UpdateL1Blocktimestamp(height uint64, timestamp time.Time) error {
+	if _, err := l.db.Exec(`UPDATE cross_message SET block_timestamp = $1 where height = $2 AND msg_type = $3 AND NOT is_deleted`, timestamp, height, Layer1Msg); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (l *l1CrossMsgOrm) GetL1EarliestNoBlocktimestampHeight() (uint64, error) {
+	row := l.db.QueryRowx(`SELECT height FROM cross_message WHERE block_timestamp IS NULL AND msg_type = $1 AND NOT is_deleted ORDER BY height ASC LIMIT 1;`, Layer1Msg)
+	var result uint64
+	if err := row.Scan(&result); err != nil {
+		if err == sql.ErrNoRows {
+			return 0, nil
+		}
+		return 0, err
+	}
+	return result, nil
+}
```
bridge-history-api L2 cross-message ORM (Go)

```diff
@@ -4,6 +4,7 @@ import (
 	"context"
 	"database/sql"
 	"errors"
+	"time"
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/log"
@@ -121,3 +122,22 @@ func (l *l2CrossMsgOrm) GetLatestL2ProcessedHeight() (int64, error) {
 	}
 	return 0, nil
 }
+
+func (l *l2CrossMsgOrm) UpdateL2Blocktimestamp(height uint64, timestamp time.Time) error {
+	if _, err := l.db.Exec(`UPDATE cross_message SET block_timestamp = $1 where height = $2 AND msg_type = $3 AND NOT is_deleted`, timestamp, height, Layer2Msg); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (l *l2CrossMsgOrm) GetL2EarliestNoBlocktimestampHeight() (uint64, error) {
+	row := l.db.QueryRowx(`SELECT height FROM cross_message WHERE block_timestamp IS NULL AND msg_type = $1 AND NOT is_deleted ORDER BY height ASC LIMIT 1;`, Layer2Msg)
+	var result uint64
+	if err := row.Scan(&result); err != nil {
+		if err == sql.ErrNoRows {
+			return 0, nil
+		}
+		return 0, err
+	}
+	return result, nil
+}
```
bridge-history-api ORM factory (Go)

```diff
@@ -62,7 +62,7 @@ func (o *ormFactory) Beginx() (*sqlx.Tx, error) {
 func (o *ormFactory) GetCrossMsgsByAddressWithOffset(sender string, offset int64, limit int64) ([]*orm.CrossMsg, error) {
 	para := sender
 	var results []*orm.CrossMsg
-	rows, err := o.DB.Queryx(`SELECT * FROM cross_message WHERE sender = $1 AND NOT is_deleted ORDER BY id DESC LIMIT $2 OFFSET $3;`, para, limit, offset)
+	rows, err := o.DB.Queryx(`SELECT * FROM cross_message WHERE sender = $1 AND NOT is_deleted ORDER BY block_timestamp DESC NULLS FIRST, id DESC LIMIT $2 OFFSET $3;`, para, limit, offset)
 	if err != nil || rows == nil {
 		return nil, err
 	}
```
bridge-history-api go.mod

```diff
@@ -1,9 +1,9 @@
 module bridge-history-api
 
-go 1.20
+go 1.19
 
 require (
-	github.com/ethereum/go-ethereum v1.11.6
+	github.com/ethereum/go-ethereum v1.12.0
 	github.com/jmoiron/sqlx v1.3.5
 	github.com/kataras/iris/v12 v12.2.0
 	github.com/lib/pq v1.10.7
@@ -26,6 +26,7 @@ require (
 	github.com/andybalholm/brotli v1.0.5 // indirect
 	github.com/aymerick/douceur v0.2.0 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
+	github.com/bits-and-blooms/bitset v1.7.0 // indirect
 	github.com/blang/semver/v4 v4.0.0 // indirect
 	github.com/btcsuite/btcd v0.20.1-beta // indirect
 	github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect
@@ -34,14 +35,17 @@ require (
 	github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
 	github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 // indirect
 	github.com/cockroachdb/redact v1.1.3 // indirect
+	github.com/consensys/bavard v0.1.13 // indirect
+	github.com/consensys/gnark-crypto v0.10.0 // indirect
 	github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
+	github.com/crate-crypto/go-kzg-4844 v0.2.0 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/deckarep/golang-set/v2 v2.1.0 // indirect
 	github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
 	github.com/deepmap/oapi-codegen v1.8.2 // indirect
 	github.com/docker/docker v20.10.21+incompatible // indirect
-	github.com/edsrzf/mmap-go v1.0.0 // indirect
 	github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385 // indirect
+	github.com/ethereum/c-kzg-4844 v0.2.0 // indirect
 	github.com/fatih/structs v1.1.0 // indirect
 	github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 // indirect
 	github.com/flosch/pongo2/v4 v4.0.2 // indirect
@@ -92,6 +96,7 @@ require (
 	github.com/microcosm-cc/bluemonday v1.0.23 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/mitchellh/pointerstructure v1.2.0 // indirect
+	github.com/mmcloughlin/addchain v0.4.0 // indirect
 	github.com/nats-io/nats.go v1.23.0 // indirect
 	github.com/nats-io/nkeys v0.3.0 // indirect
 	github.com/nats-io/nuid v1.0.1 // indirect
@@ -115,6 +120,7 @@ require (
 	github.com/shirou/gopsutil v3.21.11+incompatible // indirect
 	github.com/sirupsen/logrus v1.9.0 // indirect
 	github.com/status-im/keycard-go v0.2.0 // indirect
+	github.com/supranational/blst v0.3.11-0.20230406105308-e9dfc5ee724b // indirect
 	github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
 	github.com/tdewolff/minify/v2 v2.12.4 // indirect
 	github.com/tdewolff/parse/v2 v2.6.4 // indirect
@@ -142,5 +148,6 @@ require (
 	gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
+	rsc.io/tmplfunc v0.0.3 // indirect
 )
```
bridge-history-api go.sum (listing truncated in the mirror)

```diff
@@ -32,6 +32,8 @@ github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd3
 github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/bits-and-blooms/bitset v1.7.0 h1:YjAGVd3XmtK9ktAbX8Zg2g2PwLIMjGREZJHlV4j7NEo=
+github.com/bits-and-blooms/bitset v1.7.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
 github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
 github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
 github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
@@ -65,12 +67,18 @@ github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811/go.mod h1:Nb5lg
 github.com/cockroachdb/redact v1.1.3 h1:AKZds10rFSIj7qADf0g46UixK8NNLwWTNdCIGS5wfSQ=
 github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
 github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
+github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
+github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
+github.com/consensys/gnark-crypto v0.10.0 h1:zRh22SR7o4K35SoNqouS9J/TKHTyU2QWaj5ldehyXtA=
+github.com/consensys/gnark-crypto v0.10.0/go.mod h1:Iq/P3HHl0ElSjsg2E1gsMwhAyxnxoKK5nVyZKd+/KhU=
 github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
 github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
 github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
 github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
 github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/crate-crypto/go-kzg-4844 v0.2.0 h1:UVuHOE+5tIWrim4zf/Xaa43+MIsDCPyW76QhUpiMGj4=
+github.com/crate-crypto/go-kzg-4844 v0.2.0/go.mod h1:SBP7ikXEgDnUPONgm33HtuDZEDtWa3L4QtN1ocJSEQ4=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
 github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -92,8 +100,6 @@ github.com/djherbis/atime v1.1.0/go.mod h1:28OF6Y8s3NQWwacXc5eZTsEsiMzp7LF8MbXE+
 github.com/docker/docker v20.10.21+incompatible h1:UTLdBmHk3bEY+w8qeO5KttOhy6OmXWsl/FEet9Uswog=
 github.com/docker/docker v20.10.21+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
-github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
 github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385 h1:clC1lXBpe2kTj2VHdaIu9ajZQe4kcEY9j0NsnDDBZ3o=
 github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM=
 github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
@@ -101,8 +107,10 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m
 github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
-github.com/ethereum/go-ethereum v1.11.6 h1:2VF8Mf7XiSUfmoNOy3D+ocfl9Qu8baQBrCNbo2CXQ8E=
-github.com/ethereum/go-ethereum v1.11.6/go.mod h1:+a8pUj1tOyJ2RinsNQD4326YS+leSoKGiG/uVVb0x6Y=
+github.com/ethereum/c-kzg-4844 v0.2.0 h1:+cUvymlnoDDQgMInp25Bo3OmLajmmY8mLJ/tLjqd77Q=
+github.com/ethereum/c-kzg-4844 v0.2.0/go.mod h1:WI2Nd82DMZAAZI1wV2neKGost9EKjvbpQR9OqE5Qqa8=
+github.com/ethereum/go-ethereum v1.12.0 h1:bdnhLPtqETd4m3mS8BGMNvBTf36bO5bx/hxE2zljOa0=
+github.com/ethereum/go-ethereum v1.12.0/go.mod h1:/oo2X/dZLJjf2mJ6YT9wcWxa4nNJDBKDBU6sFIpx1Gs=
 github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8=
 github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
 github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
@@ -193,6 +201,7 @@ github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
 github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
 github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
 github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -290,6 +299,7 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0
 github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg=
 github.com/labstack/echo/v4 v4.5.0/go.mod h1:czIriw4a0C1dFun+ObrXp7ok03xON0N1awStJ6ArI7Y=
 github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
+github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
 github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
 github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw=
 github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
@@ -341,6 +351,9 @@ github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyua
 github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
 github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4=
+github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY=
+github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU=
+github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
@@ -453,6 +466,8 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
```
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
|
||||
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/supranational/blst v0.3.11-0.20230406105308-e9dfc5ee724b h1:u49mjRnygnB34h8OKbnNJFVUtWSKIKb1KukdV8bILUM=
|
||||
github.com/supranational/blst v0.3.11-0.20230406105308-e9dfc5ee724b/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
|
||||
github.com/tdewolff/minify/v2 v2.12.4 h1:kejsHQMM17n6/gwdw53qsi6lg0TGddZADVyQOz1KMdE=
|
||||
@@ -720,3 +735,5 @@ modernc.org/sqlite v1.18.1 h1:ko32eKt3jf7eqIkCgPAeHMBXw3riNSLhl2f3loEF7o8=
|
||||
modernc.org/strutil v1.1.2 h1:iFBDH6j1Z0bN/Q9udJnnFoFpENA4252qe/7/5woE5MI=
|
||||
modernc.org/token v1.0.0 h1:a0jaWiNMDhDUtqOj09wvjWWAqd3q7WpBulmL9H2egsk=
|
||||
moul.io/http2curl/v2 v2.3.0 h1:9r3JfDzWPcbIklMOs2TnIFzDYvfAZvjeavG6EzP7jYs=
|
||||
rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU=
|
||||
rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA=
|
||||
|
||||
@@ -77,12 +77,13 @@ func (h *historyBackend) GetTxsByAddress(address common.Address, offset int64, l
	}
	for _, msg := range result {
		txHistory := &TxHistoryInfo{
			Hash: msg.MsgHash,
			Amount: msg.Amount,
			To: msg.Target,
			IsL1: msg.MsgType == int(orm.Layer1Msg),
			BlockNumber: msg.Height,
+			BlockTimestamp: msg.Timestamp,
			CreatedAt: msg.CreatedAt,
			FinalizeTx: &Finalized{
				Hash: "",
			},
@@ -102,12 +103,13 @@ func (h *historyBackend) GetTxsByHashes(hashes []string) ([]*TxHistoryInfo, erro
	}
	if l1result != nil {
		txHistory := &TxHistoryInfo{
			Hash: l1result.Layer1Hash,
			Amount: l1result.Amount,
			To: l1result.Target,
			IsL1: true,
			BlockNumber: l1result.Height,
+			BlockTimestamp: l1result.Timestamp,
			CreatedAt: l1result.CreatedAt,
			FinalizeTx: &Finalized{
				Hash: "",
			},
@@ -122,12 +124,13 @@ func (h *historyBackend) GetTxsByHashes(hashes []string) ([]*TxHistoryInfo, erro
	}
	if l2result != nil {
		txHistory := &TxHistoryInfo{
			Hash: l2result.Layer2Hash,
			Amount: l2result.Amount,
			To: l2result.Target,
			IsL1: false,
			BlockNumber: l2result.Height,
+			BlockTimestamp: l2result.Timestamp,
			CreatedAt: l2result.CreatedAt,
			FinalizeTx: &Finalized{
				Hash: "",
			},
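These hunks touch only the struct literals, not the type definition itself. For orientation, a hypothetical reconstruction of the TxHistoryInfo shape they populate — field names come from the literals above, but the types, JSON tags, and the Finalized definition are assumptions, not the repository's actual code:

package history

import "time"

// Finalized is assumed to carry the finalize-tx hash, per the literals above.
type Finalized struct {
	Hash string `json:"hash"`
}

// TxHistoryInfo: hypothetical reconstruction; types and tags are guesses.
type TxHistoryInfo struct {
	Hash           string     `json:"hash"`
	Amount         string     `json:"amount"`
	To             string     `json:"to"`
	IsL1           bool       `json:"isL1"`
	BlockNumber    uint64     `json:"blockNumber"`
	BlockTimestamp *time.Time `json:"blockTimestamp"` // the field this change adds
	CreatedAt      *time.Time `json:"createdAt"`
	FinalizeTx     *Finalized `json:"finalizeTx"`
}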
@@ -14,6 +14,11 @@ import (
	backendabi "bridge-history-api/abi"
)

+// Keccak2 compute the keccack256 of two concatenations of bytes32
+func Keccak2(a common.Hash, b common.Hash) common.Hash {
+	return common.BytesToHash(crypto.Keccak256(append(a.Bytes()[:], b.Bytes()[:]...)))
+}
+
func GetSafeBlockNumber(ctx context.Context, client *ethclient.Client, confirmations uint64) (uint64, error) {
	number, err := client.BlockNumber(ctx)
	if err != nil || number <= confirmations {
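The diff cuts off inside GetSafeBlockNumber. A minimal sketch of a plausible completion, assuming the function returns the latest height minus the confirmation depth — the early-return branch and its fallback value are guesses, not the commit's exact body:

package utils

import (
	"context"

	"github.com/ethereum/go-ethereum/ethclient"
)

// GetSafeBlockNumber: assumed completion for illustration only.
func GetSafeBlockNumber(ctx context.Context, client *ethclient.Client, confirmations uint64) (uint64, error) {
	number, err := client.BlockNumber(ctx)
	if err != nil || number <= confirmations {
		// assumption: fall back to block 1 until enough confirmations exist
		return 1, err
	}
	return number - confirmations, nil
}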
20
bridge-history-api/utils/utils_test.go
Normal file
@@ -0,0 +1,20 @@
package utils_test

import (
	"testing"

	"github.com/ethereum/go-ethereum/common"
	"github.com/stretchr/testify/assert"

	"bridge-history-api/utils"
)

func TestKeccak2(t *testing.T) {
	a := common.HexToHash("0xe90b7bceb6e7df5418fb78d8ee546e97c83a08bbccc01a0644d599ccd2a7c2e0")
	b := common.HexToHash("0x222ff5e0b5877792c2bc1670e2ccd0c2c97cd7bb1672a57d598db05092d3d72c")
	c := utils.Keccak2(a, b)
	assert.NotEmpty(t, c)
	assert.NotEqual(t, a, c)
	assert.NotEqual(t, b, c)
	assert.Equal(t, "0xc0ffbd7f501bd3d49721b0724b2bff657cb2378f15d5a9b97cd7ea5bf630d512", c.Hex())
}
@@ -80,6 +80,7 @@
	"batch_commit_time_sec": 1200,
	"batch_blocks_limit": 100,
	"commit_tx_calldata_size_limit": 200000,
+	"commit_tx_batch_count_limit": 30,
	"public_input_config": {
		"max_tx_num": 44,
		"padding_tx_hash": "0x0000000000000000000000000000000000000000000000000000000000000000"
@@ -1,6 +1,6 @@
module scroll-tech/bridge

-go 1.18
+go 1.19

require (
	github.com/agiledragon/gomonkey/v2 v2.9.0
@@ -44,6 +44,8 @@ type BatchProposerConfig struct {
	CommitTxCalldataSizeLimit uint64 `json:"commit_tx_calldata_size_limit"`
	// Commit tx calldata min size limit in bytes
	CommitTxCalldataMinSize uint64 `json:"commit_tx_calldata_min_size,omitempty"`
+	// Max number of batches in a commit transaction
+	CommitTxBatchCountLimit uint64 `json:"commit_tx_batch_count_limit"`
	// The public input hash config
	PublicInputConfig *types.PublicInputHashConfig `json:"public_input_config"`
}
@@ -457,27 +457,20 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
		}
	}()

-	proofBuffer, icBuffer, err := r.blockBatchOrm.GetVerifiedProofAndInstanceCommitmentsByHash(hash)
+	aggProof, err := r.blockBatchOrm.GetVerifiedProofByHash(hash)
	if err != nil {
-		log.Warn("fetch get proof by hash failed", "hash", hash, "err", err)
-		return
-	}
-	if proofBuffer == nil || icBuffer == nil {
-		log.Warn("proof or instance not ready", "hash", hash)
-		return
-	}
-	if len(proofBuffer)%32 != 0 {
-		log.Error("proof buffer has wrong length", "hash", hash, "length", len(proofBuffer))
-		return
-	}
-	if len(icBuffer)%32 != 0 {
-		log.Warn("instance buffer has wrong length", "hash", hash, "length", len(icBuffer))
+		log.Warn("get verified proof by hash failed", "hash", hash, "err", err)
		return
	}

-	proof := utils.BufferToUint256Le(proofBuffer)
-	instance := utils.BufferToUint256Le(icBuffer)
-	data, err := r.l1RollupABI.Pack("finalizeBatchWithProof", common.HexToHash(hash), proof, instance)
+	if err = aggProof.SanityCheck(); err != nil {
+		log.Warn("agg_proof sanity check fails", "hash", hash, "error", err)
+		return
+	}
+
+	proof := utils.BufferToUint256Le(aggProof.Proof)
+	finalPair := utils.BufferToUint256Le(aggProof.FinalPair)
+	data, err := r.l1RollupABI.Pack("finalizeBatchWithProof", common.HexToHash(hash), proof, finalPair)
	if err != nil {
		log.Error("Pack finalizeBatchWithProof failed", "err", err)
		return
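utils.BufferToUint256Le itself is not part of this diff. A sketch of its assumed behavior — splitting a buffer into 32-byte little-endian words, which is what the old %32 length checks guarded:

package utils

import "math/big"

// BufferToUint256Le (assumed semantics): interpret each 32-byte chunk of buf
// as a little-endian 256-bit integer. big.Int reads big-endian bytes, so each
// chunk is reversed before SetBytes.
func BufferToUint256Le(buf []byte) []*big.Int {
	var words []*big.Int
	for i := 0; i+32 <= len(buf); i += 32 {
		reversed := make([]byte, 32)
		for j := 0; j < 32; j++ {
			reversed[j] = buf[i+31-j]
		}
		words = append(words, new(big.Int).SetBytes(reversed))
	}
	return words
}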
@@ -18,6 +18,7 @@ import (
	"gorm.io/gorm"

	"scroll-tech/common/types"
+	"scroll-tech/common/types/message"
	"scroll-tech/common/utils"

	"scroll-tech/bridge/internal/controller/sender"
@@ -156,9 +157,11 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
	err = blockBatchOrm.UpdateRollupStatus(context.Background(), batchHash, types.RollupCommitted)
	assert.NoError(t, err)

-	tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
-	tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
-	err = blockBatchOrm.UpdateProofByHash(context.Background(), batchHash, tProof, tInstanceCommitments, 100)
+	proof := &message.AggProof{
+		Proof:     []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
+		FinalPair: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
+	}
+	err = blockBatchOrm.UpdateProofByHash(context.Background(), batchHash, proof, 100)
	assert.NoError(t, err)
	err = blockBatchOrm.UpdateProvingStatus(batchHash, types.ProvingTaskVerified)
	assert.NoError(t, err)
@@ -199,9 +202,11 @@ func testL2RelayerSkipBatches(t *testing.T) {
	err = blockBatchOrm.UpdateRollupStatus(context.Background(), batchHash, rollupStatus)
	assert.NoError(t, err)

-	tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
-	tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
-	err = blockBatchOrm.UpdateProofByHash(context.Background(), batchHash, tProof, tInstanceCommitments, 100)
+	proof := &message.AggProof{
+		Proof:     []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
+		FinalPair: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
+	}
+	err = blockBatchOrm.UpdateProofByHash(context.Background(), batchHash, proof, 100)
	assert.NoError(t, err)
	err = blockBatchOrm.UpdateProvingStatus(batchHash, provingStatus)
	assert.NoError(t, err)
@@ -46,6 +46,7 @@ type BatchProposer struct {
	commitCalldataSizeLimit  uint64
	batchDataBufferSizeLimit uint64
	commitCalldataMinSize    uint64
+	commitBatchCountLimit    int

	proofGenerationFreq uint64
	batchDataBuffer     []*bridgeTypes.BatchData
@@ -72,6 +73,7 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, rela
	batchCommitTimeSec:      cfg.BatchCommitTimeSec,
	commitCalldataSizeLimit: cfg.CommitTxCalldataSizeLimit,
	commitCalldataMinSize:   cfg.CommitTxCalldataMinSize,
+	commitBatchCountLimit:   int(cfg.CommitTxBatchCountLimit),
	batchDataBufferSizeLimit: 100*cfg.CommitTxCalldataSizeLimit + 1*1024*1024, // @todo: determine the value.
	proofGenerationFreq:      cfg.ProofGenerationFreq,
	piCfg:                    cfg.PublicInputConfig,
@@ -202,7 +204,7 @@ func (p *BatchProposer) TryCommitBatches() {
	index := 0
	commit := false
	calldataByteLen := uint64(0)
-	for ; index < len(p.batchDataBuffer); index++ {
+	for ; index < len(p.batchDataBuffer) && index < p.commitBatchCountLimit; index++ {
		calldataByteLen += bridgeAbi.GetBatchCalldataLength(&p.batchDataBuffer[index].Batch)
		if calldataByteLen > p.commitCalldataSizeLimit {
			commit = true
@@ -113,11 +113,12 @@ func testBatchProposerBatchGeneration(t *testing.T) {
	assert.NoError(t, err)

	proposer := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
		ProofGenerationFreq: 1,
		BatchGasThreshold:   3000000,
		BatchTxNumThreshold: 135,
		BatchTimeSec:        1,
		BatchBlocksLimit:    100,
+		CommitTxBatchCountLimit: 30,
	}, relayer, db)
	proposer.TryProposeBatch()
@@ -189,11 +190,12 @@ func testBatchProposerGracefulRestart(t *testing.T) {
	assert.Equal(t, batchData2.Hash().Hex(), batchHashes[0])
	// test p.recoverBatchDataBuffer().
	_ = NewBatchProposer(context.Background(), &config.BatchProposerConfig{
		ProofGenerationFreq: 1,
		BatchGasThreshold:   3000000,
		BatchTxNumThreshold: 135,
		BatchTimeSec:        1,
		BatchBlocksLimit:    100,
+		CommitTxBatchCountLimit: 30,
	}, relayer, db)

	batchHashes, err = blockBatchOrm.GetBlockBatchesHashByRollupStatus(types.RollupPending, math.MaxInt32)
@@ -2,6 +2,7 @@ package orm

import (
	"context"
+	"encoding/json"
	"errors"
	"time"

@@ -9,6 +10,7 @@ import (
	"gorm.io/gorm"

	"scroll-tech/common/types"
+	"scroll-tech/common/types/message"

	bridgeTypes "scroll-tech/bridge/internal/types"
)
@@ -17,31 +19,30 @@
type BlockBatch struct {
	db *gorm.DB `gorm:"column:-"`

	Hash string `json:"hash" gorm:"column:hash"`
	Index uint64 `json:"index" gorm:"column:index"`
	StartBlockNumber uint64 `json:"start_block_number" gorm:"column:start_block_number"`
	StartBlockHash string `json:"start_block_hash" gorm:"column:start_block_hash"`
	EndBlockNumber uint64 `json:"end_block_number" gorm:"column:end_block_number"`
	EndBlockHash string `json:"end_block_hash" gorm:"column:end_block_hash"`
	ParentHash string `json:"parent_hash" gorm:"column:parent_hash"`
	StateRoot string `json:"state_root" gorm:"column:state_root"`
	TotalTxNum uint64 `json:"total_tx_num" gorm:"column:total_tx_num"`
	TotalL1TxNum uint64 `json:"total_l1_tx_num" gorm:"column:total_l1_tx_num"`
	TotalL2Gas uint64 `json:"total_l2_gas" gorm:"column:total_l2_gas"`
	ProvingStatus int `json:"proving_status" gorm:"column:proving_status;default:1"`
	Proof []byte `json:"proof" gorm:"column:proof"`
-	InstanceCommitments []byte `json:"instance_commitments" gorm:"column:instance_commitments"`
	ProofTimeSec uint64 `json:"proof_time_sec" gorm:"column:proof_time_sec;default:0"`
	RollupStatus int `json:"rollup_status" gorm:"column:rollup_status;default:1"`
	CommitTxHash string `json:"commit_tx_hash" gorm:"column:commit_tx_hash;default:NULL"`
	OracleStatus int `json:"oracle_status" gorm:"column:oracle_status;default:1"`
	OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"`
	FinalizeTxHash string `json:"finalize_tx_hash" gorm:"column:finalize_tx_hash;default:NULL"`
	CreatedAt time.Time `json:"created_at" gorm:"column:created_at;default:CURRENT_TIMESTAMP()"`
	ProverAssignedAt *time.Time `json:"prover_assigned_at" gorm:"column:prover_assigned_at;default:NULL"`
	ProvedAt *time.Time `json:"proved_at" gorm:"column:proved_at;default:NULL"`
	CommittedAt *time.Time `json:"committed_at" gorm:"column:committed_at;default:NULL"`
	FinalizedAt *time.Time `json:"finalized_at" gorm:"column:finalized_at;default:NULL"`
}

// NewBlockBatch create an blockBatchOrm instance
@@ -100,14 +101,24 @@ func (o *BlockBatch) GetBlockBatchesHashByRollupStatus(status types.RollupStatus
	return hashes, nil
}

-// GetVerifiedProofAndInstanceCommitmentsByHash get verified proof and instance comments by hash
-func (o *BlockBatch) GetVerifiedProofAndInstanceCommitmentsByHash(hash string) ([]byte, []byte, error) {
-	var blockBatch BlockBatch
-	err := o.db.Select("proof, instance_commitments").Where("hash", hash).Where("proving_status", int(types.ProvingTaskVerified)).Find(&blockBatch).Error
-	if err != nil {
-		return nil, nil, err
+// GetVerifiedProofByHash get verified proof and instance comments by hash
+func (o *BlockBatch) GetVerifiedProofByHash(hash string) (*message.AggProof, error) {
+	result := o.db.Model(&BlockBatch{}).Select("proof").Where("hash", hash).Where("proving_status", int(types.ProvingTaskVerified)).Row()
+	if result.Err() != nil {
+		return nil, result.Err()
	}
-	return blockBatch.Proof, blockBatch.InstanceCommitments, nil
+
+	var proofBytes []byte
+	if err := result.Scan(&proofBytes); err != nil {
+		return nil, err
+	}
+
+	var proof message.AggProof
+	if err := json.Unmarshal(proofBytes, &proof); err != nil {
+		return nil, err
+	}
+
+	return &proof, nil
}

// GetLatestBatch get the latest batch
@@ -147,10 +158,17 @@ func (o *BlockBatch) GetRollupStatusByHashList(hashes []string) ([]types.RollupS
		return nil, err
	}

-	var statuses []types.RollupStatus
-	for _, v := range blockBatches {
-		statuses = append(statuses, types.RollupStatus(v.RollupStatus))
+	var (
+		statuses   []types.RollupStatus
+		_statusMap = make(map[string]types.RollupStatus, len(hashes))
+	)
+	for _, _batch := range blockBatches {
+		_statusMap[_batch.Hash] = types.RollupStatus(_batch.RollupStatus)
	}
+	for _, _hash := range hashes {
+		statuses = append(statuses, _statusMap[_hash])
+	}

	return statuses, nil
}
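The map-based rewrite pins the output order to the order of the input hashes rather than to database row order, and a missing hash now yields the zero status instead of shifting later entries. A tiny self-contained illustration (a local RollupStatus stands in for types.RollupStatus):

package main

import "fmt"

type RollupStatus int

func main() {
	hashes := []string{"0xa", "0xb", "0xc"}
	statusMap := map[string]RollupStatus{"0xc": 3, "0xa": 2} // "0xb" is absent
	var statuses []RollupStatus
	for _, h := range hashes {
		statuses = append(statuses, statusMap[h]) // missing hashes get the zero value
	}
	fmt.Println(statuses) // [2 0 3] — aligned with hashes, not with DB row order
}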
@@ -279,12 +297,16 @@ func (o *BlockBatch) UpdateL2GasOracleStatusAndOracleTxHash(ctx context.Context,

// UpdateProofByHash update the block batch proof by hash
// for unit test
-func (o *BlockBatch) UpdateProofByHash(ctx context.Context, hash string, proof, instanceCommitments []byte, proofTimeSec uint64) error {
+func (o *BlockBatch) UpdateProofByHash(ctx context.Context, hash string, proof *message.AggProof, proofTimeSec uint64) error {
+	proofBytes, err := json.Marshal(proof)
+	if err != nil {
+		return err
+	}
+
	updateFields := make(map[string]interface{})
-	updateFields["proof"] = proof
-	updateFields["instance_commitments"] = instanceCommitments
+	updateFields["proof"] = proofBytes
	updateFields["proof_time_sec"] = proofTimeSec
-	err := o.db.WithContext(ctx).Model(&BlockBatch{}).Where("hash", hash).Updates(updateFields).Error
+	err = o.db.WithContext(ctx).Model(&BlockBatch{}).Where("hash", hash).Updates(updateFields).Error
	if err != nil {
		log.Error("failed to update proof", "err", err)
	}
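The proof now travels through a single JSON-encoded proof column instead of two raw byte columns. A small sketch of the round trip, assuming message.AggProof is a plain struct of byte slices:

func proofRoundTrip(in *message.AggProof) (*message.AggProof, error) {
	blob, err := json.Marshal(in) // what UpdateProofByHash writes to block_batch.proof
	if err != nil {
		return nil, err
	}
	var out message.AggProof
	if err := json.Unmarshal(blob, &out); err != nil { // what GetVerifiedProofByHash does on read
		return nil, err
	}
	return &out, nil
}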
@@ -13,8 +13,8 @@ create table l1_message
	layer1_hash VARCHAR NOT NULL,
	layer2_hash VARCHAR DEFAULT NULL,
	status INTEGER DEFAULT 1,
-	created_time TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
-	updated_time TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP
+	created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
+	updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP
);

comment
@@ -32,7 +32,7 @@ create index l1_message_height_index
CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$
BEGIN
-	NEW.updated_time = CURRENT_TIMESTAMP;
+	NEW.updated_at = CURRENT_TIMESTAMP;
	RETURN NEW;
END;
$$ language 'plpgsql';
@@ -13,8 +13,8 @@ create table l2_message
	layer1_hash VARCHAR DEFAULT NULL,
	proof TEXT DEFAULT NULL,
	status INTEGER DEFAULT 1,
-	created_time TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
-	updated_time TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP
+	created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
+	updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP
);

comment
@@ -32,7 +32,7 @@ create index l2_message_height_index
CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$
BEGIN
-	NEW.updated_time = CURRENT_TIMESTAMP;
+	NEW.updated_at = CURRENT_TIMESTAMP;
	RETURN NEW;
END;
$$ language 'plpgsql';
@@ -16,7 +16,6 @@ create table block_batch
	total_l2_gas BIGINT NOT NULL,
	proving_status INTEGER DEFAULT 1,
	proof BYTEA DEFAULT NULL,
-	instance_commitments BYTEA DEFAULT NULL,
	proof_time_sec INTEGER DEFAULT 0,
	rollup_status INTEGER DEFAULT 1,
	commit_tx_hash VARCHAR DEFAULT NULL,
@@ -14,6 +14,7 @@ import (
	"gorm.io/gorm"

	"scroll-tech/common/types"
+	"scroll-tech/common/types/message"

	"scroll-tech/bridge/internal/controller/relayer"
	"scroll-tech/bridge/internal/controller/watcher"
@@ -110,9 +111,11 @@ func testRelayL2MessageSucceed(t *testing.T) {
	assert.NoError(t, err)

	// add dummy proof
-	tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
-	tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
-	err = blockBatchOrm.UpdateProofByHash(context.Background(), batchHash, tProof, tInstanceCommitments, 100)
+	proof := &message.AggProof{
+		Proof:     []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
+		FinalPair: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
+	}
+	err = blockBatchOrm.UpdateProofByHash(context.Background(), batchHash, proof, 100)
	assert.NoError(t, err)
	err = blockBatchOrm.UpdateProvingStatus(batchHash, types.ProvingTaskVerified)
	assert.NoError(t, err)
@@ -13,6 +13,7 @@ import (
	"gorm.io/gorm"

	"scroll-tech/common/types"
+	"scroll-tech/common/types/message"

	"scroll-tech/bridge/internal/controller/relayer"
	"scroll-tech/bridge/internal/controller/watcher"
@@ -117,9 +118,11 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
	assert.Equal(t, types.RollupCommitted, statuses[0])

	// add dummy proof
-	tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
-	tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
-	err = blockBatchOrm.UpdateProofByHash(context.Background(), batchHash, tProof, tInstanceCommitments, 100)
+	proof := &message.AggProof{
+		Proof:     []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
+		FinalPair: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
+	}
+	err = blockBatchOrm.UpdateProofByHash(context.Background(), batchHash, proof, 100)
	assert.NoError(t, err)
	err = blockBatchOrm.UpdateProvingStatus(batchHash, types.ProvingTaskVerified)
	assert.NoError(t, err)
@@ -1,45 +1,46 @@
-GO_VERSION := 1.18
-PYTHON_VERSION := 3.10
-RUST_VERSION := nightly-2022-12-10
+ifeq ($(GO_VERSION),)
+GO_VERSION=1.19
+endif
+ifeq ($(RUST_VERSION),)
+RUST_VERSION=nightly-2022-12-10
+endif
+ifeq ($(PYTHON_VERSION),)
+PYTHON_VERSION=3.10
+endif
+ifeq ($(CUDA_VERSION),)
+CUDA_VERSION=11.7.1
+endif

-.PHONY: all go-alpine-builder rust-builder rust-alpine-builder go-rust-alpine-builder go-rust-builder py-runner
+.PHONY: all go-alpine-builder rust-builder rust-alpine-builder go-rust-alpine-builder go-rust-builder cuda-go-rust-builder py-runner
+
+cuda-go-rust-builder:
+	docker build -t scrolltech/cuda-go-rust-builder:cuda-$(CUDA_VERSION)-go-$(GO_VERSION)-rust-$(RUST_VERSION) -f cuda-go-rust-builder.Dockerfile ./ --build-arg CUDA_VERSION=$(CUDA_VERSION) --build-arg GO_VERSION=$(GO_VERSION) --build-arg RUST_VERSION=$(RUST_VERSION)

go-rust-builder:
-	docker build -t scrolltech/go-rust-builder:latest -f go-rust-builder.Dockerfile ./
-	docker image tag scrolltech/go-rust-builder:latest scrolltech/go-rust-builder:go-$(GO_VERSION)-rust-$(RUST_VERSION)
+	docker build -t scrolltech/go-rust-builder:go-$(GO_VERSION)-rust-$(RUST_VERSION) -f go-rust-builder.Dockerfile ./ --build-arg GO_VERSION=$(GO_VERSION) --build-arg RUST_VERSION=$(RUST_VERSION)

go-alpine-builder:
-	docker build -t scrolltech/go-alpine-builder:latest -f go-alpine-builder.Dockerfile ./
-	docker image tag scrolltech/go-alpine-builder:latest scrolltech/go-alpine-builder:$(GO_VERSION)
+	docker build -t scrolltech/go-alpine-builder:$(GO_VERSION) -f go-alpine-builder.Dockerfile ./ --build-arg GO_VERSION=$(GO_VERSION)

rust-builder:
-	docker build -t scrolltech/rust-builder:latest -f rust-builder.Dockerfile ./
-	docker image tag scrolltech/rust-builder:latest scrolltech/rust-builder:$(RUST_VERSION)
+	docker build -t scrolltech/rust-builder:$(RUST_VERSION) -f rust-builder.Dockerfile ./ --build-arg RUST_VERSION=$(RUST_VERSION)

rust-alpine-builder:
-	docker build -t scrolltech/rust-alpine-builder:latest -f rust-alpine-builder.Dockerfile ./
-	docker image tag scrolltech/rust-alpine-builder:latest scrolltech/rust-alpine-builder:$(RUST_VERSION)
+	docker build -t scrolltech/rust-alpine-builder:$(RUST_VERSION) -f rust-alpine-builder.Dockerfile ./ --build-arg RUST_VERSION=$(RUST_VERSION)

go-rust-alpine-builder:
-	docker build -t scrolltech/go-rust-alpine-builder:latest -f go-rust-alpine-builder.Dockerfile ./
-	docker image tag scrolltech/go-rust-alpine-builder:latest scrolltech/go-rust-alpine-builder:go-$(GO_VERSION)-rust-$(RUST_VERSION)
+	docker build -t scrolltech/go-rust-alpine-builder:go-$(GO_VERSION)-rust-$(RUST_VERSION) -f go-rust-alpine-builder.Dockerfile ./ --build-arg GO_VERSION=$(GO_VERSION) --build-arg RUST_VERSION=$(RUST_VERSION)

py-runner:
-	docker build -t scrolltech/py-runner:latest -f py-runner.Dockerfile ./
-	docker image tag scrolltech/py-runner:latest scrolltech/py-runner:$(PYTHON_VERSION)
+	docker build -t scrolltech/py-runner:$(PYTHON_VERSION) -f py-runner.Dockerfile ./ --build-arg PYTHON_VERSION=$(PYTHON_VERSION)

-all: go-alpine-builder rust-builder rust-alpine-builder go-rust-alpine-builder go-rust-builder py-runner
+all: go-alpine-builder rust-builder rust-alpine-builder go-rust-alpine-builder go-rust-builder cuda-go-rust-builder py-runner

publish:
	docker push scrolltech/go-alpine-builder:latest
	docker push scrolltech/go-alpine-builder:$(GO_VERSION)
	docker push scrolltech/rust-builder:latest
	docker push scrolltech/rust-builder:$(RUST_VERSION)
	docker push scrolltech/rust-alpine-builder:latest
	docker push scrolltech/rust-alpine-builder:$(RUST_VERSION)
	docker push scrolltech/go-rust-alpine-builder:latest
	docker push scrolltech/go-rust-alpine-builder:go-$(GO_VERSION)-rust-$(RUST_VERSION)
	docker push scrolltech/go-rust-builder:latest
	docker push scrolltech/go-rust-builder:go-$(GO_VERSION)-rust-$(RUST_VERSION)
	docker push scrolltech/py-runner:latest
+	docker push scrolltech/cuda-go-rust-builder:cuda-$(CUDA_VERSION)-go-$(GO_VERSION)-rust-$(RUST_VERSION)
	docker push scrolltech/py-runner:$(PYTHON_VERSION)
@@ -0,0 +1,35 @@
ARG CUDA_VERSION=11.7.1
ARG GO_VERSION=1.19
ARG RUST_VERSION=nightly-2022-12-10
ARG CARGO_CHEF_TAG=0.1.41

FROM nvidia/cuda:${CUDA_VERSION}-devel-ubuntu22.04
RUN apt-get update
# Install basic packages
RUN apt-get install build-essential curl wget git pkg-config --no-install-recommends -y
# Install dev-packages
RUN apt-get install libclang-dev libssl-dev cmake llvm --no-install-recommends -y
# Install related libs
RUN apt install libprocps-dev libboost-all-dev libmpfr-dev libgmp-dev --no-install-recommends -y
# Clean installed cache
RUN rm -rf /var/lib/apt/lists/*

# Install Rust
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
ENV PATH="/root/.cargo/bin:${PATH}"
ENV CARGO_HOME=/root/.cargo

# Add Toolchain
ARG RUST_VERSION
RUN rustup toolchain install ${RUST_VERSION}
ARG CARGO_CHEF_TAG
RUN cargo install cargo-chef --locked --version ${CARGO_CHEF_TAG} \
	&& rm -rf $CARGO_HOME/registry/

# Install Go
ARG GO_VERSION
RUN rm -rf /usr/local/go
RUN wget https://go.dev/dl/go${GO_VERSION}.1.linux-amd64.tar.gz
RUN tar -C /usr/local -xzf go${GO_VERSION}.1.linux-amd64.tar.gz
RUN rm go${GO_VERSION}.1.linux-amd64.tar.gz
ENV PATH="/usr/local/go/bin:${PATH}"
@@ -1,4 +1,6 @@
-FROM golang:1.18-alpine
+ARG GO_VERSION=1.19
+
+FROM golang:${GO_VERSION}-alpine

# ENV GOPROXY https://goproxy.cn,direct
@@ -1,6 +1,8 @@
-FROM golang:1.18-alpine
+ARG GO_VERSION=1.19
+ARG RUST_VERSION=nightly-2022-12-10
ARG CARGO_CHEF_TAG=0.1.41
-ARG DEFAULT_RUST_TOOLCHAIN=nightly-2022-12-10
+
+FROM golang:${GO_VERSION}-alpine

RUN apk add --no-cache gcc musl-dev linux-headers git ca-certificates openssl-dev

@@ -24,12 +26,14 @@ RUN set -eux; \
	wget "$url"; \
	chmod +x rustup-init;

-RUN ./rustup-init -y --no-modify-path --default-toolchain ${DEFAULT_RUST_TOOLCHAIN}; \
+ARG RUST_VERSION
+RUN ./rustup-init -y --no-modify-path --default-toolchain ${RUST_VERSION}; \
	rm rustup-init; \
	chmod -R a+w $RUSTUP_HOME $CARGO_HOME; \
	rustup --version; \
	cargo --version; \
	rustc --version;

+ARG CARGO_CHEF_TAG
RUN cargo install cargo-chef --locked --version ${CARGO_CHEF_TAG} \
	&& rm -rf $CARGO_HOME/registry/
@@ -1,10 +1,13 @@
+ARG GO_VERSION=1.19
+ARG RUST_VERSION=nightly-2022-12-10
+ARG CARGO_CHEF_TAG=0.1.41
+
FROM ubuntu:20.04

RUN apt-get update && ln -fs /usr/share/zoneinfo/America/New_York /etc/localtime

# Install basic packages
RUN apt-get install build-essential curl wget git pkg-config -y

# Install dev-packages
RUN apt-get install libclang-dev libssl-dev llvm -y

@@ -12,23 +15,17 @@ RUN apt-get install libclang-dev libssl-dev llvm -y
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
ENV PATH="/root/.cargo/bin:${PATH}"
ENV CARGO_HOME=/root/.cargo

# Add Toolchain
-RUN rustup toolchain install nightly-2022-12-10
-
-# TODO: make this ARG
-ENV CARGO_CHEF_TAG=0.1.41
-
+ARG RUST_VERSION
+RUN rustup toolchain install ${RUST_VERSION}
+ARG CARGO_CHEF_TAG
RUN cargo install cargo-chef --locked --version ${CARGO_CHEF_TAG} \
	&& rm -rf $CARGO_HOME/registry/

# Install Go
+ARG GO_VERSION
RUN rm -rf /usr/local/go
-# for 1.17
-# RUN wget https://go.dev/dl/go1.17.13.linux-amd64.tar.gz
-# RUN tar -C /usr/local -xzf go1.17.13.linux-amd64.tar.gz
-# for 1.18
-RUN wget https://go.dev/dl/go1.18.9.linux-amd64.tar.gz
-RUN tar -C /usr/local -xzf go1.18.9.linux-amd64.tar.gz
-
+RUN wget https://go.dev/dl/go${GO_VERSION}.1.linux-amd64.tar.gz
+RUN tar -C /usr/local -xzf go${GO_VERSION}.1.linux-amd64.tar.gz
+RUN rm go${GO_VERSION}.1.linux-amd64.tar.gz
ENV PATH="/usr/local/go/bin:${PATH}"
@@ -1,3 +1,4 @@
-FROM python:3.10-alpine3.15
+ARG PYTHON_VERSION=3.10
+FROM python:${PYTHON_VERSION}-alpine

RUN apk add --no-cache gcc g++ make musl-dev
@@ -1,7 +1,8 @@
ARG ALPINE_VERSION=3.15
-FROM alpine:${ALPINE_VERSION}
+ARG RUST_VERSION=nightly-2022-12-10
ARG CARGO_CHEF_TAG=0.1.41
-ARG DEFAULT_RUST_TOOLCHAIN=nightly-2022-12-10
+
+FROM alpine:${ALPINE_VERSION}

RUN apk add --no-cache \
	ca-certificates \
@@ -27,12 +28,14 @@ RUN set -eux; \
	wget "$url"; \
	chmod +x rustup-init;

-RUN ./rustup-init -y --no-modify-path --default-toolchain ${DEFAULT_RUST_TOOLCHAIN}; \
+ARG RUST_VERSION
+RUN ./rustup-init -y --no-modify-path --default-toolchain ${RUST_VERSION}; \
	rm rustup-init; \
	chmod -R a+w $RUSTUP_HOME $CARGO_HOME; \
	rustup --version; \
	cargo --version; \
	rustc --version;

+ARG CARGO_CHEF_TAG
RUN cargo install cargo-chef --locked --version ${CARGO_CHEF_TAG} \
	&& rm -rf $CARGO_HOME/registry/
@@ -1,3 +1,6 @@
+ARG RUST_VERSION=nightly-2022-12-10
+ARG CARGO_CHEF_TAG=0.1.41
+
FROM ubuntu:20.04

RUN apt-get update && ln -fs /usr/share/zoneinfo/America/New_York /etc/localtime
@@ -11,6 +14,10 @@ RUN apt-get install libclang-dev libssl-dev llvm -y
# Install Rust
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
ENV PATH="/root/.cargo/bin:${PATH}"
+ENV CARGO_HOME=/root/.cargo

# Add Toolchain
-RUN rustup toolchain install nightly-2022-12-10
+ARG RUST_VERSION
+RUN rustup toolchain install ${RUST_VERSION}
+ARG CARGO_CHEF_TAG
+RUN cargo install cargo-chef --locked --version ${CARGO_CHEF_TAG} \
+	&& rm -rf $CARGO_HOME/registry/
@@ -15,7 +15,7 @@ import (

const (
	// GolangCIVersion to be used for linting.
-	GolangCIVersion = "github.com/golangci/golangci-lint/cmd/golangci-lint@v1.50.0"
+	GolangCIVersion = "github.com/golangci/golangci-lint/cmd/golangci-lint@v1.52.2"
)

// GOBIN environment variable.
@@ -1,9 +1,9 @@
#!/bin/bash
set -uex
-${GOROOT}/bin/bin/gocover-cobertura < coverage.bridge.txt > coverage.bridge.xml
-${GOROOT}/bin/bin/gocover-cobertura < coverage.db.txt > coverage.db.xml
-${GOROOT}/bin/bin/gocover-cobertura < coverage.common.txt > coverage.common.xml
-${GOROOT}/bin/bin/gocover-cobertura < coverage.coordinator.txt > coverage.coordinator.xml
+${GOBIN}/gocover-cobertura < coverage.bridge.txt > coverage.bridge.xml
+${GOBIN}/gocover-cobertura < coverage.db.txt > coverage.db.xml
+${GOBIN}/gocover-cobertura < coverage.common.txt > coverage.common.xml
+${GOBIN}/gocover-cobertura < coverage.coordinator.txt > coverage.coordinator.xml
#${GOROOT}/bin/bin/gocover-cobertura < coverage.integration.txt > coverage.integration.xml

npx cobertura-merge -o cobertura.xml \
@@ -7,7 +7,7 @@ pipeline {
		timeout (20)
	}
	tools {
-		go 'go-1.18'
+		go 'go-1.19'
		nodejs "nodejs"
	}
	environment {
@@ -1,6 +1,6 @@
module scroll-tech/common

-go 1.18
+go 1.19

require (
	github.com/docker/docker v20.10.21+incompatible
2
common/testdata/blockTrace_03.json
vendored
@@ -29,7 +29,7 @@
{
	"type": 2,
	"nonce": 2,
-	"txHash": "0xaaaeb971adfac989c7db5426737bc2932756091a5730ea6d5324f93e4cff9713",
+	"txHash": "0x6b50040f5f14bad253f202b0775d6742131bcaee6b992f05578386f00e53b7e4",
	"gas": 1152994,
	"gasPrice": "0x3b9b0a17",
	"from": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",
66
common/types/batch_header.go
Normal file
@@ -0,0 +1,66 @@
package types

import (
	"encoding/binary"
	"math/big"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/crypto"
)

// BatchHeader contains batch header info to be committed.
type BatchHeader struct {
	// Encoded in BatchHeaderV0Codec
	version                uint8
	batchIndex             uint64
	l1MessagePopped        uint64
	totalL1MessagePopped   uint64
	dataHash               common.Hash
	parentBatchHash        common.Hash
	skippedL1MessageBitmap []*big.Int // LSB is the first L1 message
}

// NewBatchHeader creates a new BatchHeader
func NewBatchHeader(version uint8, batchIndex, totalL1MessagePoppedBefore uint64, parentBatchHash common.Hash, chunks []*Chunk) (*BatchHeader, error) {
	// TODO calculate `l1MessagePopped`, `totalL1MessagePopped`, and `skippedL1MessageBitmap` based on `chunks`
	var dataBytes []byte
	for _, chunk := range chunks {
		// Build dataHash
		chunkBytes, err := chunk.Hash()
		if err != nil {
			return nil, err
		}

		dataBytes = append(dataBytes, chunkBytes...)
	}
	dataHash := crypto.Keccak256Hash(dataBytes)

	return &BatchHeader{
		version:                version,
		batchIndex:             batchIndex,
		l1MessagePopped:        0,                          // TODO
		totalL1MessagePopped:   totalL1MessagePoppedBefore, // TODO
		dataHash:               dataHash,
		parentBatchHash:        parentBatchHash,
		skippedL1MessageBitmap: nil, // TODO
	}, nil
}

// Encode encodes the BatchHeader into RollupV2 BatchHeaderV0Codec Encoding.
func (b *BatchHeader) Encode() []byte {
	batchBytes := make([]byte, 89)
	batchBytes[0] = b.version
	binary.BigEndian.PutUint64(batchBytes[1:], b.batchIndex)
	binary.BigEndian.PutUint64(batchBytes[9:], b.l1MessagePopped)
	binary.BigEndian.PutUint64(batchBytes[17:], b.totalL1MessagePopped)
	copy(batchBytes[25:], b.dataHash[:])
	copy(batchBytes[57:], b.parentBatchHash[:])
	// TODO: encode skippedL1MessageBitmap

	return batchBytes
}

// Hash calculates the hash of the batch header.
func (b *BatchHeader) Hash() common.Hash {
	return crypto.Keccak256Hash(b.Encode())
}
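Encode above fixes the BatchHeaderV0 layout: 1 byte version, three big-endian uint64 counters, then two 32-byte hashes. As a cross-check, a hypothetical decoder for the same 89-byte layout — not part of the commit, assumed to live in the same types package with encoding/binary and errors imported; the skipped-message bitmap is ignored, matching the TODO:

func decodeBatchHeaderV0(data []byte) (*BatchHeader, error) {
	if len(data) < 89 {
		return nil, errors.New("batch header too short")
	}
	b := &BatchHeader{
		version:              data[0],                              // [0]     version
		batchIndex:           binary.BigEndian.Uint64(data[1:9]),   // [1:9]   batch index
		l1MessagePopped:      binary.BigEndian.Uint64(data[9:17]),  // [9:17]  L1 messages popped in this batch
		totalL1MessagePopped: binary.BigEndian.Uint64(data[17:25]), // [17:25] cumulative L1 messages popped
	}
	copy(b.dataHash[:], data[25:57])        // [25:57] data hash
	copy(b.parentBatchHash[:], data[57:89]) // [57:89] parent batch hash
	return b, nil
}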
106
common/types/batch_header_test.go
Normal file
@@ -0,0 +1,106 @@
package types

import (
	"encoding/json"
	"os"
	"testing"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/stretchr/testify/assert"
)

func TestNewBatchHeader(t *testing.T) {
	templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
	assert.NoError(t, err)

	wrappedBlock := &WrappedBlock{}
	assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
	chunk := &Chunk{
		Blocks: []*WrappedBlock{
			wrappedBlock,
		},
	}
	parentBatchHeader := &BatchHeader{
		version:                1,
		batchIndex:             0,
		l1MessagePopped:        0,
		totalL1MessagePopped:   0,
		dataHash:               common.HexToHash("0x0"),
		parentBatchHash:        common.HexToHash("0x0"),
		skippedL1MessageBitmap: nil,
	}
	batchHeader, err := NewBatchHeader(1, 1, 0, parentBatchHeader.Hash(), []*Chunk{chunk})
	assert.NoError(t, err)
	assert.NotNil(t, batchHeader)
}

func TestBatchHeaderEncode(t *testing.T) {
	templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
	assert.NoError(t, err)

	wrappedBlock := &WrappedBlock{}
	assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
	chunk := &Chunk{
		Blocks: []*WrappedBlock{
			wrappedBlock,
		},
	}
	parentBatchHeader := &BatchHeader{
		version:                1,
		batchIndex:             0,
		l1MessagePopped:        0,
		totalL1MessagePopped:   0,
		dataHash:               common.HexToHash("0x0"),
		parentBatchHash:        common.HexToHash("0x0"),
		skippedL1MessageBitmap: nil,
	}
	batchHeader, err := NewBatchHeader(1, 1, 0, parentBatchHeader.Hash(), []*Chunk{chunk})
	assert.NoError(t, err)
	assert.NotNil(t, batchHeader)
	bytes := batchHeader.Encode()
	assert.Equal(t, 89, len(bytes))
	assert.Equal(t, "0100000000000000010000000000000000000000000000000010a64c9bd905f8caf5d668fbda622d6558c5a42cdb4b3895709743d159c22e534136709aabc8a23aa17fbcc833da2f7857d3c2884feec9aae73429c135f94985", common.Bytes2Hex(bytes))
}

func TestBatchHeaderHash(t *testing.T) {
	templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
	assert.NoError(t, err)

	wrappedBlock := &WrappedBlock{}
	assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
	chunk := &Chunk{
		Blocks: []*WrappedBlock{
			wrappedBlock,
		},
	}
	parentBatchHeader := &BatchHeader{
		version:                1,
		batchIndex:             0,
		l1MessagePopped:        0,
		totalL1MessagePopped:   0,
		dataHash:               common.HexToHash("0x0"),
		parentBatchHash:        common.HexToHash("0x0"),
		skippedL1MessageBitmap: nil,
	}
	batchHeader, err := NewBatchHeader(1, 1, 0, parentBatchHeader.Hash(), []*Chunk{chunk})
	assert.NoError(t, err)
	assert.NotNil(t, batchHeader)
	hash := batchHeader.Hash()
	assert.Equal(t, "d69da4357da0073f4093c76e49f077e21bb52f48f57ee3e1fbd9c38a2881af81", common.Bytes2Hex(hash.Bytes()))

	templateBlockTrace, err = os.ReadFile("../testdata/blockTrace_03.json")
	assert.NoError(t, err)

	wrappedBlock2 := &WrappedBlock{}
	assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock2))
	chunk2 := &Chunk{
		Blocks: []*WrappedBlock{
			wrappedBlock2,
		},
	}
	batchHeader2, err := NewBatchHeader(1, 2, 0, batchHeader.Hash(), []*Chunk{chunk2})
	assert.NoError(t, err)
	assert.NotNil(t, batchHeader2)
	hash2 := batchHeader2.Hash()
	assert.Equal(t, "34de600163aa745d4513113137a5b54960d13f0d3f2849e490c4b875028bf930", common.Bytes2Hex(hash2.Bytes()))
}
@@ -1,6 +1,10 @@
package types

import (
+	"encoding/binary"
+	"errors"
+	"math"
+
	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/core/types"
)
@@ -12,3 +16,30 @@ type WrappedBlock struct {
	Transactions     []*types.TransactionData `json:"transactions"`
	WithdrawTrieRoot common.Hash              `json:"withdraw_trie_root,omitempty"`
}

+// Encode encodes the WrappedBlock into RollupV2 BlockContext Encoding.
+func (w *WrappedBlock) Encode() ([]byte, error) {
+	bytes := make([]byte, 60)
+
+	if !w.Header.Number.IsUint64() {
+		return nil, errors.New("block number is not uint64")
+	}
+
+	if len(w.Transactions) > math.MaxUint16 {
+		return nil, errors.New("number of transactions exceeds max uint16")
+	}
+
+	binary.BigEndian.PutUint64(bytes[0:], w.Header.Number.Uint64())
+	binary.BigEndian.PutUint64(bytes[8:], w.Header.Time)
+	// TODO: Currently, baseFee is 0
+	binary.BigEndian.PutUint64(bytes[48:], w.Header.GasLimit)
+	binary.BigEndian.PutUint16(bytes[56:], uint16(len(w.Transactions)))
+	// TODO: set numL1Messages properly
+
+	return bytes, nil
+}
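For reference, the 60-byte BlockContext that Encode produces leaves offsets 16–47 zero (the reserved baseFee word, per the TODO) and 58–59 unset (numL1Messages, per the other TODO). A hypothetical reader for the fields it actually fills — a sketch, not part of the commit:

func decodeBlockContext(ctx []byte) (number, timestamp, gasLimit uint64, txCount uint16, err error) {
	if len(ctx) != 60 {
		return 0, 0, 0, 0, errors.New("block context must be 60 bytes")
	}
	number = binary.BigEndian.Uint64(ctx[0:8])     // [0:8]   block number
	timestamp = binary.BigEndian.Uint64(ctx[8:16]) // [8:16]  timestamp
	// [16:48] baseFee, currently all zero per the TODO above
	gasLimit = binary.BigEndian.Uint64(ctx[48:56]) // [48:56] gas limit
	txCount = binary.BigEndian.Uint16(ctx[56:58])  // [56:58] tx count
	// [58:60] numL1Messages, not yet set per the TODO above
	return number, timestamp, gasLimit, txCount, nil
}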
117
common/types/chunk.go
Normal file
@@ -0,0 +1,117 @@
package types

import (
	"encoding/binary"
	"encoding/hex"
	"errors"
	"fmt"
	"strings"

	"github.com/scroll-tech/go-ethereum/common/hexutil"
	"github.com/scroll-tech/go-ethereum/core/types"
	"github.com/scroll-tech/go-ethereum/crypto"
)

// Chunk contains blocks to be encoded
type Chunk struct {
	Blocks []*WrappedBlock `json:"blocks"`
}

// Encode encodes the Chunk into RollupV2 Chunk Encoding.
func (c *Chunk) Encode() ([]byte, error) {
	numBlocks := len(c.Blocks)

	if numBlocks > 255 {
		return nil, errors.New("number of blocks exceeds 1 byte")
	}
	if numBlocks == 0 {
		return nil, errors.New("number of blocks is 0")
	}

	var chunkBytes []byte
	chunkBytes = append(chunkBytes, byte(numBlocks))

	var l2TxDataBytes []byte

	for _, block := range c.Blocks {
		blockBytes, err := block.Encode()
		if err != nil {
			return nil, fmt.Errorf("failed to encode block: %v", err)
		}

		if len(blockBytes) != 60 {
			return nil, fmt.Errorf("block encoding is not 60 bytes long %x", len(blockBytes))
		}

		chunkBytes = append(chunkBytes, blockBytes...)

		// Append l2Tx Hashes
		for _, txData := range block.Transactions {
			if txData.Type == 0x7E {
				continue
			}
			data, _ := hexutil.Decode(txData.Data)
			// right now we only support legacy tx
			tx := types.NewTx(&types.LegacyTx{
				Nonce:    txData.Nonce,
				To:       txData.To,
				Value:    txData.Value.ToInt(),
				Gas:      txData.Gas,
				GasPrice: txData.GasPrice.ToInt(),
				Data:     data,
				V:        txData.V.ToInt(),
				R:        txData.R.ToInt(),
				S:        txData.S.ToInt(),
			})
			rlpTxData, _ := tx.MarshalBinary()
			var txLen [4]byte
			binary.BigEndian.PutUint32(txLen[:], uint32(len(rlpTxData)))
			l2TxDataBytes = append(l2TxDataBytes, txLen[:]...)
			l2TxDataBytes = append(l2TxDataBytes, rlpTxData...)
		}
	}

	chunkBytes = append(chunkBytes, l2TxDataBytes...)

	return chunkBytes, nil
}

// Hash hashes the Chunk into RollupV2 Chunk Hash
func (c *Chunk) Hash() ([]byte, error) {
	chunkBytes, err := c.Encode()
	if err != nil {
		return nil, err
	}
	numBlocks := chunkBytes[0]

	// concatenate block contexts
	// only first 58 bytes is needed
	var dataBytes []byte
	for i := 0; i < int(numBlocks); i++ {
		// only first 58 bytes is needed
		dataBytes = append(dataBytes, chunkBytes[1+60*i:60*i+59]...)
	}

	// concatenate l1 and l2 tx hashes
	var l2TxHashes []byte
	for _, block := range c.Blocks {
		for _, txData := range block.Transactions {
			// TODO: concatenate l1 message hashes
			if txData.Type == 0x7E {
				continue
			}
			// concatenate l2 txs hashes
			// retrieve the number of transactions in current block.
			txHash := strings.TrimPrefix(txData.TxHash, "0x")
			hashBytes, err := hex.DecodeString(txHash)
			if err != nil {
				return nil, err
			}
			l2TxHashes = append(l2TxHashes, hashBytes...)
		}
	}

	dataBytes = append(dataBytes, l2TxHashes...)
	hash := crypto.Keccak256Hash(dataBytes).Bytes()
	return hash, nil
}
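Chunk.Hash is what NewBatchHeader earlier in this diff consumes: the batch dataHash is the keccak256 of the concatenated chunk hashes. Restated as a small helper for clarity — a sketch of the relationship; NewBatchHeader inlines this logic rather than calling such a function:

func batchDataHash(chunks []*Chunk) (common.Hash, error) {
	var dataBytes []byte
	for _, chunk := range chunks {
		chunkHash, err := chunk.Hash() // 32 bytes, as defined above
		if err != nil {
			return common.Hash{}, err
		}
		dataBytes = append(dataBytes, chunkHash...)
	}
	// identical to the dataHash computed inside NewBatchHeader
	return crypto.Keccak256Hash(dataBytes), nil
}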
92
common/types/chunk_test.go
Normal file
@@ -0,0 +1,92 @@
package types

import (
	"encoding/hex"
	"encoding/json"
	"os"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestChunkEncode(t *testing.T) {
	// Test case 1: when the chunk contains no blocks.
	chunk := &Chunk{
		Blocks: []*WrappedBlock{},
	}
	bytes, err := chunk.Encode()
	assert.Nil(t, bytes)
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "number of blocks is 0")

	// Test case 2: when the chunk contains more than 255 blocks.
	chunk = &Chunk{
		Blocks: []*WrappedBlock{},
	}
	for i := 0; i < 256; i++ {
		chunk.Blocks = append(chunk.Blocks, &WrappedBlock{})
	}
	bytes, err = chunk.Encode()
	assert.Nil(t, bytes)
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "number of blocks exceeds 1 byte")

	// Test case 3: when the chunk contains one block.
	templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
	assert.NoError(t, err)

	wrappedBlock := &WrappedBlock{}
	assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
	chunk = &Chunk{
		Blocks: []*WrappedBlock{
			wrappedBlock,
		},
	}
	bytes, err = chunk.Encode()
	hexString := hex.EncodeToString(bytes)
	assert.NoError(t, err)
	assert.Equal(t, 299, len(bytes))
	assert.Equal(t, "0100000000000000020000000063807b2a0000000000000000000000000000000000000000000000000000000000000000000355418d1e81840002000000000073f87180843b9aec2e8307a12094c0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca28a152d02c7e14af60000008083019ecea0ab07ae99c67aa78e7ba5cf6781e90cc32b219b1de102513d56548a41e86df514a034cbd19feacd73e8ce64d00c4d1996b9b5243c578fd7f51bfaec288bbaf42a8b00000073f87101843b9aec2e8307a1209401bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed8a152d02c7e14af60000008083019ecea0f039985866d8256f10c1be4f7b2cace28d8f20bde27e2604393eb095b7f77316a05a3e6e81065f2b4604bcec5bd4aba684835996fc3f879380aac1c09c6eed32f1", hexString)
}

func TestChunkHash(t *testing.T) {
	// Test case 1: when the chunk contains no blocks
	chunk := &Chunk{
		Blocks: []*WrappedBlock{},
	}
	bytes, err := chunk.Hash()
	assert.Nil(t, bytes)
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "number of blocks is 0")

	// Test case 2: successfully hashing a chunk on one block
	templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
	assert.NoError(t, err)
	wrappedBlock := &WrappedBlock{}
	assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
	chunk = &Chunk{
		Blocks: []*WrappedBlock{
			wrappedBlock,
		},
	}
	bytes, err = chunk.Hash()
	hexString := hex.EncodeToString(bytes)
	assert.NoError(t, err)
	assert.Equal(t, "78c839dfc494396c16b40946f32b3f4c3e8c2d4bfd04aefcf235edec474482f8", hexString)

	// Test case 3: successfully hashing a chunk on two blocks
	templateBlockTrace1, err := os.ReadFile("../testdata/blockTrace_03.json")
	assert.NoError(t, err)
	wrappedBlock1 := &WrappedBlock{}
	assert.NoError(t, json.Unmarshal(templateBlockTrace1, wrappedBlock1))
	chunk = &Chunk{
		Blocks: []*WrappedBlock{
			wrappedBlock,
			wrappedBlock1,
		},
	}
	bytes, err = chunk.Hash()
	hexString = hex.EncodeToString(bytes)
	assert.NoError(t, err)
	assert.Equal(t, "aa9e494f72bc6965857856f0fae6916f27b2a6591c714a573b2fab46df03b8ae", hexString)
}
@@ -233,31 +233,30 @@ const (

// BlockBatch is structure of stored block_batch
type BlockBatch struct {
    Hash                string          `json:"hash" db:"hash"`
    Index               uint64          `json:"index" db:"index"`
    ParentHash          string          `json:"parent_hash" db:"parent_hash"`
    StartBlockNumber    uint64          `json:"start_block_number" db:"start_block_number"`
    StartBlockHash      string          `json:"start_block_hash" db:"start_block_hash"`
    EndBlockNumber      uint64          `json:"end_block_number" db:"end_block_number"`
    EndBlockHash        string          `json:"end_block_hash" db:"end_block_hash"`
    StateRoot           string          `json:"state_root" db:"state_root"`
    TotalTxNum          uint64          `json:"total_tx_num" db:"total_tx_num"`
    TotalL1TxNum        uint64          `json:"total_l1_tx_num" db:"total_l1_tx_num"`
    TotalL2Gas          uint64          `json:"total_l2_gas" db:"total_l2_gas"`
    ProvingStatus       ProvingStatus   `json:"proving_status" db:"proving_status"`
    Proof               []byte          `json:"proof" db:"proof"`
-   InstanceCommitments []byte          `json:"instance_commitments" db:"instance_commitments"`
    ProofTimeSec        uint64          `json:"proof_time_sec" db:"proof_time_sec"`
    RollupStatus        RollupStatus    `json:"rollup_status" db:"rollup_status"`
    OracleStatus        GasOracleStatus `json:"oracle_status" db:"oracle_status"`
    CommitTxHash        sql.NullString  `json:"commit_tx_hash" db:"commit_tx_hash"`
    FinalizeTxHash      sql.NullString  `json:"finalize_tx_hash" db:"finalize_tx_hash"`
    OracleTxHash        sql.NullString  `json:"oracle_tx_hash" db:"oracle_tx_hash"`
    CreatedAt           *time.Time      `json:"created_at" db:"created_at"`
    ProverAssignedAt    *time.Time      `json:"prover_assigned_at" db:"prover_assigned_at"`
    ProvedAt            *time.Time      `json:"proved_at" db:"proved_at"`
    CommittedAt         *time.Time      `json:"committed_at" db:"committed_at"`
    FinalizedAt         *time.Time      `json:"finalized_at" db:"finalized_at"`
}
// AggTask is a wrapper type around db AggProveTask type.
@@ -269,6 +268,6 @@ type AggTask struct {
    EndBatchHash  string        `json:"end_batch_hash" db:"end_batch_hash"`
    ProvingStatus ProvingStatus `json:"proving_status" db:"proving_status"`
    Proof         []byte        `json:"proof" db:"proof"`
-   CreatedTime   *time.Time    `json:"created_time" db:"created_time"`
-   UpdatedTime   *time.Time    `json:"updated_time" db:"updated_time"`
+   CreatedAt     *time.Time    `json:"created_at" db:"created_at"`
+   UpdatedAt     *time.Time    `json:"updated_at" db:"updated_at"`
}
@@ -4,6 +4,8 @@ import (
    "crypto/ecdsa"
    "crypto/rand"
    "encoding/hex"
+   "errors"
+   "fmt"

    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/common/hexutil"
@@ -205,7 +207,7 @@ type TaskMsg struct {
    // For decentralization, basic rollers get block hashes from the coordinator, so that they can refer to the block hashes and fetch traces locally. Only applicable to basic rollers.
    BlockHashes []common.Hash `json:"block_hashes,omitempty"`
    // Only applicable for aggregator rollers.
-   SubProofs [][]byte `json:"sub_proofs,omitempty"`
+   SubProofs []*AggProof `json:"sub_proofs,omitempty"`
}
// ProofDetail is the message received from rollers that contains zk proof, the status of
@@ -237,3 +239,26 @@ type AggProof struct {
    Vk         []byte `json:"vk"`
    BlockCount uint   `json:"block_count"`
}

+// SanityCheck checks whether an AggProof is in a legal format
+// TODO: change to check Proof&Instance when upgrading to snark verifier v0.4
+func (ap *AggProof) SanityCheck() error {
+   if ap == nil {
+       return errors.New("agg_proof is nil")
+   }
+
+   if len(ap.Proof) == 0 {
+       return errors.New("proof not ready")
+   }
+   if len(ap.FinalPair) == 0 {
+       return errors.New("final_pair not ready")
+   }
+   if len(ap.Proof)%32 != 0 {
+       return fmt.Errorf("proof buffer has wrong length, expected a multiple of 32, got: %d", len(ap.Proof))
+   }
+   if len(ap.FinalPair)%32 != 0 {
+       return fmt.Errorf("final_pair buffer has wrong length, expected a multiple of 32, got: %d", len(ap.FinalPair))
+   }
+
+   return nil
+}
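As a usage illustration (a hypothetical caller, not code from this diff), the intended pattern is to gate on `SanityCheck` before handing a sub-proof to the verifier:

```go
package main

import (
    "fmt"

    "scroll-tech/common/types/message"
)

func main() {
    proof := &message.AggProof{
        Proof:     make([]byte, 64), // non-empty and a multiple of 32, so the check passes
        FinalPair: make([]byte, 32),
    }
    if err := proof.SanityCheck(); err != nil {
        fmt.Println("dropping malformed proof:", err)
        return
    }
    fmt.Println("proof shape OK, safe to hand to the verifier")
}
```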
@@ -5,7 +5,7 @@ import (
    "runtime/debug"
)

-var tag = "v3.1.2"
+var tag = "v3.3.4"

var commit = func() string {
    if info, ok := debug.ReadBuildInfo(); ok {
@@ -1,4 +1,5 @@
#!/usr/bin/env sh
. "$(dirname -- "$0")/_/husky.sh"

-yarn lint-staged && forge build && npx hardhat compile
+cd contracts
+yarn lint-staged
@@ -2,7 +2,6 @@

Note: For more comprehensive documentation, see [`./docs/`](./docs).

## Directory Structure

```
@@ -24,10 +23,8 @@ remappings.txt - "foundry dependency mappings"
...
```

## Dependencies

### Foundry

First run the command below to get foundryup, the Foundry toolchain installer:
@@ -42,24 +39,22 @@ Then, run `foundryup` in a new terminal session or after reloading your `PATH`.

Other ways to install Foundry can be found [here](https://github.com/foundry-rs/foundry#installation).

### Hardhat

```
yarn install
```

## Build

-+ Run `git submodule update --init --recursive` to initialise git submodules.
-+ Run `yarn prettier:solidity` to run linting in fix mode; this will auto-format all Solidity code.
-+ Run `yarn prettier` to run linting in fix mode; this will auto-format all TypeScript code.
-+ Run `forge build` to compile contracts with foundry.
-+ Run `npx hardhat compile` to compile with hardhat.
-+ Run `forge test -vvv` to run foundry unit tests. It will compile all contracts before running the unit tests.
-+ Run `npx hardhat test` to run integration tests. It may not compile all contracts before running; it's better to run `npx hardhat compile` first.
+- Run `git submodule update --init --recursive` to initialise git submodules.
+- Run `yarn prettier:solidity` to run linting in fix mode; this will auto-format all Solidity code.
+- Run `yarn prettier` to run linting in fix mode; this will auto-format all TypeScript code.
+- Run `yarn prepare` to install the precommit linting hook.
+- Run `forge build` to compile contracts with foundry.
+- Run `npx hardhat compile` to compile with hardhat.
+- Run `forge test -vvv` to run foundry unit tests. It will compile all contracts before running the unit tests.
+- Run `npx hardhat test` to run integration tests. It may not compile all contracts before running; it's better to run `npx hardhat compile` first.

## TODO
@@ -11,7 +11,8 @@
    "lint:sol": "./node_modules/.bin/prettier --write 'src/**/*.sol'",
    "lint:ts": "./node_modules/.bin/prettier --write 'integration-test/**/*.ts' 'scripts/**/*.ts' *.ts",
    "lint": "yarn lint:ts && yarn lint:sol",
-   "coverage": "hardhat coverage"
+   "coverage": "hardhat coverage",
+   "prepare": "cd .. && husky install contracts/.husky"
  },
  "devDependencies": {
    "@nomiclabs/hardhat-ethers": "^2.0.0",
contracts/scripts/foundry/DeployFallbackContracts.s.sol (new file, 27 lines)
@@ -0,0 +1,27 @@
// SPDX-License-Identifier: UNLICENSED
pragma solidity ^0.8.10;

import {Script} from "forge-std/Script.sol";
import {console} from "forge-std/console.sol";

import {Fallback} from "../../src/misc/Fallback.sol";

contract DeployFallbackContracts is Script {
    uint256 DEPLOYER_PRIVATE_KEY = vm.envUint("DEPLOYER_PRIVATE_KEY");
    uint256 NUM_CONTRACTS = vm.envUint("NUM_CONTRACTS");

    function run() external {
        vm.startBroadcast(DEPLOYER_PRIVATE_KEY);

        for (uint256 ii = 0; ii < NUM_CONTRACTS; ++ii) {
            Fallback fallbackContract = new Fallback();
            logAddress("FALLBACK", address(fallbackContract));
        }

        vm.stopBroadcast();
    }

    function logAddress(string memory name, address addr) internal view {
        console.log(string(abi.encodePacked(name, "=", vm.toString(address(addr)))));
    }
}
@@ -25,7 +25,7 @@ import {Whitelist} from "../../src/L2/predeploys/Whitelist.sol";
contract DeployL1BridgeContracts is Script {
    uint256 L1_DEPLOYER_PRIVATE_KEY = vm.envUint("L1_DEPLOYER_PRIVATE_KEY");

-   uint256 CHAIN_ID_L2 = vm.envUint("CHAIN_ID_L2");
+   uint32 CHAIN_ID_L2 = uint32(vm.envUint("CHAIN_ID_L2"));

    address L1_WETH_ADDR = vm.envAddress("L1_WETH_ADDR");
    address L2_WETH_ADDR = vm.envAddress("L2_WETH_ADDR");
@@ -19,7 +19,7 @@ async function main() {
  console.log("Using rollup proxy address:", rollupAddr);

  const ScrollChain = await ethers.getContractAt("ScrollChain", rollupAddr, deployer);
- const genesis = JSON.parse(fs.readFileSync(GENESIS_FILE_PATH, 'utf8'));
+ const genesis = JSON.parse(fs.readFileSync(GENESIS_FILE_PATH, "utf8"));
  console.log("Using genesis block:", genesis.blockHash);

  const tx = await ScrollChain.importGenesisBatch(genesis);
@@ -26,16 +26,16 @@ async function main() {
  const L2StandardERC20FactoryAddress = process.env.L2_SCROLL_STANDARD_ERC20_FACTORY_ADDR!;

  // if ((await L1StandardERC20Gateway.counterpart()) === constants.AddressZero) {
  const tx = await L1StandardERC20Gateway.initialize(
    L2StandardERC20GatewayAddress,
    L1GatewayRouterAddress,
    L1ScrollMessengerAddress,
    L2StandardERC20Impl,
    L2StandardERC20FactoryAddress
  );
  console.log("initialize L1StandardERC20Gateway, hash:", tx.hash);
  const receipt = await tx.wait();
  console.log(`✅ Done, gas used: ${receipt.gasUsed}`);
  // }
}
@@ -24,14 +24,14 @@ async function main() {
  const L2GatewayRouterAddress = process.env.L2_GATEWAY_ROUTER_PROXY_ADDR!;

  // if ((await L1GatewayRouter.counterpart()) === constants.AddressZero) {
  const tx = await L1GatewayRouter.initialize(
    L1StandardERC20GatewayAddress,
    L2GatewayRouterAddress,
    L1ScrollMessengerAddress
  );
  console.log("initialize L1GatewayRouter, hash:", tx.hash);
  const receipt = await tx.wait();
  console.log(`✅ Done, gas used: ${receipt.gasUsed}`);
  // }
}
@@ -22,10 +22,10 @@ async function main() {
  const ZKRollupAddress = addressFile.get("ZKRollup.proxy");

  // if ((await L1ScrollMessenger.rollup()) === constants.AddressZero) {
  const tx = await L1ScrollMessenger.initialize(ZKRollupAddress);
  console.log("initialize L1ScrollMessenger, hash:", tx.hash);
  const receipt = await tx.wait();
  console.log(`✅ Done, gas used: ${receipt.gasUsed}`);
  // }
}
@@ -25,15 +25,15 @@ async function main() {
  const L1StandardERC20GatewayAddress = process.env.L1_STANDARD_ERC20_GATEWAY_PROXY_ADDR!;

  // if ((await L2StandardERC20Gateway.counterpart()) === constants.AddressZero) {
  const tx = await L2StandardERC20Gateway.initialize(
    L1StandardERC20GatewayAddress,
    L2GatewayRouterAddress,
    L2ScrollMessengerAddress,
    L2StandardERC20FactoryAddress
  );
  console.log("initialize L2StandardERC20Gateway, hash:", tx.hash);
  const receipt = await tx.wait();
  console.log(`✅ Done, gas used: ${receipt.gasUsed}`);
  // }
}
@@ -24,14 +24,14 @@ async function main() {
  const L1GatewayRouterAddress = process.env.L1_GATEWAY_ROUTER_PROXY_ADDR!;

  // if ((await L2GatewayRouter.counterpart()) === constants.AddressZero) {
  const tx = await L2GatewayRouter.initialize(
    L2StandardERC20GatewayAddress,
    L1GatewayRouterAddress,
    L2ScrollMessengerAddress
  );
  console.log("initialize L2GatewayRouter, hash:", tx.hash);
  const receipt = await tx.wait();
  console.log(`✅ Done, gas used: ${receipt.gasUsed}`);
  // }
}
@@ -21,10 +21,10 @@ async function main() {
  const L2StandardERC20GatewayAddress = addressFile.get("L2StandardERC20Gateway.proxy");

  // if ((await ScrollStandardERC20Factory.owner()) !== L2StandardERC20GatewayAddress) {
  const tx = await ScrollStandardERC20Factory.transferOwnership(L2StandardERC20GatewayAddress);
  console.log("transfer ownership ScrollStandardERC20Factory, hash:", tx.hash);
  const receipt = await tx.wait();
  console.log(`✅ Done, gas used: ${receipt.gasUsed}`);
  // }
}
@@ -10,9 +10,7 @@ export function selectAddressFile(network: string) {
  }

  let filename: string;
- if (["hardhat",
-   "l1geth", "l2geth",
- ].includes(network)) {
+ if (["hardhat", "l1geth", "l2geth"].includes(network)) {
    filename = path.join(CONFIG_FILE_DIR, `${network}.json`);
  } else {
    throw new Error(`network ${network} not supported yet`);
@@ -22,4 +22,32 @@ interface IL1GatewayRouter is IL1ETHGateway, IL1ERC20Gateway {
    /// @param token The address of token updated.
    /// @param gateway The corresponding address of gateway updated.
    event SetERC20Gateway(address indexed token, address indexed gateway);
+
+   /*************************
+    * Public View Functions *
+    *************************/
+
+   /// @notice Return the corresponding gateway address for given token address.
+   /// @param _token The address of token to query.
+   function getERC20Gateway(address _token) external view returns (address);
+
+   /************************
+    * Restricted Functions *
+    ************************/
+
+   /// @notice Update the address of ETH gateway contract.
+   /// @dev This function should only be called by contract owner.
+   /// @param _ethGateway The address to update.
+   function setETHGateway(address _ethGateway) external;
+
+   /// @notice Update the address of default ERC20 gateway contract.
+   /// @dev This function should only be called by contract owner.
+   /// @param _defaultERC20Gateway The address to update.
+   function setDefaultERC20Gateway(address _defaultERC20Gateway) external;
+
+   /// @notice Update the mapping from token address to gateway address.
+   /// @dev This function should only be called by contract owner.
+   /// @param _tokens The list of addresses of tokens to update.
+   /// @param _gateways The list of addresses of gateways to update.
+   function setERC20Gateway(address[] memory _tokens, address[] memory _gateways) external;
}
@@ -68,8 +68,7 @@ contract L1GatewayRouter is OwnableUpgradeable, IL1GatewayRouter {
        return IL1ERC20Gateway(_gateway).getL2ERC20Address(_l1Address);
    }

-   /// @notice Return the corresponding gateway address for given token address.
-   /// @param _token The address of token to query.
+   /// @inheritdoc IL1GatewayRouter
    function getERC20Gateway(address _token) public view returns (address) {
        address _gateway = ERC20Gateway[_token];
        if (_gateway == address(0)) {
@@ -178,28 +177,21 @@ contract L1GatewayRouter is OwnableUpgradeable, IL1GatewayRouter {
     * Restricted Functions *
     ************************/

-   /// @notice Update the address of ETH gateway contract.
-   /// @dev This function should only be called by contract owner.
-   /// @param _ethGateway The address to update.
+   /// @inheritdoc IL1GatewayRouter
    function setETHGateway(address _ethGateway) external onlyOwner {
        ethGateway = _ethGateway;

        emit SetETHGateway(_ethGateway);
    }

-   /// @notice Update the address of default ERC20 gateway contract.
-   /// @dev This function should only be called by contract owner.
-   /// @param _defaultERC20Gateway The address to update.
+   /// @inheritdoc IL1GatewayRouter
    function setDefaultERC20Gateway(address _defaultERC20Gateway) external onlyOwner {
        defaultERC20Gateway = _defaultERC20Gateway;

        emit SetDefaultERC20Gateway(_defaultERC20Gateway);
    }

-   /// @notice Update the mapping from token address to gateway address.
-   /// @dev This function should only be called by contract owner.
-   /// @param _tokens The list of addresses of tokens to update.
-   /// @param _gateways The list of addresses of gateways to update.
+   /// @inheritdoc IL1GatewayRouter
    function setERC20Gateway(address[] memory _tokens, address[] memory _gateways) external onlyOwner {
        require(_tokens.length == _gateways.length, "length mismatch");
@@ -10,6 +10,7 @@ import {BatchHeaderV0Codec} from "../../libraries/codec/BatchHeaderV0Codec.sol";
import {ChunkCodec} from "../../libraries/codec/ChunkCodec.sol";
import {IRollupVerifier} from "../../libraries/verifier/IRollupVerifier.sol";

// solhint-disable no-inline-assembly
// solhint-disable reason-string

/// @title ScrollChain
@@ -39,7 +40,7 @@ contract ScrollChain is OwnableUpgradeable, IScrollChain {
     *************/

    /// @notice The chain id of the corresponding layer 2 chain.
-   uint256 public immutable layer2ChainId;
+   uint32 public immutable layer2ChainId;

    /*************
     * Variables *
@@ -83,7 +84,7 @@ contract ScrollChain is OwnableUpgradeable, IScrollChain {
     * Constructor *
     ***************/

-   constructor(uint256 _chainId) {
+   constructor(uint32 _chainId) {
        layer2ChainId = _chainId;
    }
@@ -295,7 +296,9 @@ contract ScrollChain is OwnableUpgradeable, IScrollChain {
        require(finalizedStateRoots[_batchIndex] == bytes32(0), "batch already verified");

        // compute public input hash
-       bytes32 _publicInputHash = keccak256(abi.encode(_prevStateRoot, _postStateRoot, _withdrawRoot, _dataHash));
+       bytes32 _publicInputHash = keccak256(
+           abi.encodePacked(layer2ChainId, _prevStateRoot, _postStateRoot, _withdrawRoot, _dataHash)
+       );

        // verify batch
        IRollupVerifier(verifier).verifyAggregateProof(_aggrProof, _publicInputHash);
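Note that switching from `abi.encode` to `abi.encodePacked` also changes the preimage layout: the `uint32` chain id is tightly packed into 4 big-endian bytes, so the preimage is 4 + 4*32 = 132 bytes, whereas the old form hashed four padded 32-byte words with no chain id at all. A minimal off-chain sketch of the same hash (our own illustration, assuming go-ethereum's `crypto` package; not code from this diff):

```go
package main

import (
    "encoding/binary"
    "fmt"

    "github.com/ethereum/go-ethereum/crypto"
)

// publicInputHash mirrors keccak256(abi.encodePacked(chainID, prev, post, withdraw, data)).
func publicInputHash(chainID uint32, prev, post, withdraw, data [32]byte) [32]byte {
    buf := make([]byte, 0, 4+4*32)
    buf = binary.BigEndian.AppendUint32(buf, chainID) // uint32 packs to exactly 4 bytes
    buf = append(buf, prev[:]...)
    buf = append(buf, post[:]...)
    buf = append(buf, withdraw[:]...)
    buf = append(buf, data[:]...)
    var out [32]byte
    copy(out[:], crypto.Keccak256(buf))
    return out
}

func main() {
    const exampleChainID = 1234 // placeholder value, not a real deployment's chain id
    fmt.Printf("%x\n", publicInputHash(exampleChainID, [32]byte{}, [32]byte{}, [32]byte{}, [32]byte{}))
}
```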
@@ -399,11 +402,13 @@ contract ScrollChain is OwnableUpgradeable, IScrollChain {
        bytes calldata _skippedL1MessageBitmap
    ) internal view returns (uint256 _totalNumL1MessagesInChunk) {
        uint256 chunkPtr;
+       uint256 startDataPtr;
        uint256 dataPtr;
        uint256 blockPtr;

        assembly {
            dataPtr := mload(0x40)
+           startDataPtr := dataPtr
            chunkPtr := add(_chunk, 0x20) // skip chunkLength
            blockPtr := add(chunkPtr, 1) // skip numBlocks
        }
@@ -411,15 +416,23 @@ contract ScrollChain is OwnableUpgradeable, IScrollChain {
        uint256 _numBlocks = ChunkCodec.validateChunkLength(chunkPtr, _chunk.length);

        // concatenate block contexts
+       uint256 _totalTransactionsInChunk;
        for (uint256 i = 0; i < _numBlocks; i++) {
            dataPtr = ChunkCodec.copyBlockContext(chunkPtr, dataPtr, i);
+           uint256 _numTransactionsInBlock = ChunkCodec.numTransactions(blockPtr);
+           unchecked {
+               _totalTransactionsInChunk += _numTransactionsInBlock;
+               blockPtr += ChunkCodec.BLOCK_CONTEXT_LENGTH;
+           }
        }

+       assembly {
+           mstore(0x40, add(dataPtr, mul(_totalTransactionsInChunk, 0x20))) // reserve memory for tx hashes
+           blockPtr := add(chunkPtr, 1) // reset block ptr
+       }

        // concatenate tx hashes
        uint256 l2TxPtr = ChunkCodec.l2TxPtr(chunkPtr, _numBlocks);

-       // avoid stack too deep on forge coverage
-       uint256 _totalTransactionsInChunk;
        while (_numBlocks > 0) {
            // concatenate l1 message hashes
            uint256 _numL1MessagesInBlock = ChunkCodec.numL1Messages(blockPtr);
@@ -443,7 +456,6 @@ contract ScrollChain is OwnableUpgradeable, IScrollChain {
        }

        unchecked {
-           _totalTransactionsInChunk += _numTransactionsInBlock;
            _totalNumL1MessagesInChunk += _numL1MessagesInBlock;
            _totalL1MessagesPoppedInBatch += _numL1MessagesInBlock;
            _totalL1MessagesPoppedOverall += _numL1MessagesInBlock;
@@ -464,9 +476,7 @@ contract ScrollChain is OwnableUpgradeable, IScrollChain {

        // compute data hash and store to memory
        assembly {
-           let startPtr := mload(0x40)
-           let dataHash := keccak256(startPtr, sub(dataPtr, startPtr))
+           let dataHash := keccak256(startDataPtr, sub(dataPtr, startDataPtr))
            mstore(memPtr, dataHash)
        }
@@ -2,6 +2,8 @@

pragma solidity ^0.8.0;

+// solhint-disable no-inline-assembly
+
/// @dev Below is the encoding for `BatchHeader` V0, total 89 + ceil(l1MessagePopped / 256) * 32 bytes.
/// ```text
/// * Field   Bytes   Type   Index   Comments
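The size rule in that comment is easy to cross-check against the `ScrollChainTest` changes later in this diff, which allocate `new bytes(89)`, `new bytes(89 + 32)`, and `new bytes(89 + 32 + 32)` for batches with 0, 1, and 264 popped L1 messages respectively. A sketch of the rule as a helper (our own illustration, not repo code):

```go
// batchHeaderV0Size returns the BatchHeader V0 byte length: 89 fixed bytes
// plus one 32-byte skipped-L1-message bitmap word per 256 popped L1 messages.
func batchHeaderV0Size(l1MessagePopped uint64) uint64 {
    return 89 + (l1MessagePopped+255)/256*32
}
```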
contracts/src/misc/Fallback.sol (new file, 36 lines)
@@ -0,0 +1,36 @@
// SPDX-License-Identifier: MIT

pragma solidity ^0.8.0;

import {Ownable} from "@openzeppelin/contracts/access/Ownable.sol";
import {IERC20} from "@openzeppelin/contracts/token/ERC20/IERC20.sol";
import {SafeERC20} from "@openzeppelin/contracts/token/ERC20/utils/SafeERC20.sol";

contract Fallback is Ownable {
    using SafeERC20 for IERC20;

    /// @notice Withdraw stuck tokens from this contract.
    /// @param _token The address of token to withdraw; use `address(0)` to withdraw ETH.
    /// @param _amount The amount of token to withdraw.
    /// @param _recipient The address of receiver.
    function withdraw(
        address _token,
        uint256 _amount,
        address _recipient
    ) external onlyOwner {
        if (_token == address(0)) {
            (bool _success, ) = _recipient.call{value: _amount}("");
            require(_success, "transfer ETH failed");
        } else {
            IERC20(_token).safeTransfer(_recipient, _amount);
        }
    }

    /// @notice Execute an arbitrary message.
    /// @param _target The address of contract to call.
    /// @param _data The calldata passed to target contract.
    function execute(address _target, bytes calldata _data) external payable onlyOwner {
        (bool _success, ) = _target.call{value: msg.value}(_data);
        require(_success, "call failed");
    }
}
@@ -10,6 +10,8 @@ import {ScrollChain, IScrollChain} from "../L1/rollup/ScrollChain.sol";
import {MockScrollChain} from "./mocks/MockScrollChain.sol";
import {MockRollupVerifier} from "./mocks/MockRollupVerifier.sol";

+// solhint-disable no-inline-assembly
+
contract ScrollChainTest is DSTestPlus {
    // from ScrollChain
    event UpdateSequencer(address indexed account, bool status);
@@ -42,35 +44,6 @@ contract ScrollChainTest is DSTestPlus {
        rollup.initialize(address(messageQueue), address(0), 100);
    }

-   /*
-   function testPublicInputHash() public {
-       IScrollChain.Batch memory batch;
-       batch.prevStateRoot = bytes32(0x000000000000000000000000000000000000000000000000000000000000cafe);
-       batch.newStateRoot = bytes32(0);
-       batch.withdrawTrieRoot = bytes32(0);
-       batch.l2Transactions = hex"0000007402f8710582fd14808506e38dccc9825208944d496ccc28058b1d74b7a19541663e21154f9c848801561db11e24a43380c080a0d890606d7a35b2ab0f9b866d62c092d5b163f3e6a55537ae1485aac08c3f8ff7a023997be2d32f53e146b160fff0ba81e81dbb4491c865ab174d15c5b3d28c41ae";
-
-       batch.blocks = new IScrollChain.BlockContext[](1);
-       batch.blocks[0].blockHash = bytes32(0);
-       batch.blocks[0].parentHash = bytes32(0);
-       batch.blocks[0].blockNumber = 51966;
-       batch.blocks[0].timestamp = 123456789;
-       batch.blocks[0].baseFee = 0;
-       batch.blocks[0].gasLimit = 10000000000000000;
-       batch.blocks[0].numTransactions = 1;
-       batch.blocks[0].numL1Messages = 0;
-
-       (bytes32 hash, , , ) = chain.computePublicInputHash(0, batch);
-       assertEq(hash, bytes32(0xa9f2ca3175794f91226a410ba1e60fff07a405c957562675c4149b77e659d805));
-
-       batch.l2Transactions = hex"00000064f8628001830f424094000000000000000000000000000000000000bbbb8080820a97a064e07cd8f939e2117724bdcbadc80dda421381cbc2a1f4e0d093d9cc5c5cf68ea03e264227f80852d88743cd9e43998f2746b619180366a87e4531debf9c3fa5dc";
-       (hash, , , ) = chain.computePublicInputHash(0, batch);
-       assertEq(hash, bytes32(0x398cb22bbfa1665c1b342b813267538a4c933d7f92d8bd9184aba0dd1122987b));
-   }
-   */

    function testCommitBatch() public {
        bytes memory batchHeader0 = new bytes(89);
@@ -260,13 +233,34 @@ contract ScrollChainTest is DSTestPlus {
        bytes memory chunk1;

        // commit batch1, one chunk with one block, 1 tx, 1 L1 message, no skip
        // => payload for data hash of chunk0
        //   0000000000000000
        //   0000000000000000
        //   0000000000000000000000000000000000000000000000000000000000000000
        //   0000000000000000
        //   0001
        //   50c3caa727394b95dc4885b7d25033ed22ac772b985fb274f2a7c0699a11346d
        // => data hash for chunk0
        //   bb88f47194a07d59ed17bc9b2015f83d0afea8f7892d9c5f0b6565563bf06b26
        // => data hash for all chunks
        //   038433daac85a0b03cd443ed50bc85e832c883061651ae2182b2984751e0b340
        // => payload for batch header
        //   00
        //   0000000000000002
        //   0000000000000001
        //   0000000000000001
        //   038433daac85a0b03cd443ed50bc85e832c883061651ae2182b2984751e0b340
        //   119b828c2a2798d2c957228ebeaff7e10bb099ae0d4e224f3eeb779ff61cba61
        //   0000000000000000000000000000000000000000000000000000000000000000
        // => hash for batch header
        //   cef70bf80683c4d9b8b2813e90c314e8c56648e231300b8cfed9d666b0caf14e
        bytes memory batchHeader1 = new bytes(89 + 32);
        assembly {
            mstore(add(batchHeader1, 0x20), 0) // version
            mstore(add(batchHeader1, add(0x20, 1)), shl(192, 1)) // batchIndex = 1
            mstore(add(batchHeader1, add(0x20, 9)), shl(192, 1)) // l1MessagePopped = 1
            mstore(add(batchHeader1, add(0x20, 17)), shl(192, 1)) // totalL1MessagePopped = 1
-           mstore(add(batchHeader1, add(0x20, 25)), 0xfe21c37fa013c76f86b8593ddf15d685a04b55061d06797597ee866f6ff2edf8) // dataHash
+           mstore(add(batchHeader1, add(0x20, 25)), 0x038433daac85a0b03cd443ed50bc85e832c883061651ae2182b2984751e0b340) // dataHash
            mstore(add(batchHeader1, add(0x20, 57)), batchHash0) // parentBatchHash
            mstore(add(batchHeader1, add(0x20, 89)), 0) // bitmap0
        }
@@ -280,9 +274,9 @@ contract ScrollChainTest is DSTestPlus {
        chunks[0] = chunk0;
        bitmap = new bytes(32);
        rollup.commitBatch(0, batchHeader0, chunks, bitmap);
-       assertGt(uint256(rollup.committedBatches(1)), 0);
        assertBoolEq(rollup.isBatchFinalized(1), false);
+       bytes32 batchHash1 = rollup.committedBatches(1);
+       assertEq(batchHash1, bytes32(0xcef70bf80683c4d9b8b2813e90c314e8c56648e231300b8cfed9d666b0caf14e));

        // finalize batch1
        rollup.finalizeBatchWithProof(
@@ -301,17 +295,57 @@ contract ScrollChainTest is DSTestPlus {

        // commit batch2 with two chunks, correctly
        // 1. chunk0 has one block, 3 tx, no L1 messages
        // => payload for chunk0
        //   0000000000000000
        //   0000000000000000
        //   0000000000000000000000000000000000000000000000000000000000000000
        //   0000000000000000
        //   0003
        //   ... (some tx hashes)
        // => data hash for chunk0
        //   2ac1dad3f3696e5581dfc10f2c7a7a8fc5b344285f7d332c7895a8825fca609a
        // 2. chunk1 has three blocks
        //   2.1 block0 has 5 tx, 3 L1 messages, no skips
        //   2.2 block1 has 10 tx, 5 L1 messages, even position is skipped.
        //   2.3 block2 has 300 tx, 256 L1 messages, odd position is skipped.
        // => payload for chunk1
        //   0000000000000000
        //   0000000000000000
        //   0000000000000000000000000000000000000000000000000000000000000000
        //   0000000000000000
        //   0005
        //   0000000000000000
        //   0000000000000000
        //   0000000000000000000000000000000000000000000000000000000000000000
        //   0000000000000000
        //   000a
        //   0000000000000000
        //   0000000000000000
        //   0000000000000000000000000000000000000000000000000000000000000000
        //   0000000000000000
        //   012c
        //   ... (some tx hashes)
        // => data hash for chunk1
        //   5c91563ee8be18cb94accfc83728f883ff5e3aa600fd0799e0a4e39afc7970b9
        // => data hash for all chunks
        //   bf38f308e0a87ed7bf92fa2da038fa1d59a7b9801eb0f6d487f8eef528632145
        // => payload for batch header
        //   00
        //   0000000000000002
        //   0000000000000108
        //   0000000000000109
        //   bf38f308e0a87ed7bf92fa2da038fa1d59a7b9801eb0f6d487f8eef528632145
        //   cef70bf80683c4d9b8b2813e90c314e8c56648e231300b8cfed9d666b0caf14e
        //   aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa800000000000000000000000000000000000000000000000000000000000000aa
        // => hash for batch header
        //   17fe6c12739f3a6261ae6db6486f41758dbd5d0508f19a5ca9ac37df67bbfec2
        bytes memory batchHeader2 = new bytes(89 + 32 + 32);
        assembly {
            mstore(add(batchHeader2, 0x20), 0) // version
            mstore(add(batchHeader2, add(0x20, 1)), shl(192, 2)) // batchIndex = 2
            mstore(add(batchHeader2, add(0x20, 9)), shl(192, 264)) // l1MessagePopped = 264
            mstore(add(batchHeader2, add(0x20, 17)), shl(192, 265)) // totalL1MessagePopped = 265
-           mstore(add(batchHeader2, add(0x20, 25)), 0xa447b3c80bc6c3ee1aebc1af746c7c6b35a05c4a7af89d2103e9e0788b54375c) // dataHash
+           mstore(add(batchHeader2, add(0x20, 25)), 0xbf38f308e0a87ed7bf92fa2da038fa1d59a7b9801eb0f6d487f8eef528632145) // dataHash
            mstore(add(batchHeader2, add(0x20, 57)), batchHash1) // parentBatchHash
            mstore(
                add(batchHeader2, add(0x20, 89)),
@@ -358,9 +392,9 @@ contract ScrollChainTest is DSTestPlus {
        }

        rollup.commitBatch(0, batchHeader1, chunks, bitmap);
-       assertGt(uint256(rollup.committedBatches(2)), 0);
        assertBoolEq(rollup.isBatchFinalized(2), false);
+       bytes32 batchHash2 = rollup.committedBatches(2);
+       assertEq(batchHash2, bytes32(0x17fe6c12739f3a6261ae6db6486f41758dbd5d0508f19a5ca9ac37df67bbfec2));

        // verify committed batch correctly
        rollup.finalizeBatchWithProof(
@@ -1,4 +1,4 @@
-.PHONY: lint docker clean coordinator
+.PHONY: lint docker clean coordinator mock_coordinator

IMAGE_NAME=coordinator
IMAGE_VERSION=latest
@@ -25,6 +25,9 @@ libzkp:
coordinator: libzkp ## Builds the Coordinator instance.
    go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator ./cmd

+mock_coordinator: ## Builds the mocked Coordinator instance.
+   go build -tags="mock_prover mock_verifier" -o $(PWD)/build/bin/coordinator ./cmd
+
test-verifier: libzkp
    go test -tags ffi -timeout 0 -v ./verifier

@@ -32,6 +35,7 @@ test-gpu-verifier: libzkp
    go test -tags="gpu ffi" -timeout 0 -v ./verifier

lint: ## Lint the files - used for CI
+   cp -r ../common/libzkp/interface ./verifier/lib
    GOBIN=$(PWD)/build/bin go run ../build/lint.go

clean: ## Empty out the bin folder
@@ -99,5 +99,5 @@ func (c *CoordinatorApp) MockConfig(store bool) error {
        return err
    }

-   return os.WriteFile(c.coordinatorFile, data, 0644)
+   return os.WriteFile(c.coordinatorFile, data, 0600)
}
@@ -1,6 +1,6 @@
module scroll-tech/coordinator

-go 1.18
+go 1.19

require (
    github.com/agiledragon/gomonkey/v2 v2.9.0
@@ -9,6 +9,7 @@ require (
    github.com/scroll-tech/go-ethereum v1.10.14-0.20230508165858-27a3830afa61
    github.com/stretchr/testify v1.8.2
    github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa
+   golang.org/x/exp v0.0.0-20230206171751-46f607a40771
    golang.org/x/sync v0.1.0
)
@@ -138,6 +138,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g=
golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0=
+golang.org/x/exp v0.0.0-20230206171751-46f607a40771 h1:xP7rWLUr1e1n2xkK5YB4LI0hPEy3LJC6Wk+D4pGlOJg=
+golang.org/x/exp v0.0.0-20230206171751-46f607a40771/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@@ -4,7 +4,6 @@ import (
    "context"
    "errors"
    "fmt"
-   mathrand "math/rand"
    "sync"
    "sync/atomic"
    "time"
@@ -16,6 +15,7 @@ import (
    "github.com/scroll-tech/go-ethereum/log"
    geth_metrics "github.com/scroll-tech/go-ethereum/metrics"
    "github.com/scroll-tech/go-ethereum/rpc"
+   "golang.org/x/exp/rand"

    "scroll-tech/common/metrics"
    "scroll-tech/common/types"
@@ -359,7 +359,7 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {

    // store proof content
    if msg.Type == message.BasicProve {
-       if dbErr = m.orm.UpdateProofByHash(m.ctx, msg.ID, msg.Proof.Proof, msg.Proof.FinalPair, proofTimeSec); dbErr != nil {
+       if dbErr = m.orm.UpdateProofByHash(m.ctx, msg.ID, msg.Proof, proofTimeSec); dbErr != nil {
            log.Error("failed to store basic proof into db", "error", dbErr)
            return dbErr
        }
@@ -505,7 +505,7 @@ func (m *Manager) CollectProofs(sess *session) {
    // When all rollers have finished submitting their tasks, select a winner among the rollers with valid proofs, then return and terminate the for loop.
    if finished && len(validRollers) > 0 {
        // Select a random index for this slice.
-       randIndex := mathrand.Intn(len(validRollers))
+       randIndex := rand.Int63n(int64(len(validRollers)))
        _ = validRollers[randIndex]
        // TODO: reward winner
@@ -569,39 +569,39 @@ func (m *Manager) APIs() []rpc.API {

// StartBasicProofGenerationSession starts a basic proof generation session
func (m *Manager) StartBasicProofGenerationSession(task *types.BlockBatch, prevSession *session) (success bool) {
-   var taskId string
+   var taskID string
    if task != nil {
-       taskId = task.Hash
+       taskID = task.Hash
    } else {
-       taskId = prevSession.info.ID
+       taskID = prevSession.info.ID
    }
    if m.GetNumberOfIdleRollers(message.BasicProve) == 0 {
-       log.Warn("no idle basic roller when starting proof generation session", "id", taskId)
+       log.Warn("no idle basic roller when starting proof generation session", "id", taskID)
        return false
    }

-   log.Info("start basic proof generation session", "id", taskId)
+   log.Info("start basic proof generation session", "id", taskID)

    defer func() {
        if !success {
            if task != nil {
-               if err := m.orm.UpdateProvingStatus(taskId, types.ProvingTaskUnassigned); err != nil {
-                   log.Error("fail to reset task_status as Unassigned", "id", taskId, "err", err)
+               if err := m.orm.UpdateProvingStatus(taskID, types.ProvingTaskUnassigned); err != nil {
+                   log.Error("fail to reset task_status as Unassigned", "id", taskID, "err", err)
                }
            } else {
-               if err := m.orm.UpdateProvingStatus(taskId, types.ProvingTaskFailed); err != nil {
-                   log.Error("fail to reset task_status as Failed", "id", taskId, "err", err)
+               if err := m.orm.UpdateProvingStatus(taskID, types.ProvingTaskFailed); err != nil {
+                   log.Error("fail to reset task_status as Failed", "id", taskID, "err", err)
                }
            }
        }
    }()

    // Get block traces.
-   blockInfos, err := m.orm.GetL2BlockInfos(map[string]interface{}{"batch_hash": taskId})
+   blockInfos, err := m.orm.GetL2BlockInfos(map[string]interface{}{"batch_hash": taskID})
    if err != nil {
        log.Error(
            "could not GetBlockInfos",
-           "batch_hash", taskId,
+           "batch_hash", taskID,
            "error", err,
        )
        return false
@@ -619,10 +619,10 @@ func (m *Manager) StartBasicProofGenerationSession(task *types.BlockBatch, prevS
            log.Info("selectRoller returns nil")
            break
        }
-       log.Info("roller is picked", "session id", taskId, "name", roller.Name, "public key", roller.PublicKey)
+       log.Info("roller is picked", "session id", taskID, "name", roller.Name, "public key", roller.PublicKey)
        // send trace to roller
-       if !roller.sendTask(&message.TaskMsg{ID: taskId, Type: message.BasicProve, BlockHashes: blockHashes}) {
-           log.Error("send task failed", "roller name", roller.Name, "public key", roller.PublicKey, "id", taskId)
+       if !roller.sendTask(&message.TaskMsg{ID: taskID, Type: message.BasicProve, BlockHashes: blockHashes}) {
+           log.Error("send task failed", "roller name", roller.Name, "public key", roller.PublicKey, "id", taskID)
            continue
        }
        m.updateMetricRollerProofsLastAssignedTimestampGauge(roller.PublicKey)
@@ -630,20 +630,20 @@ func (m *Manager) StartBasicProofGenerationSession(task *types.BlockBatch, prevS
    }
    // No roller assigned.
    if len(rollers) == 0 {
-       log.Error("no roller assigned", "id", taskId, "number of idle basic rollers", m.GetNumberOfIdleRollers(message.BasicProve))
+       log.Error("no roller assigned", "id", taskID, "number of idle basic rollers", m.GetNumberOfIdleRollers(message.BasicProve))
        return false
    }

    // Update session proving status as assigned.
-   if err = m.orm.UpdateProvingStatus(taskId, types.ProvingTaskAssigned); err != nil {
-       log.Error("failed to update task status", "id", taskId, "err", err)
+   if err = m.orm.UpdateProvingStatus(taskID, types.ProvingTaskAssigned); err != nil {
+       log.Error("failed to update task status", "id", taskID, "err", err)
        return false
    }

    // Create a proof generation session.
    sess := &session{
        info: &types.SessionInfo{
-           ID:             taskId,
+           ID:             taskID,
            Rollers:        rollers,
            ProveType:      message.BasicProve,
            StartTimestamp: time.Now().Unix(),
@@ -672,7 +672,7 @@ func (m *Manager) StartBasicProofGenerationSession(task *types.BlockBatch, prevS
    }

    m.mu.Lock()
-   m.sessions[taskId] = sess
+   m.sessions[taskID] = sess
    m.mu.Unlock()
    go m.CollectProofs(sess)
@@ -681,26 +681,26 @@ func (m *Manager) StartBasicProofGenerationSession(task *types.BlockBatch, prevS

// StartAggProofGenerationSession starts an aggregator proof generation.
func (m *Manager) StartAggProofGenerationSession(task *types.AggTask, prevSession *session) (success bool) {
-   var taskId string
+   var taskID string
    if task != nil {
-       taskId = task.ID
+       taskID = task.ID
    } else {
-       taskId = prevSession.info.ID
+       taskID = prevSession.info.ID
    }
    if m.GetNumberOfIdleRollers(message.AggregatorProve) == 0 {
-       log.Warn("no idle common roller when starting proof generation session", "id", taskId)
+       log.Warn("no idle common roller when starting proof generation session", "id", taskID)
        return false
    }

-   log.Info("start aggregator proof generation session", "id", taskId)
+   log.Info("start aggregator proof generation session", "id", taskID)

    defer func() {
        if !success {
            if task != nil {
-               if err := m.orm.UpdateAggTaskStatus(taskId, types.ProvingTaskUnassigned); err != nil {
-                   log.Error("fail to reset task_status as Unassigned", "id", taskId, "err", err)
-               } else if err := m.orm.UpdateAggTaskStatus(taskId, types.ProvingTaskFailed); err != nil {
-                   log.Error("fail to reset task_status as Failed", "id", taskId, "err", err)
+               if err := m.orm.UpdateAggTaskStatus(taskID, types.ProvingTaskUnassigned); err != nil {
+                   log.Error("fail to reset task_status as Unassigned", "id", taskID, "err", err)
+               } else if err := m.orm.UpdateAggTaskStatus(taskID, types.ProvingTaskFailed); err != nil {
+                   log.Error("fail to reset task_status as Failed", "id", taskID, "err", err)
                }
            }
        }
@@ -708,9 +708,9 @@ func (m *Manager) StartAggProofGenerationSession(task *types.AggTask, prevSessio
    }()

    // get agg task from db
-   subProofs, err := m.orm.GetSubProofsByAggTaskID(taskId)
+   subProofs, err := m.orm.GetSubProofsByAggTaskID(taskID)
    if err != nil {
-       log.Error("failed to get sub proofs for aggregator task", "id", taskId, "error", err)
+       log.Error("failed to get sub proofs for aggregator task", "id", taskID, "error", err)
        return false
    }

@@ -722,14 +722,14 @@ func (m *Manager) StartAggProofGenerationSession(task *types.AggTask, prevSessio
            log.Info("selectRoller returns nil")
            break
        }
-       log.Info("roller is picked", "session id", taskId, "name", roller.Name, "type", roller.Type, "public key", roller.PublicKey)
+       log.Info("roller is picked", "session id", taskID, "name", roller.Name, "type", roller.Type, "public key", roller.PublicKey)
        // send trace to roller
        if !roller.sendTask(&message.TaskMsg{
-           ID:        taskId,
+           ID:        taskID,
            Type:      message.AggregatorProve,
            SubProofs: subProofs,
        }) {
-           log.Error("send task failed", "roller name", roller.Name, "public key", roller.PublicKey, "id", taskId)
+           log.Error("send task failed", "roller name", roller.Name, "public key", roller.PublicKey, "id", taskID)
            continue
        }
        m.updateMetricRollerProofsLastAssignedTimestampGauge(roller.PublicKey)
@@ -737,20 +737,20 @@ func (m *Manager) StartAggProofGenerationSession(task *types.AggTask, prevSessio
    }
    // No roller assigned.
    if len(rollers) == 0 {
-       log.Error("no roller assigned", "id", taskId, "number of idle aggregator rollers", m.GetNumberOfIdleRollers(message.AggregatorProve))
+       log.Error("no roller assigned", "id", taskID, "number of idle aggregator rollers", m.GetNumberOfIdleRollers(message.AggregatorProve))
        return false
    }

    // Update session proving status as assigned.
-   if err = m.orm.UpdateAggTaskStatus(taskId, types.ProvingTaskAssigned); err != nil {
-       log.Error("failed to update task status", "id", taskId, "err", err)
+   if err = m.orm.UpdateAggTaskStatus(taskID, types.ProvingTaskAssigned); err != nil {
+       log.Error("failed to update task status", "id", taskID, "err", err)
        return false
    }

    // Create a proof generation session.
    sess := &session{
        info: &types.SessionInfo{
-           ID:             taskId,
+           ID:             taskID,
            Rollers:        rollers,
            ProveType:      message.AggregatorProve,
            StartTimestamp: time.Now().Unix(),
@@ -779,7 +779,7 @@ func (m *Manager) StartAggProofGenerationSession(task *types.AggTask, prevSessio
    }

    m.mu.Lock()
-   m.sessions[taskId] = sess
+   m.sessions[taskID] = sess
    m.mu.Unlock()
    go m.CollectProofs(sess)

@@ -797,7 +797,7 @@ func (m *Manager) VerifyToken(authMsg *message.AuthMsg) (bool, error) {
    pubkey, _ := authMsg.PublicKey()
    // GetValue returns nil if value is expired
    if token, ok := m.tokenCache.Get(pubkey); !ok || token != authMsg.Identity.Token {
-       return false, fmt.Errorf("failed to find corresponding token. roller name: %s. roller pk: %s.", authMsg.Identity.Name, pubkey)
+       return false, fmt.Errorf("failed to find corresponding token. roller name: %s. roller pk: %s", authMsg.Identity.Name, pubkey)
    }
    return true, nil
}
@@ -816,8 +816,8 @@ func (m *Manager) verifyProof(proof *message.AggProof) (bool, error) {
        return false, errors.New("coordinator has stopped before verification")
    }
    verifyResultChan := m.addVerifyTask(proof)
-   verifyResult := <-verifyResultChan
-   return verifyResult.result, verifyResult.err
+   result := <-verifyResultChan
+   return result.result, result.err
}

type verifyResult struct {
@@ -3,8 +3,8 @@
package verifier

/*
-#cgo LDFLAGS: -L${SRCDIR}/lib/ -lzkp -lm -ldl -lzktrie -L${SRCDIR}/lib/ -Wl,-rpath=${SRCDIR}/lib
-#cgo gpu LDFLAGS: -L${SRCDIR}/lib/ -lzkp -lm -ldl -lgmp -lstdc++ -lprocps -lzktrie -L/usr/local/cuda/lib64/ -L${SRCDIR}/lib/ -lcudart -Wl,-rpath=${SRCDIR}/lib
+#cgo LDFLAGS: -lzkp -lm -ldl -lzktrie -L${SRCDIR}/lib/ -Wl,-rpath=${SRCDIR}/lib
+#cgo gpu LDFLAGS: -lzkp -lm -ldl -lgmp -lstdc++ -lprocps -lzktrie -L/usr/local/cuda/lib64/ -lcudart -L${SRCDIR}/lib/ -Wl,-rpath=${SRCDIR}/lib
#include <stdlib.h>
#include "./lib/libzkp.h"
*/
@@ -21,6 +21,7 @@ import (
    "scroll-tech/common/types/message"
)

+// InvalidTestProof invalid proof used in tests
+const InvalidTestProof = "this is a invalid proof"

// Verifier represents a rust ffi to a halo2 verifier.
@@ -1,6 +1,6 @@
module scroll-tech/database

-go 1.18
+go 1.19

require (
    github.com/jmoiron/sqlx v1.3.5
@@ -13,8 +13,8 @@ create table l1_message
    layer1_hash VARCHAR NOT NULL,
    layer2_hash VARCHAR DEFAULT NULL,
    status INTEGER DEFAULT 1,
-   created_time TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
-   updated_time TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP
+   created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
+   updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP
);

comment
@@ -32,7 +32,7 @@ create index l1_message_height_index
CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$
BEGIN
-   NEW.updated_time = CURRENT_TIMESTAMP;
+   NEW.updated_at = CURRENT_TIMESTAMP;
    RETURN NEW;
END;
$$ language 'plpgsql';
@@ -13,8 +13,8 @@ create table l2_message
    layer1_hash VARCHAR DEFAULT NULL,
    proof TEXT DEFAULT NULL,
    status INTEGER DEFAULT 1,
-   created_time TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
-   updated_time TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP
+   created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
+   updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP
);

comment
@@ -32,7 +32,7 @@ create index l2_message_height_index
CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$
BEGIN
-   NEW.updated_time = CURRENT_TIMESTAMP;
+   NEW.updated_at = CURRENT_TIMESTAMP;
    RETURN NEW;
END;
$$ language 'plpgsql';
@@ -16,7 +16,6 @@ create table block_batch
    total_l2_gas BIGINT NOT NULL,
    proving_status INTEGER DEFAULT 1,
    proof BYTEA DEFAULT NULL,
-   instance_commitments BYTEA DEFAULT NULL,
    proof_time_sec INTEGER DEFAULT 0,
    rollup_status INTEGER DEFAULT 1,
    commit_tx_hash VARCHAR DEFAULT NULL,
@@ -10,8 +10,8 @@ create table agg_task
    end_batch_hash VARCHAR NOT NULL,
    proving_status SMALLINT DEFAULT 1,
    proof BYTEA DEFAULT NULL,
-   created_time TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
-   updated_time TIMESTAMP(0) DEFAULT CURRENT_TIMESTAMP
+   created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
+   updated_at TIMESTAMP(0) DEFAULT CURRENT_TIMESTAMP
);

create unique index agg_task_hash_uindex
@@ -21,7 +21,7 @@ create unique index agg_task_hash_uindex
CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$
BEGIN
-   NEW.updated_time = CURRENT_TIMESTAMP;
+   NEW.updated_at = CURRENT_TIMESTAMP;
    RETURN NEW;
END;
$$ language 'plpgsql';
@@ -20,7 +20,7 @@ func NewAggTaskOrm(db *sqlx.DB) AggTaskOrm {
    return &aggTaskOrm{db: db}
}

-func (a *aggTaskOrm) GetSubProofsByAggTaskID(id string) ([][]byte, error) {
+func (a *aggTaskOrm) GetSubProofsByAggTaskID(id string) ([]*message.AggProof, error) {
    var (
        startIdx uint64
        endIdx   uint64
@@ -34,14 +34,22 @@ func (a *aggTaskOrm) GetSubProofsByAggTaskID(id string) ([][]byte, error) {
    if err != nil {
        return nil, err
    }
-   var subProofs [][]byte
+   defer func() { _ = rows.Close() }()
+
+   var subProofs []*message.AggProof
    for rows.Next() {
        var proofByt []byte
        err = rows.Scan(&proofByt)
        if err != nil {
            return nil, err
        }
-       subProofs = append(subProofs, proofByt)
+
+       var proof message.AggProof
+       if err := json.Unmarshal(proofByt, &proof); err != nil {
+           return nil, err
+       }
+
+       subProofs = append(subProofs, &proof)
    }
    return subProofs, nil
}
@@ -3,6 +3,7 @@ package orm
import (
    "context"
    "database/sql"
+   "encoding/json"
    "errors"
    "fmt"
    "strings"
@@ -12,6 +13,7 @@ import (
    "github.com/scroll-tech/go-ethereum/log"

    "scroll-tech/common/types"
+   "scroll-tech/common/types/message"
)
type blockBatchOrm struct {
@@ -37,6 +39,7 @@ func (o *blockBatchOrm) GetBlockBatches(fields map[string]interface{}, args ...s
    if err != nil {
        return nil, err
    }
+   defer func() { _ = rows.Close() }()

    var batches []*types.BlockBatch
    for rows.Next() {
@@ -50,7 +53,7 @@ func (o *blockBatchOrm) GetBlockBatches(fields map[string]interface{}, args ...s
        return nil, err
    }

-   return batches, rows.Close()
+   return batches, nil
}
func (o *blockBatchOrm) GetProvingStatusByHash(hash string) (types.ProvingStatus, error) {
|
||||
@@ -62,22 +65,31 @@ func (o *blockBatchOrm) GetProvingStatusByHash(hash string) (types.ProvingStatus
 	return status, nil
 }
 
-func (o *blockBatchOrm) GetVerifiedProofAndInstanceCommitmentsByHash(hash string) ([]byte, []byte, error) {
-	var proof []byte
-	var instanceCommitments []byte
-	row := o.db.QueryRow(`SELECT proof, instance_commitments FROM block_batch WHERE hash = $1 and proving_status = $2`, hash, types.ProvingTaskVerified)
-
-	if err := row.Scan(&proof, &instanceCommitments); err != nil {
-		return nil, nil, err
+func (o *blockBatchOrm) GetVerifiedProofByHash(hash string) (*message.AggProof, error) {
+	var proofBytes []byte
+	row := o.db.QueryRow(`SELECT proof FROM block_batch WHERE hash = $1 and proving_status = $2`, hash, types.ProvingTaskVerified)
+	if err := row.Scan(&proofBytes); err != nil {
+		return nil, err
 	}
-	return proof, instanceCommitments, nil
+
+	var proof message.AggProof
+	if err := json.Unmarshal(proofBytes, &proof); err != nil {
+		return nil, err
+	}
+
+	return &proof, nil
 }
 
-func (o *blockBatchOrm) UpdateProofByHash(ctx context.Context, hash string, proof, instanceCommitments []byte, proofTimeSec uint64) error {
+func (o *blockBatchOrm) UpdateProofByHash(ctx context.Context, hash string, proof *message.AggProof, proofTimeSec uint64) error {
+	proofBytes, err := json.Marshal(proof)
+	if err != nil {
+		return err
+	}
+
 	db := o.db
 	if _, err := db.ExecContext(ctx,
-		db.Rebind(`UPDATE block_batch set proof = ?, instance_commitments = ?, proof_time_sec = ? where hash = ?;`),
-		proof, instanceCommitments, proofTimeSec, hash,
+		db.Rebind(`UPDATE block_batch set proof = ?, proof_time_sec = ? where hash = ?;`),
+		proofBytes, proofTimeSec, hash,
 	); err != nil {
 		log.Error("failed to update proof", "err", err)
 	}
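Both replacement methods keep the JSON encoding at the storage boundary, so callers now pass and receive `*message.AggProof` directly instead of separate proof and instance-commitment byte slices. A hedged round-trip sketch against the updated signatures (the `ProofStore` interface subset and the proof-time value are illustrative; reading back only succeeds once `proving_status` is `types.ProvingTaskVerified`, since the query above filters on it):

```go
package main

import (
	"context"

	"scroll-tech/common/types/message"
)

// ProofStore is the subset of BlockBatchOrm touched by this hunk.
type ProofStore interface {
	UpdateProofByHash(ctx context.Context, hash string, proof *message.AggProof, proofTimeSec uint64) error
	GetVerifiedProofByHash(hash string) (*message.AggProof, error)
}

// storeAndReload sketches the new round trip: UpdateProofByHash marshals the
// proof to JSON for the `proof` column, and GetVerifiedProofByHash unmarshals
// it back after the batch has been marked verified elsewhere.
func storeAndReload(ctx context.Context, s ProofStore, hash string, proof *message.AggProof) (*message.AggProof, error) {
	if err := s.UpdateProofByHash(ctx, hash, proof, 42 /* proofTimeSec, illustrative */); err != nil {
		return nil, err
	}
	return s.GetVerifiedProofByHash(hash)
}
```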
@@ -149,6 +161,7 @@ func (o *blockBatchOrm) GetPendingBatches(limit uint64) ([]string, error) {
 	if err != nil {
 		return nil, err
 	}
+	defer func() { _ = rows.Close() }()
 
 	var hashes []string
 	for rows.Next() {
@@ -164,7 +177,7 @@ func (o *blockBatchOrm) GetPendingBatches(limit uint64) ([]string, error) {
 		return nil, err
 	}
 
-	return hashes, rows.Close()
+	return hashes, nil
 }
 
 func (o *blockBatchOrm) GetLatestBatch() (*types.BlockBatch, error) {
@@ -208,6 +221,7 @@ func (o *blockBatchOrm) GetCommittedBatches(limit uint64) ([]string, error) {
 	if err != nil {
 		return nil, err
 	}
+	defer func() { _ = rows.Close() }()
 
 	var hashes []string
 	for rows.Next() {
@@ -223,7 +237,7 @@ func (o *blockBatchOrm) GetCommittedBatches(limit uint64) ([]string, error) {
 		return nil, err
 	}
 
-	return hashes, rows.Close()
+	return hashes, nil
 }
 
 func (o *blockBatchOrm) GetRollupStatus(hash string) (types.RollupStatus, error) {
@@ -248,6 +262,10 @@ func (o *blockBatchOrm) GetRollupStatusByHashList(hashes []string) ([]types.Roll
 	query = o.db.Rebind(query)
 
 	rows, err := o.db.Query(query, args...)
+	if err != nil {
+		return nil, err
+	}
+	defer func() { _ = rows.Close() }()
 
 	statusMap := make(map[string]types.RollupStatus)
 	for rows.Next() {
@@ -258,6 +276,10 @@ func (o *blockBatchOrm) GetRollupStatusByHashList(hashes []string) ([]types.Roll
 		}
 		statusMap[hash] = status
 	}
+	if err = rows.Err(); err != nil {
+		return nil, err
+	}
+
 	var statuses []types.RollupStatus
 	if err != nil {
 		return statuses, err
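The second hunk adds a `rows.Err()` check after the scan loop. This matters because `rows.Next()` returns `false` both at end-of-rows and on error, so without the check a mid-iteration failure is silently treated as a short result set. A minimal sketch of the idiom in isolation:

```go
package main

import "database/sql"

// collectStatuses mirrors the check added in this hunk: after the loop,
// rows.Err() surfaces any error that terminated iteration early, which
// rows.Next() alone does not distinguish from a normal end-of-rows.
func collectStatuses(rows *sql.Rows) ([]string, error) {
	defer func() { _ = rows.Close() }()

	var statuses []string
	for rows.Next() {
		var s string
		if err := rows.Scan(&s); err != nil {
			return nil, err
		}
		statuses = append(statuses, s)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return statuses, nil
}
```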
@@ -329,6 +351,7 @@ func (o *blockBatchOrm) GetAssignedBatchHashes() ([]string, error) {
 	if err != nil {
 		return nil, err
 	}
+	defer func() { _ = rows.Close() }()
 
 	var hashes []string
 	for rows.Next() {
@@ -339,7 +362,7 @@ func (o *blockBatchOrm) GetAssignedBatchHashes() ([]string, error) {
 		hashes = append(hashes, hash)
 	}
 
-	return hashes, rows.Close()
+	return hashes, nil
 }
 
 func (o *blockBatchOrm) GetBatchCount() (int64, error) {
@@ -63,6 +63,7 @@ func (o *blockTraceOrm) GetL2WrappedBlocks(fields map[string]interface{}, args .
 	if err != nil {
 		return nil, err
 	}
+	defer func() { _ = rows.Close() }()
 
 	var wrappedBlocks []*types.WrappedBlock
 	for rows.Next() {
@@ -81,7 +82,7 @@ func (o *blockTraceOrm) GetL2WrappedBlocks(fields map[string]interface{}, args .
 		return nil, err
 	}
 
-	return wrappedBlocks, rows.Close()
+	return wrappedBlocks, nil
 }
 
 func (o *blockTraceOrm) GetL2BlockInfos(fields map[string]interface{}, args ...string) ([]*types.BlockInfo, error) {
@@ -96,6 +97,7 @@ func (o *blockTraceOrm) GetL2BlockInfos(fields map[string]interface{}, args ...s
 	if err != nil {
 		return nil, err
 	}
+	defer func() { _ = rows.Close() }()
 
 	var blocks []*types.BlockInfo
 	for rows.Next() {
@@ -109,7 +111,7 @@ func (o *blockTraceOrm) GetL2BlockInfos(fields map[string]interface{}, args ...s
 		return nil, err
 	}
 
-	return blocks, rows.Close()
+	return blocks, nil
 }
 
 func (o *blockTraceOrm) GetUnbatchedL2Blocks(fields map[string]interface{}, args ...string) ([]*types.BlockInfo, error) {
@@ -124,6 +126,7 @@ func (o *blockTraceOrm) GetUnbatchedL2Blocks(fields map[string]interface{}, args
 	if err != nil {
 		return nil, err
 	}
+	defer func() { _ = rows.Close() }()
 
 	var blocks []*types.BlockInfo
 	for rows.Next() {
@@ -137,7 +140,7 @@ func (o *blockTraceOrm) GetUnbatchedL2Blocks(fields map[string]interface{}, args
 		return nil, err
 	}
 
-	return blocks, rows.Close()
+	return blocks, nil
 }
 
 func (o *blockTraceOrm) GetL2BlockHashByNumber(number uint64) (*common.Hash, error) {
@@ -4,11 +4,11 @@ import (
 	"context"
 	"database/sql"
 
-	"scroll-tech/common/types"
-	"scroll-tech/common/types/message"
-
 	"github.com/jmoiron/sqlx"
 	"github.com/scroll-tech/go-ethereum/common"
+
+	"scroll-tech/common/types"
+	"scroll-tech/common/types/message"
 )
 
 // L1BlockOrm l1_block operation interface
@@ -50,7 +50,7 @@ type SessionInfoOrm interface {
 type AggTaskOrm interface {
 	GetAssignedAggTasks() ([]*types.AggTask, error)
 	GetUnassignedAggTasks() ([]*types.AggTask, error)
-	GetSubProofsByAggTaskID(id string) ([][]byte, error)
+	GetSubProofsByAggTaskID(id string) ([]*message.AggProof, error)
 	InsertAggTask(id string, startBatchIndex uint64, startBatchHash string, endBatchIndex uint64, endBatchHash string) error
 	UpdateAggTaskStatus(aggTaskID string, status types.ProvingStatus) error
 	UpdateProofForAggTask(aggTaskID string, proof *message.AggProof) error
@@ -60,8 +60,8 @@ type AggTaskOrm interface {
 type BlockBatchOrm interface {
 	GetBlockBatches(fields map[string]interface{}, args ...string) ([]*types.BlockBatch, error)
 	GetProvingStatusByHash(hash string) (types.ProvingStatus, error)
-	GetVerifiedProofAndInstanceCommitmentsByHash(hash string) ([]byte, []byte, error)
-	UpdateProofByHash(ctx context.Context, hash string, proof, instanceCommitments []byte, proofTimeSec uint64) error
+	GetVerifiedProofByHash(hash string) (*message.AggProof, error)
+	UpdateProofByHash(ctx context.Context, hash string, proof *message.AggProof, proofTimeSec uint64) error
 	UpdateProvingStatus(hash string, status types.ProvingStatus) error
 	ResetProvingStatusFor(before types.ProvingStatus) error
 	NewBatchInDBTx(dbTx *sqlx.Tx, batchData *types.BatchData) error
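Because the concrete structs changed in the earlier hunks must track these interface declarations in lockstep, a compile-time assertion is one way to catch drift early. A minimal sketch, assuming it lives in the same `orm` package; the diff does not show whether the codebase actually uses this idiom:

```go
// Compile-time checks: these declarations fail to build if aggTaskOrm or
// blockBatchOrm stops satisfying the updated interfaces.
var (
	_ AggTaskOrm    = (*aggTaskOrm)(nil)
	_ BlockBatchOrm = (*blockBatchOrm)(nil)
)
```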
@@ -37,6 +37,7 @@ func (l *l1BlockOrm) GetL1BlockInfos(fields map[string]interface{}, args ...stri
 	if err != nil {
 		return nil, err
 	}
+	defer func() { _ = rows.Close() }()
 
 	var blocks []*types.L1BlockInfo
 	for rows.Next() {
@@ -50,7 +51,7 @@ func (l *l1BlockOrm) GetL1BlockInfos(fields map[string]interface{}, args ...stri
 		return nil, err
 	}
 
-	return blocks, rows.Close()
+	return blocks, nil
 }
 
 func (l *l1BlockOrm) InsertL1Blocks(ctx context.Context, blocks []*types.L1BlockInfo) error {
@@ -52,6 +52,7 @@ func (m *l1MessageOrm) GetL1MessagesByStatus(status types.MsgStatus, limit uint6
 	if err != nil {
 		return nil, err
 	}
+	defer func() { _ = rows.Close() }()
 
 	var msgs []*types.L1Message
 	for rows.Next() {
@@ -67,7 +68,7 @@ func (m *l1MessageOrm) GetL1MessagesByStatus(status types.MsgStatus, limit uint6
 		return nil, err
 	}
 
-	return msgs, rows.Close()
+	return msgs, nil
 }
 
 // GetL1ProcessedQueueIndex fetch latest processed message queue_index
@@ -103,6 +103,7 @@ func (m *layer2MessageOrm) GetL2Messages(fields map[string]interface{}, args ...
 	if err != nil {
 		return nil, err
 	}
+	defer func() { _ = rows.Close() }()
 
 	var msgs []*types.L2Message
 	for rows.Next() {
@@ -118,7 +119,7 @@ func (m *layer2MessageOrm) GetL2Messages(fields map[string]interface{}, args ...
 		return nil, err
 	}
 
-	return msgs, rows.Close()
+	return msgs, nil
 }
 
 // SaveL2Messages batch save a list of layer2 messages
Some files were not shown because too many files have changed in this diff.