Mirror of https://github.com/scroll-tech/scroll.git, synced 2026-01-12 07:28:08 -05:00

Compare commits (3 commits): v3.3.4 ... local_disa
| Author | SHA1 | Date |
|---|---|---|
| | d10816de6a | |
| | c3b1713d21 | |
| | 87974b560a | |
14 .github/pull_request_template.md (vendored)
@@ -1,9 +1,9 @@
-### Purpose or design rationale of this PR
+## 1. Purpose or design rationale of this PR

 *Describe your change. Make sure to answer these three questions: What does this PR do? Why does it do it? How does it do it?*

 ...

-### PR title
+## 2. PR title

 Your PR title must follow [conventional commits](https://www.conventionalcommits.org/en/v1.0.0/#summary) (as we are doing squash merge for each PR), so it must start with one of the following [types](https://github.com/angular/angular/blob/22b96b9/CONTRIBUTING.md#type):

@@ -18,17 +18,17 @@ Your PR title must follow [conventional commits](https://www.conventionalcommits
 - [ ] test: Adding missing tests or correcting existing tests

-### Deployment tag versioning
+## 3. Deployment tag versioning

 Has `tag` in `common/version.go` been updated?

-- [ ] No, this PR doesn't involve a new deployment, git tag, docker image tag
+- [ ] This PR doesn't involve a new deployment, git tag, docker image tag
 - [ ] Yes

-### Breaking change label
+## 4. Breaking change label

 Does this PR have the `breaking-change` label?

-- [ ] No, this PR is not a breaking change
+- [ ] This PR is not a breaking change
 - [ ] Yes
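A few hypothetical PR titles that would pass the conventional-commits rule quoted above (illustrative examples only, not taken from this compare):

```
feat(bridge): add batch index fetcher
fix(coordinator): reject duplicate proof submissions
test(database): add orm coverage for block traces
```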
90 .github/workflows/bridge.yml (vendored)
@@ -9,8 +9,6 @@ on:
      - alpha
    paths:
      - 'bridge/**'
      - 'common/**'
      - 'database/**'
      - '.github/workflows/bridge.yml'
  pull_request:
    types:
@@ -20,61 +18,14 @@ on:
      - ready_for_review
    paths:
      - 'bridge/**'
      - 'common/**'
      - 'database/**'
      - '.github/workflows/bridge.yml'

defaults:
  run:
    working-directory: 'bridge'

jobs:
  check:
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    steps:
      - name: Install Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.19.x
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Install Solc
        uses: supplypike/setup-bin@v3
        with:
          uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.16/solc-static-linux'
          name: 'solc'
          version: '0.8.16'
      - name: Install Geth Tools
        uses: gacts/install-geth-tools@v1
      - name: Lint
        working-directory: 'bridge'
        run: |
          rm -rf $HOME/.cache/golangci-lint
          make mock_abi
          make lint
  goimports-lint:
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    steps:
      - name: Install Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.19.x
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Install goimports
        run: go install golang.org/x/tools/cmd/goimports
      - name: Run goimports lint
        run: goimports -local scroll-tech/bridge/ -w .
        working-directory: 'bridge'
      - name: Run go mod tidy
        run: go mod tidy
        working-directory: 'bridge'
      # If there are any diffs from goimports or go mod tidy, fail.
      - name: Verify no changes from goimports and go mod tidy
        working-directory: 'bridge'
        run: |
          if [ -n "$(git status --porcelain)" ]; then
            exit 1
          fi
  tests:
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    steps:
@@ -92,18 +43,31 @@ jobs:
          version: '0.8.16'
      - name: Install Geth Tools
        uses: gacts/install-geth-tools@v1
      - name: Build prerequisites
      - name: Lint
        run: |
          make dev_docker
          make -C bridge mock_abi
      - name: Build bridge binaries
        working-directory: 'bridge'
          rm -rf $HOME/.cache/golangci-lint
          make mock_abi
          make lint
  goimports-lint:
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    steps:
      - name: Install Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.18.x
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Install goimports
        run: go install golang.org/x/tools/cmd/goimports
      - run: goimports -local scroll-tech/bridge/ -w .
      - run: go mod tidy
      # If there are any diffs from goimports or go mod tidy, fail.
      - name: Verify no changes from goimports and go mod tidy
        run: |
          make bridge_bins
      - name: Test bridge packages
        working-directory: 'bridge'
        run: |
          go test -v -race -gcflags="-l" -ldflags="-s=false" -covermode=atomic ./...
          if [ -n "$(git status --porcelain)" ]; then
            exit 1
          fi
# docker-build:
#   if: github.event.pull_request.draft == false
#   runs-on: ubuntu-latest
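The "Verify no changes from goimports and go mod tidy" step in these workflows relies on `git status --porcelain` printing nothing when the tree is clean. Expressed in Go for clarity (a sketch of the same check, not code from this repository):

```go
package main

import (
	"bytes"
	"os"
	"os/exec"
)

// After goimports and go mod tidy have run, any file they modified shows up
// in `git status --porcelain`; non-empty output means the commit was not
// properly formatted, so the job exits non-zero and CI fails.
func main() {
	out, err := exec.Command("git", "status", "--porcelain").Output()
	if err != nil || len(bytes.TrimSpace(out)) > 0 {
		os.Exit(1)
	}
}
```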
9 .github/workflows/bridge_history_api.yml (vendored)
@@ -1,4 +1,4 @@
-name: BridgeHistoryAPI
+name: BridgeHistoryApi

on:
  push:
@@ -32,7 +32,7 @@ jobs:
#      - name: Install Go
#        uses: actions/setup-go@v2
#        with:
-#          go-version: 1.19.x
+#          go-version: 1.20.x
#      - name: Checkout code
#        uses: actions/checkout@v2
#      - name: Lint
@@ -46,7 +46,7 @@ jobs:
      - name: Install Go
        uses: actions/setup-go@v2
        with:
-          go-version: 1.19.x
+          go-version: 1.20.x
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Test
@@ -60,7 +60,7 @@ jobs:
      - name: Install Go
        uses: actions/setup-go@v2
        with:
-          go-version: 1.19.x
+          go-version: 1.20.x
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Install goimports
@@ -73,3 +73,4 @@ jobs:
          if [ -n "$(git status --porcelain)" ]; then
            exit 1
          fi
+
43 .github/workflows/common.yml (vendored)
@@ -20,6 +20,10 @@ on:
      - 'common/**'
      - '.github/workflows/common.yml'

+defaults:
+  run:
+    working-directory: 'common'
+
jobs:
  check:
    if: github.event.pull_request.draft == false
@@ -33,7 +37,7 @@ jobs:
      - name: Install Go
        uses: actions/setup-go@v2
        with:
-          go-version: 1.19.x
+          go-version: 1.18.x
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Cache cargo
@@ -41,7 +45,6 @@ jobs:
        with:
          workspaces: "common/libzkp/impl -> target"
      - name: Lint
-        working-directory: 'common'
        run: |
          rm -rf $HOME/.cache/golangci-lint
          make lint
@@ -52,46 +55,16 @@ jobs:
      - name: Install Go
        uses: actions/setup-go@v2
        with:
-          go-version: 1.19.x
+          go-version: 1.18.x
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Install goimports
        run: go install golang.org/x/tools/cmd/goimports
-      - name: Run goimports lint
-        working-directory: 'common'
-        run: goimports -local scroll-tech/common/ -w .
-      - name: Run go mod tidy
-        working-directory: 'common'
-        run: go mod tidy
+      - run: goimports -local scroll-tech/common/ -w .
+      - run: go mod tidy
      # If there are any diffs from goimports or go mod tidy, fail.
      - name: Verify no changes from goimports and go mod tidy
-        working-directory: 'common'
        run: |
          if [ -n "$(git status --porcelain)" ]; then
            exit 1
          fi
-  tests:
-    if: github.event.pull_request.draft == false
-    runs-on: ubuntu-latest
-    steps:
-      - name: Install Go
-        uses: actions/setup-go@v2
-        with:
-          go-version: 1.18.x
-      - name: Checkout code
-        uses: actions/checkout@v2
-      - name: Install Solc
-        uses: supplypike/setup-bin@v3
-        with:
-          uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.16/solc-static-linux'
-          name: 'solc'
-          version: '0.8.16'
-      - name: Install Geth Tools
-        uses: gacts/install-geth-tools@v1
-      - name: Build prerequisites
-        run: |
-          make dev_docker
-      - name: Test common packages
-        working-directory: 'common'
-        run: |
-          go test -v -race -gcflags="-l" -ldflags="-s=false" -covermode=atomic ./...
47 .github/workflows/coordinator.yml (vendored)
@@ -9,8 +9,6 @@ on:
      - alpha
    paths:
      - 'coordinator/**'
      - 'common/**'
      - 'database/**'
      - '.github/workflows/coordinator.yml'
  pull_request:
    types:
@@ -20,10 +18,12 @@ on:
      - ready_for_review
    paths:
      - 'coordinator/**'
      - 'common/**'
      - 'database/**'
      - '.github/workflows/coordinator.yml'

+defaults:
+  run:
+    working-directory: 'coordinator'
+
jobs:
  check:
    if: github.event.pull_request.draft == false
@@ -37,11 +37,10 @@ jobs:
      - name: Install Go
        uses: actions/setup-go@v2
        with:
-          go-version: 1.19.x
+          go-version: 1.18.x
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Lint
-        working-directory: 'coordinator'
        run: |
          rm -rf $HOME/.cache/golangci-lint
          make lint
@@ -52,20 +51,15 @@ jobs:
      - name: Install Go
        uses: actions/setup-go@v2
        with:
-          go-version: 1.19.x
+          go-version: 1.18.x
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Install goimports
        run: go install golang.org/x/tools/cmd/goimports
-      - name: Run goimports lint
-        working-directory: 'coordinator'
-        run: goimports -local scroll-tech/coordinator/ -w .
-      - name: Run go mod tidy
-        working-directory: 'coordinator'
-        run: go mod tidy
+      - run: goimports -local scroll-tech/coordinator/ -w .
+      - run: go mod tidy
      # If there are any diffs from goimports or go mod tidy, fail.
      - name: Verify no changes from goimports and go mod tidy
-        working-directory: 'coordinator'
        run: |
          if [ -n "$(git status --porcelain)" ]; then
            exit 1
@@ -86,28 +80,3 @@ jobs:
#          push: false
#          # cache-from: type=gha,scope=${{ github.workflow }}
#          # cache-to: type=gha,scope=${{ github.workflow }}
-  tests:
-    if: github.event.pull_request.draft == false
-    runs-on: ubuntu-latest
-    steps:
-      - name: Install Go
-        uses: actions/setup-go@v2
-        with:
-          go-version: 1.18.x
-      - name: Checkout code
-        uses: actions/checkout@v2
-      - name: Install Solc
-        uses: supplypike/setup-bin@v3
-        with:
-          uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.16/solc-static-linux'
-          name: 'solc'
-          version: '0.8.16'
-      - name: Install Geth Tools
-        uses: gacts/install-geth-tools@v1
-      - name: Build prerequisites
-        run: |
-          make dev_docker
-      - name: Test coordinator packages
-        working-directory: 'coordinator'
-        run: |
-          go test -v -race -gcflags="-l" -ldflags="-s=false" -covermode=atomic -tags mock_verifier ./...
45 .github/workflows/database.yml (vendored)
@@ -9,7 +9,6 @@ on:
      - alpha
    paths:
      - 'database/**'
      - 'common/**'
      - '.github/workflows/database.yml'
  pull_request:
    types:
@@ -19,9 +18,12 @@ on:
      - ready_for_review
    paths:
      - 'database/**'
      - 'common/**'
      - '.github/workflows/database.yml'

+defaults:
+  run:
+    working-directory: 'database'
+
jobs:
  check:
    if: github.event.pull_request.draft == false
@@ -30,11 +32,10 @@ jobs:
      - name: Install Go
        uses: actions/setup-go@v2
        with:
-          go-version: 1.19.x
+          go-version: 1.18.x
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Lint
-        working-directory: 'database'
        run: |
          rm -rf $HOME/.cache/golangci-lint
          make lint
@@ -45,46 +46,16 @@ jobs:
      - name: Install Go
        uses: actions/setup-go@v2
        with:
-          go-version: 1.19.x
+          go-version: 1.18.x
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Install goimports
        run: go install golang.org/x/tools/cmd/goimports
-      - name: Run goimports lint
-        working-directory: 'database'
-        run: goimports -local scroll-tech/database/ -w .
-      - name: Run go mod tidy
-        working-directory: 'database'
-        run: go mod tidy
+      - run: goimports -local scroll-tech/database/ -w .
+      - run: go mod tidy
      # If there are any diffs from goimports or go mod tidy, fail.
      - name: Verify no changes from goimports and go mod tidy
-        working-directory: 'database'
        run: |
          if [ -n "$(git status --porcelain)" ]; then
            exit 1
          fi
-  tests:
-    if: github.event.pull_request.draft == false
-    runs-on: ubuntu-latest
-    steps:
-      - name: Install Go
-        uses: actions/setup-go@v2
-        with:
-          go-version: 1.18.x
-      - name: Checkout code
-        uses: actions/checkout@v2
-      - name: Install Solc
-        uses: supplypike/setup-bin@v3
-        with:
-          uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.16/solc-static-linux'
-          name: 'solc'
-          version: '0.8.16'
-      - name: Install Geth Tools
-        uses: gacts/install-geth-tools@v1
-      - name: Build prerequisites
-        run: |
-          make dev_docker
-      - name: Test database packages
-        working-directory: 'database'
-        run: |
-          go test -v -race -gcflags="-l" -ldflags="-s=false" -covermode=atomic ./...
43 .github/workflows/integration.yaml (vendored, deleted)
@@ -1,43 +0,0 @@
-name: Integration
-
-on:
-  push:
-    branches:
-      - main
-      - staging
-      - develop
-      - alpha
-  pull_request:
-    types:
-      - opened
-      - reopened
-      - synchronize
-      - ready_for_review
-
-jobs:
-  tests:
-    if: github.event.pull_request.draft == false
-    runs-on: ubuntu-latest
-    steps:
-      - name: Install Go
-        uses: actions/setup-go@v2
-        with:
-          go-version: 1.18.x
-      - name: Checkout code
-        uses: actions/checkout@v2
-      - name: Install Solc
-        uses: supplypike/setup-bin@v3
-        with:
-          uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.16/solc-static-linux'
-          name: 'solc'
-          version: '0.8.16'
-      - name: Install Geth Tools
-        uses: gacts/install-geth-tools@v1
-      - name: Build prerequisites
-        run: |
-          make dev_docker
-          make -C bridge mock_abi
-          make -C common/bytecode all
-      - name: Run integration tests
-        run: |
-          go test -v -tags="mock_prover mock_verifier" -p 1 scroll-tech/integration-test/...
59 .github/workflows/intermediate-docker.yml (vendored, deleted)
@@ -1,59 +0,0 @@
-name: Intermediate Docker
-
-on:
-  workflow_dispatch:
-    inputs:
-      GO_VERSION:
-        description: 'Go version'
-        required: true
-        type: string
-        default: '1.19'
-      RUST_VERSION:
-        description: 'Rust toolchain version'
-        required: true
-        type: string
-        default: 'nightly-2022-12-10'
-      PYTHON_VERSION:
-        description: 'Python version'
-        required: false
-        type: string
-        default: '3.10'
-      CUDA_VERSION:
-        description: 'Cuda version'
-        required: false
-        type: string
-        default: '11.7.1'
-
-defaults:
-  run:
-    working-directory: 'build/dockerfiles/intermediate'
-
-jobs:
-  build-and-push:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v2
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
-      - name: Login to Docker Hub
-        uses: docker/login-action@v2
-        with:
-          username: ${{ secrets.DOCKERHUB_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_TOKEN }}
-      - name: Build
-        run: |
-          make all
-        env:
-          GO_VERSION: ${{ inputs.GO_VERSION }}
-          RUST_VERSION: ${{ inputs.RUST_VERSION }}
-          PYTHON_VERSION: ${{ inputs.PYTHON_VERSION }}
-          CUDA_VERSION: ${{ inputs.CUDA_VERSION }}
-      - name: Publish
-        run: |
-          make publish
-        env:
-          GO_VERSION: ${{ inputs.GO_VERSION }}
-          RUST_VERSION: ${{ inputs.RUST_VERSION }}
-          PYTHON_VERSION: ${{ inputs.PYTHON_VERSION }}
-          CUDA_VERSION: ${{ inputs.CUDA_VERSION }}
6 .github/workflows/roller.yml (vendored)
@@ -37,7 +37,7 @@ jobs:
      - name: Install Go
        uses: actions/setup-go@v2
        with:
-          go-version: 1.19.x
+          go-version: 1.18.x
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Cache cargo
@@ -55,7 +55,7 @@ jobs:
      - name: Install Go
        uses: actions/setup-go@v2
        with:
-          go-version: 1.19.x
+          go-version: 1.18.x
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Lint
@@ -69,7 +69,7 @@ jobs:
      - name: Install Go
        uses: actions/setup-go@v2
        with:
-          go-version: 1.19.x
+          go-version: 1.18.x
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Install goimports
7 Jenkinsfile (vendored)
@@ -7,11 +7,10 @@ pipeline {
        timeout (20)
    }
    tools {
-        go 'go-1.18'
-        nodejs "nodejs"
+        go 'go-1.19'
    }
    environment {
        GOBIN = '/home/ubuntu/go/bin/'
        GO111MODULE = 'on'
        PATH="/home/ubuntu/.cargo/bin:$PATH"
        LD_LIBRARY_PATH="$LD_LIBRARY_PATH:./coordinator/verifier/lib"
@@ -82,7 +81,7 @@ pipeline {
        }
        stage('Compare Coverage') {
            steps {
-                sh './build/post-test-report-coverage.sh'
+                sh "./build/post-test-report-coverage.sh"
                script {
                    currentBuild.result = 'SUCCESS'
                }
@@ -97,4 +96,4 @@ pipeline {
                slackSend(message: "${JOB_BASE_NAME} ${GIT_COMMIT} #${BUILD_NUMBER} deploy ${currentBuild.result}")
            }
        }
    }
}
@@ -1,7 +1,9 @@
# Scroll Monorepo

[](https://github.com/scroll-tech/scroll/actions/workflows/contracts.yaml) [](https://github.com/scroll-tech/scroll/actions/workflows/bridge.yml) [](https://github.com/scroll-tech/scroll/actions/workflows/coordinator.yml) [](https://github.com/scroll-tech/scroll/actions/workflows/database.yml) [](https://github.com/scroll-tech/scroll/actions/workflows/common.yml) [](https://github.com/scroll-tech/scroll/actions/workflows/roller.yml)

## Prerequisites
+ Go 1.19
+ Go 1.18
+ Rust (for version, see [rust-toolchain](./common/libzkp/impl/rust-toolchain))
+ Hardhat / Foundry
+ Docker
@@ -43,6 +43,8 @@ var (

	// ScrollChainABI holds information about ScrollChain's context and available invokable methods.
	ScrollChainABI *abi.ABI
+	// ScrollChainV2ABI holds information about ScrollChainV2's context and available invokable methods.
+	ScrollChainV2ABI *abi.ABI
	// L1ScrollMessengerABI holds information about L1ScrollMessenger's context and available invokable methods.
	L1ScrollMessengerABI *abi.ABI
	// L1MessageQueueABI holds information about L1MessageQueue contract's context and available invokable methods.
@@ -116,6 +118,7 @@ func init() {

	// scroll monorepo
	ScrollChainABI, _ = ScrollChainMetaData.GetAbi()
+	ScrollChainV2ABI, _ = ScrollChainV2MetaData.GetAbi()
	L1ScrollMessengerABI, _ = L1ScrollMessengerMetaData.GetAbi()
	L1MessageQueueABI, _ = L1MessageQueueMetaData.GetAbi()
	L2GasPriceOracleABI, _ = L2GasPriceOracleMetaData.GetAbi()
@@ -198,6 +201,11 @@ var ScrollChainMetaData = &bind.MetaData{
ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"batchHash\",\"type\":\"bytes32\"}],\"name\":\"CommitBatch\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"batchHash\",\"type\":\"bytes32\"}],\"name\":\"FinalizeBatch\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"batchHash\",\"type\":\"bytes32\"}],\"name\":\"RevertBatch\",\"type\":\"event\"},{\"inputs\":[{\"components\":[{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"blockHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"parentHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint64\",\"name\":\"blockNumber\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"timestamp\",\"type\":\"uint64\"},{\"internalType\":\"uint256\",\"name\":\"baseFee\",\"type\":\"uint256\"},{\"internalType\":\"uint64\",\"name\":\"gasLimit\",\"type\":\"uint64\"},{\"internalType\":\"uint16\",\"name\":\"numTransactions\",\"type\":\"uint16\"},{\"internalType\":\"uint16\",\"name\":\"numL1Messages\",\"type\":\"uint16\"}],\"internalType\":\"structIScrollChain.BlockContext[]\",\"name\":\"blocks\",\"type\":\"tuple[]\"},{\"internalType\":\"bytes32\",\"name\":\"prevStateRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"newStateRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"withdrawTrieRoot\",\"type\":\"bytes32\"},{\"internalType\":\"uint64\",\"name\":\"batchIndex\",\"type\":\"uint64\"},{\"internalType\":\"bytes32\",\"name\":\"parentBatchHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"l2Transactions\",\"type\":\"bytes\"}],\"internalType\":\"structIScrollChain.Batch\",\"name\":\"batch\",\"type\":\"tuple\"}],\"name\":\"commitBatch\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"blockHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"parentHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint64\",\"name\":\"blockNumber\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"timestamp\",\"type\":\"uint64\"},{\"internalType\":\"uint256\",\"name\":\"baseFee\",\"type\":\"uint256\"},{\"internalType\":\"uint64\",\"name\":\"gasLimit\",\"type\":\"uint64\"},{\"internalType\":\"uint16\",\"name\":\"numTransactions\",\"type\":\"uint16\"},{\"internalType\":\"uint16\",\"name\":\"numL1Messages\",\"type\":\"uint16\"}],\"internalType\":\"structIScrollChain.BlockContext[]\",\"name\":\"blocks\",\"type\":\"tuple[]\"},{\"internalType\":\"bytes32\",\"name\":\"prevStateRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"newStateRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"withdrawTrieRoot\",\"type\":\"bytes32\"},{\"internalType\":\"uint64\",\"name\":\"batchIndex\",\"type\":\"uint64\"},{\"internalType\":\"bytes32\",\"name\":\"parentBatchHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"l2Transactions\",\"type\":\"bytes\"}],\"internalType\":\"structIScrollChain.Batch[]\",\"name\":\"batches\",\"type\":\"tuple[]\"}],\"name\":\"commitBatches\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"batchId\",\"type\":\"bytes32\"},{\"internalType\":\"uint256[]\",\"name\":\"proof\",\"type\":\"uint256[]\"},{\"internalType\":\"uint256[]\",\"name\":\"instances\",\"type\":
\"uint256[]\"}],\"name\":\"finalizeBatchWithProof\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"batchHash\",\"type\":\"bytes32\"}],\"name\":\"getL2MessageRoot\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"batchHash\",\"type\":\"bytes32\"}],\"name\":\"isBatchFinalized\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"batchId\",\"type\":\"bytes32\"}],\"name\":\"revertBatch\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]",
}

// ScrollChainV2MetaData contains all meta data concerning the ScrollChainV2 contract.
var ScrollChainV2MetaData = &bind.MetaData{
ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"batchHash\",\"type\":\"bytes32\"}],\"name\":\"CommitBatch\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"batchHash\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"stateRoot\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"withdrawRoot\",\"type\":\"bytes32\"}],\"name\":\"FinalizeBatch\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"batchHash\",\"type\":\"bytes32\"}],\"name\":\"RevertBatch\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"uint8\",\"name\":\"version\",\"type\":\"uint8\"},{\"internalType\":\"bytes\",\"name\":\"parentBatchHeader\",\"type\":\"bytes\"},{\"internalType\":\"bytes[]\",\"name\":\"chunks\",\"type\":\"bytes[]\"},{\"internalType\":\"bytes\",\"name\":\"skippedL1MessageBitmap\",\"type\":\"bytes\"}],\"name\":\"commitBatch\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"batchIndex\",\"type\":\"uint256\"}],\"name\":\"committedBatches\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"batchHeader\",\"type\":\"bytes\"},{\"internalType\":\"bytes32\",\"name\":\"prevStateRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"postStateRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"withdrawRoot\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"aggrProof\",\"type\":\"bytes\"}],\"name\":\"finalizeBatchWithProof\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"batchIndex\",\"type\":\"uint256\"}],\"name\":\"finalizedStateRoots\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"batchIndex\",\"type\":\"uint256\"}],\"name\":\"isBatchFinalized\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"batchHeader\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"count\",\"type\":\"uint256\"}],\"name\":\"revertBatch\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"batchIndex\",\"type\":\"uint256\"}],\"name\":\"withdrawRoots\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]",
}

// L1ScrollMessengerMetaData contains all meta data concerning the L1ScrollMessenger contract.
var L1ScrollMessengerMetaData = &bind.MetaData{
ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"messageHash\",\"type\":\"bytes32\"}],\"name\":\"FailedRelayedMessage\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"messageHash\",\"type\":\"bytes32\"}],\"name\":\"RelayedMessage\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"messageNonce\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"message\",\"type\":\"bytes\"}],\"name\":\"SentMessage\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"nonce\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"message\",\"type\":\"bytes\"},{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"batchHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes\",\"name\":\"merkleProof\",\"type\":\"bytes\"}],\"internalType\":\"structIL1ScrollMessenger.L2MessageProof\",\"name\":\"proof\",\"type\":\"tuple\"}],\"name\":\"relayMessageWithProof\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"from\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"queueIndex\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"message\",\"type\":\"bytes\"},{\"internalType\":\"uint32\",\"name\":\"oldGasLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"newGasLimit\",\"type\":\"uint32\"}],\"name\":\"replayMessage\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"message\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"}],\"name\":\"sendMessage\",\"outputs\":[],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"xDomainMessageSender\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]",
@@ -325,3 +333,31 @@ type L2FailedRelayedMessageEvent struct {
type L2RelayedMessageEvent struct {
	MessageHash common.Hash
}

+// IScrollChainBatch is an auto generated low-level Go binding around an user-defined struct.
+type IScrollChainBatch struct {
+	Blocks           []IScrollChainBlockContext
+	PrevStateRoot    common.Hash
+	NewStateRoot     common.Hash
+	WithdrawTrieRoot common.Hash
+	BatchIndex       uint64
+	ParentBatchHash  common.Hash
+	L2Transactions   []byte
+}
+
+// L1CommitBatchEvent represents a CommitBatch event raised by the ScrollChain contract.
+type L1CommitBatchEvent struct {
+	BatchHash common.Hash
+}
+
+// IScrollChainBlockContext is an auto generated low-level Go binding around an user-defined struct.
+type IScrollChainBlockContext struct {
+	BlockHash       common.Hash
+	ParentHash      common.Hash
+	BlockNumber     uint64
+	Timestamp       uint64
+	BaseFee         *big.Int
+	GasLimit        uint64
+	NumTransactions uint16
+	NumL1Messages   uint16
+}
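For context, a minimal sketch of how such `bind.MetaData` values are consumed with go-ethereum. Only `ScrollChainV2MetaData` and the `CommitBatch` event name come from this diff; the helper itself is illustrative:

```go
// Illustrative only: derive the topic of the CommitBatch event declared above.
// Assumes this lives in the same package as the MetaData variables.
func commitBatchTopic() (common.Hash, error) {
	chainABI, err := ScrollChainV2MetaData.GetAbi()
	if err != nil {
		return common.Hash{}, err
	}
	// An event's ID is the keccak256 hash of its canonical signature; logs
	// carry it as Topics[0], which is how CommitBatch logs are filtered
	// later in this compare (FetchAndSaveBatchIndex).
	return chainABI.Events["CommitBatch"].ID, nil
}
```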
@@ -5,6 +5,7 @@ import (
	"os"

	"github.com/ethereum/go-ethereum/log"
+	"github.com/iris-contrib/middleware/cors"
	"github.com/kataras/iris/v12"
	"github.com/kataras/iris/v12/mvc"
	"github.com/urfave/cli/v2"
@@ -60,6 +61,11 @@ func init() {
}

func action(ctx *cli.Context) error {
+	corsOptions := cors.New(cors.Options{
+		AllowedOrigins:   []string{"*"},
+		AllowedMethods:   []string{"GET", "POST", "PUT", "DELETE"},
+		AllowCredentials: true,
+	})
	// Load config file.
	cfgFile := ctx.String(cutils.ConfigFileFlag.Name)
	cfg, err := config.NewConfig(cfgFile)
@@ -72,6 +78,7 @@ func action(ctx *cli.Context) error {
	}
	defer database.Close()
	bridgeApp := iris.New()
+	bridgeApp.UseRouter(corsOptions)
	bridgeApp.Get("/ping", pong).Describe("healthcheck")

	mvc.Configure(bridgeApp.Party("/api/txs"), setupQueryByAddressHandler)
@@ -13,6 +13,7 @@ import (

	"bridge-history-api/config"
	"bridge-history-api/cross_msg"
+	"bridge-history-api/cross_msg/message_proof"
	"bridge-history-api/db"
	cutils "bridge-history-api/utils"
)
@@ -54,6 +55,7 @@ func action(ctx *cli.Context) error {
		log.Crit("failed to connect l2 geth", "config file", cfgFile, "error", err)
	}
	db, err := db.NewOrmFactory(cfg)
+	defer db.Close()
	if err != nil {
		log.Crit("failed to connect to db", "config file", cfgFile, "error", err)
	}
@@ -98,13 +100,20 @@ func action(ctx *cli.Context) error {
	go l2crossMsgFetcher.Start()
	defer l2crossMsgFetcher.Stop()

-	l1BlocktimeFetcher := cross_msg.NewBlocktimestampFetcher(subCtx, uint(cfg.L1.Confirmation), int(cfg.L1.BlockTime), l1client, db.UpdateL1Blocktimestamp, db.GetL1EarliestNoBlocktimestampHeight)
-	go l1BlocktimeFetcher.Start()
-	defer l1BlocktimeFetcher.Stop()
+	// BlockTimestamp fetcher for l1 and l2
+	l1BlockTimeFetcher := cross_msg.NewBlockTimestampFetcher(subCtx, cfg.L1.Confirmation, int(cfg.L1.BlockTime), l1client, db.UpdateL1BlockTimestamp, db.GetL1EarliestNoBlockTimestampHeight)
+	go l1BlockTimeFetcher.Start()
+	defer l1BlockTimeFetcher.Stop()

-	l2BlocktimeFetcher := cross_msg.NewBlocktimestampFetcher(subCtx, uint(cfg.L2.Confirmation), int(cfg.L2.BlockTime), l2client, db.UpdateL2Blocktimestamp, db.GetL2EarliestNoBlocktimestampHeight)
-	go l2BlocktimeFetcher.Start()
-	defer l2BlocktimeFetcher.Stop()
+	l2BlockTimeFetcher := cross_msg.NewBlockTimestampFetcher(subCtx, cfg.L2.Confirmation, int(cfg.L2.BlockTime), l2client, db.UpdateL2BlockTimestamp, db.GetL2EarliestNoBlockTimestampHeight)
+	go l2BlockTimeFetcher.Start()
+	defer l2BlockTimeFetcher.Stop()

+	// Proof updater and batch fetcher
+	l2msgProofUpdater := message_proof.NewMsgProofUpdater(subCtx, cfg.L1.Confirmation, cfg.BatchInfoFetcher.BatchIndexStartBlock, db)
+	batchFetcher := cross_msg.NewBatchInfoFetcher(subCtx, common.HexToAddress(cfg.BatchInfoFetcher.ScrollChainAddr), cfg.BatchInfoFetcher.BatchIndexStartBlock, cfg.L1.Confirmation, int(cfg.L1.BlockTime), l1client, db, l2msgProofUpdater)
+	go batchFetcher.Start()
+	defer batchFetcher.Stop()

	// Catch CTRL-C to ensure a graceful shutdown.
	interrupt := make(chan os.Signal, 1)
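The hunk ends at the `interrupt` channel; the actual signal handling is outside this hunk, so the following completion is an assumption about its typical shape:

```go
// Sketch (not from this diff): the usual tail of action() after the
// interrupt channel above.
signal.Notify(interrupt, os.Interrupt) // requires "os/signal"
<-interrupt                            // block until CTRL-C
// Returning from action() then lets every deferred Stop()/Close() call
// registered above run in LIFO order, which is the graceful shutdown.
```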
@@ -1,29 +1,33 @@
{
+    "batchInfoFetcher": {
+        "batchIndexStartBlock": 9091265,
+        "ScrollChainAddr": "0xcD00DB804C819175B381b2B44Aa16A391c8a01D6"
+    },
    "l1": {
        "confirmation": 64,
        "endpoint": "https://rpc.ankr.com/eth_goerli",
-        "startHeight": 9890194,
+        "startHeight": 9090194,
        "blockTime": 10,
-        "MessengerAddr": "0x5260e38080BFe97e6C4925d9209eCc5f964373b6",
-        "ETHGatewayAddr": "0x429b73A21cF3BF1f3E696a21A95408161daF311f",
-        "WETHGatewayAddr": "0x8be69E499D8848DfFb4cF9bac909f3e2cF2FeFa0",
-        "StandardERC20Gateway": "0xeF37207c1A1efF6D6a9d7BfF3cF4270e406d319b",
-        "CustomERC20GatewayAddr": "0x920f906B814597cF5DC76F95100F09CBAF9c5748",
-        "ERC721GatewayAddr": "0x1C441Dfc5C2eD7A2AA8636748A664E59CB029157",
-        "ERC1155GatewayAddr": "0xd1bE599aaCBC21448fD6373bbc7c1b4c7806f135"
+        "MessengerAddr": "0x326517Eb8eB1Ce5eaB5b513C2e9A24839b402d90",
+        "ETHGatewayAddr": "0x8305cB7B8448677736095965B63d7431017328fe",
+        "WETHGatewayAddr": "0xe3bA3c60d99a2d9a5f817734bC85353470b23931",
+        "StandardERC20Gateway": "0x16c1079B27eD9c363B7D08aC5Ae937A398972A5C",
+        "CustomERC20GatewayAddr": "0x61f08caD3d6F77801167d3bA8669433701586643",
+        "ERC721GatewayAddr": "0x4A73D25A4C99CB912acaf6C5B5e554f2982201c5",
+        "ERC1155GatewayAddr": "0xa3F5DD3033698c2832C53f3C3Fe6E062F58cD808"
    },
    "l2": {
        "confirmation": 1,
-        "endpoint": "https://alpha-rpc.scroll.io/l2",
+        "endpoint": "http://staging-l2geth-rpc0.scroll.tech:8545",
        "blockTime": 3,
-        "startHeight": 1900068,
-        "CustomERC20GatewayAddr": "0xa07Cb742657294C339fB4d5d6CdF3fdBeE8C1c68",
-        "ERC721GatewayAddr": "0x8Fee20e0C0Ef16f2898a8073531a857D11b9C700",
-        "StandardERC20Gateway": "0xB878F37BB278bf0e4974856fFe86f5e6F66BD725",
-        "MessengerAddr": "0xb75d7e84517e1504C151B270255B087Fd746D34C",
-        "ETHGatewayAddr": "0x32139B5C8838E94fFcD83E60dff95Daa7F0bA14c",
-        "WETHGatewayAddr": "0xBb88bF582F2BBa46702621dae5CB9271057bC85b",
-        "ERC1155GatewayAddr": "0x2946cB860028276b3C4bccE1767841641C2E0828"
+        "startHeight": 0,
+        "CustomERC20GatewayAddr": "0x905db21f836749fEeD12de781afc4A5Ab4Dd0d51",
+        "ERC721GatewayAddr": "0xC53D835514780664BCd7eCfcE7c2E5d9554dc41B",
+        "StandardERC20Gateway": "0x90271634BCB020e06ea4840C3f7aa61b8F860651",
+        "MessengerAddr": "0xE8b0956Ac75c65Aa1669e83888DA13afF2E108f4",
+        "ETHGatewayAddr": "0xD5938590D5dD8ce95812D4D515a219C12C551D67",
+        "WETHGatewayAddr": "0xb0aaA582564fade4232a16fdB1383004A6A7247F",
+        "ERC1155GatewayAddr": "0x4f33B1655619c2C0B7C450128Df760B4365Cb549"
    },
    "db": {
        "dsn": "postgres://postgres:1234@localhost:5444/test?sslmode=disable",
@@ -6,6 +6,11 @@ import (
	"path/filepath"
)

+type BatchInfoFetcherConfig struct {
+	BatchIndexStartBlock uint64 `json:"batchIndexStartBlock"`
+	ScrollChainAddr      string `json:"ScrollChainAddr"`
+}
+
// DBConfig db config
type DBConfig struct {
	// data source name
@@ -41,8 +46,9 @@ type Config struct {
	L2 *LayerConfig `json:"l2"`

	// data source name
-	DB     *DBConfig     `json:"db"`
-	Server *ServerConfig `json:"server"`
+	DB               *DBConfig               `json:"db"`
+	Server           *ServerConfig           `json:"server"`
+	BatchInfoFetcher *BatchInfoFetcherConfig `json:"batchInfoFetcher"`
}

// NewConfig returns a new instance of Config.
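A short usage sketch of the extended Config. The field names come from this diff; the helper, its path argument, and the error handling are illustrative:

```go
// Hypothetical helper: load the config file and pull out the two new
// batch-fetcher settings added in this compare.
func loadBatchFetcherConfig(path string) (string, uint64, error) {
	cfg, err := config.NewConfig(path) // e.g. "./config.json"
	if err != nil {
		return "", 0, err
	}
	// Both fields come from the new BatchInfoFetcherConfig section.
	return cfg.BatchInfoFetcher.ScrollChainAddr, cfg.BatchInfoFetcher.BatchIndexStartBlock, nil
}
```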
@@ -16,7 +16,7 @@ type QueryHashController struct {
}

func (c *QueryAddressController) Get(req model.QueryByAddressRequest) (*model.QueryByAddressResponse, error) {
-	message, err := c.Service.GetTxsByAddress(common.HexToAddress(req.Address), int64(req.Offset), int64(req.Limit))
+	message, total, err := c.Service.GetTxsByAddress(common.HexToAddress(req.Address), int64(req.Offset), int64(req.Limit))
	if err != nil {
		return &model.QueryByAddressResponse{Message: "500", Data: &model.Data{}}, err
	}
@@ -24,7 +24,7 @@ func (c *QueryAddressController) Get(req model.QueryByAddressRequest) (*model.QueryByAddressResponse, error) {
	return &model.QueryByAddressResponse{Message: "ok",
		Data: &model.Data{
			Result: message,
-			Total:  len(message),
+			Total:  total,
		}}, nil
}
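The `Total` change above matters for pagination: `len(message)` only counts the rows on the current page, while the new `total` comes from the service, presumably a count over all matching rows. A self-contained sketch of the distinction, with hypothetical numbers:

```go
package main

import "fmt"

// Sketch of why Total must come from the store, not from the page slice.
// Hypothetical data: 250 matching rows, page size 20.
func main() {
	allRows := make([]int, 250)
	offset, limit := 0, 20
	page := allRows[offset : offset+limit]
	fmt.Println(len(page))    // 20: size of the current page only (the old bug)
	fmt.Println(len(allRows)) // 250: the correct Total for rendering pagination
}
```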
105 bridge-history-api/cross_msg/batch_info_fetcher.go (new file)
@@ -0,0 +1,105 @@
package cross_msg

import (
	"context"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient"
	"github.com/ethereum/go-ethereum/log"

	"bridge-history-api/cross_msg/message_proof"
	"bridge-history-api/db"
	"bridge-history-api/utils"
)

type BatchInfoFetcher struct {
	ctx                  context.Context
	scrollChainAddr      common.Address
	batchInfoStartNumber uint64
	confirmation         uint64
	blockTimeInSec       int
	client               *ethclient.Client
	db                   db.OrmFactory
	msgProofUpdater      *message_proof.MsgProofUpdater
}

func NewBatchInfoFetcher(ctx context.Context, scrollChainAddr common.Address, batchInfoStartNumber uint64, confirmation uint64, blockTimeInSec int, client *ethclient.Client, db db.OrmFactory, msgProofUpdater *message_proof.MsgProofUpdater) *BatchInfoFetcher {
	return &BatchInfoFetcher{
		ctx:                  ctx,
		scrollChainAddr:      scrollChainAddr,
		batchInfoStartNumber: batchInfoStartNumber,
		confirmation:         confirmation,
		blockTimeInSec:       blockTimeInSec,
		client:               client,
		db:                   db,
		msgProofUpdater:      msgProofUpdater,
	}
}

func (b *BatchInfoFetcher) Start() {
	log.Info("BatchInfoFetcher Start")
	// Fetch batch info once at the beginning, then start the msg proof
	// updater once the db has some bridge batches.
	err := b.fetchBatchInfo()
	if err != nil {
		log.Error("fetch batch info at beginning failed: ", "err", err)
	}

	go b.msgProofUpdater.Start()

	go func() {
		tick := time.NewTicker(time.Duration(b.blockTimeInSec) * time.Second)
		for {
			select {
			case <-b.ctx.Done():
				tick.Stop()
				return
			case <-tick.C:
				err := b.fetchBatchInfo()
				if err != nil {
					log.Error("fetch batch info failed: ", "err", err)
				}
			}
		}
	}()
}

func (b *BatchInfoFetcher) Stop() {
	log.Info("BatchInfoFetcher Stop")
	b.msgProofUpdater.Stop()
}

func (b *BatchInfoFetcher) fetchBatchInfo() error {
	number, err := utils.GetSafeBlockNumber(b.ctx, b.client, b.confirmation)
	if err != nil {
		log.Error("Can not get latest block number: ", "err", err)
		return err
	}
	latestBatch, err := b.db.GetLatestRollupBatch()
	if err != nil {
		log.Error("Can not get latest BatchInfo: ", "err", err)
		return err
	}
	var startHeight uint64
	if latestBatch == nil {
		startHeight = b.batchInfoStartNumber
	} else {
		startHeight = latestBatch.CommitHeight + 1
	}
	for from := startHeight; number >= from; from += uint64(fetchLimit) {
		to := from + uint64(fetchLimit) - 1
		// number - confirmation can never be less than 0 given the loop
		// condition, but watch out for overflow.
		if to > number {
			to = number
		}
		// filter logs to fetch batches
		err = FetchAndSaveBatchIndex(b.ctx, b.client, b.db, int64(from), int64(to), b.scrollChainAddr)
		if err != nil {
			log.Error("Can not fetch and save from chain: ", "err", err)
			return err
		}
	}
	return nil
}
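`utils.GetSafeBlockNumber` is called above but its implementation is not part of this compare; a plausible sketch of its behavior, stated as an assumption based on how `confirmation` is used throughout these files:

```go
// Assumed shape of utils.GetSafeBlockNumber (not shown in this compare):
// the latest chain height minus the confirmation depth, floored at zero.
// Assumes the usual "context" and go-ethereum ethclient imports.
func GetSafeBlockNumber(ctx context.Context, client *ethclient.Client, confirmations uint64) (uint64, error) {
	latest, err := client.BlockNumber(ctx) // eth_blockNumber
	if err != nil {
		return 0, err
	}
	if latest < confirmations {
		return 0, nil // chain is still shorter than the confirmation depth
	}
	return latest - confirmations, nil
}
```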
@@ -9,30 +9,30 @@ import (
	"github.com/ethereum/go-ethereum/log"
)

-type GetEarliestNoBlocktimestampHeightFunc func() (uint64, error)
-type UpdateBlocktimestampFunc func(height uint64, timestamp time.Time) error
+type GetEarliestNoBlockTimestampHeightFunc func() (uint64, error)
+type UpdateBlockTimestampFunc func(height uint64, timestamp time.Time) error

-type BlocktimestampFetcher struct {
+type BlockTimestampFetcher struct {
	ctx            context.Context
-	confirmation   uint
+	confirmation   uint64
	blockTimeInSec int
	client         *ethclient.Client
-	updateBlocktimestampFunc              UpdateBlocktimestampFunc
-	getEarliestNoBlocktimestampHeightFunc GetEarliestNoBlocktimestampHeightFunc
+	updateBlockTimestampFunc              UpdateBlockTimestampFunc
+	getEarliestNoBlockTimestampHeightFunc GetEarliestNoBlockTimestampHeightFunc
}

-func NewBlocktimestampFetcher(ctx context.Context, confirmation uint, blockTimeInSec int, client *ethclient.Client, updateBlocktimestampFunc UpdateBlocktimestampFunc, getEarliestNoBlocktimestampHeightFunc GetEarliestNoBlocktimestampHeightFunc) *BlocktimestampFetcher {
-	return &BlocktimestampFetcher{
+func NewBlockTimestampFetcher(ctx context.Context, confirmation uint64, blockTimeInSec int, client *ethclient.Client, updateBlockTimestampFunc UpdateBlockTimestampFunc, getEarliestNoBlockTimestampHeightFunc GetEarliestNoBlockTimestampHeightFunc) *BlockTimestampFetcher {
+	return &BlockTimestampFetcher{
		ctx:            ctx,
		confirmation:   confirmation,
		blockTimeInSec: blockTimeInSec,
		client:         client,
-		getEarliestNoBlocktimestampHeightFunc: getEarliestNoBlocktimestampHeightFunc,
-		updateBlocktimestampFunc:              updateBlocktimestampFunc,
+		getEarliestNoBlockTimestampHeightFunc: getEarliestNoBlockTimestampHeightFunc,
+		updateBlockTimestampFunc:              updateBlockTimestampFunc,
	}
}

-func (b *BlocktimestampFetcher) Start() {
+func (b *BlockTimestampFetcher) Start() {
	go func() {
		tick := time.NewTicker(time.Duration(b.blockTimeInSec) * time.Second)
		for {
@@ -46,23 +46,23 @@ func (b *BlockTimestampFetcher) Start() {
				log.Error("Can not get latest block number", "err", err)
				continue
			}
-			startHeight, err := b.getEarliestNoBlocktimestampHeightFunc()
+			startHeight, err := b.getEarliestNoBlockTimestampHeightFunc()
			if err != nil {
				log.Error("Can not get latest record without block timestamp", "err", err)
				continue
			}
-			for height := startHeight; number >= height+uint64(b.confirmation) && height > 0; {
+			for height := startHeight; number >= height+b.confirmation && height > 0; {
				block, err := b.client.HeaderByNumber(b.ctx, new(big.Int).SetUint64(height))
				if err != nil {
					log.Error("Can not get block by number", "err", err)
					break
				}
-				err = b.updateBlocktimestampFunc(height, time.Unix(int64(block.Time), 0))
+				err = b.updateBlockTimestampFunc(height, time.Unix(int64(block.Time), 0))
				if err != nil {
-					log.Error("Can not update blocktimstamp into DB ", "err", err)
+					log.Error("Can not update blockTimestamp into DB ", "err", err)
					break
				}
-				height, err = b.getEarliestNoBlocktimestampHeightFunc()
+				height, err = b.getEarliestNoBlockTimestampHeightFunc()
				if err != nil {
					log.Error("Can not get latest record without block timestamp", "err", err)
					break
@@ -73,7 +73,6 @@ func (b *BlockTimestampFetcher) Start() {
		}
	}()
}

-func (b *BlocktimestampFetcher) Stop() {
-	log.Info("BlocktimestampFetcher Stop")
-	b.ctx.Done()
+func (b *BlockTimestampFetcher) Stop() {
+	log.Info("BlockTimestampFetcher Stop")
}
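The renamed fetcher keeps its database dependencies injected as plain functions, so call sites read like configuration. This mirrors the wiring already shown in the main.go hunk earlier in this compare; all names below come from that hunk, only the arrangement is new:

```go
l1BlockTimeFetcher := cross_msg.NewBlockTimestampFetcher(
	subCtx,
	cfg.L1.Confirmation,   // now uint64, no uint() conversion needed
	int(cfg.L1.BlockTime), // ticker period in seconds
	l1client,
	db.UpdateL1BlockTimestamp,              // UpdateBlockTimestampFunc
	db.GetL1EarliestNoBlockTimestampHeight, // GetEarliestNoBlockTimestampHeightFunc
)
go l1BlockTimeFetcher.Start()
defer l1BlockTimeFetcher.Stop()
```

Injecting the two DB callbacks keeps the fetcher free of any direct ORM dependency, which is why L1 and L2 can share a single implementation.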
@@ -27,13 +27,12 @@ type CrossMsgFetcher struct {
	reorgHandling ReorgHandling
	addressList   []common.Address
	cachedHeaders []*types.Header
-	mu            *sync.Mutex
+	mu            sync.Mutex
	reorgStartCh  chan struct{}
	reorgEndCh    chan struct{}
}

func NewCrossMsgFetcher(ctx context.Context, config *config.LayerConfig, db db.OrmFactory, client *ethclient.Client, worker *FetchEventWorker, addressList []common.Address, reorg ReorgHandling) (*CrossMsgFetcher, error) {
-	newMU := &sync.Mutex{}
	crossMsgFetcher := &CrossMsgFetcher{
		ctx:    ctx,
		config: config,
@@ -41,7 +40,6 @@ func NewCrossMsgFetcher(ctx context.Context, config *config.LayerConfig, db db.OrmFactory, client *ethclient.Client, worker *FetchEventWorker, addressList []common.Address, reorg ReorgHandling) (*CrossMsgFetcher, error) {
		client:        client,
		worker:        worker,
		reorgHandling: reorg,
-		mu:            newMU,
		addressList:   addressList,
		cachedHeaders: make([]*types.Header, 0),
		reorgStartCh:  make(chan struct{}),
@@ -97,7 +95,6 @@ func (c *CrossMsgFetcher) Start() {
}

func (c *CrossMsgFetcher) Stop() {
-	c.db.Close()
	log.Info("CrossMsgFetcher Stop")
}
@@ -106,7 +103,7 @@ func (c *CrossMsgFetcher) forwardFetchAndSaveMissingEvents(confirmation uint64) {
	// if we fetch to the latest block, shall not exceed cachedHeaders
	var number uint64
	var err error
-	if len(c.cachedHeaders) != 0 && confirmation <= 0 {
+	if len(c.cachedHeaders) != 0 && confirmation == 0 {
		number = c.cachedHeaders[len(c.cachedHeaders)-1].Number.Uint64() - 1
	} else {
		number, err = utils.GetSafeBlockNumber(c.ctx, c.client, confirmation)
@@ -119,22 +116,22 @@ func (c *CrossMsgFetcher) forwardFetchAndSaveMissingEvents(confirmation uint64) {
		log.Error(fmt.Sprintf("%s: invalid get/fetch function", c.worker.Name))
		return
	}
-	processed_height, err := c.worker.G(c.db)
+	processedHeight, err := c.worker.G(c.db)
	if err != nil {
		log.Error(fmt.Sprintf("%s: can not get latest processed block height", c.worker.Name))
	}
-	log.Info(fmt.Sprintf("%s: ", c.worker.Name), "height", processed_height)
-	if processed_height <= 0 || processed_height < int64(c.config.StartHeight) {
-		processed_height = int64(c.config.StartHeight)
+	log.Info(fmt.Sprintf("%s: ", c.worker.Name), "height", processedHeight)
+	if processedHeight <= 0 || processedHeight < int64(c.config.StartHeight) {
+		processedHeight = int64(c.config.StartHeight)
	} else {
-		processed_height += 1
+		processedHeight += 1
	}
-	for n := processed_height; n <= int64(number); n += FETCH_LIMIT {
-		iter_end := n + FETCH_LIMIT - 1
-		if iter_end > int64(number) {
-			iter_end = int64(number)
+	for from := processedHeight; from <= int64(number); from += fetchLimit {
+		to := from + fetchLimit - 1
+		if to > int64(number) {
+			to = int64(number)
		}
-		err := c.worker.F(c.ctx, c.client, c.db, n, iter_end, c.addressList)
+		err := c.worker.F(c.ctx, c.client, c.db, from, to, c.addressList)
		if err != nil {
			log.Error(fmt.Sprintf("%s: failed!", c.worker.Name), "err", err)
			break
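The renamed loop above is the same chunked scan used by `fetchBatchInfo` earlier: walk `[from, to]` windows of `fetchLimit` blocks, clamping the last window. In isolation (a sketch; `visit` stands in for the injected worker function):

```go
// Chunked range scan as used throughout this compare. fetchLimit is 3000
// here; both bounds of each window are inclusive, matching the FilterQuery
// comments in these files.
func scanRanges(start, latest, fetchLimit int64, visit func(from, to int64) error) error {
	for from := start; from <= latest; from += fetchLimit {
		to := from + fetchLimit - 1
		if to > latest {
			to = latest // clamp the final, possibly short, window
		}
		if err := visit(from, to); err != nil {
			return err
		}
	}
	return nil
}
```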
@@ -6,21 +6,19 @@ import (

	geth "github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethclient"
	"github.com/ethereum/go-ethereum/log"
	"github.com/jmoiron/sqlx"

	backendabi "bridge-history-api/abi"
	"bridge-history-api/db"
	"bridge-history-api/db/orm"
	"bridge-history-api/utils"
)

// TODO: read from config
var (
	// the number of blocks fetched per round
-	FETCH_LIMIT = int64(3000)
+	fetchLimit = int64(3000)
)

// FetchAndSave is a function type that fetches events from blockchain and saves them to database
@@ -36,11 +34,6 @@ type FetchEventWorker struct {
	Name string
}

-type msgHashWrapper struct {
-	msgHash common.Hash
-	txHash  common.Hash
-}
-
func GetLatestL1ProcessedHeight(db db.OrmFactory) (int64, error) {
	crossHeight, err := db.GetLatestL1ProcessedHeight()
	if err != nil {
@@ -70,15 +63,22 @@ func GetLatestL2ProcessedHeight(db db.OrmFactory) (int64, error) {
		log.Error("failed to get L2 relayed message processed height", "err", err)
		return 0, err
	}
-	if crossHeight > relayedHeight {
-		return crossHeight, nil
-	} else {
-		return relayedHeight, nil
-	}
+	l2SentHeight, err := db.GetLatestSentMsgHeightOnL2()
+	if err != nil {
+		log.Error("failed to get L2 sent message processed height", "err", err)
+		return 0, err
+	}
+	maxHeight := crossHeight
+	if maxHeight < relayedHeight {
+		maxHeight = relayedHeight
+	}
+	if maxHeight < l2SentHeight {
+		maxHeight = l2SentHeight
+	}
+	return maxHeight, nil
}
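The rewritten function now reconciles three tracked heights instead of two, returning the furthest block any tracker has processed. The same logic as a reusable helper (a sketch, not part of this compare):

```go
// maxInt64 returns the largest of its arguments; hypothetical helper.
func maxInt64(vals ...int64) int64 {
	m := vals[0]
	for _, v := range vals[1:] {
		if v > m {
			m = v
		}
	}
	return m
}

// usage: return maxInt64(crossHeight, relayedHeight, l2SentHeight), nil
```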
|
||||
func L1FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, database db.OrmFactory, from int64, to int64, addrList []common.Address) error {
|
||||
|
||||
query := geth.FilterQuery{
|
||||
FromBlock: big.NewInt(from), // inclusive
|
||||
ToBlock: big.NewInt(to), // inclusive
|
||||
@@ -99,7 +99,7 @@ func L1FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, databas
|
||||
log.Warn("Failed to get l1 event logs", "err", err)
|
||||
return err
|
||||
}
|
||||
depositL1CrossMsgs, msgHashes, relayedMsg, err := parseBackendL1EventLogs(logs)
|
||||
depositL1CrossMsgs, msgHashes, relayedMsg, err := utils.ParseBackendL1EventLogs(logs)
|
||||
if err != nil {
|
||||
log.Error("l1FetchAndSaveEvents: Failed to parse cross msg event logs", "err", err)
|
||||
return err
|
||||
@@ -157,7 +157,7 @@ func L2FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, databas
|
||||
log.Warn("Failed to get l2 event logs", "err", err)
|
||||
return err
|
||||
}
|
||||
depositL2CrossMsgs, msgHashes, relayedMsg, err := parseBackendL2EventLogs(logs)
|
||||
depositL2CrossMsgs, msgHashes, relayedMsg, l2sentMsgs, err := utils.ParseBackendL2EventLogs(logs)
|
||||
if err != nil {
|
||||
log.Error("l2FetchAndSaveEvents: Failed to parse cross msg event logs", "err", err)
|
||||
return err
|
||||
@@ -178,11 +178,19 @@ func L2FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, databas
|
||||
dbTx.Rollback()
|
||||
log.Crit("l2FetchAndSaveEvents: Failed to insert relayed message event logs", "err", err)
|
||||
}
|
||||
|
||||
err = updateL2CrossMsgMsgHash(ctx, dbTx, database, msgHashes)
|
||||
if err != nil {
|
||||
dbTx.Rollback()
|
||||
log.Crit("l2FetchAndSaveEvents: Failed to update msgHash in L2 cross msg", "err", err)
|
||||
}
|
||||
|
||||
err = database.BatchInsertL2SentMsgDBTx(dbTx, l2sentMsgs)
|
||||
if err != nil {
|
||||
dbTx.Rollback()
|
||||
log.Crit("l2FetchAndSaveEvents: Failed to insert l2 sent message", "err", err)
|
||||
}
|
||||
|
||||
err = dbTx.Commit()
|
||||
if err != nil {
|
||||
// if we can not insert into DB, there must something wrong, need a on-call member handle the dababase manually
|
||||
@@ -194,234 +202,61 @@ func L2FetchAndSaveEvents(ctx context.Context, client *ethclient.Client, databas
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []msgHashWrapper, []*orm.RelayedMsg, error) {
// Need to use the contract ABI to parse the event logs.
// Can only be tested after we have our contracts set up.

var l1CrossMsg []*orm.CrossMsg
var relayedMsgs []*orm.RelayedMsg
var msgHashes []msgHashWrapper
for _, vlog := range logs {
switch vlog.Topics[0] {
case backendabi.L1DepositETHSig:
event := backendabi.DepositETH{}
err := utils.UnpackLog(backendabi.L1ETHGatewayABI, &event, "DepositETH", vlog)
if err != nil {
log.Warn("Failed to unpack DepositETH event", "err", err)
return l1CrossMsg, msgHashes, relayedMsgs, err
}
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Amount: event.Amount.String(),
Asset: int(orm.ETH),
Layer1Hash: vlog.TxHash.Hex(),
})
case backendabi.L1DepositERC20Sig:
event := backendabi.ERC20MessageEvent{}
err := utils.UnpackLog(backendabi.L1StandardERC20GatewayABI, &event, "DepositERC20", vlog)
if err != nil {
log.Warn("Failed to unpack DepositERC20 event", "err", err)
return l1CrossMsg, msgHashes, relayedMsgs, err
}
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Amount: event.Amount.String(),
Asset: int(orm.ERC20),
Layer1Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
})
case backendabi.L1DepositERC721Sig:
event := backendabi.ERC721MessageEvent{}
err := utils.UnpackLog(backendabi.L1ERC721GatewayABI, &event, "DepositERC721", vlog)
if err != nil {
log.Warn("Failed to unpack DepositERC721 event", "err", err)
return l1CrossMsg, msgHashes, relayedMsgs, err
}
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Asset: int(orm.ERC721),
Layer1Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
TokenID: event.TokenID.Uint64(),
})
case backendabi.L1DepositERC1155Sig:
event := backendabi.ERC1155MessageEvent{}
err := utils.UnpackLog(backendabi.L1ERC1155GatewayABI, &event, "DepositERC1155", vlog)
if err != nil {
log.Warn("Failed to unpack DepositERC1155 event", "err", err)
return l1CrossMsg, msgHashes, relayedMsgs, err
}
l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Asset: int(orm.ERC1155),
Layer1Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
TokenID: event.TokenID.Uint64(),
Amount: event.Amount.String(),
})
case backendabi.L1SentMessageEventSignature:
event := backendabi.L1SentMessageEvent{}
err := utils.UnpackLog(backendabi.L1ScrollMessengerABI, &event, "SentMessage", vlog)
if err != nil {
log.Warn("Failed to unpack SentMessage event", "err", err)
return l1CrossMsg, msgHashes, relayedMsgs, err
}
msgHash := utils.ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message)
msgHashes = append(msgHashes, msgHashWrapper{
msgHash: msgHash,
txHash: vlog.TxHash})
case backendabi.L1RelayedMessageEventSignature:
event := backendabi.L1RelayedMessageEvent{}
err := utils.UnpackLog(backendabi.L1ScrollMessengerABI, &event, "RelayedMessage", vlog)
if err != nil {
log.Warn("Failed to unpack RelayedMessage event", "err", err)
return l1CrossMsg, msgHashes, relayedMsgs, err
}
relayedMsgs = append(relayedMsgs, &orm.RelayedMsg{
MsgHash: event.MessageHash.String(),
Height: vlog.BlockNumber,
Layer1Hash: vlog.TxHash.Hex(),
})

}

}
return l1CrossMsg, msgHashes, relayedMsgs, nil
}

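Both parse helpers decode raw logs through utils.UnpackLog, which this diff does not include. A minimal sketch built on go-ethereum's accounts/abi package could look like the following; this is an assumed shape, not necessarily the repo's exact implementation:

func UnpackLog(c abi.ABI, out interface{}, event string, vlog types.Log) error {
	// assumes imports: "fmt", "github.com/ethereum/go-ethereum/accounts/abi",
	// "github.com/ethereum/go-ethereum/core/types"
	if vlog.Topics[0] != c.Events[event].ID {
		return fmt.Errorf("event signature mismatch")
	}
	// non-indexed fields are ABI-encoded in the log data
	if len(vlog.Data) > 0 {
		if err := c.UnpackIntoInterface(out, event, vlog.Data); err != nil {
			return err
		}
	}
	// indexed fields are carried in the remaining topics
	var indexed abi.Arguments
	for _, arg := range c.Events[event].Inputs {
		if arg.Indexed {
			indexed = append(indexed, arg)
		}
	}
	return abi.ParseTopics(out, indexed, vlog.Topics[1:])
}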
func FetchAndSaveBatchIndex(ctx context.Context, client *ethclient.Client, database db.OrmFactory, from int64, to int64, scrollChainAddr common.Address) error {
query := geth.FilterQuery{
FromBlock: big.NewInt(from), // inclusive
ToBlock: big.NewInt(to), // inclusive
Addresses: []common.Address{scrollChainAddr},
Topics: make([][]common.Hash, 1),
}
query.Topics[0] = make([]common.Hash, 1)
query.Topics[0][0] = backendabi.L1CommitBatchEventSignature
logs, err := client.FilterLogs(ctx, query)
if err != nil {
log.Warn("Failed to get batch commit event logs", "err", err)
return err
}
rollupBatches, err := utils.ParseBatchInfoFromScrollChain(ctx, client, logs)
if err != nil {
log.Error("FetchAndSaveBatchIndex: Failed to parse batch commit msg event logs", "err", err)
return err
}
dbTx, err := database.Beginx()
if err != nil {
log.Error("FetchAndSaveBatchIndex: Failed to begin db transaction", "err", err)
return err
}
err = database.BatchInsertRollupBatchDBTx(dbTx, rollupBatches)
if err != nil {
dbTx.Rollback()
log.Crit("FetchAndSaveBatchIndex: Failed to insert batch commit msg event logs", "err", err)
}
err = dbTx.Commit()
if err != nil {
// if we cannot insert into the DB, something must be wrong; an on-call member needs to handle the database manually
dbTx.Rollback()
log.Error("FetchAndSaveBatchIndex: Failed to commit db transaction", "err", err)
return err
}
return nil
}

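FetchAndSaveBatchIndex treats both bounds as inclusive, so a caller scanning a long range would typically chunk it to keep each eth_getLogs query small. A hypothetical caller sketch; fromBlock, tipBlock, and step are illustrative names, not from the repo:

const step = 500
for start := fromBlock; start <= tipBlock; start += step {
	end := start + step - 1
	if end > tipBlock {
		end = tipBlock
	}
	if err := FetchAndSaveBatchIndex(ctx, client, database, start, end, scrollChainAddr); err != nil {
		log.Error("FetchAndSaveBatchIndex failed", "from", start, "to", end, "err", err)
		break
	}
}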
func parseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []msgHashWrapper, []*orm.RelayedMsg, error) {
// Need to use the contract ABI to parse the event logs.
// Can only be tested after we have our contracts set up.

var l2CrossMsg []*orm.CrossMsg
var relayedMsgs []*orm.RelayedMsg
var msgHashes []msgHashWrapper
for _, vlog := range logs {
switch vlog.Topics[0] {
case backendabi.L2WithdrawETHSig:
event := backendabi.DepositETH{}
err := utils.UnpackLog(backendabi.L2ETHGatewayABI, &event, "WithdrawETH", vlog)
if err != nil {
log.Warn("Failed to unpack WithdrawETH event", "err", err)
return l2CrossMsg, msgHashes, relayedMsgs, err
}
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Amount: event.Amount.String(),
Asset: int(orm.ETH),
Layer2Hash: vlog.TxHash.Hex(),
})
case backendabi.L2WithdrawERC20Sig:
event := backendabi.ERC20MessageEvent{}
err := utils.UnpackLog(backendabi.L2StandardERC20GatewayABI, &event, "WithdrawERC20", vlog)
if err != nil {
log.Warn("Failed to unpack WithdrawERC20 event", "err", err)
return l2CrossMsg, msgHashes, relayedMsgs, err
}
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Amount: event.Amount.String(),
Asset: int(orm.ERC20),
Layer2Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
})
case backendabi.L2WithdrawERC721Sig:
event := backendabi.ERC721MessageEvent{}
err := utils.UnpackLog(backendabi.L2ERC721GatewayABI, &event, "WithdrawERC721", vlog)
if err != nil {
log.Warn("Failed to unpack WithdrawERC721 event", "err", err)
return l2CrossMsg, msgHashes, relayedMsgs, err
}
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Asset: int(orm.ERC721),
Layer2Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
TokenID: event.TokenID.Uint64(),
})
case backendabi.L2WithdrawERC1155Sig:
event := backendabi.ERC1155MessageEvent{}
err := utils.UnpackLog(backendabi.L2ERC1155GatewayABI, &event, "WithdrawERC1155", vlog)
if err != nil {
log.Warn("Failed to unpack WithdrawERC1155 event", "err", err)
return l2CrossMsg, msgHashes, relayedMsgs, err
}
l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
Height: vlog.BlockNumber,
Sender: event.From.String(),
Target: event.To.String(),
Asset: int(orm.ERC1155),
Layer2Hash: vlog.TxHash.Hex(),
Layer1Token: event.L1Token.Hex(),
Layer2Token: event.L2Token.Hex(),
TokenID: event.TokenID.Uint64(),
Amount: event.Amount.String(),
})
case backendabi.L2SentMessageEventSignature:
event := backendabi.L2SentMessageEvent{}
err := utils.UnpackLog(backendabi.L2ScrollMessengerABI, &event, "SentMessage", vlog)
if err != nil {
log.Warn("Failed to unpack SentMessage event", "err", err)
return l2CrossMsg, msgHashes, relayedMsgs, err
}
msgHash := utils.ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message)
msgHashes = append(msgHashes, msgHashWrapper{
msgHash: msgHash,
txHash: vlog.TxHash})
case backendabi.L2RelayedMessageEventSignature:
event := backendabi.L2RelayedMessageEvent{}
err := utils.UnpackLog(backendabi.L2ScrollMessengerABI, &event, "RelayedMessage", vlog)
if err != nil {
log.Warn("Failed to unpack RelayedMessage event", "err", err)
return l2CrossMsg, msgHashes, relayedMsgs, err
}
relayedMsgs = append(relayedMsgs, &orm.RelayedMsg{
MsgHash: event.MessageHash.String(),
Height: vlog.BlockNumber,
Layer2Hash: vlog.TxHash.Hex(),
})

}

}
return l2CrossMsg, msgHashes, relayedMsgs, nil
}

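For SentMessage events both parsers derive the cross-chain message hash via utils.ComputeMessageHash. A plausible sketch, under the assumption that the hash is keccak256 over the ABI-encoded relayMessage calldata as in Scroll-style messengers; scrollMessengerABI below is hypothetical and the real encoding may differ:

func ComputeMessageHash(sender, target common.Address, value, messageNonce *big.Int, message []byte) common.Hash {
	// assumes imports: "math/big", "github.com/ethereum/go-ethereum/common",
	// "github.com/ethereum/go-ethereum/crypto"
	data, err := scrollMessengerABI.Pack("relayMessage", sender, target, value, messageNonce, message)
	if err != nil {
		panic(err) // packing static argument types should not fail
	}
	return crypto.Keccak256Hash(data)
}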
func updateL1CrossMsgMsgHash(ctx context.Context, dbTx *sqlx.Tx, database db.OrmFactory, msgHashes []msgHashWrapper) error {
func updateL1CrossMsgMsgHash(ctx context.Context, dbTx *sqlx.Tx, database db.OrmFactory, msgHashes []utils.MsgHashWrapper) error {
for _, msgHash := range msgHashes {
err := database.UpdateL1CrossMsgHashDBTx(ctx, dbTx, msgHash.txHash, msgHash.msgHash)
err := database.UpdateL1CrossMsgHashDBTx(ctx, dbTx, msgHash.TxHash, msgHash.MsgHash)
if err != nil {
log.Error("updateL1CrossMsgMsgHash: can not update layer1 cross msg MsgHash", "layer1 hash", msgHash.txHash, "err", err)
log.Error("updateL1CrossMsgMsgHash: can not update layer1 cross msg MsgHash", "layer1 hash", msgHash.TxHash, "err", err)
continue
}
}
return nil
}

func updateL2CrossMsgMsgHash(ctx context.Context, dbTx *sqlx.Tx, database db.OrmFactory, msgHashes []msgHashWrapper) error {
func updateL2CrossMsgMsgHash(ctx context.Context, dbTx *sqlx.Tx, database db.OrmFactory, msgHashes []utils.MsgHashWrapper) error {
for _, msgHash := range msgHashes {
err := database.UpdateL2CrossMsgHashDBTx(ctx, dbTx, msgHash.txHash, msgHash.msgHash)
err := database.UpdateL2CrossMsgHashDBTx(ctx, dbTx, msgHash.TxHash, msgHash.MsgHash)
if err != nil {
log.Error("updateL2CrossMsgMsgHash: can not update layer2 cross msg MsgHash", "layer2 hash", msgHash.txHash, "err", err)
log.Error("updateL2CrossMsgMsgHash: can not update layer2 cross msg MsgHash", "layer2 hash", msgHash.TxHash, "err", err)
continue
}
}

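The replacement type utils.MsgHashWrapper exports the fields the package-local msgHashWrapper kept unexported. The diff does not show its definition; a minimal version consistent with the call sites above would be:

type MsgHashWrapper struct {
	MsgHash common.Hash
	TxHash  common.Hash
}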
241
bridge-history-api/cross_msg/message_proof/msg_proof_updater.go
Normal file
@@ -0,0 +1,241 @@
package message_proof

import (
"context"
"database/sql"
"errors"
"fmt"
"time"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"

"bridge-history-api/db"
"bridge-history-api/db/orm"
)

type MsgProofUpdater struct {
ctx context.Context
db db.OrmFactory
withdrawTrie *WithdrawTrie
}

func NewMsgProofUpdater(ctx context.Context, confirmations uint64, startBlock uint64, db db.OrmFactory) *MsgProofUpdater {
return &MsgProofUpdater{
ctx: ctx,
db: db,
withdrawTrie: NewWithdrawTrie(),
}
}

func (m *MsgProofUpdater) Start() {
log.Info("MsgProofUpdater Start")
m.initialize(m.ctx)
go func() {
tick := time.NewTicker(10 * time.Second)
for {
select {
case <-m.ctx.Done():
tick.Stop()
return
case <-tick.C:
latestBatch, err := m.db.GetLatestRollupBatch()
if err != nil {
log.Warn("MsgProofUpdater: Can not get latest RollupBatch: ", "err", err)
continue
}
if latestBatch == nil {
continue
}
latestBatchIndexWithProof, err := m.db.GetLatestL2SentMsgBatchIndex()
if err != nil {
log.Error("MsgProofUpdater: Can not get latest L2SentMsgBatchIndex: ", "err", err)
continue
}
var start uint64
if latestBatchIndexWithProof < 0 {
start = 1
} else {
start = uint64(latestBatchIndexWithProof) + 1
}
for i := start; i <= latestBatch.BatchIndex; i++ {
batch, err := m.db.GetRollupBatchByIndex(i)
if err != nil {
log.Error("MsgProofUpdater: Can not get RollupBatch: ", "err", err, "index", i)
break
}
// get all l2 messages in this batch
msgs, proofs, err := m.appendL2Messages(batch.StartBlockNumber, batch.EndBlockNumber)
if err != nil {
log.Error("MsgProofUpdater: can not append l2messages", "startBlockNumber", batch.StartBlockNumber, "endBlockNumber", batch.EndBlockNumber, "err", err)
break
}
err = m.updateMsgProof(msgs, proofs, batch.BatchIndex)
if err != nil {
log.Error("MsgProofUpdater: can not update msg proof", "err", err)
break
}
}

}
}
}()

}

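GetLatestL2SentMsgBatchIndex (defined later in this diff) returns -1 when no proof has been stored yet, so the loop above maps that sentinel to a starting batch index of 1 and otherwise resumes at the first unproven batch. An equivalent compact form, for illustration only:

start := uint64(1)
if latestBatchIndexWithProof >= 0 {
	start = uint64(latestBatchIndexWithProof) + 1
}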
func (m *MsgProofUpdater) Stop() {
log.Info("MsgProofUpdater Stop")
}

func (m *MsgProofUpdater) initialize(ctx context.Context) {
for {
select {
case <-ctx.Done():
return
default:
err := m.initializeWithdrawTrie()
if err != nil {
log.Error("can not initialize withdraw trie", "err", err)
// give it some time to retry
time.Sleep(10 * time.Second)
continue
}
return
}
}
}

func (m *MsgProofUpdater) initializeWithdrawTrie() error {
var batch *orm.RollupBatch
firstMsg, err := m.db.GetL2SentMessageByNonce(0)
if err != nil && !errors.Is(err, sql.ErrNoRows) {
return fmt.Errorf("failed to get first l2 message: %v", err)
}
// no l2 message
// TODO: check whether we really don't have an l2 sent message with nonce 0
if firstMsg == nil {
log.Info("No first l2sentmsg in db")
return nil
}

// if no batch, return and wait for the next retry round
batch, err = m.db.GetLatestRollupBatch()
if err != nil {
return fmt.Errorf("failed to get latest batch: %v", err)
}
if batch == nil {
return fmt.Errorf("no batch found")
}

var batches []*orm.RollupBatch
batchIndex := batch.BatchIndex
for {
var msg *orm.L2SentMsg
msg, err = m.db.GetLatestL2SentMsgLEHeight(batch.EndBlockNumber)
if err != nil {
log.Warn("failed to get l2 sent message less than height", "endBlocknum", batch.EndBlockNumber, "err", err)
}
if msg != nil && msg.MsgProof != "" {
log.Info("Found latest l2 sent msg with proof: ", "msg_proof", msg.MsgProof, "height", msg.Height, "msg_hash", msg.MsgHash)
// initialize withdrawTrie
proofBytes := common.Hex2Bytes(msg.MsgProof)
m.withdrawTrie.Initialize(msg.Nonce, common.HexToHash(msg.MsgHash), proofBytes)
break
}

// append unprocessed batch
batches = append(batches, batch)

if batchIndex == 1 {
// otherwise the index would overflow,
// and batch index 0 is not in the DB
// TODO: check that we don't get a batch with index 0 in the future
break
}
// iterate to the previous batch
batchIndex--

batch, err = m.db.GetRollupBatchByIndex(batchIndex)
if err != nil {
return fmt.Errorf("failed to get block batch %v: %v", batchIndex, err)
}
}

log.Info("Build withdraw trie with pending messages")
for i := len(batches) - 1; i >= 0; i-- {
b := batches[i]
msgs, proofs, err := m.appendL2Messages(b.StartBlockNumber, b.EndBlockNumber)
if err != nil {
return err
}

err = m.updateMsgProof(msgs, proofs, b.BatchIndex)
if err != nil {
return err
}
}
log.Info("Build withdraw trie finished")

return nil
}

func (m *MsgProofUpdater) updateMsgProof(msgs []*orm.L2SentMsg, proofs [][]byte, batchIndex uint64) error {
if len(msgs) == 0 {
return nil
}
// this should not happen, but double-check
if len(msgs) != len(proofs) {
return fmt.Errorf("illegal state: len(msgs) != len(proofs)")
}
dbTx, err := m.db.Beginx()
if err != nil {
return err
}

for i, msg := range msgs {
proofHex := common.Bytes2Hex(proofs[i])
log.Debug("updateMsgProof", "msgHash", msg.MsgHash, "batchIndex", batchIndex, "proof", proofHex)
if dbTxErr := m.db.UpdateL2MessageProofInDBTx(m.ctx, dbTx, msg.MsgHash, proofHex, batchIndex); dbTxErr != nil {
if err := dbTx.Rollback(); err != nil {
log.Error("dbTx.Rollback()", "err", err)
}
return dbTxErr
}
}

if dbTxErr := dbTx.Commit(); dbTxErr != nil {
if err := dbTx.Rollback(); err != nil {
log.Error("dbTx.Rollback()", "err", err)
}
return dbTxErr
}

return nil
}

// appendL2Messages will append all messages between firstBlock and lastBlock (both inclusive) to withdrawTrie and compute the corresponding merkle proof of each message.
func (m *MsgProofUpdater) appendL2Messages(firstBlock, lastBlock uint64) ([]*orm.L2SentMsg, [][]byte, error) {
var msgProofs [][]byte
messages, err := m.db.GetL2SentMsgMsgHashByHeightRange(firstBlock, lastBlock)
if err != nil {
log.Error("GetL2SentMsgMsgHashByHeightRange failed", "error", err, "firstBlock", firstBlock, "lastBlock", lastBlock)
return messages, msgProofs, err
}
if len(messages) == 0 {
return messages, msgProofs, nil
}

// double check whether the nonce matches
if messages[0].Nonce != m.withdrawTrie.NextMessageNonce {
log.Error("L2 message nonce mismatch", "expected", m.withdrawTrie.NextMessageNonce, "found", messages[0].Nonce)
return messages, msgProofs, fmt.Errorf("l2 message nonce mismatch, expected: %v, found: %v", m.withdrawTrie.NextMessageNonce, messages[0].Nonce)
}

var hashes []common.Hash
for _, msg := range messages {
hashes = append(hashes, common.HexToHash(msg.MsgHash))
}
msgProofs = m.withdrawTrie.AppendMessages(hashes)

return messages, msgProofs, nil
}
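As the comment on appendL2Messages says, each call appends a contiguous, nonce-ordered run of message hashes and gets back one encoded merkle proof per message. Illustrative composition of the trie API used above; h0 and h1 are placeholder hashes:

trie := NewWithdrawTrie()
proofs := trie.AppendMessages([]common.Hash{h0, h1}) // one encoded proof per appended hash
root := trie.MessageRoot()                           // withdraw root after both appends
_ = proofs
_ = root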
@@ -1,4 +1,4 @@
package cross_msg
package message_proof

import (
"github.com/ethereum/go-ethereum/common"
@@ -42,7 +42,6 @@ func NewWithdrawTrie() *WithdrawTrie {
func (w *WithdrawTrie) Initialize(currentMessageNonce uint64, msgHash common.Hash, proofBytes []byte) {
proof := DecodeBytesToMerkleProof(proofBytes)
branches := RecoverBranchFromProof(proof, currentMessageNonce, msgHash)

w.height = len(proof)
w.branches = branches
w.NextMessageNonce = currentMessageNonce + 1
@@ -1,4 +1,4 @@
package cross_msg_test
package message_proof

import (
"math/big"
@@ -7,7 +7,6 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/assert"

"bridge-history-api/cross_msg"
"bridge-history-api/utils"
)

@@ -19,29 +18,29 @@ func TestUpdateBranchWithNewMessage(t *testing.T) {
zeroes[i] = utils.Keccak2(zeroes[i-1], zeroes[i-1])
}

cross_msg.UpdateBranchWithNewMessage(zeroes, branches, 0, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"))
UpdateBranchWithNewMessage(zeroes, branches, 0, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"))
if branches[0] != common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001") {
t.Fatalf("Invalid root, want %s, got %s", "0x0000000000000000000000000000000000000000000000000000000000000001", branches[0].Hex())
}

cross_msg.UpdateBranchWithNewMessage(zeroes, branches, 1, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000002"))
UpdateBranchWithNewMessage(zeroes, branches, 1, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000002"))
if branches[1] != common.HexToHash("0xe90b7bceb6e7df5418fb78d8ee546e97c83a08bbccc01a0644d599ccd2a7c2e0") {
t.Fatalf("Invalid root, want %s, got %s", "0xe90b7bceb6e7df5418fb78d8ee546e97c83a08bbccc01a0644d599ccd2a7c2e0", branches[1].Hex())
}

cross_msg.UpdateBranchWithNewMessage(zeroes, branches, 2, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000003"))
UpdateBranchWithNewMessage(zeroes, branches, 2, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000003"))
if branches[2] != common.HexToHash("0x222ff5e0b5877792c2bc1670e2ccd0c2c97cd7bb1672a57d598db05092d3d72c") {
t.Fatalf("Invalid root, want %s, got %s", "0x222ff5e0b5877792c2bc1670e2ccd0c2c97cd7bb1672a57d598db05092d3d72c", branches[2].Hex())
}

cross_msg.UpdateBranchWithNewMessage(zeroes, branches, 3, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000004"))
UpdateBranchWithNewMessage(zeroes, branches, 3, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000004"))
if branches[2] != common.HexToHash("0xa9bb8c3f1f12e9aa903a50c47f314b57610a3ab32f2d463293f58836def38d36") {
t.Fatalf("Invalid root, want %s, got %s", "0xa9bb8c3f1f12e9aa903a50c47f314b57610a3ab32f2d463293f58836def38d36", branches[2].Hex())
}
}

func TestDecodeEncodeMerkleProof(t *testing.T) {
proof := cross_msg.DecodeBytesToMerkleProof(common.Hex2Bytes("2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49012ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49022ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49032ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4904"))
proof := DecodeBytesToMerkleProof(common.Hex2Bytes("2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49012ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49022ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49032ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4904"))
if len(proof) != 4 {
t.Fatalf("proof length mismatch, want %d, got %d", 4, len(proof))
}
@@ -58,7 +57,7 @@ func TestDecodeEncodeMerkleProof(t *testing.T) {
t.Fatalf("proof[3] mismatch, want %s, got %s", "0x2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4904", proof[0].Hex())
}

bytes := cross_msg.EncodeMerkleProofToBytes(proof)
bytes := EncodeMerkleProofToBytes(proof)
if common.Bytes2Hex(bytes) != "2ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49012ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49022ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d49032ebffc1a6671c51e30777a680904b103992630ec995b6e6ff76a04d5259d4904" {
t.Fatalf("wrong encoded bytes")
}
@@ -72,32 +71,32 @@ func TestRecoverBranchFromProof(t *testing.T) {
zeroes[i] = utils.Keccak2(zeroes[i-1], zeroes[i-1])
}

proof := cross_msg.UpdateBranchWithNewMessage(zeroes, branches, 0, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"))
tmpBranches := cross_msg.RecoverBranchFromProof(proof, 0, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"))
proof := UpdateBranchWithNewMessage(zeroes, branches, 0, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"))
tmpBranches := RecoverBranchFromProof(proof, 0, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"))
for i := 0; i < 64; i++ {
if tmpBranches[i] != branches[i] {
t.Fatalf("Invalid branch, want %s, got %s", branches[i].Hex(), tmpBranches[i].Hex())
}
}

proof = cross_msg.UpdateBranchWithNewMessage(zeroes, branches, 1, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000002"))
tmpBranches = cross_msg.RecoverBranchFromProof(proof, 1, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000002"))
proof = UpdateBranchWithNewMessage(zeroes, branches, 1, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000002"))
tmpBranches = RecoverBranchFromProof(proof, 1, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000002"))
for i := 0; i < 64; i++ {
if tmpBranches[i] != branches[i] {
t.Fatalf("Invalid branch, want %s, got %s", branches[i].Hex(), tmpBranches[i].Hex())
}
}

proof = cross_msg.UpdateBranchWithNewMessage(zeroes, branches, 2, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000003"))
tmpBranches = cross_msg.RecoverBranchFromProof(proof, 2, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000003"))
proof = UpdateBranchWithNewMessage(zeroes, branches, 2, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000003"))
tmpBranches = RecoverBranchFromProof(proof, 2, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000003"))
for i := 0; i < 64; i++ {
if tmpBranches[i] != branches[i] {
t.Fatalf("Invalid branch, want %s, got %s", branches[i].Hex(), tmpBranches[i].Hex())
}
}

proof = cross_msg.UpdateBranchWithNewMessage(zeroes, branches, 3, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000004"))
tmpBranches = cross_msg.RecoverBranchFromProof(proof, 3, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000004"))
proof = UpdateBranchWithNewMessage(zeroes, branches, 3, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000004"))
tmpBranches = RecoverBranchFromProof(proof, 3, common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000004"))
for i := 0; i < 64; i++ {
if tmpBranches[i] != branches[i] {
t.Fatalf("Invalid branch, want %s, got %s", branches[i].Hex(), tmpBranches[i].Hex())
@@ -107,7 +106,7 @@ func TestRecoverBranchFromProof(t *testing.T) {

func TestWithdrawTrieOneByOne(t *testing.T) {
for initial := 0; initial < 128; initial++ {
withdrawTrie := cross_msg.NewWithdrawTrie()
withdrawTrie := NewWithdrawTrie()
var hashes []common.Hash
for i := 0; i < initial; i++ {
hash := common.BigToHash(big.NewInt(int64(i + 1)))
@@ -126,7 +125,7 @@ func TestWithdrawTrieOneByOne(t *testing.T) {
})
assert.Equal(t, withdrawTrie.NextMessageNonce, uint64(i+1))
assert.Equal(t, expectedRoot.String(), withdrawTrie.MessageRoot().String())
proof := cross_msg.DecodeBytesToMerkleProof(proofBytes[0])
proof := DecodeBytesToMerkleProof(proofBytes[0])
verifiedRoot := verifyMerkleProof(uint64(i), hash, proof)
assert.Equal(t, expectedRoot.String(), verifiedRoot.String())
}
@@ -153,7 +152,7 @@ func TestWithdrawTrieMultiple(t *testing.T) {
}

for finish := initial; finish < 100; finish++ {
withdrawTrie := cross_msg.NewWithdrawTrie()
withdrawTrie := NewWithdrawTrie()
withdrawTrie.AppendMessages(hashes)

var newHashes []common.Hash
@@ -167,7 +166,7 @@

for i := initial; i <= finish; i++ {
hash := common.BigToHash(big.NewInt(int64(i + 1)))
proof := cross_msg.DecodeBytesToMerkleProof(proofBytes[i-initial])
proof := DecodeBytesToMerkleProof(proofBytes[i-initial])
verifiedRoot := verifyMerkleProof(uint64(i), hash, proof)
assert.Equal(t, expectedRoots[finish].String(), verifiedRoot.String())
}
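The tests call a verifyMerkleProof helper that this diff does not include. A sketch consistent with its call sites, folding the leaf up through its siblings with the same utils.Keccak2 used above; assumed, not the repo's exact code:

func verifyMerkleProof(index uint64, leaf common.Hash, proof []common.Hash) common.Hash {
	root := leaf
	for _, sibling := range proof {
		if index%2 == 0 {
			root = utils.Keccak2(root, sibling) // current node is a left child
		} else {
			root = utils.Keccak2(sibling, root) // current node is a right child
		}
		index /= 2
	}
	return root
}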
@@ -94,6 +94,11 @@ func L2ReorgHandling(ctx context.Context, reorgHeight int64, db db.OrmFactory) e
dbTx.Rollback()
log.Crit("delete l2 relayed hash from height", "height", reorgHeight, "err", err)
}
err = db.DeleteL2SentMsgAfterHeightDBTx(dbTx, reorgHeight)
if err != nil {
dbTx.Rollback()
log.Crit("delete l2 sent msg from height", "height", reorgHeight, "err", err)
}
err = dbTx.Commit()
if err != nil {
dbTx.Rollback()

@@ -49,20 +49,20 @@ CREATE TRIGGER update_timestamp BEFORE UPDATE
ON cross_message FOR EACH ROW EXECUTE PROCEDURE
update_timestamp();

CREATE OR REPLACE FUNCTION delete_at_trigger()
CREATE OR REPLACE FUNCTION deleted_at_trigger()
RETURNS TRIGGER AS $$
BEGIN
IF NEW.is_deleted AND OLD.is_deleted != NEW.is_deleted THEN
UPDATE cross_message SET delete_at = NOW() WHERE id = NEW.id;
UPDATE cross_message SET deleted_at = NOW() WHERE id = NEW.id;
END IF;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;

CREATE TRIGGER delete_at_trigger
CREATE TRIGGER deleted_at_trigger
AFTER UPDATE ON cross_message
FOR EACH ROW
EXECUTE FUNCTION delete_at_trigger();
EXECUTE FUNCTION deleted_at_trigger();


-- +goose StatementEnd

@@ -31,20 +31,20 @@ CREATE TRIGGER update_timestamp BEFORE UPDATE
ON relayed_msg FOR EACH ROW EXECUTE PROCEDURE
update_timestamp();

CREATE OR REPLACE FUNCTION delete_at_trigger()
CREATE OR REPLACE FUNCTION deleted_at_trigger()
RETURNS TRIGGER AS $$
BEGIN
IF NEW.is_deleted AND OLD.is_deleted != NEW.is_deleted THEN
UPDATE relayed_msg SET delete_at = NOW() WHERE id = NEW.id;
UPDATE relayed_msg SET deleted_at = NOW() WHERE id = NEW.id;
END IF;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;

CREATE TRIGGER delete_at_trigger
CREATE TRIGGER deleted_at_trigger
AFTER UPDATE ON relayed_msg
FOR EACH ROW
EXECUTE FUNCTION delete_at_trigger();
EXECUTE FUNCTION deleted_at_trigger();


-- +goose StatementEnd

@@ -0,0 +1,57 @@
-- +goose Up
-- +goose StatementBegin
create table l2_sent_msg
(
id BIGSERIAL PRIMARY KEY,
sender VARCHAR NOT NULL,
target VARCHAR NOT NULL,
value VARCHAR NOT NULL,
msg_hash VARCHAR NOT NULL,
height BIGINT NOT NULL,
nonce BIGINT NOT NULL,
batch_index BIGINT NOT NULL DEFAULT 0,
msg_proof TEXT NOT NULL DEFAULT '',
msg_data TEXT NOT NULL DEFAULT '',
is_deleted BOOLEAN NOT NULL DEFAULT FALSE,
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
deleted_at TIMESTAMP(0) DEFAULT NULL
);

comment
on column l2_sent_msg.is_deleted is 'NotDeleted, Deleted';

CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$
BEGIN
NEW.updated_at = CURRENT_TIMESTAMP;
RETURN NEW;
END;
$$ language 'plpgsql';

CREATE TRIGGER update_timestamp BEFORE UPDATE
ON l2_sent_msg FOR EACH ROW EXECUTE PROCEDURE
update_timestamp();

CREATE OR REPLACE FUNCTION deleted_at_trigger()
RETURNS TRIGGER AS $$
BEGIN
IF NEW.is_deleted AND OLD.is_deleted != NEW.is_deleted THEN
UPDATE l2_sent_msg SET deleted_at = NOW() WHERE id = NEW.id;
END IF;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;

CREATE TRIGGER deleted_at_trigger
AFTER UPDATE ON l2_sent_msg
FOR EACH ROW
EXECUTE FUNCTION deleted_at_trigger();


-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
drop table if exists l2_sent_msg;
-- +goose StatementEnd
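These migrations pair a BEFORE UPDATE trigger that refreshes updated_at with an AFTER UPDATE trigger that stamps deleted_at when is_deleted flips to true. The ORM's soft-delete helpers rely on exactly that; a hedged Go sketch mirroring DeleteL2SentMsgAfterHeightDBTx (shown later in this diff):

// Illustrative: flipping is_deleted to true fires deleted_at_trigger,
// which sets deleted_at = NOW() for each affected row.
_, err := dbTx.Exec(`UPDATE l2_sent_msg SET is_deleted = true WHERE height > $1;`, reorgHeight)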
@@ -0,0 +1,49 @@
-- +goose Up
-- +goose StatementBegin
create table rollup_batch
(
id BIGSERIAL PRIMARY KEY,
batch_index BIGINT NOT NULL,
commit_height BIGINT NOT NULL,
start_block_number BIGINT NOT NULL,
end_block_number BIGINT NOT NULL,
batch_hash VARCHAR NOT NULL,
is_deleted BOOLEAN NOT NULL DEFAULT FALSE,
created_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
deleted_at TIMESTAMP(0) DEFAULT NULL
);

CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$
BEGIN
NEW.updated_at = CURRENT_TIMESTAMP;
RETURN NEW;
END;
$$ language 'plpgsql';

CREATE TRIGGER update_timestamp BEFORE UPDATE
ON rollup_batch FOR EACH ROW EXECUTE PROCEDURE
update_timestamp();

CREATE OR REPLACE FUNCTION deleted_at_trigger()
RETURNS TRIGGER AS $$
BEGIN
IF NEW.is_deleted AND OLD.is_deleted != NEW.is_deleted THEN
UPDATE rollup_batch SET deleted_at = NOW() WHERE id = NEW.id;
END IF;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;

CREATE TRIGGER deleted_at_trigger
AFTER UPDATE ON rollup_batch
FOR EACH ROW
EXECUTE FUNCTION deleted_at_trigger();

-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
drop table if exists rollup_batch;
-- +goose StatementEnd
79
bridge-history-api/db/orm/batch.go
Normal file
@@ -0,0 +1,79 @@
package orm

import (
"database/sql"
"fmt"

"github.com/ethereum/go-ethereum/log"
"github.com/jmoiron/sqlx"
)

type rollupBatchOrm struct {
db *sqlx.DB
}

type RollupBatch struct {
ID uint64 `json:"id" db:"id"`
BatchIndex uint64 `json:"batch_index" db:"batch_index"`
BatchHash string `json:"batch_hash" db:"batch_hash"`
CommitHeight uint64 `json:"commit_height" db:"commit_height"`
StartBlockNumber uint64 `json:"start_block_number" db:"start_block_number"`
EndBlockNumber uint64 `json:"end_block_number" db:"end_block_number"`
}

// NewRollupBatchOrm creates a RollupBatchOrm instance
func NewRollupBatchOrm(db *sqlx.DB) RollupBatchOrm {
return &rollupBatchOrm{db: db}
}

func (b *rollupBatchOrm) BatchInsertRollupBatchDBTx(dbTx *sqlx.Tx, batches []*RollupBatch) error {
if len(batches) == 0 {
return nil
}
var err error
batchMaps := make([]map[string]interface{}, len(batches))
for i, batch := range batches {
batchMaps[i] = map[string]interface{}{
"commit_height": batch.CommitHeight,
"batch_index": batch.BatchIndex,
"batch_hash": batch.BatchHash,
"start_block_number": batch.StartBlockNumber,
"end_block_number": batch.EndBlockNumber,
}
var exists bool
err = dbTx.QueryRow(`SELECT EXISTS(SELECT 1 FROM rollup_batch WHERE batch_index = $1 AND NOT is_deleted)`, batch.BatchIndex).Scan(&exists)
if err != nil {
return err
}
if exists {
return fmt.Errorf("BatchInsertRollupBatchDBTx: batch index %v already exists at height %v", batch.BatchIndex, batch.CommitHeight)
}
}
_, err = dbTx.NamedExec(`insert into rollup_batch(commit_height, batch_index, batch_hash, start_block_number, end_block_number) values(:commit_height, :batch_index, :batch_hash, :start_block_number, :end_block_number);`, batchMaps)
if err != nil {
log.Error("BatchInsertRollupBatchDBTx: failed to insert batch event msgs", "err", err)
return err
}
return nil
}

func (b *rollupBatchOrm) GetLatestRollupBatch() (*RollupBatch, error) {
result := &RollupBatch{}
row := b.db.QueryRowx(`SELECT id, batch_index, commit_height, batch_hash, start_block_number, end_block_number FROM rollup_batch ORDER BY batch_index DESC LIMIT 1;`)
if err := row.StructScan(result); err != nil {
if err == sql.ErrNoRows {
return nil, nil
}
return nil, err
}
return result, nil
}

func (b *rollupBatchOrm) GetRollupBatchByIndex(index uint64) (*RollupBatch, error) {
result := &RollupBatch{}
row := b.db.QueryRowx(`SELECT id, batch_index, batch_hash, commit_height, start_block_number, end_block_number FROM rollup_batch WHERE batch_index = $1;`, index)
if err := row.StructScan(result); err != nil {
return nil, err
}
return result, nil
}
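Note the asymmetry between the two getters: GetLatestRollupBatch maps sql.ErrNoRows to (nil, nil), while GetRollupBatchByIndex returns the error unchanged. A small wrapper, illustrative only, that gives lookups by index the same nil-on-missing behavior:

func getRollupBatchOrNil(db RollupBatchOrm, index uint64) (*RollupBatch, error) {
	// assumes imports: "database/sql", "errors"
	batch, err := db.GetRollupBatchByIndex(index)
	if errors.Is(err, sql.ErrNoRows) {
		return nil, nil // batch not committed yet
	}
	return batch, err
}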
@@ -72,27 +72,27 @@ type L1CrossMsgOrm interface {
GetL1CrossMsgByHash(l1Hash common.Hash) (*CrossMsg, error)
GetL1CrossMsgsByAddress(sender common.Address) ([]*CrossMsg, error)
BatchInsertL1CrossMsgDBTx(dbTx *sqlx.Tx, messages []*CrossMsg) error
// UpdateL1CrossMsgHash invoked when SentMessage event is received
// UpdateL1CrossMsgHashDBTx invoked when SentMessage event is received
UpdateL1CrossMsgHashDBTx(ctx context.Context, dbTx *sqlx.Tx, l1Hash, msgHash common.Hash) error
UpdateL1CrossMsgHash(ctx context.Context, l1Hash, msgHash common.Hash) error
GetLatestL1ProcessedHeight() (int64, error)
DeleteL1CrossMsgAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error
UpdateL1Blocktimestamp(height uint64, timestamp time.Time) error
GetL1EarliestNoBlocktimestampHeight() (uint64, error)
UpdateL1BlockTimestamp(height uint64, timestamp time.Time) error
GetL1EarliestNoBlockTimestampHeight() (uint64, error)
}

// L2CrossMsgOrm provides operations on l2_cross_message table
// L2CrossMsgOrm provides operations on cross_message table
type L2CrossMsgOrm interface {
GetL2CrossMsgByHash(l2Hash common.Hash) (*CrossMsg, error)
GetL2CrossMsgByAddress(sender common.Address) ([]*CrossMsg, error)
BatchInsertL2CrossMsgDBTx(dbTx *sqlx.Tx, messages []*CrossMsg) error
// UpdateL2CrossMsgHash invoked when SentMessage event is received
// UpdateL2CrossMsgHashDBTx invoked when SentMessage event is received
UpdateL2CrossMsgHashDBTx(ctx context.Context, dbTx *sqlx.Tx, l2Hash, msgHash common.Hash) error
UpdateL2CrossMsgHash(ctx context.Context, l2Hash, msgHash common.Hash) error
GetLatestL2ProcessedHeight() (int64, error)
DeleteL2CrossMsgFromHeightDBTx(dbTx *sqlx.Tx, height int64) error
UpdateL2Blocktimestamp(height uint64, timestamp time.Time) error
GetL2EarliestNoBlocktimestampHeight() (uint64, error)
UpdateL2BlockTimestamp(height uint64, timestamp time.Time) error
GetL2EarliestNoBlockTimestampHeight() (uint64, error)
}

type RelayedMsgOrm interface {
@@ -103,3 +103,21 @@ type RelayedMsgOrm interface {
DeleteL1RelayedHashAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error
DeleteL2RelayedHashAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error
}

type L2SentMsgOrm interface {
BatchInsertL2SentMsgDBTx(dbTx *sqlx.Tx, messages []*L2SentMsg) error
GetL2SentMsgByHash(l2Hash string) (*L2SentMsg, error)
GetLatestSentMsgHeightOnL2() (int64, error)
GetL2SentMessageByNonce(nonce uint64) (*L2SentMsg, error)
GetLatestL2SentMsgLEHeight(endBlockNumber uint64) (*L2SentMsg, error)
GetL2SentMsgMsgHashByHeightRange(startHeight, endHeight uint64) ([]*L2SentMsg, error)
UpdateL2MessageProofInDBTx(ctx context.Context, dbTx *sqlx.Tx, msgHash string, proof string, batch_index uint64) error
GetLatestL2SentMsgBatchIndex() (int64, error)
DeleteL2SentMsgAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error
}

type RollupBatchOrm interface {
GetLatestRollupBatch() (*RollupBatch, error)
GetRollupBatchByIndex(index uint64) (*RollupBatch, error)
BatchInsertRollupBatchDBTx(dbTx *sqlx.Tx, messages []*RollupBatch) error
}

@@ -4,6 +4,7 @@ import (
"context"
"database/sql"
"errors"
"fmt"
"time"

"github.com/ethereum/go-ethereum/common"
@@ -71,14 +72,22 @@ func (l *l1CrossMsgOrm) BatchInsertL1CrossMsgDBTx(dbTx *sqlx.Tx, messages []*Cro
"token_id": msg.TokenID,
"msg_type": Layer1Msg,
}

_, err = dbTx.NamedExec(`insert into cross_message(height, sender, target, asset, layer1_hash, layer1_token, layer2_token, token_id, amount, msg_type) select :height, :sender, :target, :asset, :layer1_hash, :layer1_token, :layer2_token, :token_id, :amount, :msg_type WHERE NOT EXISTS (SELECT 1 FROM cross_message WHERE layer1_hash = :layer1_hash AND NOT is_deleted);`, messageMaps[i])
var exists bool
err = dbTx.QueryRow(`SELECT EXISTS(SELECT 1 FROM cross_message WHERE layer1_hash = $1 AND NOT is_deleted)`, msg.Layer1Hash).Scan(&exists)
if err != nil {
log.Error("BatchInsertL1CrossMsgDBTx: failed to insert l1 cross msgs", "l1hashes", msg.Layer1Hash, "heights", msg.Height, "err", err)
break
return err
}
if exists {
return fmt.Errorf("BatchInsertL1CrossMsgDBTx: l1 cross msg layer1Hash %v already exists at height %v", msg.Layer1Hash, msg.Height)
}
}
return err
_, err = dbTx.NamedExec(`insert into cross_message(height, sender, target, asset, layer1_hash, layer1_token, layer2_token, token_id, amount, msg_type) values(:height, :sender, :target, :asset, :layer1_hash, :layer1_token, :layer2_token, :token_id, :amount, :msg_type);`, messageMaps)
if err != nil {
log.Error("BatchInsertL1CrossMsgDBTx: failed to insert l1 cross msgs", "err", err)
return err
}

return nil
}

// UpdateL1CrossMsgHashDBTx update l1 cross msg hash in db, no need to check msg_type since layer1_hash wont be empty if its layer1 msg
@@ -120,14 +129,14 @@ func (l *l1CrossMsgOrm) DeleteL1CrossMsgAfterHeightDBTx(dbTx *sqlx.Tx, height in
return nil
}

func (l *l1CrossMsgOrm) UpdateL1Blocktimestamp(height uint64, timestamp time.Time) error {
func (l *l1CrossMsgOrm) UpdateL1BlockTimestamp(height uint64, timestamp time.Time) error {
if _, err := l.db.Exec(`UPDATE cross_message SET block_timestamp = $1 where height = $2 AND msg_type = $3 AND NOT is_deleted`, timestamp, height, Layer1Msg); err != nil {
return err
}
return nil
}

func (l *l1CrossMsgOrm) GetL1EarliestNoBlocktimestampHeight() (uint64, error) {
func (l *l1CrossMsgOrm) GetL1EarliestNoBlockTimestampHeight() (uint64, error) {
row := l.db.QueryRowx(`SELECT height FROM cross_message WHERE block_timestamp IS NULL AND msg_type = $1 AND NOT is_deleted ORDER BY height ASC LIMIT 1;`, Layer1Msg)
var result uint64
if err := row.Scan(&result); err != nil {

@@ -4,6 +4,7 @@ import (
"context"
"database/sql"
"errors"
"fmt"
"time"

"github.com/ethereum/go-ethereum/common"
@@ -22,7 +23,7 @@ func NewL2CrossMsgOrm(db *sqlx.DB) L2CrossMsgOrm {

func (l *l2CrossMsgOrm) GetL2CrossMsgByHash(l2Hash common.Hash) (*CrossMsg, error) {
result := &CrossMsg{}
row := l.db.QueryRowx(`SELECT * FROM l2_cross_message WHERE layer2_hash = $1 AND NOT is_deleted;`, l2Hash.String())
row := l.db.QueryRowx(`SELECT * FROM cross_message WHERE layer2_hash = $1 AND NOT is_deleted;`, l2Hash.String())
if err := row.StructScan(result); err != nil {
if errors.Is(err, sql.ErrNoRows) {
return nil, nil
@@ -32,7 +33,7 @@ func (l *l2CrossMsgOrm) GetL2CrossMsgByHash(l2Hash common.Hash) (*CrossMsg, erro
return result, nil
}

// GetL2CrossMsgsByAddress returns all layer2 cross messages under given address
// GetL2CrossMsgByAddress returns all layer2 cross messages under given address
// Warning: return empty slice if no data found
func (l *l2CrossMsgOrm) GetL2CrossMsgByAddress(sender common.Address) ([]*CrossMsg, error) {
var results []*CrossMsg
@@ -84,14 +85,21 @@ func (l *l2CrossMsgOrm) BatchInsertL2CrossMsgDBTx(dbTx *sqlx.Tx, messages []*Cro
"amount": msg.Amount,
"msg_type": Layer2Msg,
}

_, err = dbTx.NamedExec(`insert into cross_message(height, sender, target, asset, layer2_hash, layer1_token, layer2_token, token_id, amount, msg_type) select :height, :sender, :target, :asset, :layer2_hash, :layer1_token, :layer2_token, :token_id, :amount, :msg_type WHERE NOT EXISTS (SELECT 1 FROM cross_message WHERE layer2_hash = :layer2_hash AND NOT is_deleted);`, messageMaps[i])
var exists bool
err = dbTx.QueryRow(`SELECT EXISTS(SELECT 1 FROM cross_message WHERE layer2_hash = $1 AND NOT is_deleted)`, msg.Layer2Hash).Scan(&exists)
if err != nil {
log.Error("BatchInsertL2CrossMsgDBTx: failed to insert l2 cross msgs", "layer2hash", msg.Layer2Hash, "heights", msg.Height, "err", err)
break
return err
}
if exists {
return fmt.Errorf("BatchInsertL2CrossMsgDBTx: l2 cross msg layer2Hash %v already exists at height %v", msg.Layer2Hash, msg.Height)
}
}
return err
_, err = dbTx.NamedExec(`insert into cross_message(height, sender, target, asset, layer2_hash, layer1_token, layer2_token, token_id, amount, msg_type) values(:height, :sender, :target, :asset, :layer2_hash, :layer1_token, :layer2_token, :token_id, :amount, :msg_type);`, messageMaps)
if err != nil {
log.Error("BatchInsertL2CrossMsgDBTx: failed to insert l2 cross msgs", "err", err)
return err
}
return nil
}

func (l *l2CrossMsgOrm) UpdateL2CrossMsgHashDBTx(ctx context.Context, dbTx *sqlx.Tx, l2Hash, msgHash common.Hash) error {
@@ -123,14 +131,14 @@ func (l *l2CrossMsgOrm) GetLatestL2ProcessedHeight() (int64, error) {
return 0, nil
}

func (l *l2CrossMsgOrm) UpdateL2Blocktimestamp(height uint64, timestamp time.Time) error {
func (l *l2CrossMsgOrm) UpdateL2BlockTimestamp(height uint64, timestamp time.Time) error {
if _, err := l.db.Exec(`UPDATE cross_message SET block_timestamp = $1 where height = $2 AND msg_type = $3 AND NOT is_deleted`, timestamp, height, Layer2Msg); err != nil {
return err
}
return nil
}

func (l *l2CrossMsgOrm) GetL2EarliestNoBlocktimestampHeight() (uint64, error) {
func (l *l2CrossMsgOrm) GetL2EarliestNoBlockTimestampHeight() (uint64, error) {
row := l.db.QueryRowx(`SELECT height FROM cross_message WHERE block_timestamp IS NULL AND msg_type = $1 AND NOT is_deleted ORDER BY height ASC LIMIT 1;`, Layer2Msg)
var result uint64
if err := row.Scan(&result); err != nil {

159
bridge-history-api/db/orm/l2_sent_msg.go
Normal file
@@ -0,0 +1,159 @@
package orm

import (
"context"
"database/sql"
"fmt"
"time"

"github.com/ethereum/go-ethereum/log"
"github.com/jmoiron/sqlx"
)

type L2SentMsg struct {
ID uint64 `json:"id" db:"id"`
MsgHash string `json:"msg_hash" db:"msg_hash"`
Sender string `json:"sender" db:"sender"`
Target string `json:"target" db:"target"`
Value string `json:"value" db:"value"`
Height uint64 `json:"height" db:"height"`
Nonce uint64 `json:"nonce" db:"nonce"`
BatchIndex uint64 `json:"batch_index" db:"batch_index"`
MsgProof string `json:"msg_proof" db:"msg_proof"`
MsgData string `json:"msg_data" db:"msg_data"`
IsDeleted bool `json:"is_deleted" db:"is_deleted"`
CreatedAt *time.Time `json:"created_at" db:"created_at"`
UpdatedAt *time.Time `json:"updated_at" db:"updated_at"`
DeletedAt *time.Time `json:"deleted_at" db:"deleted_at"`
}

type l2SentMsgOrm struct {
db *sqlx.DB
}

// NewL2SentMsgOrm creates an L2SentMsgOrm instance
func NewL2SentMsgOrm(db *sqlx.DB) L2SentMsgOrm {
return &l2SentMsgOrm{db: db}
}

func (l *l2SentMsgOrm) GetL2SentMsgByHash(msgHash string) (*L2SentMsg, error) {
result := &L2SentMsg{}
row := l.db.QueryRowx(`SELECT * FROM l2_sent_msg WHERE msg_hash = $1 AND NOT is_deleted;`, msgHash)
if err := row.StructScan(result); err != nil {
return nil, err
}
return result, nil
}

func (l *l2SentMsgOrm) BatchInsertL2SentMsgDBTx(dbTx *sqlx.Tx, messages []*L2SentMsg) error {
if len(messages) == 0 {
return nil
}
var err error
messageMaps := make([]map[string]interface{}, len(messages))
for i, msg := range messages {
messageMaps[i] = map[string]interface{}{
"sender": msg.Sender,
"target": msg.Target,
"value": msg.Value,
"msg_hash": msg.MsgHash,
"height": msg.Height,
"nonce": msg.Nonce,
"batch_index": msg.BatchIndex,
"msg_proof": msg.MsgProof,
"msg_data": msg.MsgData,
}
var exists bool
err = dbTx.QueryRow(`SELECT EXISTS(SELECT 1 FROM l2_sent_msg WHERE (msg_hash = $1 OR nonce = $2) AND NOT is_deleted)`, msg.MsgHash, msg.Nonce).Scan(&exists)
if err != nil {
return err
}
if exists {
return fmt.Errorf("BatchInsertL2SentMsgDBTx: l2 sent msg_hash %v already exists at height %v", msg.MsgHash, msg.Height)
}
}
_, err = dbTx.NamedExec(`insert into l2_sent_msg(sender, target, value, msg_hash, height, nonce, batch_index, msg_proof, msg_data) values(:sender, :target, :value, :msg_hash, :height, :nonce, :batch_index, :msg_proof, :msg_data);`, messageMaps)
if err != nil {
log.Error("BatchInsertL2SentMsgDBTx: failed to insert l2 sent msgs", "err", err)
return err
}
return err
}

func (l *l2SentMsgOrm) GetLatestSentMsgHeightOnL2() (int64, error) {
row := l.db.QueryRow(`SELECT height FROM l2_sent_msg WHERE NOT is_deleted ORDER BY nonce DESC LIMIT 1;`)
var result sql.NullInt64
if err := row.Scan(&result); err != nil {
if err == sql.ErrNoRows || !result.Valid {
return -1, nil
}
return 0, err
}
if result.Valid {
return result.Int64, nil
}
return 0, nil
}

func (l *l2SentMsgOrm) UpdateL2MessageProofInDBTx(ctx context.Context, dbTx *sqlx.Tx, msgHash string, proof string, batch_index uint64) error {
if _, err := dbTx.ExecContext(ctx, l.db.Rebind("update l2_sent_msg set msg_proof = ?, batch_index = ? where msg_hash = ? AND NOT is_deleted;"), proof, batch_index, msgHash); err != nil {
return err
}
return nil
}

func (l *l2SentMsgOrm) GetLatestL2SentMsgBatchIndex() (int64, error) {
row := l.db.QueryRow(`SELECT batch_index FROM l2_sent_msg WHERE msg_proof != '' AND NOT is_deleted ORDER BY batch_index DESC LIMIT 1;`)
var result sql.NullInt64
if err := row.Scan(&result); err != nil {
if err == sql.ErrNoRows || !result.Valid {
return -1, nil
}
return -1, err
}
if result.Valid {
return result.Int64, nil
}
return -1, nil
}

func (l *l2SentMsgOrm) GetL2SentMsgMsgHashByHeightRange(startHeight, endHeight uint64) ([]*L2SentMsg, error) {
var results []*L2SentMsg
rows, err := l.db.Queryx(`SELECT * FROM l2_sent_msg WHERE height >= $1 AND height <= $2 AND NOT is_deleted ORDER BY nonce ASC;`, startHeight, endHeight)
if err != nil {
return nil, err
}
for rows.Next() {
msg := &L2SentMsg{}
if err = rows.StructScan(msg); err != nil {
break
}
results = append(results, msg)
}
return results, err
}

func (l *l2SentMsgOrm) GetL2SentMessageByNonce(nonce uint64) (*L2SentMsg, error) {
result := &L2SentMsg{}
row := l.db.QueryRowx(`SELECT * FROM l2_sent_msg WHERE nonce = $1 AND NOT is_deleted;`, nonce)
err := row.StructScan(result)
if err != nil {
return nil, err
}
return result, nil
}

func (l *l2SentMsgOrm) GetLatestL2SentMsgLEHeight(endBlockNumber uint64) (*L2SentMsg, error) {
result := &L2SentMsg{}
row := l.db.QueryRowx(`select * from l2_sent_msg where height <= $1 AND NOT is_deleted order by nonce desc limit 1`, endBlockNumber)
err := row.StructScan(result)
if err != nil {
return nil, err
}
return result, nil
}

func (l *l2SentMsgOrm) DeleteL2SentMsgAfterHeightDBTx(dbTx *sqlx.Tx, height int64) error {
_, err := dbTx.Exec(`UPDATE l2_sent_msg SET is_deleted = true WHERE height > $1;`, height)
return err
}
@@ -3,7 +3,6 @@ package orm
import (
"database/sql"
"errors"
"strings"

"github.com/ethereum/go-ethereum/log"
"github.com/jmoiron/sqlx"
@@ -31,14 +30,13 @@ func (l *relayedMsgOrm) BatchInsertRelayedMsgDBTx(dbTx *sqlx.Tx, messages []*Rel
"layer1_hash": msg.Layer1Hash,
"layer2_hash": msg.Layer2Hash,
}

_, err = dbTx.NamedExec(`insert into relayed_msg(msg_hash, height, layer1_hash, layer2_hash) values(:msg_hash, :height, :layer1_hash, :layer2_hash);`, messageMaps[i])
if err != nil && !strings.Contains(err.Error(), "pq: duplicate key value violates unique constraint \"relayed_msg_hash_uindex") {
log.Error("BatchInsertRelayedMsgDBTx: failed to insert relayed msgs", "msg_hash", msg.MsgHash, "height", msg.Height, "err", err)
break
}
}
return err
_, err = dbTx.NamedExec(`insert into relayed_msg(msg_hash, height, layer1_hash, layer2_hash) values(:msg_hash, :height, :layer1_hash, :layer2_hash);`, messageMaps)
if err != nil {
log.Error("BatchInsertRelayedMsgDBTx: failed to insert relayed msgs", "err", err)
return err
}
return nil
}

func (l *relayedMsgOrm) GetRelayedMsgByHash(msg_hash string) (*RelayedMsg, error) {

@@ -16,6 +16,9 @@ type OrmFactory interface {
orm.L1CrossMsgOrm
orm.L2CrossMsgOrm
orm.RelayedMsgOrm
orm.L2SentMsgOrm
orm.RollupBatchOrm
GetTotalCrossMsgCountByAddress(sender string) (uint64, error)
GetCrossMsgsByAddressWithOffset(sender string, offset int64, limit int64) ([]*orm.CrossMsg, error)
GetDB() *sqlx.DB
Beginx() (*sqlx.Tx, error)
@@ -26,6 +29,8 @@ type ormFactory struct {
orm.L1CrossMsgOrm
orm.L2CrossMsgOrm
orm.RelayedMsgOrm
orm.L2SentMsgOrm
orm.RollupBatchOrm
*sqlx.DB
}

@@ -44,10 +49,12 @@ func NewOrmFactory(cfg *config.Config) (OrmFactory, error) {
}

return &ormFactory{
L1CrossMsgOrm: orm.NewL1CrossMsgOrm(db),
L2CrossMsgOrm: orm.NewL2CrossMsgOrm(db),
RelayedMsgOrm: orm.NewRelayedMsgOrm(db),
DB: db,
L1CrossMsgOrm: orm.NewL1CrossMsgOrm(db),
L2CrossMsgOrm: orm.NewL2CrossMsgOrm(db),
RelayedMsgOrm: orm.NewRelayedMsgOrm(db),
L2SentMsgOrm: orm.NewL2SentMsgOrm(db),
RollupBatchOrm: orm.NewRollupBatchOrm(db),
DB: db,
}, nil
}

@@ -59,6 +66,15 @@ func (o *ormFactory) Beginx() (*sqlx.Tx, error) {
return o.DB.Beginx()
}

func (o *ormFactory) GetTotalCrossMsgCountByAddress(sender string) (uint64, error) {
var count uint64
row := o.DB.QueryRowx(`SELECT COUNT(*) FROM cross_message WHERE sender = $1 AND NOT is_deleted;`, sender)
if err := row.Scan(&count); err != nil {
return 0, err
}
return count, nil
}

func (o *ormFactory) GetCrossMsgsByAddressWithOffset(sender string, offset int64, limit int64) ([]*orm.CrossMsg, error) {
para := sender
var results []*orm.CrossMsg

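Because ormFactory embeds every per-table interface, a single OrmFactory value satisfies all of them and method calls resolve through the embedded implementations. Illustrative usage; cfg stands in for whatever config the caller already holds:

factory, err := NewOrmFactory(cfg)
if err != nil {
	log.Crit("failed to create orm factory", "err", err)
}
_, _ = factory.GetLatestRollupBatch()     // provided by orm.RollupBatchOrm
_, _ = factory.GetL2SentMessageByNonce(0) // provided by orm.L2SentMsgOrm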
@@ -4,6 +4,7 @@ go 1.19

require (
    github.com/ethereum/go-ethereum v1.12.0
    github.com/iris-contrib/middleware/cors v0.0.0-20230531125531-980d3a09a458
    github.com/jmoiron/sqlx v1.3.5
    github.com/kataras/iris/v12 v12.2.0
    github.com/lib/pq v1.10.7

@@ -133,12 +134,12 @@ require (
    github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
    github.com/yosssi/ace v0.0.5 // indirect
    github.com/yusufpapurcu/wmi v1.2.2 // indirect
    golang.org/x/crypto v0.9.0 // indirect
    golang.org/x/crypto v0.10.0 // indirect
    golang.org/x/exp v0.0.0-20230206171751-46f607a40771 // indirect
    golang.org/x/net v0.10.0 // indirect
    golang.org/x/sync v0.1.0 // indirect
    golang.org/x/sys v0.8.0 // indirect
    golang.org/x/text v0.9.0 // indirect
    golang.org/x/sys v0.9.0 // indirect
    golang.org/x/text v0.10.0 // indirect
    golang.org/x/time v0.3.0 // indirect
    golang.org/x/tools v0.8.0 // indirect
    golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df // indirect
@@ -242,6 +242,8 @@ github.com/iris-contrib/go.uuid v2.0.0+incompatible h1:XZubAYg61/JwnJNbZilGjf3b3
github.com/iris-contrib/go.uuid v2.0.0+incompatible/go.mod h1:iz2lgM/1UnEf1kP0L/+fafWORmlnuysV2EMP8MW+qe0=
github.com/iris-contrib/httpexpect/v2 v2.12.1 h1:3cTZSyBBen/kfjCtgNFoUKi1u0FVXNaAjyRJOo6AVS4=
github.com/iris-contrib/jade v1.1.3/go.mod h1:H/geBymxJhShH5kecoiOCSssPX7QWYH7UaeZTSWddIk=
github.com/iris-contrib/middleware/cors v0.0.0-20230531125531-980d3a09a458 h1:V60rHQJc6DieKV1BqHIGclraPdO4kinuFAZIrPGHN7s=
github.com/iris-contrib/middleware/cors v0.0.0-20230531125531-980d3a09a458/go.mod h1:7eVziAp1yUwFB/ZMg71n84VWQH+7wukvxcHuF2e7cbg=
github.com/iris-contrib/pongo2 v0.0.1/go.mod h1:Ssh+00+3GAZqSQb30AvBRNxBx7rf0GqwkjqxNd0u65g=
github.com/iris-contrib/schema v0.0.1/go.mod h1:urYA3uvUNG1TIIjOSCzHr9/LmbQo8LrOcOqfqxa4hXw=
github.com/iris-contrib/schema v0.0.6 h1:CPSBLyx2e91H2yJzPuhGuifVRnZBBJ3pCOMbOvPZaTw=

@@ -535,8 +537,8 @@ golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWP
golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g=
golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0=
golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM=
golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20230206171751-46f607a40771 h1:xP7rWLUr1e1n2xkK5YB4LI0hPEy3LJC6Wk+D4pGlOJg=
golang.org/x/exp v0.0.0-20230206171751-46f607a40771/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=

@@ -626,8 +628,8 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s=
golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=

@@ -636,8 +638,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.10.0 h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58=
golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
@@ -4,7 +4,7 @@ import "bridge-history-api/service"

type Data struct {
    Result []*service.TxHistoryInfo `json:"result"`
    Total  int                      `json:"total"`
    Total  uint64                   `json:"total"`
}

type QueryByAddressResponse struct {
@@ -1,6 +1,7 @@
package service

import (
    "strconv"
    "time"

    "github.com/ethereum/go-ethereum/common"

@@ -19,20 +20,32 @@ type Finalized struct {
    BlockTimestamp *time.Time `json:"blockTimestamp"` // useless
}

type UserClaimInfo struct {
    From       string `json:"from"`
    To         string `json:"to"`
    Value      string `json:"value"`
    Nonce      string `json:"nonce"`
    BatchHash  string `json:"batch_hash"`
    Message    string `json:"message"`
    Proof      string `json:"proof"`
    BatchIndex string `json:"batch_index"`
}

type TxHistoryInfo struct {
    Hash           string     `json:"hash"`
    Amount         string     `json:"amount"`
    To             string     `json:"to"` // useless
    IsL1           bool       `json:"isL1"`
    BlockNumber    uint64     `json:"blockNumber"`
    BlockTimestamp *time.Time `json:"blockTimestamp"` // useless
    FinalizeTx     *Finalized `json:"finalizeTx"`
    CreatedAt      *time.Time `json:"createdTime"`
    Hash           string         `json:"hash"`
    Amount         string         `json:"amount"`
    To             string         `json:"to"` // useless
    IsL1           bool           `json:"isL1"`
    BlockNumber    uint64         `json:"blockNumber"`
    BlockTimestamp *time.Time     `json:"blockTimestamp"` // useless
    FinalizeTx     *Finalized     `json:"finalizeTx"`
    ClaimInfo      *UserClaimInfo `json:"claimInfo"`
    CreatedAt      *time.Time     `json:"createdTime"`
}

// HistoryService example service.
type HistoryService interface {
    GetTxsByAddress(address common.Address, offset int64, limit int64) ([]*TxHistoryInfo, error)
    GetTxsByAddress(address common.Address, offset int64, limit int64) ([]*TxHistoryInfo, uint64, error)
    GetTxsByHashes(hashes []string) ([]*TxHistoryInfo, error)
}

@@ -47,6 +60,30 @@ type historyBackend struct {
    db db.OrmFactory
}

func GetCrossTxClaimInfo(msgHash string, db db.OrmFactory) *UserClaimInfo {
    l2sentMsg, err := db.GetL2SentMsgByHash(msgHash)
    if err != nil {
        log.Debug("GetCrossTxClaimInfo failed", "error", err)
        return &UserClaimInfo{}
    }
    batch, err := db.GetRollupBatchByIndex(l2sentMsg.BatchIndex)
    if err != nil {
        log.Debug("GetCrossTxClaimInfo failed", "error", err)
        return &UserClaimInfo{}
    }
    return &UserClaimInfo{
        From:       l2sentMsg.Sender,
        To:         l2sentMsg.Target,
        Value:      l2sentMsg.Value,
        Nonce:      strconv.FormatUint(l2sentMsg.Nonce, 10),
        Message:    l2sentMsg.MsgData,
        Proof:      l2sentMsg.MsgProof,
        BatchHash:  batch.BatchHash,
        BatchIndex: strconv.FormatUint(l2sentMsg.BatchIndex, 10),
    }
}

func updateCrossTxHash(msgHash string, txInfo *TxHistoryInfo, db db.OrmFactory) {
    relayed, err := db.GetRelayedMsgByHash(msgHash)
    if err != nil {

@@ -69,15 +106,20 @@ func updateCrossTxHash(msgHash string, txInfo *TxHistoryInfo, db db.OrmFactory)

}

func (h *historyBackend) GetTxsByAddress(address common.Address, offset int64, limit int64) ([]*TxHistoryInfo, error) {
    txHistories := make([]*TxHistoryInfo, 0)
func (h *historyBackend) GetTxsByAddress(address common.Address, offset int64, limit int64) ([]*TxHistoryInfo, uint64, error) {
    var txHistories []*TxHistoryInfo
    total, err := h.db.GetTotalCrossMsgCountByAddress(address.String())
    if err != nil || total == 0 {
        return txHistories, 0, err
    }
    result, err := h.db.GetCrossMsgsByAddressWithOffset(address.String(), offset, limit)

    if err != nil {
        return nil, err
        return nil, 0, err
    }
    for _, msg := range result {
        txHistory := &TxHistoryInfo{
            Hash:   msg.MsgHash,
            Hash:   msg.Layer1Hash + msg.Layer2Hash,
            Amount: msg.Amount,
            To:     msg.Target,
            IsL1:   msg.MsgType == int(orm.Layer1Msg),

@@ -87,11 +129,12 @@ func (h *historyBackend) GetTxsByAddress(address common.Address, offset int64, l
            FinalizeTx: &Finalized{
                Hash: "",
            },
            ClaimInfo: GetCrossTxClaimInfo(msg.MsgHash, h.db),
        }
        updateCrossTxHash(msg.MsgHash, txHistory, h.db)
        txHistories = append(txHistories, txHistory)
    }
    return txHistories, nil
    return txHistories, total, nil
}

func (h *historyBackend) GetTxsByHashes(hashes []string) ([]*TxHistoryInfo, error) {

@@ -134,6 +177,7 @@ func (h *historyBackend) GetTxsByHashes(hashes []string) ([]*TxHistoryInfo, erro
            FinalizeTx: &Finalized{
                Hash: "",
            },
            ClaimInfo: GetCrossTxClaimInfo(l2result.MsgHash, h.db),
        }
        updateCrossTxHash(l2result.MsgHash, txHistory, h.db)
        txHistories = append(txHistories, txHistory)
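The three-value GetTxsByAddress lets the HTTP layer fill Data.Total without issuing a second count query. A sketch of a handler-side caller (queryByAddress is a hypothetical helper; the Data shape mirrors the response struct earlier in this diff):

package controller

import (
    "github.com/ethereum/go-ethereum/common"

    "bridge-history-api/service"
)

// Data mirrors the response type from this change.
type Data struct {
    Result []*service.TxHistoryInfo `json:"result"`
    Total  uint64                   `json:"total"`
}

// queryByAddress shows how the total flows from the service into the response.
func queryByAddress(svc service.HistoryService, addr common.Address, offset, limit int64) (*Data, error) {
    txs, total, err := svc.GetTxsByAddress(addr, offset, limit)
    if err != nil {
        return nil, err
    }
    return &Data{Result: txs, Total: total}, nil
}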
File diff suppressed because one or more lines are too long

305 bridge-history-api/utils/parse_event.go Normal file
@@ -0,0 +1,305 @@
package utils

import (
    "context"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/common/hexutil"
    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/ethclient"
    "github.com/ethereum/go-ethereum/log"

    backendabi "bridge-history-api/abi"
    "bridge-history-api/db/orm"
)

type MsgHashWrapper struct {
    MsgHash common.Hash
    TxHash  common.Hash
}

type CachedParsedTxCalldata struct {
    CallDataIndex uint64
    BatchIndices  []uint64
    StartBlocks   []uint64
    EndBlocks     []uint64
}

func ParseBackendL1EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrapper, []*orm.RelayedMsg, error) {
    // Need to use the contract ABI to parse the event logs.
    // Can only be tested after we have our contracts set up.
    var l1CrossMsg []*orm.CrossMsg
    var relayedMsgs []*orm.RelayedMsg
    var msgHashes []MsgHashWrapper
    for _, vlog := range logs {
        switch vlog.Topics[0] {
        case backendabi.L1DepositETHSig:
            event := backendabi.DepositETH{}
            err := UnpackLog(backendabi.L1ETHGatewayABI, &event, "DepositETH", vlog)
            if err != nil {
                log.Warn("Failed to unpack DepositETH event", "err", err)
                return l1CrossMsg, msgHashes, relayedMsgs, err
            }
            l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
                Height:     vlog.BlockNumber,
                Sender:     event.From.String(),
                Target:     event.To.String(),
                Amount:     event.Amount.String(),
                Asset:      int(orm.ETH),
                Layer1Hash: vlog.TxHash.Hex(),
            })
        case backendabi.L1DepositERC20Sig:
            event := backendabi.ERC20MessageEvent{}
            err := UnpackLog(backendabi.L1StandardERC20GatewayABI, &event, "DepositERC20", vlog)
            if err != nil {
                log.Warn("Failed to unpack DepositERC20 event", "err", err)
                return l1CrossMsg, msgHashes, relayedMsgs, err
            }
            l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
                Height:      vlog.BlockNumber,
                Sender:      event.From.String(),
                Target:      event.To.String(),
                Amount:      event.Amount.String(),
                Asset:       int(orm.ERC20),
                Layer1Hash:  vlog.TxHash.Hex(),
                Layer1Token: event.L1Token.Hex(),
                Layer2Token: event.L2Token.Hex(),
            })
        case backendabi.L1DepositERC721Sig:
            event := backendabi.ERC721MessageEvent{}
            err := UnpackLog(backendabi.L1ERC721GatewayABI, &event, "DepositERC721", vlog)
            if err != nil {
                log.Warn("Failed to unpack DepositERC721 event", "err", err)
                return l1CrossMsg, msgHashes, relayedMsgs, err
            }
            l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
                Height:      vlog.BlockNumber,
                Sender:      event.From.String(),
                Target:      event.To.String(),
                Asset:       int(orm.ERC721),
                Layer1Hash:  vlog.TxHash.Hex(),
                Layer1Token: event.L1Token.Hex(),
                Layer2Token: event.L2Token.Hex(),
                TokenID:     event.TokenID.Uint64(),
            })
        case backendabi.L1DepositERC1155Sig:
            event := backendabi.ERC1155MessageEvent{}
            err := UnpackLog(backendabi.L1ERC1155GatewayABI, &event, "DepositERC1155", vlog)
            if err != nil {
                log.Warn("Failed to unpack DepositERC1155 event", "err", err)
                return l1CrossMsg, msgHashes, relayedMsgs, err
            }
            l1CrossMsg = append(l1CrossMsg, &orm.CrossMsg{
                Height:      vlog.BlockNumber,
                Sender:      event.From.String(),
                Target:      event.To.String(),
                Asset:       int(orm.ERC1155),
                Layer1Hash:  vlog.TxHash.Hex(),
                Layer1Token: event.L1Token.Hex(),
                Layer2Token: event.L2Token.Hex(),
                TokenID:     event.TokenID.Uint64(),
                Amount:      event.Amount.String(),
            })
        case backendabi.L1SentMessageEventSignature:
            event := backendabi.L1SentMessageEvent{}
            err := UnpackLog(backendabi.L1ScrollMessengerABI, &event, "SentMessage", vlog)
            if err != nil {
                log.Warn("Failed to unpack SentMessage event", "err", err)
                return l1CrossMsg, msgHashes, relayedMsgs, err
            }
            msgHash := ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message)
            msgHashes = append(msgHashes, MsgHashWrapper{
                MsgHash: msgHash,
                TxHash:  vlog.TxHash})
        case backendabi.L1RelayedMessageEventSignature:
            event := backendabi.L1RelayedMessageEvent{}
            err := UnpackLog(backendabi.L1ScrollMessengerABI, &event, "RelayedMessage", vlog)
            if err != nil {
                log.Warn("Failed to unpack RelayedMessage event", "err", err)
                return l1CrossMsg, msgHashes, relayedMsgs, err
            }
            relayedMsgs = append(relayedMsgs, &orm.RelayedMsg{
                MsgHash:    event.MessageHash.String(),
                Height:     vlog.BlockNumber,
                Layer1Hash: vlog.TxHash.Hex(),
            })
        }
    }
    return l1CrossMsg, msgHashes, relayedMsgs, nil
}
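The switch above compares vlog.Topics[0] against precomputed signature constants; topic zero of a log is keccak256 of the canonical event signature. A self-contained check of that derivation with go-ethereum (the DepositETH signature string is an assumption, shown only to illustrate the rule):

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/crypto"
)

func main() {
    // Topics[0] of an event log is keccak256 of the canonical signature.
    sig := crypto.Keccak256Hash([]byte("DepositETH(address,address,uint256,bytes)"))
    fmt.Println(sig.Hex())
}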
func ParseBackendL2EventLogs(logs []types.Log) ([]*orm.CrossMsg, []MsgHashWrapper, []*orm.RelayedMsg, []*orm.L2SentMsg, error) {
    // Need to use the contract ABI to parse the event logs.
    // Can only be tested after we have our contracts set up.
    var l2CrossMsg []*orm.CrossMsg
    // relayedMsgs is used to confirm finalized L1 messages.
    var relayedMsgs []*orm.RelayedMsg
    var l2SentMsg []*orm.L2SentMsg
    var msgHashes []MsgHashWrapper
    for _, vlog := range logs {
        switch vlog.Topics[0] {
        case backendabi.L2WithdrawETHSig:
            event := backendabi.DepositETH{}
            err := UnpackLog(backendabi.L2ETHGatewayABI, &event, "WithdrawETH", vlog)
            if err != nil {
                log.Warn("Failed to unpack WithdrawETH event", "err", err)
                return l2CrossMsg, msgHashes, relayedMsgs, l2SentMsg, err
            }
            l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
                Height:     vlog.BlockNumber,
                Sender:     event.From.String(),
                Target:     event.To.String(),
                Amount:     event.Amount.String(),
                Asset:      int(orm.ETH),
                Layer2Hash: vlog.TxHash.Hex(),
            })
        case backendabi.L2WithdrawERC20Sig:
            event := backendabi.ERC20MessageEvent{}
            err := UnpackLog(backendabi.L2StandardERC20GatewayABI, &event, "WithdrawERC20", vlog)
            if err != nil {
                log.Warn("Failed to unpack WithdrawERC20 event", "err", err)
                return l2CrossMsg, msgHashes, relayedMsgs, l2SentMsg, err
            }
            l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
                Height:      vlog.BlockNumber,
                Sender:      event.From.String(),
                Target:      event.To.String(),
                Amount:      event.Amount.String(),
                Asset:       int(orm.ERC20),
                Layer2Hash:  vlog.TxHash.Hex(),
                Layer1Token: event.L1Token.Hex(),
                Layer2Token: event.L2Token.Hex(),
            })
        case backendabi.L2WithdrawERC721Sig:
            event := backendabi.ERC721MessageEvent{}
            err := UnpackLog(backendabi.L2ERC721GatewayABI, &event, "WithdrawERC721", vlog)
            if err != nil {
                log.Warn("Failed to unpack WithdrawERC721 event", "err", err)
                return l2CrossMsg, msgHashes, relayedMsgs, l2SentMsg, err
            }
            l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
                Height:      vlog.BlockNumber,
                Sender:      event.From.String(),
                Target:      event.To.String(),
                Asset:       int(orm.ERC721),
                Layer2Hash:  vlog.TxHash.Hex(),
                Layer1Token: event.L1Token.Hex(),
                Layer2Token: event.L2Token.Hex(),
                TokenID:     event.TokenID.Uint64(),
            })
        case backendabi.L2WithdrawERC1155Sig:
            event := backendabi.ERC1155MessageEvent{}
            err := UnpackLog(backendabi.L2ERC1155GatewayABI, &event, "WithdrawERC1155", vlog)
            if err != nil {
                log.Warn("Failed to unpack WithdrawERC1155 event", "err", err)
                return l2CrossMsg, msgHashes, relayedMsgs, l2SentMsg, err
            }
            l2CrossMsg = append(l2CrossMsg, &orm.CrossMsg{
                Height:      vlog.BlockNumber,
                Sender:      event.From.String(),
                Target:      event.To.String(),
                Asset:       int(orm.ERC1155),
                Layer2Hash:  vlog.TxHash.Hex(),
                Layer1Token: event.L1Token.Hex(),
                Layer2Token: event.L2Token.Hex(),
                TokenID:     event.TokenID.Uint64(),
                Amount:      event.Amount.String(),
            })
        case backendabi.L2SentMessageEventSignature:
            event := backendabi.L2SentMessageEvent{}
            err := UnpackLog(backendabi.L2ScrollMessengerABI, &event, "SentMessage", vlog)
            if err != nil {
                log.Warn("Failed to unpack SentMessage event", "err", err)
                return l2CrossMsg, msgHashes, relayedMsgs, l2SentMsg, err
            }
            msgHash := ComputeMessageHash(event.Sender, event.Target, event.Value, event.MessageNonce, event.Message)
            msgHashes = append(msgHashes, MsgHashWrapper{
                MsgHash: msgHash,
                TxHash:  vlog.TxHash})
            l2SentMsg = append(l2SentMsg, &orm.L2SentMsg{
                Sender:  event.Sender.Hex(),
                Target:  event.Target.Hex(),
                Value:   event.Value.String(),
                MsgHash: msgHash.Hex(),
                Height:  vlog.BlockNumber,
                Nonce:   event.MessageNonce.Uint64(),
                MsgData: hexutil.Encode(event.Message),
            })
        case backendabi.L2RelayedMessageEventSignature:
            event := backendabi.L2RelayedMessageEvent{}
            err := UnpackLog(backendabi.L2ScrollMessengerABI, &event, "RelayedMessage", vlog)
            if err != nil {
                log.Warn("Failed to unpack RelayedMessage event", "err", err)
                return l2CrossMsg, msgHashes, relayedMsgs, l2SentMsg, err
            }
            relayedMsgs = append(relayedMsgs, &orm.RelayedMsg{
                MsgHash:    event.MessageHash.String(),
                Height:     vlog.BlockNumber,
                Layer2Hash: vlog.TxHash.Hex(),
            })
        }
    }
    return l2CrossMsg, msgHashes, relayedMsgs, l2SentMsg, nil
}
func ParseBatchInfoFromScrollChain(ctx context.Context, client *ethclient.Client, logs []types.Log) ([]*orm.RollupBatch, error) {
    var rollupBatches []*orm.RollupBatch
    cache := make(map[string]CachedParsedTxCalldata)
    for _, vlog := range logs {
        switch vlog.Topics[0] {
        case backendabi.L1CommitBatchEventSignature:
            event := backendabi.L1CommitBatchEvent{}
            err := UnpackLog(backendabi.ScrollChainABI, &event, "CommitBatch", vlog)
            if err != nil {
                log.Warn("Failed to unpack CommitBatch event", "err", err)
                return rollupBatches, err
            }
            if _, ok := cache[vlog.TxHash.Hex()]; ok {
                c := cache[vlog.TxHash.Hex()]
                c.CallDataIndex++
                rollupBatches = append(rollupBatches, &orm.RollupBatch{
                    CommitHeight:     vlog.BlockNumber,
                    BatchIndex:       c.BatchIndices[c.CallDataIndex],
                    BatchHash:        event.BatchHash.Hex(),
                    StartBlockNumber: c.StartBlocks[c.CallDataIndex],
                    EndBlockNumber:   c.EndBlocks[c.CallDataIndex],
                })
                cache[vlog.TxHash.Hex()] = c
                continue
            }

            commitTx, isPending, err := client.TransactionByHash(ctx, vlog.TxHash)
            if err != nil || isPending {
                log.Warn("Failed to get commit Batch tx receipt or the tx is still pending", "err", err)
                return rollupBatches, err
            }
            indices, startBlocks, endBlocks, err := GetBatchRangeFromCalldataV1(commitTx.Data())
            if err != nil {
                log.Warn("Failed to get batch range from calldata", "hash", commitTx.Hash().Hex(), "height", vlog.BlockNumber)
                return rollupBatches, err
            }
            cache[vlog.TxHash.Hex()] = CachedParsedTxCalldata{
                CallDataIndex: 0,
                BatchIndices:  indices,
                StartBlocks:   startBlocks,
                EndBlocks:     endBlocks,
            }
            rollupBatches = append(rollupBatches, &orm.RollupBatch{
                CommitHeight:     vlog.BlockNumber,
                BatchIndex:       indices[0],
                BatchHash:        event.BatchHash.Hex(),
                StartBlockNumber: startBlocks[0],
                EndBlockNumber:   endBlocks[0],
            })
        default:
            continue
        }
    }
    return rollupBatches, nil
}
@@ -1,7 +1,10 @@
package utils

import (
    "bytes"
    "context"
    "encoding/binary"
    "errors"
    "fmt"
    "math/big"

@@ -59,3 +62,95 @@ func ComputeMessageHash(
    data, _ := backendabi.L2ScrollMessengerABI.Pack("relayMessage", sender, target, value, messageNonce, message)
    return common.BytesToHash(crypto.Keccak256(data))
}
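So a cross-domain message hash is simply keccak256 of the ABI-encoded relayMessage call. A sketch of invoking the helper (parameter types are inferred from the call sites in parse_event.go; the addresses and payload are placeholders):

package main

import (
    "fmt"
    "math/big"

    "github.com/ethereum/go-ethereum/common"

    "bridge-history-api/utils"
)

func main() {
    h := utils.ComputeMessageHash(
        common.HexToAddress("0x1111111111111111111111111111111111111111"), // sender
        common.HexToAddress("0x2222222222222222222222222222222222222222"), // target
        big.NewInt(0), // value
        big.NewInt(1), // nonce
        nil,           // message payload
    )
    fmt.Println(h.Hex())
}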
type commitBatchArgs struct {
    Version                uint8
    ParentBatchHeader      []byte
    Chunks                 [][]byte
    SkippedL1MessageBitmap []byte
}

// GetBatchRangeFromCalldataV2 finds the batch index and the block range from calldata, both ends inclusive.
func GetBatchRangeFromCalldataV2(calldata []byte) (uint64, uint64, uint64, error) {
    method := backendabi.ScrollChainV2ABI.Methods["commitBatch"]
    values, err := method.Inputs.Unpack(calldata[4:])
    if err != nil {
        return 0, 0, 0, err
    }
    args := commitBatchArgs{}
    err = method.Inputs.Copy(&args, values)
    if err != nil {
        return 0, 0, 0, err
    }

    var startBlock uint64
    var finishBlock uint64

    // decode batchIndex from ParentBatchHeader
    if len(args.ParentBatchHeader) < 9 {
        return 0, 0, 0, errors.New("invalid parent batch header")
    }
    batchIndex := binary.BigEndian.Uint64(args.ParentBatchHeader[1:9]) + 1

    // decode blocks from the chunks, assuming there is no empty chunk
    // | 1 byte     | 60 bytes | ... | 60 bytes |
    // | num blocks | block 1  | ... | block n  |
    if len(args.Chunks) == 0 {
        return 0, 0, 0, errors.New("invalid chunks")
    }
    chunk := args.Chunks[0]
    block := chunk[1:61] // first block in the first chunk
    startBlock = binary.BigEndian.Uint64(block[0:8])

    chunk = args.Chunks[len(args.Chunks)-1]
    lastBlockIndex := int(chunk[0]) - 1
    block = chunk[1+lastBlockIndex*60 : 1+lastBlockIndex*60+60] // last block in the last chunk
    finishBlock = binary.BigEndian.Uint64(block[0:8])

    return batchIndex, startBlock, finishBlock, err
}
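To make the chunk layout above concrete, here is a self-contained sketch that builds a two-block chunk and reads the first and last block numbers exactly the way the function does (only the leading 8 bytes of each 60-byte block context are interpreted here):

package main

import (
    "encoding/binary"
    "fmt"
)

func main() {
    // One chunk: a count byte followed by count * 60-byte block contexts.
    chunk := make([]byte, 1+2*60)
    chunk[0] = 2                                      // two blocks in this chunk
    binary.BigEndian.PutUint64(chunk[1:9], 100)       // block 1 number
    binary.BigEndian.PutUint64(chunk[1+60:9+60], 101) // block 2 number

    first := binary.BigEndian.Uint64(chunk[1:9])
    lastIdx := int(chunk[0]) - 1
    last := binary.BigEndian.Uint64(chunk[1+lastIdx*60 : 1+lastIdx*60+8])
    fmt.Println(first, last) // 100 101
}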
// GetBatchRangeFromCalldataV1 finds the batch indices and block ranges from calldata, both ends inclusive.
func GetBatchRangeFromCalldataV1(calldata []byte) ([]uint64, []uint64, []uint64, error) {
    var batchIndices []uint64
    var startBlocks []uint64
    var finishBlocks []uint64
    if bytes.Equal(calldata[0:4], common.Hex2Bytes("cb905499")) {
        // commitBatches
        method := backendabi.ScrollChainABI.Methods["commitBatches"]
        values, err := method.Inputs.Unpack(calldata[4:])
        if err != nil {
            return batchIndices, startBlocks, finishBlocks, err
        }
        args := make([]backendabi.IScrollChainBatch, len(values))
        err = method.Inputs.Copy(&args, values)
        if err != nil {
            return batchIndices, startBlocks, finishBlocks, err
        }

        for i := 0; i < len(args); i++ {
            batchIndices = append(batchIndices, args[i].BatchIndex)
            startBlocks = append(startBlocks, args[i].Blocks[0].BlockNumber)
            finishBlocks = append(finishBlocks, args[i].Blocks[len(args[i].Blocks)-1].BlockNumber)
        }
    } else if bytes.Equal(calldata[0:4], common.Hex2Bytes("8c73235d")) {
        // commitBatch
        method := backendabi.ScrollChainABI.Methods["commitBatch"]
        values, err := method.Inputs.Unpack(calldata[4:])
        if err != nil {
            return batchIndices, startBlocks, finishBlocks, err
        }

        args := backendabi.IScrollChainBatch{}
        err = method.Inputs.Copy(&args, values)
        if err != nil {
            return batchIndices, startBlocks, finishBlocks, err
        }
        batchIndices = append(batchIndices, args.BatchIndex)
        startBlocks = append(startBlocks, args.Blocks[0].BlockNumber)
        finishBlocks = append(finishBlocks, args.Blocks[len(args.Blocks)-1].BlockNumber)
    } else {
        return batchIndices, startBlocks, finishBlocks, errors.New("invalid selector")
    }
    return batchIndices, startBlocks, finishBlocks, nil
}
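The magic constants "cb905499" and "8c73235d" are 4-byte function selectors: the first four bytes of keccak256 over the canonical method signature. A quick self-contained check of the rule, using the well-known ERC-20 transfer selector rather than the ScrollChain signatures (which are not spelled out in this diff):

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/crypto"
)

func main() {
    // A function selector is the first 4 bytes of keccak256(signature).
    sel := crypto.Keccak256([]byte("transfer(address,uint256)"))[:4]
    fmt.Printf("%x\n", sel) // a9059cbb
}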
@@ -1,6 +1,7 @@
package utils_test

import (
    "os"
    "testing"

    "github.com/ethereum/go-ethereum/common"

@@ -18,3 +19,46 @@ func TestKeccak2(t *testing.T) {
    assert.NotEqual(t, b, c)
    assert.Equal(t, "0xc0ffbd7f501bd3d49721b0724b2bff657cb2378f15d5a9b97cd7ea5bf630d512", c.Hex())
}

func TestGetBatchRangeFromCalldataV2(t *testing.T) {
    // single chunk
    batchIndex, start, finish, err := utils.GetBatchRangeFromCalldataV2(common.Hex2Bytes("1325aca000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001a0000000000000000000000000000000000000000000000000000000000000005900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003d0100000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000100000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000"))
    assert.NoError(t, err)
    assert.Equal(t, start, uint64(1))
    assert.Equal(t, finish, uint64(1))
    assert.Equal(t, batchIndex, uint64(1))

    // multiple chunks
    batchIndex, start, finish, err = utils.GetBatchRangeFromCalldataV2(common.Hex2Bytes("1325aca000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000003e0000000000000000000000000000000000000000000000000000000000000007900000000000000000100000000000000010000000000000001038433daac85a0b03cd443ed50bc85e832c883061651ae2182b2984751e0b340119b828c2a2798d2c957228ebeaff7e10bb099ae0d4e224f3eeb779ff61cba610000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000000000000000000000000000000000000000004c01000000000000000a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000030000000000010000000001000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001b403000000000000000b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000005000300000000000000000b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00050000000000000014000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000012c01000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000100000000010000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa800000000000000000000000000000000000000000000000000000000000000aa"))
    assert.NoError(t, err)
    assert.Equal(t, start, uint64(10))
    assert.Equal(t, finish, uint64(20))
    assert.Equal(t, batchIndex, uint64(2))
}

func TestGetBatchRangeFromCalldataV1(t *testing.T) {
    calldata, err := os.ReadFile("../testdata/commit-batches-0x3095e91db7ba4a6fbf4654d607db322e58ff5579c502219c8024acaea74cf311.txt")
    assert.NoError(t, err)

    // multiple batches
    batchIndices, startBlocks, finishBlocks, err := utils.GetBatchRangeFromCalldataV1(common.Hex2Bytes(string(calldata[:])))
    assert.NoError(t, err)
    assert.Equal(t, len(batchIndices), 5)
    assert.Equal(t, len(startBlocks), 5)
    assert.Equal(t, len(finishBlocks), 5)
    assert.Equal(t, batchIndices[0], uint64(1))
    assert.Equal(t, batchIndices[1], uint64(2))
    assert.Equal(t, batchIndices[2], uint64(3))
    assert.Equal(t, batchIndices[3], uint64(4))
    assert.Equal(t, batchIndices[4], uint64(5))
    assert.Equal(t, startBlocks[0], uint64(1))
    assert.Equal(t, startBlocks[1], uint64(6))
    assert.Equal(t, startBlocks[2], uint64(7))
    assert.Equal(t, startBlocks[3], uint64(19))
    assert.Equal(t, startBlocks[4], uint64(20))
    assert.Equal(t, finishBlocks[0], uint64(5))
    assert.Equal(t, finishBlocks[1], uint64(6))
    assert.Equal(t, finishBlocks[2], uint64(18))
    assert.Equal(t, finishBlocks[3], uint64(19))
    assert.Equal(t, finishBlocks[4], uint64(20))
}
@@ -1,7 +1,7 @@
.PHONY: lint docker clean bridge

IMAGE_NAME=bridge
IMAGE_VERSION=latest
IMAGE_VERSION=v9.9
REPO_ROOT_DIR=./..

mock_abi:
@@ -7,7 +7,6 @@ import (
    "os/signal"
    "time"

    "github.com/scroll-tech/go-ethereum/ethclient"
    "github.com/scroll-tech/go-ethereum/log"
    "github.com/urfave/cli/v2"

@@ -64,29 +63,16 @@ func action(ctx *cli.Context) error {
    metrics.Serve(subCtx, ctx)

    // Init the l2geth connection
    l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
    if err != nil {
        log.Error("failed to connect to l2 geth", "config file", cfgFile, "error", err)
        return err
    }

    l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig)
    if err != nil {
        log.Error("failed to create new l1 relayer", "config file", cfgFile, "error", err)
        return err
    }
    l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig)
    if err != nil {
        log.Error("failed to create new l2 relayer", "config file", cfgFile, "error", err)
        return err
    }

    // Start the l1relayer process
    go cutils.Loop(subCtx, 10*time.Second, l1relayer.ProcessSavedEvents)

    // Start the l2relayer process
    go cutils.Loop(subCtx, 2*time.Second, l2relayer.ProcessSavedEvents)

    // Finished starting all message relayer functions
    log.Info("Start message-relayer successfully")
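cutils.Loop runs each relayer step on a fixed period until the context is cancelled. A minimal sketch of that contract (the loop helper here is a stand-in written for illustration, not the actual scroll-tech implementation):

package main

import (
    "context"
    "fmt"
    "time"
)

// loop mimics the assumed cutils.Loop contract: run f every period until ctx is done.
func loop(ctx context.Context, period time.Duration, f func()) {
    tick := time.NewTicker(period)
    defer tick.Stop()
    for {
        select {
        case <-ctx.Done():
            return
        case <-tick.C:
            f()
        }
    }
}

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
    defer cancel()
    loop(ctx, time.Second, func() { fmt.Println("tick") })
}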
@@ -80,7 +80,6 @@
    "batch_commit_time_sec": 1200,
    "batch_blocks_limit": 100,
    "commit_tx_calldata_size_limit": 200000,
    "commit_tx_batch_count_limit": 30,
    "public_input_config": {
        "max_tx_num": 44,
        "padding_tx_hash": "0x0000000000000000000000000000000000000000000000000000000000000000"
@@ -1,6 +1,6 @@
module scroll-tech/bridge

go 1.19
go 1.18

require (
    github.com/agiledragon/gomonkey/v2 v2.9.0
@@ -44,8 +44,6 @@ type BatchProposerConfig struct {
    CommitTxCalldataSizeLimit uint64 `json:"commit_tx_calldata_size_limit"`
    // Commit tx calldata min size limit in bytes
    CommitTxCalldataMinSize uint64 `json:"commit_tx_calldata_min_size,omitempty"`
    // Max number of batches in a commit transaction
    CommitTxBatchCountLimit uint64 `json:"commit_tx_batch_count_limit"`
    // The public input hash config
    PublicInputConfig *types.PublicInputHashConfig `json:"public_input_config"`
}
@@ -4,7 +4,6 @@ import (
    "context"
    "errors"
    "math/big"
    "runtime"
    "sync"

    "github.com/scroll-tech/go-ethereum/accounts/abi"

@@ -13,9 +12,7 @@ import (
    "github.com/scroll-tech/go-ethereum/ethclient"
    "github.com/scroll-tech/go-ethereum/log"
    gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
    "golang.org/x/sync/errgroup"
    "gorm.io/gorm"
    "modernc.org/mathutil"

    "scroll-tech/common/metrics"
    "scroll-tech/common/types"

@@ -153,120 +150,6 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.

const processMsgLimit = 100

// ProcessSavedEvents relays saved, unprocessed cross-domain transactions to the destination chain
func (r *Layer2Relayer) ProcessSavedEvents() {
    batch, err := r.blockBatchOrm.GetLatestBatchByRollupStatus([]types.RollupStatus{types.RollupFinalized})
    if err != nil {
        log.Error("GetLatestFinalizedBatch failed", "err", err)
        return
    }

    // msgs are sorted by nonce in increasing order
    fields := map[string]interface{}{
        "status":        int(types.MsgPending),
        "height <= (?)": batch.EndBlockNumber,
    }
    orderByList := []string{
        "nonce ASC",
    }
    limit := processMsgLimit

    msgs, err := r.l2MessageOrm.GetL2Messages(fields, orderByList, limit)
    if err != nil {
        log.Error("Failed to fetch unprocessed L2 messages", "err", err)
        return
    }

    // process messages in batches
    batchSize := mathutil.Min((runtime.GOMAXPROCS(0)+1)/2, r.messageSender.NumberOfAccounts())
    for size := 0; len(msgs) > 0; msgs = msgs[size:] {
        if size = len(msgs); size > batchSize {
            size = batchSize
        }
        var g errgroup.Group
        for _, msg := range msgs[:size] {
            msg := msg
            g.Go(func() error {
                return r.processSavedEvent(&msg)
            })
        }
        if err := g.Wait(); err != nil {
            if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
                log.Error("failed to process l2 saved event", "err", err)
            }
            return
        }
    }
}
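The loop above processes messages in windows of batchSize, waiting on an errgroup per window and aborting on the first failed window. A stripped-down, runnable sketch of the same slicing pattern:

package main

import (
    "fmt"

    "golang.org/x/sync/errgroup"
)

func main() {
    msgs := []int{1, 2, 3, 4, 5, 6, 7}
    batchSize := 3
    for size := 0; len(msgs) > 0; msgs = msgs[size:] {
        if size = len(msgs); size > batchSize {
            size = batchSize
        }
        var g errgroup.Group
        for _, m := range msgs[:size] {
            m := m // capture the loop variable (pre-Go 1.22 semantics)
            g.Go(func() error {
                fmt.Println("processing", m)
                return nil
            })
        }
        if err := g.Wait(); err != nil {
            return // stop on the first failed window
        }
    }
}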
func (r *Layer2Relayer) processSavedEvent(msg *orm.L2Message) error {
    // @todo fetch merkle proof from l2geth
    log.Info("Processing L2 Message", "msg.nonce", msg.Nonce, "msg.height", msg.Height)

    // Get the block info that contains the message
    blockInfos, err := r.blockTraceOrm.GetL2BlockInfos(map[string]interface{}{"number": msg.Height}, nil, 0)
    if err != nil {
        log.Error("Failed to GetL2BlockInfos from DB", "number", msg.Height)
    }
    if len(blockInfos) == 0 {
        return errors.New("get block trace len is 0, exit")
    }

    blockInfo := blockInfos[0]
    if blockInfo.BatchHash == "" {
        log.Error("Block has not been batched yet", "number", blockInfo.Number, "msg.nonce", msg.Nonce)
        return nil
    }

    // TODO: rebuild the withdraw trie to generate the merkle proof
    proof := bridgeAbi.IL1ScrollMessengerL2MessageProof{
        BatchHash:   common.HexToHash(blockInfo.BatchHash),
        MerkleProof: make([]byte, 0),
    }
    from := common.HexToAddress(msg.Sender)
    target := common.HexToAddress(msg.Target)
    value, ok := big.NewInt(0).SetString(msg.Value, 10)
    if !ok {
        // @todo maybe panic?
        log.Error("Failed to parse message value", "msg.nonce", msg.Nonce, "msg.height", msg.Height)
        // TODO: need to skip this message by changing its status to MsgError
    }
    msgNonce := big.NewInt(int64(msg.Nonce))
    calldata := common.Hex2Bytes(msg.Calldata)
    data, err := r.l1MessengerABI.Pack("relayMessageWithProof", from, target, value, msgNonce, calldata, proof)
    if err != nil {
        log.Error("Failed to pack relayMessageWithProof", "msg.nonce", msg.Nonce, "err", err)
        // TODO: need to skip this message by changing its status to MsgError
        return err
    }

    hash, err := r.messageSender.SendTransaction(msg.MsgHash, &r.cfg.MessengerContractAddress, big.NewInt(0), data, r.minGasLimitForMessageRelay)
    if err != nil && errors.Is(err, ErrExecutionRevertedMessageExpired) {
        return r.l2MessageOrm.UpdateLayer2Status(r.ctx, msg.MsgHash, types.MsgExpired)
    }
    if err != nil && errors.Is(err, ErrExecutionRevertedAlreadySuccessExecuted) {
        return r.l2MessageOrm.UpdateLayer2Status(r.ctx, msg.MsgHash, types.MsgConfirmed)
    }
    if err != nil {
        if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
            log.Error("Failed to send relayMessageWithProof tx to layer1", "msg.height", msg.Height, "msg.MsgHash", msg.MsgHash, "err", err)
        }
        return err
    }
    bridgeL2MsgsRelayedTotalCounter.Inc(1)
    log.Info("relayMessageWithProof to layer1", "msgHash", msg.MsgHash, "txhash", hash.String())

    // save status in db
    // @todo handle db error
    err = r.l2MessageOrm.UpdateLayer2StatusAndLayer1Hash(r.ctx, msg.MsgHash, types.MsgSubmitted, hash.String())
    if err != nil {
        log.Error("UpdateLayer2StatusAndLayer1Hash failed", "msgHash", msg.MsgHash, "err", err)
        return err
    }
    r.processingMessage.Store(msg.MsgHash, msg.MsgHash)
    return nil
}

// ProcessGasPriceOracle imports gas price to layer1
func (r *Layer2Relayer) ProcessGasPriceOracle() {
    batch, err := r.blockBatchOrm.GetLatestBatch()
@@ -457,20 +340,27 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
        }
    }()

    aggProof, err := r.blockBatchOrm.GetVerifiedProofByHash(hash)
    proofBuffer, icBuffer, err := r.blockBatchOrm.GetVerifiedProofAndInstanceCommitmentsByHash(hash)
    if err != nil {
        log.Warn("get verified proof by hash failed", "hash", hash, "err", err)
        log.Warn("fetch verified proof by hash failed", "hash", hash, "err", err)
        return
    }
    if proofBuffer == nil || icBuffer == nil {
        log.Warn("proof or instance not ready", "hash", hash)
        return
    }
    if len(proofBuffer)%32 != 0 {
        log.Error("proof buffer has wrong length", "hash", hash, "length", len(proofBuffer))
        return
    }
    if len(icBuffer)%32 != 0 {
        log.Warn("instance buffer has wrong length", "hash", hash, "length", len(icBuffer))
        return
    }

    if err = aggProof.SanityCheck(); err != nil {
        log.Warn("agg_proof sanity check fails", "hash", hash, "error", err)
        return
    }

    proof := utils.BufferToUint256Le(aggProof.Proof)
    finalPair := utils.BufferToUint256Le(aggProof.FinalPair)
    data, err := r.l1RollupABI.Pack("finalizeBatchWithProof", common.HexToHash(hash), proof, finalPair)
    proof := utils.BufferToUint256Le(proofBuffer)
    instance := utils.BufferToUint256Le(icBuffer)
    data, err := r.l1RollupABI.Pack("finalizeBatchWithProof", common.HexToHash(hash), proof, instance)
    if err != nil {
        log.Error("Pack finalizeBatchWithProof failed", "err", err)
        return
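The %32 length checks exist because both buffers are consumed as a sequence of 32-byte words by BufferToUint256Le. A sketch of that word-splitting constraint (illustrative only: the real helper, per its name, reads each word little-endian, while big-endian SetBytes keeps this sketch short):

package main

import (
    "fmt"
    "math/big"
)

// words32 splits buf into 32-byte words; callers must ensure len(buf)%32 == 0.
func words32(buf []byte) []*big.Int {
    out := make([]*big.Int, 0, len(buf)/32)
    for i := 0; i+32 <= len(buf); i += 32 {
        out = append(out, new(big.Int).SetBytes(buf[i:i+32]))
    }
    return out
}

func main() {
    buf := make([]byte, 64)
    buf[31] = 1 // first word = 1
    buf[63] = 2 // second word = 2
    for _, w := range words32(buf) {
        fmt.Println(w)
    }
}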
@@ -18,7 +18,6 @@ import (
    "gorm.io/gorm"

    "scroll-tech/common/types"
    "scroll-tech/common/types/message"
    "scroll-tech/common/utils"

    "scroll-tech/bridge/internal/controller/sender"

@@ -157,11 +156,9 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
    err = blockBatchOrm.UpdateRollupStatus(context.Background(), batchHash, types.RollupCommitted)
    assert.NoError(t, err)

    proof := &message.AggProof{
        Proof:     []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
        FinalPair: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
    }
    err = blockBatchOrm.UpdateProofByHash(context.Background(), batchHash, proof, 100)
    tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
    tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
    err = blockBatchOrm.UpdateProofByHash(context.Background(), batchHash, tProof, tInstanceCommitments, 100)
    assert.NoError(t, err)
    err = blockBatchOrm.UpdateProvingStatus(batchHash, types.ProvingTaskVerified)
    assert.NoError(t, err)

@@ -202,11 +199,9 @@ func testL2RelayerSkipBatches(t *testing.T) {
    err = blockBatchOrm.UpdateRollupStatus(context.Background(), batchHash, rollupStatus)
    assert.NoError(t, err)

    proof := &message.AggProof{
        Proof:     []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
        FinalPair: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
    }
    err = blockBatchOrm.UpdateProofByHash(context.Background(), batchHash, proof, 100)
    tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
    tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
    err = blockBatchOrm.UpdateProofByHash(context.Background(), batchHash, tProof, tInstanceCommitments, 100)
    assert.NoError(t, err)
    err = blockBatchOrm.UpdateProvingStatus(batchHash, provingStatus)
    assert.NoError(t, err)
@@ -46,7 +46,6 @@ type BatchProposer struct {
    commitCalldataSizeLimit  uint64
    batchDataBufferSizeLimit uint64
    commitCalldataMinSize    uint64
    commitBatchCountLimit    int

    proofGenerationFreq uint64
    batchDataBuffer     []*bridgeTypes.BatchData

@@ -73,7 +72,6 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, rela
    batchCommitTimeSec:       cfg.BatchCommitTimeSec,
    commitCalldataSizeLimit:  cfg.CommitTxCalldataSizeLimit,
    commitCalldataMinSize:    cfg.CommitTxCalldataMinSize,
    commitBatchCountLimit:    int(cfg.CommitTxBatchCountLimit),
    batchDataBufferSizeLimit: 100*cfg.CommitTxCalldataSizeLimit + 1*1024*1024, // @todo: determine the value.
    proofGenerationFreq:      cfg.ProofGenerationFreq,
    piCfg:                    cfg.PublicInputConfig,

@@ -204,7 +202,7 @@ func (p *BatchProposer) TryCommitBatches() {
    index := 0
    commit := false
    calldataByteLen := uint64(0)
    for ; index < len(p.batchDataBuffer) && index < p.commitBatchCountLimit; index++ {
    for ; index < len(p.batchDataBuffer); index++ {
        calldataByteLen += bridgeAbi.GetBatchCalldataLength(&p.batchDataBuffer[index].Batch)
        if calldataByteLen > p.commitCalldataSizeLimit {
            commit = true
@@ -113,12 +113,11 @@ func testBatchProposerBatchGeneration(t *testing.T) {
    assert.NoError(t, err)

    proposer := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
        ProofGenerationFreq:     1,
        BatchGasThreshold:       3000000,
        BatchTxNumThreshold:     135,
        BatchTimeSec:            1,
        BatchBlocksLimit:        100,
        CommitTxBatchCountLimit: 30,
        ProofGenerationFreq: 1,
        BatchGasThreshold:   3000000,
        BatchTxNumThreshold: 135,
        BatchTimeSec:        1,
        BatchBlocksLimit:    100,
    }, relayer, db)
    proposer.TryProposeBatch()

@@ -190,12 +189,11 @@ func testBatchProposerGracefulRestart(t *testing.T) {
    assert.Equal(t, batchData2.Hash().Hex(), batchHashes[0])
    // test p.recoverBatchDataBuffer().
    _ = NewBatchProposer(context.Background(), &config.BatchProposerConfig{
        ProofGenerationFreq:     1,
        BatchGasThreshold:       3000000,
        BatchTxNumThreshold:     135,
        BatchTimeSec:            1,
        BatchBlocksLimit:        100,
        CommitTxBatchCountLimit: 30,
        ProofGenerationFreq: 1,
        BatchGasThreshold:   3000000,
        BatchTxNumThreshold: 135,
        BatchTimeSec:        1,
        BatchBlocksLimit:    100,
    }, relayer, db)

    batchHashes, err = blockBatchOrm.GetBlockBatchesHashByRollupStatus(types.RollupPending, math.MaxInt32)
@@ -2,7 +2,6 @@ package orm

import (
    "context"
    "encoding/json"
    "errors"
    "time"

@@ -10,7 +9,6 @@ import (
    "gorm.io/gorm"

    "scroll-tech/common/types"
    "scroll-tech/common/types/message"

    bridgeTypes "scroll-tech/bridge/internal/types"
)

@@ -19,30 +17,31 @@ import (
type BlockBatch struct {
    db *gorm.DB `gorm:"column:-"`

    Hash             string     `json:"hash" gorm:"column:hash"`
    Index            uint64     `json:"index" gorm:"column:index"`
    StartBlockNumber uint64     `json:"start_block_number" gorm:"column:start_block_number"`
    StartBlockHash   string     `json:"start_block_hash" gorm:"column:start_block_hash"`
    EndBlockNumber   uint64     `json:"end_block_number" gorm:"column:end_block_number"`
    EndBlockHash     string     `json:"end_block_hash" gorm:"column:end_block_hash"`
    ParentHash       string     `json:"parent_hash" gorm:"column:parent_hash"`
    StateRoot        string     `json:"state_root" gorm:"column:state_root"`
    TotalTxNum       uint64     `json:"total_tx_num" gorm:"column:total_tx_num"`
    TotalL1TxNum     uint64     `json:"total_l1_tx_num" gorm:"column:total_l1_tx_num"`
    TotalL2Gas       uint64     `json:"total_l2_gas" gorm:"column:total_l2_gas"`
    ProvingStatus    int        `json:"proving_status" gorm:"column:proving_status;default:1"`
    Proof            []byte     `json:"proof" gorm:"column:proof"`
    ProofTimeSec     uint64     `json:"proof_time_sec" gorm:"column:proof_time_sec;default:0"`
    RollupStatus     int        `json:"rollup_status" gorm:"column:rollup_status;default:1"`
    CommitTxHash     string     `json:"commit_tx_hash" gorm:"column:commit_tx_hash;default:NULL"`
    OracleStatus     int        `json:"oracle_status" gorm:"column:oracle_status;default:1"`
    OracleTxHash     string     `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"`
    FinalizeTxHash   string     `json:"finalize_tx_hash" gorm:"column:finalize_tx_hash;default:NULL"`
    CreatedAt        time.Time  `json:"created_at" gorm:"column:created_at;default:CURRENT_TIMESTAMP()"`
    ProverAssignedAt *time.Time `json:"prover_assigned_at" gorm:"column:prover_assigned_at;default:NULL"`
    ProvedAt         *time.Time `json:"proved_at" gorm:"column:proved_at;default:NULL"`
    CommittedAt      *time.Time `json:"committed_at" gorm:"column:committed_at;default:NULL"`
    FinalizedAt      *time.Time `json:"finalized_at" gorm:"column:finalized_at;default:NULL"`
    Hash                string     `json:"hash" gorm:"column:hash"`
    Index               uint64     `json:"index" gorm:"column:index"`
    StartBlockNumber    uint64     `json:"start_block_number" gorm:"column:start_block_number"`
    StartBlockHash      string     `json:"start_block_hash" gorm:"column:start_block_hash"`
    EndBlockNumber      uint64     `json:"end_block_number" gorm:"column:end_block_number"`
    EndBlockHash        string     `json:"end_block_hash" gorm:"column:end_block_hash"`
    ParentHash          string     `json:"parent_hash" gorm:"column:parent_hash"`
    StateRoot           string     `json:"state_root" gorm:"column:state_root"`
    TotalTxNum          uint64     `json:"total_tx_num" gorm:"column:total_tx_num"`
    TotalL1TxNum        uint64     `json:"total_l1_tx_num" gorm:"column:total_l1_tx_num"`
    TotalL2Gas          uint64     `json:"total_l2_gas" gorm:"column:total_l2_gas"`
    ProvingStatus       int        `json:"proving_status" gorm:"column:proving_status;default:1"`
    Proof               []byte     `json:"proof" gorm:"column:proof"`
    InstanceCommitments []byte     `json:"instance_commitments" gorm:"column:instance_commitments"`
    ProofTimeSec        uint64     `json:"proof_time_sec" gorm:"column:proof_time_sec;default:0"`
    RollupStatus        int        `json:"rollup_status" gorm:"column:rollup_status;default:1"`
    CommitTxHash        string     `json:"commit_tx_hash" gorm:"column:commit_tx_hash;default:NULL"`
    OracleStatus        int        `json:"oracle_status" gorm:"column:oracle_status;default:1"`
    OracleTxHash        string     `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"`
    FinalizeTxHash      string     `json:"finalize_tx_hash" gorm:"column:finalize_tx_hash;default:NULL"`
    CreatedAt           time.Time  `json:"created_at" gorm:"column:created_at;default:CURRENT_TIMESTAMP()"`
    ProverAssignedAt    *time.Time `json:"prover_assigned_at" gorm:"column:prover_assigned_at;default:NULL"`
    ProvedAt            *time.Time `json:"proved_at" gorm:"column:proved_at;default:NULL"`
    CommittedAt         *time.Time `json:"committed_at" gorm:"column:committed_at;default:NULL"`
    FinalizedAt         *time.Time `json:"finalized_at" gorm:"column:finalized_at;default:NULL"`
}

// NewBlockBatch creates a blockBatchOrm instance

@@ -101,24 +100,14 @@ func (o *BlockBatch) GetBlockBatchesHashByRollupStatus(status types.RollupStatus
    return hashes, nil
}

// GetVerifiedProofByHash gets the verified proof and instance commitments by hash
func (o *BlockBatch) GetVerifiedProofByHash(hash string) (*message.AggProof, error) {
    result := o.db.Model(&BlockBatch{}).Select("proof").Where("hash", hash).Where("proving_status", int(types.ProvingTaskVerified)).Row()
    if result.Err() != nil {
        return nil, result.Err()
// GetVerifiedProofAndInstanceCommitmentsByHash gets the verified proof and instance commitments by hash
func (o *BlockBatch) GetVerifiedProofAndInstanceCommitmentsByHash(hash string) ([]byte, []byte, error) {
    var blockBatch BlockBatch
    err := o.db.Select("proof, instance_commitments").Where("hash", hash).Where("proving_status", int(types.ProvingTaskVerified)).Find(&blockBatch).Error
    if err != nil {
        return nil, nil, err
    }

    var proofBytes []byte
    if err := result.Scan(&proofBytes); err != nil {
        return nil, err
    }

    var proof message.AggProof
    if err := json.Unmarshal(proofBytes, &proof); err != nil {
        return nil, err
    }

    return &proof, nil
    return blockBatch.Proof, blockBatch.InstanceCommitments, nil
}

// GetLatestBatch gets the latest batch

@@ -158,17 +147,10 @@ func (o *BlockBatch) GetRollupStatusByHashList(hashes []string) ([]types.RollupS
        return nil, err
    }

    var (
        statuses   []types.RollupStatus
        _statusMap = make(map[string]types.RollupStatus, len(hashes))
    )
    for _, _batch := range blockBatches {
        _statusMap[_batch.Hash] = types.RollupStatus(_batch.RollupStatus)
    var statuses []types.RollupStatus
    for _, v := range blockBatches {
        statuses = append(statuses, types.RollupStatus(v.RollupStatus))
    }
    for _, _hash := range hashes {
        statuses = append(statuses, _statusMap[_hash])
    }

    return statuses, nil
}
@@ -297,16 +279,12 @@ func (o *BlockBatch) UpdateL2GasOracleStatusAndOracleTxHash(ctx context.Context,

// UpdateProofByHash updates the block batch proof by hash
// for unit test
func (o *BlockBatch) UpdateProofByHash(ctx context.Context, hash string, proof *message.AggProof, proofTimeSec uint64) error {
    proofBytes, err := json.Marshal(proof)
    if err != nil {
        return err
    }

func (o *BlockBatch) UpdateProofByHash(ctx context.Context, hash string, proof, instanceCommitments []byte, proofTimeSec uint64) error {
    updateFields := make(map[string]interface{})
    updateFields["proof"] = proofBytes
    updateFields["proof"] = proof
    updateFields["instance_commitments"] = instanceCommitments
    updateFields["proof_time_sec"] = proofTimeSec
    err = o.db.WithContext(ctx).Model(&BlockBatch{}).Where("hash", hash).Updates(updateFields).Error
    err := o.db.WithContext(ctx).Model(&BlockBatch{}).Where("hash", hash).Updates(updateFields).Error
    if err != nil {
        log.Error("failed to update proof", "err", err)
    }
@@ -13,8 +13,8 @@ create table l1_message
    layer1_hash     VARCHAR NOT NULL,
    layer2_hash     VARCHAR DEFAULT NULL,
    status          INTEGER DEFAULT 1,
    created_at      TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at      TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP
    created_time    TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_time    TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP
);

comment

@@ -32,7 +32,7 @@ create index l1_message_height_index
CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$
BEGIN
   NEW.updated_at = CURRENT_TIMESTAMP;
   NEW.updated_time = CURRENT_TIMESTAMP;
   RETURN NEW;
END;
$$ language 'plpgsql';
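The update_timestamp() function only fires once a CREATE TRIGGER attaches it to a table. A sketch of issuing the standard Postgres attachment from Go with sqlx (the trigger name and DSN are assumptions; the table matches the migration above):

package main

import (
    "log"

    "github.com/jmoiron/sqlx"
    _ "github.com/lib/pq"
)

func main() {
    db := sqlx.MustOpen("postgres", "postgres://user:pass@localhost/bridge?sslmode=disable")
    // Standard Postgres pattern: run the function before every UPDATE row.
    db.MustExec(`CREATE TRIGGER l1_message_updated_time
        BEFORE UPDATE ON l1_message
        FOR EACH ROW EXECUTE PROCEDURE update_timestamp();`)
    log.Println("trigger installed")
}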
@@ -13,8 +13,8 @@ create table l2_message
    layer1_hash     VARCHAR DEFAULT NULL,
    proof           TEXT    DEFAULT NULL,
    status          INTEGER DEFAULT 1,
    created_at      TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at      TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP
    created_time    TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_time    TIMESTAMP(0) NOT NULL DEFAULT CURRENT_TIMESTAMP
);

comment

@@ -32,7 +32,7 @@ create index l2_message_height_index
CREATE OR REPLACE FUNCTION update_timestamp()
RETURNS TRIGGER AS $$
BEGIN
   NEW.updated_at = CURRENT_TIMESTAMP;
   NEW.updated_time = CURRENT_TIMESTAMP;
   RETURN NEW;
END;
$$ language 'plpgsql';
@@ -16,6 +16,7 @@ create table block_batch
    total_l2_gas         BIGINT  NOT NULL,
    proving_status       INTEGER DEFAULT 1,
    proof                BYTEA   DEFAULT NULL,
    instance_commitments BYTEA   DEFAULT NULL,
    proof_time_sec       INTEGER DEFAULT 0,
    rollup_status        INTEGER DEFAULT 1,
    commit_tx_hash       VARCHAR DEFAULT NULL,
@@ -14,7 +14,6 @@ import (
"gorm.io/gorm"

"scroll-tech/common/types"
"scroll-tech/common/types/message"

"scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/internal/controller/watcher"
@@ -111,11 +110,9 @@ func testRelayL2MessageSucceed(t *testing.T) {
assert.NoError(t, err)

// add dummy proof
proof := &message.AggProof{
Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
FinalPair: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
}
err = blockBatchOrm.UpdateProofByHash(context.Background(), batchHash, proof, 100)
tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
err = blockBatchOrm.UpdateProofByHash(context.Background(), batchHash, tProof, tInstanceCommitments, 100)
assert.NoError(t, err)
err = blockBatchOrm.UpdateProvingStatus(batchHash, types.ProvingTaskVerified)
assert.NoError(t, err)

@@ -13,7 +13,6 @@ import (
"gorm.io/gorm"

"scroll-tech/common/types"
"scroll-tech/common/types/message"

"scroll-tech/bridge/internal/controller/relayer"
"scroll-tech/bridge/internal/controller/watcher"
@@ -118,11 +117,9 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
assert.Equal(t, types.RollupCommitted, statuses[0])

// add dummy proof
proof := &message.AggProof{
Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
FinalPair: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
}
err = blockBatchOrm.UpdateProofByHash(context.Background(), batchHash, proof, 100)
tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
err = blockBatchOrm.UpdateProofByHash(context.Background(), batchHash, tProof, tInstanceCommitments, 100)
assert.NoError(t, err)
err = blockBatchOrm.UpdateProvingStatus(batchHash, types.ProvingTaskVerified)
assert.NoError(t, err)

@@ -1,46 +1,45 @@
ifeq ($(GO_VERSION),)
GO_VERSION=1.19
endif
ifeq ($(RUST_VERSION),)
RUST_VERSION=nightly-2022-12-10
endif
ifeq ($(PYTHON_VERSION),)
PYTHON_VERSION=3.10
endif
ifeq ($(CUDA_VERSION),)
CUDA_VERSION=11.7.1
endif
GO_VERSION := 1.18
PYTHON_VERSION := 3.10
RUST_VERSION := nightly-2022-12-10

.PHONY: all go-alpine-builder rust-builder rust-alpine-builder go-rust-alpine-builder go-rust-builder cuda-go-rust-builder py-runner

cuda-go-rust-builder:
docker build -t scrolltech/cuda-go-rust-builder:cuda-$(CUDA_VERSION)-go-$(GO_VERSION)-rust-$(RUST_VERSION) -f cuda-go-rust-builder.Dockerfile ./ --build-arg CUDA_VERSION=$(CUDA_VERSION) --build-arg GO_VERSION=$(GO_VERSION) --build-arg RUST_VERSION=$(RUST_VERSION)
.PHONY: all go-alpine-builder rust-builder rust-alpine-builder go-rust-alpine-builder go-rust-builder py-runner

go-rust-builder:
docker build -t scrolltech/go-rust-builder:go-$(GO_VERSION)-rust-$(RUST_VERSION) -f go-rust-builder.Dockerfile ./ --build-arg GO_VERSION=$(GO_VERSION) --build-arg RUST_VERSION=$(RUST_VERSION)
docker build -t scrolltech/go-rust-builder:latest -f go-rust-builder.Dockerfile ./
docker image tag scrolltech/go-rust-builder:latest scrolltech/go-rust-builder:go-$(GO_VERSION)-rust-$(RUST_VERSION)

go-alpine-builder:
docker build -t scrolltech/go-alpine-builder:$(GO_VERSION) -f go-alpine-builder.Dockerfile ./ --build-arg GO_VERSION=$(GO_VERSION)
docker build -t scrolltech/go-alpine-builder:latest -f go-alpine-builder.Dockerfile ./
docker image tag scrolltech/go-alpine-builder:latest scrolltech/go-alpine-builder:$(GO_VERSION)

rust-builder:
docker build -t scrolltech/rust-builder:$(RUST_VERSION) -f rust-builder.Dockerfile ./ --build-arg RUST_VERSION=$(RUST_VERSION)
docker build -t scrolltech/rust-builder:latest -f rust-builder.Dockerfile ./
docker image tag scrolltech/rust-builder:latest scrolltech/rust-builder:$(RUST_VERSION)

rust-alpine-builder:
docker build -t scrolltech/rust-alpine-builder:$(RUST_VERSION) -f rust-alpine-builder.Dockerfile ./ --build-arg RUST_VERSION=$(RUST_VERSION)
docker build -t scrolltech/rust-alpine-builder:latest -f rust-alpine-builder.Dockerfile ./
docker image tag scrolltech/rust-alpine-builder:latest scrolltech/rust-alpine-builder:$(RUST_VERSION)

go-rust-alpine-builder:
docker build -t scrolltech/go-rust-alpine-builder:go-$(GO_VERSION)-rust-$(RUST_VERSION) -f go-rust-alpine-builder.Dockerfile ./ --build-arg GO_VERSION=$(GO_VERSION) --build-arg RUST_VERSION=$(RUST_VERSION)
docker build -t scrolltech/go-rust-alpine-builder:latest -f go-rust-alpine-builder.Dockerfile ./
docker image tag scrolltech/go-rust-alpine-builder:latest scrolltech/go-rust-alpine-builder:go-$(GO_VERSION)-rust-$(RUST_VERSION)

py-runner:
docker build -t scrolltech/py-runner:$(PYTHON_VERSION) -f py-runner.Dockerfile ./ --build-arg PYTHON_VERSION=$(PYTHON_VERSION)
docker build -t scrolltech/py-runner:latest -f py-runner.Dockerfile ./
docker image tag scrolltech/py-runner:latest scrolltech/py-runner:$(PYTHON_VERSION)

all: go-alpine-builder rust-builder rust-alpine-builder go-rust-alpine-builder go-rust-builder cuda-go-rust-builder py-runner
all: go-alpine-builder rust-builder rust-alpine-builder go-rust-alpine-builder go-rust-builder py-runner

publish:
docker push scrolltech/go-alpine-builder:latest
docker push scrolltech/go-alpine-builder:$(GO_VERSION)
docker push scrolltech/rust-builder:latest
docker push scrolltech/rust-builder:$(RUST_VERSION)
docker push scrolltech/rust-alpine-builder:latest
docker push scrolltech/rust-alpine-builder:$(RUST_VERSION)
docker push scrolltech/go-rust-alpine-builder:latest
docker push scrolltech/go-rust-alpine-builder:go-$(GO_VERSION)-rust-$(RUST_VERSION)
docker push scrolltech/go-rust-builder:latest
docker push scrolltech/go-rust-builder:go-$(GO_VERSION)-rust-$(RUST_VERSION)
docker push scrolltech/cuda-go-rust-builder:cuda-$(CUDA_VERSION)-go-$(GO_VERSION)-rust-$(RUST_VERSION)
docker push scrolltech/py-runner:latest
docker push scrolltech/py-runner:$(PYTHON_VERSION)

@@ -1,35 +0,0 @@
ARG CUDA_VERSION=11.7.1
ARG GO_VERSION=1.19
ARG RUST_VERSION=nightly-2022-12-10
ARG CARGO_CHEF_TAG=0.1.41

FROM nvidia/cuda:${CUDA_VERSION}-devel-ubuntu22.04
RUN apt-get update
# Install basic packages
RUN apt-get install build-essential curl wget git pkg-config --no-install-recommends -y
# Install dev-packages
RUN apt-get install libclang-dev libssl-dev cmake llvm --no-install-recommends -y
# Install related libs
RUN apt install libprocps-dev libboost-all-dev libmpfr-dev libgmp-dev --no-install-recommends -y
# Clean installed cache
RUN rm -rf /var/lib/apt/lists/*

# Install Rust
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
ENV PATH="/root/.cargo/bin:${PATH}"
ENV CARGO_HOME=/root/.cargo

# Add Toolchain
ARG RUST_VERSION
RUN rustup toolchain install ${RUST_VERSION}
ARG CARGO_CHEF_TAG
RUN cargo install cargo-chef --locked --version ${CARGO_CHEF_TAG} \
&& rm -rf $CARGO_HOME/registry/

# Install Go
ARG GO_VERSION
RUN rm -rf /usr/local/go
RUN wget https://go.dev/dl/go${GO_VERSION}.1.linux-amd64.tar.gz
RUN tar -C /usr/local -xzf go${GO_VERSION}.1.linux-amd64.tar.gz
RUN rm go${GO_VERSION}.1.linux-amd64.tar.gz
ENV PATH="/usr/local/go/bin:${PATH}"

@@ -1,6 +1,4 @@
ARG GO_VERSION=1.19

FROM golang:${GO_VERSION}-alpine
FROM golang:1.18-alpine

# ENV GOPROXY https://goproxy.cn,direct

@@ -1,8 +1,6 @@
ARG GO_VERSION=1.19
ARG RUST_VERSION=nightly-2022-12-10
FROM golang:1.18-alpine
ARG CARGO_CHEF_TAG=0.1.41

FROM golang:${GO_VERSION}-alpine
ARG DEFAULT_RUST_TOOLCHAIN=nightly-2022-12-10

RUN apk add --no-cache gcc musl-dev linux-headers git ca-certificates openssl-dev

@@ -26,14 +24,12 @@ RUN set -eux; \
wget "$url"; \
chmod +x rustup-init;

ARG RUST_VERSION
RUN ./rustup-init -y --no-modify-path --default-toolchain ${RUST_VERSION}; \
RUN ./rustup-init -y --no-modify-path --default-toolchain ${DEFAULT_RUST_TOOLCHAIN}; \
rm rustup-init; \
chmod -R a+w $RUSTUP_HOME $CARGO_HOME; \
rustup --version; \
cargo --version; \
rustc --version;

ARG CARGO_CHEF_TAG
RUN cargo install cargo-chef --locked --version ${CARGO_CHEF_TAG} \
&& rm -rf $CARGO_HOME/registry/

@@ -1,13 +1,10 @@
ARG GO_VERSION=1.19
ARG RUST_VERSION=nightly-2022-12-10
ARG CARGO_CHEF_TAG=0.1.41

FROM ubuntu:20.04

RUN apt-get update && ln -fs /usr/share/zoneinfo/America/New_York /etc/localtime

# Install basic packages
RUN apt-get install build-essential curl wget git pkg-config -y

# Install dev-packages
RUN apt-get install libclang-dev libssl-dev llvm -y

@@ -15,17 +12,23 @@ RUN apt-get install libclang-dev libssl-dev llvm -y
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
ENV PATH="/root/.cargo/bin:${PATH}"
ENV CARGO_HOME=/root/.cargo

# Add Toolchain
ARG RUST_VERSION
RUN rustup toolchain install ${RUST_VERSION}
ARG CARGO_CHEF_TAG
RUN rustup toolchain install nightly-2022-12-10

# TODO: make this ARG
ENV CARGO_CHEF_TAG=0.1.41

RUN cargo install cargo-chef --locked --version ${CARGO_CHEF_TAG} \
&& rm -rf $CARGO_HOME/registry/

# Install Go
ARG GO_VERSION
RUN rm -rf /usr/local/go
RUN wget https://go.dev/dl/go${GO_VERSION}.1.linux-amd64.tar.gz
RUN tar -C /usr/local -xzf go${GO_VERSION}.1.linux-amd64.tar.gz
RUN rm go${GO_VERSION}.1.linux-amd64.tar.gz
# for 1.17
# RUN wget https://go.dev/dl/go1.17.13.linux-amd64.tar.gz
# RUN tar -C /usr/local -xzf go1.17.13.linux-amd64.tar.gz
# for 1.18
RUN wget https://go.dev/dl/go1.18.9.linux-amd64.tar.gz
RUN tar -C /usr/local -xzf go1.18.9.linux-amd64.tar.gz

ENV PATH="/usr/local/go/bin:${PATH}"

@@ -1,4 +1,3 @@
ARG PYTHON_VERSION=3.10
FROM python:${PYTHON_VERSION}-alpine
FROM python:3.10-alpine3.15

RUN apk add --no-cache gcc g++ make musl-dev

@@ -1,8 +1,7 @@
ARG ALPINE_VERSION=3.15
ARG RUST_VERSION=nightly-2022-12-10
ARG CARGO_CHEF_TAG=0.1.41

FROM alpine:${ALPINE_VERSION}
ARG CARGO_CHEF_TAG=0.1.41
ARG DEFAULT_RUST_TOOLCHAIN=nightly-2022-12-10

RUN apk add --no-cache \
ca-certificates \
@@ -28,14 +27,12 @@ RUN set -eux; \
wget "$url"; \
chmod +x rustup-init;

ARG RUST_VERSION
RUN ./rustup-init -y --no-modify-path --default-toolchain ${RUST_VERSION}; \
RUN ./rustup-init -y --no-modify-path --default-toolchain ${DEFAULT_RUST_TOOLCHAIN}; \
rm rustup-init; \
chmod -R a+w $RUSTUP_HOME $CARGO_HOME; \
rustup --version; \
cargo --version; \
rustc --version;

ARG CARGO_CHEF_TAG
RUN cargo install cargo-chef --locked --version ${CARGO_CHEF_TAG} \
&& rm -rf $CARGO_HOME/registry/

@@ -1,6 +1,3 @@
ARG RUST_VERSION=nightly-2022-12-10
ARG CARGO_CHEF_TAG=0.1.41

FROM ubuntu:20.04

RUN apt-get update && ln -fs /usr/share/zoneinfo/America/New_York /etc/localtime
@@ -14,10 +11,6 @@ RUN apt-get install libclang-dev libssl-dev llvm -y
# Install Rust
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
ENV PATH="/root/.cargo/bin:${PATH}"
ENV CARGO_HOME=/root/.cargo

# Add Toolchain
ARG RUST_VERSION
RUN rustup toolchain install ${RUST_VERSION}
ARG CARGO_CHEF_TAG
RUN cargo install cargo-chef --locked --version ${CARGO_CHEF_TAG} \
&& rm -rf $CARGO_HOME/registry/
RUN rustup toolchain install nightly-2022-12-10

@@ -15,7 +15,7 @@ import (

const (
// GolangCIVersion to be used for linting.
GolangCIVersion = "github.com/golangci/golangci-lint/cmd/golangci-lint@v1.52.2"
GolangCIVersion = "github.com/golangci/golangci-lint/cmd/golangci-lint@v1.50.0"
)

// GOBIN environment variable.

@@ -1,9 +1,9 @@
#!/bin/bash
set -uex
${GOBIN}/gocover-cobertura < coverage.bridge.txt > coverage.bridge.xml
${GOBIN}/gocover-cobertura < coverage.db.txt > coverage.db.xml
${GOBIN}/gocover-cobertura < coverage.common.txt > coverage.common.xml
${GOBIN}/gocover-cobertura < coverage.coordinator.txt > coverage.coordinator.xml
${GOROOT}/bin/bin/gocover-cobertura < coverage.bridge.txt > coverage.bridge.xml
${GOROOT}/bin/bin/gocover-cobertura < coverage.db.txt > coverage.db.xml
${GOROOT}/bin/bin/gocover-cobertura < coverage.common.txt > coverage.common.xml
${GOROOT}/bin/bin/gocover-cobertura < coverage.coordinator.txt > coverage.coordinator.xml
#${GOROOT}/bin/bin/gocover-cobertura < coverage.integration.txt > coverage.integration.xml

npx cobertura-merge -o cobertura.xml \

@@ -7,7 +7,7 @@ pipeline {
timeout (20)
}
tools {
go 'go-1.19'
go 'go-1.18'
nodejs "nodejs"
}
environment {

@@ -1,6 +1,6 @@
module scroll-tech/common

go 1.19
go 1.18

require (
github.com/docker/docker v20.10.21+incompatible

2 common/testdata/blockTrace_03.json vendored
@@ -29,7 +29,7 @@
{
"type": 2,
"nonce": 2,
"txHash": "0x6b50040f5f14bad253f202b0775d6742131bcaee6b992f05578386f00e53b7e4",
"txHash": "0xaaaeb971adfac989c7db5426737bc2932756091a5730ea6d5324f93e4cff9713",
"gas": 1152994,
"gasPrice": "0x3b9b0a17",
"from": "0x1c5a77d9fa7ef466951b2f01f724bca3a5820b63",

@@ -1,66 +0,0 @@
package types

import (
"encoding/binary"
"math/big"

"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/crypto"
)

// BatchHeader contains batch header info to be committed.
type BatchHeader struct {
// Encoded in BatchHeaderV0Codec
version uint8
batchIndex uint64
l1MessagePopped uint64
totalL1MessagePopped uint64
dataHash common.Hash
parentBatchHash common.Hash
skippedL1MessageBitmap []*big.Int // LSB is the first L1 message
}

// NewBatchHeader creates a new BatchHeader
func NewBatchHeader(version uint8, batchIndex, totalL1MessagePoppedBefore uint64, parentBatchHash common.Hash, chunks []*Chunk) (*BatchHeader, error) {
// TODO calculate `l1MessagePopped`, `totalL1MessagePopped`, and `skippedL1MessageBitmap` based on `chunks`
var dataBytes []byte
for _, chunk := range chunks {
// Build dataHash
chunkBytes, err := chunk.Hash()
if err != nil {
return nil, err
}

dataBytes = append(dataBytes, chunkBytes...)
}
dataHash := crypto.Keccak256Hash(dataBytes)

return &BatchHeader{
version: version,
batchIndex: batchIndex,
l1MessagePopped: 0, // TODO
totalL1MessagePopped: totalL1MessagePoppedBefore, // TODO
dataHash: dataHash,
parentBatchHash: parentBatchHash,
skippedL1MessageBitmap: nil, // TODO
}, nil
}

// Encode encodes the BatchHeader into RollupV2 BatchHeaderV0Codec Encoding.
func (b *BatchHeader) Encode() []byte {
batchBytes := make([]byte, 89)
batchBytes[0] = b.version
binary.BigEndian.PutUint64(batchBytes[1:], b.batchIndex)
binary.BigEndian.PutUint64(batchBytes[9:], b.l1MessagePopped)
binary.BigEndian.PutUint64(batchBytes[17:], b.totalL1MessagePopped)
copy(batchBytes[25:], b.dataHash[:])
copy(batchBytes[57:], b.parentBatchHash[:])
// TODO: encode skippedL1MessageBitmap

return batchBytes
}

// Hash calculates the hash of the batch header.
func (b *BatchHeader) Hash() common.Hash {
return crypto.Keccak256Hash(b.Encode())
}

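For reference, the fixed 89-byte layout written by `Encode` above is: version (1 byte), batchIndex (8), l1MessagePopped (8), totalL1MessagePopped (8), dataHash (32 at offset 25), parentBatchHash (32 at offset 57). A minimal inverse-decoding sketch of that prefix, not part of the repo:

```go
// Hedged sketch: decodes the fixed 89-byte BatchHeader V0 prefix described
// above. Field offsets mirror Encode(); the skipped-message bitmap, if any,
// follows byte 89 and is not handled here.
package sketch

import (
	"encoding/binary"
	"errors"

	"github.com/scroll-tech/go-ethereum/common"
)

type decodedBatchHeader struct {
	Version              uint8
	BatchIndex           uint64
	L1MessagePopped      uint64
	TotalL1MessagePopped uint64
	DataHash             common.Hash
	ParentBatchHash      common.Hash
}

func decodeBatchHeaderV0(b []byte) (*decodedBatchHeader, error) {
	if len(b) < 89 {
		return nil, errors.New("batch header too short: need at least 89 bytes")
	}
	h := &decodedBatchHeader{
		Version:              b[0],
		BatchIndex:           binary.BigEndian.Uint64(b[1:9]),
		L1MessagePopped:      binary.BigEndian.Uint64(b[9:17]),
		TotalL1MessagePopped: binary.BigEndian.Uint64(b[17:25]),
	}
	copy(h.DataHash[:], b[25:57])
	copy(h.ParentBatchHash[:], b[57:89])
	return h, nil
}
```
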
@@ -1,106 +0,0 @@
package types

import (
"encoding/json"
"os"
"testing"

"github.com/scroll-tech/go-ethereum/common"
"github.com/stretchr/testify/assert"
)

func TestNewBatchHeader(t *testing.T) {
templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
assert.NoError(t, err)

wrappedBlock := &WrappedBlock{}
assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
chunk := &Chunk{
Blocks: []*WrappedBlock{
wrappedBlock,
},
}
parentBatchHeader := &BatchHeader{
version: 1,
batchIndex: 0,
l1MessagePopped: 0,
totalL1MessagePopped: 0,
dataHash: common.HexToHash("0x0"),
parentBatchHash: common.HexToHash("0x0"),
skippedL1MessageBitmap: nil,
}
batchHeader, err := NewBatchHeader(1, 1, 0, parentBatchHeader.Hash(), []*Chunk{chunk})
assert.NoError(t, err)
assert.NotNil(t, batchHeader)
}

func TestBatchHeaderEncode(t *testing.T) {
templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
assert.NoError(t, err)

wrappedBlock := &WrappedBlock{}
assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
chunk := &Chunk{
Blocks: []*WrappedBlock{
wrappedBlock,
},
}
parentBatchHeader := &BatchHeader{
version: 1,
batchIndex: 0,
l1MessagePopped: 0,
totalL1MessagePopped: 0,
dataHash: common.HexToHash("0x0"),
parentBatchHash: common.HexToHash("0x0"),
skippedL1MessageBitmap: nil,
}
batchHeader, err := NewBatchHeader(1, 1, 0, parentBatchHeader.Hash(), []*Chunk{chunk})
assert.NoError(t, err)
assert.NotNil(t, batchHeader)
bytes := batchHeader.Encode()
assert.Equal(t, 89, len(bytes))
assert.Equal(t, "0100000000000000010000000000000000000000000000000010a64c9bd905f8caf5d668fbda622d6558c5a42cdb4b3895709743d159c22e534136709aabc8a23aa17fbcc833da2f7857d3c2884feec9aae73429c135f94985", common.Bytes2Hex(bytes))
}

func TestBatchHeaderHash(t *testing.T) {
templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
assert.NoError(t, err)

wrappedBlock := &WrappedBlock{}
assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
chunk := &Chunk{
Blocks: []*WrappedBlock{
wrappedBlock,
},
}
parentBatchHeader := &BatchHeader{
version: 1,
batchIndex: 0,
l1MessagePopped: 0,
totalL1MessagePopped: 0,
dataHash: common.HexToHash("0x0"),
parentBatchHash: common.HexToHash("0x0"),
skippedL1MessageBitmap: nil,
}
batchHeader, err := NewBatchHeader(1, 1, 0, parentBatchHeader.Hash(), []*Chunk{chunk})
assert.NoError(t, err)
assert.NotNil(t, batchHeader)
hash := batchHeader.Hash()
assert.Equal(t, "d69da4357da0073f4093c76e49f077e21bb52f48f57ee3e1fbd9c38a2881af81", common.Bytes2Hex(hash.Bytes()))

templateBlockTrace, err = os.ReadFile("../testdata/blockTrace_03.json")
assert.NoError(t, err)

wrappedBlock2 := &WrappedBlock{}
assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock2))
chunk2 := &Chunk{
Blocks: []*WrappedBlock{
wrappedBlock2,
},
}
batchHeader2, err := NewBatchHeader(1, 2, 0, batchHeader.Hash(), []*Chunk{chunk2})
assert.NoError(t, err)
assert.NotNil(t, batchHeader2)
hash2 := batchHeader2.Hash()
assert.Equal(t, "34de600163aa745d4513113137a5b54960d13f0d3f2849e490c4b875028bf930", common.Bytes2Hex(hash2.Bytes()))
}

@@ -1,10 +1,6 @@
package types

import (
"encoding/binary"
"errors"
"math"

"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
)
@@ -16,30 +12,3 @@ type WrappedBlock struct {
Transactions []*types.TransactionData `json:"transactions"`
WithdrawTrieRoot common.Hash `json:"withdraw_trie_root,omitempty"`
}

// Encode encodes the WrappedBlock into RollupV2 BlockContext Encoding.
func (w *WrappedBlock) Encode() ([]byte, error) {
bytes := make([]byte, 60)

if !w.Header.Number.IsUint64() {
return nil, errors.New("block number is not uint64")
}

if len(w.Transactions) > math.MaxUint16 {
return nil, errors.New("number of transactions exceeds max uint16")
}

binary.BigEndian.PutUint64(bytes[0:], w.Header.Number.Uint64())
binary.BigEndian.PutUint64(bytes[8:], w.Header.Time)
// TODO: Currently, baseFee is 0
binary.BigEndian.PutUint64(bytes[48:], w.Header.GasLimit)
binary.BigEndian.PutUint16(bytes[56:], uint16(len(w.Transactions)))
// TODO: set numL1Messages properly

return bytes, nil
}

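The offsets in the removed `Encode` imply a 60-byte block context: number at 0..8, timestamp at 8..16, baseFee at 16..48 (zero for now per the TODO), gasLimit at 48..56, numTransactions at 56..58, and numL1Messages at 58..60 (not yet populated). A hedged decoding sketch of that layout, for illustration only:

```go
// Hedged sketch of the 60-byte BlockContext layout implied by Encode above.
// Bytes 16..48 (baseFee) and 58..60 (numL1Messages) are zero in this version
// per the TODOs in the removed code.
package sketch

import "encoding/binary"

type blockContext struct {
	Number          uint64 // bytes 0..8
	Timestamp       uint64 // bytes 8..16
	// baseFee occupies bytes 16..48 and is currently always zero
	GasLimit        uint64 // bytes 48..56
	NumTransactions uint16 // bytes 56..58
	NumL1Messages   uint16 // bytes 58..60, not yet populated
}

func decodeBlockContext(b [60]byte) blockContext {
	return blockContext{
		Number:          binary.BigEndian.Uint64(b[0:8]),
		Timestamp:       binary.BigEndian.Uint64(b[8:16]),
		GasLimit:        binary.BigEndian.Uint64(b[48:56]),
		NumTransactions: binary.BigEndian.Uint16(b[56:58]),
		NumL1Messages:   binary.BigEndian.Uint16(b[58:60]),
	}
}
```
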
@@ -1,117 +0,0 @@
package types

import (
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"strings"

"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
)

// Chunk contains blocks to be encoded
type Chunk struct {
Blocks []*WrappedBlock `json:"blocks"`
}

// Encode encodes the Chunk into RollupV2 Chunk Encoding.
func (c *Chunk) Encode() ([]byte, error) {
numBlocks := len(c.Blocks)

if numBlocks > 255 {
return nil, errors.New("number of blocks exceeds 1 byte")
}
if numBlocks == 0 {
return nil, errors.New("number of blocks is 0")
}

var chunkBytes []byte
chunkBytes = append(chunkBytes, byte(numBlocks))

var l2TxDataBytes []byte

for _, block := range c.Blocks {
blockBytes, err := block.Encode()
if err != nil {
return nil, fmt.Errorf("failed to encode block: %v", err)
}

if len(blockBytes) != 60 {
return nil, fmt.Errorf("block encoding is not 60 bytes long %x", len(blockBytes))
}

chunkBytes = append(chunkBytes, blockBytes...)

// Append l2Tx Hashes
for _, txData := range block.Transactions {
if txData.Type == 0x7E {
continue
}
data, _ := hexutil.Decode(txData.Data)
// right now we only support legacy tx
tx := types.NewTx(&types.LegacyTx{
Nonce: txData.Nonce,
To: txData.To,
Value: txData.Value.ToInt(),
Gas: txData.Gas,
GasPrice: txData.GasPrice.ToInt(),
Data: data,
V: txData.V.ToInt(),
R: txData.R.ToInt(),
S: txData.S.ToInt(),
})
rlpTxData, _ := tx.MarshalBinary()
var txLen [4]byte
binary.BigEndian.PutUint32(txLen[:], uint32(len(rlpTxData)))
l2TxDataBytes = append(l2TxDataBytes, txLen[:]...)
l2TxDataBytes = append(l2TxDataBytes, rlpTxData...)
}
}

chunkBytes = append(chunkBytes, l2TxDataBytes...)

return chunkBytes, nil
}

// Hash hashes the Chunk into RollupV2 Chunk Hash
func (c *Chunk) Hash() ([]byte, error) {
chunkBytes, err := c.Encode()
if err != nil {
return nil, err
}
numBlocks := chunkBytes[0]

// concatenate block contexts
// only first 58 bytes is needed
var dataBytes []byte
for i := 0; i < int(numBlocks); i++ {
// only first 58 bytes is needed
dataBytes = append(dataBytes, chunkBytes[1+60*i:60*i+59]...)
}

// concatenate l1 and l2 tx hashes
var l2TxHashes []byte
for _, block := range c.Blocks {
for _, txData := range block.Transactions {
// TODO: concatenate l1 message hashes
if txData.Type == 0x7E {
continue
}
// concatenate l2 txs hashes
// retrieve the number of transactions in current block.
txHash := strings.TrimPrefix(txData.TxHash, "0x")
hashBytes, err := hex.DecodeString(txHash)
if err != nil {
return nil, err
}
l2TxHashes = append(l2TxHashes, hashBytes...)
}
}

dataBytes = append(dataBytes, l2TxHashes...)
hash := crypto.Keccak256Hash(dataBytes).Bytes()
return hash, nil
}

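The removed chunk format is therefore: one numBlocks byte, then numBlocks 60-byte block contexts, then the L2 transactions, each prefixed with a 4-byte big-endian length. `Hash` keeps only the first 58 bytes of each context, which matches the slice expression `chunkBytes[1+60*i : 60*i+59]`. A tiny sketch making those offsets explicit:

```go
// Hedged sketch: offset helpers for the chunk byte layout used by
// Encode/Hash above. Hash drops the last 2 bytes of each 60-byte context.
package sketch

// blockContextAt returns the full 60-byte context of block i.
func blockContextAt(chunkBytes []byte, i int) []byte {
	start := 1 + 60*i // skip the leading numBlocks byte
	return chunkBytes[start : start+60]
}

// hashedContextAt returns the 58-byte prefix that Hash concatenates,
// mirroring chunkBytes[1+60*i : 60*i+59] in the removed code.
func hashedContextAt(chunkBytes []byte, i int) []byte {
	start := 1 + 60*i
	return chunkBytes[start : start+58]
}
```
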
@@ -1,92 +0,0 @@
package types

import (
"encoding/hex"
"encoding/json"
"os"
"testing"

"github.com/stretchr/testify/assert"
)

func TestChunkEncode(t *testing.T) {
// Test case 1: when the chunk contains no blocks.
chunk := &Chunk{
Blocks: []*WrappedBlock{},
}
bytes, err := chunk.Encode()
assert.Nil(t, bytes)
assert.Error(t, err)
assert.Contains(t, err.Error(), "number of blocks is 0")

// Test case 2: when the chunk contains more than 255 blocks.
chunk = &Chunk{
Blocks: []*WrappedBlock{},
}
for i := 0; i < 256; i++ {
chunk.Blocks = append(chunk.Blocks, &WrappedBlock{})
}
bytes, err = chunk.Encode()
assert.Nil(t, bytes)
assert.Error(t, err)
assert.Contains(t, err.Error(), "number of blocks exceeds 1 byte")

// Test case 3: when the chunk contains one block.
templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
assert.NoError(t, err)

wrappedBlock := &WrappedBlock{}
assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
chunk = &Chunk{
Blocks: []*WrappedBlock{
wrappedBlock,
},
}
bytes, err = chunk.Encode()
hexString := hex.EncodeToString(bytes)
assert.NoError(t, err)
assert.Equal(t, 299, len(bytes))
assert.Equal(t, "0100000000000000020000000063807b2a0000000000000000000000000000000000000000000000000000000000000000000355418d1e81840002000000000073f87180843b9aec2e8307a12094c0c4c8baea3f6acb49b6e1fb9e2adeceeacb0ca28a152d02c7e14af60000008083019ecea0ab07ae99c67aa78e7ba5cf6781e90cc32b219b1de102513d56548a41e86df514a034cbd19feacd73e8ce64d00c4d1996b9b5243c578fd7f51bfaec288bbaf42a8b00000073f87101843b9aec2e8307a1209401bae6bf68e9a03fb2bc0615b1bf0d69ce9411ed8a152d02c7e14af60000008083019ecea0f039985866d8256f10c1be4f7b2cace28d8f20bde27e2604393eb095b7f77316a05a3e6e81065f2b4604bcec5bd4aba684835996fc3f879380aac1c09c6eed32f1", hexString)
}

func TestChunkHash(t *testing.T) {
// Test case 1: when the chunk contains no blocks
chunk := &Chunk{
Blocks: []*WrappedBlock{},
}
bytes, err := chunk.Hash()
assert.Nil(t, bytes)
assert.Error(t, err)
assert.Contains(t, err.Error(), "number of blocks is 0")

// Test case 2: successfully hashing a chunk on one block
templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
assert.NoError(t, err)
wrappedBlock := &WrappedBlock{}
assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
chunk = &Chunk{
Blocks: []*WrappedBlock{
wrappedBlock,
},
}
bytes, err = chunk.Hash()
hexString := hex.EncodeToString(bytes)
assert.NoError(t, err)
assert.Equal(t, "78c839dfc494396c16b40946f32b3f4c3e8c2d4bfd04aefcf235edec474482f8", hexString)

// Test case 3: successfully hashing a chunk on two blocks
templateBlockTrace1, err := os.ReadFile("../testdata/blockTrace_03.json")
assert.NoError(t, err)
wrappedBlock1 := &WrappedBlock{}
assert.NoError(t, json.Unmarshal(templateBlockTrace1, wrappedBlock1))
chunk = &Chunk{
Blocks: []*WrappedBlock{
wrappedBlock,
wrappedBlock1,
},
}
bytes, err = chunk.Hash()
hexString = hex.EncodeToString(bytes)
assert.NoError(t, err)
assert.Equal(t, "aa9e494f72bc6965857856f0fae6916f27b2a6591c714a573b2fab46df03b8ae", hexString)
}

@@ -233,30 +233,31 @@ const (

// BlockBatch is structure of stored block_batch
type BlockBatch struct {
Hash string `json:"hash" db:"hash"`
Index uint64 `json:"index" db:"index"`
ParentHash string `json:"parent_hash" db:"parent_hash"`
StartBlockNumber uint64 `json:"start_block_number" db:"start_block_number"`
StartBlockHash string `json:"start_block_hash" db:"start_block_hash"`
EndBlockNumber uint64 `json:"end_block_number" db:"end_block_number"`
EndBlockHash string `json:"end_block_hash" db:"end_block_hash"`
StateRoot string `json:"state_root" db:"state_root"`
TotalTxNum uint64 `json:"total_tx_num" db:"total_tx_num"`
TotalL1TxNum uint64 `json:"total_l1_tx_num" db:"total_l1_tx_num"`
TotalL2Gas uint64 `json:"total_l2_gas" db:"total_l2_gas"`
ProvingStatus ProvingStatus `json:"proving_status" db:"proving_status"`
Proof []byte `json:"proof" db:"proof"`
ProofTimeSec uint64 `json:"proof_time_sec" db:"proof_time_sec"`
RollupStatus RollupStatus `json:"rollup_status" db:"rollup_status"`
OracleStatus GasOracleStatus `json:"oracle_status" db:"oracle_status"`
CommitTxHash sql.NullString `json:"commit_tx_hash" db:"commit_tx_hash"`
FinalizeTxHash sql.NullString `json:"finalize_tx_hash" db:"finalize_tx_hash"`
OracleTxHash sql.NullString `json:"oracle_tx_hash" db:"oracle_tx_hash"`
CreatedAt *time.Time `json:"created_at" db:"created_at"`
ProverAssignedAt *time.Time `json:"prover_assigned_at" db:"prover_assigned_at"`
ProvedAt *time.Time `json:"proved_at" db:"proved_at"`
CommittedAt *time.Time `json:"committed_at" db:"committed_at"`
FinalizedAt *time.Time `json:"finalized_at" db:"finalized_at"`
Hash string `json:"hash" db:"hash"`
Index uint64 `json:"index" db:"index"`
ParentHash string `json:"parent_hash" db:"parent_hash"`
StartBlockNumber uint64 `json:"start_block_number" db:"start_block_number"`
StartBlockHash string `json:"start_block_hash" db:"start_block_hash"`
EndBlockNumber uint64 `json:"end_block_number" db:"end_block_number"`
EndBlockHash string `json:"end_block_hash" db:"end_block_hash"`
StateRoot string `json:"state_root" db:"state_root"`
TotalTxNum uint64 `json:"total_tx_num" db:"total_tx_num"`
TotalL1TxNum uint64 `json:"total_l1_tx_num" db:"total_l1_tx_num"`
TotalL2Gas uint64 `json:"total_l2_gas" db:"total_l2_gas"`
ProvingStatus ProvingStatus `json:"proving_status" db:"proving_status"`
Proof []byte `json:"proof" db:"proof"`
InstanceCommitments []byte `json:"instance_commitments" db:"instance_commitments"`
ProofTimeSec uint64 `json:"proof_time_sec" db:"proof_time_sec"`
RollupStatus RollupStatus `json:"rollup_status" db:"rollup_status"`
OracleStatus GasOracleStatus `json:"oracle_status" db:"oracle_status"`
CommitTxHash sql.NullString `json:"commit_tx_hash" db:"commit_tx_hash"`
FinalizeTxHash sql.NullString `json:"finalize_tx_hash" db:"finalize_tx_hash"`
OracleTxHash sql.NullString `json:"oracle_tx_hash" db:"oracle_tx_hash"`
CreatedAt *time.Time `json:"created_at" db:"created_at"`
ProverAssignedAt *time.Time `json:"prover_assigned_at" db:"prover_assigned_at"`
ProvedAt *time.Time `json:"proved_at" db:"proved_at"`
CommittedAt *time.Time `json:"committed_at" db:"committed_at"`
FinalizedAt *time.Time `json:"finalized_at" db:"finalized_at"`
}

// AggTask is a wrapper type around db AggProveTask type.
@@ -268,6 +269,6 @@ type AggTask struct {
EndBatchHash string `json:"end_batch_hash" db:"end_batch_hash"`
ProvingStatus ProvingStatus `json:"proving_status" db:"proving_status"`
Proof []byte `json:"proof" db:"proof"`
CreatedAt *time.Time `json:"created_at" db:"created_at"`
UpdatedAt *time.Time `json:"updated_at" db:"updated_at"`
CreatedTime *time.Time `json:"created_time" db:"created_time"`
UpdatedTime *time.Time `json:"updated_time" db:"updated_time"`
}

@@ -4,8 +4,6 @@ import (
"crypto/ecdsa"
"crypto/rand"
"encoding/hex"
"errors"
"fmt"

"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
@@ -207,7 +205,7 @@ type TaskMsg struct {
// For decentralization, basic rollers will get block hashes from the coordinator. So that they can refer to the block hashes and fetch traces locally. Only applicable for basic rollers.
BlockHashes []common.Hash `json:"block_hashes,omitempty"`
// Only applicable for aggregator rollers.
SubProofs []*AggProof `json:"sub_proofs,omitempty"`
SubProofs [][]byte `json:"sub_proofs,omitempty"`
}

// ProofDetail is the message received from rollers that contains zk proof, the status of
@@ -239,26 +237,3 @@ type AggProof struct {
Vk []byte `json:"vk"`
BlockCount uint `json:"block_count"`
}

// SanityCheck checks whether an AggProof is in a legal format
// TODO: change to check Proof&Instance when upgrading to snark verifier v0.4
func (ap *AggProof) SanityCheck() error {
if ap == nil {
return errors.New("agg_proof is nil")
}

if len(ap.Proof) == 0 {
return errors.New("proof not ready")
}
if len(ap.FinalPair) == 0 {
return errors.New("final_pair not ready")
}
if len(ap.Proof)%32 != 0 {
return fmt.Errorf("proof buffer has wrong length, expected: 32, got: %d", len(ap.Proof))
}
if len(ap.FinalPair)%32 != 0 {
return fmt.Errorf("final_pair buffer has wrong length, expected: 32, got: %d", len(ap.FinalPair))
}

return nil
}

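For context, the `SanityCheck` removed in this hunk enforced that both buffers were non-empty multiples of 32 bytes (despite its error text reading "expected: 32", the checks are modulo-32). A standalone sketch of the equivalent shape validation callers lose here:

```go
// Hedged sketch of the shape validation performed by the removed
// SanityCheck: both buffers must be non-empty multiples of 32 bytes.
package main

import (
	"errors"
	"fmt"
)

func checkProofShape(proof, finalPair []byte) error {
	if len(proof) == 0 {
		return errors.New("proof not ready")
	}
	if len(finalPair) == 0 {
		return errors.New("final_pair not ready")
	}
	if len(proof)%32 != 0 {
		return fmt.Errorf("proof length %d is not a multiple of 32", len(proof))
	}
	if len(finalPair)%32 != 0 {
		return fmt.Errorf("final_pair length %d is not a multiple of 32", len(finalPair))
	}
	return nil
}

func main() {
	fmt.Println(checkProofShape(make([]byte, 64), make([]byte, 32))) // <nil>
	fmt.Println(checkProofShape(make([]byte, 33), make([]byte, 32))) // error
}
```
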
@@ -5,7 +5,7 @@ import (
"runtime/debug"
)

var tag = "v3.3.4"
var tag = "v3.1.2"

var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {

@@ -1,5 +1,4 @@
#!/usr/bin/env sh
. "$(dirname -- "$0")/_/husky.sh"

cd contracts
yarn lint-staged
yarn lint-staged && forge build && npx hardhat compile

@@ -2,6 +2,7 @@

Note: For more comprehensive documentation, see [`./docs/`](./docs).


## Directory Structure

```
@@ -23,8 +24,10 @@ remappings.txt - "foundry dependency mappings"
...
```


## Dependencies


### Foundry

First run the command below to get foundryup, the Foundry toolchain installer:
@@ -39,22 +42,24 @@ Then, run `foundryup` in a new terminal session or after reloading your `PATH`.

Other ways to install Foundry can be found [here](https://github.com/foundry-rs/foundry#installation).


### Hardhat

```
yarn install
```


## Build

- Run `git submodule update --init --recursive` to initialise git submodules.
- Run `yarn prettier:solidity` to run linting in fix mode, will auto-format all solidity codes.
- Run `yarn prettier` to run linting in fix mode, will auto-format all typescript codes.
- Run `yarn prepare` to install the precommit linting hook
- Run `forge build` to compile contracts with foundry.
- Run `npx hardhat compile` to compile with hardhat.
- Run `forge test -vvv` to run foundry units tests. It will compile all contracts before running the unit tests.
- Run `npx hardhat test` to run integration tests. It may not compile all contracts before running, it's better to run `npx hardhat compile` first.
+ Run `git submodule update --init --recursive` to initialise git submodules.
+ Run `yarn prettier:solidity` to run linting in fix mode, will auto-format all solidity codes.
+ Run `yarn prettier` to run linting in fix mode, will auto-format all typescript codes.
+ Run `forge build` to compile contracts with foundry.
+ Run `npx hardhat compile` to compile with hardhat.
+ Run `forge test -vvv` to run foundry units tests. It will compile all contracts before running the unit tests.
+ Run `npx hardhat test` to run integration tests. It may not compile all contracts before running, it's better to run `npx hardhat compile` first.


## TODO

@@ -11,8 +11,7 @@
"lint:sol": "./node_modules/.bin/prettier --write 'src/**/*.sol'",
"lint:ts": "./node_modules/.bin/prettier --write 'integration-test/**/*.ts' 'scripts/**/*.ts' *.ts",
"lint": "yarn lint:ts && yarn lint:sol",
"coverage": "hardhat coverage",
"prepare": "cd .. && husky install contracts/.husky"
"coverage": "hardhat coverage"
},
"devDependencies": {
"@nomiclabs/hardhat-ethers": "^2.0.0",

@@ -1,27 +0,0 @@
// SPDX-License-Identifier: UNLICENSED
pragma solidity ^0.8.10;

import {Script} from "forge-std/Script.sol";
import {console} from "forge-std/console.sol";

import {Fallback} from "../../src/misc/Fallback.sol";

contract DeployFallbackContracts is Script {
uint256 DEPLOYER_PRIVATE_KEY = vm.envUint("DEPLOYER_PRIVATE_KEY");
uint256 NUM_CONTRACTS = vm.envUint("NUM_CONTRACTS");

function run() external {
vm.startBroadcast(DEPLOYER_PRIVATE_KEY);

for (uint256 ii = 0; ii < NUM_CONTRACTS; ++ii) {
Fallback fallbackContract = new Fallback();
logAddress("FALLBACK", address(fallbackContract));
}

vm.stopBroadcast();
}

function logAddress(string memory name, address addr) internal view {
console.log(string(abi.encodePacked(name, "=", vm.toString(address(addr)))));
}
}

@@ -25,7 +25,7 @@ import {Whitelist} from "../../src/L2/predeploys/Whitelist.sol";
contract DeployL1BridgeContracts is Script {
uint256 L1_DEPLOYER_PRIVATE_KEY = vm.envUint("L1_DEPLOYER_PRIVATE_KEY");

uint32 CHAIN_ID_L2 = uint32(vm.envUint("CHAIN_ID_L2"));
uint256 CHAIN_ID_L2 = vm.envUint("CHAIN_ID_L2");

address L1_WETH_ADDR = vm.envAddress("L1_WETH_ADDR");
address L2_WETH_ADDR = vm.envAddress("L2_WETH_ADDR");

@@ -19,7 +19,7 @@ async function main() {
console.log("Using rollup proxy address:", rollupAddr);

const ScrollChain = await ethers.getContractAt("ScrollChain", rollupAddr, deployer);
const genesis = JSON.parse(fs.readFileSync(GENESIS_FILE_PATH, "utf8"));
const genesis = JSON.parse(fs.readFileSync(GENESIS_FILE_PATH, 'utf8'));
console.log("Using genesis block:", genesis.blockHash);

const tx = await ScrollChain.importGenesisBatch(genesis);

@@ -26,16 +26,16 @@ async function main() {
const L2StandardERC20FactoryAddress = process.env.L2_SCROLL_STANDARD_ERC20_FACTORY_ADDR!;

// if ((await L1StandardERC20Gateway.counterpart()) === constants.AddressZero) {
const tx = await L1StandardERC20Gateway.initialize(
L2StandardERC20GatewayAddress,
L1GatewayRouterAddress,
L1ScrollMessengerAddress,
L2StandardERC20Impl,
L2StandardERC20FactoryAddress
);
console.log("initialize L1StandardERC20Gateway, hash:", tx.hash);
const receipt = await tx.wait();
console.log(`✅ Done, gas used: ${receipt.gasUsed}`);
const tx = await L1StandardERC20Gateway.initialize(
L2StandardERC20GatewayAddress,
L1GatewayRouterAddress,
L1ScrollMessengerAddress,
L2StandardERC20Impl,
L2StandardERC20FactoryAddress
);
console.log("initialize L1StandardERC20Gateway, hash:", tx.hash);
const receipt = await tx.wait();
console.log(`✅ Done, gas used: ${receipt.gasUsed}`);
// }
}

@@ -24,14 +24,14 @@ async function main() {
const L2GatewayRouterAddress = process.env.L2_GATEWAY_ROUTER_PROXY_ADDR!;

// if ((await L1GatewayRouter.counterpart()) === constants.AddressZero) {
const tx = await L1GatewayRouter.initialize(
L1StandardERC20GatewayAddress,
L2GatewayRouterAddress,
L1ScrollMessengerAddress
);
console.log("initialize L1StandardERC20Gateway, hash:", tx.hash);
const receipt = await tx.wait();
console.log(`✅ Done, gas used: ${receipt.gasUsed}`);
const tx = await L1GatewayRouter.initialize(
L1StandardERC20GatewayAddress,
L2GatewayRouterAddress,
L1ScrollMessengerAddress
);
console.log("initialize L1StandardERC20Gateway, hash:", tx.hash);
const receipt = await tx.wait();
console.log(`✅ Done, gas used: ${receipt.gasUsed}`);
// }
}

@@ -22,10 +22,10 @@ async function main() {
const ZKRollupAddress = addressFile.get("ZKRollup.proxy");

// if ((await L1ScrollMessenger.rollup()) === constants.AddressZero) {
const tx = await L1ScrollMessenger.initialize(ZKRollupAddress);
console.log("initialize L1StandardERC20Gateway, hash:", tx.hash);
const receipt = await tx.wait();
console.log(`✅ Done, gas used: ${receipt.gasUsed}`);
const tx = await L1ScrollMessenger.initialize(ZKRollupAddress);
console.log("initialize L1StandardERC20Gateway, hash:", tx.hash);
const receipt = await tx.wait();
console.log(`✅ Done, gas used: ${receipt.gasUsed}`);
// }
}

@@ -25,15 +25,15 @@ async function main() {
const L1StandardERC20GatewayAddress = process.env.L1_STANDARD_ERC20_GATEWAY_PROXY_ADDR!;

// if ((await L2StandardERC20Gateway.counterpart()) === constants.AddressZero) {
const tx = await L2StandardERC20Gateway.initialize(
L1StandardERC20GatewayAddress,
L2GatewayRouterAddress,
L2ScrollMessengerAddress,
L2StandardERC20FactoryAddress
);
console.log("initialize L2StandardERC20Gateway, hash:", tx.hash);
const receipt = await tx.wait();
console.log(`✅ Done, gas used: ${receipt.gasUsed}`);
const tx = await L2StandardERC20Gateway.initialize(
L1StandardERC20GatewayAddress,
L2GatewayRouterAddress,
L2ScrollMessengerAddress,
L2StandardERC20FactoryAddress
);
console.log("initialize L2StandardERC20Gateway, hash:", tx.hash);
const receipt = await tx.wait();
console.log(`✅ Done, gas used: ${receipt.gasUsed}`);
// }
}

@@ -24,14 +24,14 @@ async function main() {
const L1GatewayRouterAddress = process.env.L1_GATEWAY_ROUTER_PROXY_ADDR!;

// if ((await L2GatewayRouter.counterpart()) === constants.AddressZero) {
const tx = await L2GatewayRouter.initialize(
L2StandardERC20GatewayAddress,
L1GatewayRouterAddress,
L2ScrollMessengerAddress
);
console.log("initialize L1StandardERC20Gateway, hash:", tx.hash);
const receipt = await tx.wait();
console.log(`✅ Done, gas used: ${receipt.gasUsed}`);
const tx = await L2GatewayRouter.initialize(
L2StandardERC20GatewayAddress,
L1GatewayRouterAddress,
L2ScrollMessengerAddress
);
console.log("initialize L1StandardERC20Gateway, hash:", tx.hash);
const receipt = await tx.wait();
console.log(`✅ Done, gas used: ${receipt.gasUsed}`);
// }
}

@@ -21,10 +21,10 @@ async function main() {
const L2StandardERC20GatewayAddress = addressFile.get("L2StandardERC20Gateway.proxy");

// if ((await ScrollStandardERC20Factory.owner()) !== L2StandardERC20GatewayAddress) {
const tx = await ScrollStandardERC20Factory.transferOwnership(L2StandardERC20GatewayAddress);
console.log("transfer ownernship ScrollStandardERC20Factory, hash:", tx.hash);
const receipt = await tx.wait();
console.log(`✅ Done, gas used: ${receipt.gasUsed}`);
const tx = await ScrollStandardERC20Factory.transferOwnership(L2StandardERC20GatewayAddress);
console.log("transfer ownernship ScrollStandardERC20Factory, hash:", tx.hash);
const receipt = await tx.wait();
console.log(`✅ Done, gas used: ${receipt.gasUsed}`);
// }
}

@@ -10,7 +10,9 @@ export function selectAddressFile(network: string) {
}

let filename: string;
if (["hardhat", "l1geth", "l2geth"].includes(network)) {
if (["hardhat",
"l1geth", "l2geth",
].includes(network)) {
filename = path.join(CONFIG_FILE_DIR, `${network}.json`);
} else {
throw new Error(`network ${network} not supported yet`);

@@ -22,32 +22,4 @@ interface IL1GatewayRouter is IL1ETHGateway, IL1ERC20Gateway {
/// @param token The address of token updated.
/// @param gateway The corresponding address of gateway updated.
event SetERC20Gateway(address indexed token, address indexed gateway);

/*************************
* Public View Functions *
*************************/

/// @notice Return the corresponding gateway address for given token address.
/// @param _token The address of token to query.
function getERC20Gateway(address _token) external view returns (address);

/************************
* Restricted Functions *
************************/

/// @notice Update the address of ETH gateway contract.
/// @dev This function should only be called by contract owner.
/// @param _ethGateway The address to update.
function setETHGateway(address _ethGateway) external;

/// @notice Update the address of default ERC20 gateway contract.
/// @dev This function should only be called by contract owner.
/// @param _defaultERC20Gateway The address to update.
function setDefaultERC20Gateway(address _defaultERC20Gateway) external;

/// @notice Update the mapping from token address to gateway address.
/// @dev This function should only be called by contract owner.
/// @param _tokens The list of addresses of tokens to update.
/// @param _gateways The list of addresses of gateways to update.
function setERC20Gateway(address[] memory _tokens, address[] memory _gateways) external;
}

@@ -68,7 +68,8 @@ contract L1GatewayRouter is OwnableUpgradeable, IL1GatewayRouter {
return IL1ERC20Gateway(_gateway).getL2ERC20Address(_l1Address);
}

/// @inheritdoc IL1GatewayRouter
/// @notice Return the corresponding gateway address for given token address.
/// @param _token The address of token to query.
function getERC20Gateway(address _token) public view returns (address) {
address _gateway = ERC20Gateway[_token];
if (_gateway == address(0)) {
@@ -177,21 +178,28 @@ contract L1GatewayRouter is OwnableUpgradeable, IL1GatewayRouter {
* Restricted Functions *
************************/

/// @inheritdoc IL1GatewayRouter
/// @notice Update the address of ETH gateway contract.
/// @dev This function should only be called by contract owner.
/// @param _ethGateway The address to update.
function setETHGateway(address _ethGateway) external onlyOwner {
ethGateway = _ethGateway;

emit SetETHGateway(_ethGateway);
}

/// @inheritdoc IL1GatewayRouter
/// @notice Update the address of default ERC20 gateway contract.
/// @dev This function should only be called by contract owner.
/// @param _defaultERC20Gateway The address to update.
function setDefaultERC20Gateway(address _defaultERC20Gateway) external onlyOwner {
defaultERC20Gateway = _defaultERC20Gateway;

emit SetDefaultERC20Gateway(_defaultERC20Gateway);
}

/// @inheritdoc IL1GatewayRouter
/// @notice Update the mapping from token address to gateway address.
/// @dev This function should only be called by contract owner.
/// @param _tokens The list of addresses of tokens to update.
/// @param _gateways The list of addresses of gateways to update.
function setERC20Gateway(address[] memory _tokens, address[] memory _gateways) external onlyOwner {
require(_tokens.length == _gateways.length, "length mismatch");

@@ -10,7 +10,6 @@ import {BatchHeaderV0Codec} from "../../libraries/codec/BatchHeaderV0Codec.sol";
import {ChunkCodec} from "../../libraries/codec/ChunkCodec.sol";
import {IRollupVerifier} from "../../libraries/verifier/IRollupVerifier.sol";

// solhint-disable no-inline-assembly
// solhint-disable reason-string

/// @title ScrollChain
@@ -40,7 +39,7 @@ contract ScrollChain is OwnableUpgradeable, IScrollChain {
*************/

/// @notice The chain id of the corresponding layer 2 chain.
uint32 public immutable layer2ChainId;
uint256 public immutable layer2ChainId;

/*************
* Variables *
@@ -84,7 +83,7 @@ contract ScrollChain is OwnableUpgradeable, IScrollChain {
* Constructor *
***************/

constructor(uint32 _chainId) {
constructor(uint256 _chainId) {
layer2ChainId = _chainId;
}

@@ -296,9 +295,7 @@ contract ScrollChain is OwnableUpgradeable, IScrollChain {
require(finalizedStateRoots[_batchIndex] == bytes32(0), "batch already verified");

// compute public input hash
bytes32 _publicInputHash = keccak256(
abi.encodePacked(layer2ChainId, _prevStateRoot, _postStateRoot, _withdrawRoot, _dataHash)
);
bytes32 _publicInputHash = keccak256(abi.encode(_prevStateRoot, _postStateRoot, _withdrawRoot, _dataHash));

// verify batch
IRollupVerifier(verifier).verifyAggregateProof(_aggrProof, _publicInputHash);
@@ -402,13 +399,11 @@ contract ScrollChain is OwnableUpgradeable, IScrollChain {
bytes calldata _skippedL1MessageBitmap
) internal view returns (uint256 _totalNumL1MessagesInChunk) {
uint256 chunkPtr;
uint256 startDataPtr;
uint256 dataPtr;
uint256 blockPtr;

assembly {
dataPtr := mload(0x40)
startDataPtr := dataPtr
chunkPtr := add(_chunk, 0x20) // skip chunkLength
blockPtr := add(chunkPtr, 1) // skip numBlocks
}
@@ -416,23 +411,15 @@ contract ScrollChain is OwnableUpgradeable, IScrollChain {
uint256 _numBlocks = ChunkCodec.validateChunkLength(chunkPtr, _chunk.length);

// concatenate block contexts
uint256 _totalTransactionsInChunk;
for (uint256 i = 0; i < _numBlocks; i++) {
dataPtr = ChunkCodec.copyBlockContext(chunkPtr, dataPtr, i);
uint256 _numTransactionsInBlock = ChunkCodec.numTransactions(blockPtr);
unchecked {
_totalTransactionsInChunk += _numTransactionsInBlock;
blockPtr += ChunkCodec.BLOCK_CONTEXT_LENGTH;
}
}

assembly {
mstore(0x40, add(dataPtr, mul(_totalTransactionsInChunk, 0x20))) // reserve memory for tx hashes
blockPtr := add(chunkPtr, 1) // reset block ptr
}

// concatenate tx hashes
uint256 l2TxPtr = ChunkCodec.l2TxPtr(chunkPtr, _numBlocks);

// avoid stack too deep on forge coverage
uint256 _totalTransactionsInChunk;
while (_numBlocks > 0) {
// concatenate l1 message hashes
uint256 _numL1MessagesInBlock = ChunkCodec.numL1Messages(blockPtr);
@@ -456,6 +443,7 @@ contract ScrollChain is OwnableUpgradeable, IScrollChain {
}

unchecked {
_totalTransactionsInChunk += _numTransactionsInBlock;
_totalNumL1MessagesInChunk += _numL1MessagesInBlock;
_totalL1MessagesPoppedInBatch += _numL1MessagesInBlock;
_totalL1MessagesPoppedOverall += _numL1MessagesInBlock;
@@ -476,7 +464,9 @@ contract ScrollChain is OwnableUpgradeable, IScrollChain {

// compute data hash and store to memory
assembly {
let dataHash := keccak256(startDataPtr, sub(dataPtr, startDataPtr))
let startPtr := mload(0x40)
let dataHash := keccak256(startPtr, sub(dataPtr, startPtr))

mstore(memPtr, dataHash)
}

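Note the public input hash change above: the branch drops the chain id and switches from `abi.encodePacked` to `abi.encode` over four `bytes32` values, which for static 32-byte types is plain word concatenation. A hedged off-chain Go sketch reproducing the new hash:

```go
// Hedged off-chain sketch of the new public input hash: abi.encode of four
// bytes32 values is simple concatenation of 32-byte words, so the hash is
// keccak256 over 128 bytes. Note the chain id is no longer mixed in on this
// branch.
package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/crypto"
)

func publicInputHash(prevStateRoot, postStateRoot, withdrawRoot, dataHash common.Hash) common.Hash {
	var buf []byte
	buf = append(buf, prevStateRoot[:]...)
	buf = append(buf, postStateRoot[:]...)
	buf = append(buf, withdrawRoot[:]...)
	buf = append(buf, dataHash[:]...)
	return crypto.Keccak256Hash(buf)
}

func main() {
	h := publicInputHash(common.Hash{}, common.Hash{}, common.Hash{}, common.Hash{})
	fmt.Println(h.Hex())
}
```
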
@@ -2,8 +2,6 @@

pragma solidity ^0.8.0;

// solhint-disable no-inline-assembly

/// @dev Below is the encoding for `BatchHeader` V0, total 89 + ceil(l1MessagePopped / 256) * 32 bytes.
/// ```text
/// * Field Bytes Type Index Comments

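The size formula in that codec comment follows from the bitmap packing 256 messages (one bit each) into every 32-byte word. A tiny Go helper making it concrete:

```go
// Helper mirroring the size formula in the codec comment above:
// total header length = 89 + ceil(l1MessagePopped / 256) * 32 bytes.
package main

import "fmt"

func batchHeaderV0Length(l1MessagePopped uint64) uint64 {
	bitmapWords := (l1MessagePopped + 255) / 256 // ceil division
	return 89 + bitmapWords*32
}

func main() {
	fmt.Println(batchHeaderV0Length(0))   // 89
	fmt.Println(batchHeaderV0Length(1))   // 121
	fmt.Println(batchHeaderV0Length(257)) // 153
}
```
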
@@ -1,36 +0,0 @@
// SPDX-License-Identifier: MIT

pragma solidity ^0.8.0;

import {Ownable} from "@openzeppelin/contracts/access/Ownable.sol";
import {IERC20} from "@openzeppelin/contracts/token/ERC20/IERC20.sol";
import {SafeERC20} from "@openzeppelin/contracts/token/ERC20/utils/SafeERC20.sol";

contract Fallback is Ownable {
    using SafeERC20 for IERC20;

    /// @notice Withdraw stuck tokens from this contract.
    /// @param _token The address of the token to withdraw; use `address(0)` to withdraw ETH.
    /// @param _amount The amount of the token to withdraw.
    /// @param _recipient The address of the receiver.
    function withdraw(
        address _token,
        uint256 _amount,
        address _recipient
    ) external onlyOwner {
        if (_token == address(0)) {
            (bool _success, ) = _recipient.call{value: _amount}("");
            require(_success, "transfer ETH failed");
        } else {
            IERC20(_token).safeTransfer(_recipient, _amount);
        }
    }

    /// @notice Execute an arbitrary call.
    /// @param _target The address of the contract to call.
    /// @param _data The calldata passed to the target contract.
    function execute(address _target, bytes calldata _data) external payable onlyOwner {
        (bool _success, ) = _target.call{value: msg.value}(_data);
        require(_success, "call failed");
    }
}
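For orientation, a hypothetical owner-only usage of the (now removed) contract above; `fallbackInstance`, `tokenAddr`, `recipient`, and `target` are placeholders of ours:

// fallbackInstance.withdraw(address(0), 1 ether, recipient); // rescue stuck ETH
// fallbackInstance.withdraw(tokenAddr, 100e18, recipient);   // rescue a stuck ERC20
// fallbackInstance.execute(target, abi.encodeWithSignature("unpause()")); // arbitrary call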
@@ -10,8 +10,6 @@ import {ScrollChain, IScrollChain} from "../L1/rollup/ScrollChain.sol";
import {MockScrollChain} from "./mocks/MockScrollChain.sol";
import {MockRollupVerifier} from "./mocks/MockRollupVerifier.sol";

// solhint-disable no-inline-assembly

contract ScrollChainTest is DSTestPlus {
    // from ScrollChain
    event UpdateSequencer(address indexed account, bool status);
@@ -44,6 +42,35 @@ contract ScrollChainTest is DSTestPlus {
    rollup.initialize(address(messageQueue), address(0), 100);
}

/*
function testPublicInputHash() public {
    IScrollChain.Batch memory batch;
    batch.prevStateRoot = bytes32(0x000000000000000000000000000000000000000000000000000000000000cafe);
    batch.newStateRoot = bytes32(0);
    batch.withdrawTrieRoot = bytes32(0);
    batch
        .l2Transactions = hex"0000007402f8710582fd14808506e38dccc9825208944d496ccc28058b1d74b7a19541663e21154f9c848801561db11e24a43380c080a0d890606d7a35b2ab0f9b866d62c092d5b163f3e6a55537ae1485aac08c3f8ff7a023997be2d32f53e146b160fff0ba81e81dbb4491c865ab174d15c5b3d28c41ae";

    batch.blocks = new IScrollChain.BlockContext[](1);
    batch.blocks[0].blockHash = bytes32(0);
    batch.blocks[0].parentHash = bytes32(0);
    batch.blocks[0].blockNumber = 51966;
    batch.blocks[0].timestamp = 123456789;
    batch.blocks[0].baseFee = 0;
    batch.blocks[0].gasLimit = 10000000000000000;
    batch.blocks[0].numTransactions = 1;
    batch.blocks[0].numL1Messages = 0;

    (bytes32 hash, , , ) = chain.computePublicInputHash(0, batch);
    assertEq(hash, bytes32(0xa9f2ca3175794f91226a410ba1e60fff07a405c957562675c4149b77e659d805));

    batch
        .l2Transactions = hex"00000064f8628001830f424094000000000000000000000000000000000000bbbb8080820a97a064e07cd8f939e2117724bdcbadc80dda421381cbc2a1f4e0d093d9cc5c5cf68ea03e264227f80852d88743cd9e43998f2746b619180366a87e4531debf9c3fa5dc";
    (hash, , , ) = chain.computePublicInputHash(0, batch);
    assertEq(hash, bytes32(0x398cb22bbfa1665c1b342b813267538a4c933d7f92d8bd9184aba0dd1122987b));
}
*/

function testCommitBatch() public {
    bytes memory batchHeader0 = new bytes(89);

@@ -233,34 +260,13 @@ contract ScrollChainTest is DSTestPlus {
bytes memory chunk1;

// commit batch1, one chunk with one block, 1 tx, 1 L1 message, no skip
// => payload for data hash of chunk0
// 0000000000000000
// 0000000000000000
// 0000000000000000000000000000000000000000000000000000000000000000
// 0000000000000000
// 0001
// 50c3caa727394b95dc4885b7d25033ed22ac772b985fb274f2a7c0699a11346d
// => data hash for chunk0
// bb88f47194a07d59ed17bc9b2015f83d0afea8f7892d9c5f0b6565563bf06b26
// => data hash for all chunks
// 038433daac85a0b03cd443ed50bc85e832c883061651ae2182b2984751e0b340
// => payload for batch header
// 00
// 0000000000000002
// 0000000000000001
// 0000000000000001
// 038433daac85a0b03cd443ed50bc85e832c883061651ae2182b2984751e0b340
// 119b828c2a2798d2c957228ebeaff7e10bb099ae0d4e224f3eeb779ff61cba61
// 0000000000000000000000000000000000000000000000000000000000000000
// => hash for batch header
// cef70bf80683c4d9b8b2813e90c314e8c56648e231300b8cfed9d666b0caf14e
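To make the chunk0 payload above easier to scan, here is our field-by-field reading, matching the BlockContext fields in the commented-out test earlier (an annotation, not an authoritative spec; numL1Messages appears to be omitted from the hash payload):

// 0000000000000000    blockNumber     (uint64)
// 0000000000000000    timestamp       (uint64)
// 00..00 (32 bytes)   baseFee         (uint256)
// 0000000000000000    gasLimit        (uint64)
// 0001                numTransactions (uint16)
// 50c3...346d         hash of the block's single L1 message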
bytes memory batchHeader1 = new bytes(89 + 32);
assembly {
    mstore(add(batchHeader1, 0x20), 0) // version
    mstore(add(batchHeader1, add(0x20, 1)), shl(192, 1)) // batchIndex = 1
    mstore(add(batchHeader1, add(0x20, 9)), shl(192, 1)) // l1MessagePopped = 1
    mstore(add(batchHeader1, add(0x20, 17)), shl(192, 1)) // totalL1MessagePopped = 1
    mstore(add(batchHeader1, add(0x20, 25)), 0x038433daac85a0b03cd443ed50bc85e832c883061651ae2182b2984751e0b340) // dataHash
    mstore(add(batchHeader1, add(0x20, 25)), 0xfe21c37fa013c76f86b8593ddf15d685a04b55061d06797597ee866f6ff2edf8) // dataHash
    mstore(add(batchHeader1, add(0x20, 57)), batchHash0) // parentBatchHash
    mstore(add(batchHeader1, add(0x20, 89)), 0) // bitmap0
}
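The byte offsets in those mstores trace out the V0 header layout from the doc comment above; note that `shl(192, x)` left-aligns a uint64 in the 32-byte word, so exactly 8 bytes land at each offset:

// offset  0       version                (1 byte)
// offset  1..8    batchIndex             (uint64)
// offset  9..16   l1MessagePopped        (uint64)
// offset 17..24   totalL1MessagePopped   (uint64)
// offset 25..56   dataHash               (bytes32)
// offset 57..88   parentBatchHash        (bytes32)
// offset 89..     skippedL1MessageBitmap (one 32-byte word per 256 messages)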
@@ -274,9 +280,9 @@ contract ScrollChainTest is DSTestPlus {
chunks[0] = chunk0;
bitmap = new bytes(32);
rollup.commitBatch(0, batchHeader0, chunks, bitmap);
assertGt(uint256(rollup.committedBatches(1)), 0);
assertBoolEq(rollup.isBatchFinalized(1), false);
bytes32 batchHash1 = rollup.committedBatches(1);
assertEq(batchHash1, bytes32(0xcef70bf80683c4d9b8b2813e90c314e8c56648e231300b8cfed9d666b0caf14e));

// finalize batch1
rollup.finalizeBatchWithProof(
@@ -295,57 +301,17 @@ contract ScrollChainTest is DSTestPlus {

// commit batch2 with two chunks, correctly
// 1. chunk0 has one block, 3 tx, no L1 messages
// => payload for chunk0
// 0000000000000000
// 0000000000000000
// 0000000000000000000000000000000000000000000000000000000000000000
// 0000000000000000
// 0003
// ... (some tx hashes)
// => data hash for chunk0
// 2ac1dad3f3696e5581dfc10f2c7a7a8fc5b344285f7d332c7895a8825fca609a
// 2. chunk1 has three blocks
// 2.1 block0 has 5 tx, 3 L1 messages, no skips
// 2.2 block1 has 10 tx, 5 L1 messages, even positions are skipped.
// 2.3 block2 has 300 tx, 256 L1 messages, odd positions are skipped.
// => payload for chunk1
// 0000000000000000
// 0000000000000000
// 0000000000000000000000000000000000000000000000000000000000000000
// 0000000000000000
// 0005
// 0000000000000000
// 0000000000000000
// 0000000000000000000000000000000000000000000000000000000000000000
// 0000000000000000
// 000a
// 0000000000000000
// 0000000000000000
// 0000000000000000000000000000000000000000000000000000000000000000
// 0000000000000000
// 012c
// ... (some tx hashes)
// => data hash for chunk1
// 5c91563ee8be18cb94accfc83728f883ff5e3aa600fd0799e0a4e39afc7970b9
// => data hash for all chunks
// bf38f308e0a87ed7bf92fa2da038fa1d59a7b9801eb0f6d487f8eef528632145
// => payload for batch header
// 00
// 0000000000000002
// 0000000000000108
// 0000000000000109
// bf38f308e0a87ed7bf92fa2da038fa1d59a7b9801eb0f6d487f8eef528632145
// cef70bf80683c4d9b8b2813e90c314e8c56648e231300b8cfed9d666b0caf14e
// aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa800000000000000000000000000000000000000000000000000000000000000aa
// => hash for batch header
// 17fe6c12739f3a6261ae6db6486f41758dbd5d0508f19a5ca9ac37df67bbfec2
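A quick sanity check on the header fields below (our arithmetic, not part of the diff):

// 3 + 5 + 256 = 264 = 0x108   l1MessagePopped for batch2
// 1 + 264     = 265 = 0x109   totalL1MessagePopped (batch1 popped 1)
// 89 + ceil(264 / 256) * 32 = 153 bytes, hence `new bytes(89 + 32 + 32)`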
bytes memory batchHeader2 = new bytes(89 + 32 + 32);
assembly {
    mstore(add(batchHeader2, 0x20), 0) // version
    mstore(add(batchHeader2, add(0x20, 1)), shl(192, 2)) // batchIndex = 2
    mstore(add(batchHeader2, add(0x20, 9)), shl(192, 264)) // l1MessagePopped = 264
    mstore(add(batchHeader2, add(0x20, 17)), shl(192, 265)) // totalL1MessagePopped = 265
    mstore(add(batchHeader2, add(0x20, 25)), 0xbf38f308e0a87ed7bf92fa2da038fa1d59a7b9801eb0f6d487f8eef528632145) // dataHash
    mstore(add(batchHeader2, add(0x20, 25)), 0xa447b3c80bc6c3ee1aebc1af746c7c6b35a05c4a7af89d2103e9e0788b54375c) // dataHash
    mstore(add(batchHeader2, add(0x20, 57)), batchHash1) // parentBatchHash
    mstore(
        add(batchHeader2, add(0x20, 89)),
@@ -392,9 +358,9 @@ contract ScrollChainTest is DSTestPlus {
}

rollup.commitBatch(0, batchHeader1, chunks, bitmap);
assertGt(uint256(rollup.committedBatches(2)), 0);
assertBoolEq(rollup.isBatchFinalized(2), false);
bytes32 batchHash2 = rollup.committedBatches(2);
assertEq(batchHash2, bytes32(0x17fe6c12739f3a6261ae6db6486f41758dbd5d0508f19a5ca9ac37df67bbfec2));

// verify committed batch correctly
rollup.finalizeBatchWithProof(

@@ -1,4 +1,4 @@
.PHONY: lint docker clean coordinator mock_coordinator
.PHONY: lint docker clean coordinator

IMAGE_NAME=coordinator
IMAGE_VERSION=latest
@@ -25,9 +25,6 @@ libzkp:
coordinator: libzkp ## Builds the Coordinator instance.
	go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator ./cmd

mock_coordinator: ## Builds the mocked Coordinator instance.
	go build -tags="mock_prover mock_verifier" -o $(PWD)/build/bin/coordinator ./cmd

test-verifier: libzkp
	go test -tags ffi -timeout 0 -v ./verifier

@@ -35,7 +32,6 @@ test-gpu-verifier: libzkp
	go test -tags="gpu ffi" -timeout 0 -v ./verifier

lint: ## Lint the files - used for CI
	cp -r ../common/libzkp/interface ./verifier/lib
	GOBIN=$(PWD)/build/bin go run ../build/lint.go

clean: ## Empty out the bin folder
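For orientation, hypothetical invocations of the surviving targets (run from the coordinator directory; the `mock_coordinator` target is removed by this change):

# make coordinator : builds build/bin/coordinator with the ZK_VERSION ldflag
# make lint        : copies the libzkp interface headers, then runs the repo linter
# make clean       : empties the bin folder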