Mirror of https://github.com/scroll-tech/scroll.git
Synced 2026-01-12 07:28:08 -05:00

Compare commits (15 commits)
| Author | SHA1 | Date |
|---|---|---|
| | a1ebb3e42a | |
| | 58c0511a94 | |
| | ba129ba161 | |
| | 8b58bd1eab | |
| | 7097bdb4e1 | |
| | c3ae79bff4 | |
| | e8f5251ae2 | |
| | d10438ae0f | |
| | a7ce900aa7 | |
| | 49ec0b8fa8 | |
| | 244e5e915a | |
| | dc53d6d022 | |
| | 6dc89a9c0b | |
| | 9e450a7b0f | |
| | af76593c1d | |
.github/workflows/bridge.yml (vendored, 4 changed lines)
@@ -43,6 +43,8 @@ jobs:
           version: '0.8.16'
       - name: Install Geth Tools
         uses: gacts/install-geth-tools@v1
+        with:
+          version: 1.10.19
       - name: Lint
         working-directory: 'bridge'
         run: |

@@ -92,6 +94,8 @@ jobs:
           version: '0.8.16'
       - name: Install Geth Tools
         uses: gacts/install-geth-tools@v1
+        with:
+          version: 1.10.19
       - name: Build prerequisites
         run: |
           make dev_docker
.github/workflows/common.yml (vendored, 2 changed lines)
@@ -88,6 +88,8 @@ jobs:
           version: '0.8.16'
       - name: Install Geth Tools
         uses: gacts/install-geth-tools@v1
+        with:
+          version: 1.10.19
       - name: Build prerequisites
         run: |
           make dev_docker
.github/workflows/coordinator.yml (vendored, 2 changed lines)
@@ -104,6 +104,8 @@ jobs:
           version: '0.8.16'
       - name: Install Geth Tools
         uses: gacts/install-geth-tools@v1
+        with:
+          version: 1.10.19
       - name: Build prerequisites
         run: |
           make dev_docker
.github/workflows/database.yml (vendored, 2 changed lines)
@@ -81,6 +81,8 @@ jobs:
           version: '0.8.16'
       - name: Install Geth Tools
         uses: gacts/install-geth-tools@v1
+        with:
+          version: 1.10.19
      - name: Build prerequisites
        run: |
          make dev_docker
.github/workflows/docker.yaml (vendored, 95 changed lines)
@@ -6,7 +6,7 @@ on:
       - v**

 jobs:
-  build-and-push:
+  event_watcher:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code

@@ -27,6 +27,18 @@ jobs:
           tags: scrolltech/event-watcher:${{github.ref_name}}
           # cache-from: type=gha,scope=${{ github.workflow }}
           # cache-to: type=gha,scope=${{ github.workflow }}
+  gas_oracle:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v2
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+      - name: Login to Docker Hub
+        uses: docker/login-action@v2
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
       - name: Build and push gas_oracle docker
         uses: docker/build-push-action@v2
         with:

@@ -36,6 +48,18 @@ jobs:
           tags: scrolltech/gas-oracle:${{github.ref_name}}
           # cache-from: type=gha,scope=${{ github.workflow }}
           # cache-to: type=gha,scope=${{ github.workflow }}
+  msg_relayer:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v2
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+      - name: Login to Docker Hub
+        uses: docker/login-action@v2
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
       - name: Build and push msg_relayer docker
         uses: docker/build-push-action@v2
         with:

@@ -45,6 +69,18 @@ jobs:
           tags: scrolltech/msg-relayer:${{github.ref_name}}
           # cache-from: type=gha,scope=${{ github.workflow }}
           # cache-to: type=gha,scope=${{ github.workflow }}
+  rollup_relayer:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v2
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+      - name: Login to Docker Hub
+        uses: docker/login-action@v2
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
       - name: Build and push rollup_relayer docker
         uses: docker/build-push-action@v2
         with:

@@ -54,6 +90,18 @@ jobs:
           tags: scrolltech/rollup-relayer:${{github.ref_name}}
           # cache-from: type=gha,scope=${{ github.workflow }}
           # cache-to: type=gha,scope=${{ github.workflow }}
+  bridgehistoryapi-cross-msg-fetcher:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v2
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+      - name: Login to Docker Hub
+        uses: docker/login-action@v2
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
       - name: Build and push bridgehistoryapi-cross-msg-fetcher docker
         uses: docker/build-push-action@v2
         with:

@@ -63,6 +111,18 @@ jobs:
           tags: scrolltech/bridgehistoryapi-cross-msg-fetcher:${{github.ref_name}}
           # cache-from: type=gha,scope=${{ github.workflow }}
           # cache-to: type=gha,scope=${{ github.workflow }}
+  bridgehistoryapi-server:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v2
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+      - name: Login to Docker Hub
+        uses: docker/login-action@v2
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
       - name: Build and push bridgehistoryapi-server docker
         uses: docker/build-push-action@v2
         with:

@@ -72,6 +132,18 @@ jobs:
           tags: scrolltech/bridgehistoryapi-server:${{github.ref_name}}
           # cache-from: type=gha,scope=${{ github.workflow }}
           # cache-to: type=gha,scope=${{ github.workflow }}
+  coordinator:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v2
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+      - name: Login to Docker Hub
+        uses: docker/login-action@v2
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
       - name: Build and push coordinator docker
         uses: docker/build-push-action@v2
         with:

@@ -81,3 +153,24 @@ jobs:
           tags: scrolltech/coordinator:${{github.ref_name}}
           # cache-from: type=gha,scope=${{ github.workflow }}
           # cache-to: type=gha,scope=${{ github.workflow }}
+  prover-stats-api:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v2
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+      - name: Login to Docker Hub
+        uses: docker/login-action@v2
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+      - name: Build and push prover-stats-api docker
+        uses: docker/build-push-action@v2
+        with:
+          context: .
+          file: ./build/dockerfiles/prover-stats-api.Dockerfile
+          push: true
+          tags: scrolltech/prover-stats-api:${{github.ref_name}}
+          # cache-from: type=gha,scope=${{ github.workflow }}
+          # cache-to: type=gha,scope=${{ github.workflow }}
.github/workflows/integration.yaml (vendored, 2 changed lines)
@@ -33,6 +33,8 @@ jobs:
           version: '0.8.16'
       - name: Install Geth Tools
         uses: gacts/install-geth-tools@v1
+        with:
+          version: 1.10.19
       - name: Build prerequisites
         run: |
           make dev_docker
@@ -27,12 +27,8 @@
             "gas_price_diff": 50000
         },
         "finalize_batch_interval_sec": 0,
-        "message_sender_private_keys": [
-            "1212121212121212121212121212121212121212121212121212121212121212"
-        ],
-        "gas_oracle_sender_private_keys": [
-            "1313131313131313131313131313131313131313131313131313131313131313"
-        ]
+        "message_sender_private_key": "1212121212121212121212121212121212121212121212121212121212121212",
+        "gas_oracle_sender_private_key": "1313131313131313131313131313131313131313131313131313131313131313"
     }
 },
 "l2_config": {

@@ -62,15 +58,10 @@
             "gas_price_diff": 50000
         },
         "finalize_batch_interval_sec": 0,
-        "message_sender_private_keys": [
-            "1212121212121212121212121212121212121212121212121212121212121212"
-        ],
-        "gas_oracle_sender_private_keys": [
-            "1313131313131313131313131313131313131313131313131313131313131313"
-        ],
-        "rollup_sender_private_keys": [
-            "1414141414141414141414141414141414141414141414141414141414141414"
-        ]
+        "message_sender_private_key": "1212121212121212121212121212121212121212121212121212121212121212",
+        "gas_oracle_sender_private_key": "1313131313131313131313131313131313131313131313131313131313131313",
+        "commit_sender_private_key": "1414141414141414141414141414141414141414141414141414141414141414",
+        "finalize_sender_private_key": "1515151515151515151515151515151515151515151515151515151515151515"
     },
     "chunk_proposer_config": {
         "max_tx_gas_per_chunk": 1123456,
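Net effect of the two config hunks above: each relayer role now carries exactly one private key instead of a key list, and the L2 rollup key list is split into dedicated commit and finalize keys. A minimal standalone sketch of parsing the new shape (the struct below is illustrative and mirrors only the key fields, not the bridge's full RelayerConfig):

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// relayerKeys mirrors only the private-key fields introduced by this change.
type relayerKeys struct {
	MessageSenderPrivateKey   string `json:"message_sender_private_key"`
	GasOracleSenderPrivateKey string `json:"gas_oracle_sender_private_key"`
	CommitSenderPrivateKey    string `json:"commit_sender_private_key"`
	FinalizeSenderPrivateKey  string `json:"finalize_sender_private_key"`
}

func main() {
	// The flattened L2 relayer config shape after this change.
	raw := `{
		"message_sender_private_key": "1212121212121212121212121212121212121212121212121212121212121212",
		"gas_oracle_sender_private_key": "1313131313131313131313131313131313131313131313131313131313131313",
		"commit_sender_private_key": "1414141414141414141414141414141414141414141414141414141414141414",
		"finalize_sender_private_key": "1515151515151515151515151515151515151515151515151515151515151515"
	}`

	var keys relayerKeys
	if err := json.Unmarshal([]byte(raw), &keys); err != nil {
		log.Fatalf("failed to unmarshal relayer keys: %v", err)
	}
	fmt.Println("commit sender key:", keys.CommitSenderPrivateKey)
}
```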
@@ -4,13 +4,11 @@ go 1.19

 require (
 	github.com/agiledragon/gomonkey/v2 v2.9.0
-	github.com/orcaman/concurrent-map v1.0.0
 	github.com/orcaman/concurrent-map/v2 v2.0.1
 	github.com/scroll-tech/go-ethereum v1.10.14-0.20230804022247-26eeb40ea3ca
 	github.com/smartystreets/goconvey v1.8.0
 	github.com/stretchr/testify v1.8.3
 	github.com/urfave/cli/v2 v2.25.7
-	golang.org/x/sync v0.3.0
 	gorm.io/gorm v1.25.2
 )

@@ -53,6 +51,7 @@ require (
 	github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
 	github.com/yusufpapurcu/wmi v1.2.2 // indirect
 	golang.org/x/crypto v0.11.0 // indirect
+	golang.org/x/sync v0.3.0 // indirect
 	golang.org/x/sys v0.10.0 // indirect
 	golang.org/x/time v0.3.0 // indirect
 	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect

@@ -84,8 +84,6 @@ github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N
 github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/orcaman/concurrent-map v1.0.0 h1:I/2A2XPCb4IuQWcQhBhSwGfiuybl/J0ev9HDbW65HOY=
-github.com/orcaman/concurrent-map v1.0.0/go.mod h1:Lu3tH6HLW3feq74c2GC+jIMS/K2CFcDWnWD9XkenwhI=
 github.com/orcaman/concurrent-map/v2 v2.0.1 h1:jOJ5Pg2w1oeB6PeDurIYf6k9PQ+aTITr/6lP/L/zp6c=
 github.com/orcaman/concurrent-map/v2 v2.0.1/go.mod h1:9Eq3TG2oBe5FirmYWQfYO5iH1q0Jv47PLaNK++uCdOM=
 github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
@@ -15,10 +15,6 @@ func TestConfig(t *testing.T) {
 	cfg, err := NewConfig("../../conf/config.json")
 	assert.NoError(t, err)

-	assert.Len(t, cfg.L1Config.RelayerConfig.MessageSenderPrivateKeys, 1)
-	assert.Len(t, cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys, 1)
-	assert.Len(t, cfg.L2Config.RelayerConfig.RollupSenderPrivateKeys, 1)
-
 	data, err := json.Marshal(cfg)
 	assert.NoError(t, err)
@@ -34,12 +34,11 @@ type SenderConfig struct {
 	// The interval (in seconds) to check balance and top up sender's accounts
 	CheckBalanceTime uint64 `json:"check_balance_time"`
 	// The sender's pending count limit.
-	PendingLimit int `json:"pending_limit,omitempty"`
+	PendingLimit int `json:"pending_limit"`
 }

 // RelayerConfig loads relayer configuration items.
-// What we need to pay attention to is that
-// `MessageSenderPrivateKeys` and `RollupSenderPrivateKeys` cannot have common private keys.
 type RelayerConfig struct {
 	// RollupContractAddress store the rollup contract address.
 	RollupContractAddress common.Address `json:"rollup_contract_address,omitempty"`

@@ -56,9 +55,10 @@ type RelayerConfig struct {
 	// MessageRelayMinGasLimit to avoid OutOfGas error
 	MessageRelayMinGasLimit uint64 `json:"message_relay_min_gas_limit,omitempty"`
 	// The private key of the relayer
-	MessageSenderPrivateKeys   []*ecdsa.PrivateKey `json:"-"`
-	GasOracleSenderPrivateKeys []*ecdsa.PrivateKey `json:"-"`
-	RollupSenderPrivateKeys    []*ecdsa.PrivateKey `json:"-"`
+	MessageSenderPrivateKey   *ecdsa.PrivateKey `json:"-"`
+	GasOracleSenderPrivateKey *ecdsa.PrivateKey `json:"-"`
+	CommitSenderPrivateKey    *ecdsa.PrivateKey `json:"-"`
+	FinalizeSenderPrivateKey  *ecdsa.PrivateKey `json:"-"`
 }
 // GasOracleConfig The config for updating gas price oracle.

@@ -72,46 +72,61 @@ type GasOracleConfig struct {
 // relayerConfigAlias RelayerConfig alias name
 type relayerConfigAlias RelayerConfig

+func convertAndCheck(key string, uniqueAddressesSet map[string]struct{}) (*ecdsa.PrivateKey, error) {
+	if key == "" {
+		return nil, nil
+	}
+
+	privKey, err := crypto.ToECDSA(common.FromHex(key))
+	if err != nil {
+		return nil, err
+	}
+
+	addr := crypto.PubkeyToAddress(privKey.PublicKey).Hex()
+	if _, exists := uniqueAddressesSet[addr]; exists {
+		return nil, fmt.Errorf("detected duplicated address for private key: %s", addr)
+	}
+	uniqueAddressesSet[addr] = struct{}{}
+
+	return privKey, nil
+}
+
 // UnmarshalJSON unmarshal relayer_config struct.
 func (r *RelayerConfig) UnmarshalJSON(input []byte) error {
-	var jsonConfig struct {
+	var privateKeysConfig struct {
 		relayerConfigAlias
-		// The private key of the relayer
-		MessageSenderPrivateKeys   []string `json:"message_sender_private_keys"`
-		GasOracleSenderPrivateKeys []string `json:"gas_oracle_sender_private_keys"`
-		RollupSenderPrivateKeys    []string `json:"rollup_sender_private_keys,omitempty"`
+		MessageSenderPrivateKey   string `json:"message_sender_private_key"`
+		GasOracleSenderPrivateKey string `json:"gas_oracle_sender_private_key"`
+		CommitSenderPrivateKey    string `json:"commit_sender_private_key"`
+		FinalizeSenderPrivateKey  string `json:"finalize_sender_private_key"`
 	}
-	if err := json.Unmarshal(input, &jsonConfig); err != nil {
-		return err
+	var err error
+	if err = json.Unmarshal(input, &privateKeysConfig); err != nil {
+		return fmt.Errorf("failed to unmarshal private keys config: %w", err)
 	}

-	*r = RelayerConfig(jsonConfig.relayerConfigAlias)
+	*r = RelayerConfig(privateKeysConfig.relayerConfigAlias)

-	// Get messenger private key list.
-	for _, privStr := range jsonConfig.MessageSenderPrivateKeys {
-		priv, err := crypto.ToECDSA(common.FromHex(privStr))
-		if err != nil {
-			return fmt.Errorf("incorrect private_key_list format, err: %v", err)
-		}
-		r.MessageSenderPrivateKeys = append(r.MessageSenderPrivateKeys, priv)
+	uniqueAddressesSet := make(map[string]struct{})
+
+	r.MessageSenderPrivateKey, err = convertAndCheck(privateKeysConfig.MessageSenderPrivateKey, uniqueAddressesSet)
+	if err != nil {
+		return fmt.Errorf("error converting and checking message sender private key: %w", err)
 	}

-	// Get gas oracle private key list.
-	for _, privStr := range jsonConfig.GasOracleSenderPrivateKeys {
-		priv, err := crypto.ToECDSA(common.FromHex(privStr))
-		if err != nil {
-			return fmt.Errorf("incorrect private_key_list format, err: %v", err)
-		}
-		r.GasOracleSenderPrivateKeys = append(r.GasOracleSenderPrivateKeys, priv)
+	r.GasOracleSenderPrivateKey, err = convertAndCheck(privateKeysConfig.GasOracleSenderPrivateKey, uniqueAddressesSet)
+	if err != nil {
+		return fmt.Errorf("error converting and checking gas oracle sender private key: %w", err)
 	}

-	// Get rollup private key
-	for _, privStr := range jsonConfig.RollupSenderPrivateKeys {
-		priv, err := crypto.ToECDSA(common.FromHex(privStr))
-		if err != nil {
-			return fmt.Errorf("incorrect prover_private_key format, err: %v", err)
-		}
-		r.RollupSenderPrivateKeys = append(r.RollupSenderPrivateKeys, priv)
+	r.CommitSenderPrivateKey, err = convertAndCheck(privateKeysConfig.CommitSenderPrivateKey, uniqueAddressesSet)
+	if err != nil {
+		return fmt.Errorf("error converting and checking commit sender private key: %w", err)
 	}
+
+	r.FinalizeSenderPrivateKey, err = convertAndCheck(privateKeysConfig.FinalizeSenderPrivateKey, uniqueAddressesSet)
+	if err != nil {
+		return fmt.Errorf("error converting and checking finalize sender private key: %w", err)
+	}

 	return nil

@@ -119,28 +134,20 @@ func (r *RelayerConfig) UnmarshalJSON(input []byte) error {

 // MarshalJSON marshal RelayerConfig config, transfer private keys.
 func (r *RelayerConfig) MarshalJSON() ([]byte, error) {
-	jsonConfig := struct {
+	privateKeysConfig := struct {
 		relayerConfigAlias
-		// The private key of the relayer
-		MessageSenderPrivateKeys   []string `json:"message_sender_private_keys"`
-		GasOracleSenderPrivateKeys []string `json:"gas_oracle_sender_private_keys,omitempty"`
-		RollupSenderPrivateKeys    []string `json:"rollup_sender_private_keys,omitempty"`
-	}{relayerConfigAlias(*r), nil, nil, nil}
+		MessageSenderPrivateKey   string `json:"message_sender_private_key"`
+		GasOracleSenderPrivateKey string `json:"gas_oracle_sender_private_key"`
+		CommitSenderPrivateKey    string `json:"commit_sender_private_key"`
+		FinalizeSenderPrivateKey  string `json:"finalize_sender_private_key"`
+	}{}

-	// Transfer message sender private keys to hex type.
-	for _, priv := range r.MessageSenderPrivateKeys {
-		jsonConfig.MessageSenderPrivateKeys = append(jsonConfig.MessageSenderPrivateKeys, common.Bytes2Hex(crypto.FromECDSA(priv)))
-	}
+	privateKeysConfig.relayerConfigAlias = relayerConfigAlias(*r)
+	privateKeysConfig.MessageSenderPrivateKey = common.Bytes2Hex(crypto.FromECDSA(r.MessageSenderPrivateKey))
+	privateKeysConfig.GasOracleSenderPrivateKey = common.Bytes2Hex(crypto.FromECDSA(r.GasOracleSenderPrivateKey))
+	privateKeysConfig.CommitSenderPrivateKey = common.Bytes2Hex(crypto.FromECDSA(r.CommitSenderPrivateKey))
+	privateKeysConfig.FinalizeSenderPrivateKey = common.Bytes2Hex(crypto.FromECDSA(r.FinalizeSenderPrivateKey))

-	// Transfer rollup sender private keys to hex type.
-	for _, priv := range r.GasOracleSenderPrivateKeys {
-		jsonConfig.GasOracleSenderPrivateKeys = append(jsonConfig.GasOracleSenderPrivateKeys, common.Bytes2Hex(crypto.FromECDSA(priv)))
-	}
-
-	// Transfer rollup sender private keys to hex type.
-	for _, priv := range r.RollupSenderPrivateKeys {
-		jsonConfig.RollupSenderPrivateKeys = append(jsonConfig.RollupSenderPrivateKeys, common.Bytes2Hex(crypto.FromECDSA(priv)))
-	}
-
-	return json.Marshal(&jsonConfig)
+	return json.Marshal(&privateKeysConfig)
 }
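The convertAndCheck helper above threads a single uniqueAddressesSet through all four key conversions, so configuring the same key for two sender roles now fails at config-load time. A standalone sketch of the same pattern, using the go-ethereum crypto helpers the diff already imports (the main function and its output are illustrative, not the bridge's actual code):

```go
package main

import (
	"crypto/ecdsa"
	"fmt"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/crypto"
)

// convertAndCheck mirrors the helper in the diff above: decode a hex key and
// reject it if its derived address has already been seen.
func convertAndCheck(key string, seen map[string]struct{}) (*ecdsa.PrivateKey, error) {
	if key == "" {
		return nil, nil
	}
	privKey, err := crypto.ToECDSA(common.FromHex(key))
	if err != nil {
		return nil, err
	}
	addr := crypto.PubkeyToAddress(privKey.PublicKey).Hex()
	if _, exists := seen[addr]; exists {
		return nil, fmt.Errorf("detected duplicated address for private key: %s", addr)
	}
	seen[addr] = struct{}{}
	return privKey, nil
}

func main() {
	seen := make(map[string]struct{})
	key := "1212121212121212121212121212121212121212121212121212121212121212"

	if _, err := convertAndCheck(key, seen); err == nil {
		fmt.Println("first use accepted")
	}
	// Reusing the same key for a second sender role is rejected.
	if _, err := convertAndCheck(key, seen); err != nil {
		fmt.Println("second use rejected:", err)
	}
}
```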
@@ -3,6 +3,7 @@ package relayer

 import (
 	"context"
 	"errors"
+	"fmt"
 	"math/big"

 	// not sure if this will make problems when relay with l1geth

@@ -57,19 +58,16 @@ type Layer1Relayer struct {

 // NewLayer1Relayer will return a new instance of Layer1RelayerClient
 func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfig) (*Layer1Relayer, error) {
-	messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKeys)
+	messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKey)
 	if err != nil {
-		addr := crypto.PubkeyToAddress(cfg.MessageSenderPrivateKeys[0].PublicKey)
-		log.Error("new MessageSender failed", "main address", addr.String(), "err", err)
-		return nil, err
+		addr := crypto.PubkeyToAddress(cfg.MessageSenderPrivateKey.PublicKey)
+		return nil, fmt.Errorf("new message sender failed for address %s, err: %v", addr.Hex(), err)
 	}

-	// @todo make sure only one sender is available
-	gasOracleSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.GasOracleSenderPrivateKeys)
+	gasOracleSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.GasOracleSenderPrivateKey)
 	if err != nil {
-		addr := crypto.PubkeyToAddress(cfg.GasOracleSenderPrivateKeys[0].PublicKey)
-		log.Error("new GasOracleSender failed", "main address", addr.String(), "err", err)
-		return nil, err
+		addr := crypto.PubkeyToAddress(cfg.GasOracleSenderPrivateKey.PublicKey)
+		return nil, fmt.Errorf("new gas oracle sender failed for address %s, err: %v", addr.Hex(), err)
 	}

 	var minGasPrice uint64
@@ -11,6 +11,7 @@ import (
 	"github.com/scroll-tech/go-ethereum/accounts/abi"
 	"github.com/scroll-tech/go-ethereum/common"
 	gethTypes "github.com/scroll-tech/go-ethereum/core/types"
+	"github.com/scroll-tech/go-ethereum/crypto"
 	"github.com/scroll-tech/go-ethereum/ethclient"
 	"github.com/scroll-tech/go-ethereum/log"
 	gethMetrics "github.com/scroll-tech/go-ethereum/metrics"

@@ -53,8 +54,9 @@ type Layer2Relayer struct {
 	messageSender  *sender.Sender
 	l1MessengerABI *abi.ABI

-	rollupSender *sender.Sender
-	l1RollupABI  *abi.ABI
+	commitSender   *sender.Sender
+	finalizeSender *sender.Sender
+	l1RollupABI    *abi.ABI

 	gasOracleSender *sender.Sender
 	l2GasOracleABI  *abi.ABI

@@ -80,23 +82,28 @@ type Layer2Relayer struct {

 // NewLayer2Relayer will return a new instance of Layer2RelayerClient
 func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.DB, cfg *config.RelayerConfig, initGenesis bool) (*Layer2Relayer, error) {
-	// @todo use different sender for relayer, block commit and proof finalize
-	messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKeys)
+	messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKey)
 	if err != nil {
-		log.Error("Failed to create messenger sender", "err", err)
-		return nil, err
+		addr := crypto.PubkeyToAddress(cfg.MessageSenderPrivateKey.PublicKey)
+		return nil, fmt.Errorf("new message sender failed for address %s, err: %w", addr.Hex(), err)
 	}

-	rollupSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.RollupSenderPrivateKeys)
+	commitSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.CommitSenderPrivateKey)
 	if err != nil {
-		log.Error("Failed to create rollup sender", "err", err)
-		return nil, err
+		addr := crypto.PubkeyToAddress(cfg.CommitSenderPrivateKey.PublicKey)
+		return nil, fmt.Errorf("new commit sender failed for address %s, err: %w", addr.Hex(), err)
 	}

-	gasOracleSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.GasOracleSenderPrivateKeys)
+	finalizeSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.FinalizeSenderPrivateKey)
 	if err != nil {
-		log.Error("Failed to create gas oracle sender", "err", err)
-		return nil, err
+		addr := crypto.PubkeyToAddress(cfg.FinalizeSenderPrivateKey.PublicKey)
+		return nil, fmt.Errorf("new finalize sender failed for address %s, err: %w", addr.Hex(), err)
 	}
+
+	gasOracleSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.GasOracleSenderPrivateKey)
+	if err != nil {
+		addr := crypto.PubkeyToAddress(cfg.GasOracleSenderPrivateKey.PublicKey)
+		return nil, fmt.Errorf("new gas oracle sender failed for address %s, err: %w", addr.Hex(), err)
+	}

 	var minGasPrice uint64

@@ -127,8 +134,9 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
 		messageSender:  messageSender,
 		l1MessengerABI: bridgeAbi.L1ScrollMessengerABI,

-		rollupSender: rollupSender,
-		l1RollupABI:  bridgeAbi.ScrollChainABI,
+		commitSender:   commitSender,
+		finalizeSender: finalizeSender,
+		l1RollupABI:    bridgeAbi.ScrollChainABI,

 		gasOracleSender: gasOracleSender,
 		l2GasOracleABI:  bridgeAbi.L2GasPriceOracleABI,
@@ -230,7 +238,7 @@ func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte,
 	}

 	// submit genesis batch to L1 rollup contract
-	txHash, err := r.rollupSender.SendTransaction(batchHash, &r.cfg.RollupContractAddress, big.NewInt(0), calldata, 0)
+	txHash, err := r.commitSender.SendTransaction(batchHash, &r.cfg.RollupContractAddress, big.NewInt(0), calldata, 0)
 	if err != nil {
 		return fmt.Errorf("failed to send import genesis batch tx to L1, error: %v", err)
 	}

@@ -245,14 +253,14 @@ func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte,
 	select {
 	// print progress
 	case <-ticker.C:
-		log.Info("Waiting for confirmation", "pending count", r.rollupSender.PendingCount())
+		log.Info("Waiting for confirmation")

 	// timeout
 	case <-time.After(5 * time.Minute):
 		return fmt.Errorf("import genesis timeout after 5 minutes, original txHash: %v", txHash.String())

 	// handle confirmation
-	case confirmation := <-r.rollupSender.ConfirmChan():
+	case confirmation := <-r.commitSender.ConfirmChan():
 		if confirmation.ID != batchHash {
 			return fmt.Errorf("unexpected import genesis confirmation id, expected: %v, got: %v", batchHash, confirmation.ID)
 		}

@@ -374,7 +382,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {

 	// send transaction
 	txID := batch.Hash + "-commit"
-	txHash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), calldata, 0)
+	txHash, err := r.commitSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), calldata, 0)
 	if err != nil {
 		log.Error(
 			"Failed to send commitBatch tx to layer1",

@@ -468,7 +476,7 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {

 	txID := hash + "-finalize"
 	// add suffix `-finalize` to avoid duplication with commit tx in unit tests
-	txHash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), data, 0)
+	txHash, err := r.finalizeSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), data, 0)
 	finalizeTxHash := &txHash
 	if err != nil {
 		if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {

@@ -577,7 +585,9 @@ func (r *Layer2Relayer) handleConfirmLoop(ctx context.Context) {
 			return
 		case confirmation := <-r.messageSender.ConfirmChan():
 			r.handleConfirmation(confirmation)
-		case confirmation := <-r.rollupSender.ConfirmChan():
+		case confirmation := <-r.commitSender.ConfirmChan():
+			r.handleConfirmation(confirmation)
+		case confirmation := <-r.finalizeSender.ConfirmChan():
 			r.handleConfirmation(confirmation)
 		case cfm := <-r.gasOracleSender.ConfirmChan():
 			if !cfm.IsSuccessful {
@@ -106,7 +106,7 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
 	assert.Equal(t, types.RollupFinalizing, statuses[0])
 }

-func testL2RelayerRollupConfirm(t *testing.T) {
+func testL2RelayerCommitConfirm(t *testing.T) {
 	db := setupL2RelayerDB(t)
 	defer database.CloseDB(db)

@@ -118,8 +118,8 @@
 	assert.NoError(t, err)

 	// Simulate message confirmations.
-	processingKeys := []string{"committed-1", "committed-2", "finalized-1", "finalized-2"}
-	isSuccessful := []bool{true, false, true, false}
+	processingKeys := []string{"committed-1", "committed-2"}
+	isSuccessful := []bool{true, false}

 	batchOrm := orm.NewBatch(db)
 	batchHashes := make([]string, len(processingKeys))

@@ -129,29 +129,68 @@
 		batchHashes[i] = batch.Hash
 	}

-	for i, key := range processingKeys[:2] {
+	for i, key := range processingKeys {
 		l2Relayer.processingCommitment.Store(key, batchHashes[i])
-		l2Relayer.rollupSender.SendConfirmation(&sender.Confirmation{
+		l2Relayer.commitSender.SendConfirmation(&sender.Confirmation{
 			ID:           key,
 			IsSuccessful: isSuccessful[i],
 			TxHash:       common.HexToHash("0x123456789abcdef"),
 		})
 	}

-	for i, key := range processingKeys[2:] {
-		l2Relayer.processingFinalization.Store(key, batchHashes[i+2])
-		l2Relayer.rollupSender.SendConfirmation(&sender.Confirmation{
-			ID:           key,
-			IsSuccessful: isSuccessful[i+2],
-			TxHash:       common.HexToHash("0x123456789abcdef"),
-		})
-	}
-
 	// Check the database for the updated status using TryTimes.
 	ok := utils.TryTimes(5, func() bool {
 		expectedStatuses := []types.RollupStatus{
 			types.RollupCommitted,
 			types.RollupCommitFailed,
 		}

 		for i, batchHash := range batchHashes {
 			batchInDB, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{"hash": batchHash}, nil, 0)
 			if err != nil || len(batchInDB) != 1 || types.RollupStatus(batchInDB[0].RollupStatus) != expectedStatuses[i] {
 				return false
 			}
 		}
 		return true
 	})
 	assert.True(t, ok)
 }

+func testL2RelayerFinalizeConfirm(t *testing.T) {
+	db := setupL2RelayerDB(t)
+	defer database.CloseDB(db)
+
+	// Create and set up the Layer2 Relayer.
+	l2Cfg := cfg.L2Config
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false)
+	assert.NoError(t, err)
+
+	// Simulate message confirmations.
+	processingKeys := []string{"finalized-1", "finalized-2"}
+	isSuccessful := []bool{true, false}
+
+	batchOrm := orm.NewBatch(db)
+	batchHashes := make([]string, len(processingKeys))
+	for i := range batchHashes {
+		batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*types.Chunk{chunk1, chunk2})
+		assert.NoError(t, err)
+		batchHashes[i] = batch.Hash
+	}
+
+	for i, key := range processingKeys {
+		l2Relayer.processingFinalization.Store(key, batchHashes[i])
+		l2Relayer.finalizeSender.SendConfirmation(&sender.Confirmation{
+			ID:           key,
+			IsSuccessful: isSuccessful[i],
+			TxHash:       common.HexToHash("0x123456789abcdef"),
+		})
+	}
+
+	// Check the database for the updated status using TryTimes.
+	ok := utils.TryTimes(5, func() bool {
+		expectedStatuses := []types.RollupStatus{
+			types.RollupFinalized,
+			types.RollupFinalizeFailed,
+		}

@@ -97,7 +97,8 @@ func TestFunctions(t *testing.T) {
 	t.Run("TestCreateNewRelayer", testCreateNewRelayer)
 	t.Run("TestL2RelayerProcessPendingBatches", testL2RelayerProcessPendingBatches)
 	t.Run("TestL2RelayerProcessCommittedBatches", testL2RelayerProcessCommittedBatches)
-	t.Run("TestL2RelayerRollupConfirm", testL2RelayerRollupConfirm)
+	t.Run("TestL2RelayerCommitConfirm", testL2RelayerCommitConfirm)
+	t.Run("TestL2RelayerFinalizeConfirm", testL2RelayerFinalizeConfirm)
 	t.Run("TestL2RelayerGasOracleConfirm", testL2RelayerGasOracleConfirm)
 	t.Run("TestLayer2RelayerProcessGasPriceOracle", testLayer2RelayerProcessGasPriceOracle)
 }
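The confirmation tests above poll the database with utils.TryTimes instead of sleeping for a fixed interval. A minimal sketch of such a retry helper (the actual scroll-tech utils implementation may differ in signature and backoff):

```go
package main

import (
	"fmt"
	"time"
)

// tryTimes retries fn up to n times with a fixed delay, returning true as
// soon as fn succeeds. Illustrative only; the real utils.TryTimes may use
// different timing.
func tryTimes(n int, fn func() bool) bool {
	for i := 0; i < n; i++ {
		if fn() {
			return true
		}
		time.Sleep(time.Second)
	}
	return false
}

func main() {
	attempts := 0
	ok := tryTimes(5, func() bool {
		attempts++
		return attempts == 3 // succeeds on the third poll
	})
	fmt.Println("succeeded:", ok, "after", attempts, "attempts")
}
```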
@@ -1,165 +0,0 @@
-package sender
-
-import (
-	"context"
-	"crypto/ecdsa"
-	"fmt"
-	"math/big"
-
-	"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
-	"github.com/scroll-tech/go-ethereum/common"
-	"github.com/scroll-tech/go-ethereum/core/types"
-	"github.com/scroll-tech/go-ethereum/ethclient"
-	"github.com/scroll-tech/go-ethereum/log"
-)
-
-type accountPool struct {
-	client *ethclient.Client
-
-	minBalance *big.Int
-	accounts   map[common.Address]*bind.TransactOpts
-	accsCh     chan *bind.TransactOpts
-}
-
-// newAccounts creates an accountPool instance.
-func newAccountPool(ctx context.Context, minBalance *big.Int, client *ethclient.Client, privs []*ecdsa.PrivateKey) (*accountPool, error) {
-	accs := &accountPool{
-		client:     client,
-		minBalance: minBalance,
-		accounts:   make(map[common.Address]*bind.TransactOpts, len(privs)),
-		accsCh:     make(chan *bind.TransactOpts, len(privs)+2),
-	}
-
-	// get chainID from client
-	chainID, err := client.ChainID(ctx)
-	if err != nil {
-		return nil, err
-	}
-
-	for _, privStr := range privs {
-		auth, err := bind.NewKeyedTransactorWithChainID(privStr, chainID)
-		if err != nil {
-			log.Error("failed to create account", "chainID", chainID.String(), "err", err)
-			return nil, err
-		}
-
-		// Set pending nonce
-		nonce, err := client.PendingNonceAt(ctx, auth.From)
-		if err != nil {
-			return nil, err
-		}
-		auth.Nonce = big.NewInt(int64(nonce))
-		accs.accounts[auth.From] = auth
-		accs.accsCh <- auth
-	}
-
-	return accs, accs.checkAndSetBalances(ctx)
-}
-
-// getAccount get auth from channel.
-func (a *accountPool) getAccount() *bind.TransactOpts {
-	select {
-	case auth := <-a.accsCh:
-		return auth
-	default:
-		return nil
-	}
-}
-
-// releaseAccount set used auth into channel.
-func (a *accountPool) releaseAccount(auth *bind.TransactOpts) {
-	a.accsCh <- auth
-}
-
-// reSetNonce reset nonce if send signed tx failed.
-func (a *accountPool) resetNonce(ctx context.Context, auth *bind.TransactOpts) {
-	nonce, err := a.client.PendingNonceAt(ctx, auth.From)
-	if err != nil {
-		log.Warn("failed to reset nonce", "address", auth.From.String(), "err", err)
-		return
-	}
-	auth.Nonce = big.NewInt(int64(nonce))
-}
-
-// checkAndSetBalance check balance and set min balance.
-func (a *accountPool) checkAndSetBalances(ctx context.Context) error {
-	var (
-		root      *bind.TransactOpts
-		maxBls    = big.NewInt(0)
-		lostAuths []*bind.TransactOpts
-	)
-
-	for addr, auth := range a.accounts {
-		bls, err := a.client.BalanceAt(ctx, addr, nil)
-		if err != nil || bls.Cmp(a.minBalance) < 0 {
-			if err != nil {
-				log.Warn("failed to get balance", "address", addr.String(), "err", err)
-				return err
-			}
-			lostAuths = append(lostAuths, auth)
-			continue
-		} else if bls.Cmp(maxBls) > 0 { // Find the biggest balance account.
-			root, maxBls = auth, bls
-		}
-	}
-	if root == nil {
-		return fmt.Errorf("no account has enough balance")
-	}
-	if len(lostAuths) == 0 {
-		return nil
-	}
-
-	var (
-		tx  *types.Transaction
-		err error
-	)
-	for _, auth := range lostAuths {
-		tx, err = a.createSignedTx(root, &auth.From, a.minBalance)
-		if err != nil {
-			return err
-		}
-		err = a.client.SendTransaction(ctx, tx)
-		if err != nil {
-			log.Error("Failed to send balance to account", "err", err)
-			return err
-		}
-		log.Debug("send balance to account", "account", auth.From.String(), "balance", a.minBalance.String())
-	}
-	// wait util mined
-	if _, err = bind.WaitMined(ctx, a.client, tx); err != nil {
-		return err
-	}
-
-	// Reset root's nonce.
-	a.resetNonce(ctx, root)
-
-	return nil
-}
-
-func (a *accountPool) createSignedTx(from *bind.TransactOpts, to *common.Address, value *big.Int) (*types.Transaction, error) {
-	gasPrice, err := a.client.SuggestGasPrice(context.Background())
-	if err != nil {
-		return nil, err
-	}
-	gasPrice.Mul(gasPrice, big.NewInt(2))
-
-	// Get pending nonce
-	nonce, err := a.client.PendingNonceAt(context.Background(), from.From)
-	if err != nil {
-		return nil, err
-	}
-
-	tx := types.NewTx(&types.LegacyTx{
-		Nonce:    nonce,
-		To:       to,
-		Value:    value,
-		Gas:      500000,
-		GasPrice: gasPrice,
-	})
-	signedTx, err := from.Signer(from.From, tx)
-	if err != nil {
-		return nil, err
-	}
-
-	return signedTx, nil
-}
@@ -39,10 +39,6 @@ var (
 	ErrFullPending = errors.New("sender's pending pool is full")
 )

-var (
-	defaultPendingLimit = 10
-)
-
 // Confirmation struct used to indicate transaction confirmation details
 type Confirmation struct {
 	ID string

@@ -75,8 +71,8 @@ type Sender struct {
 	chainID *big.Int // The chain id of the endpoint
 	ctx     context.Context

 	// account fields.
-	auths *accountPool
+	auth       *bind.TransactOpts
 	minBalance *big.Int

 	blockNumber   uint64 // Current block number on chain.
 	baseFeePerGas uint64 // Current base fee per gas on chain

@@ -88,27 +84,34 @@ type Sender struct {

 // NewSender returns a new instance of transaction sender
 // txConfirmationCh is used to notify confirmed transaction
-func NewSender(ctx context.Context, config *config.SenderConfig, privs []*ecdsa.PrivateKey) (*Sender, error) {
+func NewSender(ctx context.Context, config *config.SenderConfig, priv *ecdsa.PrivateKey) (*Sender, error) {
 	client, err := ethclient.Dial(config.Endpoint)
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("failed to dial eth client, err: %w", err)
 	}

 	// get chainID from client
 	chainID, err := client.ChainID(ctx)
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("failed to get chain ID, err: %w", err)
 	}

-	auths, err := newAccountPool(ctx, config.MinBalance, client, privs)
+	auth, err := bind.NewKeyedTransactorWithChainID(priv, chainID)
 	if err != nil {
-		return nil, fmt.Errorf("failed to create account pool, err: %v", err)
+		return nil, fmt.Errorf("failed to create transactor with chain ID %v, err: %w", chainID, err)
 	}

+	// Set pending nonce
+	nonce, err := client.PendingNonceAt(ctx, auth.From)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get pending nonce for address %s, err: %w", auth.From.Hex(), err)
+	}
+	auth.Nonce = big.NewInt(int64(nonce))
+
 	// get header by number
 	header, err := client.HeaderByNumber(ctx, nil)
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("failed to get header by number, err: %w", err)
 	}

 	var baseFeePerGas uint64

@@ -116,21 +119,17 @@ func NewSender(ctx context.Context, config *config.SenderConfig, priv *ecdsa.PrivateKey) (*Sender, error) {
 		if header.BaseFee != nil {
 			baseFeePerGas = header.BaseFee.Uint64()
 		} else {
-			return nil, errors.New("DynamicFeeTxType not supported, header.BaseFee nil")
+			return nil, errors.New("dynamic fee tx type not supported: header.BaseFee is nil")
 		}
 	}

-	// initialize pending limit with a default value
-	if config.PendingLimit == 0 {
-		config.PendingLimit = defaultPendingLimit
-	}
-
 	sender := &Sender{
 		ctx:           ctx,
 		config:        config,
 		client:        client,
 		chainID:       chainID,
-		auths:         auths,
+		auth:          auth,
 		minBalance:    config.MinBalance,
 		confirmCh:     make(chan *Confirmation, 128),
 		blockNumber:   header.Number.Uint64(),
 		baseFeePerGas: baseFeePerGas,

@@ -175,11 +174,6 @@ func (s *Sender) SendConfirmation(cfm *Confirmation) {
 	s.confirmCh <- cfm
 }

-// NumberOfAccounts return the count of accounts.
-func (s *Sender) NumberOfAccounts() int {
-	return len(s.auths.accounts)
-}
-
 func (s *Sender) getFeeData(auth *bind.TransactOpts, target *common.Address, value *big.Int, data []byte, minGasLimit uint64) (*FeeData, error) {
 	if s.config.TxType == DynamicFeeTxType {
 		return s.estimateDynamicGas(auth, target, value, data, minGasLimit)

@@ -188,53 +182,39 @@ func (s *Sender) getFeeData(auth *bind.TransactOpts, target *common.Address, value *big.Int, data []byte, minGasLimit uint64) (*FeeData, error) {
 }

 // SendTransaction send a signed L2tL1 transaction.
-func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.Int, data []byte, minGasLimit uint64) (hash common.Hash, err error) {
+func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.Int, data []byte, minGasLimit uint64) (common.Hash, error) {
 	if s.IsFull() {
 		return common.Hash{}, ErrFullPending
 	}

 	// We occupy the ID, in case some other threads call with the same ID in the same time
 	if ok := s.pendingTxs.SetIfAbsent(ID, nil); !ok {
-		return common.Hash{}, fmt.Errorf("has the repeat tx ID, ID: %s", ID)
+		return common.Hash{}, fmt.Errorf("repeat transaction ID: %s", ID)
 	}

-	// get
-	auth := s.auths.getAccount()
-	if auth == nil {
-		s.pendingTxs.Remove(ID) // release the ID on failure
-		return common.Hash{}, ErrNoAvailableAccount
-	}
-
-	defer s.auths.releaseAccount(auth)
-
 	var (
 		feeData *FeeData
 		tx      *types.Transaction
+		err     error
 	)
+	defer func() {
+		if err != nil {
+			s.pendingTxs.Remove(ID) // release the ID on failure
+		}
+	}()

 	// estimate gas fee
-	if feeData, err = s.getFeeData(auth, target, value, data, minGasLimit); err != nil {
-		return
+	if feeData, err = s.getFeeData(s.auth, target, value, data, minGasLimit); err != nil {
+		return common.Hash{}, fmt.Errorf("failed to get fee data, err: %w", err)
 	}
-	if tx, err = s.createAndSendTx(auth, feeData, target, value, data, nil); err == nil {
-		// add pending transaction to queue
-		pending := &PendingTransaction{
-			tx:       tx,
-			id:       ID,
-			signer:   auth,
-			submitAt: atomic.LoadUint64(&s.blockNumber),
-			feeData:  feeData,
-		}
-		s.pendingTxs.Set(ID, pending)
-		return tx.Hash(), nil
+	if tx, err = s.createAndSendTx(s.auth, feeData, target, value, data, nil); err != nil {
+		return common.Hash{}, fmt.Errorf("failed to create and send transaction, err: %w", err)
 	}
-	return

+	// add pending transaction
+	pending := &PendingTransaction{
+		tx:       tx,
+		id:       ID,
+		signer:   s.auth,
+		submitAt: atomic.LoadUint64(&s.blockNumber),
+		feeData:  feeData,
+	}
+	s.pendingTxs.Set(ID, pending)
+	return tx.Hash(), nil
 }
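SendTransaction keeps the concurrent-map ID reservation from before the refactor: SetIfAbsent claims the ID up front so concurrent callers with the same ID fail fast, and a deferred cleanup releases it if any later step errors. A condensed sketch of that idiom, using the concurrent-map/v2 package that remains in go.mod (the helper and its names are illustrative, not the bridge's actual code):

```go
package main

import (
	"fmt"

	cmap "github.com/orcaman/concurrent-map/v2"
)

var pendingTxs = cmap.New[any]()

// reserveAndSend claims id before doing fallible work, releasing it on error
// so a later retry with the same id can proceed.
func reserveAndSend(id string, send func() error) (err error) {
	if ok := pendingTxs.SetIfAbsent(id, nil); !ok {
		return fmt.Errorf("repeat transaction ID: %s", id)
	}
	defer func() {
		if err != nil {
			pendingTxs.Remove(id) // release the ID on failure
		}
	}()
	return send()
}

func main() {
	err := reserveAndSend("batch-1-commit", func() error {
		return fmt.Errorf("simulated send failure")
	})
	fmt.Println(err)
	// The ID was released on failure, so a retry with the same ID succeeds.
	if err := reserveAndSend("batch-1-commit", func() error { return nil }); err == nil {
		fmt.Println("retry after failure succeeded")
	}
}
```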
-func (s *Sender) createAndSendTx(auth *bind.TransactOpts, feeData *FeeData, target *common.Address, value *big.Int, data []byte, overrideNonce *uint64) (tx *types.Transaction, err error) {
+func (s *Sender) createAndSendTx(auth *bind.TransactOpts, feeData *FeeData, target *common.Address, value *big.Int, data []byte, overrideNonce *uint64) (*types.Transaction, error) {
 	var (
 		nonce  = auth.Nonce.Uint64()
 		txData types.TxData
 	)

@@ -292,26 +272,36 @@ func (s *Sender) createAndSendTx(auth *bind.TransactOpts, feeData *FeeData, target *common.Address, value *big.Int, data []byte, overrideNonce *uint64) (*types.Transaction, error) {
 	}

 	// sign and send
-	tx, err = auth.Signer(auth.From, types.NewTx(txData))
+	tx, err := auth.Signer(auth.From, types.NewTx(txData))
 	if err != nil {
 		log.Error("failed to sign tx", "err", err)
-		return
+		return nil, err
 	}
 	if err = s.client.SendTransaction(s.ctx, tx); err != nil {
 		log.Error("failed to send tx", "tx hash", tx.Hash().String(), "err", err)
 		// Check if contain nonce, and reset nonce
 		// only reset nonce when it is not from resubmit
 		if strings.Contains(err.Error(), "nonce") && overrideNonce == nil {
-			s.auths.resetNonce(context.Background(), auth)
+			s.resetNonce(context.Background())
 		}
-		return
+		return nil, err
 	}

 	// update nonce when it is not from resubmit
 	if overrideNonce == nil {
 		auth.Nonce = big.NewInt(int64(nonce + 1))
 	}
-	return
+	return tx, nil
 }

+// resetNonce resets the nonce if sending a signed tx failed.
+func (s *Sender) resetNonce(ctx context.Context) {
+	nonce, err := s.client.PendingNonceAt(ctx, s.auth.From)
+	if err != nil {
+		log.Warn("failed to reset nonce", "address", s.auth.From.String(), "err", err)
+		return
+	}
+	s.auth.Nonce = big.NewInt(int64(nonce))
+}
+
 func (s *Sender) resubmitTransaction(feeData *FeeData, auth *bind.TransactOpts, tx *types.Transaction) (*types.Transaction, error) {

@@ -448,6 +438,22 @@ func (s *Sender) checkPendingTransaction(header *types.Header, confirmed uint64) {
 	}
 }

+// checkBalance checks the account balance and returns an error if it is
+// under the configured threshold.
+func (s *Sender) checkBalance(ctx context.Context) error {
+	bls, err := s.client.BalanceAt(ctx, s.auth.From, nil)
+	if err != nil {
+		log.Warn("failed to get balance", "address", s.auth.From.String(), "err", err)
+		return err
+	}
+
+	if bls.Cmp(s.minBalance) < 0 {
+		return fmt.Errorf("insufficient account balance - actual balance: %s, minimum required balance: %s",
+			bls.String(), s.minBalance.String())
+	}
+
+	return nil
+}
+
 // Loop is the main event loop
 func (s *Sender) loop(ctx context.Context) {
 	checkTick := time.NewTicker(time.Duration(s.config.CheckPendingTime) * time.Second)

@@ -474,7 +480,9 @@ func (s *Sender) loop(ctx context.Context) {
 			s.checkPendingTransaction(header, confirmed)
 		case <-checkBalanceTicker.C:
 			// Check balance.
-			_ = s.auths.checkAndSetBalances(ctx)
+			if err := s.checkBalance(ctx); err != nil {
+				log.Error("check balance failed", "err", err)
+			}
 		case <-ctx.Done():
 			return
 		case <-s.stopCh:
@@ -7,10 +7,8 @@ import (
 	"math/big"
 	"strconv"
 	"testing"
-	"time"

 	"github.com/agiledragon/gomonkey/v2"
-	cmap "github.com/orcaman/concurrent-map"
 	"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
 	"github.com/scroll-tech/go-ethereum/common"
 	"github.com/scroll-tech/go-ethereum/core/types"

@@ -18,7 +16,6 @@ import (
 	"github.com/scroll-tech/go-ethereum/ethclient"
 	"github.com/scroll-tech/go-ethereum/rpc"
 	"github.com/stretchr/testify/assert"
-	"golang.org/x/sync/errgroup"

 	"scroll-tech/common/docker"
@@ -28,10 +25,10 @@ import (
 const TXBatch = 50

 var (
-	privateKeys []*ecdsa.PrivateKey
-	cfg         *config.Config
-	base        *docker.App
-	txTypes     = []string{"LegacyTx", "AccessListTx", "DynamicFeeTx"}
+	privateKey *ecdsa.PrivateKey
+	cfg        *config.Config
+	base       *docker.App
+	txTypes    = []string{"LegacyTx", "AccessListTx", "DynamicFeeTx"}
 )

 func TestMain(m *testing.M) {

@@ -50,7 +47,7 @@ func setupEnv(t *testing.T) {
 	priv, err := crypto.HexToECDSA("1212121212121212121212121212121212121212121212121212121212121212")
 	assert.NoError(t, err)
 	// Load default private key.
-	privateKeys = []*ecdsa.PrivateKey{priv}
+	privateKey = priv

 	cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
 	cfg.L1Config.RelayerConfig.SenderConfig.CheckBalanceTime = 1

@@ -62,15 +59,10 @@ func TestSender(t *testing.T) {

 	t.Run("test new sender", testNewSender)
 	t.Run("test pending limit", testPendLimit)
-
 	t.Run("test min gas limit", testMinGasLimit)
 	t.Run("test resubmit transaction", testResubmitTransaction)
 	t.Run("test resubmit transaction with rising base fee", testResubmitTransactionWithRisingBaseFee)
 	t.Run("test check pending transaction", testCheckPendingTransaction)
-
-	t.Run("test 1 account sender", func(t *testing.T) { testBatchSender(t, 1) })
-	t.Run("test 3 account sender", func(t *testing.T) { testBatchSender(t, 3) })
-	t.Run("test 8 account sender", func(t *testing.T) { testBatchSender(t, 8) })
 }

 func testNewSender(t *testing.T) {

@@ -78,7 +70,7 @@ func testNewSender(t *testing.T) {
 		// exit by Stop()
 		cfgCopy1 := *cfg.L1Config.RelayerConfig.SenderConfig
 		cfgCopy1.TxType = txType
-		newSender1, err := NewSender(context.Background(), &cfgCopy1, privateKeys)
+		newSender1, err := NewSender(context.Background(), &cfgCopy1, privateKey)
 		assert.NoError(t, err)
 		newSender1.Stop()

@@ -86,7 +78,7 @@ func testNewSender(t *testing.T) {
 		cfgCopy2 := *cfg.L1Config.RelayerConfig.SenderConfig
 		cfgCopy2.TxType = txType
 		subCtx, cancel := context.WithCancel(context.Background())
-		_, err = NewSender(subCtx, &cfgCopy2, privateKeys)
+		_, err = NewSender(subCtx, &cfgCopy2, privateKey)
 		assert.NoError(t, err)
 		cancel()
 	}

@@ -98,7 +90,7 @@ func testPendLimit(t *testing.T) {
 		cfgCopy.TxType = txType
 		cfgCopy.Confirmations = rpc.LatestBlockNumber
 		cfgCopy.PendingLimit = 2
-		newSender, err := NewSender(context.Background(), &cfgCopy, privateKeys)
+		newSender, err := NewSender(context.Background(), &cfgCopy, privateKey)
 		assert.NoError(t, err)

 		for i := 0; i < 2*newSender.PendingLimit(); i++ {

@@ -115,7 +107,7 @@ func testMinGasLimit(t *testing.T) {
 		cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
 		cfgCopy.TxType = txType
 		cfgCopy.Confirmations = rpc.LatestBlockNumber
-		newSender, err := NewSender(context.Background(), &cfgCopy, privateKeys)
+		newSender, err := NewSender(context.Background(), &cfgCopy, privateKey)
 		assert.NoError(t, err)

 		client, err := ethclient.Dial(cfgCopy.Endpoint)

@@ -143,13 +135,12 @@ func testResubmitTransaction(t *testing.T) {
 	for _, txType := range txTypes {
 		cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
 		cfgCopy.TxType = txType
-		s, err := NewSender(context.Background(), &cfgCopy, privateKeys)
+		s, err := NewSender(context.Background(), &cfgCopy, privateKey)
 		assert.NoError(t, err)
-		auth := s.auths.getAccount()
-		tx := types.NewTransaction(auth.Nonce.Uint64(), common.Address{}, big.NewInt(0), 0, big.NewInt(0), nil)
-		feeData, err := s.getFeeData(auth, &common.Address{}, big.NewInt(0), nil, 0)
+		tx := types.NewTransaction(s.auth.Nonce.Uint64(), common.Address{}, big.NewInt(0), 0, big.NewInt(0), nil)
+		feeData, err := s.getFeeData(s.auth, &common.Address{}, big.NewInt(0), nil, 0)
 		assert.NoError(t, err)
-		_, err = s.resubmitTransaction(feeData, auth, tx)
+		_, err = s.resubmitTransaction(feeData, s.auth, tx)
 		assert.NoError(t, err)
 		s.Stop()
 	}

@@ -160,17 +151,16 @@ func testResubmitTransactionWithRisingBaseFee(t *testing.T) {

 	cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
 	cfgCopy.TxType = txType
-	s, err := NewSender(context.Background(), &cfgCopy, privateKeys)
+	s, err := NewSender(context.Background(), &cfgCopy, privateKey)
 	assert.NoError(t, err)
-	auth := s.auths.getAccount()
-	tx := types.NewTransaction(auth.Nonce.Uint64(), common.Address{}, big.NewInt(0), 0, big.NewInt(0), nil)
+	tx := types.NewTransaction(s.auth.Nonce.Uint64(), common.Address{}, big.NewInt(0), 0, big.NewInt(0), nil)
 	s.baseFeePerGas = 1000
-	feeData, err := s.getFeeData(auth, &common.Address{}, big.NewInt(0), nil, 0)
+	feeData, err := s.getFeeData(s.auth, &common.Address{}, big.NewInt(0), nil, 0)
 	assert.NoError(t, err)
 	// bump the basefee by 10x
 	s.baseFeePerGas *= 10
 	// resubmit and check that the gas fee has been adjusted accordingly
-	newTx, err := s.resubmitTransaction(feeData, auth, tx)
+	newTx, err := s.resubmitTransaction(feeData, s.auth, tx)
 	assert.NoError(t, err)

 	escalateMultipleNum := new(big.Int).SetUint64(s.config.EscalateMultipleNum)

@@ -196,14 +186,13 @@ func testCheckPendingTransaction(t *testing.T) {
 	for _, txType := range txTypes {
 		cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
 		cfgCopy.TxType = txType
-		s, err := NewSender(context.Background(), &cfgCopy, privateKeys)
+		s, err := NewSender(context.Background(), &cfgCopy, privateKey)
 		assert.NoError(t, err)

 		header := &types.Header{Number: big.NewInt(100), BaseFee: big.NewInt(100)}
 		confirmed := uint64(100)
 		receipt := &types.Receipt{Status: types.ReceiptStatusSuccessful, BlockNumber: big.NewInt(90)}
-		auth := s.auths.getAccount()
-		tx := types.NewTransaction(auth.Nonce.Uint64(), common.Address{}, big.NewInt(0), 0, big.NewInt(0), nil)
+		tx := types.NewTransaction(s.auth.Nonce.Uint64(), common.Address{}, big.NewInt(0), 0, big.NewInt(0), nil)

 		testCases := []struct {
 			name string

@@ -280,66 +269,3 @@ func testCheckPendingTransaction(t *testing.T) {
 		s.Stop()
 	}
 }
-
-func testBatchSender(t *testing.T, batchSize int) {
-	for _, txType := range txTypes {
-		for len(privateKeys) < batchSize {
-			priv, err := crypto.GenerateKey()
-			assert.NoError(t, err)
-			privateKeys = append(privateKeys, priv)
-		}
-
-		cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
-		cfgCopy.Confirmations = rpc.LatestBlockNumber
-		cfgCopy.PendingLimit = batchSize * TXBatch
-		cfgCopy.TxType = txType
-		newSender, err := NewSender(context.Background(), &cfgCopy, privateKeys)
-		assert.NoError(t, err)
-
-		// send transactions
-		var (
-			eg        errgroup.Group
-			idCache   = cmap.New()
-			confirmCh = newSender.ConfirmChan()
-		)
-		for idx := 0; idx < newSender.NumberOfAccounts(); idx++ {
-			index := idx
-			eg.Go(func() error {
-				for i := 0; i < TXBatch; i++ {
-					toAddr := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
-					id := strconv.Itoa(i + index*1000)
-					_, err := newSender.SendTransaction(id, &toAddr, big.NewInt(1), nil, 0)
-					if errors.Is(err, ErrNoAvailableAccount) || errors.Is(err, ErrFullPending) {
-						<-time.After(time.Second)
-						continue
-					}
-					assert.NoError(t, err)
-					idCache.Set(id, struct{}{})
-				}
-				return nil
-			})
-		}
-		assert.NoError(t, eg.Wait())
-		t.Logf("successful send batch txs, batch size: %d, total count: %d", newSender.NumberOfAccounts(), TXBatch*newSender.NumberOfAccounts())
-
-		// avoid 10 mins cause testcase panic
-		after := time.After(80 * time.Second)
-		isDone := false
-		for !isDone {
-			select {
-			case cmsg := <-confirmCh:
-				assert.Equal(t, true, cmsg.IsSuccessful)
-				_, exist := idCache.Pop(cmsg.ID)
-				assert.Equal(t, true, exist)
-				// Receive all confirmed txs.
-				if idCache.Count() == 0 {
-					isDone = true
-				}
-			case <-after:
-				t.Error("newSender test failed because of timeout")
-				isDone = true
-			}
-		}
-		newSender.Stop()
-	}
-}
@@ -50,7 +50,7 @@ func testCreateNewWatcherAndStop(t *testing.T) {

 	l1cfg := cfg.L1Config
 	l1cfg.RelayerConfig.SenderConfig.Confirmations = rpc.LatestBlockNumber
-	newSender, err := sender.NewSender(context.Background(), l1cfg.RelayerConfig.SenderConfig, l1cfg.RelayerConfig.MessageSenderPrivateKeys)
+	newSender, err := sender.NewSender(context.Background(), l1cfg.RelayerConfig.SenderConfig, l1cfg.RelayerConfig.MessageSenderPrivateKey)
 	assert.NoError(t, err)

 	// Create several transactions and commit to block

@@ -71,7 +71,7 @@ func testFetchRunningMissingBlocks(t *testing.T) {
 	_, db := setupL2Watcher(t)
 	defer database.CloseDB(db)

-	auth := prepareAuth(t, l2Cli, cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys[0])
+	auth := prepareAuth(t, l2Cli, cfg.L2Config.RelayerConfig.MessageSenderPrivateKey)

 	// deploy mock bridge
 	_, tx, _, err := mock_bridge.DeployMockBridgeL2(auth, l2Cli)

@@ -83,10 +83,10 @@ func setupEnv(t *testing.T) {
 	l2Cfg.Confirmations = 0
 	l2Cfg.RelayerConfig.SenderConfig.Confirmations = 0

-	l1Auth, err = bind.NewKeyedTransactorWithChainID(bridgeApp.Config.L2Config.RelayerConfig.MessageSenderPrivateKeys[0], base.L1gethImg.ChainID())
+	l1Auth, err = bind.NewKeyedTransactorWithChainID(bridgeApp.Config.L2Config.RelayerConfig.MessageSenderPrivateKey, base.L1gethImg.ChainID())
 	assert.NoError(t, err)

-	l2Auth, err = bind.NewKeyedTransactorWithChainID(bridgeApp.Config.L1Config.RelayerConfig.MessageSenderPrivateKeys[0], base.L2gethImg.ChainID())
+	l2Auth, err = bind.NewKeyedTransactorWithChainID(bridgeApp.Config.L1Config.RelayerConfig.MessageSenderPrivateKey, base.L2gethImg.ChainID())
 	assert.NoError(t, err)
 }
@@ -37,7 +37,7 @@ COPY . .
RUN cp -r ./common/libzkp/interface ./coordinator/internal/logic/verifier/lib
COPY --from=zkp-builder /app/target/release/libzkp.so ./coordinator/internal/logic/verifier/lib/
COPY --from=zkp-builder /app/target/release/libzktrie.so ./coordinator/internal/logic/verifier/lib/
RUN cd ./coordinator && go build -v -p 4 -o /bin/coordinator ./cmd && mv internal/logic/verifier/lib /bin/
RUN cd ./coordinator && make coordinator_skip_libzkp && mv ./build/bin/coordinator /bin/coordinator && mv internal/logic/verifier/lib /bin/

# Pull coordinator into a second stage deploy alpine container
FROM ubuntu:20.04

@@ -46,6 +46,6 @@ ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/src/coordinator/internal/logic/verifier/li
RUN mkdir -p /src/coordinator/internal/logic/verifier/lib
COPY --from=builder /bin/lib /src/coordinator/internal/logic/verifier/lib
COPY --from=builder /bin/coordinator /bin/

RUN /bin/coordinator --version

ENTRYPOINT ["/bin/coordinator"]
File diff suppressed because one or more lines are too long

25 common/libzkp/impl/Cargo.lock generated
@@ -32,13 +32,14 @@ dependencies = [
[[package]]
name = "aggregator"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.10#87cae118ffdcf3a085a7c3c24268f7a0df21fcd4"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.13#a8c59b2e6e0a845c78d6b93a80c0a9c0601f59d0"
dependencies = [
"ark-std",
"env_logger 0.10.0",
"eth-types",
"ethers-core",
"halo2_proofs",
"hex",
"itertools",
"log",
"rand",

@@ -432,7 +433,7 @@ checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1"
[[package]]
name = "bus-mapping"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.10#87cae118ffdcf3a085a7c3c24268f7a0df21fcd4"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.13#a8c59b2e6e0a845c78d6b93a80c0a9c0601f59d0"
dependencies = [
"eth-types",
"ethers-core",

@@ -1048,7 +1049,7 @@ dependencies = [
[[package]]
name = "eth-types"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.10#87cae118ffdcf3a085a7c3c24268f7a0df21fcd4"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.13#a8c59b2e6e0a845c78d6b93a80c0a9c0601f59d0"
dependencies = [
"ethers-core",
"ethers-signers",

@@ -1225,7 +1226,7 @@ dependencies = [
[[package]]
name = "external-tracer"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.10#87cae118ffdcf3a085a7c3c24268f7a0df21fcd4"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.13#a8c59b2e6e0a845c78d6b93a80c0a9c0601f59d0"
dependencies = [
"eth-types",
"geth-utils",

@@ -1438,7 +1439,7 @@ dependencies = [
[[package]]
name = "gadgets"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.10#87cae118ffdcf3a085a7c3c24268f7a0df21fcd4"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.13#a8c59b2e6e0a845c78d6b93a80c0a9c0601f59d0"
dependencies = [
"digest 0.7.6",
"eth-types",

@@ -1478,7 +1479,7 @@ dependencies = [
[[package]]
name = "geth-utils"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.10#87cae118ffdcf3a085a7c3c24268f7a0df21fcd4"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.13#a8c59b2e6e0a845c78d6b93a80c0a9c0601f59d0"
dependencies = [
"env_logger 0.9.3",
"gobuild 0.1.0-alpha.2 (git+https://github.com/scroll-tech/gobuild.git)",

@@ -2076,7 +2077,7 @@ dependencies = [
[[package]]
name = "keccak256"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.10#87cae118ffdcf3a085a7c3c24268f7a0df21fcd4"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.13#a8c59b2e6e0a845c78d6b93a80c0a9c0601f59d0"
dependencies = [
"env_logger 0.9.3",
"eth-types",

@@ -2263,7 +2264,7 @@ dependencies = [
[[package]]
name = "mock"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.10#87cae118ffdcf3a085a7c3c24268f7a0df21fcd4"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.13#a8c59b2e6e0a845c78d6b93a80c0a9c0601f59d0"
dependencies = [
"eth-types",
"ethers-core",

@@ -2278,7 +2279,7 @@ dependencies = [
[[package]]
name = "mpt-zktrie"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.10#87cae118ffdcf3a085a7c3c24268f7a0df21fcd4"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.13#a8c59b2e6e0a845c78d6b93a80c0a9c0601f59d0"
dependencies = [
"bus-mapping",
"eth-types",

@@ -2754,7 +2755,7 @@ dependencies = [
[[package]]
name = "prover"
version = "0.4.0"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.5.10#cb8f71475e6aa9fc78a24a832369fecb1c7d2201"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.5.13#0756ac68ee2f41c51f09a748cd28220290fe49ae"
dependencies = [
"aggregator",
"anyhow",

@@ -4039,7 +4040,7 @@ checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba"
[[package]]
name = "types"
version = "0.4.0"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.5.10#cb8f71475e6aa9fc78a24a832369fecb1c7d2201"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.5.13#0756ac68ee2f41c51f09a748cd28220290fe49ae"
dependencies = [
"base64 0.13.1",
"blake2",

@@ -4490,7 +4491,7 @@ checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9"
[[package]]
name = "zkevm-circuits"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.10#87cae118ffdcf3a085a7c3c24268f7a0df21fcd4"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.13#a8c59b2e6e0a845c78d6b93a80c0a9c0601f59d0"
dependencies = [
"array-init",
"bus-mapping",

@@ -20,8 +20,8 @@ maingate = { git = "https://github.com/scroll-tech/halo2wrong", branch = "halo2-
halo2curves = { git = "https://github.com/scroll-tech/halo2curves.git", branch = "0.3.1-derive-serde" }

[dependencies]
prover = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.5.10" }
types = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.5.10" }
prover = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.5.13" }
types = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.5.13" }
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "develop" }

log = "0.4"
@@ -126,7 +126,7 @@ const (
ProvingTaskUnassigned
// ProvingTaskAssigned : proving_task is assigned to be proved
ProvingTaskAssigned
// ProvingTaskProved : proof has been returned by prover
// ProvingTaskProved DEPRECATED: proof has been returned by prover
ProvingTaskProved
// ProvingTaskVerified : proof is valid
ProvingTaskVerified
@@ -250,11 +250,12 @@ type ChunkInfo struct {

// ChunkProof includes the proof info that are required for chunk verification and rollup.
type ChunkProof struct {
StorageTrace []byte `json:"storage_trace"`
Protocol []byte `json:"protocol"`
Proof []byte `json:"proof"`
Instances []byte `json:"instances"`
Vk []byte `json:"vk"`
StorageTrace []byte `json:"storage_trace"`
Protocol []byte `json:"protocol"`
Proof []byte `json:"proof"`
Instances []byte `json:"instances"`
Vk []byte `json:"vk"`
ChunkInfo *ChunkInfo `json:"chunk_info,omitempty"` // cross-reference between coordinator computation and prover computation
}
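One consequence of the new field: with `omitempty`, a nil ChunkInfo is dropped from the serialized proof entirely, so readers that predate the field keep parsing the payload unchanged. A minimal sketch of that JSON behavior; the ChainID field inside the stand-in chunkInfo is invented purely for illustration:

package main

import (
	"encoding/json"
	"fmt"
)

// chunkInfo stands in for the real ChunkInfo; the ChainID field is
// invented only so the sketch has something to serialize.
type chunkInfo struct {
	ChainID uint64 `json:"chain_id"`
}

// chunkProof mirrors the JSON shape of the ChunkProof struct above.
type chunkProof struct {
	StorageTrace []byte     `json:"storage_trace"`
	Protocol     []byte     `json:"protocol"`
	Proof        []byte     `json:"proof"`
	Instances    []byte     `json:"instances"`
	Vk           []byte     `json:"vk"`
	ChunkInfo    *chunkInfo `json:"chunk_info,omitempty"`
}

func main() {
	// With a nil ChunkInfo the "chunk_info" key is omitted entirely.
	withoutInfo, _ := json.Marshal(chunkProof{Proof: []byte("proof")})
	fmt.Println(string(withoutInfo))

	// With ChunkInfo set, the cross-reference travels with the proof.
	withInfo, _ := json.Marshal(chunkProof{ChunkInfo: &chunkInfo{ChainID: 1}})
	fmt.Println(string(withInfo))
}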
// BatchProof includes the proof info that are required for batch verification and rollup.

@@ -73,6 +73,7 @@ func TestProofMessageSignVerifyPublicKey(t *testing.T) {
Proof: []byte("testProof"),
Instances: []byte("testInstance"),
Vk: []byte("testVk"),
ChunkInfo: nil,
},
Error: "testError",
},

@@ -101,12 +102,13 @@ func TestProofDetailHash(t *testing.T) {
Proof: []byte("testProof"),
Instances: []byte("testInstance"),
Vk: []byte("testVk"),
ChunkInfo: nil,
},
Error: "testError",
}
hash, err := proofDetail.Hash()
assert.NoError(t, err)
expectedHash := "4165f5ab3399001002a5b8e4062914249a2deb72f6133d647b586f53e236802d"
expectedHash := "72a00232c1fcb100b1b67e6d12cd449e5d2d890e3a66e50f4c23499d4990766f"
assert.Equal(t, expectedHash, hex.EncodeToString(hash))
}

@@ -136,6 +138,7 @@ func TestProofMsgPublicKey(t *testing.T) {
Proof: []byte("testProof"),
Instances: []byte("testInstance"),
Vk: []byte("testVk"),
ChunkInfo: nil,
},
Error: "testError",
},
@@ -20,12 +20,17 @@ var (
MessageRelayerApp MockAppName = "message-relayer-test"
// RollupRelayerApp the name of mock rollup-relayer app.
RollupRelayerApp MockAppName = "rollup-relayer-test"
// CoordinatorApp the name of mock coordinator app.
CoordinatorApp MockAppName = "coordinator-test"

// DBCliApp the name of mock database app.
DBCliApp MockAppName = "db_cli-test"
// ProverApp the name of mock prover app.
ProverApp MockAppName = "prover-test"

// CoordinatorApp the name of mock coordinator app.
CoordinatorApp MockAppName = "coordinator-test"

// ChunkProverApp the name of mock chunk prover app.
ChunkProverApp MockAppName = "chunkProver-test"
// BatchProverApp the name of mock batch prover app.
BatchProverApp MockAppName = "batchProver-test"
)

// RegisterSimulation register initializer function for integration-test.
@@ -3,9 +3,10 @@ package version

import (
"fmt"
"runtime/debug"
"strings"
)

var tag = "v4.1.26"
var tag = "v4.1.36"

var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {

@@ -19,11 +20,29 @@ var commit = func() string {
}
}
}
return ""
// Set default value for integration test.
return "000000"
}()

// ZkVersion is the commit-id of common/libzkp/impl/cargo.lock/scroll-prover
var ZkVersion string
// ZkVersion is the commit-id of common/libzkp/impl/cargo.lock/scroll-prover and halo2, concatenated by a "-".
// The default `000000-000000` is set for integration test, and will be overwritten by the coordinator's & prover's actual compilations (see their Makefiles).
var ZkVersion = "000000-000000"

// Version denotes the version of the scroll protocol, including the l2geth, relayer, coordinator, prover, contracts, etc.
var Version = fmt.Sprintf("%s-%s-%s", tag, commit, ZkVersion)

// CheckScrollProverVersion checks the "scroll-prover" version; if it's different from the local one, it returns false.
func CheckScrollProverVersion(proverVersion string) bool {
// note that the version is in fact in the format of "tag-commit-scroll_prover-halo2",
// so the split-by-'-' length should be 4
remote := strings.Split(proverVersion, "-")
if len(remote) != 4 {
return false
}
local := strings.Split(Version, "-")
if len(local) != 4 {
return false
}
// compare the `scroll_prover` version
return remote[2] == local[2]
}
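Since Version is assembled as tag-commit-scroll_prover-halo2, CheckScrollProverVersion pins only the third component. A self-contained sketch of that comparison, with made-up version strings for illustration:

package main

import (
	"fmt"
	"strings"
)

// checkScrollProverVersion mirrors the check added above: both version
// strings must split into exactly four '-'-separated parts, and only the
// scroll_prover component (index 2) has to match.
func checkScrollProverVersion(local, remote string) bool {
	l, r := strings.Split(local, "-"), strings.Split(remote, "-")
	if len(l) != 4 || len(r) != 4 {
		return false
	}
	return l[2] == r[2]
}

func main() {
	local := "v4.1.36-000000-000000-000000" // illustrative values only
	fmt.Println(checkScrollProverVersion(local, "v4.1.30-abcdef-000000-fedcba")) // true: same scroll_prover commit
	fmt.Println(checkScrollProverVersion(local, "v4.1.36-abcdef-111111-fedcba")) // false: scroll_prover commit differs
}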
@@ -23,7 +23,8 @@ contract InitializeL1BridgeContracts is Script {
uint256 CHAIN_ID_L2 = vm.envUint("CHAIN_ID_L2");
uint256 MAX_L2_TX_IN_CHUNK = vm.envUint("MAX_L2_TX_IN_CHUNK");
uint256 MAX_L1_MESSAGE_GAS_LIMIT = vm.envUint("MAX_L1_MESSAGE_GAS_LIMIT");
address L1_ROLLUP_OPERATOR_ADDR = vm.envAddress("L1_ROLLUP_OPERATOR_ADDR");
address L1_COMMIT_SENDER_ADDRESS = vm.envAddress("L1_COMMIT_SENDER_ADDRESS");
address L1_FINALIZE_SENDER_ADDRESS = vm.envAddress("L1_FINALIZE_SENDER_ADDRESS");
address L1_FEE_VAULT_ADDR = vm.envAddress("L1_FEE_VAULT_ADDR");
address L1_WETH_ADDR = vm.envAddress("L1_WETH_ADDR");

@@ -67,8 +68,8 @@ contract InitializeL1BridgeContracts is Script {
L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR,
MAX_L2_TX_IN_CHUNK
);
ScrollChain(L1_SCROLL_CHAIN_PROXY_ADDR).addSequencer(L1_ROLLUP_OPERATOR_ADDR);
ScrollChain(L1_SCROLL_CHAIN_PROXY_ADDR).addProver(L1_ROLLUP_OPERATOR_ADDR);
ScrollChain(L1_SCROLL_CHAIN_PROXY_ADDR).addSequencer(L1_COMMIT_SENDER_ADDRESS);
ScrollChain(L1_SCROLL_CHAIN_PROXY_ADDR).addProver(L1_FINALIZE_SENDER_ADDRESS);

// initialize L2GasPriceOracle
L2GasPriceOracle(L2_GAS_PRICE_ORACLE_PROXY_ADDR).initialize(
@@ -1,4 +1,4 @@
.PHONY: lint docker clean coordinator mock_coordinator
.PHONY: lint docker clean coordinator coordinator_skip_libzkp mock_coordinator

IMAGE_NAME=coordinator
IMAGE_VERSION=latest

@@ -25,6 +25,9 @@ libzkp:
coordinator: libzkp ## Builds the Coordinator instance.
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator ./cmd

coordinator_skip_libzkp:
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator ./cmd

mock_coordinator: ## Builds the mocked Coordinator instance.
go build -tags="mock_prover mock_verifier" -o $(PWD)/build/bin/coordinator ./cmd
@@ -66,50 +66,48 @@ func (c *Collector) timeoutProofTask() {
for {
select {
case <-ticker.C:
assignedProverTasks, err := c.proverTaskOrm.GetAssignedProverTasks(c.ctx, 10)
timeout := time.Duration(c.cfg.ProverManager.CollectionTimeSec) * time.Second
assignedProverTasks, err := c.proverTaskOrm.GetTimeoutAssignedProverTasks(c.ctx, 10, timeout)
if err != nil {
log.Error("get unassigned session info failure", "error", err)
break
}

// here not update the block batch proving status failed, because the collector loop will check
// the attempt times. if reach the times, the collector will set the block batch proving status.
for _, assignedProverTask := range assignedProverTasks {
timeoutDuration := time.Duration(c.cfg.ProverManager.CollectionTimeSec) * time.Second
// here not update the block batch proving status failed, because the collector loop will check
// the attempt times. if reach the times, the collector will set the block batch proving status.
if time.Since(assignedProverTask.AssignedAt) >= timeoutDuration {
log.Warn("proof task have reach the timeout", "task id", assignedProverTask.TaskID,
"prover public key", assignedProverTask.ProverPublicKey, "prover name", assignedProverTask.ProverName, "task type", assignedProverTask.TaskType)
err = c.db.Transaction(func(tx *gorm.DB) error {
// update prover task proving status as ProverProofInvalid
if err = c.proverTaskOrm.UpdateProverTaskProvingStatus(c.ctx, message.ProofType(assignedProverTask.TaskType),
assignedProverTask.TaskID, assignedProverTask.ProverPublicKey, types.ProverProofInvalid, tx); err != nil {
log.Error("update prover task proving status failure", "hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey, "err", err)
return err
}

// update prover task failure type
if err = c.proverTaskOrm.UpdateProverTaskFailureType(c.ctx, message.ProofType(assignedProverTask.TaskType),
assignedProverTask.TaskID, assignedProverTask.ProverPublicKey, types.ProverTaskFailureTypeTimeout, tx); err != nil {
log.Error("update prover task failure type failure", "hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey, "err", err)
return err
}

// update the task to unassigned, let collector restart it
if message.ProofType(assignedProverTask.TaskType) == message.ProofTypeChunk {
if err = c.chunkOrm.UpdateProvingStatus(c.ctx, assignedProverTask.TaskID, types.ProvingTaskUnassigned, tx); err != nil {
log.Error("update chunk proving status to unassigned to restart it failure", "hash", assignedProverTask.TaskID, "err", err)
}
}
if message.ProofType(assignedProverTask.TaskType) == message.ProofTypeBatch {
if err = c.batchOrm.UpdateProvingStatus(c.ctx, assignedProverTask.TaskID, types.ProvingTaskUnassigned, tx); err != nil {
log.Error("update batch proving status to unassigned to restart it failure", "hash", assignedProverTask.TaskID, "err", err)
}
}
return nil
})
if err != nil {
log.Error("check task proof is timeout failure", "error", err)
log.Warn("proof task have reach the timeout", "task id", assignedProverTask.TaskID,
"prover public key", assignedProverTask.ProverPublicKey, "prover name", assignedProverTask.ProverName, "task type", assignedProverTask.TaskType)
err = c.db.Transaction(func(tx *gorm.DB) error {
// update prover task proving status as ProverProofInvalid
if err = c.proverTaskOrm.UpdateProverTaskProvingStatus(c.ctx, message.ProofType(assignedProverTask.TaskType),
assignedProverTask.TaskID, assignedProverTask.ProverPublicKey, types.ProverProofInvalid, tx); err != nil {
log.Error("update prover task proving status failure", "hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey, "err", err)
return err
}

// update prover task failure type
if err = c.proverTaskOrm.UpdateProverTaskFailureType(c.ctx, message.ProofType(assignedProverTask.TaskType),
assignedProverTask.TaskID, assignedProverTask.ProverPublicKey, types.ProverTaskFailureTypeTimeout, tx); err != nil {
log.Error("update prover task failure type failure", "hash", assignedProverTask.TaskID, "pubKey", assignedProverTask.ProverPublicKey, "err", err)
return err
}

// update the task to unassigned, let collector restart it
if message.ProofType(assignedProverTask.TaskType) == message.ProofTypeChunk {
if err = c.chunkOrm.UpdateProvingStatus(c.ctx, assignedProverTask.TaskID, types.ProvingTaskUnassigned, tx); err != nil {
log.Error("update chunk proving status to unassigned to restart it failure", "hash", assignedProverTask.TaskID, "err", err)
}
}
if message.ProofType(assignedProverTask.TaskType) == message.ProofTypeBatch {
if err = c.batchOrm.UpdateProvingStatus(c.ctx, assignedProverTask.TaskID, types.ProvingTaskUnassigned, tx); err != nil {
log.Error("update batch proving status to unassigned to restart it failure", "hash", assignedProverTask.TaskID, "err", err)
}
}
return nil
})
if err != nil {
log.Error("check task proof is timeout failure", "error", err)
}
}
case <-c.ctx.Done():
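The net effect of this hunk: the per-task time.Since(assignedProverTask.AssignedAt) >= timeoutDuration comparison is gone, and only tasks already past the deadline come back from GetTimeoutAssignedProverTasks (defined in the ORM changes further below). A rough sketch of the database-side cutoff, with stand-in model and status values:

package orm

import (
	"context"
	"time"

	"gorm.io/gorm"
)

// proverTask is a stand-in for the real ProverTask model; only the two
// columns the filter touches are declared here.
type proverTask struct {
	ProvingStatus int
	AssignedAt    time.Time
}

const proverAssigned = 1 // stand-in for types.ProverAssigned

// findTimedOutTasks sketches the database-side filter: the cutoff
// (now - timeout) is pushed into the WHERE clause instead of being
// compared per row in Go.
func findTimedOutTasks(ctx context.Context, db *gorm.DB, limit int, timeout time.Duration) ([]proverTask, error) {
	var tasks []proverTask
	err := db.WithContext(ctx).
		Model(&proverTask{}).
		Where("proving_status = ?", proverAssigned).
		Where("assigned_at < ?", time.Now().UTC().Add(-timeout)).
		Limit(limit).
		Find(&tasks).Error
	return tasks, err
}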
@@ -13,6 +13,7 @@ import (
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
"scroll-tech/common/version"

"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/orm"

@@ -42,17 +43,29 @@ func NewBatchProverTask(cfg *config.Config, db *gorm.DB) *BatchProverTask {
func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
publicKey, publicKeyExist := ctx.Get(coordinatorType.PublicKey)
if !publicKeyExist {
return nil, fmt.Errorf("get public key from contex failed")
return nil, fmt.Errorf("get public key from context failed")
}

proverName, proverNameExist := ctx.Get(coordinatorType.ProverName)
if !proverNameExist {
return nil, fmt.Errorf("get prover name from contex failed")
return nil, fmt.Errorf("get prover name from context failed")
}

proverVersion, proverVersionExist := ctx.Get(coordinatorType.ProverVersion)
if !proverVersionExist {
return nil, fmt.Errorf("get prover version from contex failed")
return nil, fmt.Errorf("get prover version from context failed")
}
if !version.CheckScrollProverVersion(proverVersion.(string)) {
return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", proverVersion.(string), version.Version)
}

isAssigned, err := bp.proverTaskOrm.IsProverAssigned(ctx, publicKey.(string))
if err != nil {
return nil, fmt.Errorf("failed to check if prover is assigned a task: %w", err)
}

if isAssigned {
return nil, fmt.Errorf("prover with publicKey %s is already assigned a task", publicKey)
}

batchTasks, err := bp.batchOrm.UpdateUnassignedBatchReturning(ctx, 1)
@@ -13,6 +13,7 @@ import (
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
"scroll-tech/common/version"

"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/orm"

@@ -42,17 +43,29 @@ func NewChunkProverTask(cfg *config.Config, db *gorm.DB) *ChunkProverTask {
func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
publicKey, publicKeyExist := ctx.Get(coordinatorType.PublicKey)
if !publicKeyExist {
return nil, fmt.Errorf("get public key from contex failed")
return nil, fmt.Errorf("get public key from context failed")
}

proverName, proverNameExist := ctx.Get(coordinatorType.ProverName)
if !proverNameExist {
return nil, fmt.Errorf("get prover name from contex failed")
return nil, fmt.Errorf("get prover name from context failed")
}

proverVersion, proverVersionExist := ctx.Get(coordinatorType.ProverVersion)
if !proverVersionExist {
return nil, fmt.Errorf("get prover version from contex failed")
return nil, fmt.Errorf("get prover version from context failed")
}
if !version.CheckScrollProverVersion(proverVersion.(string)) {
return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", version.Version, proverVersion.(string))
}

isAssigned, err := cp.proverTaskOrm.IsProverAssigned(ctx, publicKey.(string))
if err != nil {
return nil, fmt.Errorf("failed to check if prover is assigned a task: %w", err)
}

if isAssigned {
return nil, fmt.Errorf("prover with publicKey %s is already assigned a task", publicKey)
}

// load and send chunk tasks
@@ -2,6 +2,7 @@ package submitproof

import (
"context"
"encoding/json"
"errors"
"fmt"
"time"

@@ -34,8 +35,12 @@ var (
ErrValidatorFailureProofMsgStatusNotOk = errors.New("validator failure proof msg status not ok")
// ErrValidatorFailureProverTaskEmpty get none prover task
ErrValidatorFailureProverTaskEmpty = errors.New("validator failure get none prover task for the proof")
// ErrValidatorFailureProverInfoHasProofValid proof is vaild
ErrValidatorFailureProverInfoHasProofValid = errors.New("validator failure prover task info has proof valid")
// ErrValidatorFailureProverTaskCannotSubmitTwice prove task can not submit proof twice
ErrValidatorFailureProverTaskCannotSubmitTwice = errors.New("validator failure prove task cannot submit proof twice")
// ErrValidatorFailureProofTimeout the submit proof is timeout
ErrValidatorFailureProofTimeout = errors.New("validator failure submit proof timeout")
// ErrValidatorFailureTaskHaveVerifiedSuccess have proved success and verified success
ErrValidatorFailureTaskHaveVerifiedSuccess = errors.New("validator failure chunk/batch have proved and verified success")
)

// ProofReceiverLogic the proof receiver logic

@@ -74,7 +79,7 @@ func NewSubmitProofReceiverLogic(cfg *config.ProverManager, db *gorm.DB) *ProofR
func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.ProofMsg) error {
pk := ctx.GetString(coordinatorType.PublicKey)
if len(pk) == 0 {
return fmt.Errorf("get public key from contex failed")
return fmt.Errorf("get public key from context failed")
}

proverTask, err := m.proverTaskOrm.GetProverTaskByTaskIDAndPubKey(ctx, proofMsg.ID, pk)
@@ -83,47 +88,15 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
return ErrValidatorFailureProverTaskEmpty
}

if err = m.validator(proverTask, pk, proofMsg); err != nil {
if errors.Is(err, ErrValidatorFailureProofMsgStatusNotOk) {
m.proofFailure(ctx, proofMsg.ID, pk, proofMsg.Type)
}
return err
}

proofTime := time.Since(proverTask.CreatedAt)
proofTimeSec := uint64(proofTime.Seconds())

// store proof content
var storeProofErr error
switch proofMsg.Type {
case message.ProofTypeChunk:
storeProofErr = m.db.Transaction(func(tx *gorm.DB) error {
if dbErr := m.chunkOrm.UpdateProofByHash(ctx, proofMsg.ID, proofMsg.ChunkProof, proofTimeSec, tx); dbErr != nil {
return fmt.Errorf("failed to store chunk proof into db, err:%w", dbErr)
}
if dbErr := m.chunkOrm.UpdateProvingStatus(ctx, proofMsg.ID, types.ProvingTaskProved, tx); dbErr != nil {
return fmt.Errorf("failed to update chunk task status as proved, error:%w", dbErr)
}
return nil
})
case message.ProofTypeBatch:
storeProofErr = m.db.Transaction(func(tx *gorm.DB) error {
if dbErr := m.batchOrm.UpdateProofByHash(ctx, proofMsg.ID, proofMsg.BatchProof, proofTimeSec, tx); dbErr != nil {
return fmt.Errorf("failed to store batch proof into db, error:%w", dbErr)
}
if dbErr := m.batchOrm.UpdateProvingStatus(ctx, proofMsg.ID, types.ProvingTaskProved, tx); dbErr != nil {
return fmt.Errorf("failed to update batch task status as proved, error:%w", dbErr)
}
return nil
})
}
if storeProofErr != nil {
m.proofFailure(ctx, proofMsg.ID, pk, proofMsg.Type)
log.Error("failed to store basic proof into db", "error", storeProofErr)
return storeProofErr
}
log.Info("handling zk proof", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
"prover pk", pk, "prove type", proverTask.TaskType, "proof time", proofTimeSec)

coordinatorProofsReceivedTotalCounter.Inc(1)
if err = m.validator(ctx, proverTask, pk, proofMsg); err != nil {
return err
}

var success bool
var verifyErr error

@@ -134,7 +107,7 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
}

if verifyErr != nil || !success {
m.proofFailure(ctx, proofMsg.ID, pk, proofMsg.Type)
m.proofFailure(ctx, proofMsg.ID, pk, proofMsg)
coordinatorProofsVerifiedFailedTimeTimer.Update(proofTime)

log.Info("proof verified by coordinator failed", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,

@@ -149,8 +122,10 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
log.Info("proof verified and valid", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
"prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec)

if err := m.closeProofTask(ctx, proofMsg.ID, pk, proofMsg); err != nil {
m.proofRecover(ctx, proofMsg.ID, pk, proofMsg.Type)
coordinatorProofsReceivedTotalCounter.Inc(1)

if err := m.closeProofTask(ctx, proofMsg.ID, pk, proofMsg, proofTimeSec); err != nil {
m.proofRecover(ctx, proofMsg.ID, pk, proofMsg)
return err
}
@@ -178,49 +153,78 @@ func (m *ProofReceiverLogic) checkAreAllChunkProofsReady(ctx context.Context, ch
return nil
}

func (m *ProofReceiverLogic) validator(proverTask *orm.ProverTask, pk string, proofMsg *message.ProofMsg) error {
func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.ProverTask, pk string, proofMsg *message.ProofMsg) error {
// Ensure this prover is eligible to participate in the prover task.
if types.ProverProveStatus(proverTask.ProvingStatus) == types.ProverProofValid {
// In order to prevent DoS attacks, it is forbidden to repeatedly submit valid proofs.
// TODO: Defend invalid proof resubmissions by one of the following two methods:
// (i) slash the prover for each submission of invalid proof
// (ii) set the maximum failure retry times
log.Warn("prover has already submitted valid proof in proof session", "prover name", proverTask.ProverName,
"prover pk", proverTask.ProverPublicKey, "proof type", proverTask.TaskType, "proof id", proofMsg.ProofDetail.ID)
return ErrValidatorFailureProverInfoHasProofValid
log.Warn("the prover task cannot submit twice", "hash", proofMsg.ID, "prover pk", proverTask.ProverPublicKey,
"prover name", proverTask.ProverName, "proof type", proverTask.TaskType)
return ErrValidatorFailureProverTaskCannotSubmitTwice
}

proofTime := time.Since(proverTask.CreatedAt)
proofTimeSec := uint64(proofTime.Seconds())

log.Info("handling zk proof", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
"prover pk", pk, "prove type", proverTask.TaskType, "proof time", proofTimeSec)

if proofMsg.Status != message.StatusOk {
coordinatorProofsGeneratedFailedTimeTimer.Update(proofTime)

log.Info("proof generated by prover failed", "proof id", proofMsg.ID, "prover name", proverTask.ProverName,
"prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", proofMsg.Error)
"prover pk", pk, "prove type", proofMsg.Type, "error", proofMsg.Error)
if updateErr := m.proverTaskOrm.UpdateProverTaskProvingStatus(ctx, proofMsg.Type, proofMsg.ID, pk, types.ProverProofInvalid); updateErr != nil {
log.Error("proof generated by prover failed update prover task proving status failure", "proof id", proofMsg.ID,
"prover name", proverTask.ProverName, "prover pk", pk, "prove type", proofMsg.Type, "error", proofMsg.Error)
}
return ErrValidatorFailureProofMsgStatusNotOk
}

// if prover task FailureType is SessionInfoFailureTimeout, the submit proof is timeout, need skip it
if types.ProverTaskFailureType(proverTask.FailureType) == types.ProverTaskFailureTypeTimeout {
log.Info("proof submit proof have timeout, skip this submit proof", "hash", proofMsg.ID, "proof type", proverTask.TaskType,
"prover name", proverTask.ProverName, "prover public key", pk, "proof time", proofTimeSec)
return ErrValidatorFailureProofTimeout
}

// store the proof to prover task
if updateTaskProofErr := m.updateProverTaskProof(ctx, pk, proofMsg); updateTaskProofErr != nil {
log.Warn("update prover task proof failure", "hash", proofMsg.ID, "public key", pk,
"prover name", proverTask.ProverName, "error", updateTaskProofErr)
}

// if the batch/chunk have proved and verifier success, need skip this submit proof
if m.checkIsTaskSuccess(ctx, proofMsg.ID, proofMsg.Type) {
log.Info("the prove task have proved and verifier success, skip this submit proof", "hash", proofMsg.ID,
"proof type", proverTask.TaskType, "prover name", proverTask.ProverName, "prover public key", pk)
return ErrValidatorFailureTaskHaveVerifiedSuccess
}
return nil
}
func (m *ProofReceiverLogic) proofFailure(ctx context.Context, hash string, pubKey string, proofMsgType message.ProofType) {
if err := m.updateProofStatus(ctx, hash, pubKey, proofMsgType, types.ProvingTaskFailed); err != nil {
func (m *ProofReceiverLogic) proofFailure(ctx context.Context, hash string, pubKey string, proofMsg *message.ProofMsg) {
log.Info("proof failure update proof status", "hash", hash, "public key", pubKey,
"proof type", proofMsg.Type.String(), "status", types.ProvingTaskFailed.String())

if err := m.updateProofStatus(ctx, hash, pubKey, proofMsg, types.ProvingTaskFailed, 0); err != nil {
log.Error("failed to updated proof status ProvingTaskFailed", "hash", hash, "pubKey", pubKey, "error", err)
}
coordinatorSessionsFailedTotalCounter.Inc(1)
}

func (m *ProofReceiverLogic) proofRecover(ctx context.Context, hash string, pubKey string, proofMsgType message.ProofType) {
if err := m.updateProofStatus(ctx, hash, pubKey, proofMsgType, types.ProvingTaskUnassigned); err != nil {
func (m *ProofReceiverLogic) proofRecover(ctx context.Context, hash string, pubKey string, proofMsg *message.ProofMsg) {
log.Info("proof recover update proof status", "hash", hash, "public key", pubKey,
"proof type", proofMsg.Type.String(), "status", types.ProvingTaskUnassigned.String())

if err := m.updateProofStatus(ctx, hash, pubKey, proofMsg, types.ProvingTaskUnassigned, 0); err != nil {
log.Error("failed to updated proof status ProvingTaskUnassigned", "hash", hash, "pubKey", pubKey, "error", err)
}
}

func (m *ProofReceiverLogic) closeProofTask(ctx context.Context, hash string, pubKey string, proofMsg *message.ProofMsg) error {
if err := m.updateProofStatus(ctx, hash, pubKey, proofMsg.Type, types.ProvingTaskVerified); err != nil {
func (m *ProofReceiverLogic) closeProofTask(ctx context.Context, hash string, pubKey string, proofMsg *message.ProofMsg, proofTimeSec uint64) error {
log.Info("proof close task update proof status", "hash", hash, "public key", pubKey,
"proof type", proofMsg.Type.String(), "status", types.ProvingTaskVerified.String())

if err := m.updateProofStatus(ctx, hash, pubKey, proofMsg, types.ProvingTaskVerified, proofTimeSec); err != nil {
log.Error("failed to updated proof status ProvingTaskVerified", "hash", hash, "pubKey", pubKey, "error", err)
return err
}
@@ -228,13 +232,7 @@ func (m *ProofReceiverLogic) closeProofTask(ctx context.Context, hash string, pu
}

// UpdateProofStatus update the chunk/batch task and session info status
func (m *ProofReceiverLogic) updateProofStatus(ctx context.Context, hash string, proverPublicKey string, proofMsgType message.ProofType, status types.ProvingStatus) error {
// if the prover task failure type is SessionInfoFailureTimeout,
// just skip update the status because the proof result come too late.
if m.checkIsTimeoutFailure(ctx, hash, proverPublicKey) {
return nil
}

func (m *ProofReceiverLogic) updateProofStatus(ctx context.Context, hash string, proverPublicKey string, proofMsg *message.ProofMsg, status types.ProvingStatus, proofTimeSec uint64) error {
var proverTaskStatus types.ProverProveStatus
switch status {
case types.ProvingTaskFailed, types.ProvingTaskUnassigned:

@@ -244,16 +242,31 @@ func (m *ProofReceiverLogic) updateProofStatus(ctx context.Context, hash string,
}

err := m.db.Transaction(func(tx *gorm.DB) error {
if updateErr := m.proverTaskOrm.UpdateProverTaskProvingStatus(ctx, proofMsgType, hash, proverPublicKey, proverTaskStatus, tx); updateErr != nil {
if updateErr := m.proverTaskOrm.UpdateProverTaskProvingStatus(ctx, proofMsg.Type, hash, proverPublicKey, proverTaskStatus, tx); updateErr != nil {
return updateErr
}

// if the block batch has proof verified, so the failed status not update block batch proving status
if status == types.ProvingTaskFailed && m.checkIsTaskSuccess(ctx, hash, proofMsgType) {
if status == types.ProvingTaskFailed && m.checkIsTaskSuccess(ctx, hash, proofMsg.Type) {
log.Info("update proof status ProvingTaskFailed skip because other prover have prove success", "hash", hash, "public key", proverPublicKey)
return nil
}

switch proofMsgType {
if status == types.ProvingTaskVerified {
var storeProofErr error
switch proofMsg.Type {
case message.ProofTypeChunk:
storeProofErr = m.chunkOrm.UpdateProofByHash(ctx, proofMsg.ID, proofMsg.ChunkProof, proofTimeSec, tx)
case message.ProofTypeBatch:
storeProofErr = m.batchOrm.UpdateProofByHash(ctx, proofMsg.ID, proofMsg.BatchProof, proofTimeSec, tx)
}
if storeProofErr != nil {
log.Error("failed to store chunk/batch proof into db", "hash", hash, "public key", proverPublicKey, "error", storeProofErr)
return storeProofErr
}
}

switch proofMsg.Type {
case message.ProofTypeChunk:
if err := m.chunkOrm.UpdateProvingStatus(ctx, hash, status, tx); err != nil {
log.Error("failed to update chunk proving_status as failed", "msg.ID", hash, "error", err)

@@ -272,7 +285,7 @@ func (m *ProofReceiverLogic) updateProofStatus(ctx context.Context, hash string,
return err
}

if status == types.ProvingTaskVerified && proofMsgType == message.ProofTypeChunk {
if status == types.ProvingTaskVerified && proofMsg.Type == message.ProofTypeChunk {
if checkReadyErr := m.checkAreAllChunkProofsReady(ctx, hash); checkReadyErr != nil {
log.Error("failed to check are all chunk proofs ready", "error", checkReadyErr)
return checkReadyErr

@@ -302,14 +315,19 @@ func (m *ProofReceiverLogic) checkIsTaskSuccess(ctx context.Context, hash string
return provingStatus == types.ProvingTaskVerified
}

func (m *ProofReceiverLogic) checkIsTimeoutFailure(ctx context.Context, hash, proverPublicKey string) bool {
proverTask, err := m.proverTaskOrm.GetProverTaskByTaskIDAndPubKey(ctx, hash, proverPublicKey)
if err != nil {
return false
func (m *ProofReceiverLogic) updateProverTaskProof(ctx context.Context, pk string, proofMsg *message.ProofMsg) error {
// store the proof to prover task
var proofBytes []byte
var marshalErr error
switch proofMsg.Type {
case message.ProofTypeChunk:
proofBytes, marshalErr = json.Marshal(proofMsg.ChunkProof)
case message.ProofTypeBatch:
proofBytes, marshalErr = json.Marshal(proofMsg.BatchProof)
}

if types.ProverTaskFailureType(proverTask.FailureType) == types.ProverTaskFailureTypeTimeout {
return true
if len(proofBytes) == 0 || marshalErr != nil {
return fmt.Errorf("updateProverTaskProof marshal proof error:%w", marshalErr)
}
return false
return m.proverTaskOrm.UpdateProverTaskProof(ctx, proofMsg.Type, proofMsg.ID, pk, proofBytes)
}
@@ -11,6 +11,7 @@ import (

"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
)

// ProverTask is assigned provers info of chunk/batch proof prover task

@@ -51,6 +52,20 @@ func (*ProverTask) TableName() string {
return "prover_task"
}

// IsProverAssigned checks if a prover with the given public key has been assigned a task.
func (o *ProverTask) IsProverAssigned(ctx context.Context, publicKey string) (bool, error) {
db := o.db.WithContext(ctx)
var task ProverTask
err := db.Where("prover_public_key = ? AND proving_status = ?", publicKey, types.ProverAssigned).First(&task).Error
if err != nil {
if err == gorm.ErrRecordNotFound {
return false, nil
}
return false, err
}
return true, nil
}
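Note the gorm.ErrRecordNotFound branch above: First returns that error when no row matches, so "not found" is the success path meaning "not assigned". A hedged sketch of a caller, modeled on the Assign handlers earlier in this diff; the interface is introduced only to keep the sketch self-contained:

package example

import (
	"context"
	"fmt"
)

// taskStore abstracts the single ORM method this sketch needs; the real
// code calls (*orm.ProverTask).IsProverAssigned directly.
type taskStore interface {
	IsProverAssigned(ctx context.Context, publicKey string) (bool, error)
}

// guardAssignment mirrors how the Assign handlers use IsProverAssigned:
// any lookup error aborts, and an already-assigned prover is refused a
// second task.
func guardAssignment(ctx context.Context, store taskStore, publicKey string) error {
	assigned, err := store.IsProverAssigned(ctx, publicKey)
	if err != nil {
		return fmt.Errorf("failed to check if prover is assigned a task: %w", err)
	}
	if assigned {
		return fmt.Errorf("prover with publicKey %s is already assigned a task", publicKey)
	}
	return nil // safe to hand out a new chunk/batch task
}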
// GetProverTasks get prover tasks
func (o *ProverTask) GetProverTasks(ctx context.Context, fields map[string]interface{}, orderByList []string, offset, limit int) ([]ProverTask, error) {
db := o.db.WithContext(ctx)

@@ -112,11 +127,26 @@ func (o *ProverTask) GetProverTaskByTaskIDAndPubKey(ctx context.Context, taskID,
return &proverTask, nil
}

// GetAssignedProverTasks get the assigned prover task
func (o *ProverTask) GetAssignedProverTasks(ctx context.Context, limit int) ([]ProverTask, error) {
// GetProvingStatusByTaskID retrieves the proving status of a prover task
func (o *ProverTask) GetProvingStatusByTaskID(ctx context.Context, taskID string) (types.ProverProveStatus, error) {
db := o.db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Select("proving_status")
db = db.Where("task_id = ?", taskID)

var proverTask ProverTask
if err := db.Find(&proverTask).Error; err != nil {
return types.ProverProofInvalid, fmt.Errorf("ProverTask.GetProvingStatusByTaskID error: %w, taskID: %v", err, taskID)
}
return types.ProverProveStatus(proverTask.ProvingStatus), nil
}

// GetTimeoutAssignedProverTasks get the timeout and assigned proving_status prover task
func (o *ProverTask) GetTimeoutAssignedProverTasks(ctx context.Context, limit int, timeout time.Duration) ([]ProverTask, error) {
db := o.db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("proving_status", int(types.ProverAssigned))
db = db.Where("assigned_at < ?", utils.NowUTC().Add(-timeout))
db = db.Limit(limit)

var proverTasks []ProverTask

@@ -146,6 +176,19 @@ func (o *ProverTask) SetProverTask(ctx context.Context, proverTask *ProverTask,
return nil
}

// UpdateProverTaskProof update the prover task's proof
func (o *ProverTask) UpdateProverTaskProof(ctx context.Context, proofType message.ProofType, taskID string, pk string, proof []byte) error {
db := o.db
db = db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("task_type = ? AND task_id = ? AND prover_public_key = ?", int(proofType), taskID, pk)

if err := db.Update("proof", proof).Error; err != nil {
return fmt.Errorf("ProverTask.UpdateProverTaskProof error: %w, proof type: %v, taskID: %v, prover public key: %v", err, proofType.String(), taskID, pk)
}
return nil
}

// UpdateProverTaskProvingStatus updates the proving_status of a specific ProverTask record.
func (o *ProverTask) UpdateProverTaskProvingStatus(ctx context.Context, proofType message.ProofType, taskID string, pk string, status types.ProverProveStatus, dbTX ...*gorm.DB) error {
db := o.db
@@ -23,6 +23,7 @@ import (
"scroll-tech/common/docker"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/version"

"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/controller/api"

@@ -37,10 +38,11 @@ var (

base *docker.App

db *gorm.DB
l2BlockOrm *orm.L2Block
chunkOrm *orm.Chunk
batchOrm *orm.Batch
db *gorm.DB
l2BlockOrm *orm.L2Block
chunkOrm *orm.Chunk
batchOrm *orm.Batch
proverTaskOrm *orm.ProverTask

wrappedBlock1 *types.WrappedBlock
wrappedBlock2 *types.WrappedBlock

@@ -107,6 +109,8 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
}

func setEnv(t *testing.T) {
version.Version = "v1.2.3-aaa-bbb-ccc"

base = docker.NewDockerApp()
base.RunDBImage(t)

@@ -127,6 +131,7 @@ func setEnv(t *testing.T) {
batchOrm = orm.NewBatch(db)
chunkOrm = orm.NewChunk(db)
l2BlockOrm = orm.NewL2Block(db)
proverTaskOrm = orm.NewProverTask(db)

templateBlockTrace, err := os.ReadFile("../../common/testdata/blockTrace_02.json")
assert.NoError(t, err)

@@ -362,8 +367,12 @@ func testProofGeneratedFailed(t *testing.T) {
tickStop = time.Tick(time.Minute)
)

var chunkProofStatus types.ProvingStatus
var batchProofStatus types.ProvingStatus
var (
chunkProofStatus types.ProvingStatus
batchProofStatus types.ProvingStatus
chunkProverTaskProvingStatus types.ProverProveStatus
batchProverTaskProvingStatus types.ProverProveStatus
)

for {
select {

@@ -372,7 +381,15 @@ func testProofGeneratedFailed(t *testing.T) {
assert.NoError(t, err)
batchProofStatus, err = batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash)
assert.NoError(t, err)
if chunkProofStatus == types.ProvingTaskFailed && batchProofStatus == types.ProvingTaskFailed {
if chunkProofStatus == types.ProvingTaskAssigned && batchProofStatus == types.ProvingTaskAssigned {
return
}

chunkProverTaskProvingStatus, err = proverTaskOrm.GetProvingStatusByTaskID(context.Background(), dbChunk.Hash)
assert.NoError(t, err)
batchProverTaskProvingStatus, err = proverTaskOrm.GetProvingStatusByTaskID(context.Background(), batch.Hash)
assert.NoError(t, err)
if chunkProverTaskProvingStatus == types.ProverProofInvalid && batchProverTaskProvingStatus == types.ProverProofInvalid {
return
}
case <-tickStop:
@@ -14,6 +14,7 @@ import (

ctypes "scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/version"

"scroll-tech/coordinator/internal/logic/verifier"
"scroll-tech/coordinator/internal/types"

@@ -78,7 +79,7 @@ func (r *mockProver) login(t *testing.T, challengeString string) string {
Identity: &message.Identity{
Challenge: challengeString,
ProverName: "test",
ProverVersion: "v1.0.0",
ProverVersion: version.Version,
},
}
assert.NoError(t, authMsg.SignWithKey(r.privKey))

@@ -162,11 +163,16 @@ func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType) *t
}

func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSchema, proofStatus proofStatus, errCode int) {
proofMsgStatus := message.StatusOk
if proofStatus == generatedFailed {
proofMsgStatus = message.StatusProofError
}

proof := &message.ProofMsg{
ProofDetail: &message.ProofDetail{
ID: proverTaskSchema.TaskID,
Type: message.ProofType(proverTaskSchema.TaskType),
Status: message.RespStatus(proofStatus),
Status: proofMsgStatus,
ChunkProof: &message.ChunkProof{},
BatchProof: &message.BatchProof{},
},
@@ -4,6 +4,7 @@ import (
"context"
"crypto/ecdsa"
"fmt"
"net/http"
"sync"
"time"

@@ -33,7 +34,18 @@ func NewCoordinatorClient(cfg *config.CoordinatorConfig, proverName string, priv
SetTimeout(time.Duration(cfg.ConnectionTimeoutSec) * time.Second).
SetRetryCount(cfg.RetryCount).
SetRetryWaitTime(time.Duration(cfg.RetryWaitTimeSec) * time.Second).
SetBaseURL(cfg.BaseURL)
SetBaseURL(cfg.BaseURL).
AddRetryCondition(func(r *resty.Response, _ error) bool {
// Check for HTTP 5xx errors, e.g., coordinator is restarting.
log.Warn("Received unexpected HTTP response. Retrying...", "status code", r.StatusCode())
return r.StatusCode() >= http.StatusInternalServerError
})

log.Info("successfully initialized prover client",
"base url", cfg.BaseURL,
"connection timeout (second)", cfg.ConnectionTimeoutSec,
"retry count", cfg.RetryCount,
"retry wait time (second)", cfg.RetryWaitTimeSec)

return &CoordinatorClient{
client: client,
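The retry condition added here treats any HTTP 5xx as retryable, on top of resty's built-in retry count and wait time. A standalone sketch of the same client setup; the base URL and request path below are placeholders, not the coordinator's real endpoints:

package main

import (
	"log"
	"net/http"
	"time"

	"github.com/go-resty/resty/v2"
)

func main() {
	client := resty.New().
		SetTimeout(30 * time.Second).
		SetRetryCount(3).
		SetRetryWaitTime(5 * time.Second).
		SetBaseURL("http://localhost:8390"). // placeholder coordinator URL
		AddRetryCondition(func(r *resty.Response, _ error) bool {
			// Retry on any 5xx, e.g. while the server is restarting.
			return r.StatusCode() >= http.StatusInternalServerError
		})

	resp, err := client.R().Get("/ping") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	log.Println(resp.Status())
}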
@@ -31,7 +31,8 @@ func init() {
}

// Register `prover-test` app for integration-test.
utils.RegisterSimulation(app, utils.ProverApp)
utils.RegisterSimulation(app, utils.ChunkProverApp)
utils.RegisterSimulation(app, utils.BatchProverApp)
}

func action(ctx *cli.Context) error {
@@ -6,11 +6,21 @@ import (
"time"

"scroll-tech/common/cmd"
"scroll-tech/common/utils"
"scroll-tech/common/version"
)

func TestRunProver(t *testing.T) {
prover := cmd.NewCmd("prover-test", "--version")
func TestRunChunkProver(t *testing.T) {
prover := cmd.NewCmd(string(utils.ChunkProverApp), "--version")
defer prover.WaitExit()

// wait result
prover.ExpectWithTimeout(t, true, time.Second*3, fmt.Sprintf("prover version %s", version.Version))
prover.RunApp(nil)
}

func TestRunBatchProver(t *testing.T) {
prover := cmd.NewCmd(string(utils.BatchProverApp), "--version")
defer prover.WaitExit()

// wait result
@@ -4,15 +4,12 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
"golang.org/x/sync/errgroup"
|
||||
|
||||
proverConfig "scroll-tech/prover/config"
|
||||
"scroll-tech/prover/config"
|
||||
|
||||
"scroll-tech/common/cmd"
|
||||
"scroll-tech/common/docker"
|
||||
@@ -31,7 +28,7 @@ func getIndex() int {
|
||||
|
||||
// ProverApp prover-test client manager.
|
||||
type ProverApp struct {
|
||||
Config *proverConfig.Config
|
||||
Config *config.Config
|
||||
|
||||
base *docker.App
|
||||
|
||||
@@ -41,23 +38,30 @@ type ProverApp struct {
|
||||
|
||||
index int
|
||||
name string
|
||||
args []string
|
||||
docker.AppAPI
|
||||
}
|
||||
|
||||
// NewProverApp return a new proverApp manager.
|
||||
func NewProverApp(base *docker.App, file string, httpURL string, proofType message.ProofType) *ProverApp {
|
||||
uuid := uuid.New().String()
|
||||
|
||||
proverFile := fmt.Sprintf("/tmp/%s_%d_prover-config.json", uuid, base.Timestamp)
|
||||
func NewProverApp(base *docker.App, mockName utils.MockAppName, file string, httpURL string) *ProverApp {
|
||||
var proofType message.ProofType
|
||||
switch mockName {
|
||||
case utils.ChunkProverApp:
|
||||
proofType = message.ProofTypeChunk
|
||||
case utils.BatchProverApp:
|
||||
proofType = message.ProofTypeBatch
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
name := string(mockName)
|
||||
proverFile := fmt.Sprintf("/tmp/%d_%s-config.json", base.Timestamp, name)
|
||||
proverApp := &ProverApp{
|
||||
base: base,
|
||||
originFile: file,
|
||||
proverFile: proverFile,
|
||||
bboltDB: fmt.Sprintf("/tmp/%s_%d_bbolt_db", uuid, base.Timestamp),
|
||||
bboltDB: fmt.Sprintf("/tmp/%d_%s_bbolt_db", base.Timestamp, name),
|
||||
index: getIndex(),
|
||||
name: string(utils.ProverApp),
|
||||
args: []string{"--log.debug", "--config", proverFile},
|
||||
name: name,
|
||||
AppAPI: cmd.NewCmd(name, []string{"--log.debug", "--config", proverFile}...),
|
||||
}
|
||||
if err := proverApp.MockConfig(true, httpURL, proofType); err != nil {
|
||||
panic(err)
|
||||
@@ -66,18 +70,10 @@ func NewProverApp(base *docker.App, file string, httpURL string, proofType messa
|
||||
}
|
||||
|
||||
// RunApp run prover-test child process by multi parameters.
|
||||
func (r *ProverApp) RunApp(t *testing.T, args ...string) {
|
||||
r.AppAPI = cmd.NewCmd(r.name, append(r.args, args...)...)
|
||||
func (r *ProverApp) RunApp(t *testing.T) {
|
||||
r.AppAPI.RunApp(func() bool { return r.AppAPI.WaitResult(t, time.Second*40, "prover start successfully") })
|
||||
}
|
||||
|
||||
// RunAppWithExpectedResult runs the prover-test child process with multiple parameters,
|
||||
// and checks for a specific expected result in the output.
|
||||
func (r *ProverApp) RunAppWithExpectedResult(t *testing.T, expectedResult string, args ...string) {
|
||||
r.AppAPI = cmd.NewCmd(r.name, append(r.args, args...)...)
|
||||
r.AppAPI.RunApp(func() bool { return r.AppAPI.WaitResult(t, time.Second*40, expectedResult) })
|
||||
}
|
||||
|
||||
// Free stop and release prover-test.
|
||||
func (r *ProverApp) Free() {
|
||||
if !utils.IsNil(r.AppAPI) {
|
||||
@@ -90,7 +86,7 @@ func (r *ProverApp) Free() {
|
||||
|
||||
// MockConfig creates a new prover config.
|
||||
func (r *ProverApp) MockConfig(store bool, httpURL string, proofType message.ProofType) error {
|
||||
cfg, err := proverConfig.NewConfig(r.originFile)
|
||||
cfg, err := config.NewConfig(r.originFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -123,47 +119,3 @@ func (r *ProverApp) MockConfig(store bool, httpURL string, proofType message.Pro
 	}
 	return os.WriteFile(r.proverFile, data, 0600)
 }
-
-// ProverApps proverApp list.
-type ProverApps []*ProverApp
-
-// RunApps starts all the proverApps.
-func (r ProverApps) RunApps(t *testing.T, args ...string) {
-	var eg errgroup.Group
-	for i := range r {
-		i := i
-		eg.Go(func() error {
-			r[i].RunApp(t, args...)
-			return nil
-		})
-	}
-	_ = eg.Wait()
-}
-
-// Free releases proverApps.
-func (r ProverApps) Free() {
-	var wg sync.WaitGroup
-	wg.Add(len(r))
-	for i := range r {
-		i := i
-		go func() {
-			r[i].Free()
-			wg.Done()
-		}()
-	}
-	wg.Wait()
-}
-
-// WaitExit wait proverApps stopped.
-func (r ProverApps) WaitExit() {
-	var wg sync.WaitGroup
-	wg.Add(len(r))
-	for i := range r {
-		i := i
-		go func() {
-			r[i].WaitExit()
-			wg.Done()
-		}()
-	}
-	wg.Wait()
-}
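The deleted ProverApps helpers all wrapped the same fan-out-and-wait concurrency pattern (errgroup for RunApps, sync.WaitGroup for Free and WaitExit). For reference, a generic sketch of that pattern; fanOut is a hypothetical helper, not part of this repository:

import "sync"

// fanOut is a hypothetical stand-in for the deleted helpers: it runs
// f(i) for each index in its own goroutine and waits for all to finish.
func fanOut(n int, f func(i int)) {
	var wg sync.WaitGroup
	wg.Add(n)
	for i := 0; i < n; i++ {
		i := i // capture a per-iteration copy (needed before Go 1.22)
		go func() {
			defer wg.Done()
			f(i)
		}()
	}
	wg.Wait()
}

// e.g. fanOut(len(apps), func(i int) { apps[i].Free() })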
@@ -4,12 +4,10 @@ go 1.19
 
 require (
 	github.com/go-resty/resty/v2 v2.7.0
-	github.com/google/uuid v1.3.0
 	github.com/scroll-tech/go-ethereum v1.10.14-0.20230804022247-26eeb40ea3ca
 	github.com/stretchr/testify v1.8.3
 	github.com/urfave/cli/v2 v2.25.7
 	go.etcd.io/bbolt v1.3.7
-	golang.org/x/sync v0.3.0
 )
 
 require (
@@ -21,6 +19,7 @@ require (
 	github.com/go-ole/go-ole v1.2.6 // indirect
 	github.com/go-stack/stack v1.8.1 // indirect
 	github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
+	github.com/google/uuid v1.3.0 // indirect
 	github.com/gorilla/websocket v1.5.0 // indirect
 	github.com/holiman/uint256 v1.2.3 // indirect
 	github.com/huin/goupnp v1.0.3 // indirect
@@ -45,6 +44,7 @@ require (
 	github.com/yusufpapurcu/wmi v1.2.2 // indirect
 	golang.org/x/crypto v0.11.0 // indirect
 	golang.org/x/net v0.12.0 // indirect
+	golang.org/x/sync v0.3.0 // indirect
 	golang.org/x/sys v0.10.0 // indirect
 	golang.org/x/time v0.3.0 // indirect
 	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
@@ -26,7 +26,7 @@ func TestERC20(t *testing.T) {
 	token, err := erc20.NewERC20Mock(erc20Address, l2Cli)
 	assert.NoError(t, err)
 
-	auth, err := bind.NewKeyedTransactorWithChainID(bridgeApp.Config.L2Config.RelayerConfig.MessageSenderPrivateKeys[0], base.L2gethImg.ChainID())
+	auth, err := bind.NewKeyedTransactorWithChainID(bridgeApp.Config.L2Config.RelayerConfig.MessageSenderPrivateKey, base.L2gethImg.ChainID())
 	assert.NoError(t, err)
 
 	authBls0, err := token.BalanceOf(nil, auth.From)
@@ -57,7 +57,7 @@ func TestGreeter(t *testing.T) {
 	l2Cli, err := base.L2Client()
 	assert.Nil(t, err)
 
-	auth, err := bind.NewKeyedTransactorWithChainID(bridgeApp.Config.L2Config.RelayerConfig.MessageSenderPrivateKeys[0], base.L2gethImg.ChainID())
+	auth, err := bind.NewKeyedTransactorWithChainID(bridgeApp.Config.L2Config.RelayerConfig.MessageSenderPrivateKey, base.L2gethImg.ChainID())
 	assert.NoError(t, err)
 
 	token, err := greeter.NewGreeter(greeterAddress, l2Cli)
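Both tests adjust to the relayer config moving from a slice of sender keys to a single key. A minimal sketch of the field change implied by the two call sites above (the struct layout is assumed; only the call sites appear in this diff):

// Assumed shape of the change behind the call-site edits above:
//
//   before: MessageSenderPrivateKeys []*ecdsa.PrivateKey  // tests used index [0]
//   after:  MessageSenderPrivateKey  *ecdsa.PrivateKey    // single key, no indexing
//
// bind.NewKeyedTransactorWithChainID takes a *ecdsa.PrivateKey either way.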
@@ -22,8 +22,8 @@ import (
 	"scroll-tech/common/database"
 	"scroll-tech/common/docker"
 	"scroll-tech/common/types"
-	"scroll-tech/common/types/message"
+	"scroll-tech/common/utils"
 	"scroll-tech/common/version"
 
 	bcmd "scroll-tech/bridge/cmd"
 )
@@ -40,8 +40,8 @@ func TestMain(m *testing.M) {
 	base = docker.NewDockerApp()
 	bridgeApp = bcmd.NewBridgeApp(base, "../../bridge/conf/config.json")
 	coordinatorApp = capp.NewCoordinatorApp(base, "../../coordinator/conf/config.json")
-	chunkProverApp = rapp.NewProverApp(base, "../../prover/config.json", coordinatorApp.HTTPEndpoint(), message.ProofTypeChunk)
-	batchProverApp = rapp.NewProverApp(base, "../../prover/config.json", coordinatorApp.HTTPEndpoint(), message.ProofTypeBatch)
+	chunkProverApp = rapp.NewProverApp(base, utils.ChunkProverApp, "../../prover/config.json", coordinatorApp.HTTPEndpoint())
+	batchProverApp = rapp.NewProverApp(base, utils.BatchProverApp, "../../prover/config.json", coordinatorApp.HTTPEndpoint())
 	m.Run()
 	bridgeApp.Free()
 	coordinatorApp.Free()
@@ -112,17 +112,21 @@ func TestCoordinatorProverInteraction(t *testing.T) {
 	assert.NoError(t, err)
 	err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 0, batch.Hash)
 	assert.NoError(t, err)
 	t.Log(version.Version)
 
 	// Run coordinator app.
 	coordinatorApp.RunApp(t)
 
 	// Run prover app.
-	chunkProverApp.RunAppWithExpectedResult(t, "proof submitted successfully") // chunk prover login -> get task -> submit proof.
-	batchProverApp.RunAppWithExpectedResult(t, "proof submitted successfully") // batch prover login -> get task -> submit proof.
+	chunkProverApp.ExpectWithTimeout(t, true, time.Second*40, "proof submitted successfully") // chunk prover login -> get task -> submit proof.
+	batchProverApp.ExpectWithTimeout(t, true, time.Second*40, "proof submitted successfully") // batch prover login -> get task -> submit proof.
 
 	// All task has been proven, coordinator would not return any task.
-	chunkProverApp.ExpectWithTimeout(t, false, 60*time.Second, "get empty prover task")
-	batchProverApp.ExpectWithTimeout(t, false, 60*time.Second, "get empty prover task")
+	chunkProverApp.ExpectWithTimeout(t, true, 60*time.Second, "get empty prover task")
+	batchProverApp.ExpectWithTimeout(t, true, 60*time.Second, "get empty prover task")
+
+	chunkProverApp.RunApp(t)
+	batchProverApp.RunApp(t)
 
 	// Free apps.
 	chunkProverApp.WaitExit()
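Condensed, the reworked interaction test now registers its expected log lines before starting the provers; the boolean flag passed to ExpectWithTimeout appears to select asynchronous matching, which is an inference from this diff rather than a documented contract. A sketch of the resulting flow for one prover:

// Inferred flow (one prover shown); the `true` flag is read here as
// "register asynchronously", an assumption drawn from this diff alone.
chunkProverApp.ExpectWithTimeout(t, true, time.Second*40, "proof submitted successfully")
chunkProverApp.ExpectWithTimeout(t, true, 60*time.Second, "get empty prover task")
chunkProverApp.RunApp(t)  // start the process after expectations are in place
chunkProverApp.WaitExit() // tear down once the expectations have been checked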
@@ -139,10 +143,12 @@ func TestProverReLogin(t *testing.T) {
 
 	// Run coordinator app.
 	coordinatorApp.RunApp(t) // login timeout: 1 sec
+	chunkProverApp.RunApp(t)
+	batchProverApp.RunApp(t)
 
 	// Run prover app.
-	chunkProverApp.RunAppWithExpectedResult(t, "re-login success") // chunk prover login.
-	batchProverApp.RunAppWithExpectedResult(t, "re-login success") // batch prover login.
+	chunkProverApp.WaitResult(t, time.Second*40, "re-login success") // chunk prover login.
+	batchProverApp.WaitResult(t, time.Second*40, "re-login success") // batch prover login.
 
 	// Free apps.
 	chunkProverApp.WaitExit()
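The re-login test takes the blocking route instead: RunApp launches each prover up front, and WaitResult then blocks up to the timeout until the expected line appears. In sketch form, with the same caveat that the semantics are inferred from usage here:

// Blocking variant used by TestProverReLogin (semantics inferred):
coordinatorApp.RunApp(t)                                         // login timeout: 1 sec
chunkProverApp.RunApp(t)                                         // start the prover
chunkProverApp.WaitResult(t, time.Second*40, "re-login success") // block for keyword
chunkProverApp.WaitExit()                                        // then tear down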