mirror of
https://github.com/scroll-tech/scroll.git
synced 2026-01-11 23:18:07 -05:00
Compare commits
17 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
154ff0c8a0 | ||
|
|
2b266aaa68 | ||
|
|
410f14bc7d | ||
|
|
4d903bc9b2 | ||
|
|
59a2f1e998 | ||
|
|
20c5e9855b | ||
|
|
1f2fe74cbe | ||
|
|
0e12661fd5 | ||
|
|
04e66231e5 | ||
|
|
417a228523 | ||
|
|
dcd85b2f56 | ||
|
|
afb6476823 | ||
|
|
d991d6b99d | ||
|
|
f0920362c5 | ||
|
|
5eed174b9e | ||
|
|
a79992e772 | ||
|
|
2a54c8aae6 |
@@ -78,12 +78,8 @@ func (h *HistoryLogic) GetClaimableTxsByAddress(ctx context.Context, address com
|
||||
var txHistories []*types.TxHistoryInfo
|
||||
l2SentMsgOrm := orm.NewL2SentMsg(h.db)
|
||||
l2CrossMsgOrm := orm.NewCrossMsg(h.db)
|
||||
total, err := l2SentMsgOrm.GetClaimableL2SentMsgByAddressTotalNum(ctx, address.Hex())
|
||||
if err != nil || total == 0 {
|
||||
return txHistories, 0, err
|
||||
}
|
||||
results, err := l2SentMsgOrm.GetClaimableL2SentMsgByAddressWithOffset(ctx, address.Hex(), offset, limit)
|
||||
if err != nil || len(results) == 0 {
|
||||
total, results, err := l2SentMsgOrm.GetClaimableL2SentMsgByAddressWithOffset(ctx, address.Hex(), offset, limit)
|
||||
if err != nil || total == 0 || len(results) == 0 {
|
||||
return txHistories, 0, err
|
||||
}
|
||||
var msgHashList []string
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
@@ -72,26 +73,70 @@ func (l *L2SentMsg) GetLatestSentMsgHeightOnL2(ctx context.Context) (uint64, err
|
||||
return result.Height, nil
|
||||
}
|
||||
|
||||
// GetClaimableL2SentMsgByAddressWithOffset get claimable l2 sent msg by address with offset
|
||||
func (l *L2SentMsg) GetClaimableL2SentMsgByAddressWithOffset(ctx context.Context, address string, offset int, limit int) ([]*L2SentMsg, error) {
|
||||
var results []*L2SentMsg
|
||||
err := l.db.WithContext(ctx).Raw(`SELECT * FROM l2_sent_msg WHERE id NOT IN (SELECT l2_sent_msg.id FROM l2_sent_msg INNER JOIN relayed_msg ON l2_sent_msg.msg_hash = relayed_msg.msg_hash WHERE l2_sent_msg.deleted_at IS NULL AND relayed_msg.deleted_at IS NULL) AND (original_sender=$1 OR sender = $1) AND msg_proof !='' ORDER BY id DESC LIMIT $2 OFFSET $3;`, address, limit, offset).
|
||||
Scan(&results).Error
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("L2SentMsg.GetClaimableL2SentMsgByAddressWithOffset error: %w", err)
|
||||
// GetClaimableL2SentMsgByAddressWithOffset returns both the total number of unclaimed messages and a paginated list of those messages.
|
||||
// TODO: Add metrics about the result set sizes (total/claimed/unclaimed messages).
|
||||
func (l *L2SentMsg) GetClaimableL2SentMsgByAddressWithOffset(ctx context.Context, address string, offset int, limit int) (uint64, []*L2SentMsg, error) {
|
||||
var totalMsgs []*L2SentMsg
|
||||
db := l.db.WithContext(ctx)
|
||||
db = db.Table("l2_sent_msg")
|
||||
db = db.Where("original_sender = ? OR sender = ?", address, address)
|
||||
db = db.Where("msg_proof != ''")
|
||||
db = db.Where("deleted_at IS NULL")
|
||||
db = db.Order("id DESC")
|
||||
tx := db.Find(&totalMsgs)
|
||||
if tx.Error != nil || tx.RowsAffected == 0 {
|
||||
return 0, nil, tx.Error
|
||||
}
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// GetClaimableL2SentMsgByAddressTotalNum get claimable l2 sent msg by address total num
|
||||
func (l *L2SentMsg) GetClaimableL2SentMsgByAddressTotalNum(ctx context.Context, address string) (uint64, error) {
|
||||
var count uint64
|
||||
err := l.db.WithContext(ctx).Raw(`SELECT COUNT(*) FROM l2_sent_msg WHERE id NOT IN (SELECT l2_sent_msg.id FROM l2_sent_msg INNER JOIN relayed_msg ON l2_sent_msg.msg_hash = relayed_msg.msg_hash WHERE l2_sent_msg.deleted_at IS NULL AND relayed_msg.deleted_at IS NULL) AND (original_sender=$1 OR sender = $1) AND msg_proof !='';`, address).
|
||||
Scan(&count).Error
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("L2SentMsg.GetClaimableL2SentMsgByAddressTotalNum error: %w", err)
|
||||
// Note on the use of IN vs VALUES in SQL Queries:
|
||||
// ------------------------------------------------
|
||||
// When using the IN predicate with a large list (>100) of values, performance may suffer.
|
||||
// An alternative approach is to use constant subqueries with the VALUES construct.
|
||||
// For more details and optimization tips, visit:
|
||||
// https://postgres.cz/wiki/PostgreSQL_SQL_Tricks_I#Predicate_IN_optimalization
|
||||
//
|
||||
// Example using IN:
|
||||
// SELECT * FROM tab WHERE x IN (1,2,3,...,n); -- where n > 70
|
||||
//
|
||||
// Optimized example using VALUES:
|
||||
// SELECT * FROM tab WHERE x IN (VALUES(10), (20));
|
||||
//
|
||||
var valuesStr string
|
||||
for _, msg := range totalMsgs {
|
||||
valuesStr += fmt.Sprintf("('%s'),", msg.MsgHash)
|
||||
}
|
||||
return count, nil
|
||||
valuesStr = strings.TrimSuffix(valuesStr, ",")
|
||||
|
||||
var claimedMsgHashes []string
|
||||
db = l.db.WithContext(ctx)
|
||||
db = db.Table("relayed_msg")
|
||||
db = db.Where(fmt.Sprintf("msg_hash IN (VALUES %s)", valuesStr))
|
||||
db = db.Where("deleted_at IS NULL")
|
||||
if err := db.Pluck("msg_hash", &claimedMsgHashes).Error; err != nil {
|
||||
return 0, nil, err
|
||||
}
|
||||
|
||||
claimedMsgHashSet := make(map[string]struct{})
|
||||
for _, hash := range claimedMsgHashes {
|
||||
claimedMsgHashSet[hash] = struct{}{}
|
||||
}
|
||||
var unclaimedL2Msgs []*L2SentMsg
|
||||
for _, msg := range totalMsgs {
|
||||
if _, found := claimedMsgHashSet[msg.MsgHash]; !found {
|
||||
unclaimedL2Msgs = append(unclaimedL2Msgs, msg)
|
||||
}
|
||||
}
|
||||
|
||||
// pagination
|
||||
start := offset
|
||||
end := offset + limit
|
||||
if start > len(unclaimedL2Msgs) {
|
||||
start = len(unclaimedL2Msgs)
|
||||
}
|
||||
if end > len(unclaimedL2Msgs) {
|
||||
end = len(unclaimedL2Msgs)
|
||||
}
|
||||
return uint64(len(unclaimedL2Msgs)), unclaimedL2Msgs[start:end], nil
|
||||
}
|
||||
|
||||
// GetLatestL2SentMsgBatchIndex get latest l2 sent msg batch index
|
||||
|
||||
76
bridge-history-api/orm/l2_sent_msg_test.go
Normal file
76
bridge-history-api/orm/l2_sent_msg_test.go
Normal file
@@ -0,0 +1,76 @@
|
||||
package orm
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"bridge-history-api/orm/migrate"
|
||||
"scroll-tech/common/database"
|
||||
"scroll-tech/common/docker"
|
||||
)
|
||||
|
||||
func TestGetClaimableL2SentMsgByAddressWithOffset(t *testing.T) {
|
||||
base := docker.NewDockerApp()
|
||||
base.RunDBImage(t)
|
||||
|
||||
db, err := database.InitDB(
|
||||
&database.Config{
|
||||
DSN: base.DBConfig.DSN,
|
||||
DriverName: base.DBConfig.DriverName,
|
||||
MaxOpenNum: base.DBConfig.MaxOpenNum,
|
||||
MaxIdleNum: base.DBConfig.MaxIdleNum,
|
||||
},
|
||||
)
|
||||
assert.NoError(t, err)
|
||||
|
||||
sqlDB, err := db.DB()
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(sqlDB))
|
||||
|
||||
l2SentMsgOrm := NewL2SentMsg(db)
|
||||
relayedMsgOrm := NewRelayedMsg(db)
|
||||
|
||||
count, msgs, err := l2SentMsgOrm.GetClaimableL2SentMsgByAddressWithOffset(context.Background(), "sender1", 0, 10)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(0), count)
|
||||
|
||||
l2SentMsgs := []*L2SentMsg{
|
||||
{
|
||||
Sender: "sender1",
|
||||
MsgHash: "hash1",
|
||||
MsgProof: "proof1",
|
||||
Nonce: 0,
|
||||
},
|
||||
{
|
||||
OriginalSender: "sender1",
|
||||
MsgHash: "hash2",
|
||||
MsgProof: "proof2",
|
||||
Nonce: 1,
|
||||
},
|
||||
{
|
||||
OriginalSender: "sender1",
|
||||
MsgHash: "hash3",
|
||||
MsgProof: "",
|
||||
Nonce: 2,
|
||||
},
|
||||
}
|
||||
relayedMsgs := []*RelayedMsg{
|
||||
{
|
||||
MsgHash: "hash2",
|
||||
},
|
||||
{
|
||||
MsgHash: "hash3",
|
||||
},
|
||||
}
|
||||
err = l2SentMsgOrm.InsertL2SentMsg(context.Background(), l2SentMsgs)
|
||||
assert.NoError(t, err)
|
||||
err = relayedMsgOrm.InsertRelayedMsg(context.Background(), relayedMsgs)
|
||||
assert.NoError(t, err)
|
||||
|
||||
count, msgs, err = l2SentMsgOrm.GetClaimableL2SentMsgByAddressWithOffset(context.Background(), "sender1", 0, 10)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(1), count)
|
||||
assert.Equal(t, "hash1", msgs[0].MsgHash)
|
||||
}
|
||||
23
common/libzkp/impl/Cargo.lock
generated
23
common/libzkp/impl/Cargo.lock
generated
@@ -31,7 +31,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "aggregator"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.0#b99974d2d37696562a1035c0e595dbc87fabaa62"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.1#88414cc46913978325efd744536c4d5a4a02a766"
|
||||
dependencies = [
|
||||
"ark-std",
|
||||
"env_logger 0.10.0",
|
||||
@@ -333,7 +333,7 @@ checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1"
|
||||
[[package]]
|
||||
name = "bus-mapping"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.0#b99974d2d37696562a1035c0e595dbc87fabaa62"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.1#88414cc46913978325efd744536c4d5a4a02a766"
|
||||
dependencies = [
|
||||
"eth-types",
|
||||
"ethers-core",
|
||||
@@ -959,7 +959,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "eth-types"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.0#b99974d2d37696562a1035c0e595dbc87fabaa62"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.1#88414cc46913978325efd744536c4d5a4a02a766"
|
||||
dependencies = [
|
||||
"ethers-core",
|
||||
"ethers-signers",
|
||||
@@ -970,6 +970,7 @@ dependencies = [
|
||||
"libsecp256k1",
|
||||
"num",
|
||||
"num-bigint",
|
||||
"once_cell",
|
||||
"poseidon-circuit",
|
||||
"regex",
|
||||
"serde",
|
||||
@@ -1115,7 +1116,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "external-tracer"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.0#b99974d2d37696562a1035c0e595dbc87fabaa62"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.1#88414cc46913978325efd744536c4d5a4a02a766"
|
||||
dependencies = [
|
||||
"eth-types",
|
||||
"geth-utils",
|
||||
@@ -1295,7 +1296,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "gadgets"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.0#b99974d2d37696562a1035c0e595dbc87fabaa62"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.1#88414cc46913978325efd744536c4d5a4a02a766"
|
||||
dependencies = [
|
||||
"digest 0.7.6",
|
||||
"eth-types",
|
||||
@@ -1327,7 +1328,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "geth-utils"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.0#b99974d2d37696562a1035c0e595dbc87fabaa62"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.1#88414cc46913978325efd744536c4d5a4a02a766"
|
||||
dependencies = [
|
||||
"env_logger 0.9.3",
|
||||
"gobuild 0.1.0-alpha.2 (git+https://github.com/scroll-tech/gobuild.git)",
|
||||
@@ -1936,7 +1937,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "keccak256"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.0#b99974d2d37696562a1035c0e595dbc87fabaa62"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.1#88414cc46913978325efd744536c4d5a4a02a766"
|
||||
dependencies = [
|
||||
"env_logger 0.9.3",
|
||||
"eth-types",
|
||||
@@ -2134,7 +2135,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "mock"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.0#b99974d2d37696562a1035c0e595dbc87fabaa62"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.1#88414cc46913978325efd744536c4d5a4a02a766"
|
||||
dependencies = [
|
||||
"eth-types",
|
||||
"ethers-core",
|
||||
@@ -2150,7 +2151,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "mpt-zktrie"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.0#b99974d2d37696562a1035c0e595dbc87fabaa62"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.1#88414cc46913978325efd744536c4d5a4a02a766"
|
||||
dependencies = [
|
||||
"eth-types",
|
||||
"halo2-mpt-circuits",
|
||||
@@ -2581,7 +2582,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "prover"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.0#b99974d2d37696562a1035c0e595dbc87fabaa62"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.1#88414cc46913978325efd744536c4d5a4a02a766"
|
||||
dependencies = [
|
||||
"aggregator",
|
||||
"anyhow",
|
||||
@@ -4124,7 +4125,7 @@ checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9"
|
||||
[[package]]
|
||||
name = "zkevm-circuits"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.0#b99974d2d37696562a1035c0e595dbc87fabaa62"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.1#88414cc46913978325efd744536c4d5a4a02a766"
|
||||
dependencies = [
|
||||
"array-init",
|
||||
"bus-mapping",
|
||||
|
||||
@@ -21,7 +21,7 @@ halo2curves = { git = "https://github.com/scroll-tech/halo2curves.git", branch =
|
||||
|
||||
[dependencies]
|
||||
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "develop" }
|
||||
prover = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.9.0", default-features = false, features = ["parallel_syn", "scroll", "shanghai"] }
|
||||
prover = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.9.1", default-features = false, features = ["parallel_syn", "scroll", "shanghai"] }
|
||||
|
||||
base64 = "0.13.0"
|
||||
env_logger = "0.9.0"
|
||||
|
||||
@@ -1,7 +1,8 @@
|
||||
use crate::{
|
||||
types::{CheckChunkProofsResponse, ProofResult},
|
||||
utils::{
|
||||
c_char_to_str, c_char_to_vec, file_exists, string_to_c_char, vec_to_c_char, OUTPUT_DIR,
|
||||
c_char_to_str, c_char_to_vec, file_exists, panic_catch, string_to_c_char, vec_to_c_char,
|
||||
OUTPUT_DIR,
|
||||
},
|
||||
};
|
||||
use libc::c_char;
|
||||
@@ -11,7 +12,7 @@ use prover::{
|
||||
utils::{chunk_trace_to_witness_block, init_env_and_log},
|
||||
BatchProof, BlockTrace, ChunkHash, ChunkProof,
|
||||
};
|
||||
use std::{cell::OnceCell, env, panic, ptr::null};
|
||||
use std::{cell::OnceCell, env, ptr::null};
|
||||
|
||||
static mut PROVER: OnceCell<Prover> = OnceCell::new();
|
||||
static mut VERIFIER: OnceCell<Verifier> = OnceCell::new();
|
||||
@@ -55,7 +56,7 @@ pub unsafe extern "C" fn init_batch_verifier(params_dir: *const c_char, assets_d
|
||||
/// # Safety
|
||||
#[no_mangle]
|
||||
pub unsafe extern "C" fn get_batch_vk() -> *const c_char {
|
||||
let vk_result = panic::catch_unwind(|| PROVER.get_mut().unwrap().get_vk());
|
||||
let vk_result = panic_catch(|| PROVER.get_mut().unwrap().get_vk());
|
||||
|
||||
vk_result
|
||||
.ok()
|
||||
@@ -66,7 +67,7 @@ pub unsafe extern "C" fn get_batch_vk() -> *const c_char {
|
||||
/// # Safety
|
||||
#[no_mangle]
|
||||
pub unsafe extern "C" fn check_chunk_proofs(chunk_proofs: *const c_char) -> *const c_char {
|
||||
let check_result: Result<bool, String> = panic::catch_unwind(|| {
|
||||
let check_result: Result<bool, String> = panic_catch(|| {
|
||||
let chunk_proofs = c_char_to_vec(chunk_proofs);
|
||||
let chunk_proofs = serde_json::from_slice::<Vec<ChunkProof>>(&chunk_proofs)
|
||||
.map_err(|e| format!("failed to deserialize chunk proofs: {e:?}"))?;
|
||||
@@ -102,7 +103,7 @@ pub unsafe extern "C" fn gen_batch_proof(
|
||||
chunk_hashes: *const c_char,
|
||||
chunk_proofs: *const c_char,
|
||||
) -> *const c_char {
|
||||
let proof_result: Result<Vec<u8>, String> = panic::catch_unwind(|| {
|
||||
let proof_result: Result<Vec<u8>, String> = panic_catch(|| {
|
||||
let chunk_hashes = c_char_to_vec(chunk_hashes);
|
||||
let chunk_proofs = c_char_to_vec(chunk_proofs);
|
||||
|
||||
@@ -151,7 +152,7 @@ pub unsafe extern "C" fn verify_batch_proof(proof: *const c_char) -> c_char {
|
||||
let proof = c_char_to_vec(proof);
|
||||
let proof = serde_json::from_slice::<BatchProof>(proof.as_slice()).unwrap();
|
||||
|
||||
let verified = panic::catch_unwind(|| VERIFIER.get().unwrap().verify_agg_evm_proof(proof));
|
||||
let verified = panic_catch(|| VERIFIER.get().unwrap().verify_agg_evm_proof(proof));
|
||||
verified.unwrap_or(false) as c_char
|
||||
}
|
||||
|
||||
|
||||
@@ -1,7 +1,8 @@
|
||||
use crate::{
|
||||
types::ProofResult,
|
||||
utils::{
|
||||
c_char_to_str, c_char_to_vec, file_exists, string_to_c_char, vec_to_c_char, OUTPUT_DIR,
|
||||
c_char_to_str, c_char_to_vec, file_exists, panic_catch, string_to_c_char, vec_to_c_char,
|
||||
OUTPUT_DIR,
|
||||
},
|
||||
};
|
||||
use libc::c_char;
|
||||
@@ -11,7 +12,7 @@ use prover::{
|
||||
zkevm::{Prover, Verifier},
|
||||
BlockTrace, ChunkProof,
|
||||
};
|
||||
use std::{cell::OnceCell, env, panic, ptr::null};
|
||||
use std::{cell::OnceCell, env, ptr::null};
|
||||
|
||||
static mut PROVER: OnceCell<Prover> = OnceCell::new();
|
||||
static mut VERIFIER: OnceCell<Verifier> = OnceCell::new();
|
||||
@@ -55,7 +56,7 @@ pub unsafe extern "C" fn init_chunk_verifier(params_dir: *const c_char, assets_d
|
||||
/// # Safety
|
||||
#[no_mangle]
|
||||
pub unsafe extern "C" fn get_chunk_vk() -> *const c_char {
|
||||
let vk_result = panic::catch_unwind(|| PROVER.get_mut().unwrap().get_vk());
|
||||
let vk_result = panic_catch(|| PROVER.get_mut().unwrap().get_vk());
|
||||
|
||||
vk_result
|
||||
.ok()
|
||||
@@ -66,7 +67,7 @@ pub unsafe extern "C" fn get_chunk_vk() -> *const c_char {
|
||||
/// # Safety
|
||||
#[no_mangle]
|
||||
pub unsafe extern "C" fn gen_chunk_proof(block_traces: *const c_char) -> *const c_char {
|
||||
let proof_result: Result<Vec<u8>, String> = panic::catch_unwind(|| {
|
||||
let proof_result: Result<Vec<u8>, String> = panic_catch(|| {
|
||||
let block_traces = c_char_to_vec(block_traces);
|
||||
let block_traces = serde_json::from_slice::<Vec<BlockTrace>>(&block_traces)
|
||||
.map_err(|e| format!("failed to deserialize block traces: {e:?}"))?;
|
||||
@@ -101,6 +102,6 @@ pub unsafe extern "C" fn verify_chunk_proof(proof: *const c_char) -> c_char {
|
||||
let proof = c_char_to_vec(proof);
|
||||
let proof = serde_json::from_slice::<ChunkProof>(proof.as_slice()).unwrap();
|
||||
|
||||
let verified = panic::catch_unwind(|| VERIFIER.get().unwrap().verify_chunk_proof(proof));
|
||||
let verified = panic_catch(|| VERIFIER.get().unwrap().verify_chunk_proof(proof));
|
||||
verified.unwrap_or(false) as c_char
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ use std::{
|
||||
env,
|
||||
ffi::{CStr, CString},
|
||||
os::raw::c_char,
|
||||
panic::{catch_unwind, AssertUnwindSafe},
|
||||
path::PathBuf,
|
||||
};
|
||||
|
||||
@@ -34,3 +35,15 @@ pub(crate) fn file_exists(dir: &str, filename: &str) -> bool {
|
||||
|
||||
path.exists()
|
||||
}
|
||||
|
||||
pub(crate) fn panic_catch<F: FnOnce() -> R, R>(f: F) -> Result<R, String> {
|
||||
catch_unwind(AssertUnwindSafe(f)).map_err(|err| {
|
||||
if let Some(s) = err.downcast_ref::<String>() {
|
||||
s.to_string()
|
||||
} else if let Some(s) = err.downcast_ref::<&str>() {
|
||||
s.to_string()
|
||||
} else {
|
||||
format!("unable to get panic info {err:?}")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -103,7 +103,7 @@ const (
|
||||
ProverTaskFailureTypeUndefined ProverTaskFailureType = iota
|
||||
// ProverTaskFailureTypeTimeout prover task failure of timeout
|
||||
ProverTaskFailureTypeTimeout
|
||||
// ProverTaskFailureTypeSubmitStatusNotOk prover task failure of validated failed by coordinator
|
||||
// ProverTaskFailureTypeSubmitStatusNotOk prover task failure of submit status not ok
|
||||
ProverTaskFailureTypeSubmitStatusNotOk
|
||||
// ProverTaskFailureTypeVerifiedFailed prover task failure of verified failed by coordinator
|
||||
ProverTaskFailureTypeVerifiedFailed
|
||||
|
||||
@@ -263,7 +263,7 @@ type ChunkInfo struct {
|
||||
|
||||
// ChunkProof includes the proof info that are required for chunk verification and rollup.
|
||||
type ChunkProof struct {
|
||||
StorageTrace []byte `json:"storage_trace"`
|
||||
StorageTrace []byte `json:"storage_trace,omitempty"`
|
||||
Protocol []byte `json:"protocol"`
|
||||
Proof []byte `json:"proof"`
|
||||
Instances []byte `json:"instances"`
|
||||
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"runtime/debug"
|
||||
)
|
||||
|
||||
var tag = "v4.3.7"
|
||||
var tag = "v4.3.17"
|
||||
|
||||
var commit = func() string {
|
||||
if info, ok := debug.ReadBuildInfo(); ok {
|
||||
|
||||
66
contracts/scripts/foundry/DeployL1ScrollOwner.s.sol
Normal file
66
contracts/scripts/foundry/DeployL1ScrollOwner.s.sol
Normal file
@@ -0,0 +1,66 @@
|
||||
// SPDX-License-Identifier: UNLICENSED
|
||||
pragma solidity ^0.8.10;
|
||||
|
||||
import {Script} from "forge-std/Script.sol";
|
||||
import {console} from "forge-std/console.sol";
|
||||
|
||||
import {TimelockController} from "@openzeppelin/contracts/governance/TimelockController.sol";
|
||||
|
||||
import {ScrollOwner} from "../../src/misc/ScrollOwner.sol";
|
||||
|
||||
// solhint-disable state-visibility
|
||||
// solhint-disable var-name-mixedcase
|
||||
|
||||
contract DeployL1ScrollOwner is Script {
|
||||
string NETWORK = vm.envString("NETWORK");
|
||||
|
||||
uint256 L1_DEPLOYER_PRIVATE_KEY = vm.envUint("L1_DEPLOYER_PRIVATE_KEY");
|
||||
|
||||
address SCROLL_MULTISIG_ADDR = vm.envAddress("L1_SCROLL_MULTISIG_ADDR");
|
||||
|
||||
address SECURITY_COUNCIL_ADDR = vm.envAddress("L1_SECURITY_COUNCIL_ADDR");
|
||||
|
||||
address L1_PROPOSAL_EXECUTOR_ADDR = vm.envAddress("L1_PROPOSAL_EXECUTOR_ADDR");
|
||||
|
||||
function run() external {
|
||||
vm.startBroadcast(L1_DEPLOYER_PRIVATE_KEY);
|
||||
|
||||
deployScrollOwner();
|
||||
|
||||
if (keccak256(abi.encodePacked(NETWORK)) == keccak256(abi.encodePacked("sepolia"))) {
|
||||
// for sepolia
|
||||
deployTimelockController("1D", 1 minutes);
|
||||
deployTimelockController("7D", 7 minutes);
|
||||
deployTimelockController("14D", 14 minutes);
|
||||
} else if (keccak256(abi.encodePacked(NETWORK)) == keccak256(abi.encodePacked("mainnet"))) {
|
||||
// for mainnet
|
||||
deployTimelockController("1D", 1 days);
|
||||
deployTimelockController("7D", 7 days);
|
||||
deployTimelockController("14D", 14 days);
|
||||
}
|
||||
|
||||
vm.stopBroadcast();
|
||||
}
|
||||
|
||||
function deployScrollOwner() internal {
|
||||
ScrollOwner owner = new ScrollOwner();
|
||||
|
||||
logAddress("L1_SCROLL_OWNER_ADDR", address(owner));
|
||||
}
|
||||
|
||||
function deployTimelockController(string memory label, uint256 delay) internal {
|
||||
address[] memory proposers = new address[](1);
|
||||
address[] memory executors = new address[](1);
|
||||
|
||||
proposers[0] = SCROLL_MULTISIG_ADDR;
|
||||
executors[0] = L1_PROPOSAL_EXECUTOR_ADDR;
|
||||
|
||||
TimelockController timelock = new TimelockController(delay, proposers, executors, SECURITY_COUNCIL_ADDR);
|
||||
|
||||
logAddress(string(abi.encodePacked("L1_", label, "_TIMELOCK_ADDR")), address(timelock));
|
||||
}
|
||||
|
||||
function logAddress(string memory name, address addr) internal view {
|
||||
console.log(string(abi.encodePacked(name, "=", vm.toString(address(addr)))));
|
||||
}
|
||||
}
|
||||
49
contracts/scripts/foundry/DeployL2RateLimiter.s.sol
Normal file
49
contracts/scripts/foundry/DeployL2RateLimiter.s.sol
Normal file
@@ -0,0 +1,49 @@
|
||||
// SPDX-License-Identifier: UNLICENSED
|
||||
pragma solidity ^0.8.10;
|
||||
|
||||
import {Script} from "forge-std/Script.sol";
|
||||
import {console} from "forge-std/console.sol";
|
||||
|
||||
import {ScrollGatewayBase} from "../../src/libraries/gateway/ScrollGatewayBase.sol";
|
||||
import {ScrollMessengerBase} from "../../src/libraries/ScrollMessengerBase.sol";
|
||||
|
||||
import {ETHRateLimiter} from "../../src/rate-limiter/ETHRateLimiter.sol";
|
||||
import {TokenRateLimiter} from "../../src/rate-limiter/TokenRateLimiter.sol";
|
||||
|
||||
contract DeployL2RateLimiter is Script {
|
||||
uint256 L2_DEPLOYER_PRIVATE_KEY = vm.envUint("L2_DEPLOYER_PRIVATE_KEY");
|
||||
|
||||
address L2_SCROLL_MESSENGER_PROXY_ADDR = vm.envAddress("L2_SCROLL_MESSENGER_PROXY_ADDR");
|
||||
|
||||
uint256 RATE_LIMITER_PERIOD_LENGTH = vm.envUint("RATE_LIMITER_PERIOD_LENGTH");
|
||||
uint104 ETH_TOTAL_LIMIT = uint104(vm.envUint("ETH_TOTAL_LIMIT"));
|
||||
|
||||
function run() external {
|
||||
vm.startBroadcast(L2_DEPLOYER_PRIVATE_KEY);
|
||||
|
||||
deployETHRateLimiter();
|
||||
deployTokenRateLimiter();
|
||||
|
||||
vm.stopBroadcast();
|
||||
}
|
||||
|
||||
function deployETHRateLimiter() internal {
|
||||
ETHRateLimiter limiter = new ETHRateLimiter(
|
||||
RATE_LIMITER_PERIOD_LENGTH,
|
||||
L2_SCROLL_MESSENGER_PROXY_ADDR,
|
||||
ETH_TOTAL_LIMIT
|
||||
);
|
||||
|
||||
logAddress("L2_ETH_RATE_LIMITER_ADDR", address(limiter));
|
||||
}
|
||||
|
||||
function deployTokenRateLimiter() internal {
|
||||
TokenRateLimiter limiter = new TokenRateLimiter(RATE_LIMITER_PERIOD_LENGTH);
|
||||
|
||||
logAddress("L2_TOKEN_RATE_LIMITER_ADDR", address(limiter));
|
||||
}
|
||||
|
||||
function logAddress(string memory name, address addr) internal view {
|
||||
console.log(string(abi.encodePacked(name, "=", vm.toString(address(addr)))));
|
||||
}
|
||||
}
|
||||
66
contracts/scripts/foundry/DeployL2ScrollOwner.s.sol
Normal file
66
contracts/scripts/foundry/DeployL2ScrollOwner.s.sol
Normal file
@@ -0,0 +1,66 @@
|
||||
// SPDX-License-Identifier: UNLICENSED
|
||||
pragma solidity ^0.8.10;
|
||||
|
||||
import {Script} from "forge-std/Script.sol";
|
||||
import {console} from "forge-std/console.sol";
|
||||
|
||||
import {TimelockController} from "@openzeppelin/contracts/governance/TimelockController.sol";
|
||||
|
||||
import {ScrollOwner} from "../../src/misc/ScrollOwner.sol";
|
||||
|
||||
// solhint-disable state-visibility
|
||||
// solhint-disable var-name-mixedcase
|
||||
|
||||
contract DeployL2ScrollOwner is Script {
|
||||
string NETWORK = vm.envString("NETWORK");
|
||||
|
||||
uint256 L2_DEPLOYER_PRIVATE_KEY = vm.envUint("L2_DEPLOYER_PRIVATE_KEY");
|
||||
|
||||
address SCROLL_MULTISIG_ADDR = vm.envAddress("L2_SCROLL_MULTISIG_ADDR");
|
||||
|
||||
address SECURITY_COUNCIL_ADDR = vm.envAddress("L2_SECURITY_COUNCIL_ADDR");
|
||||
|
||||
address L2_PROPOSAL_EXECUTOR_ADDR = vm.envAddress("L2_PROPOSAL_EXECUTOR_ADDR");
|
||||
|
||||
function run() external {
|
||||
vm.startBroadcast(L2_DEPLOYER_PRIVATE_KEY);
|
||||
|
||||
deployScrollOwner();
|
||||
|
||||
if (keccak256(abi.encodePacked(NETWORK)) == keccak256(abi.encodePacked("sepolia"))) {
|
||||
// for sepolia
|
||||
deployTimelockController("1D", 1 minutes);
|
||||
deployTimelockController("7D", 7 minutes);
|
||||
deployTimelockController("14D", 14 minutes);
|
||||
} else if (keccak256(abi.encodePacked(NETWORK)) == keccak256(abi.encodePacked("mainnet"))) {
|
||||
// for mainnet
|
||||
deployTimelockController("1D", 1 days);
|
||||
deployTimelockController("7D", 7 days);
|
||||
deployTimelockController("14D", 14 days);
|
||||
}
|
||||
|
||||
vm.stopBroadcast();
|
||||
}
|
||||
|
||||
function deployScrollOwner() internal {
|
||||
ScrollOwner owner = new ScrollOwner();
|
||||
|
||||
logAddress("L2_SCROLL_OWNER_ADDR", address(owner));
|
||||
}
|
||||
|
||||
function deployTimelockController(string memory label, uint256 delay) internal {
|
||||
address[] memory proposers = new address[](1);
|
||||
address[] memory executors = new address[](1);
|
||||
|
||||
proposers[0] = SCROLL_MULTISIG_ADDR;
|
||||
executors[0] = L2_PROPOSAL_EXECUTOR_ADDR;
|
||||
|
||||
TimelockController timelock = new TimelockController(delay, proposers, executors, SECURITY_COUNCIL_ADDR);
|
||||
|
||||
logAddress(string(abi.encodePacked("L2_", label, "_TIMELOCK_ADDR")), address(timelock));
|
||||
}
|
||||
|
||||
function logAddress(string memory name, address addr) internal view {
|
||||
console.log(string(abi.encodePacked(name, "=", vm.toString(address(addr)))));
|
||||
}
|
||||
}
|
||||
263
contracts/scripts/foundry/InitializeL1ScrollOwner.s.sol
Normal file
263
contracts/scripts/foundry/InitializeL1ScrollOwner.s.sol
Normal file
@@ -0,0 +1,263 @@
|
||||
// SPDX-License-Identifier: UNLICENSED
|
||||
pragma solidity ^0.8.10;
|
||||
|
||||
import {Script} from "forge-std/Script.sol";
|
||||
|
||||
import {Ownable} from "@openzeppelin/contracts/access/Ownable.sol";
|
||||
import {ProxyAdmin} from "@openzeppelin/contracts/proxy/transparent/ProxyAdmin.sol";
|
||||
|
||||
import {L1ScrollMessenger} from "../../src/L1/L1ScrollMessenger.sol";
|
||||
import {L1USDCGateway} from "../../src/L1/gateways/usdc/L1USDCGateway.sol";
|
||||
import {EnforcedTxGateway} from "../../src/L1/gateways/EnforcedTxGateway.sol";
|
||||
import {L1CustomERC20Gateway} from "../../src/L1/gateways/L1CustomERC20Gateway.sol";
|
||||
import {L1ERC1155Gateway} from "../../src/L1/gateways/L1ERC1155Gateway.sol";
|
||||
import {L1ERC721Gateway} from "../../src/L1/gateways/L1ERC721Gateway.sol";
|
||||
import {L1GatewayRouter} from "../../src/L1/gateways/L1GatewayRouter.sol";
|
||||
import {L1MessageQueue} from "../../src/L1/rollup/L1MessageQueue.sol";
|
||||
import {ScrollMessengerBase} from "../../src/libraries/ScrollMessengerBase.sol";
|
||||
import {L2GasPriceOracle} from "../../src/L1/rollup/L2GasPriceOracle.sol";
|
||||
import {MultipleVersionRollupVerifier} from "../../src/L1/rollup/MultipleVersionRollupVerifier.sol";
|
||||
import {ScrollChain} from "../../src/L1/rollup/ScrollChain.sol";
|
||||
import {ScrollOwner} from "../../src/misc/ScrollOwner.sol";
|
||||
import {Whitelist} from "../../src/L2/predeploys/Whitelist.sol";
|
||||
|
||||
// solhint-disable max-states-count
|
||||
// solhint-disable state-visibility
|
||||
// solhint-disable var-name-mixedcase
|
||||
|
||||
/// @notice Configures per-function access rules on the L1 `ScrollOwner`, grants the
/// governance roles, and finally transfers ownership of every L1 contract to it.
/// @dev All addresses are read from environment variables. The script must be
/// broadcast by the current owner/admin (the deployer key), whose admin role is
/// revoked at the end of `grantRoles`.
contract InitializeL1ScrollOwner is Script {
    // Deployer key used to broadcast; also the current admin that this script
    // hands control away from.
    uint256 L1_DEPLOYER_PRIVATE_KEY = vm.envUint("L1_DEPLOYER_PRIVATE_KEY");

    // Role identifiers passed to `ScrollOwner.updateAccess`; names encode the
    // intended executor and timelock delay.
    bytes32 constant SECURITY_COUNCIL_NO_DELAY_ROLE = keccak256("SECURITY_COUNCIL_NO_DELAY_ROLE");
    bytes32 constant SCROLL_MULTISIG_NO_DELAY_ROLE = keccak256("SCROLL_MULTISIG_NO_DELAY_ROLE");

    bytes32 constant TIMELOCK_1DAY_DELAY_ROLE = keccak256("TIMELOCK_1DAY_DELAY_ROLE");
    bytes32 constant TIMELOCK_7DAY_DELAY_ROLE = keccak256("TIMELOCK_7DAY_DELAY_ROLE");

    // Accounts that receive the roles above.
    address SCROLL_MULTISIG_ADDR = vm.envAddress("L1_SCROLL_MULTISIG_ADDR");
    address SECURITY_COUNCIL_ADDR = vm.envAddress("L1_SECURITY_COUNCIL_ADDR");

    address L1_SCROLL_OWNER_ADDR = vm.envAddress("L1_SCROLL_OWNER_ADDR");
    address L1_1D_TIMELOCK_ADDR = vm.envAddress("L1_1D_TIMELOCK_ADDR");
    address L1_7D_TIMELOCK_ADDR = vm.envAddress("L1_7D_TIMELOCK_ADDR");
    address L1_14D_TIMELOCK_ADDR = vm.envAddress("L1_14D_TIMELOCK_ADDR");

    // L1 contracts whose access rules and ownership are configured below.
    address L1_PROXY_ADMIN_ADDR = vm.envAddress("L1_PROXY_ADMIN_ADDR");
    address L1_SCROLL_CHAIN_PROXY_ADDR = vm.envAddress("L1_SCROLL_CHAIN_PROXY_ADDR");
    address L1_MESSAGE_QUEUE_PROXY_ADDR = vm.envAddress("L1_MESSAGE_QUEUE_PROXY_ADDR");
    address L2_GAS_PRICE_ORACLE_PROXY_ADDR = vm.envAddress("L2_GAS_PRICE_ORACLE_PROXY_ADDR");
    address L1_SCROLL_MESSENGER_PROXY_ADDR = vm.envAddress("L1_SCROLL_MESSENGER_PROXY_ADDR");
    address L1_GATEWAY_ROUTER_PROXY_ADDR = vm.envAddress("L1_GATEWAY_ROUTER_PROXY_ADDR");
    address L1_CUSTOM_ERC20_GATEWAY_PROXY_ADDR = vm.envAddress("L1_CUSTOM_ERC20_GATEWAY_PROXY_ADDR");
    address L1_ETH_GATEWAY_PROXY_ADDR = vm.envAddress("L1_ETH_GATEWAY_PROXY_ADDR");
    address L1_STANDARD_ERC20_GATEWAY_PROXY_ADDR = vm.envAddress("L1_STANDARD_ERC20_GATEWAY_PROXY_ADDR");
    // address L1_USDC_GATEWAY_PROXY_ADDR = vm.envAddress("L1_USDC_GATEWAY_PROXY_ADDR");
    address L1_WETH_GATEWAY_PROXY_ADDR = vm.envAddress("L1_WETH_GATEWAY_PROXY_ADDR");
    address L1_ERC721_GATEWAY_PROXY_ADDR = vm.envAddress("L1_ERC721_GATEWAY_PROXY_ADDR");
    address L1_ERC1155_GATEWAY_PROXY_ADDR = vm.envAddress("L1_ERC1155_GATEWAY_PROXY_ADDR");
    address L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR = vm.envAddress("L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR");
    // address L1_ENFORCED_TX_GATEWAY_PROXY_ADDR = vm.envAddress("L1_ENFORCED_TX_GATEWAY_PROXY_ADDR");
    address L1_WHITELIST_ADDR = vm.envAddress("L1_WHITELIST_ADDR");

    // The ScrollOwner instance that ends up owning everything.
    ScrollOwner owner;

    /// Entry point: config access rules first, then grant roles, then transfer
    /// ownership — so access is fully set up before control leaves the deployer.
    function run() external {
        vm.startBroadcast(L1_DEPLOYER_PRIVATE_KEY);

        owner = ScrollOwner(payable(L1_SCROLL_OWNER_ADDR));

        // @note we don't config 14D access, since the default admin is a 14D timelock which can access all methods.
        configProxyAdmin();
        configScrollChain();
        configL1MessageQueue();
        configL1ScrollMessenger();
        configL2GasPriceOracle();
        configL1Whitelist();
        configMultipleVersionRollupVerifier();
        configL1GatewayRouter();
        configL1CustomERC20Gateway();
        configL1ERC721Gateway();
        configL1ERC1155Gateway();

        // @note comments out for testnet
        // configEnforcedTxGateway();
        // configL1USDCGateway();

        grantRoles();
        transferOwnership();

        vm.stopBroadcast();
    }

    /// Transfers `Ownable` ownership of every L1 contract to `owner`.
    function transferOwnership() internal {
        Ownable(L1_PROXY_ADMIN_ADDR).transferOwnership(address(owner));
        Ownable(L1_SCROLL_CHAIN_PROXY_ADDR).transferOwnership(address(owner));
        Ownable(L1_MESSAGE_QUEUE_PROXY_ADDR).transferOwnership(address(owner));
        Ownable(L1_SCROLL_MESSENGER_PROXY_ADDR).transferOwnership(address(owner));
        // Ownable(L1_ENFORCED_TX_GATEWAY_PROXY_ADDR).transferOwnership(address(owner));
        Ownable(L2_GAS_PRICE_ORACLE_PROXY_ADDR).transferOwnership(address(owner));
        Ownable(L1_WHITELIST_ADDR).transferOwnership(address(owner));
        Ownable(L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR).transferOwnership(address(owner));
        Ownable(L1_GATEWAY_ROUTER_PROXY_ADDR).transferOwnership(address(owner));
        Ownable(L1_CUSTOM_ERC20_GATEWAY_PROXY_ADDR).transferOwnership(address(owner));
        Ownable(L1_ETH_GATEWAY_PROXY_ADDR).transferOwnership(address(owner));
        Ownable(L1_STANDARD_ERC20_GATEWAY_PROXY_ADDR).transferOwnership(address(owner));
        // Ownable(L1_USDC_GATEWAY_PROXY_ADDR).transferOwnership(address(owner));
        Ownable(L1_WETH_GATEWAY_PROXY_ADDR).transferOwnership(address(owner));
        Ownable(L1_ERC721_GATEWAY_PROXY_ADDR).transferOwnership(address(owner));
        Ownable(L1_ERC1155_GATEWAY_PROXY_ADDR).transferOwnership(address(owner));
    }

    /// Grants the governance roles on ScrollOwner, then swaps the admin role
    /// from the deployer to the 14-day timelock (grant before revoke, so there
    /// is always at least one admin).
    function grantRoles() internal {
        owner.grantRole(SECURITY_COUNCIL_NO_DELAY_ROLE, SECURITY_COUNCIL_ADDR);
        owner.grantRole(SCROLL_MULTISIG_NO_DELAY_ROLE, SCROLL_MULTISIG_ADDR);
        owner.grantRole(TIMELOCK_1DAY_DELAY_ROLE, L1_1D_TIMELOCK_ADDR);
        owner.grantRole(TIMELOCK_7DAY_DELAY_ROLE, L1_7D_TIMELOCK_ADDR);

        owner.grantRole(owner.DEFAULT_ADMIN_ROLE(), L1_14D_TIMELOCK_ADDR);
        owner.revokeRole(owner.DEFAULT_ADMIN_ROLE(), vm.addr(L1_DEPLOYER_PRIVATE_KEY));
    }

    /// ProxyAdmin: security council may upgrade proxies without delay.
    function configProxyAdmin() internal {
        bytes4[] memory _selectors;

        // no delay, security council
        _selectors = new bytes4[](2);
        _selectors[0] = ProxyAdmin.upgrade.selector;
        _selectors[1] = ProxyAdmin.upgradeAndCall.selector;
        owner.updateAccess(L1_PROXY_ADMIN_ADDR, _selectors, SECURITY_COUNCIL_NO_DELAY_ROLE, true);
    }

    /// ScrollChain: multisig gets immediate emergency controls (revert/pause/
    /// remove); adding sequencers/provers goes through a 1-day timelock.
    function configScrollChain() internal {
        bytes4[] memory _selectors;

        // no delay, scroll multisig
        _selectors = new bytes4[](5);
        _selectors[0] = ScrollChain.revertBatch.selector;
        _selectors[1] = ScrollChain.removeSequencer.selector;
        _selectors[2] = ScrollChain.removeProver.selector;
        _selectors[3] = ScrollChain.updateMaxNumTxInChunk.selector;
        _selectors[4] = ScrollChain.setPause.selector;
        owner.updateAccess(L1_SCROLL_CHAIN_PROXY_ADDR, _selectors, SCROLL_MULTISIG_NO_DELAY_ROLE, true);

        // delay 1 day, scroll multisig
        _selectors = new bytes4[](2);
        _selectors[0] = ScrollChain.addSequencer.selector;
        _selectors[1] = ScrollChain.addProver.selector;
        owner.updateAccess(L1_SCROLL_CHAIN_PROXY_ADDR, _selectors, TIMELOCK_1DAY_DELAY_ROLE, true);
    }

    /// L1MessageQueue: gas oracle and gas-limit updates behind a 1-day timelock.
    function configL1MessageQueue() internal {
        bytes4[] memory _selectors;

        // delay 1 day, scroll multisig
        _selectors = new bytes4[](2);
        _selectors[0] = L1MessageQueue.updateGasOracle.selector;
        _selectors[1] = L1MessageQueue.updateMaxGasLimit.selector;
        owner.updateAccess(L1_MESSAGE_QUEUE_PROXY_ADDR, _selectors, TIMELOCK_1DAY_DELAY_ROLE, true);
    }

    /// L1ScrollMessenger: pause is immediate (multisig); replay-times config is
    /// behind a 1-day timelock.
    function configL1ScrollMessenger() internal {
        bytes4[] memory _selectors;

        // no delay, scroll multisig
        _selectors = new bytes4[](1);
        _selectors[0] = ScrollMessengerBase.setPause.selector;
        owner.updateAccess(L1_SCROLL_MESSENGER_PROXY_ADDR, _selectors, SCROLL_MULTISIG_NO_DELAY_ROLE, true);

        // delay 1 day, scroll multisig
        _selectors = new bytes4[](1);
        _selectors[0] = L1ScrollMessenger.updateMaxReplayTimes.selector;
        owner.updateAccess(L1_SCROLL_MESSENGER_PROXY_ADDR, _selectors, TIMELOCK_1DAY_DELAY_ROLE, true);
    }

    /// L2GasPriceOracle: multisig may update intrinsic-gas params immediately.
    function configL2GasPriceOracle() internal {
        bytes4[] memory _selectors;

        // no delay, scroll multisig
        _selectors = new bytes4[](1);
        _selectors[0] = L2GasPriceOracle.setIntrinsicParams.selector;
        owner.updateAccess(L2_GAS_PRICE_ORACLE_PROXY_ADDR, _selectors, SCROLL_MULTISIG_NO_DELAY_ROLE, true);
    }

    /// Whitelist: status changes behind a 1-day timelock.
    function configL1Whitelist() internal {
        bytes4[] memory _selectors;

        // delay 1 day, scroll multisig
        _selectors = new bytes4[](1);
        _selectors[0] = Whitelist.updateWhitelistStatus.selector;
        owner.updateAccess(L1_WHITELIST_ADDR, _selectors, TIMELOCK_1DAY_DELAY_ROLE, true);
    }

    /// Verifier registry: security council may swap verifiers immediately; the
    /// multisig path for the same selector goes through a 7-day timelock.
    function configMultipleVersionRollupVerifier() internal {
        bytes4[] memory _selectors;

        // no delay, security council
        _selectors = new bytes4[](1);
        _selectors[0] = MultipleVersionRollupVerifier.updateVerifier.selector;
        owner.updateAccess(L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR, _selectors, SECURITY_COUNCIL_NO_DELAY_ROLE, true);

        // delay 7 day, scroll multisig
        _selectors = new bytes4[](1);
        _selectors[0] = MultipleVersionRollupVerifier.updateVerifier.selector;
        owner.updateAccess(L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR, _selectors, TIMELOCK_7DAY_DELAY_ROLE, true);
    }

    /// GatewayRouter: ERC20 gateway remapping behind a 1-day timelock.
    function configL1GatewayRouter() internal {
        bytes4[] memory _selectors;

        // delay 1 day, scroll multisig
        _selectors = new bytes4[](1);
        _selectors[0] = L1GatewayRouter.setERC20Gateway.selector;
        owner.updateAccess(L1_GATEWAY_ROUTER_PROXY_ADDR, _selectors, TIMELOCK_1DAY_DELAY_ROLE, true);
    }

    /// CustomERC20Gateway: token-mapping updates behind a 1-day timelock.
    function configL1CustomERC20Gateway() internal {
        bytes4[] memory _selectors;

        // delay 1 day, scroll multisig
        _selectors = new bytes4[](1);
        _selectors[0] = L1CustomERC20Gateway.updateTokenMapping.selector;
        owner.updateAccess(L1_CUSTOM_ERC20_GATEWAY_PROXY_ADDR, _selectors, TIMELOCK_1DAY_DELAY_ROLE, true);
    }

    /// ERC721Gateway: token-mapping updates behind a 1-day timelock.
    function configL1ERC721Gateway() internal {
        bytes4[] memory _selectors;

        // delay 1 day, scroll multisig
        _selectors = new bytes4[](1);
        _selectors[0] = L1ERC721Gateway.updateTokenMapping.selector;
        owner.updateAccess(L1_ERC721_GATEWAY_PROXY_ADDR, _selectors, TIMELOCK_1DAY_DELAY_ROLE, true);
    }

    /// ERC1155Gateway: token-mapping updates behind a 1-day timelock.
    function configL1ERC1155Gateway() internal {
        bytes4[] memory _selectors;

        // delay 1 day, scroll multisig
        _selectors = new bytes4[](1);
        _selectors[0] = L1ERC1155Gateway.updateTokenMapping.selector;
        owner.updateAccess(L1_ERC1155_GATEWAY_PROXY_ADDR, _selectors, TIMELOCK_1DAY_DELAY_ROLE, true);
    }

    /*
    function configL1USDCGateway() internal {
        bytes4[] memory _selectors;

        // delay 7 day, scroll multisig
        _selectors = new bytes4[](3);
        _selectors[0] = L1USDCGateway.updateCircleCaller.selector;
        _selectors[1] = L1USDCGateway.pauseDeposit.selector;
        _selectors[2] = L1USDCGateway.pauseWithdraw.selector;
        owner.updateAccess(L1_USDC_GATEWAY_PROXY_ADDR, _selectors, TIMELOCK_7DAY_DELAY_ROLE, true);
    }

    function configEnforcedTxGateway() internal {
        bytes4[] memory _selectors;

        // no delay, scroll multisig
        _selectors = new bytes4[](1);
        _selectors[0] = EnforcedTxGateway.setPause.selector;
        owner.updateAccess(L1_ENFORCED_TX_GATEWAY_PROXY_ADDR, _selectors, SCROLL_MULTISIG_NO_DELAY_ROLE, true);
    }
    */
}
|
||||
55
contracts/scripts/foundry/InitializeL2RateLimiter.s.sol
Normal file
55
contracts/scripts/foundry/InitializeL2RateLimiter.s.sol
Normal file
@@ -0,0 +1,55 @@
|
||||
// SPDX-License-Identifier: UNLICENSED
|
||||
pragma solidity ^0.8.10;
|
||||
|
||||
import {Script} from "forge-std/Script.sol";
|
||||
|
||||
import {Ownable} from "@openzeppelin/contracts/access/Ownable.sol";
|
||||
import {AccessControlEnumerable} from "@openzeppelin/contracts/access/AccessControlEnumerable.sol";
|
||||
import {ProxyAdmin} from "@openzeppelin/contracts/proxy/transparent/ProxyAdmin.sol";
|
||||
|
||||
import {ScrollMessengerBase} from "../../src/libraries/ScrollMessengerBase.sol";
|
||||
import {ScrollGatewayBase} from "../../src/libraries/gateway/ScrollGatewayBase.sol";
|
||||
import {ETHRateLimiter} from "../../src/rate-limiter/ETHRateLimiter.sol";
|
||||
import {TokenRateLimiter} from "../../src/rate-limiter/TokenRateLimiter.sol";
|
||||
|
||||
// solhint-disable max-states-count
|
||||
// solhint-disable state-visibility
|
||||
// solhint-disable var-name-mixedcase
|
||||
|
||||
/// @notice Wires the deployed rate limiters into the L2 messenger and gateways.
/// @dev ETH flow is limited at the messenger via `ETHRateLimiter`; ERC20 flows
/// are limited at each gateway via a shared `TokenRateLimiter`. All addresses
/// come from environment variables; broadcast by the deployer key.
contract InitializeL2RateLimiter is Script {
    uint256 L2_DEPLOYER_PRIVATE_KEY = vm.envUint("L2_DEPLOYER_PRIVATE_KEY");

    address L2_SCROLL_MESSENGER_PROXY_ADDR = vm.envAddress("L2_SCROLL_MESSENGER_PROXY_ADDR");
    address L2_CUSTOM_ERC20_GATEWAY_PROXY_ADDR = vm.envAddress("L2_CUSTOM_ERC20_GATEWAY_PROXY_ADDR");
    // NOTE(review): read but never used below — presumably ETH flow is covered by
    // the messenger's ETHRateLimiter; confirm the ETH gateway needs no limiter.
    address L2_ETH_GATEWAY_PROXY_ADDR = vm.envAddress("L2_ETH_GATEWAY_PROXY_ADDR");
    address L2_STANDARD_ERC20_GATEWAY_PROXY_ADDR = vm.envAddress("L2_STANDARD_ERC20_GATEWAY_PROXY_ADDR");
    address L2_DAI_GATEWAY_PROXY_ADDR = vm.envAddress("L2_DAI_GATEWAY_PROXY_ADDR");
    // address L2_USDC_GATEWAY_PROXY_ADDR = vm.envAddress("L2_USDC_GATEWAY_PROXY_ADDR");

    address L2_ETH_RATE_LIMITER_ADDR = vm.envAddress("L2_ETH_RATE_LIMITER_ADDR");
    address L2_TOKEN_RATE_LIMITER_ADDR = vm.envAddress("L2_TOKEN_RATE_LIMITER_ADDR");

    /// Entry point: attach the ETH limiter to the messenger, grant each ERC20
    /// gateway TOKEN_SPENDER_ROLE on the token limiter, then point each gateway
    /// at that limiter.
    function run() external {
        vm.startBroadcast(L2_DEPLOYER_PRIVATE_KEY);

        // ETH withdrawals are rate-limited at the messenger level.
        ScrollMessengerBase(payable(L2_SCROLL_MESSENGER_PROXY_ADDR)).updateRateLimiter(L2_ETH_RATE_LIMITER_ADDR);

        // Each token gateway must hold TOKEN_SPENDER_ROLE to consume limiter quota.
        bytes32 TOKEN_SPENDER_ROLE = TokenRateLimiter(L2_TOKEN_RATE_LIMITER_ADDR).TOKEN_SPENDER_ROLE();
        TokenRateLimiter(L2_TOKEN_RATE_LIMITER_ADDR).grantRole(TOKEN_SPENDER_ROLE, L2_CUSTOM_ERC20_GATEWAY_PROXY_ADDR);
        TokenRateLimiter(L2_TOKEN_RATE_LIMITER_ADDR).grantRole(
            TOKEN_SPENDER_ROLE,
            L2_STANDARD_ERC20_GATEWAY_PROXY_ADDR
        );
        TokenRateLimiter(L2_TOKEN_RATE_LIMITER_ADDR).grantRole(TOKEN_SPENDER_ROLE, L2_DAI_GATEWAY_PROXY_ADDR);

        ScrollGatewayBase(payable(L2_CUSTOM_ERC20_GATEWAY_PROXY_ADDR)).updateRateLimiter(L2_TOKEN_RATE_LIMITER_ADDR);
        ScrollGatewayBase(payable(L2_STANDARD_ERC20_GATEWAY_PROXY_ADDR)).updateRateLimiter(L2_TOKEN_RATE_LIMITER_ADDR);
        ScrollGatewayBase(payable(L2_DAI_GATEWAY_PROXY_ADDR)).updateRateLimiter(L2_TOKEN_RATE_LIMITER_ADDR);

        // @note comments out for now
        // limiter.grantRole(TOKEN_SPENDER_ROLE, L2_USDC_GATEWAY_PROXY_ADDR);
        // ScrollGatewayBase(payable(L2_USDC_GATEWAY_PROXY_ADDR)).updateRateLimiter(address(limiter));

        vm.stopBroadcast();
    }
}
|
||||
247
contracts/scripts/foundry/InitializeL2ScrollOwner.s.sol
Normal file
247
contracts/scripts/foundry/InitializeL2ScrollOwner.s.sol
Normal file
@@ -0,0 +1,247 @@
|
||||
// SPDX-License-Identifier: UNLICENSED
|
||||
pragma solidity ^0.8.10;
|
||||
|
||||
import {Script} from "forge-std/Script.sol";
|
||||
|
||||
import {Ownable} from "@openzeppelin/contracts/access/Ownable.sol";
|
||||
import {AccessControl} from "@openzeppelin/contracts/access/AccessControl.sol";
|
||||
import {AccessControlEnumerable} from "@openzeppelin/contracts/access/AccessControlEnumerable.sol";
|
||||
import {ProxyAdmin} from "@openzeppelin/contracts/proxy/transparent/ProxyAdmin.sol";
|
||||
|
||||
import {L2USDCGateway} from "../../src/L2/gateways/usdc/L2USDCGateway.sol";
|
||||
import {L2CustomERC20Gateway} from "../../src/L2/gateways/L2CustomERC20Gateway.sol";
|
||||
import {L2CustomERC20Gateway} from "../../src/L2/gateways/L2CustomERC20Gateway.sol";
|
||||
import {L2ERC1155Gateway} from "../../src/L2/gateways/L2ERC1155Gateway.sol";
|
||||
import {L2ERC721Gateway} from "../../src/L2/gateways/L2ERC721Gateway.sol";
|
||||
import {L2GatewayRouter} from "../../src/L2/gateways/L2GatewayRouter.sol";
|
||||
import {ScrollMessengerBase} from "../../src/libraries/ScrollMessengerBase.sol";
|
||||
import {L1GasPriceOracle} from "../../src/L2/predeploys/L1GasPriceOracle.sol";
|
||||
import {L2TxFeeVault} from "../../src/L2/predeploys/L2TxFeeVault.sol";
|
||||
import {Whitelist} from "../../src/L2/predeploys/Whitelist.sol";
|
||||
import {ScrollOwner} from "../../src/misc/ScrollOwner.sol";
|
||||
import {ETHRateLimiter} from "../../src/rate-limiter/ETHRateLimiter.sol";
|
||||
import {TokenRateLimiter} from "../../src/rate-limiter/TokenRateLimiter.sol";
|
||||
|
||||
// solhint-disable max-states-count
|
||||
// solhint-disable state-visibility
|
||||
// solhint-disable var-name-mixedcase
|
||||
|
||||
/// @notice L2 counterpart of the L1 owner-initialization script: configures
/// per-function access rules on the L2 `ScrollOwner`, grants governance roles,
/// and transfers ownership of all L2 contracts (including the rate limiters)
/// to it.
/// @dev All addresses are read from environment variables. Broadcast by the
/// deployer key, whose admin roles are revoked before the script finishes.
contract InitializeL2ScrollOwner is Script {
    // Deployer key used to broadcast; also the current admin being replaced.
    uint256 L2_DEPLOYER_PRIVATE_KEY = vm.envUint("L2_DEPLOYER_PRIVATE_KEY");

    // Role identifiers passed to `ScrollOwner.updateAccess`; names encode the
    // intended executor and timelock delay.
    bytes32 constant SECURITY_COUNCIL_NO_DELAY_ROLE = keccak256("SECURITY_COUNCIL_NO_DELAY_ROLE");
    bytes32 constant SCROLL_MULTISIG_NO_DELAY_ROLE = keccak256("SCROLL_MULTISIG_NO_DELAY_ROLE");

    bytes32 constant TIMELOCK_1DAY_DELAY_ROLE = keccak256("TIMELOCK_1DAY_DELAY_ROLE");
    bytes32 constant TIMELOCK_7DAY_DELAY_ROLE = keccak256("TIMELOCK_7DAY_DELAY_ROLE");

    // Accounts that receive the roles above.
    address SCROLL_MULTISIG_ADDR = vm.envAddress("L2_SCROLL_MULTISIG_ADDR");
    address SECURITY_COUNCIL_ADDR = vm.envAddress("L2_SECURITY_COUNCIL_ADDR");

    address L2_SCROLL_OWNER_ADDR = vm.envAddress("L2_SCROLL_OWNER_ADDR");
    address L2_1D_TIMELOCK_ADDR = vm.envAddress("L2_1D_TIMELOCK_ADDR");
    address L2_7D_TIMELOCK_ADDR = vm.envAddress("L2_7D_TIMELOCK_ADDR");
    address L2_14D_TIMELOCK_ADDR = vm.envAddress("L2_14D_TIMELOCK_ADDR");

    // L2 contracts whose access rules and ownership are configured below.
    address L2_PROXY_ADMIN_ADDR = vm.envAddress("L2_PROXY_ADMIN_ADDR");
    address L2_TX_FEE_VAULT_ADDR = vm.envAddress("L2_TX_FEE_VAULT_ADDR");
    address L1_GAS_PRICE_ORACLE_ADDR = vm.envAddress("L1_GAS_PRICE_ORACLE_ADDR");
    address L2_WHITELIST_ADDR = vm.envAddress("L2_WHITELIST_ADDR");
    address L2_MESSAGE_QUEUE_ADDR = vm.envAddress("L2_MESSAGE_QUEUE_ADDR");

    address L2_SCROLL_MESSENGER_PROXY_ADDR = vm.envAddress("L2_SCROLL_MESSENGER_PROXY_ADDR");
    address L2_GATEWAY_ROUTER_PROXY_ADDR = vm.envAddress("L2_GATEWAY_ROUTER_PROXY_ADDR");
    address L2_CUSTOM_ERC20_GATEWAY_PROXY_ADDR = vm.envAddress("L2_CUSTOM_ERC20_GATEWAY_PROXY_ADDR");
    address L2_ETH_GATEWAY_PROXY_ADDR = vm.envAddress("L2_ETH_GATEWAY_PROXY_ADDR");
    address L2_STANDARD_ERC20_GATEWAY_PROXY_ADDR = vm.envAddress("L2_STANDARD_ERC20_GATEWAY_PROXY_ADDR");
    // address L2_USDC_GATEWAY_PROXY_ADDR = vm.envAddress("L2_USDC_GATEWAY_PROXY_ADDR");
    address L2_WETH_GATEWAY_PROXY_ADDR = vm.envAddress("L2_WETH_GATEWAY_PROXY_ADDR");
    address L2_ERC721_GATEWAY_PROXY_ADDR = vm.envAddress("L2_ERC721_GATEWAY_PROXY_ADDR");
    address L2_ERC1155_GATEWAY_PROXY_ADDR = vm.envAddress("L2_ERC1155_GATEWAY_PROXY_ADDR");

    address L2_ETH_RATE_LIMITER_ADDR = vm.envAddress("L2_ETH_RATE_LIMITER_ADDR");
    address L2_TOKEN_RATE_LIMITER_ADDR = vm.envAddress("L2_TOKEN_RATE_LIMITER_ADDR");

    // The ScrollOwner instance that ends up owning everything.
    ScrollOwner owner;

    /// Entry point: config access rules first, then grant roles, then transfer
    /// ownership — so access is fully set up before control leaves the deployer.
    function run() external {
        vm.startBroadcast(L2_DEPLOYER_PRIVATE_KEY);

        owner = ScrollOwner(payable(L2_SCROLL_OWNER_ADDR));

        // @note we don't config 14D access, since the default admin is a 14D timelock which can access all methods.
        configProxyAdmin();
        configL1GasPriceOracle();
        configL2TxFeeVault();
        configL2Whitelist();
        configL2ScrollMessenger();
        configL2GatewayRouter();
        configL2CustomERC20Gateway();
        configL2ERC721Gateway();
        configL2ERC1155Gateway();
        configETHRateLimiter();
        configTokenRateLimiter();

        // @note comments out for testnet
        // configL2USDCGateway();

        grantRoles();
        transferOwnership();

        vm.stopBroadcast();
    }

    /// Transfers `Ownable` ownership of every L2 contract to `owner`, and swaps
    /// the TokenRateLimiter's AccessControl admin from the deployer to `owner`
    /// (grant before revoke, so there is always at least one admin).
    function transferOwnership() internal {
        Ownable(L2_PROXY_ADMIN_ADDR).transferOwnership(address(owner));
        Ownable(L2_MESSAGE_QUEUE_ADDR).transferOwnership(address(owner));
        Ownable(L1_GAS_PRICE_ORACLE_ADDR).transferOwnership(address(owner));
        Ownable(L2_TX_FEE_VAULT_ADDR).transferOwnership(address(owner));
        Ownable(L2_WHITELIST_ADDR).transferOwnership(address(owner));
        Ownable(L2_SCROLL_MESSENGER_PROXY_ADDR).transferOwnership(address(owner));
        Ownable(L2_GATEWAY_ROUTER_PROXY_ADDR).transferOwnership(address(owner));
        Ownable(L2_CUSTOM_ERC20_GATEWAY_PROXY_ADDR).transferOwnership(address(owner));
        Ownable(L2_ETH_GATEWAY_PROXY_ADDR).transferOwnership(address(owner));
        Ownable(L2_STANDARD_ERC20_GATEWAY_PROXY_ADDR).transferOwnership(address(owner));
        Ownable(L2_WETH_GATEWAY_PROXY_ADDR).transferOwnership(address(owner));
        Ownable(L2_ERC721_GATEWAY_PROXY_ADDR).transferOwnership(address(owner));
        Ownable(L2_ERC1155_GATEWAY_PROXY_ADDR).transferOwnership(address(owner));

        // Ownable(L2_USDC_GATEWAY_PROXY_ADDR).transferOwnership(address(owner));

        Ownable(L2_ETH_RATE_LIMITER_ADDR).transferOwnership(address(owner));

        // TokenRateLimiter uses AccessControl rather than Ownable.
        TokenRateLimiter tokenRateLimiter = TokenRateLimiter(L2_TOKEN_RATE_LIMITER_ADDR);
        tokenRateLimiter.grantRole(tokenRateLimiter.DEFAULT_ADMIN_ROLE(), address(owner));
        tokenRateLimiter.revokeRole(tokenRateLimiter.DEFAULT_ADMIN_ROLE(), vm.addr(L2_DEPLOYER_PRIVATE_KEY));
    }

    /// Grants the governance roles on ScrollOwner, then swaps the admin role
    /// from the deployer to the 14-day timelock (grant before revoke).
    function grantRoles() internal {
        owner.grantRole(SECURITY_COUNCIL_NO_DELAY_ROLE, SECURITY_COUNCIL_ADDR);
        owner.grantRole(SCROLL_MULTISIG_NO_DELAY_ROLE, SCROLL_MULTISIG_ADDR);
        owner.grantRole(TIMELOCK_1DAY_DELAY_ROLE, L2_1D_TIMELOCK_ADDR);
        owner.grantRole(TIMELOCK_7DAY_DELAY_ROLE, L2_7D_TIMELOCK_ADDR);

        owner.grantRole(owner.DEFAULT_ADMIN_ROLE(), L2_14D_TIMELOCK_ADDR);
        owner.revokeRole(owner.DEFAULT_ADMIN_ROLE(), vm.addr(L2_DEPLOYER_PRIVATE_KEY));
    }

    /// ProxyAdmin: security council may upgrade proxies without delay.
    function configProxyAdmin() internal {
        bytes4[] memory _selectors;

        // no delay, security council
        _selectors = new bytes4[](2);
        _selectors[0] = ProxyAdmin.upgrade.selector;
        _selectors[1] = ProxyAdmin.upgradeAndCall.selector;
        owner.updateAccess(L2_PROXY_ADMIN_ADDR, _selectors, SECURITY_COUNCIL_NO_DELAY_ROLE, true);
    }

    /// L1GasPriceOracle: multisig may tune overhead/scalar immediately.
    function configL1GasPriceOracle() internal {
        bytes4[] memory _selectors;

        // no delay, scroll multisig
        _selectors = new bytes4[](2);
        _selectors[0] = L1GasPriceOracle.setOverhead.selector;
        _selectors[1] = L1GasPriceOracle.setScalar.selector;
        owner.updateAccess(L1_GAS_PRICE_ORACLE_ADDR, _selectors, SCROLL_MULTISIG_NO_DELAY_ROLE, true);
    }

    /// L2TxFeeVault: multisig may change the minimum withdrawal immediately.
    function configL2TxFeeVault() internal {
        bytes4[] memory _selectors;

        // no delay, scroll multisig
        _selectors = new bytes4[](1);
        _selectors[0] = L2TxFeeVault.updateMinWithdrawAmount.selector;
        owner.updateAccess(L2_TX_FEE_VAULT_ADDR, _selectors, SCROLL_MULTISIG_NO_DELAY_ROLE, true);
    }

    /// Whitelist: status changes behind a 1-day timelock.
    function configL2Whitelist() internal {
        bytes4[] memory _selectors;

        // delay 1 day, scroll multisig
        _selectors = new bytes4[](1);
        _selectors[0] = Whitelist.updateWhitelistStatus.selector;
        owner.updateAccess(L2_WHITELIST_ADDR, _selectors, TIMELOCK_1DAY_DELAY_ROLE, true);
    }

    /// L2ScrollMessenger: multisig may pause immediately.
    function configL2ScrollMessenger() internal {
        bytes4[] memory _selectors;

        // no delay, scroll multisig
        _selectors = new bytes4[](1);
        _selectors[0] = ScrollMessengerBase.setPause.selector;
        owner.updateAccess(L2_SCROLL_MESSENGER_PROXY_ADDR, _selectors, SCROLL_MULTISIG_NO_DELAY_ROLE, true);
    }

    /// GatewayRouter: ERC20 gateway remapping behind a 1-day timelock.
    function configL2GatewayRouter() internal {
        bytes4[] memory _selectors;

        // delay 1 day, scroll multisig
        _selectors = new bytes4[](1);
        _selectors[0] = L2GatewayRouter.setERC20Gateway.selector;
        owner.updateAccess(L2_GATEWAY_ROUTER_PROXY_ADDR, _selectors, TIMELOCK_1DAY_DELAY_ROLE, true);
    }

    /// CustomERC20Gateway: token-mapping updates behind a 1-day timelock.
    function configL2CustomERC20Gateway() internal {
        bytes4[] memory _selectors;

        // delay 1 day, scroll multisig
        _selectors = new bytes4[](1);
        _selectors[0] = L2CustomERC20Gateway.updateTokenMapping.selector;
        owner.updateAccess(L2_CUSTOM_ERC20_GATEWAY_PROXY_ADDR, _selectors, TIMELOCK_1DAY_DELAY_ROLE, true);
    }

    /// ERC721Gateway: token-mapping updates behind a 1-day timelock.
    function configL2ERC721Gateway() internal {
        bytes4[] memory _selectors;

        // delay 1 day, scroll multisig
        _selectors = new bytes4[](1);
        _selectors[0] = L2ERC721Gateway.updateTokenMapping.selector;
        owner.updateAccess(L2_ERC721_GATEWAY_PROXY_ADDR, _selectors, TIMELOCK_1DAY_DELAY_ROLE, true);
    }

    /// ERC1155Gateway: token-mapping updates behind a 1-day timelock.
    function configL2ERC1155Gateway() internal {
        bytes4[] memory _selectors;

        // delay 1 day, scroll multisig
        _selectors = new bytes4[](1);
        _selectors[0] = L2ERC1155Gateway.updateTokenMapping.selector;
        owner.updateAccess(L2_ERC1155_GATEWAY_PROXY_ADDR, _selectors, TIMELOCK_1DAY_DELAY_ROLE, true);
    }

    /// ETHRateLimiter: multisig may change the total limit immediately.
    function configETHRateLimiter() internal {
        bytes4[] memory _selectors;

        // no delay, scroll multisig
        _selectors = new bytes4[](1);
        _selectors[0] = ETHRateLimiter.updateTotalLimit.selector;
        owner.updateAccess(L2_ETH_RATE_LIMITER_ADDR, _selectors, SCROLL_MULTISIG_NO_DELAY_ROLE, true);
    }

    /// TokenRateLimiter: limit changes and role grants are immediate (multisig);
    /// role revocations go through a 7-day timelock.
    function configTokenRateLimiter() internal {
        bytes4[] memory _selectors;

        // no delay, scroll multisig
        _selectors = new bytes4[](2);
        _selectors[0] = TokenRateLimiter.updateTotalLimit.selector;
        _selectors[1] = AccessControl.grantRole.selector;
        owner.updateAccess(L2_TOKEN_RATE_LIMITER_ADDR, _selectors, SCROLL_MULTISIG_NO_DELAY_ROLE, true);

        // delay 7 day, scroll multisig
        _selectors = new bytes4[](1);
        _selectors[0] = AccessControl.revokeRole.selector;
        owner.updateAccess(L2_TOKEN_RATE_LIMITER_ADDR, _selectors, TIMELOCK_7DAY_DELAY_ROLE, true);
    }

    /*
    function configL2USDCGateway() internal {
        bytes4[] memory _selectors;

        // delay 7 day, scroll multisig
        _selectors = new bytes4[](3);
        _selectors[0] = L2USDCGateway.updateCircleCaller.selector;
        _selectors[1] = L2USDCGateway.pauseDeposit.selector;
        _selectors[2] = L2USDCGateway.pauseWithdraw.selector;
        owner.updateAccess(L2_USDC_GATEWAY_PROXY_ADDR, _selectors, TIMELOCK_7DAY_DELAY_ROLE, true);
    }
    */
}
|
||||
@@ -2,6 +2,7 @@
|
||||
|
||||
pragma solidity =0.8.16;
|
||||
|
||||
import {TimelockController} from "@openzeppelin/contracts/governance/TimelockController.sol";
|
||||
import {ProxyAdmin} from "@openzeppelin/contracts/proxy/transparent/ProxyAdmin.sol";
|
||||
import {TransparentUpgradeableProxy} from "@openzeppelin/contracts/proxy/transparent/TransparentUpgradeableProxy.sol";
|
||||
import {MinimalForwarder} from "@openzeppelin/contracts/metatx/MinimalForwarder.sol";
|
||||
|
||||
@@ -64,7 +64,7 @@ contract L1MessageQueue is OwnableUpgradeable, IL1MessageQueue {
|
||||
/// @notice The max gas limit of L1 transactions.
|
||||
uint256 public maxGasLimit;
|
||||
|
||||
/// @dev The bitmap for skipped messages.
|
||||
/// @dev The bitmap for dropped messages, where `droppedMessageBitmap[i]` keeps the bits from `[i*256, (i+1)*256)`.
|
||||
BitMapsUpgradeable.BitMap private droppedMessageBitmap;
|
||||
|
||||
/// @dev The bitmap for skipped messages, where `skippedMessageBitmap[i]` keeps the bits from `[i*256, (i+1)*256)`.
|
||||
|
||||
@@ -108,23 +108,7 @@ contract L2GasPriceOracle is OwnableUpgradeable, IL2GasPriceOracle {
|
||||
* Public Mutating Functions *
|
||||
*****************************/
|
||||
|
||||
/// @notice Allows the owner to update parameters for intrinsic gas calculation.
|
||||
/// @param _txGas The intrinsic gas for transaction.
|
||||
/// @param _txGasContractCreation The intrinsic gas for contract creation.
|
||||
/// @param _zeroGas The intrinsic gas for each zero byte.
|
||||
/// @param _nonZeroGas The intrinsic gas for each nonzero byte.
|
||||
function setIntrinsicParams(
|
||||
uint64 _txGas,
|
||||
uint64 _txGasContractCreation,
|
||||
uint64 _zeroGas,
|
||||
uint64 _nonZeroGas
|
||||
) external {
|
||||
require(whitelist.isSenderAllowed(msg.sender), "Not whitelisted sender");
|
||||
|
||||
_setIntrinsicParams(_txGas, _txGasContractCreation, _zeroGas, _nonZeroGas);
|
||||
}
|
||||
|
||||
/// @notice Allows the owner to modify the l2 base fee.
|
||||
/// @notice Allows whitelisted caller to modify the l2 base fee.
|
||||
/// @param _newL2BaseFee The new l2 base fee.
|
||||
function setL2BaseFee(uint256 _newL2BaseFee) external {
|
||||
require(whitelist.isSenderAllowed(msg.sender), "Not whitelisted sender");
|
||||
@@ -149,6 +133,20 @@ contract L2GasPriceOracle is OwnableUpgradeable, IL2GasPriceOracle {
|
||||
emit UpdateWhitelist(_oldWhitelist, _newWhitelist);
|
||||
}
|
||||
|
||||
/// @notice Allows the owner to update parameters for intrinsic gas calculation.
|
||||
/// @param _txGas The intrinsic gas for transaction.
|
||||
/// @param _txGasContractCreation The intrinsic gas for contract creation.
|
||||
/// @param _zeroGas The intrinsic gas for each zero byte.
|
||||
/// @param _nonZeroGas The intrinsic gas for each nonzero byte.
|
||||
function setIntrinsicParams(
|
||||
uint64 _txGas,
|
||||
uint64 _txGasContractCreation,
|
||||
uint64 _zeroGas,
|
||||
uint64 _nonZeroGas
|
||||
) external onlyOwner {
|
||||
_setIntrinsicParams(_txGas, _txGasContractCreation, _zeroGas, _nonZeroGas);
|
||||
}
|
||||
|
||||
/**********************
|
||||
* Internal Functions *
|
||||
**********************/
|
||||
|
||||
@@ -32,7 +32,7 @@ import {OwnableBase} from "../../libraries/common/OwnableBase.sol";
|
||||
// solhint-disable reason-string
|
||||
|
||||
/// @title L2TxFeeVault
|
||||
/// @notice The L2TxFeeVault contract contains the basic logic for the various different vault contracts
|
||||
/// @notice The L2TxFeeVault contract contains the logic for the vault contracts
|
||||
/// used to hold fee revenue generated by the L2 system.
|
||||
contract L2TxFeeVault is OwnableBase {
|
||||
/**********
|
||||
@@ -110,6 +110,9 @@ contract L2TxFeeVault is OwnableBase {
|
||||
"FeeVault: withdrawal amount must be greater than minimum withdrawal amount"
|
||||
);
|
||||
|
||||
uint256 _balance = address(this).balance;
|
||||
require(_value <= _balance, "FeeVault: insufficient balance to withdraw");
|
||||
|
||||
unchecked {
|
||||
totalProcessed += _value;
|
||||
}
|
||||
|
||||
@@ -2,17 +2,17 @@
|
||||
|
||||
pragma solidity =0.8.16;
|
||||
|
||||
import {Ownable} from "@openzeppelin/contracts/access/Ownable.sol";
|
||||
import {ERC2771Context} from "@openzeppelin/contracts/metatx/ERC2771Context.sol";
|
||||
import {ReentrancyGuard} from "@openzeppelin/contracts/security/ReentrancyGuard.sol";
|
||||
import {IERC20} from "@openzeppelin/contracts/token/ERC20/IERC20.sol";
|
||||
import {SafeERC20} from "@openzeppelin/contracts/token/ERC20/utils/SafeERC20.sol";
|
||||
import {IERC20Permit} from "@openzeppelin/contracts/token/ERC20/extensions/draft-IERC20Permit.sol";
|
||||
|
||||
import {OwnableBase} from "../libraries/common/OwnableBase.sol";
|
||||
import {Context} from "@openzeppelin/contracts/utils/Context.sol";
|
||||
|
||||
// solhint-disable no-empty-blocks
|
||||
|
||||
contract GasSwap is ERC2771Context, ReentrancyGuard, OwnableBase {
|
||||
contract GasSwap is ERC2771Context, Ownable, ReentrancyGuard {
|
||||
using SafeERC20 for IERC20;
|
||||
using SafeERC20 for IERC20Permit;
|
||||
|
||||
@@ -76,9 +76,7 @@ contract GasSwap is ERC2771Context, ReentrancyGuard, OwnableBase {
|
||||
* Constructor *
|
||||
***************/
|
||||
|
||||
constructor(address trustedForwarder) ERC2771Context(trustedForwarder) {
|
||||
owner = msg.sender;
|
||||
}
|
||||
constructor(address trustedForwarder) ERC2771Context(trustedForwarder) {}
|
||||
|
||||
/*****************************
|
||||
* Public Mutating Functions *
|
||||
@@ -174,6 +172,16 @@ contract GasSwap is ERC2771Context, ReentrancyGuard, OwnableBase {
|
||||
* Internal Functions *
|
||||
**********************/
|
||||
|
||||
/// @inheritdoc Context
|
||||
function _msgData() internal view virtual override(Context, ERC2771Context) returns (bytes calldata) {
|
||||
return ERC2771Context._msgData();
|
||||
}
|
||||
|
||||
/// @inheritdoc Context
|
||||
function _msgSender() internal view virtual override(Context, ERC2771Context) returns (address) {
|
||||
return ERC2771Context._msgSender();
|
||||
}
|
||||
|
||||
/// @dev Internal function to concat two bytes array.
|
||||
function concat(bytes memory a, bytes memory b) internal pure returns (bytes memory) {
|
||||
return abi.encodePacked(a, b);
|
||||
|
||||
@@ -50,7 +50,7 @@ contract L2GasPriceOracleTest is DSTestPlus {
|
||||
|
||||
function testSetIntrinsicParamsAccess() external {
|
||||
hevm.startPrank(address(4));
|
||||
hevm.expectRevert("Not whitelisted sender");
|
||||
hevm.expectRevert("Ownable: caller is not the owner");
|
||||
oracle.setIntrinsicParams(1, 0, 0, 1);
|
||||
}
|
||||
|
||||
|
||||
@@ -33,7 +33,7 @@ contract L2TxFeeVaultTest is DSTestPlus {
|
||||
function testCantWithdrawMoreThanBalance(uint256 amount) public {
|
||||
hevm.assume(amount >= 10 ether);
|
||||
hevm.deal(address(vault), amount - 1);
|
||||
hevm.expectRevert(new bytes(0));
|
||||
hevm.expectRevert("FeeVault: insufficient balance to withdraw");
|
||||
vault.withdraw(amount);
|
||||
}
|
||||
|
||||
|
||||
@@ -5,10 +5,10 @@ IMAGE_VERSION=latest
|
||||
REPO_ROOT_DIR=./..
|
||||
|
||||
ifeq (4.3,$(firstword $(sort $(MAKE_VERSION) 4.3)))
|
||||
ZKEVM_VERSION=$(shell grep -m 1 "scroll-prover" ../common/libzkp/impl/Cargo.lock | cut -d "#" -f2 | cut -c-7)
|
||||
ZKEVM_VERSION=$(shell grep -m 1 "zkevm-circuits" ../common/libzkp/impl/Cargo.lock | cut -d "#" -f2 | cut -c-7)
|
||||
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ../common/libzkp/impl/Cargo.lock | cut -d "#" -f2 | cut -c-7)
|
||||
else
|
||||
ZKEVM_VERSION=$(shell grep -m 1 "scroll-prover" ../common/libzkp/impl/Cargo.lock | cut -d "\#" -f2 | cut -c-7)
|
||||
ZKEVM_VERSION=$(shell grep -m 1 "zkevm-circuits" ../common/libzkp/impl/Cargo.lock | cut -d "\#" -f2 | cut -c-7)
|
||||
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ../common/libzkp/impl/Cargo.lock | cut -d "\#" -f2 | cut -c-7)
|
||||
endif
|
||||
|
||||
|
||||
39
coordinator/internal/controller/cron/cleanup_challenge.go
Normal file
39
coordinator/internal/controller/cron/cleanup_challenge.go
Normal file
@@ -0,0 +1,39 @@
|
||||
package cron
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
|
||||
"scroll-tech/common/utils"
|
||||
)
|
||||
|
||||
func (c *Collector) cleanupChallenge() {
|
||||
defer func() {
|
||||
if err := recover(); err != nil {
|
||||
nerr := fmt.Errorf("clean challenge panic error: %v", err)
|
||||
log.Warn(nerr.Error())
|
||||
}
|
||||
}()
|
||||
|
||||
ticker := time.NewTicker(time.Minute * 10)
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
expiredTime := utils.NowUTC().Add(-time.Hour)
|
||||
if err := c.challenge.DeleteExpireChallenge(c.ctx, expiredTime); err != nil {
|
||||
log.Error("delete expired challenge failure", "error", err)
|
||||
}
|
||||
case <-c.ctx.Done():
|
||||
if c.ctx.Err() != nil {
|
||||
log.Error("manager context canceled with error", "error", c.ctx.Err())
|
||||
}
|
||||
return
|
||||
case <-c.stopTimeoutChan:
|
||||
log.Info("the coordinator run loop exit")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
@@ -28,6 +28,7 @@ type Collector struct {
|
||||
proverTaskOrm *orm.ProverTask
|
||||
chunkOrm *orm.Chunk
|
||||
batchOrm *orm.Batch
|
||||
challenge *orm.Challenge
|
||||
|
||||
timeoutBatchCheckerRunTotal prometheus.Counter
|
||||
batchProverTaskTimeoutTotal prometheus.Counter
|
||||
@@ -46,6 +47,7 @@ func NewCollector(ctx context.Context, db *gorm.DB, cfg *config.Config, reg prom
|
||||
proverTaskOrm: orm.NewProverTask(db),
|
||||
chunkOrm: orm.NewChunk(db),
|
||||
batchOrm: orm.NewBatch(db),
|
||||
challenge: orm.NewChallenge(db),
|
||||
|
||||
timeoutBatchCheckerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "coordinator_batch_timeout_checker_run_total",
|
||||
@@ -72,6 +74,7 @@ func NewCollector(ctx context.Context, db *gorm.DB, cfg *config.Config, reg prom
|
||||
go c.timeoutBatchProofTask()
|
||||
go c.timeoutChunkProofTask()
|
||||
go c.checkBatchAllChunkReady()
|
||||
go c.cleanupChallenge()
|
||||
|
||||
log.Info("Start coordinator successfully.")
|
||||
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
@@ -61,13 +62,48 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
|
||||
|
||||
maxActiveAttempts := bp.cfg.ProverManager.ProversPerSession
|
||||
maxTotalAttempts := bp.cfg.ProverManager.SessionAttempts
|
||||
batchTask, err := bp.batchOrm.UpdateBatchAttemptsReturning(ctx, maxActiveAttempts, maxTotalAttempts)
|
||||
if err != nil {
|
||||
log.Error("failed to get unassigned batch proving tasks", "err", err)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
var batchTask *orm.Batch
|
||||
for i := 0; i < 5; i++ {
|
||||
var getTaskError error
|
||||
var tmpBatchTask *orm.Batch
|
||||
tmpBatchTask, getTaskError = bp.batchOrm.GetUnassignedBatch(ctx, maxActiveAttempts, maxTotalAttempts)
|
||||
if getTaskError != nil {
|
||||
log.Error("failed to get unassigned batch proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
|
||||
// Why here need get again? In order to support a task can assign to multiple prover, need also assign `ProvingTaskAssigned`
|
||||
// batch to prover. But use `proving_status in (1, 2)` will not use the postgres index. So need split the sql.
|
||||
if tmpBatchTask == nil {
|
||||
tmpBatchTask, getTaskError = bp.batchOrm.GetAssignedBatch(ctx, maxActiveAttempts, maxTotalAttempts)
|
||||
if getTaskError != nil {
|
||||
log.Error("failed to get assigned batch proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
}
|
||||
|
||||
if tmpBatchTask == nil {
|
||||
log.Debug("get empty batch", "height", getTaskParameter.ProverHeight)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
rowsAffected, updateAttemptsErr := bp.batchOrm.UpdateBatchAttempts(ctx, tmpBatchTask.Index, tmpBatchTask.ActiveAttempts, tmpBatchTask.TotalAttempts)
|
||||
if updateAttemptsErr != nil {
|
||||
log.Error("failed to update batch attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
|
||||
if rowsAffected == 0 {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
continue
|
||||
}
|
||||
|
||||
batchTask = tmpBatchTask
|
||||
break
|
||||
}
|
||||
|
||||
if batchTask == nil {
|
||||
log.Debug("get empty unassigned batch after retry 5 times", "height", getTaskParameter.ProverHeight)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
@@ -64,13 +65,48 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
|
||||
|
||||
maxActiveAttempts := cp.cfg.ProverManager.ProversPerSession
|
||||
maxTotalAttempts := cp.cfg.ProverManager.SessionAttempts
|
||||
chunkTask, err := cp.chunkOrm.UpdateChunkAttemptsReturning(ctx, getTaskParameter.ProverHeight, maxActiveAttempts, maxTotalAttempts)
|
||||
if err != nil {
|
||||
log.Error("failed to get unassigned chunk proving tasks", "height", getTaskParameter.ProverHeight, "err", err)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
var chunkTask *orm.Chunk
|
||||
for i := 0; i < 5; i++ {
|
||||
var getTaskError error
|
||||
var tmpChunkTask *orm.Chunk
|
||||
tmpChunkTask, getTaskError = cp.chunkOrm.GetUnassignedChunk(ctx, getTaskParameter.ProverHeight, maxActiveAttempts, maxTotalAttempts)
|
||||
if getTaskError != nil {
|
||||
log.Error("failed to get unassigned chunk proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
|
||||
// Why here need get again? In order to support a task can assign to multiple prover, need also assign `ProvingTaskAssigned`
|
||||
// chunk to prover. But use `proving_status in (1, 2)` will not use the postgres index. So need split the sql.
|
||||
if tmpChunkTask == nil {
|
||||
tmpChunkTask, getTaskError = cp.chunkOrm.GetAssignedChunk(ctx, getTaskParameter.ProverHeight, maxActiveAttempts, maxTotalAttempts)
|
||||
if getTaskError != nil {
|
||||
log.Error("failed to get assigned chunk proving tasks", "height", getTaskParameter.ProverHeight, "err", getTaskError)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
}
|
||||
|
||||
if tmpChunkTask == nil {
|
||||
log.Debug("get empty chunk", "height", getTaskParameter.ProverHeight)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
rowsAffected, updateAttemptsErr := cp.chunkOrm.UpdateChunkAttempts(ctx, tmpChunkTask.Index, tmpChunkTask.ActiveAttempts, tmpChunkTask.TotalAttempts)
|
||||
if updateAttemptsErr != nil {
|
||||
log.Error("failed to update chunk attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
|
||||
if rowsAffected == 0 {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
continue
|
||||
}
|
||||
|
||||
chunkTask = tmpChunkTask
|
||||
break
|
||||
}
|
||||
|
||||
if chunkTask == nil {
|
||||
log.Debug("get empty unassigned chunk after retry 5 times", "height", getTaskParameter.ProverHeight)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -10,7 +10,6 @@ import (
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/common/types/message"
|
||||
@@ -72,26 +71,46 @@ func (*Batch) TableName() string {
|
||||
return "batch"
|
||||
}
|
||||
|
||||
// GetUnassignedBatches retrieves unassigned batches based on the specified limit.
|
||||
// The returned batches are sorted in ascending order by their index.
|
||||
func (o *Batch) GetUnassignedBatches(ctx context.Context, limit int) ([]*Batch, error) {
|
||||
if limit < 0 {
|
||||
return nil, errors.New("limit must not be smaller than zero")
|
||||
}
|
||||
if limit == 0 {
|
||||
// GetUnassignedBatch retrieves unassigned batch based on the specified limit.
|
||||
// The returned batch are sorted in ascending order by their index.
|
||||
func (o *Batch) GetUnassignedBatch(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (*Batch, error) {
|
||||
db := o.db.WithContext(ctx)
|
||||
db = db.Where("proving_status = ?", int(types.ProvingTaskUnassigned))
|
||||
db = db.Where("total_attempts < ?", maxTotalAttempts)
|
||||
db = db.Where("active_attempts < ?", maxActiveAttempts)
|
||||
db = db.Where("chunk_proofs_status = ?", int(types.ChunkProofsStatusReady))
|
||||
|
||||
var batch Batch
|
||||
err := db.First(&batch).Error
|
||||
if err != nil && errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
db := o.db.WithContext(ctx)
|
||||
db = db.Where("proving_status = ? AND chunk_proofs_status = ?", types.ProvingTaskUnassigned, types.ChunkProofsStatusReady)
|
||||
db = db.Order("index ASC")
|
||||
db = db.Limit(limit)
|
||||
|
||||
var batches []*Batch
|
||||
if err := db.Find(&batches).Error; err != nil {
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Batch.GetUnassignedBatches error: %w", err)
|
||||
}
|
||||
return batches, nil
|
||||
return &batch, nil
|
||||
}
|
||||
|
||||
// GetAssignedBatch retrieves assigned batch based on the specified limit.
|
||||
// The returned batch are sorted in ascending order by their index.
|
||||
func (o *Batch) GetAssignedBatch(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (*Batch, error) {
|
||||
db := o.db.WithContext(ctx)
|
||||
db = db.Where("proving_status = ?", int(types.ProvingTaskAssigned))
|
||||
db = db.Where("total_attempts < ?", maxTotalAttempts)
|
||||
db = db.Where("active_attempts < ?", maxActiveAttempts)
|
||||
db = db.Where("chunk_proofs_status = ?", int(types.ChunkProofsStatusReady))
|
||||
|
||||
var batch Batch
|
||||
err := db.First(&batch).Error
|
||||
if err != nil && errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Batch.GetAssignedBatches error: %w", err)
|
||||
}
|
||||
return &batch, nil
|
||||
}
|
||||
|
||||
// GetUnassignedAndChunksUnreadyBatches get the batches which is unassigned and chunks is not ready
|
||||
@@ -303,22 +322,13 @@ func (o *Batch) UpdateProofAndProvingStatusByHash(ctx context.Context, hash stri
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateBatchAttemptsReturning atomically increments the attempts count for the earliest available batch that meets the conditions.
|
||||
func (o *Batch) UpdateBatchAttemptsReturning(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (*Batch, error) {
|
||||
// UpdateBatchAttempts atomically increments the attempts count for the earliest available batch that meets the conditions.
|
||||
func (o *Batch) UpdateBatchAttempts(ctx context.Context, index uint64, curActiveAttempts, curTotalAttempts int16) (int64, error) {
|
||||
db := o.db.WithContext(ctx)
|
||||
|
||||
subQueryDB := db.Model(&Batch{}).Select("index")
|
||||
subQueryDB = subQueryDB.Clauses(clause.Locking{Strength: "UPDATE"})
|
||||
subQueryDB = subQueryDB.Where("proving_status not in (?)", []int{int(types.ProvingTaskVerified), int(types.ProvingTaskFailed)})
|
||||
subQueryDB = subQueryDB.Where("total_attempts < ?", maxTotalAttempts)
|
||||
subQueryDB = subQueryDB.Where("active_attempts < ?", maxActiveAttempts)
|
||||
subQueryDB = subQueryDB.Where("chunk_proofs_status = ?", int(types.ChunkProofsStatusReady))
|
||||
subQueryDB = subQueryDB.Order("index ASC")
|
||||
subQueryDB = subQueryDB.Limit(1)
|
||||
|
||||
var updatedBatch Batch
|
||||
db = db.Model(&updatedBatch).Clauses(clause.Returning{})
|
||||
db = db.Where("index = (?)", subQueryDB)
|
||||
db = db.Model(&Batch{})
|
||||
db = db.Where("index = ?", index)
|
||||
db = db.Where("active_attempts = ?", curActiveAttempts)
|
||||
db = db.Where("total_attempts = ?", curTotalAttempts)
|
||||
result := db.Updates(map[string]interface{}{
|
||||
"proving_status": types.ProvingTaskAssigned,
|
||||
"total_attempts": gorm.Expr("total_attempts + 1"),
|
||||
@@ -326,13 +336,9 @@ func (o *Batch) UpdateBatchAttemptsReturning(ctx context.Context, maxActiveAttem
|
||||
})
|
||||
|
||||
if result.Error != nil {
|
||||
return nil, fmt.Errorf("failed to select and update batch, max active attempts: %v, max total attempts: %v, err: %w",
|
||||
maxActiveAttempts, maxTotalAttempts, result.Error)
|
||||
return 0, fmt.Errorf("failed to update batch, err:%w", result.Error)
|
||||
}
|
||||
if result.RowsAffected == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
return &updatedBatch, nil
|
||||
return result.RowsAffected, nil
|
||||
}
|
||||
|
||||
// DecreaseActiveAttemptsByHash decrements the active_attempts of a batch given its hash.
|
||||
|
||||
@@ -55,3 +55,14 @@ func (r *Challenge) InsertChallenge(ctx context.Context, challengeString string)
|
||||
|
||||
return fmt.Errorf("insert challenge string affected rows more than 1")
|
||||
}
|
||||
|
||||
// DeleteExpireChallenge delete the expire challenge
|
||||
func (r *Challenge) DeleteExpireChallenge(ctx context.Context, expiredTime time.Time) error {
|
||||
db := r.db.WithContext(ctx)
|
||||
db = db.Model(&Challenge{})
|
||||
db = db.Where("created_at < ?", expiredTime)
|
||||
if err := db.Unscoped().Delete(&Challenge{}).Error; err != nil {
|
||||
return fmt.Errorf("Challenge.DeleteExpireChallenge err: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -9,7 +9,6 @@ import (
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/common/types/message"
|
||||
@@ -67,27 +66,48 @@ func (*Chunk) TableName() string {
|
||||
return "chunk"
|
||||
}
|
||||
|
||||
// GetUnassignedChunks retrieves unassigned chunks based on the specified limit.
|
||||
// GetUnassignedChunk retrieves unassigned chunk based on the specified limit.
|
||||
// The returned chunks are sorted in ascending order by their index.
|
||||
func (o *Chunk) GetUnassignedChunks(ctx context.Context, limit int) ([]*Chunk, error) {
|
||||
if limit < 0 {
|
||||
return nil, errors.New("limit must not be smaller than zero")
|
||||
}
|
||||
if limit == 0 {
|
||||
func (o *Chunk) GetUnassignedChunk(ctx context.Context, height int, maxActiveAttempts, maxTotalAttempts uint8) (*Chunk, error) {
|
||||
db := o.db.WithContext(ctx)
|
||||
db = db.Model(&Chunk{})
|
||||
db = db.Where("proving_status = ?", int(types.ProvingTaskUnassigned))
|
||||
db = db.Where("total_attempts < ?", maxTotalAttempts)
|
||||
db = db.Where("active_attempts < ?", maxActiveAttempts)
|
||||
db = db.Where("end_block_number <= ?", height)
|
||||
|
||||
var chunk Chunk
|
||||
err := db.First(&chunk).Error
|
||||
if err != nil && errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
db := o.db.WithContext(ctx)
|
||||
db = db.Model(&Chunk{})
|
||||
db = db.Where("proving_status = ?", types.ProvingTaskUnassigned)
|
||||
db = db.Order("index ASC")
|
||||
db = db.Limit(limit)
|
||||
|
||||
var chunks []*Chunk
|
||||
if err := db.Find(&chunks).Error; err != nil {
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Chunk.GetUnassignedChunks error: %w", err)
|
||||
}
|
||||
return chunks, nil
|
||||
return &chunk, nil
|
||||
}
|
||||
|
||||
// GetAssignedChunk retrieves assigned chunk based on the specified limit.
|
||||
// The returned chunks are sorted in ascending order by their index.
|
||||
func (o *Chunk) GetAssignedChunk(ctx context.Context, height int, maxActiveAttempts, maxTotalAttempts uint8) (*Chunk, error) {
|
||||
db := o.db.WithContext(ctx)
|
||||
db = db.Model(&Chunk{})
|
||||
db = db.Where("proving_status = ?", int(types.ProvingTaskAssigned))
|
||||
db = db.Where("total_attempts < ?", maxTotalAttempts)
|
||||
db = db.Where("active_attempts < ?", maxActiveAttempts)
|
||||
db = db.Where("end_block_number <= ?", height)
|
||||
|
||||
var chunk Chunk
|
||||
err := db.First(&chunk).Error
|
||||
if err != nil && errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Chunk.GetAssignedChunks error: %w", err)
|
||||
}
|
||||
return &chunk, nil
|
||||
}
|
||||
|
||||
// GetChunksByBatchHash retrieves the chunks associated with a specific batch hash.
|
||||
@@ -158,19 +178,6 @@ func (o *Chunk) GetProvingStatusByHash(ctx context.Context, hash string) (types.
|
||||
return types.ProvingStatus(chunk.ProvingStatus), nil
|
||||
}
|
||||
|
||||
// GetAssignedChunks retrieves all chunks whose proving_status is either types.ProvingTaskAssigned.
|
||||
func (o *Chunk) GetAssignedChunks(ctx context.Context) ([]*Chunk, error) {
|
||||
db := o.db.WithContext(ctx)
|
||||
db = db.Model(&Chunk{})
|
||||
db = db.Where("proving_status = ?", int(types.ProvingTaskAssigned))
|
||||
|
||||
var chunks []*Chunk
|
||||
if err := db.Find(&chunks).Error; err != nil {
|
||||
return nil, fmt.Errorf("Chunk.GetAssignedChunks error: %w", err)
|
||||
}
|
||||
return chunks, nil
|
||||
}
|
||||
|
||||
// CheckIfBatchChunkProofsAreReady checks if all proofs for all chunks of a given batchHash are collected.
|
||||
func (o *Chunk) CheckIfBatchChunkProofsAreReady(ctx context.Context, batchHash string) (bool, error) {
|
||||
db := o.db.WithContext(ctx)
|
||||
@@ -350,26 +357,13 @@ func (o *Chunk) UpdateBatchHashInRange(ctx context.Context, startIndex uint64, e
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateChunkAttemptsReturning atomically increments the attempts count for the earliest available chunk that meets the conditions.
|
||||
func (o *Chunk) UpdateChunkAttemptsReturning(ctx context.Context, height int, maxActiveAttempts, maxTotalAttempts uint8) (*Chunk, error) {
|
||||
if height <= 0 {
|
||||
return nil, errors.New("Chunk.UpdateChunkAttemptsReturning error: height must be larger than zero")
|
||||
}
|
||||
|
||||
// UpdateChunkAttempts atomically increments the attempts count for the earliest available chunk that meets the conditions.
|
||||
func (o *Chunk) UpdateChunkAttempts(ctx context.Context, index uint64, curActiveAttempts, curTotalAttempts int16) (int64, error) {
|
||||
db := o.db.WithContext(ctx)
|
||||
|
||||
subQueryDB := db.Model(&Chunk{}).Select("index")
|
||||
subQueryDB = subQueryDB.Clauses(clause.Locking{Strength: "UPDATE"})
|
||||
subQueryDB = subQueryDB.Where("proving_status not in (?)", []int{int(types.ProvingTaskVerified), int(types.ProvingTaskFailed)})
|
||||
subQueryDB = subQueryDB.Where("total_attempts < ?", maxTotalAttempts)
|
||||
subQueryDB = subQueryDB.Where("active_attempts < ?", maxActiveAttempts)
|
||||
subQueryDB = subQueryDB.Where("end_block_number <= ?", height)
|
||||
subQueryDB = subQueryDB.Order("index ASC")
|
||||
subQueryDB = subQueryDB.Limit(1)
|
||||
|
||||
var updatedChunk Chunk
|
||||
db = db.Model(&updatedChunk).Clauses(clause.Returning{})
|
||||
db = db.Where("index = (?)", subQueryDB)
|
||||
db = db.Model(&Chunk{})
|
||||
db = db.Where("index = ?", index)
|
||||
db = db.Where("active_attempts = ?", curActiveAttempts)
|
||||
db = db.Where("total_attempts = ?", curTotalAttempts)
|
||||
result := db.Updates(map[string]interface{}{
|
||||
"proving_status": types.ProvingTaskAssigned,
|
||||
"total_attempts": gorm.Expr("total_attempts + 1"),
|
||||
@@ -377,13 +371,9 @@ func (o *Chunk) UpdateChunkAttemptsReturning(ctx context.Context, height int, ma
|
||||
})
|
||||
|
||||
if result.Error != nil {
|
||||
return nil, fmt.Errorf("failed to select and update batch, max active attempts: %v, max total attempts: %v, err: %w",
|
||||
maxActiveAttempts, maxTotalAttempts, result.Error)
|
||||
return 0, fmt.Errorf("failed to update chunk, err:%w", result.Error)
|
||||
}
|
||||
if result.RowsAffected == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
return &updatedChunk, nil
|
||||
return result.RowsAffected, nil
|
||||
}
|
||||
|
||||
// DecreaseActiveAttemptsByHash decrements the active_attempts of a chunk given its hash.
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
.PHONY: lint docker clean prover mock-prover
|
||||
|
||||
ifeq (4.3,$(firstword $(sort $(MAKE_VERSION) 4.3)))
|
||||
ZKEVM_VERSION=$(shell grep -m 1 "scroll-prover" ../common/libzkp/impl/Cargo.lock | cut -d "#" -f2 | cut -c-7)
|
||||
ZKEVM_VERSION=$(shell grep -m 1 "zkevm-circuits" ../common/libzkp/impl/Cargo.lock | cut -d "#" -f2 | cut -c-7)
|
||||
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ../common/libzkp/impl/Cargo.lock | cut -d "#" -f2 | cut -c-7)
|
||||
else
|
||||
ZKEVM_VERSION=$(shell grep -m 1 "scroll-prover" ../common/libzkp/impl/Cargo.lock | cut -d "\#" -f2 | cut -c-7)
|
||||
ZKEVM_VERSION=$(shell grep -m 1 "zkevm-circuits" ../common/libzkp/impl/Cargo.lock | cut -d "\#" -f2 | cut -c-7)
|
||||
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ../common/libzkp/impl/Cargo.lock | cut -d "\#" -f2 | cut -c-7)
|
||||
endif
|
||||
|
||||
@@ -39,7 +39,7 @@ test-gpu-prover: libzkp
|
||||
go test -tags="gpu ffi" -timeout 0 -v ./prover
|
||||
|
||||
lastest-zk-version:
|
||||
curl -sL https://api.github.com/repos/scroll-tech/scroll-prover/commits | jq -r ".[0].sha"
|
||||
curl -sL https://api.github.com/repos/scroll-tech/zkevm-circuits/commits | jq -r ".[0].sha"
|
||||
|
||||
lint: ## Lint the files - used for CI
|
||||
cp -r ../common/libzkp/interface ./core/lib
|
||||
|
||||
Reference in New Issue
Block a user