Compare commits


8 Commits

Author SHA1 Message Date
Morty
a6f2457040 feat(coordinator): assign static prover first and avoid reassigning failed task to same prover (#1584)
Co-authored-by: yiweichi <yiweichi@users.noreply.github.com>
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
2025-01-15 19:42:58 +08:00
Alexis
fa0927c5dc optimize l2 GasPrice comparison (#1581)
Co-authored-by: alexis <alexisdevilliers1999@gmail.com>
Co-authored-by: georgehao <haohongfan@gmail.com>
2025-01-06 17:56:09 +08:00
colin
f92029aaeb fix(rollup-relayer): update batch finalizing status and unify db time… (#1582) 2024-12-30 17:46:40 +08:00
Hsiao_Jan
45b23edde9 fix(coordinator): fix the error in the incorrect call during database… (#1576)
Co-authored-by: xiaoranlu <xiaoranlu@tencent.com>
2024-12-17 14:51:50 +08:00
qcrao
33b1b3cb51 opt: pre-allocate chunk blocks slice in chunk proposer (#1572)
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
2024-12-12 14:27:22 +08:00
colin
51c930d7da fix(sender): nonce update (#1570)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2024-12-12 14:22:24 +08:00
Bin
4cfc5511fb fix: Using the Recommended Error Determination (#1546)
Co-authored-by: 0xmountaintop <37070449+0xmountaintop@users.noreply.github.com>
Co-authored-by: georgehao <haohongfan@gmail.com>
2024-12-12 14:15:32 +08:00
colin
06beb5dca3 fix(rollup): only enable chain-monitor in rollup-relayer (#1569)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2024-12-06 14:15:22 +08:00
30 changed files with 351 additions and 98 deletions

View File

@@ -2,6 +2,7 @@ package orm
import (
"context"
"errors"
"fmt"
"time"
@@ -45,7 +46,7 @@ func (c *BatchEvent) GetBatchEventSyncedHeightInDB(ctx context.Context) (uint64,
db = db.Model(&BatchEvent{})
db = db.Order("l1_block_number desc")
if err := db.First(&batch).Error; err != nil {
if err == gorm.ErrRecordNotFound {
if errors.Is(err, gorm.ErrRecordNotFound) {
return 0, nil
}
return 0, fmt.Errorf("failed to get batch synced height in db, error: %w", err)
@@ -62,7 +63,7 @@ func (c *BatchEvent) GetLastUpdatedFinalizedBlockHeight(ctx context.Context) (ui
db = db.Where("update_status = ?", btypes.UpdateStatusTypeUpdated)
db = db.Order("batch_index desc")
if err := db.First(&batch).Error; err != nil {
if err == gorm.ErrRecordNotFound {
if errors.Is(err, gorm.ErrRecordNotFound) {
// No finalized batch found, return genesis batch's end block number.
return 0, nil
}
@@ -81,7 +82,7 @@ func (c *BatchEvent) GetUnupdatedFinalizedBatchesLEBlockHeight(ctx context.Conte
db = db.Where("update_status = ?", btypes.UpdateStatusTypeUnupdated)
db = db.Order("batch_index asc")
if err := db.Find(&batches).Error; err != nil {
if err == gorm.ErrRecordNotFound {
if errors.Is(err, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, fmt.Errorf("failed to get unupdated finalized batches >= block height, error: %w", err)

View File

@@ -2,6 +2,7 @@ package orm
import (
"context"
"errors"
"fmt"
"time"
@@ -84,7 +85,7 @@ func (c *CrossMessage) GetMessageSyncedHeightInDB(ctx context.Context, messageTy
db = db.Order("l2_block_number desc")
}
if err := db.First(&message).Error; err != nil {
if err == gorm.ErrRecordNotFound {
if errors.Is(err, gorm.ErrRecordNotFound) {
return 0, nil
}
return 0, fmt.Errorf("failed to get latest processed height, type: %v, error: %w", messageType, err)
@@ -108,7 +109,7 @@ func (c *CrossMessage) GetL2LatestFinalizedWithdrawal(ctx context.Context) (*Cro
db = db.Where("rollup_status = ?", btypes.RollupStatusTypeFinalized)
db = db.Order("message_nonce desc")
if err := db.First(&message).Error; err != nil {
if err == gorm.ErrRecordNotFound {
if errors.Is(err, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, fmt.Errorf("failed to get latest L2 finalized sent message event, error: %w", err)
@@ -127,10 +128,10 @@ func (c *CrossMessage) GetL2WithdrawalsByBlockRange(ctx context.Context, startBl
db = db.Where("message_type = ?", btypes.MessageTypeL2SentMessage)
db = db.Order("message_nonce asc")
if err := db.Find(&messages).Error; err != nil {
if err == gorm.ErrRecordNotFound {
if errors.Is(err, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, fmt.Errorf("failed to get latest L2 finalized sent message event, error: %w", err)
return nil, fmt.Errorf("failed to get L2 withdrawals by block range, error: %v", err)
}
return messages, nil
}

View File

@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "v4.4.80"
var tag = "v4.4.86"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {

View File

@@ -2,6 +2,7 @@
"prover_manager": {
"provers_per_session": 1,
"session_attempts": 5,
"external_prover_threshold": 32,
"bundle_collection_time_sec": 180,
"batch_collection_time_sec": 180,
"chunk_collection_time_sec": 180,

View File

@@ -16,6 +16,8 @@ type ProverManager struct {
// Number of attempts that a session can be retried if previous attempts failed.
// Currently we only consider proving timeout as failure here.
SessionAttempts uint8 `json:"session_attempts"`
// Threshold for activating the external prover based on unassigned task count.
ExternalProverThreshold int64 `json:"external_prover_threshold"`
// Zk verifier config.
Verifier *VerifierConfig `json:"verifier"`
// BatchCollectionTimeSec batch Proof collection time (in seconds).
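The new external_prover_threshold key in the config file is decoded into ProverManager.ExternalProverThreshold. A minimal sketch of that round-trip, assuming only the fields shown in this hunk (the real struct has more):

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed-down stand-in for the coordinator's ProverManager config struct.
type ProverManager struct {
	ProversPerSession       uint8 `json:"provers_per_session"`
	SessionAttempts         uint8 `json:"session_attempts"`
	ExternalProverThreshold int64 `json:"external_prover_threshold"`
}

func main() {
	raw := []byte(`{"provers_per_session":1,"session_attempts":5,"external_prover_threshold":32}`)
	var pm ProverManager
	if err := json.Unmarshal(raw, &pm); err != nil {
		panic(err)
	}
	fmt.Println(pm.ExternalProverThreshold) // 32
}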

View File

@@ -70,10 +70,11 @@ func (a *AuthController) PayloadFunc(data interface{}) jwt.MapClaims {
}
return jwt.MapClaims{
types.HardForkName: v.HardForkName,
types.PublicKey: v.PublicKey,
types.ProverName: v.Message.ProverName,
types.ProverVersion: v.Message.ProverVersion,
types.HardForkName: v.HardForkName,
types.PublicKey: v.PublicKey,
types.ProverName: v.Message.ProverName,
types.ProverVersion: v.Message.ProverVersion,
types.ProverProviderTypeKey: v.Message.ProverProviderType,
}
}
@@ -96,5 +97,9 @@ func (a *AuthController) IdentityHandler(c *gin.Context) interface{} {
c.Set(types.HardForkName, hardForkName)
}
if providerType, ok := claims[types.ProverProviderTypeKey]; ok {
c.Set(types.ProverProviderTypeKey, providerType)
}
return nil
}
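The new claim travels inside the JWT, and numeric claims come back from the JSON round-trip as float64, which is why checkParameter later asserts uint8(ProverProviderType.(float64)). A small sketch of that behavior using only encoding/json (the actual gin-jwt flow is more involved):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	claims := map[string]interface{}{"prover_provider_type": uint8(1)} // set at login

	// JWT claims are serialized to JSON inside the token...
	blob, _ := json.Marshal(claims)

	// ...and decode back with numbers as float64.
	var decoded map[string]interface{}
	_ = json.Unmarshal(blob, &decoded)

	v := decoded["prover_provider_type"]
	fmt.Printf("%T\n", v)           // float64
	fmt.Println(uint8(v.(float64))) // 1 — hence the float64 assertion in checkParameter
}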

View File

@@ -106,6 +106,17 @@ func (l *LoginLogic) Check(login *types.LoginParameter) error {
}
}
}
if login.Message.ProverProviderType != types.ProverProviderTypeInternal && login.Message.ProverProviderType != types.ProverProviderTypeExternal {
// for backward compatibility, set ProverProviderType as internal
if login.Message.ProverProviderType == types.ProverProviderTypeUndefined {
login.Message.ProverProviderType = types.ProverProviderTypeInternal
} else {
log.Error("invalid prover_provider_type", "value", login.Message.ProverProviderType, "prover name", login.Message.ProverName, "prover version", login.Message.ProverVersion)
return errors.New("invalid prover provider type.")
}
}
return nil
}
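The rule added here: an unset (undefined) provider type defaults to internal for old provers, any other unknown value is rejected. A runnable sketch of that rule with local stand-in constants (not the coordinator's actual types package):

package main

import (
	"errors"
	"fmt"
)

type ProverProviderType uint8

const (
	ProverProviderTypeUndefined ProverProviderType = iota
	ProverProviderTypeInternal
	ProverProviderTypeExternal
)

// normalizeProviderType mirrors the backward-compatibility rule above:
// undefined becomes internal, unknown values are rejected.
func normalizeProviderType(t ProverProviderType) (ProverProviderType, error) {
	switch t {
	case ProverProviderTypeInternal, ProverProviderTypeExternal:
		return t, nil
	case ProverProviderTypeUndefined:
		return ProverProviderTypeInternal, nil
	default:
		return 0, errors.New("invalid prover provider type")
	}
}

func main() {
	fmt.Println(normalizeProviderType(ProverProviderTypeUndefined)) // 1 <nil>  (internal)
	fmt.Println(normalizeProviderType(ProverProviderType(7)))       // 0 invalid prover provider type
}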

View File

@@ -22,6 +22,7 @@ import (
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/orm"
coordinatorType "scroll-tech/coordinator/internal/types"
cutils "scroll-tech/coordinator/internal/utils"
)
// BatchProverTask is prover task implement for batch proof
@@ -63,6 +64,18 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
maxActiveAttempts := bp.cfg.ProverManager.ProversPerSession
maxTotalAttempts := bp.cfg.ProverManager.SessionAttempts
if taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) {
unassignedBatchCount, getCountError := bp.batchOrm.GetUnassignedBatchCount(ctx.Copy(), maxActiveAttempts, maxTotalAttempts)
if getCountError != nil {
log.Error("failed to get unassigned batch proving tasks count", "height", getTaskParameter.ProverHeight, "err", getCountError)
return nil, ErrCoordinatorInternalFailure
}
// Assign external prover if unassigned task number exceeds threshold
if unassignedBatchCount < bp.cfg.ProverManager.ExternalProverThreshold {
return nil, nil
}
}
var batchTask *orm.Batch
for i := 0; i < 5; i++ {
var getTaskError error
@@ -88,6 +101,20 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, nil
}
// Don't dispatch the same failing job to the same prover
proverTasks, getTaskError := bp.proverTaskOrm.GetFailedProverTasksByHash(ctx.Copy(), message.ProofTypeBatch, tmpBatchTask.Hash, 2)
if getTaskError != nil {
log.Error("failed to get prover tasks", "proof type", message.ProofTypeBatch.String(), "task ID", tmpBatchTask.Hash, "error", getTaskError)
return nil, ErrCoordinatorInternalFailure
}
for i := 0; i < len(proverTasks); i++ {
if proverTasks[i].ProverPublicKey == taskCtx.PublicKey ||
taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) && cutils.IsExternalProverNameMatch(proverTasks[i].ProverName, taskCtx.ProverName) {
log.Debug("get empty batch, the prover already failed this task", "height", getTaskParameter.ProverHeight)
return nil, nil
}
}
rowsAffected, updateAttemptsErr := bp.batchOrm.UpdateBatchAttempts(ctx.Copy(), tmpBatchTask.Index, tmpBatchTask.ActiveAttempts, tmpBatchTask.TotalAttempts)
if updateAttemptsErr != nil {
log.Error("failed to update batch attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)
@@ -232,7 +259,7 @@ func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.Prove
}
func (bp *BatchProverTask) recoverActiveAttempts(ctx *gin.Context, batchTask *orm.Batch) {
if err := bp.chunkOrm.DecreaseActiveAttemptsByHash(ctx.Copy(), batchTask.Hash); err != nil {
if err := bp.batchOrm.DecreaseActiveAttemptsByHash(ctx.Copy(), batchTask.Hash); err != nil {
log.Error("failed to recover batch active attempts", "hash", batchTask.Hash, "error", err)
}
}
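The same two-step gate is added to the batch, bundle, and chunk handlers below: an external prover only receives work once the unassigned backlog reaches ExternalProverThreshold, and a task is never re-dispatched to a prover (or, for external provers, to another prover from the same provider) that already failed it. A condensed, runnable sketch of that decision with simplified types standing in for the ORM structs:

package main

import "fmt"

type failedTask struct{ ProverPublicKey, ProverName string }

// shouldAssign condenses the gating logic above; isSameExternalProvider stands in
// for cutils.IsExternalProverNameMatch.
func shouldAssign(isExternal bool, unassigned, threshold int64,
	failed []failedTask, pubKey, proverName string,
	isSameExternalProvider func(a, b string) bool) bool {

	// External provers only help once the internal backlog is large enough.
	if isExternal && unassigned < threshold {
		return false
	}
	// Never hand a task back to a prover that already failed it.
	for _, ft := range failed {
		if ft.ProverPublicKey == pubKey ||
			(isExternal && isSameExternalProvider(ft.ProverName, proverName)) {
			return false
		}
	}
	return true
}

func main() {
	same := func(a, b string) bool { return a == b } // simplified matcher for the example
	fmt.Println(shouldAssign(true, 10, 32, nil, "pk", "cloud_prover_x_1", same))                                      // false: backlog below threshold
	fmt.Println(shouldAssign(true, 40, 32, []failedTask{{"pk", "cloud_prover_x_1"}}, "pk", "cloud_prover_x_1", same)) // false: already failed this task
	fmt.Println(shouldAssign(false, 0, 32, nil, "pk2", "internal-1", same))                                           // true: internal prover, no prior failure
}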

View File

@@ -21,6 +21,7 @@ import (
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/orm"
coordinatorType "scroll-tech/coordinator/internal/types"
cutils "scroll-tech/coordinator/internal/utils"
)
// BundleProverTask is prover task implement for bundle proof
@@ -63,6 +64,18 @@ func (bp *BundleProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinat
maxActiveAttempts := bp.cfg.ProverManager.ProversPerSession
maxTotalAttempts := bp.cfg.ProverManager.SessionAttempts
if taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) {
unassignedBundleCount, getCountError := bp.bundleOrm.GetUnassignedBundleCount(ctx.Copy(), maxActiveAttempts, maxTotalAttempts)
if getCountError != nil {
log.Error("failed to get unassigned bundle proving tasks count", "height", getTaskParameter.ProverHeight, "err", getCountError)
return nil, ErrCoordinatorInternalFailure
}
// Assign external prover if unassigned task number exceeds threshold
if unassignedBundleCount < bp.cfg.ProverManager.ExternalProverThreshold {
return nil, nil
}
}
var bundleTask *orm.Bundle
for i := 0; i < 5; i++ {
var getTaskError error
@@ -88,6 +101,20 @@ func (bp *BundleProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinat
return nil, nil
}
// Don't dispatch the same failing job to the same prover
proverTasks, getTaskError := bp.proverTaskOrm.GetFailedProverTasksByHash(ctx.Copy(), message.ProofTypeBundle, tmpBundleTask.Hash, 2)
if getTaskError != nil {
log.Error("failed to get prover tasks", "proof type", message.ProofTypeBundle.String(), "task ID", tmpBundleTask.Hash, "error", getTaskError)
return nil, ErrCoordinatorInternalFailure
}
for i := 0; i < len(proverTasks); i++ {
if proverTasks[i].ProverPublicKey == taskCtx.PublicKey ||
taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) && cutils.IsExternalProverNameMatch(proverTasks[i].ProverName, taskCtx.ProverName) {
log.Debug("get empty bundle, the prover already failed this task", "height", getTaskParameter.ProverHeight)
return nil, nil
}
}
rowsAffected, updateAttemptsErr := bp.bundleOrm.UpdateBundleAttempts(ctx.Copy(), tmpBundleTask.Hash, tmpBundleTask.ActiveAttempts, tmpBundleTask.TotalAttempts)
if updateAttemptsErr != nil {
log.Error("failed to update bundle attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)

View File

@@ -21,6 +21,7 @@ import (
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/orm"
coordinatorType "scroll-tech/coordinator/internal/types"
cutils "scroll-tech/coordinator/internal/utils"
)
// ChunkProverTask the chunk prover task
@@ -61,6 +62,18 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
maxActiveAttempts := cp.cfg.ProverManager.ProversPerSession
maxTotalAttempts := cp.cfg.ProverManager.SessionAttempts
if taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) {
unassignedChunkCount, getCountError := cp.chunkOrm.GetUnassignedChunkCount(ctx.Copy(), maxActiveAttempts, maxTotalAttempts, getTaskParameter.ProverHeight)
if getCountError != nil {
log.Error("failed to get unassigned chunk proving tasks count", "height", getTaskParameter.ProverHeight, "err", getCountError)
return nil, ErrCoordinatorInternalFailure
}
// Assign external prover if unassigned task number exceeds threshold
if unassignedChunkCount < cp.cfg.ProverManager.ExternalProverThreshold {
return nil, nil
}
}
var chunkTask *orm.Chunk
for i := 0; i < 5; i++ {
var getTaskError error
@@ -86,6 +99,20 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
return nil, nil
}
// Don't dispatch the same failing job to the same prover
proverTasks, getTaskError := cp.proverTaskOrm.GetFailedProverTasksByHash(ctx.Copy(), message.ProofTypeChunk, tmpChunkTask.Hash, 2)
if getTaskError != nil {
log.Error("failed to get prover tasks", "proof type", message.ProofTypeChunk.String(), "task ID", tmpChunkTask.Hash, "error", getTaskError)
return nil, ErrCoordinatorInternalFailure
}
for i := 0; i < len(proverTasks); i++ {
if proverTasks[i].ProverPublicKey == taskCtx.PublicKey ||
taskCtx.ProverProviderType == uint8(coordinatorType.ProverProviderTypeExternal) && cutils.IsExternalProverNameMatch(proverTasks[i].ProverName, taskCtx.ProverName) {
log.Debug("get empty chunk, the prover already failed this task", "height", getTaskParameter.ProverHeight)
return nil, nil
}
}
rowsAffected, updateAttemptsErr := cp.chunkOrm.UpdateChunkAttempts(ctx.Copy(), tmpChunkTask.Index, tmpChunkTask.ActiveAttempts, tmpChunkTask.TotalAttempts)
if updateAttemptsErr != nil {
log.Error("failed to update chunk attempts", "height", getTaskParameter.ProverHeight, "err", updateAttemptsErr)

View File

@@ -47,10 +47,11 @@ type BaseProverTask struct {
}
type proverTaskContext struct {
PublicKey string
ProverName string
ProverVersion string
HardForkNames map[string]struct{}
PublicKey string
ProverName string
ProverVersion string
ProverProviderType uint8
HardForkNames map[string]struct{}
}
// checkParameter check the prover task parameter illegal
@@ -76,6 +77,12 @@ func (b *BaseProverTask) checkParameter(ctx *gin.Context) (*proverTaskContext, e
}
ptc.ProverVersion = proverVersion.(string)
ProverProviderType, ProverProviderTypeExist := ctx.Get(coordinatorType.ProverProviderTypeKey)
if !ProverProviderTypeExist {
return nil, errors.New("get prover provider type from context failed")
}
ptc.ProverProviderType = uint8(ProverProviderType.(float64))
hardForkNamesStr, hardForkNameExist := ctx.Get(coordinatorType.HardForkName)
if !hardForkNameExist {
return nil, errors.New("get hard fork name from context failed")

View File

@@ -95,6 +95,22 @@ func (o *Batch) GetUnassignedBatch(ctx context.Context, maxActiveAttempts, maxTo
return &batch, nil
}
// GetUnassignedBatchCount retrieves unassigned batch count.
func (o *Batch) GetUnassignedBatchCount(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (int64, error) {
var count int64
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("proving_status = ?", int(types.ProvingTaskUnassigned))
db = db.Where("total_attempts < ?", maxTotalAttempts)
db = db.Where("active_attempts < ?", maxActiveAttempts)
db = db.Where("chunk_proofs_status = ?", int(types.ChunkProofsStatusReady))
db = db.Where("batch.deleted_at IS NULL")
if err := db.Count(&count).Error; err != nil {
return 0, fmt.Errorf("Batch.GetUnassignedBatchCount error: %w", err)
}
return count, nil
}
// GetAssignedBatch retrieves assigned batch based on the specified limit.
// The returned batches are sorted in ascending order by their index.
func (o *Batch) GetAssignedBatch(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (*Batch, error) {

View File

@@ -71,6 +71,22 @@ func (o *Bundle) GetUnassignedBundle(ctx context.Context, maxActiveAttempts, max
return &bundle, nil
}
// GetUnassignedBundleCount retrieves unassigned bundle count.
func (o *Bundle) GetUnassignedBundleCount(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (int64, error) {
var count int64
db := o.db.WithContext(ctx)
db = db.Model(&Bundle{})
db = db.Where("proving_status = ?", int(types.ProvingTaskUnassigned))
db = db.Where("total_attempts < ?", maxTotalAttempts)
db = db.Where("active_attempts < ?", maxActiveAttempts)
db = db.Where("batch_proofs_status = ?", int(types.BatchProofsStatusReady))
db = db.Where("bundle.deleted_at IS NULL")
if err := db.Count(&count).Error; err != nil {
return 0, fmt.Errorf("Bundle.GetUnassignedBundleCount error: %w", err)
}
return count, nil
}
// GetAssignedBundle retrieves assigned bundle based on the specified limit.
// The returned bundle sorts in ascending order by their index.
func (o *Bundle) GetAssignedBundle(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8) (*Bundle, error) {

View File

@@ -88,6 +88,22 @@ func (o *Chunk) GetUnassignedChunk(ctx context.Context, maxActiveAttempts, maxTo
return &chunk, nil
}
// GetUnassignedChunkCount retrieves unassigned chunk count.
func (o *Chunk) GetUnassignedChunkCount(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8, height uint64) (int64, error) {
var count int64
db := o.db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("proving_status = ?", int(types.ProvingTaskUnassigned))
db = db.Where("total_attempts < ?", maxTotalAttempts)
db = db.Where("active_attempts < ?", maxActiveAttempts)
db = db.Where("end_block_number <= ?", height)
db = db.Where("chunk.deleted_at IS NULL")
if err := db.Count(&count).Error; err != nil {
return 0, fmt.Errorf("Chunk.GetUnassignedChunkCount error: %w", err)
}
return count, nil
}
// GetAssignedChunk retrieves assigned chunk based on the specified limit.
// The returned chunks are sorted in ascending order by their index.
func (o *Chunk) GetAssignedChunk(ctx context.Context, maxActiveAttempts, maxTotalAttempts uint8, height uint64) (*Chunk, error) {

View File

@@ -2,6 +2,7 @@ package orm
import (
"context"
"errors"
"fmt"
"time"
@@ -65,7 +66,7 @@ func (p *ProverBlockList) IsPublicKeyBlocked(ctx context.Context, publicKey stri
db = db.Model(&ProverBlockList{})
db = db.Where("public_key = ?", publicKey)
if err := db.First(&ProverBlockList{}).Error; err != nil {
if err == gorm.ErrRecordNotFound {
if errors.Is(err, gorm.ErrRecordNotFound) {
return false, nil // Public key not found, hence it's not blocked.
}
return true, fmt.Errorf("ProverBlockList.IsPublicKeyBlocked error: %w, public key: %v", err, publicKey)

View File

@@ -2,6 +2,7 @@ package orm
import (
"context"
"errors"
"fmt"
"time"
@@ -60,7 +61,7 @@ func (o *ProverTask) IsProverAssigned(ctx context.Context, publicKey string) (bo
var task ProverTask
err := db.Where("prover_public_key = ? AND proving_status = ?", publicKey, types.ProverAssigned).First(&task).Error
if err != nil {
if err == gorm.ErrRecordNotFound {
if errors.Is(err, gorm.ErrRecordNotFound) {
return false, nil
}
return false, err
@@ -116,6 +117,27 @@ func (o *ProverTask) GetProverTasksByHashes(ctx context.Context, taskType messag
return proverTasks, nil
}
// GetFailedProverTasksByHash retrieves the failed ProverTask records associated with the specified hash.
// The returned prover task objects are sorted in descending order by their ids.
func (o *ProverTask) GetFailedProverTasksByHash(ctx context.Context, taskType message.ProofType, hash string, limit int) ([]*ProverTask, error) {
db := o.db.WithContext(ctx)
db = db.Model(&ProverTask{})
db = db.Where("task_type", int(taskType))
db = db.Where("task_id", hash)
db = db.Where("proving_status = ?", int(types.ProverProofInvalid))
db = db.Order("id desc")
if limit != 0 {
db = db.Limit(limit)
}
var proverTasks []*ProverTask
if err := db.Find(&proverTasks).Error; err != nil {
return nil, fmt.Errorf("ProverTask.GetFailedProverTasksByHash error: %w, hash: %v", err, hash)
}
return proverTasks, nil
}
// GetProverTaskByUUIDAndPublicKey get prover task taskID by uuid and public key
func (o *ProverTask) GetProverTaskByUUIDAndPublicKey(ctx context.Context, uuid, publicKey string) (*ProverTask, error) {
db := o.db.WithContext(ctx)

View File

@@ -18,6 +18,8 @@ const (
ProverName = "prover_name"
// ProverVersion the prover version for context
ProverVersion = "prover_version"
// ProverProviderTypeKey the prover provider type for context
ProverProviderTypeKey = "prover_provider_type"
// HardForkName the hard fork name for context
HardForkName = "hard_fork_name"
)
@@ -28,13 +30,22 @@ type LoginSchema struct {
Token string `json:"token"`
}
type MessageWithoutProverProviderType struct {
Challenge string `json:"challenge"`
ProverVersion string `json:"prover_version"`
ProverName string `json:"prover_name"`
ProverTypes []ProverType `json:"prover_types"`
VKs []string `json:"vks"`
}
// Message the login message struct
type Message struct {
Challenge string `form:"challenge" json:"challenge" binding:"required"`
ProverVersion string `form:"prover_version" json:"prover_version" binding:"required"`
ProverName string `form:"prover_name" json:"prover_name" binding:"required"`
ProverTypes []ProverType `form:"prover_types" json:"prover_types"`
VKs []string `form:"vks" json:"vks"`
Challenge string `form:"challenge" json:"challenge" binding:"required"`
ProverVersion string `form:"prover_version" json:"prover_version" binding:"required"`
ProverName string `form:"prover_name" json:"prover_name" binding:"required"`
ProverProviderType ProverProviderType `form:"prover_provider_type" json:"prover_provider_type,omitempty"`
ProverTypes []ProverType `form:"prover_types" json:"prover_types"`
VKs []string `form:"vks" json:"vks"`
}
// LoginParameterWithHardForkName constructs new payload for login
@@ -53,7 +64,7 @@ type LoginParameter struct {
// SignWithKey auth message with private key and set public key in auth message's Identity
func (a *LoginParameter) SignWithKey(priv *ecdsa.PrivateKey) error {
// Hash identity content
hash, err := a.Message.Hash()
hash, err := Hash(a.Message)
if err != nil {
return err
}
@@ -70,7 +81,14 @@ func (a *LoginParameter) SignWithKey(priv *ecdsa.PrivateKey) error {
// Verify verifies the message of auth.
func (a *LoginParameter) Verify() (bool, error) {
hash, err := a.Message.Hash()
var hash []byte
var err error
if a.Message.ProverProviderType == ProverProviderTypeUndefined {
// for backward compatibility, calculate hash without ProverProviderType
hash, err = Hash(a.Message.ToMessageWithoutProverProviderType())
} else {
hash, err = Hash(a.Message)
}
if err != nil {
return false, err
}
@@ -85,15 +103,14 @@ func (a *LoginParameter) Verify() (bool, error) {
return isValid, nil
}
// Hash returns the hash of the auth message, which should be the message used
// to construct the Signature.
func (i *Message) Hash() ([]byte, error) {
byt, err := rlp.EncodeToBytes(i)
if err != nil {
return nil, err
func (m *Message) ToMessageWithoutProverProviderType() MessageWithoutProverProviderType {
return MessageWithoutProverProviderType{
Challenge: m.Challenge,
ProverVersion: m.ProverVersion,
ProverName: m.ProverName,
ProverTypes: m.ProverTypes,
VKs: m.VKs,
}
hash := crypto.Keccak256Hash(byt)
return hash[:], nil
}
// DecodeAndUnmarshalPubkey decodes a hex-encoded public key and unmarshal it into an ecdsa.PublicKey
@@ -111,3 +128,14 @@ func (i *Message) DecodeAndUnmarshalPubkey(pubKeyHex string) (*ecdsa.PublicKey,
}
return pubKey, nil
}
// Hash returns the hash of the auth message, which should be the message used
// to construct the Signature.
func Hash(i interface{}) ([]byte, error) {
byt, err := rlp.EncodeToBytes(i)
if err != nil {
return nil, err
}
hash := crypto.Keccak256Hash(byt)
return hash[:], nil
}
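Because the login message is RLP-encoded and keccak-hashed before signing, adding prover_provider_type changes the signed payload; old provers signed the short form, so Verify falls back to MessageWithoutProverProviderType when the field is undefined. A sketch of why the two payloads diverge, using upstream go-ethereum's rlp and crypto packages for illustration (the repo uses its own fork) and trimmed local structs standing in for Message:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/rlp"
)

// Local stand-ins: the coordinator RLP-encodes the login message and keccak-hashes it.
type withProvider struct {
	Challenge, ProverVersion, ProverName string
	ProverProviderType                   uint8
}
type withoutProvider struct {
	Challenge, ProverVersion, ProverName string
}

func hash(v interface{}) []byte {
	byt, err := rlp.EncodeToBytes(v)
	if err != nil {
		panic(err)
	}
	h := crypto.Keccak256Hash(byt)
	return h[:]
}

func main() {
	a := hash(withProvider{"chal", "v1", "p1", 1})
	b := hash(withoutProvider{"chal", "v1", "p1"})
	// Different payloads → different hashes, hence the backward-compatibility branch in Verify.
	fmt.Println(fmt.Sprintf("%x", a) == fmt.Sprintf("%x", b)) // false
}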

View File

@@ -18,11 +18,12 @@ func TestAuthMessageSignAndVerify(t *testing.T) {
t.Run("sign", func(t *testing.T) {
authMsg = LoginParameter{
Message: Message{
ProverName: "test1",
ProverVersion: "v0.0.1",
Challenge: "abcdef",
ProverTypes: []ProverType{ProverTypeBatch},
VKs: []string{"vk1", "vk2"},
ProverName: "test1",
ProverVersion: "v0.0.1",
Challenge: "abcdef",
ProverProviderType: ProverProviderTypeInternal,
ProverTypes: []ProverType{ProverTypeBatch},
VKs: []string{"vk1", "vk2"},
},
PublicKey: publicKeyHex,
}
@@ -59,11 +60,12 @@ func TestGenerateSignature(t *testing.T) {
authMsg := LoginParameter{
Message: Message{
ProverName: "test",
ProverVersion: "v4.4.45-37af5ef5-38a68e2-1c5093c",
Challenge: "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE3MjQ4Mzg0ODUsIm9yaWdfaWF0IjoxNzI0ODM0ODg1LCJyYW5kb20iOiJ6QmdNZGstNGc4UzNUNTFrVEFsYk1RTXg2TGJ4SUs4czY3ejM2SlNuSFlJPSJ9.x9PvihhNx2w4_OX5uCrv8QJCNYVQkIi-K2k8XFXYmik",
ProverTypes: []ProverType{ProverTypeChunk},
VKs: []string{"mock_vk"},
ProverName: "test",
ProverVersion: "v4.4.45-37af5ef5-38a68e2-1c5093c",
Challenge: "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE3MjQ4Mzg0ODUsIm9yaWdfaWF0IjoxNzI0ODM0ODg1LCJyYW5kb20iOiJ6QmdNZGstNGc4UzNUNTFrVEFsYk1RTXg2TGJ4SUs4czY3ejM2SlNuSFlJPSJ9.x9PvihhNx2w4_OX5uCrv8QJCNYVQkIi-K2k8XFXYmik",
ProverProviderType: ProverProviderTypeInternal,
ProverTypes: []ProverType{ProverTypeChunk},
VKs: []string{"mock_vk"},
},
PublicKey: publicKeyHex,
}

View File

@@ -40,3 +40,26 @@ func MakeProverType(proofType message.ProofType) ProverType {
return ProverTypeUndefined
}
}
// ProverProviderType represents the type of prover provider.
type ProverProviderType uint8
func (r ProverProviderType) String() string {
switch r {
case ProverProviderTypeInternal:
return "prover provider type internal"
case ProverProviderTypeExternal:
return "prover provider type external"
default:
return fmt.Sprintf("prover provider type: %d", r)
}
}
const (
// ProverProviderTypeUndefined is an unknown prover provider type
ProverProviderTypeUndefined ProverProviderType = iota
// ProverProviderTypeInternal is an internal prover provider type
ProverProviderTypeInternal
// ProverProviderTypeExternal is an external prover provider type
ProverProviderTypeExternal
)

View File

@@ -0,0 +1,17 @@
package utils
import "strings"
// IsExternalProverNameMatch checks if the local and remote external prover names belong to the same provider.
// It returns true if they do, otherwise false.
func IsExternalProverNameMatch(localName, remoteName string) bool {
local := strings.Split(localName, "_")
remote := strings.Split(remoteName, "_")
if len(local) < 3 || len(remote) < 3 {
return false
}
// note the name of cloud prover is in the format of "cloud_prover_{provider-name}_index"
return local[0] == remote[0] && local[1] == remote[1] && local[2] == remote[2]
}
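Given the cloud_prover_{provider-name}_index naming noted in the comment, the helper treats two external provers as the same provider when their first three underscore-separated segments match. A quick usage sketch (provider names are illustrative), with the same logic inlined so it runs standalone:

package main

import (
	"fmt"
	"strings"
)

// isExternalProverNameMatch repeats the helper above for a self-contained example.
func isExternalProverNameMatch(localName, remoteName string) bool {
	local := strings.Split(localName, "_")
	remote := strings.Split(remoteName, "_")
	if len(local) < 3 || len(remote) < 3 {
		return false
	}
	return local[0] == remote[0] && local[1] == remote[1] && local[2] == remote[2]
}

func main() {
	fmt.Println(isExternalProverNameMatch("cloud_prover_acme_1", "cloud_prover_acme_7"))  // true: same provider
	fmt.Println(isExternalProverNameMatch("cloud_prover_acme_1", "cloud_prover_other_2")) // false: different provider
	fmt.Println(isExternalProverNameMatch("internal-1", "cloud_prover_acme_1"))           // false: not the expected format
}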

View File

@@ -79,11 +79,12 @@ func (r *mockProver) challenge(t *testing.T) string {
func (r *mockProver) login(t *testing.T, challengeString string, proverTypes []types.ProverType) (string, int, string) {
authMsg := types.LoginParameter{
Message: types.Message{
Challenge: challengeString,
ProverName: r.proverName,
ProverVersion: r.proverVersion,
ProverTypes: proverTypes,
VKs: []string{"mock_vk"},
Challenge: challengeString,
ProverName: r.proverName,
ProverVersion: r.proverVersion,
ProverProviderType: types.ProverProviderTypeInternal,
ProverTypes: proverTypes,
VKs: []string{"mock_vk"},
},
PublicKey: r.publicKey(),
}

View File

@@ -448,8 +448,6 @@ github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdko
github.com/Shopify/goreferrer v0.0.0-20220729165902-8cddb4f5de06/go.mod h1:7erjKLwalezA0k99cWs5L11HWOAPNjdUZ6RxH1BXbbM=
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8=
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI=
github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI=
github.com/aclements/go-moremath v0.0.0-20210112150236-f10218a38794/go.mod h1:7e+I0LQFUI9AXWxOfsQROs9xPhoJtbsyWcjJqDd4KPY=
github.com/aead/siphash v1.0.1 h1:FwHfE/T45KPKYuuSAKyyvE+oPWcaQ+CUmFW0bPlM+kg=
github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo=
@@ -862,6 +860,7 @@ github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b
github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/hcl/v2 v2.19.1 h1://i05Jqznmb2EXqa39Nsvyan2o5XyMowW5fnCKW5RPI=
github.com/hashicorp/hcl/v2 v2.19.1/go.mod h1:ThLC89FV4p9MPW804KVbe/cEXoQ8NZEh+JtMeeGErHE=
@@ -1142,8 +1141,6 @@ github.com/scroll-tech/da-codec v0.1.1-0.20241014152913-2703f226fb0b/go.mod h1:4
github.com/scroll-tech/go-ethereum v1.10.14-0.20240607130425-e2becce6a1a4/go.mod h1:byf/mZ8jLYUCnUePTicjJWn+RvKdxDn7buS6glTnMwQ=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240821074444-b3fa00861e5e/go.mod h1:swB5NSp8pKNDuYsTxfR08bHS6L56i119PBx8fxvV8Cs=
github.com/scroll-tech/go-ethereum v1.10.14-0.20241010064814-3d88e870ae22/go.mod h1:r9FwtxCtybMkTbWYCyBuevT9TW3zHmOTHqD082Uh+Oo=
github.com/scroll-tech/go-ethereum v1.10.14-0.20241023093931-91c2f9c27f4d h1:vuv7fGKEDtoeetI6RkKt8RAByJsYZBWk9Vo6gShv65c=
github.com/scroll-tech/go-ethereum v1.10.14-0.20241023093931-91c2f9c27f4d/go.mod h1:PWEOTg6LeWlJAlFJauO0msSLXWnpHmE+mVh5txtfeRM=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
github.com/segmentio/kafka-go v0.2.0 h1:HtCSf6B4gN/87yc5qTl7WsxPKQIIGXLPPM1bMCPOsoY=
@@ -1439,7 +1436,6 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=

View File

@@ -251,14 +251,14 @@ func (r *Layer1Relayer) shouldUpdateGasOracle(baseFee uint64, blobBaseFee uint64
}
expectedBaseFeeDelta := r.lastBaseFee*r.gasPriceDiff/gasPriceDiffPrecision + 1
if baseFee >= r.minGasPrice && (baseFee >= r.lastBaseFee+expectedBaseFeeDelta || baseFee+expectedBaseFeeDelta <= r.lastBaseFee) {
if baseFee >= r.minGasPrice && math.Abs(float64(baseFee)-float64(r.lastBaseFee)) >= float64(expectedBaseFeeDelta) {
return true
}
expectedBlobBaseFeeDelta := r.lastBlobBaseFee * r.gasPriceDiff / gasPriceDiffPrecision
// Plus a minimum of 0.01 gwei, since the blob base fee is usually low, preventing short-time flunctuation.
expectedBlobBaseFeeDelta += 10000000
if blobBaseFee >= r.minGasPrice && (blobBaseFee >= r.lastBlobBaseFee+expectedBlobBaseFeeDelta || blobBaseFee+expectedBlobBaseFeeDelta <= r.lastBlobBaseFee) {
if blobBaseFee >= r.minGasPrice && math.Abs(float64(blobBaseFee)-float64(r.lastBlobBaseFee)) >= float64(expectedBlobBaseFeeDelta) {
return true
}
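The two-sided comparison and the math.Abs form both express "the fee moved by at least expectedDelta"; for realistic fee values the conversion to float64 is exact (well below the 2^53 integer limit). A small sketch comparing the two, plus an integer-only absDiff that avoids the float conversion entirely:

package main

import (
	"fmt"
	"math"
)

// absDiff is an integer-only alternative to the math.Abs(float64(...)) form above.
func absDiff(a, b uint64) uint64 {
	if a > b {
		return a - b
	}
	return b - a
}

func main() {
	var baseFee, lastBaseFee, expectedDelta uint64 = 30_000_000_000, 28_000_000_000, 1_500_000_000

	twoSided := baseFee >= lastBaseFee+expectedDelta || baseFee+expectedDelta <= lastBaseFee
	absForm := math.Abs(float64(baseFee)-float64(lastBaseFee)) >= float64(expectedDelta)

	fmt.Println(twoSided, absForm, absDiff(baseFee, lastBaseFee) >= expectedDelta) // true true true
}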

View File

@@ -154,7 +154,7 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
}
// chain_monitor client
if cfg.ChainMonitor.Enabled {
if serviceType == ServiceTypeL2RollupRelayer && cfg.ChainMonitor.Enabled {
layer2Relayer.chainMonitorClient = resty.New()
layer2Relayer.chainMonitorClient.SetRetryCount(cfg.ChainMonitor.TryTimes)
layer2Relayer.chainMonitorClient.SetTimeout(time.Duration(cfg.ChainMonitor.TimeOut) * time.Second)
@@ -344,8 +344,9 @@ func (r *Layer2Relayer) ProcessGasPriceOracle() {
expectedDelta = 1
}
// last is undefine or (suggestGasPriceUint64 >= minGasPrice && exceed diff)
if r.lastGasPrice == 0 || (suggestGasPriceUint64 >= r.minGasPrice && (suggestGasPriceUint64 >= r.lastGasPrice+expectedDelta || suggestGasPriceUint64+expectedDelta <= r.lastGasPrice)) {
// last is undefined or (suggestGasPriceUint64 >= minGasPrice && exceed diff)
if r.lastGasPrice == 0 || (suggestGasPriceUint64 >= r.minGasPrice &&
(math.Abs(float64(suggestGasPriceUint64)-float64(r.lastGasPrice)) >= float64(expectedDelta))) {
data, err := r.l2GasOracleABI.Pack("setL2BaseFee", suggestGasPrice)
if err != nil {
log.Error("Failed to pack setL2BaseFee", "batch.Hash", batch.Hash, "GasPrice", suggestGasPrice.Uint64(), "err", err)
@@ -529,7 +530,7 @@ func (r *Layer2Relayer) ProcessPendingBundles() {
}
case types.ProvingTaskVerified:
log.Info("Start to roll up zk proof", "bundle hash", bundle.Hash)
log.Info("Start to roll up zk proof", "index", bundle.Index, "bundle hash", bundle.Hash)
r.metrics.rollupL2RelayerProcessPendingBundlesFinalizedTotal.Inc()
if err := r.finalizeBundle(bundle, true); err != nil {
log.Error("failed to finalize bundle with proof", "bundle index", bundle.Index, "start batch index", bundle.StartBatchIndex, "end batch index", bundle.EndBatchIndex, "err", err)
@@ -636,8 +637,22 @@ func (r *Layer2Relayer) finalizeBundle(bundle *orm.Bundle, withProof bool) error
log.Info("finalizeBundle in layer1", "with proof", withProof, "index", bundle.Index, "start batch index", bundle.StartBatchIndex, "end batch index", bundle.EndBatchIndex, "tx hash", txHash.String())
// Updating rollup status in database.
if err := r.bundleOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, bundle.Hash, txHash.String(), types.RollupFinalizing); err != nil {
log.Error("UpdateFinalizeTxHashAndRollupStatus failed", "index", bundle.Index, "bundle hash", bundle.Hash, "tx hash", txHash.String(), "err", err)
err = r.db.Transaction(func(dbTX *gorm.DB) error {
if err = r.batchOrm.UpdateFinalizeTxHashAndRollupStatusByBundleHash(r.ctx, bundle.Hash, txHash.String(), types.RollupFinalizing, dbTX); err != nil {
log.Warn("UpdateFinalizeTxHashAndRollupStatusByBundleHash failed", "index", bundle.Index, "bundle hash", bundle.Hash, "tx hash", txHash.String(), "err", err)
return err
}
if err = r.bundleOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, bundle.Hash, txHash.String(), types.RollupFinalizing, dbTX); err != nil {
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "index", bundle.Index, "bundle hash", bundle.Hash, "tx hash", txHash.String(), "err", err)
return err
}
return nil
})
if err != nil {
log.Warn("failed to update rollup status of bundle and batches", "err", err)
return err
}

View File

@@ -216,7 +216,7 @@ func (s *Sender) SendTransaction(contextID string, target *common.Address, data
return common.Hash{}, fmt.Errorf("failed to get fee data, err: %w", err)
}
signedTx, err := s.createTx(feeData, target, data, sidecar, nil)
signedTx, err := s.createTx(feeData, target, data, sidecar, s.transactionSigner.GetNonce())
if err != nil {
s.metrics.sendTransactionFailureSendTx.WithLabelValues(s.service, s.name).Inc()
log.Error("failed to create signed tx (non-resubmit case)", "from", s.transactionSigner.GetAddr().String(), "nonce", s.transactionSigner.GetNonce(), "err", err)
@@ -247,19 +247,13 @@ func (s *Sender) SendTransaction(contextID string, target *common.Address, data
return common.Hash{}, fmt.Errorf("failed to send transaction, err: %w", err)
}
s.transactionSigner.SetNonce(signedTx.Nonce() + 1)
return signedTx.Hash(), nil
}
func (s *Sender) createTx(feeData *FeeData, target *common.Address, data []byte, sidecar *gethTypes.BlobTxSidecar, overrideNonce *uint64) (*gethTypes.Transaction, error) {
var (
nonce = s.transactionSigner.GetNonce()
txData gethTypes.TxData
)
// this is a resubmit call, override the nonce
if overrideNonce != nil {
nonce = *overrideNonce
}
func (s *Sender) createTx(feeData *FeeData, target *common.Address, data []byte, sidecar *gethTypes.BlobTxSidecar, nonce uint64) (*gethTypes.Transaction, error) {
var txData gethTypes.TxData
switch s.config.TxType {
case LegacyTxType:
@@ -312,11 +306,6 @@ func (s *Sender) createTx(feeData *FeeData, target *common.Address, data []byte,
return nil, err
}
// update nonce when it is not from resubmit
if overrideNonce == nil {
s.transactionSigner.SetNonce(nonce + 1)
}
if feeData.gasTipCap != nil {
s.metrics.currentGasTipCap.WithLabelValues(s.service, s.name).Set(float64(feeData.gasTipCap.Uint64()))
}
@@ -492,7 +481,7 @@ func (s *Sender) createReplacingTransaction(tx *gethTypes.Transaction, baseFee,
nonce := tx.Nonce()
s.metrics.resubmitTransactionTotal.WithLabelValues(s.service, s.name).Inc()
signedTx, err := s.createTx(&feeData, tx.To(), tx.Data(), tx.BlobTxSidecar(), &nonce)
signedTx, err := s.createTx(&feeData, tx.To(), tx.Data(), tx.BlobTxSidecar(), nonce)
if err != nil {
log.Error("failed to create signed tx (resubmit case)", "from", s.transactionSigner.GetAddr().String(), "nonce", nonce, "err", err)
return nil, err
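The nonce fix shown here moves the SetNonce(signedTx.Nonce()+1) call out of createTx and runs it only after the broadcast succeeds, so a failed send no longer leaves the cached nonce ahead of the chain. A condensed sketch of that ordering with stand-in types (not the sender's actual API):

package main

import (
	"errors"
	"fmt"
)

type signer struct{ nonce uint64 }

func (s *signer) GetNonce() uint64  { return s.nonce }
func (s *signer) SetNonce(n uint64) { s.nonce = n }

// send mirrors the fixed flow: build with an explicit nonce, bump the cached
// nonce only after the broadcast succeeds.
func send(s *signer, broadcast func(nonce uint64) error) error {
	nonce := s.GetNonce()
	if err := broadcast(nonce); err != nil {
		return err // cached nonce untouched, so the next attempt reuses it
	}
	s.SetNonce(nonce + 1)
	return nil
}

func main() {
	s := &signer{nonce: 7}
	_ = send(s, func(uint64) error { return errors.New("rpc unavailable") })
	fmt.Println(s.GetNonce()) // 7 — a failed send no longer skips a nonce
	_ = send(s, func(uint64) error { return nil })
	fmt.Println(s.GetNonce()) // 8
}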

View File

@@ -282,7 +282,7 @@ func testResubmitZeroGasPriceTransaction(t *testing.T) {
gasFeeCap: big.NewInt(0),
gasLimit: 50000,
}
tx, err := s.createTx(feeData, &common.Address{}, nil, nil, nil)
tx, err := s.createTx(feeData, &common.Address{}, nil, nil, s.transactionSigner.GetNonce())
assert.NoError(t, err)
assert.NotNil(t, tx)
err = s.client.SendTransaction(s.ctx, tx)
@@ -373,7 +373,7 @@ func testResubmitNonZeroGasPriceTransaction(t *testing.T) {
sidecar, err = makeSidecar(txBlob[i])
assert.NoError(t, err)
}
tx, err := s.createTx(feeData, &common.Address{}, nil, sidecar, nil)
tx, err := s.createTx(feeData, &common.Address{}, nil, sidecar, s.transactionSigner.GetNonce())
assert.NoError(t, err)
assert.NotNil(t, tx)
err = s.client.SendTransaction(s.ctx, tx)
@@ -420,7 +420,7 @@ func testResubmitUnderpricedTransaction(t *testing.T) {
gasFeeCap: big.NewInt(1000000000),
gasLimit: 50000,
}
tx, err := s.createTx(feeData, &common.Address{}, nil, nil, nil)
tx, err := s.createTx(feeData, &common.Address{}, nil, nil, s.transactionSigner.GetNonce())
assert.NoError(t, err)
assert.NotNil(t, tx)
err = s.client.SendTransaction(s.ctx, tx)

View File

@@ -295,6 +295,7 @@ func (p *ChunkProposer) proposeChunk() error {
}
var chunk encoding.Chunk
chunk.Blocks = make([]*encoding.Block, 0, len(blocks))
for i, block := range blocks {
chunk.Blocks = append(chunk.Blocks, block)

View File

@@ -347,11 +347,11 @@ func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status typ
switch status {
case types.ProvingTaskAssigned:
updateFields["prover_assigned_at"] = time.Now()
updateFields["prover_assigned_at"] = utils.NowUTC()
case types.ProvingTaskUnassigned:
updateFields["prover_assigned_at"] = nil
case types.ProvingTaskVerified:
updateFields["proved_at"] = time.Now()
updateFields["proved_at"] = utils.NowUTC()
}
db := o.db
@@ -419,7 +419,7 @@ func (o *Batch) UpdateFinalizeTxHashAndRollupStatus(ctx context.Context, hash st
updateFields["finalize_tx_hash"] = finalizeTxHash
updateFields["rollup_status"] = int(status)
if status == types.RollupFinalized {
updateFields["finalized_at"] = time.Now()
updateFields["finalized_at"] = utils.NowUTC()
}
db := o.db.WithContext(ctx)
@@ -478,11 +478,11 @@ func (o *Batch) UpdateProvingStatusByBundleHash(ctx context.Context, bundleHash
switch status {
case types.ProvingTaskAssigned:
updateFields["prover_assigned_at"] = time.Now()
updateFields["prover_assigned_at"] = utils.NowUTC()
case types.ProvingTaskUnassigned:
updateFields["prover_assigned_at"] = nil
case types.ProvingTaskVerified:
updateFields["proved_at"] = time.Now()
updateFields["proved_at"] = utils.NowUTC()
}
db := o.db
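The batch, bundle, and chunk ORMs now stamp timestamps with utils.NowUTC() from scroll-tech/common/utils instead of time.Now(), matching the "unify db time" part of commit f92029aaeb. The helper itself is not shown in this diff; assuming it simply normalizes the current time to UTC, an equivalent would look like:

package utils

import "time"

// NowUTC is assumed to return the current time in UTC, so timestamps written by
// different services don't depend on the host's local timezone.
// (The real helper lives in scroll-tech/common/utils and is not shown in this diff.)
func NowUTC() time.Time {
	return time.Now().UTC()
}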

View File

@@ -67,7 +67,7 @@ func (o *Bundle) getLatestBundle(ctx context.Context) (*Bundle, error) {
var latestBundle Bundle
if err := db.First(&latestBundle).Error; err != nil {
if err == gorm.ErrRecordNotFound {
if errors.Is(err, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, fmt.Errorf("getLatestBundle error: %w", err)
@@ -194,7 +194,7 @@ func (o *Bundle) UpdateFinalizeTxHashAndRollupStatus(ctx context.Context, hash s
updateFields["finalize_tx_hash"] = finalizeTxHash
updateFields["rollup_status"] = int(status)
if status == types.RollupFinalized {
updateFields["finalized_at"] = time.Now()
updateFields["finalized_at"] = utils.NowUTC()
}
db := o.db
@@ -218,7 +218,7 @@ func (o *Bundle) UpdateProvingStatus(ctx context.Context, hash string, status ty
switch status {
case types.ProvingTaskVerified:
updateFields["proved_at"] = time.Now()
updateFields["proved_at"] = utils.NowUTC()
}
db := o.db
@@ -241,7 +241,7 @@ func (o *Bundle) UpdateRollupStatus(ctx context.Context, hash string, status typ
updateFields := make(map[string]interface{})
updateFields["rollup_status"] = int(status)
if status == types.RollupFinalized {
updateFields["finalized_at"] = time.Now()
updateFields["finalized_at"] = utils.NowUTC()
}
db := o.db.WithContext(ctx)

View File

@@ -11,8 +11,9 @@ import (
"gorm.io/gorm"
"scroll-tech/common/types"
"scroll-tech/common/utils"
"scroll-tech/rollup/internal/utils"
rutils "scroll-tech/rollup/internal/utils"
)
// Chunk represents a chunk of blocks in the database.
@@ -177,7 +178,7 @@ func (o *Chunk) GetChunksByBatchHash(ctx context.Context, batchHash string) ([]*
}
// InsertChunk inserts a new chunk into the database.
func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, codecVersion encoding.CodecVersion, metrics utils.ChunkMetrics, dbTX ...*gorm.DB) (*Chunk, error) {
func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, codecVersion encoding.CodecVersion, metrics rutils.ChunkMetrics, dbTX ...*gorm.DB) (*Chunk, error) {
if chunk == nil || len(chunk.Blocks) == 0 {
return nil, errors.New("invalid args")
}
@@ -202,7 +203,7 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, codecVer
parentChunkStateRoot = parentChunk.StateRoot
}
chunkHash, err := utils.GetChunkHash(chunk, totalL1MessagePoppedBefore, codecVersion)
chunkHash, err := rutils.GetChunkHash(chunk, totalL1MessagePoppedBefore, codecVersion)
if err != nil {
log.Error("failed to get chunk hash", "err", err)
return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err)
@@ -261,11 +262,11 @@ func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status typ
switch status {
case types.ProvingTaskAssigned:
updateFields["prover_assigned_at"] = time.Now()
updateFields["prover_assigned_at"] = utils.NowUTC()
case types.ProvingTaskUnassigned:
updateFields["prover_assigned_at"] = nil
case types.ProvingTaskVerified:
updateFields["proved_at"] = time.Now()
updateFields["proved_at"] = utils.NowUTC()
}
db := o.db
@@ -289,11 +290,11 @@ func (o *Chunk) UpdateProvingStatusByBatchHash(ctx context.Context, batchHash st
switch status {
case types.ProvingTaskAssigned:
updateFields["prover_assigned_at"] = time.Now()
updateFields["prover_assigned_at"] = utils.NowUTC()
case types.ProvingTaskUnassigned:
updateFields["prover_assigned_at"] = nil
case types.ProvingTaskVerified:
updateFields["proved_at"] = time.Now()
updateFields["proved_at"] = utils.NowUTC()
}
db := o.db