refactor(coordinator): remove debug api namespace from manager (#221)

Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
This commit is contained in:
Nazarii Denha
2023-01-12 03:16:48 +01:00
committed by GitHub
parent 65699b89bb
commit e1247a7eb2
3 changed files with 11 additions and 122 deletions

View File

@@ -1,96 +0,0 @@
package coordinator
import (
"fmt"
"time"
"scroll-tech/database/orm"
)
// RollerDebugAPI is the debug RPC interface used to get debug messages
// about live rollers and proving sessions.
type RollerDebugAPI interface {
	// ListRollers returns all live rollers.
	ListRollers() ([]*RollerInfo, error)
	// GetSessionInfo returns the session information given the session id.
	GetSessionInfo(sessionID string) (*SessionInfo, error)
}
// RollerInfo records the roller name, public key and active session info
// (session id and start time), as exposed by the debug API.
type RollerInfo struct {
	Name                   string    `json:"name"`
	Version                string    `json:"version"`
	PublicKey              string    `json:"public_key"`
	ActiveSession          string    `json:"active_session,omitempty"`
	ActiveSessionStartTime time.Time `json:"active_session_start_time"` // latest proof start time.
}
// SessionInfo records a proving session whose proof creation or proof
// verification failed, as exposed by the debug API.
type SessionInfo struct {
	ID              string    `json:"id"`
	Status          string    `json:"status"`
	StartTime       time.Time `json:"start_time"`
	FinishTime      time.Time `json:"finish_time,omitempty"`      // set to 0 if not finished
	AssignedRollers []string  `json:"assigned_rollers,omitempty"` // roller name list
	Error           string    `json:"error,omitempty"`            // empty string if no error encountered
}
// ListRollers returns all live rollers.
//
// For every roller in the pool it reports name, version and public key;
// if the roller participates in an active session, that session's id and
// start time are attached as well.
func (m *Manager) ListRollers() ([]*RollerInfo, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()

	var rollers []*RollerInfo
	for _, pubKey := range m.rollerPool.Keys() {
		item, ok := m.rollerPool.Get(pubKey)
		if !ok {
			// Key disappeared between Keys() and Get(); skip it.
			continue
		}
		r := item.(*rollerNode)
		entry := &RollerInfo{
			Name:      r.Name,
			Version:   r.Version,
			PublicKey: pubKey,
		}
		// Attach the first session this roller is assigned to, if any.
		for sessID, sess := range m.sessions {
			if _, assigned := sess.info.Rollers[pubKey]; assigned {
				entry.ActiveSession = sessID
				entry.ActiveSessionStartTime = time.Unix(sess.info.StartTimestamp, 0)
				break
			}
		}
		rollers = append(rollers, entry)
	}
	return rollers, nil
}
// newSessionInfo builds a SessionInfo snapshot for the given session.
//
// status is rendered via its String() form, errMsg is the error to record
// (empty if none), and finished controls whether FinishTime is stamped with
// the current time or left at its zero value.
func newSessionInfo(sess *session, status orm.ProvingStatus, errMsg string, finished bool) *SessionInfo {
	// Pre-size the name list and range over values directly instead of
	// re-indexing the map by key.
	nameList := make([]string, 0, len(sess.info.Rollers))
	for _, roller := range sess.info.Rollers {
		nameList = append(nameList, roller.Name)
	}
	info := SessionInfo{
		ID:              sess.info.ID,
		Status:          status.String(),
		AssignedRollers: nameList,
		StartTime:       time.Unix(sess.info.StartTimestamp, 0),
		Error:           errMsg,
	}
	if finished {
		// Only stamp the finish time for completed sessions; otherwise the
		// zero time is omitted from JSON via the omitempty tag.
		info.FinishTime = time.Now()
	}
	return &info
}
// GetSessionInfo returns the session information given the session id.
//
// A previously recorded failed session takes precedence; otherwise an
// in-progress session is reported with status ProvingTaskAssigned. An
// error is returned when the id matches neither.
func (m *Manager) GetSessionInfo(sessionID string) (*SessionInfo, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()

	// Failed sessions are cached with their final status and error message.
	if failed, found := m.failedSessionInfos[sessionID]; found {
		return failed, nil
	}
	// Fall back to a live session snapshot.
	sess, found := m.sessions[sessionID]
	if !found {
		return nil, fmt.Errorf("no such session, sessionID: %s", sessionID)
	}
	return newSessionInfo(sess, orm.ProvingTaskAssigned, "", false), nil
}

View File

@@ -65,9 +65,6 @@ type Manager struct {
// A map containing proof failed or verify failed proof.
rollerPool cmap.ConcurrentMap
// TODO: once put into use, should add to graceful restart.
failedSessionInfos map[string]*SessionInfo
// A direct connection to the Halo2 verifier, used to verify
// incoming proofs.
verifier *verifier.Verifier
@@ -94,15 +91,14 @@ func New(ctx context.Context, cfg *config.RollerManagerConfig, orm database.OrmF
log.Info("Start coordinator successfully.")
return &Manager{
ctx: ctx,
cfg: cfg,
rollerPool: cmap.New(),
sessions: make(map[string]*session),
failedSessionInfos: make(map[string]*SessionInfo),
verifier: v,
orm: orm,
Client: client,
tokenCache: cache.New(time.Duration(cfg.TokenTimeToLive)*time.Second, 1*time.Hour),
ctx: ctx,
cfg: cfg,
rollerPool: cmap.New(),
sessions: make(map[string]*session),
verifier: v,
orm: orm,
Client: client,
tokenCache: cache.New(time.Duration(cfg.TokenTimeToLive)*time.Second, 1*time.Hour),
}, nil
}
@@ -279,6 +275,9 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
"roller pk", roller.PublicKey,
"error", msg.Error,
)
if dbErr = m.orm.UpdateProvingStatus(msg.ID, orm.ProvingTaskFailed); dbErr != nil {
log.Error("failed to update task status as failed", "error", dbErr)
}
return nil
}
@@ -346,7 +345,6 @@ func (m *Manager) CollectProofs(sess *session) {
if len(participatingRollers) == 0 {
// record failed session.
errMsg := "proof generation session ended without receiving any valid proofs"
m.addFailedSession(sess, errMsg)
log.Warn(errMsg, "session id", sess.info.ID)
// Set status as skipped.
// Note that this is only a workaround for testnet here.
@@ -382,11 +380,6 @@ func (m *Manager) APIs() []rpc.API {
Service: RollerAPI(m),
Public: true,
},
{
Namespace: "debug",
Public: true,
Service: RollerDebugAPI(m),
},
}
}
@@ -506,10 +499,6 @@ func (m *Manager) IsRollerIdle(hexPk string) bool {
return true
}
func (m *Manager) addFailedSession(sess *session, errMsg string) {
m.failedSessionInfos[sess.info.ID] = newSessionInfo(sess, orm.ProvingTaskFailed, errMsg, true)
}
// VerifyToken verifies pukey for token and expiration time
func (m *Manager) VerifyToken(authMsg *message.AuthMsg) (bool, error) {
pubkey, _ := authMsg.PublicKey()

View File

@@ -318,10 +318,6 @@ func testGracefulRestart(t *testing.T) {
}()
for i := range ids {
info, err := newRollerManager.GetSessionInfo(ids[i])
assert.Equal(t, orm.ProvingTaskAssigned.String(), info.Status)
assert.NoError(t, err)
// at this point, roller haven't submitted
status, err := l2db.GetProvingStatusByID(ids[i])
assert.NoError(t, err)