Mirror of https://github.com/scroll-tech/scroll.git, synced 2026-01-13 07:57:58 -05:00

Compare commits

5 Commits

| Author | SHA1 | Date |
|---|---|---|
| | 200ca7c15b | |
| | 53cf26597d | |
| | f0f7341271 | |
| | b6af88c936 | |
| | de541a650a | |
.github/workflows/docker.yml (vendored): 8 changed lines
This change moves `platforms: linux/amd64,linux/arm64` from the step level into the `with:` block of each `docker/build-push-action@v3` step, which is where the action reads the option:

@@ -39,7 +39,6 @@ jobs:
           aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
       - name: Build and push
         uses: docker/build-push-action@v3
-        platforms: linux/amd64,linux/arm64
         env:
           ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
           REPOSITORY: event-watcher
@@ -47,6 +46,7 @@ jobs:
         with:
           context: .
           file: ./build/dockerfiles/event_watcher.Dockerfile
+          platforms: linux/amd64,linux/arm64
           push: true
           tags: |
             ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -91,6 +91,7 @@ jobs:
         with:
           context: .
           file: ./build/dockerfiles/gas_oracle.Dockerfile
+          platforms: linux/amd64,linux/arm64
           push: true
           tags: |
             ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -135,6 +136,7 @@ jobs:
         with:
           context: .
           file: ./build/dockerfiles/rollup_relayer.Dockerfile
+          platforms: linux/amd64,linux/arm64
           push: true
           tags: |
             ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -179,6 +181,7 @@ jobs:
         with:
           context: .
           file: ./build/dockerfiles/bridgehistoryapi-fetcher.Dockerfile
+          platforms: linux/amd64,linux/arm64
           push: true
           tags: |
             ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -223,6 +226,7 @@ jobs:
         with:
           context: .
           file: ./build/dockerfiles/bridgehistoryapi-api.Dockerfile
+          platforms: linux/amd64,linux/arm64
           push: true
           tags: |
             ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -267,6 +271,7 @@ jobs:
         with:
           context: .
           file: ./build/dockerfiles/coordinator-api.Dockerfile
+          platforms: linux/amd64,linux/arm64
           push: true
           tags: |
             ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -311,6 +316,7 @@ jobs:
         with:
           context: .
           file: ./build/dockerfiles/coordinator-cron.Dockerfile
+          platforms: linux/amd64,linux/arm64
           push: true
           tags: |
             ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
Version bump:

@@ -5,7 +5,7 @@ import (
 	"runtime/debug"
 )
 
-var tag = "v4.3.93"
+var tag = "v4.3.94"
 
 var commit = func() string {
 	if info, ok := debug.ReadBuildInfo(); ok {
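The commit helper is only partially visible in the hunk above. For context, here is a minimal, hedged sketch of the usual `debug.ReadBuildInfo` pattern it appears to follow; everything other than the `tag` and `commit` names visible in the diff is illustrative, not taken from the repository:

```go
// Sketch only: completes the truncated pattern above under the assumption that
// the commit string is read from the Go toolchain's embedded VCS metadata.
package version

import "runtime/debug"

var tag = "v4.3.94"

// commit falls back to "unknown" when no build info or no vcs.revision
// setting is available (for example in non-VCS builds).
var commit = func() string {
	if info, ok := debug.ReadBuildInfo(); ok {
		for _, setting := range info.Settings {
			if setting.Key == "vcs.revision" && len(setting.Value) >= 7 {
				return setting.Value[:7] // short commit hash
			}
		}
	}
	return "unknown"
}()

// Version combines the tag and commit, e.g. "v4.3.94-0123abc".
var Version = tag + "-" + commit
```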
Coordinator cron collector: the single shared stop channel is split into one channel per run loop, and each loop logs its own exit:

@@ -30,8 +30,8 @@ func (c *Collector) cleanupChallenge() {
 				log.Error("manager context canceled with error", "error", c.ctx.Err())
 			}
 			return
-		case <-c.stopTimeoutChan:
-			log.Info("the coordinator run loop exit")
+		case <-c.stopCleanChallengeChan:
+			log.Info("the coordinator cleanupChallenge run loop exit")
 			return
 		}
 	}
@@ -23,7 +23,10 @@ type Collector struct {
 	db  *gorm.DB
 	ctx context.Context
 
-	stopTimeoutChan chan struct{}
+	stopChunkTimeoutChan       chan struct{}
+	stopBatchTimeoutChan       chan struct{}
+	stopBatchAllChunkReadyChan chan struct{}
+	stopCleanChallengeChan     chan struct{}
 
 	proverTaskOrm *orm.ProverTask
 	chunkOrm      *orm.Chunk
@@ -40,14 +43,17 @@ type Collector struct {
 
 // NewCollector create a collector to cron collect the data to send to prover
 func NewCollector(ctx context.Context, db *gorm.DB, cfg *config.Config, reg prometheus.Registerer) *Collector {
 	c := &Collector{
-		cfg:             cfg,
-		db:              db,
-		ctx:             ctx,
-		stopTimeoutChan: make(chan struct{}),
-		proverTaskOrm:   orm.NewProverTask(db),
-		chunkOrm:        orm.NewChunk(db),
-		batchOrm:        orm.NewBatch(db),
-		challenge:       orm.NewChallenge(db),
+		cfg:                        cfg,
+		db:                         db,
+		ctx:                        ctx,
+		stopChunkTimeoutChan:       make(chan struct{}),
+		stopBatchTimeoutChan:       make(chan struct{}),
+		stopBatchAllChunkReadyChan: make(chan struct{}),
+		stopCleanChallengeChan:     make(chan struct{}),
+		proverTaskOrm:              orm.NewProverTask(db),
+		chunkOrm:                   orm.NewChunk(db),
+		batchOrm:                   orm.NewBatch(db),
+		challenge:                  orm.NewChallenge(db),
 
 		timeoutBatchCheckerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
 			Name: "coordinator_batch_timeout_checker_run_total",
@@ -83,7 +89,10 @@ func NewCollector(ctx context.Context, db *gorm.DB, cfg *config.Config, reg prom
 
 // Stop all the collector
 func (c *Collector) Stop() {
-	c.stopTimeoutChan <- struct{}{}
+	c.stopChunkTimeoutChan <- struct{}{}
+	c.stopBatchTimeoutChan <- struct{}{}
+	c.stopBatchAllChunkReadyChan <- struct{}{}
+	c.stopCleanChallengeChan <- struct{}{}
 }
 
 // timeoutTask cron check the send task is timeout. if timeout reached, restore the
@@ -113,8 +122,8 @@ func (c *Collector) timeoutBatchProofTask() {
 				log.Error("manager context canceled with error", "error", c.ctx.Err())
 			}
 			return
-		case <-c.stopTimeoutChan:
-			log.Info("the coordinator run loop exit")
+		case <-c.stopBatchTimeoutChan:
+			log.Info("the coordinator timeoutBatchProofTask run loop exit")
 			return
 		}
 	}
@@ -146,8 +155,8 @@ func (c *Collector) timeoutChunkProofTask() {
 				log.Error("manager context canceled with error", "error", c.ctx.Err())
 			}
 			return
-		case <-c.stopTimeoutChan:
-			log.Info("the coordinator run loop exit")
+		case <-c.stopChunkTimeoutChan:
+			log.Info("the coordinator timeoutChunkProofTask run loop exit")
 			return
 		}
 	}
@@ -253,8 +262,8 @@ func (c *Collector) checkBatchAllChunkReady() {
 				log.Error("manager context canceled with error", "error", c.ctx.Err())
 			}
 			return
-		case <-c.stopTimeoutChan:
-			log.Info("the coordinator run loop exit")
+		case <-c.stopBatchAllChunkReadyChan:
+			log.Info("the coordinator checkBatchAllChunkReady run loop exit")
 			return
 		}
 	}
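The refactor above gives every run loop its own stop channel, so `Stop()` signals each loop exactly once and every loop logs which loop exited. A minimal, self-contained sketch of that pattern follows; the `worker` type and names are illustrative, not the coordinator's actual API:

```go
// Minimal sketch of the per-loop stop-channel pattern used in the diff above.
package main

import (
	"context"
	"log"
	"time"
)

type worker struct {
	ctx      context.Context
	stopLoop chan struct{} // dedicated channel: one per run loop
}

// runLoop ticks until either the shared context is canceled or its own
// stop channel is signaled, so Stop() wakes exactly this loop.
func (w *worker) runLoop(name string, tick func()) {
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-w.ctx.Done():
			log.Printf("%s: context canceled: %v", name, w.ctx.Err())
			return
		case <-w.stopLoop:
			log.Printf("%s: run loop exit", name)
			return
		case <-ticker.C:
			tick()
		}
	}
}

// Stop signals the loop; with an unbuffered channel this blocks until the
// loop receives, mirroring the send-per-channel Stop method above.
func (w *worker) Stop() { w.stopLoop <- struct{}{} }

func main() {
	w := &worker{ctx: context.Background(), stopLoop: make(chan struct{})}
	go w.runLoop("example", func() {})
	time.Sleep(1500 * time.Millisecond)
	w.Stop()
}
```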
The migration test now expects schema version 18 instead of 17, matching the new migration added below:

@@ -59,20 +59,20 @@ func testResetDB(t *testing.T) {
 	cur, err := Current(pgDB)
 	assert.NoError(t, err)
 	// total number of tables.
-	assert.Equal(t, int64(17), cur)
+	assert.Equal(t, int64(18), cur)
 }
 
 func testMigrate(t *testing.T) {
 	assert.NoError(t, Migrate(pgDB))
 	cur, err := Current(pgDB)
 	assert.NoError(t, err)
-	assert.Equal(t, int64(17), cur)
+	assert.Equal(t, int64(18), cur)
 }
 
 func testRollback(t *testing.T) {
 	version, err := Current(pgDB)
 	assert.NoError(t, err)
-	assert.Equal(t, int64(17), version)
+	assert.Equal(t, int64(18), version)
 
 	assert.NoError(t, Rollback(pgDB, nil))
 
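The `Migrate`, `Current`, and `Rollback` helpers exercised by this test are not part of the diff. Assuming they are thin wrappers around pressly/goose, which the `-- +goose` annotations in the new migration below suggest, they might look roughly like this hypothetical sketch:

```go
// Hypothetical shims over github.com/pressly/goose/v3; the repository's real
// helpers may differ in signature and in how the migrations directory is found.
package migrate

import (
	"database/sql"

	"github.com/pressly/goose/v3"
)

// migrationsDir is an assumed location for the *.sql migration files.
const migrationsDir = "migrations"

// Migrate applies every pending migration.
func Migrate(db *sql.DB) error {
	return goose.Up(db, migrationsDir)
}

// Current reports the version of the most recently applied migration.
func Current(db *sql.DB) (int64, error) {
	return goose.GetDBVersion(db)
}

// Rollback reverts to a specific version, or rolls back one step when version is nil.
func Rollback(db *sql.DB, version *int64) error {
	if version != nil {
		return goose.DownTo(db, migrationsDir, *version)
	}
	return goose.Down(db, migrationsDir)
}
```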
The new migration (version 18) adds two partial indexes on `prover_task`, restricted to rows that have not been soft-deleted:

@@ -0,0 +1,18 @@
+-- +goose Up
+-- +goose StatementBegin
+
+create index if not exists idx_prover_task_created_at on prover_task(created_at) where deleted_at IS NULL;
+
+create index if not exists idx_prover_task_task_id on prover_task(task_id) where deleted_at IS NULL;
+
+-- +goose StatementEnd
+
+-- +goose Down
+-- +goose StatementBegin
+
+drop index if exists idx_prover_task_created_at;
+
+drop index if exists idx_prover_task_task_id;
+
+
+-- +goose StatementEnd
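Because both indexes carry the `where deleted_at IS NULL` predicate, they only cover live rows, which is the same condition GORM adds automatically for models that use `gorm.DeletedAt`. An illustrative sketch of a query shape that can use the new `idx_prover_task_task_id` index; the `ProverTask` model here is a hypothetical stand-in, not the repository's orm type:

```go
// Illustrative only: a minimal model and query whose generated SQL
// ("... WHERE task_id = ? AND deleted_at IS NULL") matches the new partial index.
package example

import (
	"time"

	"gorm.io/gorm"
)

type ProverTask struct {
	ID        int64
	TaskID    string
	CreatedAt time.Time
	DeletedAt gorm.DeletedAt // soft delete: GORM appends "deleted_at IS NULL" to queries
}

// TableName maps the model onto the prover_task table touched by the migration.
func (ProverTask) TableName() string { return "prover_task" }

// tasksByTaskID loads the non-deleted prover tasks for one task ID.
func tasksByTaskID(db *gorm.DB, taskID string) ([]ProverTask, error) {
	var tasks []ProverTask
	err := db.Where("task_id = ?", taskID).Find(&tasks).Error
	return tasks, err
}
```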