Mirror of https://github.com/scroll-tech/scroll.git, synced 2026-01-12 23:48:15 -05:00

Compare commits: coordinato...v4.4.14 (9 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 0ab102a23b |  |
|  | 8248d020e5 |  |
|  | c65cdfceb9 |  |
|  | 71ab2006fb |  |
|  | 60a98fa876 |  |
|  | 661b68cf86 |  |
|  | 6eea9195fc |  |
|  | e45838f3ac |  |
|  | acd1432d44 |  |
.github/workflows/docker.yml (vendored): 51 changes
@@ -46,7 +46,7 @@ jobs:
       with:
         context: .
         file: ./build/dockerfiles/event_watcher.Dockerfile
-        platforms: linux/amd64
+        platforms: linux/amd64,linux/arm64
         push: true
         tags: |
           ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -91,7 +91,7 @@ jobs:
       with:
         context: .
         file: ./build/dockerfiles/gas_oracle.Dockerfile
-        platforms: linux/amd64
+        platforms: linux/amd64,linux/arm64
         push: true
         tags: |
           ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -136,7 +136,7 @@ jobs:
       with:
         context: .
         file: ./build/dockerfiles/rollup_relayer.Dockerfile
-        platforms: linux/amd64
+        platforms: linux/amd64,linux/arm64
         push: true
         tags: |
           ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -279,6 +279,51 @@ jobs:
           ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
           ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest

+  bridgehistoryapi-db-cli:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v2
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+      - name: Login to Docker Hub
+        uses: docker/login-action@v2
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+      - name: Configure AWS credentials
+        uses: aws-actions/configure-aws-credentials@v4
+        with:
+          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+          aws-region: ${{ env.AWS_REGION }}
+      - name: Login to Amazon ECR
+        id: login-ecr
+        uses: aws-actions/amazon-ecr-login@v2
+      - name: check repo and create it if not exist
+        env:
+          REPOSITORY: bridgehistoryapi-db-cli
+        run: |
+          aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
+      - name: Build and push
+        uses: docker/build-push-action@v3
+        env:
+          ECR_REGISTRY: ${{ steps.login-ecr.outputs.registry }}
+          REPOSITORY: bridgehistoryapi-db-cli
+          IMAGE_TAG: ${{ github.ref_name }}
+        with:
+          context: .
+          file: ./build/dockerfiles/bridgehistoryapi-db-cli.Dockerfile
+          platforms: linux/amd64,linux/arm64
+          push: true
+          tags: |
+            ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
+            ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:latest
+            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
+            ${{ env.ECR_REGISTRY }}/${{ env.REPOSITORY }}:latest
+
   coordinator-api:
     runs-on: ubuntu-latest
     steps:
.github/workflows/rollup.yml (vendored): 2 changes

@@ -105,7 +105,7 @@ jobs:
       - name: Test rollup packages
         working-directory: 'rollup'
         run: |
-          ./run_test.sh
+          make test
       - name: Upload coverage reports to Codecov
         uses: codecov/codecov-action@v3
         env:
README.md: 11 changes

@@ -46,18 +46,7 @@ make dev_docker
 Run the tests using the following commands:
 
 ```bash
-export LIBSCROLL_ZSTD_VERSION=v0.1.0-rc0-ubuntu20.04
-export SCROLL_LIB_PATH=/scroll/lib
-
-sudo mkdir -p $SCROLL_LIB_PATH
-
-sudo wget -O $SCROLL_LIB_PATH/libscroll_zstd.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libscroll_zstd.so
-
-export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$SCROLL_LIB_PATH
-export CGO_LDFLAGS="-L$SCROLL_LIB_PATH -Wl,-rpath,$SCROLL_LIB_PATH"
-
 go test -v -race -covermode=atomic scroll-tech/rollup/...
-
 go test -tags="mock_verifier" -v -race -covermode=atomic scroll-tech/coordinator/...
 go test -v -race -covermode=atomic scroll-tech/database/...
 go test -v -race -covermode=atomic scroll-tech/common/...
./build/dockerfiles/event_watcher.Dockerfile

@@ -1,6 +1,3 @@
-ARG LIBSCROLL_ZSTD_VERSION=v0.1.0-rc0-ubuntu20.04
-ARG SCROLL_LIB_PATH=/scroll/lib
-
 # Download Go dependencies
 FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
 
@@ -18,36 +15,14 @@ RUN go mod download -x
 # Build event_watcher
 FROM base as builder
 
-ARG LIBSCROLL_ZSTD_VERSION
-ARG SCROLL_LIB_PATH
-
-RUN mkdir -p $SCROLL_LIB_PATH
-
-RUN apt-get -qq update && apt-get -qq install -y wget
-
-RUN wget -O $SCROLL_LIB_PATH/libscroll_zstd.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libscroll_zstd.so
-
-ENV LD_LIBRARY_PATH=$SCROLL_LIB_PATH
-ENV CGO_LDFLAGS="-L$SCROLL_LIB_PATH -Wl,-rpath,$SCROLL_LIB_PATH"
-
 RUN --mount=target=. \
     --mount=type=cache,target=/root/.cache/go-build \
-    cd /src/rollup/cmd/event_watcher/ && go build -v -p 4 -o /bin/event_watcher
+    cd /src/rollup/cmd/event_watcher/ && CGO_LDFLAGS="-ldl" go build -v -p 4 -o /bin/event_watcher
 
 # Pull event_watcher into a second stage deploy alpine container
 FROM ubuntu:20.04
 
-ARG LIBSCROLL_ZSTD_VERSION
-ARG SCROLL_LIB_PATH
-
-RUN mkdir -p $SCROLL_LIB_PATH
-
-RUN apt-get -qq update && apt-get -qq install -y wget
-
-RUN wget -O $SCROLL_LIB_PATH/libscroll_zstd.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libscroll_zstd.so
-
-ENV LD_LIBRARY_PATH=$SCROLL_LIB_PATH
-ENV CGO_LDFLAGS="-L$SCROLL_LIB_PATH -Wl,-rpath,$SCROLL_LIB_PATH"
+ENV CGO_LDFLAGS="-ldl"
 
 COPY --from=builder /bin/event_watcher /bin/
 WORKDIR /app
./build/dockerfiles/gas_oracle.Dockerfile

@@ -1,6 +1,3 @@
-ARG LIBSCROLL_ZSTD_VERSION=v0.1.0-rc0-ubuntu20.04
-ARG SCROLL_LIB_PATH=/scroll/lib
-
 # Download Go dependencies
 FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
 
@@ -18,36 +15,14 @@ RUN go mod download -x
 # Build gas_oracle
 FROM base as builder
 
-ARG LIBSCROLL_ZSTD_VERSION
-ARG SCROLL_LIB_PATH
-
-RUN mkdir -p $SCROLL_LIB_PATH
-
-RUN apt-get -qq update && apt-get -qq install -y wget
-
-RUN wget -O $SCROLL_LIB_PATH/libscroll_zstd.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libscroll_zstd.so
-
-ENV LD_LIBRARY_PATH=$SCROLL_LIB_PATH
-ENV CGO_LDFLAGS="-L$SCROLL_LIB_PATH -Wl,-rpath,$SCROLL_LIB_PATH"
-
 RUN --mount=target=. \
     --mount=type=cache,target=/root/.cache/go-build \
-    cd /src/rollup/cmd/gas_oracle/ && go build -v -p 4 -o /bin/gas_oracle
+    cd /src/rollup/cmd/gas_oracle/ && CGO_LDFLAGS="-ldl" go build -v -p 4 -o /bin/gas_oracle
 
 # Pull gas_oracle into a second stage deploy alpine container
 FROM ubuntu:20.04
 
-ARG LIBSCROLL_ZSTD_VERSION
-ARG SCROLL_LIB_PATH
-
-RUN mkdir -p $SCROLL_LIB_PATH
-
-RUN apt-get -qq update && apt-get -qq install -y wget
-
-RUN wget -O $SCROLL_LIB_PATH/libscroll_zstd.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libscroll_zstd.so
-
-ENV LD_LIBRARY_PATH=$SCROLL_LIB_PATH
-ENV CGO_LDFLAGS="-L$SCROLL_LIB_PATH -Wl,-rpath,$SCROLL_LIB_PATH"
+ENV CGO_LDFLAGS="-ldl"
 
 COPY --from=builder /bin/gas_oracle /bin/
 WORKDIR /app
./build/dockerfiles/rollup_relayer.Dockerfile

@@ -1,6 +1,3 @@
-ARG LIBSCROLL_ZSTD_VERSION=v0.1.0-rc0-ubuntu20.04
-ARG SCROLL_LIB_PATH=/scroll/lib
-
 # Download Go dependencies
 FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
 
@@ -18,36 +15,14 @@ RUN go mod download -x
 # Build rollup_relayer
 FROM base as builder
 
-ARG LIBSCROLL_ZSTD_VERSION
-ARG SCROLL_LIB_PATH
-
-RUN mkdir -p $SCROLL_LIB_PATH
-
-RUN apt-get -qq update && apt-get -qq install -y wget
-
-RUN wget -O $SCROLL_LIB_PATH/libscroll_zstd.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libscroll_zstd.so
-
-ENV LD_LIBRARY_PATH=$SCROLL_LIB_PATH
-ENV CGO_LDFLAGS="-L$SCROLL_LIB_PATH -Wl,-rpath,$SCROLL_LIB_PATH"
-
 RUN --mount=target=. \
     --mount=type=cache,target=/root/.cache/go-build \
-    cd /src/rollup/cmd/rollup_relayer/ && go build -v -p 4 -o /bin/rollup_relayer
+    cd /src/rollup/cmd/rollup_relayer/ && CGO_LDFLAGS="-ldl" go build -v -p 4 -o /bin/rollup_relayer
 
 # Pull rollup_relayer into a second stage deploy alpine container
 FROM ubuntu:20.04
 
-ARG LIBSCROLL_ZSTD_VERSION
-ARG SCROLL_LIB_PATH
-
-RUN mkdir -p $SCROLL_LIB_PATH
-
-RUN apt-get -qq update && apt-get -qq install -y wget
-
-RUN wget -O $SCROLL_LIB_PATH/libscroll_zstd.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libscroll_zstd.so
-
-ENV LD_LIBRARY_PATH=$SCROLL_LIB_PATH
-ENV CGO_LDFLAGS="-L$SCROLL_LIB_PATH -Wl,-rpath,$SCROLL_LIB_PATH"
+ENV CGO_LDFLAGS="-ldl"
 
 COPY --from=builder /bin/rollup_relayer /bin/
 WORKDIR /app
@@ -154,11 +154,10 @@ func (t *TestcontainerApps) GetL2GethEndPoint() (string, error) {
 
 // GetGormDBClient returns a gorm.DB by connecting to the running postgres container
 func (t *TestcontainerApps) GetGormDBClient() (*gorm.DB, error) {
-	// endpoint, err := t.GetDBEndPoint()
-	// if err != nil {
-	// 	return nil, err
-	// }
-	endpoint := "postgres://lmr:@localhost:5432/unittest?sslmode=disable"
+	endpoint, err := t.GetDBEndPoint()
+	if err != nil {
+		return nil, err
+	}
 	dbCfg := &database.Config{
 		DSN:        endpoint,
 		DriverName: "postgres",
@@ -183,6 +183,12 @@ type ChunkInfo struct {
 	TxBytes []byte `json:"tx_bytes"`
 }
 
+// SubCircuitRowUsage tracing info added in v0.11.0rc8
+type SubCircuitRowUsage struct {
+	Name      string `json:"name"`
+	RowNumber uint64 `json:"row_number"`
+}
+
 // ChunkProof includes the proof info that are required for chunk verification and rollup.
 type ChunkProof struct {
 	StorageTrace []byte `json:"storage_trace,omitempty"`
@@ -191,8 +197,9 @@ type ChunkProof struct {
 	Instances []byte `json:"instances"`
 	Vk        []byte `json:"vk"`
 	// cross-reference between cooridinator computation and prover compution
-	ChunkInfo  *ChunkInfo `json:"chunk_info,omitempty"`
-	GitVersion string     `json:"git_version,omitempty"`
+	ChunkInfo  *ChunkInfo           `json:"chunk_info,omitempty"`
+	GitVersion string               `json:"git_version,omitempty"`
+	RowUsages  []SubCircuitRowUsage `json:"row_usages,omitempty"`
 }
 
 // BatchProof includes the proof info that are required for batch verification and rollup.
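The two hunks above add per-sub-circuit row usage reporting to the chunk proof. A minimal sketch of the resulting JSON shape, reproducing only the fields visible in this diff (the sample values and the omission of the other proof fields are illustrative, not taken from the repository):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Subset of the struct fields shown in the hunks above.
type SubCircuitRowUsage struct {
	Name      string `json:"name"`
	RowNumber uint64 `json:"row_number"`
}

type ChunkProof struct {
	Instances  []byte               `json:"instances"`
	Vk         []byte               `json:"vk"`
	GitVersion string               `json:"git_version,omitempty"`
	RowUsages  []SubCircuitRowUsage `json:"row_usages,omitempty"`
}

func main() {
	p := ChunkProof{
		Instances:  []byte{0x01},
		Vk:         []byte{0x02},
		GitVersion: "v4.4.14",
		// New field: one entry per sub-circuit reported by the prover.
		RowUsages: []SubCircuitRowUsage{{Name: "evm", RowNumber: 42}},
	}
	out, _ := json.Marshal(p)
	// []byte fields are emitted as base64; row_usages is omitted entirely when empty.
	fmt.Println(string(out))
}
```

The updated reference digest in the TestProofDetailHash hunk that follows is consistent with this change: once the serialized payload gains a field, the expected hash has to be regenerated.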
@@ -108,7 +108,7 @@ func TestProofDetailHash(t *testing.T) {
 	}
 	hash, err := proofDetail.Hash()
 	assert.NoError(t, err)
-	expectedHash := "d3b57cb84b0da8043373eeb3612806fb7248d6d1b6e089846ccf3ccce2d9f31c"
+	expectedHash := "01128ea9006601146ba80dbda959c96ebaefca463e78570e473a57d821db5ec1"
 	assert.Equal(t, expectedHash, hex.EncodeToString(hash))
 }
 
@@ -5,7 +5,7 @@ import (
 	"runtime/debug"
 )
 
-var tag = "v4.4.9"
+var tag = "v4.4.14"
 
 var commit = func() string {
 	if info, ok := debug.ReadBuildInfo(); ok {
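The surrounding context shows the commit string being derived from `debug.ReadBuildInfo()`. The rest of that initializer is not part of this diff; the sketch below only illustrates the standard-library API it relies on, not the repository's exact implementation:

```go
package main

import (
	"fmt"
	"runtime/debug"
)

// commitFromBuildInfo returns the VCS revision embedded by the Go toolchain,
// or "unknown" when the binary was built without module/VCS metadata.
func commitFromBuildInfo() string {
	if info, ok := debug.ReadBuildInfo(); ok {
		for _, s := range info.Settings {
			if s.Key == "vcs.revision" && len(s.Value) >= 7 {
				return s.Value[:7] // short commit hash
			}
		}
	}
	return "unknown"
}

func main() {
	fmt.Println("tag:", "v4.4.14", "commit:", commitFromBuildInfo())
}
```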
@@ -18,7 +18,6 @@ import (
 	"scroll-tech/coordinator/internal/logic/provertask"
 	"scroll-tech/coordinator/internal/logic/verifier"
 	coordinatorType "scroll-tech/coordinator/internal/types"
-	itypes "scroll-tech/coordinator/internal/types"
 )
 
 // GetTaskController the get prover task api controller
@@ -71,10 +70,6 @@ func (ptc *GetTaskController) incGetTaskAccessCounter(ctx *gin.Context) error {
 
 // GetTasks get assigned chunk/batch task
 func (ptc *GetTaskController) GetTasks(ctx *gin.Context) {
-	ctx.Set(itypes.PublicKey, "fake_public_key2")
-	ctx.Set(itypes.ProverName, "test")
-	ctx.Set(itypes.ProverVersion, "v4.4.9-000000-000000-000000")
-
 	var getTaskParameter coordinatorType.GetTaskParameter
 	if err := ctx.ShouldBind(&getTaskParameter); err != nil {
 		nerr := fmt.Errorf("prover task parameter invalid, err:%w", err)
@@ -82,7 +82,7 @@ func (r *chunkIndexRange) merge(o chunkIndexRange) *chunkIndexRange {
 }
 
 func (r *chunkIndexRange) contains(start, end uint64) bool {
-	return r.start <= start && r.end >= end+1
+	return r.start <= start && r.end > end
 }
 
 type getHardForkNameByBatchFunc func(*orm.Batch) (string, error)
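The rewritten comparison is worth a second look: for unsigned integers, `r.end >= end+1` and `r.end > end` are equivalent except when `end` is the maximum uint64 value, where `end+1` wraps to 0 and the old form degenerates to `r.end >= 0`, which is always true. The same rewrite is applied to `blockRange.contains` further down in this diff. A small self-contained check of that edge case (illustrative only, not code from the repository):

```go
package main

import (
	"fmt"
	"math"
)

func containsOld(rStart, rEnd, start, end uint64) bool {
	return rStart <= start && rEnd >= end+1 // end+1 can wrap around to 0
}

func containsNew(rStart, rEnd, start, end uint64) bool {
	return rStart <= start && rEnd > end // no overflow possible
}

func main() {
	const max = math.MaxUint64
	// A range ending at 10 clearly does not contain something ending at MaxUint64,
	// but the pre-change form claims it does because of the wrap-around.
	fmt.Println(containsOld(0, 10, 5, max)) // true (overflow artifact)
	fmt.Println(containsNew(0, 10, 5, max)) // false
}
```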
@@ -137,7 +137,7 @@ func (bp *BatchProverTask) doAssignTaskWithinChunkRange(ctx *gin.Context, taskCt
 		return nil, nil
 	}
 
-	log.Info("start batch proof generation session", "id", batchTask.Hash, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName)
+	log.Info("start batch proof generation session", "task_id", batchTask.Hash, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName)
 	var (
 		proverVersion = taskCtx.ProverVersion
 		hardForkName  = taskCtx.HardForkName
@@ -146,7 +146,7 @@ func (bp *BatchProverTask) doAssignTaskWithinChunkRange(ctx *gin.Context, taskCt
 	if getHardForkName != nil {
 		hardForkName, err = getHardForkName(batchTask)
 		if err != nil {
-			log.Error("failed to get version by chunk", "error", err.Error())
+			log.Error("failed to get hard fork name by batch", "task_id", batchTask.Hash, "error", err.Error())
 			return nil, ErrCoordinatorInternalFailure
 		}
 	}
@@ -166,14 +166,14 @@ func (bp *BatchProverTask) doAssignTaskWithinChunkRange(ctx *gin.Context, taskCt
 	// Store session info.
 	if err = bp.proverTaskOrm.InsertProverTask(ctx.Copy(), &proverTask); err != nil {
 		bp.recoverActiveAttempts(ctx, batchTask)
-		log.Error("insert batch prover task info fail", "taskID", batchTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
+		log.Error("insert batch prover task info fail", "task_id", batchTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
 		return nil, ErrCoordinatorInternalFailure
 	}
 
 	taskMsg, err := bp.formatProverTask(ctx.Copy(), &proverTask)
 	if err != nil {
 		bp.recoverActiveAttempts(ctx, batchTask)
-		log.Error("format prover task failure", "hash", batchTask.Hash, "err", err)
+		log.Error("format prover task failure", "task_id", batchTask.Hash, "err", err)
 		return nil, ErrCoordinatorInternalFailure
 	}
 
@@ -222,7 +222,6 @@ func (bp *BatchProverTask) getChunkRangeByName(ctx *gin.Context, hardForkName st
 			endChunkIndex = toChunk.Index
 		}
 	}
-	fmt.Printf("%s index range %+v\n", hardForkName, &chunkIndexRange{startChunkIndex, endChunkIndex})
 	return &chunkIndexRange{startChunkIndex, endChunkIndex}, nil
 }
 
@@ -118,16 +118,16 @@ func (cp *ChunkProverTask) doAssignTaskWithinBlockRange(ctx *gin.Context, taskCt
 		return nil, nil
 	}
 
-	log.Info("start chunk generation session", "id", chunkTask.Hash, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName)
+	log.Info("start chunk generation session", "task_id", chunkTask.Hash, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName)
 	var (
 		proverVersion = taskCtx.ProverVersion
 		hardForkName  = taskCtx.HardForkName
-		err           error
 	)
+	var err error
 	if getHardForkName != nil {
 		hardForkName, err = getHardForkName(chunkTask)
 		if err != nil {
-			log.Error("failed to get version by chunk", "error", err.Error())
+			log.Error("failed to get hard fork name by chunk", "task_id", chunkTask.Hash, "error", err.Error())
 			return nil, ErrCoordinatorInternalFailure
 		}
 	}
@@ -146,14 +146,14 @@ func (cp *ChunkProverTask) doAssignTaskWithinBlockRange(ctx *gin.Context, taskCt
 
 	if err = cp.proverTaskOrm.InsertProverTask(ctx.Copy(), &proverTask); err != nil {
 		cp.recoverActiveAttempts(ctx, chunkTask)
-		log.Error("insert chunk prover task fail", "taskID", chunkTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
+		log.Error("insert chunk prover task fail", "task_id", chunkTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
 		return nil, ErrCoordinatorInternalFailure
 	}
 
 	taskMsg, err := cp.formatProverTask(ctx.Copy(), &proverTask)
 	if err != nil {
 		cp.recoverActiveAttempts(ctx, chunkTask)
-		log.Error("format prover task failure", "hash", chunkTask.Hash, "err", err)
+		log.Error("format prover task failure", "task_id", chunkTask.Hash, "err", err)
 		return nil, ErrCoordinatorInternalFailure
 	}
 
@@ -229,7 +229,7 @@ func (r *blockRange) merge(o blockRange) (*blockRange, error) {
 }
 
 func (r *blockRange) contains(start, end uint64) bool {
-	return r.from <= start && r.to >= end+1
+	return r.from <= start && r.to > end
 }
 
 func (cp *ChunkProverTask) getBlockRangeByName(hardForkName string) (*blockRange, error) {
@@ -251,7 +251,6 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
 	}
 
 	if len(getTaskParameter.VKs) > 0 {
-
 		return cp.assignWithTwoCircuits(ctx, taskCtx, getTaskParameter)
 	}
 	return cp.assignWithSingleCircuit(ctx, taskCtx, getTaskParameter)
@@ -32,7 +32,9 @@ type ProverTask interface {
 func reverseMap(input map[string]string) map[string]string {
 	output := make(map[string]string, len(input))
 	for k, v := range input {
-		output[v] = k
+		if k != "" {
+			output[v] = k
+		}
 	}
 	return output
 }
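The guard matters because of how the VK maps are populated elsewhere in this change: the verifier hunks below map every fork name to an empty VK string, and `loadEmbedVK` registers the embedded VK under the empty fork name `""`. When a VK ends up registered both under a real fork name and under `""`, reversing the map without the check would leave it to map iteration order whether that VK resolves to the fork name or to `""`. A minimal illustration of the difference (hypothetical values, not the repository's real VKs):

```go
package main

import "fmt"

// reverseMapGuarded mirrors the behavior after this change: entries keyed by the
// empty fork name are ignored when building the vk -> fork-name lookup.
func reverseMapGuarded(input map[string]string) map[string]string {
	output := make(map[string]string, len(input))
	for k, v := range input {
		if k != "" {
			output[v] = k
		}
	}
	return output
}

func main() {
	// Hypothetical values: "vkA" stands in for a real verifying-key string.
	vkMap := map[string]string{
		"bernoulli": "vkA",
		"":          "vkA", // e.g. the embedded VK registered under the empty fork name
	}
	fmt.Println(reverseMapGuarded(vkMap)["vkA"]) // always "bernoulli"
	// Without the guard, Go's randomized map iteration order decides whether
	// "vkA" reverse-maps to "bernoulli" or to "", making the lookup nondeterministic.
}
```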
@@ -94,13 +96,7 @@ func (b *BaseProverTask) checkParameter(ctx *gin.Context, getTaskParameter *coor
 	if len(getTaskParameter.VKs) != 2 {
 		return nil, fmt.Errorf("parameter vks length must be 2")
 	}
-	// min prover version supporting multi circuits, maybe put it to config file?
-	var minMultiCircuitsProverVersion = "v4.4.7"
-	if !version.CheckScrollRepoVersion(ptc.ProverVersion, minMultiCircuitsProverVersion) {
-		return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", minMultiCircuitsProverVersion, ptc.ProverVersion)
-	}
 	for _, vk := range getTaskParameter.VKs {
-		fmt.Printf("%+v\n", b.reverseVkMap)
 		if _, exists := b.reverseVkMap[vk]; !exists {
 			return nil, fmt.Errorf("incompatible vk. vk %s is invalid", vk)
 		}
@@ -131,10 +127,10 @@ func (b *BaseProverTask) checkParameter(ctx *gin.Context, getTaskParameter *coor
 
 	isBlocked, err := b.proverBlockListOrm.IsPublicKeyBlocked(ctx.Copy(), publicKey.(string))
 	if err != nil {
-		return nil, fmt.Errorf("failed to check whether the public key %s is blocked before assigning a chunk task, err: %w, proverName: %s", publicKey, err, proverName)
+		return nil, fmt.Errorf("failed to check whether the public key %s is blocked before assigning a chunk task, err: %w, proverName: %s, proverVersion: %s", publicKey, err, proverName, proverVersion)
 	}
 	if isBlocked {
-		return nil, fmt.Errorf("public key %s is blocked from fetching tasks. ProverName: %s, ProverVersion: %s", publicKey, proverName, ptc.ProverVersion)
+		return nil, fmt.Errorf("public key %s is blocked from fetching tasks. ProverName: %s, ProverVersion: %s", publicKey, proverName, proverVersion)
 	}
 
 	isAssigned, err := b.proverTaskOrm.IsProverAssigned(ctx.Copy(), publicKey.(string))
@@ -143,7 +139,7 @@ func (b *BaseProverTask) checkParameter(ctx *gin.Context, getTaskParameter *coor
 	}
 
 	if isAssigned {
-		return nil, fmt.Errorf("prover with publicKey %s is already assigned a task. ProverName: %s", publicKey, proverName)
+		return nil, fmt.Errorf("prover with publicKey %s is already assigned a task. ProverName: %s, ProverVersion: %s", publicKey, proverName, proverVersion)
 	}
 	return &ptc, nil
 }
@@ -12,17 +12,17 @@ import (
 func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) {
 	batchVKMap := map[string]string{
 		"shanghai":  "",
-		"bernoulli": "bernoulli",
-		"london":    "london",
-		"istanbul":  "istanbul",
+		"bernoulli": "",
+		"london":    "",
+		"istanbul":  "",
 		"homestead": "",
 		"eip155":    "",
 	}
 	chunkVKMap := map[string]string{
 		"shanghai":  "",
-		"bernoulli": "bernoulli",
-		"london":    "london",
-		"istanbul":  "istanbul",
+		"bernoulli": "",
+		"london":    "",
+		"istanbul":  "",
 		"homestead": "",
 		"eip155":    "",
 	}
@@ -166,7 +166,5 @@ func (v *Verifier) loadEmbedVK() error {
 
-	v.BatchVKMap["bernoulli"] = base64.StdEncoding.EncodeToString(batchVKBytes)
-	v.ChunkVKMap["bernoulli"] = base64.StdEncoding.EncodeToString(chunkVkBytes)
 	v.BatchVKMap[""] = base64.StdEncoding.EncodeToString(batchVKBytes)
 	v.ChunkVKMap[""] = base64.StdEncoding.EncodeToString(chunkVkBytes)
 	return nil
 }
@@ -32,7 +32,7 @@ func v1(router *gin.RouterGroup, conf *config.Config) {
 	r.POST("/login", challengeMiddleware.MiddlewareFunc(), loginMiddleware.LoginHandler)
 
 	// need jwt token api
-	// r.Use(loginMiddleware.MiddlewareFunc())
+	r.Use(loginMiddleware.MiddlewareFunc())
 	{
 		r.POST("/get_task", api.GetTask.GetTasks)
 		r.POST("/submit_proof", api.SubmitProof.SubmitProof)
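Re-enabling `r.Use(loginMiddleware.MiddlewareFunc())` matters because gin applies group middleware only to handlers registered after the `Use` call: `/get_task` and `/submit_proof` now require a valid login token, while `/login` (registered earlier with the challenge middleware) stays reachable. A generic sketch of that ordering rule, with a placeholder middleware and route prefix rather than the project's JWT middleware and actual paths:

```go
package main

import (
	"net/http"

	"github.com/gin-gonic/gin"
)

// authRequired is a stand-in for loginMiddleware.MiddlewareFunc(); it only
// illustrates where group middleware takes effect.
func authRequired() gin.HandlerFunc {
	return func(c *gin.Context) {
		if c.GetHeader("Authorization") == "" {
			c.AbortWithStatus(http.StatusUnauthorized)
			return
		}
		c.Next()
	}
}

func main() {
	router := gin.New()
	v1 := router.Group("/v1") // hypothetical prefix

	// Registered before Use: not protected by authRequired.
	v1.POST("/login", func(c *gin.Context) { c.Status(http.StatusOK) })

	// Everything registered after this point goes through authRequired.
	v1.Use(authRequired())
	{
		v1.POST("/get_task", func(c *gin.Context) { c.Status(http.StatusOK) })
		v1.POST("/submit_proof", func(c *gin.Context) { c.Status(http.StatusOK) })
	}

	_ = router.Run(":9091") // placeholder port
}
```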
@@ -4,8 +4,8 @@ package types
 type GetTaskParameter struct {
 	ProverHeight uint64   `form:"prover_height" json:"prover_height"`
 	TaskType     int      `form:"task_type" json:"task_type"`
-	VK           string   `form:"vk" json:"vk"`
-	VKs          []string `form:"vks" json:"vks"`
+	VK           string   `form:"vk" json:"vk"`   // will be deprecated after all go_prover offline
+	VKs          []string `form:"vks" json:"vks"` // for rust_prover that supporting multi-circuits
 }
 
 // GetTaskSchema the schema data return to prover for get prover task
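Both fields stay in the schema during the transition: legacy go_prover clients keep sending the single `vk`, while the multi-circuit rust_prover sends `vks` (the checkParameter hunk above requires exactly two entries). A minimal sketch of how such a request body decodes into the struct; the field set is copied from the hunk above and the JSON payload is illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Field set copied from the hunk above.
type GetTaskParameter struct {
	ProverHeight uint64   `form:"prover_height" json:"prover_height"`
	TaskType     int      `form:"task_type" json:"task_type"`
	VK           string   `form:"vk" json:"vk"`   // will be deprecated after all go_prover offline
	VKs          []string `form:"vks" json:"vks"` // for rust_prover that supporting multi-circuits
}

func main() {
	// Hypothetical request from a multi-circuit prover carrying two verifying keys.
	body := []byte(`{"prover_height": 123, "task_type": 1, "vks": ["vk-one", "vk-two"]}`)

	var p GetTaskParameter
	if err := json.Unmarshal(body, &p); err != nil {
		panic(err)
	}
	// len(p.VKs) > 0 steers the coordinator onto the multi-circuit assignment path
	// (see the Assign hunk earlier in this diff); legacy provers set "vk" instead.
	fmt.Printf("%+v\n", p)
}
```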
@@ -18,7 +18,6 @@ import (
 	"github.com/scroll-tech/go-ethereum/log"
 	"github.com/scroll-tech/go-ethereum/params"
 	"github.com/stretchr/testify/assert"
-	"gorm.io/driver/postgres"
 	"gorm.io/gorm"
 
 	"scroll-tech/common/testcontainers"
@@ -27,7 +26,6 @@ import (
 	"scroll-tech/common/version"
 	"scroll-tech/database/migrate"
 
-	cutils "scroll-tech/common/utils"
 	"scroll-tech/coordinator/internal/config"
 	"scroll-tech/coordinator/internal/controller/api"
 	"scroll-tech/coordinator/internal/controller/cron"
@@ -82,25 +80,6 @@ func randomURL() string {
 	return fmt.Sprintf("localhost:%d", 10000+2000+id.Int64())
 }
 
-func useLocalDB(dsn string) *gorm.DB {
-	db, err := gorm.Open(postgres.Open(dsn), &gorm.Config{
-		// Logger: &tmpGormLogger,
-		NowFunc: func() time.Time {
-			// why set time to UTC.
-			// if now set this, the inserted data time will use local timezone. like 2023-07-18 18:24:00 CST+8
-			// but when inserted, store to postgres is 2023-07-18 18:24:00 UTC+0 the timezone is incorrect.
-			// As mysql dsn user:pass@tcp(127.0.0.1:3306)/dbname?charset=utf8mb4&parseTime=True&loc=Local, we cant set
-			// the timezone by loc=Local. but postgres's dsn don't have loc option to set timezone, so just need set the gorm option like that.
-			return cutils.NowUTC()
-		},
-	})
-	if err != nil {
-		fmt.Println("failed to init db", err.Error())
-		panic(err.Error())
-	}
-	return db
-}
-
 func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL string, nameForkMap map[string]int64) (*cron.Collector, *http.Server) {
 	var err error
 	db, err = testApps.GetGormDBClient()
@@ -221,14 +200,14 @@ func TestApis(t *testing.T) {
 	// Set up the test environment.
 	setEnv(t)
 
-	// t.Run("TestHandshake", testHandshake)
-	// t.Run("TestFailedHandshake", testFailedHandshake)
-	// t.Run("TestGetTaskBlocked", testGetTaskBlocked)
-	// t.Run("TestOutdatedProverVersion", testOutdatedProverVersion)
-	// t.Run("TestValidProof", testValidProof)
-	// t.Run("TestInvalidProof", testInvalidProof)
-	// t.Run("TestProofGeneratedFailed", testProofGeneratedFailed)
-	// t.Run("TestTimeoutProof", testTimeoutProof)
+	t.Run("TestHandshake", testHandshake)
+	t.Run("TestFailedHandshake", testFailedHandshake)
+	t.Run("TestGetTaskBlocked", testGetTaskBlocked)
+	t.Run("TestOutdatedProverVersion", testOutdatedProverVersion)
+	t.Run("TestValidProof", testValidProof)
+	t.Run("TestInvalidProof", testInvalidProof)
+	t.Run("TestProofGeneratedFailed", testProofGeneratedFailed)
+	t.Run("TestTimeoutProof", testTimeoutProof)
+	t.Run("TestHardFork", testHardForkAssignTask)
 }
 
@@ -498,106 +477,6 @@ func testHardForkAssignTask(t *testing.T) {
 		},
 	}
 
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			coordinatorURL := randomURL()
-			collector, httpHandler := setupCoordinator(t, 3, coordinatorURL, tt.forkNumbers)
-			defer func() {
-				collector.Stop()
-				assert.NoError(t, httpHandler.Shutdown(context.Background()))
-			}()
-
-			chunkProof := &message.ChunkProof{
-				StorageTrace: []byte("testStorageTrace"),
-				Protocol:     []byte("testProtocol"),
-				Proof:        []byte("testProof"),
-				Instances:    []byte("testInstance"),
-				Vk:           []byte("testVk"),
-				ChunkInfo:    nil,
-			}
-
-			// the insert block number is 2 and 3
-			// chunk1 batch1 contains block number 2
-			// chunk2 batch2 contains block number 3
-			err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2})
-			assert.NoError(t, err)
-
-			dbHardForkChunk1, err := chunkOrm.InsertChunk(context.Background(), hardForkChunk1)
-			assert.NoError(t, err)
-			err = l2BlockOrm.UpdateChunkHashInRange(context.Background(), 0, 2, dbHardForkChunk1.Hash)
-			assert.NoError(t, err)
-			err = chunkOrm.UpdateProofAndProvingStatusByHash(context.Background(), dbHardForkChunk1.Hash, chunkProof, types.ProvingTaskUnassigned, 1)
-			assert.NoError(t, err)
-			dbHardForkBatch1, err := batchOrm.InsertBatch(context.Background(), hardForkBatch1)
-			assert.NoError(t, err)
-			err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 0, dbHardForkBatch1.Hash)
-			assert.NoError(t, err)
-			err = batchOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), dbHardForkBatch1.Hash, types.ChunkProofsStatusReady)
-			assert.NoError(t, err)
-
-			dbHardForkChunk2, err := chunkOrm.InsertChunk(context.Background(), hardForkChunk2)
-			assert.NoError(t, err)
-			err = l2BlockOrm.UpdateChunkHashInRange(context.Background(), 3, 100, dbHardForkChunk2.Hash)
-			assert.NoError(t, err)
-			err = chunkOrm.UpdateProofAndProvingStatusByHash(context.Background(), dbHardForkChunk2.Hash, chunkProof, types.ProvingTaskUnassigned, 1)
-			assert.NoError(t, err)
-			dbHardForkBatch2, err := batchOrm.InsertBatch(context.Background(), hardForkBatch2)
-			assert.NoError(t, err)
-			err = chunkOrm.UpdateBatchHashInRange(context.Background(), 1, 1, dbHardForkBatch2.Hash)
-			assert.NoError(t, err)
-			err = batchOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), dbHardForkBatch2.Hash, types.ChunkProofsStatusReady)
-			assert.NoError(t, err)
-
-			fmt.Println("data inserted")
-			time.Sleep(50 * time.Second)
-
-			getTaskNumber := 0
-			for i := 0; i < 2; i++ {
-				mockProver := newMockProver(t, fmt.Sprintf("mock_prover_%d", i), coordinatorURL, tt.proofType, version.Version)
-				proverTask, errCode, errMsg := mockProver.getProverTask(t, tt.proofType, tt.proverForkNames[i])
-				assert.Equal(t, tt.exceptGetTaskErrCodes[i], errCode)
-				assert.Equal(t, tt.exceptGetTaskErrMsgs[i], errMsg)
-				if errCode != types.Success {
-					continue
-				}
-				getTaskNumber++
-				mockProver.submitProof(t, proverTask, verifiedSuccess, types.Success, tt.proverForkNames[i])
-			}
-			assert.Equal(t, getTaskNumber, tt.exceptTaskNumber)
-		})
-	}
-}
-
-func testHardForkAssignTaskMultiCircuits(t *testing.T) {
-	tests := []struct {
-		name                  string
-		proofType             message.ProofType
-		forkNumbers           map[string]int64
-		proverForkNames       []string
-		exceptTaskNumber      int
-		exceptGetTaskErrCodes []int
-		exceptGetTaskErrMsgs  []string
-	}{
-		{ // hard fork 4, prover 4 block [2-3]
-			name:                  "noTaskForkChunkProverVersionLargeOrEqualThanHardFork",
-			proofType:             message.ProofTypeChunk,
-			forkNumbers:           map[string]int64{"bernoulli": forkNumberFour},
-			exceptTaskNumber:      0,
-			proverForkNames:       []string{"bernoulli", "bernoulli"},
-			exceptGetTaskErrCodes: []int{types.ErrCoordinatorEmptyProofData, types.ErrCoordinatorEmptyProofData},
-			exceptGetTaskErrMsgs:  []string{"get empty prover task", "get empty prover task"},
-		},
-		{
-			name:                  "noTaskForkBatchProverVersionLargeOrEqualThanHardFork",
-			proofType:             message.ProofTypeBatch,
-			forkNumbers:           map[string]int64{"bernoulli": forkNumberFour},
-			exceptTaskNumber:      0,
-			proverForkNames:       []string{"bernoulli", "bernoulli"},
-			exceptGetTaskErrCodes: []int{types.ErrCoordinatorEmptyProofData, types.ErrCoordinatorEmptyProofData},
-			exceptGetTaskErrMsgs:  []string{"get empty prover task", "get empty prover task"},
-		},
-	}
-
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			coordinatorURL := randomURL()
@@ -1,112 +0,0 @@
-package main
-
-import (
-	"context"
-	"errors"
-	"math/big"
-	"net/http"
-	"scroll-tech/common/database"
-	"scroll-tech/common/version"
-	"scroll-tech/coordinator/internal/config"
-	"scroll-tech/coordinator/internal/controller/api"
-	"scroll-tech/coordinator/internal/controller/cron"
-	"scroll-tech/coordinator/internal/route"
-	"time"
-
-	"github.com/gin-gonic/gin"
-	"github.com/scroll-tech/go-ethereum/params"
-	"gorm.io/gorm"
-)
-
-// GetGormDBClient returns a gorm.DB by connecting to the running postgres container
-func GetGormDBClient() (*gorm.DB, error) {
-	// endpoint, err := t.GetDBEndPoint()
-	// if err != nil {
-	// 	return nil, err
-	// }
-	endpoint := "postgres://lmr:@localhost:5432/unittest?sslmode=disable"
-	dbCfg := &database.Config{
-		DSN:        endpoint,
-		DriverName: "postgres",
-		MaxOpenNum: 200,
-		MaxIdleNum: 20,
-	}
-	return database.InitDB(dbCfg)
-}
-
-func setupCoordinator(proversPerSession uint8, coordinatorURL string, nameForkMap map[string]int64) (*cron.Collector, *http.Server) {
-	db, err := GetGormDBClient()
-	if err != nil {
-		panic(err.Error())
-	}
-
-	tokenTimeout := 6
-	conf := &config.Config{
-		L2: &config.L2{
-			ChainID: 111,
-		},
-		ProverManager: &config.ProverManager{
-			ProversPerSession: proversPerSession,
-			Verifier: &config.VerifierConfig{
-				MockMode: true,
-			},
-			BatchCollectionTimeSec: 10,
-			ChunkCollectionTimeSec: 10,
-			MaxVerifierWorkers:     10,
-			SessionAttempts:        5,
-			MinProverVersion:       version.Version,
-		},
-		Auth: &config.Auth{
-			ChallengeExpireDurationSec: tokenTimeout,
-			LoginExpireDurationSec:     tokenTimeout,
-		},
-	}
-
-	var chainConf params.ChainConfig
-	for forkName, forkNumber := range nameForkMap {
-		switch forkName {
-		case "shanghai":
-			chainConf.ShanghaiBlock = big.NewInt(forkNumber)
-		case "bernoulli":
-			chainConf.BernoulliBlock = big.NewInt(forkNumber)
-		case "london":
-			chainConf.LondonBlock = big.NewInt(forkNumber)
-		case "istanbul":
-			chainConf.IstanbulBlock = big.NewInt(forkNumber)
-		case "homestead":
-			chainConf.HomesteadBlock = big.NewInt(forkNumber)
-		case "eip155":
-			chainConf.EIP155Block = big.NewInt(forkNumber)
-		}
-	}
-
-	proofCollector := cron.NewCollector(context.Background(), db, conf, nil)
-
-	router := gin.New()
-	api.InitController(conf, &chainConf, db, nil)
-	route.Route(router, conf, nil)
-	srv := &http.Server{
-		Addr:    coordinatorURL,
-		Handler: router,
-	}
-	go func() {
-		runErr := srv.ListenAndServe()
-		if runErr != nil && !errors.Is(runErr, http.ErrServerClosed) {
-			panic(runErr.Error())
-		}
-	}()
-	time.Sleep(time.Second * 2)
-
-	return proofCollector, srv
-}
-
-func main() {
-	coordinatorURL := ":9091"
-	nameForkMap := map[string]int64{"london": 2,
-		"istanbul":  3,
-		"bernoulli": 4}
-	setupCoordinator(1, coordinatorURL, nameForkMap)
-
-	var c = make(chan struct{}, 1)
-	_ = <-c
-}
@@ -378,8 +378,8 @@ github.com/sclevine/agouti v3.0.0+incompatible h1:8IBJS6PWz3uTlMP3YBIR5f+KAldcGu
 github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw=
 github.com/scroll-tech/da-codec v0.0.0-20240515170552-2e5286688fce h1:SN43TBs7VaJt9q737eWWqGz0OCg4v+PtUn3RbJcG1o0=
 github.com/scroll-tech/da-codec v0.0.0-20240515170552-2e5286688fce/go.mod h1:1wWYii0OPwd5kw+xrz0PFgS420xNadrNF1x/ELJT+TM=
-github.com/scroll-tech/da-codec v0.0.0-20240527110002-d7bcb621a61a h1:5uWQHo/+cXexQGeSrywtXM2z29zRFctmux2vXs3JLrM=
-github.com/scroll-tech/da-codec v0.0.0-20240527110002-d7bcb621a61a/go.mod h1:1wWYii0OPwd5kw+xrz0PFgS420xNadrNF1x/ELJT+TM=
+github.com/scroll-tech/da-codec v0.0.0-20240605080813-32bfc9fccde7 h1:CDrPMqifvAVyYqu0x1J5qickVV0b51tApPnOwDYLESI=
+github.com/scroll-tech/da-codec v0.0.0-20240605080813-32bfc9fccde7/go.mod h1:1wWYii0OPwd5kw+xrz0PFgS420xNadrNF1x/ELJT+TM=
 github.com/scroll-tech/go-ethereum v1.10.14-0.20221202061207-804e7edc23ba/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
 github.com/scroll-tech/go-ethereum v1.10.14-0.20221213034543-78c1f57fcfea/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
 github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
@@ -2,28 +2,14 @@
 
 IMAGE_VERSION=latest
 REPO_ROOT_DIR=./..
-LIBSCROLL_ZSTD_VERSION=v0.1.0-rc0-ubuntu20.04
-SCROLL_LIB_PATH=/scroll/lib
 
 mock_abi:
 	cd .. && solc --evm-version cancun --bin --abi --optimize --overwrite -o ./build/bin ./rollup/mock_bridge/MockBridge.sol
 	cd .. && go run github.com/scroll-tech/go-ethereum/cmd/abigen --bin=./build/bin/MockBridge.bin --abi=./build/bin/MockBridge.abi --pkg=mock_bridge --out=./rollup/mock_bridge/MockBridge.go
 
-libzstd:
-	sudo mkdir -p $(SCROLL_LIB_PATH)/
-	sudo wget -O $(SCROLL_LIB_PATH)/libscroll_zstd.so https://github.com/scroll-tech/da-codec/releases/download/v0.1.0-rc0-ubuntu20.04/libscroll_zstd.so
-
-rollup_bins: libzstd ## Builds the Rollup bins.
-	export LD_LIBRARY_PATH=$$LD_LIBRARY_PATH:$(SCROLL_LIB_PATH) && \
-	export CGO_LDFLAGS="-L$(SCROLL_LIB_PATH) -Wl,-rpath,$(SCROLL_LIB_PATH)" && \
+rollup_bins: ## Builds the Rollup bins.
 	go build -o $(PWD)/build/bin/event_watcher ./cmd/event_watcher/
-
-	export LD_LIBRARY_PATH=$$LD_LIBRARY_PATH:$(SCROLL_LIB_PATH) && \
-	export CGO_LDFLAGS="-L$(SCROLL_LIB_PATH) -Wl,-rpath,$(SCROLL_LIB_PATH)" && \
 	go build -o $(PWD)/build/bin/gas_oracle ./cmd/gas_oracle/
-
-	export LD_LIBRARY_PATH=$$LD_LIBRARY_PATH:$(SCROLL_LIB_PATH) && \
-	export CGO_LDFLAGS="-L$(SCROLL_LIB_PATH) -Wl,-rpath,$(SCROLL_LIB_PATH)" && \
 	go build -o $(PWD)/build/bin/rollup_relayer ./cmd/rollup_relayer/
 
 event_watcher: ## Builds the event_watcher bin
@@ -10,7 +10,7 @@ require (
 	github.com/go-resty/resty/v2 v2.7.0
 	github.com/holiman/uint256 v1.2.4
 	github.com/prometheus/client_golang v1.16.0
-	github.com/scroll-tech/da-codec v0.0.0-20240527110002-d7bcb621a61a
+	github.com/scroll-tech/da-codec v0.0.0-20240605080813-32bfc9fccde7
 	github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea
 	github.com/smartystreets/goconvey v1.8.0
 	github.com/stretchr/testify v1.9.0
@@ -236,8 +236,8 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
 github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/scroll-tech/da-codec v0.0.0-20240527110002-d7bcb621a61a h1:5uWQHo/+cXexQGeSrywtXM2z29zRFctmux2vXs3JLrM=
-github.com/scroll-tech/da-codec v0.0.0-20240527110002-d7bcb621a61a/go.mod h1:1wWYii0OPwd5kw+xrz0PFgS420xNadrNF1x/ELJT+TM=
+github.com/scroll-tech/da-codec v0.0.0-20240605080813-32bfc9fccde7 h1:CDrPMqifvAVyYqu0x1J5qickVV0b51tApPnOwDYLESI=
+github.com/scroll-tech/da-codec v0.0.0-20240605080813-32bfc9fccde7/go.mod h1:1wWYii0OPwd5kw+xrz0PFgS420xNadrNF1x/ELJT+TM=
 github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea h1:CH1WXWrpEpLaP3N+bFs2a1xdE0+lRm1AuJQb5YvE6Ls=
 github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea/go.mod h1:i4VBgWoaW/y0D8MmQb7hSOulyw1dKhuiSFAbznwivCA=
 github.com/scroll-tech/zktrie v0.8.2 h1:UMuIfA+jdgWMLmTgTL64Emo+zzMOdcnH0+eYdDcshxQ=
@@ -32,7 +32,7 @@ func setupL1RelayerDB(t *testing.T) *gorm.DB {
 	return db
 }
 
-// testCreateNewRelayer test create new relayer instance and stop
+// testCreateNewL1Relayer test create new relayer instance and stop
 func testCreateNewL1Relayer(t *testing.T) {
 	db := setupL1RelayerDB(t)
 	defer database.CloseDB(db)
@@ -1,16 +0,0 @@
-#!/bin/bash
-
-# Download .so files
-export LIBSCROLL_ZSTD_VERSION=v0.1.0-rc0-ubuntu20.04
-export SCROLL_LIB_PATH=/scroll/lib
-
-sudo mkdir -p $SCROLL_LIB_PATH
-
-sudo wget -O $SCROLL_LIB_PATH/libscroll_zstd.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libscroll_zstd.so
-
-# Set the environment variable
-export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$SCROLL_LIB_PATH
-export CGO_LDFLAGS="-L$SCROLL_LIB_PATH -Wl,-rpath,$SCROLL_LIB_PATH"
-
-# Run module tests
-go test -v -race -gcflags="-l" -ldflags="-s=false" -coverprofile=coverage.txt -covermode=atomic ./...