staterecover: adds blobscan to local stack (#279)

* staterecover: adds blobscan to local stack

* staterecover: adds blobscan env file

* connect port 4001 for blobscan-api

* staterecover: improve test utils for code reuse

---------

Co-authored-by: thedarkjester <grant.southey@consensys.net>
Pedro Novais
2024-11-21 10:08:07 +00:00
committed by GitHub
parent c0b28f1970
commit 3d5d4e6ab0
7 changed files with 238 additions and 42 deletions

View File

@@ -28,7 +28,7 @@ clean-testnet-folders:
rm -rf tmp/testnet/*
clean-environment:
docker compose -f docker/compose.yml -f docker/compose-local-dev-traces-v2.overrides.yml --profile l1 --profile l2 --profile debug down || true
docker compose -f docker/compose.yml -f docker/compose-local-dev-traces-v2.overrides.yml --profile l1 --profile l2 --profile debug --profile staterecover down || true
make clean-local-folders
docker network prune -f
docker volume rm linea-local-dev linea-logs || true # ignore failure if volumes do not exist already

View File

@@ -167,6 +167,34 @@ dockerCompose {
noRecreate = true
projectName = "docker"
}
localStackForStateRecover {
startedServices = [
"postgres",
"sequencer",
"l1-node-genesis-generator",
"l1-el-node",
"l1-cl-node",
"l2-node",
"blobscan-api",
"blobscan-indexer",
"redis",
// For debug
// "l1-blockscout",
// "l2-blockscout"
]
composeAdditionalArgs = ["--profile", "l1", "--profile", "l2", "--profile", "staterecover"]
useComposeFiles = ["${project.rootDir.path}/docker/compose.yml"]
waitForHealthyStateTimeout = Duration.ofMinutes(3)
waitForTcpPorts = false
removeOrphans = true
// this is to avoid recreating the containers,
// especially l1-node-genesis-generator, which corrupts the state if run more than once
// without cleaning the volumes
noRecreate = true
projectName = "docker"
environment.put("L1_GENESIS_TIME", "${Instant.now().plusSeconds(3).getEpochSecond()}")
}
}
static Boolean hasKotlinPlugin(Project proj) {
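A hedged sketch of how an integration-test task might be wired to this new nested stack: it assumes the Avast docker-compose Gradle plugin's usual `<name>ComposeUp` / `<name>ComposeDown` task naming for nested configurations, and the task name `stateRecoverIntegrationTest` is purely illustrative (shown in Gradle Kotlin DSL rather than the Groovy used above).

```kotlin
// Illustrative only: run a hypothetical state-recover integration test suite
// against the localStackForStateRecover compose configuration defined above.
tasks.register<Test>("stateRecoverIntegrationTest") {
    useJUnitPlatform()
    // Bring the stack up before the tests and always tear it down afterwards.
    dependsOn("localStackForStateRecoverComposeUp")
    finalizedBy("localStackForStateRecoverComposeDown")
}
```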

View File

@@ -137,7 +137,7 @@ private open class WhaleBasedAccountManager(
val randomPrivKey = Bytes.random(32).toHexString().replace("0x", "")
val newAccount = Account(randomPrivKey, Credentials.create(randomPrivKey).address)
val transferResult = whaleTxManager.sendTransaction(
/*gasPrice*/ 300000000.toBigInteger(),
/*gasPrice*/ 300_000_000.toBigInteger(),
/*gasLimit*/ 21000.toBigInteger(),
newAccount.address,
"",
@@ -163,11 +163,11 @@ private open class WhaleBasedAccountManager(
transferTx.transactionHash,
whaleAccount.address
)
web3jClient.waitForTransactionExecution(
web3jClient.waitForTxReceipt(
transferTx.transactionHash,
expectedStatus = "0x1",
timeout = 24.seconds,
pollInterval = 500.milliseconds
timeout = 40.seconds,
pollingInterval = 500.milliseconds
)
if (log.isDebugEnabled) {
log.debug(

View File

@@ -1,33 +1,40 @@
package net.consensys.zkevm.ethereum
import org.awaitility.Awaitility
import org.web3j.protocol.Web3j
import org.web3j.protocol.core.methods.response.TransactionReceipt
import kotlin.jvm.optionals.getOrNull
import kotlin.time.Duration
import kotlin.time.Duration.Companion.milliseconds
import kotlin.time.Duration.Companion.seconds
import kotlin.time.toJavaDuration
fun Web3j.waitForTransactionExecution(
transactionHash: String,
/**
* Helper to wait for a transaction receipt to be available.
* This is useful when you need to wait for a transaction to be mined in your tests before proceeding.
*
* @param txHash The transaction hash to wait for.
* @param timeout The maximum time to wait for the transaction receipt.
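 * @param expectedStatus Optional status the receipt must report (e.g. "0x1" for success); a mismatch throws.
 * @param pollingInterval Delay between receipt lookups while waiting.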
*/
fun Web3j.waitForTxReceipt(
txHash: String,
expectedStatus: String? = null,
timeout: Duration = 30.seconds,
pollInterval: Duration = 500.milliseconds
) {
Awaitility.await()
.timeout(timeout.toJavaDuration())
.pollInterval(pollInterval.toJavaDuration())
.untilAsserted {
val lastBlobTxReceipt = this.ethGetTransactionReceipt(transactionHash).send()
if (lastBlobTxReceipt.result == null) {
throw AssertionError("Transaction receipt not found: txHash=$transactionHash, timeout=$timeout")
}
expectedStatus?.also {
if (lastBlobTxReceipt.result.status != expectedStatus) {
throw AssertionError(
"Transaction status does not match expected status: " +
"txHash=$transactionHash, expected=$expectedStatus, actual=${lastBlobTxReceipt.result.status}"
)
}
timeout: Duration = 5.seconds,
pollingInterval: Duration = 500.milliseconds
): TransactionReceipt {
val waitLimit = System.currentTimeMillis() + timeout.inWholeMilliseconds
while (System.currentTimeMillis() < waitLimit) {
val receipt = this.ethGetTransactionReceipt(txHash).send().transactionReceipt.getOrNull()
if (receipt != null) {
if (expectedStatus != null && receipt.status != expectedStatus) {
throw RuntimeException(
"Transaction status does not match expected status: " +
"txHash=$txHash, expected=$expectedStatus, actual=${receipt.status}"
)
}
return receipt
}
Thread.sleep(pollingInterval.inWholeMilliseconds)
}
throw RuntimeException("Timed out waiting $timeout for transaction receipt for tx $txHash")
}
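A short usage sketch for the renamed helper; the node URL and transaction hash below are placeholders, not values from this change:

```kotlin
import org.web3j.protocol.Web3j
import org.web3j.protocol.http.HttpService
import kotlin.time.Duration.Companion.milliseconds
import kotlin.time.Duration.Companion.seconds

fun main() {
    // Connect to a local EL node (placeholder URL).
    val web3j = Web3j.build(HttpService("http://localhost:8545"))
    val txHash = "0x..." // hash of a transaction submitted earlier in the test

    // Blocks until the receipt shows up (or throws after the timeout) and
    // fails fast if the receipt status is not the expected success status.
    val receipt = web3j.waitForTxReceipt(
        txHash,
        expectedStatus = "0x1",
        timeout = 40.seconds,
        pollingInterval = 500.milliseconds
    )
    println("Mined in block ${receipt.blockNumber}")
}
```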

View File

@@ -10,7 +10,7 @@ networks:
ipam:
config:
- subnet: 11.11.11.0/24
l1-network:
l1network:
driver: bridge
ipam:
config:
@@ -66,7 +66,7 @@ services:
- ../config/common/traces-limits-besu-v1.toml:/var/lib/besu/traces-limits.toml:ro
- ../tmp/linea-besu-sequencer/plugins:/opt/besu/plugins/
networks:
l1-network:
l1network:
linea:
ipv4_address: 11.11.11.101
@@ -164,7 +164,7 @@ services:
- ../tmp/linea-besu-sequencer/plugins:/opt/besu/plugins/
- ../tmp/local/:/data/:rw
networks:
l1-network:
l1network:
linea:
ipv4_address: 11.11.11.119
@@ -280,7 +280,7 @@ services:
POSTGRES_PASSWORD: "postgres"
POSTGRES_DB: "postman_db"
networks:
l1-network:
l1network:
ipv4_address: 10.10.10.222
linea:
ipv4_address: 11.11.11.222
@@ -359,7 +359,7 @@ services:
- ../testdata/type2state-manager/state-proof.json:/opt/consensys/linea/coordinator/testdata/type2state-manager/state-proof.json
- local-dev:/data/
networks:
l1-network:
l1network:
ipv4_address: 10.10.10.106
linea:
ipv4_address: 11.11.11.106
@@ -394,10 +394,10 @@ services:
ipv4_address: 11.11.11.200
postgres:
image: postgres:14.5
image: postgres:16.0
hostname: postgres
container_name: postgres
profiles: [ "l2", "debug", "external-to-monorepo" ]
profiles: [ "l2", "debug", "external-to-monorepo", "staterecover" ]
environment:
POSTGRES_USER: ${POSTGRES_USER:-postgres}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-postgres}
@@ -421,7 +421,7 @@ services:
- ./postgres/conf/:/etc/postgresql/
networks:
- linea
- l1-network
- l1network
l1-el-node:
container_name: l1-el-node
@@ -430,7 +430,7 @@ services:
profiles: [ "l1", "debug", "external-to-monorepo" ]
depends_on:
l1-node-genesis-generator:
condition: service_completed_successfully
condition: service_completed_successfully
healthcheck:
test: [ "CMD-SHELL", "bash -c \"[ -f /tmp/pid ]\"" ]
interval: 1s
@@ -457,13 +457,14 @@ services:
- "30303:30303"
- "9001:9001/tcp"
networks:
l1-network:
l1network:
ipv4_address: 10.10.10.201
l1-cl-node:
container_name: l1-cl-node
hostname: l1-cl-node
image: consensys/teku:24.2.0
# image: consensys/teku:24.2.0
image: consensys/teku:24.10.3
profiles: [ "l1", "debug", "external-to-monorepo" ]
depends_on:
l1-el-node:
@@ -482,16 +483,15 @@ services:
- "9002:9000"
- "8008:8008/tcp"
- "4003:4000/tcp"
- "5051/tcp"
networks:
l1-network:
l1network:
ipv4_address: 10.10.10.202
l1-node-genesis-generator:
build:
context: ./config/l1-node/
profiles: [ "l1", "debug", "external-to-monorepo" ]
command:
command:
--genesis-time ${L1_GENESIS_TIME}
--l1-genesis /config/l1-genesis.json
--network-config /config/network-config.yml
@@ -527,7 +527,7 @@ services:
volumes:
- ./config/l1-node/el/genesis.json:/app/genesis.json:ro
networks:
- l1-network
- l1network
zkbesu-shomei:
image: consensys/linea-besu:linea-delivery-27
@@ -690,7 +690,74 @@ services:
- ./config/linea-local-dev-genesis-PoA.json:/app/genesis.json:ro
networks:
- linea
########################
# Blob Scan stack, used for state recover app
########################
blobscan-api:
container_name: blobscan-api
hostname: blobscan-api
image: blossomlabs/blobscan-api:1.1.0
platform: linux/amd64 # only linux available
profiles: [ "staterecover" ]
ports:
- "4001:4001"
env_file: "./config/blobscan/env"
restart: no
# healthcheck:
# test: [ "CMD", "curl", "-f", "http://localhost:4001/healthcheck" ]
# disable: true
# interval: 30s
# timeout: 10s
# retries: 20
# start_period: 5s
networks:
linea:
l1network:
ipv4_address: 10.10.10.203
depends_on:
postgres:
condition: service_healthy
blobscan-indexer:
container_name: blobscan-indexer
hostname: blobscan-indexer
image: blossomlabs/blobscan-indexer:0.2.1
platform: linux/amd64 # only linux available
profiles: [ "staterecover" ]
env_file: "./config/blobscan/env"
networks:
linea:
l1network:
ipv4_address: 10.10.10.204
restart: always
depends_on:
postgres:
condition: service_healthy
blobscan-api:
condition: service_started
redis:
container_name: redis
hostname: redis
image: "redis:7.4.1-alpine"
command: redis-server /usr/local/etc/redis/redis.conf
profiles: [ "staterecover" ]
ports:
- "6379:6379"
volumes:
- ./misc/data:/var/lib/redis
- ./misc/conf:/usr/local/etc/redis/redis.conf
environment:
- REDIS_REPLICATION_MODE=master
- REDIS_PASSWORD=s3cr3t
- REDIS_USERNAME=blobscan
networks:
l1network:
ipv4_address: 10.10.10.205
########################
# Observability stack
########################
loki:
container_name: loki
hostname: loki

View File

@@ -0,0 +1,92 @@
# Since .env is gitignored, you can use .env.example to build a new `.env` file when you clone the repo.
# Keep this file up-to-date when you add new variables to `.env`.
# This file will be committed to version control, so make sure not to have any secrets in it.
# If you are cloning this repo, create a copy of this file named `.env` and populate it with your secrets.
# We use dotenv to load Prisma from Next.js' .env file
# @see https://www.prisma.io/docs/reference/database-reference/connection-urls
# DATABASE_URL=postgresql://blobscan:s3cr3t@localhost:5432/blobscan_dev?schema=public
DATABASE_URL=postgresql://postgres:postgres@postgres:5432/blobscan
BLOBSCAN_WEB_TAG=next
BLOBSCAN_API_TAG=next
INDEXER_TAG=master
DENCUN_FORK_SLOT=0
############################
#### rest api server APP
############################
BLOBSCAN_API_PORT=4001
EXTERNAL_API_PORT=4001
CHAIN_ID=31648428
LOG_LEVEL=debug
NETWORK_NAME=devnet
# SENTRY_DSN_API=
############################
#### blobscan indexer APP
############################
SECRET_KEY=supersecret
BLOBSCAN_API_ENDPOINT=http://blobscan-api:4001
BEACON_NODE_ENDPOINT=http://l1-cl-node:4000
EXECUTION_NODE_ENDPOINT=http://l1-el-node:8545
RUST_LOG=blob_indexer=info
LOGGER=default
NODE_ENV=development
# SENTRY_DSN_INDEXER=
### telemetry
# METRICS_ENABLED=
# TRACES_ENABLED=
# OTLP_AUTH_USERNAME=
# OTLP_AUTH_PASSWORD=
# OTEL_EXPORTER_OTLP_PROTOCOL=
# OTEL_EXPORTER_OTLP_ENDPOINT=
# OTEL_DIAG_ENABLED=
### blobscan website
### NOTE: Just a placeholder for now. Not used in the project at the moment.
EXTERNAL_WEB_PORT=3000
# BEE_ENDPOINT=
CHAIN_ID=31648428
NETWORK_NAME=devnet
# GOOGLE_STORAGE_BUCKET_NAME=blobscan-test-bucket
# GOOGLE_STORAGE_PROJECT_ID=blobscan-test-project
# GOOGLE_SERVICE_KEY=
# GOOGLE_STORAGE_API_ENDPOINT=http://localhost:4443
BLOB_PROPAGATOR_ENABLED=false
GOOGLE_STORAGE_ENABLED=false
POSTGRES_STORAGE_ENABLED=true
SWARM_STORAGE_ENABLED=false
REDIS_URI=redis://redis:6379/1
# PRISMA_BATCH_OPERATIONS_MAX_SIZE=
# FEEDBACK_WEBHOOK_URL=
# @see https://next-auth.js.org/configuration/options#nextauth_url
NEXTAUTH_URL=http://localhost:3000
# You can generate the secret via 'openssl rand -base64 32' on Unix
# @see https://next-auth.js.org/configuration/options#secret
SECRET_KEY=supersecret
NEXT_PUBLIC_NETWORK_NAME=mainnet
NEXT_PUBLIC_VERCEL_ANALYTICS_ENABLED=false
NEXT_PUBLIC_BEACON_BASE_URL=https://dora.ethpandaops.io/
NEXT_PUBLIC_EXPLORER_BASE_URL=https://etherscan.io/
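As an illustration of how a test harness could wait for the new containers, the sketch below polls the blobscan-api healthcheck endpoint that the compose file references (commented out) on the published port 4001; the function name and retry timings are assumptions, not part of this change.

```kotlin
import java.net.URI
import java.net.http.HttpClient
import java.net.http.HttpRequest
import java.net.http.HttpResponse

// Polls the blobscan-api healthcheck endpoint until it answers 200, or gives up.
fun waitForBlobscanApi(
    url: String = "http://localhost:4001/healthcheck", // port published by the compose file above
    attempts: Int = 60
) {
    val client = HttpClient.newHttpClient()
    val request = HttpRequest.newBuilder(URI.create(url)).GET().build()
    repeat(attempts) {
        val healthy = runCatching {
            client.send(request, HttpResponse.BodyHandlers.ofString()).statusCode() == 200
        }.getOrDefault(false)
        if (healthy) return
        Thread.sleep(1_000)
    }
    error("blobscan-api did not become healthy at $url after $attempts attempts")
}
```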

View File

@@ -3,3 +3,5 @@ CREATE DATABASE postman_db;
CREATE DATABASE l1_blockscout_db;
CREATE DATABASE l2_blockscout_db;
CREATE DATABASE linea_transaction_exclusion;
CREATE DATABASE blobscan;