Mirror of https://github.com/vacp2p/status-linea-besu.git (synced 2026-01-09 15:28:09 -05:00)
Merge branch 'main' into zkbesu
@@ -11,6 +11,7 @@
- Fine tune already seen txs tracker when a tx is removed from the pool [#7755](https://github.com/hyperledger/besu/pull/7755)
- Create and publish Besu BOM (Bill of Materials) [#7615](https://github.com/hyperledger/besu/pull/7615)
- Update Java dependencies [#7786](https://github.com/hyperledger/besu/pull/7786)
- Add a method to get all the transactions in the pool to the `TransactionPoolService`, to easily access the transaction pool content from plugins [#7813](https://github.com/hyperledger/besu/pull/7813) (see the usage sketch below)
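The #7813 entry above adds a plugin-facing accessor for the pool content. Below is a minimal, illustrative sketch of a plugin reading the pool through `TransactionPoolService`; the accessor name `getPendingTransactions()`, the plugin class name, and the lookup wiring are assumptions for illustration and are not taken from this diff.

import java.util.Collection;

import org.hyperledger.besu.plugin.BesuContext;
import org.hyperledger.besu.plugin.BesuPlugin;
import org.hyperledger.besu.plugin.services.TransactionPoolService;

public class PoolInspectionPlugin implements BesuPlugin {

  private BesuContext context;

  @Override
  public void register(final BesuContext context) {
    // Keep the context; services are looked up once Besu has started.
    this.context = context;
  }

  @Override
  public void start() {
    context
        .getService(TransactionPoolService.class)
        .ifPresent(
            pool -> {
              // Assumed accessor added by #7813; name and return type are illustrative only.
              final Collection<?> pending = pool.getPendingTransactions();
              System.out.println("Transactions currently in the pool: " + pending.size());
            });
  }

  @Override
  public void stop() {}
}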
### Bug fixes

@@ -49,13 +49,15 @@ import org.hyperledger.besu.tests.acceptance.dsl.transaction.web3.Web3Transactio
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.lang.ProcessBuilder.Redirect;
import java.math.BigInteger;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import org.junit.After;
import org.apache.logging.log4j.ThreadContext;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInfo;
import org.junit.jupiter.api.extension.ExtendWith;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -125,7 +127,15 @@ public class AcceptanceTestBase {
exitedSuccessfully = new ExitedWithCode(0);
}

@After
@BeforeEach
public void setUp(final TestInfo testInfo) {
// log4j is configured to create a file per test
// build/acceptanceTestLogs/${ctx:class}.${ctx:test}.log
ThreadContext.put("class", this.getClass().getSimpleName());
ThreadContext.put("test", testInfo.getTestMethod().get().getName());
}

@AfterEach
public void tearDownAcceptanceTestBase() {
reportMemory();
cluster.close();
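As an aside (not part of the diff): a test migrated to this JUnit 5 lifecycle inherits the per-test log routing and the cluster teardown shown above. The node-factory call in the sketch below is an assumption, used only to make the example complete.

import org.hyperledger.besu.tests.acceptance.dsl.AcceptanceTestBase;
import org.hyperledger.besu.tests.acceptance.dsl.node.BesuNode;

import org.junit.jupiter.api.Test;

class SampleAcceptanceTest extends AcceptanceTestBase {

  @Test
  void shouldStartAndStopANode() throws Exception {
    // Per the ThreadContext keys set in setUp, this method's output would be routed to
    // build/acceptanceTestLogs/SampleAcceptanceTest.shouldStartAndStopANode.log.
    final BesuNode node = besu.createMinerNode("miner1"); // assumed BesuNodeFactory method
    cluster.start(node);
    // tearDownAcceptanceTestBase() reports memory and closes the cluster after each test.
  }
}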
@@ -143,7 +153,9 @@ public class AcceptanceTestBase {
if (command != null) {
LOG.info("Memory usage at end of test:");
final ProcessBuilder processBuilder =
new ProcessBuilder(command).redirectErrorStream(true).redirectInput(Redirect.INHERIT);
new ProcessBuilder(command)
.redirectErrorStream(true)
.redirectInput(ProcessBuilder.Redirect.INHERIT);
try {
final Process memInfoProcess = processBuilder.start();
outputProcessorExecutor.execute(() -> printOutput(memInfoProcess));

@@ -1,200 +0,0 @@
|
||||
/*
|
||||
* Copyright contributors to Hyperledger Besu.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations under the License.
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
package org.hyperledger.besu.tests.acceptance.dsl;
|
||||
|
||||
import static java.nio.charset.StandardCharsets.UTF_8;
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.account.Accounts;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.blockchain.Blockchain;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.condition.admin.AdminConditions;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.condition.bft.BftConditions;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.condition.clique.CliqueConditions;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.condition.eth.EthConditions;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.condition.login.LoginConditions;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.condition.net.NetConditions;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.condition.perm.PermissioningConditions;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.condition.process.ExitedWithCode;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.condition.txpool.TxPoolConditions;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.condition.web3.Web3Conditions;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.contract.ContractVerifier;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.node.Node;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.node.cluster.Cluster;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.node.configuration.BesuNodeFactory;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.node.configuration.permissioning.PermissionedNodeBuilder;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.transaction.account.AccountTransactions;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.transaction.admin.AdminTransactions;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.transaction.bft.BftTransactions;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.transaction.clique.CliqueTransactions;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.transaction.contract.ContractTransactions;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.transaction.eth.EthTransactions;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.transaction.miner.MinerTransactions;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.transaction.net.NetTransactions;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.transaction.perm.PermissioningTransactions;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.transaction.txpool.TxPoolTransactions;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.transaction.web3.Web3Transactions;
|
||||
|
||||
import java.io.BufferedReader;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStreamReader;
|
||||
import java.math.BigInteger;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
|
||||
import org.apache.logging.log4j.ThreadContext;
|
||||
import org.junit.jupiter.api.AfterEach;
|
||||
import org.junit.jupiter.api.BeforeEach;
|
||||
import org.junit.jupiter.api.Test;
|
||||
import org.junit.jupiter.api.TestInfo;
|
||||
import org.junit.jupiter.api.extension.ExtendWith;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
/**
|
||||
* Superclass for acceptance tests. For now (transition to junit5 is ongoing) this class supports
|
||||
* junit5 format. Once the transition is complete, this class can be removed and recombined with
|
||||
* AcceptanceTestBase (original).
|
||||
*/
|
||||
@ExtendWith(AcceptanceTestBaseTestWatcher.class)
|
||||
public class AcceptanceTestBaseJunit5 {
|
||||
|
||||
private static final Logger LOG = LoggerFactory.getLogger(AcceptanceTestBaseJunit5.class);
|
||||
|
||||
protected final Accounts accounts;
|
||||
protected final AccountTransactions accountTransactions;
|
||||
protected final AdminConditions admin;
|
||||
protected final AdminTransactions adminTransactions;
|
||||
protected final Blockchain blockchain;
|
||||
protected final CliqueConditions clique;
|
||||
protected final CliqueTransactions cliqueTransactions;
|
||||
protected final Cluster cluster;
|
||||
protected final ContractVerifier contractVerifier;
|
||||
protected final ContractTransactions contractTransactions;
|
||||
protected final EthConditions eth;
|
||||
protected final EthTransactions ethTransactions;
|
||||
protected final BftTransactions bftTransactions;
|
||||
protected final BftConditions bft;
|
||||
protected final LoginConditions login;
|
||||
protected final NetConditions net;
|
||||
protected final BesuNodeFactory besu;
|
||||
protected final PermissioningConditions perm;
|
||||
protected final PermissionedNodeBuilder permissionedNodeBuilder;
|
||||
protected final PermissioningTransactions permissioningTransactions;
|
||||
protected final MinerTransactions minerTransactions;
|
||||
protected final Web3Conditions web3;
|
||||
protected final TxPoolConditions txPoolConditions;
|
||||
protected final TxPoolTransactions txPoolTransactions;
|
||||
protected final ExitedWithCode exitedSuccessfully;
|
||||
|
||||
private final ExecutorService outputProcessorExecutor = Executors.newCachedThreadPool();
|
||||
|
||||
protected AcceptanceTestBaseJunit5() {
|
||||
ethTransactions = new EthTransactions();
|
||||
accounts = new Accounts(ethTransactions);
|
||||
adminTransactions = new AdminTransactions();
|
||||
cliqueTransactions = new CliqueTransactions();
|
||||
bftTransactions = new BftTransactions();
|
||||
accountTransactions = new AccountTransactions(accounts);
|
||||
permissioningTransactions = new PermissioningTransactions();
|
||||
contractTransactions = new ContractTransactions();
|
||||
minerTransactions = new MinerTransactions();
|
||||
blockchain = new Blockchain(ethTransactions);
|
||||
clique = new CliqueConditions(ethTransactions, cliqueTransactions);
|
||||
eth = new EthConditions(ethTransactions);
|
||||
bft = new BftConditions(bftTransactions);
|
||||
login = new LoginConditions();
|
||||
net = new NetConditions(new NetTransactions());
|
||||
cluster = new Cluster(net);
|
||||
perm = new PermissioningConditions(permissioningTransactions);
|
||||
admin = new AdminConditions(adminTransactions);
|
||||
web3 = new Web3Conditions(new Web3Transactions());
|
||||
besu = new BesuNodeFactory();
|
||||
txPoolTransactions = new TxPoolTransactions();
|
||||
txPoolConditions = new TxPoolConditions(txPoolTransactions);
|
||||
contractVerifier = new ContractVerifier(accounts.getPrimaryBenefactor());
|
||||
permissionedNodeBuilder = new PermissionedNodeBuilder();
|
||||
exitedSuccessfully = new ExitedWithCode(0);
|
||||
}
|
||||
|
||||
@BeforeEach
|
||||
public void setUp(final TestInfo testInfo) {
|
||||
// log4j is configured to create a file per test
|
||||
// build/acceptanceTestLogs/${ctx:class}.${ctx:test}.log
|
||||
ThreadContext.put("class", this.getClass().getSimpleName());
|
||||
ThreadContext.put("test", testInfo.getTestMethod().get().getName());
|
||||
}
|
||||
|
||||
@AfterEach
|
||||
public void tearDownAcceptanceTestBase() {
|
||||
reportMemory();
|
||||
cluster.close();
|
||||
}
|
||||
|
||||
public void reportMemory() {
|
||||
String os = System.getProperty("os.name");
|
||||
String[] command = null;
|
||||
if (os.contains("Linux")) {
|
||||
command = new String[] {"/usr/bin/top", "-n", "1", "-o", "%MEM", "-b", "-c", "-w", "180"};
|
||||
}
|
||||
if (os.contains("Mac")) {
|
||||
command = new String[] {"/usr/bin/top", "-l", "1", "-o", "mem", "-n", "20"};
|
||||
}
|
||||
if (command != null) {
|
||||
LOG.info("Memory usage at end of test:");
|
||||
final ProcessBuilder processBuilder =
|
||||
new ProcessBuilder(command)
|
||||
.redirectErrorStream(true)
|
||||
.redirectInput(ProcessBuilder.Redirect.INHERIT);
|
||||
try {
|
||||
final Process memInfoProcess = processBuilder.start();
|
||||
outputProcessorExecutor.execute(() -> printOutput(memInfoProcess));
|
||||
memInfoProcess.waitFor();
|
||||
LOG.debug("Memory info process exited with code {}", memInfoProcess.exitValue());
|
||||
} catch (final Exception e) {
|
||||
LOG.warn("Error running memory information process", e);
|
||||
}
|
||||
} else {
|
||||
LOG.info("Don't know how to report memory for OS {}", os);
|
||||
}
|
||||
}
|
||||
|
||||
private void printOutput(final Process process) {
|
||||
try (final BufferedReader in =
|
||||
new BufferedReader(new InputStreamReader(process.getInputStream(), UTF_8))) {
|
||||
String line = in.readLine();
|
||||
while (line != null) {
|
||||
LOG.info(line);
|
||||
line = in.readLine();
|
||||
}
|
||||
} catch (final IOException e) {
|
||||
LOG.warn("Failed to read output from memory information process: ", e);
|
||||
}
|
||||
}
|
||||
|
||||
protected void waitForBlockHeight(final Node node, final long blockchainHeight) {
|
||||
WaitUtils.waitFor(
|
||||
120,
|
||||
() ->
|
||||
assertThat(node.execute(ethTransactions.blockNumber()))
|
||||
.isGreaterThanOrEqualTo(BigInteger.valueOf(blockchainHeight)));
|
||||
}
|
||||
|
||||
@Test
|
||||
void dryRunDetector() {
|
||||
assertThat(true)
|
||||
.withFailMessage("This test is here so gradle --dry-run executes this class")
|
||||
.isTrue();
|
||||
}
|
||||
}
|
||||
@@ -17,8 +17,8 @@ package org.hyperledger.besu.tests.acceptance.dsl.node;
|
||||
import static com.google.common.base.Preconditions.checkState;
|
||||
import static java.nio.charset.StandardCharsets.UTF_8;
|
||||
|
||||
import org.hyperledger.besu.cli.options.DataStorageOptions;
|
||||
import org.hyperledger.besu.cli.options.TransactionPoolOptions;
|
||||
import org.hyperledger.besu.cli.options.storage.DataStorageOptions;
|
||||
import org.hyperledger.besu.cli.options.unstable.NetworkingOptions;
|
||||
import org.hyperledger.besu.ethereum.api.jsonrpc.ipc.JsonRpcIpcConfiguration;
|
||||
import org.hyperledger.besu.ethereum.eth.transactions.ImmutableTransactionPoolConfiguration;
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
*/
|
||||
package org.hyperledger.besu.tests.acceptance.bft;
|
||||
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.AcceptanceTestBaseJunit5;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.AcceptanceTestBase;
|
||||
|
||||
import java.util.stream.Stream;
|
||||
|
||||
@@ -22,7 +22,7 @@ import org.junit.jupiter.api.Disabled;
|
||||
import org.junit.jupiter.params.provider.Arguments;
|
||||
|
||||
@Disabled("This is not a test class, it offers BFT parameterization only.")
|
||||
public abstract class ParameterizedBftTestBase extends AcceptanceTestBaseJunit5 {
|
||||
public abstract class ParameterizedBftTestBase extends AcceptanceTestBase {
|
||||
protected String bftType;
|
||||
protected BftAcceptanceTestParameterization nodeFactory;
|
||||
|
||||
|
||||
@@ -14,13 +14,13 @@
|
||||
*/
|
||||
package org.hyperledger.besu.tests.acceptance.bft.qbft;
|
||||
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.AcceptanceTestBaseJunit5;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.AcceptanceTestBase;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.account.Account;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.node.BesuNode;
|
||||
|
||||
import org.junit.jupiter.api.Test;
|
||||
|
||||
public class QbftContractAcceptanceTest extends AcceptanceTestBaseJunit5 {
|
||||
public class QbftContractAcceptanceTest extends AcceptanceTestBase {
|
||||
|
||||
@Test
|
||||
public void shouldMineOnMultipleNodesEvenWhenClusterContainsNonValidator() throws Exception {
|
||||
|
||||
@@ -41,7 +41,7 @@ public class BftMiningSoakTest extends ParameterizedBftTestBase {

private static final long ONE_MINUTE = Duration.of(1, ChronoUnit.MINUTES).toMillis();

private static final long THREE_MINUTES = Duration.of(1, ChronoUnit.MINUTES).toMillis();
private static final long THREE_MINUTES = Duration.of(3, ChronoUnit.MINUTES).toMillis();

private static final long TEN_SECONDS = Duration.of(10, ChronoUnit.SECONDS).toMillis();

@@ -14,14 +14,14 @@
|
||||
*/
|
||||
package org.hyperledger.besu.tests.acceptance.clique;
|
||||
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.AcceptanceTestBaseJunit5;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.AcceptanceTestBase;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.node.BesuNode;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import org.junit.jupiter.api.Test;
|
||||
|
||||
public class CliqueDiscardRpcAcceptanceTest extends AcceptanceTestBaseJunit5 {
|
||||
public class CliqueDiscardRpcAcceptanceTest extends AcceptanceTestBase {
|
||||
|
||||
@Test
|
||||
public void shouldDiscardVotes() throws IOException {
|
||||
|
||||
@@ -16,7 +16,7 @@ package org.hyperledger.besu.tests.acceptance.clique;
|
||||
|
||||
import static org.hyperledger.besu.tests.acceptance.dsl.transaction.clique.CliqueTransactions.LATEST;
|
||||
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.AcceptanceTestBaseJunit5;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.AcceptanceTestBase;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.node.BesuNode;
|
||||
|
||||
import org.junit.jupiter.api.BeforeEach;
|
||||
@@ -24,7 +24,7 @@ import org.junit.jupiter.api.Disabled;
|
||||
import org.junit.jupiter.api.Test;
|
||||
|
||||
@Disabled("flaky test due to hardcoded block numbers")
|
||||
public class CliqueGetSignersRpcAcceptanceTest extends AcceptanceTestBaseJunit5 {
|
||||
public class CliqueGetSignersRpcAcceptanceTest extends AcceptanceTestBase {
|
||||
private BesuNode minerNode1;
|
||||
private BesuNode minerNode2;
|
||||
|
||||
|
||||
@@ -18,7 +18,7 @@ import static java.util.stream.Collectors.joining;
|
||||
import static org.assertj.core.api.AssertionsForClassTypes.assertThat;
|
||||
import static org.assertj.core.data.Percentage.withPercentage;
|
||||
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.AcceptanceTestBaseJunit5;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.AcceptanceTestBase;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.account.Account;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.node.BesuNode;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.node.configuration.genesis.GenesisConfigurationFactory.CliqueOptions;
|
||||
@@ -32,7 +32,7 @@ import java.util.Optional;
|
||||
import org.junit.jupiter.api.Test;
|
||||
import org.web3j.protocol.core.DefaultBlockParameter;
|
||||
|
||||
public class CliqueMiningAcceptanceTest extends AcceptanceTestBaseJunit5 {
|
||||
public class CliqueMiningAcceptanceTest extends AcceptanceTestBase {
|
||||
|
||||
@Test
|
||||
public void shouldMineTransactionsOnSingleNode() throws IOException {
|
||||
|
||||
@@ -14,14 +14,14 @@
|
||||
*/
|
||||
package org.hyperledger.besu.tests.acceptance.clique;
|
||||
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.AcceptanceTestBaseJunit5;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.AcceptanceTestBase;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.node.BesuNode;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import org.junit.jupiter.api.Test;
|
||||
|
||||
public class CliqueProposalRpcAcceptanceTest extends AcceptanceTestBaseJunit5 {
|
||||
public class CliqueProposalRpcAcceptanceTest extends AcceptanceTestBase {
|
||||
|
||||
@Test
|
||||
public void shouldReturnProposals() throws IOException {
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
*/
|
||||
package org.hyperledger.besu.tests.acceptance.clique;
|
||||
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.AcceptanceTestBaseJunit5;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.AcceptanceTestBase;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.condition.Condition;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.condition.clique.ExpectNonceVote.CLIQUE_NONCE_VOTE;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.node.BesuNode;
|
||||
@@ -23,7 +23,7 @@ import java.io.IOException;
|
||||
|
||||
import org.junit.jupiter.api.Test;
|
||||
|
||||
public class CliqueProposeRpcAcceptanceTest extends AcceptanceTestBaseJunit5 {
|
||||
public class CliqueProposeRpcAcceptanceTest extends AcceptanceTestBase {
|
||||
|
||||
@Test
|
||||
public void shouldAddValidators() throws IOException {
|
||||
|
||||
@@ -14,14 +14,14 @@
|
||||
*/
|
||||
package org.hyperledger.besu.tests.acceptance.clique;
|
||||
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.AcceptanceTestBaseJunit5;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.AcceptanceTestBase;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.node.BesuNode;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import org.junit.jupiter.api.Test;
|
||||
|
||||
public class CliqueZeroValidatorsAcceptanceTest extends AcceptanceTestBaseJunit5 {
|
||||
public class CliqueZeroValidatorsAcceptanceTest extends AcceptanceTestBase {
|
||||
|
||||
@Test
|
||||
public void zeroValidatorsFormValidCluster() throws IOException {
|
||||
|
||||
@@ -16,7 +16,7 @@ package org.hyperledger.besu.tests.acceptance.plugins;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.AcceptanceTestBaseJunit5;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.AcceptanceTestBase;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.node.BesuNode;
|
||||
|
||||
import java.io.File;
|
||||
@@ -34,7 +34,7 @@ import org.junit.jupiter.api.Test;
|
||||
import org.junit.jupiter.api.condition.DisabledOnOs;
|
||||
import org.junit.jupiter.api.condition.OS;
|
||||
|
||||
public class BadCLIOptionsPluginTest extends AcceptanceTestBaseJunit5 {
|
||||
public class BadCLIOptionsPluginTest extends AcceptanceTestBase {
|
||||
private BesuNode node;
|
||||
|
||||
@BeforeEach
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
*/
|
||||
package org.hyperledger.besu.tests.acceptance.plugins;
|
||||
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.AcceptanceTestBaseJunit5;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.AcceptanceTestBase;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.node.BesuNode;
|
||||
|
||||
import java.io.File;
|
||||
@@ -28,7 +28,7 @@ import org.awaitility.Awaitility;
|
||||
import org.junit.jupiter.api.BeforeEach;
|
||||
import org.junit.jupiter.api.Test;
|
||||
|
||||
public class BesuEventsPluginTest extends AcceptanceTestBaseJunit5 {
|
||||
public class BesuEventsPluginTest extends AcceptanceTestBase {
|
||||
private BesuNode pluginNode;
|
||||
private BesuNode minerNode;
|
||||
|
||||
|
||||
@@ -15,7 +15,7 @@
|
||||
package org.hyperledger.besu.tests.acceptance.plugins;
|
||||
|
||||
import org.hyperledger.besu.datatypes.Hash;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.AcceptanceTestBaseJunit5;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.AcceptanceTestBase;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.account.Account;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.blockchain.Amount;
|
||||
import org.hyperledger.besu.tests.acceptance.dsl.node.BesuNode;
|
||||
@@ -27,7 +27,7 @@ import java.util.List;
|
||||
import org.junit.jupiter.api.BeforeEach;
|
||||
import org.junit.jupiter.api.Test;
|
||||
|
||||
public class PermissioningPluginTest extends AcceptanceTestBaseJunit5 {
|
||||
public class PermissioningPluginTest extends AcceptanceTestBase {
|
||||
private BesuNode minerNode;
|
||||
|
||||
private BesuNode aliceNode;
|
||||
|
||||
@@ -41,7 +41,6 @@ import org.hyperledger.besu.cli.converter.MetricCategoryConverter;
|
||||
import org.hyperledger.besu.cli.custom.JsonRPCAllowlistHostsProperty;
|
||||
import org.hyperledger.besu.cli.error.BesuExecutionExceptionHandler;
|
||||
import org.hyperledger.besu.cli.error.BesuParameterExceptionHandler;
|
||||
import org.hyperledger.besu.cli.options.DataStorageOptions;
|
||||
import org.hyperledger.besu.cli.options.MiningOptions;
|
||||
import org.hyperledger.besu.cli.options.TransactionPoolOptions;
|
||||
import org.hyperledger.besu.cli.options.stable.ApiConfigurationOptions;
|
||||
@@ -57,6 +56,8 @@ import org.hyperledger.besu.cli.options.stable.P2PDiscoveryOptions;
|
||||
import org.hyperledger.besu.cli.options.stable.PermissionsOptions;
|
||||
import org.hyperledger.besu.cli.options.stable.PluginsConfigurationOptions;
|
||||
import org.hyperledger.besu.cli.options.stable.RpcWebsocketOptions;
|
||||
import org.hyperledger.besu.cli.options.storage.DataStorageOptions;
|
||||
import org.hyperledger.besu.cli.options.storage.DiffBasedSubStorageOptions;
|
||||
import org.hyperledger.besu.cli.options.unstable.ChainPruningOptions;
|
||||
import org.hyperledger.besu.cli.options.unstable.DnsOptions;
|
||||
import org.hyperledger.besu.cli.options.unstable.EthProtocolOptions;
|
||||
@@ -139,7 +140,9 @@ import org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueStorageProvider;
|
||||
import org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueStorageProviderBuilder;
|
||||
import org.hyperledger.besu.ethereum.transaction.TransactionSimulator;
|
||||
import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration;
|
||||
import org.hyperledger.besu.ethereum.worldstate.DiffBasedSubStorageConfiguration;
|
||||
import org.hyperledger.besu.ethereum.worldstate.ImmutableDataStorageConfiguration;
|
||||
import org.hyperledger.besu.ethereum.worldstate.ImmutableDiffBasedSubStorageConfiguration;
|
||||
import org.hyperledger.besu.evm.precompile.AbstractAltBnPrecompiledContract;
|
||||
import org.hyperledger.besu.evm.precompile.BigIntegerModularExponentiationPrecompiledContract;
|
||||
import org.hyperledger.besu.evm.precompile.KZGPointEvalPrecompiledContract;
|
||||
@@ -1643,7 +1646,11 @@ public class BesuCommand implements DefaultCommandValues, Runnable {
|
||||
CommandLineUtils.failIfOptionDoesntMeetRequirement(
|
||||
commandLine,
|
||||
"--Xsnapsync-synchronizer-flat option can only be used when --Xbonsai-full-flat-db-enabled is true",
|
||||
dataStorageOptions.toDomainObject().getUnstable().getBonsaiFullFlatDbEnabled(),
|
||||
dataStorageOptions
|
||||
.toDomainObject()
|
||||
.getDiffBasedSubStorageConfiguration()
|
||||
.getUnstable()
|
||||
.getFullFlatDbEnabled(),
|
||||
asList(
|
||||
"--Xsnapsync-synchronizer-flat-account-healed-count-per-request",
|
||||
"--Xsnapsync-synchronizer-flat-slot-healed-count-per-request"));
|
||||
@@ -1775,38 +1782,46 @@ public class BesuCommand implements DefaultCommandValues, Runnable {
|
||||
.withMiningParameters(miningParametersSupplier.get())
|
||||
.withJsonRpcHttpOptions(jsonRpcHttpOptions);
|
||||
final KeyValueStorageProvider storageProvider = keyValueStorageProvider(keyValueStorageName);
|
||||
return controllerBuilder
|
||||
.fromEthNetworkConfig(updateNetworkConfig(network), getDefaultSyncModeIfNotSet())
|
||||
.synchronizerConfiguration(buildSyncConfig())
|
||||
.ethProtocolConfiguration(unstableEthProtocolOptions.toDomainObject())
|
||||
.networkConfiguration(unstableNetworkingOptions.toDomainObject())
|
||||
.dataDirectory(dataDir())
|
||||
.dataStorageConfiguration(getDataStorageConfiguration())
|
||||
.miningParameters(miningParametersSupplier.get())
|
||||
.transactionPoolConfiguration(buildTransactionPoolConfiguration())
|
||||
.nodeKey(new NodeKey(securityModule()))
|
||||
.metricsSystem((ObservableMetricsSystem) besuComponent.getMetricsSystem())
|
||||
.messagePermissioningProviders(permissioningService.getMessagePermissioningProviders())
|
||||
.privacyParameters(privacyParameters())
|
||||
.clock(Clock.systemUTC())
|
||||
.isRevertReasonEnabled(isRevertReasonEnabled)
|
||||
.isParallelTxProcessingEnabled(
|
||||
dataStorageConfiguration.getUnstable().isParallelTxProcessingEnabled())
|
||||
.storageProvider(storageProvider)
|
||||
.gasLimitCalculator(
|
||||
miningParametersSupplier.get().getTargetGasLimit().isPresent()
|
||||
? new FrontierTargetingGasLimitCalculator()
|
||||
: GasLimitCalculator.constant())
|
||||
.requiredBlocks(requiredBlocks)
|
||||
.reorgLoggingThreshold(reorgLoggingThreshold)
|
||||
.evmConfiguration(unstableEvmOptions.toDomainObject())
|
||||
.maxPeers(p2PDiscoveryOptions.maxPeers)
|
||||
.maxRemotelyInitiatedPeers(maxRemoteInitiatedPeers)
|
||||
.randomPeerPriority(p2PDiscoveryOptions.randomPeerPriority)
|
||||
.chainPruningConfiguration(unstableChainPruningOptions.toDomainObject())
|
||||
.cacheLastBlocks(numberOfblocksToCache)
|
||||
.genesisStateHashCacheEnabled(genesisStateHashCacheEnabled)
|
||||
.besuComponent(besuComponent);
|
||||
BesuControllerBuilder besuControllerBuilder =
|
||||
controllerBuilder
|
||||
.fromEthNetworkConfig(updateNetworkConfig(network), getDefaultSyncModeIfNotSet())
|
||||
.synchronizerConfiguration(buildSyncConfig())
|
||||
.ethProtocolConfiguration(unstableEthProtocolOptions.toDomainObject())
|
||||
.networkConfiguration(unstableNetworkingOptions.toDomainObject())
|
||||
.dataDirectory(dataDir())
|
||||
.dataStorageConfiguration(getDataStorageConfiguration())
|
||||
.miningParameters(miningParametersSupplier.get())
|
||||
.transactionPoolConfiguration(buildTransactionPoolConfiguration())
|
||||
.nodeKey(new NodeKey(securityModule()))
|
||||
.metricsSystem((ObservableMetricsSystem) besuComponent.getMetricsSystem())
|
||||
.messagePermissioningProviders(permissioningService.getMessagePermissioningProviders())
|
||||
.privacyParameters(privacyParameters())
|
||||
.clock(Clock.systemUTC())
|
||||
.isRevertReasonEnabled(isRevertReasonEnabled)
|
||||
.storageProvider(storageProvider)
|
||||
.gasLimitCalculator(
|
||||
miningParametersSupplier.get().getTargetGasLimit().isPresent()
|
||||
? new FrontierTargetingGasLimitCalculator()
|
||||
: GasLimitCalculator.constant())
|
||||
.requiredBlocks(requiredBlocks)
|
||||
.reorgLoggingThreshold(reorgLoggingThreshold)
|
||||
.evmConfiguration(unstableEvmOptions.toDomainObject())
|
||||
.maxPeers(p2PDiscoveryOptions.maxPeers)
|
||||
.maxRemotelyInitiatedPeers(maxRemoteInitiatedPeers)
|
||||
.randomPeerPriority(p2PDiscoveryOptions.randomPeerPriority)
|
||||
.chainPruningConfiguration(unstableChainPruningOptions.toDomainObject())
|
||||
.cacheLastBlocks(numberOfblocksToCache)
|
||||
.genesisStateHashCacheEnabled(genesisStateHashCacheEnabled)
|
||||
.besuComponent(besuComponent);
|
||||
if (DataStorageFormat.BONSAI.equals(getDataStorageConfiguration().getDataStorageFormat())) {
|
||||
final DiffBasedSubStorageConfiguration subStorageConfiguration =
|
||||
getDataStorageConfiguration().getDiffBasedSubStorageConfiguration();
|
||||
if (subStorageConfiguration.getLimitTrieLogsEnabled()) {
|
||||
besuControllerBuilder.isParallelTxProcessingEnabled(
|
||||
subStorageConfiguration.getUnstable().isParallelTxProcessingEnabled());
|
||||
}
|
||||
}
|
||||
return besuControllerBuilder;
|
||||
}
|
||||
|
||||
private JsonRpcConfiguration createEngineJsonRpcConfiguration() {
|
||||
@@ -2126,29 +2141,34 @@ public class BesuCommand implements DefaultCommandValues, Runnable {
|
||||
}
|
||||
|
||||
if (SyncMode.FULL.equals(getDefaultSyncModeIfNotSet())
|
||||
&& DataStorageFormat.BONSAI.equals(dataStorageConfiguration.getDataStorageFormat())
|
||||
&& dataStorageConfiguration.getBonsaiLimitTrieLogsEnabled()) {
|
||||
&& DataStorageFormat.BONSAI.equals(dataStorageConfiguration.getDataStorageFormat())) {
|
||||
final DiffBasedSubStorageConfiguration diffBasedSubStorageConfiguration =
|
||||
dataStorageConfiguration.getDiffBasedSubStorageConfiguration();
|
||||
if (diffBasedSubStorageConfiguration.getLimitTrieLogsEnabled()) {
|
||||
if (CommandLineUtils.isOptionSet(
|
||||
commandLine, DiffBasedSubStorageOptions.LIMIT_TRIE_LOGS_ENABLED)) {
|
||||
throw new ParameterException(
|
||||
commandLine,
|
||||
String.format(
|
||||
"Cannot enable %s with --sync-mode=%s and --data-storage-format=%s. You must set %s or use a different sync-mode",
|
||||
DiffBasedSubStorageOptions.LIMIT_TRIE_LOGS_ENABLED,
|
||||
SyncMode.FULL,
|
||||
DataStorageFormat.BONSAI,
|
||||
DiffBasedSubStorageOptions.LIMIT_TRIE_LOGS_ENABLED + "=false"));
|
||||
}
|
||||
|
||||
if (CommandLineUtils.isOptionSet(
|
||||
commandLine, DataStorageOptions.BONSAI_LIMIT_TRIE_LOGS_ENABLED)) {
|
||||
throw new ParameterException(
|
||||
commandLine,
|
||||
String.format(
|
||||
"Cannot enable %s with --sync-mode=%s and --data-storage-format=%s. You must set %s or use a different sync-mode",
|
||||
DataStorageOptions.BONSAI_LIMIT_TRIE_LOGS_ENABLED,
|
||||
SyncMode.FULL,
|
||||
DataStorageFormat.BONSAI,
|
||||
DataStorageOptions.BONSAI_LIMIT_TRIE_LOGS_ENABLED + "=false"));
|
||||
dataStorageConfiguration =
|
||||
ImmutableDataStorageConfiguration.copyOf(dataStorageConfiguration)
|
||||
.withDiffBasedSubStorageConfiguration(
|
||||
ImmutableDiffBasedSubStorageConfiguration.copyOf(
|
||||
dataStorageConfiguration.getDiffBasedSubStorageConfiguration())
|
||||
.withLimitTrieLogsEnabled(false));
|
||||
logger.warn(
|
||||
"Forcing {}, since it cannot be enabled with --sync-mode={} and --data-storage-format={}.",
|
||||
DiffBasedSubStorageOptions.LIMIT_TRIE_LOGS_ENABLED + "=false",
|
||||
SyncMode.FULL,
|
||||
DataStorageFormat.BONSAI);
|
||||
}
|
||||
|
||||
dataStorageConfiguration =
|
||||
ImmutableDataStorageConfiguration.copyOf(dataStorageConfiguration)
|
||||
.withBonsaiLimitTrieLogsEnabled(false);
|
||||
logger.warn(
|
||||
"Forcing {}, since it cannot be enabled with --sync-mode={} and --data-storage-format={}.",
|
||||
DataStorageOptions.BONSAI_LIMIT_TRIE_LOGS_ENABLED + "=false",
|
||||
SyncMode.FULL,
|
||||
DataStorageFormat.BONSAI);
|
||||
}
|
||||
return dataStorageConfiguration;
|
||||
}
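// Illustrative note, not part of the diff: with --sync-mode=FULL and --data-storage-format=BONSAI,
// an explicitly supplied --bonsai-limit-trie-logs-enabled flag is rejected with the
// ParameterException above, while a limit that is only enabled by default is forced to
// limitTrieLogsEnabled=false and the warning is logged before the adjusted configuration is returned.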
|
||||
@@ -2715,12 +2735,14 @@ public class BesuCommand implements DefaultCommandValues, Runnable {
|
||||
builder.setHighSpecEnabled();
|
||||
}
|
||||
|
||||
if (DataStorageFormat.BONSAI.equals(getDataStorageConfiguration().getDataStorageFormat())
|
||||
&& getDataStorageConfiguration().getBonsaiLimitTrieLogsEnabled()) {
|
||||
builder.setLimitTrieLogsEnabled();
|
||||
builder.setTrieLogRetentionLimit(getDataStorageConfiguration().getBonsaiMaxLayersToLoad());
|
||||
builder.setTrieLogsPruningWindowSize(
|
||||
getDataStorageConfiguration().getBonsaiTrieLogPruningWindowSize());
|
||||
if (DataStorageFormat.BONSAI.equals(getDataStorageConfiguration().getDataStorageFormat())) {
|
||||
final DiffBasedSubStorageConfiguration subStorageConfiguration =
|
||||
getDataStorageConfiguration().getDiffBasedSubStorageConfiguration();
|
||||
if (subStorageConfiguration.getLimitTrieLogsEnabled()) {
|
||||
builder.setLimitTrieLogsEnabled();
|
||||
builder.setTrieLogRetentionLimit(subStorageConfiguration.getMaxLayersToLoad());
|
||||
builder.setTrieLogsPruningWindowSize(subStorageConfiguration.getTrieLogPruningWindowSize());
|
||||
}
|
||||
}
|
||||
|
||||
builder.setSnapServerEnabled(this.unstableSynchronizerOptions.isSnapsyncServerEnabled());
|
||||
|
||||
@@ -53,7 +53,7 @@ public class ConfigurationOverviewBuilder {
|
||||
private Collection<String> engineApis;
|
||||
private String engineJwtFilePath;
|
||||
private boolean isHighSpec = false;
|
||||
private boolean isBonsaiLimitTrieLogsEnabled = false;
|
||||
private boolean isLimitTrieLogsEnabled = false;
|
||||
private long trieLogRetentionLimit = 0;
|
||||
private Integer trieLogsPruningWindowSize = null;
|
||||
private boolean isSnapServerEnabled = false;
|
||||
@@ -220,7 +220,7 @@ public class ConfigurationOverviewBuilder {
|
||||
* @return the builder
|
||||
*/
|
||||
public ConfigurationOverviewBuilder setLimitTrieLogsEnabled() {
|
||||
isBonsaiLimitTrieLogsEnabled = true;
|
||||
isLimitTrieLogsEnabled = true;
|
||||
return this;
|
||||
}
|
||||
|
||||
@@ -389,7 +389,7 @@ public class ConfigurationOverviewBuilder {
|
||||
lines.add("Experimental Snap Sync for BFT enabled");
|
||||
}
|
||||
|
||||
if (isBonsaiLimitTrieLogsEnabled) {
|
||||
if (isLimitTrieLogsEnabled) {
|
||||
final StringBuilder trieLogPruningString = new StringBuilder();
|
||||
trieLogPruningString
|
||||
.append("Limit trie logs enabled: retention: ")
|
||||
|
||||
@@ -1,248 +0,0 @@
|
||||
/*
|
||||
* Copyright ConsenSys AG.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations under the License.
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
package org.hyperledger.besu.cli.options;
|
||||
|
||||
import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.DEFAULT_BONSAI_LIMIT_TRIE_LOGS_ENABLED;
|
||||
import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.DEFAULT_BONSAI_MAX_LAYERS_TO_LOAD;
|
||||
import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.DEFAULT_BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE;
|
||||
import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.DEFAULT_RECEIPT_COMPACTION_ENABLED;
|
||||
import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.MINIMUM_BONSAI_TRIE_LOG_RETENTION_LIMIT;
|
||||
import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.Unstable.DEFAULT_BONSAI_CODE_USING_CODE_HASH_ENABLED;
|
||||
import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.Unstable.DEFAULT_BONSAI_FULL_FLAT_DB_ENABLED;
|
||||
|
||||
import org.hyperledger.besu.cli.util.CommandLineUtils;
|
||||
import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration;
|
||||
import org.hyperledger.besu.ethereum.worldstate.ImmutableDataStorageConfiguration;
|
||||
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
|
||||
import org.apache.commons.lang3.StringUtils;
|
||||
import picocli.CommandLine;
|
||||
import picocli.CommandLine.Option;
|
||||
|
||||
/** The Data storage CLI options. */
|
||||
public class DataStorageOptions implements CLIOptions<DataStorageConfiguration> {
|
||||
|
||||
private static final String DATA_STORAGE_FORMAT = "--data-storage-format";
|
||||
|
||||
/** The maximum number of historical layers to load. */
|
||||
public static final String BONSAI_STORAGE_FORMAT_MAX_LAYERS_TO_LOAD =
|
||||
"--bonsai-historical-block-limit";
|
||||
|
||||
// Use Bonsai DB
|
||||
@Option(
|
||||
names = {DATA_STORAGE_FORMAT},
|
||||
description =
|
||||
"Format to store trie data in. Either FOREST or BONSAI (default: ${DEFAULT-VALUE}).",
|
||||
arity = "1")
|
||||
private DataStorageFormat dataStorageFormat = DataStorageFormat.BONSAI;
|
||||
|
||||
@Option(
|
||||
names = {BONSAI_STORAGE_FORMAT_MAX_LAYERS_TO_LOAD, "--bonsai-maximum-back-layers-to-load"},
|
||||
paramLabel = "<LONG>",
|
||||
description =
|
||||
"Limit of historical layers that can be loaded with BONSAI (default: ${DEFAULT-VALUE}). When using "
|
||||
+ BONSAI_LIMIT_TRIE_LOGS_ENABLED
|
||||
+ " it will also be used as the number of layers of trie logs to retain.",
|
||||
arity = "1")
|
||||
private Long bonsaiMaxLayersToLoad = DEFAULT_BONSAI_MAX_LAYERS_TO_LOAD;
|
||||
|
||||
/** The bonsai limit trie logs enabled option name */
|
||||
public static final String BONSAI_LIMIT_TRIE_LOGS_ENABLED = "--bonsai-limit-trie-logs-enabled";
|
||||
|
||||
/** The bonsai trie logs pruning window size. */
|
||||
public static final String BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE =
|
||||
"--bonsai-trie-logs-pruning-window-size";
|
||||
|
||||
// TODO --Xbonsai-limit-trie-logs-enabled and --Xbonsai-trie-log-pruning-enabled are deprecated,
|
||||
// remove in a future release
|
||||
@SuppressWarnings("ExperimentalCliOptionMustBeCorrectlyDisplayed")
|
||||
@CommandLine.Option(
|
||||
names = {
|
||||
BONSAI_LIMIT_TRIE_LOGS_ENABLED,
|
||||
"--Xbonsai-limit-trie-logs-enabled", // deprecated
|
||||
"--Xbonsai-trie-log-pruning-enabled" // deprecated
|
||||
},
|
||||
fallbackValue = "true",
|
||||
description = "Limit the number of trie logs that are retained. (default: ${DEFAULT-VALUE})")
|
||||
private Boolean bonsaiLimitTrieLogsEnabled = DEFAULT_BONSAI_LIMIT_TRIE_LOGS_ENABLED;
|
||||
|
||||
// TODO --Xbonsai-trie-logs-pruning-window-size is deprecated, remove in a future release
|
||||
@SuppressWarnings("ExperimentalCliOptionMustBeCorrectlyDisplayed")
|
||||
@CommandLine.Option(
|
||||
names = {
|
||||
BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE,
|
||||
"--Xbonsai-trie-logs-pruning-window-size" // deprecated
|
||||
},
|
||||
description =
|
||||
"The max number of blocks to load and prune trie logs for at startup. (default: ${DEFAULT-VALUE})")
|
||||
private Integer bonsaiTrieLogPruningWindowSize = DEFAULT_BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE;
|
||||
|
||||
@Option(
|
||||
names = "--receipt-compaction-enabled",
|
||||
description = "Enables compact storing of receipts (default: ${DEFAULT-VALUE})",
|
||||
fallbackValue = "true")
|
||||
private Boolean receiptCompactionEnabled = DEFAULT_RECEIPT_COMPACTION_ENABLED;
|
||||
|
||||
@CommandLine.ArgGroup(validate = false)
|
||||
private final DataStorageOptions.Unstable unstableOptions = new Unstable();
|
||||
|
||||
/** Default Constructor. */
|
||||
DataStorageOptions() {}
|
||||
|
||||
/** The unstable options for data storage. */
|
||||
public static class Unstable {
|
||||
|
||||
// TODO: --Xsnapsync-synchronizer-flat-db-healing-enabled is deprecated, remove it in a future
|
||||
// release
|
||||
@CommandLine.Option(
|
||||
hidden = true,
|
||||
names = {
|
||||
"--Xbonsai-full-flat-db-enabled",
|
||||
"--Xsnapsync-synchronizer-flat-db-healing-enabled"
|
||||
},
|
||||
arity = "1",
|
||||
description = "Enables bonsai full flat database strategy. (default: ${DEFAULT-VALUE})")
|
||||
private Boolean bonsaiFullFlatDbEnabled = DEFAULT_BONSAI_FULL_FLAT_DB_ENABLED;
|
||||
|
||||
@CommandLine.Option(
|
||||
hidden = true,
|
||||
names = {"--Xbonsai-code-using-code-hash-enabled"},
|
||||
arity = "1",
|
||||
description =
|
||||
"Enables code storage using code hash instead of by account hash. (default: ${DEFAULT-VALUE})")
|
||||
private boolean bonsaiCodeUsingCodeHashEnabled = DEFAULT_BONSAI_CODE_USING_CODE_HASH_ENABLED;
|
||||
|
||||
@CommandLine.Option(
|
||||
hidden = true,
|
||||
names = {"--Xbonsai-parallel-tx-processing-enabled"},
|
||||
arity = "1",
|
||||
description =
|
||||
"Enables parallelization of transactions to optimize processing speed by concurrently loading and executing necessary data in advance. (default: ${DEFAULT-VALUE})")
|
||||
private Boolean isParallelTxProcessingEnabled = false;
|
||||
|
||||
/** Default Constructor. */
|
||||
Unstable() {}
|
||||
}
|
||||
|
||||
/**
|
||||
* Create data storage options.
|
||||
*
|
||||
* @return the data storage options
|
||||
*/
|
||||
public static DataStorageOptions create() {
|
||||
return new DataStorageOptions();
|
||||
}
|
||||
|
||||
/**
|
||||
* Validates the data storage options
|
||||
*
|
||||
* @param commandLine the full commandLine to check all the options specified by the user
|
||||
*/
|
||||
public void validate(final CommandLine commandLine) {
|
||||
if (DataStorageFormat.BONSAI == dataStorageFormat) {
|
||||
if (bonsaiLimitTrieLogsEnabled) {
|
||||
if (bonsaiMaxLayersToLoad < MINIMUM_BONSAI_TRIE_LOG_RETENTION_LIMIT) {
|
||||
throw new CommandLine.ParameterException(
|
||||
commandLine,
|
||||
String.format(
|
||||
BONSAI_STORAGE_FORMAT_MAX_LAYERS_TO_LOAD + " minimum value is %d",
|
||||
MINIMUM_BONSAI_TRIE_LOG_RETENTION_LIMIT));
|
||||
}
|
||||
if (bonsaiTrieLogPruningWindowSize <= 0) {
|
||||
throw new CommandLine.ParameterException(
|
||||
commandLine,
|
||||
String.format(
|
||||
BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE + "=%d must be greater than 0",
|
||||
bonsaiTrieLogPruningWindowSize));
|
||||
}
|
||||
if (bonsaiTrieLogPruningWindowSize <= bonsaiMaxLayersToLoad) {
|
||||
throw new CommandLine.ParameterException(
|
||||
commandLine,
|
||||
String.format(
|
||||
BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE
|
||||
+ "=%d must be greater than "
|
||||
+ BONSAI_STORAGE_FORMAT_MAX_LAYERS_TO_LOAD
|
||||
+ "=%d",
|
||||
bonsaiTrieLogPruningWindowSize,
|
||||
bonsaiMaxLayersToLoad));
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if (unstableOptions.isParallelTxProcessingEnabled) {
|
||||
throw new CommandLine.ParameterException(
|
||||
commandLine,
|
||||
"Transaction parallelization is not supported unless operating in a 'diffbased' mode, such as Bonsai.");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Converts to options from the configuration
|
||||
*
|
||||
* @param domainObject to be reversed
|
||||
* @return the options that correspond to the configuration
|
||||
*/
|
||||
public static DataStorageOptions fromConfig(final DataStorageConfiguration domainObject) {
|
||||
final DataStorageOptions dataStorageOptions = DataStorageOptions.create();
|
||||
dataStorageOptions.dataStorageFormat = domainObject.getDataStorageFormat();
|
||||
dataStorageOptions.bonsaiMaxLayersToLoad = domainObject.getBonsaiMaxLayersToLoad();
|
||||
dataStorageOptions.receiptCompactionEnabled = domainObject.getReceiptCompactionEnabled();
|
||||
dataStorageOptions.bonsaiLimitTrieLogsEnabled = domainObject.getBonsaiLimitTrieLogsEnabled();
|
||||
dataStorageOptions.bonsaiTrieLogPruningWindowSize =
|
||||
domainObject.getBonsaiTrieLogPruningWindowSize();
|
||||
dataStorageOptions.unstableOptions.bonsaiFullFlatDbEnabled =
|
||||
domainObject.getUnstable().getBonsaiFullFlatDbEnabled();
|
||||
dataStorageOptions.unstableOptions.bonsaiCodeUsingCodeHashEnabled =
|
||||
domainObject.getUnstable().getBonsaiCodeStoredByCodeHashEnabled();
|
||||
dataStorageOptions.unstableOptions.isParallelTxProcessingEnabled =
|
||||
domainObject.getUnstable().isParallelTxProcessingEnabled();
|
||||
|
||||
return dataStorageOptions;
|
||||
}
|
||||
|
||||
@Override
|
||||
public DataStorageConfiguration toDomainObject() {
|
||||
return ImmutableDataStorageConfiguration.builder()
|
||||
.dataStorageFormat(dataStorageFormat)
|
||||
.bonsaiMaxLayersToLoad(bonsaiMaxLayersToLoad)
|
||||
.receiptCompactionEnabled(receiptCompactionEnabled)
|
||||
.bonsaiLimitTrieLogsEnabled(bonsaiLimitTrieLogsEnabled)
|
||||
.bonsaiTrieLogPruningWindowSize(bonsaiTrieLogPruningWindowSize)
|
||||
.unstable(
|
||||
ImmutableDataStorageConfiguration.Unstable.builder()
|
||||
.bonsaiFullFlatDbEnabled(unstableOptions.bonsaiFullFlatDbEnabled)
|
||||
.bonsaiCodeStoredByCodeHashEnabled(unstableOptions.bonsaiCodeUsingCodeHashEnabled)
|
||||
.isParallelTxProcessingEnabled(unstableOptions.isParallelTxProcessingEnabled)
|
||||
.build())
|
||||
.build();
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<String> getCLIOptions() {
|
||||
return CommandLineUtils.getCLIOptions(this, new DataStorageOptions());
|
||||
}
|
||||
|
||||
/**
|
||||
* Normalize data storage format string.
|
||||
*
|
||||
* @return the normalized string
|
||||
*/
|
||||
public String normalizeDataStorageFormat() {
|
||||
return StringUtils.capitalize(dataStorageFormat.toString().toLowerCase(Locale.ROOT));
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,121 @@
|
||||
/*
|
||||
* Copyright ConsenSys AG.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations under the License.
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
package org.hyperledger.besu.cli.options.storage;
|
||||
|
||||
import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.DEFAULT_RECEIPT_COMPACTION_ENABLED;
|
||||
|
||||
import org.hyperledger.besu.cli.options.CLIOptions;
|
||||
import org.hyperledger.besu.cli.util.CommandLineUtils;
|
||||
import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration;
|
||||
import org.hyperledger.besu.ethereum.worldstate.ImmutableDataStorageConfiguration;
|
||||
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
|
||||
import org.apache.commons.lang3.StringUtils;
|
||||
import picocli.CommandLine;
|
||||
import picocli.CommandLine.Mixin;
|
||||
import picocli.CommandLine.Option;
|
||||
|
||||
/** The Data storage CLI options. */
|
||||
public class DataStorageOptions implements CLIOptions<DataStorageConfiguration> {
|
||||
|
||||
private static final String DATA_STORAGE_FORMAT = "--data-storage-format";
|
||||
|
||||
// Use Bonsai DB
|
||||
@Option(
|
||||
names = {DATA_STORAGE_FORMAT},
|
||||
description =
|
||||
"Format to store trie data in. Either FOREST or BONSAI (default: ${DEFAULT-VALUE}).",
|
||||
arity = "1")
|
||||
private DataStorageFormat dataStorageFormat = DataStorageFormat.BONSAI;
|
||||
|
||||
@Option(
|
||||
names = "--receipt-compaction-enabled",
|
||||
description = "Enables compact storing of receipts (default: ${DEFAULT-VALUE})",
|
||||
fallbackValue = "true")
|
||||
private Boolean receiptCompactionEnabled = DEFAULT_RECEIPT_COMPACTION_ENABLED;
|
||||
|
||||
/**
|
||||
* Options specific to diff-based storage modes. Holds the necessary parameters to configure
|
||||
* diff-based storage, such as the Bonsai mode or Verkle in the future.
|
||||
*/
|
||||
@Mixin
|
||||
private DiffBasedSubStorageOptions diffBasedSubStorageOptions =
|
||||
DiffBasedSubStorageOptions.create();
|
||||
|
||||
/** Default Constructor. */
|
||||
DataStorageOptions() {}
|
||||
|
||||
/**
|
||||
* Create data storage options.
|
||||
*
|
||||
* @return the data storage options
|
||||
*/
|
||||
public static DataStorageOptions create() {
|
||||
return new DataStorageOptions();
|
||||
}
|
||||
|
||||
/**
|
||||
* Validates the data storage options
|
||||
*
|
||||
* @param commandLine the full commandLine to check all the options specified by the user
|
||||
*/
|
||||
public void validate(final CommandLine commandLine) {
|
||||
diffBasedSubStorageOptions.validate(commandLine, dataStorageFormat);
|
||||
}
|
||||
|
||||
/**
|
||||
* Converts to options from the configuration
|
||||
*
|
||||
* @param domainObject to be reversed
|
||||
* @return the options that correspond to the configuration
|
||||
*/
|
||||
public static DataStorageOptions fromConfig(final DataStorageConfiguration domainObject) {
|
||||
final DataStorageOptions dataStorageOptions = DataStorageOptions.create();
|
||||
dataStorageOptions.dataStorageFormat = domainObject.getDataStorageFormat();
|
||||
dataStorageOptions.receiptCompactionEnabled = domainObject.getReceiptCompactionEnabled();
|
||||
dataStorageOptions.diffBasedSubStorageOptions =
|
||||
DiffBasedSubStorageOptions.fromConfig(domainObject.getDiffBasedSubStorageConfiguration());
|
||||
return dataStorageOptions;
|
||||
}
|
||||
|
||||
@Override
|
||||
public DataStorageConfiguration toDomainObject() {
|
||||
final ImmutableDataStorageConfiguration.Builder builder =
|
||||
ImmutableDataStorageConfiguration.builder()
|
||||
.dataStorageFormat(dataStorageFormat)
|
||||
.receiptCompactionEnabled(receiptCompactionEnabled)
|
||||
.diffBasedSubStorageConfiguration(diffBasedSubStorageOptions.toDomainObject());
|
||||
return builder.build();
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<String> getCLIOptions() {
|
||||
final List<String> cliOptions = CommandLineUtils.getCLIOptions(this, new DataStorageOptions());
|
||||
cliOptions.addAll(diffBasedSubStorageOptions.getCLIOptions());
|
||||
return cliOptions;
|
||||
}
|
||||
|
||||
/**
|
||||
* Normalize data storage format string.
|
||||
*
|
||||
* @return the normalized string
|
||||
*/
|
||||
public String normalizeDataStorageFormat() {
|
||||
return StringUtils.capitalize(dataStorageFormat.toString().toLowerCase(Locale.ROOT));
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,217 @@
|
||||
/*
|
||||
* Copyright ConsenSys AG.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations under the License.
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
package org.hyperledger.besu.cli.options.storage;
|
||||
|
||||
import static org.hyperledger.besu.ethereum.worldstate.DiffBasedSubStorageConfiguration.DEFAULT_LIMIT_TRIE_LOGS_ENABLED;
|
||||
import static org.hyperledger.besu.ethereum.worldstate.DiffBasedSubStorageConfiguration.DEFAULT_MAX_LAYERS_TO_LOAD;
|
||||
import static org.hyperledger.besu.ethereum.worldstate.DiffBasedSubStorageConfiguration.DEFAULT_TRIE_LOG_PRUNING_WINDOW_SIZE;
|
||||
import static org.hyperledger.besu.ethereum.worldstate.DiffBasedSubStorageConfiguration.DiffBasedUnstable.DEFAULT_CODE_USING_CODE_HASH_ENABLED;
|
||||
import static org.hyperledger.besu.ethereum.worldstate.DiffBasedSubStorageConfiguration.DiffBasedUnstable.DEFAULT_FULL_FLAT_DB_ENABLED;
|
||||
import static org.hyperledger.besu.ethereum.worldstate.DiffBasedSubStorageConfiguration.MINIMUM_TRIE_LOG_RETENTION_LIMIT;

import org.hyperledger.besu.cli.options.CLIOptions;
import org.hyperledger.besu.cli.util.CommandLineUtils;
import org.hyperledger.besu.ethereum.worldstate.DiffBasedSubStorageConfiguration;
import org.hyperledger.besu.ethereum.worldstate.ImmutableDiffBasedSubStorageConfiguration;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;

import java.util.List;

import picocli.CommandLine;
import picocli.CommandLine.Option;

/** The Data storage CLI options. */
public class DiffBasedSubStorageOptions implements CLIOptions<DiffBasedSubStorageConfiguration> {

  /** The maximum number of historical layers to load. */
  public static final String MAX_LAYERS_TO_LOAD = "--bonsai-historical-block-limit";

  @Option(
      names = {MAX_LAYERS_TO_LOAD, "--bonsai-maximum-back-layers-to-load"},
      paramLabel = "<LONG>",
      description =
          "Limit of historical layers that can be loaded with BONSAI (default: ${DEFAULT-VALUE}). When using "
              + LIMIT_TRIE_LOGS_ENABLED
              + " it will also be used as the number of layers of trie logs to retain.",
      arity = "1")
  private Long maxLayersToLoad = DEFAULT_MAX_LAYERS_TO_LOAD;

  /** The bonsai limit trie logs enabled option name */
  public static final String LIMIT_TRIE_LOGS_ENABLED = "--bonsai-limit-trie-logs-enabled";

  /** The bonsai trie logs pruning window size. */
  public static final String TRIE_LOG_PRUNING_WINDOW_SIZE =
      "--bonsai-trie-logs-pruning-window-size";

  // TODO --Xbonsai-limit-trie-logs-enabled and --Xbonsai-trie-log-pruning-enabled are deprecated,
  // remove in a future release
  @SuppressWarnings("ExperimentalCliOptionMustBeCorrectlyDisplayed")
  @Option(
      names = {
        LIMIT_TRIE_LOGS_ENABLED,
        "--Xbonsai-limit-trie-logs-enabled", // deprecated
        "--Xbonsai-trie-log-pruning-enabled" // deprecated
      },
      fallbackValue = "true",
      description = "Limit the number of trie logs that are retained. (default: ${DEFAULT-VALUE})")
  private Boolean limitTrieLogsEnabled = DEFAULT_LIMIT_TRIE_LOGS_ENABLED;

  // TODO --Xbonsai-trie-logs-pruning-window-size is deprecated, remove in a future release
  @SuppressWarnings("ExperimentalCliOptionMustBeCorrectlyDisplayed")
  @Option(
      names = {
        TRIE_LOG_PRUNING_WINDOW_SIZE,
        "--Xbonsai-trie-logs-pruning-window-size" // deprecated
      },
      description =
          "The max number of blocks to load and prune trie logs for at startup. (default: ${DEFAULT-VALUE})")
  private Integer trieLogPruningWindowSize = DEFAULT_TRIE_LOG_PRUNING_WINDOW_SIZE;

  @CommandLine.ArgGroup(validate = false)
  private final DiffBasedSubStorageOptions.Unstable unstableOptions = new Unstable();

  /** Default Constructor. */
  DiffBasedSubStorageOptions() {}

  /** The unstable options for data storage. */
  public static class Unstable {

    // TODO: --Xsnapsync-synchronizer-flat-db-healing-enabled is deprecated, remove it in a future
    // release
    @Option(
        hidden = true,
        names = {
          "--Xbonsai-full-flat-db-enabled",
          "--Xsnapsync-synchronizer-flat-db-healing-enabled"
        },
        arity = "1",
        description = "Enables bonsai full flat database strategy. (default: ${DEFAULT-VALUE})")
    private Boolean fullFlatDbEnabled = DEFAULT_FULL_FLAT_DB_ENABLED;

    @Option(
        hidden = true,
        names = {"--Xbonsai-code-using-code-hash-enabled"},
        arity = "1",
        description =
            "Enables code storage using code hash instead of by account hash. (default: ${DEFAULT-VALUE})")
    private boolean codeUsingCodeHashEnabled = DEFAULT_CODE_USING_CODE_HASH_ENABLED;

    @Option(
        hidden = true,
        names = {"--Xbonsai-parallel-tx-processing-enabled"},
        arity = "1",
        description =
            "Enables parallelization of transactions to optimize processing speed by concurrently loading and executing necessary data in advance. (default: ${DEFAULT-VALUE})")
    private Boolean isParallelTxProcessingEnabled = false;

    /** Default Constructor. */
    Unstable() {}
  }

  /**
   * Create data storage options.
   *
   * @return the data storage options
   */
  public static DiffBasedSubStorageOptions create() {
    return new DiffBasedSubStorageOptions();
  }

  /**
   * Validates the data storage options
   *
   * @param commandLine the full commandLine to check all the options specified by the user
   * @param dataStorageFormat the selected data storage format which determines the validation rules
   *     to apply.
   */
  public void validate(final CommandLine commandLine, final DataStorageFormat dataStorageFormat) {
    if (DataStorageFormat.BONSAI == dataStorageFormat) {
      if (limitTrieLogsEnabled) {
        if (maxLayersToLoad < MINIMUM_TRIE_LOG_RETENTION_LIMIT) {
          throw new CommandLine.ParameterException(
              commandLine,
              String.format(
                  MAX_LAYERS_TO_LOAD + " minimum value is %d", MINIMUM_TRIE_LOG_RETENTION_LIMIT));
        }
        if (trieLogPruningWindowSize <= 0) {
          throw new CommandLine.ParameterException(
              commandLine,
              String.format(
                  TRIE_LOG_PRUNING_WINDOW_SIZE + "=%d must be greater than 0",
                  trieLogPruningWindowSize));
        }
        if (trieLogPruningWindowSize <= maxLayersToLoad) {
          throw new CommandLine.ParameterException(
              commandLine,
              String.format(
                  TRIE_LOG_PRUNING_WINDOW_SIZE
                      + "=%d must be greater than "
                      + MAX_LAYERS_TO_LOAD
                      + "=%d",
                  trieLogPruningWindowSize,
                  maxLayersToLoad));
        }
      }
    } else {
      if (unstableOptions.isParallelTxProcessingEnabled) {
        throw new CommandLine.ParameterException(
            commandLine,
            "Transaction parallelization is not supported unless operating in a 'diffbased' mode, such as Bonsai.");
      }
    }
  }

  /**
   * Converts to options from the configuration
   *
   * @param domainObject to be reversed
   * @return the options that correspond to the configuration
   */
  public static DiffBasedSubStorageOptions fromConfig(
      final DiffBasedSubStorageConfiguration domainObject) {
    final DiffBasedSubStorageOptions dataStorageOptions = DiffBasedSubStorageOptions.create();
    dataStorageOptions.maxLayersToLoad = domainObject.getMaxLayersToLoad();
    dataStorageOptions.limitTrieLogsEnabled = domainObject.getLimitTrieLogsEnabled();
    dataStorageOptions.trieLogPruningWindowSize = domainObject.getTrieLogPruningWindowSize();
    dataStorageOptions.unstableOptions.fullFlatDbEnabled =
        domainObject.getUnstable().getFullFlatDbEnabled();
    dataStorageOptions.unstableOptions.codeUsingCodeHashEnabled =
        domainObject.getUnstable().getCodeStoredByCodeHashEnabled();
    dataStorageOptions.unstableOptions.isParallelTxProcessingEnabled =
        domainObject.getUnstable().isParallelTxProcessingEnabled();

    return dataStorageOptions;
  }

  @Override
  public final DiffBasedSubStorageConfiguration toDomainObject() {
    return ImmutableDiffBasedSubStorageConfiguration.builder()
        .maxLayersToLoad(maxLayersToLoad)
        .limitTrieLogsEnabled(limitTrieLogsEnabled)
        .trieLogPruningWindowSize(trieLogPruningWindowSize)
        .unstable(
            ImmutableDiffBasedSubStorageConfiguration.DiffBasedUnstable.builder()
                .fullFlatDbEnabled(unstableOptions.fullFlatDbEnabled)
                .codeStoredByCodeHashEnabled(unstableOptions.codeUsingCodeHashEnabled)
                .isParallelTxProcessingEnabled(unstableOptions.isParallelTxProcessingEnabled)
                .build())
        .build();
  }

  @Override
  public List<String> getCLIOptions() {
    return CommandLineUtils.getCLIOptions(this, new DiffBasedSubStorageOptions());
  }
}
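For orientation, the sketch below is illustrative only and not part of the change: it builds the domain object these options translate into, using the same ImmutableDiffBasedSubStorageConfiguration builder that toDomainObject() and the tests later in this diff rely on. The literal values and the helper class name are assumptions; any values must satisfy validate() (retention limit at least MINIMUM_TRIE_LOG_RETENTION_LIMIT, pruning window strictly larger than the retention limit).

import org.hyperledger.besu.ethereum.worldstate.DiffBasedSubStorageConfiguration;
import org.hyperledger.besu.ethereum.worldstate.ImmutableDiffBasedSubStorageConfiguration;

// Hypothetical helper, for illustration only: roughly what parsing
//   --bonsai-historical-block-limit=513 --bonsai-limit-trie-logs-enabled
//   --bonsai-trie-logs-pruning-window-size=1024
// followed by toDomainObject() would produce.
class DiffBasedSubStorageConfigExample {
  static DiffBasedSubStorageConfiguration build() {
    return ImmutableDiffBasedSubStorageConfiguration.builder()
        .maxLayersToLoad(513L) // must be >= MINIMUM_TRIE_LOG_RETENTION_LIMIT
        .limitTrieLogsEnabled(true)
        .trieLogPruningWindowSize(1024) // must be greater than maxLayersToLoad
        .build();
  }
}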
@@ -15,21 +15,22 @@
package org.hyperledger.besu.cli.subcommands.storage;

import static com.google.common.base.Preconditions.checkArgument;
import static org.hyperledger.besu.cli.options.DataStorageOptions.BONSAI_STORAGE_FORMAT_MAX_LAYERS_TO_LOAD;
import static org.hyperledger.besu.cli.options.storage.DiffBasedSubStorageOptions.MAX_LAYERS_TO_LOAD;
import static org.hyperledger.besu.cli.options.storage.DiffBasedSubStorageOptions.TRIE_LOG_PRUNING_WINDOW_SIZE;
import static org.hyperledger.besu.controller.BesuController.DATABASE_PATH;
import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.DEFAULT_BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE;
import static org.hyperledger.besu.ethereum.worldstate.DiffBasedSubStorageConfiguration.DEFAULT_TRIE_LOG_PRUNING_WINDOW_SIZE;

import org.hyperledger.besu.cli.options.DataStorageOptions;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.chain.Blockchain;
import org.hyperledger.besu.ethereum.chain.MutableBlockchain;
import org.hyperledger.besu.ethereum.core.BlockHeader;
import org.hyperledger.besu.ethereum.rlp.BytesValueRLPInput;
import org.hyperledger.besu.ethereum.rlp.RLP;
import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.BonsaiWorldStateKeyValueStorage;
import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.trielog.TrieLogFactoryImpl;
import org.hyperledger.besu.ethereum.trie.diffbased.common.storage.DiffBasedWorldStateKeyValueStorage;
import org.hyperledger.besu.ethereum.trie.diffbased.common.trielog.TrieLogLayer;
import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration;
import org.hyperledger.besu.ethereum.worldstate.DiffBasedSubStorageConfiguration;

import java.io.File;
import java.io.FileInputStream;
@@ -64,7 +65,7 @@ public class TrieLogHelper {

  boolean prune(
      final DataStorageConfiguration config,
      final BonsaiWorldStateKeyValueStorage rootWorldStateStorage,
      final DiffBasedWorldStateKeyValueStorage rootWorldStateStorage,
      final MutableBlockchain blockchain,
      final Path dataDirectoryPath) {

@@ -73,7 +74,7 @@ public class TrieLogHelper {

    validatePruneConfiguration(config);

    final long layersToRetain = config.getBonsaiMaxLayersToLoad();
    final long layersToRetain = config.getDiffBasedSubStorageConfiguration().getMaxLayersToLoad();

    final long chainHeight = blockchain.getChainHeadBlockNumber();

@@ -102,7 +103,7 @@ public class TrieLogHelper {
    // Should only be layersToRetain left but loading extra just in case of an unforeseen bug
    final long countAfterPrune =
        rootWorldStateStorage
            .streamTrieLogKeys(layersToRetain + DEFAULT_BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE)
            .streamTrieLogKeys(layersToRetain + DEFAULT_TRIE_LOG_PRUNING_WINDOW_SIZE)
            .count();
    if (countAfterPrune == layersToRetain) {
      if (deleteFiles(batchFileNameBase, numberOfBatches)) {
@@ -115,15 +116,12 @@ public class TrieLogHelper {
      throw new IllegalStateException(
          String.format(
              "Remaining trie logs (%d) did not match %s (%d). Trie logs backup files (in %s) have not been deleted, it is safe to rerun the subcommand.",
              countAfterPrune,
              BONSAI_STORAGE_FORMAT_MAX_LAYERS_TO_LOAD,
              layersToRetain,
              batchFileNameBase));
              countAfterPrune, MAX_LAYERS_TO_LOAD, layersToRetain, batchFileNameBase));
    }
  }

  private void processTrieLogBatches(
      final BonsaiWorldStateKeyValueStorage rootWorldStateStorage,
      final DiffBasedWorldStateKeyValueStorage rootWorldStateStorage,
      final MutableBlockchain blockchain,
      final long chainHeight,
      final long lastBlockNumberToRetainTrieLogsFor,
@@ -152,7 +150,7 @@ public class TrieLogHelper {

  private void saveTrieLogBatches(
      final String batchFileName,
      final BonsaiWorldStateKeyValueStorage rootWorldStateStorage,
      final DiffBasedWorldStateKeyValueStorage rootWorldStateStorage,
      final List<Hash> trieLogKeys) {

    try {
@@ -164,7 +162,7 @@ public class TrieLogHelper {
  }

  private void restoreTrieLogBatches(
      final BonsaiWorldStateKeyValueStorage rootWorldStateStorage,
      final DiffBasedWorldStateKeyValueStorage rootWorldStateStorage,
      final long batchNumber,
      final String batchFileNameBase) {

@@ -217,7 +215,7 @@ public class TrieLogHelper {
      final MutableBlockchain blockchain,
      final long chainHeight,
      final long lastBlockNumberToRetainTrieLogsFor,
      final BonsaiWorldStateKeyValueStorage rootWorldStateStorage,
      final DiffBasedWorldStateKeyValueStorage rootWorldStateStorage,
      final long layersToRetain) {

    if (lastBlockNumberToRetainTrieLogsFor < 0) {
@@ -231,7 +229,7 @@ public class TrieLogHelper {
    // plus extra threshold to account forks and orphans
    final long clampedCountBeforePruning =
        rootWorldStateStorage
            .streamTrieLogKeys(layersToRetain + DEFAULT_BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE)
            .streamTrieLogKeys(layersToRetain + DEFAULT_TRIE_LOG_PRUNING_WINDOW_SIZE)
            .count();
    if (clampedCountBeforePruning < layersToRetain) {
      throw new IllegalArgumentException(
@@ -257,7 +255,7 @@ public class TrieLogHelper {
  }

  private void recreateTrieLogs(
      final BonsaiWorldStateKeyValueStorage rootWorldStateStorage,
      final DiffBasedWorldStateKeyValueStorage rootWorldStateStorage,
      final long batchNumber,
      final String batchFileNameBase)
      throws IOException {
@@ -277,7 +275,7 @@ public class TrieLogHelper {
      final int chunkSize,
      final List<byte[]> keys,
      final IdentityHashMap<byte[], byte[]> trieLogsToRetain,
      final BonsaiWorldStateKeyValueStorage rootWorldStateStorage) {
      final DiffBasedWorldStateKeyValueStorage rootWorldStateStorage) {

    var updater = rootWorldStateStorage.updater();
    int endIndex = Math.min(startIndex + chunkSize, keys.size());
@@ -294,31 +292,31 @@ public class TrieLogHelper {

  @VisibleForTesting
  void validatePruneConfiguration(final DataStorageConfiguration config) {
    final DiffBasedSubStorageConfiguration subStorageConfiguration =
        config.getDiffBasedSubStorageConfiguration();
    checkArgument(
        config.getBonsaiMaxLayersToLoad()
            >= DataStorageConfiguration.MINIMUM_BONSAI_TRIE_LOG_RETENTION_LIMIT,
        subStorageConfiguration.getMaxLayersToLoad()
            >= DiffBasedSubStorageConfiguration.MINIMUM_TRIE_LOG_RETENTION_LIMIT,
        String.format(
            BONSAI_STORAGE_FORMAT_MAX_LAYERS_TO_LOAD + " minimum value is %d",
            DataStorageConfiguration.MINIMUM_BONSAI_TRIE_LOG_RETENTION_LIMIT));
            MAX_LAYERS_TO_LOAD + " minimum value is %d",
            DiffBasedSubStorageConfiguration.MINIMUM_TRIE_LOG_RETENTION_LIMIT));
    checkArgument(
        config.getBonsaiTrieLogPruningWindowSize() > 0,
        subStorageConfiguration.getTrieLogPruningWindowSize() > 0,
        String.format(
            DataStorageOptions.BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE + "=%d must be greater than 0",
            config.getBonsaiTrieLogPruningWindowSize()));
            TRIE_LOG_PRUNING_WINDOW_SIZE + "=%d must be greater than 0",
            subStorageConfiguration.getTrieLogPruningWindowSize()));
    checkArgument(
        config.getBonsaiTrieLogPruningWindowSize() > config.getBonsaiMaxLayersToLoad(),
        subStorageConfiguration.getTrieLogPruningWindowSize()
            > subStorageConfiguration.getMaxLayersToLoad(),
        String.format(
            DataStorageOptions.BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE
                + "=%d must be greater than "
                + BONSAI_STORAGE_FORMAT_MAX_LAYERS_TO_LOAD
                + "=%d",
            config.getBonsaiTrieLogPruningWindowSize(),
            config.getBonsaiMaxLayersToLoad()));
            TRIE_LOG_PRUNING_WINDOW_SIZE + "=%d must be greater than " + MAX_LAYERS_TO_LOAD + "=%d",
            subStorageConfiguration.getTrieLogPruningWindowSize(),
            subStorageConfiguration.getMaxLayersToLoad()));
  }

  private void saveTrieLogsInFile(
      final List<Hash> trieLogsKeys,
      final BonsaiWorldStateKeyValueStorage rootWorldStateStorage,
      final DiffBasedWorldStateKeyValueStorage rootWorldStateStorage,
      final String batchFileName)
      throws IOException {

@@ -355,7 +353,7 @@ public class TrieLogHelper {

  private void saveTrieLogsAsRlpInFile(
      final List<Hash> trieLogsKeys,
      final BonsaiWorldStateKeyValueStorage rootWorldStateStorage,
      final DiffBasedWorldStateKeyValueStorage rootWorldStateStorage,
      final String batchFileName) {
    File file = new File(batchFileName);
    if (file.exists()) {
@@ -400,7 +398,8 @@ public class TrieLogHelper {
  }

  private IdentityHashMap<byte[], byte[]> getTrieLogs(
      final List<Hash> trieLogKeys, final BonsaiWorldStateKeyValueStorage rootWorldStateStorage) {
      final List<Hash> trieLogKeys,
      final DiffBasedWorldStateKeyValueStorage rootWorldStateStorage) {
    IdentityHashMap<byte[], byte[]> trieLogsToRetain = new IdentityHashMap<>();

    LOG.info("Obtaining trielogs from db, this may take a few minutes...");
@@ -413,7 +412,7 @@ public class TrieLogHelper {
  }

  TrieLogCount getCount(
      final BonsaiWorldStateKeyValueStorage rootWorldStateStorage,
      final DiffBasedWorldStateKeyValueStorage rootWorldStateStorage,
      final int limit,
      final Blockchain blockchain) {
    final AtomicInteger total = new AtomicInteger();
@@ -454,7 +453,7 @@ public class TrieLogHelper {
  }

  void importTrieLog(
      final BonsaiWorldStateKeyValueStorage rootWorldStateStorage, final Path trieLogFilePath) {
      final DiffBasedWorldStateKeyValueStorage rootWorldStateStorage, final Path trieLogFilePath) {

    var trieLog = readTrieLogsAsRlpFromFile(trieLogFilePath.toString());

@@ -464,7 +463,7 @@ public class TrieLogHelper {
  }

  void exportTrieLog(
      final BonsaiWorldStateKeyValueStorage rootWorldStateStorage,
      final DiffBasedWorldStateKeyValueStorage rootWorldStateStorage,
      final List<Hash> trieLogHash,
      final Path directoryPath)
      throws IOException {

@@ -29,6 +29,7 @@ import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.BonsaiWorldSt
import org.hyperledger.besu.ethereum.trie.diffbased.common.trielog.TrieLogPruner;
import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration;
import org.hyperledger.besu.ethereum.worldstate.ImmutableDataStorageConfiguration;
import org.hyperledger.besu.ethereum.worldstate.ImmutableDiffBasedSubStorageConfiguration;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;

import java.io.IOException;
@@ -89,7 +90,11 @@ public class TrieLogSubCommand implements Runnable {
            .besuCommand
            .setupControllerBuilder()
            .dataStorageConfiguration(
                ImmutableDataStorageConfiguration.copyOf(config).withBonsaiLimitTrieLogsEnabled(false))
                ImmutableDataStorageConfiguration.copyOf(config)
                    .withDiffBasedSubStorageConfiguration(
                        ImmutableDiffBasedSubStorageConfiguration.copyOf(
                                config.getDiffBasedSubStorageConfiguration())
                            .withLimitTrieLogsEnabled(false)))
            .build();
  }

@@ -89,6 +89,7 @@ import org.hyperledger.besu.ethereum.trie.diffbased.common.trielog.TrieLogManage
import org.hyperledger.besu.ethereum.trie.diffbased.common.trielog.TrieLogPruner;
import org.hyperledger.besu.ethereum.trie.forest.ForestWorldStateArchive;
import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration;
import org.hyperledger.besu.ethereum.worldstate.DiffBasedSubStorageConfiguration;
import org.hyperledger.besu.ethereum.worldstate.WorldStateArchive;
import org.hyperledger.besu.ethereum.worldstate.WorldStateKeyValueStorage;
import org.hyperledger.besu.ethereum.worldstate.WorldStatePreimageStorage;
@@ -741,15 +742,18 @@ public abstract class BesuControllerBuilder implements MiningParameterOverrides
    final JsonRpcMethods additionalJsonRpcMethodFactory =
        createAdditionalJsonRpcMethodFactory(protocolContext, protocolSchedule, miningParameters);

    if (dataStorageConfiguration.getBonsaiLimitTrieLogsEnabled()
        && DataStorageFormat.BONSAI.equals(dataStorageConfiguration.getDataStorageFormat())) {
      final TrieLogManager trieLogManager =
          ((BonsaiWorldStateProvider) worldStateArchive).getTrieLogManager();
      final BonsaiWorldStateKeyValueStorage worldStateKeyValueStorage =
          worldStateStorageCoordinator.getStrategy(BonsaiWorldStateKeyValueStorage.class);
      final TrieLogPruner trieLogPruner =
          createTrieLogPruner(worldStateKeyValueStorage, blockchain, scheduler);
      trieLogManager.subscribe(trieLogPruner);
    if (DataStorageFormat.BONSAI.equals(dataStorageConfiguration.getDataStorageFormat())) {
      final DiffBasedSubStorageConfiguration subStorageConfiguration =
          dataStorageConfiguration.getDiffBasedSubStorageConfiguration();
      if (subStorageConfiguration.getLimitTrieLogsEnabled()) {
        final TrieLogManager trieLogManager =
            ((BonsaiWorldStateProvider) worldStateArchive).getTrieLogManager();
        final BonsaiWorldStateKeyValueStorage worldStateKeyValueStorage =
            worldStateStorageCoordinator.getStrategy(BonsaiWorldStateKeyValueStorage.class);
        final TrieLogPruner trieLogPruner =
            createTrieLogPruner(worldStateKeyValueStorage, blockchain, scheduler);
        trieLogManager.subscribe(trieLogPruner);
      }
    }

    final List<Closeable> closeables = new ArrayList<>();
@@ -803,14 +807,15 @@ public abstract class BesuControllerBuilder implements MiningParameterOverrides
      final Blockchain blockchain,
      final EthScheduler scheduler) {
    final boolean isProofOfStake = genesisConfigOptions.getTerminalTotalDifficulty().isPresent();

    final DiffBasedSubStorageConfiguration subStorageConfiguration =
        dataStorageConfiguration.getDiffBasedSubStorageConfiguration();
    final TrieLogPruner trieLogPruner =
        new TrieLogPruner(
            (BonsaiWorldStateKeyValueStorage) worldStateStorage,
            blockchain,
            scheduler::executeServiceTask,
            dataStorageConfiguration.getBonsaiMaxLayersToLoad(),
            dataStorageConfiguration.getBonsaiTrieLogPruningWindowSize(),
            subStorageConfiguration.getMaxLayersToLoad(),
            subStorageConfiguration.getTrieLogPruningWindowSize(),
            isProofOfStake,
            metricsSystem);
    trieLogPruner.initialize();
@@ -1092,10 +1097,14 @@ public abstract class BesuControllerBuilder implements MiningParameterOverrides
      case BONSAI -> {
        final BonsaiWorldStateKeyValueStorage worldStateKeyValueStorage =
            worldStateStorageCoordinator.getStrategy(BonsaiWorldStateKeyValueStorage.class);

        yield new BonsaiWorldStateProvider(
            worldStateKeyValueStorage,
            blockchain,
            Optional.of(dataStorageConfiguration.getBonsaiMaxLayersToLoad()),
            Optional.of(
                dataStorageConfiguration
                    .getDiffBasedSubStorageConfiguration()
                    .getMaxLayersToLoad()),
            bonsaiCachedMerkleTrieLoader,
            besuComponent.map(BesuComponent::getBesuPluginContext).orElse(null),
            evmConfiguration);

@@ -14,9 +14,12 @@
 */
package org.hyperledger.besu.services;

import org.hyperledger.besu.datatypes.PendingTransaction;
import org.hyperledger.besu.ethereum.eth.transactions.TransactionPool;
import org.hyperledger.besu.plugin.services.transactionpool.TransactionPoolService;

import java.util.Collection;

/** Service to enable and disable the transaction pool. */
public class TransactionPoolServiceImpl implements TransactionPoolService {

@@ -40,4 +43,9 @@ public class TransactionPoolServiceImpl implements TransactionPoolService {
  public void enableTransactionPool() {
    transactionPool.setEnabled();
  }

  @Override
  public Collection<? extends PendingTransaction> getPendingTransactions() {
    return transactionPool.getPendingTransactions();
  }
}
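A minimal plugin-side sketch of the new accessor, illustrative only and not part of the diff: it assumes the standard plugin service lookup via BesuContext.getService(...) is available, and otherwise uses only the TransactionPoolService and PendingTransaction types shown above. The helper class name is hypothetical.

import java.util.Collection;

import org.hyperledger.besu.datatypes.PendingTransaction;
import org.hyperledger.besu.plugin.BesuContext;
import org.hyperledger.besu.plugin.services.transactionpool.TransactionPoolService;

// Hypothetical helper, for illustration only.
class PoolContentReader {
  static void logPoolSize(final BesuContext context) {
    context
        .getService(TransactionPoolService.class) // assumed service lookup
        .ifPresent(
            poolService -> {
              // Read-only view of the pool content now exposed to plugins.
              final Collection<? extends PendingTransaction> pending =
                  poolService.getPendingTransactions();
              System.out.println("Pending transactions in pool: " + pending.size());
            });
  }
}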
@@ -1390,7 +1390,7 @@ public class BesuCommandTest extends CommandTestAbstract {
  }

  @Test
  public void bonsaiLimitTrieLogsEnabledByDefault() {
  public void diffbasedLimitTrieLogsEnabledByDefault() {
    parseCommand();
    verify(mockControllerBuilder)
        .dataStorageConfiguration(dataStorageConfigurationArgumentCaptor.capture());
@@ -1398,7 +1398,11 @@ public class BesuCommandTest extends CommandTestAbstract {
    final DataStorageConfiguration dataStorageConfiguration =
        dataStorageConfigurationArgumentCaptor.getValue();
    assertThat(dataStorageConfiguration.getDataStorageFormat()).isEqualTo(BONSAI);
    assertThat(dataStorageConfiguration.getBonsaiLimitTrieLogsEnabled()).isTrue();
    assertThat(
            dataStorageConfiguration
                .getDiffBasedSubStorageConfiguration()
                .getLimitTrieLogsEnabled())
        .isTrue();
    assertThat(commandOutput.toString(UTF_8)).isEmpty();
    assertThat(commandErrorOutput.toString(UTF_8)).isEmpty();
  }
@@ -1413,7 +1417,11 @@ public class BesuCommandTest extends CommandTestAbstract {
    final DataStorageConfiguration dataStorageConfiguration =
        dataStorageConfigurationArgumentCaptor.getValue();
    assertThat(dataStorageConfiguration.getDataStorageFormat()).isEqualTo(BONSAI);
    assertThat(dataStorageConfiguration.getBonsaiLimitTrieLogsEnabled()).isFalse();
    assertThat(
            dataStorageConfiguration
                .getDiffBasedSubStorageConfiguration()
                .getLimitTrieLogsEnabled())
        .isFalse();
    verify(mockLogger)
        .warn(
            "Forcing {}, since it cannot be enabled with --sync-mode={} and --data-storage-format={}.",
@@ -1448,7 +1456,8 @@ public class BesuCommandTest extends CommandTestAbstract {
    final DataStorageConfiguration dataStorageConfiguration =
        dataStorageConfigurationArgumentCaptor.getValue();
    assertThat(dataStorageConfiguration.getDataStorageFormat()).isEqualTo(BONSAI);
    assertThat(dataStorageConfiguration.getBonsaiMaxLayersToLoad()).isEqualTo(11);
    assertThat(dataStorageConfiguration.getDiffBasedSubStorageConfiguration().getMaxLayersToLoad())
        .isEqualTo(11);
    assertThat(commandOutput.toString(UTF_8)).isEmpty();
    assertThat(commandErrorOutput.toString(UTF_8)).isEmpty();
  }
@@ -2551,8 +2560,9 @@ public class BesuCommandTest extends CommandTestAbstract {
            besuCommand
                .getDataStorageOptions()
                .toDomainObject()
                .getDiffBasedSubStorageConfiguration()
                .getUnstable()
                .getBonsaiFullFlatDbEnabled())
                .getFullFlatDbEnabled())
        .isTrue();
  }

@@ -2563,8 +2573,9 @@ public class BesuCommandTest extends CommandTestAbstract {
            besuCommand
                .dataStorageOptions
                .toDomainObject()
                .getDiffBasedSubStorageConfiguration()
                .getUnstable()
                .getBonsaiFullFlatDbEnabled())
                .getFullFlatDbEnabled())
        .isFalse();
  }

@@ -34,10 +34,10 @@ import org.hyperledger.besu.chainexport.RlpBlockExporter;
import org.hyperledger.besu.chainimport.JsonBlockImporter;
import org.hyperledger.besu.chainimport.RlpBlockImporter;
import org.hyperledger.besu.cli.config.EthNetworkConfig;
import org.hyperledger.besu.cli.options.DataStorageOptions;
import org.hyperledger.besu.cli.options.MiningOptions;
import org.hyperledger.besu.cli.options.TransactionPoolOptions;
import org.hyperledger.besu.cli.options.stable.EthstatsOptions;
import org.hyperledger.besu.cli.options.storage.DataStorageOptions;
import org.hyperledger.besu.cli.options.unstable.EthProtocolOptions;
import org.hyperledger.besu.cli.options.unstable.NetworkingOptions;
import org.hyperledger.besu.cli.options.unstable.SynchronizerOptions;

@@ -162,7 +162,7 @@ class ConfigurationOverviewBuilderTest {
  }

  @Test
  void setBonsaiLimitTrieLogsEnabled() {
  void setDiffbasedLimitTrieLogsEnabled() {
    final String noTrieLogRetentionLimitSet = builder.build();
    assertThat(noTrieLogRetentionLimitSet).doesNotContain("Limit trie logs enabled");

@@ -64,7 +64,6 @@ public abstract class AbstractCLIOptionsTest<D, T extends CLIOptions<D>>
  private void getCLIOptions(final D domainObject) {
    T options = optionsFromDomainObject(domainObject);
    final String[] cliOptions = options.getCLIOptions().toArray(new String[0]);

    final TestBesuCommand cmd = parseCommand(cliOptions);
    final T optionsFromCommand = getOptionsFromBesuCommand(cmd);

@@ -15,12 +15,13 @@
package org.hyperledger.besu.cli.options.stable;

import static org.assertj.core.api.Assertions.assertThat;
import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.MINIMUM_BONSAI_TRIE_LOG_RETENTION_LIMIT;
import static org.hyperledger.besu.ethereum.worldstate.DiffBasedSubStorageConfiguration.MINIMUM_TRIE_LOG_RETENTION_LIMIT;

import org.hyperledger.besu.cli.options.AbstractCLIOptionsTest;
import org.hyperledger.besu.cli.options.DataStorageOptions;
import org.hyperledger.besu.cli.options.storage.DataStorageOptions;
import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration;
import org.hyperledger.besu.ethereum.worldstate.ImmutableDataStorageConfiguration;
import org.hyperledger.besu.ethereum.worldstate.ImmutableDiffBasedSubStorageConfiguration;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;

import org.junit.jupiter.api.Test;
@@ -32,7 +33,11 @@ public class DataStorageOptionsTest
  public void bonsaiTrieLogPruningLimitOption() {
    internalTestSuccess(
        dataStorageConfiguration ->
            assertThat(dataStorageConfiguration.getBonsaiTrieLogPruningWindowSize()).isEqualTo(600),
            assertThat(
                    dataStorageConfiguration
                        .getDiffBasedSubStorageConfiguration()
                        .getTrieLogPruningWindowSize())
                .isEqualTo(600),
        "--bonsai-limit-trie-logs-enabled",
        "--bonsai-trie-logs-pruning-window-size",
        "600");
@@ -42,7 +47,11 @@ public class DataStorageOptionsTest
  public void bonsaiTrieLogPruningLimitLegacyOption() {
    internalTestSuccess(
        dataStorageConfiguration ->
            assertThat(dataStorageConfiguration.getBonsaiTrieLogPruningWindowSize()).isEqualTo(600),
            assertThat(
                    dataStorageConfiguration
                        .getDiffBasedSubStorageConfiguration()
                        .getTrieLogPruningWindowSize())
                .isEqualTo(600),
        "--Xbonsai-limit-trie-logs-enabled",
        "--Xbonsai-trie-logs-pruning-window-size",
        "600");
@@ -52,12 +61,16 @@ public class DataStorageOptionsTest
  public void bonsaiTrieLogsEnabled_explicitlySetToFalse() {
    internalTestSuccess(
        dataStorageConfiguration ->
            assertThat(dataStorageConfiguration.getBonsaiLimitTrieLogsEnabled()).isEqualTo(false),
            assertThat(
                    dataStorageConfiguration
                        .getDiffBasedSubStorageConfiguration()
                        .getLimitTrieLogsEnabled())
                .isEqualTo(false),
        "--bonsai-limit-trie-logs-enabled=false");
  }

  @Test
  public void bonsaiTrieLogPruningWindowSizeShouldBePositive() {
  public void diffbasedTrieLogPruningWindowSizeShouldBePositive() {
    internalTestFailure(
        "--bonsai-trie-logs-pruning-window-size=0 must be greater than 0",
        "--bonsai-limit-trie-logs-enabled",
@@ -66,7 +79,7 @@ public class DataStorageOptionsTest
  }

  @Test
  public void bonsaiTrieLogPruningWindowSizeShouldBeAboveRetentionLimit() {
  public void diffbasedTrieLogPruningWindowSizeShouldBeAboveRetentionLimit() {
    internalTestFailure(
        "--bonsai-trie-logs-pruning-window-size=512 must be greater than --bonsai-historical-block-limit=512",
        "--bonsai-limit-trie-logs-enabled",
@@ -78,8 +91,11 @@ public class DataStorageOptionsTest
  public void bonsaiTrieLogRetentionLimitOption() {
    internalTestSuccess(
        dataStorageConfiguration ->
            assertThat(dataStorageConfiguration.getBonsaiMaxLayersToLoad())
                .isEqualTo(MINIMUM_BONSAI_TRIE_LOG_RETENTION_LIMIT + 1),
            assertThat(
                    dataStorageConfiguration
                        .getDiffBasedSubStorageConfiguration()
                        .getMaxLayersToLoad())
                .isEqualTo(MINIMUM_TRIE_LOG_RETENTION_LIMIT + 1),
        "--bonsai-limit-trie-logs-enabled",
        "--bonsai-historical-block-limit",
        "513");
@@ -89,8 +105,11 @@ public class DataStorageOptionsTest
  public void bonsaiTrieLogRetentionLimitOption_boundaryTest() {
    internalTestSuccess(
        dataStorageConfiguration ->
            assertThat(dataStorageConfiguration.getBonsaiMaxLayersToLoad())
                .isEqualTo(MINIMUM_BONSAI_TRIE_LOG_RETENTION_LIMIT),
            assertThat(
                    dataStorageConfiguration
                        .getDiffBasedSubStorageConfiguration()
                        .getMaxLayersToLoad())
                .isEqualTo(MINIMUM_TRIE_LOG_RETENTION_LIMIT),
        "--bonsai-limit-trie-logs-enabled",
        "--bonsai-historical-block-limit",
        "512");
@@ -106,22 +125,28 @@ public class DataStorageOptionsTest
  }

  @Test
  public void bonsaiCodeUsingCodeHashEnabledCanBeEnabled() {
  public void diffbasedCodeUsingCodeHashEnabledCanBeEnabled() {
    internalTestSuccess(
        dataStorageConfiguration ->
            assertThat(
                    dataStorageConfiguration.getUnstable().getBonsaiCodeStoredByCodeHashEnabled())
                    dataStorageConfiguration
                        .getDiffBasedSubStorageConfiguration()
                        .getUnstable()
                        .getCodeStoredByCodeHashEnabled())
                .isEqualTo(true),
        "--Xbonsai-code-using-code-hash-enabled",
        "true");
  }

  @Test
  public void bonsaiCodeUsingCodeHashEnabledCanBeDisabled() {
  public void diffbasedCodeUsingCodeHashEnabledCanBeDisabled() {
    internalTestSuccess(
        dataStorageConfiguration ->
            assertThat(
                    dataStorageConfiguration.getUnstable().getBonsaiCodeStoredByCodeHashEnabled())
                    dataStorageConfiguration
                        .getDiffBasedSubStorageConfiguration()
                        .getUnstable()
                        .getCodeStoredByCodeHashEnabled())
                .isEqualTo(false),
        "--Xbonsai-code-using-code-hash-enabled",
        "false");
@@ -160,9 +185,12 @@ public class DataStorageOptionsTest
  protected DataStorageConfiguration createCustomizedDomainObject() {
    return ImmutableDataStorageConfiguration.builder()
        .dataStorageFormat(DataStorageFormat.BONSAI)
        .bonsaiMaxLayersToLoad(513L)
        .bonsaiLimitTrieLogsEnabled(true)
        .bonsaiTrieLogPruningWindowSize(514)
        .diffBasedSubStorageConfiguration(
            ImmutableDiffBasedSubStorageConfiguration.builder()
                .maxLayersToLoad(513L)
                .limitTrieLogsEnabled(true)
                .trieLogPruningWindowSize(514)
                .build())
        .build();
  }

@@ -17,7 +17,7 @@ package org.hyperledger.besu.cli.subcommands.storage;
import static java.util.Collections.singletonList;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.DEFAULT_BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE;
import static org.hyperledger.besu.ethereum.worldstate.DiffBasedSubStorageConfiguration.DEFAULT_TRIE_LOG_PRUNING_WINDOW_SIZE;
import static org.hyperledger.besu.plugin.services.storage.DataStorageFormat.BONSAI;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.spy;
@@ -35,6 +35,7 @@ import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.trielog.TrieLogFactor
import org.hyperledger.besu.ethereum.trie.diffbased.common.trielog.TrieLogLayer;
import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration;
import org.hyperledger.besu.ethereum.worldstate.ImmutableDataStorageConfiguration;
import org.hyperledger.besu.ethereum.worldstate.ImmutableDiffBasedSubStorageConfiguration;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;

import java.io.FileNotFoundException;
@@ -134,8 +135,11 @@ class TrieLogHelperTest {
    DataStorageConfiguration dataStorageConfiguration =
        ImmutableDataStorageConfiguration.builder()
            .dataStorageFormat(BONSAI)
            .bonsaiMaxLayersToLoad(3L)
            .bonsaiLimitTrieLogsEnabled(true)
            .diffBasedSubStorageConfiguration(
                ImmutableDiffBasedSubStorageConfiguration.builder()
                    .maxLayersToLoad(3L)
                    .limitTrieLogsEnabled(true)
                    .build())
            .build();

    mockBlockchainBase();
@@ -172,8 +176,11 @@ class TrieLogHelperTest {
    DataStorageConfiguration dataStorageConfiguration =
        ImmutableDataStorageConfiguration.builder()
            .dataStorageFormat(BONSAI)
            .bonsaiMaxLayersToLoad(2L)
            .bonsaiLimitTrieLogsEnabled(true)
            .diffBasedSubStorageConfiguration(
                ImmutableDiffBasedSubStorageConfiguration.builder()
                    .maxLayersToLoad(2L)
                    .limitTrieLogsEnabled(true)
                    .build())
            .build();

    when(blockchain.getChainHeadBlockNumber()).thenReturn(5L);
@@ -192,8 +199,11 @@ class TrieLogHelperTest {
    DataStorageConfiguration dataStorageConfiguration =
        ImmutableDataStorageConfiguration.builder()
            .dataStorageFormat(BONSAI)
            .bonsaiMaxLayersToLoad(10L)
            .bonsaiLimitTrieLogsEnabled(true)
            .diffBasedSubStorageConfiguration(
                ImmutableDiffBasedSubStorageConfiguration.builder()
                    .maxLayersToLoad(10L)
                    .limitTrieLogsEnabled(true)
                    .build())
            .build();

    when(blockchain.getChainHeadBlockNumber()).thenReturn(5L);
@@ -212,8 +222,11 @@ class TrieLogHelperTest {
    DataStorageConfiguration dataStorageConfiguration =
        ImmutableDataStorageConfiguration.builder()
            .dataStorageFormat(BONSAI)
            .bonsaiMaxLayersToLoad(2L)
            .bonsaiLimitTrieLogsEnabled(true)
            .diffBasedSubStorageConfiguration(
                ImmutableDiffBasedSubStorageConfiguration.builder()
                    .maxLayersToLoad(2L)
                    .limitTrieLogsEnabled(true)
                    .build())
            .build();

    mockBlockchainBase();
@@ -233,8 +246,11 @@ class TrieLogHelperTest {
    DataStorageConfiguration dataStorageConfiguration =
        ImmutableDataStorageConfiguration.builder()
            .dataStorageFormat(BONSAI)
            .bonsaiMaxLayersToLoad(6L)
            .bonsaiLimitTrieLogsEnabled(true)
            .diffBasedSubStorageConfiguration(
                ImmutableDiffBasedSubStorageConfiguration.builder()
                    .maxLayersToLoad(6L)
                    .limitTrieLogsEnabled(true)
                    .build())
            .build();

    when(blockchain.getChainHeadBlockNumber()).thenReturn(5L);
@@ -255,8 +271,11 @@ class TrieLogHelperTest {
    DataStorageConfiguration dataStorageConfiguration =
        ImmutableDataStorageConfiguration.builder()
            .dataStorageFormat(BONSAI)
            .bonsaiMaxLayersToLoad(3L)
            .bonsaiLimitTrieLogsEnabled(true)
            .diffBasedSubStorageConfiguration(
                ImmutableDiffBasedSubStorageConfiguration.builder()
                    .maxLayersToLoad(3L)
                    .limitTrieLogsEnabled(true)
                    .build())
            .build();

    mockBlockchainBase();
@@ -266,7 +285,7 @@ class TrieLogHelperTest {

    final BonsaiWorldStateKeyValueStorage inMemoryWorldStateSpy = spy(inMemoryWorldState);
    // force a different value the second time the trie log count is called
    when(inMemoryWorldStateSpy.streamTrieLogKeys(3L + DEFAULT_BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE))
    when(inMemoryWorldStateSpy.streamTrieLogKeys(3L + DEFAULT_TRIE_LOG_PRUNING_WINDOW_SIZE))
        .thenCallRealMethod()
        .thenReturn(Stream.empty());
    assertThatThrownBy(
@@ -284,8 +303,11 @@ class TrieLogHelperTest {
    DataStorageConfiguration dataStorageConfiguration =
        ImmutableDataStorageConfiguration.builder()
            .dataStorageFormat(BONSAI)
            .bonsaiMaxLayersToLoad(511L)
            .bonsaiLimitTrieLogsEnabled(true)
            .diffBasedSubStorageConfiguration(
                ImmutableDiffBasedSubStorageConfiguration.builder()
                    .maxLayersToLoad(511L)
                    .limitTrieLogsEnabled(true)
                    .build())
            .build();

    TrieLogHelper helper = new TrieLogHelper();
@@ -302,9 +324,12 @@ class TrieLogHelperTest {
    DataStorageConfiguration dataStorageConfiguration =
        ImmutableDataStorageConfiguration.builder()
            .dataStorageFormat(BONSAI)
            .bonsaiMaxLayersToLoad(512L)
            .bonsaiLimitTrieLogsEnabled(true)
            .bonsaiTrieLogPruningWindowSize(0)
            .diffBasedSubStorageConfiguration(
                ImmutableDiffBasedSubStorageConfiguration.builder()
                    .maxLayersToLoad(512L)
                    .limitTrieLogsEnabled(true)
                    .trieLogPruningWindowSize(0)
                    .build())
            .build();

    TrieLogHelper helper = new TrieLogHelper();
@@ -320,9 +345,12 @@ class TrieLogHelperTest {
    DataStorageConfiguration dataStorageConfiguration =
        ImmutableDataStorageConfiguration.builder()
            .dataStorageFormat(BONSAI)
            .bonsaiMaxLayersToLoad(512L)
            .bonsaiLimitTrieLogsEnabled(true)
            .bonsaiTrieLogPruningWindowSize(512)
            .diffBasedSubStorageConfiguration(
                ImmutableDiffBasedSubStorageConfiguration.builder()
                    .maxLayersToLoad(512L)
                    .limitTrieLogsEnabled(true)
                    .trieLogPruningWindowSize(512)
                    .build())
            .build();

    TrieLogHelper helper = new TrieLogHelper();
@@ -340,8 +368,11 @@ class TrieLogHelperTest {
    DataStorageConfiguration dataStorageConfiguration =
        ImmutableDataStorageConfiguration.builder()
            .dataStorageFormat(BONSAI)
            .bonsaiMaxLayersToLoad(3L)
            .bonsaiLimitTrieLogsEnabled(true)
            .diffBasedSubStorageConfiguration(
                ImmutableDiffBasedSubStorageConfiguration.builder()
                    .maxLayersToLoad(3L)
                    .limitTrieLogsEnabled(true)
                    .build())
            .build();

    mockBlockchainBase();

@@ -58,8 +58,10 @@ class TrieLogSubCommandTest extends CommandTestAbstract {
        .dataStorageConfiguration(dataStorageConfigurationArgumentCaptor.capture());
    final List<DataStorageConfiguration> configs =
        dataStorageConfigurationArgumentCaptor.getAllValues();
    assertThat(configs.get(0).getBonsaiLimitTrieLogsEnabled()).isTrue();
    assertThat(configs.get(1).getBonsaiLimitTrieLogsEnabled()).isFalse();
    assertThat(configs.get(0).getDiffBasedSubStorageConfiguration().getLimitTrieLogsEnabled())
        .isTrue();
    assertThat(configs.get(1).getDiffBasedSubStorageConfiguration().getLimitTrieLogsEnabled())
        .isFalse();
  }

  @Test
@@ -69,6 +71,11 @@ class TrieLogSubCommandTest extends CommandTestAbstract {
        .dataStorageConfiguration(dataStorageConfigurationArgumentCaptor.capture());
    final List<DataStorageConfiguration> configs =
        dataStorageConfigurationArgumentCaptor.getAllValues();
    assertThat(configs).allMatch(DataStorageConfiguration::getBonsaiLimitTrieLogsEnabled);
    assertThat(configs)
        .allMatch(
            dataStorageConfiguration ->
                dataStorageConfiguration
                    .getDiffBasedSubStorageConfiguration()
                    .getLimitTrieLogsEnabled());
  }
}

@@ -24,9 +24,10 @@ import org.hyperledger.besu.datatypes.StorageSlotKey;
import org.hyperledger.besu.ethereum.storage.StorageProvider;
import org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier;
import org.hyperledger.besu.ethereum.trie.MerkleTrie;
import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.flat.BonsaiFlatDbStrategy;
import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.flat.BonsaiFlatDbStrategyProvider;
import org.hyperledger.besu.ethereum.trie.diffbased.common.storage.DiffBasedWorldStateKeyValueStorage;
import org.hyperledger.besu.ethereum.trie.diffbased.common.storage.flat.FlatDbStrategy;
import org.hyperledger.besu.ethereum.trie.diffbased.common.storage.flat.FlatDbStrategyProvider;
import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration;
import org.hyperledger.besu.ethereum.worldstate.FlatDbMode;
import org.hyperledger.besu.ethereum.worldstate.StateTrieAccountValue;
@@ -49,7 +50,7 @@ import org.apache.tuweni.bytes.Bytes32;

public class BonsaiWorldStateKeyValueStorage extends DiffBasedWorldStateKeyValueStorage
    implements WorldStateKeyValueStorage {
  protected final FlatDbStrategyProvider flatDbStrategyProvider;
  protected final BonsaiFlatDbStrategyProvider flatDbStrategyProvider;

  public BonsaiWorldStateKeyValueStorage(
      final StorageProvider provider,
@@ -61,12 +62,12 @@ public class BonsaiWorldStateKeyValueStorage extends DiffBasedWorldStateKeyValue
            ACCOUNT_INFO_STATE, CODE_STORAGE, ACCOUNT_STORAGE_STORAGE, TRIE_BRANCH_STORAGE)),
        provider.getStorageBySegmentIdentifier(KeyValueSegmentIdentifier.TRIE_LOG_STORAGE));
    this.flatDbStrategyProvider =
        new FlatDbStrategyProvider(metricsSystem, dataStorageConfiguration);
        new BonsaiFlatDbStrategyProvider(metricsSystem, dataStorageConfiguration);
    flatDbStrategyProvider.loadFlatDbStrategy(composedWorldStateStorage);
  }

  public BonsaiWorldStateKeyValueStorage(
      final FlatDbStrategyProvider flatDbStrategyProvider,
      final BonsaiFlatDbStrategyProvider flatDbStrategyProvider,
      final SegmentedKeyValueStorage composedWorldStateStorage,
      final KeyValueStorage trieLogStorage) {
    super(composedWorldStateStorage, trieLogStorage);
@@ -87,15 +88,12 @@ public class BonsaiWorldStateKeyValueStorage extends DiffBasedWorldStateKeyValue
    if (codeHash.equals(Hash.EMPTY)) {
      return Optional.of(Bytes.EMPTY);
    } else {
      return flatDbStrategyProvider
          .getFlatDbStrategy(composedWorldStateStorage)
          .getFlatCode(codeHash, accountHash, composedWorldStateStorage);
      return getFlatDbStrategy().getFlatCode(codeHash, accountHash, composedWorldStateStorage);
    }
  }

  public Optional<Bytes> getAccount(final Hash accountHash) {
    return flatDbStrategyProvider
        .getFlatDbStrategy(composedWorldStateStorage)
    return getFlatDbStrategy()
        .getFlatAccount(
            this::getWorldStateRootHash,
            this::getAccountStateTrieNode,
@@ -148,8 +146,7 @@ public class BonsaiWorldStateKeyValueStorage extends DiffBasedWorldStateKeyValue
      final Supplier<Optional<Hash>> storageRootSupplier,
      final Hash accountHash,
      final StorageSlotKey storageSlotKey) {
    return flatDbStrategyProvider
        .getFlatDbStrategy(composedWorldStateStorage)
    return getFlatDbStrategy()
        .getFlatStorageValueByStorageSlotKey(
            this::getWorldStateRootHash,
            storageRootSupplier,
@@ -180,8 +177,9 @@ public class BonsaiWorldStateKeyValueStorage extends DiffBasedWorldStateKeyValue
  }

  @Override
  public FlatDbStrategy getFlatDbStrategy() {
    return flatDbStrategyProvider.getFlatDbStrategy(composedWorldStateStorage);
  public BonsaiFlatDbStrategy getFlatDbStrategy() {
    return (BonsaiFlatDbStrategy)
        flatDbStrategyProvider.getFlatDbStrategy(composedWorldStateStorage);
  }

  @Override
@@ -189,7 +187,7 @@ public class BonsaiWorldStateKeyValueStorage extends DiffBasedWorldStateKeyValue
    return new Updater(
        composedWorldStateStorage.startTransaction(),
        trieLogStorage.startTransaction(),
        flatDbStrategyProvider.getFlatDbStrategy(composedWorldStateStorage));
        getFlatDbStrategy());
  }

  public static class Updater implements DiffBasedWorldStateKeyValueStorage.Updater {

@@ -0,0 +1,173 @@
/*
 * Copyright contributors to Hyperledger Besu.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 */
package org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.flat;

import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.ACCOUNT_INFO_STATE;
import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.ACCOUNT_STORAGE_STORAGE;
import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.CODE_STORAGE;

import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.datatypes.StorageSlotKey;
import org.hyperledger.besu.ethereum.trie.NodeLoader;
import org.hyperledger.besu.ethereum.trie.diffbased.common.storage.flat.CodeStorageStrategy;
import org.hyperledger.besu.ethereum.trie.diffbased.common.storage.flat.FlatDbStrategy;
import org.hyperledger.besu.plugin.services.MetricsSystem;
import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorageTransaction;

import java.util.Optional;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.stream.Stream;

import kotlin.Pair;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.bytes.Bytes32;

/**
 * This class represents a FlatDbReaderStrategy, which is responsible for reading and writing data
 * from flat databases. It implements various methods for storing and retrieving account data, code
 * data, and storage data from the corresponding KeyValueStorage.
 */
public abstract class BonsaiFlatDbStrategy extends FlatDbStrategy {

  public BonsaiFlatDbStrategy(
      final MetricsSystem metricsSystem, final CodeStorageStrategy codeStorageStrategy) {
    super(metricsSystem, codeStorageStrategy);
  }

  /*
   * Retrieves the account data for the given account hash, using the world state root hash supplier and node loader.
   */
  public abstract Optional<Bytes> getFlatAccount(
      Supplier<Optional<Bytes>> worldStateRootHashSupplier,
      NodeLoader nodeLoader,
      Hash accountHash,
      SegmentedKeyValueStorage storage);

  /*
   * Retrieves the storage value for the given account hash and storage slot key, using the world state root hash supplier, storage root supplier, and node loader.
   */

  public abstract Optional<Bytes> getFlatStorageValueByStorageSlotKey(
      Supplier<Optional<Bytes>> worldStateRootHashSupplier,
      Supplier<Optional<Hash>> storageRootSupplier,
      NodeLoader nodeLoader,
      Hash accountHash,
      StorageSlotKey storageSlotKey,
      SegmentedKeyValueStorage storageStorage);

  @Override
  public void putFlatAccount(
      final SegmentedKeyValueStorageTransaction transaction,
      final Hash accountHash,
      final Bytes accountValue) {
    transaction.put(ACCOUNT_INFO_STATE, accountHash.toArrayUnsafe(), accountValue.toArrayUnsafe());
  }

  @Override
  public void removeFlatAccount(
      final SegmentedKeyValueStorageTransaction transaction, final Hash accountHash) {
    transaction.remove(ACCOUNT_INFO_STATE, accountHash.toArrayUnsafe());
  }

  @Override
  public void putFlatAccountStorageValueByStorageSlotHash(
      final SegmentedKeyValueStorageTransaction transaction,
      final Hash accountHash,
      final Hash slotHash,
      final Bytes storage) {
    transaction.put(
        ACCOUNT_STORAGE_STORAGE,
        Bytes.concatenate(accountHash, slotHash).toArrayUnsafe(),
        storage.toArrayUnsafe());
  }

  @Override
  public void removeFlatAccountStorageValueByStorageSlotHash(
      final SegmentedKeyValueStorageTransaction transaction,
      final Hash accountHash,
      final Hash slotHash) {
    transaction.remove(
        ACCOUNT_STORAGE_STORAGE, Bytes.concatenate(accountHash, slotHash).toArrayUnsafe());
  }

  @Override
  public void clearAll(final SegmentedKeyValueStorage storage) {
    storage.clear(ACCOUNT_INFO_STATE);
    storage.clear(ACCOUNT_STORAGE_STORAGE);
    storage.clear(CODE_STORAGE);
  }

  @Override
  public void resetOnResync(final SegmentedKeyValueStorage storage) {
    storage.clear(ACCOUNT_INFO_STATE);
    storage.clear(ACCOUNT_STORAGE_STORAGE);
  }

  @Override
  protected Stream<Pair<Bytes32, Bytes>> storageToPairStream(
      final SegmentedKeyValueStorage storage,
      final Hash accountHash,
      final Bytes startKeyHash,
      final Function<Bytes, Bytes> valueMapper) {

    return storage
        .streamFromKey(
            ACCOUNT_STORAGE_STORAGE, Bytes.concatenate(accountHash, startKeyHash).toArrayUnsafe())
        .takeWhile(pair -> Bytes.wrap(pair.getKey()).slice(0, Hash.SIZE).equals(accountHash))
        .map(
            pair ->
                new Pair<>(
                    Bytes32.wrap(Bytes.wrap(pair.getKey()).slice(Hash.SIZE)),
                    valueMapper.apply(Bytes.wrap(pair.getValue()).trimLeadingZeros())));
  }

  @Override
  protected Stream<Pair<Bytes32, Bytes>> storageToPairStream(
      final SegmentedKeyValueStorage storage,
      final Hash accountHash,
      final Bytes startKeyHash,
      final Bytes32 endKeyHash,
      final Function<Bytes, Bytes> valueMapper) {

    return storage
        .streamFromKey(
            ACCOUNT_STORAGE_STORAGE,
            Bytes.concatenate(accountHash, startKeyHash).toArrayUnsafe(),
            Bytes.concatenate(accountHash, endKeyHash).toArrayUnsafe())
        .map(
            pair ->
                new Pair<>(
                    Bytes32.wrap(Bytes.wrap(pair.getKey()).slice(Hash.SIZE)),
                    valueMapper.apply(Bytes.wrap(pair.getValue()).trimLeadingZeros())));
  }

  @Override
  protected Stream<Pair<Bytes32, Bytes>> accountsToPairStream(
      final SegmentedKeyValueStorage storage, final Bytes startKeyHash, final Bytes32 endKeyHash) {
    return storage
        .streamFromKey(ACCOUNT_INFO_STATE, startKeyHash.toArrayUnsafe(), endKeyHash.toArrayUnsafe())
        .map(pair -> new Pair<>(Bytes32.wrap(pair.getKey()), Bytes.wrap(pair.getValue())));
  }

  @Override
  protected Stream<Pair<Bytes32, Bytes>> accountsToPairStream(
      final SegmentedKeyValueStorage storage, final Bytes startKeyHash) {
    return storage
        .streamFromKey(ACCOUNT_INFO_STATE, startKeyHash.toArrayUnsafe())
        .map(pair -> new Pair<>(Bytes32.wrap(pair.getKey()), Bytes.wrap(pair.getValue())));
  }
}
@@ -0,0 +1,88 @@
|
||||
/*
|
||||
* Copyright contributors to Hyperledger Besu.
|
||||
*
|
||||
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 */
package org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.flat;

import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.TRIE_BRANCH_STORAGE;

import org.hyperledger.besu.ethereum.trie.diffbased.common.storage.flat.CodeStorageStrategy;
import org.hyperledger.besu.ethereum.trie.diffbased.common.storage.flat.FlatDbStrategy;
import org.hyperledger.besu.ethereum.trie.diffbased.common.storage.flat.FlatDbStrategyProvider;
import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration;
import org.hyperledger.besu.ethereum.worldstate.FlatDbMode;
import org.hyperledger.besu.plugin.services.MetricsSystem;
import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorageTransaction;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class BonsaiFlatDbStrategyProvider extends FlatDbStrategyProvider {

  private static final Logger LOG = LoggerFactory.getLogger(BonsaiFlatDbStrategyProvider.class);

  public BonsaiFlatDbStrategyProvider(
      final MetricsSystem metricsSystem, final DataStorageConfiguration dataStorageConfiguration) {
    super(metricsSystem, dataStorageConfiguration);
  }

  @Override
  protected FlatDbMode getRequestedFlatDbMode(
      final DataStorageConfiguration dataStorageConfiguration) {
    return dataStorageConfiguration
            .getDiffBasedSubStorageConfiguration()
            .getUnstable()
            .getFullFlatDbEnabled()
        ? FlatDbMode.FULL
        : FlatDbMode.PARTIAL;
  }

  @Override
  protected FlatDbMode alternativeFlatDbModeForExistingDatabase() {
    return FlatDbMode.PARTIAL;
  }

  public void upgradeToFullFlatDbMode(final SegmentedKeyValueStorage composedWorldStateStorage) {
    final SegmentedKeyValueStorageTransaction transaction =
        composedWorldStateStorage.startTransaction();
    LOG.info("setting FlatDbStrategy to FULL");
    transaction.put(
        TRIE_BRANCH_STORAGE, FLAT_DB_MODE, FlatDbMode.FULL.getVersion().toArrayUnsafe());
    transaction.commit();
    loadFlatDbStrategy(composedWorldStateStorage); // force reload of flat db reader strategy
  }

  public void downgradeToPartialFlatDbMode(
      final SegmentedKeyValueStorage composedWorldStateStorage) {
    final SegmentedKeyValueStorageTransaction transaction =
        composedWorldStateStorage.startTransaction();
    LOG.info("setting FlatDbStrategy to PARTIAL");
    transaction.put(
        TRIE_BRANCH_STORAGE, FLAT_DB_MODE, FlatDbMode.PARTIAL.getVersion().toArrayUnsafe());
    transaction.commit();
    loadFlatDbStrategy(composedWorldStateStorage); // force reload of flat db reader strategy
  }

  @Override
  protected FlatDbStrategy createFlatDbStrategy(
      final FlatDbMode flatDbMode,
      final MetricsSystem metricsSystem,
      final CodeStorageStrategy codeStorageStrategy) {
    if (flatDbMode == FlatDbMode.FULL) {
      return new BonsaiFullFlatDbStrategy(metricsSystem, codeStorageStrategy);
    } else {
      return new BonsaiPartialFlatDbStrategy(metricsSystem, codeStorageStrategy);
    }
  }
}
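Note: the mode-switching helpers above persist the chosen FlatDbMode under the TRIE_BRANCH_STORAGE segment and then reload the strategy. A minimal usage sketch, assuming an already-composed SegmentedKeyValueStorage named worldStateStorage (that variable name is illustrative, not part of this change), along the lines of what the updated tests do:

    // build the provider from a metrics system and the default data storage configuration
    final BonsaiFlatDbStrategyProvider provider =
        new BonsaiFlatDbStrategyProvider(
            new NoOpMetricsSystem(), DataStorageConfiguration.DEFAULT_CONFIG);
    provider.loadFlatDbStrategy(worldStateStorage);       // derive FULL or PARTIAL from config/db
    provider.upgradeToFullFlatDbMode(worldStateStorage);  // persist FULL and reload the strategy
    final FlatDbStrategy strategy = provider.getFlatDbStrategy(worldStateStorage);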
@@ -21,7 +21,6 @@ import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.datatypes.StorageSlotKey;
import org.hyperledger.besu.ethereum.trie.NodeLoader;
import org.hyperledger.besu.ethereum.trie.diffbased.common.storage.flat.CodeStorageStrategy;
import org.hyperledger.besu.ethereum.trie.diffbased.common.storage.flat.FlatDbStrategy;
import org.hyperledger.besu.metrics.BesuMetricCategory;
import org.hyperledger.besu.plugin.services.MetricsSystem;
import org.hyperledger.besu.plugin.services.metrics.Counter;
@@ -32,13 +31,13 @@ import java.util.function.Supplier;

import org.apache.tuweni.bytes.Bytes;

public class FullFlatDbStrategy extends FlatDbStrategy {
public class BonsaiFullFlatDbStrategy extends BonsaiFlatDbStrategy {

  protected final Counter getAccountNotFoundInFlatDatabaseCounter;

  protected final Counter getStorageValueNotFoundInFlatDatabaseCounter;

  public FullFlatDbStrategy(
  public BonsaiFullFlatDbStrategy(
      final MetricsSystem metricsSystem, final CodeStorageStrategy codeStorageStrategy) {
    super(metricsSystem, codeStorageStrategy);
@@ -21,7 +21,6 @@ import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.datatypes.StorageSlotKey;
import org.hyperledger.besu.ethereum.trie.NodeLoader;
import org.hyperledger.besu.ethereum.trie.diffbased.common.storage.flat.CodeStorageStrategy;
import org.hyperledger.besu.ethereum.trie.diffbased.common.storage.flat.FlatDbStrategy;
import org.hyperledger.besu.ethereum.trie.patricia.StoredMerklePatriciaTrie;
import org.hyperledger.besu.ethereum.trie.patricia.StoredNodeFactory;
import org.hyperledger.besu.metrics.BesuMetricCategory;
@@ -45,7 +44,7 @@ import org.apache.tuweni.rlp.RLP;
 * methods, which checks if the data is present in the flat database, and if not, queries the merkle
 * trie
 */
public class PartialFlatDbStrategy extends FlatDbStrategy {
public class BonsaiPartialFlatDbStrategy extends BonsaiFlatDbStrategy {

  protected final Counter getAccountMerkleTrieCounter;
  protected final Counter getAccountMissingMerkleTrieCounter;
@@ -53,7 +52,7 @@ public class PartialFlatDbStrategy extends FlatDbStrategy {
  protected final Counter getStorageValueMerkleTrieCounter;
  protected final Counter getStorageValueMissingMerkleTrieCounter;

  public PartialFlatDbStrategy(
  public BonsaiPartialFlatDbStrategy(
      final MetricsSystem metricsSystem, final CodeStorageStrategy codeStorageStrategy) {
    super(metricsSystem, codeStorageStrategy);
    getAccountMerkleTrieCounter =
@@ -14,13 +14,7 @@
 */
package org.hyperledger.besu.ethereum.trie.diffbased.common.storage.flat;

import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.ACCOUNT_INFO_STATE;
import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.ACCOUNT_STORAGE_STORAGE;
import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.CODE_STORAGE;

import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.datatypes.StorageSlotKey;
import org.hyperledger.besu.ethereum.trie.NodeLoader;
import org.hyperledger.besu.metrics.BesuMetricCategory;
import org.hyperledger.besu.plugin.services.MetricsSystem;
import org.hyperledger.besu.plugin.services.metrics.Counter;
@@ -33,7 +27,6 @@ import java.util.Optional;
import java.util.TreeMap;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.Stream;

@@ -86,27 +79,6 @@ public abstract class FlatDbStrategy {
        "Number of storage slots found in the flat database");
  }

  /*
   * Retrieves the account data for the given account hash, using the world state root hash supplier and node loader.
   */
  public abstract Optional<Bytes> getFlatAccount(
      Supplier<Optional<Bytes>> worldStateRootHashSupplier,
      NodeLoader nodeLoader,
      Hash accountHash,
      SegmentedKeyValueStorage storage);

  /*
   * Retrieves the storage value for the given account hash and storage slot key, using the world state root hash supplier, storage root supplier, and node loader.
   */

  public abstract Optional<Bytes> getFlatStorageValueByStorageSlotKey(
      Supplier<Optional<Bytes>> worldStateRootHashSupplier,
      Supplier<Optional<Hash>> storageRootSupplier,
      NodeLoader nodeLoader,
      Hash accountHash,
      StorageSlotKey storageSlotKey,
      SegmentedKeyValueStorage storageStorage);

  public boolean isCodeByCodeHash() {
    return codeStorageStrategy instanceof CodeHashCodeStorageStrategy;
  }
@@ -123,46 +95,6 @@ public abstract class FlatDbStrategy {
  }
}

  /*
   * Puts the account data for the given account hash, using the world state root hash supplier and node loader.
   */
  public void putFlatAccount(
      final SegmentedKeyValueStorageTransaction transaction,
      final Hash accountHash,
      final Bytes accountValue) {
    transaction.put(ACCOUNT_INFO_STATE, accountHash.toArrayUnsafe(), accountValue.toArrayUnsafe());
  }

  public void removeFlatAccount(
      final SegmentedKeyValueStorageTransaction transaction, final Hash accountHash) {
    transaction.remove(ACCOUNT_INFO_STATE, accountHash.toArrayUnsafe());
  }

  /*
   * Puts the storage value for the given account hash and storage slot key, using the world state root hash supplier, storage root supplier, and node loader.
   */
  public void putFlatAccountStorageValueByStorageSlotHash(
      final SegmentedKeyValueStorageTransaction transaction,
      final Hash accountHash,
      final Hash slotHash,
      final Bytes storage) {
    transaction.put(
        ACCOUNT_STORAGE_STORAGE,
        Bytes.concatenate(accountHash, slotHash).toArrayUnsafe(),
        storage.toArrayUnsafe());
  }

  /*
   * Removes the storage value for the given account hash and storage slot key, using the world state root hash supplier, storage root supplier, and node loader.
   */
  public void removeFlatAccountStorageValueByStorageSlotHash(
      final SegmentedKeyValueStorageTransaction transaction,
      final Hash accountHash,
      final Hash slotHash) {
    transaction.remove(
        ACCOUNT_STORAGE_STORAGE, Bytes.concatenate(accountHash, slotHash).toArrayUnsafe());
  }

  /*
   * Removes code for the given account hash.
   */
@@ -184,16 +116,37 @@ public abstract class FlatDbStrategy {
    codeStorageStrategy.putFlatCode(transaction, accountHash, codeHash, code);
  }

  public void clearAll(final SegmentedKeyValueStorage storage) {
    storage.clear(ACCOUNT_INFO_STATE);
    storage.clear(ACCOUNT_STORAGE_STORAGE);
    storage.clear(CODE_STORAGE);
  }
  /*
   * Puts the account data for the given account hash, using the world state root hash supplier and node loader.
   */
  public abstract void putFlatAccount(
      final SegmentedKeyValueStorageTransaction transaction,
      final Hash accountHash,
      final Bytes accountValue);

  public void resetOnResync(final SegmentedKeyValueStorage storage) {
    storage.clear(ACCOUNT_INFO_STATE);
    storage.clear(ACCOUNT_STORAGE_STORAGE);
  }
  public abstract void removeFlatAccount(
      final SegmentedKeyValueStorageTransaction transaction, final Hash accountHash);

  /*
   * Puts the storage value for the given account hash and storage slot key, using the world state root hash supplier, storage root supplier, and node loader.
   */
  public abstract void putFlatAccountStorageValueByStorageSlotHash(
      final SegmentedKeyValueStorageTransaction transaction,
      final Hash accountHash,
      final Hash slotHash,
      final Bytes storage);

  /*
   * Removes the storage value for the given account hash and storage slot key, using the world state root hash supplier, storage root supplier, and node loader.
   */
  public abstract void removeFlatAccountStorageValueByStorageSlotHash(
      final SegmentedKeyValueStorageTransaction transaction,
      final Hash accountHash,
      final Hash slotHash);

  public abstract void clearAll(final SegmentedKeyValueStorage storage);

  public abstract void resetOnResync(final SegmentedKeyValueStorage storage);

  public NavigableMap<Bytes32, Bytes> streamAccountFlatDatabase(
      final SegmentedKeyValueStorage storage,
@@ -249,57 +202,26 @@ public abstract class FlatDbStrategy {
            .takeWhile(takeWhile));
  }

  private static Stream<Pair<Bytes32, Bytes>> storageToPairStream(
  protected abstract Stream<Pair<Bytes32, Bytes>> storageToPairStream(
      final SegmentedKeyValueStorage storage,
      final Hash accountHash,
      final Bytes startKeyHash,
      final Function<Bytes, Bytes> valueMapper) {
      final Function<Bytes, Bytes> valueMapper);

    return storage
        .streamFromKey(
            ACCOUNT_STORAGE_STORAGE, Bytes.concatenate(accountHash, startKeyHash).toArrayUnsafe())
        .takeWhile(pair -> Bytes.wrap(pair.getKey()).slice(0, Hash.SIZE).equals(accountHash))
        .map(
            pair ->
                new Pair<>(
                    Bytes32.wrap(Bytes.wrap(pair.getKey()).slice(Hash.SIZE)),
                    valueMapper.apply(Bytes.wrap(pair.getValue()).trimLeadingZeros())));
  }

  private static Stream<Pair<Bytes32, Bytes>> storageToPairStream(
  protected abstract Stream<Pair<Bytes32, Bytes>> storageToPairStream(
      final SegmentedKeyValueStorage storage,
      final Hash accountHash,
      final Bytes startKeyHash,
      final Bytes32 endKeyHash,
      final Function<Bytes, Bytes> valueMapper) {
      final Function<Bytes, Bytes> valueMapper);

    return storage
        .streamFromKey(
            ACCOUNT_STORAGE_STORAGE,
            Bytes.concatenate(accountHash, startKeyHash).toArrayUnsafe(),
            Bytes.concatenate(accountHash, endKeyHash).toArrayUnsafe())
        .map(
            pair ->
                new Pair<>(
                    Bytes32.wrap(Bytes.wrap(pair.getKey()).slice(Hash.SIZE)),
                    valueMapper.apply(Bytes.wrap(pair.getValue()).trimLeadingZeros())));
  }
  protected abstract Stream<Pair<Bytes32, Bytes>> accountsToPairStream(
      final SegmentedKeyValueStorage storage, final Bytes startKeyHash, final Bytes32 endKeyHash);

  private static Stream<Pair<Bytes32, Bytes>> accountsToPairStream(
      final SegmentedKeyValueStorage storage, final Bytes startKeyHash, final Bytes32 endKeyHash) {
    return storage
        .streamFromKey(ACCOUNT_INFO_STATE, startKeyHash.toArrayUnsafe(), endKeyHash.toArrayUnsafe())
        .map(pair -> new Pair<>(Bytes32.wrap(pair.getKey()), Bytes.wrap(pair.getValue())));
  }
  protected abstract Stream<Pair<Bytes32, Bytes>> accountsToPairStream(
      final SegmentedKeyValueStorage storage, final Bytes startKeyHash);

  private static Stream<Pair<Bytes32, Bytes>> accountsToPairStream(
      final SegmentedKeyValueStorage storage, final Bytes startKeyHash) {
    return storage
        .streamFromKey(ACCOUNT_INFO_STATE, startKeyHash.toArrayUnsafe())
        .map(pair -> new Pair<>(Bytes32.wrap(pair.getKey()), Bytes.wrap(pair.getValue())));
  }

  private static NavigableMap<Bytes32, Bytes> toNavigableMap(
  private NavigableMap<Bytes32, Bytes> toNavigableMap(
      final Stream<Pair<Bytes32, Bytes>> pairStream) {
    final TreeMap<Bytes32, Bytes> collected =
        pairStream.collect(
|
||||
import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.TRIE_BRANCH_STORAGE;
|
||||
import static org.hyperledger.besu.ethereum.trie.diffbased.common.storage.DiffBasedWorldStateKeyValueStorage.WORLD_ROOT_HASH_KEY;
|
||||
|
||||
import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.flat.FullFlatDbStrategy;
|
||||
import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.flat.PartialFlatDbStrategy;
|
||||
import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration;
|
||||
import org.hyperledger.besu.ethereum.worldstate.FlatDbMode;
|
||||
import org.hyperledger.besu.plugin.services.MetricsSystem;
|
||||
import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorage;
|
||||
import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorageTransaction;
|
||||
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.util.Optional;
|
||||
@@ -34,7 +31,7 @@ import org.apache.tuweni.bytes.Bytes;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
public class FlatDbStrategyProvider {
|
||||
public abstract class FlatDbStrategyProvider {
|
||||
private static final Logger LOG = LoggerFactory.getLogger(FlatDbStrategyProvider.class);
|
||||
|
||||
// 0x666C61744462537461747573
|
||||
@@ -61,20 +58,48 @@ public class FlatDbStrategyProvider {
|
||||
deriveUseCodeStorageByHash(composedWorldStateStorage)
|
||||
? new CodeHashCodeStorageStrategy()
|
||||
: new AccountHashCodeStorageStrategy();
|
||||
if (flatDbMode == FlatDbMode.FULL) {
|
||||
this.flatDbStrategy = new FullFlatDbStrategy(metricsSystem, codeStorageStrategy);
|
||||
} else {
|
||||
this.flatDbStrategy = new PartialFlatDbStrategy(metricsSystem, codeStorageStrategy);
|
||||
}
|
||||
this.flatDbStrategy = createFlatDbStrategy(flatDbMode, metricsSystem, codeStorageStrategy);
|
||||
}
|
||||
}
|
||||
|
||||
protected boolean deriveUseCodeStorageByHash(
|
||||
final SegmentedKeyValueStorage composedWorldStateStorage) {
|
||||
final boolean configCodeUsingHash =
|
||||
dataStorageConfiguration
|
||||
.getDiffBasedSubStorageConfiguration()
|
||||
.getUnstable()
|
||||
.getCodeStoredByCodeHashEnabled();
|
||||
boolean codeUsingCodeByHash =
|
||||
detectCodeStorageByHash(composedWorldStateStorage)
|
||||
.map(
|
||||
dbCodeUsingHash -> {
|
||||
if (dbCodeUsingHash != configCodeUsingHash) {
|
||||
LOG.warn(
|
||||
"Bonsai db is using code storage mode {} but config specifies mode {}. Using mode from database",
|
||||
dbCodeUsingHash,
|
||||
configCodeUsingHash);
|
||||
}
|
||||
return dbCodeUsingHash;
|
||||
})
|
||||
.orElse(configCodeUsingHash);
|
||||
LOG.info("DB mode with code stored using code hash enabled = {}", codeUsingCodeByHash);
|
||||
return codeUsingCodeByHash;
|
||||
}
|
||||
|
||||
private Optional<Boolean> detectCodeStorageByHash(
|
||||
final SegmentedKeyValueStorage composedWorldStateStorage) {
|
||||
return composedWorldStateStorage.stream(CODE_STORAGE)
|
||||
.limit(1)
|
||||
.findFirst()
|
||||
.map(
|
||||
keypair ->
|
||||
CodeHashCodeStorageStrategy.isCodeHashValue(keypair.getKey(), keypair.getValue()));
|
||||
}
|
||||
|
||||
@VisibleForTesting
|
||||
FlatDbMode deriveFlatDbStrategy(final SegmentedKeyValueStorage composedWorldStateStorage) {
|
||||
final FlatDbMode requestedFlatDbMode =
|
||||
dataStorageConfiguration.getUnstable().getBonsaiFullFlatDbEnabled()
|
||||
? FlatDbMode.FULL
|
||||
: FlatDbMode.PARTIAL;
|
||||
private FlatDbMode deriveFlatDbStrategy(
|
||||
final SegmentedKeyValueStorage composedWorldStateStorage) {
|
||||
final FlatDbMode requestedFlatDbMode = getRequestedFlatDbMode(dataStorageConfiguration);
|
||||
|
||||
final var existingTrieData =
|
||||
composedWorldStateStorage.get(TRIE_BRANCH_STORAGE, WORLD_ROOT_HASH_KEY).isPresent();
|
||||
@@ -91,7 +116,7 @@ public class FlatDbStrategyProvider {
|
||||
// and default to the storage config otherwise
|
||||
var flatDbModeVal =
|
||||
existingTrieData
|
||||
? FlatDbMode.PARTIAL.getVersion()
|
||||
? alternativeFlatDbModeForExistingDatabase().getVersion()
|
||||
: requestedFlatDbMode.getVersion();
|
||||
// persist this config in the db
|
||||
var setDbModeTx = composedWorldStateStorage.startTransaction();
|
||||
@@ -101,42 +126,11 @@ public class FlatDbStrategyProvider {
|
||||
|
||||
return flatDbModeVal;
|
||||
}));
|
||||
LOG.info("Bonsai flat db mode found {}", flatDbMode);
|
||||
LOG.info("Flat db mode found {}", flatDbMode);
|
||||
|
||||
return flatDbMode;
|
||||
}
|
||||
|
||||
protected boolean deriveUseCodeStorageByHash(
|
||||
final SegmentedKeyValueStorage composedWorldStateStorage) {
|
||||
final boolean configCodeUsingHash =
|
||||
dataStorageConfiguration.getUnstable().getBonsaiCodeStoredByCodeHashEnabled();
|
||||
boolean codeUsingCodeByHash =
|
||||
detectCodeStorageByHash(composedWorldStateStorage)
|
||||
.map(
|
||||
dbCodeUsingHash -> {
|
||||
if (dbCodeUsingHash != configCodeUsingHash) {
|
||||
LOG.warn(
|
||||
"Bonsai db is using code storage mode {} but config specifies mode {}. Using mode from database",
|
||||
dbCodeUsingHash,
|
||||
configCodeUsingHash);
|
||||
}
|
||||
return dbCodeUsingHash;
|
||||
})
|
||||
.orElse(configCodeUsingHash);
|
||||
LOG.info("Bonsai db mode with code stored using code hash enabled = {}", codeUsingCodeByHash);
|
||||
return codeUsingCodeByHash;
|
||||
}
|
||||
|
||||
private Optional<Boolean> detectCodeStorageByHash(
|
||||
final SegmentedKeyValueStorage composedWorldStateStorage) {
|
||||
return composedWorldStateStorage.stream(CODE_STORAGE)
|
||||
.limit(1)
|
||||
.findFirst()
|
||||
.map(
|
||||
keypair ->
|
||||
CodeHashCodeStorageStrategy.isCodeHashValue(keypair.getKey(), keypair.getValue()));
|
||||
}
|
||||
|
||||
public FlatDbStrategy getFlatDbStrategy(
|
||||
final SegmentedKeyValueStorage composedWorldStateStorage) {
|
||||
if (flatDbStrategy == null) {
|
||||
@@ -145,28 +139,17 @@ public class FlatDbStrategyProvider {
|
||||
return flatDbStrategy;
|
||||
}
|
||||
|
||||
public void upgradeToFullFlatDbMode(final SegmentedKeyValueStorage composedWorldStateStorage) {
|
||||
final SegmentedKeyValueStorageTransaction transaction =
|
||||
composedWorldStateStorage.startTransaction();
|
||||
LOG.info("setting FlatDbStrategy to FULL");
|
||||
transaction.put(
|
||||
TRIE_BRANCH_STORAGE, FLAT_DB_MODE, FlatDbMode.FULL.getVersion().toArrayUnsafe());
|
||||
transaction.commit();
|
||||
loadFlatDbStrategy(composedWorldStateStorage); // force reload of flat db reader strategy
|
||||
}
|
||||
|
||||
public void downgradeToPartialFlatDbMode(
|
||||
final SegmentedKeyValueStorage composedWorldStateStorage) {
|
||||
final SegmentedKeyValueStorageTransaction transaction =
|
||||
composedWorldStateStorage.startTransaction();
|
||||
LOG.info("setting FlatDbStrategy to PARTIAL");
|
||||
transaction.put(
|
||||
TRIE_BRANCH_STORAGE, FLAT_DB_MODE, FlatDbMode.PARTIAL.getVersion().toArrayUnsafe());
|
||||
transaction.commit();
|
||||
loadFlatDbStrategy(composedWorldStateStorage); // force reload of flat db reader strategy
|
||||
}
|
||||
|
||||
public FlatDbMode getFlatDbMode() {
|
||||
return flatDbMode;
|
||||
}
|
||||
|
||||
protected abstract FlatDbMode getRequestedFlatDbMode(
|
||||
final DataStorageConfiguration dataStorageConfiguration);
|
||||
|
||||
protected abstract FlatDbMode alternativeFlatDbModeForExistingDatabase();
|
||||
|
||||
protected abstract FlatDbStrategy createFlatDbStrategy(
|
||||
final FlatDbMode flatDbMode,
|
||||
final MetricsSystem metricsSystem,
|
||||
final CodeStorageStrategy codeStorageStrategy);
|
||||
}
|
||||
|
||||
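With FlatDbStrategyProvider now abstract, any new diff-based provider only has to supply the three hooks declared at the bottom of the class. A rough sketch of a minimal subclass against those signatures (the class name and chosen return values are illustrative only; the Bonsai provider earlier in this commit is the real implementation):

    public class ExampleFlatDbStrategyProvider extends FlatDbStrategyProvider {

      ExampleFlatDbStrategyProvider(
          final MetricsSystem metricsSystem, final DataStorageConfiguration configuration) {
        super(metricsSystem, configuration);
      }

      @Override
      protected FlatDbMode getRequestedFlatDbMode(final DataStorageConfiguration configuration) {
        return FlatDbMode.FULL; // mode requested by configuration
      }

      @Override
      protected FlatDbMode alternativeFlatDbModeForExistingDatabase() {
        return FlatDbMode.PARTIAL; // fallback when a populated database has no persisted mode
      }

      @Override
      protected FlatDbStrategy createFlatDbStrategy(
          final FlatDbMode flatDbMode,
          final MetricsSystem metricsSystem,
          final CodeStorageStrategy codeStorageStrategy) {
        return new BonsaiFullFlatDbStrategy(metricsSystem, codeStorageStrategy);
      }
    }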
@@ -14,6 +14,7 @@
 */
package org.hyperledger.besu.ethereum.worldstate;

import org.hyperledger.besu.ethereum.worldstate.DiffBasedSubStorageConfiguration.DiffBasedUnstable;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;

import org.immutables.value.Value;
@@ -22,90 +23,40 @@ import org.immutables.value.Value;
@Value.Enclosing
public interface DataStorageConfiguration {

  long DEFAULT_BONSAI_MAX_LAYERS_TO_LOAD = 512;
  boolean DEFAULT_BONSAI_LIMIT_TRIE_LOGS_ENABLED = true;
  long MINIMUM_BONSAI_TRIE_LOG_RETENTION_LIMIT = DEFAULT_BONSAI_MAX_LAYERS_TO_LOAD;
  int DEFAULT_BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE = 5_000;
  boolean DEFAULT_RECEIPT_COMPACTION_ENABLED = true;

  DataStorageConfiguration DEFAULT_CONFIG =
      ImmutableDataStorageConfiguration.builder()
          .dataStorageFormat(DataStorageFormat.BONSAI)
          .bonsaiMaxLayersToLoad(DEFAULT_BONSAI_MAX_LAYERS_TO_LOAD)
          .unstable(Unstable.DEFAULT)
          .diffBasedSubStorageConfiguration(DiffBasedSubStorageConfiguration.DEFAULT)
          .build();

  DataStorageConfiguration DEFAULT_BONSAI_CONFIG =
      ImmutableDataStorageConfiguration.builder()
          .dataStorageFormat(DataStorageFormat.BONSAI)
          .bonsaiMaxLayersToLoad(DEFAULT_BONSAI_MAX_LAYERS_TO_LOAD)
          .build();
  DataStorageConfiguration DEFAULT_BONSAI_CONFIG = DEFAULT_CONFIG;

  DataStorageConfiguration DEFAULT_BONSAI_PARTIAL_DB_CONFIG =
      ImmutableDataStorageConfiguration.builder()
          .dataStorageFormat(DataStorageFormat.BONSAI)
          .bonsaiMaxLayersToLoad(DEFAULT_BONSAI_MAX_LAYERS_TO_LOAD)
          .unstable(Unstable.DEFAULT_PARTIAL)
          .diffBasedSubStorageConfiguration(
              ImmutableDiffBasedSubStorageConfiguration.builder()
                  .unstable(DiffBasedUnstable.PARTIAL_MODE)
                  .build())
          .build();

  DataStorageConfiguration DEFAULT_FOREST_CONFIG =
      ImmutableDataStorageConfiguration.builder()
          .dataStorageFormat(DataStorageFormat.FOREST)
          .bonsaiMaxLayersToLoad(DEFAULT_BONSAI_MAX_LAYERS_TO_LOAD)
          .unstable(Unstable.DEFAULT)
          .diffBasedSubStorageConfiguration(DiffBasedSubStorageConfiguration.DISABLED)
          .build();

  DataStorageFormat getDataStorageFormat();

  Long getBonsaiMaxLayersToLoad();

  @Value.Default
  default boolean getBonsaiLimitTrieLogsEnabled() {
    return DEFAULT_BONSAI_LIMIT_TRIE_LOGS_ENABLED;
  }

  @Value.Default
  default int getBonsaiTrieLogPruningWindowSize() {
    return DEFAULT_BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE;
  default DiffBasedSubStorageConfiguration getDiffBasedSubStorageConfiguration() {
    return DiffBasedSubStorageConfiguration.DEFAULT;
  }

  @Value.Default
  default boolean getReceiptCompactionEnabled() {
    return DEFAULT_RECEIPT_COMPACTION_ENABLED;
  }

  @Value.Default
  default Unstable getUnstable() {
    return Unstable.DEFAULT;
  }

  @Value.Immutable
  interface Unstable {

    boolean DEFAULT_BONSAI_FULL_FLAT_DB_ENABLED = true;
    boolean DEFAULT_BONSAI_CODE_USING_CODE_HASH_ENABLED = true;

    boolean DEFAULT_PARALLEL_TRX_ENABLED = false;

    DataStorageConfiguration.Unstable DEFAULT =
        ImmutableDataStorageConfiguration.Unstable.builder().build();

    DataStorageConfiguration.Unstable DEFAULT_PARTIAL =
        ImmutableDataStorageConfiguration.Unstable.builder().bonsaiFullFlatDbEnabled(false).build();

    @Value.Default
    default boolean getBonsaiFullFlatDbEnabled() {
      return DEFAULT_BONSAI_FULL_FLAT_DB_ENABLED;
    }

    @Value.Default
    default boolean getBonsaiCodeStoredByCodeHashEnabled() {
      return DEFAULT_BONSAI_CODE_USING_CODE_HASH_ENABLED;
    }

    @Value.Default
    default boolean isParallelTxProcessingEnabled() {
      return DEFAULT_PARALLEL_TRX_ENABLED;
    }
  }
}
@@ -0,0 +1,95 @@
/*
 * Copyright ConsenSys AG.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 */
package org.hyperledger.besu.ethereum.worldstate;

import org.immutables.value.Value;

@Value.Immutable
@Value.Enclosing
public interface DiffBasedSubStorageConfiguration {

  DiffBasedSubStorageConfiguration DEFAULT =
      ImmutableDiffBasedSubStorageConfiguration.builder().build();

  DiffBasedSubStorageConfiguration DISABLED =
      ImmutableDiffBasedSubStorageConfiguration.builder()
          .limitTrieLogsEnabled(false)
          .unstable(DiffBasedUnstable.DISABLED)
          .build();

  long DEFAULT_MAX_LAYERS_TO_LOAD = 512;
  boolean DEFAULT_LIMIT_TRIE_LOGS_ENABLED = true;
  long MINIMUM_TRIE_LOG_RETENTION_LIMIT = DEFAULT_MAX_LAYERS_TO_LOAD;
  int DEFAULT_TRIE_LOG_PRUNING_WINDOW_SIZE = 5_000;

  @Value.Default
  default Long getMaxLayersToLoad() {
    return DEFAULT_MAX_LAYERS_TO_LOAD;
  }

  @Value.Default
  default boolean getLimitTrieLogsEnabled() {
    return DEFAULT_LIMIT_TRIE_LOGS_ENABLED;
  }

  @Value.Default
  default int getTrieLogPruningWindowSize() {
    return DEFAULT_TRIE_LOG_PRUNING_WINDOW_SIZE;
  }

  @Value.Default
  default DiffBasedUnstable getUnstable() {
    return DiffBasedUnstable.DEFAULT;
  }

  @Value.Immutable
  interface DiffBasedUnstable {

    DiffBasedSubStorageConfiguration.DiffBasedUnstable DEFAULT =
        ImmutableDiffBasedSubStorageConfiguration.DiffBasedUnstable.builder().build();

    DiffBasedSubStorageConfiguration.DiffBasedUnstable PARTIAL_MODE =
        ImmutableDiffBasedSubStorageConfiguration.DiffBasedUnstable.builder()
            .fullFlatDbEnabled(false)
            .build();

    DiffBasedSubStorageConfiguration.DiffBasedUnstable DISABLED =
        ImmutableDiffBasedSubStorageConfiguration.DiffBasedUnstable.builder()
            .fullFlatDbEnabled(false)
            .codeStoredByCodeHashEnabled(false)
            .isParallelTxProcessingEnabled(false)
            .build();

    boolean DEFAULT_FULL_FLAT_DB_ENABLED = true;
    boolean DEFAULT_CODE_USING_CODE_HASH_ENABLED = true;

    boolean DEFAULT_PARALLEL_TRX_ENABLED = false;

    @Value.Default
    default boolean getFullFlatDbEnabled() {
      return DEFAULT_FULL_FLAT_DB_ENABLED;
    }

    @Value.Default
    default boolean getCodeStoredByCodeHashEnabled() {
      return DEFAULT_CODE_USING_CODE_HASH_ENABLED;
    }

    @Value.Default
    default boolean isParallelTxProcessingEnabled() {
      return DEFAULT_PARALLEL_TRX_ENABLED;
    }
  }
}
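The tests updated further down exercise this new sub-configuration; for quick reference, a configuration with code-by-hash storage disabled would be assembled roughly like this, mirroring the builder chains used in those tests (generated Immutables builders, not new API):

    final DataStorageConfiguration configuration =
        ImmutableDataStorageConfiguration.builder()
            .dataStorageFormat(DataStorageFormat.BONSAI)
            .diffBasedSubStorageConfiguration(
                ImmutableDiffBasedSubStorageConfiguration.builder()
                    .maxLayersToLoad(DiffBasedSubStorageConfiguration.DEFAULT_MAX_LAYERS_TO_LOAD)
                    .unstable(
                        ImmutableDiffBasedSubStorageConfiguration.DiffBasedUnstable.builder()
                            .codeStoredByCodeHashEnabled(false)
                            .build())
                    .build())
            .build();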
@@ -17,7 +17,7 @@ package org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage;
import static org.assertj.core.api.Assertions.assertThat;
import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.TRIE_BRANCH_STORAGE;
import static org.hyperledger.besu.ethereum.trie.diffbased.common.storage.DiffBasedWorldStateKeyValueStorage.WORLD_ROOT_HASH_KEY;
import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.DEFAULT_BONSAI_MAX_LAYERS_TO_LOAD;
import static org.hyperledger.besu.ethereum.worldstate.DiffBasedSubStorageConfiguration.DEFAULT_MAX_LAYERS_TO_LOAD;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.mock;
@@ -40,6 +40,7 @@ import org.hyperledger.besu.ethereum.trie.patricia.StoredMerklePatriciaTrie;
import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration;
import org.hyperledger.besu.ethereum.worldstate.FlatDbMode;
import org.hyperledger.besu.ethereum.worldstate.ImmutableDataStorageConfiguration;
import org.hyperledger.besu.ethereum.worldstate.ImmutableDiffBasedSubStorageConfiguration;
import org.hyperledger.besu.ethereum.worldstate.StateTrieAccountValue;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorageCoordinator;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
@@ -472,10 +473,13 @@ public class BonsaiWorldStateKeyValueStorageTest {
        new NoOpMetricsSystem(),
        ImmutableDataStorageConfiguration.builder()
            .dataStorageFormat(DataStorageFormat.BONSAI)
            .bonsaiMaxLayersToLoad(DEFAULT_BONSAI_MAX_LAYERS_TO_LOAD)
            .unstable(
                ImmutableDataStorageConfiguration.Unstable.builder()
                    .bonsaiCodeStoredByCodeHashEnabled(useCodeHashStorage)
            .diffBasedSubStorageConfiguration(
                ImmutableDiffBasedSubStorageConfiguration.builder()
                    .maxLayersToLoad(DEFAULT_MAX_LAYERS_TO_LOAD)
                    .unstable(
                        ImmutableDiffBasedSubStorageConfiguration.DiffBasedUnstable.builder()
                            .codeStoredByCodeHashEnabled(useCodeHashStorage)
                            .build())
                    .build())
            .build());
  }
@@ -15,15 +15,17 @@
package org.hyperledger.besu.ethereum.trie.diffbased.common.storage.flat;

import static org.assertj.core.api.Assertions.assertThat;
import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.DEFAULT_BONSAI_MAX_LAYERS_TO_LOAD;

import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier;
import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.flat.FullFlatDbStrategy;
import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.flat.PartialFlatDbStrategy;
import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.flat.BonsaiFlatDbStrategyProvider;
import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.flat.BonsaiFullFlatDbStrategy;
import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.flat.BonsaiPartialFlatDbStrategy;
import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration;
import org.hyperledger.besu.ethereum.worldstate.DiffBasedSubStorageConfiguration;
import org.hyperledger.besu.ethereum.worldstate.FlatDbMode;
import org.hyperledger.besu.ethereum.worldstate.ImmutableDataStorageConfiguration;
import org.hyperledger.besu.ethereum.worldstate.ImmutableDiffBasedSubStorageConfiguration;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorage;
@@ -42,9 +44,10 @@ import org.junit.jupiter.params.provider.ValueSource;
import org.mockito.junit.jupiter.MockitoExtension;

@ExtendWith(MockitoExtension.class)
class FlatDbStrategyProviderTest {
  private final FlatDbStrategyProvider flatDbStrategyProvider =
      new FlatDbStrategyProvider(new NoOpMetricsSystem(), DataStorageConfiguration.DEFAULT_CONFIG);
class BonsaiFlatDbStrategyProviderTest {
  private final BonsaiFlatDbStrategyProvider flatDbStrategyProvider =
      new BonsaiFlatDbStrategyProvider(
          new NoOpMetricsSystem(), DataStorageConfiguration.DEFAULT_CONFIG);
  private final SegmentedKeyValueStorage composedWorldStateStorage =
      new SegmentedInMemoryKeyValueStorage(
          List.of(
@@ -74,7 +77,7 @@ class FlatDbStrategyProviderTest {
    assertThat(flatDbStrategyProvider.flatDbMode).isEqualTo(FlatDbMode.FULL);
    assertThat(flatDbStrategyProvider.flatDbStrategy).isNotNull();
    assertThat(flatDbStrategyProvider.getFlatDbStrategy(composedWorldStateStorage))
        .isInstanceOf(FullFlatDbStrategy.class);
        .isInstanceOf(BonsaiFullFlatDbStrategy.class);
    assertThat(flatDbStrategyProvider.flatDbStrategy.codeStorageStrategy)
        .isInstanceOf(CodeHashCodeStorageStrategy.class);
  }
@@ -85,14 +88,17 @@ class FlatDbStrategyProviderTest {
    final DataStorageConfiguration dataStorageConfiguration =
        ImmutableDataStorageConfiguration.builder()
            .dataStorageFormat(DataStorageFormat.BONSAI)
            .bonsaiMaxLayersToLoad(DEFAULT_BONSAI_MAX_LAYERS_TO_LOAD)
            .unstable(
                ImmutableDataStorageConfiguration.Unstable.builder()
                    .bonsaiCodeStoredByCodeHashEnabled(codeByHashEnabled)
            .diffBasedSubStorageConfiguration(
                ImmutableDiffBasedSubStorageConfiguration.builder()
                    .maxLayersToLoad(DiffBasedSubStorageConfiguration.DEFAULT_MAX_LAYERS_TO_LOAD)
                    .unstable(
                        ImmutableDiffBasedSubStorageConfiguration.DiffBasedUnstable.builder()
                            .codeStoredByCodeHashEnabled(codeByHashEnabled)
                            .build())
                    .build())
            .build();
    final FlatDbStrategyProvider flatDbStrategyProvider =
        new FlatDbStrategyProvider(new NoOpMetricsSystem(), dataStorageConfiguration);
    final BonsaiFlatDbStrategyProvider flatDbStrategyProvider =
        new BonsaiFlatDbStrategyProvider(new NoOpMetricsSystem(), dataStorageConfiguration);

    flatDbStrategyProvider.loadFlatDbStrategy(composedWorldStateStorage);
    final Class<? extends CodeStorageStrategy> expectedCodeStorageClass =
@@ -110,14 +116,17 @@ class FlatDbStrategyProviderTest {
    final DataStorageConfiguration dataStorageConfiguration =
        ImmutableDataStorageConfiguration.builder()
            .dataStorageFormat(DataStorageFormat.BONSAI)
            .bonsaiMaxLayersToLoad(DEFAULT_BONSAI_MAX_LAYERS_TO_LOAD)
            .unstable(
                ImmutableDataStorageConfiguration.Unstable.builder()
                    .bonsaiCodeStoredByCodeHashEnabled(codeByHashEnabled)
            .diffBasedSubStorageConfiguration(
                ImmutableDiffBasedSubStorageConfiguration.builder()
                    .maxLayersToLoad(DiffBasedSubStorageConfiguration.DEFAULT_MAX_LAYERS_TO_LOAD)
                    .unstable(
                        ImmutableDiffBasedSubStorageConfiguration.DiffBasedUnstable.builder()
                            .codeStoredByCodeHashEnabled(codeByHashEnabled)
                            .build())
                    .build())
            .build();
    final FlatDbStrategyProvider flatDbStrategyProvider =
        new FlatDbStrategyProvider(new NoOpMetricsSystem(), dataStorageConfiguration);
    final BonsaiFlatDbStrategyProvider flatDbStrategyProvider =
        new BonsaiFlatDbStrategyProvider(new NoOpMetricsSystem(), dataStorageConfiguration);

    final SegmentedKeyValueStorageTransaction transaction =
        composedWorldStateStorage.startTransaction();
@@ -140,14 +149,17 @@ class FlatDbStrategyProviderTest {
    final DataStorageConfiguration dataStorageConfiguration =
        ImmutableDataStorageConfiguration.builder()
            .dataStorageFormat(DataStorageFormat.BONSAI)
            .bonsaiMaxLayersToLoad(DEFAULT_BONSAI_MAX_LAYERS_TO_LOAD)
            .unstable(
                ImmutableDataStorageConfiguration.Unstable.builder()
                    .bonsaiCodeStoredByCodeHashEnabled(codeByHashEnabled)
            .diffBasedSubStorageConfiguration(
                ImmutableDiffBasedSubStorageConfiguration.builder()
                    .maxLayersToLoad(DiffBasedSubStorageConfiguration.DEFAULT_MAX_LAYERS_TO_LOAD)
                    .unstable(
                        ImmutableDiffBasedSubStorageConfiguration.DiffBasedUnstable.builder()
                            .codeStoredByCodeHashEnabled(codeByHashEnabled)
                            .build())
                    .build())
            .build();
    final FlatDbStrategyProvider flatDbStrategyProvider =
        new FlatDbStrategyProvider(new NoOpMetricsSystem(), dataStorageConfiguration);
    final BonsaiFlatDbStrategyProvider flatDbStrategyProvider =
        new BonsaiFlatDbStrategyProvider(new NoOpMetricsSystem(), dataStorageConfiguration);

    final SegmentedKeyValueStorageTransaction transaction =
        composedWorldStateStorage.startTransaction();
@@ -171,7 +183,7 @@ class FlatDbStrategyProviderTest {
    assertThat(flatDbStrategyProvider.flatDbMode).isEqualTo(FlatDbMode.PARTIAL);
    assertThat(flatDbStrategyProvider.flatDbStrategy).isNotNull();
    assertThat(flatDbStrategyProvider.getFlatDbStrategy(composedWorldStateStorage))
        .isInstanceOf(PartialFlatDbStrategy.class);
        .isInstanceOf(BonsaiPartialFlatDbStrategy.class);
  }

  private void updateFlatDbMode(final FlatDbMode flatDbMode) {
@@ -38,7 +38,7 @@ import org.hyperledger.besu.ethereum.rlp.RLP;
import org.hyperledger.besu.ethereum.trie.CompactEncoding;
import org.hyperledger.besu.ethereum.trie.MerkleTrie;
import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.BonsaiWorldStateKeyValueStorage;
import org.hyperledger.besu.ethereum.trie.diffbased.common.storage.flat.FlatDbStrategyProvider;
import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.flat.BonsaiFlatDbStrategyProvider;
import org.hyperledger.besu.ethereum.trie.patricia.SimpleMerklePatriciaTrie;
import org.hyperledger.besu.ethereum.trie.patricia.StoredMerklePatriciaTrie;
import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration;
@@ -85,7 +85,8 @@ public class SnapServerTest {
  // force a full flat db with code stored by code hash:
  final BonsaiWorldStateKeyValueStorage inMemoryStorage =
      new BonsaiWorldStateKeyValueStorage(
          new FlatDbStrategyProvider(noopMetrics, DataStorageConfiguration.DEFAULT_BONSAI_CONFIG) {
          new BonsaiFlatDbStrategyProvider(
              noopMetrics, DataStorageConfiguration.DEFAULT_BONSAI_CONFIG) {
            @Override
            public FlatDbMode getFlatDbMode() {
              return FlatDbMode.FULL;
@@ -529,9 +529,7 @@ public class EvmToolCommand implements Runnable {
        messageFrame
            .getExceptionalHaltReason()
            .ifPresent(haltReason -> out.println(haltReason));
        messageFrame
            .getRevertReason()
            .ifPresent(bytes -> out.println(new String(bytes.toArrayUnsafe(), UTF_8)));
        messageFrame.getRevertReason().ifPresent(bytes -> out.println(bytes.toHexString()));
      }
    }
  }
@@ -12,7 +12,7 @@
{"pc":7,"op":82,"gas":"0x2540be3fa","gasCost":"0x6","memSize":0,"stack":["0x4e6f7065","0x0"],"depth":1,"refund":0,"opName":"MSTORE"},
{"pc":8,"op":96,"gas":"0x2540be3f4","gasCost":"0x3","memSize":32,"stack":[],"depth":1,"refund":0,"opName":"PUSH1"},
{"pc":10,"op":96,"gas":"0x2540be3f1","gasCost":"0x3","memSize":32,"stack":["0x4"],"depth":1,"refund":0,"opName":"PUSH1"},
{"pc":12,"op":253,"gas":"0x2540be3ee","gasCost":"0x0","memSize":32,"stack":["0x4","0x1c"],"depth":1,"refund":0,"opName":"REVERT","error":"Nope"},
{"pc":12,"op":253,"gas":"0x2540be3ee","gasCost":"0x0","memSize":32,"stack":["0x4","0x1c"],"depth":1,"refund":0,"opName":"REVERT","error":"0x4e6f7065"},
{"stateRoot":"0x405bbd98da2aca6dff77f79e0b270270c48d6a3e07b76db675b20e454b50bbcb","output":"0x4e6f7065","gasUsed":"0x12","pass":true,"fork":"Cancun"}
]
}
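The fixture change above reflects the tracer change that follows: revert reasons are raw bytes, so they are now reported as hex rather than decoded as UTF-8. For illustration, the ASCII string "Nope" is the four bytes 0x4e 0x6f 0x70 0x65, which is why the expected error field becomes "0x4e6f7065". A minimal sketch using the Tuweni Bytes API already imported in these files:

    // Illustration only: how the hex form relates to the old string form.
    Bytes revertReason = Bytes.wrap("Nope".getBytes(StandardCharsets.UTF_8));
    System.out.println(revertReason.toHexString()); // prints 0x4e6f7065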
@@ -14,8 +14,6 @@
 */
package org.hyperledger.besu.evm.tracing;

import static com.google.common.base.Strings.padStart;

import org.hyperledger.besu.evm.code.OpcodeInfo;
import org.hyperledger.besu.evm.frame.ExceptionalHaltReason;
import org.hyperledger.besu.evm.frame.MessageFrame;
@@ -224,7 +222,7 @@ public class StandardJsonTracer implements OperationTracer {
          .append("\"");
    } else if (messageFrame.getRevertReason().isPresent()) {
      sb.append(",\"error\":\"")
          .append(quoteEscape(messageFrame.getRevertReason().orElse(Bytes.EMPTY)))
          .append(messageFrame.getRevertReason().get().toHexString())
          .append("\"");
    }

@@ -232,37 +230,6 @@ public class StandardJsonTracer implements OperationTracer {
    out.println(sb);
  }

  private static String quoteEscape(final Bytes bytes) {
    final StringBuilder result = new StringBuilder(bytes.size());
    for (final byte b : bytes.toArrayUnsafe()) {
      final int c = Byte.toUnsignedInt(b);
      // list from RFC-4627 section 2
      if (c == '"') {
        result.append("\\\"");
      } else if (c == '\\') {
        result.append("\\\\");
      } else if (c == '/') {
        result.append("\\/");
      } else if (c == '\b') {
        result.append("\\b");
      } else if (c == '\f') {
        result.append("\\f");
      } else if (c == '\n') {
        result.append("\\n");
      } else if (c == '\r') {
        result.append("\\r");
      } else if (c == '\t') {
        result.append("\\t");
      } else if (c <= 0x1F) {
        result.append("\\u");
        result.append(padStart(Integer.toHexString(c), 4, '0'));
      } else {
        result.append((char) b);
      }
    }
    return result.toString();
  }

  @Override
  public void tracePrecompileCall(
      final MessageFrame frame, final long gasRequirement, final Bytes output) {
|
||||
|
||||
import java.io.PrintStream;
|
||||
import java.math.BigInteger;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.util.Deque;
|
||||
import java.util.List;
|
||||
|
||||
@@ -208,9 +207,7 @@ public class EvmToyCommand implements Runnable {
|
||||
out.println(messageFrame.getExceptionalHaltReason().get());
|
||||
}
|
||||
if (messageFrame.getRevertReason().isPresent()) {
|
||||
out.println(
|
||||
new String(
|
||||
messageFrame.getRevertReason().get().toArrayUnsafe(), StandardCharsets.UTF_8));
|
||||
out.println(messageFrame.getRevertReason().get().toHexString());
|
||||
}
|
||||
}
|
||||
if (messageFrameStack.isEmpty()) {
|
||||
|
||||
@@ -71,7 +71,7 @@ Calculated : ${currentHash}
tasks.register('checkAPIChanges', FileStateChecker) {
  description = "Checks that the API for the Plugin-API project does not change without deliberate thought"
  files = sourceSets.main.allJava.files
  knownHash = '1VIGlJuGiaEVUksIjTTHDt7SIjjJE9+DU8rYk/ze3XM='
  knownHash = 'G3cpM0HGYp4G1u6dN2CRZiEEsgce6jy9rkIlT1blUb4='
}
check.dependsOn('checkAPIChanges')
@@ -14,8 +14,11 @@
 */
package org.hyperledger.besu.plugin.services.transactionpool;

import org.hyperledger.besu.datatypes.PendingTransaction;
import org.hyperledger.besu.plugin.services.BesuService;

import java.util.Collection;

/** Service to enable and disable the transaction pool. */
public interface TransactionPoolService extends BesuService {
  /** Enables the transaction pool. */
@@ -23,4 +26,11 @@ public interface TransactionPoolService extends BesuService {

  /** Disables the transaction pool. */
  void enableTransactionPool();

  /**
   * Returns the collection of pending transactions.
   *
   * @return a collection of pending transactions
   */
  Collection<? extends PendingTransaction> getPendingTransactions();
}
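With this addition a plugin that has obtained the TransactionPoolService can read the pool content directly. A rough sketch, assuming the service reference has already been looked up from the plugin context (the lookup itself and the accessor names on PendingTransaction/Transaction are existing plugin datatypes, not part of this diff; the method below is illustrative only):

    void logPoolContent(final TransactionPoolService txPoolService) {
      final Collection<? extends PendingTransaction> pending =
          txPoolService.getPendingTransactions();
      // print how many transactions are pending and their hashes
      System.out.println("pending transactions: " + pending.size());
      pending.forEach(ptx -> System.out.println(ptx.getTransaction().getHash()));
    }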