Remove deprecated Forest pruning (#6810)

* Remove deprecated Forest pruning

Signed-off-by: Fabio Di Fabio <fabio.difabio@consensys.net>

* Update CHANGELOG.md

Co-authored-by: Sally MacFarlane <macfarla.github@gmail.com>
Signed-off-by: Fabio Di Fabio <fabio.difabio@consensys.net>

---------

Signed-off-by: Fabio Di Fabio <fabio.difabio@consensys.net>
Co-authored-by: Sally MacFarlane <macfarla.github@gmail.com>
This commit is contained in:
Fabio Di Fabio
2024-03-29 16:57:45 +01:00
committed by GitHub
parent 1679525ae2
commit d8e1e1710c
18 changed files with 3 additions and 1840 deletions

View File

@@ -5,6 +5,7 @@
### Breaking Changes
- RocksDB database metadata format has changed to be more expressive, the migration of an existing metadata file to the new format is automatic at startup. Before performing a downgrade to a previous version it is mandatory to revert to the original format using the subcommand `besu --data-path=/path/to/besu/datadir storage revert-metadata v2-to-v1`.
- BFT networks won't start with SNAP or CHECKPOINT sync (previously Besu would start with this config but quietly fail to sync, so it's now more obvious that it won't work) [#6625](https://github.com/hyperledger/besu/pull/6625), [#6667](https://github.com/hyperledger/besu/pull/6667)
- Forest pruning has been removed; it had been deprecated since 24.1.0. If you are still using it, you must now remove the following options from your configuration: `pruning-enabled`, `pruning-blocks-retained` and `pruning-block-confirmations`. You may also want to consider switching to Bonsai.
### Upcoming Breaking Changes
- Receipt compaction will be enabled by default in a future version of Besu. After this change it will not be possible to downgrade to the previous Besu version.
@@ -29,7 +30,7 @@
- Reduce storage size of receipts [#6602](https://github.com/hyperledger/besu/pull/6602)
- Dedicated log marker for invalid txs removed from the txpool [#6826](https://github.com/hyperledger/besu/pull/6826)
- Prevent startup with BONSAI and privacy enabled [#6809](https://github.com/hyperledger/besu/pull/6809)
- Remove deprecated Forest pruning [#6810](https://github.com/hyperledger/besu/pull/6810)
### Bug fixes
- Fix txpool dump/restore race condition [#6665](https://github.com/hyperledger/besu/pull/6665)

View File

@@ -140,7 +140,6 @@ import org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier;
import org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueStorageProvider;
import org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueStorageProviderBuilder;
import org.hyperledger.besu.ethereum.transaction.TransactionSimulator;
import org.hyperledger.besu.ethereum.trie.forest.pruner.PrunerConfiguration;
import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration;
import org.hyperledger.besu.evm.precompile.AbstractAltBnPrecompiledContract;
import org.hyperledger.besu.evm.precompile.BigIntegerModularExponentiationPrecompiledContract;
@@ -797,12 +796,6 @@ public class BesuCommand implements DefaultCommandValues, Runnable {
"How deep a chain reorganization must be in order for it to be logged (default: ${DEFAULT-VALUE})")
private final Long reorgLoggingThreshold = 6L;
@Option(
names = {"--pruning-enabled"},
description =
"Enable disk-space saving optimization that removes old state that is unlikely to be required (default: ${DEFAULT-VALUE})")
private final Boolean pruningEnabled = false;
// Permission Option Group
@CommandLine.ArgGroup(validate = false, heading = "@|bold Permissions Options|@%n")
PermissionsOptions permissionsOptions = new PermissionsOptions();
@@ -852,23 +845,6 @@ public class BesuCommand implements DefaultCommandValues, Runnable {
private final Map<String, String> genesisConfigOverrides =
new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
@Option(
names = {"--pruning-blocks-retained"},
paramLabel = "<INTEGER>",
description =
"Minimum number of recent blocks for which to keep entire world state (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer pruningBlocksRetained = PrunerConfiguration.DEFAULT_PRUNING_BLOCKS_RETAINED;
@Option(
names = {"--pruning-block-confirmations"},
paramLabel = "<INTEGER>",
description =
"Minimum number of confirmations on a block before marking begins (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer pruningBlockConfirmations =
PrunerConfiguration.DEFAULT_PRUNING_BLOCK_CONFIRMATIONS;
@CommandLine.Option(
names = {"--pid-path"},
paramLabel = MANDATORY_PATH_FORMAT_HELP,
@@ -1683,18 +1659,6 @@ public class BesuCommand implements DefaultCommandValues, Runnable {
"--node-private-key-file",
"--security-module=" + DEFAULT_SECURITY_MODULE);
}
if (isPruningEnabled()) {
if (dataStorageOptions
.toDomainObject()
.getDataStorageFormat()
.equals(DataStorageFormat.BONSAI)) {
logger.warn("Forest pruning is ignored with Bonsai data storage format.");
} else {
logger.warn(
"Forest pruning is deprecated and will be removed soon. To save disk space consider switching to Bonsai data storage format.");
}
}
}
private void configure() throws Exception {
@@ -1840,9 +1804,6 @@ public class BesuCommand implements DefaultCommandValues, Runnable {
.clock(Clock.systemUTC())
.isRevertReasonEnabled(isRevertReasonEnabled)
.storageProvider(storageProvider)
.isPruningEnabled(isPruningEnabled())
.pruningConfiguration(
new PrunerConfiguration(pruningBlockConfirmations, pruningBlocksRetained))
.genesisConfigOverrides(genesisConfigOverrides)
.gasLimitCalculator(
getMiningParameters().getTargetGasLimit().isPresent()
@@ -1982,9 +1943,6 @@ public class BesuCommand implements DefaultCommandValues, Runnable {
if (getDataStorageConfiguration().getDataStorageFormat().equals(DataStorageFormat.BONSAI)) {
throw new ParameterException(commandLine, String.format("%s %s", "Bonsai", errorSuffix));
}
if (isPruningEnabled()) {
throw new ParameterException(commandLine, String.format("%s %s", "Pruning", errorSuffix));
}
if (Boolean.TRUE.equals(privacyOptionGroup.isPrivacyMultiTenancyEnabled)
&& Boolean.FALSE.equals(jsonRpcConfiguration.isAuthenticationEnabled())
@@ -2206,10 +2164,6 @@ public class BesuCommand implements DefaultCommandValues, Runnable {
return OptionalInt.empty();
}
private boolean isPruningEnabled() {
return pruningEnabled;
}
// Blockchain synchronization from peers.
private Runner synchronize(
final BesuController controller,

View File

@@ -87,9 +87,6 @@ import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.BonsaiWorldSt
import org.hyperledger.besu.ethereum.trie.diffbased.common.trielog.TrieLogManager;
import org.hyperledger.besu.ethereum.trie.diffbased.common.trielog.TrieLogPruner;
import org.hyperledger.besu.ethereum.trie.forest.ForestWorldStateArchive;
import org.hyperledger.besu.ethereum.trie.forest.pruner.MarkSweepPruner;
import org.hyperledger.besu.ethereum.trie.forest.pruner.Pruner;
import org.hyperledger.besu.ethereum.trie.forest.pruner.PrunerConfiguration;
import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration;
import org.hyperledger.besu.ethereum.worldstate.WorldStateArchive;
import org.hyperledger.besu.ethereum.worldstate.WorldStateKeyValueStorage;
@@ -159,10 +156,6 @@ public abstract class BesuControllerBuilder implements MiningParameterOverrides
GasLimitCalculator gasLimitCalculator;
/** The Storage provider. */
protected StorageProvider storageProvider;
/** The Is pruning enabled. */
protected boolean isPruningEnabled;
/** The Pruner configuration. */
protected PrunerConfiguration prunerConfiguration;
/** The Required blocks. */
protected Map<Long, Hash> requiredBlocks = Collections.emptyMap();
/** The Reorg logging threshold. */
@@ -372,28 +365,6 @@ public abstract class BesuControllerBuilder implements MiningParameterOverrides
return this;
}
/**
* Is pruning enabled besu controller builder.
*
* @param isPruningEnabled the is pruning enabled
* @return the besu controller builder
*/
public BesuControllerBuilder isPruningEnabled(final boolean isPruningEnabled) {
this.isPruningEnabled = isPruningEnabled;
return this;
}
/**
* Pruning configuration besu controller builder.
*
* @param prunerConfiguration the pruner configuration
* @return the besu controller builder
*/
public BesuControllerBuilder pruningConfiguration(final PrunerConfiguration prunerConfiguration) {
this.prunerConfiguration = prunerConfiguration;
return this;
}
/**
* Genesis config overrides besu controller builder.
*
@@ -607,25 +578,6 @@ public abstract class BesuControllerBuilder implements MiningParameterOverrides
protocolSchedule.setPublicWorldStateArchiveForPrivacyBlockProcessor(
protocolContext.getWorldStateArchive());
Optional<Pruner> maybePruner = Optional.empty();
if (isPruningEnabled) {
if (dataStorageConfiguration.getDataStorageFormat().equals(DataStorageFormat.BONSAI)) {
LOG.warn(
"Cannot enable pruning with Bonsai data storage format. Disabling. Change the data storage format or disable pruning explicitly on the command line to remove this warning.");
} else {
maybePruner =
Optional.of(
new Pruner(
new MarkSweepPruner(
((ForestWorldStateArchive) worldStateArchive).getWorldStateStorage(),
blockchain,
storageProvider.getStorageBySegmentIdentifier(
KeyValueSegmentIdentifier.PRUNING_STATE),
metricsSystem),
blockchain,
prunerConfiguration));
}
}
final int maxMessageSize = ethereumWireProtocolConfiguration.getMaxMessageSize();
final Supplier<ProtocolSpec> currentProtocolSpecSupplier =
() -> protocolSchedule.getByBlockHeader(blockchain.getChainHeadHeader());
@@ -712,7 +664,6 @@ public abstract class BesuControllerBuilder implements MiningParameterOverrides
protocolSchedule,
worldStateStorageCoordinator,
protocolContext,
maybePruner,
ethContext,
syncState,
ethProtocolManager,
@@ -803,7 +754,6 @@ public abstract class BesuControllerBuilder implements MiningParameterOverrides
* @param protocolSchedule the protocol schedule
* @param worldStateStorageCoordinator the world state storage
* @param protocolContext the protocol context
* @param maybePruner the maybe pruner
* @param ethContext the eth context
* @param syncState the sync state
* @param ethProtocolManager the eth protocol manager
@@ -814,7 +764,6 @@ public abstract class BesuControllerBuilder implements MiningParameterOverrides
final ProtocolSchedule protocolSchedule,
final WorldStateStorageCoordinator worldStateStorageCoordinator,
final ProtocolContext protocolContext,
final Optional<Pruner> maybePruner,
final EthContext ethContext,
final SyncState syncState,
final EthProtocolManager ethProtocolManager,
@@ -826,7 +775,6 @@ public abstract class BesuControllerBuilder implements MiningParameterOverrides
protocolContext,
worldStateStorageCoordinator,
ethProtocolManager.getBlockBroadcaster(),
maybePruner,
ethContext,
syncState,
dataDirectory,

View File

@@ -56,7 +56,6 @@ import org.hyperledger.besu.ethereum.eth.transactions.TransactionPoolConfigurati
import org.hyperledger.besu.ethereum.mainnet.ProtocolSchedule;
import org.hyperledger.besu.ethereum.p2p.config.SubProtocolConfiguration;
import org.hyperledger.besu.ethereum.storage.StorageProvider;
import org.hyperledger.besu.ethereum.trie.forest.pruner.PrunerConfiguration;
import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration;
import org.hyperledger.besu.ethereum.worldstate.WorldStateArchive;
import org.hyperledger.besu.evm.internal.EvmConfiguration;
@@ -368,20 +367,6 @@ public class ConsensusScheduleBesuControllerBuilder extends BesuControllerBuilde
return super.isRevertReasonEnabled(isRevertReasonEnabled);
}
@Override
public BesuControllerBuilder isPruningEnabled(final boolean isPruningEnabled) {
besuControllerBuilderSchedule.values().forEach(b -> b.isPruningEnabled(isPruningEnabled));
return super.isPruningEnabled(isPruningEnabled);
}
@Override
public BesuControllerBuilder pruningConfiguration(final PrunerConfiguration prunerConfiguration) {
besuControllerBuilderSchedule
.values()
.forEach(b -> b.pruningConfiguration(prunerConfiguration));
return super.pruningConfiguration(prunerConfiguration);
}
@Override
public BesuControllerBuilder genesisConfigOverrides(
final Map<String, String> genesisConfigOverrides) {

View File

@@ -52,8 +52,6 @@ import org.hyperledger.besu.ethereum.eth.transactions.TransactionPool;
import org.hyperledger.besu.ethereum.eth.transactions.TransactionPoolConfiguration;
import org.hyperledger.besu.ethereum.mainnet.ProtocolSchedule;
import org.hyperledger.besu.ethereum.storage.StorageProvider;
import org.hyperledger.besu.ethereum.trie.forest.pruner.Pruner;
import org.hyperledger.besu.ethereum.trie.forest.pruner.PrunerConfiguration;
import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration;
import org.hyperledger.besu.ethereum.worldstate.WorldStateArchive;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorageCoordinator;
@@ -219,7 +217,6 @@ public class TransitionBesuControllerBuilder extends BesuControllerBuilder {
final ProtocolSchedule protocolSchedule,
final WorldStateStorageCoordinator worldStateStorageCoordinator,
final ProtocolContext protocolContext,
final Optional<Pruner> maybePruner,
final EthContext ethContext,
final SyncState syncState,
final EthProtocolManager ethProtocolManager,
@@ -231,7 +228,6 @@ public class TransitionBesuControllerBuilder extends BesuControllerBuilder {
protocolSchedule,
worldStateStorageCoordinator,
protocolContext,
maybePruner,
ethContext,
syncState,
ethProtocolManager,
@@ -387,18 +383,6 @@ public class TransitionBesuControllerBuilder extends BesuControllerBuilder {
return propagateConfig(z -> z.isRevertReasonEnabled(isRevertReasonEnabled));
}
@Override
public BesuControllerBuilder isPruningEnabled(final boolean isPruningEnabled) {
super.isPruningEnabled(isPruningEnabled);
return propagateConfig(z -> z.isPruningEnabled(isPruningEnabled));
}
@Override
public BesuControllerBuilder pruningConfiguration(final PrunerConfiguration prunerConfiguration) {
super.pruningConfiguration(prunerConfiguration);
return propagateConfig(z -> z.pruningConfiguration(prunerConfiguration));
}
@Override
public BesuControllerBuilder genesisConfigOverrides(
final Map<String, String> genesisConfigOverrides) {

View File

@@ -13,12 +13,6 @@
"config-key": "sync-mode",
"available-options": "org.hyperledger.besu.ethereum.eth.sync.SyncMode"
},
{
"prompt-type": "CONFIRM",
"question": "Do you want to enable pruning?",
"config-key": "pruning-enabled",
"default-option": "no"
},
{
"prompt-type": "INPUT",
"question": "What is the data directory ?",

View File

@@ -283,8 +283,6 @@ public abstract class CommandTestAbstract {
when(mockControllerBuilder.clock(any())).thenReturn(mockControllerBuilder);
when(mockControllerBuilder.isRevertReasonEnabled(false)).thenReturn(mockControllerBuilder);
when(mockControllerBuilder.storageProvider(any())).thenReturn(mockControllerBuilder);
when(mockControllerBuilder.isPruningEnabled(anyBoolean())).thenReturn(mockControllerBuilder);
when(mockControllerBuilder.pruningConfiguration(any())).thenReturn(mockControllerBuilder);
when(mockControllerBuilder.genesisConfigOverrides(any())).thenReturn(mockControllerBuilder);
when(mockControllerBuilder.gasLimitCalculator(any())).thenReturn(mockControllerBuilder);
when(mockControllerBuilder.requiredBlocks(any())).thenReturn(mockControllerBuilder);

View File

@@ -215,15 +215,6 @@ public class PrivacyOptionsTest extends CommandTestAbstract {
assertThat(commandOutput.toString(UTF_8)).isEmpty();
}
@Test
public void privacyWithPruningMustError() {
parseCommand("--pruning-enabled", "--privacy-enabled");
assertThat(commandErrorOutput.toString(UTF_8))
.contains("Pruning cannot be enabled with privacy.");
assertThat(commandOutput.toString(UTF_8)).isEmpty();
}
@Test
public void privacyWithoutPrivacyPublicKeyFails() {
parseCommand("--privacy-enabled", "--privacy-url", ENCLAVE_URI);

View File

@@ -1,126 +0,0 @@
/*
* Copyright Hyperledger Besu Contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.cli;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.ArgumentMatchers.contains;
import static org.mockito.Mockito.verify;
import org.hyperledger.besu.ethereum.trie.forest.pruner.PrunerConfiguration;
import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;
import org.mockito.ArgumentCaptor;
public class PruningOptionsTest extends CommandTestAbstract {
@Disabled
public void pruningIsEnabledIfSyncModeIsFast() {
parseCommand("--sync-mode", "FAST");
verify(mockControllerBuilder).isPruningEnabled(true);
verify(mockControllerBuilder).build();
assertThat(commandOutput.toString(UTF_8)).isEmpty();
assertThat(commandErrorOutput.toString(UTF_8)).isEmpty();
}
@Disabled
public void pruningIsDisabledIfSyncModeIsFull() {
parseCommand("--sync-mode", "FULL");
verify(mockControllerBuilder).isPruningEnabled(false);
verify(mockControllerBuilder).build();
assertThat(commandOutput.toString(UTF_8)).isEmpty();
assertThat(commandErrorOutput.toString(UTF_8)).isEmpty();
}
@Test
public void pruningEnabledExplicitly() {
parseCommand("--pruning-enabled", "--sync-mode=FULL");
verify(mockControllerBuilder).isPruningEnabled(true);
verify(mockControllerBuilder).build();
assertThat(commandOutput.toString(UTF_8)).isEmpty();
assertThat(commandErrorOutput.toString(UTF_8)).isEmpty();
}
@Disabled
public void pruningDisabledExplicitly() {
parseCommand("--pruning-enabled=false", "--sync-mode=FAST");
verify(mockControllerBuilder).isPruningEnabled(false);
verify(mockControllerBuilder).build();
assertThat(commandOutput.toString(UTF_8)).isEmpty();
assertThat(commandErrorOutput.toString(UTF_8)).isEmpty();
}
@Test
public void pruningDisabledByDefault() {
parseCommand();
verify(mockControllerBuilder).isPruningEnabled(false);
assertThat(commandOutput.toString(UTF_8)).isEmpty();
assertThat(commandErrorOutput.toString(UTF_8)).isEmpty();
}
@Test
public void pruningParametersAreCaptured() throws Exception {
parseCommand(
"--pruning-enabled", "--pruning-blocks-retained=15", "--pruning-block-confirmations=4");
final ArgumentCaptor<PrunerConfiguration> pruningArg =
ArgumentCaptor.forClass(PrunerConfiguration.class);
verify(mockControllerBuilder).pruningConfiguration(pruningArg.capture());
verify(mockControllerBuilder).build();
assertThat(commandOutput.toString(UTF_8)).isEmpty();
assertThat(commandErrorOutput.toString(UTF_8)).isEmpty();
assertThat(pruningArg.getValue().getBlocksRetained()).isEqualTo(15);
assertThat(pruningArg.getValue().getBlockConfirmations()).isEqualTo(4);
}
@Test
public void pruningLogsDeprecationWarningWithForest() {
parseCommand("--pruning-enabled", "--data-storage-format=FOREST");
verify(mockControllerBuilder).isPruningEnabled(true);
assertThat(commandOutput.toString(UTF_8)).isEmpty();
assertThat(commandErrorOutput.toString(UTF_8)).isEmpty();
verify(mockLogger)
.warn(
contains(
"Forest pruning is deprecated and will be removed soon."
+ " To save disk space consider switching to Bonsai data storage format."));
}
@Test
public void pruningLogsIgnoredWarningWithBonsai() {
parseCommand("--pruning-enabled", "--data-storage-format=BONSAI");
verify(mockControllerBuilder).isPruningEnabled(true);
assertThat(commandOutput.toString(UTF_8)).isEmpty();
assertThat(commandErrorOutput.toString(UTF_8)).isEmpty();
verify(mockLogger).warn(contains("Forest pruning is ignored with Bonsai data storage format."));
}
}

View File

@@ -1,218 +0,0 @@
/*
* Copyright 2019 ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.controller;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.lenient;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import org.hyperledger.besu.config.CheckpointConfigOptions;
import org.hyperledger.besu.config.EthashConfigOptions;
import org.hyperledger.besu.config.GenesisConfigFile;
import org.hyperledger.besu.config.GenesisConfigOptions;
import org.hyperledger.besu.cryptoservices.NodeKey;
import org.hyperledger.besu.cryptoservices.NodeKeyUtils;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.GasLimitCalculator;
import org.hyperledger.besu.ethereum.chain.Blockchain;
import org.hyperledger.besu.ethereum.core.MiningParameters;
import org.hyperledger.besu.ethereum.core.PrivacyParameters;
import org.hyperledger.besu.ethereum.eth.EthProtocolConfiguration;
import org.hyperledger.besu.ethereum.eth.sync.SynchronizerConfiguration;
import org.hyperledger.besu.ethereum.eth.transactions.TransactionPoolConfiguration;
import org.hyperledger.besu.ethereum.mainnet.MainnetBlockHeaderFunctions;
import org.hyperledger.besu.ethereum.p2p.config.NetworkingConfiguration;
import org.hyperledger.besu.ethereum.storage.StorageProvider;
import org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier;
import org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueStoragePrefixedKeyBlockchainStorage;
import org.hyperledger.besu.ethereum.storage.keyvalue.VariablesKeyValueStorage;
import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.cache.BonsaiCachedMerkleTrieLoader;
import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.storage.BonsaiWorldStateKeyValueStorage;
import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.worldview.BonsaiWorldState;
import org.hyperledger.besu.ethereum.trie.forest.pruner.PrunerConfiguration;
import org.hyperledger.besu.ethereum.trie.forest.storage.ForestWorldStateKeyValueStorage;
import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration;
import org.hyperledger.besu.ethereum.worldstate.ImmutableDataStorageConfiguration;
import org.hyperledger.besu.ethereum.worldstate.WorldStateArchive;
import org.hyperledger.besu.ethereum.worldstate.WorldStatePreimageStorage;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorageCoordinator;
import org.hyperledger.besu.evm.internal.EvmConfiguration;
import org.hyperledger.besu.metrics.ObservableMetricsSystem;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import org.hyperledger.besu.services.kvstore.InMemoryKeyValueStorage;
import java.math.BigInteger;
import java.nio.file.Path;
import java.time.Clock;
import java.util.OptionalLong;
import com.google.common.collect.Range;
import org.apache.tuweni.bytes.Bytes;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.junit.jupiter.api.io.TempDir;
import org.mockito.Answers;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
@ExtendWith(MockitoExtension.class)
public class BesuControllerBuilderTest {
private BesuControllerBuilder besuControllerBuilder;
private static final NodeKey nodeKey = NodeKeyUtils.generate();
@Mock GenesisConfigFile genesisConfigFile;
@Mock GenesisConfigOptions genesisConfigOptions;
@Mock EthashConfigOptions ethashConfigOptions;
@Mock CheckpointConfigOptions checkpointConfigOptions;
@Mock SynchronizerConfiguration synchronizerConfiguration;
@Mock EthProtocolConfiguration ethProtocolConfiguration;
@Mock PrivacyParameters privacyParameters;
@Mock Clock clock;
@Mock StorageProvider storageProvider;
@Mock GasLimitCalculator gasLimitCalculator;
@Mock WorldStateArchive worldStateArchive;
@Mock BonsaiWorldStateKeyValueStorage bonsaiWorldStateStorage;
@Mock WorldStatePreimageStorage worldStatePreimageStorage;
private final TransactionPoolConfiguration poolConfiguration =
TransactionPoolConfiguration.DEFAULT;
private final MiningParameters miningParameters = MiningParameters.newDefault();
private final ObservableMetricsSystem observableMetricsSystem = new NoOpMetricsSystem();
BigInteger networkId = BigInteger.ONE;
@TempDir Path tempDir;
@BeforeEach
public void setup() {
final ForestWorldStateKeyValueStorage worldStateKeyValueStorage =
mock(ForestWorldStateKeyValueStorage.class);
final WorldStateStorageCoordinator worldStateStorageCoordinator =
new WorldStateStorageCoordinator(worldStateKeyValueStorage);
when(genesisConfigFile.getParentHash()).thenReturn(Hash.ZERO.toHexString());
when(genesisConfigFile.getDifficulty()).thenReturn(Bytes.of(0).toHexString());
when(genesisConfigFile.getExtraData()).thenReturn(Bytes.EMPTY.toHexString());
when(genesisConfigFile.getMixHash()).thenReturn(Hash.ZERO.toHexString());
when(genesisConfigFile.getNonce()).thenReturn(Long.toHexString(1));
when(genesisConfigFile.getConfigOptions(any())).thenReturn(genesisConfigOptions);
when(genesisConfigFile.getConfigOptions()).thenReturn(genesisConfigOptions);
when(genesisConfigOptions.getThanosBlockNumber()).thenReturn(OptionalLong.empty());
when(genesisConfigOptions.getEthashConfigOptions()).thenReturn(ethashConfigOptions);
when(genesisConfigOptions.getCheckpointOptions()).thenReturn(checkpointConfigOptions);
when(ethashConfigOptions.getFixedDifficulty()).thenReturn(OptionalLong.empty());
when(storageProvider.getStorageBySegmentIdentifier(any()))
.thenReturn(new InMemoryKeyValueStorage());
when(storageProvider.createBlockchainStorage(any(), any(), any()))
.thenReturn(
new KeyValueStoragePrefixedKeyBlockchainStorage(
new InMemoryKeyValueStorage(),
new VariablesKeyValueStorage(new InMemoryKeyValueStorage()),
new MainnetBlockHeaderFunctions(),
false));
when(synchronizerConfiguration.getDownloaderParallelism()).thenReturn(1);
when(synchronizerConfiguration.getTransactionsParallelism()).thenReturn(1);
when(synchronizerConfiguration.getComputationParallelism()).thenReturn(1);
when(synchronizerConfiguration.getBlockPropagationRange()).thenReturn(Range.closed(1L, 2L));
lenient()
.when(
storageProvider.createWorldStateStorageCoordinator(
DataStorageConfiguration.DEFAULT_FOREST_CONFIG))
.thenReturn(worldStateStorageCoordinator);
lenient()
.when(storageProvider.createWorldStatePreimageStorage())
.thenReturn(worldStatePreimageStorage);
lenient().when(worldStateKeyValueStorage.isWorldStateAvailable(any())).thenReturn(true);
lenient()
.when(worldStatePreimageStorage.updater())
.thenReturn(mock(WorldStatePreimageStorage.Updater.class));
lenient()
.when(worldStateKeyValueStorage.updater())
.thenReturn(mock(ForestWorldStateKeyValueStorage.Updater.class));
besuControllerBuilder = spy(visitWithMockConfigs(new MainnetBesuControllerBuilder()));
}
BesuControllerBuilder visitWithMockConfigs(final BesuControllerBuilder builder) {
return builder
.gasLimitCalculator(gasLimitCalculator)
.genesisConfigFile(genesisConfigFile)
.synchronizerConfiguration(synchronizerConfiguration)
.ethProtocolConfiguration(ethProtocolConfiguration)
.miningParameters(miningParameters)
.metricsSystem(observableMetricsSystem)
.privacyParameters(privacyParameters)
.dataDirectory(tempDir)
.clock(clock)
.transactionPoolConfiguration(poolConfiguration)
.nodeKey(nodeKey)
.storageProvider(storageProvider)
.evmConfiguration(EvmConfiguration.DEFAULT)
.networkConfiguration(NetworkingConfiguration.create())
.networkId(networkId);
}
@Test
public void shouldDisablePruningIfBonsaiIsEnabled() {
DataStorageConfiguration dataStorageConfiguration =
ImmutableDataStorageConfiguration.builder()
.dataStorageFormat(DataStorageFormat.BONSAI)
.bonsaiMaxLayersToLoad(DataStorageConfiguration.DEFAULT_BONSAI_MAX_LAYERS_TO_LOAD)
.build();
BonsaiWorldState mockWorldState = mock(BonsaiWorldState.class, Answers.RETURNS_DEEP_STUBS);
doReturn(worldStateArchive)
.when(besuControllerBuilder)
.createWorldStateArchive(
any(WorldStateStorageCoordinator.class),
any(Blockchain.class),
any(BonsaiCachedMerkleTrieLoader.class));
doReturn(mockWorldState).when(worldStateArchive).getMutable();
when(storageProvider.createWorldStateStorageCoordinator(dataStorageConfiguration))
.thenReturn(new WorldStateStorageCoordinator(bonsaiWorldStateStorage));
besuControllerBuilder.isPruningEnabled(true).dataStorageConfiguration(dataStorageConfiguration);
besuControllerBuilder.build();
verify(storageProvider, never())
.getStorageBySegmentIdentifier(KeyValueSegmentIdentifier.PRUNING_STATE);
}
@Test
public void shouldUsePruningIfForestIsEnabled() {
besuControllerBuilder
.isPruningEnabled(true)
.pruningConfiguration(new PrunerConfiguration(1, 2))
.dataStorageConfiguration(
ImmutableDataStorageConfiguration.builder()
.dataStorageFormat(DataStorageFormat.FOREST)
.bonsaiMaxLayersToLoad(DataStorageConfiguration.DEFAULT_BONSAI_MAX_LAYERS_TO_LOAD)
.build());
besuControllerBuilder.build();
verify(storageProvider).getStorageBySegmentIdentifier(KeyValueSegmentIdentifier.PRUNING_STATE);
}
}

View File

@@ -151,11 +151,6 @@ Xminer-remote-sealers-limit=1000
Xminer-remote-sealers-hashrate-ttl=10
Xpos-block-creation-max-time=5
# Pruning
pruning-enabled=true
pruning-blocks-retained=1024
pruning-block-confirmations=10
# Permissioning
permissions-nodes-config-file-enabled=false
permissions-nodes-config-file="./permissions_config.toml"

View File

@@ -1,267 +0,0 @@
/*
* Copyright Hyperledger Besu Contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.trie.forest.pruner;
import static org.assertj.core.api.Assertions.assertThat;
import static org.hyperledger.besu.ethereum.core.InMemoryKeyValueStorageProvider.createInMemoryBlockchain;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.chain.MutableBlockchain;
import org.hyperledger.besu.ethereum.core.Block;
import org.hyperledger.besu.ethereum.core.BlockDataGenerator;
import org.hyperledger.besu.ethereum.core.BlockDataGenerator.BlockOptions;
import org.hyperledger.besu.ethereum.core.BlockHeader;
import org.hyperledger.besu.ethereum.core.MutableWorldState;
import org.hyperledger.besu.ethereum.core.TransactionReceipt;
import org.hyperledger.besu.ethereum.rlp.RLP;
import org.hyperledger.besu.ethereum.storage.keyvalue.WorldStatePreimageKeyValueStorage;
import org.hyperledger.besu.ethereum.trie.MerkleTrie;
import org.hyperledger.besu.ethereum.trie.forest.ForestWorldStateArchive;
import org.hyperledger.besu.ethereum.trie.forest.pruner.Pruner.PruningPhase;
import org.hyperledger.besu.ethereum.trie.forest.storage.ForestWorldStateKeyValueStorage;
import org.hyperledger.besu.ethereum.trie.patricia.StoredMerklePatriciaTrie;
import org.hyperledger.besu.ethereum.worldstate.StateTrieAccountValue;
import org.hyperledger.besu.ethereum.worldstate.WorldStateArchive;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorageCoordinator;
import org.hyperledger.besu.evm.internal.EvmConfiguration;
import org.hyperledger.besu.evm.worldstate.WorldState;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
import org.hyperledger.besu.services.kvstore.InMemoryKeyValueStorage;
import org.hyperledger.besu.testutil.MockExecutorService;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.bytes.Bytes32;
import org.junit.jupiter.api.Test;
/**
 * Integration tests for the Forest mark-sweep {@link Pruner}.
 *
 * <p>Each test drives several complete mark/sweep cycles against a real in-memory blockchain and
 * Forest world state, then verifies that the backing key-value store contains exactly the trie
 * nodes reachable from the retained state roots and nothing else.
 */
public class PrunerIntegrationTest {
  private final BlockDataGenerator gen = new BlockDataGenerator();
  private final NoOpMetricsSystem metricsSystem = new NoOpMetricsSystem();
  // Raw backing map of the world-state storage; kept so assertions can inspect its exact contents.
  private final Map<Bytes, Optional<byte[]>> hashValueStore = new HashMap<>();
  private final InMemoryKeyValueStorage stateStorage = new TestInMemoryStorage(hashValueStore);
  private final ForestWorldStateKeyValueStorage worldStateKeyValueStorage =
      new ForestWorldStateKeyValueStorage(stateStorage);
  private final WorldStateArchive worldStateArchive =
      new ForestWorldStateArchive(
          new WorldStateStorageCoordinator(worldStateKeyValueStorage),
          new WorldStatePreimageKeyValueStorage(new InMemoryKeyValueStorage()),
          EvmConfiguration.DEFAULT);
  // Separate storage where the pruner records "in use" marks during the mark phase.
  private final InMemoryKeyValueStorage markStorage = new InMemoryKeyValueStorage();
  private final Block genesisBlock = gen.genesisBlock();
  private final MutableBlockchain blockchain = createInMemoryBlockchain(genesisBlock);

  // Few accounts per block, large storage-transaction batches.
  @Test
  public void pruner_smallState_manyOpsPerTx() {
    testPruner(3, 1, 1, 4, 100_000);
  }

  // Many accounts per block, tiny storage-transaction batches (forces frequent flushes).
  @Test
  public void pruner_largeState_fewOpsPerTx() {
    testPruner(2, 5, 5, 6, 5);
  }

  // Blocks that add no accounts at all; state roots repeat the empty trie.
  @Test
  public void pruner_emptyBlocks() {
    testPruner(5, 0, 2, 5, 10);
  }

  // Large retention window relative to the cycle length.
  @Test
  public void pruner_markChainhead() {
    testPruner(4, 2, 1, 10, 20);
  }

  // Marking starts almost immediately after the mark block is seen.
  @Test
  public void pruner_lowRelativeBlockConfirmations() {
    testPruner(3, 2, 1, 4, 20);
  }

  // Marking is delayed until just before the sweep threshold.
  @Test
  public void pruner_highRelativeBlockConfirmations() {
    testPruner(3, 2, 9, 10, 20);
  }

  /**
   * Runs {@code numCycles} complete mark/sweep cycles and, after each cycle, checks that all
   * retained world state is fully traversable while everything older than the mark point has been
   * swept from storage.
   *
   * @param numCycles number of complete pruning cycles to drive
   * @param accountsPerBlock contract accounts added to the world state in each generated block
   * @param blockConfirmations confirmations required before the mark phase starts
   * @param numBlocksToKeep number of recent blocks whose full state must survive the sweep
   * @param opsPerTransaction mark/sweep storage operations batched per storage transaction
   */
  private void testPruner(
      final int numCycles,
      final int accountsPerBlock,
      final int blockConfirmations,
      final int numBlocksToKeep,
      final int opsPerTransaction) {
    final var markSweepPruner =
        new MarkSweepPruner(
            worldStateKeyValueStorage, blockchain, markStorage, metricsSystem, opsPerTransaction);
    final var pruner =
        new Pruner(
            markSweepPruner,
            blockchain,
            new PrunerConfiguration(blockConfirmations, numBlocksToKeep),
            // Synchronous executor: pruning phases complete before each assertion below runs.
            new MockExecutorService());
    pruner.start();
    for (int cycle = 0; cycle < numCycles; ++cycle) {
      int numBlockInCycle =
          numBlocksToKeep
              + 1; // +1 to get it to switch from MARKING_COMPLETE TO SWEEPING on each cycle
      var fullyMarkedBlockNum = cycle * numBlockInCycle + 1;
      // This should cause a full mark and sweep cycle
      assertThat(pruner.getPruningPhase()).isEqualByComparingTo(PruningPhase.IDLE);
      generateBlockchainData(numBlockInCycle, accountsPerBlock);
      assertThat(pruner.getPruningPhase()).isEqualByComparingTo(PruningPhase.IDLE);
      // Collect the nodes we expect to keep
      final Set<Bytes> expectedNodes = new HashSet<>();
      for (int i = fullyMarkedBlockNum; i <= blockchain.getChainHeadBlockNumber(); i++) {
        final Hash stateRoot = blockchain.getBlockHeader(i).get().getStateRoot();
        collectWorldStateNodes(stateRoot, expectedNodes);
      }
      if (accountsPerBlock != 0) {
        assertThat(hashValueStore.size())
            .isGreaterThanOrEqualTo(expectedNodes.size()); // Sanity check
      }
      // Assert that blocks from mark point onward are still accessible
      for (int i = fullyMarkedBlockNum; i <= blockchain.getChainHeadBlockNumber(); i++) {
        final BlockHeader blockHeader = blockchain.getBlockHeader(i).get();
        final Hash stateRoot = blockHeader.getStateRoot();
        assertThat(worldStateArchive.get(stateRoot, blockHeader.getHash())).isPresent();
        final WorldState markedState =
            worldStateArchive.get(stateRoot, blockHeader.getHash()).get();
        // Traverse accounts and make sure all are accessible
        final int expectedAccounts = accountsPerBlock * i;
        final long accounts =
            markedState.streamAccounts(Bytes32.ZERO, expectedAccounts * 2).count();
        assertThat(accounts).isEqualTo(expectedAccounts);
        // Traverse storage to ensure that all storage is accessible
        markedState
            .streamAccounts(Bytes32.ZERO, expectedAccounts * 2)
            .forEach(a -> a.storageEntriesFrom(Bytes32.ZERO, 1000));
      }
      // All other state roots should have been removed
      for (int i = 0; i < fullyMarkedBlockNum; i++) {
        final BlockHeader curHeader = blockchain.getBlockHeader(i).get();
        // The empty trie root is shared by every empty state, so it is never swept.
        if (!curHeader.getStateRoot().equals(Hash.EMPTY_TRIE_HASH)) {
          assertThat(worldStateArchive.get(curHeader.getStateRoot(), curHeader.getHash()))
              .isEmpty();
        }
      }
      // Check that storage contains only the values we expect
      assertThat(hashValueStore.size()).isEqualTo(expectedNodes.size());
      assertThat(hashValueStore.values().stream().map(Optional::get))
          .containsExactlyInAnyOrderElementsOf(
              expectedNodes.stream().map(Bytes::toArrayUnsafe).collect(Collectors.toSet()));
    }
    pruner.stop();
  }

  /**
   * Appends {@code numBlocks} blocks to the chain, each adding {@code numAccounts} random contract
   * accounts (with non-empty storage) on top of the parent's world state.
   */
  private void generateBlockchainData(final int numBlocks, final int numAccounts) {
    Block parentBlock = blockchain.getChainHeadBlock();
    for (int i = 0; i < numBlocks; i++) {
      final BlockHeader parentHeader = parentBlock.getHeader();
      final Hash parentHash = parentBlock.getHash();
      final MutableWorldState worldState =
          worldStateArchive.getMutable(parentHeader.getStateRoot(), parentHash).get();
      gen.createRandomContractAccountsWithNonEmptyStorage(worldState, numAccounts);
      final Hash stateRoot = worldState.rootHash();
      final Block block =
          gen.block(
              BlockOptions.create()
                  .setStateRoot(stateRoot)
                  .setBlockNumber(parentHeader.getNumber() + 1L)
                  .setParentHash(parentHash));
      final List<TransactionReceipt> receipts = gen.receipts(block);
      blockchain.appendBlock(block, receipts);
      parentBlock = block;
    }
  }

  /**
   * Collects into {@code collector} every node reachable from {@code stateRootHash}: state trie
   * nodes, contract code, and all storage trie nodes. Returns the same collector for chaining.
   */
  private Set<Bytes> collectWorldStateNodes(final Hash stateRootHash, final Set<Bytes> collector) {
    final List<Hash> storageRoots = new ArrayList<>();
    final MerkleTrie<Bytes32, Bytes> stateTrie = createStateTrie(stateRootHash);
    // Collect storage roots and code
    stateTrie
        .entriesFrom(Bytes32.ZERO, 1000)
        .forEach(
            (key, val) -> {
              final StateTrieAccountValue accountValue =
                  StateTrieAccountValue.readFrom(RLP.input(val));
              stateStorage
                  .get(accountValue.getCodeHash().toArray())
                  .ifPresent(v -> collector.add(Bytes.wrap(v)));
              storageRoots.add(accountValue.getStorageRoot());
            });
    // Collect state nodes
    collectTrieNodes(stateTrie, collector);
    // Collect storage nodes
    for (Hash storageRoot : storageRoots) {
      final MerkleTrie<Bytes32, Bytes> storageTrie = createStorageTrie(storageRoot);
      collectTrieNodes(storageTrie, collector);
    }
    return collector;
  }

  /** Adds every hash-referenced node of {@code trie} (plus its root) to {@code collector}. */
  private void collectTrieNodes(final MerkleTrie<Bytes32, Bytes> trie, final Set<Bytes> collector) {
    final Bytes32 rootHash = trie.getRootHash();
    trie.visitAll(
        node -> {
          // Inlined (non-hash-referenced) nodes are stored inside their parent, so only
          // hash-referenced nodes and the root occupy their own storage entries.
          if (node.isReferencedByHash() || node.getHash().equals(rootHash)) {
            collector.add(node.getEncodedBytes());
          }
        });
  }

  /** Builds a read-only view of the account state trie rooted at {@code rootHash}. */
  private MerkleTrie<Bytes32, Bytes> createStateTrie(final Bytes32 rootHash) {
    return new StoredMerklePatriciaTrie<>(
        (location, hash) -> worldStateKeyValueStorage.getAccountStateTrieNode(hash),
        rootHash,
        Function.identity(),
        Function.identity());
  }

  /** Builds a read-only view of a contract storage trie rooted at {@code rootHash}. */
  private MerkleTrie<Bytes32, Bytes> createStorageTrie(final Bytes32 rootHash) {
    return new StoredMerklePatriciaTrie<>(
        (location, hash) -> worldStateKeyValueStorage.getAccountStorageTrieNode(hash),
        rootHash,
        Function.identity(),
        Function.identity());
  }

  // Proxy class so that we have access to the constructor that takes our own map
  private static class TestInMemoryStorage extends InMemoryKeyValueStorage {
    public TestInMemoryStorage(final Map<Bytes, Optional<byte[]>> hashValueStore) {
      super(hashValueStore);
    }
  }
}

View File

@@ -1,317 +0,0 @@
/*
* Copyright Hyperledger Besu Contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.trie.forest.pruner;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.chain.MutableBlockchain;
import org.hyperledger.besu.ethereum.core.BlockHeader;
import org.hyperledger.besu.ethereum.rlp.RLP;
import org.hyperledger.besu.ethereum.trie.MerkleTrie;
import org.hyperledger.besu.ethereum.trie.forest.storage.ForestWorldStateKeyValueStorage;
import org.hyperledger.besu.ethereum.trie.patricia.StoredMerklePatriciaTrie;
import org.hyperledger.besu.ethereum.worldstate.StateTrieAccountValue;
import org.hyperledger.besu.metrics.BesuMetricCategory;
import org.hyperledger.besu.metrics.ObservableMetricsSystem;
import org.hyperledger.besu.plugin.services.metrics.Counter;
import org.hyperledger.besu.plugin.services.storage.KeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.KeyValueStorageTransaction;
import java.util.Collection;
import java.util.Collections;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.LinkedBlockingDeque;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.Function;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Stopwatch;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.bytes.Bytes32;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Mark-sweep pruner for the Forest world-state storage.
 *
 * <p>{@link #mark(Hash)} walks the state trie rooted at a chosen block in parallel and records
 * every reachable node (account nodes, contract code, storage trie nodes) as in-use. {@link
 * #sweepBefore(long)} then removes unmarked state roots (newest first, stopping at the first root
 * no longer present) and prunes all remaining unmarked nodes.
 *
 * <p>Marks are buffered in the thread-safe {@link #pendingMarks} set and flushed to {@code
 * markStorage} in batches of {@code operationsPerTransaction}. Nodes written to the world state
 * while marking is in progress are captured via the node-added listener registered in {@link
 * #prepare()} so they are never swept.
 */
public class MarkSweepPruner {
  private static final Logger LOG = LoggerFactory.getLogger(MarkSweepPruner.class);
  // Sentinel value stored against each marked node hash in markStorage.
  private static final byte[] IN_USE = Bytes.of(1).toArrayUnsafe();
  private static final int DEFAULT_OPS_PER_TRANSACTION = 10_000;
  private static final int MAX_MARKING_THREAD_POOL_SIZE = 2;
  // Batch size for both mark flushes and sweep transaction commits.
  private final int operationsPerTransaction;
  private final ForestWorldStateKeyValueStorage worldStateKeyValueStorage;
  private final MutableBlockchain blockchain;
  private final KeyValueStorage markStorage;
  private final Counter markedNodesCounter;
  private final Counter markOperationCounter;
  private final Counter sweepOperationCounter;
  private final Counter sweptNodesCounter;
  private final Stopwatch markStopwatch;
  private volatile long nodeAddedListenerId;
  // Read lock: concurrent marking threads adding to pendingMarks.
  // Write lock: exclusive access while the pending set is flushed and cleared.
  private final ReadWriteLock pendingMarksLock = new ReentrantReadWriteLock();
  private final Set<Bytes32> pendingMarks = Collections.newSetFromMap(new ConcurrentHashMap<>());

  /**
   * Creates a pruner using the default batch size of {@value #DEFAULT_OPS_PER_TRANSACTION}
   * operations per storage transaction.
   *
   * @param worldStateKeyValueStorage the Forest world-state storage to prune
   * @param blockchain source of block headers / state roots
   * @param markStorage persistent storage for in-use marks (survives restarts mid-cycle)
   * @param metricsSystem metrics registry for pruning counters and gauges
   */
  public MarkSweepPruner(
      final ForestWorldStateKeyValueStorage worldStateKeyValueStorage,
      final MutableBlockchain blockchain,
      final KeyValueStorage markStorage,
      final ObservableMetricsSystem metricsSystem) {
    this(
        worldStateKeyValueStorage,
        blockchain,
        markStorage,
        metricsSystem,
        DEFAULT_OPS_PER_TRANSACTION);
  }

  /**
   * Creates a pruner with an explicit batch size.
   *
   * @param worldStateKeyValueStorage the Forest world-state storage to prune
   * @param blockchain source of block headers / state roots
   * @param markStorage persistent storage for in-use marks
   * @param metricsSystem metrics registry for pruning counters and gauges
   * @param operationsPerTransaction number of mark/sweep operations batched per transaction
   */
  public MarkSweepPruner(
      final ForestWorldStateKeyValueStorage worldStateKeyValueStorage,
      final MutableBlockchain blockchain,
      final KeyValueStorage markStorage,
      final ObservableMetricsSystem metricsSystem,
      final int operationsPerTransaction) {
    this.worldStateKeyValueStorage = worldStateKeyValueStorage;
    this.markStorage = markStorage;
    this.blockchain = blockchain;
    this.operationsPerTransaction = operationsPerTransaction;
    markedNodesCounter =
        metricsSystem.createCounter(
            BesuMetricCategory.PRUNER,
            "marked_nodes_total",
            "Total number of nodes marked as in use");
    markOperationCounter =
        metricsSystem.createCounter(
            BesuMetricCategory.PRUNER,
            "mark_operations_total",
            "Total number of mark operations performed");
    sweptNodesCounter =
        metricsSystem.createCounter(
            BesuMetricCategory.PRUNER, "swept_nodes_total", "Total number of unused nodes removed");
    sweepOperationCounter =
        metricsSystem.createCounter(
            BesuMetricCategory.PRUNER,
            "sweep_operations_total",
            "Total number of sweep operations performed");
    markStopwatch = Stopwatch.createUnstarted();
    metricsSystem.createLongGauge(
        BesuMetricCategory.PRUNER,
        "mark_time_duration",
        "Cumulative number of seconds spent marking the state trie across all pruning cycles",
        () -> markStopwatch.elapsed(TimeUnit.SECONDS));
    LOG.debug("Using {} pruner threads", MAX_MARKING_THREAD_POOL_SIZE);
  }

  /**
   * Prepares for a new pruning cycle: clears stale marks from any interrupted previous cycle and
   * starts listening for nodes added to the world state so they are marked as in-use.
   */
  public void prepare() {
    // Optimization for the case where the previous cycle was interrupted (like the node was shut
    // down). If the previous cycle was interrupted, there will be marks in the mark storage from
    // last time, causing the first sweep to be smaller than it needs to be.
    clearMarks();
    nodeAddedListenerId = worldStateKeyValueStorage.addNodeAddedListener(this::markNodes);
  }

  /**
   * This is a parallel mark implementation.
   *
   * <p>The parallel task production is by sub-trie, so calling `visitAll` on a root node will
   * eventually spawn up to 16 tasks (for a hexary trie).
   *
   * <p>If we marked each sub-trie in its own thread, with no common queue of tasks, our mark speed
   * would be limited by the sub-trie with the maximum number of nodes. In practice for the Ethereum
   * mainnet, we see a large imbalance in sub-trie size so without a common task pool the time in
   * which there is only 1 thread left marking its big sub-trie would be substantial.
   *
   * <p>If we were to leave all threads to produce mark tasks before starting to mark, we would run
   * out of memory quickly.
   *
   * <p>If we were to have a constant number of threads producing the mark tasks with the others
   * consuming them, we would have to optimize the production/consumption balance.
   *
   * <p>To get the best of both worlds, the marking executor has a {@link
   * ThreadPoolExecutor.CallerRunsPolicy} which causes the producing tasks to essentially consume
   * their own mark task immediately when the task queue is full. The resulting behavior is threads
   * that mark their own sub-trie until they finish that sub-trie, at which point they switch to
   * marking the sub-trie tasks produced by another thread.
   *
   * @param rootHash The root hash of the whole state trie. Roots of storage tries will be
   *     discovered though traversal.
   */
  public void mark(final Hash rootHash) {
    markOperationCounter.inc();
    markStopwatch.start();
    final ExecutorService markingExecutorService =
        new ThreadPoolExecutor(
            0,
            MAX_MARKING_THREAD_POOL_SIZE,
            5L,
            TimeUnit.SECONDS,
            new LinkedBlockingDeque<>(16),
            new ThreadFactoryBuilder()
                .setDaemon(true)
                .setPriority(Thread.MIN_PRIORITY)
                .setNameFormat(this.getClass().getSimpleName() + "-%d")
                .build(),
            new ThreadPoolExecutor.CallerRunsPolicy());
    createStateTrie(rootHash)
        .visitAll(
            node -> {
              markNode(node.getHash());
              node.getValue()
                  .ifPresent(value -> processAccountState(value, markingExecutorService));
            },
            markingExecutorService)
        .join() /* This will block on all the marking tasks to be _produced_ but doesn't guarantee that the marking tasks have been completed. */;
    markingExecutorService.shutdown();
    try {
      // This ensures that the marking tasks complete.
      markingExecutorService.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
      LOG.info("Interrupted while marking", e);
      // Restore the interrupt status so callers (e.g. a shutdown path) can observe and act on
      // the interruption; swallowing it here would silently hide the cancellation request.
      Thread.currentThread().interrupt();
    }
    markStopwatch.stop();
    LOG.debug("Completed marking used nodes for pruning");
  }

  /**
   * Sweeps all state older than {@code markedBlockNumber}: first removes unmarked state roots
   * (walking backwards until a root is no longer in storage), then prunes every remaining unmarked
   * node, and finally clears the marks ready for the next cycle.
   *
   * @param markedBlockNumber the block whose state was marked; all earlier unmarked state is swept
   */
  public void sweepBefore(final long markedBlockNumber) {
    sweepOperationCounter.inc();
    LOG.debug("Sweeping unused nodes");
    // Sweep state roots first, walking backwards until we get to a state root that isn't in the
    // storage
    long prunedNodeCount = 0;
    ForestWorldStateKeyValueStorage.Updater updater = worldStateKeyValueStorage.updater();
    for (long blockNumber = markedBlockNumber - 1; blockNumber >= 0; blockNumber--) {
      final BlockHeader blockHeader = blockchain.getBlockHeader(blockNumber).get();
      final Hash candidateStateRootHash = blockHeader.getStateRoot();
      if (!worldStateKeyValueStorage.isWorldStateAvailable(candidateStateRootHash)) {
        break;
      }
      if (!isMarked(candidateStateRootHash)) {
        updater.removeAccountStateTrieNode(candidateStateRootHash);
        prunedNodeCount++;
        // Commit in batches to bound transaction size.
        if (prunedNodeCount % operationsPerTransaction == 0) {
          updater.commit();
          updater = worldStateKeyValueStorage.updater();
        }
      }
    }
    updater.commit();
    // Sweep non-state-root nodes
    prunedNodeCount += worldStateKeyValueStorage.prune(this::isMarked);
    sweptNodesCounter.inc(prunedNodeCount);
    clearMarks();
    LOG.debug("Completed sweeping unused nodes");
  }

  /** Detaches the node-added listener and clears all marks; call when pruning stops. */
  public void cleanup() {
    worldStateKeyValueStorage.removeNodeAddedListener(nodeAddedListenerId);
    clearMarks();
  }

  /** Discards all persisted and pending marks. */
  public void clearMarks() {
    markStorage.clear();
    pendingMarks.clear();
  }

  private boolean isMarked(final Bytes32 key) {
    return pendingMarks.contains(key) || markStorage.containsKey(key.toArrayUnsafe());
  }

  private boolean isMarked(final byte[] key) {
    return pendingMarks.contains(Bytes32.wrap(key)) || markStorage.containsKey(key);
  }

  /** Builds a read-only view of the account state trie rooted at {@code rootHash}. */
  private MerkleTrie<Bytes32, Bytes> createStateTrie(final Bytes32 rootHash) {
    return new StoredMerklePatriciaTrie<>(
        (location, hash) -> worldStateKeyValueStorage.getAccountStateTrieNode(hash),
        rootHash,
        Function.identity(),
        Function.identity());
  }

  /** Builds a read-only view of a contract storage trie rooted at {@code rootHash}. */
  private MerkleTrie<Bytes32, Bytes> createStorageTrie(final Bytes32 rootHash) {
    return new StoredMerklePatriciaTrie<>(
        (location, hash) -> worldStateKeyValueStorage.getAccountStorageTrieNode(hash),
        rootHash,
        Function.identity(),
        Function.identity());
  }

  /**
   * Marks the code hash of an account and schedules a parallel visit of its storage trie; the RLP
   * {@code value} is a leaf from the state trie.
   */
  private void processAccountState(final Bytes value, final ExecutorService executorService) {
    final StateTrieAccountValue accountValue = StateTrieAccountValue.readFrom(RLP.input(value));
    markNode(accountValue.getCodeHash());
    createStorageTrie(accountValue.getStorageRoot())
        .visitAll(storageNode -> markNode(storageNode.getHash()), executorService);
  }

  /** Marks a single node hash as in-use. */
  @VisibleForTesting
  public void markNode(final Bytes32 hash) {
    markThenMaybeFlush(() -> pendingMarks.add(hash), 1);
  }

  /** Marks a batch of node hashes (used by the node-added listener). */
  private void markNodes(final Collection<Bytes32> nodeHashes) {
    markThenMaybeFlush(() -> pendingMarks.addAll(nodeHashes), nodeHashes.size());
  }

  /**
   * Runs {@code nodeMarker} under the shared read lock, then flushes the pending marks to storage
   * (under the write lock) once the pending set reaches the batch size.
   */
  private void markThenMaybeFlush(final Runnable nodeMarker, final int numberOfNodes) {
    // We use the read lock here because pendingMarks is threadsafe and we want to allow all the
    // marking threads access simultaneously.
    final Lock markLock = pendingMarksLock.readLock();
    markLock.lock();
    try {
      nodeMarker.run();
    } finally {
      markLock.unlock();
    }
    markedNodesCounter.inc(numberOfNodes);
    // However, when the size of pendingMarks grows too large, we want all the threads to stop
    // adding because we're going to clear the set.
    // Therefore, we need to take out a write lock.
    if (pendingMarks.size() >= operationsPerTransaction) {
      final Lock flushLock = pendingMarksLock.writeLock();
      flushLock.lock();
      try {
        // Check once again that the condition holds. If it doesn't, that means another thread
        // already flushed them.
        if (pendingMarks.size() >= operationsPerTransaction) {
          flushPendingMarks();
        }
      } finally {
        flushLock.unlock();
      }
    }
  }

  /** Persists all pending marks to the mark storage in one transaction, then clears the buffer. */
  private void flushPendingMarks() {
    final KeyValueStorageTransaction transaction = markStorage.startTransaction();
    pendingMarks.forEach(node -> transaction.put(node.toArrayUnsafe(), IN_USE));
    transaction.commit();
    pendingMarks.clear();
  }
}

View File

@@ -1,199 +0,0 @@
/*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.trie.forest.pruner;
import static com.google.common.base.Preconditions.checkArgument;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.chain.BlockAddedEvent;
import org.hyperledger.besu.ethereum.chain.Blockchain;
import org.hyperledger.besu.ethereum.core.BlockHeader;
import org.hyperledger.besu.ethereum.trie.MerkleTrieException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Drives the Forest mark-sweep pruning state machine.
 *
 * <p>Observes new canonical chain heads and advances through {@link PruningPhase}: pick a mark
 * block, wait {@code blockConfirmations} blocks, mark its state trie, then sweep once
 * {@code blocksRetained} blocks have passed. The actual mark/sweep work is delegated to a
 * {@link MarkSweepPruner} and executed on a single-threaded executor.
 */
public class Pruner {
  private static final Logger LOG = LoggerFactory.getLogger(Pruner.class);
  private final MarkSweepPruner pruningStrategy;
  private final Blockchain blockchain;
  private Long blockAddedObserverId;
  // Number of recent blocks whose full state must be retained when sweeping.
  private final long blocksRetained;
  private final AtomicReference<PruningPhase> pruningPhase =
      new AtomicReference<>(PruningPhase.IDLE);
  // Block number chosen as the mark point for the current pruning cycle.
  private volatile long markBlockNumber = 0;
  private volatile BlockHeader markedBlockHeader;
  // Confirmations required before the mark block's state trie is marked.
  private final long blockConfirmations;
  private final AtomicReference<State> state = new AtomicReference<>(State.IDLE);
  private final ExecutorService executorService;

  /**
   * Test-visible constructor allowing an arbitrary executor (e.g. a synchronous mock) to be
   * injected.
   *
   * @param pruningStrategy performs the actual mark and sweep work
   * @param blockchain observed for new canonical heads that advance the state machine
   * @param prunerConfiguration supplies the confirmation and retention windows
   * @param executorService executor on which mark/sweep tasks run
   */
  @VisibleForTesting
  Pruner(
      final MarkSweepPruner pruningStrategy,
      final Blockchain blockchain,
      final PrunerConfiguration prunerConfiguration,
      final ExecutorService executorService) {
    this.pruningStrategy = pruningStrategy;
    this.blockchain = blockchain;
    this.executorService = executorService;
    this.blocksRetained = prunerConfiguration.getBlocksRetained();
    this.blockConfirmations = prunerConfiguration.getBlockConfirmations();
    checkArgument(
        blockConfirmations >= 0 && blockConfirmations < blocksRetained,
        "blockConfirmations and blocksRetained must be non-negative. blockConfirmations must be less than blockRetained.");
  }

  /** Production constructor: runs pruning on a dedicated low-priority daemon thread. */
  public Pruner(
      final MarkSweepPruner pruningStrategy,
      final Blockchain blockchain,
      final PrunerConfiguration prunerConfiguration) {
    this(
        pruningStrategy,
        blockchain,
        prunerConfiguration,
        // This is basically the out-of-the-box `Executors.newSingleThreadExecutor` except we want
        // the `corePoolSize` to be 0
        new ThreadPoolExecutor(
            0,
            1,
            0L,
            TimeUnit.MILLISECONDS,
            new LinkedBlockingQueue<>(),
            new ThreadFactoryBuilder()
                .setDaemon(true)
                .setPriority(Thread.MIN_PRIORITY)
                .setNameFormat("StatePruning-%d")
                .build()));
  }

  /** Starts pruning: prepares the strategy and begins observing new blocks. Idempotent. */
  public void start() {
    execute(
        () -> {
          if (state.compareAndSet(State.IDLE, State.RUNNING)) {
            LOG.info("Starting Pruner.");
            pruningStrategy.prepare();
            blockAddedObserverId = blockchain.observeBlockAdded(this::handleNewBlock);
          }
        });
  }

  /** Stops pruning: cleans up marks, detaches the observer, and shuts the executor down. */
  public void stop() {
    if (state.compareAndSet(State.RUNNING, State.STOPPED)) {
      LOG.info("Stopping Pruner.");
      pruningStrategy.cleanup();
      blockchain.removeObserver(blockAddedObserverId);
      executorService.shutdownNow();
    }
  }

  /** Waits up to 10 seconds for the pruning executor to terminate after {@link #stop()}. */
  public void awaitStop() throws InterruptedException {
    if (!executorService.awaitTermination(10, TimeUnit.SECONDS)) {
      LOG.error("Failed to shutdown Pruner executor service.");
    }
  }

  /**
   * Advances the pruning state machine on each new canonical head: IDLE -> awaiting confirmations
   * -> MARKING -> (MARKING_COMPLETE, set by the mark task) -> SWEEPING -> IDLE.
   */
  private void handleNewBlock(final BlockAddedEvent event) {
    if (!event.isNewCanonicalHead()) {
      return;
    }
    final long blockNumber = event.getBlock().getHeader().getNumber();
    if (pruningPhase.compareAndSet(
        PruningPhase.IDLE, PruningPhase.MARK_BLOCK_CONFIRMATIONS_AWAITING)) {
      markBlockNumber = blockNumber;
    } else if (blockNumber >= markBlockNumber + blockConfirmations
        && pruningPhase.compareAndSet(
            PruningPhase.MARK_BLOCK_CONFIRMATIONS_AWAITING, PruningPhase.MARKING)) {
      markedBlockHeader = blockchain.getBlockHeader(markBlockNumber).get();
      mark(markedBlockHeader);
    } else if (blockNumber >= markBlockNumber + blocksRetained
        // A reorged-away mark block would leave marks for a non-canonical state; skip the sweep.
        && blockchain.blockIsOnCanonicalChain(markedBlockHeader.getHash())
        && pruningPhase.compareAndSet(PruningPhase.MARKING_COMPLETE, PruningPhase.SWEEPING)) {
      sweep();
    }
  }

  /** Schedules marking of the given header's state trie on the pruning executor. */
  private void mark(final BlockHeader header) {
    final Hash stateRoot = header.getStateRoot();
    LOG.info(
        "Begin marking used nodes for pruning. Block number: {} State root: {}",
        markBlockNumber,
        stateRoot);
    execute(
        () -> {
          pruningStrategy.mark(stateRoot);
          pruningPhase.compareAndSet(PruningPhase.MARKING, PruningPhase.MARKING_COMPLETE);
        });
  }

  /** Schedules the sweep of all state older than the mark block on the pruning executor. */
  private void sweep() {
    LOG.info(
        "Begin sweeping unused nodes for pruning. Keeping full state for blocks {} to {}",
        markBlockNumber,
        markBlockNumber + blocksRetained);
    execute(
        () -> {
          pruningStrategy.sweepBefore(markBlockNumber);
          pruningPhase.compareAndSet(PruningPhase.SWEEPING, PruningPhase.IDLE);
        });
  }

  /**
   * Submits {@code action} to the pruning executor, treating {@link MerkleTrieException} as fatal
   * (corrupted database) and any other exception as a recoverable failure that resets the cycle.
   *
   * <p>NOTE(review): with an asynchronous executor, exceptions thrown inside {@code action}
   * surface on the worker thread rather than propagating through {@code execute(...)} here; this
   * try/catch appears effective only for synchronous executors (as used in tests) and for
   * submission-time failures — confirm whether that is the intended behavior.
   */
  private void execute(final Runnable action) {
    try {
      executorService.execute(action);
    } catch (final MerkleTrieException mte) {
      LOG.error(
          "An unrecoverable error occurred while pruning. The database directory must be deleted and resynced.",
          mte);
      System.exit(1);
    } catch (final Exception e) {
      LOG.error(
          "An unexpected error occurred in the {} pruning phase: {}. Reattempting.",
          getPruningPhase(),
          e.getMessage());
      pruningStrategy.clearMarks();
      pruningPhase.set(PruningPhase.IDLE);
    }
  }

  /** Returns the current phase of the pruning cycle (test-visible). */
  PruningPhase getPruningPhase() {
    return pruningPhase.get();
  }

  /** Phases of a single mark-sweep pruning cycle. */
  enum PruningPhase {
    IDLE,
    MARK_BLOCK_CONFIRMATIONS_AWAITING,
    MARKING,
    MARKING_COMPLETE,
    SWEEPING;
  }

  /** Lifecycle of the pruner itself (distinct from the per-cycle {@link PruningPhase}). */
  private enum State {
    IDLE,
    RUNNING,
    STOPPED
  }
}

View File

@@ -1,42 +0,0 @@
/*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.trie.forest.pruner;
/**
 * Immutable value object holding the two tuning parameters of the mark-sweep pruner: how many
 * recent blocks keep their full world state, and how many confirmations a candidate mark block
 * needs before the mark phase may begin.
 */
public class PrunerConfiguration {
  public static final int DEFAULT_PRUNING_BLOCKS_RETAINED = 1024;
  public static final int DEFAULT_PRUNING_BLOCK_CONFIRMATIONS = 10;

  private final int retainedBlocks;
  private final int confirmationDepth;

  /**
   * Creates a configuration with explicit values.
   *
   * @param blockConfirmationsBeforeMarking confirmations required before marking starts
   * @param blocksRetainedBeforeSweeping recent blocks whose state must survive sweeping
   */
  public PrunerConfiguration(
      final int blockConfirmationsBeforeMarking, final int blocksRetainedBeforeSweeping) {
    confirmationDepth = blockConfirmationsBeforeMarking;
    retainedBlocks = blocksRetainedBeforeSweeping;
  }

  /**
   * @return a configuration using the default confirmation and retention values
   */
  public static PrunerConfiguration getDefault() {
    return new PrunerConfiguration(
        DEFAULT_PRUNING_BLOCK_CONFIRMATIONS, DEFAULT_PRUNING_BLOCKS_RETAINED);
  }

  /**
   * @return the number of recent blocks whose full state is kept
   */
  public int getBlocksRetained() {
    return retainedBlocks;
  }

  /**
   * @return the number of confirmations required before marking
   */
  public int getBlockConfirmations() {
    return confirmationDepth;
  }
}

View File

@@ -1,293 +0,0 @@
/*
* Copyright Hyperledger Besu Contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.trie.forest.pruner;
import static org.assertj.core.api.Assertions.assertThat;
import static org.hyperledger.besu.ethereum.core.InMemoryKeyValueStorageProvider.createInMemoryBlockchain;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.inOrder;
import static org.mockito.Mockito.spy;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.chain.MutableBlockchain;
import org.hyperledger.besu.ethereum.core.Block;
import org.hyperledger.besu.ethereum.core.BlockDataGenerator;
import org.hyperledger.besu.ethereum.core.BlockDataGenerator.BlockOptions;
import org.hyperledger.besu.ethereum.core.BlockHeader;
import org.hyperledger.besu.ethereum.core.MutableWorldState;
import org.hyperledger.besu.ethereum.core.TransactionReceipt;
import org.hyperledger.besu.ethereum.rlp.RLP;
import org.hyperledger.besu.ethereum.storage.keyvalue.WorldStatePreimageKeyValueStorage;
import org.hyperledger.besu.ethereum.trie.MerkleTrie;
import org.hyperledger.besu.ethereum.trie.forest.ForestWorldStateArchive;
import org.hyperledger.besu.ethereum.trie.forest.storage.ForestWorldStateKeyValueStorage;
import org.hyperledger.besu.ethereum.trie.patricia.StoredMerklePatriciaTrie;
import org.hyperledger.besu.ethereum.worldstate.StateTrieAccountValue;
import org.hyperledger.besu.ethereum.worldstate.WorldStateArchive;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorageCoordinator;
import org.hyperledger.besu.evm.internal.EvmConfiguration;
import org.hyperledger.besu.evm.worldstate.WorldState;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
import org.hyperledger.besu.services.kvstore.InMemoryKeyValueStorage;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.bytes.Bytes32;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.InOrder;
import org.mockito.junit.jupiter.MockitoExtension;
@ExtendWith(MockitoExtension.class)
class MarkSweepPrunerTest {
private final BlockDataGenerator gen = new BlockDataGenerator();
private final NoOpMetricsSystem metricsSystem = new NoOpMetricsSystem();
private final Map<Bytes, Optional<byte[]>> hashValueStore = spy(new HashMap<>());
private final InMemoryKeyValueStorage stateStorage = new TestInMemoryStorage(hashValueStore);
private final ForestWorldStateKeyValueStorage worldStateKeyValueStorage =
spy(new ForestWorldStateKeyValueStorage(stateStorage));
private final WorldStateArchive worldStateArchive =
new ForestWorldStateArchive(
new WorldStateStorageCoordinator(worldStateKeyValueStorage),
new WorldStatePreimageKeyValueStorage(new InMemoryKeyValueStorage()),
EvmConfiguration.DEFAULT);
private final InMemoryKeyValueStorage markStorage = new InMemoryKeyValueStorage();
private final Block genesisBlock = gen.genesisBlock();
private final MutableBlockchain blockchain = createInMemoryBlockchain(genesisBlock);
@Test
void mark_marksAllExpectedNodes() {
// Verifies that after mark + sweep, storage holds exactly the nodes reachable from the
// marked block's state root and that all other state roots are gone.
final MarkSweepPruner pruner =
new MarkSweepPruner(worldStateKeyValueStorage, blockchain, markStorage, metricsSystem);
// Generate accounts and save corresponding state root
final int numBlocks = 15;
final int numAccounts = 10;
generateBlockchainData(numBlocks, numAccounts);
final int markBlockNumber = 10;
final BlockHeader markBlock = blockchain.getBlockHeader(markBlockNumber).get();
// Collect the nodes we expect to keep
final Set<Bytes> expectedNodes = collectWorldStateNodes(markBlock.getStateRoot());
assertThat(hashValueStore).hasSizeGreaterThan(expectedNodes.size()); // Sanity check
// Mark and sweep
pruner.mark(markBlock.getStateRoot());
pruner.sweepBefore(markBlock.getNumber());
// Assert that the block we marked is still present and all accounts are accessible
assertThat(worldStateArchive.get(markBlock.getStateRoot(), markBlock.getHash())).isPresent();
final WorldState markedState =
worldStateArchive.get(markBlock.getStateRoot(), markBlock.getHash()).get();
// Traverse accounts and make sure all are accessible
// Each generated block adds numAccounts accounts, so block N holds N * numAccounts total.
final int expectedAccounts = numAccounts * markBlockNumber;
// Stream limit of 2x expected ensures we would detect unexpected extra accounts.
final long accounts = markedState.streamAccounts(Bytes32.ZERO, expectedAccounts * 2).count();
assertThat(accounts).isEqualTo(expectedAccounts);
// Traverse storage to ensure that all storage is accessible
markedState
.streamAccounts(Bytes32.ZERO, expectedAccounts * 2)
.forEach(a -> a.storageEntriesFrom(Bytes32.ZERO, 1000));
// All other state roots should have been removed
for (int i = 0; i < numBlocks; i++) {
final BlockHeader curHeader = blockchain.getBlockHeader(i + 1L).get();
if (curHeader.getNumber() == markBlock.getNumber()) {
continue;
}
assertThat(worldStateArchive.get(curHeader.getStateRoot(), curHeader.getHash())).isEmpty();
}
// Check that storage contains only the values we expect
assertThat(hashValueStore).hasSameSizeAs(expectedNodes);
assertThat(hashValueStore.values().stream().map(Optional::get))
.containsExactlyInAnyOrderElementsOf(
expectedNodes.stream().map(Bytes::toArrayUnsafe).collect(Collectors.toSet()));
}
@Test
void sweepBefore_shouldSweepStateRootFirst() {
// Verifies that for each stale state root the pruner checks availability and removes the
// root before the bulk prune of unmarked nodes runs.
final MarkSweepPruner pruner =
new MarkSweepPruner(worldStateKeyValueStorage, blockchain, markStorage, metricsSystem);
// Generate accounts and save corresponding state root
final int numBlocks = 15;
final int numAccounts = 10;
generateBlockchainData(numBlocks, numAccounts);
final int markBlockNumber = 10;
final BlockHeader markBlock = blockchain.getBlockHeader(markBlockNumber).get();
// Collect state roots we expect to be swept first
final List<Bytes32> stateRoots = new ArrayList<>();
for (int i = markBlockNumber - 1; i >= 0; i--) {
stateRoots.add(blockchain.getBlockHeader(i).get().getStateRoot());
}
// Mark and sweep
pruner.mark(markBlock.getStateRoot());
pruner.sweepBefore(markBlock.getNumber());
// we use individual inOrders because we only want to make sure we remove a state root before
// the full prune but without enforcing an ordering between the state root removals
// NOTE(review): worldStateKeyValueStorage appears twice in the inOrder(...) argument list;
// the duplicate looks redundant — confirm against Mockito's InOrder contract.
stateRoots.forEach(
stateRoot -> {
final InOrder thisRootsOrdering =
inOrder(worldStateKeyValueStorage, hashValueStore, worldStateKeyValueStorage);
thisRootsOrdering.verify(worldStateKeyValueStorage).isWorldStateAvailable(stateRoot);
thisRootsOrdering.verify(hashValueStore).keySet();
thisRootsOrdering.verify(worldStateKeyValueStorage).prune(any());
});
}
@Test
void sweepBefore_shouldNotRemoveMarkedStateRoots() {
// Verifies that a state root explicitly marked via markNode(...) survives the sweep even
// though its block is older than the sweep boundary.
final MarkSweepPruner pruner =
new MarkSweepPruner(worldStateKeyValueStorage, blockchain, markStorage, metricsSystem);
// Generate accounts and save corresponding state root
final int numBlocks = 15;
final int numAccounts = 10;
generateBlockchainData(numBlocks, numAccounts);
final int markBlockNumber = 10;
final BlockHeader markBlock = blockchain.getBlockHeader(markBlockNumber).get();
// Collect state roots we expect to be swept first
final List<Bytes32> stateRoots = new ArrayList<>();
for (int i = markBlockNumber - 1; i >= 0; i--) {
stateRoots.add(blockchain.getBlockHeader(i).get().getStateRoot());
}
// Mark
pruner.mark(markBlock.getStateRoot());
// Mark an extra state root
// Remove it from the expected-sweep list: being marked, it must NOT be swept below.
Hash markedRoot = Hash.wrap(stateRoots.remove(stateRoots.size() / 2));
pruner.markNode(markedRoot);
// Sweep
pruner.sweepBefore(markBlock.getNumber());
// we use individual inOrders because we only want to make sure we remove a state root before
// the full prune but without enforcing an ordering between the state root removals
stateRoots.forEach(
stateRoot -> {
final InOrder thisRootsOrdering =
inOrder(worldStateKeyValueStorage, hashValueStore, worldStateKeyValueStorage);
thisRootsOrdering.verify(worldStateKeyValueStorage).isWorldStateAvailable(stateRoot);
thisRootsOrdering.verify(hashValueStore).keySet();
thisRootsOrdering.verify(worldStateKeyValueStorage).prune(any());
});
// The explicitly marked root must still be present in backing storage after the sweep.
assertThat(stateStorage.containsKey(markedRoot.toArray())).isTrue();
}
/**
 * Seeds the test blockchain with {@code numBlocks} sequential blocks, each adding {@code
 * numAccounts} random contract accounts (with non-empty storage) on top of the previous
 * block's world state.
 */
private void generateBlockchainData(final int numBlocks, final int numAccounts) {
  Block previousBlock = blockchain.getChainHeadBlock();
  for (int blockIndex = 0; blockIndex < numBlocks; blockIndex++) {
    final BlockHeader previousHeader = previousBlock.getHeader();
    // Mutate the parent's world state by inserting fresh random contract accounts.
    final MutableWorldState mutableState =
        worldStateArchive
            .getMutable(previousHeader.getStateRoot(), previousHeader.getHash())
            .get();
    gen.createRandomContractAccountsWithNonEmptyStorage(mutableState, numAccounts);
    final Hash newStateRoot = mutableState.rootHash();
    // Build the child block that commits to the updated state root, then append it.
    final Block nextBlock =
        gen.block(
            BlockOptions.create()
                .setStateRoot(newStateRoot)
                .setBlockNumber(previousHeader.getNumber() + 1L)
                .setParentHash(previousBlock.getHash()));
    blockchain.appendBlock(nextBlock, gen.receipts(nextBlock));
    previousBlock = nextBlock;
  }
}
/** Returns every trie node and code blob reachable from the given state root. */
private Set<Bytes> collectWorldStateNodes(final Hash stateRootHash) {
  // Delegate to the collecting overload with a fresh, empty accumulator.
  return collectWorldStateNodes(stateRootHash, new HashSet<>());
}
// Walks the state trie rooted at stateRootHash and adds into `collector`: all state trie
// nodes, all storage trie nodes of every account, and every account's code blob. Returns
// the same collector for chaining.
private Set<Bytes> collectWorldStateNodes(final Hash stateRootHash, final Set<Bytes> collector) {
final List<Hash> storageRoots = new ArrayList<>();
final MerkleTrie<Bytes32, Bytes> stateTrie = createStateTrie(stateRootHash);
// Collect storage roots and code
stateTrie
.entriesFrom(Bytes32.ZERO, 1000)
.forEach(
(key, val) -> {
// Decode the RLP-encoded account to reach its code hash and storage root.
final StateTrieAccountValue accountValue =
StateTrieAccountValue.readFrom(RLP.input(val));
stateStorage
.get(accountValue.getCodeHash().toArray())
.ifPresent(v -> collector.add(Bytes.wrap(v)));
storageRoots.add(accountValue.getStorageRoot());
});
// Collect state nodes
collectTrieNodes(stateTrie, collector);
// Collect storage nodes
for (Hash storageRoot : storageRoots) {
final MerkleTrie<Bytes32, Bytes> storageTrie = createStorageTrie(storageRoot);
collectTrieNodes(storageTrie, collector);
}
return collector;
}
/**
 * Visits every node of {@code trie} and adds the encoded bytes of each node that is stored
 * under its own hash (plus the root node itself) to {@code collector}.
 */
private void collectTrieNodes(final MerkleTrie<Bytes32, Bytes> trie, final Set<Bytes> collector) {
  final Bytes32 root = trie.getRootHash();
  trie.visitAll(
      visitedNode -> {
        // Only hash-referenced nodes (and the root) exist as standalone storage entries.
        if (visitedNode.isReferencedByHash() || visitedNode.getHash().equals(root)) {
          collector.add(visitedNode.getEncodedBytes());
        }
      });
}
/** Builds a trie view over the account state rooted at {@code rootHash}, backed by the test storage. */
private MerkleTrie<Bytes32, Bytes> createStateTrie(final Bytes32 rootHash) {
  // The node loader ignores the location argument: Forest storage keys nodes by hash only.
  return new StoredMerklePatriciaTrie<>(
      (unusedLocation, nodeHash) -> worldStateKeyValueStorage.getAccountStateTrieNode(nodeHash),
      rootHash,
      Function.identity(),
      Function.identity());
}
/** Builds a trie view over an account's storage rooted at {@code rootHash}, backed by the test storage. */
private MerkleTrie<Bytes32, Bytes> createStorageTrie(final Bytes32 rootHash) {
  // The node loader ignores the location argument: Forest storage keys nodes by hash only.
  return new StoredMerklePatriciaTrie<>(
      (unusedLocation, nodeHash) -> worldStateKeyValueStorage.getAccountStorageTrieNode(nodeHash),
      rootHash,
      Function.identity(),
      Function.identity());
}
// Proxy class so that we have access to the constructor that takes our own map
private static class TestInMemoryStorage extends InMemoryKeyValueStorage {
// Delegates to the map-backed constructor so the test can inspect hashValueStore directly.
public TestInMemoryStorage(final Map<Bytes, Optional<byte[]>> hashValueStore) {
super(hashValueStore);
}
}
}

View File

@@ -1,214 +0,0 @@
/*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.trie.forest.pruner;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.chain.Blockchain;
import org.hyperledger.besu.ethereum.chain.BlockchainStorage;
import org.hyperledger.besu.ethereum.chain.DefaultBlockchain;
import org.hyperledger.besu.ethereum.chain.MutableBlockchain;
import org.hyperledger.besu.ethereum.core.Block;
import org.hyperledger.besu.ethereum.core.BlockDataGenerator;
import org.hyperledger.besu.ethereum.core.BlockDataGenerator.BlockOptions;
import org.hyperledger.besu.ethereum.core.TransactionReceipt;
import org.hyperledger.besu.ethereum.mainnet.MainnetBlockHeaderFunctions;
import org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueStoragePrefixedKeyBlockchainStorage;
import org.hyperledger.besu.ethereum.storage.keyvalue.VariablesKeyValueStorage;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
import org.hyperledger.besu.services.kvstore.InMemoryKeyValueStorage;
import org.hyperledger.besu.testutil.MockExecutorService;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
@ExtendWith(MockitoExtension.class)
// Unit tests for the Pruner lifecycle: it listens for block-added events, marks the state
// root that falls out of the confirmation window, and sweeps once the retention period ends.
public class PrunerTest {
private final NoOpMetricsSystem metricsSystem = new NoOpMetricsSystem();
private final BlockDataGenerator gen = new BlockDataGenerator();
// Mocked mark/sweep strategy; tests verify which calls the Pruner drives into it.
@Mock private MarkSweepPruner markSweepPruner;
// Executes submitted tasks synchronously so assertions can run immediately after events.
private final ExecutorService mockExecutorService = new MockExecutorService();
private final Block genesisBlock = gen.genesisBlock();
@Test
public void shouldMarkCorrectBlockAndSweep() throws ExecutionException, InterruptedException {
final BlockchainStorage blockchainStorage =
new KeyValueStoragePrefixedKeyBlockchainStorage(
new InMemoryKeyValueStorage(),
new VariablesKeyValueStorage(new InMemoryKeyValueStorage()),
new MainnetBlockHeaderFunctions(),
false);
final MutableBlockchain blockchain =
DefaultBlockchain.createMutable(genesisBlock, blockchainStorage, metricsSystem, 0);
// 0 confirmations, retain 1 block: marking and sweeping should trigger promptly.
final Pruner pruner =
new Pruner(markSweepPruner, blockchain, new PrunerConfiguration(0, 1), mockExecutorService);
pruner.start();
final Block block1 = appendBlockWithParent(blockchain, genesisBlock);
appendBlockWithParent(blockchain, block1);
appendBlockWithParent(blockchain, blockchain.getChainHeadBlock());
// block1's state root is marked, and everything before block 1 is swept.
verify(markSweepPruner).mark(block1.getHeader().getStateRoot());
verify(markSweepPruner).sweepBefore(1);
pruner.stop();
}
@Test
public void shouldOnlySweepAfterBlockConfirmationPeriodAndRetentionPeriodEnds() {
final BlockchainStorage blockchainStorage =
new KeyValueStoragePrefixedKeyBlockchainStorage(
new InMemoryKeyValueStorage(),
new VariablesKeyValueStorage(new InMemoryKeyValueStorage()),
new MainnetBlockHeaderFunctions(),
false);
final MutableBlockchain blockchain =
DefaultBlockchain.createMutable(genesisBlock, blockchainStorage, metricsSystem, 0);
// 1 confirmation, retain 2 blocks: neither mark nor sweep may happen early.
final Pruner pruner =
new Pruner(markSweepPruner, blockchain, new PrunerConfiguration(1, 2), mockExecutorService);
pruner.start();
final Hash markBlockStateRootHash =
appendBlockWithParent(blockchain, genesisBlock).getHeader().getStateRoot();
// After 1 block: not yet confirmed, so no marking and no sweeping.
verify(markSweepPruner, never()).mark(markBlockStateRootHash);
verify(markSweepPruner, never()).sweepBefore(anyLong());
appendBlockWithParent(blockchain, blockchain.getChainHeadBlock());
// After 2 blocks: confirmation period elapsed, so marking starts — but retention holds sweep.
verify(markSweepPruner).mark(markBlockStateRootHash);
verify(markSweepPruner, never()).sweepBefore(anyLong());
appendBlockWithParent(blockchain, blockchain.getChainHeadBlock());
// After 3 blocks: retention period elapsed too, so the sweep finally runs.
verify(markSweepPruner).sweepBefore(1);
pruner.stop();
}
@Test
public void abortsPruningWhenFullyMarkedBlockNoLongerOnCanonicalChain() {
final BlockchainStorage blockchainStorage =
new KeyValueStoragePrefixedKeyBlockchainStorage(
new InMemoryKeyValueStorage(),
new VariablesKeyValueStorage(new InMemoryKeyValueStorage()),
new MainnetBlockHeaderFunctions(),
false);
final MutableBlockchain blockchain =
DefaultBlockchain.createMutable(genesisBlock, blockchainStorage, metricsSystem, 0);
// start pruner so it can start handling block added events
final Pruner pruner =
new Pruner(markSweepPruner, blockchain, new PrunerConfiguration(0, 1), mockExecutorService);
pruner.start();
/*
Set up pre-marking state:
O <---- marking of this block's parent will begin when this block is added
|
| O <- this is a fork as of now (non-canonical)
O | <- this is the initially canonical block that will be marked
\/
O <--- the common ancestor when the reorg happens
*/
final Block initiallyCanonicalBlock = appendBlockWithParent(blockchain, genesisBlock);
appendBlockWithParent(blockchain, initiallyCanonicalBlock);
final Block forkBlock = appendBlockWithParent(blockchain, genesisBlock);
/*
Cause reorg:
Set up pre-marking state:
O
| O <---- this block causes a reorg; this branch becomes canonical
| O <---- which means that state here is referring to nodes from the common ancestor block,
O | <- because this was block at which marking began
\/
O
*/
appendBlockWithParent(blockchain, forkBlock);
// The reorged-away block was marked, but no sweep may run for it afterwards.
verify(markSweepPruner).mark(initiallyCanonicalBlock.getHeader().getStateRoot());
verify(markSweepPruner, never()).sweepBefore(anyLong());
pruner.stop();
}
@Test
public void shouldRejectInvalidArguments() {
final Blockchain mockchain = mock(Blockchain.class);
// Negative confirmation/retention values are invalid.
assertThatThrownBy(
() ->
new Pruner(
markSweepPruner,
mockchain,
new PrunerConfiguration(-1, -2),
mockExecutorService))
.isInstanceOf(IllegalArgumentException.class);
// Retention smaller than the confirmation count is invalid.
assertThatThrownBy(
() ->
new Pruner(
markSweepPruner,
mockchain,
new PrunerConfiguration(10, 8),
mockExecutorService))
.isInstanceOf(IllegalArgumentException.class);
// Retention equal to the confirmation count is also rejected.
assertThatThrownBy(
() ->
new Pruner(
markSweepPruner,
mockchain,
new PrunerConfiguration(10, 10),
mockExecutorService))
.isInstanceOf(IllegalArgumentException.class);
}
@Test
public void shouldCleanUpPruningStrategyOnShutdown() throws InterruptedException {
final BlockchainStorage blockchainStorage =
new KeyValueStoragePrefixedKeyBlockchainStorage(
new InMemoryKeyValueStorage(),
new VariablesKeyValueStorage(new InMemoryKeyValueStorage()),
new MainnetBlockHeaderFunctions(),
false);
final MutableBlockchain blockchain =
DefaultBlockchain.createMutable(genesisBlock, blockchainStorage, metricsSystem, 0);
final Pruner pruner =
new Pruner(markSweepPruner, blockchain, new PrunerConfiguration(0, 1), mockExecutorService);
pruner.start();
pruner.stop();
// Stopping the pruner must release the strategy's resources exactly once.
verify(markSweepPruner).cleanup();
}
// Appends a randomly generated child of `parent` to the chain and returns the new block.
private Block appendBlockWithParent(final MutableBlockchain blockchain, final Block parent) {
BlockOptions options =
new BlockOptions()
.setBlockNumber(parent.getHeader().getNumber() + 1)
.setParentHash(parent.getHash());
final Block newBlock = gen.block(options);
final List<TransactionReceipt> receipts = gen.receipts(newBlock);
blockchain.appendBlock(newBlock, receipts);
return newBlock;
}
}

View File

@@ -35,7 +35,6 @@ import org.hyperledger.besu.ethereum.eth.sync.state.SyncState;
import org.hyperledger.besu.ethereum.mainnet.ProtocolSchedule;
import org.hyperledger.besu.ethereum.storage.StorageProvider;
import org.hyperledger.besu.ethereum.trie.diffbased.bonsai.BonsaiWorldStateProvider;
import org.hyperledger.besu.ethereum.trie.forest.pruner.Pruner;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorageCoordinator;
import org.hyperledger.besu.metrics.BesuMetricCategory;
import org.hyperledger.besu.plugin.data.SyncStatus;
@@ -62,7 +61,6 @@ public class DefaultSynchronizer implements Synchronizer, UnverifiedForkchoiceLi
private static final Logger LOG = LoggerFactory.getLogger(DefaultSynchronizer.class);
private final Optional<Pruner> maybePruner;
private final SyncState syncState;
private final AtomicBoolean running = new AtomicBoolean(false);
private final Optional<BlockPropagationManager> blockPropagationManager;
@@ -79,7 +77,6 @@ public class DefaultSynchronizer implements Synchronizer, UnverifiedForkchoiceLi
final ProtocolContext protocolContext,
final WorldStateStorageCoordinator worldStateStorageCoordinator,
final BlockBroadcaster blockBroadcaster,
final Optional<Pruner> maybePruner,
final EthContext ethContext,
final SyncState syncState,
final Path dataDirectory,
@@ -88,7 +85,6 @@ public class DefaultSynchronizer implements Synchronizer, UnverifiedForkchoiceLi
final MetricsSystem metricsSystem,
final SyncTerminationCondition terminationCondition,
final PivotBlockSelector pivotBlockSelector) {
this.maybePruner = maybePruner;
this.syncState = syncState;
this.pivotBlockSelector = pivotBlockSelector;
this.protocolContext = protocolContext;
@@ -228,7 +224,6 @@ public class DefaultSynchronizer implements Synchronizer, UnverifiedForkchoiceLi
LOG.info("Stopping synchronizer");
fastSyncDownloader.ifPresent(FastSyncDownloader::stop);
fullSyncDownloader.ifPresent(FullSyncDownloader::stop);
maybePruner.ifPresent(Pruner::stop);
blockPropagationManager.ifPresent(
manager -> {
if (manager.isRunning()) {
@@ -239,11 +234,7 @@ public class DefaultSynchronizer implements Synchronizer, UnverifiedForkchoiceLi
}
@Override
public void awaitStop() throws InterruptedException {
if (maybePruner.isPresent()) {
maybePruner.get().awaitStop();
}
}
public void awaitStop() {}
private CompletableFuture<Void> handleSyncResult(final FastSyncState result) {
if (!running.get()) {
@@ -270,7 +261,6 @@ public class DefaultSynchronizer implements Synchronizer, UnverifiedForkchoiceLi
}
private CompletableFuture<Void> startFullSync() {
maybePruner.ifPresent(Pruner::start);
return fullSyncDownloader
.map(FullSyncDownloader::start)
.orElse(CompletableFuture.completedFuture(null))
@@ -376,7 +366,6 @@ public class DefaultSynchronizer implements Synchronizer, UnverifiedForkchoiceLi
LOG.info("Stopping block propagation.");
blockPropagationManager.ifPresent(BlockPropagationManager::stop);
LOG.info("Stopping the pruner.");
maybePruner.ifPresent(Pruner::stop);
running.set(false);
return null;
}