Reduce receipt size (#6602)

Signed-off-by: Jason Frame <jason.frame@consensys.net>
Authored by Jason Frame on 2024-03-26 11:00:05 +10:00, committed by GitHub
parent bb9ba13cf8
commit 15d54afdf9
48 changed files with 602 additions and 100 deletions

View File

@@ -7,6 +7,7 @@
- BFT networks won't start with SNAP or CHECKPOINT sync (previously Besu would start with this config but quietly fail to sync, so it's now more obvious that it won't work) [#6625](https://github.com/hyperledger/besu/pull/6625), [#6667](https://github.com/hyperledger/besu/pull/6667)
### Upcoming Breaking Changes
- Receipt compaction will be enabled by default in a future version of Besu. After this change it will not be possible to downgrade to the previous Besu version.
### Deprecations
@@ -25,6 +26,7 @@
- `eth_call` for blob tx allows for empty `maxFeePerBlobGas` [#6731](https://github.com/hyperledger/besu/pull/6731)
- Extend error handling of plugin RPC methods [#6759](https://github.com/hyperledger/besu/pull/6759)
- Added engine_newPayloadV4 and engine_getPayloadV4 methods [#6783](https://github.com/hyperledger/besu/pull/6783)
- Reduce storage size of receipts [#6602](https://github.com/hyperledger/besu/pull/6602)
### Bug fixes
- Fix txpool dump/restore race condition [#6665](https://github.com/hyperledger/besu/pull/6665)

View File

@@ -17,6 +17,7 @@
package org.hyperledger.besu.cli.options.stable;
import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.DEFAULT_BONSAI_MAX_LAYERS_TO_LOAD;
import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.DEFAULT_RECEIPT_COMPACTION_ENABLED;
import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.Unstable.DEFAULT_BONSAI_CODE_USING_CODE_HASH_ENABLED;
import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.Unstable.DEFAULT_BONSAI_LIMIT_TRIE_LOGS_ENABLED;
import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.Unstable.DEFAULT_BONSAI_TRIE_LOG_PRUNING_WINDOW_SIZE;
@@ -61,6 +62,12 @@ public class DataStorageOptions implements CLIOptions<DataStorageConfiguration>
arity = "1")
private Long bonsaiMaxLayersToLoad = DEFAULT_BONSAI_MAX_LAYERS_TO_LOAD;
@Option(
names = "--receipt-compaction-enabled",
description = "Enables compact storing of receipts (default: ${DEFAULT-VALUE}).",
arity = "1")
private Boolean receiptCompactionEnabled = DEFAULT_RECEIPT_COMPACTION_ENABLED;
@CommandLine.ArgGroup(validate = false)
private final DataStorageOptions.Unstable unstableOptions = new Unstable();
@@ -149,6 +156,7 @@ public class DataStorageOptions implements CLIOptions<DataStorageConfiguration>
final DataStorageOptions dataStorageOptions = DataStorageOptions.create();
dataStorageOptions.dataStorageFormat = domainObject.getDataStorageFormat();
dataStorageOptions.bonsaiMaxLayersToLoad = domainObject.getBonsaiMaxLayersToLoad();
dataStorageOptions.receiptCompactionEnabled = domainObject.getReceiptCompactionEnabled();
dataStorageOptions.unstableOptions.bonsaiLimitTrieLogsEnabled =
domainObject.getUnstable().getBonsaiLimitTrieLogsEnabled();
dataStorageOptions.unstableOptions.bonsaiTrieLogPruningWindowSize =
@@ -164,6 +172,7 @@ public class DataStorageOptions implements CLIOptions<DataStorageConfiguration>
return ImmutableDataStorageConfiguration.builder()
.dataStorageFormat(dataStorageFormat)
.bonsaiMaxLayersToLoad(bonsaiMaxLayersToLoad)
.receiptCompactionEnabled(receiptCompactionEnabled)
.unstable(
ImmutableDataStorageConfiguration.Unstable.builder()
.bonsaiLimitTrieLogsEnabled(unstableOptions.bonsaiLimitTrieLogsEnabled)
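
The new `--receipt-compaction-enabled` option above simply flips the `receiptCompactionEnabled` field on the `DataStorageConfiguration` built here. As a minimal sketch (not part of this diff; import paths assumed from the statics at the top of this file), the equivalent programmatic configuration would be:

```java
// Sketch only: the programmatic equivalent of --receipt-compaction-enabled=true.
import static org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration.DEFAULT_BONSAI_MAX_LAYERS_TO_LOAD;

import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration;
import org.hyperledger.besu.ethereum.worldstate.ImmutableDataStorageConfiguration;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;

public class ReceiptCompactionConfigExample {
  public static DataStorageConfiguration withCompactedReceipts() {
    return ImmutableDataStorageConfiguration.builder()
        .dataStorageFormat(DataStorageFormat.BONSAI)
        .bonsaiMaxLayersToLoad(DEFAULT_BONSAI_MAX_LAYERS_TO_LOAD)
        .receiptCompactionEnabled(true) // the new flag introduced in this PR
        .build();
  }
}
```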

View File

@@ -564,7 +564,8 @@ public abstract class BesuControllerBuilder implements MiningParameterOverrides
storageProvider.createWorldStateStorageCoordinator(dataStorageConfiguration);
final BlockchainStorage blockchainStorage =
storageProvider.createBlockchainStorage(protocolSchedule, variablesStorage);
storageProvider.createBlockchainStorage(
protocolSchedule, variablesStorage, dataStorageConfiguration);
final MutableBlockchain blockchain =
DefaultBlockchain.createMutable(

View File

@@ -67,4 +67,38 @@ public class BesuConfigurationImpl implements BesuConfiguration {
public Wei getMinGasPrice() {
return miningParameters.getMinTransactionGasPrice();
}
@Override
public org.hyperledger.besu.plugin.services.storage.DataStorageConfiguration
getDataStorageConfiguration() {
return new DataStoreConfigurationImpl(dataStorageConfiguration);
}
/**
* A concrete implementation of DataStorageConfiguration which is used in the Besu plugin framework.
*/
public static class DataStoreConfigurationImpl
implements org.hyperledger.besu.plugin.services.storage.DataStorageConfiguration {
private final DataStorageConfiguration dataStorageConfiguration;
/**
* Instantiate the concrete implementation of the plugin DataStorageConfiguration.
*
* @param dataStorageConfiguration The Ethereum core module data storage configuration
*/
public DataStoreConfigurationImpl(final DataStorageConfiguration dataStorageConfiguration) {
this.dataStorageConfiguration = dataStorageConfiguration;
}
@Override
public DataStorageFormat getDatabaseFormat() {
return dataStorageConfiguration.getDataStorageFormat();
}
@Override
public boolean getReceiptCompactionEnabled() {
return dataStorageConfiguration.getReceiptCompactionEnabled();
}
}
}

View File

@@ -110,6 +110,24 @@ public class DataStorageOptionsTest
"false");
}
@Test
public void receiptCompactionCanBeEnabled() {
internalTestSuccess(
dataStorageConfiguration ->
assertThat(dataStorageConfiguration.getReceiptCompactionEnabled()).isEqualTo(true),
"--receipt-compaction-enabled",
"true");
}
@Test
public void receiptCompactionCanBeDisabled() {
internalTestSuccess(
dataStorageConfiguration ->
assertThat(dataStorageConfiguration.getReceiptCompactionEnabled()).isEqualTo(false),
"--receipt-compaction-enabled",
"false");
}
@Override
protected DataStorageConfiguration createDefaultDomainObject() {
return DataStorageConfiguration.DEFAULT_CONFIG;

View File

@@ -108,12 +108,13 @@ public abstract class AbstractBftBesuControllerBuilderTest {
lenient().when(genesisConfigFile.getConfigOptions()).thenReturn(genesisConfigOptions);
lenient().when(genesisConfigOptions.getCheckpointOptions()).thenReturn(checkpointConfigOptions);
lenient()
.when(storageProvider.createBlockchainStorage(any(), any()))
.when(storageProvider.createBlockchainStorage(any(), any(), any()))
.thenReturn(
new KeyValueStoragePrefixedKeyBlockchainStorage(
new InMemoryKeyValueStorage(),
new VariablesKeyValueStorage(new InMemoryKeyValueStorage()),
new MainnetBlockHeaderFunctions()));
new MainnetBlockHeaderFunctions(),
false));
lenient()
.when(
storageProvider.createWorldStateStorageCoordinator(

View File

@@ -124,12 +124,13 @@ public class BesuControllerBuilderTest {
when(ethashConfigOptions.getFixedDifficulty()).thenReturn(OptionalLong.empty());
when(storageProvider.getStorageBySegmentIdentifier(any()))
.thenReturn(new InMemoryKeyValueStorage());
when(storageProvider.createBlockchainStorage(any(), any()))
when(storageProvider.createBlockchainStorage(any(), any(), any()))
.thenReturn(
new KeyValueStoragePrefixedKeyBlockchainStorage(
new InMemoryKeyValueStorage(),
new VariablesKeyValueStorage(new InMemoryKeyValueStorage()),
new MainnetBlockHeaderFunctions()));
new MainnetBlockHeaderFunctions(),
false));
when(synchronizerConfiguration.getDownloaderParallelism()).thenReturn(1);
when(synchronizerConfiguration.getTransactionsParallelism()).thenReturn(1);
when(synchronizerConfiguration.getComputationParallelism()).thenReturn(1);

View File

@@ -117,12 +117,13 @@ public class CliqueBesuControllerBuilderTest {
lenient().when(genesisConfigFile.getConfigOptions()).thenReturn(genesisConfigOptions);
lenient().when(genesisConfigOptions.getCheckpointOptions()).thenReturn(checkpointConfigOptions);
lenient()
.when(storageProvider.createBlockchainStorage(any(), any()))
.when(storageProvider.createBlockchainStorage(any(), any(), any()))
.thenReturn(
new KeyValueStoragePrefixedKeyBlockchainStorage(
new InMemoryKeyValueStorage(),
new VariablesKeyValueStorage(new InMemoryKeyValueStorage()),
new MainnetBlockHeaderFunctions()));
new MainnetBlockHeaderFunctions(),
false));
lenient()
.when(
storageProvider.createWorldStateStorageCoordinator(

View File

@@ -133,12 +133,13 @@ public class MergeBesuControllerBuilderTest {
when(genesisConfigOptions.getTerminalBlockHash()).thenReturn(Optional.of(Hash.ZERO));
lenient().when(genesisConfigOptions.getTerminalBlockNumber()).thenReturn(OptionalLong.of(1L));
lenient()
.when(storageProvider.createBlockchainStorage(any(), any()))
.when(storageProvider.createBlockchainStorage(any(), any(), any()))
.thenReturn(
new KeyValueStoragePrefixedKeyBlockchainStorage(
new InMemoryKeyValueStorage(),
new VariablesKeyValueStorage(new InMemoryKeyValueStorage()),
new MainnetBlockHeaderFunctions()));
new MainnetBlockHeaderFunctions(),
false));
lenient()
.when(storageProvider.getStorageBySegmentIdentifier(any()))
.thenReturn(new InMemoryKeyValueStorage());

View File

@@ -122,7 +122,8 @@ public class BesuEventsImplTest {
new KeyValueStoragePrefixedKeyBlockchainStorage(
new InMemoryKeyValueStorage(),
new VariablesKeyValueStorage(new InMemoryKeyValueStorage()),
new MainnetBlockHeaderFunctions()),
new MainnetBlockHeaderFunctions(),
false),
new NoOpMetricsSystem(),
0);

View File

@@ -215,6 +215,7 @@ ethstats-cacert-file="./root.cert"
# Data storage
data-storage-format="BONSAI"
bonsai-historical-block-limit=512
receipt-compaction-enabled=true
# feature flags
Xsecp256k1-native-enabled=false

View File

@@ -274,7 +274,7 @@ public class TransactionAdapter extends AdapterBase {
.map(
receipt -> {
final BytesValueRLPOutput rlpOutput = new BytesValueRLPOutput();
receipt.getReceipt().writeTo(rlpOutput);
receipt.getReceipt().writeToForNetwork(rlpOutput);
return rlpOutput.encoded();
});
}

View File

@@ -55,7 +55,7 @@ public class DebugGetRawReceipts extends AbstractBlockParameterOrBlockHashMethod
private String[] toRLP(final List<TransactionReceipt> receipts) {
return receipts.stream()
.map(receipt -> RLP.encode(receipt::writeTo).toHexString())
.map(receipt -> RLP.encode(receipt::writeToForNetwork).toHexString())
.toArray(String[]::new);
}
}

View File

@@ -311,7 +311,7 @@ public class StateBackupService {
bodyWriter.writeBytes(bodyOutput.encoded().toArrayUnsafe());
final BytesValueRLPOutput receiptsOutput = new BytesValueRLPOutput();
receiptsOutput.writeList(receipts.get(), TransactionReceipt::writeToWithRevertReason);
receiptsOutput.writeList(receipts.get(), (r, rlpOut) -> r.writeToForStorage(rlpOut, false));
receiptsWriter.writeBytes(receiptsOutput.encoded().toArrayUnsafe());
backupStatus.storedBlock = blockNumber;

View File

@@ -63,7 +63,8 @@ public class NewBlockHeadersSubscriptionServiceTest {
new KeyValueStoragePrefixedKeyBlockchainStorage(
new InMemoryKeyValueStorage(),
new VariablesKeyValueStorage(new InMemoryKeyValueStorage()),
new MainnetBlockHeaderFunctions());
new MainnetBlockHeaderFunctions(),
false);
private final Block genesisBlock = gen.genesisBlock();
private final MutableBlockchain blockchain =
DefaultBlockchain.createMutable(genesisBlock, blockchainStorage, new NoOpMetricsSystem(), 0);

View File

@@ -167,7 +167,8 @@ public abstract class AbstractBlockTransactionSelectorTest {
new KeyValueStoragePrefixedKeyBlockchainStorage(
new InMemoryKeyValueStorage(),
new VariablesKeyValueStorage(new InMemoryKeyValueStorage()),
new MainnetBlockHeaderFunctions()),
new MainnetBlockHeaderFunctions(),
false),
new NoOpMetricsSystem(),
0);

View File

@@ -30,6 +30,7 @@ import java.util.Objects;
import java.util.Optional;
import java.util.stream.Collectors;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.MoreObjects;
import org.apache.tuweni.bytes.Bytes;
@@ -169,23 +170,26 @@ public class TransactionReceipt implements org.hyperledger.besu.plugin.data.Tran
*
* @param out The RLP output to write to
*/
public void writeTo(final RLPOutput out) {
writeTo(out, false);
public void writeToForNetwork(final RLPOutput out) {
writeTo(out, false, false);
}
public void writeToWithRevertReason(final RLPOutput out) {
writeTo(out, true);
public void writeToForStorage(final RLPOutput out, final boolean compacted) {
writeTo(out, true, compacted);
}
private void writeTo(final RLPOutput rlpOutput, final boolean withRevertReason) {
@VisibleForTesting
void writeTo(final RLPOutput rlpOutput, final boolean withRevertReason, final boolean compacted) {
if (transactionType.equals(TransactionType.FRONTIER)) {
writeToForReceiptTrie(rlpOutput, withRevertReason);
writeToForReceiptTrie(rlpOutput, withRevertReason, compacted);
} else {
rlpOutput.writeBytes(RLP.encode(out -> writeToForReceiptTrie(out, withRevertReason)));
rlpOutput.writeBytes(
RLP.encode(out -> writeToForReceiptTrie(out, withRevertReason, compacted)));
}
}
public void writeToForReceiptTrie(final RLPOutput rlpOutput, final boolean withRevertReason) {
public void writeToForReceiptTrie(
final RLPOutput rlpOutput, final boolean withRevertReason, final boolean compacted) {
if (!transactionType.equals(TransactionType.FRONTIER)) {
rlpOutput.writeIntScalar(transactionType.getSerializedType());
}
@@ -200,8 +204,10 @@ public class TransactionReceipt implements org.hyperledger.besu.plugin.data.Tran
rlpOutput.writeLongScalar(status);
}
rlpOutput.writeLongScalar(cumulativeGasUsed);
rlpOutput.writeBytes(bloomFilter);
rlpOutput.writeList(logs, Log::writeTo);
if (!compacted) {
rlpOutput.writeBytes(bloomFilter);
}
rlpOutput.writeList(logs, (log, logOutput) -> log.writeTo(logOutput, compacted));
if (withRevertReason && revertReason.isPresent()) {
rlpOutput.writeBytes(revertReason.get());
}
@@ -240,10 +246,21 @@ public class TransactionReceipt implements org.hyperledger.besu.plugin.data.Tran
// correct transaction receipt encoding to use.
final RLPInput firstElement = input.readAsRlp();
final long cumulativeGas = input.readLongScalar();
// The logs below will populate the bloom filter upon construction.
LogsBloomFilter bloomFilter = null;
final boolean hasLogs = !input.nextIsList() && input.nextSize() == LogsBloomFilter.BYTE_SIZE;
if (hasLogs) {
// The logs below will populate the bloom filter upon construction.
bloomFilter = LogsBloomFilter.readFrom(input);
}
// TODO consider validating that the logs and bloom filter match.
final LogsBloomFilter bloomFilter = LogsBloomFilter.readFrom(input);
final List<Log> logs = input.readList(Log::readFrom);
final boolean compacted = !hasLogs;
final List<Log> logs = input.readList(logInput -> Log.readFrom(logInput, compacted));
if (compacted) {
bloomFilter = LogsBloomFilter.builder().insertLogs(logs).build();
}
final Optional<Bytes> revertReason;
if (input.isEndOfCurrentList()) {
revertReason = Optional.empty();
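
The practical effect of the compacted path above: `writeToForStorage` always keeps the revert reason (as `writeToWithRevertReason` did before), while compacted encoding drops the 256-byte logs bloom filter, which `readFrom()` rebuilds from the decoded logs. A minimal round-trip sketch, mirroring the tests added later in this PR (`BlockDataGenerator` is the test helper used there):

```java
import org.apache.tuweni.bytes.Bytes;
import org.hyperledger.besu.ethereum.core.BlockDataGenerator;
import org.hyperledger.besu.ethereum.core.TransactionReceipt;
import org.hyperledger.besu.ethereum.rlp.RLP;

class CompactedReceiptRoundTripSketch {
  void roundTrip() {
    final TransactionReceipt receipt = new BlockDataGenerator().receipt();
    // Compacted encoding: withRevertReason=false, compacted=true.
    final Bytes compacted =
        RLP.encode(rlpOut -> receipt.writeToForReceiptTrie(rlpOut, false, true));
    // readFrom() detects the missing bloom filter and recomputes it from the logs.
    final TransactionReceipt decoded = TransactionReceipt.readFrom(RLP.input(compacted));
    assert decoded.equals(receipt);
  }
}
```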

View File

@@ -118,7 +118,8 @@ public final class BodyValidation {
trie.put(
indexKey(i),
RLP.encode(
rlpOutput -> receipts.get(i).writeToForReceiptTrie(rlpOutput, false))));
rlpOutput ->
receipts.get(i).writeToForReceiptTrie(rlpOutput, false, false))));
return Hash.wrap(trie.getRootHash());
}

View File

@@ -33,7 +33,9 @@ public interface StorageProvider extends Closeable {
VariablesStorage createVariablesStorage();
BlockchainStorage createBlockchainStorage(
ProtocolSchedule protocolSchedule, VariablesStorage variablesStorage);
ProtocolSchedule protocolSchedule,
VariablesStorage variablesStorage,
DataStorageConfiguration storageConfiguration);
WorldStateKeyValueStorage createWorldStateStorage(
DataStorageConfiguration dataStorageConfiguration);

View File

@@ -59,14 +59,17 @@ public class KeyValueStoragePrefixedKeyBlockchainStorage implements BlockchainSt
final KeyValueStorage blockchainStorage;
final VariablesStorage variablesStorage;
final BlockHeaderFunctions blockHeaderFunctions;
final boolean receiptCompaction;
public KeyValueStoragePrefixedKeyBlockchainStorage(
final KeyValueStorage blockchainStorage,
final VariablesStorage variablesStorage,
final BlockHeaderFunctions blockHeaderFunctions) {
final BlockHeaderFunctions blockHeaderFunctions,
final boolean receiptCompaction) {
this.blockchainStorage = blockchainStorage;
this.variablesStorage = variablesStorage;
this.blockHeaderFunctions = blockHeaderFunctions;
this.receiptCompaction = receiptCompaction;
migrateVariables();
}
@@ -125,7 +128,8 @@ public class KeyValueStoragePrefixedKeyBlockchainStorage implements BlockchainSt
@Override
public Updater updater() {
return new Updater(blockchainStorage.startTransaction(), variablesStorage.updater());
return new Updater(
blockchainStorage.startTransaction(), variablesStorage.updater(), receiptCompaction);
}
private List<TransactionReceipt> rlpDecodeTransactionReceipts(final Bytes bytes) {
@@ -253,12 +257,15 @@ public class KeyValueStoragePrefixedKeyBlockchainStorage implements BlockchainSt
private final KeyValueStorageTransaction blockchainTransaction;
private final VariablesStorage.Updater variablesUpdater;
private final boolean receiptCompaction;
Updater(
final KeyValueStorageTransaction blockchainTransaction,
final VariablesStorage.Updater variablesUpdater) {
final VariablesStorage.Updater variablesUpdater,
final boolean receiptCompaction) {
this.blockchainTransaction = blockchainTransaction;
this.variablesUpdater = variablesUpdater;
this.receiptCompaction = receiptCompaction;
}
@Override
@@ -365,7 +372,10 @@ public class KeyValueStoragePrefixedKeyBlockchainStorage implements BlockchainSt
}
private Bytes rlpEncode(final List<TransactionReceipt> receipts) {
return RLP.encode(o -> o.writeList(receipts, TransactionReceipt::writeToWithRevertReason));
return RLP.encode(
o ->
o.writeList(
receipts, (r, rlpOutput) -> r.writeToForStorage(rlpOutput, receiptCompaction)));
}
private void removeVariables() {

View File

@@ -69,11 +69,14 @@ public class KeyValueStorageProvider implements StorageProvider {
@Override
public BlockchainStorage createBlockchainStorage(
final ProtocolSchedule protocolSchedule, final VariablesStorage variablesStorage) {
final ProtocolSchedule protocolSchedule,
final VariablesStorage variablesStorage,
final DataStorageConfiguration dataStorageConfiguration) {
return new KeyValueStoragePrefixedKeyBlockchainStorage(
getStorageBySegmentIdentifier(KeyValueSegmentIdentifier.BLOCKCHAIN),
variablesStorage,
ScheduleBasedBlockHeaderFunctions.create(protocolSchedule));
ScheduleBasedBlockHeaderFunctions.create(protocolSchedule),
dataStorageConfiguration.getReceiptCompactionEnabled());
}
@Override

View File

@@ -25,6 +25,7 @@ import org.immutables.value.Value;
public interface DataStorageConfiguration {
long DEFAULT_BONSAI_MAX_LAYERS_TO_LOAD = 512;
boolean DEFAULT_RECEIPT_COMPACTION_ENABLED = false;
DataStorageConfiguration DEFAULT_CONFIG =
ImmutableDataStorageConfiguration.builder()
@@ -50,6 +51,11 @@ public interface DataStorageConfiguration {
Long getBonsaiMaxLayersToLoad();
@Value.Default
default boolean getReceiptCompactionEnabled() {
return DEFAULT_RECEIPT_COMPACTION_ENABLED;
}
@Value.Default
default Unstable getUnstable() {
return Unstable.DEFAULT;

View File

@@ -64,7 +64,8 @@ public class ExecutionContextTestFixture {
new KeyValueStoragePrefixedKeyBlockchainStorage(
blockchainKeyValueStorage,
new VariablesKeyValueStorage(variablesKeyValueStorage),
new MainnetBlockHeaderFunctions()),
new MainnetBlockHeaderFunctions(),
false),
new NoOpMetricsSystem(),
0);
this.stateArchive = createInMemoryWorldStateArchive();

View File

@@ -73,7 +73,7 @@ public class InMemoryKeyValueStorageProvider extends KeyValueStorageProvider {
return DefaultBlockchain.createMutable(
genesisBlock,
new KeyValueStoragePrefixedKeyBlockchainStorage(
keyValueStorage, variablesStorage, blockHeaderFunctions),
keyValueStorage, variablesStorage, blockHeaderFunctions, false),
new NoOpMetricsSystem(),
0);
}

View File

@@ -41,7 +41,8 @@ public class ChainDataPrunerTest {
new KeyValueStoragePrefixedKeyBlockchainStorage(
new InMemoryKeyValueStorage(),
new VariablesKeyValueStorage(new InMemoryKeyValueStorage()),
new MainnetBlockHeaderFunctions());
new MainnetBlockHeaderFunctions(),
false);
final ChainDataPruner chainDataPruner =
new ChainDataPruner(
blockchainStorage,
@@ -79,7 +80,8 @@ public class ChainDataPrunerTest {
new KeyValueStoragePrefixedKeyBlockchainStorage(
new InMemoryKeyValueStorage(),
new VariablesKeyValueStorage(new InMemoryKeyValueStorage()),
new MainnetBlockHeaderFunctions());
new MainnetBlockHeaderFunctions(),
false);
final ChainDataPruner chainDataPruner =
new ChainDataPruner(
blockchainStorage,

View File

@@ -1055,7 +1055,8 @@ public class DefaultBlockchainTest {
return new KeyValueStoragePrefixedKeyBlockchainStorage(
kvStoreChain,
new VariablesKeyValueStorage(kvStorageVariables),
new MainnetBlockHeaderFunctions());
new MainnetBlockHeaderFunctions(),
false);
}
private DefaultBlockchain createMutableBlockchain(

View File

@@ -18,16 +18,43 @@ import static org.assertj.core.api.Assertions.assertThat;
import org.hyperledger.besu.ethereum.rlp.RLP;
import org.hyperledger.besu.evm.log.Log;
import org.hyperledger.besu.evm.log.LogTopic;
import java.util.List;
import org.apache.tuweni.bytes.Bytes;
import org.junit.jupiter.api.Test;
public class LogTest {
final BlockDataGenerator gen = new BlockDataGenerator();
@Test
public void toFromRlp() {
final BlockDataGenerator gen = new BlockDataGenerator();
final Log log = gen.log();
final Log log = gen.log(2);
final Log copy = Log.readFrom(RLP.input(RLP.encode(log::writeTo)));
assertThat(copy).isEqualTo(log);
}
@Test
public void toFromRlpCompacted() {
final Log log = gen.log(2);
final Log copy = Log.readFrom(RLP.input(RLP.encode(rlpOut -> log.writeTo(rlpOut, true))), true);
assertThat(copy).isEqualTo(log);
}
@Test
public void toFromRlpCompactedWithLeadingZeros() {
final Bytes logData = bytesWithLeadingZeros(10, 100);
final List<LogTopic> logTopics =
List.of(
LogTopic.of(bytesWithLeadingZeros(20, 32)), LogTopic.of(bytesWithLeadingZeros(30, 32)));
final Log log = new Log(gen.address(), logData, logTopics);
final Log copy = Log.readFrom(RLP.input(RLP.encode(rlpOut -> log.writeTo(rlpOut, true))), true);
assertThat(copy).isEqualTo(log);
}
private Bytes bytesWithLeadingZeros(final int noLeadingZeros, final int totalSize) {
return Bytes.concatenate(
Bytes.repeat((byte) 0, noLeadingZeros), gen.bytesValue(totalSize - noLeadingZeros));
}
}

View File

@@ -28,7 +28,7 @@ public class TransactionReceiptTest {
final BlockDataGenerator gen = new BlockDataGenerator();
final TransactionReceipt receipt = gen.receipt();
final TransactionReceipt copy =
TransactionReceipt.readFrom(RLP.input(RLP.encode(receipt::writeToWithRevertReason)), false);
TransactionReceipt.readFrom(RLP.input(RLP.encode(receipt::writeToForNetwork)), false);
assertThat(copy).isEqualTo(receipt);
}
@@ -37,7 +37,40 @@ public class TransactionReceiptTest {
final BlockDataGenerator gen = new BlockDataGenerator();
final TransactionReceipt receipt = gen.receipt(Bytes.fromHexString("0x1122334455667788"));
final TransactionReceipt copy =
TransactionReceipt.readFrom(RLP.input(RLP.encode(receipt::writeToWithRevertReason)));
TransactionReceipt.readFrom(
RLP.input(RLP.encode(rlpOut -> receipt.writeToForReceiptTrie(rlpOut, true, false))));
assertThat(copy).isEqualTo(receipt);
}
@Test
public void toFromRlpCompacted() {
final BlockDataGenerator gen = new BlockDataGenerator();
final TransactionReceipt receipt = gen.receipt(Bytes.fromHexString("0x1122334455667788"));
final TransactionReceipt copy =
TransactionReceipt.readFrom(
RLP.input(RLP.encode(rlpOut -> receipt.writeToForReceiptTrie(rlpOut, false, true))));
assertThat(copy).isEqualTo(receipt);
}
@Test
public void toFromRlpCompactedWithReason() {
final BlockDataGenerator gen = new BlockDataGenerator();
final TransactionReceipt receipt = gen.receipt(Bytes.fromHexString("0x1122334455667788"));
final TransactionReceipt copy =
TransactionReceipt.readFrom(
RLP.input(RLP.encode(rlpOut -> receipt.writeToForReceiptTrie(rlpOut, true, true))));
assertThat(copy).isEqualTo(receipt);
}
@Test
public void uncompactedAndCompactedDecodeToSameReceipt() {
final BlockDataGenerator gen = new BlockDataGenerator();
final TransactionReceipt receipt = gen.receipt(Bytes.fromHexString("0x1122334455667788"));
final Bytes compactedReceipt =
RLP.encode(rlpOut -> receipt.writeToForReceiptTrie(rlpOut, false, true));
final Bytes unCompactedReceipt =
RLP.encode(rlpOut -> receipt.writeToForReceiptTrie(rlpOut, false, false));
assertThat(TransactionReceipt.readFrom(RLP.input(compactedReceipt))).isEqualTo(receipt);
assertThat(TransactionReceipt.readFrom(RLP.input(unCompactedReceipt))).isEqualTo(receipt);
}
}

View File

@@ -62,7 +62,7 @@ public class KeyValueStoragePrefixedKeyBlockchainStorageTest {
final var blockchainStorage =
new KeyValueStoragePrefixedKeyBlockchainStorage(
kvBlockchain, variablesStorage, blockHeaderFunctions);
kvBlockchain, variablesStorage, blockHeaderFunctions, false);
assertNoVariablesInStorage(kvBlockchain);
assertVariablesPresentInVariablesStorage(kvVariables, variableValues);
@@ -80,7 +80,7 @@ public class KeyValueStoragePrefixedKeyBlockchainStorageTest {
final var blockchainStorage =
new KeyValueStoragePrefixedKeyBlockchainStorage(
kvBlockchain, variablesStorage, blockHeaderFunctions);
kvBlockchain, variablesStorage, blockHeaderFunctions, false);
assertNoVariablesInStorage(kvBlockchain);
assertVariablesPresentInVariablesStorage(kvVariables, variableValues);
@@ -96,7 +96,7 @@ public class KeyValueStoragePrefixedKeyBlockchainStorageTest {
final var blockchainStorage =
new KeyValueStoragePrefixedKeyBlockchainStorage(
kvBlockchain, variablesStorage, blockHeaderFunctions);
kvBlockchain, variablesStorage, blockHeaderFunctions, false);
assertNoVariablesInStorage(kvBlockchain);
assertVariablesPresentInVariablesStorage(kvVariables, variableValues);
@@ -114,6 +114,6 @@ public class KeyValueStoragePrefixedKeyBlockchainStorageTest {
IllegalStateException.class,
() ->
new KeyValueStoragePrefixedKeyBlockchainStorage(
kvBlockchain, variablesStorage, blockHeaderFunctions));
kvBlockchain, variablesStorage, blockHeaderFunctions, false));
}
}

View File

@@ -214,6 +214,22 @@ public abstract class AbstractIsolationTests {
public Wei getMinGasPrice() {
return MiningParameters.newDefault().getMinTransactionGasPrice();
}
@Override
public org.hyperledger.besu.plugin.services.storage.DataStorageConfiguration
getDataStorageConfiguration() {
return new org.hyperledger.besu.plugin.services.storage.DataStorageConfiguration() {
@Override
public DataStorageFormat getDatabaseFormat() {
return DataStorageFormat.BONSAI;
}
@Override
public boolean getReceiptCompactionEnabled() {
return false;
}
};
}
})
.withMetricsSystem(new NoOpMetricsSystem())
.build();

View File

@@ -63,7 +63,8 @@ public class PrunerTest {
new KeyValueStoragePrefixedKeyBlockchainStorage(
new InMemoryKeyValueStorage(),
new VariablesKeyValueStorage(new InMemoryKeyValueStorage()),
new MainnetBlockHeaderFunctions());
new MainnetBlockHeaderFunctions(),
false);
final MutableBlockchain blockchain =
DefaultBlockchain.createMutable(genesisBlock, blockchainStorage, metricsSystem, 0);
@@ -86,7 +87,8 @@ public class PrunerTest {
new KeyValueStoragePrefixedKeyBlockchainStorage(
new InMemoryKeyValueStorage(),
new VariablesKeyValueStorage(new InMemoryKeyValueStorage()),
new MainnetBlockHeaderFunctions());
new MainnetBlockHeaderFunctions(),
false);
final MutableBlockchain blockchain =
DefaultBlockchain.createMutable(genesisBlock, blockchainStorage, metricsSystem, 0);
@@ -114,7 +116,8 @@ public class PrunerTest {
new KeyValueStoragePrefixedKeyBlockchainStorage(
new InMemoryKeyValueStorage(),
new VariablesKeyValueStorage(new InMemoryKeyValueStorage()),
new MainnetBlockHeaderFunctions());
new MainnetBlockHeaderFunctions(),
false);
final MutableBlockchain blockchain =
DefaultBlockchain.createMutable(genesisBlock, blockchainStorage, metricsSystem, 0);
@@ -186,7 +189,8 @@ public class PrunerTest {
new KeyValueStoragePrefixedKeyBlockchainStorage(
new InMemoryKeyValueStorage(),
new VariablesKeyValueStorage(new InMemoryKeyValueStorage()),
new MainnetBlockHeaderFunctions());
new MainnetBlockHeaderFunctions(),
false);
final MutableBlockchain blockchain =
DefaultBlockchain.createMutable(genesisBlock, blockchainStorage, metricsSystem, 0);

View File

@@ -234,7 +234,7 @@ class EthServer {
}
final BytesValueRLPOutput encodedReceipts = new BytesValueRLPOutput();
encodedReceipts.startList();
maybeReceipts.get().forEach(r -> r.writeTo(encodedReceipts));
maybeReceipts.get().forEach(r -> r.writeToForNetwork(encodedReceipts));
encodedReceipts.endList();
final int encodedSize = encodedReceipts.encodedSize();
if (responseSizeEstimate + encodedSize > maxMessageSize) {

View File

@@ -46,7 +46,7 @@ public final class ReceiptsMessage extends AbstractMessageData {
receipts.forEach(
(receiptSet) -> {
tmp.startList();
receiptSet.forEach(r -> r.writeTo(tmp));
receiptSet.forEach(r -> r.writeToForNetwork(tmp));
tmp.endList();
});
tmp.endList();

View File

@@ -399,7 +399,7 @@ public class EthServerTest {
private int calculateRlpEncodedSize(final List<TransactionReceipt> receipts) {
final BytesValueRLPOutput rlp = new BytesValueRLPOutput();
rlp.startList();
receipts.forEach(r -> r.writeTo(rlp));
receipts.forEach(r -> r.writeToForNetwork(rlp));
rlp.endList();
return rlp.encodedSize();
}

View File

@@ -52,7 +52,8 @@ public class CheckPointBlockImportStepTest {
new KeyValueStoragePrefixedKeyBlockchainStorage(
new InMemoryKeyValueStorage(),
new VariablesKeyValueStorage(new InMemoryKeyValueStorage()),
new MainnetBlockHeaderFunctions());
new MainnetBlockHeaderFunctions(),
false);
blockchain =
DefaultBlockchain.createMutable(
generateBlock(0), blockchainStorage, mock(MetricsSystem.class), 0);

View File

@@ -142,6 +142,9 @@ public class DataStoreModule {
@Named("variables") final KeyValueStorage variablesKeyValueStorage,
final BlockHeaderFunctions blockHashFunction) {
return new KeyValueStoragePrefixedKeyBlockchainStorage(
keyValueStorage, new VariablesKeyValueStorage(variablesKeyValueStorage), blockHashFunction);
keyValueStorage,
new VariablesKeyValueStorage(variablesKeyValueStorage),
blockHashFunction,
false);
}
}

View File

@@ -279,7 +279,7 @@ public class RetestethContext {
return DefaultBlockchain.createMutable(
genesisBlock,
new KeyValueStoragePrefixedKeyBlockchainStorage(
keyValueStorage, variablesStorage, blockHeaderFunctions),
keyValueStorage, variablesStorage, blockHeaderFunctions, false),
new NoOpMetricsSystem(),
100);
}

View File

@@ -26,6 +26,8 @@ import com.fasterxml.jackson.annotation.JsonProperty;
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableList;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.bytes.Bytes32;
import org.apache.tuweni.bytes.MutableBytes;
/**
* A log entry is a tuple of a loggers address (the address of the contract that added the logs), a
@@ -60,13 +62,38 @@ public class Log {
* @param out the output in which to encode the log entry.
*/
public void writeTo(final RLPOutput out) {
writeTo(out, false);
}
/**
* Writes the log entry to the provided RLP output.
*
* @param out the output in which to encode the log entry.
* @param compacted whether to compact the rlp log entry by trimming leading zeros on topics and
* data.
*/
public void writeTo(final RLPOutput out, final boolean compacted) {
out.startList();
out.writeBytes(logger);
out.writeList(topics, (topic, listOut) -> listOut.writeBytes(topic));
out.writeBytes(data);
if (compacted) {
out.writeList(topics, (topic, listOut) -> encodeTrimmedData(listOut, topic));
encodeTrimmedData(out, data);
} else {
out.writeList(topics, (topic, listOut) -> listOut.writeBytes(topic));
out.writeBytes(data);
}
out.endList();
}
private void encodeTrimmedData(final RLPOutput rlpOutput, final Bytes data) {
rlpOutput.startList();
final Bytes shortData = data.trimLeadingZeros();
final int zeroLeadDataSize = data.size() - shortData.size();
rlpOutput.writeIntScalar(zeroLeadDataSize);
rlpOutput.writeBytes(shortData);
rlpOutput.endList();
}
/**
* Reads the log entry from the provided RLP input.
*
@@ -74,14 +101,45 @@ public class Log {
* @return the read log entry.
*/
public static Log readFrom(final RLPInput in) {
return readFrom(in, false);
}
/**
* Reads the log entry from the provided RLP input.
*
* @param in the input from which to decode the log entry.
* @param compacted whether the rlp log entry was compacted by trimming leading zeros on topics
* and data.
* @return the read log entry.
*/
public static Log readFrom(final RLPInput in, final boolean compacted) {
in.enterList();
final Address logger = Address.wrap(in.readBytes());
final List<LogTopic> topics = in.readList(listIn -> LogTopic.wrap(listIn.readBytes32()));
final Bytes data = in.readBytes();
final List<LogTopic> topics;
final Bytes data;
if (compacted) {
topics = in.readList(listIn -> LogTopic.wrap(Bytes32.wrap(readTrimmedData(in))));
data = Bytes.wrap(readTrimmedData(in));
} else {
topics = in.readList(listIn -> LogTopic.wrap(listIn.readBytes32()));
data = in.readBytes();
}
in.leaveList();
return new Log(logger, data, topics);
}
private static Bytes readTrimmedData(final RLPInput in) {
in.enterList();
final int zeroLeadDataSize = in.readIntScalar();
final Bytes shortData = in.readBytes();
final MutableBytes data = MutableBytes.create(zeroLeadDataSize + shortData.size());
data.set(zeroLeadDataSize, shortData);
in.leaveList();
return data;
}
/**
* Gets logger address.
*
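
Compaction here replaces each fixed-width topic (and the data field) with a two-element list of [leading-zero count, trimmed bytes], so zero-padded 32-byte topics shrink to a few bytes and are re-inflated losslessly on read. A standalone sketch of the trim/re-pad round trip that `encodeTrimmedData`/`readTrimmedData` perform, using only Tuweni Bytes (not Besu code):

```java
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.bytes.MutableBytes;

class TrimmedBytesSketch {
  void roundTrip() {
    final Bytes topic =
        Bytes.fromHexString("0x0000000000000000000000000000000000000000000000000000000000000042");

    // What encodeTrimmedData() records: the zero-prefix length plus the trimmed bytes.
    final Bytes trimmed = topic.trimLeadingZeros();          // 0x42
    final int leadingZeros = topic.size() - trimmed.size();  // 31

    // What readTrimmedData() does: allocate the original width and copy the tail back in.
    final MutableBytes restored = MutableBytes.create(leadingZeros + trimmed.size());
    restored.set(leadingZeros, trimmed);
    assert restored.equals(topic);
  }
}
```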

View File

@@ -69,7 +69,7 @@ Calculated : ${currentHash}
tasks.register('checkAPIChanges', FileStateChecker) {
description = "Checks that the API for the Plugin-API project does not change without deliberate thought"
files = sourceSets.main.allJava.files
knownHash = '/FHIztl2tLW5Gzc0qnfEeuVQa6ljVfUce7YE6JLDdZU='
knownHash = 'YH+8rbilrhatRAh8rK8/36qxwrqkybBaaNeg+AkZ0c4='
}
check.dependsOn('checkAPIChanges')

View File

@@ -16,6 +16,7 @@ package org.hyperledger.besu.plugin.services;
import org.hyperledger.besu.datatypes.Wei;
import org.hyperledger.besu.plugin.Unstable;
import org.hyperledger.besu.plugin.services.storage.DataStorageConfiguration;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import java.nio.file.Path;
@@ -43,6 +44,7 @@ public interface BesuConfiguration extends BesuService {
* @return Database format.
*/
@Unstable
@Deprecated
DataStorageFormat getDatabaseFormat();
/**
@@ -52,4 +54,12 @@ public interface BesuConfiguration extends BesuService {
*/
@Unstable
Wei getMinGasPrice();
/**
* Database storage configuration.
*
* @return Database storage configuration.
*/
@Unstable
DataStorageConfiguration getDataStorageConfiguration();
}

View File

@@ -0,0 +1,40 @@
/*
* Copyright Hyperledger Besu Contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.plugin.services.storage;
import org.hyperledger.besu.plugin.Unstable;
/** Data storage configuration */
@Unstable
public interface DataStorageConfiguration {
/**
* Database format. This sets the list of segmentIdentifiers that should be initialized.
*
* @return Database format.
*/
@Unstable
DataStorageFormat getDatabaseFormat();
/**
* Whether receipt compaction is enabled. When enabled, this reduces the storage needed for
* receipts.
*
* @return Whether receipt compaction is enabled
*/
@Unstable
boolean getReceiptCompactionEnabled();
}
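
For plugin authors, this new interface is reachable through the `BesuConfiguration` service via the `getDataStorageConfiguration()` method added in this PR. A hypothetical plugin reading the flag might look like the sketch below (the plugin class itself is invented for illustration):

```java
import org.hyperledger.besu.plugin.BesuContext;
import org.hyperledger.besu.plugin.BesuPlugin;
import org.hyperledger.besu.plugin.services.BesuConfiguration;

// Hypothetical plugin, for illustration only.
public class ReceiptCompactionAwarePlugin implements BesuPlugin {

  @Override
  public void register(final BesuContext context) {
    context
        .getService(BesuConfiguration.class)
        .ifPresent(
            config -> {
              // New in this PR: data storage configuration, including receipt compaction.
              final boolean compaction =
                  config.getDataStorageConfiguration().getReceiptCompactionEnabled();
              System.out.println("Receipt compaction enabled: " + compaction);
            });
  }

  @Override
  public void start() {}

  @Override
  public void stop() {}
}
```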

View File

@@ -46,7 +46,9 @@ public class RocksDBKeyValuePrivacyStorageFactory implements PrivacyKeyValueStor
private static final Set<PrivacyVersionedStorageFormat> SUPPORTED_VERSIONS =
EnumSet.of(
PrivacyVersionedStorageFormat.FOREST_WITH_VARIABLES,
PrivacyVersionedStorageFormat.BONSAI_WITH_VARIABLES);
PrivacyVersionedStorageFormat.FOREST_WITH_RECEIPT_COMPACTION,
PrivacyVersionedStorageFormat.BONSAI_WITH_VARIABLES,
PrivacyVersionedStorageFormat.BONSAI_WITH_RECEIPT_COMPACTION);
private static final String PRIVATE_DATABASE_PATH = "private";
private final RocksDBKeyValueStorageFactory publicFactory;
private DatabaseMetadata databaseMetadata;
@@ -145,7 +147,8 @@ public class RocksDBKeyValuePrivacyStorageFactory implements PrivacyKeyValueStor
privacyMetadata = existingPrivacyMetadata;
final int existingPrivacyVersion = maybeExistingPrivacyVersion.getAsInt();
final var runtimeVersion =
PrivacyVersionedStorageFormat.defaultForNewDB(commonConfiguration.getDatabaseFormat());
PrivacyVersionedStorageFormat.defaultForNewDB(
commonConfiguration.getDataStorageConfiguration());
if (existingPrivacyVersion > runtimeVersion.getPrivacyVersion().getAsInt()) {
final var maybeDowngradedMetadata =
@@ -194,6 +197,16 @@ public class RocksDBKeyValuePrivacyStorageFactory implements PrivacyKeyValueStor
// In case we do an automated downgrade, then we also need to update the metadata on disk to
// reflect the change to the runtime version, and return it.
// Besu supports both formats of receipts so no downgrade is needed
if (runtimeVersion == PrivacyVersionedStorageFormat.BONSAI_WITH_VARIABLES
|| runtimeVersion == PrivacyVersionedStorageFormat.FOREST_WITH_VARIABLES) {
LOG.warn(
"Database contains compacted receipts but receipt compaction is not enabled; new receipts will "
+ "not be stored in the compacted format. If you want to remove compacted receipts from the "
+ "database, it is necessary to resync Besu. Besu can support both compacted and non-compacted receipts.");
return Optional.empty();
}
// for the moment there are no supported automated downgrades, so we just fail.
String error =
String.format(
@@ -216,6 +229,18 @@ public class RocksDBKeyValuePrivacyStorageFactory implements PrivacyKeyValueStor
// In case we do an automated upgrade, then we also need to update the metadata on disk to
// reflect the change to the runtime version, and return it.
// Besu supports both formats of receipts so no upgrade is needed other than updating metadata
if (runtimeVersion == PrivacyVersionedStorageFormat.BONSAI_WITH_RECEIPT_COMPACTION
|| runtimeVersion == PrivacyVersionedStorageFormat.FOREST_WITH_RECEIPT_COMPACTION) {
final DatabaseMetadata metadata = new DatabaseMetadata(runtimeVersion);
try {
metadata.writeToDirectory(dataDir);
return Optional.of(metadata);
} catch (IOException e) {
throw new StorageException("Database upgrade to use receipt compaction failed", e);
}
}
// for the moment there are no planned automated upgrades, so we just fail.
String error =
String.format(

View File

@@ -14,7 +14,9 @@
*/
package org.hyperledger.besu.plugin.services.storage.rocksdb;
import static org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.BaseVersionedStorageFormat.BONSAI_WITH_RECEIPT_COMPACTION;
import static org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.BaseVersionedStorageFormat.BONSAI_WITH_VARIABLES;
import static org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.BaseVersionedStorageFormat.FOREST_WITH_RECEIPT_COMPACTION;
import static org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.BaseVersionedStorageFormat.FOREST_WITH_VARIABLES;
import org.hyperledger.besu.plugin.services.BesuConfiguration;
@@ -55,7 +57,11 @@ public class RocksDBKeyValueStorageFactory implements KeyValueStorageFactory {
private static final Logger LOG = LoggerFactory.getLogger(RocksDBKeyValueStorageFactory.class);
private static final EnumSet<BaseVersionedStorageFormat> SUPPORTED_VERSIONED_FORMATS =
EnumSet.of(FOREST_WITH_VARIABLES, BONSAI_WITH_VARIABLES);
EnumSet.of(
FOREST_WITH_VARIABLES,
FOREST_WITH_RECEIPT_COMPACTION,
BONSAI_WITH_VARIABLES,
BONSAI_WITH_RECEIPT_COMPACTION);
private static final String NAME = "rocksdb";
private final RocksDBMetricsFactory rocksDBMetricsFactory;
private DatabaseMetadata databaseMetadata;
@@ -220,12 +226,13 @@ public class RocksDBKeyValueStorageFactory implements KeyValueStorageFactory {
if (!metadata
.getVersionedStorageFormat()
.getFormat()
.equals(commonConfiguration.getDatabaseFormat())) {
.equals(commonConfiguration.getDataStorageConfiguration().getDatabaseFormat())) {
handleFormatMismatch(commonConfiguration, dataDir, metadata);
}
final var runtimeVersion =
BaseVersionedStorageFormat.defaultForNewDB(commonConfiguration.getDatabaseFormat());
BaseVersionedStorageFormat.defaultForNewDB(
commonConfiguration.getDataStorageConfiguration());
if (metadata.getVersionedStorageFormat().getVersion() > runtimeVersion.getVersion()) {
final var maybeDowngradedMetadata =
@@ -247,7 +254,7 @@ public class RocksDBKeyValueStorageFactory implements KeyValueStorageFactory {
LOG.info("Existing database at {}. Metadata {}. Processing WAL...", dataDir, metadata);
} else {
metadata = DatabaseMetadata.defaultForNewDb(commonConfiguration.getDatabaseFormat());
metadata = DatabaseMetadata.defaultForNewDb(commonConfiguration);
LOG.info(
"No existing database at {}. Using default metadata for new db {}", dataDir, metadata);
if (!dataDirExists) {
@@ -275,7 +282,7 @@ public class RocksDBKeyValueStorageFactory implements KeyValueStorageFactory {
+ "Please check your config.",
dataDir,
existingMetadata.getVersionedStorageFormat().getFormat().name(),
commonConfiguration.getDatabaseFormat());
commonConfiguration.getDataStorageConfiguration().getDatabaseFormat());
throw new StorageException(error);
}
@@ -290,6 +297,15 @@ public class RocksDBKeyValueStorageFactory implements KeyValueStorageFactory {
// In case we do an automated downgrade, then we also need to update the metadata on disk to
// reflect the change to the runtime version, and return it.
// Besu supports both formats of receipts so no downgrade is needed
if (runtimeVersion == BONSAI_WITH_VARIABLES || runtimeVersion == FOREST_WITH_VARIABLES) {
LOG.warn(
"Database contains compacted receipts but receipt compaction is not enabled, new receipts will "
+ "be not stored in the compacted format. If you want to remove compacted receipts from the "
+ "database it is necessary to resync Besu. Besu can support both compacted and non-compacted receipts.");
return Optional.empty();
}
// for the moment there are no supported automated downgrades, so we just fail.
String error =
String.format(
@@ -312,6 +328,18 @@ public class RocksDBKeyValueStorageFactory implements KeyValueStorageFactory {
// In case we do an automated upgrade, then we also need to update the metadata on disk to
// reflect the change to the runtime version, and return it.
// Besu supports both formats of receipts so no upgrade is needed other than updating metadata
if (runtimeVersion == BONSAI_WITH_RECEIPT_COMPACTION
|| runtimeVersion == FOREST_WITH_RECEIPT_COMPACTION) {
final DatabaseMetadata metadata = new DatabaseMetadata(runtimeVersion);
try {
metadata.writeToDirectory(dataDir);
return Optional.of(metadata);
} catch (IOException e) {
throw new StorageException("Database upgrade to use receipt compaction failed", e);
}
}
// for the moment there are no planned automated upgrades, so we just fail.
String error =
String.format(

View File

@@ -14,6 +14,7 @@
*/
package org.hyperledger.besu.plugin.services.storage.rocksdb.configuration;
import org.hyperledger.besu.plugin.services.storage.DataStorageConfiguration;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import java.util.OptionalInt;
@@ -27,13 +28,23 @@ public enum BaseVersionedStorageFormat implements VersionedStorageFormat {
* make BlobDB more effective
*/
FOREST_WITH_VARIABLES(DataStorageFormat.FOREST, 2),
/**
* Current Forest version, with receipt compaction enabled so that receipts use less disk
* space
*/
FOREST_WITH_RECEIPT_COMPACTION(DataStorageFormat.FOREST, 3),
/** Original Bonsai version, no longer used since it was replaced by BONSAI_WITH_VARIABLES */
BONSAI_ORIGINAL(DataStorageFormat.BONSAI, 1),
/**
* Current Bonsai version, with blockchain variables in a dedicated column family, in order to
* make BlobDB more effective
*/
BONSAI_WITH_VARIABLES(DataStorageFormat.BONSAI, 2);
BONSAI_WITH_VARIABLES(DataStorageFormat.BONSAI, 2),
/**
* Current Bonsai version, with receipt compaction enabled so that receipts use less disk
* space
*/
BONSAI_WITH_RECEIPT_COMPACTION(DataStorageFormat.BONSAI, 3);
private final DataStorageFormat format;
private final int version;
@@ -46,13 +57,18 @@ public enum BaseVersionedStorageFormat implements VersionedStorageFormat {
/**
* Return the default version for new db for a specific format
*
* @param format data storage format
* @param configuration data storage configuration
* @return the version to use for new db
*/
public static BaseVersionedStorageFormat defaultForNewDB(final DataStorageFormat format) {
return switch (format) {
case FOREST -> FOREST_WITH_VARIABLES;
case BONSAI -> BONSAI_WITH_VARIABLES;
public static BaseVersionedStorageFormat defaultForNewDB(
final DataStorageConfiguration configuration) {
return switch (configuration.getDatabaseFormat()) {
case FOREST -> configuration.getReceiptCompactionEnabled()
? FOREST_WITH_RECEIPT_COMPACTION
: FOREST_WITH_VARIABLES;
case BONSAI -> configuration.getReceiptCompactionEnabled()
? BONSAI_WITH_RECEIPT_COMPACTION
: BONSAI_WITH_VARIABLES;
};
}
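
Taken together with the privacy variant below, the metadata version stamped on a new database now depends on both the storage format and the receipt compaction flag: version 3 when compaction is enabled, version 2 otherwise. A small illustrative sketch of the selection (the configuration argument stands in for the mocks used in the tests later in this commit):

```java
import org.hyperledger.besu.plugin.services.storage.DataStorageConfiguration;
import org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.BaseVersionedStorageFormat;

// Illustrative only: which metadata version a fresh database is stamped with.
class NewDbVersionSketch {
  BaseVersionedStorageFormat versionFor(final DataStorageConfiguration configuration) {
    // BONSAI + compaction   -> BONSAI_WITH_RECEIPT_COMPACTION (version 3)
    // BONSAI, no compaction -> BONSAI_WITH_VARIABLES          (version 2)
    // FOREST + compaction   -> FOREST_WITH_RECEIPT_COMPACTION (version 3)
    // FOREST, no compaction -> FOREST_WITH_VARIABLES          (version 2)
    return BaseVersionedStorageFormat.defaultForNewDB(configuration);
  }
}
```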

View File

@@ -14,6 +14,7 @@
*/
package org.hyperledger.besu.plugin.services.storage.rocksdb.configuration;
import org.hyperledger.besu.plugin.services.BesuConfiguration;
import org.hyperledger.besu.plugin.services.exception.StorageException;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
@@ -46,18 +47,25 @@ public class DatabaseMetadata {
.enable(SerializationFeature.INDENT_OUTPUT);
private final VersionedStorageFormat versionedStorageFormat;
private DatabaseMetadata(final VersionedStorageFormat versionedStorageFormat) {
/**
* Instantiates a new Database metadata.
*
* @param versionedStorageFormat the version storage format
*/
public DatabaseMetadata(final VersionedStorageFormat versionedStorageFormat) {
this.versionedStorageFormat = versionedStorageFormat;
}
/**
* Return the default metadata for new db for a specific format
*
* @param dataStorageFormat data storage format
* @param besuConfiguration besu configuration
* @return the metadata to use for new db
*/
public static DatabaseMetadata defaultForNewDb(final DataStorageFormat dataStorageFormat) {
return new DatabaseMetadata(BaseVersionedStorageFormat.defaultForNewDB(dataStorageFormat));
public static DatabaseMetadata defaultForNewDb(final BesuConfiguration besuConfiguration) {
return new DatabaseMetadata(
BaseVersionedStorageFormat.defaultForNewDB(
besuConfiguration.getDataStorageConfiguration()));
}
/**
@@ -222,6 +230,7 @@ public class DatabaseMetadata {
case FOREST -> switch (versionedStorageFormat.getVersion()) {
case 1 -> PrivacyVersionedStorageFormat.FOREST_ORIGINAL;
case 2 -> PrivacyVersionedStorageFormat.FOREST_WITH_VARIABLES;
case 3 -> PrivacyVersionedStorageFormat.FOREST_WITH_RECEIPT_COMPACTION;
default -> throw new StorageException(
"Unsupported database with format FOREST and version "
+ versionedStorageFormat.getVersion());
@@ -229,6 +238,7 @@ public class DatabaseMetadata {
case BONSAI -> switch (versionedStorageFormat.getVersion()) {
case 1 -> PrivacyVersionedStorageFormat.BONSAI_ORIGINAL;
case 2 -> PrivacyVersionedStorageFormat.BONSAI_WITH_VARIABLES;
case 3 -> PrivacyVersionedStorageFormat.BONSAI_WITH_RECEIPT_COMPACTION;
default -> throw new StorageException(
"Unsupported database with format BONSAI and version "
+ versionedStorageFormat.getVersion());

View File

@@ -14,6 +14,7 @@
*/
package org.hyperledger.besu.plugin.services.storage.rocksdb.configuration;
import org.hyperledger.besu.plugin.services.storage.DataStorageConfiguration;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import java.util.OptionalInt;
@@ -27,13 +28,23 @@ public enum PrivacyVersionedStorageFormat implements VersionedStorageFormat {
* make BlobDB more effective
*/
FOREST_WITH_VARIABLES(BaseVersionedStorageFormat.FOREST_WITH_VARIABLES, 1),
/**
* Current Forest version, with receipt compaction enabled so that receipts use less disk
* space
*/
FOREST_WITH_RECEIPT_COMPACTION(BaseVersionedStorageFormat.FOREST_WITH_RECEIPT_COMPACTION, 2),
/** Original Bonsai version, no longer used since it was replaced by BONSAI_WITH_VARIABLES */
BONSAI_ORIGINAL(BaseVersionedStorageFormat.BONSAI_ORIGINAL, 1),
/**
* Current Bonsai version, with blockchain variables in a dedicated column family, in order to
* make BlobDB more effective
*/
BONSAI_WITH_VARIABLES(BaseVersionedStorageFormat.BONSAI_WITH_VARIABLES, 1);
BONSAI_WITH_VARIABLES(BaseVersionedStorageFormat.BONSAI_WITH_VARIABLES, 1),
/**
* Current Bonsai version, with receipt compaction enabled so that receipts use less disk
* space
*/
BONSAI_WITH_RECEIPT_COMPACTION(BaseVersionedStorageFormat.BONSAI_WITH_RECEIPT_COMPACTION, 2);
private final VersionedStorageFormat baseVersionedStorageFormat;
private final OptionalInt privacyVersion;
@@ -47,13 +58,18 @@ public enum PrivacyVersionedStorageFormat implements VersionedStorageFormat {
/**
* Return the default version for new db for a specific format
*
* @param format data storage format
* @param configuration data storage configuration
* @return the version to use for new db
*/
public static VersionedStorageFormat defaultForNewDB(final DataStorageFormat format) {
return switch (format) {
case FOREST -> FOREST_WITH_VARIABLES;
case BONSAI -> BONSAI_WITH_VARIABLES;
public static VersionedStorageFormat defaultForNewDB(
final DataStorageConfiguration configuration) {
return switch (configuration.getDatabaseFormat()) {
case FOREST -> configuration.getReceiptCompactionEnabled()
? FOREST_WITH_RECEIPT_COMPACTION
: FOREST_WITH_VARIABLES;
case BONSAI -> configuration.getReceiptCompactionEnabled()
? BONSAI_WITH_RECEIPT_COMPACTION
: BONSAI_WITH_VARIABLES;
};
}

View File

@@ -15,13 +15,17 @@
package org.hyperledger.besu.plugin.services.storage.rocksdb;
import static org.assertj.core.api.Assertions.assertThat;
import static org.hyperledger.besu.plugin.services.storage.DataStorageFormat.BONSAI;
import static org.hyperledger.besu.plugin.services.storage.DataStorageFormat.FOREST;
import static org.hyperledger.besu.plugin.services.storage.rocksdb.segmented.RocksDBColumnarKeyValueStorageTest.TestSegment;
import static org.mockito.Mockito.lenient;
import static org.mockito.Mockito.when;
import org.hyperledger.besu.metrics.ObservableMetricsSystem;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
import org.hyperledger.besu.plugin.services.BesuConfiguration;
import org.hyperledger.besu.plugin.services.storage.DataStorageConfiguration;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier;
import org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.BaseVersionedStorageFormat;
import org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.DatabaseMetadata;
@@ -35,6 +39,8 @@ import java.util.List;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.junit.jupiter.api.io.TempDir;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.EnumSource;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
@@ -42,6 +48,7 @@ import org.mockito.junit.jupiter.MockitoExtension;
public class RocksDBKeyValuePrivacyStorageFactoryTest {
@Mock private RocksDBFactoryConfiguration rocksDbConfiguration;
@Mock private BesuConfiguration commonConfiguration;
@Mock private DataStorageConfiguration dataStorageConfiguration;
@TempDir private Path temporaryFolder;
private final ObservableMetricsSystem metricsSystem = new NoOpMetricsSystem();
private final SegmentIdentifier segment = TestSegment.BAR;
@@ -94,20 +101,25 @@ public class RocksDBKeyValuePrivacyStorageFactoryTest {
}
}
@Test
public void shouldUpdateCorrectMetadataFileForLatestVersion() throws Exception {
@ParameterizedTest
@EnumSource(DataStorageFormat.class)
public void shouldUpdateCorrectMetadataFileForLatestVersion(
final DataStorageFormat dataStorageFormat) throws Exception {
final Path tempDataDir = temporaryFolder.resolve("data");
final Path tempDatabaseDir = temporaryFolder.resolve("db");
mockCommonConfiguration(tempDataDir, tempDatabaseDir);
mockCommonConfiguration(tempDataDir, tempDatabaseDir, dataStorageFormat);
final RocksDBKeyValueStorageFactory storageFactory =
new RocksDBKeyValueStorageFactory(
() -> rocksDbConfiguration, segments, RocksDBMetricsFactory.PRIVATE_ROCKS_DB_METRICS);
try (final var storage = storageFactory.create(segment, commonConfiguration, metricsSystem)) {
final BaseVersionedStorageFormat expectedBaseVersion =
dataStorageFormat == BONSAI
? BaseVersionedStorageFormat.BONSAI_WITH_VARIABLES
: BaseVersionedStorageFormat.FOREST_WITH_VARIABLES;
assertThat(DatabaseMetadata.lookUpFrom(tempDataDir).getVersionedStorageFormat())
.isEqualTo(BaseVersionedStorageFormat.FOREST_WITH_VARIABLES);
.isEqualTo(expectedBaseVersion);
}
storageFactory.close();
@@ -116,16 +128,67 @@ public class RocksDBKeyValuePrivacyStorageFactoryTest {
try (final var storage =
privacyStorageFactory.create(segment, commonConfiguration, metricsSystem)) {
final PrivacyVersionedStorageFormat expectedPrivacyVersion =
dataStorageFormat == BONSAI
? PrivacyVersionedStorageFormat.BONSAI_WITH_VARIABLES
: PrivacyVersionedStorageFormat.FOREST_WITH_VARIABLES;
assertThat(DatabaseMetadata.lookUpFrom(tempDataDir).getVersionedStorageFormat())
.isEqualTo(PrivacyVersionedStorageFormat.FOREST_WITH_VARIABLES);
.isEqualTo(expectedPrivacyVersion);
}
privacyStorageFactory.close();
}
@ParameterizedTest
@EnumSource(DataStorageFormat.class)
public void shouldUpdateCorrectMetadataFileForLatestVersionWithReceiptCompaction(
final DataStorageFormat dataStorageFormat) throws Exception {
final Path tempDataDir = temporaryFolder.resolve("data");
final Path tempDatabaseDir = temporaryFolder.resolve("db");
mockCommonConfiguration(tempDataDir, tempDatabaseDir, dataStorageFormat);
when(dataStorageConfiguration.getReceiptCompactionEnabled()).thenReturn(true);
final RocksDBKeyValueStorageFactory storageFactory =
new RocksDBKeyValueStorageFactory(
() -> rocksDbConfiguration, segments, RocksDBMetricsFactory.PRIVATE_ROCKS_DB_METRICS);
try (final var storage = storageFactory.create(segment, commonConfiguration, metricsSystem)) {
final BaseVersionedStorageFormat expectedBaseVersion =
dataStorageFormat == BONSAI
? BaseVersionedStorageFormat.BONSAI_WITH_RECEIPT_COMPACTION
: BaseVersionedStorageFormat.FOREST_WITH_RECEIPT_COMPACTION;
assertThat(DatabaseMetadata.lookUpFrom(tempDataDir).getVersionedStorageFormat())
.isEqualTo(expectedBaseVersion);
}
storageFactory.close();
final RocksDBKeyValuePrivacyStorageFactory privacyStorageFactory =
new RocksDBKeyValuePrivacyStorageFactory(storageFactory);
try (final var storage =
privacyStorageFactory.create(segment, commonConfiguration, metricsSystem)) {
final PrivacyVersionedStorageFormat expectedPrivacyVersion =
dataStorageFormat == BONSAI
? PrivacyVersionedStorageFormat.BONSAI_WITH_RECEIPT_COMPACTION
: PrivacyVersionedStorageFormat.FOREST_WITH_RECEIPT_COMPACTION;
assertThat(DatabaseMetadata.lookUpFrom(tempDataDir).getVersionedStorageFormat())
.isEqualTo(expectedPrivacyVersion);
}
privacyStorageFactory.close();
}
private void mockCommonConfiguration(final Path tempDataDir, final Path tempDatabaseDir) {
mockCommonConfiguration(tempDataDir, tempDatabaseDir, FOREST);
}
private void mockCommonConfiguration(
final Path tempDataDir,
final Path tempDatabaseDir,
final DataStorageFormat dataStorageFormat) {
when(commonConfiguration.getStoragePath()).thenReturn(tempDatabaseDir);
when(commonConfiguration.getDataPath()).thenReturn(tempDataDir);
when(commonConfiguration.getDatabaseFormat()).thenReturn(FOREST);
when(dataStorageConfiguration.getDatabaseFormat()).thenReturn(dataStorageFormat);
lenient()
.when(commonConfiguration.getDataStorageConfiguration())
.thenReturn(dataStorageConfiguration);
}
}

View File

@@ -27,6 +27,7 @@ import org.hyperledger.besu.metrics.ObservableMetricsSystem;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
import org.hyperledger.besu.plugin.services.BesuConfiguration;
import org.hyperledger.besu.plugin.services.exception.StorageException;
import org.hyperledger.besu.plugin.services.storage.DataStorageConfiguration;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier;
import org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.BaseVersionedStorageFormat;
@@ -43,6 +44,8 @@ import org.junit.jupiter.api.condition.DisabledOnOs;
import org.junit.jupiter.api.condition.OS;
import org.junit.jupiter.api.extension.ExtendWith;
import org.junit.jupiter.api.io.TempDir;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.EnumSource;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
@@ -51,16 +54,19 @@ public class RocksDBKeyValueStorageFactoryTest {
@Mock private RocksDBFactoryConfiguration rocksDbConfiguration;
@Mock private BesuConfiguration commonConfiguration;
@Mock private DataStorageConfiguration dataStorageConfiguration;
@TempDir public Path temporaryFolder;
private final ObservableMetricsSystem metricsSystem = new NoOpMetricsSystem();
private final SegmentIdentifier segment = TestSegment.FOO;
private final List<SegmentIdentifier> segments = List.of(TestSegment.DEFAULT, segment);
@Test
public void shouldCreateCorrectMetadataFileForLatestVersionForNewDb() throws Exception {
@ParameterizedTest
@EnumSource(DataStorageFormat.class)
public void shouldCreateCorrectMetadataFileForLatestVersionForNewDb(
final DataStorageFormat dataStorageFormat) throws Exception {
final Path tempDataDir = temporaryFolder.resolve("data");
final Path tempDatabaseDir = temporaryFolder.resolve("db");
mockCommonConfiguration(tempDataDir, tempDatabaseDir, FOREST);
mockCommonConfiguration(tempDataDir, tempDatabaseDir, dataStorageFormat);
final RocksDBKeyValueStorageFactory storageFactory =
new RocksDBKeyValueStorageFactory(
@@ -68,8 +74,36 @@ public class RocksDBKeyValueStorageFactoryTest {
try (final var storage = storageFactory.create(segment, commonConfiguration, metricsSystem)) {
// Side effect is creation of the Metadata version file
final BaseVersionedStorageFormat expectedVersion =
dataStorageFormat == BONSAI
? BaseVersionedStorageFormat.BONSAI_WITH_VARIABLES
: BaseVersionedStorageFormat.FOREST_WITH_VARIABLES;
assertThat(DatabaseMetadata.lookUpFrom(tempDataDir).getVersionedStorageFormat())
.isEqualTo(BaseVersionedStorageFormat.FOREST_WITH_VARIABLES);
.isEqualTo(expectedVersion);
}
}
@ParameterizedTest
@EnumSource(DataStorageFormat.class)
public void shouldCreateCorrectMetadataFileForLatestVersionForNewDbWithReceiptCompaction(
final DataStorageFormat dataStorageFormat) throws Exception {
final Path tempDataDir = temporaryFolder.resolve("data");
final Path tempDatabaseDir = temporaryFolder.resolve("db");
mockCommonConfiguration(tempDataDir, tempDatabaseDir, dataStorageFormat);
when(dataStorageConfiguration.getReceiptCompactionEnabled()).thenReturn(true);
final RocksDBKeyValueStorageFactory storageFactory =
new RocksDBKeyValueStorageFactory(
() -> rocksDbConfiguration, segments, RocksDBMetricsFactory.PUBLIC_ROCKS_DB_METRICS);
try (final var storage = storageFactory.create(segment, commonConfiguration, metricsSystem)) {
// Side effect is creation of the Metadata version file
final BaseVersionedStorageFormat expectedVersion =
dataStorageFormat == BONSAI
? BaseVersionedStorageFormat.BONSAI_WITH_RECEIPT_COMPACTION
: BaseVersionedStorageFormat.FOREST_WITH_RECEIPT_COMPACTION;
assertThat(DatabaseMetadata.lookUpFrom(tempDataDir).getVersionedStorageFormat())
.isEqualTo(expectedVersion);
}
}
@@ -273,6 +307,9 @@ public class RocksDBKeyValueStorageFactoryTest {
final Path tempDataDir, final Path tempDatabaseDir, final DataStorageFormat format) {
when(commonConfiguration.getStoragePath()).thenReturn(tempDatabaseDir);
when(commonConfiguration.getDataPath()).thenReturn(tempDataDir);
lenient().when(commonConfiguration.getDatabaseFormat()).thenReturn(format);
lenient().when(dataStorageConfiguration.getDatabaseFormat()).thenReturn(format);
lenient()
.when(commonConfiguration.getDataStorageConfiguration())
.thenReturn(dataStorageConfiguration);
}
}