Promote segmented storage (#5700)
promote segmented storage to plugin-api, implement SegmentedInMemoryKeyValueStorage

Signed-off-by: garyschulte <garyschulte@gmail.com>
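At a high level, this change collapses the per-segment KeyValueStorage handles (account, code, storage, trie-branch) into one composed SegmentedKeyValueStorage whose operations name their segment explicitly. A minimal sketch of the resulting API shape, assembled from the hunks below (the wiring around `provider`, `key`, and `value` is assumed, not part of the commit):

```java
// Sketch only: types and method names are taken from the diff below.
SegmentedKeyValueStorage composed =
    provider.getStorageBySegmentIdentifiers(
        List.of(ACCOUNT_INFO_STATE, CODE_STORAGE, ACCOUNT_STORAGE_STORAGE, TRIE_BRANCH_STORAGE));

final byte[] key = "k".getBytes(StandardCharsets.UTF_8);   // assumed sample data
final byte[] value = "v".getBytes(StandardCharsets.UTF_8);

SegmentedKeyValueStorageTransaction tx = composed.startTransaction();
tx.put(TRIE_BRANCH_STORAGE, key, value); // every read/write now names its segment
tx.commit();
```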
@@ -621,10 +621,7 @@ public abstract class BesuControllerBuilder implements MiningParameterOverrides
     Optional<Pruner> maybePruner = Optional.empty();
     if (isPruningEnabled) {
-      if (!storageProvider.isWorldStateIterable()) {
-        LOG.warn(
-            "Cannot enable pruning with current database version. Disabling. Resync to get the latest database version or disable pruning explicitly on the command line to remove this warning.");
-      } else if (dataStorageConfiguration.getDataStorageFormat().equals(DataStorageFormat.BONSAI)) {
+      if (dataStorageConfiguration.getDataStorageFormat().equals(DataStorageFormat.BONSAI)) {
         LOG.warn(
             "Cannot enable pruning with Bonsai data storage format. Disabling. Change the data storage format or disable pruning explicitly on the command line to remove this warning.");
       } else {
@@ -73,6 +73,7 @@ import org.hyperledger.besu.plugin.services.StorageService;
 import org.hyperledger.besu.plugin.services.securitymodule.SecurityModule;
 import org.hyperledger.besu.plugin.services.storage.KeyValueStorageFactory;
 import org.hyperledger.besu.plugin.services.storage.PrivacyKeyValueStorageFactory;
+import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier;
 import org.hyperledger.besu.services.BesuPluginContextImpl;
 import org.hyperledger.besu.services.PermissioningServiceImpl;
 import org.hyperledger.besu.services.PrivacyPluginServiceImpl;

@@ -312,7 +313,7 @@ public abstract class CommandTestAbstract {
         .when(securityModuleService.getByName(eq("localfile")))
         .thenReturn(Optional.of(() -> securityModule));
     lenient()
-        .when(rocksDBSPrivacyStorageFactory.create(any(), any(), any()))
+        .when(rocksDBSPrivacyStorageFactory.create(any(SegmentIdentifier.class), any(), any()))
         .thenReturn(new InMemoryKeyValueStorage());

     lenient()
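The tightened matcher above is likely needed because the storage factories now expose both a create(SegmentIdentifier, ...) and a create(List<SegmentIdentifier>, ...) overload (see the later hunks), so an untyped any() would no longer resolve to a unique method at compile time. A sketch of the disambiguation, using the mock names from this hunk (the overload rationale is an inference, not stated in the commit):

```java
// Typed matcher pins the single-segment create(...) overload:
lenient()
    .when(rocksDBSPrivacyStorageFactory.create(any(SegmentIdentifier.class), any(), any()))
    .thenReturn(new InMemoryKeyValueStorage());
```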
@@ -23,13 +23,18 @@ import static org.hyperledger.besu.ethereum.core.VariablesStorageHelper.assertVa
 import static org.hyperledger.besu.ethereum.core.VariablesStorageHelper.getSampleVariableValues;
 import static org.hyperledger.besu.ethereum.core.VariablesStorageHelper.populateBlockchainStorage;
 import static org.hyperledger.besu.ethereum.core.VariablesStorageHelper.populateVariablesStorage;
+import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.BLOCKCHAIN;
+import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.VARIABLES;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.eq;
 import static org.mockito.Mockito.when;

 import org.hyperledger.besu.cli.CommandTestAbstract;
 import org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier;
 import org.hyperledger.besu.services.kvstore.InMemoryKeyValueStorage;
+import org.hyperledger.besu.services.kvstore.SegmentedInMemoryKeyValueStorage;
+import org.hyperledger.besu.services.kvstore.SegmentedKeyValueStorageAdapter;
+
+import java.util.List;

 import org.junit.Test;
 import org.junit.runner.RunWith;
@@ -58,13 +63,14 @@ public class StorageSubCommandTest extends CommandTestAbstract {

   @Test
   public void revertVariables() {
-    final var kvVariables = new InMemoryKeyValueStorage();
-    final var kvBlockchain = new InMemoryKeyValueStorage();
-    when(rocksDBStorageFactory.create(eq(KeyValueSegmentIdentifier.VARIABLES), any(), any()))
-        .thenReturn(kvVariables);
-    when(rocksDBStorageFactory.create(eq(KeyValueSegmentIdentifier.BLOCKCHAIN), any(), any()))
-        .thenReturn(kvBlockchain);
+    final var kvVariablesSeg = new SegmentedInMemoryKeyValueStorage();
+    final var kvVariables = new SegmentedKeyValueStorageAdapter(VARIABLES, kvVariablesSeg);
+    final var kvBlockchainSeg = new SegmentedInMemoryKeyValueStorage();
+    final var kvBlockchain = new SegmentedKeyValueStorageAdapter(BLOCKCHAIN, kvBlockchainSeg);
+    when(rocksDBStorageFactory.create(eq(List.of(VARIABLES)), any(), any()))
+        .thenReturn(kvVariablesSeg);
+    when(rocksDBStorageFactory.create(eq(List.of(BLOCKCHAIN)), any(), any()))
+        .thenReturn(kvBlockchainSeg);

     final var variableValues = getSampleVariableValues();
     assertNoVariablesInStorage(kvBlockchain);
     populateVariablesStorage(kvVariables, variableValues);
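The updated test builds a segmented in-memory store and then wraps each segment in a SegmentedKeyValueStorageAdapter, so code that still expects a plain KeyValueStorage keeps working. A round-trip sketch of that relationship (the byte-array sample data is assumed):

```java
// Writes through the single-segment view land in the named segment of the
// backing segmented store (sketch; context and imports assumed):
SegmentedInMemoryKeyValueStorage seg = new SegmentedInMemoryKeyValueStorage();
KeyValueStorage variablesView = new SegmentedKeyValueStorageAdapter(VARIABLES, seg);

final byte[] key = "k".getBytes(StandardCharsets.UTF_8);   // assumed
final byte[] value = "v".getBytes(StandardCharsets.UTF_8); // assumed
KeyValueStorageTransaction tx = variablesView.startTransaction();
tx.put(key, value);
tx.commit();

// ...and are visible through the segmented handle under that segment:
Optional<byte[]> roundTrip = seg.get(VARIABLES, key);
```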
@@ -77,12 +83,14 @@ public class StorageSubCommandTest extends CommandTestAbstract {

   @Test
   public void revertVariablesWhenSomeVariablesDoNotExist() {
-    final var kvVariables = new InMemoryKeyValueStorage();
-    final var kvBlockchain = new InMemoryKeyValueStorage();
-    when(rocksDBStorageFactory.create(eq(KeyValueSegmentIdentifier.VARIABLES), any(), any()))
-        .thenReturn(kvVariables);
-    when(rocksDBStorageFactory.create(eq(KeyValueSegmentIdentifier.BLOCKCHAIN), any(), any()))
-        .thenReturn(kvBlockchain);
+    final var kvVariablesSeg = new SegmentedInMemoryKeyValueStorage();
+    final var kvVariables = new SegmentedKeyValueStorageAdapter(VARIABLES, kvVariablesSeg);
+    final var kvBlockchainSeg = new SegmentedInMemoryKeyValueStorage();
+    final var kvBlockchain = new SegmentedKeyValueStorageAdapter(BLOCKCHAIN, kvBlockchainSeg);
+    when(rocksDBStorageFactory.create(eq(List.of(VARIABLES)), any(), any()))
+        .thenReturn(kvVariablesSeg);
+    when(rocksDBStorageFactory.create(eq(List.of(BLOCKCHAIN)), any(), any()))
+        .thenReturn(kvBlockchainSeg);

     final var variableValues = getSampleVariableValues();
     variableValues.remove(FINALIZED_BLOCK_HASH);
@@ -100,10 +108,8 @@ public class StorageSubCommandTest extends CommandTestAbstract {
   public void doesNothingWhenVariablesAlreadyReverted() {
     final var kvVariables = new InMemoryKeyValueStorage();
     final var kvBlockchain = new InMemoryKeyValueStorage();
-    when(rocksDBStorageFactory.create(eq(KeyValueSegmentIdentifier.VARIABLES), any(), any()))
-        .thenReturn(kvVariables);
-    when(rocksDBStorageFactory.create(eq(KeyValueSegmentIdentifier.BLOCKCHAIN), any(), any()))
-        .thenReturn(kvBlockchain);
+    when(rocksDBStorageFactory.create(eq(VARIABLES), any(), any())).thenReturn(kvVariables);
+    when(rocksDBStorageFactory.create(eq(BLOCKCHAIN), any(), any())).thenReturn(kvBlockchain);

     final var variableValues = getSampleVariableValues();
     assertNoVariablesInStorage(kvVariables);
@@ -134,7 +134,6 @@ public class BesuControllerBuilderTest {
     when(storageProvider.createWorldStateStorage(DataStorageFormat.FOREST))
         .thenReturn(worldStateStorage);
     when(storageProvider.createWorldStatePreimageStorage()).thenReturn(worldStatePreimageStorage);
-    when(storageProvider.isWorldStateIterable()).thenReturn(true);

     when(worldStateStorage.isWorldStateAvailable(any(), any())).thenReturn(true);
     when(worldStatePreimageStorage.updater())
@@ -24,14 +24,17 @@ import org.hyperledger.besu.datatypes.Address;

 import java.util.Collection;

+import org.mockito.quality.Strictness;
+
 public class BftContextBuilder {

   public static BftContext setupContextWithValidators(final Collection<Address> validators) {
-    final BftContext bftContext = mock(BftContext.class, withSettings().lenient());
+    final BftContext bftContext =
+        mock(BftContext.class, withSettings().strictness(Strictness.LENIENT));
     final ValidatorProvider mockValidatorProvider =
-        mock(ValidatorProvider.class, withSettings().lenient());
+        mock(ValidatorProvider.class, withSettings().strictness(Strictness.LENIENT));
     final BftBlockInterface mockBftBlockInterface =
-        mock(BftBlockInterface.class, withSettings().lenient());
+        mock(BftBlockInterface.class, withSettings().strictness(Strictness.LENIENT));
     when(bftContext.getValidatorProvider()).thenReturn(mockValidatorProvider);
     when(mockValidatorProvider.getValidatorsAfterBlock(any())).thenReturn(validators);
     when(bftContext.getBlockInterface()).thenReturn(mockBftBlockInterface);

@@ -48,11 +51,11 @@ public class BftContextBuilder {
       final Class<T> contextClazz,
       final Collection<Address> validators,
       final BftExtraData bftExtraData) {
-    final T bftContext = mock(contextClazz, withSettings().lenient());
+    final T bftContext = mock(contextClazz, withSettings().strictness(Strictness.LENIENT));
     final ValidatorProvider mockValidatorProvider =
-        mock(ValidatorProvider.class, withSettings().lenient());
+        mock(ValidatorProvider.class, withSettings().strictness(Strictness.LENIENT));
     final BftBlockInterface mockBftBlockInterface =
-        mock(BftBlockInterface.class, withSettings().lenient());
+        mock(BftBlockInterface.class, withSettings().strictness(Strictness.LENIENT));
     when(bftContext.getValidatorProvider()).thenReturn(mockValidatorProvider);
     when(mockValidatorProvider.getValidatorsAfterBlock(any())).thenReturn(validators);
     when(bftContext.getBlockInterface()).thenReturn(mockBftBlockInterface);

@@ -70,9 +73,9 @@ public class BftContextBuilder {
       final Class<T> contextClazz,
       final Collection<Address> validators,
       final BftExtraDataCodec bftExtraDataCodec) {
-    final T bftContext = mock(contextClazz, withSettings().lenient());
+    final T bftContext = mock(contextClazz, withSettings().strictness(Strictness.LENIENT));
     final ValidatorProvider mockValidatorProvider =
-        mock(ValidatorProvider.class, withSettings().lenient());
+        mock(ValidatorProvider.class, withSettings().strictness(Strictness.LENIENT));
     when(bftContext.getValidatorProvider()).thenReturn(mockValidatorProvider);
     when(mockValidatorProvider.getValidatorsAfterBlock(any())).thenReturn(validators);
     when(bftContext.getBlockInterface()).thenReturn(new BftBlockInterface(bftExtraDataCodec));
@@ -31,7 +31,7 @@ import org.hyperledger.besu.plugin.services.storage.KeyValueStorage;
 import org.hyperledger.besu.plugin.services.storage.rocksdb.RocksDBMetricsFactory;
 import org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.RocksDBConfigurationBuilder;
 import org.hyperledger.besu.plugin.services.storage.rocksdb.segmented.OptimisticRocksDBColumnarKeyValueStorage;
-import org.hyperledger.besu.services.kvstore.SnappableSegmentedKeyValueStorageAdapter;
+import org.hyperledger.besu.services.kvstore.SegmentedKeyValueStorageAdapter;

 import java.io.IOException;
 import java.nio.file.Files;

@@ -70,7 +70,7 @@ public class OperationBenchmarkHelper {
             RocksDBMetricsFactory.PUBLIC_ROCKS_DB_METRICS);

     final KeyValueStorage keyValueStorage =
-        new SnappableSegmentedKeyValueStorageAdapter<>(
+        new SegmentedKeyValueStorageAdapter(
             KeyValueSegmentIdentifier.BLOCKCHAIN, optimisticRocksDBColumnarKeyValueStorage);

     final ExecutionContextTestFixture executionContext =
@@ -42,19 +42,13 @@ public class BonsaiSnapshotWorldStateKeyValueStorage extends BonsaiWorldStateKey

   public BonsaiSnapshotWorldStateKeyValueStorage(
       final BonsaiWorldStateKeyValueStorage parentWorldStateStorage,
-      final SnappedKeyValueStorage accountStorage,
-      final SnappedKeyValueStorage codeStorage,
-      final SnappedKeyValueStorage storageStorage,
-      final SnappedKeyValueStorage trieBranchStorage,
+      final SnappedKeyValueStorage segmentedWorldStateStorage,
       final KeyValueStorage trieLogStorage,
       final ObservableMetricsSystem metricsSystem) {
     super(
         parentWorldStateStorage.flatDbMode,
         parentWorldStateStorage.flatDbReaderStrategy,
-        accountStorage,
-        codeStorage,
-        storageStorage,
-        trieBranchStorage,
+        segmentedWorldStateStorage,
         trieLogStorage,
         metricsSystem);
     this.parentWorldStateStorage = parentWorldStateStorage;

@@ -66,10 +60,7 @@ public class BonsaiSnapshotWorldStateKeyValueStorage extends BonsaiWorldStateKey
       final ObservableMetricsSystem metricsSystem) {
     this(
         worldStateStorage,
-        ((SnappableKeyValueStorage) worldStateStorage.accountStorage).takeSnapshot(),
-        ((SnappableKeyValueStorage) worldStateStorage.codeStorage).takeSnapshot(),
-        ((SnappableKeyValueStorage) worldStateStorage.storageStorage).takeSnapshot(),
-        ((SnappableKeyValueStorage) worldStateStorage.trieBranchStorage).takeSnapshot(),
+        ((SnappableKeyValueStorage) worldStateStorage.composedWorldStateStorage).takeSnapshot(),
         worldStateStorage.trieLogStorage,
         metricsSystem);
   }

@@ -85,10 +76,7 @@ public class BonsaiSnapshotWorldStateKeyValueStorage extends BonsaiWorldStateKey
   @Override
   public BonsaiUpdater updater() {
     return new Updater(
-        ((SnappedKeyValueStorage) accountStorage).getSnapshotTransaction(),
-        ((SnappedKeyValueStorage) codeStorage).getSnapshotTransaction(),
-        ((SnappedKeyValueStorage) storageStorage).getSnapshotTransaction(),
-        ((SnappedKeyValueStorage) trieBranchStorage).getSnapshotTransaction(),
+        ((SnappedKeyValueStorage) composedWorldStateStorage).getSnapshotTransaction(),
         trieLogStorage.startTransaction());
   }

@@ -223,10 +211,7 @@ public class BonsaiSnapshotWorldStateKeyValueStorage extends BonsaiWorldStateKey
     subscribers.forEach(BonsaiStorageSubscriber::onCloseStorage);

     // close all of the SnappedKeyValueStorages:
-    accountStorage.close();
-    codeStorage.close();
-    storageStorage.close();
-    trieBranchStorage.close();
+    composedWorldStateStorage.close();

     // unsubscribe the parent worldstate
     parentWorldStateStorage.unSubscribe(subscribeParentId);
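With the segments composed into one store, a snapshot is taken once instead of four times, so account, code, storage, and trie-branch data are captured at a single consistent point. The pattern, in a hedged sketch using the names from the hunks above:

```java
// One snapshot of the composed store replaces four per-segment snapshots
// (sketch; casts and method names follow the diff, surrounding setup assumed):
final SnappedKeyValueStorage snapshot =
    ((SnappableKeyValueStorage) worldStateStorage.composedWorldStateStorage).takeSnapshot();

// all world-state segments are then read and written through one
// snapshot-scoped transaction:
final SegmentedKeyValueStorageTransaction tx = snapshot.getSnapshotTransaction();
```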
@@ -14,6 +14,11 @@
  */
 package org.hyperledger.besu.ethereum.bonsai.storage;

+import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.ACCOUNT_INFO_STATE;
+import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.ACCOUNT_STORAGE_STORAGE;
+import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.CODE_STORAGE;
+import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.TRIE_BRANCH_STORAGE;
+
 import org.hyperledger.besu.datatypes.Hash;
 import org.hyperledger.besu.datatypes.StorageSlotKey;
 import org.hyperledger.besu.ethereum.bonsai.storage.flat.FlatDbReaderStrategy;

@@ -29,9 +34,12 @@ import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
 import org.hyperledger.besu.metrics.ObservableMetricsSystem;
 import org.hyperledger.besu.plugin.services.storage.KeyValueStorage;
 import org.hyperledger.besu.plugin.services.storage.KeyValueStorageTransaction;
+import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorage;
+import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorageTransaction;
 import org.hyperledger.besu.util.Subscribers;

 import java.nio.charset.StandardCharsets;
+import java.util.List;
 import java.util.Map;
 import java.util.Optional;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -60,10 +68,7 @@ public class BonsaiWorldStateKeyValueStorage implements WorldStateStorage, AutoC
   protected FlatDbMode flatDbMode;
   protected FlatDbReaderStrategy flatDbReaderStrategy;

-  protected final KeyValueStorage accountStorage;
-  protected final KeyValueStorage codeStorage;
-  protected final KeyValueStorage storageStorage;
-  protected final KeyValueStorage trieBranchStorage;
+  protected final SegmentedKeyValueStorage composedWorldStateStorage;
   protected final KeyValueStorage trieLogStorage;

   protected final ObservableMetricsSystem metricsSystem;

@@ -76,14 +81,10 @@ public class BonsaiWorldStateKeyValueStorage implements WorldStateStorage, AutoC

   public BonsaiWorldStateKeyValueStorage(
       final StorageProvider provider, final ObservableMetricsSystem metricsSystem) {
-    this.accountStorage =
-        provider.getStorageBySegmentIdentifier(KeyValueSegmentIdentifier.ACCOUNT_INFO_STATE);
-    this.codeStorage =
-        provider.getStorageBySegmentIdentifier(KeyValueSegmentIdentifier.CODE_STORAGE);
-    this.storageStorage =
-        provider.getStorageBySegmentIdentifier(KeyValueSegmentIdentifier.ACCOUNT_STORAGE_STORAGE);
-    this.trieBranchStorage =
-        provider.getStorageBySegmentIdentifier(KeyValueSegmentIdentifier.TRIE_BRANCH_STORAGE);
+    this.composedWorldStateStorage =
+        provider.getStorageBySegmentIdentifiers(
+            List.of(
+                ACCOUNT_INFO_STATE, CODE_STORAGE, ACCOUNT_STORAGE_STORAGE, TRIE_BRANCH_STORAGE));
     this.trieLogStorage =
         provider.getStorageBySegmentIdentifier(KeyValueSegmentIdentifier.TRIE_LOG_STORAGE);
     this.metricsSystem = metricsSystem;
@@ -93,18 +94,12 @@ public class BonsaiWorldStateKeyValueStorage implements WorldStateStorage, AutoC
   public BonsaiWorldStateKeyValueStorage(
       final FlatDbMode flatDbMode,
       final FlatDbReaderStrategy flatDbReaderStrategy,
-      final KeyValueStorage accountStorage,
-      final KeyValueStorage codeStorage,
-      final KeyValueStorage storageStorage,
-      final KeyValueStorage trieBranchStorage,
+      final SegmentedKeyValueStorage composedWorldStateStorage,
       final KeyValueStorage trieLogStorage,
       final ObservableMetricsSystem metricsSystem) {
     this.flatDbMode = flatDbMode;
     this.flatDbReaderStrategy = flatDbReaderStrategy;
-    this.accountStorage = accountStorage;
-    this.codeStorage = codeStorage;
-    this.storageStorage = storageStorage;
-    this.trieBranchStorage = trieBranchStorage;
+    this.composedWorldStateStorage = composedWorldStateStorage;
     this.trieLogStorage = trieLogStorage;
     this.metricsSystem = metricsSystem;
   }
@@ -112,8 +107,8 @@ public class BonsaiWorldStateKeyValueStorage implements WorldStateStorage, AutoC
   public void loadFlatDbStrategy() {
     this.flatDbMode =
         FlatDbMode.fromVersion(
-            trieBranchStorage
-                .get(FLAT_DB_MODE)
+            composedWorldStateStorage
+                .get(TRIE_BRANCH_STORAGE, FLAT_DB_MODE)
                 .map(Bytes::wrap)
                 .orElse(
                     FlatDbMode.PARTIAL

@@ -146,7 +141,7 @@ public class BonsaiWorldStateKeyValueStorage implements WorldStateStorage, AutoC
     if (codeHash.equals(Hash.EMPTY)) {
       return Optional.of(Bytes.EMPTY);
     } else {
-      return getFlatDbReaderStrategy().getCode(codeHash, accountHash, codeStorage);
+      return getFlatDbReaderStrategy().getCode(codeHash, accountHash, composedWorldStateStorage);
     }
   }
@@ -156,7 +151,7 @@ public class BonsaiWorldStateKeyValueStorage implements WorldStateStorage, AutoC
         this::getWorldStateRootHash,
         this::getAccountStateTrieNode,
         accountHash,
-        accountStorage);
+        composedWorldStateStorage);
   }

   @Override

@@ -164,8 +159,8 @@ public class BonsaiWorldStateKeyValueStorage implements WorldStateStorage, AutoC
     if (nodeHash.equals(MerkleTrie.EMPTY_TRIE_NODE_HASH)) {
       return Optional.of(MerkleTrie.EMPTY_TRIE_NODE);
     } else {
-      return trieBranchStorage
-          .get(location.toArrayUnsafe())
+      return composedWorldStateStorage
+          .get(TRIE_BRANCH_STORAGE, location.toArrayUnsafe())
           .map(Bytes::wrap)
           .filter(b -> Hash.hash(b).equals(nodeHash));
     }

@@ -186,8 +181,8 @@ public class BonsaiWorldStateKeyValueStorage implements WorldStateStorage, AutoC
     if (maybeNodeHash.filter(hash -> hash.equals(MerkleTrie.EMPTY_TRIE_NODE_HASH)).isPresent()) {
       return Optional.of(MerkleTrie.EMPTY_TRIE_NODE);
     } else {
-      return trieBranchStorage
-          .get(Bytes.concatenate(accountHash, location).toArrayUnsafe())
+      return composedWorldStateStorage
+          .get(TRIE_BRANCH_STORAGE, Bytes.concatenate(accountHash, location).toArrayUnsafe())
          .map(Bytes::wrap)
          .filter(data -> maybeNodeHash.map(hash -> Hash.hash(data).equals(hash)).orElse(true));
     }
@@ -204,15 +199,20 @@ public class BonsaiWorldStateKeyValueStorage implements WorldStateStorage, AutoC
   }

   public Optional<Bytes> getStateTrieNode(final Bytes location) {
-    return trieBranchStorage.get(location.toArrayUnsafe()).map(Bytes::wrap);
+    return composedWorldStateStorage
+        .get(TRIE_BRANCH_STORAGE, location.toArrayUnsafe())
+        .map(Bytes::wrap);
   }

   public Optional<Bytes> getWorldStateRootHash() {
-    return trieBranchStorage.get(WORLD_ROOT_HASH_KEY).map(Bytes::wrap);
+    return composedWorldStateStorage.get(TRIE_BRANCH_STORAGE, WORLD_ROOT_HASH_KEY).map(Bytes::wrap);
   }

   public Optional<Hash> getWorldStateBlockHash() {
-    return trieBranchStorage.get(WORLD_BLOCK_HASH_KEY).map(Bytes32::wrap).map(Hash::wrap);
+    return composedWorldStateStorage
+        .get(TRIE_BRANCH_STORAGE, WORLD_BLOCK_HASH_KEY)
+        .map(Bytes32::wrap)
+        .map(Hash::wrap);
   }

   public Optional<Bytes> getStorageValueByStorageSlotKey(
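Reads that previously went to a dedicated trieBranchStorage handle now pass the segment as the first argument. The recurring pattern, condensed into one sketch (class constants and fields as in the hunks above):

```java
// Segment-qualified read, as used throughout this file after the change:
Optional<Bytes> rootHash =
    composedWorldStateStorage
        .get(TRIE_BRANCH_STORAGE, WORLD_ROOT_HASH_KEY)
        .map(Bytes::wrap);
```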
@@ -240,21 +240,22 @@ public class BonsaiWorldStateKeyValueStorage implements WorldStateStorage, AutoC
         (location, hash) -> getAccountStorageTrieNode(accountHash, location, hash),
         accountHash,
         storageSlotKey,
-        storageStorage);
+        composedWorldStateStorage);
   }

   @Override
   public Map<Bytes32, Bytes> streamFlatAccounts(
       final Bytes startKeyHash, final Bytes32 endKeyHash, final long max) {
     return getFlatDbReaderStrategy()
-        .streamAccountFlatDatabase(accountStorage, startKeyHash, endKeyHash, max);
+        .streamAccountFlatDatabase(composedWorldStateStorage, startKeyHash, endKeyHash, max);
   }

   @Override
   public Map<Bytes32, Bytes> streamFlatStorages(
       final Hash accountHash, final Bytes startKeyHash, final Bytes32 endKeyHash, final long max) {
     return getFlatDbReaderStrategy()
-        .streamStorageFlatDatabase(storageStorage, accountHash, startKeyHash, endKeyHash, max);
+        .streamStorageFlatDatabase(
+            composedWorldStateStorage, accountHash, startKeyHash, endKeyHash, max);
   }

   @Override
@@ -264,23 +265,27 @@ public class BonsaiWorldStateKeyValueStorage implements WorldStateStorage, AutoC

   @Override
   public boolean isWorldStateAvailable(final Bytes32 rootHash, final Hash blockHash) {
-    return trieBranchStorage
-        .get(WORLD_ROOT_HASH_KEY)
+    return composedWorldStateStorage
+        .get(TRIE_BRANCH_STORAGE, WORLD_ROOT_HASH_KEY)
        .map(Bytes32::wrap)
        .map(hash -> hash.equals(rootHash) || trieLogStorage.containsKey(blockHash.toArrayUnsafe()))
        .orElse(false);
   }

   public void upgradeToFullFlatDbMode() {
-    final KeyValueStorageTransaction transaction = trieBranchStorage.startTransaction();
-    transaction.put(FLAT_DB_MODE, FlatDbMode.FULL.getVersion().toArrayUnsafe());
+    final SegmentedKeyValueStorageTransaction transaction =
+        composedWorldStateStorage.startTransaction();
+    transaction.put(
+        TRIE_BRANCH_STORAGE, FLAT_DB_MODE, FlatDbMode.FULL.getVersion().toArrayUnsafe());
     transaction.commit();
     loadFlatDbStrategy(); // force reload of flat db reader strategy
   }

   public void downgradeToPartialFlatDbMode() {
-    final KeyValueStorageTransaction transaction = trieBranchStorage.startTransaction();
-    transaction.put(FLAT_DB_MODE, FlatDbMode.PARTIAL.getVersion().toArrayUnsafe());
+    final SegmentedKeyValueStorageTransaction transaction =
+        composedWorldStateStorage.startTransaction();
+    transaction.put(
+        TRIE_BRANCH_STORAGE, FLAT_DB_MODE, FlatDbMode.PARTIAL.getVersion().toArrayUnsafe());
     transaction.commit();
     loadFlatDbStrategy(); // force reload of flat db reader strategy
   }
@@ -288,8 +293,8 @@ public class BonsaiWorldStateKeyValueStorage implements WorldStateStorage, AutoC
   @Override
   public void clear() {
     subscribers.forEach(BonsaiStorageSubscriber::onClearStorage);
-    getFlatDbReaderStrategy().clearAll(accountStorage, storageStorage, codeStorage);
-    trieBranchStorage.clear();
+    getFlatDbReaderStrategy().clearAll(composedWorldStateStorage);
+    composedWorldStateStorage.clear(TRIE_BRANCH_STORAGE);
     trieLogStorage.clear();
     loadFlatDbStrategy(); // force reload of flat db reader strategy
   }
@@ -303,17 +308,13 @@ public class BonsaiWorldStateKeyValueStorage implements WorldStateStorage, AutoC
   @Override
   public void clearFlatDatabase() {
     subscribers.forEach(BonsaiStorageSubscriber::onClearFlatDatabaseStorage);
-    getFlatDbReaderStrategy().resetOnResync(accountStorage, storageStorage);
+    getFlatDbReaderStrategy().resetOnResync(composedWorldStateStorage);
   }

   @Override
   public BonsaiUpdater updater() {
     return new Updater(
-        accountStorage.startTransaction(),
-        codeStorage.startTransaction(),
-        storageStorage.startTransaction(),
-        trieBranchStorage.startTransaction(),
-        trieLogStorage.startTransaction());
+        composedWorldStateStorage.startTransaction(), trieLogStorage.startTransaction());
   }

   @Override
@@ -343,36 +344,27 @@ public class BonsaiWorldStateKeyValueStorage implements WorldStateStorage, AutoC

     void removeStorageValueBySlotHash(final Hash accountHash, final Hash slotHash);

-    KeyValueStorageTransaction getTrieBranchStorageTransaction();
+    SegmentedKeyValueStorageTransaction getWorldStateTransaction();

     KeyValueStorageTransaction getTrieLogStorageTransaction();
   }

   public static class Updater implements BonsaiUpdater {

-    private final KeyValueStorageTransaction accountStorageTransaction;
-    private final KeyValueStorageTransaction codeStorageTransaction;
-    private final KeyValueStorageTransaction storageStorageTransaction;
-    private final KeyValueStorageTransaction trieBranchStorageTransaction;
+    private final SegmentedKeyValueStorageTransaction composedWorldStateTransaction;
     private final KeyValueStorageTransaction trieLogStorageTransaction;

     public Updater(
-        final KeyValueStorageTransaction accountStorageTransaction,
-        final KeyValueStorageTransaction codeStorageTransaction,
-        final KeyValueStorageTransaction storageStorageTransaction,
-        final KeyValueStorageTransaction trieBranchStorageTransaction,
+        final SegmentedKeyValueStorageTransaction composedWorldStateTransaction,
         final KeyValueStorageTransaction trieLogStorageTransaction) {

-      this.accountStorageTransaction = accountStorageTransaction;
-      this.codeStorageTransaction = codeStorageTransaction;
-      this.storageStorageTransaction = storageStorageTransaction;
-      this.trieBranchStorageTransaction = trieBranchStorageTransaction;
+      this.composedWorldStateTransaction = composedWorldStateTransaction;
       this.trieLogStorageTransaction = trieLogStorageTransaction;
     }

     @Override
     public BonsaiUpdater removeCode(final Hash accountHash) {
-      codeStorageTransaction.remove(accountHash.toArrayUnsafe());
+      composedWorldStateTransaction.remove(CODE_STORAGE, accountHash.toArrayUnsafe());
       return this;
     }
@@ -382,13 +374,14 @@ public class BonsaiWorldStateKeyValueStorage implements WorldStateStorage, AutoC
         // Don't save empty values
         return this;
       }
-      codeStorageTransaction.put(accountHash.toArrayUnsafe(), code.toArrayUnsafe());
+      composedWorldStateTransaction.put(
+          CODE_STORAGE, accountHash.toArrayUnsafe(), code.toArrayUnsafe());
       return this;
     }

     @Override
     public BonsaiUpdater removeAccountInfoState(final Hash accountHash) {
-      accountStorageTransaction.remove(accountHash.toArrayUnsafe());
+      composedWorldStateTransaction.remove(ACCOUNT_INFO_STATE, accountHash.toArrayUnsafe());
       return this;
     }
@@ -398,16 +391,20 @@ public class BonsaiWorldStateKeyValueStorage implements WorldStateStorage, AutoC
         // Don't save empty values
         return this;
       }
-      accountStorageTransaction.put(accountHash.toArrayUnsafe(), accountValue.toArrayUnsafe());
+      composedWorldStateTransaction.put(
+          ACCOUNT_INFO_STATE, accountHash.toArrayUnsafe(), accountValue.toArrayUnsafe());
       return this;
     }

     @Override
     public WorldStateStorage.Updater saveWorldState(
         final Bytes blockHash, final Bytes32 nodeHash, final Bytes node) {
-      trieBranchStorageTransaction.put(Bytes.EMPTY.toArrayUnsafe(), node.toArrayUnsafe());
-      trieBranchStorageTransaction.put(WORLD_ROOT_HASH_KEY, nodeHash.toArrayUnsafe());
-      trieBranchStorageTransaction.put(WORLD_BLOCK_HASH_KEY, blockHash.toArrayUnsafe());
+      composedWorldStateTransaction.put(
+          TRIE_BRANCH_STORAGE, Bytes.EMPTY.toArrayUnsafe(), node.toArrayUnsafe());
+      composedWorldStateTransaction.put(
+          TRIE_BRANCH_STORAGE, WORLD_ROOT_HASH_KEY, nodeHash.toArrayUnsafe());
+      composedWorldStateTransaction.put(
+          TRIE_BRANCH_STORAGE, WORLD_BLOCK_HASH_KEY, blockHash.toArrayUnsafe());
       return this;
     }
@@ -418,13 +415,14 @@ public class BonsaiWorldStateKeyValueStorage implements WorldStateStorage, AutoC
         // Don't save empty nodes
         return this;
       }
-      trieBranchStorageTransaction.put(location.toArrayUnsafe(), node.toArrayUnsafe());
+      composedWorldStateTransaction.put(
+          TRIE_BRANCH_STORAGE, location.toArrayUnsafe(), node.toArrayUnsafe());
       return this;
     }

     @Override
     public BonsaiUpdater removeAccountStateTrieNode(final Bytes location, final Bytes32 nodeHash) {
-      trieBranchStorageTransaction.remove(location.toArrayUnsafe());
+      composedWorldStateTransaction.remove(TRIE_BRANCH_STORAGE, location.toArrayUnsafe());
       return this;
     }
@@ -435,28 +433,33 @@ public class BonsaiWorldStateKeyValueStorage implements WorldStateStorage, AutoC
         // Don't save empty nodes
         return this;
       }
-      trieBranchStorageTransaction.put(
-          Bytes.concatenate(accountHash, location).toArrayUnsafe(), node.toArrayUnsafe());
+      composedWorldStateTransaction.put(
+          TRIE_BRANCH_STORAGE,
+          Bytes.concatenate(accountHash, location).toArrayUnsafe(),
+          node.toArrayUnsafe());
       return this;
     }

     @Override
     public synchronized BonsaiUpdater putStorageValueBySlotHash(
         final Hash accountHash, final Hash slotHash, final Bytes storage) {
-      storageStorageTransaction.put(
-          Bytes.concatenate(accountHash, slotHash).toArrayUnsafe(), storage.toArrayUnsafe());
+      composedWorldStateTransaction.put(
+          ACCOUNT_STORAGE_STORAGE,
+          Bytes.concatenate(accountHash, slotHash).toArrayUnsafe(),
+          storage.toArrayUnsafe());
       return this;
     }

     @Override
     public synchronized void removeStorageValueBySlotHash(
         final Hash accountHash, final Hash slotHash) {
-      storageStorageTransaction.remove(Bytes.concatenate(accountHash, slotHash).toArrayUnsafe());
+      composedWorldStateTransaction.remove(
+          ACCOUNT_STORAGE_STORAGE, Bytes.concatenate(accountHash, slotHash).toArrayUnsafe());
     }

     @Override
-    public KeyValueStorageTransaction getTrieBranchStorageTransaction() {
-      return trieBranchStorageTransaction;
+    public SegmentedKeyValueStorageTransaction getWorldStateTransaction() {
+      return composedWorldStateTransaction;
     }

     @Override
@@ -466,19 +469,14 @@ public class BonsaiWorldStateKeyValueStorage implements WorldStateStorage, AutoC

     @Override
     public void commit() {
-      accountStorageTransaction.commit();
-      codeStorageTransaction.commit();
-      storageStorageTransaction.commit();
-      trieBranchStorageTransaction.commit();
+      // write the log ahead, then the worldstate
       trieLogStorageTransaction.commit();
+      composedWorldStateTransaction.commit();
     }

     @Override
     public void rollback() {
-      accountStorageTransaction.rollback();
-      codeStorageTransaction.rollback();
-      storageStorageTransaction.rollback();
-      trieBranchStorageTransaction.rollback();
+      composedWorldStateTransaction.rollback();
       trieLogStorageTransaction.rollback();
     }
   }
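Note the commit ordering in the new Updater: the trie log is committed before the composed world state, matching the "write the log ahead" comment. Presumably a crash between the two commits then leaves a durable, replayable log rather than world-state changes with no corresponding log entry. Condensed:

```java
// Write-ahead ordering from the hunk above (condensed; the crash-recovery
// reading is an assumption, only the ordering comment is the author's):
public void commit() {
  trieLogStorageTransaction.commit();     // log first...
  composedWorldStateTransaction.commit(); // ...then the world state
}
```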
@@ -521,10 +519,7 @@ public class BonsaiWorldStateKeyValueStorage implements WorldStateStorage, AutoC
     subscribers.forEach(BonsaiStorageSubscriber::onCloseStorage);

     // close all of the KeyValueStorages:
-    accountStorage.close();
-    codeStorage.close();
-    storageStorage.close();
-    trieBranchStorage.close();
+    composedWorldStateStorage.close();
     trieLogStorage.close();

     // set storage closed
@@ -27,31 +27,18 @@ public class BonsaiWorldStateLayerStorage extends BonsaiSnapshotWorldStateKeyVal

   public BonsaiWorldStateLayerStorage(final BonsaiWorldStateKeyValueStorage parent) {
     this(
-        new LayeredKeyValueStorage(parent.accountStorage),
-        new LayeredKeyValueStorage(parent.codeStorage),
-        new LayeredKeyValueStorage(parent.storageStorage),
-        new LayeredKeyValueStorage(parent.trieBranchStorage),
+        new LayeredKeyValueStorage(parent.composedWorldStateStorage),
         parent.trieLogStorage,
         parent,
         parent.metricsSystem);
   }

   public BonsaiWorldStateLayerStorage(
-      final SnappedKeyValueStorage accountStorage,
-      final SnappedKeyValueStorage codeStorage,
-      final SnappedKeyValueStorage storageStorage,
-      final SnappedKeyValueStorage trieBranchStorage,
+      final SnappedKeyValueStorage composedWorldStateStorage,
       final KeyValueStorage trieLogStorage,
       final BonsaiWorldStateKeyValueStorage parent,
       final ObservableMetricsSystem metricsSystem) {
-    super(
-        parent,
-        accountStorage,
-        codeStorage,
-        storageStorage,
-        trieBranchStorage,
-        trieLogStorage,
-        metricsSystem);
+    super(parent, composedWorldStateStorage, trieLogStorage, metricsSystem);
   }

   @Override

@@ -62,10 +49,7 @@ public class BonsaiWorldStateLayerStorage extends BonsaiSnapshotWorldStateKeyVal
   @Override
   public BonsaiWorldStateLayerStorage clone() {
     return new BonsaiWorldStateLayerStorage(
-        ((LayeredKeyValueStorage) accountStorage).clone(),
-        ((LayeredKeyValueStorage) codeStorage).clone(),
-        ((LayeredKeyValueStorage) storageStorage).clone(),
-        ((LayeredKeyValueStorage) trieBranchStorage).clone(),
+        ((LayeredKeyValueStorage) composedWorldStateStorage).clone(),
         trieLogStorage,
         parentWorldStateStorage,
         metricsSystem);
@@ -15,13 +15,17 @@
  */
 package org.hyperledger.besu.ethereum.bonsai.storage.flat;

+import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.ACCOUNT_INFO_STATE;
+import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.ACCOUNT_STORAGE_STORAGE;
+import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.CODE_STORAGE;
+
 import org.hyperledger.besu.datatypes.Hash;
 import org.hyperledger.besu.datatypes.StorageSlotKey;
 import org.hyperledger.besu.ethereum.trie.NodeLoader;
 import org.hyperledger.besu.metrics.BesuMetricCategory;
 import org.hyperledger.besu.plugin.services.MetricsSystem;
 import org.hyperledger.besu.plugin.services.metrics.Counter;
-import org.hyperledger.besu.plugin.services.storage.KeyValueStorage;
+import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorage;

 import java.util.Map;
 import java.util.Optional;
@@ -84,7 +88,7 @@ public abstract class FlatDbReaderStrategy {
       Supplier<Optional<Bytes>> worldStateRootHashSupplier,
       NodeLoader nodeLoader,
       Hash accountHash,
-      KeyValueStorage accountStorage);
+      SegmentedKeyValueStorage storage);

   /*
    * Retrieves the storage value for the given account hash and storage slot key, using the world state root hash supplier, storage root supplier, and node loader.
@@ -96,46 +100,42 @@ public abstract class FlatDbReaderStrategy {
       NodeLoader nodeLoader,
       Hash accountHash,
       StorageSlotKey storageSlotKey,
-      KeyValueStorage storageStorage);
+      SegmentedKeyValueStorage storageStorage);

   /*
    * Retrieves the code data for the given code hash and account hash.
    */
   public Optional<Bytes> getCode(
-      final Bytes32 codeHash, final Hash accountHash, final KeyValueStorage codeStorage) {
+      final Bytes32 codeHash, final Hash accountHash, final SegmentedKeyValueStorage storage) {
     if (codeHash.equals(Hash.EMPTY)) {
       return Optional.of(Bytes.EMPTY);
     } else {
-      return codeStorage
-          .get(accountHash.toArrayUnsafe())
+      return storage
+          .get(CODE_STORAGE, accountHash.toArrayUnsafe())
          .map(Bytes::wrap)
          .filter(b -> Hash.hash(b).equals(codeHash));
     }
   }

-  public void clearAll(
-      final KeyValueStorage accountStorage,
-      final KeyValueStorage storageStorage,
-      final KeyValueStorage codeStorage) {
-    accountStorage.clear();
-    storageStorage.clear();
-    codeStorage.clear();
+  public void clearAll(final SegmentedKeyValueStorage storage) {
+    storage.clear(ACCOUNT_INFO_STATE);
+    storage.clear(ACCOUNT_STORAGE_STORAGE);
+    storage.clear(CODE_STORAGE);
   }

-  public void resetOnResync(
-      final KeyValueStorage accountStorage, final KeyValueStorage storageStorage) {
-    accountStorage.clear();
-    storageStorage.clear();
+  public void resetOnResync(final SegmentedKeyValueStorage storage) {
+    storage.clear(ACCOUNT_INFO_STATE);
+    storage.clear(ACCOUNT_STORAGE_STORAGE);
   }

   public Map<Bytes32, Bytes> streamAccountFlatDatabase(
-      final KeyValueStorage accountStorage,
+      final SegmentedKeyValueStorage storage,
       final Bytes startKeyHash,
       final Bytes32 endKeyHash,
       final long max) {
     final Stream<Pair<Bytes32, Bytes>> pairStream =
-        accountStorage
-            .streamFromKey(startKeyHash.toArrayUnsafe())
+        storage
+            .streamFromKey(ACCOUNT_INFO_STATE, startKeyHash.toArrayUnsafe())
            .limit(max)
            .map(pair -> new Pair<>(Bytes32.wrap(pair.getKey()), Bytes.wrap(pair.getValue())))
            .takeWhile(pair -> pair.getFirst().compareTo(endKeyHash) <= 0);
@@ -148,14 +148,16 @@ public abstract class FlatDbReaderStrategy {
   }

   public Map<Bytes32, Bytes> streamStorageFlatDatabase(
-      final KeyValueStorage storageStorage,
+      final SegmentedKeyValueStorage storage,
       final Hash accountHash,
       final Bytes startKeyHash,
       final Bytes32 endKeyHash,
       final long max) {
     final Stream<Pair<Bytes32, Bytes>> pairStream =
-        storageStorage
-            .streamFromKey(Bytes.concatenate(accountHash, startKeyHash).toArrayUnsafe())
+        storage
+            .streamFromKey(
+                ACCOUNT_STORAGE_STORAGE,
+                Bytes.concatenate(accountHash, startKeyHash).toArrayUnsafe())
            .takeWhile(pair -> Bytes.wrap(pair.getKey()).slice(0, Hash.SIZE).equals(accountHash))
            .limit(max)
            .map(
@@ -15,13 +15,16 @@
  */
 package org.hyperledger.besu.ethereum.bonsai.storage.flat;

+import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.ACCOUNT_INFO_STATE;
+import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.ACCOUNT_STORAGE_STORAGE;
+
 import org.hyperledger.besu.datatypes.Hash;
 import org.hyperledger.besu.datatypes.StorageSlotKey;
 import org.hyperledger.besu.ethereum.trie.NodeLoader;
 import org.hyperledger.besu.metrics.BesuMetricCategory;
 import org.hyperledger.besu.plugin.services.MetricsSystem;
 import org.hyperledger.besu.plugin.services.metrics.Counter;
-import org.hyperledger.besu.plugin.services.storage.KeyValueStorage;
+import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorage;

 import java.util.Optional;
 import java.util.function.Supplier;
@@ -55,10 +58,10 @@ public class FullFlatDbReaderStrategy extends FlatDbReaderStrategy {
       final Supplier<Optional<Bytes>> worldStateRootHashSupplier,
       final NodeLoader nodeLoader,
       final Hash accountHash,
-      final KeyValueStorage accountStorage) {
+      final SegmentedKeyValueStorage storage) {
     getAccountCounter.inc();
     final Optional<Bytes> accountFound =
-        accountStorage.get(accountHash.toArrayUnsafe()).map(Bytes::wrap);
+        storage.get(ACCOUNT_INFO_STATE, accountHash.toArrayUnsafe()).map(Bytes::wrap);
     if (accountFound.isPresent()) {
       getAccountFoundInFlatDatabaseCounter.inc();
     } else {

@@ -74,11 +77,13 @@ public class FullFlatDbReaderStrategy extends FlatDbReaderStrategy {
       final NodeLoader nodeLoader,
       final Hash accountHash,
       final StorageSlotKey storageSlotKey,
-      final KeyValueStorage storageStorage) {
+      final SegmentedKeyValueStorage storage) {
     getStorageValueCounter.inc();
     final Optional<Bytes> storageFound =
-        storageStorage
-            .get(Bytes.concatenate(accountHash, storageSlotKey.getSlotHash()).toArrayUnsafe())
+        storage
+            .get(
+                ACCOUNT_STORAGE_STORAGE,
+                Bytes.concatenate(accountHash, storageSlotKey.getSlotHash()).toArrayUnsafe())
            .map(Bytes::wrap);
     if (storageFound.isPresent()) {
       getStorageValueFlatDatabaseCounter.inc();

@@ -90,8 +95,7 @@ public class FullFlatDbReaderStrategy extends FlatDbReaderStrategy {
   }

   @Override
-  public void resetOnResync(
-      final KeyValueStorage accountStorage, final KeyValueStorage storageStorage) {
+  public void resetOnResync(final SegmentedKeyValueStorage storage) {
     // NOOP
     // not need to reset anything in full mode
   }
@@ -15,6 +15,9 @@
  */
 package org.hyperledger.besu.ethereum.bonsai.storage.flat;

+import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.ACCOUNT_INFO_STATE;
+import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.ACCOUNT_STORAGE_STORAGE;
+
 import org.hyperledger.besu.datatypes.Hash;
 import org.hyperledger.besu.datatypes.StorageSlotKey;
 import org.hyperledger.besu.ethereum.trie.NodeLoader;

@@ -23,7 +26,7 @@ import org.hyperledger.besu.ethereum.trie.patricia.StoredNodeFactory;
 import org.hyperledger.besu.metrics.BesuMetricCategory;
 import org.hyperledger.besu.plugin.services.MetricsSystem;
 import org.hyperledger.besu.plugin.services.metrics.Counter;
-import org.hyperledger.besu.plugin.services.storage.KeyValueStorage;
+import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorage;

 import java.util.Optional;
 import java.util.function.Function;
@@ -81,9 +84,10 @@ public class PartialFlatDbReaderStrategy extends FlatDbReaderStrategy {
       final Supplier<Optional<Bytes>> worldStateRootHashSupplier,
       final NodeLoader nodeLoader,
       final Hash accountHash,
-      final KeyValueStorage accountStorage) {
+      final SegmentedKeyValueStorage storage) {
     getAccountCounter.inc();
-    Optional<Bytes> response = accountStorage.get(accountHash.toArrayUnsafe()).map(Bytes::wrap);
+    Optional<Bytes> response =
+        storage.get(ACCOUNT_INFO_STATE, accountHash.toArrayUnsafe()).map(Bytes::wrap);
     if (response.isEmpty()) {
       // after a snapsync/fastsync we only have the trie branches.
       final Optional<Bytes> worldStateRootHash = worldStateRootHashSupplier.get();

@@ -113,11 +117,13 @@ public class PartialFlatDbReaderStrategy extends FlatDbReaderStrategy {
       final NodeLoader nodeLoader,
       final Hash accountHash,
       final StorageSlotKey storageSlotKey,
-      final KeyValueStorage storageStorage) {
+      final SegmentedKeyValueStorage storage) {
     getStorageValueCounter.inc();
     Optional<Bytes> response =
-        storageStorage
-            .get(Bytes.concatenate(accountHash, storageSlotKey.getSlotHash()).toArrayUnsafe())
+        storage
+            .get(
+                ACCOUNT_STORAGE_STORAGE,
+                Bytes.concatenate(accountHash, storageSlotKey.getSlotHash()).toArrayUnsafe())
            .map(Bytes::wrap);
     if (response.isEmpty()) {
       final Optional<Hash> storageRoot = storageRootSupplier.get();
@@ -19,6 +19,7 @@ package org.hyperledger.besu.ethereum.bonsai.worldview;
 import static org.hyperledger.besu.ethereum.bonsai.BonsaiAccount.fromRLP;
 import static org.hyperledger.besu.ethereum.bonsai.storage.BonsaiWorldStateKeyValueStorage.WORLD_BLOCK_HASH_KEY;
 import static org.hyperledger.besu.ethereum.bonsai.storage.BonsaiWorldStateKeyValueStorage.WORLD_ROOT_HASH_KEY;
+import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.TRIE_BRANCH_STORAGE;

 import org.hyperledger.besu.datatypes.Address;
 import org.hyperledger.besu.datatypes.Hash;

@@ -43,6 +44,8 @@ import org.hyperledger.besu.evm.account.Account;
 import org.hyperledger.besu.evm.worldstate.WorldUpdater;
 import org.hyperledger.besu.plugin.services.exception.StorageException;
 import org.hyperledger.besu.plugin.services.storage.KeyValueStorageTransaction;
+import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier;
+import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorageTransaction;

 import java.util.Map;
 import java.util.Optional;
@@ -182,7 +185,11 @@ public class BonsaiWorldState
         bonsaiUpdater -> {
           accountTrie.commit(
               (location, hash, value) ->
-                  writeTrieNode(bonsaiUpdater.getTrieBranchStorageTransaction(), location, value));
+                  writeTrieNode(
+                      TRIE_BRANCH_STORAGE,
+                      bonsaiUpdater.getWorldStateTransaction(),
+                      location,
+                      value));
         });
     final Bytes32 rootHash = accountTrie.getRootHash();
     return Hash.wrap(rootHash);
@@ -391,17 +398,17 @@ public class BonsaiWorldState
       };

       stateUpdater
-          .getTrieBranchStorageTransaction()
-          .put(WORLD_BLOCK_HASH_KEY, blockHeader.getHash().toArrayUnsafe());
+          .getWorldStateTransaction()
+          .put(TRIE_BRANCH_STORAGE, WORLD_BLOCK_HASH_KEY, blockHeader.getHash().toArrayUnsafe());
       worldStateBlockHash = blockHeader.getHash();
     } else {
-      stateUpdater.getTrieBranchStorageTransaction().remove(WORLD_BLOCK_HASH_KEY);
+      stateUpdater.getWorldStateTransaction().remove(TRIE_BRANCH_STORAGE, WORLD_BLOCK_HASH_KEY);
       worldStateBlockHash = null;
     }

     stateUpdater
-        .getTrieBranchStorageTransaction()
-        .put(WORLD_ROOT_HASH_KEY, newWorldStateRootHash.toArrayUnsafe());
+        .getWorldStateTransaction()
+        .put(TRIE_BRANCH_STORAGE, WORLD_ROOT_HASH_KEY, newWorldStateRootHash.toArrayUnsafe());
     worldStateRootHash = newWorldStateRootHash;
     success = true;
   } finally {
@@ -454,11 +461,35 @@ public class BonsaiWorldState
         }
       };

+  static final SegmentedKeyValueStorageTransaction noOpSegmentedTx =
+      new SegmentedKeyValueStorageTransaction() {
+
+        @Override
+        public void put(
+            final SegmentIdentifier segmentIdentifier, final byte[] key, final byte[] value) {
+          // no-op
+        }
+
+        @Override
+        public void remove(final SegmentIdentifier segmentIdentifier, final byte[] key) {
+          // no-op
+        }
+
+        @Override
+        public void commit() throws StorageException {
+          // no-op
+        }
+
+        @Override
+        public void rollback() {
+          // no-op
+        }
+      };
+
   @Override
   public Hash frontierRootHash() {
     return calculateRootHash(
-        Optional.of(
-            new BonsaiWorldStateKeyValueStorage.Updater(noOpTx, noOpTx, noOpTx, noOpTx, noOpTx)),
+        Optional.of(new BonsaiWorldStateKeyValueStorage.Updater(noOpSegmentedTx, noOpTx)),
         accumulator.copy());
   }
@@ -484,8 +515,11 @@ public class BonsaiWorldState
   }

   private void writeTrieNode(
-      final KeyValueStorageTransaction tx, final Bytes location, final Bytes value) {
-    tx.put(location.toArrayUnsafe(), value.toArrayUnsafe());
+      final SegmentIdentifier segmentId,
+      final SegmentedKeyValueStorageTransaction tx,
+      final Bytes location,
+      final Bytes value) {
+    tx.put(segmentId, location.toArrayUnsafe(), value.toArrayUnsafe());
   }

   protected Optional<Bytes> getStorageTrieNode(
@@ -22,9 +22,10 @@ import org.hyperledger.besu.ethereum.worldstate.WorldStatePreimageStorage;
 import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
 import org.hyperledger.besu.plugin.services.storage.KeyValueStorage;
 import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier;
-import org.hyperledger.besu.plugin.services.storage.SnappableKeyValueStorage;
+import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorage;

 import java.io.Closeable;
+import java.util.List;

 public interface StorageProvider extends Closeable {

@@ -39,9 +40,5 @@ public interface StorageProvider extends Closeable {

   KeyValueStorage getStorageBySegmentIdentifier(SegmentIdentifier segment);

-  SnappableKeyValueStorage getSnappableStorageBySegmentIdentifier(SegmentIdentifier segment);
-
-  boolean isWorldStateIterable();
-
-  boolean isWorldStateSnappable();
+  SegmentedKeyValueStorage getStorageBySegmentIdentifiers(List<SegmentIdentifier> segment);
 }
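Taken together, the two hunks above leave the plugin-facing interface considerably narrower. Its approximate shape after this commit, assembled from the diff (unchanged members elided; sketch only):

```java
// Approximate post-commit StorageProvider (sketch; elisions marked):
public interface StorageProvider extends Closeable {
  KeyValueStorage getStorageBySegmentIdentifier(SegmentIdentifier segment);

  // new: one composed store spanning several segments
  SegmentedKeyValueStorage getStorageBySegmentIdentifiers(List<SegmentIdentifier> segments);

  // removed by this commit: getSnappableStorageBySegmentIdentifier(...),
  // isWorldStateIterable(), isWorldStateSnappable()
}
```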
@@ -26,35 +26,35 @@ import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
 import org.hyperledger.besu.metrics.ObservableMetricsSystem;
 import org.hyperledger.besu.plugin.services.storage.KeyValueStorage;
 import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier;
-import org.hyperledger.besu.plugin.services.storage.SnappableKeyValueStorage;
+import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorage;
+import org.hyperledger.besu.services.kvstore.SegmentedKeyValueStorageAdapter;

 import java.io.IOException;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.function.Function;
+import java.util.stream.Collectors;

+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 public class KeyValueStorageProvider implements StorageProvider {
+  private static final Logger LOG = LoggerFactory.getLogger(StorageProvider.class);

   public static final boolean SEGMENT_ISOLATION_SUPPORTED = true;
   public static final boolean SNAPSHOT_ISOLATION_UNSUPPORTED = false;

   protected final Function<SegmentIdentifier, KeyValueStorage> storageCreator;
+  protected final Function<List<SegmentIdentifier>, SegmentedKeyValueStorage>
+      segmentedStorageCreator;
   private final KeyValueStorage worldStatePreimageStorage;
   private final boolean isWorldStateIterable;
   private final boolean isWorldStateSnappable;
-  protected final Map<SegmentIdentifier, KeyValueStorage> storageInstances = new HashMap<>();
+  protected final Map<List<SegmentIdentifier>, SegmentedKeyValueStorage> storageInstances =
+      new HashMap<>();
   private final ObservableMetricsSystem metricsSystem;

   public KeyValueStorageProvider(
       final Function<SegmentIdentifier, KeyValueStorage> storageCreator,
+      final Function<List<SegmentIdentifier>, SegmentedKeyValueStorage> segmentedStorageCreator,
       final KeyValueStorage worldStatePreimageStorage,
       final boolean segmentIsolationSupported,
       final boolean storageSnapshotIsolationSupported,
       final ObservableMetricsSystem metricsSystem) {
     this.storageCreator = storageCreator;
+    this.segmentedStorageCreator = segmentedStorageCreator;
     this.worldStatePreimageStorage = worldStatePreimageStorage;
     this.isWorldStateIterable = segmentIsolationSupported;
     this.isWorldStateSnappable = storageSnapshotIsolationSupported;
     this.metricsSystem = metricsSystem;
   }
@@ -90,29 +90,34 @@ public class KeyValueStorageProvider implements StorageProvider {

   @Override
   public KeyValueStorage getStorageBySegmentIdentifier(final SegmentIdentifier segment) {
-    return storageInstances.computeIfAbsent(segment, storageCreator);
+    return new SegmentedKeyValueStorageAdapter(
+        segment, storageInstances.computeIfAbsent(List.of(segment), segmentedStorageCreator));
   }

   @Override
-  public SnappableKeyValueStorage getSnappableStorageBySegmentIdentifier(
-      final SegmentIdentifier segment) {
-    return (SnappableKeyValueStorage) getStorageBySegmentIdentifier(segment);
-  }
-
-  @Override
-  public boolean isWorldStateIterable() {
-    return isWorldStateIterable;
-  }
-
-  @Override
-  public boolean isWorldStateSnappable() {
-    return isWorldStateSnappable;
+  public SegmentedKeyValueStorage getStorageBySegmentIdentifiers(
+      final List<SegmentIdentifier> segments) {
+    return segmentedStorageCreator.apply(segments);
   }

   @Override
   public void close() throws IOException {
-    for (final KeyValueStorage kvs : storageInstances.values()) {
-      kvs.close();
-    }
+    storageInstances.entrySet().stream()
+        .filter(storage -> storage instanceof AutoCloseable)
+        .forEach(
+            storage -> {
+              try {
+                storage.getValue().close();
+              } catch (final IOException e) {
+                LOG.atWarn()
+                    .setMessage("Failed to close storage instance {}")
+                    .addArgument(
+                        storage.getKey().stream()
+                            .map(SegmentIdentifier::getName)
+                            .collect(Collectors.joining(",")))
+                    .setCause(e)
+                    .log();
+              }
+            });
   }
 }
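Single-segment requests are now served by wrapping the cached composed store rather than by a separate per-segment instance, so both entry points share one cache keyed by segment list. The core of that pattern (names from the hunk above; sketch only):

```java
// Single-segment view over the shared segmented cache:
KeyValueStorage single =
    new SegmentedKeyValueStorageAdapter(
        segment,
        storageInstances.computeIfAbsent(List.of(segment), segmentedStorageCreator));
```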
@@ -58,13 +58,9 @@ public class KeyValueStorageProviderBuilder {
     final KeyValueStorage worldStatePreImageStorage =
         new LimitedInMemoryKeyValueStorage(DEFAULT_WORLD_STATE_PRE_IMAGE_CACHE_SIZE);

-    // this tickles init needed for isSegmentIsolationSupported
-    storageFactory.create(KeyValueSegmentIdentifier.BLOCKCHAIN, commonConfiguration, metricsSystem);
     return new KeyValueStorageProvider(
         segment -> storageFactory.create(segment, commonConfiguration, metricsSystem),
+        segments -> storageFactory.create(segments, commonConfiguration, metricsSystem),
         worldStatePreImageStorage,
         storageFactory.isSegmentIsolationSupported(),
         storageFactory.isSnapshotIsolationSupported(),
         (ObservableMetricsSystem) metricsSystem);
   }
 }
@@ -33,15 +33,14 @@ import org.hyperledger.besu.ethereum.worldstate.DefaultMutableWorldState;
 import org.hyperledger.besu.ethereum.worldstate.DefaultWorldStateArchive;
 import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
 import org.hyperledger.besu.services.kvstore.InMemoryKeyValueStorage;
+import org.hyperledger.besu.services.kvstore.SegmentedInMemoryKeyValueStorage;

 public class InMemoryKeyValueStorageProvider extends KeyValueStorageProvider {

   public InMemoryKeyValueStorageProvider() {
     super(
         segmentIdentifier -> new InMemoryKeyValueStorage(),
+        segmentIdentifiers -> new SegmentedInMemoryKeyValueStorage(),
         new InMemoryKeyValueStorage(),
         SEGMENT_ISOLATION_SUPPORTED,
         SNAPSHOT_ISOLATION_UNSUPPORTED,
         new NoOpMetricsSystem());
   }
@@ -18,7 +18,10 @@ package org.hyperledger.besu.ethereum.bonsai;
import static org.assertj.core.api.Assertions.assertThat;
import static org.hyperledger.besu.ethereum.bonsai.storage.BonsaiWorldStateKeyValueStorage.WORLD_BLOCK_HASH_KEY;
import static org.hyperledger.besu.ethereum.bonsai.storage.BonsaiWorldStateKeyValueStorage.WORLD_ROOT_HASH_KEY;
import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.BLOCKCHAIN;
import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.TRIE_BRANCH_STORAGE;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyList;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
@@ -39,10 +42,11 @@ import org.hyperledger.besu.ethereum.core.BlockHeader;
import org.hyperledger.besu.ethereum.core.BlockHeaderTestFixture;
import org.hyperledger.besu.ethereum.rlp.BytesValueRLPOutput;
import org.hyperledger.besu.ethereum.storage.StorageProvider;
import org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
import org.hyperledger.besu.plugin.services.storage.KeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.KeyValueStorageTransaction;
import org.hyperledger.besu.plugin.services.storage.SnappableKeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorageTransaction;

import java.util.Optional;

@@ -59,36 +63,40 @@ import org.mockito.quality.Strictness;
@ExtendWith(MockitoExtension.class)
@MockitoSettings(strictness = Strictness.LENIENT)
public class BonsaiWorldStateArchiveTest {

  final BlockHeaderTestFixture blockBuilder = new BlockHeaderTestFixture();

  @Mock Blockchain blockchain;

  @Mock StorageProvider storageProvider;

  @Mock SnappableKeyValueStorage keyValueStorage;

  @Mock SegmentedKeyValueStorage segmentedKeyValueStorage;
  @Mock KeyValueStorage trieLogStorage;
  @Mock SegmentedKeyValueStorageTransaction segmentedKeyValueStorageTransaction;
  BonsaiWorldStateProvider bonsaiWorldStateArchive;

  @Mock TrieLogManager trieLogManager;

  @BeforeEach
  public void setUp() {
    when(storageProvider.getStorageBySegmentIdentifier(any(KeyValueSegmentIdentifier.class)))
        .thenReturn(keyValueStorage);
    when(storageProvider.getStorageBySegmentIdentifiers(anyList()))
        .thenReturn(segmentedKeyValueStorage);
    when(segmentedKeyValueStorage.startTransaction())
        .thenReturn(segmentedKeyValueStorageTransaction);
    when(storageProvider.getStorageBySegmentIdentifier(any())).thenReturn(trieLogStorage);
    when(trieLogStorage.startTransaction()).thenReturn(mock(KeyValueStorageTransaction.class));
  }

  @Test
  public void testGetMutableReturnPersistedStateWhenNeeded() {
    final BlockHeader chainHead = blockBuilder.number(0).buildHeader();

    when(keyValueStorage.get(WORLD_ROOT_HASH_KEY))
    when(segmentedKeyValueStorage.get(TRIE_BRANCH_STORAGE, WORLD_ROOT_HASH_KEY))
        .thenReturn(Optional.of(chainHead.getStateRoot().toArrayUnsafe()));
    when(keyValueStorage.get(WORLD_BLOCK_HASH_KEY))
    when(segmentedKeyValueStorage.get(TRIE_BRANCH_STORAGE, WORLD_BLOCK_HASH_KEY))
        .thenReturn(Optional.of(chainHead.getHash().toArrayUnsafe()));
    when(keyValueStorage.get(WORLD_ROOT_HASH_KEY))
    when(segmentedKeyValueStorage.get(TRIE_BRANCH_STORAGE, WORLD_ROOT_HASH_KEY))
        .thenReturn(Optional.of(chainHead.getStateRoot().toArrayUnsafe()));
    when(keyValueStorage.get(WORLD_BLOCK_HASH_KEY))
    when(segmentedKeyValueStorage.get(TRIE_BRANCH_STORAGE, WORLD_BLOCK_HASH_KEY))
        .thenReturn(Optional.of(chainHead.getHash().toArrayUnsafe()));
    bonsaiWorldStateArchive =
        new BonsaiWorldStateProvider(

@@ -145,7 +153,6 @@ public class BonsaiWorldStateArchiveTest {
  @Test
  public void testGetMutableWithStorageInconsistencyRollbackTheState() {

    when(keyValueStorage.startTransaction()).thenReturn(mock(KeyValueStorageTransaction.class));
    doAnswer(__ -> Optional.of(mock(TrieLogLayer.class)))
        .when(trieLogManager)
        .getTrieLogLayer(any(Hash.class));
@@ -170,12 +177,10 @@ public class BonsaiWorldStateArchiveTest {
    verify(trieLogManager).getTrieLogLayer(Hash.ZERO);
  }

  @SuppressWarnings({"unchecked", "rawtypes"})
  // @SuppressWarnings({"unchecked", "rawtypes"})
  @Test
  public void testGetMutableWithStorageConsistencyNotRollbackTheState() {

    when(keyValueStorage.startTransaction()).thenReturn(mock(KeyValueStorageTransaction.class));

    var worldStateStorage =
        new BonsaiWorldStateKeyValueStorage(storageProvider, new NoOpMetricsSystem());
    bonsaiWorldStateArchive =
@@ -201,7 +206,6 @@ public class BonsaiWorldStateArchiveTest {
  @SuppressWarnings({"unchecked"})
  @Test
  public void testGetMutableWithStorageConsistencyToRollbackAndRollForwardTheState() {
    when(keyValueStorage.startTransaction()).thenReturn(mock(KeyValueStorageTransaction.class));
    final BlockHeader genesis = blockBuilder.number(0).buildHeader();
    final BlockHeader blockHeaderChainA =
        blockBuilder.number(1).timestamp(1).parentHash(genesis.getHash()).buildHeader();
@@ -242,9 +246,8 @@ public class BonsaiWorldStateArchiveTest {
  // TODO: refactor to test original intent
  @Disabled("needs refactor, getMutable(hash, hash) cannot trigger saveTrieLog")
  public void testGetMutableWithRollbackNotOverrideTrieLogLayer() {
    final KeyValueStorageTransaction keyValueStorageTransaction =
        mock(KeyValueStorageTransaction.class);
    when(keyValueStorage.startTransaction()).thenReturn(keyValueStorageTransaction);
    when(segmentedKeyValueStorage.startTransaction())
        .thenReturn(segmentedKeyValueStorageTransaction);
    final BlockHeader genesis = blockBuilder.number(0).buildHeader();
    final BlockHeader blockHeaderChainA =
        blockBuilder.number(1).timestamp(1).parentHash(genesis.getHash()).buildHeader();
@@ -270,7 +273,7 @@ public class BonsaiWorldStateArchiveTest {
    final TrieLogLayer trieLogLayerBlockB = new TrieLogLayer();
    trieLogLayerBlockB.setBlockHash(blockHeaderChainB.getHash());
    TrieLogFactoryImpl.writeTo(trieLogLayerBlockB, rlpLogBlockB);
    when(keyValueStorage.get(blockHeaderChainB.getHash().toArrayUnsafe()))
    when(segmentedKeyValueStorage.get(BLOCKCHAIN, blockHeaderChainB.getHash().toArrayUnsafe()))
        .thenReturn(Optional.of(rlpLogBlockB.encoded().toArrayUnsafe()));

    when(blockchain.getBlockHeader(eq(blockHeaderChainB.getHash())))
@@ -281,9 +284,9 @@ public class BonsaiWorldStateArchiveTest {
        .containsInstanceOf(BonsaiWorldState.class);

    // verify is not persisting if already present
    verify(keyValueStorageTransaction, never())
        .put(eq(blockHeaderChainA.getHash().toArrayUnsafe()), any());
    verify(keyValueStorageTransaction, never())
        .put(eq(blockHeaderChainB.getHash().toArrayUnsafe()), any());
    verify(segmentedKeyValueStorageTransaction, never())
        .put(BLOCKCHAIN, eq(blockHeaderChainA.getHash().toArrayUnsafe()), any());
    verify(segmentedKeyValueStorageTransaction, never())
        .put(BLOCKCHAIN, eq(blockHeaderChainB.getHash().toArrayUnsafe()), any());
  }
}

@@ -16,6 +16,7 @@ package org.hyperledger.besu.ethereum.bonsai;

import static org.assertj.core.api.Assertions.assertThat;
import static org.hyperledger.besu.ethereum.bonsai.storage.BonsaiWorldStateKeyValueStorage.WORLD_ROOT_HASH_KEY;
import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.TRIE_BRANCH_STORAGE;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.spy;
@@ -210,8 +211,8 @@ public class BonsaiWorldStateKeyValueStorageTest {
    // save world state root hash
    final BonsaiWorldStateKeyValueStorage.BonsaiUpdater updater = storage.updater();
    updater
        .getTrieBranchStorageTransaction()
        .put(WORLD_ROOT_HASH_KEY, trie.getRootHash().toArrayUnsafe());
        .getWorldStateTransaction()
        .put(TRIE_BRANCH_STORAGE, WORLD_ROOT_HASH_KEY, trie.getRootHash().toArrayUnsafe());
    updater.commit();

    // remove flat database
@@ -240,8 +241,8 @@ public class BonsaiWorldStateKeyValueStorageTest {
    // save world state root hash
    final BonsaiWorldStateKeyValueStorage.BonsaiUpdater updater = storage.updater();
    updater
        .getTrieBranchStorageTransaction()
        .put(WORLD_ROOT_HASH_KEY, trie.getRootHash().toArrayUnsafe());
        .getWorldStateTransaction()
        .put(TRIE_BRANCH_STORAGE, WORLD_ROOT_HASH_KEY, trie.getRootHash().toArrayUnsafe());
    updater.commit();

    // remove flat database
@@ -270,8 +271,8 @@ public class BonsaiWorldStateKeyValueStorageTest {
    // save world state root hash
    final BonsaiWorldStateKeyValueStorage.BonsaiUpdater updater = storage.updater();
    updater
        .getTrieBranchStorageTransaction()
        .put(WORLD_ROOT_HASH_KEY, trie.getRootHash().toArrayUnsafe());
        .getWorldStateTransaction()
        .put(TRIE_BRANCH_STORAGE, WORLD_ROOT_HASH_KEY, trie.getRootHash().toArrayUnsafe());
    updater.commit();

    Mockito.reset(storage);
@@ -317,8 +318,8 @@ public class BonsaiWorldStateKeyValueStorageTest {
    // save world state root hash
    final BonsaiWorldStateKeyValueStorage.BonsaiUpdater updater = storage.updater();
    updater
        .getTrieBranchStorageTransaction()
        .put(WORLD_ROOT_HASH_KEY, trie.getRootHash().toArrayUnsafe());
        .getWorldStateTransaction()
        .put(TRIE_BRANCH_STORAGE, WORLD_ROOT_HASH_KEY, trie.getRootHash().toArrayUnsafe());
    updater.commit();

    // remove flat database
@@ -348,8 +349,8 @@ public class BonsaiWorldStateKeyValueStorageTest {
    // save world state root hash
    final BonsaiWorldStateKeyValueStorage.BonsaiUpdater updater = storage.updater();
    updater
        .getTrieBranchStorageTransaction()
        .put(WORLD_ROOT_HASH_KEY, trie.getRootHash().toArrayUnsafe());
        .getWorldStateTransaction()
        .put(TRIE_BRANCH_STORAGE, WORLD_ROOT_HASH_KEY, trie.getRootHash().toArrayUnsafe());
    updater.commit();

    // remove flat database
@@ -416,7 +417,9 @@ public class BonsaiWorldStateKeyValueStorageTest {

    final BonsaiWorldStateKeyValueStorage.BonsaiUpdater updater = storage.updater();
    final Bytes rootHashKey = Bytes32.fromHexString("0x01");
    updater.getTrieBranchStorageTransaction().put(WORLD_ROOT_HASH_KEY, rootHashKey.toArrayUnsafe());
    updater
        .getWorldStateTransaction()
        .put(TRIE_BRANCH_STORAGE, WORLD_ROOT_HASH_KEY, rootHashKey.toArrayUnsafe());
    updater.commit();
    assertThat(storage.isWorldStateAvailable(Hash.wrap(Bytes32.wrap(rootHashKey)), Hash.EMPTY))
        .isTrue();

@@ -41,7 +41,6 @@ import org.hyperledger.besu.evm.worldstate.WorldUpdater;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
import org.hyperledger.besu.plugin.services.storage.KeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.KeyValueStorageTransaction;
import org.hyperledger.besu.services.kvstore.InMemoryKeyValueStorage;

import java.util.Optional;
import java.util.stream.Collectors;
@@ -59,19 +58,19 @@ public class LogRollingTests {
  private BonsaiWorldStateProvider archive;

  private InMemoryKeyValueStorageProvider provider;
  private InMemoryKeyValueStorage accountStorage;
  private InMemoryKeyValueStorage codeStorage;
  private InMemoryKeyValueStorage storageStorage;
  private InMemoryKeyValueStorage trieBranchStorage;
  private InMemoryKeyValueStorage trieLogStorage;
  private KeyValueStorage accountStorage;
  private KeyValueStorage codeStorage;
  private KeyValueStorage storageStorage;
  private KeyValueStorage trieBranchStorage;
  private KeyValueStorage trieLogStorage;

  private InMemoryKeyValueStorageProvider secondProvider;
  private BonsaiWorldStateProvider secondArchive;
  private InMemoryKeyValueStorage secondAccountStorage;
  private InMemoryKeyValueStorage secondCodeStorage;
  private InMemoryKeyValueStorage secondStorageStorage;
  private InMemoryKeyValueStorage secondTrieBranchStorage;
  private InMemoryKeyValueStorage secondTrieLogStorage;
  private KeyValueStorage secondAccountStorage;
  private KeyValueStorage secondCodeStorage;
  private KeyValueStorage secondStorageStorage;
  private KeyValueStorage secondTrieBranchStorage;
  private KeyValueStorage secondTrieLogStorage;
  private final Blockchain blockchain = mock(Blockchain.class);

  private static final Address addressOne =
@@ -133,21 +132,14 @@ public class LogRollingTests {
        new BonsaiWorldStateProvider(
            provider, blockchain, cachedMerkleTrieLoader, new NoOpMetricsSystem(), null);
    accountStorage =
        (InMemoryKeyValueStorage)
            provider.getStorageBySegmentIdentifier(KeyValueSegmentIdentifier.ACCOUNT_INFO_STATE);
    codeStorage =
        (InMemoryKeyValueStorage)
            provider.getStorageBySegmentIdentifier(KeyValueSegmentIdentifier.CODE_STORAGE);
        provider.getStorageBySegmentIdentifier(KeyValueSegmentIdentifier.ACCOUNT_INFO_STATE);
    codeStorage = provider.getStorageBySegmentIdentifier(KeyValueSegmentIdentifier.CODE_STORAGE);
    storageStorage =
        (InMemoryKeyValueStorage)
            provider.getStorageBySegmentIdentifier(
                KeyValueSegmentIdentifier.ACCOUNT_STORAGE_STORAGE);
        provider.getStorageBySegmentIdentifier(KeyValueSegmentIdentifier.ACCOUNT_STORAGE_STORAGE);
    trieBranchStorage =
        (InMemoryKeyValueStorage)
            provider.getStorageBySegmentIdentifier(KeyValueSegmentIdentifier.TRIE_BRANCH_STORAGE);
        provider.getStorageBySegmentIdentifier(KeyValueSegmentIdentifier.TRIE_BRANCH_STORAGE);
    trieLogStorage =
        (InMemoryKeyValueStorage)
            provider.getStorageBySegmentIdentifier(KeyValueSegmentIdentifier.TRIE_LOG_STORAGE);
        provider.getStorageBySegmentIdentifier(KeyValueSegmentIdentifier.TRIE_LOG_STORAGE);

    secondProvider = new InMemoryKeyValueStorageProvider();
    final CachedMerkleTrieLoader secondOptimizedMerkleTrieLoader =
@@ -160,24 +152,16 @@ public class LogRollingTests {
            new NoOpMetricsSystem(),
            null);
    secondAccountStorage =
        (InMemoryKeyValueStorage)
            secondProvider.getStorageBySegmentIdentifier(
                KeyValueSegmentIdentifier.ACCOUNT_INFO_STATE);
        secondProvider.getStorageBySegmentIdentifier(KeyValueSegmentIdentifier.ACCOUNT_INFO_STATE);
    secondCodeStorage =
        (InMemoryKeyValueStorage)
            secondProvider.getStorageBySegmentIdentifier(KeyValueSegmentIdentifier.CODE_STORAGE);
        secondProvider.getStorageBySegmentIdentifier(KeyValueSegmentIdentifier.CODE_STORAGE);
    secondStorageStorage =
        (InMemoryKeyValueStorage)
            secondProvider.getStorageBySegmentIdentifier(
                KeyValueSegmentIdentifier.ACCOUNT_STORAGE_STORAGE);
        secondProvider.getStorageBySegmentIdentifier(
            KeyValueSegmentIdentifier.ACCOUNT_STORAGE_STORAGE);
    secondTrieBranchStorage =
        (InMemoryKeyValueStorage)
            secondProvider.getStorageBySegmentIdentifier(
                KeyValueSegmentIdentifier.TRIE_BRANCH_STORAGE);
        secondProvider.getStorageBySegmentIdentifier(KeyValueSegmentIdentifier.TRIE_BRANCH_STORAGE);
    secondTrieLogStorage =
        (InMemoryKeyValueStorage)
            secondProvider.getStorageBySegmentIdentifier(
                KeyValueSegmentIdentifier.TRIE_LOG_STORAGE);
        secondProvider.getStorageBySegmentIdentifier(KeyValueSegmentIdentifier.TRIE_LOG_STORAGE);
  }

  @Test
@@ -329,7 +313,7 @@ public class LogRollingTests {
    assertThat(secondWorldState.rootHash()).isEqualByComparingTo(worldState.rootHash());
  }

  private TrieLogLayer getTrieLogLayer(final InMemoryKeyValueStorage storage, final Bytes key) {
  private TrieLogLayer getTrieLogLayer(final KeyValueStorage storage, final Bytes key) {
    return storage
        .get(key.toArrayUnsafe())
        .map(bytes -> TrieLogFactoryImpl.readFrom(new BytesValueRLPInput(Bytes.wrap(bytes), false)))

@@ -17,6 +17,10 @@
package org.hyperledger.besu.ethereum.bonsai;

import static com.google.common.base.Preconditions.checkArgument;
import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.ACCOUNT_INFO_STATE;
import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.ACCOUNT_STORAGE_STORAGE;
import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.CODE_STORAGE;
import static org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier.TRIE_BRANCH_STORAGE;

import org.hyperledger.besu.ethereum.bonsai.cache.CachedMerkleTrieLoader;
import org.hyperledger.besu.ethereum.bonsai.storage.BonsaiWorldStateKeyValueStorage;
@@ -29,10 +33,12 @@ import org.hyperledger.besu.ethereum.rlp.BytesValueRLPInput;
import org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
import org.hyperledger.besu.services.kvstore.InMemoryKeyValueStorage;
import org.hyperledger.besu.services.kvstore.SegmentedInMemoryKeyValueStorage;
import org.hyperledger.besu.util.io.RollingFileReader;

import java.io.IOException;
import java.nio.file.Path;
import java.util.List;

import org.apache.tuweni.bytes.Bytes;

@@ -53,19 +59,15 @@ public class RollingImport {
    final BonsaiWorldState bonsaiState =
        new BonsaiWorldState(
            archive, new BonsaiWorldStateKeyValueStorage(provider, new NoOpMetricsSystem()));
    final InMemoryKeyValueStorage accountStorage =
        (InMemoryKeyValueStorage)
            provider.getStorageBySegmentIdentifier(KeyValueSegmentIdentifier.ACCOUNT_INFO_STATE);
    final InMemoryKeyValueStorage codeStorage =
        (InMemoryKeyValueStorage)
            provider.getStorageBySegmentIdentifier(KeyValueSegmentIdentifier.CODE_STORAGE);
    final InMemoryKeyValueStorage storageStorage =
        (InMemoryKeyValueStorage)
            provider.getStorageBySegmentIdentifier(
                KeyValueSegmentIdentifier.ACCOUNT_STORAGE_STORAGE);
    final InMemoryKeyValueStorage trieBranchStorage =
        (InMemoryKeyValueStorage)
            provider.getStorageBySegmentIdentifier(KeyValueSegmentIdentifier.TRIE_BRANCH_STORAGE);
    final SegmentedInMemoryKeyValueStorage worldStateStorage =
        (SegmentedInMemoryKeyValueStorage)
            provider.getStorageBySegmentIdentifiers(
                List.of(
                    ACCOUNT_INFO_STATE,
                    CODE_STORAGE,
                    ACCOUNT_STORAGE_STORAGE,
                    TRIE_BRANCH_STORAGE));

    final InMemoryKeyValueStorage trieLogStorage =
        (InMemoryKeyValueStorage)
            provider.getStorageBySegmentIdentifier(KeyValueSegmentIdentifier.TRIE_LOG_STORAGE);
@@ -125,10 +127,7 @@ public class RollingImport {
      }
    }
    System.out.printf("Back to zero!%n");
    accountStorage.dump(System.out);
    codeStorage.dump(System.out);
    storageStorage.dump(System.out);
    trieBranchStorage.dump(System.out);
    worldStateStorage.dump(System.out);
    trieLogStorage.dump(System.out);
  }
}

@@ -146,8 +146,10 @@ public class MarkSweepPrunerTest {
    // the full prune but without enforcing an ordering between the state root removals
    stateRoots.forEach(
        stateRoot -> {
          final InOrder thisRootsOrdering = inOrder(hashValueStore, worldStateStorage);
          thisRootsOrdering.verify(hashValueStore).remove(stateRoot);
          final InOrder thisRootsOrdering =
              inOrder(worldStateStorage, hashValueStore, worldStateStorage);
          thisRootsOrdering.verify(worldStateStorage).isWorldStateAvailable(stateRoot, null);
          thisRootsOrdering.verify(hashValueStore).keySet();
          thisRootsOrdering.verify(worldStateStorage).prune(any());
        });
  }
@@ -183,8 +185,10 @@ public class MarkSweepPrunerTest {
    // the full prune but without enforcing an ordering between the state root removals
    stateRoots.forEach(
        stateRoot -> {
          final InOrder thisRootsOrdering = inOrder(hashValueStore, worldStateStorage);
          thisRootsOrdering.verify(hashValueStore).remove(stateRoot);
          final InOrder thisRootsOrdering =
              inOrder(worldStateStorage, hashValueStore, worldStateStorage);
          thisRootsOrdering.verify(worldStateStorage).isWorldStateAvailable(stateRoot, null);
          thisRootsOrdering.verify(hashValueStore).keySet();
          thisRootsOrdering.verify(worldStateStorage).prune(any());
        });

@@ -15,7 +15,7 @@
package org.hyperledger.besu.ethereum.eth.transactions;

import static java.util.Collections.singletonList;
import static org.assertj.core.util.Preconditions.checkNotNull;
import static java.util.Objects.requireNonNull;
import static org.hyperledger.besu.ethereum.core.InMemoryKeyValueStorageProvider.createInMemoryBlockchain;
import static org.hyperledger.besu.ethereum.core.InMemoryKeyValueStorageProvider.createInMemoryWorldStateArchive;
import static org.mockito.ArgumentMatchers.anyLong;
@@ -96,8 +96,8 @@ public class TestNode implements Closeable {
      final Integer port,
      final KeyPair kp,
      final DiscoveryConfiguration discoveryCfg) {
    checkNotNull(vertx);
    checkNotNull(discoveryCfg);
    requireNonNull(vertx);
    requireNonNull(discoveryCfg);

    final int listenPort = port != null ? port : 0;
    this.nodeKey = kp != null ? NodeKeyUtils.createFrom(kp) : NodeKeyUtils.generate();

1
ethereum/evmtool/txs.rlp
Normal file
@@ -0,0 +1 @@
0xf90620f860800a8307a12094000000000000000000000000000000000000010080801ca0f73b923883495dc2174285c8fa4176de3d45accfb11cc8034ea1dd09831a4ddfa01c6bccbcd655b4022bcc27de4b9d5cee9ce999cdb8459b0afec4f5054ea02243f860010a8307a12094000000000000000000000000000000000000010180801ba0bffca3f433f61c957d822af37f2b49c57700ff338588d51ea82dc9f720c91d9da0168bb65cc72d586384383f8ceef3a6a60e54b7f4aaa978a6dad271ced54b2ebff860020a8307a12094000000000000000000000000000000000000010280801ba03d9f110bcf0c44be552d4d0ec8387b705604f7d3bb3794dcef4004c38963103ea013bda734f3b5987b8c855f6aab046754506266ff32352ba0898c4eba4acaec8bf860030a8307a12094000000000000000000000000000000000000010380801ba0ecb276d2486664ea779813e599b6f07b7b0df746626d7fdddf60ea425efcb324a0739841682e79a8302dc2e146dfd1eecbdc611d386d42287bcdd94a39bf536020f860040a8307a12094000000000000000000000000000000000000010480801ba002866b5c5fa5dbfa3d88b71a49b82a779c2d508cda631893176782dbcd7435aaa003c380a9af9bfdb3503abcfd5037d3c66f39bb7a19011a3291712d22292c5236f860050a8307a12094000000000000000000000000000000000000010580801ca0c70d2e000e503933d0f1a9a923dc647924811a912adf77692ff7d8f6808d5617a04ad82c92b980580a4a67e4c405e83d560a14201c3fd4b3a42d34dcc19336479af860060a8307a12094000000000000000000000000000000000000010680801ca07f2527f8cbe14e021d270dd214a1820355c7af128001889f57b7f9bba46a6c5da03033308de0d39b9d1b47d28f81df39ceaff330349298c65deb836efe8bce273ff860070a8307a12094000000000000000000000000000000000000010780801ba0ecb720a8764f8967b95dc66e961c6261fceb392c0e90461d7d66113d3c8bbd12a02655e28b751cc2e03a835aa817d884b540765dba12968bc53f53737b4234ee21f860080a8307a12094000000000000000000000000000000000000010880801ba095a2e27c0b296679141c0ad61be112f689b134c04d1773814ddae67fefb2dfbda02955f126d57d8b9777f47c520ffe4285890ca2dd1189e67b3407d6369997e7ecf860090a8307a12094000000000000000000000000000000000000010980801ca02468a120d0ee8c57caac354f56842a1db10813169a328f9f852279668b573907a03971f4c2e6bc0aa666812712719199df6fe37c0e1e122131cdb47d6c0c77b371f8600a0a8307a12094000000000000000000000000000000000000010a80801ba0a3a2018ab0bc2695b94bb85d710f4d07132a94f8c3e0f385824da5fee11899a5a00d2dfe430ea5aaff3de8bbb9339e7485474c8e4e34636f787124a7a91e4d6d6af8600b0a8307a12094000000000000000000000000000000000000010b80801ba0b91968fdb3aecea26094ec30649daa4de81a875bcb1a123e732b8f3f112ce232a02ef8cd85969d8bcef5f4ee1f5d20783b8d9b7466726c15ebf911565825187665f8600c0a8307a12094000000000000000000000000000000000000010c80801ca0dd27e75aa990793205805c22265b04be8299b208fad4f37a7f652ecf32b67390a05aa8cda18521548ff8f95e88f49f309d05cab32de28a0942b8a7a824c50df459f8600d0a8307a12094000000000000000000000000000000000000010d80801ba0fad07ce7139dd4e00266194e6a51c048f74eaba3c0a1b03ece378a810abfaa63a04fec880dafaa5382797b4f88b16138b1f0c4e084817072c77ff9bf17ddd4ac26f8600e0a8307a12094000000000000000000000000000000000000010e80801ca0208b22ab245221bdc5cae6586d2ef019a2c37be41166e04b8abe354c41a8f5b6a032d5d6ef07731cd1684531c775c1727ef6ba75de18cda96d998aaa0c1db0bd68f8600f0a8307a12094000000000000000000000000000000000000010f80801ba0055225ffd3d8b2d19c32aa68cb46e7b52c1d99844fb8b7a53b922ea1649e9c5ba06ae2a1e3b9712354b706d0f4da6ea76ed2f8f75277a51a14a3e0ccf25b85c626
@@ -69,7 +69,7 @@ Calculated : ${currentHash}
tasks.register('checkAPIChanges', FileStateChecker) {
  description = "Checks that the API for the Plugin-API project does not change without deliberate thought"
  files = sourceSets.main.allJava.files
  knownHash = 'DyHMAiRn7aROnI2DCHrrkhU9szOs+NbI6FAxELrwEGQ='
  knownHash = 'Tv7ZbXqvytaHJFsEtGuGFrhITRAlJ02T4/54GjhEI5Y='
}
check.dependsOn('checkAPIChanges')

@@ -20,6 +20,7 @@ import org.hyperledger.besu.plugin.services.MetricsSystem;
import org.hyperledger.besu.plugin.services.exception.StorageException;

import java.io.Closeable;
import java.util.List;

/** Factory for creating key-value storage instances. */
@Unstable
@@ -53,6 +54,25 @@ public interface KeyValueStorageFactory extends Closeable {
      SegmentIdentifier segment, BesuConfiguration configuration, MetricsSystem metricsSystem)
      throws StorageException;

  /**
   * Creates a new segmented key-value storage instance, appropriate for the given segments.
   *
   * <p>New segments may be introduced in future releases and should result in a new empty
   * key-space. Segments created with the identifier of an existing segment should have the same
   * data as that existing segment.
   *
   * @param segments list of segment identifiers that comprise the created segmented storage.
   * @param configuration common configuration available to plugins, in a populated state.
   * @param metricsSystem metrics component for recording key-value storage events.
   * @return the storage instance reserved for the given segments.
   * @exception StorageException problem encountered when creating storage for the segments.
   */
  SegmentedKeyValueStorage create(
      List<SegmentIdentifier> segments,
      BesuConfiguration configuration,
      MetricsSystem metricsSystem)
      throws StorageException;

  /**
   * Whether storage segment isolation is supported by the factory created instances.
   *
@@ -71,7 +91,5 @@ public interface KeyValueStorageFactory extends Closeable {
   * @return <code>true</code> when the created storage supports snapshots, <code>false</code> when
   *     it does not.
   */
  default boolean isSnapshotIsolationSupported() {
    return false;
  }
  boolean isSnapshotIsolationSupported();
}

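A hedged sketch of the new factory entry point from a caller's point of view; the helper method and parameter names are illustrative, not part of the plugin-api:

import java.util.List;

import org.hyperledger.besu.plugin.services.BesuConfiguration;
import org.hyperledger.besu.plugin.services.MetricsSystem;
import org.hyperledger.besu.plugin.services.storage.KeyValueStorageFactory;
import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier;
import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorage;

class SegmentedFactorySketch {
  // Hypothetical helper: one store spanning several segments, so writes across
  // those segments can share a single atomic transaction.
  SegmentedKeyValueStorage openSegmented(
      final KeyValueStorageFactory factory,
      final List<SegmentIdentifier> segments,
      final BesuConfiguration configuration,
      final MetricsSystem metricsSystem) {
    // Throws StorageException if the backing store cannot be created.
    return factory.create(segments, configuration, metricsSystem);
  }
}
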
@@ -12,10 +12,9 @@
 *
 * SPDX-License-Identifier: Apache-2.0
 */
package org.hyperledger.besu.services.kvstore;
package org.hyperledger.besu.plugin.services.storage;

import org.hyperledger.besu.plugin.services.exception.StorageException;
import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier;

import java.io.Closeable;
import java.util.Optional;
@@ -25,20 +24,8 @@ import java.util.stream.Stream;

import org.apache.commons.lang3.tuple.Pair;

/**
 * Service provided by Besu to facilitate persistent data storage.
 *
 * @param <S> the segment identifier type
 */
public interface SegmentedKeyValueStorage<S> extends Closeable {

  /**
   * Gets segment identifier by name.
   *
   * @param segment the segment
   * @return the segment identifier by name
   */
  S getSegmentIdentifierByName(SegmentIdentifier segment);
/** Service provided by Besu to facilitate persistent data storage. */
public interface SegmentedKeyValueStorage extends Closeable {

  /**
   * Get the value from the associated segment and key.
@@ -48,7 +35,7 @@ public interface SegmentedKeyValueStorage<S> extends Closeable {
   * @return The value persisted at the key index.
   * @throws StorageException the storage exception
   */
  Optional<byte[]> get(S segment, byte[] key) throws StorageException;
  Optional<byte[]> get(SegmentIdentifier segment, byte[] key) throws StorageException;

  /**
   * Contains key.
@@ -58,7 +45,8 @@ public interface SegmentedKeyValueStorage<S> extends Closeable {
   * @return the boolean
   * @throws StorageException the storage exception
   */
  default boolean containsKey(final S segment, final byte[] key) throws StorageException {
  default boolean containsKey(final SegmentIdentifier segment, final byte[] key)
      throws StorageException {
    return get(segment, key).isPresent();
  }

@@ -68,71 +56,74 @@ public interface SegmentedKeyValueStorage<S> extends Closeable {
   * @return An object representing the transaction.
   * @throws StorageException the storage exception
   */
  Transaction<S> startTransaction() throws StorageException;
  SegmentedKeyValueStorageTransaction startTransaction() throws StorageException;

  /**
   * Returns a stream of all keys for the segment.
   *
   * @param segmentHandle The segment handle whose keys we want to stream.
   * @param segmentIdentifier The segment identifier whose keys we want to stream.
   * @return A stream of all keys in the specified segment.
   */
  Stream<Pair<byte[], byte[]>> stream(final S segmentHandle);
  Stream<Pair<byte[], byte[]>> stream(final SegmentIdentifier segmentIdentifier);

  /**
   * Returns a stream of key-value pairs starting from the specified key. This method is used to
   * retrieve a stream of data from the storage, starting from the given key. If no data is
   * available from the specified key onwards, an empty stream is returned.
   *
   * @param segmentHandle The segment handle whose keys we want to stream.
   * @param segmentIdentifier The segment identifier whose keys we want to stream.
   * @param startKey The key from which the stream should start.
   * @return A stream of key-value pairs starting from the specified key.
   */
  Stream<Pair<byte[], byte[]>> streamFromKey(final S segmentHandle, final byte[] startKey);
  Stream<Pair<byte[], byte[]>> streamFromKey(
      final SegmentIdentifier segmentIdentifier, final byte[] startKey);

  /**
   * Stream keys.
   *
   * @param segmentHandle the segment handle
   * @param segmentIdentifier the segment identifier
   * @return the stream
   */
  Stream<byte[]> streamKeys(final S segmentHandle);
  Stream<byte[]> streamKeys(final SegmentIdentifier segmentIdentifier);

  /**
   * Delete the value corresponding to the given key in the given segment if a write lock can be
   * instantly acquired on the underlying storage. Do nothing otherwise.
   *
   * @param segmentHandle The segment handle whose keys we want to stream.
   * @param segmentIdentifier The segment identifier containing the key to delete.
   * @param key The key to delete.
   * @return false if the lock on the underlying storage could not be instantly acquired, true
   *     otherwise
   * @throws StorageException any problem encountered during the deletion attempt.
   */
  boolean tryDelete(S segmentHandle, byte[] key) throws StorageException;
  boolean tryDelete(SegmentIdentifier segmentIdentifier, byte[] key) throws StorageException;

  /**
   * Gets all keys that match the condition.
   *
   * @param segmentHandle the segment handle
   * @param segmentIdentifier the segment identifier
   * @param returnCondition the return condition
   * @return set of results
   */
  Set<byte[]> getAllKeysThat(S segmentHandle, Predicate<byte[]> returnCondition);
  Set<byte[]> getAllKeysThat(
      SegmentIdentifier segmentIdentifier, Predicate<byte[]> returnCondition);

  /**
   * Gets all values from keys that match the condition.
   *
   * @param segmentHandle the segment handle
   * @param segmentIdentifier the segment identifier
   * @param returnCondition the return condition
   * @return the set of results
   */
  Set<byte[]> getAllValuesFromKeysThat(final S segmentHandle, Predicate<byte[]> returnCondition);
  Set<byte[]> getAllValuesFromKeysThat(
      final SegmentIdentifier segmentIdentifier, Predicate<byte[]> returnCondition);

  /**
   * Clear.
   *
   * @param segmentHandle the segment handle
   * @param segmentIdentifier the segment identifier
   */
  void clear(S segmentHandle);
  void clear(SegmentIdentifier segmentIdentifier);

  /**
   * Whether the underlying storage is closed.
@@ -140,45 +131,4 @@ public interface SegmentedKeyValueStorage<S> extends Closeable {
   * @return boolean indicating whether the underlying storage is closed.
   */
  boolean isClosed();

  /**
   * Represents a set of changes to be committed atomically. A single transaction is not
   * thread-safe, but multiple transactions can execute concurrently.
   *
   * @param <S> the segment identifier type
   */
  interface Transaction<S> {

    /**
     * Add the given key-value pair to the set of updates to be committed.
     *
     * @param segment the database segment
     * @param key The key to set / modify.
     * @param value The value to be set.
     */
    void put(S segment, byte[] key, byte[] value);

    /**
     * Schedules the given key to be deleted from storage.
     *
     * @param segment the database segment
     * @param key The key to delete
     */
    void remove(S segment, byte[] key);

    /**
     * Atomically commit the set of changes contained in this transaction to the underlying
     * key-value storage from which this transaction was started. After committing, the transaction
     * is no longer usable and will throw exceptions if modifications are attempted.
     *
     * @throws StorageException the storage exception
     */
    void commit() throws StorageException;

    /**
     * Cancel this transaction. After rolling back, the transaction is no longer usable and will
     * throw exceptions if modifications are attempted.
     */
    void rollback();
  }
}
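With the generic handle type gone, callers address the store directly with plugin-api SegmentIdentifier values. A minimal usage sketch, assuming an already-created store; the method and key are illustrative:

import java.nio.charset.StandardCharsets;

import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier;
import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorage;

class SegmentedStorageSketch {
  void inspectSegment(final SegmentedKeyValueStorage store, final SegmentIdentifier segment) {
    final byte[] key = "0x01".getBytes(StandardCharsets.UTF_8);
    // get() and containsKey() take the segment identifier directly; no
    // getSegmentIdentifierByName() lookup is needed any more.
    if (store.containsKey(segment, key)) {
      store.get(segment, key).ifPresent(value -> System.out.println(value.length));
    }
    // Keys of one segment stream without touching the other segments.
    System.out.println(store.streamKeys(segment).count());
  }
}
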
@@ -0,0 +1,55 @@
/*
 * Copyright ConsenSys AG.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 */
package org.hyperledger.besu.plugin.services.storage;

import org.hyperledger.besu.plugin.Unstable;
import org.hyperledger.besu.plugin.services.exception.StorageException;

/**
 * A transaction that can atomically commit a sequence of operations to a segmented key-value store.
 */
@Unstable
public interface SegmentedKeyValueStorageTransaction {

  /**
   * Associates the specified value with the specified key.
   *
   * <p>If a value has previously been stored against the given key, the old value is replaced by
   * the given value.
   *
   * @param segmentIdentifier the segment identifier
   * @param key the key the given value is to be associated with.
   * @param value the value to be associated with the specified key.
   */
  void put(SegmentIdentifier segmentIdentifier, byte[] key, byte[] value);

  /**
   * When the given key is present, the key and mapped value will be removed from storage.
   *
   * @param segmentIdentifier the segment identifier
   * @param key the key and mapped value that will be removed.
   */
  void remove(SegmentIdentifier segmentIdentifier, byte[] key);

  /**
   * Performs an atomic commit of all the operations queued in the transaction.
   *
   * @throws StorageException a problem was encountered that prevented the commit
   */
  void commit() throws StorageException;

  /** Reset the transaction to a state prior to any operations being queued. */
  void rollback();
}
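A hedged sketch of the intended commit/rollback discipline for this interface; the segment parameters and the validation step are illustrative:

import java.nio.charset.StandardCharsets;

import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier;
import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorageTransaction;

class SegmentedTransactionSketch {
  // Hypothetical cross-segment write: queued puts land atomically on commit(),
  // or are discarded by rollback().
  void writeAtomically(
      final SegmentedKeyValueStorage store,
      final SegmentIdentifier headers,
      final SegmentIdentifier bodies,
      final boolean valid) {
    final SegmentedKeyValueStorageTransaction tx = store.startTransaction();
    tx.put(headers, "h".getBytes(StandardCharsets.UTF_8), new byte[] {1});
    tx.put(bodies, "b".getBytes(StandardCharsets.UTF_8), new byte[] {2});
    if (valid) {
      tx.commit(); // both segments observe the writes, or a StorageException is thrown
    } else {
      tx.rollback(); // reset to the state before any operation was queued
    }
  }
}
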
@@ -16,7 +16,7 @@
package org.hyperledger.besu.plugin.services.storage;

/** The interface Snappable key value storage. */
public interface SnappableKeyValueStorage extends KeyValueStorage {
public interface SnappableKeyValueStorage extends SegmentedKeyValueStorage {

  /**
   * Take snapshot.

@@ -16,12 +16,12 @@
package org.hyperledger.besu.plugin.services.storage;

/** The interface Snapped key value storage. */
public interface SnappedKeyValueStorage extends KeyValueStorage {
public interface SnappedKeyValueStorage extends SegmentedKeyValueStorage {

  /**
   * Gets snapshot transaction.
   *
   * @return the snapshot transaction
   */
  KeyValueStorageTransaction getSnapshotTransaction();
  SegmentedKeyValueStorageTransaction getSnapshotTransaction();
}

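Both snapshot interfaces now extend the segmented store, so a snapshot is itself addressable by segment. A hedged sketch, assuming takeSnapshot() hands back a SnappedKeyValueStorage as these interfaces suggest; the segment is illustrative:

import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier;
import org.hyperledger.besu.plugin.services.storage.SnappableKeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.SnappedKeyValueStorage;

class SnapshotSketch {
  // Hypothetical point-in-time read: the snapshot keeps serving its frozen view
  // of the segment even if the live store is written to afterwards.
  long countKeysAtSnapshot(final SnappableKeyValueStorage live, final SegmentIdentifier segment) {
    final SnappedKeyValueStorage snapshot = live.takeSnapshot();
    return snapshot.streamKeys(segment).count();
  }
}
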
@@ -20,11 +20,13 @@ import org.hyperledger.besu.plugin.services.exception.StorageException;
import org.hyperledger.besu.plugin.services.storage.KeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.PrivacyKeyValueStorageFactory;
import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier;
import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.DatabaseMetadata;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
import java.util.Set;

import org.slf4j.Logger;
@@ -75,11 +77,33 @@ public class RocksDBKeyValuePrivacyStorageFactory implements PrivacyKeyValueStor
    return publicFactory.create(segment, commonConfiguration, metricsSystem);
  }

  @Override
  public SegmentedKeyValueStorage create(
      final List<SegmentIdentifier> segments,
      final BesuConfiguration commonConfiguration,
      final MetricsSystem metricsSystem)
      throws StorageException {
    if (databaseVersion == null) {
      try {
        databaseVersion = readDatabaseVersion(commonConfiguration);
      } catch (final IOException e) {
        throw new StorageException("Failed to retrieve the RocksDB database meta version", e);
      }
    }

    return publicFactory.create(segments, commonConfiguration, metricsSystem);
  }

  @Override
  public boolean isSegmentIsolationSupported() {
    return publicFactory.isSegmentIsolationSupported();
  }

  @Override
  public boolean isSnapshotIsolationSupported() {
    return publicFactory.isSnapshotIsolationSupported();
  }

  @Override
  public void close() throws IOException {
    publicFactory.close();

@@ -14,8 +14,6 @@
 */
package org.hyperledger.besu.plugin.services.storage.rocksdb;

import static com.google.common.base.Preconditions.checkNotNull;

import org.hyperledger.besu.ethereum.worldstate.DataStorageFormat;
import org.hyperledger.besu.plugin.services.BesuConfiguration;
import org.hyperledger.besu.plugin.services.MetricsSystem;
@@ -23,6 +21,7 @@ import org.hyperledger.besu.plugin.services.exception.StorageException;
import org.hyperledger.besu.plugin.services.storage.KeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.KeyValueStorageFactory;
import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier;
import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.DatabaseMetadata;
import org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.RocksDBConfiguration;
import org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.RocksDBConfigurationBuilder;
@@ -31,7 +30,6 @@ import org.hyperledger.besu.plugin.services.storage.rocksdb.segmented.Optimistic
import org.hyperledger.besu.plugin.services.storage.rocksdb.segmented.RocksDBColumnarKeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.rocksdb.segmented.TransactionDBRocksDBColumnarKeyValueStorage;
import org.hyperledger.besu.services.kvstore.SegmentedKeyValueStorageAdapter;
import org.hyperledger.besu.services.kvstore.SnappableSegmentedKeyValueStorageAdapter;

import java.io.IOException;
import java.nio.file.Files;
@@ -44,7 +42,10 @@ import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/** The Rocks db key value storage factory. */
/**
 * The Rocks db key value storage factory creates segmented storage and uses an adapter to support
 * unsegmented key-value storage.
 */
public class RocksDBKeyValueStorageFactory implements KeyValueStorageFactory {

  private static final Logger LOG = LoggerFactory.getLogger(RocksDBKeyValueStorageFactory.class);
@@ -55,31 +56,30 @@ public class RocksDBKeyValueStorageFactory implements KeyValueStorageFactory {

  private final int defaultVersion;
  private Integer databaseVersion;
  private Boolean isSegmentIsolationSupported;
  private RocksDBColumnarKeyValueStorage segmentedStorage;
  private RocksDBConfiguration rocksDBConfiguration;

  private final Supplier<RocksDBFactoryConfiguration> configuration;
  private final List<SegmentIdentifier> segments;
  private final List<SegmentIdentifier> configuredSegments;
  private final List<SegmentIdentifier> ignorableSegments;

  /**
   * Instantiates a new RocksDb key value storage factory.
   *
   * @param configuration the configuration
   * @param segments the segments
   * @param configuredSegments the configured segments
   * @param ignorableSegments the ignorable segments
   * @param defaultVersion the default version
   * @param rocksDBMetricsFactory the rocks db metrics factory
   */
  public RocksDBKeyValueStorageFactory(
      final Supplier<RocksDBFactoryConfiguration> configuration,
      final List<SegmentIdentifier> segments,
      final List<SegmentIdentifier> configuredSegments,
      final List<SegmentIdentifier> ignorableSegments,
      final int defaultVersion,
      final RocksDBMetricsFactory rocksDBMetricsFactory) {
    this.configuration = configuration;
    this.segments = segments;
    this.configuredSegments = configuredSegments;
    this.ignorableSegments = ignorableSegments;
    this.defaultVersion = defaultVersion;
    this.rocksDBMetricsFactory = rocksDBMetricsFactory;
@@ -89,46 +89,51 @@ public class RocksDBKeyValueStorageFactory implements KeyValueStorageFactory {
   * Instantiates a new RocksDb key value storage factory.
   *
   * @param configuration the configuration
   * @param segments the segments
   * @param configuredSegments the configured segments
   * @param defaultVersion the default version
   * @param rocksDBMetricsFactory the rocks db metrics factory
   */
  public RocksDBKeyValueStorageFactory(
      final Supplier<RocksDBFactoryConfiguration> configuration,
      final List<SegmentIdentifier> segments,
      final List<SegmentIdentifier> configuredSegments,
      final int defaultVersion,
      final RocksDBMetricsFactory rocksDBMetricsFactory) {
    this(configuration, segments, List.of(), defaultVersion, rocksDBMetricsFactory);
    this(configuration, configuredSegments, List.of(), defaultVersion, rocksDBMetricsFactory);
  }

  /**
   * Instantiates a new Rocks db key value storage factory.
   *
   * @param configuration the configuration
   * @param segments the segments
   * @param configuredSegments the configured segments
   * @param ignorableSegments the ignorable segments
   * @param rocksDBMetricsFactory the rocks db metrics factory
   */
  public RocksDBKeyValueStorageFactory(
      final Supplier<RocksDBFactoryConfiguration> configuration,
      final List<SegmentIdentifier> segments,
      final List<SegmentIdentifier> configuredSegments,
      final List<SegmentIdentifier> ignorableSegments,
      final RocksDBMetricsFactory rocksDBMetricsFactory) {
    this(configuration, segments, ignorableSegments, DEFAULT_VERSION, rocksDBMetricsFactory);
    this(
        configuration,
        configuredSegments,
        ignorableSegments,
        DEFAULT_VERSION,
        rocksDBMetricsFactory);
  }

  /**
   * Instantiates a new Rocks db key value storage factory.
   *
   * @param configuration the configuration
   * @param segments the segments
   * @param configuredSegments the configured segments
   * @param rocksDBMetricsFactory the rocks db metrics factory
   */
  public RocksDBKeyValueStorageFactory(
      final Supplier<RocksDBFactoryConfiguration> configuration,
      final List<SegmentIdentifier> segments,
      final List<SegmentIdentifier> configuredSegments,
      final RocksDBMetricsFactory rocksDBMetricsFactory) {
    this(configuration, segments, List.of(), DEFAULT_VERSION, rocksDBMetricsFactory);
    this(configuration, configuredSegments, List.of(), DEFAULT_VERSION, rocksDBMetricsFactory);
  }

  /**
@@ -151,12 +156,32 @@ public class RocksDBKeyValueStorageFactory implements KeyValueStorageFactory {
      final BesuConfiguration commonConfiguration,
      final MetricsSystem metricsSystem)
      throws StorageException {
    return new SegmentedKeyValueStorageAdapter(
        segment, create(List.of(segment), commonConfiguration, metricsSystem));
  }

  @Override
  public SegmentedKeyValueStorage create(
      final List<SegmentIdentifier> segments,
      final BesuConfiguration commonConfiguration,
      final MetricsSystem metricsSystem)
      throws StorageException {
    final boolean isForestStorageFormat =
        DataStorageFormat.FOREST.getDatabaseVersion() == commonConfiguration.getDatabaseVersion();
    if (requiresInit()) {
      init(commonConfiguration);
    }

    // safety check to see that segments all exist within configured segments
    if (!configuredSegments.containsAll(segments)) {
      throw new StorageException(
          "Attempted to create storage for segments that are not configured: "
              + segments.stream()
                  .filter(segment -> !configuredSegments.contains(segment))
                  .map(SegmentIdentifier::toString)
                  .collect(Collectors.joining(", ")));
    }

    // It's probably a good idea for the creation logic to be entirely dependent on the database
    // version. Introducing intermediate booleans that represent database properties and dispatching
    // creation logic based on them is error-prone.
@@ -164,7 +189,7 @@ public class RocksDBKeyValueStorageFactory implements KeyValueStorageFactory {
      case 1, 2 -> {
        if (segmentedStorage == null) {
          final List<SegmentIdentifier> segmentsForVersion =
              segments.stream()
              configuredSegments.stream()
                  .filter(segmentId -> segmentId.includeInDatabaseVersion(databaseVersion))
                  .collect(Collectors.toList());
          if (isForestStorageFormat) {
@@ -187,20 +212,7 @@ public class RocksDBKeyValueStorageFactory implements KeyValueStorageFactory {
                rocksDBMetricsFactory);
          }
        }

        final RocksDbSegmentIdentifier rocksSegment =
            segmentedStorage.getSegmentIdentifierByName(segment);

        if (isForestStorageFormat) {
          return new SegmentedKeyValueStorageAdapter<>(segment, segmentedStorage);
        } else {
          return new SnappableSegmentedKeyValueStorageAdapter<>(
              segment,
              segmentedStorage,
              () ->
                  ((OptimisticRocksDBColumnarKeyValueStorage) segmentedStorage)
                      .takeSnapshot(rocksSegment));
        }
        return segmentedStorage;
      }
      default -> throw new IllegalStateException(
          String.format(
@@ -229,7 +241,6 @@ public class RocksDBKeyValueStorageFactory implements KeyValueStorageFactory {
              + " could not be found. You may not have the appropriate permission to access the item.";
      throw new StorageException(message, e);
    }
    isSegmentIsolationSupported = databaseVersion >= 1;
    rocksDBConfiguration =
        RocksDBConfigurationBuilder.from(configuration.get())
            .databaseDir(storagePath(commonConfiguration))
@@ -278,9 +289,7 @@ public class RocksDBKeyValueStorageFactory implements KeyValueStorageFactory {

  @Override
  public boolean isSegmentIsolationSupported() {
    return checkNotNull(
        isSegmentIsolationSupported,
        "Whether segment isolation is supported will be determined during creation. Call a creation method first");
    return true;
  }

  @Override

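One behavioral consequence of the configuredSegments rename plus the containment check above: asking the factory for a segment it was never configured with now fails fast instead of silently creating it. A hedged sketch; the segment pairing and method are illustrative:

import java.util.List;

import org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueSegmentIdentifier;
import org.hyperledger.besu.plugin.services.BesuConfiguration;
import org.hyperledger.besu.plugin.services.MetricsSystem;
import org.hyperledger.besu.plugin.services.exception.StorageException;
import org.hyperledger.besu.plugin.services.storage.rocksdb.RocksDBKeyValueStorageFactory;

class SafetyCheckSketch {
  void requestUnconfiguredSegment(
      final RocksDBKeyValueStorageFactory factory,
      final BesuConfiguration configuration,
      final MetricsSystem metricsSystem) {
    try {
      // Hypothetical: the factory was configured with BLOCKCHAIN only, so also
      // asking for TRIE_LOG_STORAGE trips the containsAll() safety check.
      factory.create(
          List.of(KeyValueSegmentIdentifier.BLOCKCHAIN, KeyValueSegmentIdentifier.TRIE_LOG_STORAGE),
          configuration,
          metricsSystem);
    } catch (final StorageException e) {
      // Message names the segments that were missing from configuredSegments.
    }
  }
}
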
@@ -0,0 +1,121 @@
|
||||
/*
|
||||
* Copyright Hyperledger Besu Contributors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations under the License.
|
||||
*
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
package org.hyperledger.besu.plugin.services.storage.rocksdb;
|
||||
|
||||
import org.hyperledger.besu.plugin.services.exception.StorageException;
|
||||
import org.hyperledger.besu.plugin.services.metrics.OperationTimer;
|
||||
import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier;
|
||||
import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorageTransaction;
|
||||
|
||||
import java.util.function.Function;
|
||||
|
||||
import org.rocksdb.ColumnFamilyHandle;
|
||||
import org.rocksdb.RocksDBException;
|
||||
import org.rocksdb.Transaction;
|
||||
import org.rocksdb.WriteOptions;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
/** The RocksDb transaction. */
|
||||
public class RocksDBTransaction implements SegmentedKeyValueStorageTransaction {
|
||||
private static final Logger logger = LoggerFactory.getLogger(RocksDBTransaction.class);
|
||||
private static final String NO_SPACE_LEFT_ON_DEVICE = "No space left on device";
|
||||
|
||||
private final RocksDBMetrics metrics;
|
||||
private final Transaction innerTx;
|
||||
private final WriteOptions options;
|
||||
private final Function<SegmentIdentifier, ColumnFamilyHandle> columnFamilyMapper;
|
||||
|
||||
/**
|
||||
* Instantiates a new RocksDb transaction.
|
||||
*
|
||||
* @param columnFamilyMapper mapper from segment identifier to column family handle
|
||||
* @param innerTx the inner tx
|
||||
* @param options the options
|
||||
* @param metrics the metrics
|
||||
*/
|
||||
public RocksDBTransaction(
|
||||
final Function<SegmentIdentifier, ColumnFamilyHandle> columnFamilyMapper,
|
||||
final Transaction innerTx,
|
||||
final WriteOptions options,
|
||||
final RocksDBMetrics metrics) {
|
||||
this.columnFamilyMapper = columnFamilyMapper;
|
||||
this.innerTx = innerTx;
|
||||
this.options = options;
|
||||
this.metrics = metrics;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void put(final SegmentIdentifier segmentId, final byte[] key, final byte[] value) {
|
||||
try (final OperationTimer.TimingContext ignored = metrics.getWriteLatency().startTimer()) {
|
||||
innerTx.put(columnFamilyMapper.apply(segmentId), key, value);
|
||||
} catch (final RocksDBException e) {
|
||||
if (e.getMessage().contains(NO_SPACE_LEFT_ON_DEVICE)) {
|
||||
logger.error(e.getMessage());
|
||||
System.exit(0);
|
||||
}
|
||||
throw new StorageException(e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void remove(final SegmentIdentifier segmentId, final byte[] key) {
|
||||
try (final OperationTimer.TimingContext ignored = metrics.getRemoveLatency().startTimer()) {
|
||||
innerTx.delete(columnFamilyMapper.apply(segmentId), key);
|
||||
} catch (final RocksDBException e) {
|
||||
if (e.getMessage().contains(NO_SPACE_LEFT_ON_DEVICE)) {
|
||||
logger.error(e.getMessage());
|
||||
System.exit(0);
|
||||
}
|
||||
throw new StorageException(e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void commit() throws StorageException {
|
||||
try (final OperationTimer.TimingContext ignored = metrics.getCommitLatency().startTimer()) {
|
||||
innerTx.commit();
|
||||
} catch (final RocksDBException e) {
|
||||
if (e.getMessage().contains(NO_SPACE_LEFT_ON_DEVICE)) {
|
||||
logger.error(e.getMessage());
|
||||
System.exit(0);
|
||||
}
|
||||
throw new StorageException(e);
|
||||
} finally {
|
||||
close();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void rollback() {
|
||||
try {
|
||||
innerTx.rollback();
|
||||
metrics.getRollbackCount().inc();
|
||||
} catch (final RocksDBException e) {
|
||||
if (e.getMessage().contains(NO_SPACE_LEFT_ON_DEVICE)) {
|
||||
logger.error(e.getMessage());
|
||||
System.exit(0);
|
||||
}
|
||||
throw new StorageException(e);
|
||||
} finally {
|
||||
close();
|
||||
}
|
||||
}
|
||||
|
||||
private void close() {
|
||||
innerTx.close();
|
||||
options.close();
|
||||
}
|
||||
}
|
||||
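Taken together, put/remove/commit/rollback give plugin code a single segment-aware transaction type. A minimal usage sketch, assuming a SegmentedKeyValueStorage named store is already wired up (the helper itself is illustrative, not part of this commit):

import org.hyperledger.besu.plugin.services.exception.StorageException;
import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier;
import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorageTransaction;

class SegmentedTxSketch {
  // Hypothetical helper: writes one entry atomically into the given segment.
  static void putAtomically(
      final SegmentedKeyValueStorage store,
      final SegmentIdentifier segment,
      final byte[] key,
      final byte[] value) {
    final SegmentedKeyValueStorageTransaction tx = store.startTransaction();
    try {
      tx.put(segment, key, value); // operations name the segment explicitly
    } catch (final StorageException e) {
      tx.rollback(); // rollback() also releases the native RocksDB resources
      throw e;
    }
    tx.commit(); // commit() closes the inner transaction and its WriteOptions
  }
}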
@@ -17,10 +17,12 @@ package org.hyperledger.besu.plugin.services.storage.rocksdb.segmented;
import org.hyperledger.besu.plugin.services.MetricsSystem;
import org.hyperledger.besu.plugin.services.exception.StorageException;
import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier;
import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorageTransaction;
import org.hyperledger.besu.plugin.services.storage.SnappableKeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.rocksdb.RocksDBMetricsFactory;
import org.hyperledger.besu.plugin.services.storage.rocksdb.RocksDbSegmentIdentifier;
import org.hyperledger.besu.plugin.services.storage.rocksdb.RocksDBTransaction;
import org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.RocksDBConfiguration;
import org.hyperledger.besu.services.kvstore.SegmentedKeyValueStorageTransactionTransitionValidatorDecorator;
import org.hyperledger.besu.services.kvstore.SegmentedKeyValueStorageTransactionValidatorDecorator;

import java.util.List;

@@ -30,7 +32,8 @@ import org.rocksdb.RocksDBException;
import org.rocksdb.WriteOptions;

/** Optimistic RocksDB Columnar key value storage */
public class OptimisticRocksDBColumnarKeyValueStorage extends RocksDBColumnarKeyValueStorage {
public class OptimisticRocksDBColumnarKeyValueStorage extends RocksDBColumnarKeyValueStorage
    implements SnappableKeyValueStorage {
  private final OptimisticTransactionDB db;

  /**
@@ -57,7 +60,7 @@ public class OptimisticRocksDBColumnarKeyValueStorage extends RocksDBColumnarKey
          OptimisticTransactionDB.open(
              options, configuration.getDatabaseDir().toString(), columnDescriptors, columnHandles);
      initMetrics();
      initColumnHandler();
      initColumnHandles();

    } catch (final RocksDBException e) {
      throw new StorageException(e);
@@ -76,24 +79,25 @@ public class OptimisticRocksDBColumnarKeyValueStorage extends RocksDBColumnarKey
   * @throws StorageException the storage exception
   */
  @Override
  public Transaction<RocksDbSegmentIdentifier> startTransaction() throws StorageException {
  public SegmentedKeyValueStorageTransaction startTransaction() throws StorageException {
    throwIfClosed();
    final WriteOptions writeOptions = new WriteOptions();
    writeOptions.setIgnoreMissingColumnFamilies(true);
    return new SegmentedKeyValueStorageTransactionTransitionValidatorDecorator<>(
        new RocksDbTransaction(db.beginTransaction(writeOptions), writeOptions), this.closed::get);
    return new SegmentedKeyValueStorageTransactionValidatorDecorator(
        new RocksDBTransaction(
            this::safeColumnHandle, db.beginTransaction(writeOptions), writeOptions, this.metrics),
        this.closed::get);
  }

  /**
   * Take snapshot RocksDb columnar key value snapshot.
   *
   * @param segment the segment
   * @return the RocksDb columnar key value snapshot
   * @throws StorageException the storage exception
   */
  public RocksDBColumnarKeyValueSnapshot takeSnapshot(final RocksDbSegmentIdentifier segment)
      throws StorageException {
  @Override
  public RocksDBColumnarKeyValueSnapshot takeSnapshot() throws StorageException {
    throwIfClosed();
    return new RocksDBColumnarKeyValueSnapshot(db, segment, metrics);
    return new RocksDBColumnarKeyValueSnapshot(db, this::safeColumnHandle, metrics);
  }
}

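A sketch of the API shift above: the snapshot is now taken over the whole store rather than over one segment, and reads name their segment at call time (store wiring and resource handling are elided for illustration):

import java.util.Optional;

import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier;
import org.hyperledger.besu.plugin.services.storage.rocksdb.segmented.OptimisticRocksDBColumnarKeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.rocksdb.segmented.RocksDBColumnarKeyValueSnapshot;

class SnapshotSketch {
  // Hypothetical helper: point-in-time read; closing the snapshot is left to
  // its owner in real code.
  static Optional<byte[]> readAtSnapshot(
      final OptimisticRocksDBColumnarKeyValueStorage store,
      final SegmentIdentifier segment,
      final byte[] key) {
    // before this commit: store.takeSnapshot(segmentHandle); now segment-free
    final RocksDBColumnarKeyValueSnapshot snapshot = store.takeSnapshot();
    return snapshot.get(segment, key); // reads are segment-scoped per call
  }
}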
@@ -18,26 +18,30 @@ package org.hyperledger.besu.plugin.services.storage.rocksdb.segmented;
import static java.util.stream.Collectors.toUnmodifiableSet;

import org.hyperledger.besu.plugin.services.exception.StorageException;
import org.hyperledger.besu.plugin.services.storage.KeyValueStorageTransaction;
import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier;
import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorageTransaction;
import org.hyperledger.besu.plugin.services.storage.SnappedKeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.rocksdb.RocksDBMetrics;
import org.hyperledger.besu.plugin.services.storage.rocksdb.RocksDbSegmentIdentifier;

import java.io.IOException;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.stream.Stream;

import org.apache.commons.lang3.tuple.Pair;
import org.apache.tuweni.bytes.Bytes;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.OptimisticTransactionDB;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/** The RocksDb columnar key value snapshot. */
public class RocksDBColumnarKeyValueSnapshot implements SnappedKeyValueStorage {
public class RocksDBColumnarKeyValueSnapshot
    implements SegmentedKeyValueStorage, SnappedKeyValueStorage {

  private static final Logger LOG = LoggerFactory.getLogger(RocksDBColumnarKeyValueSnapshot.class);

@@ -53,62 +57,66 @@ public class RocksDBColumnarKeyValueSnapshot implements SnappedKeyValueStorage {
   * Instantiates a new RocksDb columnar key value snapshot.
   *
   * @param db the db
   * @param columnFamilyMapper mapper from segment identifier to column family handle
   * @param metrics the metrics
   */
  RocksDBColumnarKeyValueSnapshot(
      final OptimisticTransactionDB db,
      final RocksDbSegmentIdentifier segment,
      final Function<SegmentIdentifier, ColumnFamilyHandle> columnFamilyMapper,
      final RocksDBMetrics metrics) {
    this.db = db;
    this.snapTx = new RocksDBSnapshotTransaction(db, segment.get(), metrics);
    this.snapTx = new RocksDBSnapshotTransaction(db, columnFamilyMapper, metrics);
  }

  @Override
  public Optional<byte[]> get(final byte[] key) throws StorageException {
  public Optional<byte[]> get(final SegmentIdentifier segment, final byte[] key)
      throws StorageException {
    throwIfClosed();
    return snapTx.get(key);
    return snapTx.get(segment, key);
  }

  @Override
  public Stream<Pair<byte[], byte[]>> stream() {
  public Stream<Pair<byte[], byte[]>> stream(final SegmentIdentifier segment) {
    throwIfClosed();
    return snapTx.stream();
    return snapTx.stream(segment);
  }

  @Override
  public Stream<Pair<byte[], byte[]>> streamFromKey(final byte[] startKey) {
    return stream().filter(e -> Bytes.wrap(startKey).compareTo(Bytes.wrap(e.getKey())) <= 0);
  public Stream<Pair<byte[], byte[]>> streamFromKey(
      final SegmentIdentifier segment, final byte[] startKey) {
    return stream(segment).filter(e -> Bytes.wrap(startKey).compareTo(Bytes.wrap(e.getKey())) <= 0);
  }

  @Override
  public Stream<byte[]> streamKeys() {
  public Stream<byte[]> streamKeys(final SegmentIdentifier segment) {
    throwIfClosed();
    return snapTx.streamKeys();
    return snapTx.streamKeys(segment);
  }

  @Override
  public boolean tryDelete(final byte[] key) throws StorageException {
  public boolean tryDelete(final SegmentIdentifier segment, final byte[] key)
      throws StorageException {
    throwIfClosed();
    snapTx.remove(key);
    snapTx.remove(segment, key);
    return true;
  }

  @Override
  public Set<byte[]> getAllKeysThat(final Predicate<byte[]> returnCondition) {
    return streamKeys().filter(returnCondition).collect(toUnmodifiableSet());
  public Set<byte[]> getAllKeysThat(
      final SegmentIdentifier segment, final Predicate<byte[]> returnCondition) {
    return streamKeys(segment).filter(returnCondition).collect(toUnmodifiableSet());
  }

  @Override
  public Set<byte[]> getAllValuesFromKeysThat(final Predicate<byte[]> returnCondition) {
    return stream()
  public Set<byte[]> getAllValuesFromKeysThat(
      final SegmentIdentifier segment, final Predicate<byte[]> returnCondition) {
    return stream(segment)
        .filter(pair -> returnCondition.test(pair.getKey()))
        .map(Pair::getValue)
        .collect(toUnmodifiableSet());
  }

  @Override
  public KeyValueStorageTransaction startTransaction() throws StorageException {
  public SegmentedKeyValueStorageTransaction startTransaction() throws StorageException {
    // The use of a transaction on a transaction-based key value store is dubious
    // at best. Return our snapshot transaction instead.
    return snapTx;
@@ -120,15 +128,16 @@ public class RocksDBColumnarKeyValueSnapshot implements SnappedKeyValueStorage {
  }

  @Override
  public void clear() {
  public void clear(final SegmentIdentifier segment) {
    throw new UnsupportedOperationException(
        "RocksDBColumnarKeyValueSnapshot does not support clear");
  }

  @Override
  public boolean containsKey(final byte[] key) throws StorageException {
  public boolean containsKey(final SegmentIdentifier segment, final byte[] key)
      throws StorageException {
    throwIfClosed();
    return snapTx.get(key).isPresent();
    return snapTx.get(segment, key).isPresent();
  }

  @Override
@@ -146,7 +155,7 @@ public class RocksDBColumnarKeyValueSnapshot implements SnappedKeyValueStorage {
  }

  @Override
  public KeyValueStorageTransaction getSnapshotTransaction() {
  public SegmentedKeyValueStorageTransaction getSnapshotTransaction() {
    return snapTx;
  }
}

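One consequence of startTransaction() handing back the snapshot's own long-lived transaction, sketched below: writes made through it should be visible to reads through the same snapshot even before any commit, since a RocksDB transaction reads its own write set (this is an inference from the code above, not a documented guarantee):

import java.util.Optional;

import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier;
import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorageTransaction;
import org.hyperledger.besu.plugin.services.storage.rocksdb.segmented.RocksDBColumnarKeyValueSnapshot;

class SnapshotTxSketch {
  // Hypothetical illustration; arguments and wiring are not part of the commit.
  static Optional<byte[]> readYourWrite(
      final RocksDBColumnarKeyValueSnapshot snapshot,
      final SegmentIdentifier segment,
      final byte[] key,
      final byte[] value) {
    // startTransaction() returns the snapshot's single internal transaction
    final SegmentedKeyValueStorageTransaction tx = snapshot.startTransaction();
    tx.put(segment, key, value);
    // the write above is visible here even though nothing was committed
    return snapshot.get(segment, key);
  }
}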
@@ -14,20 +14,19 @@
 */
package org.hyperledger.besu.plugin.services.storage.rocksdb.segmented;

import static java.util.Objects.requireNonNullElse;
import static java.util.stream.Collectors.toUnmodifiableSet;

import org.hyperledger.besu.plugin.services.MetricsSystem;
import org.hyperledger.besu.plugin.services.exception.StorageException;
import org.hyperledger.besu.plugin.services.metrics.OperationTimer;
import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier;
import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.rocksdb.RocksDBMetrics;
import org.hyperledger.besu.plugin.services.storage.rocksdb.RocksDBMetricsFactory;
import org.hyperledger.besu.plugin.services.storage.rocksdb.RocksDbIterator;
import org.hyperledger.besu.plugin.services.storage.rocksdb.RocksDbSegmentIdentifier;
import org.hyperledger.besu.plugin.services.storage.rocksdb.RocksDbUtil;
import org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.RocksDBConfiguration;
import org.hyperledger.besu.services.kvstore.SegmentedKeyValueStorage;

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
@@ -41,9 +40,7 @@ import java.util.function.Predicate;
import java.util.stream.Collectors;
import java.util.stream.Stream;

import com.google.common.collect.ImmutableMap;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.tuweni.bytes.Bytes;
import org.rocksdb.BlockBasedTableConfig;
import org.rocksdb.BloomFilter;
import org.rocksdb.ColumnFamilyDescriptor;
@@ -66,12 +63,10 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/** The RocksDb columnar key value storage. */
public abstract class RocksDBColumnarKeyValueStorage
    implements SegmentedKeyValueStorage<RocksDbSegmentIdentifier> {
public abstract class RocksDBColumnarKeyValueStorage implements SegmentedKeyValueStorage {

  private static final Logger LOG = LoggerFactory.getLogger(RocksDBColumnarKeyValueStorage.class);
  static final String DEFAULT_COLUMN = "default";
  private static final String NO_SPACE_LEFT_ON_DEVICE = "No space left on device";
  private static final int ROCKSDB_FORMAT_VERSION = 5;
  private static final long ROCKSDB_BLOCK_SIZE = 32768;
  /** RocksDb blockcache size when using the high spec option */
@@ -96,7 +91,6 @@ public abstract class RocksDBColumnarKeyValueStorage
  private final MetricsSystem metricsSystem;
  private final RocksDBMetricsFactory rocksDBMetricsFactory;
  private final RocksDBConfiguration configuration;
  private Map<Bytes, String> segmentsById;
  /** RocksDB DB options */
  protected DBOptions options;

@@ -109,7 +103,7 @@ public abstract class RocksDBColumnarKeyValueStorage
  protected RocksDBMetrics metrics;

  /** Map of the columns handles by name */
  protected Map<String, RocksDbSegmentIdentifier> columnHandlesByName;
  protected Map<SegmentIdentifier, RocksDbSegmentIdentifier> columnHandlesBySegmentIdentifier;
  /** Column descriptors */
  protected List<ColumnFamilyDescriptor> columnDescriptors;
  /** Column handles */
@@ -122,7 +116,7 @@ public abstract class RocksDBColumnarKeyValueStorage
   * Instantiates a new Rocks db columnar key value storage.
   *
   * @param configuration the configuration
   * @param segments the segments
   * @param defaultSegments the default segments
   * @param ignorableSegments the ignorable segments
   * @param metricsSystem the metrics system
   * @param rocksDBMetricsFactory the rocks db metrics factory
@@ -130,7 +124,7 @@ public abstract class RocksDBColumnarKeyValueStorage
   */
  public RocksDBColumnarKeyValueStorage(
      final RocksDBConfiguration configuration,
      final List<SegmentIdentifier> segments,
      final List<SegmentIdentifier> defaultSegments,
      final List<SegmentIdentifier> ignorableSegments,
      final MetricsSystem metricsSystem,
      final RocksDBMetricsFactory rocksDBMetricsFactory)
@@ -142,7 +136,7 @@ public abstract class RocksDBColumnarKeyValueStorage

    try {
      final ColumnFamilyOptions columnFamilyOptions = new ColumnFamilyOptions();
      trimmedSegments = new ArrayList<>(segments);
      trimmedSegments = new ArrayList<>(defaultSegments);
      final List<byte[]> existingColumnFamilies =
          RocksDB.listColumnFamilies(new Options(), configuration.getDatabaseDir().toString());
      // Only ignore segments that do not currently exist
@@ -209,21 +203,32 @@ public abstract class RocksDBColumnarKeyValueStorage
    metrics = rocksDBMetricsFactory.create(metricsSystem, configuration, getDB(), stats);
  }

  void initColumnHandler() throws RocksDBException {

    segmentsById =
  void initColumnHandles() throws RocksDBException {
    // will not include the DEFAULT columnHandle, we do not use it:
    columnHandlesBySegmentIdentifier =
        trimmedSegments.stream()
            .collect(
                Collectors.toMap(
                    segment -> Bytes.wrap(segment.getId()), SegmentIdentifier::getName));
    final ImmutableMap.Builder<String, RocksDbSegmentIdentifier> builder = ImmutableMap.builder();

    for (ColumnFamilyHandle columnHandle : columnHandles) {
      final String segmentName =
          requireNonNullElse(segmentsById.get(Bytes.wrap(columnHandle.getName())), DEFAULT_COLUMN);
      builder.put(segmentName, new RocksDbSegmentIdentifier(getDB(), columnHandle));
    }
    columnHandlesByName = builder.build();
                    segmentId -> segmentId,
                    segment -> {
                      var columnHandle =
                          columnHandles.stream()
                              .filter(
                                  ch -> {
                                    try {
                                      return Arrays.equals(ch.getName(), segment.getId());
                                    } catch (RocksDBException e) {
                                      throw new RuntimeException(e);
                                    }
                                  })
                              .findFirst()
                              .orElseThrow(
                                  () ->
                                      new RuntimeException(
                                          "Column handle not found for segment "
                                              + segment.getName()));
                      return new RocksDbSegmentIdentifier(getDB(), columnHandle);
                    }));
  }

  BlockBasedTableConfig createBlockBasedTableConfig(final RocksDBConfiguration config) {
@@ -239,49 +244,58 @@ public abstract class RocksDBColumnarKeyValueStorage
        .setBlockSize(ROCKSDB_BLOCK_SIZE);
  }

  @Override
  public RocksDbSegmentIdentifier getSegmentIdentifierByName(final SegmentIdentifier segment) {
    return columnHandlesByName.get(segment.getName());
  /**
   * Safe method to map segment identifier to column handle.
   *
   * @param segment segment identifier
   * @return column handle
   */
  protected ColumnFamilyHandle safeColumnHandle(final SegmentIdentifier segment) {
    RocksDbSegmentIdentifier safeRef = columnHandlesBySegmentIdentifier.get(segment);
    if (safeRef == null) {
      throw new RuntimeException("Column handle not found for segment " + segment.getName());
    }
    return safeRef.get();
  }

  @Override
  public Optional<byte[]> get(final RocksDbSegmentIdentifier segment, final byte[] key)
  public Optional<byte[]> get(final SegmentIdentifier segment, final byte[] key)
      throws StorageException {
    throwIfClosed();

    try (final OperationTimer.TimingContext ignored = metrics.getReadLatency().startTimer()) {
      return Optional.ofNullable(getDB().get(segment.get(), readOptions, key));
      return Optional.ofNullable(getDB().get(safeColumnHandle(segment), readOptions, key));
    } catch (final RocksDBException e) {
      throw new StorageException(e);
    }
  }

  @Override
  public Stream<Pair<byte[], byte[]>> stream(final RocksDbSegmentIdentifier segmentHandle) {
    final RocksIterator rocksIterator = getDB().newIterator(segmentHandle.get());
  public Stream<Pair<byte[], byte[]>> stream(final SegmentIdentifier segmentIdentifier) {
    final RocksIterator rocksIterator = getDB().newIterator(safeColumnHandle(segmentIdentifier));
    rocksIterator.seekToFirst();
    return RocksDbIterator.create(rocksIterator).toStream();
  }

  @Override
  public Stream<Pair<byte[], byte[]>> streamFromKey(
      final RocksDbSegmentIdentifier segmentHandle, final byte[] startKey) {
    final RocksIterator rocksIterator = getDB().newIterator(segmentHandle.get());
      final SegmentIdentifier segmentIdentifier, final byte[] startKey) {
    final RocksIterator rocksIterator = getDB().newIterator(safeColumnHandle(segmentIdentifier));
    rocksIterator.seek(startKey);
    return RocksDbIterator.create(rocksIterator).toStream();
  }

  @Override
  public Stream<byte[]> streamKeys(final RocksDbSegmentIdentifier segmentHandle) {
    final RocksIterator rocksIterator = getDB().newIterator(segmentHandle.get());
  public Stream<byte[]> streamKeys(final SegmentIdentifier segmentIdentifier) {
    final RocksIterator rocksIterator = getDB().newIterator(safeColumnHandle(segmentIdentifier));
    rocksIterator.seekToFirst();
    return RocksDbIterator.create(rocksIterator).toStreamKeys();
  }

  @Override
  public boolean tryDelete(final RocksDbSegmentIdentifier segmentHandle, final byte[] key) {
  public boolean tryDelete(final SegmentIdentifier segmentIdentifier, final byte[] key) {
    try {
      getDB().delete(segmentHandle.get(), tryDeleteOptions, key);
      getDB().delete(safeColumnHandle(segmentIdentifier), tryDeleteOptions, key);
      return true;
    } catch (RocksDBException e) {
      if (e.getStatus().getCode() == Status.Code.Incomplete) {
@@ -294,8 +308,8 @@ public abstract class RocksDBColumnarKeyValueStorage

  @Override
  public Set<byte[]> getAllKeysThat(
      final RocksDbSegmentIdentifier segmentHandle, final Predicate<byte[]> returnCondition) {
    return stream(segmentHandle)
      final SegmentIdentifier segmentIdentifier, final Predicate<byte[]> returnCondition) {
    return stream(segmentIdentifier)
        .filter(pair -> returnCondition.test(pair.getKey()))
        .map(Pair::getKey)
        .collect(toUnmodifiableSet());
@@ -303,19 +317,16 @@ public abstract class RocksDBColumnarKeyValueStorage

  @Override
  public Set<byte[]> getAllValuesFromKeysThat(
      final RocksDbSegmentIdentifier segmentHandle, final Predicate<byte[]> returnCondition) {
    return stream(segmentHandle)
      final SegmentIdentifier segmentIdentifier, final Predicate<byte[]> returnCondition) {
    return stream(segmentIdentifier)
        .filter(pair -> returnCondition.test(pair.getKey()))
        .map(Pair::getValue)
        .collect(toUnmodifiableSet());
  }

  @Override
  public void clear(final RocksDbSegmentIdentifier segmentHandle) {

    columnHandlesByName.values().stream()
        .filter(e -> e.equals(segmentHandle))
        .findAny()
  public void clear(final SegmentIdentifier segmentIdentifier) {
    Optional.ofNullable(columnHandlesBySegmentIdentifier.get(segmentIdentifier))
        .ifPresent(RocksDbSegmentIdentifier::reset);
  }

@@ -325,7 +336,7 @@ public abstract class RocksDBColumnarKeyValueStorage
    txOptions.close();
    options.close();
    tryDeleteOptions.close();
    columnHandlesByName.values().stream()
    columnHandlesBySegmentIdentifier.values().stream()
        .map(RocksDbSegmentIdentifier::get)
        .forEach(ColumnFamilyHandle::close);
    getDB().close();
@@ -344,84 +355,5 @@ public abstract class RocksDBColumnarKeyValueStorage
    }
  }

  class RocksDbTransaction implements Transaction<RocksDbSegmentIdentifier> {

    private final org.rocksdb.Transaction innerTx;
    private final WriteOptions options;

    /**
     * Instantiates a new RocksDb transaction.
     *
     * @param innerTx the inner tx
     * @param options the write options
     */
    RocksDbTransaction(final org.rocksdb.Transaction innerTx, final WriteOptions options) {
      this.innerTx = innerTx;
      this.options = options;
    }

    @Override
    public void put(final RocksDbSegmentIdentifier segment, final byte[] key, final byte[] value) {
      try (final OperationTimer.TimingContext ignored = metrics.getWriteLatency().startTimer()) {
        innerTx.put(segment.get(), key, value);
      } catch (final RocksDBException e) {
        if (e.getMessage().contains(NO_SPACE_LEFT_ON_DEVICE)) {
          LOG.error(e.getMessage());
          System.exit(0);
        }
        throw new StorageException(e);
      }
    }

    @Override
    public void remove(final RocksDbSegmentIdentifier segment, final byte[] key) {
      try (final OperationTimer.TimingContext ignored = metrics.getRemoveLatency().startTimer()) {
        innerTx.delete(segment.get(), key);
      } catch (final RocksDBException e) {
        if (e.getMessage().contains(NO_SPACE_LEFT_ON_DEVICE)) {
          LOG.error(e.getMessage());
          System.exit(0);
        }
        throw new StorageException(e);
      }
    }

    @Override
    public synchronized void commit() throws StorageException {
      try (final OperationTimer.TimingContext ignored = metrics.getCommitLatency().startTimer()) {
        innerTx.commit();
      } catch (final RocksDBException e) {
        if (e.getMessage().contains(NO_SPACE_LEFT_ON_DEVICE)) {
          LOG.error(e.getMessage());
          System.exit(0);
        }
        throw new StorageException(e);
      } finally {
        close();
      }
    }

    @Override
    public void rollback() {
      try {
        innerTx.rollback();
        metrics.getRollbackCount().inc();
      } catch (final RocksDBException e) {
        if (e.getMessage().contains(NO_SPACE_LEFT_ON_DEVICE)) {
          LOG.error(e.getMessage());
          System.exit(0);
        }
        throw new StorageException(e);
      } finally {
        close();
      }
    }

    private void close() {
      innerTx.close();
      options.close();
    }
  }

  abstract RocksDB getDB();
}

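The `this::safeColumnHandle` argument threaded through transactions and snapshots above is just a `Function<SegmentIdentifier, ColumnFamilyHandle>`. A stripped-down sketch of the indirection (the class and map below are hypothetical); one plausible reason for passing a function rather than a raw handle is that the handle is re-resolved on every use, so a column family swapped out by clear()/reset() is picked up automatically:

import java.util.Map;
import java.util.function.Function;

import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier;
import org.rocksdb.ColumnFamilyHandle;

class ColumnFamilyMapperSketch {
  // Hypothetical stand-in for columnHandlesBySegmentIdentifier.
  private final Map<SegmentIdentifier, ColumnFamilyHandle> handles;

  ColumnFamilyMapperSketch(final Map<SegmentIdentifier, ColumnFamilyHandle> handles) {
    this.handles = handles;
  }

  ColumnFamilyHandle lookup(final SegmentIdentifier segment) {
    final ColumnFamilyHandle handle = handles.get(segment);
    if (handle == null) {
      throw new RuntimeException("Column handle not found for segment " + segment.getName());
    }
    return handle;
  }

  // Consumers hold the lookup, not the handle, so resolution happens per call.
  Function<SegmentIdentifier, ColumnFamilyHandle> asMapper() {
    return this::lookup;
  }
}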
@@ -17,12 +17,14 @@ package org.hyperledger.besu.plugin.services.storage.rocksdb.segmented;

import org.hyperledger.besu.plugin.services.exception.StorageException;
import org.hyperledger.besu.plugin.services.metrics.OperationTimer;
import org.hyperledger.besu.plugin.services.storage.KeyValueStorageTransaction;
import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier;
import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorageTransaction;
import org.hyperledger.besu.plugin.services.storage.rocksdb.RocksDBMetrics;
import org.hyperledger.besu.plugin.services.storage.rocksdb.RocksDbIterator;

import java.util.Optional;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Function;
import java.util.stream.Stream;

import org.apache.commons.lang3.tuple.Pair;
@@ -37,12 +39,13 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/** The Rocks db snapshot transaction. */
public class RocksDBSnapshotTransaction implements KeyValueStorageTransaction, AutoCloseable {
public class RocksDBSnapshotTransaction
    implements SegmentedKeyValueStorageTransaction, AutoCloseable {
  private static final Logger LOG = LoggerFactory.getLogger(RocksDBSnapshotTransaction.class);
  private static final String NO_SPACE_LEFT_ON_DEVICE = "No space left on device";
  private final RocksDBMetrics metrics;
  private final OptimisticTransactionDB db;
  private final ColumnFamilyHandle columnFamilyHandle;
  private final Function<SegmentIdentifier, ColumnFamilyHandle> columnFamilyMapper;
  private final Transaction snapTx;
  private final RocksDBSnapshot snapshot;
  private final WriteOptions writeOptions;
@@ -53,16 +56,16 @@ public class RocksDBSnapshotTransaction implements KeyValueStorageTransaction, A
   * Instantiates a new RocksDb snapshot transaction.
   *
   * @param db the db
   * @param columnFamilyHandle the column family handle
   * @param columnFamilyMapper mapper from segment identifier to column family handle
   * @param metrics the metrics
   */
  RocksDBSnapshotTransaction(
      final OptimisticTransactionDB db,
      final ColumnFamilyHandle columnFamilyHandle,
      final Function<SegmentIdentifier, ColumnFamilyHandle> columnFamilyMapper,
      final RocksDBMetrics metrics) {
    this.metrics = metrics;
    this.db = db;
    this.columnFamilyHandle = columnFamilyHandle;
    this.columnFamilyMapper = columnFamilyMapper;
    this.snapshot = new RocksDBSnapshot(db);
    this.writeOptions = new WriteOptions();
    this.snapTx = db.beginTransaction(writeOptions);
@@ -72,14 +75,14 @@ public class RocksDBSnapshotTransaction implements KeyValueStorageTransaction, A

  private RocksDBSnapshotTransaction(
      final OptimisticTransactionDB db,
      final ColumnFamilyHandle columnFamilyHandle,
      final Function<SegmentIdentifier, ColumnFamilyHandle> columnFamilyMapper,
      final RocksDBMetrics metrics,
      final RocksDBSnapshot snapshot,
      final Transaction snapTx,
      final ReadOptions readOptions) {
    this.metrics = metrics;
    this.db = db;
    this.columnFamilyHandle = columnFamilyHandle;
    this.columnFamilyMapper = columnFamilyMapper;
    this.snapshot = snapshot;
    this.writeOptions = new WriteOptions();
    this.readOptions = readOptions;
@@ -89,25 +92,26 @@ public class RocksDBSnapshotTransaction implements KeyValueStorageTransaction, A
  /**
   * Get data against given key.
   *
   * @param segmentId the segment id
   * @param key the key
   * @return the optional data
   */
  public Optional<byte[]> get(final byte[] key) {
  public Optional<byte[]> get(final SegmentIdentifier segmentId, final byte[] key) {
    throwIfClosed();

    try (final OperationTimer.TimingContext ignored = metrics.getReadLatency().startTimer()) {
      return Optional.ofNullable(snapTx.get(columnFamilyHandle, readOptions, key));
      return Optional.ofNullable(snapTx.get(columnFamilyMapper.apply(segmentId), readOptions, key));
    } catch (final RocksDBException e) {
      throw new StorageException(e);
    }
  }

  @Override
  public void put(final byte[] key, final byte[] value) {
  public void put(final SegmentIdentifier segmentId, final byte[] key, final byte[] value) {
    throwIfClosed();

    try (final OperationTimer.TimingContext ignored = metrics.getWriteLatency().startTimer()) {
      snapTx.put(columnFamilyHandle, key, value);
      snapTx.put(columnFamilyMapper.apply(segmentId), key, value);
    } catch (final RocksDBException e) {
      if (e.getMessage().contains(NO_SPACE_LEFT_ON_DEVICE)) {
        LOG.error(e.getMessage());
@@ -118,11 +122,11 @@ public class RocksDBSnapshotTransaction implements KeyValueStorageTransaction, A
  }

  @Override
  public void remove(final byte[] key) {
  public void remove(final SegmentIdentifier segmentId, final byte[] key) {
    throwIfClosed();

    try (final OperationTimer.TimingContext ignored = metrics.getRemoveLatency().startTimer()) {
      snapTx.delete(columnFamilyHandle, key);
      snapTx.delete(columnFamilyMapper.apply(segmentId), key);
    } catch (final RocksDBException e) {
      if (e.getMessage().contains(NO_SPACE_LEFT_ON_DEVICE)) {
        LOG.error(e.getMessage());
@@ -135,12 +139,14 @@ public class RocksDBSnapshotTransaction implements KeyValueStorageTransaction, A
  /**
   * Stream.
   *
   * @param segmentId the segment id
   * @return the stream
   */
  public Stream<Pair<byte[], byte[]>> stream() {
  public Stream<Pair<byte[], byte[]>> stream(final SegmentIdentifier segmentId) {
    throwIfClosed();

    final RocksIterator rocksIterator = db.newIterator(columnFamilyHandle, readOptions);
    final RocksIterator rocksIterator =
        db.newIterator(columnFamilyMapper.apply(segmentId), readOptions);
    rocksIterator.seekToFirst();
    return RocksDbIterator.create(rocksIterator).toStream();
  }
@@ -148,12 +154,14 @@ public class RocksDBSnapshotTransaction implements KeyValueStorageTransaction, A
  /**
   * Stream keys.
   *
   * @param segmentId the segment id
   * @return the stream
   */
  public Stream<byte[]> streamKeys() {
  public Stream<byte[]> streamKeys(final SegmentIdentifier segmentId) {
    throwIfClosed();

    final RocksIterator rocksIterator = db.newIterator(columnFamilyHandle, readOptions);
    final RocksIterator rocksIterator =
        db.newIterator(columnFamilyMapper.apply(segmentId), readOptions);
    rocksIterator.seekToFirst();
    return RocksDbIterator.create(rocksIterator).toStreamKeys();
  }
@@ -193,7 +201,7 @@ public class RocksDBSnapshotTransaction implements KeyValueStorageTransaction, A
      var copySnapTx = db.beginTransaction(writeOptions);
      copySnapTx.rebuildFromWriteBatch(snapTx.getWriteBatch().getWriteBatch());
      return new RocksDBSnapshotTransaction(
          db, columnFamilyHandle, metrics, snapshot, copySnapTx, copyReadOptions);
          db, columnFamilyMapper, metrics, snapshot, copySnapTx, copyReadOptions);
    } catch (Exception ex) {
      LOG.error("Failed to copy snapshot transaction", ex);
      snapshot.unMarkSnapshot();

@@ -17,10 +17,11 @@ package org.hyperledger.besu.plugin.services.storage.rocksdb.segmented;
import org.hyperledger.besu.plugin.services.MetricsSystem;
import org.hyperledger.besu.plugin.services.exception.StorageException;
import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier;
import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorageTransaction;
import org.hyperledger.besu.plugin.services.storage.rocksdb.RocksDBMetricsFactory;
import org.hyperledger.besu.plugin.services.storage.rocksdb.RocksDbSegmentIdentifier;
import org.hyperledger.besu.plugin.services.storage.rocksdb.RocksDBTransaction;
import org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.RocksDBConfiguration;
import org.hyperledger.besu.services.kvstore.SegmentedKeyValueStorageTransactionTransitionValidatorDecorator;
import org.hyperledger.besu.services.kvstore.SegmentedKeyValueStorageTransactionValidatorDecorator;

import java.util.List;

@@ -62,7 +63,7 @@ public class TransactionDBRocksDBColumnarKeyValueStorage extends RocksDBColumnar
              columnDescriptors,
              columnHandles);
      initMetrics();
      initColumnHandler();
      initColumnHandles();

    } catch (final RocksDBException e) {
      throw new StorageException(e);
@@ -81,11 +82,13 @@ public class TransactionDBRocksDBColumnarKeyValueStorage extends RocksDBColumnar
   * @throws StorageException the storage exception
   */
  @Override
  public Transaction<RocksDbSegmentIdentifier> startTransaction() throws StorageException {
  public SegmentedKeyValueStorageTransaction startTransaction() throws StorageException {
    throwIfClosed();
    final WriteOptions writeOptions = new WriteOptions();
    writeOptions.setIgnoreMissingColumnFamilies(true);
    return new SegmentedKeyValueStorageTransactionTransitionValidatorDecorator<>(
        new RocksDbTransaction(db.beginTransaction(writeOptions), writeOptions), this.closed::get);
    return new SegmentedKeyValueStorageTransactionValidatorDecorator(
        new RocksDBTransaction(
            this::safeColumnHandle, db.beginTransaction(writeOptions), writeOptions, metrics),
        this.closed::get);
  }
}

@@ -15,6 +15,7 @@
package org.hyperledger.besu.plugin.services.storage.rocksdb;

import static org.assertj.core.api.Assertions.assertThat;
import static org.hyperledger.besu.plugin.services.storage.rocksdb.segmented.RocksDBColumnarKeyValueStorageTest.TestSegment;
import static org.mockito.Mockito.when;

import org.hyperledger.besu.metrics.ObservableMetricsSystem;
@@ -43,8 +44,8 @@ public class RocksDBKeyValuePrivacyStorageFactoryTest {
  @Mock private BesuConfiguration commonConfiguration;
  @TempDir private Path temporaryFolder;
  private final ObservableMetricsSystem metricsSystem = new NoOpMetricsSystem();
  private final List<SegmentIdentifier> segments = List.of();
  @Mock private SegmentIdentifier segment;
  private final SegmentIdentifier segment = TestSegment.BAR;
  private final List<SegmentIdentifier> segments = List.of(segment);

  @Test
  public void shouldDetectVersion1DatabaseIfNoMetadataFileFound() throws Exception {

@@ -26,6 +26,7 @@ import org.hyperledger.besu.plugin.services.exception.StorageException;
import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier;
import org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.DatabaseMetadata;
import org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.RocksDBFactoryConfiguration;
import org.hyperledger.besu.plugin.services.storage.rocksdb.segmented.RocksDBColumnarKeyValueStorageTest.TestSegment;

import java.nio.charset.Charset;
import java.nio.file.Files;
@@ -48,8 +49,8 @@ public class RocksDBKeyValueStorageFactoryTest {
  @Mock private BesuConfiguration commonConfiguration;
  @TempDir public Path temporaryFolder;
  private final ObservableMetricsSystem metricsSystem = new NoOpMetricsSystem();
  private final List<SegmentIdentifier> segments = List.of();
  @Mock private SegmentIdentifier segment;
  private final SegmentIdentifier segment = TestSegment.FOO;
  private final List<SegmentIdentifier> segments = List.of(segment);

  @Test
  public void shouldCreateCorrectMetadataFileForLatestVersion() throws Exception {

@@ -17,10 +17,9 @@ package org.hyperledger.besu.plugin.services.storage.rocksdb.segmented;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
import org.hyperledger.besu.plugin.services.MetricsSystem;
import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier;
import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.rocksdb.RocksDBMetricsFactory;
import org.hyperledger.besu.plugin.services.storage.rocksdb.RocksDbSegmentIdentifier;
import org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.RocksDBConfigurationBuilder;
import org.hyperledger.besu.services.kvstore.SegmentedKeyValueStorage;

import java.nio.file.Files;
import java.nio.file.Path;
@@ -35,8 +34,7 @@ public class OptimisticTransactionDBRocksDBColumnarKeyValueStorageTest
    extends RocksDBColumnarKeyValueStorageTest {

  @Override
  protected SegmentedKeyValueStorage<RocksDbSegmentIdentifier> createSegmentedStore()
      throws Exception {
  protected SegmentedKeyValueStorage createSegmentedStore() throws Exception {
    return new OptimisticRocksDBColumnarKeyValueStorage(
        new RocksDBConfigurationBuilder()
            .databaseDir(Files.createTempDirectory("segmentedStore"))
@@ -48,7 +46,7 @@ public class OptimisticTransactionDBRocksDBColumnarKeyValueStorageTest
  }

  @Override
  protected SegmentedKeyValueStorage<RocksDbSegmentIdentifier> createSegmentedStore(
  protected SegmentedKeyValueStorage createSegmentedStore(
      final Path path,
      final List<SegmentIdentifier> segments,
      final List<SegmentIdentifier> ignorableSegments) {
@@ -61,7 +59,7 @@ public class OptimisticTransactionDBRocksDBColumnarKeyValueStorageTest
  }

  @Override
  protected SegmentedKeyValueStorage<RocksDbSegmentIdentifier> createSegmentedStore(
  protected SegmentedKeyValueStorage createSegmentedStore(
      final Path path,
      final MetricsSystem metricsSystem,
      final List<SegmentIdentifier> segments,

@@ -33,10 +33,9 @@ import org.hyperledger.besu.plugin.services.metrics.LabelledMetric;
import org.hyperledger.besu.plugin.services.metrics.OperationTimer;
import org.hyperledger.besu.plugin.services.storage.KeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier;
import org.hyperledger.besu.plugin.services.storage.rocksdb.RocksDbSegmentIdentifier;
import org.hyperledger.besu.services.kvstore.SegmentedKeyValueStorage;
import org.hyperledger.besu.services.kvstore.SegmentedKeyValueStorage.Transaction;
import org.hyperledger.besu.services.kvstore.SnappableSegmentedKeyValueStorageAdapter;
import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorageTransaction;
import org.hyperledger.besu.services.kvstore.SegmentedKeyValueStorageAdapter;

import java.nio.charset.StandardCharsets;
import java.nio.file.Path;
@@ -67,14 +66,14 @@ public abstract class RocksDBColumnarKeyValueStorageTest extends AbstractKeyValu
    final byte[] key = bytesFromHexString("0001");
    final byte[] val1 = bytesFromHexString("0FFF");
    final byte[] val2 = bytesFromHexString("1337");
    final SegmentedKeyValueStorage<RocksDbSegmentIdentifier> store = createSegmentedStore();
    RocksDbSegmentIdentifier segment = store.getSegmentIdentifierByName(TestSegment.FOO);
    final SegmentedKeyValueStorage store = createSegmentedStore();
    var segment = TestSegment.FOO;
    KeyValueStorage duplicateSegmentRef =
        new SnappableSegmentedKeyValueStorageAdapter<>(TestSegment.FOO, store);
        new SegmentedKeyValueStorageAdapter(TestSegment.FOO, store);

    final Consumer<byte[]> insert =
        value -> {
          final Transaction<RocksDbSegmentIdentifier> tx = store.startTransaction();
          final SegmentedKeyValueStorageTransaction tx = store.startTransaction();
          tx.put(segment, key, value);
          tx.commit();
        };
@@ -99,17 +98,13 @@ public abstract class RocksDBColumnarKeyValueStorageTest extends AbstractKeyValu

  @Test
  public void twoSegmentsAreIndependent() throws Exception {
    final SegmentedKeyValueStorage<RocksDbSegmentIdentifier> store = createSegmentedStore();
    final SegmentedKeyValueStorage store = createSegmentedStore();

    final Transaction<RocksDbSegmentIdentifier> tx = store.startTransaction();
    tx.put(
        store.getSegmentIdentifierByName(TestSegment.BAR),
        bytesFromHexString("0001"),
        bytesFromHexString("0FFF"));
    final SegmentedKeyValueStorageTransaction tx = store.startTransaction();
    tx.put(TestSegment.BAR, bytesFromHexString("0001"), bytesFromHexString("0FFF"));
    tx.commit();

    final Optional<byte[]> result =
        store.get(store.getSegmentIdentifierByName(TestSegment.FOO), bytesFromHexString("0001"));
    final Optional<byte[]> result = store.get(TestSegment.FOO, bytesFromHexString("0001"));

    assertThat(result).isEmpty();

@@ -121,43 +116,41 @@ public abstract class RocksDBColumnarKeyValueStorageTest extends AbstractKeyValu
    // we're looping this in order to catch intermittent failures when rocksdb objects are not
    // closed properly
    for (int i = 0; i < 50; i++) {
      final SegmentedKeyValueStorage<RocksDbSegmentIdentifier> store = createSegmentedStore();
      final RocksDbSegmentIdentifier fooSegment = store.getSegmentIdentifierByName(TestSegment.FOO);
      final RocksDbSegmentIdentifier barSegment = store.getSegmentIdentifierByName(TestSegment.BAR);
      final SegmentedKeyValueStorage store = createSegmentedStore();

      final Transaction<RocksDbSegmentIdentifier> tx = store.startTransaction();
      tx.put(fooSegment, bytesOf(1), bytesOf(1));
      tx.put(fooSegment, bytesOf(2), bytesOf(2));
      tx.put(fooSegment, bytesOf(3), bytesOf(3));
      tx.put(barSegment, bytesOf(4), bytesOf(4));
      tx.put(barSegment, bytesOf(5), bytesOf(5));
      tx.put(barSegment, bytesOf(6), bytesOf(6));
      final SegmentedKeyValueStorageTransaction tx = store.startTransaction();
      tx.put(TestSegment.FOO, bytesOf(1), bytesOf(1));
      tx.put(TestSegment.FOO, bytesOf(2), bytesOf(2));
      tx.put(TestSegment.FOO, bytesOf(3), bytesOf(3));
      tx.put(TestSegment.BAR, bytesOf(4), bytesOf(4));
      tx.put(TestSegment.BAR, bytesOf(5), bytesOf(5));
      tx.put(TestSegment.BAR, bytesOf(6), bytesOf(6));
      tx.commit();

      store.stream(fooSegment)
      store.stream(TestSegment.FOO)
          .map(Pair::getKey)
          .forEach(
              key -> {
                if (!Arrays.equals(key, bytesOf(3))) store.tryDelete(fooSegment, key);
                if (!Arrays.equals(key, bytesOf(3))) store.tryDelete(TestSegment.FOO, key);
              });
      store.stream(barSegment)
      store.stream(TestSegment.BAR)
          .map(Pair::getKey)
          .forEach(
              key -> {
                if (!Arrays.equals(key, bytesOf(4))) store.tryDelete(barSegment, key);
                if (!Arrays.equals(key, bytesOf(4))) store.tryDelete(TestSegment.BAR, key);
              });

      for (final RocksDbSegmentIdentifier segment : Set.of(fooSegment, barSegment)) {
      for (final var segment : Set.of(TestSegment.FOO, TestSegment.BAR)) {
        assertThat(store.stream(segment).count()).isEqualTo(1);
      }

      assertThat(store.get(fooSegment, bytesOf(1))).isEmpty();
      assertThat(store.get(fooSegment, bytesOf(2))).isEmpty();
      assertThat(store.get(fooSegment, bytesOf(3))).contains(bytesOf(3));
      assertThat(store.get(TestSegment.FOO, bytesOf(1))).isEmpty();
      assertThat(store.get(TestSegment.FOO, bytesOf(2))).isEmpty();
      assertThat(store.get(TestSegment.FOO, bytesOf(3))).contains(bytesOf(3));

      assertThat(store.get(barSegment, bytesOf(4))).contains(bytesOf(4));
      assertThat(store.get(barSegment, bytesOf(5))).isEmpty();
      assertThat(store.get(barSegment, bytesOf(6))).isEmpty();
      assertThat(store.get(TestSegment.BAR, bytesOf(4))).contains(bytesOf(4));
      assertThat(store.get(TestSegment.BAR, bytesOf(5))).isEmpty();
      assertThat(store.get(TestSegment.BAR, bytesOf(6))).isEmpty();

      store.close();
    }
@@ -165,26 +158,24 @@ public abstract class RocksDBColumnarKeyValueStorageTest extends AbstractKeyValu

  @Test
  public void canGetThroughSegmentIteration() throws Exception {
    final SegmentedKeyValueStorage<RocksDbSegmentIdentifier> store = createSegmentedStore();
    final RocksDbSegmentIdentifier fooSegment = store.getSegmentIdentifierByName(TestSegment.FOO);
    final RocksDbSegmentIdentifier barSegment = store.getSegmentIdentifierByName(TestSegment.BAR);
    final SegmentedKeyValueStorage store = createSegmentedStore();

    final Transaction<RocksDbSegmentIdentifier> tx = store.startTransaction();
    tx.put(fooSegment, bytesOf(1), bytesOf(1));
    tx.put(fooSegment, bytesOf(2), bytesOf(2));
    tx.put(fooSegment, bytesOf(3), bytesOf(3));
    tx.put(barSegment, bytesOf(4), bytesOf(4));
    tx.put(barSegment, bytesOf(5), bytesOf(5));
    tx.put(barSegment, bytesOf(6), bytesOf(6));
    final SegmentedKeyValueStorageTransaction tx = store.startTransaction();
    tx.put(TestSegment.FOO, bytesOf(1), bytesOf(1));
    tx.put(TestSegment.FOO, bytesOf(2), bytesOf(2));
    tx.put(TestSegment.FOO, bytesOf(3), bytesOf(3));
    tx.put(TestSegment.BAR, bytesOf(4), bytesOf(4));
    tx.put(TestSegment.BAR, bytesOf(5), bytesOf(5));
    tx.put(TestSegment.BAR, bytesOf(6), bytesOf(6));
    tx.commit();

    final Set<byte[]> gotFromFoo =
        store.getAllKeysThat(fooSegment, x -> Arrays.equals(x, bytesOf(3)));
        store.getAllKeysThat(TestSegment.FOO, x -> Arrays.equals(x, bytesOf(3)));
    final Set<byte[]> gotFromBar =
        store.getAllKeysThat(
            barSegment, x -> Arrays.equals(x, bytesOf(4)) || Arrays.equals(x, bytesOf(5)));
            TestSegment.BAR, x -> Arrays.equals(x, bytesOf(4)) || Arrays.equals(x, bytesOf(5)));
    final Set<byte[]> gotEmpty =
        store.getAllKeysThat(fooSegment, x -> Arrays.equals(x, bytesOf(0)));
        store.getAllKeysThat(TestSegment.FOO, x -> Arrays.equals(x, bytesOf(0)));

    assertThat(gotFromFoo.size()).isEqualTo(1);
    assertThat(gotFromBar.size()).isEqualTo(2);
@@ -200,7 +191,7 @@ public abstract class RocksDBColumnarKeyValueStorageTest extends AbstractKeyValu
  public void dbShouldIgnoreExperimentalSegmentsIfNotExisted(@TempDir final Path testPath)
      throws Exception {
    // Create new db should ignore experimental column family
    SegmentedKeyValueStorage<RocksDbSegmentIdentifier> store =
    SegmentedKeyValueStorage store =
        createSegmentedStore(
            testPath,
            Arrays.asList(TestSegment.FOO, TestSegment.BAR, TestSegment.EXPERIMENTAL),
@@ -218,7 +209,7 @@ public abstract class RocksDBColumnarKeyValueStorageTest extends AbstractKeyValu
      throws Exception {
    final Path testPath = tempDir.resolve("testdb");
    // Create new db with experimental column family
    SegmentedKeyValueStorage<RocksDbSegmentIdentifier> store =
    SegmentedKeyValueStorage store =
        createSegmentedStore(
            testPath,
            Arrays.asList(TestSegment.FOO, TestSegment.BAR, TestSegment.EXPERIMENTAL),
@@ -248,7 +239,7 @@ public abstract class RocksDBColumnarKeyValueStorageTest extends AbstractKeyValu
  public void dbWillBeBackwardIncompatibleAfterExperimentalSegmentsAreAdded(
      @TempDir final Path testPath) throws Exception {
    // Create new db should ignore experimental column family
    SegmentedKeyValueStorage<RocksDbSegmentIdentifier> store =
    SegmentedKeyValueStorage store =
        createSegmentedStore(
            testPath,
            Arrays.asList(TestSegment.FOO, TestSegment.BAR, TestSegment.EXPERIMENTAL),
@@ -300,12 +291,11 @@ public abstract class RocksDBColumnarKeyValueStorageTest extends AbstractKeyValu

    // Actual call

    final SegmentedKeyValueStorage<RocksDbSegmentIdentifier> store =
    final SegmentedKeyValueStorage store =
        createSegmentedStore(
            folder, metricsSystemMock, List.of(TestSegment.FOO), List.of(TestSegment.EXPERIMENTAL));

    KeyValueStorage keyValueStorage =
        new SnappableSegmentedKeyValueStorageAdapter<>(TestSegment.FOO, store);
    KeyValueStorage keyValueStorage = new SegmentedKeyValueStorageAdapter(TestSegment.FOO, store);

    // Assertions
    assertThat(keyValueStorage).isNotNull();
@@ -389,15 +379,14 @@ public abstract class RocksDBColumnarKeyValueStorageTest extends AbstractKeyValu
    }
  }

  protected abstract SegmentedKeyValueStorage<RocksDbSegmentIdentifier> createSegmentedStore()
      throws Exception;
  protected abstract SegmentedKeyValueStorage createSegmentedStore() throws Exception;

  protected abstract SegmentedKeyValueStorage<RocksDbSegmentIdentifier> createSegmentedStore(
  protected abstract SegmentedKeyValueStorage createSegmentedStore(
      final Path path,
      final List<SegmentIdentifier> segments,
      final List<SegmentIdentifier> ignorableSegments);

  protected abstract SegmentedKeyValueStorage<RocksDbSegmentIdentifier> createSegmentedStore(
  protected abstract SegmentedKeyValueStorage createSegmentedStore(
      final Path path,
      final MetricsSystem metricsSystem,
      final List<SegmentIdentifier> segments,
@@ -405,6 +394,6 @@ public abstract class RocksDBColumnarKeyValueStorageTest extends AbstractKeyValu

  @Override
  protected KeyValueStorage createStore() throws Exception {
    return new SnappableSegmentedKeyValueStorageAdapter<>(TestSegment.FOO, createSegmentedStore());
    return new SegmentedKeyValueStorageAdapter(TestSegment.FOO, createSegmentedStore());
  }
}

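The tests above rely on SegmentedKeyValueStorageAdapter to expose one segment of a segmented store as a plain KeyValueStorage. A round-trip sketch, assuming the SegmentedInMemoryKeyValueStorage introduced by this commit has a no-arg constructor (the key bytes are illustrative):

import java.nio.charset.StandardCharsets;
import java.util.Optional;

import org.hyperledger.besu.plugin.services.storage.KeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.KeyValueStorageTransaction;
import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier;
import org.hyperledger.besu.services.kvstore.SegmentedInMemoryKeyValueStorage;
import org.hyperledger.besu.services.kvstore.SegmentedKeyValueStorageAdapter;

class AdapterSketch {
  static Optional<byte[]> roundTrip(final SegmentIdentifier segment) {
    // the segmented store holds all segments; the adapter pins one of them
    final SegmentedInMemoryKeyValueStorage segmented =
        new SegmentedInMemoryKeyValueStorage(); // assumed constructor
    final KeyValueStorage single = new SegmentedKeyValueStorageAdapter(segment, segmented);

    final KeyValueStorageTransaction tx = single.startTransaction();
    tx.put("key".getBytes(StandardCharsets.UTF_8), "value".getBytes(StandardCharsets.UTF_8));
    tx.commit();

    return single.get("key".getBytes(StandardCharsets.UTF_8));
  }
}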
@@ -17,10 +17,9 @@ package org.hyperledger.besu.plugin.services.storage.rocksdb.segmented;
import org.hyperledger.besu.metrics.noop.NoOpMetricsSystem;
import org.hyperledger.besu.plugin.services.MetricsSystem;
import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier;
import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.rocksdb.RocksDBMetricsFactory;
import org.hyperledger.besu.plugin.services.storage.rocksdb.RocksDbSegmentIdentifier;
import org.hyperledger.besu.plugin.services.storage.rocksdb.configuration.RocksDBConfigurationBuilder;
import org.hyperledger.besu.services.kvstore.SegmentedKeyValueStorage;

import java.nio.file.Path;
import java.util.Arrays;
@@ -34,8 +33,7 @@ public class TransactionDBRocksDBColumnarKeyValueStorageTest
    extends RocksDBColumnarKeyValueStorageTest {

  @Override
  protected SegmentedKeyValueStorage<RocksDbSegmentIdentifier> createSegmentedStore()
      throws Exception {
  protected SegmentedKeyValueStorage createSegmentedStore() throws Exception {
    return new TransactionDBRocksDBColumnarKeyValueStorage(
        new RocksDBConfigurationBuilder().databaseDir(getTempSubFolder(folder)).build(),
        Arrays.asList(TestSegment.FOO, TestSegment.BAR),
@@ -45,7 +43,7 @@ public class TransactionDBRocksDBColumnarKeyValueStorageTest
  }

  @Override
  protected SegmentedKeyValueStorage<RocksDbSegmentIdentifier> createSegmentedStore(
  protected SegmentedKeyValueStorage createSegmentedStore(
      final Path path,
      final List<SegmentIdentifier> segments,
      final List<SegmentIdentifier> ignorableSegments) {
@@ -58,7 +56,7 @@ public class TransactionDBRocksDBColumnarKeyValueStorageTest
  }

  @Override
  protected SegmentedKeyValueStorage<RocksDbSegmentIdentifier> createSegmentedStore(
  protected SegmentedKeyValueStorage createSegmentedStore(
      final Path path,
      final MetricsSystem metricsSystem,
      final List<SegmentIdentifier> segments,

@@ -14,235 +14,84 @@
*/
package org.hyperledger.besu.services.kvstore;

import static java.util.stream.Collectors.toUnmodifiableSet;

import org.hyperledger.besu.plugin.services.exception.StorageException;
import org.hyperledger.besu.plugin.services.storage.KeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.KeyValueStorageTransaction;
import org.hyperledger.besu.plugin.services.storage.SnappableKeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.SnappedKeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier;

import java.io.PrintStream;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.Predicate;
import java.util.stream.Stream;

import com.google.common.collect.ImmutableSet;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.tuweni.bytes.Bytes;

/** The In memory key value storage. */
public class InMemoryKeyValueStorage
implements SnappedKeyValueStorage, SnappableKeyValueStorage, KeyValueStorage {
/**
* InMemoryKeyValueStorage is just a wrapper around a single segment instance of
* SegmentedInMemoryKeyValueStorage.
*/
public class InMemoryKeyValueStorage extends SegmentedKeyValueStorageAdapter {

/** protected access for the backing hash map. */
protected final Map<Bytes, Optional<byte[]>> hashValueStore;
private static final SegmentIdentifier SEGMENT_IDENTIFIER =
new SegmentIdentifier() {
private static final String NAME = "SEGMENT_IDENTIFIER";

@Override
public String getName() {
return NAME;
}

@Override
public byte[] getId() {
return NAME.getBytes(StandardCharsets.UTF_8);
}

@Override
public boolean containsStaticData() {
return false;
}
};

private static Map<SegmentIdentifier, Map<Bytes, Optional<byte[]>>> asSegmentMap(
final Map<Bytes, Optional<byte[]>> initialMap) {
final Map<SegmentIdentifier, Map<Bytes, Optional<byte[]>>> segmentMap = new HashMap<>();
segmentMap.put(SEGMENT_IDENTIFIER, initialMap);
return segmentMap;
}

/** protected access to the rw lock. */
protected final ReadWriteLock rwLock = new ReentrantReadWriteLock();
protected final ReadWriteLock rwLock;

/** Instantiates a new In memory key value storage. */
public InMemoryKeyValueStorage() {
this(new HashMap<>());
this(SEGMENT_IDENTIFIER);
}

/**
* Instantiates a new In memory key value storage.
* Instantiates a new In memory key value storage with an initial map.
*
* @param hashValueStore the hash value store
* @param initialMap the initial map
*/
protected InMemoryKeyValueStorage(final Map<Bytes, Optional<byte[]>> hashValueStore) {
this.hashValueStore = hashValueStore;
}

@Override
public void clear() {
final Lock lock = rwLock.writeLock();
lock.lock();
try {
hashValueStore.clear();
} finally {
lock.unlock();
}
}

@Override
public boolean containsKey(final byte[] key) throws StorageException {
return get(key).isPresent();
}

@Override
public Optional<byte[]> get(final byte[] key) throws StorageException {
final Lock lock = rwLock.readLock();
lock.lock();
try {
return hashValueStore.getOrDefault(Bytes.wrap(key), Optional.empty());
} finally {
lock.unlock();
}
}

@Override
public Set<byte[]> getAllKeysThat(final Predicate<byte[]> returnCondition) {
return stream()
.filter(pair -> returnCondition.test(pair.getKey()))
.map(Pair::getKey)
.collect(toUnmodifiableSet());
}

@Override
public Set<byte[]> getAllValuesFromKeysThat(final Predicate<byte[]> returnCondition) {
return stream()
.filter(pair -> returnCondition.test(pair.getKey()))
.map(Pair::getValue)
.collect(toUnmodifiableSet());
}

@Override
public Stream<Pair<byte[], byte[]>> stream() {
final Lock lock = rwLock.readLock();
lock.lock();
try {
return ImmutableSet.copyOf(hashValueStore.entrySet()).stream()
.filter(bytesEntry -> bytesEntry.getValue().isPresent())
.map(
bytesEntry ->
Pair.of(bytesEntry.getKey().toArrayUnsafe(), bytesEntry.getValue().get()));
} finally {
lock.unlock();
}
}

@Override
public Stream<Pair<byte[], byte[]>> streamFromKey(final byte[] startKey) {
return stream().filter(e -> Bytes.wrap(startKey).compareTo(Bytes.wrap(e.getKey())) <= 0);
}

@Override
public Stream<byte[]> streamKeys() {
final Lock lock = rwLock.readLock();
lock.lock();
try {
return ImmutableSet.copyOf(hashValueStore.entrySet()).stream()
.map(bytesEntry -> bytesEntry.getKey().toArrayUnsafe());
} finally {
lock.unlock();
}
}

@Override
public boolean tryDelete(final byte[] key) {
final Lock lock = rwLock.writeLock();
if (lock.tryLock()) {
try {
hashValueStore.remove(Bytes.wrap(key));
} finally {
lock.unlock();
}
return true;
}
return false;
}

@Override
public void close() {}

@Override
public KeyValueStorageTransaction startTransaction() {
return new KeyValueStorageTransactionTransitionValidatorDecorator(new InMemoryTransaction());
}

@Override
public boolean isClosed() {
return false;
public InMemoryKeyValueStorage(final Map<Bytes, Optional<byte[]>> initialMap) {
super(SEGMENT_IDENTIFIER, new SegmentedInMemoryKeyValueStorage(asSegmentMap(initialMap)));
rwLock = ((SegmentedInMemoryKeyValueStorage) storage).rwLock;
}

/**
* Key set.
* Instantiates a new In memory key value storage with a single segment identifier.
*
* @return the set of keys
* @param segmentIdentifier the segment identifier
*/
public Set<Bytes> keySet() {
return Set.copyOf(hashValueStore.keySet());
}

@Override
public SnappedKeyValueStorage takeSnapshot() {
return new InMemoryKeyValueStorage(new HashMap<>(hashValueStore));
}

@Override
public KeyValueStorageTransaction getSnapshotTransaction() {
return startTransaction();
}

/** In memory transaction. */
public class InMemoryTransaction implements KeyValueStorageTransaction {

/** protected access to updatedValues map for the transaction. */
protected Map<Bytes, Optional<byte[]>> updatedValues = new HashMap<>();
/** protected access to deletedValues set for the transaction. */
protected Set<Bytes> removedKeys = new HashSet<>();

@Override
public void put(final byte[] key, final byte[] value) {
updatedValues.put(Bytes.wrap(key), Optional.of(value));
removedKeys.remove(Bytes.wrap(key));
}

@Override
public void remove(final byte[] key) {
removedKeys.add(Bytes.wrap(key));
updatedValues.remove(Bytes.wrap(key));
}

@Override
public void commit() throws StorageException {
final Lock lock = rwLock.writeLock();
lock.lock();
try {
hashValueStore.putAll(updatedValues);
removedKeys.forEach(hashValueStore::remove);
updatedValues = null;
removedKeys = null;
} finally {
lock.unlock();
}
}

@Override
public void rollback() {
updatedValues.clear();
removedKeys.clear();
}
public InMemoryKeyValueStorage(final SegmentIdentifier segmentIdentifier) {
super(segmentIdentifier, new SegmentedInMemoryKeyValueStorage());
rwLock = ((SegmentedInMemoryKeyValueStorage) storage).rwLock;
}

/**
* Dump.
* Dump the contents of the storage to the print stream.
*
* @param ps the PrintStream where to report the dump
* @param ps the print stream.
*/
public void dump(final PrintStream ps) {
final Lock lock = rwLock.readLock();
lock.lock();
try {
ImmutableSet.copyOf(hashValueStore.entrySet()).stream()
.filter(bytesEntry -> bytesEntry.getValue().isPresent())
.forEach(
entry ->
ps.printf(
" %s : %s%n",
entry.getKey().toHexString(),
Bytes.wrap(entry.getValue().get()).toHexString()));
} finally {
lock.unlock();
}
((SegmentedInMemoryKeyValueStorage) storage).dump(ps);
}
}
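After this rewrite, InMemoryKeyValueStorage keeps its public KeyValueStorage behaviour but delegates everything to a one-segment SegmentedInMemoryKeyValueStorage behind an anonymous SEGMENT_IDENTIFIER. A minimal usage sketch, using only the plugin-api calls visible in the diff:

    import java.nio.charset.StandardCharsets;
    import org.hyperledger.besu.plugin.services.storage.KeyValueStorage;
    import org.hyperledger.besu.plugin.services.storage.KeyValueStorageTransaction;
    import org.hyperledger.besu.services.kvstore.InMemoryKeyValueStorage;

    class WrapperSketch {
      static void demo() {
        final KeyValueStorage store = new InMemoryKeyValueStorage();
        final KeyValueStorageTransaction tx = store.startTransaction();
        tx.put("key".getBytes(StandardCharsets.UTF_8), "value".getBytes(StandardCharsets.UTF_8));
        tx.commit(); // lands in the wrapper's single anonymous segment
        // get(...) routes through the same segment, so the value is visible here
        store.get("key".getBytes(StandardCharsets.UTF_8))
            .ifPresent(v -> System.out.println(new String(v, StandardCharsets.UTF_8)));
      }
    }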
@@ -23,8 +23,10 @@ import org.hyperledger.besu.plugin.services.exception.StorageException;
import org.hyperledger.besu.plugin.services.storage.KeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.KeyValueStorageFactory;
import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier;
import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorage;

import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.slf4j.Logger;
@@ -35,8 +37,8 @@ public class InMemoryStoragePlugin implements BesuPlugin {

private static final Logger LOG = LoggerFactory.getLogger(InMemoryStoragePlugin.class);
private BesuContext context;
private MemoryKeyValueStorageFactory factory;
private MemoryKeyValueStorageFactory privacyFactory;
private InMemoryKeyValueStorageFactory factory;
private InMemoryKeyValueStorageFactory privacyFactory;

@Override
public void register(final BesuContext context) {
@@ -73,8 +75,8 @@ public class InMemoryStoragePlugin implements BesuPlugin {

private void createAndRegister(final StorageService service) {

factory = new MemoryKeyValueStorageFactory("memory");
privacyFactory = new MemoryKeyValueStorageFactory("memory-privacy");
factory = new InMemoryKeyValueStorageFactory("memory");
privacyFactory = new InMemoryKeyValueStorageFactory("memory-privacy");

service.registerKeyValueStorage(factory);
service.registerKeyValueStorage(privacyFactory);
@@ -89,17 +91,18 @@ public class InMemoryStoragePlugin implements BesuPlugin {
}

/** The Memory key value storage factory. */
public static class MemoryKeyValueStorageFactory implements KeyValueStorageFactory {
public static class InMemoryKeyValueStorageFactory implements KeyValueStorageFactory {

private final String name;
private final Map<SegmentIdentifier, InMemoryKeyValueStorage> storageMap = new HashMap<>();
private final Map<List<SegmentIdentifier>, SegmentedInMemoryKeyValueStorage> storageMap =
new HashMap<>();

/**
* Instantiates a new Memory key value storage factory.
*
* @param name the name
*/
public MemoryKeyValueStorageFactory(final String name) {
public InMemoryKeyValueStorageFactory(final String name) {
this.name = name;
}

@@ -114,7 +117,21 @@ public class InMemoryStoragePlugin implements BesuPlugin {
final BesuConfiguration configuration,
final MetricsSystem metricsSystem)
throws StorageException {
return storageMap.computeIfAbsent(segment, __ -> new InMemoryKeyValueStorage());
var kvStorage =
storageMap.computeIfAbsent(
List.of(segment), seg -> new SegmentedInMemoryKeyValueStorage(seg));
return new SegmentedKeyValueStorageAdapter(segment, kvStorage);
}

@Override
public SegmentedKeyValueStorage create(
final List<SegmentIdentifier> segments,
final BesuConfiguration configuration,
final MetricsSystem metricsSystem)
throws StorageException {
var kvStorage =
storageMap.computeIfAbsent(segments, __ -> new SegmentedInMemoryKeyValueStorage());
return kvStorage;
}

@Override
@@ -122,6 +139,11 @@ public class InMemoryStoragePlugin implements BesuPlugin {
return true;
}

@Override
public boolean isSnapshotIsolationSupported() {
return true;
}

@Override
public void close() {
storageMap.clear();
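The plugin's factory now caches stores by segment list instead of by single segment, so the single-segment create() overload and the new multi-segment overload draw from one map. A sketch of the sharing this buys (a minimal illustration, assuming the factory, configuration and metrics system are supplied by the harness):

    import org.hyperledger.besu.plugin.services.BesuConfiguration;
    import org.hyperledger.besu.plugin.services.MetricsSystem;
    import org.hyperledger.besu.plugin.services.storage.KeyValueStorage;
    import org.hyperledger.besu.plugin.services.storage.KeyValueStorageFactory;
    import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier;

    class FactorySharingSketch {
      // factory is assumed to be the InMemoryKeyValueStorageFactory from the diff
      static void demo(
          final KeyValueStorageFactory factory,
          final SegmentIdentifier segment,
          final BesuConfiguration configuration,
          final MetricsSystem metricsSystem) {
        final KeyValueStorage a = factory.create(segment, configuration, metricsSystem);
        final KeyValueStorage b = factory.create(segment, configuration, metricsSystem);
        // a and b are distinct adapters over the same cached
        // SegmentedInMemoryKeyValueStorage keyed by List.of(segment),
        // so a write committed through a is readable through b.
      }
    }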
@@ -19,38 +19,45 @@ import static com.google.common.base.Preconditions.checkState;
import org.hyperledger.besu.plugin.services.exception.StorageException;
import org.hyperledger.besu.plugin.services.storage.KeyValueStorageTransaction;

/** The Key value storage transaction transition validator decorator. */
public class KeyValueStorageTransactionTransitionValidatorDecorator
implements KeyValueStorageTransaction {
import java.util.function.Supplier;

/** The Key value storage transaction validator decorator. */
public class KeyValueStorageTransactionValidatorDecorator implements KeyValueStorageTransaction {

private final KeyValueStorageTransaction transaction;
private final Supplier<Boolean> isClosed;
private boolean active = true;

/**
* Instantiates a new Key value storage transaction transition validator decorator.
*
* @param toDecorate the to decorate
* @param isClosed supplier function to determine if the storage is closed
*/
public KeyValueStorageTransactionTransitionValidatorDecorator(
final KeyValueStorageTransaction toDecorate) {
public KeyValueStorageTransactionValidatorDecorator(
final KeyValueStorageTransaction toDecorate, final Supplier<Boolean> isClosed) {
this.isClosed = isClosed;
this.transaction = toDecorate;
}

@Override
public void put(final byte[] key, final byte[] value) {
checkState(active, "Cannot invoke put() on a completed transaction.");
checkState(!isClosed.get(), "Cannot invoke put() on a closed storage.");
transaction.put(key, value);
}

@Override
public void remove(final byte[] key) {
checkState(active, "Cannot invoke remove() on a completed transaction.");
checkState(!isClosed.get(), "Cannot invoke remove() on a closed storage.");
transaction.remove(key);
}

@Override
public final void commit() throws StorageException {
checkState(active, "Cannot commit a completed transaction.");
checkState(!isClosed.get(), "Cannot invoke commit() on a closed storage.");
active = false;
transaction.commit();
}
@@ -58,6 +65,7 @@ public class KeyValueStorageTransactionTransitionValidatorDecorator
@Override
public final void rollback() {
checkState(active, "Cannot rollback a completed transaction.");
checkState(!isClosed.get(), "Cannot invoke rollback() on a closed storage.");
active = false;
transaction.rollback();
}
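The renamed decorator keeps the old completed-transaction checks and adds a closed-storage check via the new isClosed supplier. A minimal sketch of the failure mode this introduces (checkState raises IllegalStateException):

    import java.util.concurrent.atomic.AtomicBoolean;
    import org.hyperledger.besu.plugin.services.storage.KeyValueStorageTransaction;
    import org.hyperledger.besu.services.kvstore.KeyValueStorageTransactionValidatorDecorator;

    class ClosedGuardSketch {
      static void demo(final KeyValueStorageTransaction inner) {
        final AtomicBoolean closed = new AtomicBoolean(false);
        final KeyValueStorageTransaction tx =
            new KeyValueStorageTransactionValidatorDecorator(inner, closed::get);
        tx.put(new byte[] {1}, new byte[] {2}); // allowed: storage open, transaction active
        closed.set(true);
        tx.commit(); // IllegalStateException: "Cannot invoke commit() on a closed storage."
      }
    }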
@@ -16,17 +16,19 @@
package org.hyperledger.besu.services.kvstore;

import org.hyperledger.besu.plugin.services.exception.StorageException;
import org.hyperledger.besu.plugin.services.storage.KeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.KeyValueStorageTransaction;
import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier;
import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorageTransaction;
import org.hyperledger.besu.plugin.services.storage.SnappedKeyValueStorage;

import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.Lock;
import java.util.stream.Collectors;
import java.util.stream.Stream;

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Streams;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.tuweni.bytes.Bytes;
@@ -34,19 +36,19 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/** Key value storage which stores in memory all updates to a parent worldstate storage. */
public class LayeredKeyValueStorage extends InMemoryKeyValueStorage
public class LayeredKeyValueStorage extends SegmentedInMemoryKeyValueStorage
implements SnappedKeyValueStorage {

private static final Logger LOG = LoggerFactory.getLogger(LayeredKeyValueStorage.class);

private final KeyValueStorage parent;
private final SegmentedKeyValueStorage parent;

/**
* Instantiates a new Layered key value storage.
*
* @param parent the parent key value storage for this layered storage.
*/
public LayeredKeyValueStorage(final KeyValueStorage parent) {
public LayeredKeyValueStorage(final SegmentedKeyValueStorage parent) {
this(new ConcurrentHashMap<>(), parent);
}

@@ -57,27 +59,31 @@ public class LayeredKeyValueStorage extends InMemoryKeyValueStorage
* @param parent the parent key value storage for this layered storage.
*/
public LayeredKeyValueStorage(
final Map<Bytes, Optional<byte[]>> map, final KeyValueStorage parent) {
final Map<SegmentIdentifier, Map<Bytes, Optional<byte[]>>> map,
final SegmentedKeyValueStorage parent) {
super(map);
this.parent = parent;
}

@Override
public boolean containsKey(final byte[] key) throws StorageException {
return get(key).isPresent();
public boolean containsKey(final SegmentIdentifier segmentId, final byte[] key)
throws StorageException {
return get(segmentId, key).isPresent();
}

@Override
public Optional<byte[]> get(final byte[] key) throws StorageException {
public Optional<byte[]> get(final SegmentIdentifier segmentId, final byte[] key)
throws StorageException {
throwIfClosed();

final Lock lock = rwLock.readLock();
lock.lock();
try {
Bytes wrapKey = Bytes.wrap(key);
final Optional<byte[]> foundKey = hashValueStore.get(wrapKey);
final Optional<byte[]> foundKey =
hashValueStore.computeIfAbsent(segmentId, __ -> new HashMap<>()).get(wrapKey);
if (foundKey == null) {
return parent.get(key);
return parent.get(segmentId, key);
} else {
return foundKey;
}
@@ -87,14 +93,17 @@ public class LayeredKeyValueStorage extends InMemoryKeyValueStorage
}

@Override
public Stream<Pair<byte[], byte[]>> stream() {
public Stream<Pair<byte[], byte[]>> stream(final SegmentIdentifier segmentId) {
throwIfClosed();

final Lock lock = rwLock.readLock();
lock.lock();
try {
// immutable copy of our in memory store to use for streaming and filtering:
Map<Bytes, Optional<byte[]>> ourLayerState = ImmutableMap.copyOf(hashValueStore);
// copy of our in memory store to use for streaming and filtering:
var ourLayerState =
Optional.ofNullable(hashValueStore.get(segmentId))
.map(HashMap::new)
.orElse(new HashMap<>());

return Streams.concat(
ourLayerState.entrySet().stream()
@@ -104,26 +113,31 @@ public class LayeredKeyValueStorage extends InMemoryKeyValueStorage
Pair.of(bytesEntry.getKey().toArrayUnsafe(), bytesEntry.getValue().get()))
// since we are layered, concat a parent stream filtered by our map entries:
,
parent.stream().filter(e -> !ourLayerState.containsKey(Bytes.of(e.getLeft()))));
parent.stream(segmentId).filter(e -> !ourLayerState.containsKey(Bytes.of(e.getLeft()))));
} finally {
lock.unlock();
}
}

@Override
public Stream<Pair<byte[], byte[]>> streamFromKey(final byte[] startKey) {
return stream().filter(e -> Bytes.wrap(startKey).compareTo(Bytes.wrap(e.getKey())) <= 0);
public Stream<Pair<byte[], byte[]>> streamFromKey(
final SegmentIdentifier segmentId, final byte[] startKey) {
return stream(segmentId)
.filter(e -> Bytes.wrap(startKey).compareTo(Bytes.wrap(e.getKey())) <= 0);
}

@Override
public Stream<byte[]> streamKeys() {
public Stream<byte[]> streamKeys(final SegmentIdentifier segmentId) {
throwIfClosed();

final Lock lock = rwLock.readLock();
lock.lock();
try {
// immutable copy of our in memory store to use for streaming and filtering:
Map<Bytes, Optional<byte[]>> ourLayerState = ImmutableMap.copyOf(hashValueStore);
// copy of our in memory store to use for streaming and filtering:
var ourLayerState =
Optional.ofNullable(hashValueStore.get(segmentId))
.map(HashMap::new)
.orElse(new HashMap<>());

return Streams.concat(
ourLayerState.entrySet().stream()
@@ -131,7 +145,7 @@ public class LayeredKeyValueStorage extends InMemoryKeyValueStorage
.map(bytesEntry -> bytesEntry.getKey().toArrayUnsafe())
// since we are layered, concat a parent stream filtered by our map entries:
,
parent.streamKeys().filter(e -> !ourLayerState.containsKey(Bytes.of(e))));
parent.streamKeys(segmentId).filter(e -> !ourLayerState.containsKey(Bytes.of(e))));

} finally {
lock.unlock();
@@ -139,33 +153,50 @@ public class LayeredKeyValueStorage extends InMemoryKeyValueStorage
}

@Override
public boolean tryDelete(final byte[] key) {
hashValueStore.put(Bytes.wrap(key), Optional.empty());
public boolean tryDelete(final SegmentIdentifier segmentId, final byte[] key) {
hashValueStore
.computeIfAbsent(segmentId, __ -> new HashMap<>())
.put(Bytes.wrap(key), Optional.empty());
return true;
}

@Override
public KeyValueStorageTransaction startTransaction() {
public SegmentedKeyValueStorageTransaction startTransaction() {
throwIfClosed();

return new KeyValueStorageTransactionTransitionValidatorDecorator(
new InMemoryTransaction() {
return new SegmentedKeyValueStorageTransactionValidatorDecorator(
new SegmentedInMemoryTransaction() {
@Override
public void commit() throws StorageException {

final Lock lock = rwLock.writeLock();
lock.lock();
try {
hashValueStore.putAll(updatedValues);
removedKeys.forEach(key -> hashValueStore.put(key, Optional.empty()));
// put empty and not removed to not ask parent in case of deletion
updatedValues = null;
removedKeys = null;
updatedValues.entrySet().stream()
.forEach(
entry ->
hashValueStore
.computeIfAbsent(entry.getKey(), __ -> new HashMap<>())
.putAll(entry.getValue()));

// put empty rather than remove in order to not ask parent in case of deletion
removedKeys.entrySet().stream()
.forEach(
segmentEntry ->
hashValueStore
.computeIfAbsent(segmentEntry.getKey(), __ -> new HashMap<>())
.putAll(
segmentEntry.getValue().stream()
.collect(
Collectors.toMap(key -> key, __ -> Optional.empty()))));

updatedValues.clear();
removedKeys.clear();
} finally {
lock.unlock();
}
}
});
},
this::isClosed);
}

@Override
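LayeredKeyValueStorage now layers per-segment maps over a segmented parent: a read falls through to the parent only when the layer has no entry, and a committed remove stores Optional.empty() in the layer so a deletion masks the parent's value without ever consulting it. A minimal sketch of those semantics (any SegmentIdentifier instance will do):

    import java.nio.charset.StandardCharsets;
    import java.util.List;
    import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier;
    import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorageTransaction;
    import org.hyperledger.besu.services.kvstore.LayeredKeyValueStorage;
    import org.hyperledger.besu.services.kvstore.SegmentedInMemoryKeyValueStorage;

    class LayeredSketch {
      static void demo(final SegmentIdentifier segment) {
        final var parent = new SegmentedInMemoryKeyValueStorage(List.of(segment));
        final byte[] key = "k".getBytes(StandardCharsets.UTF_8);
        final SegmentedKeyValueStorageTransaction parentTx = parent.startTransaction();
        parentTx.put(segment, key, "parent-value".getBytes(StandardCharsets.UTF_8));
        parentTx.commit();

        final var layer = new LayeredKeyValueStorage(parent);
        layer.get(segment, key); // layer has no entry: falls through to "parent-value"

        final SegmentedKeyValueStorageTransaction tx = layer.startTransaction();
        tx.remove(segment, key);
        tx.commit(); // records Optional.empty() in the layer
        layer.get(segment, key); // now empty; the parent value stays masked
      }
    }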
@@ -146,7 +146,8 @@ public class LimitedInMemoryKeyValueStorage implements KeyValueStorage {

@Override
public KeyValueStorageTransaction startTransaction() throws StorageException {
return new KeyValueStorageTransactionTransitionValidatorDecorator(new MemoryTransaction());
return new KeyValueStorageTransactionValidatorDecorator(
new MemoryTransaction(), this::isClosed);
}

@Override
@@ -0,0 +1,301 @@
/*
* Copyright Hyperledger Besu Contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.services.kvstore;

import static java.util.stream.Collectors.toUnmodifiableSet;

import org.hyperledger.besu.plugin.services.exception.StorageException;
import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier;
import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorageTransaction;
import org.hyperledger.besu.plugin.services.storage.SnappableKeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.SnappedKeyValueStorage;

import java.io.PrintStream;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import java.util.stream.Stream;

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.tuweni.bytes.Bytes;

/** Segmented in memory key value storage. */
public class SegmentedInMemoryKeyValueStorage
implements SnappedKeyValueStorage, SnappableKeyValueStorage, SegmentedKeyValueStorage {
/** protected access for the backing hash map. */
final Map<SegmentIdentifier, Map<Bytes, Optional<byte[]>>> hashValueStore;

/** protected access to the rw lock. */
protected final ReadWriteLock rwLock = new ReentrantReadWriteLock();

/** Instantiates a new In memory key value storage. */
public SegmentedInMemoryKeyValueStorage() {
this(new HashMap<>());
}

/**
* Instantiates a new In memory key value storage.
*
* @param hashValueStore the hash value store
*/
protected SegmentedInMemoryKeyValueStorage(
final Map<SegmentIdentifier, Map<Bytes, Optional<byte[]>>> hashValueStore) {
this.hashValueStore = hashValueStore;
}

/**
* Instantiates a new In memory key value storage with specific set of segments.
*
* @param segments the segments to be used
*/
public SegmentedInMemoryKeyValueStorage(final List<SegmentIdentifier> segments) {
this(
segments.stream()
.collect(
Collectors
.<SegmentIdentifier, SegmentIdentifier, Map<Bytes, Optional<byte[]>>>toMap(
s -> s, s -> new HashMap<>())));
}

@Override
public void clear(final SegmentIdentifier segmentIdentifier) {
final Lock lock = rwLock.writeLock();
lock.lock();
try {
Optional.ofNullable(hashValueStore.get(segmentIdentifier)).ifPresent(Map::clear);
} finally {
lock.unlock();
}
}

@Override
public boolean containsKey(final SegmentIdentifier segmentIdentifier, final byte[] key)
throws StorageException {
return get(segmentIdentifier, key).isPresent();
}

@Override
public Optional<byte[]> get(final SegmentIdentifier segmentIdentifier, final byte[] key)
throws StorageException {
final Lock lock = rwLock.readLock();
lock.lock();
try {
return hashValueStore
.computeIfAbsent(segmentIdentifier, s -> new HashMap<>())
.getOrDefault(Bytes.wrap(key), Optional.empty());
} finally {
lock.unlock();
}
}

@Override
public Set<byte[]> getAllKeysThat(
final SegmentIdentifier segmentIdentifier, final Predicate<byte[]> returnCondition) {
return stream(segmentIdentifier)
.filter(pair -> returnCondition.test(pair.getKey()))
.map(Pair::getKey)
.collect(toUnmodifiableSet());
}

@Override
public Set<byte[]> getAllValuesFromKeysThat(
final SegmentIdentifier segmentIdentifier, final Predicate<byte[]> returnCondition) {
return stream(segmentIdentifier)
.filter(pair -> returnCondition.test(pair.getKey()))
.map(Pair::getValue)
.collect(toUnmodifiableSet());
}

@Override
public Stream<Pair<byte[], byte[]>> stream(final SegmentIdentifier segmentIdentifier) {
final Lock lock = rwLock.readLock();
lock.lock();
try {
return ImmutableSet.copyOf(
hashValueStore.computeIfAbsent(segmentIdentifier, s -> new HashMap<>()).entrySet())
.stream()
.filter(bytesEntry -> bytesEntry.getValue().isPresent())
.map(
bytesEntry ->
Pair.of(bytesEntry.getKey().toArrayUnsafe(), bytesEntry.getValue().get()));
} finally {
lock.unlock();
}
}

@Override
public Stream<Pair<byte[], byte[]>> streamFromKey(
final SegmentIdentifier segmentIdentifier, final byte[] startKey) {
return stream(segmentIdentifier)
.filter(e -> Bytes.wrap(startKey).compareTo(Bytes.wrap(e.getKey())) <= 0);
}

@Override
public Stream<byte[]> streamKeys(final SegmentIdentifier segmentIdentifier) {
final Lock lock = rwLock.readLock();
lock.lock();
try {
return ImmutableMap.copyOf(
hashValueStore.computeIfAbsent(segmentIdentifier, s -> new HashMap<>()))
.entrySet()
.stream()
.filter(bytesEntry -> bytesEntry.getValue().isPresent())
.map(bytesEntry -> bytesEntry.getKey().toArrayUnsafe());
} finally {
lock.unlock();
}
}

@Override
public boolean tryDelete(final SegmentIdentifier segmentIdentifier, final byte[] key) {
final Lock lock = rwLock.writeLock();
if (lock.tryLock()) {
try {
Optional.ofNullable(hashValueStore.get(segmentIdentifier))
.ifPresent(store -> store.remove(Bytes.wrap(key)));
} finally {
lock.unlock();
}
return true;
}
return false;
}

@Override
public void close() {}

@Override
public SegmentedKeyValueStorageTransaction startTransaction() {
return new SegmentedKeyValueStorageTransactionValidatorDecorator(
new SegmentedInMemoryTransaction(), this::isClosed);
}

@Override
public boolean isClosed() {
return false;
}

@Override
public SegmentedInMemoryKeyValueStorage takeSnapshot() {
// need to clone the submaps also:
return new SegmentedInMemoryKeyValueStorage(
hashValueStore.entrySet().stream()
.collect(Collectors.toMap(Map.Entry::getKey, e -> new HashMap<>(e.getValue()))));
}

@Override
public SegmentedKeyValueStorageTransaction getSnapshotTransaction() {
return startTransaction();
}

/** In memory transaction. */
public class SegmentedInMemoryTransaction implements SegmentedKeyValueStorageTransaction {

/** protected access to updatedValues map for the transaction. */
protected Map<SegmentIdentifier, Map<Bytes, Optional<byte[]>>> updatedValues = new HashMap<>();
/** protected access to deletedValues set for the transaction. */
protected Map<SegmentIdentifier, Set<Bytes>> removedKeys = new HashMap<>();

@Override
public void put(
final SegmentIdentifier segmentIdentifier, final byte[] key, final byte[] value) {
updatedValues
.computeIfAbsent(segmentIdentifier, __ -> new HashMap<>())
.put(Bytes.wrap(key), Optional.of(value));
removedKeys.computeIfAbsent(segmentIdentifier, __ -> new HashSet<>()).remove(Bytes.wrap(key));
}

@Override
public void remove(final SegmentIdentifier segmentIdentifier, final byte[] key) {
removedKeys.computeIfAbsent(segmentIdentifier, __ -> new HashSet<>()).add(Bytes.wrap(key));
updatedValues
.computeIfAbsent(segmentIdentifier, __ -> new HashMap<>())
.remove(Bytes.wrap(key));
}

@Override
public void commit() throws StorageException {
final Lock lock = rwLock.writeLock();
lock.lock();
try {
updatedValues.entrySet().stream()
.forEach(
entry ->
hashValueStore
.computeIfAbsent(entry.getKey(), __ -> new HashMap<>())
.putAll(entry.getValue()));

removedKeys.entrySet().stream()
.forEach(
entry -> {
var keyset =
hashValueStore
.computeIfAbsent(entry.getKey(), __ -> new HashMap<>())
.keySet();
keyset.removeAll(entry.getValue());
});

updatedValues.clear();
removedKeys.clear();
} finally {
lock.unlock();
}
}

@Override
public void rollback() {
updatedValues.clear();
removedKeys.clear();
}
}

/**
* Dump the content of the store to the provided PrintStream.
*
* @param ps the PrintStream to dump the content to.
*/
public void dump(final PrintStream ps) {
final Lock lock = rwLock.readLock();
lock.lock();
try {
ImmutableSet.copyOf(hashValueStore.entrySet()).stream()
.forEach(
map -> {
ps.println("Segment: " + map.getKey().getName());
map.getValue().entrySet().stream()
.filter(bytesEntry -> bytesEntry.getValue().isPresent())
.forEach(
entry ->
ps.printf(
" %s : %s%n",
entry.getKey().toHexString(),
Bytes.wrap(entry.getValue().get()).toHexString()));
});
} finally {
lock.unlock();
}
}
}
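This new class is the heart of the commit: one HashMap per segment behind a shared ReentrantReadWriteLock, transactions that batch per-segment puts and removes, and snapshots that deep-copy every segment submap. A minimal sketch with two segments:

    import java.nio.charset.StandardCharsets;
    import java.util.List;
    import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier;
    import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorageTransaction;
    import org.hyperledger.besu.services.kvstore.SegmentedInMemoryKeyValueStorage;

    class SegmentedSketch {
      static void demo(final SegmentIdentifier blockchain, final SegmentIdentifier variables) {
        final var store = new SegmentedInMemoryKeyValueStorage(List.of(blockchain, variables));
        final byte[] key = "k".getBytes(StandardCharsets.UTF_8);
        final SegmentedKeyValueStorageTransaction tx = store.startTransaction();
        tx.put(blockchain, key, new byte[] {1}); // same key...
        tx.put(variables, key, new byte[] {2});  // ...isolated per segment
        tx.commit();

        final var snapshot = store.takeSnapshot(); // clones each segment's submap
        final SegmentedKeyValueStorageTransaction tx2 = store.startTransaction();
        tx2.remove(blockchain, key);
        tx2.commit();
        // store.get(blockchain, key) is now empty,
        // while snapshot.get(blockchain, key) still holds {1}
      }
    }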
@@ -18,6 +18,8 @@ import org.hyperledger.besu.plugin.services.exception.StorageException;
import org.hyperledger.besu.plugin.services.storage.KeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.KeyValueStorageTransaction;
import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier;
import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorageTransaction;

import java.io.IOException;
import java.util.Optional;
@@ -29,80 +31,77 @@ import org.apache.commons.lang3.tuple.Pair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
* The type Segmented key value storage adapter.
*
* @param <S> the type parameter
*/
public class SegmentedKeyValueStorageAdapter<S> implements KeyValueStorage {
/** This class will adapt a SegmentedKeyValueStorage to a KeyValueStorage instance. */
public class SegmentedKeyValueStorageAdapter implements KeyValueStorage {

private static final Logger LOG = LoggerFactory.getLogger(SegmentedKeyValueStorageAdapter.class);
private final S segmentHandle;
private final SegmentedKeyValueStorage<S> storage;
private final SegmentIdentifier segmentIdentifier;
/** The storage to wrap. */
protected final SegmentedKeyValueStorage storage;

/**
* Instantiates a new Segmented key value storage adapter.
* Instantiates a new Segmented key value storage adapter for a single segment.
*
* @param segment the segment
* @param segmentIdentifier the segmentIdentifier to wrap as a KeyValueStorage
* @param storage the storage
*/
public SegmentedKeyValueStorageAdapter(
final SegmentIdentifier segment, final SegmentedKeyValueStorage<S> storage) {
segmentHandle = storage.getSegmentIdentifierByName(segment);
final SegmentIdentifier segmentIdentifier, final SegmentedKeyValueStorage storage) {
this.segmentIdentifier = segmentIdentifier;
this.storage = storage;
}

@Override
public void clear() {
throwIfClosed();
storage.clear(segmentHandle);
storage.clear(segmentIdentifier);
}

@Override
public boolean containsKey(final byte[] key) throws StorageException {
throwIfClosed();
return storage.containsKey(segmentHandle, key);
return storage.containsKey(segmentIdentifier, key);
}

@Override
public Optional<byte[]> get(final byte[] key) throws StorageException {
throwIfClosed();
return storage.get(segmentHandle, key);
return storage.get(segmentIdentifier, key);
}

@Override
public Set<byte[]> getAllKeysThat(final Predicate<byte[]> returnCondition) {
throwIfClosed();
return storage.getAllKeysThat(segmentHandle, returnCondition);
return storage.getAllKeysThat(segmentIdentifier, returnCondition);
}

@Override
public Set<byte[]> getAllValuesFromKeysThat(final Predicate<byte[]> returnCondition) {
throwIfClosed();
return storage.getAllValuesFromKeysThat(segmentHandle, returnCondition);
return storage.getAllValuesFromKeysThat(segmentIdentifier, returnCondition);
}

@Override
public Stream<Pair<byte[], byte[]>> stream() {
throwIfClosed();
return storage.stream(segmentHandle);
return storage.stream(segmentIdentifier);
}

@Override
public Stream<Pair<byte[], byte[]>> streamFromKey(final byte[] startKey) throws StorageException {
return storage.streamFromKey(segmentHandle, startKey);
return storage.streamFromKey(segmentIdentifier, startKey);
}

@Override
public Stream<byte[]> streamKeys() {
throwIfClosed();
return storage.streamKeys(segmentHandle);
return storage.streamKeys(segmentIdentifier);
}

@Override
public boolean tryDelete(final byte[] key) {
throwIfClosed();
return storage.tryDelete(segmentHandle, key);
return storage.tryDelete(segmentIdentifier, key);
}

@Override
@@ -112,33 +111,7 @@ public class SegmentedKeyValueStorageAdapter<S> implements KeyValueStorage {

@Override
public KeyValueStorageTransaction startTransaction() throws StorageException {
final SegmentedKeyValueStorage.Transaction<S> transaction = storage.startTransaction();
return new KeyValueStorageTransaction() {

@Override
public void put(final byte[] key, final byte[] value) {
throwIfClosed();
transaction.put(segmentHandle, key, value);
}

@Override
public void remove(final byte[] key) {
throwIfClosed();
transaction.remove(segmentHandle, key);
}

@Override
public void commit() throws StorageException {
throwIfClosed();
transaction.commit();
}

@Override
public void rollback() {
throwIfClosed();
transaction.rollback();
}
};
return new KeyValueStorageTransactionAdapter(segmentIdentifier, storage);
}

@Override
@@ -152,4 +125,42 @@ public class SegmentedKeyValueStorageAdapter<S> implements KeyValueStorage {
throw new StorageException("Storage has been closed");
}
}

/** This class will adapt a SegmentedKeyValueStorageTransaction to a KeyValueStorageTransaction */
public static class KeyValueStorageTransactionAdapter implements KeyValueStorageTransaction {
private final SegmentedKeyValueStorageTransaction segmentedTransaction;
private final SegmentIdentifier segmentIdentifier;

/**
* Instantiates a new Key value storage transaction adapter.
*
* @param segmentIdentifier the segmentIdentifier to use for the wrapped transaction
* @param storage the storage
*/
public KeyValueStorageTransactionAdapter(
final SegmentIdentifier segmentIdentifier, final SegmentedKeyValueStorage storage) {
this.segmentedTransaction = storage.startTransaction();
this.segmentIdentifier = segmentIdentifier;
}

@Override
public void put(final byte[] key, final byte[] value) {
segmentedTransaction.put(segmentIdentifier, key, value);
}

@Override
public void remove(final byte[] key) {
segmentedTransaction.remove(segmentIdentifier, key);
}

@Override
public void commit() throws StorageException {
segmentedTransaction.commit();
}

@Override
public void rollback() {
segmentedTransaction.rollback();
}
}
}
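With the type parameter and segment handles gone, the adapter simply pins a SegmentIdentifier and forwards, and the new KeyValueStorageTransactionAdapter does the same per transaction. A minimal sketch of exposing one segment of a segmented store as an unsegmented KeyValueStorage:

    import java.util.List;
    import org.hyperledger.besu.plugin.services.storage.KeyValueStorage;
    import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier;
    import org.hyperledger.besu.services.kvstore.SegmentedInMemoryKeyValueStorage;
    import org.hyperledger.besu.services.kvstore.SegmentedKeyValueStorageAdapter;

    class AdapterSketch {
      static KeyValueStorage singleSegmentView(final SegmentIdentifier segment) {
        final var segmented = new SegmentedInMemoryKeyValueStorage(List.of(segment));
        // every get/put/stream on the returned view is routed to `segment`
        return new SegmentedKeyValueStorageAdapter(segment, segmented);
      }
    }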
@@ -1,5 +1,5 @@
/*
* Copyright ConsenSys AG.
* Copyright Hyperledger Besu Contributors.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
@@ -12,51 +12,49 @@
*
* SPDX-License-Identifier: Apache-2.0
*/

package org.hyperledger.besu.services.kvstore;

import static com.google.common.base.Preconditions.checkState;

import org.hyperledger.besu.plugin.services.exception.StorageException;
import org.hyperledger.besu.services.kvstore.SegmentedKeyValueStorage.Transaction;
import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier;
import org.hyperledger.besu.plugin.services.storage.SegmentedKeyValueStorageTransaction;

import java.util.function.Supplier;

/**
* The Segmented key value storage transaction transition validator decorator.
*
* @param <S> the type parameter
*/
public class SegmentedKeyValueStorageTransactionTransitionValidatorDecorator<S>
implements Transaction<S> {
/** The Key value storage transaction validator decorator. */
public class SegmentedKeyValueStorageTransactionValidatorDecorator
implements SegmentedKeyValueStorageTransaction {

private final Transaction<S> transaction;
private final SegmentedKeyValueStorageTransaction transaction;
private final Supplier<Boolean> isClosed;
private boolean active = true;

/**
* Instantiates a new Segmented key value storage transaction transition validator decorator.
* Instantiates a new Key value storage transaction transition validator decorator.
*
* @param toDecorate the to decorate
* @param isClosed supplier that returns true if the storage is closed
* @param isClosed supplier function to determine if the storage is closed
*/
public SegmentedKeyValueStorageTransactionTransitionValidatorDecorator(
final Transaction<S> toDecorate, final Supplier<Boolean> isClosed) {
this.transaction = toDecorate;
public SegmentedKeyValueStorageTransactionValidatorDecorator(
final SegmentedKeyValueStorageTransaction toDecorate, final Supplier<Boolean> isClosed) {
this.isClosed = isClosed;
this.transaction = toDecorate;
}

@Override
public final void put(final S segment, final byte[] key, final byte[] value) {
public void put(final SegmentIdentifier segmentId, final byte[] key, final byte[] value) {
checkState(active, "Cannot invoke put() on a completed transaction.");
checkState(!isClosed.get(), "Cannot invoke put() on a closed storage.");
transaction.put(segment, key, value);
transaction.put(segmentId, key, value);
}

@Override
public final void remove(final S segment, final byte[] key) {
public void remove(final SegmentIdentifier segmentId, final byte[] key) {
checkState(active, "Cannot invoke remove() on a completed transaction.");
checkState(!isClosed.get(), "Cannot invoke remove() on a closed storage.");
transaction.remove(segment, key);
transaction.remove(segmentId, key);
}

@Override
@@ -1,67 +0,0 @@
/*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.services.kvstore;

import org.hyperledger.besu.plugin.services.storage.SegmentIdentifier;
import org.hyperledger.besu.plugin.services.storage.SnappableKeyValueStorage;
import org.hyperledger.besu.plugin.services.storage.SnappedKeyValueStorage;

import java.util.function.Supplier;

/**
* The type Segmented key value storage adapter.
*
* @param <S> the type parameter
*/
public class SnappableSegmentedKeyValueStorageAdapter<S> extends SegmentedKeyValueStorageAdapter<S>
implements SnappableKeyValueStorage {
private final Supplier<SnappedKeyValueStorage> snapshotSupplier;

/**
* Instantiates a new Segmented key value storage adapter.
*
* @param segment the segment
* @param storage the storage
*/
public SnappableSegmentedKeyValueStorageAdapter(
final SegmentIdentifier segment, final SegmentedKeyValueStorage<S> storage) {
this(
segment,
storage,
() -> {
throw new UnsupportedOperationException("Snapshot not supported");
});
}

/**
* Instantiates a new Segmented key value storage adapter.
*
* @param segment the segment
* @param storage the storage
* @param snapshotSupplier the snapshot supplier
*/
public SnappableSegmentedKeyValueStorageAdapter(
final SegmentIdentifier segment,
final SegmentedKeyValueStorage<S> storage,
final Supplier<SnappedKeyValueStorage> snapshotSupplier) {
super(segment, storage);
this.snapshotSupplier = snapshotSupplier;
}

@Override
public SnappedKeyValueStorage takeSnapshot() {
return snapshotSupplier.get();
}
}