Enable Quorum/IBFT1 to Besu migration (#8262)

* Enable Quorum/IBFT1 to Besu migration

Signed-off-by: Bhanu Pulluri <bhanu.pulluri@kaleido.io>

* Fix BftMining acceptance test

Signed-off-by: Bhanu Pulluri <bhanu.pulluri@kaleido.io>

* Introduce delay after London fork update in BFT mining test to prevent timing issues

Signed-off-by: Bhanu Pulluri <bhanu.pulluri@kaleido.io>

* Update besu/src/main/java/org/hyperledger/besu/controller/IbftLegacyBesuControllerBuilder.java

Co-authored-by: Matt Whitehead <matthew1001@hotmail.com>
Signed-off-by: Bhanu Pulluri <59369753+pullurib@users.noreply.github.com>

* Review changes

Signed-off-by: Bhanu Pulluri <bhanu.pulluri@kaleido.io>

* Update additional JSON-RPC method creation for all controller builders in the consensus schedule

Signed-off-by: Bhanu Pulluri <bhanu.pulluri@kaleido.io>

* Create EthProtocolManager and plugin service factory for both consensus controllers during migration

Signed-off-by: Bhanu Pulluri <bhanu.pulluri@kaleido.io>

* Refactor resource files

Signed-off-by: Bhanu Pulluri <bhanu.pulluri@kaleido.io>

* fix verification metadata

Signed-off-by: Bhanu Pulluri <bhanu.pulluri@kaleido.io>

* fix regression

Signed-off-by: Bhanu Pulluri <bhanu.pulluri@kaleido.io>

* update changelog

Signed-off-by: Bhanu Pulluri <bhanu.pulluri@kaleido.io>

* Fix controller selection at the transition block

Signed-off-by: Bhanu Pulluri <bhanu.pulluri@kaleido.io>

* Review changes

Signed-off-by: Bhanu Pulluri <bhanu.pulluri@kaleido.io>

* Revert BftExtraData changes

Signed-off-by: Bhanu Pulluri <bhanu.pulluri@kaleido.io>

---------

Signed-off-by: Bhanu Pulluri <bhanu.pulluri@kaleido.io>
Signed-off-by: Bhanu Pulluri <59369753+pullurib@users.noreply.github.com>
Co-authored-by: Bhanu Pulluri <bhanu.pulluri@kaleido.io>
Co-authored-by: Matt Whitehead <matthew1001@hotmail.com>
Co-authored-by: Matt Whitehead <matthew.whitehead@kaleido.io>
Co-authored-by: Sally MacFarlane <macfarla.github@gmail.com>
Authored by Bhanu Pulluri on 2025-03-12 10:08:51 -04:00, committed by GitHub
parent 083b1d3986, commit 2db46e964c
55 changed files with 1708 additions and 79 deletions
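
For orientation, a minimal sketch of what the migration path is expected to produce, based on the BesuControllerTest changes and the acceptance-test genesis further down in this diff; the inline genesis fragment and the sketch class name are illustrative assumptions, not code from this PR:

    import org.hyperledger.besu.config.GenesisConfig;
    import org.hyperledger.besu.controller.BesuController;
    import org.hyperledger.besu.controller.BesuControllerBuilder;
    import org.hyperledger.besu.controller.ConsensusScheduleBesuControllerBuilder;
    import org.hyperledger.besu.ethereum.eth.sync.SyncMode;

    class MigrationControllerSketch {
      public static void main(final String[] args) {
        // A genesis with both an "ibft" and a "qbft" section (the latter with startBlock) is
        // treated as a consensus migration, mirroring qbft-migration.json further down.
        final String genesisJson =
            "{\"config\":{\"chainId\":1337,"
                + "\"ibft\":{\"epochlength\":100,\"blockperiodseconds\":5},"
                + "\"qbft\":{\"epochLength\":30000,\"blockPeriodSeconds\":1,\"startBlock\":101}}}";
        final BesuControllerBuilder builder =
            new BesuController.Builder()
                .fromGenesisFile(GenesisConfig.fromConfig(genesisJson), SyncMode.FULL);
        // Per BesuControllerTest below, the expected builder schedule is
        // {0L -> IbftLegacyBesuControllerBuilder, 101L -> QbftBesuControllerBuilder}.
        System.out.println(builder instanceof ConsensusScheduleBesuControllerBuilder); // true
      }
    }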


@@ -41,6 +41,7 @@
- Add TLS/mTLS options and configure the GraphQL HTTP service[#7910](https://github.com/hyperledger/besu/pull/7910)
- Update `eth_getLogs` to return a `Block not found` error when the requested block is not found. [#8290](https://github.com/hyperledger/besu/pull/8290)
- Change `Invalid block, unable to parse RLP` RPC error message to `Invalid block param (block not found)` [#8328](https://github.com/hyperledger/besu/pull/8328)
- Add IBFT1 to QBFT migration capability [#8262](https://github.com/hyperledger/besu/pull/8262)
- Support pending transaction score when saving and restoring txpool [#8363](https://github.com/hyperledger/besu/pull/8363)
- Upgrade to execution-spec-tests v4.1.0 including better EIP-2537 coverage for BLS [#8402](https://github.com/hyperledger/besu/pull/8402)
- Add era1 format to blocks import subcommand [#7935](https://github.com/hyperledger/besu/issues/7935)


@@ -36,6 +36,6 @@ public class AwaitNetPeerCount implements Condition {
@Override
public void verify(final Node node) {
WaitUtils.waitFor(() -> assertThat(node.execute(transaction)).isEqualTo(expectedPeerCount));
WaitUtils.waitFor(50, () -> assertThat(node.execute(transaction)).isEqualTo(expectedPeerCount));
}
}


@@ -490,6 +490,39 @@ public class BesuNodeFactory {
return create(builder.build());
}
public BesuNode createQbftMigrationNode(
final String name, final boolean fixedPort, final DataStorageFormat storageFormat)
throws IOException {
JsonRpcConfiguration rpcConfig = node.createJsonRpcWithQbftEnabledConfig(false);
rpcConfig.addRpcApi("ADMIN,TXPOOL");
if (fixedPort) {
rpcConfig.setPort(
Math.abs(name.hashCode() % 60000)
+ 1024); // Generate a consistent RPC port based on the node name
}
BesuNodeConfigurationBuilder builder =
new BesuNodeConfigurationBuilder()
.name(name)
.miningEnabled()
.jsonRpcConfiguration(rpcConfig)
.webSocketConfiguration(node.createWebSocketEnabledConfig())
.devMode(false)
.dataStorageConfiguration(
storageFormat == DataStorageFormat.FOREST
? DataStorageConfiguration.DEFAULT_FOREST_CONFIG
: DataStorageConfiguration.DEFAULT_BONSAI_CONFIG)
.genesisConfigProvider(GenesisConfigurationFactory::createQbftMigrationGenesisConfig);
if (fixedPort) {
builder.p2pPort(
Math.abs(name.hashCode() % 60000)
+ 1024
+ 500); // Generate a consistent port for p2p based on node name (+ 500 to avoid
// clashing with RPC port or other nodes with a similar name)
}
return create(builder.build());
}
public BesuNode createCustomGenesisNode(
final String name, final String genesisPath, final boolean canBeBootnode) throws IOException {
return createCustomGenesisNode(name, genesisPath, canBeBootnode, false);
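
A small illustrative sketch of the fixed-port derivation used by createQbftMigrationNode above (the class name and the sample node name are placeholders): both ports are derived from the node name, and the p2p port is offset by 500 so it cannot collide with the node's own RPC port.

    class FixedPortSketch {
      public static void main(final String[] args) {
        final String name = "miner1"; // sample node name, as used by the migration test
        final int rpcPort = Math.abs(name.hashCode() % 60000) + 1024;       // JSON-RPC port
        final int p2pPort = Math.abs(name.hashCode() % 60000) + 1024 + 500; // p2p port, offset from RPC
        System.out.println("rpc=" + rpcPort + " p2p=" + p2pPort);
      }
    }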


@@ -96,6 +96,13 @@ public class GenesisConfigurationFactory {
validators, template, QbftExtraDataCodec::createGenesisExtraDataString);
}
public static Optional<String> createQbftMigrationGenesisConfig(
final Collection<? extends RunnableNode> validators) {
final String template = readGenesisFile("/qbft/migration-ibft1/qbft-migration.json");
return updateGenesisExtraData(
validators, template, QbftExtraDataCodec::createGenesisExtraDataString);
}
@SuppressWarnings("unchecked")
public static Optional<String> createQbftValidatorContractGenesisConfig(
final Collection<? extends RunnableNode> validators) throws UncheckedIOException {


@@ -0,0 +1,118 @@
/*
* Copyright contributors to Hyperledger Besu.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.tests.acceptance;
import static org.apache.logging.log4j.util.LoaderUtil.getClassLoader;
import org.hyperledger.besu.plugin.services.storage.DataStorageFormat;
import org.hyperledger.besu.tests.acceptance.dsl.AcceptanceTestBase;
import org.hyperledger.besu.tests.acceptance.dsl.node.BesuNode;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import org.junit.jupiter.api.Test;
public class QuorumIBFTMigrationTest extends AcceptanceTestBase {
public static void copyKeyFilesToNodeDataDirs(final BesuNode... nodes) throws IOException {
for (BesuNode node : nodes) {
copyKeyFile(node, "key");
copyKeyFile(node, "key.pub");
}
}
private static void copyKeyFile(final BesuNode node, final String keyFileName)
throws IOException {
String resourceFileName = "qbft/migration-ibft1/" + node.getName() + keyFileName;
try (InputStream keyFileStream = getClassLoader().getResourceAsStream(resourceFileName)) {
if (keyFileStream == null) {
throw new IOException("Resource not found: " + resourceFileName);
}
Path targetPath = node.homeDirectory().resolve(keyFileName);
Files.createDirectories(targetPath.getParent());
Files.copy(keyFileStream, targetPath, StandardCopyOption.REPLACE_EXISTING);
}
}
public static void runBesuCommand(final Path dataPath) throws IOException, InterruptedException {
ProcessBuilder processBuilder =
new ProcessBuilder(
"../../build/install/besu/bin/besu",
"--genesis-file",
"src/test/resources/qbft/migration-ibft1/qbft-migration.json",
"--data-path",
dataPath.toString(),
"--data-storage-format",
"FOREST",
"blocks",
"import",
"src/test/resources/qbft/migration-ibft1/ibft.blocks");
processBuilder.directory(new File(System.getProperty("user.dir")));
processBuilder.inheritIO(); // This will redirect the output to the console
Process process = processBuilder.start();
int exitCode = process.waitFor();
if (exitCode == 0) {
System.out.println("Import command executed successfully.");
} else {
throw new RuntimeException("Import command execution failed with exit code: " + exitCode);
}
}
@Test
public void shouldImportIBFTBlocksAndTransitionToQBFT() throws Exception {
// Create a mix of Bonsai and Forest DB nodes
final BesuNode minerNode1 =
besu.createQbftMigrationNode("miner1", false, DataStorageFormat.FOREST);
final BesuNode minerNode2 =
besu.createQbftMigrationNode("miner2", false, DataStorageFormat.FOREST);
final BesuNode minerNode3 =
besu.createQbftMigrationNode("miner3", false, DataStorageFormat.FOREST);
final BesuNode minerNode4 =
besu.createQbftMigrationNode("miner4", false, DataStorageFormat.FOREST);
final BesuNode minerNode5 =
besu.createQbftMigrationNode("miner5", false, DataStorageFormat.FOREST);
// Copy key files to the node datadirs
// Use the key files saved in resources directory
copyKeyFilesToNodeDataDirs(minerNode1, minerNode2, minerNode3, minerNode4, minerNode5);
// start one node and import blocks from import file
// Use import file, genesis saved in resources directory
runBesuCommand(minerNode1.homeDirectory());
// After the import is done, start the rest of the nodes using the same genesis and respective
// node keys
cluster.start(minerNode1, minerNode2, minerNode3, minerNode4, minerNode5);
// Check that the chain is progressing as expected
cluster.verify(blockchain.reachesHeight(minerNode2, 1, 120));
}
@Override
public void tearDownAcceptanceTestBase() {
cluster.stop();
super.tearDownAcceptanceTestBase();
}
}


@@ -42,8 +42,6 @@ public class BftMiningSoakTest extends ParameterizedBftTestBase {
private static final long ONE_MINUTE = Duration.of(1, ChronoUnit.MINUTES).toMillis();
private static final long THREE_MINUTES = Duration.of(3, ChronoUnit.MINUTES).toMillis();
private static final long TEN_SECONDS = Duration.of(10, ChronoUnit.SECONDS).toMillis();
static int getTestDurationMins() {
@@ -213,6 +211,8 @@ public class BftMiningSoakTest extends ParameterizedBftTestBase {
upgradeToLondon(
minerNode1, minerNode2, minerNode3, minerNode4, lastChainHeight.intValue() + 120);
cluster.verify(blockchain.reachesHeight(minerNode4, 1, 180));
previousStepEndTime = Instant.now();
chainHeight = minerNode1.execute(ethTransactions.blockNumber());
@@ -241,7 +241,7 @@ public class BftMiningSoakTest extends ParameterizedBftTestBase {
upgradeToShanghai(
minerNode1, minerNode2, minerNode3, minerNode4, Instant.now().getEpochSecond() + 120);
Thread.sleep(THREE_MINUTES);
cluster.verify(blockchain.reachesHeight(minerNode4, 1, 180));
SimpleStorageShanghai simpleStorageContractShanghai =
minerNode1.execute(contractTransactions.createSmartContract(SimpleStorageShanghai.class));


@@ -0,0 +1 @@
0a46b91fe0c770a4355d1fec9ccd72d39264f46a74ed67a69a12ed4c265aa768


@@ -0,0 +1 @@
8093fb3200c783555ed487b8b5210ef3369b062a1f3ce5762d83d7a62205693d1e4f253e840ca48ec98d8f20c5b41bbbd43f34f87a1f68324ab51afe73732b96


@@ -0,0 +1 @@
17c2aacfdf1f6defde20e6ae7132c6d3991e758af3799a307a75b38135678a48


@@ -0,0 +1 @@
d81f65976ccc44c5e7e6ca859c5ede06b0f484f72c52d35dd8f7bd7581a8b7020d9ef45878696b4593daf5575b48dda5259f78f192a3445d5cc97c032660642f


@@ -0,0 +1 @@
917fb1b03034e5d7156b89bc2a3bc2aae1d146d2640d75e095f07110b0871bc1


@@ -0,0 +1 @@
67785a2ac328648d94245abb25bdcfab853d06e68c70026d90f2fd5c8338b65c19235398eac205e4bbdb3fc1de9669ad6309e43ab203c8e7430664cea6451f56


@@ -0,0 +1 @@
3b3cbae8c034c4ba2d5f4df44faa013888216a6eabb7fffa0b224003ea770ba7


@@ -0,0 +1 @@
ec403552908986b5d9e4def3be9ddcb26d5b03def3b44ef2d5d728bb9a3028603405e7379c3e185cb2bc3e784548fcdd0e7616162029c3f407155b2fb25ba0ca


@@ -0,0 +1 @@
4ddfb30d4fcd6f5a9f959961f734e4c1469af223bc16b217eef0d3796e64973d


@@ -0,0 +1 @@
28c02b375d62b0adef8213b76882dfec264d5dcbb0a8ba73f167bc718a6d02f1833af80ab9f51a5f007e5f3b8320e8fc5ab287d4bd59ad497f8949bf4abb9e80


@@ -0,0 +1,63 @@
{
"nonce": "0x0",
"timestamp": "0x58ee40ba",
"extraData": "0x0000000000000000000000000000000000000000000000000000000000000000f86df86994a18182ee8ca476f2f0fb8170a1d4620edb39c5e194065541903bf3bb8c088a18046b441f5d286288c994d1e106d68cac92668b100f6f43791ddcb2c7588094d156777a1e1539fe654fc82266f41fd5d4aa548494efbbd8900222d7b2f75d081c3e7446a1f4fe10ce80c0",
"gasLimit": "700000000",
"gasUsed": "0x0",
"number": "0x0",
"difficulty": "0x1",
"coinbase": "0x0000000000000000000000000000000000000000",
"mixHash": "0x63746963616c2062797a616e74696e65206661756c7420746f6c6572616e6365",
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"config": {
"chainId": 1337,
"homesteadBlock": 10,
"eip150Block": 20,
"eip150Hash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"eip155Block": 25,
"eip158Block": 30,
"byzantiumBlock": 50,
"constantinopleBlock": 60,
"petersburgBlock": 70,
"istanbulBlock": 80,
"ibft": {
"epochlength": 100,
"blockperiodseconds": 5,
"requesttimeoutseconds": 10,
"policy": 0,
"ceil2Nby3Block": 0,
"validatorcontractaddress": "0x0000000000000000000000000000000000000000"
},
"qbft": {
"epochLength": 30000,
"blockPeriodSeconds" : 1,
"requestTimeoutSeconds": 10,
"startBlock": 101
},
"txnSizeLimit": 64,
"maxCodeSize": 0,
"maxCodeSizeConfig": [
{
"block": 0,
"size": 64
}
]
},
"alloc": {
"0xde8e2ae09f2ee2c6c282c054b2384f8b5f9debee": {
"balance": "1000000000000000000000000000"
},
"0x23bcbca17fc4978909ab44ac82559c7d379aa006": {
"balance": "1000000000000000000000000000"
},
"0x870276532cca9f33e66273cfa494cf41e04b5a66": {
"balance": "1000000000000000000000000000"
},
"0x7d7fc9fdfa49e2db22fc6ebab593dcf3aeffbde8": {
"balance": "1000000000000000000000000000"
},
"0x4df76ad0678513846699056e0070c5f587580eb5": {
"balance": "1000000000000000000000000000"
}
}
}


@@ -36,4 +36,4 @@
"number": "0x0",
"gasUsed": "0x0",
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000"
}
}


@@ -37,6 +37,7 @@ dependencies {
implementation project(':consensus:clique')
implementation project(':consensus:common')
implementation project(':consensus:ibft')
implementation project(':consensus:ibftlegacy')
implementation project(':consensus:merge')
implementation project(':consensus:qbft')
implementation project(':consensus:qbft-core')


@@ -173,6 +173,7 @@ public class Runner implements AutoCloseable {
LOG.info("Starting Ethereum main loop ... ");
natService.start();
networkRunner.start();
besuController.getMiningCoordinator().subscribe();
if (networkRunner.getNetwork().isP2pEnabled()) {
besuController.getSynchronizer().start();
}


@@ -267,14 +267,19 @@ public class RlpBlockImporter implements Closeable {
private BlockHeader lookupPreviousHeader(
final MutableBlockchain blockchain, final BlockHeader header) {
return blockchain
.getBlockHeader(header.getParentHash())
.orElseThrow(
() ->
new IllegalStateException(
String.format(
"Block %s does not connect to the existing chain. Current chain head %s",
header.getNumber(), blockchain.getChainHeadBlockNumber())));
try {
return blockchain
.getBlockHeader(header.getParentHash())
.orElseThrow(
() ->
new IllegalStateException(
String.format(
"Block %s does not connect to the existing chain. Current chain head %s",
header.getNumber(), blockchain.getChainHeadBlockNumber())));
} catch (IllegalStateException e) {
LOG.info("Block {} does not connect to the existing chain.", header.getNumber());
}
return null;
}
@Override


@@ -394,8 +394,7 @@ public class BesuController implements java.io.Closeable {
if (configOptions.isIbft2()) {
originalControllerBuilder = new IbftBesuControllerBuilder();
} else if (configOptions.isIbftLegacy()) {
throw new IllegalStateException(
"IBFT1 (legacy) is no longer supported. Consider using IBFT2 or QBFT.");
originalControllerBuilder = new IbftLegacyBesuControllerBuilder();
} else {
throw new IllegalStateException(
"Invalid genesis migration config. Migration is supported from IBFT (legacy) or IBFT2 to QBFT)");


@@ -197,9 +197,10 @@ public class ConsensusScheduleBesuControllerBuilder extends BesuControllerBuilde
@Override
protected PluginServiceFactory createAdditionalPluginServices(
final Blockchain blockchain, final ProtocolContext protocolContext) {
return besuControllerBuilderSchedule
.get(0L)
.createAdditionalPluginServices(blockchain, protocolContext);
besuControllerBuilderSchedule
.values()
.forEach(b -> b.createAdditionalPluginServices(blockchain, protocolContext));
return new NoopPluginServiceFactory();
}
@Override
@@ -207,10 +208,14 @@ public class ConsensusScheduleBesuControllerBuilder extends BesuControllerBuilde
final ProtocolContext protocolContext,
final ProtocolSchedule protocolSchedule,
final MiningConfiguration miningConfiguration) {
return besuControllerBuilderSchedule
.get(0L)
.createAdditionalJsonRpcMethodFactory(
protocolContext, protocolSchedule, miningConfiguration);
besuControllerBuilderSchedule
.values()
.forEach(
b ->
b.createAdditionalJsonRpcMethodFactory(
protocolContext, protocolSchedule, miningConfiguration));
return super.createAdditionalJsonRpcMethodFactory(
protocolContext, protocolSchedule, miningConfiguration);
}
@Override
@@ -218,7 +223,7 @@ public class ConsensusScheduleBesuControllerBuilder extends BesuControllerBuilde
final EthProtocolManager ethProtocolManager,
final Optional<SnapProtocolManager> maybeSnapProtocolManager) {
return besuControllerBuilderSchedule
.get(0L)
.get(besuControllerBuilderSchedule.keySet().stream().skip(1).findFirst().orElseThrow())
.createSubProtocolConfiguration(ethProtocolManager, maybeSnapProtocolManager);
}
@@ -240,20 +245,34 @@ public class ConsensusScheduleBesuControllerBuilder extends BesuControllerBuilde
final List<PeerValidator> peerValidators,
final Optional<MergePeerFilter> mergePeerFilter,
final ForkIdManager forkIdManager) {
return besuControllerBuilderSchedule
.get(0L)
.createEthProtocolManager(
protocolContext,
synchronizerConfiguration,
transactionPool,
ethereumWireProtocolConfiguration,
ethPeers,
ethContext,
ethMessages,
scheduler,
peerValidators,
mergePeerFilter,
forkIdManager);
besuControllerBuilderSchedule
.values()
.forEach(
b ->
b.createEthProtocolManager(
protocolContext,
synchronizerConfiguration,
transactionPool,
ethereumWireProtocolConfiguration,
ethPeers,
ethContext,
ethMessages,
scheduler,
peerValidators,
mergePeerFilter,
forkIdManager));
return super.createEthProtocolManager(
protocolContext,
synchronizerConfiguration,
transactionPool,
ethereumWireProtocolConfiguration,
ethPeers,
ethContext,
ethMessages,
scheduler,
peerValidators,
mergePeerFilter,
forkIdManager);
}
@Override
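
Illustrative stand-in (not from the PR) for how createSubProtocolConfiguration now picks its delegate: the sub-protocol configuration comes from the second entry of the builder schedule, i.e. the target consensus, assuming the schedule exposes its keys in ascending block order.

    import java.util.TreeMap;

    class ScheduleDelegationSketch {
      public static void main(final String[] args) {
        // Hypothetical labels standing in for builder instances keyed by their first block.
        final TreeMap<Long, String> schedule = new TreeMap<>();
        schedule.put(0L, "ibft-legacy-builder");
        schedule.put(101L, "qbft-builder");
        final long target = schedule.keySet().stream().skip(1).findFirst().orElseThrow();
        System.out.println(target + " -> " + schedule.get(target)); // 101 -> qbft-builder
      }
    }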


@@ -0,0 +1,99 @@
/*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.controller;
import org.hyperledger.besu.config.IbftLegacyConfigOptions;
import org.hyperledger.besu.consensus.common.EpochManager;
import org.hyperledger.besu.consensus.common.bft.BftBlockInterface;
import org.hyperledger.besu.consensus.common.bft.BftContext;
import org.hyperledger.besu.consensus.common.validator.ValidatorProvider;
import org.hyperledger.besu.consensus.common.validator.blockbased.BlockValidatorProvider;
import org.hyperledger.besu.consensus.ibftlegacy.IbftExtraDataCodec;
import org.hyperledger.besu.consensus.ibftlegacy.IbftLegacyBlockInterface;
import org.hyperledger.besu.consensus.ibftlegacy.IbftProtocolSchedule;
import org.hyperledger.besu.ethereum.ProtocolContext;
import org.hyperledger.besu.ethereum.blockcreation.MiningCoordinator;
import org.hyperledger.besu.ethereum.blockcreation.NoopMiningCoordinator;
import org.hyperledger.besu.ethereum.chain.Blockchain;
import org.hyperledger.besu.ethereum.core.BlockHeader;
import org.hyperledger.besu.ethereum.core.MiningConfiguration;
import org.hyperledger.besu.ethereum.eth.manager.EthProtocolManager;
import org.hyperledger.besu.ethereum.eth.sync.state.SyncState;
import org.hyperledger.besu.ethereum.eth.transactions.TransactionPool;
import org.hyperledger.besu.ethereum.mainnet.ProtocolSchedule;
import org.hyperledger.besu.ethereum.worldstate.WorldStateArchive;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** The Ibft legacy besu controller builder. */
public class IbftLegacyBesuControllerBuilder extends BesuControllerBuilder {
private static final Logger LOG = LoggerFactory.getLogger(IbftLegacyBesuControllerBuilder.class);
private final BftBlockInterface blockInterface;
/** Default constructor */
public IbftLegacyBesuControllerBuilder() {
LOG.warn(
"IBFT1 is deprecated. This consensus configuration should be used only while migrating to another consensus mechanism.");
this.blockInterface = new IbftLegacyBlockInterface(new IbftExtraDataCodec());
}
@Override
protected MiningCoordinator createMiningCoordinator(
final ProtocolSchedule protocolSchedule,
final ProtocolContext protocolContext,
final TransactionPool transactionPool,
final MiningConfiguration miningConfiguration,
final SyncState syncState,
final EthProtocolManager ethProtocolManager) {
return new NoopMiningCoordinator(miningConfiguration);
}
@Override
protected ProtocolSchedule createProtocolSchedule() {
return IbftProtocolSchedule.create(
genesisConfigOptions, privacyParameters, isRevertReasonEnabled, evmConfiguration);
}
@Override
protected BftContext createConsensusContext(
final Blockchain blockchain,
final WorldStateArchive worldStateArchive,
final ProtocolSchedule protocolSchedule) {
final IbftLegacyConfigOptions ibftConfig = genesisConfigOptions.getIbftLegacyConfigOptions();
final EpochManager epochManager = new EpochManager(ibftConfig.getEpochLength());
final ValidatorProvider validatorProvider =
BlockValidatorProvider.nonForkingValidatorProvider(
blockchain, epochManager, blockInterface);
return new BftContext(validatorProvider, epochManager, blockInterface);
}
@Override
protected PluginServiceFactory createAdditionalPluginServices(
final Blockchain blockchain, final ProtocolContext protocolContext) {
return new NoopPluginServiceFactory();
}
@Override
protected void validateContext(final ProtocolContext context) {
final BlockHeader genesisBlockHeader = context.getBlockchain().getGenesisBlock().getHeader();
if (blockInterface.validatorsInBlock(genesisBlockHeader).isEmpty()) {
LOG.warn("Genesis block contains no signers - chain will not progress.");
}
}
}


@@ -96,7 +96,6 @@ import org.hyperledger.besu.ethereum.eth.transactions.TransactionPool;
import org.hyperledger.besu.ethereum.mainnet.ProtocolSchedule;
import org.hyperledger.besu.ethereum.p2p.config.SubProtocolConfiguration;
import org.hyperledger.besu.ethereum.worldstate.WorldStateArchive;
import org.hyperledger.besu.plugin.services.BesuEvents;
import org.hyperledger.besu.util.Subscribers;
import java.time.Duration;
@@ -152,7 +151,9 @@ public class QbftBesuControllerBuilder extends BesuControllerBuilder {
private ValidatorProvider createReadOnlyValidatorProvider(final Blockchain blockchain) {
checkNotNull(
transactionValidatorProvider, "transactionValidatorProvider should have been initialised");
final EpochManager epochManager = new EpochManager(qbftConfig.getEpochLength());
final long startBlock =
qbftConfig.getStartBlock().isPresent() ? qbftConfig.getStartBlock().getAsLong() : 0;
final EpochManager epochManager = new EpochManager(qbftConfig.getEpochLength(), startBlock);
// Must create our own voteTallyCache as using this would pollute the main voteTallyCache
final BlockValidatorProvider readOnlyBlockValidatorProvider =
BlockValidatorProvider.nonForkingValidatorProvider(
@@ -212,8 +213,16 @@ public class QbftBesuControllerBuilder extends BesuControllerBuilder {
qbftExtraDataCodec,
ethProtocolManager.ethContext().getScheduler());
final ValidatorProvider validatorProvider =
protocolContext.getConsensusContext(BftContext.class).getValidatorProvider();
final ValidatorProvider validatorProvider;
if (qbftConfig.getStartBlock().isPresent()) {
validatorProvider =
protocolContext
.getConsensusContext(BftContext.class, qbftConfig.getStartBlock().getAsLong())
.getValidatorProvider();
} else {
validatorProvider =
protocolContext.getConsensusContext(BftContext.class).getValidatorProvider();
}
final QbftValidatorProvider qbftValidatorProvider =
new QbftValidatorProviderAdaptor(validatorProvider);
@@ -316,7 +325,8 @@ public class QbftBesuControllerBuilder extends BesuControllerBuilder {
bftProcessor,
blockCreatorFactory,
blockchain,
bftEventQueue);
bftEventQueue,
syncState);
// Update the next block period in seconds according to the transition schedule
protocolContext
@@ -335,35 +345,6 @@ public class QbftBesuControllerBuilder extends BesuControllerBuilder {
.getEmptyBlockPeriodSeconds());
});
syncState.subscribeSyncStatus(
syncStatus -> {
if (syncState.syncTarget().isPresent()) {
// We're syncing so stop doing other stuff
LOG.info("Stopping QBFT mining coordinator while we are syncing");
miningCoordinator.stop();
} else {
LOG.info("Starting QBFT mining coordinator following sync");
miningCoordinator.enable();
miningCoordinator.start();
}
});
syncState.subscribeCompletionReached(
new BesuEvents.InitialSyncCompletionListener() {
@Override
public void onInitialSyncCompleted() {
LOG.info("Starting QBFT mining coordinator following initial sync");
miningCoordinator.enable();
miningCoordinator.start();
}
@Override
public void onInitialSyncRestart() {
// Nothing to do. The mining coordinator won't be started until
// sync has completed.
}
});
return miningCoordinator;
}
@@ -428,7 +409,9 @@ public class QbftBesuControllerBuilder extends BesuControllerBuilder {
final Blockchain blockchain,
final WorldStateArchive worldStateArchive,
final ProtocolSchedule protocolSchedule) {
final EpochManager epochManager = new EpochManager(qbftConfig.getEpochLength());
final long startBlock =
qbftConfig.getStartBlock().isPresent() ? qbftConfig.getStartBlock().getAsLong() : 0;
final EpochManager epochManager = new EpochManager(qbftConfig.getEpochLength(), startBlock);
final BftValidatorOverrides validatorOverrides =
convertBftForks(genesisConfigOptions.getTransitions().getQbftForks());

View File

@@ -14,6 +14,7 @@
*/
package org.hyperledger.besu.chainimport;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
import static org.mockito.Mockito.mock;
@@ -21,6 +22,7 @@ import static org.mockito.Mockito.mock;
import org.hyperledger.besu.cli.config.EthNetworkConfig;
import org.hyperledger.besu.cli.config.NetworkName;
import org.hyperledger.besu.components.BesuComponent;
import org.hyperledger.besu.config.GenesisConfig;
import org.hyperledger.besu.config.MergeConfiguration;
import org.hyperledger.besu.controller.BesuController;
import org.hyperledger.besu.cryptoservices.NodeKeyUtils;
@@ -40,9 +42,12 @@ import org.hyperledger.besu.testutil.TestClock;
import java.io.IOException;
import java.math.BigInteger;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.concurrent.CompletionException;
import com.google.common.io.Resources;
import org.apache.tuweni.units.bigints.UInt256;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
@@ -152,4 +157,46 @@ public final class RlpBlockImporterTest {
assertThat(result.count).isEqualTo(1);
assertThat(result.td).isEqualTo(UInt256.valueOf(34351349760L));
}
@Test
public void ibftImport() throws IOException {
final Path source = dataDir.resolve("ibft.blocks");
final String config =
Resources.toString(this.getClass().getResource("/ibft-genesis-2.json"), UTF_8);
try {
Files.write(
source,
Resources.toByteArray(this.getClass().getResource("/ibft.blocks")),
StandardOpenOption.CREATE,
StandardOpenOption.TRUNCATE_EXISTING);
} catch (final IOException ex) {
throw new IllegalStateException(ex);
}
final BesuController controller =
new BesuController.Builder()
.fromGenesisFile(GenesisConfig.fromConfig(config), SyncMode.FULL)
.synchronizerConfiguration(SynchronizerConfiguration.builder().build())
.ethProtocolConfiguration(EthProtocolConfiguration.defaultConfig())
.storageProvider(new InMemoryKeyValueStorageProvider())
.networkId(BigInteger.valueOf(1337))
.miningParameters(MiningConfiguration.newDefault())
.nodeKey(NodeKeyUtils.generate())
.metricsSystem(new NoOpMetricsSystem())
.privacyParameters(PrivacyParameters.DEFAULT)
.dataDirectory(dataDir)
.clock(TestClock.fixed())
.transactionPoolConfiguration(TransactionPoolConfiguration.DEFAULT)
.evmConfiguration(EvmConfiguration.DEFAULT)
.networkConfiguration(NetworkingConfiguration.create())
.besuComponent(mock(BesuComponent.class))
.apiConfiguration(ImmutableApiConfiguration.builder().build())
.build();
final RlpBlockImporter.ImportResult result =
rlpBlockImporter.importBlockchain(source, controller, true);
// Don't count the Genesis block
assertThat(result.count).isEqualTo(100);
}
}


@@ -98,6 +98,27 @@ public class BesuControllerTest {
.isInstanceOf(QbftBesuControllerBuilder.class);
}
@Test
public void createConsensusScheduleBesuControllerBuilderWhenMigratingFromIbftLegacyToQbft() {
final long qbftStartBlock = 10L;
mockGenesisConfigForMigration("ibftLegacy", OptionalLong.of(qbftStartBlock));
final BesuControllerBuilder besuControllerBuilder =
new BesuController.Builder().fromGenesisFile(genesisConfig, SyncMode.FULL);
assertThat(besuControllerBuilder).isInstanceOf(ConsensusScheduleBesuControllerBuilder.class);
final Map<Long, BesuControllerBuilder> besuControllerBuilderSchedule =
((ConsensusScheduleBesuControllerBuilder) besuControllerBuilder)
.getBesuControllerBuilderSchedule();
assertThat(besuControllerBuilderSchedule).containsKeys(0L, qbftStartBlock);
assertThat(besuControllerBuilderSchedule.get(0L))
.isInstanceOf(IbftLegacyBesuControllerBuilder.class);
assertThat(besuControllerBuilderSchedule.get(qbftStartBlock))
.isInstanceOf(QbftBesuControllerBuilder.class);
}
private void mockGenesisConfigForMigration(
final String consensus, final OptionalLong startBlock) {
when(genesisConfigOptions.isConsensusMigration()).thenReturn(true);
@@ -108,6 +129,11 @@ public class BesuControllerTest {
when(genesisConfigOptions.isIbft2()).thenReturn(true);
break;
}
case "ibftlegacy":
{
when(genesisConfigOptions.isIbftLegacy()).thenReturn(true);
break;
}
default:
fail("Invalid consensus algorithm");
}


@@ -0,0 +1,65 @@
{
"nonce": "0x0",
"timestamp": "0x58ee40ba",
"extraData": "0x0000000000000000000000000000000000000000000000000000000000000000f86df86994a18182ee8ca476f2f0fb8170a1d4620edb39c5e194065541903bf3bb8c088a18046b441f5d286288c994d1e106d68cac92668b100f6f43791ddcb2c7588094d156777a1e1539fe654fc82266f41fd5d4aa548494efbbd8900222d7b2f75d081c3e7446a1f4fe10ce80c0",
"gasLimit": "700000000",
"gasUsed": "0x0",
"number": "0x0",
"difficulty": "0x1",
"coinbase": "0x0000000000000000000000000000000000000000",
"mixHash": "0x63746963616c2062797a616e74696e65206661756c7420746f6c6572616e6365",
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"config": {
"chainId": 1337,
"homesteadBlock": 0,
"eip150Block": 20,
"eip150Hash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"eip155Block": 30,
"eip158Block": 40,
"byzantiumBlock": 50,
"constantinopleBlock": 60,
"petersburgBlock": 70,
"istanbulBlock": 80,
"ibft": {
"epochlength": 1000,
"blockperiodseconds": 5,
"requesttimeoutseconds": 10,
"policy": 0,
"ceil2Nby3Block": 0,
"validatorcontractaddress": "0x0000000000000000000000000000000000000000"
},
"qbft": {
"epochLength": 30000,
"blockPeriodSeconds" : 1,
"requestTimeoutSeconds": 10,
"startBlock": 101
},
"txnSizeLimit": 64,
"maxCodeSize": 0,
"maxCodeSizeConfig": [
{
"block": 0,
"size": 64
}
],
"isMPS": false
},
"alloc": {
"0xde8e2ae09f2ee2c6c282c054b2384f8b5f9debee": {
"balance": "1000000000000000000000000000"
},
"0x23bcbca17fc4978909ab44ac82559c7d379aa006": {
"balance": "1000000000000000000000000000"
},
"0x870276532cca9f33e66273cfa494cf41e04b5a66": {
"balance": "1000000000000000000000000000"
},
"0x7d7fc9fdfa49e2db22fc6ebab593dcf3aeffbde8": {
"balance": "1000000000000000000000000000"
},
"0x4df76ad0678513846699056e0070c5f587580eb5": {
"balance": "1000000000000000000000000000"
}
}
}

BIN besu/src/test/resources/ibft.blocks (mode changed from Normal file to Executable file; binary content not shown)


@@ -78,7 +78,7 @@ public interface GenesisConfigOptions {
* @return the boolean
*/
default boolean isConsensusMigration() {
return isIbft2() && isQbft();
return (isIbft2() || isIbftLegacy()) && isQbft();
}
/**
@@ -88,6 +88,13 @@ public interface GenesisConfigOptions {
*/
String getConsensusEngine();
/**
* Gets ibft legacy config options.
*
* @return the ibft legacy config options
*/
IbftLegacyConfigOptions getIbftLegacyConfigOptions();
/**
* Gets checkpoint options.
*
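
A minimal sketch of the new getIbftLegacyConfigOptions accessor and the widened isConsensusMigration check, assuming GenesisConfig#getConfigOptions as used elsewhere in the codebase; the class name and inline JSON are placeholders:

    import org.hyperledger.besu.config.GenesisConfig;
    import org.hyperledger.besu.config.GenesisConfigOptions;

    class IbftLegacyOptionsSketch {
      public static void main(final String[] args) {
        final GenesisConfigOptions options =
            GenesisConfig.fromConfig(
                    "{\"config\":{\"ibft\":{\"epochlength\":100,\"blockperiodseconds\":5},"
                        + "\"qbft\":{\"startBlock\":101}}}")
                .getConfigOptions();
        System.out.println(options.isConsensusMigration());                              // true: (isIbft2() || isIbftLegacy()) && isQbft()
        System.out.println(options.getIbftLegacyConfigOptions().getEpochLength());       // 100
        System.out.println(options.getIbftLegacyConfigOptions().getBlockPeriodSeconds()); // 5
      }
    }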


@@ -0,0 +1,73 @@
/*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.config;
import java.util.Map;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.google.common.collect.ImmutableMap;
/** The Ibft legacy config options. */
public class IbftLegacyConfigOptions {
/** The constant DEFAULT. */
public static final IbftLegacyConfigOptions DEFAULT =
new IbftLegacyConfigOptions(JsonUtil.createEmptyObjectNode());
private static final long DEFAULT_EPOCH_LENGTH = 30_000;
private static final int DEFAULT_BLOCK_PERIOD_SECONDS = 1;
private final ObjectNode ibftConfigRoot;
/**
* Instantiates a new Ibft legacy config options.
*
* @param ibftConfigRoot the ibft config root
*/
IbftLegacyConfigOptions(final ObjectNode ibftConfigRoot) {
this.ibftConfigRoot = ibftConfigRoot;
}
/*
*/
/**
* Gets epoch length.
*
* @return the epoch length
*/
public long getEpochLength() {
return JsonUtil.getLong(ibftConfigRoot, "epochlength", DEFAULT_EPOCH_LENGTH);
}
/**
* Gets block period seconds.
*
* @return the block period seconds
*/
public int getBlockPeriodSeconds() {
return JsonUtil.getPositiveInt(
ibftConfigRoot, "blockperiodseconds", DEFAULT_BLOCK_PERIOD_SECONDS);
}
/**
* As map.
*
* @return the map
*/
Map<String, Object> asMap() {
final ImmutableMap.Builder<String, Object> builder = ImmutableMap.builder();
return builder.build();
}
}


@@ -118,6 +118,8 @@ public class JsonGenesisConfigOptions implements GenesisConfigOptions {
return ETHASH_CONFIG_KEY;
} else if (isIbft2()) {
return IBFT2_CONFIG_KEY;
} else if (isIbftLegacy()) {
return IBFT_LEGACY_CONFIG_KEY;
} else if (isQbft()) {
return QBFT_CONFIG_KEY;
} else if (isClique()) {
@@ -157,6 +159,13 @@ public class JsonGenesisConfigOptions implements GenesisConfigOptions {
return isQbft() || isClique() || isIbft2() || isIbftLegacy();
}
@Override
public IbftLegacyConfigOptions getIbftLegacyConfigOptions() {
return JsonUtil.getObjectNode(configRoot, IBFT_LEGACY_CONFIG_KEY)
.map(IbftLegacyConfigOptions::new)
.orElse(IbftLegacyConfigOptions.DEFAULT);
}
@Override
public BftConfigOptions getBftConfigOptions() {
final String fieldKey = isIbft2() ? IBFT2_CONFIG_KEY : QBFT_CONFIG_KEY;
@@ -529,6 +538,9 @@ public class JsonGenesisConfigOptions implements GenesisConfigOptions {
if (isEthHash()) {
builder.put("ethash", getEthashConfigOptions().asMap());
}
if (isIbftLegacy()) {
builder.put("ibft", getIbftLegacyConfigOptions().asMap());
}
if (isIbft2()) {
builder.put("ibft2", getBftConfigOptions().asMap());
}


@@ -116,6 +116,11 @@ public class StubGenesisConfigOptions implements GenesisConfigOptions, Cloneable
return false;
}
@Override
public IbftLegacyConfigOptions getIbftLegacyConfigOptions() {
return IbftLegacyConfigOptions.DEFAULT;
}
@Override
public boolean isIbft2() {
return false;
@@ -416,6 +421,9 @@ public class StubGenesisConfigOptions implements GenesisConfigOptions, Cloneable
if (isEthHash()) {
builder.put("ethash", getEthashConfigOptions().asMap());
}
if (isIbftLegacy()) {
builder.put("ibft", getIbftLegacyConfigOptions().asMap());
}
if (isIbft2()) {
builder.put("ibft2", getBftConfigOptions().asMap());
}


@@ -18,6 +18,7 @@ package org.hyperledger.besu.consensus.common;
public class EpochManager {
private final long epochLengthInBlocks;
private final long startBlock;
/**
* Instantiates a new Epoch manager.
@@ -25,7 +26,18 @@ public class EpochManager {
* @param epochLengthInBlocks the epoch length in blocks
*/
public EpochManager(final long epochLengthInBlocks) {
this(epochLengthInBlocks, 0);
}
/**
* Instantiates a new Epoch manager.
*
* @param epochLengthInBlocks the epoch length in blocks
* @param startBlock the block number where the epoch counting starts
*/
public EpochManager(final long epochLengthInBlocks, final long startBlock) {
this.epochLengthInBlocks = epochLengthInBlocks;
this.startBlock = startBlock;
}
/**
@@ -35,7 +47,13 @@ public class EpochManager {
* @return the boolean
*/
public boolean isEpochBlock(final long blockNumber) {
return (blockNumber % epochLengthInBlocks) == 0;
if (blockNumber < 0) {
throw new IllegalArgumentException("Block number must be 0 or greater.");
}
if (blockNumber < startBlock - 1) {
return false;
}
return (blockNumber - (startBlock == 0 ? 0 : startBlock - 1)) % epochLengthInBlocks == 0;
}
/**
@@ -45,6 +63,9 @@ public class EpochManager {
* @return the last epoch block
*/
public long getLastEpochBlock(final long blockNumber) {
return blockNumber - (blockNumber % epochLengthInBlocks);
if (blockNumber < startBlock) {
throw new IllegalArgumentException("Block number is before start block.");
}
return startBlock + ((blockNumber - startBlock) / epochLengthInBlocks) * epochLengthInBlocks;
}
}
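
A minimal sketch of the start-block-aware epoch arithmetic above, using the values from the migration genesis in this PR (epoch length 30000, qbft startBlock 101); the class name is a placeholder:

    import org.hyperledger.besu.consensus.common.EpochManager;

    class EpochManagerMigrationSketch {
      public static void main(final String[] args) {
        final EpochManager epochManager = new EpochManager(30_000, 101);
        System.out.println(epochManager.isEpochBlock(100));      // true  (block just before the QBFT start block)
        System.out.println(epochManager.isEpochBlock(101));      // false
        System.out.println(epochManager.isEpochBlock(30_100));   // true  (epochs counted from startBlock - 1)
        System.out.println(epochManager.getLastEpochBlock(150)); // 101
      }
    }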


@@ -54,7 +54,7 @@ public class MigratingMiningCoordinator implements MiningCoordinator, BlockAdded
this.miningCoordinatorSchedule = miningCoordinatorSchedule;
this.blockchain = blockchain;
this.activeMiningCoordinator =
this.miningCoordinatorSchedule.getFork(blockchain.getChainHeadBlockNumber()).getValue();
this.miningCoordinatorSchedule.getFork(blockchain.getChainHeadBlockNumber() + 1).getValue();
}
@Override


@@ -45,6 +45,12 @@ public class MigratingProtocolContext extends ProtocolContext {
@Override
public <C extends ConsensusContext> C getConsensusContext(final Class<C> klass) {
final long chainHeadBlockNumber = getBlockchain().getChainHeadBlockNumber();
return consensusContextSchedule.getFork(chainHeadBlockNumber).getValue().as(klass);
return consensusContextSchedule.getFork(chainHeadBlockNumber + 1).getValue().as(klass);
}
@Override
public <C extends ConsensusContext> C getConsensusContext(
final Class<C> klass, final long blockNumber) {
return consensusContextSchedule.getFork(blockNumber).getValue().as(klass);
}
}
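
Illustrative stand-in (not from the PR) for the "+1" selection above: once the chain head reaches the last block of the old consensus, the migrating protocol context and mining coordinator must already resolve to the engine that will handle the next block.

    import java.util.TreeMap;

    class TransitionLookaheadSketch {
      public static void main(final String[] args) {
        // Hypothetical stand-in for the fork schedule: first block of each regime -> engine label.
        final TreeMap<Long, String> schedule = new TreeMap<>();
        schedule.put(0L, "ibft-legacy");
        schedule.put(101L, "qbft");
        final long chainHead = 100; // last block produced under IBFT1
        System.out.println(schedule.floorEntry(chainHead).getValue());     // ibft-legacy (old behaviour)
        System.out.println(schedule.floorEntry(chainHead + 1).getValue()); // qbft (what block 101 needs)
      }
    }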


@@ -28,6 +28,8 @@ import org.hyperledger.besu.ethereum.chain.Blockchain;
import org.hyperledger.besu.ethereum.core.Block;
import org.hyperledger.besu.ethereum.core.BlockHeader;
import org.hyperledger.besu.ethereum.core.Transaction;
import org.hyperledger.besu.ethereum.eth.sync.state.SyncState;
import org.hyperledger.besu.plugin.services.BesuEvents;
import java.util.List;
import java.util.Optional;
@@ -65,6 +67,8 @@ public class BftMiningCoordinator implements MiningCoordinator, BlockAddedObserv
private long blockAddedObserverId;
private final AtomicReference<State> state = new AtomicReference<>(State.PAUSED);
private SyncState syncState;
/**
* Instantiates a new Bft mining coordinator.
*
@@ -91,6 +95,35 @@ public class BftMiningCoordinator implements MiningCoordinator, BlockAddedObserv
this.blockchain = blockchain;
}
/**
* Instantiates a new Bft mining coordinator.
*
* @param bftExecutors the bft executors
* @param eventHandler the event handler
* @param bftProcessor the bft processor
* @param blockCreatorFactory the block creator factory
* @param blockchain the blockchain
* @param eventQueue the event queue
* @param syncState the sync state
*/
public BftMiningCoordinator(
final BftExecutors bftExecutors,
final BftEventHandler eventHandler,
final BftProcessor bftProcessor,
final BftBlockCreatorFactory<?> blockCreatorFactory,
final Blockchain blockchain,
final BftEventQueue eventQueue,
final SyncState syncState) {
this.bftExecutors = bftExecutors;
this.eventHandler = eventHandler;
this.bftProcessor = bftProcessor;
this.blockCreatorFactory = blockCreatorFactory;
this.eventQueue = eventQueue;
this.blockchain = blockchain;
this.syncState = syncState;
}
@Override
public void start() {
if (state.compareAndSet(State.IDLE, State.RUNNING)
@@ -120,6 +153,41 @@ public class BftMiningCoordinator implements MiningCoordinator, BlockAddedObserv
}
}
@Override
public void subscribe() {
if (syncState == null) {
return;
}
syncState.subscribeSyncStatus(
syncStatus -> {
if (syncState.syncTarget().isPresent()) {
// We're syncing so stop doing other stuff
LOG.info("Stopping BFT mining coordinator while we are syncing");
stop();
} else {
LOG.info("Starting BFT mining coordinator following sync");
enable();
start();
}
});
syncState.subscribeCompletionReached(
new BesuEvents.InitialSyncCompletionListener() {
@Override
public void onInitialSyncCompleted() {
LOG.info("Starting BFT mining coordinator following initial sync");
enable();
start();
}
@Override
public void onInitialSyncRestart() {
// Nothing to do. The mining coordinator won't be started until
// sync has completed.
}
});
}
@Override
public void awaitStop() throws InterruptedException {
bftExecutors.awaitStop();


@@ -0,0 +1,55 @@
apply plugin: 'java-library'
jar {
archiveBaseName = 'besu-ibftlegacy'
manifest {
attributes(
'Specification-Title': archiveBaseName,
'Specification-Version': project.version,
'Implementation-Title': archiveBaseName,
'Implementation-Version': calculateVersion(),
'Commit-Hash': getGitCommitDetails(40).hash
)
}
}
dependencies {
implementation project(':config')
implementation project(':consensus:common')
implementation project(':consensus:ibft')
implementation project(':crypto:algorithms')
implementation project(':datatypes')
implementation project(':ethereum:api')
implementation project(':ethereum:blockcreation')
implementation project(':ethereum:core')
implementation project(':ethereum:eth')
implementation project(':ethereum:p2p')
implementation project(':ethereum:rlp')
implementation project(':evm')
implementation project(':metrics:core')
implementation project(':services:kvstore')
implementation 'com.google.guava:guava'
implementation 'io.vertx:vertx-core'
implementation 'com.fasterxml.jackson.core:jackson-databind'
implementation 'io.tmio:tuweni-bytes'
implementation 'io.tmio:tuweni-units'
testImplementation project(path: ':consensus:common', configuration: 'testSupportArtifacts')
testImplementation project(path: ':consensus:ibft', configuration: 'testSupportArtifacts')
testImplementation project(path: ':ethereum:core', configuration: 'testSupportArtifacts')
testImplementation project(path: ':ethereum:eth', configuration: 'testSupportArtifacts')
testImplementation project(':metrics:core')
testImplementation project(':testutil')
integrationTestImplementation 'org.assertj:assertj-core'
integrationTestImplementation 'org.junit.jupiter:junit-jupiter-api'
integrationTestImplementation 'org.mockito:mockito-core'
integrationTestImplementation 'org.mockito:mockito-junit-jupiter'
testImplementation 'org.assertj:assertj-core'
testImplementation 'org.junit.jupiter:junit-jupiter'
testImplementation 'org.awaitility:awaitility'
testImplementation 'org.mockito:mockito-core'
testImplementation 'org.mockito:mockito-junit-jupiter'
}


@@ -0,0 +1,169 @@
/*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.consensus.ibftlegacy;
import org.hyperledger.besu.consensus.common.bft.BftBlockHeaderFunctions;
import org.hyperledger.besu.consensus.common.bft.BftExtraData;
import org.hyperledger.besu.crypto.SECPSignature;
import org.hyperledger.besu.datatypes.Address;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.core.BlockHeader;
import org.hyperledger.besu.ethereum.core.BlockHeaderBuilder;
import org.hyperledger.besu.ethereum.core.Util;
import org.hyperledger.besu.ethereum.rlp.BytesValueRLPOutput;
import java.util.List;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import org.apache.tuweni.bytes.Bytes;
/** The Ibft block hashing. */
public class IbftBlockHashing {
/** Default constructor */
public IbftBlockHashing() {}
private static final Bytes COMMIT_MSG_CODE = Bytes.wrap(new byte[] {2});
private static final IbftExtraDataCodec ibftExtraDataCodec = new IbftExtraDataCodec();
/**
* Constructs a hash of the block header, suitable for use when creating the proposer seal. The
* extra data is modified to have a null proposer seal and empty list of committed seals.
*
* @param header The header for which a proposer seal is to be calculated
* @param ibftExtraData The extra data block which is to be inserted to the header once seal is
* calculated
* @return the hash of the header suitable for signing as the proposer seal
*/
public static Hash calculateDataHashForProposerSeal(
final BlockHeader header, final BftExtraData ibftExtraData) {
final Bytes headerRlp =
serializeHeader(header, () -> encodeExtraDataWithoutCommittedSeals(ibftExtraData, null));
// Proposer hash is the hash of the hash
return Hash.hash(Hash.hash(headerRlp));
}
/**
* Constructs a hash of the block header suitable for signing as a committed seal. The extra data
* in the hash uses an empty list for the committed seals.
*
* @param header The header for which a proposer seal is to be calculated (without extra data)
* @param ibftExtraData The extra data block which is to be inserted to the header once seal is
* calculated
* @return the hash of the header including the validator and proposer seal in the extra data
*/
public static Hash calculateDataHashForCommittedSeal(
final BlockHeader header, final IbftLegacyExtraData ibftExtraData) {
// The data signed by a committer is an array of [Hash, COMMIT_MSG_CODE]
final Hash dataHash = Hash.hash(serializeHeaderWithoutCommittedSeals(header, ibftExtraData));
final Bytes seal = Bytes.wrap(dataHash, COMMIT_MSG_CODE);
return Hash.hash(seal);
}
/**
* Constructs a hash of the block header, but omits the committerSeals (as this changes on each of
* the potentially circulated blocks at the current chain height).
*
* @param header The header for which a block hash is to be calculated
* @return the hash of the header including the validator and proposer seal in the extra data
*/
public static Hash calculateHashOfIbftBlockOnchain(final BlockHeader header) {
final IbftLegacyExtraData ibftExtraData = ibftExtraDataCodec.decode(header);
Hash hash = Hash.hash(serializeHeaderWithoutCommittedSeals(header, ibftExtraData));
return hash;
}
private static Bytes serializeHeaderWithoutCommittedSeals(
final BlockHeader header, final IbftLegacyExtraData ibftExtraData) {
return serializeHeader(
header,
() -> encodeExtraDataWithoutCommittedSeals(ibftExtraData, ibftExtraData.getProposerSeal()));
}
/**
* Recovers the proposer's {@link Address} from the proposer seal.
*
* @param header the block header that was signed by the proposer seal
* @param ibftExtraData the parsed IBftExtraData from the header
* @return the proposer address
*/
public static Address recoverProposerAddress(
final BlockHeader header, final IbftLegacyExtraData ibftExtraData) {
final Hash proposerHash = calculateDataHashForProposerSeal(header, ibftExtraData);
Address addr = Util.signatureToAddress(ibftExtraData.getProposerSeal(), proposerHash);
return addr;
}
/**
* Recovers the {@link Address} for each validator that contributed a committed seal to the block.
*
* @param header the block header that was signed by the committed seals
* @param ibftExtraData the parsed IBftExtraData from the header
* @return the addresses of validators that provided a committed seal
*/
public static List<Address> recoverCommitterAddresses(
final BlockHeader header, final IbftLegacyExtraData ibftExtraData) {
final Hash committerHash =
IbftBlockHashing.calculateDataHashForCommittedSeal(header, ibftExtraData);
return ibftExtraData.getSeals().stream()
.map(p -> Util.signatureToAddress(p, committerHash))
.collect(Collectors.toList());
}
private static Bytes encodeExtraDataWithoutCommittedSeals(
final BftExtraData ibftExtraData, final SECPSignature proposerSeal) {
final BytesValueRLPOutput extraDataEncoding = new BytesValueRLPOutput();
extraDataEncoding.startList();
extraDataEncoding.writeList(
ibftExtraData.getValidators(), (validator, rlp) -> rlp.writeBytes(validator));
if (proposerSeal != null) {
extraDataEncoding.writeBytes(proposerSeal.encodedBytes());
} else {
extraDataEncoding.writeNull();
}
// Represents an empty committer list (i.e. this is not included in the hashing of the block)
extraDataEncoding.writeEmptyList();
extraDataEncoding.endList();
Bytes vanityBytes = ibftExtraData.getVanityData();
return Bytes.wrap(vanityBytes, extraDataEncoding.encoded());
}
private static Bytes serializeHeader(
final BlockHeader header, final Supplier<Bytes> extraDataSerializer) {
// create a block header which is a copy of the header supplied as parameter except for the
// extraData field
final BlockHeaderBuilder builder = BlockHeaderBuilder.fromHeader(header);
builder.blockHeaderFunctions(BftBlockHeaderFunctions.forOnchainBlock(ibftExtraDataCodec));
// set the extraData field using the supplied extraDataSerializer if the block height is not 0
if (header.getNumber() == BlockHeader.GENESIS_BLOCK_NUMBER) {
builder.extraData(header.getExtraData());
} else {
builder.extraData(extraDataSerializer.get());
}
final BytesValueRLPOutput out = new BytesValueRLPOutput();
builder.buildBlockHeader().writeTo(out);
return out.encoded();
}
}


@@ -0,0 +1,93 @@
/*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.consensus.ibftlegacy;
import static org.hyperledger.besu.ethereum.mainnet.AbstractGasLimitSpecification.DEFAULT_MAX_GAS_LIMIT;
import static org.hyperledger.besu.ethereum.mainnet.AbstractGasLimitSpecification.DEFAULT_MIN_GAS_LIMIT;
import org.hyperledger.besu.consensus.ibftlegacy.headervalidationrules.IbftExtraDataValidationRule;
import org.hyperledger.besu.consensus.ibftlegacy.headervalidationrules.VoteValidationRule;
import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.core.BlockHeader;
import org.hyperledger.besu.ethereum.mainnet.BlockHeaderValidator;
import org.hyperledger.besu.ethereum.mainnet.headervalidationrules.AncestryValidationRule;
import org.hyperledger.besu.ethereum.mainnet.headervalidationrules.ConstantFieldValidationRule;
import org.hyperledger.besu.ethereum.mainnet.headervalidationrules.GasLimitRangeAndDeltaValidationRule;
import org.hyperledger.besu.ethereum.mainnet.headervalidationrules.GasUsageValidationRule;
import org.hyperledger.besu.ethereum.mainnet.headervalidationrules.TimestampBoundedByFutureParameter;
import org.hyperledger.besu.ethereum.mainnet.headervalidationrules.TimestampMoreRecentThanParent;
import org.apache.tuweni.units.bigints.UInt256;
/** The Ibft block header validation ruleset factory. */
public class IbftBlockHeaderValidationRulesetFactory {
/** Default constructor */
public IbftBlockHeaderValidationRulesetFactory() {}
/**
* Produces a BlockHeaderValidator configured for assessing ibft block headers which are to form
* part of the BlockChain (i.e. not proposed blocks, which do not contain commit seals)
*
* @param secondsBetweenBlocks the minimum number of seconds which must elapse between blocks.
* @param ceil2nBy3Block the block after which 2/3n commit seals must exist, rather than 2F+1
* @return BlockHeaderValidator configured for assessing ibft block headers
*/
public static BlockHeaderValidator.Builder ibftBlockHeaderValidatorBuilder(
final long secondsBetweenBlocks, final long ceil2nBy3Block) {
return createBlockHeaderValidatorBuilder(secondsBetweenBlocks, true, ceil2nBy3Block);
}
/**
* Creates a builder for the IBFT block header validator.
*
* @param secondsBetweenBlocks the minimum number of seconds which must elapse between blocks.
* @return a builder for the IBFT block header validator
*/
public static BlockHeaderValidator.Builder ibftBlockHeaderValidatorBuilder(
final long secondsBetweenBlocks) {
return createBlockHeaderValidatorBuilder(secondsBetweenBlocks);
}
private static BlockHeaderValidator.Builder createBlockHeaderValidatorBuilder(
final long secondsBetweenBlocks,
final boolean validateCommitSeals,
final long ceil2nBy3Block) {
BlockHeaderValidator.Builder builder = createBlockHeaderValidatorBuilder(secondsBetweenBlocks);
builder.addRule(new IbftExtraDataValidationRule(validateCommitSeals, ceil2nBy3Block));
return builder;
}
private static BlockHeaderValidator.Builder createBlockHeaderValidatorBuilder(
final long secondsBetweenBlocks) {
return new BlockHeaderValidator.Builder()
.addRule(new AncestryValidationRule())
.addRule(new GasUsageValidationRule())
.addRule(
new GasLimitRangeAndDeltaValidationRule(DEFAULT_MIN_GAS_LIMIT, DEFAULT_MAX_GAS_LIMIT))
.addRule(new TimestampBoundedByFutureParameter(1))
.addRule(new TimestampMoreRecentThanParent(secondsBetweenBlocks))
.addRule(
new ConstantFieldValidationRule<>(
"MixHash", BlockHeader::getMixHash, IbftHelpers.EXPECTED_MIX_HASH))
.addRule(
new ConstantFieldValidationRule<>(
"OmmersHash", BlockHeader::getOmmersHash, Hash.EMPTY_LIST_HASH))
.addRule(
new ConstantFieldValidationRule<>(
"Difficulty", BlockHeader::getDifficulty, UInt256.ONE))
.addRule(new VoteValidationRule());
}
}


@@ -0,0 +1,113 @@
/*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.consensus.ibftlegacy;
import static com.google.common.base.Preconditions.checkArgument;
import org.hyperledger.besu.consensus.common.bft.BftExtraData;
import org.hyperledger.besu.consensus.common.bft.BftExtraDataCodec;
import org.hyperledger.besu.crypto.SECPSignature;
import org.hyperledger.besu.crypto.SignatureAlgorithm;
import org.hyperledger.besu.crypto.SignatureAlgorithmFactory;
import org.hyperledger.besu.datatypes.Address;
import org.hyperledger.besu.ethereum.core.BlockHeader;
import org.hyperledger.besu.ethereum.rlp.BytesValueRLPInput;
import org.hyperledger.besu.ethereum.rlp.RLPInput;
import java.util.Collection;
import com.google.common.base.Supplier;
import com.google.common.base.Suppliers;
import org.apache.tuweni.bytes.Bytes;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Represents encoder/decoder of the serialized data structure stored in the extraData field of the
* BlockHeader used when operating under an IBFT consensus mechanism.
*/
public class IbftExtraDataCodec extends BftExtraDataCodec {
/** The constant EXTRA_VANITY_LENGTH. */
public static final int EXTRA_VANITY_LENGTH = 32;
private static final Supplier<SignatureAlgorithm> SIGNATURE_ALGORITHM =
Suppliers.memoize(SignatureAlgorithmFactory::getInstance);
private static final Logger LOG = LoggerFactory.getLogger(IbftExtraDataCodec.class);
/** Default constructor */
public IbftExtraDataCodec() {}
/**
* Decode.
*
* @param blockHeader the block header
* @return the bft extra data
*/
@Override
public IbftLegacyExtraData decode(final BlockHeader blockHeader) {
final Object inputExtraData = blockHeader.getParsedExtraData();
if (inputExtraData instanceof IbftLegacyExtraData) {
return (IbftLegacyExtraData) inputExtraData;
}
LOG.warn(
"Expected a BftExtraData instance but got {}. Reparsing required.",
inputExtraData != null ? inputExtraData.getClass().getName() : "null");
return decodeRaw(blockHeader.getExtraData());
}
/**
* Decode raw input and return ibft extra data.
*
* @param input the input
* @return the ibft extra data
*/
@Override
public IbftLegacyExtraData decodeRaw(final Bytes input) {
checkArgument(
input.size() > EXTRA_VANITY_LENGTH,
"Invalid Bytes supplied - too short to produce a valid IBFT Extra Data object.");
final Bytes vanityData = input.slice(0, EXTRA_VANITY_LENGTH);
final Bytes rlpData = input.slice(EXTRA_VANITY_LENGTH);
final RLPInput rlpInput = new BytesValueRLPInput(rlpData, false);
rlpInput.enterList(); // This accounts for the "root node" which contains IBFT data items.
final Collection<Address> validators = rlpInput.readList(Address::readFrom);
final SECPSignature proposerSeal = parseProposerSeal(rlpInput);
final Collection<SECPSignature> seals =
rlpInput.readList(rlp -> SIGNATURE_ALGORITHM.get().decodeSignature(rlp.readBytes()));
rlpInput.leaveList();
return new IbftLegacyExtraData(vanityData, seals, proposerSeal, validators);
}
private static SECPSignature parseProposerSeal(final RLPInput rlpInput) {
final Bytes data = rlpInput.readBytes();
return data.isZero() ? null : SIGNATURE_ALGORITHM.get().decodeSignature(data);
}
/**
* Encode extra data to bytes. Encoding is not supported for legacy IBFT extra data.
*
* @param bftExtraData the bft extra data
* @param encodingType the encoding type
* @return the bytes
*/
@Override
public Bytes encode(final BftExtraData bftExtraData, final EncodingType encodingType) {
throw new UnsupportedOperationException("The encode method is not supported.");
}
}
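
For readers unfamiliar with the legacy layout, a short decoding sketch for the codec above (header is an assumed BlockHeader taken from an IBFT1/Quorum chain; not part of the diff):

    import java.util.Collection;
    import org.hyperledger.besu.crypto.SECPSignature;
    import org.hyperledger.besu.datatypes.Address;

    // extraData layout: 32 bytes of vanity data followed by an RLP list of
    // [validators, proposerSeal, commitSeals].
    final IbftExtraDataCodec codec = new IbftExtraDataCodec();
    final IbftLegacyExtraData extraData = codec.decodeRaw(header.getExtraData());
    final Collection<Address> validators = extraData.getValidators();
    final SECPSignature proposerSeal = extraData.getProposerSeal();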

View File

@@ -0,0 +1,38 @@
/*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.consensus.ibftlegacy;
import org.hyperledger.besu.datatypes.Hash;
/** The Ibft helpers utility class. */
public class IbftHelpers {
/** Default constructor */
public IbftHelpers() {}
/** The constant EXPECTED_MIX_HASH. */
public static final Hash EXPECTED_MIX_HASH =
Hash.fromHexString("0x63746963616c2062797a616e74696e65206661756c7420746f6c6572616e6365");
/**
* Calculate required validator quorum.
*
* @param validatorCount the validator count
* @return the required validator quorum, 2F + 1 where F = (validatorCount - 1) / 3
*/
public static int calculateRequiredValidatorQuorum(final int validatorCount) {
final int F = (validatorCount - 1) / 3;
return (2 * F) + 1;
}
}
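
As a quick sanity check of the 2F+1 arithmetic above (illustrative only, not part of the diff):

    // F = (validatorCount - 1) / 3 using integer division, quorum = 2F + 1.
    final int quorumOf4 = IbftHelpers.calculateRequiredValidatorQuorum(4);   // F = 1 -> 3
    final int quorumOf7 = IbftHelpers.calculateRequiredValidatorQuorum(7);   // F = 2 -> 5
    final int quorumOf10 = IbftHelpers.calculateRequiredValidatorQuorum(10); // F = 3 -> 7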

View File

@@ -0,0 +1,103 @@
/*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.consensus.ibftlegacy;
import org.hyperledger.besu.consensus.common.bft.BftBlockHashing;
import org.hyperledger.besu.consensus.common.bft.BftBlockHeaderFunctions;
import org.hyperledger.besu.consensus.common.bft.BftBlockInterface;
import org.hyperledger.besu.consensus.common.bft.BftExtraDataCodec;
import org.hyperledger.besu.consensus.common.validator.ValidatorVote;
import org.hyperledger.besu.consensus.common.validator.VoteType;
import org.hyperledger.besu.datatypes.Address;
import org.hyperledger.besu.ethereum.core.BlockHeader;
import java.util.Collection;
import java.util.Optional;
import com.google.common.collect.ImmutableBiMap;
import org.apache.tuweni.bytes.Bytes;
/** The Ibft legacy block interface. */
public class IbftLegacyBlockInterface extends BftBlockInterface {
/** The constant NO_VOTE_SUBJECT. */
public static final Address NO_VOTE_SUBJECT = Address.wrap(Bytes.wrap(new byte[Address.SIZE]));
/** The constant ADD_NONCE. */
public static final long ADD_NONCE = 0xFFFFFFFFFFFFFFFFL;
/** The constant DROP_NONCE. */
public static final long DROP_NONCE = 0x0L;
private static final ImmutableBiMap<VoteType, Long> voteToValue =
ImmutableBiMap.of(
VoteType.ADD, ADD_NONCE,
VoteType.DROP, DROP_NONCE);
private static final IbftExtraDataCodec ibftExtraDataCodec = new IbftExtraDataCodec();
/**
* Constructor for IbftLegacyBlockInterface.
*
* @param bftExtraDataCodec the codec for BFT extra data
*/
public IbftLegacyBlockInterface(final BftExtraDataCodec bftExtraDataCodec) {
super(bftExtraDataCodec);
}
@Override
public Address getProposerOfBlock(final BlockHeader header) {
final IbftLegacyExtraData ibftExtraData = ibftExtraDataCodec.decode(header);
return IbftBlockHashing.recoverProposerAddress(header, ibftExtraData);
}
@Override
public Address getProposerOfBlock(final org.hyperledger.besu.plugin.data.BlockHeader header) {
return getProposerOfBlock(
BlockHeader.convertPluginBlockHeader(
header,
new BftBlockHeaderFunctions(
h -> new BftBlockHashing(ibftExtraDataCodec).calculateDataHashForCommittedSeal(h),
ibftExtraDataCodec)));
}
@Override
public Optional<ValidatorVote> extractVoteFromHeader(final BlockHeader header) {
final Address candidate = header.getCoinbase();
if (!candidate.equals(NO_VOTE_SUBJECT)) {
final Address proposer = getProposerOfBlock(header);
final VoteType votePolarity = voteToValue.inverse().get(header.getNonce());
final Address recipient = header.getCoinbase();
return Optional.of(new ValidatorVote(votePolarity, proposer, recipient));
}
return Optional.empty();
}
@Override
public Collection<Address> validatorsInBlock(final BlockHeader header) {
return ibftExtraDataCodec.decode(header).getValidators();
}
/**
* Checks whether the supplied nonce value encodes a valid validator vote (auth or drop).
*
* @param value the nonce value to check
* @return true if the value is a valid vote value
*/
public static boolean isValidVoteValue(final long value) {
return voteToValue.values().contains(value);
}
}
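
A hedged usage sketch for the vote-extraction path above (header is an assumed legacy IBFT BlockHeader; not part of the diff):

    import java.util.Optional;
    import org.hyperledger.besu.consensus.common.validator.ValidatorVote;

    final IbftLegacyBlockInterface blockInterface =
        new IbftLegacyBlockInterface(new IbftExtraDataCodec());

    // A vote is present when the coinbase is non-zero; its polarity comes from
    // the nonce (0xFF..FF = add, 0x0 = drop) and the proposer is recovered from
    // the proposer seal in the extra data.
    final Optional<ValidatorVote> vote = blockInterface.extractVoteFromHeader(header);
    vote.ifPresent(v -> System.out.println("Validator vote found: " + v));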

View File

@@ -0,0 +1,61 @@
/*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.consensus.ibftlegacy;
import org.hyperledger.besu.consensus.common.bft.BftExtraData;
import org.hyperledger.besu.crypto.SECPSignature;
import org.hyperledger.besu.datatypes.Address;
import java.util.Collection;
import java.util.Optional;
import org.apache.tuweni.bytes.Bytes;
/** The Ibft Legacy extra data. */
public class IbftLegacyExtraData extends BftExtraData {
private final SECPSignature proposerSeal;
/**
* Instantiates a new IBFT legacy extra data.
*
* @param vanityData the vanity data
* @param seals the seals
* @param validators the validators
* @param proposerSeal the proposer seal
*/
public IbftLegacyExtraData(
final Bytes vanityData,
final Collection<SECPSignature> seals,
final SECPSignature proposerSeal,
final Collection<Address> validators) {
super(vanityData, seals, Optional.empty(), 0, validators);
this.proposerSeal = proposerSeal;
}
/**
* Gets proposer seal.
*
* @return the proposer seal
*/
public SECPSignature getProposerSeal() {
return proposerSeal;
}
@Override
public String toString() {
return "IbftLegacyExtraData{" + super.toString() + ", proposerSeal=" + proposerSeal + '}';
}
}

View File

@@ -0,0 +1,110 @@
/*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.consensus.ibftlegacy;
import static org.hyperledger.besu.consensus.ibftlegacy.IbftBlockHeaderValidationRulesetFactory.ibftBlockHeaderValidatorBuilder;
import org.hyperledger.besu.config.GenesisConfigOptions;
import org.hyperledger.besu.config.IbftLegacyConfigOptions;
import org.hyperledger.besu.consensus.common.bft.BftBlockHeaderFunctions;
import org.hyperledger.besu.datatypes.Wei;
import org.hyperledger.besu.ethereum.chain.BadBlockManager;
import org.hyperledger.besu.ethereum.core.PrivacyParameters;
import org.hyperledger.besu.ethereum.mainnet.MainnetBlockBodyValidator;
import org.hyperledger.besu.ethereum.mainnet.MainnetBlockImporter;
import org.hyperledger.besu.ethereum.mainnet.MainnetProtocolSpecs;
import org.hyperledger.besu.ethereum.mainnet.ProtocolSchedule;
import org.hyperledger.besu.ethereum.mainnet.ProtocolScheduleBuilder;
import org.hyperledger.besu.ethereum.mainnet.ProtocolSpecAdapters;
import org.hyperledger.besu.ethereum.mainnet.ProtocolSpecBuilder;
import org.hyperledger.besu.evm.internal.EvmConfiguration;
import java.math.BigInteger;
import java.util.Optional;
/** Defines the protocol behaviours for a blockchain using IBFT. */
public class IbftProtocolSchedule {
/** Default constructor */
public IbftProtocolSchedule() {}
private static final BigInteger DEFAULT_CHAIN_ID = BigInteger.ONE;
private static final IbftExtraDataCodec ibftExtraDataCodec = new IbftExtraDataCodec();
/**
* Create protocol schedule.
*
* @param config the config
* @param privacyParameters the privacy parameters
* @param isRevertReasonEnabled whether revert reason reporting is enabled
* @param evmConfiguration the evm configuration
* @return the protocol schedule
*/
public static ProtocolSchedule create(
final GenesisConfigOptions config,
final PrivacyParameters privacyParameters,
final boolean isRevertReasonEnabled,
final EvmConfiguration evmConfiguration) {
final IbftLegacyConfigOptions ibftConfig = config.getIbftLegacyConfigOptions();
final long blockPeriod = ibftConfig.getBlockPeriodSeconds();
return new ProtocolScheduleBuilder(
config,
Optional.of(DEFAULT_CHAIN_ID),
ProtocolSpecAdapters.create(0, builder -> applyIbftChanges(blockPeriod, builder)),
privacyParameters,
isRevertReasonEnabled,
evmConfiguration,
null,
new BadBlockManager(),
false,
null)
.createProtocolSchedule();
}
/**
* Create protocol schedule.
*
* @param config the config
* @param isRevertReasonEnabled whether revert reason reporting is enabled
* @param evmConfiguration the evm configuration
* @return the protocol schedule
*/
public static ProtocolSchedule create(
final GenesisConfigOptions config,
final boolean isRevertReasonEnabled,
final EvmConfiguration evmConfiguration) {
return create(config, PrivacyParameters.DEFAULT, isRevertReasonEnabled, evmConfiguration);
}
private static ProtocolSpecBuilder applyIbftChanges(
final long secondsBetweenBlocks, final ProtocolSpecBuilder builder) {
return builder
.blockHeaderValidatorBuilder(
feeMarket -> ibftBlockHeaderValidatorBuilder(secondsBetweenBlocks))
.ommerHeaderValidatorBuilder(
feeMarket -> ibftBlockHeaderValidatorBuilder(secondsBetweenBlocks))
.blockBodyValidatorBuilder(MainnetBlockBodyValidator::new)
.blockValidatorBuilder(MainnetProtocolSpecs.blockValidatorBuilder())
.blockImporterBuilder(MainnetBlockImporter::new)
.difficultyCalculator((time, parent) -> BigInteger.ONE)
.blockReward(Wei.ZERO)
.skipZeroBlockRewards(true)
.blockHeaderFunctions(
new BftBlockHeaderFunctions(
IbftBlockHashing::calculateHashOfIbftBlockOnchain, ibftExtraDataCodec));
}
}
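
For context, a minimal sketch of building the legacy schedule above (genesisConfigOptions is an assumed, already-parsed GenesisConfigOptions; not part of the diff):

    import org.hyperledger.besu.ethereum.mainnet.ProtocolSchedule;
    import org.hyperledger.besu.evm.internal.EvmConfiguration;

    // Revert-reason reporting disabled, default EVM configuration; the block
    // period is taken from the ibft section of the genesis config.
    final ProtocolSchedule ibftSchedule =
        IbftProtocolSchedule.create(genesisConfigOptions, false, EvmConfiguration.DEFAULT);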

View File

@@ -0,0 +1,149 @@
/*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.consensus.ibftlegacy.headervalidationrules;
import org.hyperledger.besu.consensus.common.bft.BftContext;
import org.hyperledger.besu.consensus.common.bft.BftHelpers;
import org.hyperledger.besu.consensus.ibftlegacy.IbftBlockHashing;
import org.hyperledger.besu.consensus.ibftlegacy.IbftExtraDataCodec;
import org.hyperledger.besu.consensus.ibftlegacy.IbftHelpers;
import org.hyperledger.besu.consensus.ibftlegacy.IbftLegacyExtraData;
import org.hyperledger.besu.datatypes.Address;
import org.hyperledger.besu.ethereum.ProtocolContext;
import org.hyperledger.besu.ethereum.core.BlockHeader;
import org.hyperledger.besu.ethereum.mainnet.AttachedBlockHeaderValidationRule;
import org.hyperledger.besu.ethereum.rlp.RLPException;
import java.util.Collection;
import java.util.List;
import java.util.NavigableSet;
import java.util.TreeSet;
import com.google.common.collect.Iterables;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Ensures the byte content of the extraData field can be deserialised into an appropriate
* structure, and that the structure created contains data matching expectations from preceding
* blocks.
*/
public class IbftExtraDataValidationRule implements AttachedBlockHeaderValidationRule {
private static final Logger LOG = LoggerFactory.getLogger(IbftExtraDataValidationRule.class);
private static final IbftExtraDataCodec ibftExtraDataCodec = new IbftExtraDataCodec();
private final boolean validateCommitSeals;
private final long ceil2nBy3Block;
/**
* Instantiates a new Ibft extra data validation rule.
*
* @param validateCommitSeals whether commit seals in the extra data must be validated
* @param ceil2nBy3Block the block number from which ceil(2N/3) commit seals are required instead of 2F+1
*/
public IbftExtraDataValidationRule(final boolean validateCommitSeals, final long ceil2nBy3Block) {
this.validateCommitSeals = validateCommitSeals;
this.ceil2nBy3Block = ceil2nBy3Block;
}
@Override
public boolean validate(
final BlockHeader header, final BlockHeader parent, final ProtocolContext context) {
try {
final Collection<Address> storedValidators =
context
.getConsensusContext(BftContext.class)
.getValidatorProvider()
.getValidatorsAfterBlock(parent);
final IbftLegacyExtraData ibftExtraData = ibftExtraDataCodec.decode(header);
final Address proposer = IbftBlockHashing.recoverProposerAddress(header, ibftExtraData);
if (!storedValidators.contains(proposer)) {
LOG.info("Invalid block header: Proposer sealing block is not a member of the validators.");
return false;
}
if (validateCommitSeals) {
final List<Address> committers =
IbftBlockHashing.recoverCommitterAddresses(header, ibftExtraData);
final int minimumSealsRequired =
header.getNumber() < ceil2nBy3Block
? IbftHelpers.calculateRequiredValidatorQuorum(storedValidators.size())
: BftHelpers.calculateRequiredValidatorQuorum(storedValidators.size());
if (!validateCommitters(committers, storedValidators, minimumSealsRequired)) {
return false;
}
}
final NavigableSet<Address> sortedReportedValidators =
new TreeSet<>(ibftExtraData.getValidators());
if (!Iterables.elementsEqual(ibftExtraData.getValidators(), sortedReportedValidators)) {
LOG.info(
"Invalid block header: Validators are not sorted in ascending order. Expected {} but got {}.",
sortedReportedValidators,
ibftExtraData.getValidators());
return false;
}
if (!Iterables.elementsEqual(ibftExtraData.getValidators(), storedValidators)) {
LOG.info(
"Invalid block header: Incorrect validators. Expected {} but got {}.",
storedValidators,
ibftExtraData.getValidators());
return false;
}
} catch (final RLPException ex) {
LOG.info(
"Invalid block header: ExtraData field was unable to be deserialised into an IBFT Struct.",
ex);
return false;
} catch (final IllegalArgumentException ex) {
LOG.info("Invalid block header: Failed to verify extra data", ex);
return false;
} catch (final RuntimeException ex) {
LOG.info("Invalid block header: Failed to find validators at parent");
return false;
}
return true;
}
private boolean validateCommitters(
final Collection<Address> committers,
final Collection<Address> storedValidators,
final int minimumSealsRequired) {
if (committers.size() < minimumSealsRequired) {
LOG.info(
"Invalid block header: Insufficient committers to seal block. (Required {}, received {})",
minimumSealsRequired,
committers.size());
return false;
}
if (!storedValidators.containsAll(committers)) {
LOG.info(
"Invalid block header: Not all committers are in the locally maintained validator list.");
return false;
}
return true;
}
}
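
To make the seal-threshold switch at ceil2nBy3Block concrete (illustrative arithmetic only; the post-switch value assumes BftHelpers applies the ceil(2N/3) rule its name implies):

    import org.hyperledger.besu.consensus.ibftlegacy.IbftHelpers;

    // Six validators, switchover configured at block 100.
    final int validatorCount = 6;
    final int legacyThreshold = IbftHelpers.calculateRequiredValidatorQuorum(validatorCount); // 2F+1 = 3
    final int ceilThreshold = (2 * validatorCount + 2) / 3;                                   // ceil(2N/3) = 4
    // Blocks below 100 need 3 commit seals; blocks from 100 onward need 4.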

View File

@@ -0,0 +1,48 @@
/*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.consensus.ibftlegacy.headervalidationrules;
import org.hyperledger.besu.consensus.ibftlegacy.IbftLegacyBlockInterface;
import org.hyperledger.besu.ethereum.core.BlockHeader;
import org.hyperledger.besu.ethereum.mainnet.DetachedBlockHeaderValidationRule;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/** The Vote validation rule. */
public class VoteValidationRule implements DetachedBlockHeaderValidationRule {
private static final Logger LOG = LoggerFactory.getLogger(VoteValidationRule.class);
/** Default constructor */
public VoteValidationRule() {}
/**
* Responsible for ensuring the nonce is either auth or drop.
*
* @param header the block header to validate
* @param parent the block header corresponding to the parent of the header being validated.
* @return true if the nonce in the header is a valid validator vote value.
*/
@Override
public boolean validate(final BlockHeader header, final BlockHeader parent) {
final long nonce = header.getNonce();
if (!IbftLegacyBlockInterface.isValidVoteValue(nonce)) {
LOG.info("Invalid block header: Nonce value ({}) is neither auth nor drop.", nonce);
return false;
}
return true;
}
}
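
A self-contained sketch of the nonce check performed by the rule above (values mirror ADD_NONCE and DROP_NONCE; header is an assumed BlockHeader; not part of the diff):

    // Only two nonce values encode a legal validator vote under legacy IBFT.
    final long AUTH_VOTE = 0xFFFFFFFFFFFFFFFFL; // add a validator
    final long DROP_VOTE = 0x0L;                // drop a validator

    final long nonce = header.getNonce();
    final boolean validVote = nonce == AUTH_VOTE || nonce == DROP_VOTE;
    // Equivalent to IbftLegacyBlockInterface.isValidVoteValue(nonce).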

View File

@@ -236,7 +236,7 @@ public class QbftBlockHeightManager implements BaseQbftBlockHeightManager {
if (!(validatorsForHeight.containsAll(previousValidators))
|| !(previousValidators.containsAll(validatorsForHeight))) {
LOG.info(
"Validator list change. Previous chain height {}: {}. Current chain height {}: {}.",
"QBFT Validator list change. Previous chain height {}: {}. Current chain height {}: {}.",
parentHeader.getNumber(),
previousValidators,
parentHeader.getNumber() + 1,

View File

@@ -32,6 +32,7 @@ dependencies {
implementation project(':config')
implementation project(':consensus:common')
implementation project(':consensus:qbft-core')
implementation project(':consensus:ibftlegacy')
implementation project(':crypto:services')
implementation project(':datatypes')
implementation project(':ethereum:api')

View File

@@ -32,6 +32,8 @@ public interface MiningCoordinator {
void stop();
default void subscribe() {}
void awaitStop() throws InterruptedException;
/**

View File

@@ -89,6 +89,19 @@ public class ProtocolContext {
return consensusContext.as(klass);
}
/**
* Gets the consensus context of the protocol context.
*
* @param <C> the type of the consensus context
* @param klass the klass
* @param blockNumber the block number (not used by this base implementation)
* @return the consensus context of the protocol context
*/
public <C extends ConsensusContext> C getConsensusContext(
final Class<C> klass, final long blockNumber) {
return consensusContext.as(klass);
}
/**
* Gets the safe consensus context of the protocol context.
*

View File

@@ -130,7 +130,7 @@ public class GenesisFileModule {
private static GenesisFileModule createGenesisModule(final String genesisConfig) {
final JsonObject genesis = new JsonObject(genesisConfig);
final JsonObject config = genesis.getJsonObject("config");
if (config.containsKey("clique") || config.containsKey("qbft")) {
if (config.containsKey("ibft") || config.containsKey("clique") || config.containsKey("qbft")) {
throw new RuntimeException("Only Ethash and Merge configs accepted as genesis files");
}
return new MainnetGenesisFileModule(genesisConfig);

View File

@@ -47,6 +47,7 @@ dependencies {
api project(':consensus:clique')
api project(':consensus:common')
api project(':consensus:ibft')
api project(':consensus:ibftlegacy')
api project(':consensus:merge')
api project(':consensus:qbft')
api project(':consensus:qbft-core')

View File

@@ -30,6 +30,7 @@ include 'config'
include 'consensus:clique'
include 'consensus:common'
include 'consensus:ibft'
include 'consensus:ibftlegacy'
include 'consensus:merge'
include 'consensus:qbft'
include 'consensus:qbft-core'