diff --git a/command/bridge/deploy/deploy.go b/command/bridge/deploy/deploy.go index c5c6ec6c3b..b07edaada0 100644 --- a/command/bridge/deploy/deploy.go +++ b/command/bridge/deploy/deploy.go @@ -15,7 +15,7 @@ import ( "github.com/0xPolygon/polygon-edge/command" "github.com/0xPolygon/polygon-edge/command/bridge/helper" cmdHelper "github.com/0xPolygon/polygon-edge/command/helper" - "github.com/0xPolygon/polygon-edge/consensus/polybft" + polycfg "github.com/0xPolygon/polygon-edge/consensus/polybft/config" "github.com/0xPolygon/polygon-edge/consensus/polybft/contractsapi" "github.com/0xPolygon/polygon-edge/consensus/polybft/validator" "github.com/0xPolygon/polygon-edge/crypto" @@ -28,11 +28,11 @@ var ( params deployParams // consensusCfg contains consensus protocol configuration parameters - consensusCfg polybft.PolyBFTConfig + consensusCfg polycfg.PolyBFT ) type deploymentResultInfo struct { - BridgeCfg *polybft.BridgeConfig + BridgeCfg *polycfg.Bridge CommandResults []command.CommandResult } @@ -194,7 +194,7 @@ func runCommand(cmd *cobra.Command, _ []string) { } // write updated consensus configuration - chainConfig.Params.Engine[polybft.ConsensusName] = consensusCfg + chainConfig.Params.Engine[polycfg.ConsensusName] = consensusCfg if err := cmdHelper.WriteGenesisConfigToDisk(chainConfig, params.genesisPath); err != nil { outputter.SetError(fmt.Errorf("failed to save chain configuration bridge data: %w", err)) @@ -241,7 +241,7 @@ func deployContracts( var ( internalChainID = chainCfg.Params.ChainID - bridgeConfig = &polybft.BridgeConfig{JSONRPCEndpoint: params.externalRPCAddress} + bridgeConfig = &polycfg.Bridge{JSONRPCEndpoint: params.externalRPCAddress} externalContracts []*contract internalContracts []*contract ) diff --git a/command/bridge/deploy/deploy_test.go b/command/bridge/deploy/deploy_test.go index 9bb06b5dc7..274fa86367 100644 --- a/command/bridge/deploy/deploy_test.go +++ b/command/bridge/deploy/deploy_test.go @@ -14,7 +14,7 @@ import ( "github.com/0xPolygon/polygon-edge/chain" "github.com/0xPolygon/polygon-edge/command" "github.com/0xPolygon/polygon-edge/command/bridge/helper" - "github.com/0xPolygon/polygon-edge/consensus/polybft" + polycfg "github.com/0xPolygon/polygon-edge/consensus/polybft/config" "github.com/0xPolygon/polygon-edge/consensus/polybft/validator" "github.com/0xPolygon/polygon-edge/types" ) @@ -41,8 +41,8 @@ func TestDeployContracts_NoPanics(t *testing.T) { outputter := command.InitializeOutputter(GetCommand()) params.proxyContractsAdmin = "0x5aaeb6053f3e94c9b9a09f33669435e7ef1beaed" - consensusCfg = polybft.PolyBFTConfig{ - NativeTokenConfig: &polybft.TokenConfig{ + consensusCfg = polycfg.PolyBFT{ + NativeTokenConfig: &polycfg.Token{ Name: "Test", Symbol: "TST", Decimals: 18, diff --git a/command/bridge/deploy/external_contracts.go b/command/bridge/deploy/external_contracts.go index 45e3508a83..8c4a8ef3b2 100644 --- a/command/bridge/deploy/external_contracts.go +++ b/command/bridge/deploy/external_contracts.go @@ -5,7 +5,7 @@ import ( "math/big" "github.com/0xPolygon/polygon-edge/command" - "github.com/0xPolygon/polygon-edge/consensus/polybft" + polycfg "github.com/0xPolygon/polygon-edge/consensus/polybft/config" "github.com/0xPolygon/polygon-edge/consensus/polybft/contractsapi" "github.com/0xPolygon/polygon-edge/consensus/polybft/validator" "github.com/0xPolygon/polygon-edge/contracts" @@ -16,7 +16,7 @@ import ( ) // initExternalContracts initializes the external contracts -func initExternalContracts(bridgeCfg *polybft.BridgeConfig, +func 
initExternalContracts(bridgeCfg *polycfg.Bridge, externalChainClient *jsonrpc.EthClient, externalChainID *big.Int) ([]*contract, error) { externalContracts := make([]*contract, 0) @@ -35,7 +35,7 @@ func initExternalContracts(bridgeCfg *polybft.BridgeConfig, name: getContractName(false, erc20Name), hasProxy: false, artifact: contractsapi.RootERC20, - addressPopulatorFn: func(bc *polybft.BridgeConfig, dcr []*deployContractResult) { + addressPopulatorFn: func(bc *polycfg.Bridge, dcr []*deployContractResult) { bc.ExternalNativeERC20Addr = dcr[0].Address }, }) @@ -47,7 +47,7 @@ func initExternalContracts(bridgeCfg *polybft.BridgeConfig, name: blsName, hasProxy: true, artifact: contractsapi.BLS, - addressPopulatorFn: func(bc *polybft.BridgeConfig, dcr []*deployContractResult) { + addressPopulatorFn: func(bc *polycfg.Bridge, dcr []*deployContractResult) { bc.BLSAddress = dcr[1].Address }, }) @@ -57,7 +57,7 @@ func initExternalContracts(bridgeCfg *polybft.BridgeConfig, name: bn256G2Name, hasProxy: true, artifact: contractsapi.BLS256, - addressPopulatorFn: func(bc *polybft.BridgeConfig, dcr []*deployContractResult) { + addressPopulatorFn: func(bc *polycfg.Bridge, dcr []*deployContractResult) { bc.BN256G2Address = dcr[1].Address }, }) @@ -67,12 +67,12 @@ func initExternalContracts(bridgeCfg *polybft.BridgeConfig, name: getContractName(false, gatewayName), hasProxy: true, artifact: contractsapi.Gateway, - addressPopulatorFn: func(bc *polybft.BridgeConfig, dcr []*deployContractResult) { + addressPopulatorFn: func(bc *polycfg.Bridge, dcr []*deployContractResult) { bc.ExternalGatewayAddr = dcr[1].Address }, initializeFn: func(fmt command.OutputFormatter, relayer txrelayer.TxRelayer, genesisValidators []*validator.GenesisValidator, - config *polybft.BridgeConfig, + config *polycfg.Bridge, key crypto.Key, destinationChainID int64) error { validatorSet, err := getValidatorSet(fmt, genesisValidators) @@ -96,12 +96,12 @@ func initExternalContracts(bridgeCfg *polybft.BridgeConfig, name: getContractName(false, erc20PredicateName), hasProxy: true, artifact: contractsapi.RootERC20Predicate, - addressPopulatorFn: func(bc *polybft.BridgeConfig, dcr []*deployContractResult) { + addressPopulatorFn: func(bc *polycfg.Bridge, dcr []*deployContractResult) { bc.ExternalERC20PredicateAddr = dcr[1].Address }, initializeFn: func(fmt command.OutputFormatter, relayer txrelayer.TxRelayer, genesisValidators []*validator.GenesisValidator, - config *polybft.BridgeConfig, + config *polycfg.Bridge, key crypto.Key, destinationChainID int64) error { input := &contractsapi.InitializeRootERC20PredicateFn{ @@ -123,12 +123,12 @@ func initExternalContracts(bridgeCfg *polybft.BridgeConfig, name: getContractName(false, erc20MintablePredicateName), hasProxy: true, artifact: contractsapi.ChildERC20Predicate, - addressPopulatorFn: func(bc *polybft.BridgeConfig, dcr []*deployContractResult) { + addressPopulatorFn: func(bc *polycfg.Bridge, dcr []*deployContractResult) { bc.ExternalMintableERC20PredicateAddr = dcr[1].Address }, initializeFn: func(fmt command.OutputFormatter, relayer txrelayer.TxRelayer, genesisValidators []*validator.GenesisValidator, - config *polybft.BridgeConfig, + config *polycfg.Bridge, key crypto.Key, destinationChainID int64) error { input := &contractsapi.InitializeChildERC20PredicateFn{ @@ -148,7 +148,7 @@ func initExternalContracts(bridgeCfg *polybft.BridgeConfig, name: getContractName(false, erc20TemplateName), hasProxy: false, artifact: contractsapi.ChildERC20, - addressPopulatorFn: func(bc *polybft.BridgeConfig, dcr 
[]*deployContractResult) { + addressPopulatorFn: func(bc *polycfg.Bridge, dcr []*deployContractResult) { bc.ExternalERC20Addr = dcr[0].Address }, }) @@ -158,12 +158,12 @@ func initExternalContracts(bridgeCfg *polybft.BridgeConfig, name: getContractName(false, erc721PredicateName), hasProxy: true, artifact: contractsapi.RootERC721Predicate, - addressPopulatorFn: func(bc *polybft.BridgeConfig, dcr []*deployContractResult) { + addressPopulatorFn: func(bc *polycfg.Bridge, dcr []*deployContractResult) { bc.ExternalERC721PredicateAddr = dcr[1].Address }, initializeFn: func(fmt command.OutputFormatter, relayer txrelayer.TxRelayer, genesisValidators []*validator.GenesisValidator, - config *polybft.BridgeConfig, + config *polycfg.Bridge, key crypto.Key, destinationChainID int64) error { input := &contractsapi.InitializeRootERC721PredicateFn{ @@ -183,12 +183,12 @@ func initExternalContracts(bridgeCfg *polybft.BridgeConfig, name: getContractName(false, erc721MintablePredicateName), hasProxy: true, artifact: contractsapi.ChildERC721Predicate, - addressPopulatorFn: func(bc *polybft.BridgeConfig, dcr []*deployContractResult) { + addressPopulatorFn: func(bc *polycfg.Bridge, dcr []*deployContractResult) { bc.ExternalMintableERC721PredicateAddr = dcr[1].Address }, initializeFn: func(fmt command.OutputFormatter, relayer txrelayer.TxRelayer, genesisValidators []*validator.GenesisValidator, - config *polybft.BridgeConfig, + config *polycfg.Bridge, key crypto.Key, destinationChainID int64) error { input := &contractsapi.InitializeChildERC721PredicateFn{ @@ -208,7 +208,7 @@ func initExternalContracts(bridgeCfg *polybft.BridgeConfig, name: getContractName(false, erc721TemplateName), hasProxy: false, artifact: contractsapi.ChildERC721, - addressPopulatorFn: func(bc *polybft.BridgeConfig, dcr []*deployContractResult) { + addressPopulatorFn: func(bc *polycfg.Bridge, dcr []*deployContractResult) { bc.ExternalERC721Addr = dcr[0].Address }, }) @@ -218,12 +218,12 @@ func initExternalContracts(bridgeCfg *polybft.BridgeConfig, name: getContractName(false, erc1155PredicateName), hasProxy: true, artifact: contractsapi.RootERC1155Predicate, - addressPopulatorFn: func(bc *polybft.BridgeConfig, dcr []*deployContractResult) { + addressPopulatorFn: func(bc *polycfg.Bridge, dcr []*deployContractResult) { bc.ExternalERC1155PredicateAddr = dcr[1].Address }, initializeFn: func(fmt command.OutputFormatter, relayer txrelayer.TxRelayer, genesisValidators []*validator.GenesisValidator, - config *polybft.BridgeConfig, + config *polycfg.Bridge, key crypto.Key, destinationChainID int64) error { input := &contractsapi.InitializeRootERC1155PredicateFn{ @@ -243,12 +243,12 @@ func initExternalContracts(bridgeCfg *polybft.BridgeConfig, name: getContractName(false, erc1155MintablePredicateName), hasProxy: true, artifact: contractsapi.ChildERC1155Predicate, - addressPopulatorFn: func(bc *polybft.BridgeConfig, dcr []*deployContractResult) { + addressPopulatorFn: func(bc *polycfg.Bridge, dcr []*deployContractResult) { bc.ExternalMintableERC1155PredicateAddr = dcr[1].Address }, initializeFn: func(fmt command.OutputFormatter, relayer txrelayer.TxRelayer, genesisValidators []*validator.GenesisValidator, - config *polybft.BridgeConfig, + config *polycfg.Bridge, key crypto.Key, destinationChainID int64) error { input := &contractsapi.InitializeChildERC1155PredicateFn{ @@ -268,7 +268,7 @@ func initExternalContracts(bridgeCfg *polybft.BridgeConfig, name: getContractName(false, erc1155TemplateName), hasProxy: false, artifact: contractsapi.ChildERC1155, - 
addressPopulatorFn: func(bc *polybft.BridgeConfig, dcr []*deployContractResult) { + addressPopulatorFn: func(bc *polycfg.Bridge, dcr []*deployContractResult) { bc.ExternalERC1155Addr = dcr[0].Address }, }) @@ -280,12 +280,12 @@ func initExternalContracts(bridgeCfg *polybft.BridgeConfig, name: bladeManagerName, artifact: contractsapi.BladeManager, hasProxy: true, - addressPopulatorFn: func(bc *polybft.BridgeConfig, dcr []*deployContractResult) { + addressPopulatorFn: func(bc *polycfg.Bridge, dcr []*deployContractResult) { bc.BladeManagerAddr = dcr[1].Address }, initializeFn: func(fmt command.OutputFormatter, relayer txrelayer.TxRelayer, genesisValidators []*validator.GenesisValidator, - config *polybft.BridgeConfig, + config *polycfg.Bridge, key crypto.Key, destinationChainID int64) error { gvs := make([]*contractsapi.GenesisAccount, len(genesisValidators)) @@ -316,7 +316,7 @@ func initExternalContracts(bridgeCfg *polybft.BridgeConfig, // populateExistingNativeTokenAddr checks whether given token is deployed on the provided address. // If it is, then its address is set to the bridge config, otherwise an error is returned func populateExistingNativeTokenAddr(eth *jsonrpc.EthClient, tokenAddr, tokenName string, - bridgeCfg *polybft.BridgeConfig) error { + bridgeCfg *polycfg.Bridge) error { addr := types.StringToAddress(tokenAddr) code, err := eth.GetCode(addr, jsonrpc.LatestBlockNumberOrHash) diff --git a/command/bridge/deploy/internal_contracts.go b/command/bridge/deploy/internal_contracts.go index 84d5126bca..cebb06a7eb 100644 --- a/command/bridge/deploy/internal_contracts.go +++ b/command/bridge/deploy/internal_contracts.go @@ -6,7 +6,7 @@ import ( "github.com/0xPolygon/polygon-edge/chain" "github.com/0xPolygon/polygon-edge/command" - "github.com/0xPolygon/polygon-edge/consensus/polybft" + polycfg "github.com/0xPolygon/polygon-edge/consensus/polybft/config" "github.com/0xPolygon/polygon-edge/consensus/polybft/contractsapi" "github.com/0xPolygon/polygon-edge/consensus/polybft/validator" "github.com/0xPolygon/polygon-edge/contracts" @@ -30,12 +30,12 @@ func initInternalContracts(chainCfg *chain.Chain) []*contract { name: getContractName(true, gatewayName), hasProxy: true, artifact: contractsapi.Gateway, - addressPopulatorFn: func(bc *polybft.BridgeConfig, dcr []*deployContractResult) { + addressPopulatorFn: func(bc *polycfg.Bridge, dcr []*deployContractResult) { bc.InternalGatewayAddr = dcr[1].Address }, initializeFn: func(fmt command.OutputFormatter, relayer txrelayer.TxRelayer, genesisValidators []*validator.GenesisValidator, - config *polybft.BridgeConfig, + config *polycfg.Bridge, key crypto.Key, _ int64) error { validatorSet, err := getValidatorSet(fmt, genesisValidators) @@ -64,12 +64,12 @@ func initInternalContracts(chainCfg *chain.Chain) []*contract { name: getContractName(true, erc20PredicateName), hasProxy: true, artifact: contractArtifact, - addressPopulatorFn: func(bc *polybft.BridgeConfig, dcr []*deployContractResult) { + addressPopulatorFn: func(bc *polycfg.Bridge, dcr []*deployContractResult) { bc.InternalERC20PredicateAddr = dcr[1].Address }, initializeFn: func(fmt command.OutputFormatter, relayer txrelayer.TxRelayer, genesisValidators []*validator.GenesisValidator, - config *polybft.BridgeConfig, + config *polycfg.Bridge, key crypto.Key, destinationChainID int64) error { var input contractsapi.FunctionAbi @@ -109,12 +109,12 @@ func initInternalContracts(chainCfg *chain.Chain) []*contract { name: getContractName(true, erc721PredicateName), hasProxy: true, artifact: 
contractArtifact, - addressPopulatorFn: func(bc *polybft.BridgeConfig, dcr []*deployContractResult) { + addressPopulatorFn: func(bc *polycfg.Bridge, dcr []*deployContractResult) { bc.InternalERC721PredicateAddr = dcr[1].Address }, initializeFn: func(fmt command.OutputFormatter, relayer txrelayer.TxRelayer, genesisValidators []*validator.GenesisValidator, - config *polybft.BridgeConfig, + config *polycfg.Bridge, key crypto.Key, destinationChainID int64) error { var input contractsapi.FunctionAbi @@ -152,12 +152,12 @@ func initInternalContracts(chainCfg *chain.Chain) []*contract { name: getContractName(true, erc1155PredicateName), hasProxy: true, artifact: contractArtifact, - addressPopulatorFn: func(bc *polybft.BridgeConfig, dcr []*deployContractResult) { + addressPopulatorFn: func(bc *polycfg.Bridge, dcr []*deployContractResult) { bc.InternalERC1155PredicateAddr = dcr[1].Address }, initializeFn: func(fmt command.OutputFormatter, relayer txrelayer.TxRelayer, genesisValidators []*validator.GenesisValidator, - config *polybft.BridgeConfig, + config *polycfg.Bridge, key crypto.Key, destinationChainID int64) error { var input contractsapi.FunctionAbi @@ -195,12 +195,12 @@ func initInternalContracts(chainCfg *chain.Chain) []*contract { name: getContractName(true, erc20MintablePredicateName), hasProxy: true, artifact: contractArtifact, - addressPopulatorFn: func(bc *polybft.BridgeConfig, dcr []*deployContractResult) { + addressPopulatorFn: func(bc *polycfg.Bridge, dcr []*deployContractResult) { bc.InternalMintableERC20PredicateAddr = dcr[1].Address }, initializeFn: func(fmt command.OutputFormatter, relayer txrelayer.TxRelayer, genesisValidators []*validator.GenesisValidator, - config *polybft.BridgeConfig, + config *polycfg.Bridge, key crypto.Key, destinationChainID int64) error { var input contractsapi.FunctionAbi @@ -238,12 +238,12 @@ func initInternalContracts(chainCfg *chain.Chain) []*contract { name: getContractName(true, erc721MintablePredicateName), hasProxy: true, artifact: contractArtifact, - addressPopulatorFn: func(bc *polybft.BridgeConfig, dcr []*deployContractResult) { + addressPopulatorFn: func(bc *polycfg.Bridge, dcr []*deployContractResult) { bc.InternalMintableERC721PredicateAddr = dcr[1].Address }, initializeFn: func(fmt command.OutputFormatter, relayer txrelayer.TxRelayer, genesisValidators []*validator.GenesisValidator, - config *polybft.BridgeConfig, + config *polycfg.Bridge, key crypto.Key, destinationChainID int64) error { var input contractsapi.FunctionAbi @@ -281,12 +281,12 @@ func initInternalContracts(chainCfg *chain.Chain) []*contract { name: getContractName(true, erc1155MintablePredicateName), hasProxy: true, artifact: contractArtifact, - addressPopulatorFn: func(bc *polybft.BridgeConfig, dcr []*deployContractResult) { + addressPopulatorFn: func(bc *polycfg.Bridge, dcr []*deployContractResult) { bc.InternalMintableERC1155PredicateAddr = dcr[1].Address }, initializeFn: func(fmt command.OutputFormatter, relayer txrelayer.TxRelayer, genesisValidators []*validator.GenesisValidator, - config *polybft.BridgeConfig, + config *polycfg.Bridge, key crypto.Key, destinationChainID int64) error { var input contractsapi.FunctionAbi @@ -320,7 +320,7 @@ func initInternalContracts(chainCfg *chain.Chain) []*contract { // preAllocateInternalPredicates pre-allocates internal predicates in genesis // if the command is run in bootstrap mode func preAllocateInternalPredicates(o command.OutputFormatter, internalContracts []*contract, - chainCfg *chain.Chain, bridgeCfg *polybft.BridgeConfig) 
error { + chainCfg *chain.Chain, bridgeCfg *polycfg.Bridge) error { predicateBaseProxyAddress := contracts.ChildBridgeContractsBaseAddress if consensusCfg.Bridge != nil { diff --git a/command/bridge/deploy/params.go b/command/bridge/deploy/params.go index 9e3e85a8b6..b15650b78d 100644 --- a/command/bridge/deploy/params.go +++ b/command/bridge/deploy/params.go @@ -7,7 +7,7 @@ import ( "time" "github.com/0xPolygon/polygon-edge/command/helper" - "github.com/0xPolygon/polygon-edge/consensus/polybft" + polycfg "github.com/0xPolygon/polygon-edge/consensus/polybft/config" ) const ( @@ -38,7 +38,7 @@ func (ip *deployParams) validateFlags() error { return fmt.Errorf("provided genesis path '%s' is invalid. Error: %w ", ip.genesisPath, err) } - consensusCfg, err = polybft.LoadPolyBFTConfig(ip.genesisPath) + consensusCfg, err = polycfg.LoadPolyBFTConfig(ip.genesisPath) if err != nil { return err } diff --git a/command/bridge/deploy/types.go b/command/bridge/deploy/types.go index 87bdc86287..4311509ebe 100644 --- a/command/bridge/deploy/types.go +++ b/command/bridge/deploy/types.go @@ -5,7 +5,7 @@ import ( "github.com/0xPolygon/polygon-edge/command" "github.com/0xPolygon/polygon-edge/command/bridge/helper" - "github.com/0xPolygon/polygon-edge/consensus/polybft" + polycfg "github.com/0xPolygon/polygon-edge/consensus/polybft/config" "github.com/0xPolygon/polygon-edge/consensus/polybft/validator" "github.com/0xPolygon/polygon-edge/contracts" "github.com/0xPolygon/polygon-edge/crypto" @@ -36,10 +36,10 @@ const ( erc1155TemplateName = "ERC1155Template" ) -type addressPopulator func(*polybft.BridgeConfig, []*deployContractResult) +type addressPopulator func(*polycfg.Bridge, []*deployContractResult) type initializer func(command.OutputFormatter, txrelayer.TxRelayer, []*validator.GenesisValidator, - *polybft.BridgeConfig, crypto.Key, int64) error + *polycfg.Bridge, crypto.Key, int64) error // contract represents a contract to be deployed type contract struct { @@ -52,7 +52,7 @@ type contract struct { // deploy deploys the contract and its proxy if it has one, and returns the deployment results func (c *contract) deploy( - bridgeCfg *polybft.BridgeConfig, + bridgeCfg *polycfg.Bridge, txRelayer txrelayer.TxRelayer, deployerKey crypto.Key, proxyAdmin types.Address) ([]*deployContractResult, error) { txn := helper.CreateTransaction(types.ZeroAddress, nil, c.artifact.Bytecode, nil, true) diff --git a/command/bridge/finalize/finalize.go b/command/bridge/finalize/finalize.go index 3eeed01690..e04349c193 100644 --- a/command/bridge/finalize/finalize.go +++ b/command/bridge/finalize/finalize.go @@ -12,7 +12,7 @@ import ( "github.com/0xPolygon/polygon-edge/command/genesis" "github.com/0xPolygon/polygon-edge/command/helper" polybftsecrets "github.com/0xPolygon/polygon-edge/command/secrets/init" - "github.com/0xPolygon/polygon-edge/consensus/polybft" + polycfg "github.com/0xPolygon/polygon-edge/consensus/polybft/config" "github.com/0xPolygon/polygon-edge/consensus/polybft/contractsapi" "github.com/0xPolygon/polygon-edge/consensus/polybft/validator" "github.com/0xPolygon/polygon-edge/helper/hex" @@ -111,7 +111,7 @@ func runCommand(cmd *cobra.Command, _ []string) error { return fmt.Errorf("failed to read chain configuration: %w", err) } - consensusConfig, err := polybft.GetPolyBFTConfig(chainConfig.Params) + consensusConfig, err := polycfg.GetPolyBFTConfig(chainConfig.Params) if err != nil { return fmt.Errorf("failed to retrieve consensus configuration: %w", err) } @@ -200,7 +200,7 @@ func runCommand(cmd *cobra.Command, _ 
[]string) error { } chainConfig.Genesis.ExtraData = genesisExtraData - chainConfig.Params.Engine[polybft.ConsensusName] = consensusConfig + chainConfig.Params.Engine[polycfg.ConsensusName] = consensusConfig // save updated stake and genesis extra to genesis file on disk if err := helper.WriteGenesisConfigToDisk(chainConfig, params.genesisPath); err != nil { diff --git a/command/bridge/helper/utils.go b/command/bridge/helper/utils.go index 8e30596bec..ad35beffb6 100644 --- a/command/bridge/helper/utils.go +++ b/command/bridge/helper/utils.go @@ -13,8 +13,8 @@ import ( "github.com/docker/go-connections/nat" polybftsecrets "github.com/0xPolygon/polygon-edge/command/secrets/init" - "github.com/0xPolygon/polygon-edge/consensus/polybft" "github.com/0xPolygon/polygon-edge/consensus/polybft/contractsapi" + systemstate "github.com/0xPolygon/polygon-edge/consensus/polybft/system_state" polybftWallet "github.com/0xPolygon/polygon-edge/consensus/polybft/wallet" "github.com/0xPolygon/polygon-edge/contracts" "github.com/0xPolygon/polygon-edge/crypto" @@ -43,10 +43,8 @@ const ( var ( ErrExternalChainNotFound = errors.New("external chain not found") - errTestModeSecrets = errors.New("external chain test mode does not imply specifying secrets parameters") - - ErrNoAddressesProvided = errors.New("no addresses provided") - ErrInconsistentLength = errors.New("addresses and amounts must be equal length") + ErrNoAddressesProvided = errors.New("no addresses provided") + ErrInconsistentLength = errors.New("addresses and amounts must be equal length") externalChainAccountKey *crypto.ECDSAKey ) @@ -156,7 +154,7 @@ func GetECDSAKey(privateKey, accountDir, accountConfig string) (crypto.Key, erro // GetValidatorInfo queries SupernetManager smart contract on root // and retrieves validator info for given address func GetValidatorInfo(validatorAddr types.Address, supernetManagerAddr, stakeManagerAddr types.Address, - txRelayer txrelayer.TxRelayer) (*polybft.ValidatorInfo, error) { + txRelayer txrelayer.TxRelayer) (*systemstate.ValidatorInfo, error) { caller := contracts.SystemCaller getValidatorMethod := contractsapi.StakeManager.Abi.GetMethod("stakeOf") @@ -191,7 +189,7 @@ func GetValidatorInfo(validatorAddr types.Address, supernetManagerAddr, stakeMan } //nolint:forcetypeassert - validatorInfo := &polybft.ValidatorInfo{ + validatorInfo := &systemstate.ValidatorInfo{ Address: validatorAddr, IsActive: innerMap["isActive"].(bool), IsWhitelisted: innerMap["isWhitelisted"].(bool), diff --git a/command/bridge/premine/premine.go b/command/bridge/premine/premine.go index 730bd44114..aafb83c06d 100644 --- a/command/bridge/premine/premine.go +++ b/command/bridge/premine/premine.go @@ -10,7 +10,7 @@ import ( bridgeHelper "github.com/0xPolygon/polygon-edge/command/bridge/helper" "github.com/0xPolygon/polygon-edge/command/helper" polybftsecrets "github.com/0xPolygon/polygon-edge/command/secrets/init" - "github.com/0xPolygon/polygon-edge/consensus/polybft" + polycfg "github.com/0xPolygon/polygon-edge/consensus/polybft/config" "github.com/0xPolygon/polygon-edge/consensus/polybft/contractsapi" "github.com/0xPolygon/polygon-edge/txrelayer" "github.com/0xPolygon/polygon-edge/types" @@ -131,7 +131,7 @@ func runCommand(cmd *cobra.Command, _ []string) error { return fmt.Errorf("failed to read chain configuration: %w", err) } - consensusConfig, err := polybft.GetPolyBFTConfig(chainConfig.Params) + consensusConfig, err := polycfg.GetPolyBFTConfig(chainConfig.Params) if err != nil { return fmt.Errorf("failed to retrieve consensus 
configuration: %w", err) } diff --git a/command/genesis/params.go b/command/genesis/params.go index cb0d734e6d..98d93e13f8 100644 --- a/command/genesis/params.go +++ b/command/genesis/params.go @@ -10,7 +10,7 @@ import ( "github.com/0xPolygon/polygon-edge/chain" "github.com/0xPolygon/polygon-edge/command" "github.com/0xPolygon/polygon-edge/command/helper" - "github.com/0xPolygon/polygon-edge/consensus/polybft" + polycfg "github.com/0xPolygon/polygon-edge/consensus/polybft/config" "github.com/0xPolygon/polygon-edge/server" "github.com/0xPolygon/polygon-edge/types" ) @@ -50,14 +50,12 @@ var ( errInvalidEpochSize = errors.New("epoch size must be greater than 1") errRewardWalletAmountZero = errors.New("reward wallet amount can not be zero or negative") errReserveAccMustBePremined = errors.New("it is mandatory to premine reserve account (0x0 address)") - errBlockTrackerPollInterval = errors.New("block tracker poll interval must be greater than 0") errBaseFeeChangeDenomZero = errors.New("base fee change denominator must be greater than 0") errBaseFeeEMZero = errors.New("base fee elasticity multiplier must be greater than 0") errBaseFeeZero = errors.New("base fee must be greater than 0") errRewardWalletNotDefined = errors.New("reward wallet address must be defined") errRewardWalletZero = errors.New("reward wallet address must not be zero address") errInvalidVotingPeriod = errors.New("voting period can not be zero") - errStakeTokenIsZeroAddress = errors.New("stake token address must not be zero address") ) type genesisParams struct { @@ -110,7 +108,7 @@ type genesisParams struct { bridgeBlockListEnabled []string nativeTokenConfigRaw string - nativeTokenConfig *polybft.TokenConfig + nativeTokenConfig *polycfg.Token premineInfos []*helper.PremineInfo stakeInfos map[types.Address]*big.Int @@ -308,16 +306,6 @@ func (p *genesisParams) parsePremineInfo() error { return nil } -// validateBlockTrackerPollInterval validates block tracker block interval -// which can not be 0 -func (p *genesisParams) validateBlockTrackerPollInterval() error { - if p.blockTrackerPollInterval == 0 { - return helper.ErrBlockTrackerPollInterval - } - - return nil -} - func (p *genesisParams) validateGenesisBaseFeeConfig() error { if p.baseFeeConfig == "" { return nil diff --git a/command/genesis/params_test.go b/command/genesis/params_test.go index 018ae79783..cd4a9d5b3d 100644 --- a/command/genesis/params_test.go +++ b/command/genesis/params_test.go @@ -9,7 +9,7 @@ import ( "github.com/0xPolygon/polygon-edge/command" "github.com/0xPolygon/polygon-edge/command/helper" - "github.com/0xPolygon/polygon-edge/consensus/polybft" + polycfg "github.com/0xPolygon/polygon-edge/consensus/polybft/config" "github.com/0xPolygon/polygon-edge/types" ) @@ -19,13 +19,13 @@ func Test_extractNativeTokenMetadata(t *testing.T) { cases := []struct { name string rawConfig string - expectedCfg *polybft.TokenConfig + expectedCfg *polycfg.Token expectErr bool }{ { name: "default token config", rawConfig: "", - expectedCfg: polybft.DefaultTokenConfig, + expectedCfg: polycfg.DefaultTokenConfig, expectErr: false, }, { @@ -51,7 +51,7 @@ func Test_extractNativeTokenMetadata(t *testing.T) { { name: "mintable valid config", rawConfig: "MyToken:MTK:9:true", - expectedCfg: &polybft.TokenConfig{ + expectedCfg: &polycfg.Token{ Name: "MyToken", Symbol: "MTK", Decimals: 9, @@ -62,7 +62,7 @@ func Test_extractNativeTokenMetadata(t *testing.T) { { name: "non-mintable valid config", rawConfig: "MyToken:MTK:9:false:1", - expectedCfg: &polybft.TokenConfig{ + expectedCfg: 
&polycfg.Token{ Name: "MyToken", Symbol: "MTK", Decimals: 9, @@ -151,7 +151,7 @@ func Test_validatePremineInfo(t *testing.T) { t.Run(c.name, func(t *testing.T) { t.Parallel() - p := &genesisParams{premine: c.premineRaw, nativeTokenConfig: &polybft.TokenConfig{IsMintable: c.isTokenMintable}} + p := &genesisParams{premine: c.premineRaw, nativeTokenConfig: &polycfg.Token{IsMintable: c.isTokenMintable}} err := p.parsePremineInfo() if c.expectedParseErrMsg != "" { @@ -222,7 +222,7 @@ func Test_validateRewardWallet(t *testing.T) { p := &genesisParams{ rewardWallet: c.rewardWallet, epochReward: c.epochReward, - nativeTokenConfig: &polybft.TokenConfig{}, + nativeTokenConfig: &polycfg.Token{}, } err := p.validateRewardWalletAndToken() require.ErrorIs(t, err, c.expectValidateErr) diff --git a/command/genesis/polybft_params.go b/command/genesis/polybft_params.go index 40df7efb52..1a1047560b 100644 --- a/command/genesis/polybft_params.go +++ b/command/genesis/polybft_params.go @@ -15,7 +15,9 @@ import ( "github.com/0xPolygon/polygon-edge/command" "github.com/0xPolygon/polygon-edge/command/helper" "github.com/0xPolygon/polygon-edge/consensus/polybft" + polycfg "github.com/0xPolygon/polygon-edge/consensus/polybft/config" "github.com/0xPolygon/polygon-edge/consensus/polybft/contractsapi" + polytypes "github.com/0xPolygon/polygon-edge/consensus/polybft/types" "github.com/0xPolygon/polygon-edge/consensus/polybft/validator" "github.com/0xPolygon/polygon-edge/contracts" "github.com/0xPolygon/polygon-edge/helper/common" @@ -161,7 +163,7 @@ func (p *genesisParams) generateChainConfig(o command.OutputFormatter) error { proposalQuorum = proposalQuorumMax } - polyBftConfig := &polybft.PolyBFTConfig{ + polyBftConfig := &polycfg.PolyBFT{ InitialValidatorSet: initialValidators, BlockTime: common.Duration{Duration: p.blockTime}, EpochSize: p.epochSize, @@ -175,7 +177,7 @@ func (p *genesisParams) generateChainConfig(o command.OutputFormatter) error { MaxValidatorSetSize: p.maxNumValidators, CheckpointInterval: p.checkpointInterval, WithdrawalWaitPeriod: p.withdrawalWaitPeriod, - RewardConfig: &polybft.RewardsConfig{ + RewardConfig: &polycfg.Rewards{ TokenAddress: rewardTokenAddr, WalletAddress: walletPremineInfo.Address, WalletAmount: walletPremineInfo.Amount, @@ -184,7 +186,7 @@ func (p *genesisParams) generateChainConfig(o command.OutputFormatter) error { BlockTrackerPollInterval: common.Duration{Duration: p.blockTrackerPollInterval}, ProxyContractsAdmin: types.StringToAddress(p.proxyContractsAdmin), BladeAdmin: types.StringToAddress(p.bladeAdmin), - GovernanceConfig: &polybft.GovernanceConfig{ + GovernanceConfig: &polycfg.Governance{ VotingDelay: voteDelay, VotingPeriod: votingPeriod, ProposalThreshold: proposalThreshold, @@ -196,7 +198,7 @@ func (p *genesisParams) generateChainConfig(o command.OutputFormatter) error { ForkParamsAddr: contracts.ForkParamsContract, }, StakeTokenAddr: p.stakeTokenAddr, - Bridge: make(map[uint64]*polybft.BridgeConfig), + Bridge: make(map[uint64]*polycfg.Bridge), } // Disable london hardfork if burn contract address is not provided @@ -296,7 +298,7 @@ func (p *genesisParams) generateChainConfig(o command.OutputFormatter) error { Alloc: allocs, ExtraData: genesisExtraData, GasUsed: command.DefaultGenesisGasUsed, - Mixhash: polybft.PolyBFTMixDigest, + Mixhash: polytypes.PolyBFTMixDigest, } if p.isBurnContractEnabled() { @@ -671,7 +673,7 @@ func (p *genesisParams) isBurnContractEnabled() bool { // extractNativeTokenMetadata parses provided native token metadata (such as name, symbol and 
decimals count) func (p *genesisParams) extractNativeTokenMetadata() error { - tokenConfig, err := polybft.ParseRawTokenConfig(p.nativeTokenConfigRaw) + tokenConfig, err := polycfg.ParseRawTokenConfig(p.nativeTokenConfigRaw) if err != nil { return err } diff --git a/command/genesis/utils.go b/command/genesis/utils.go index eaf67484e6..76e1bda06a 100644 --- a/command/genesis/utils.go +++ b/command/genesis/utils.go @@ -12,8 +12,9 @@ import ( "strings" "github.com/0xPolygon/polygon-edge/command" - "github.com/0xPolygon/polygon-edge/consensus/polybft" "github.com/0xPolygon/polygon-edge/consensus/polybft/bitmap" + polycfg "github.com/0xPolygon/polygon-edge/consensus/polybft/config" + polytypes "github.com/0xPolygon/polygon-edge/consensus/polybft/types" "github.com/0xPolygon/polygon-edge/consensus/polybft/validator" "github.com/0xPolygon/polygon-edge/consensus/polybft/wallet" "github.com/0xPolygon/polygon-edge/helper/common" @@ -66,7 +67,7 @@ func verifyGenesisExistence(genesisPath string) *GenesisGenError { } // parseBurnContractInfo parses provided burn contract information and returns burn contract block and address -func parseBurnContractInfo(burnContractInfoRaw string) (*polybft.BurnContractInfo, error) { +func parseBurnContractInfo(burnContractInfoRaw string) (*polycfg.BurnContractInfo, error) { // :
[:] burnContractParts := strings.Split(burnContractInfoRaw, ":") if len(burnContractParts) < 2 || len(burnContractParts) > 3 { @@ -88,7 +89,7 @@ func parseBurnContractInfo(burnContractInfoRaw string) (*polybft.BurnContractInf } if len(burnContractParts) == 2 { - return &polybft.BurnContractInfo{ + return &polycfg.BurnContractInfo{ BlockNumber: blockNum, Address: contractAddr, DestinationAddress: types.ZeroAddress, @@ -102,7 +103,7 @@ func parseBurnContractInfo(burnContractInfoRaw string) (*polybft.BurnContractInf return nil, fmt.Errorf("failed to parse burn destination address %s: %w", destinationAddrRaw, err) } - return &polybft.BurnContractInfo{ + return &polycfg.BurnContractInfo{ BlockNumber: blockNum, Address: contractAddr, DestinationAddress: destinationAddr, @@ -268,7 +269,7 @@ func GenerateExtraDataPolyBft(validators []*validator.ValidatorMetadata) ([]byte Removed: bitmap.Bitmap{}, } - extra := polybft.Extra{Validators: delta, BlockMetaData: &polybft.BlockMetaData{}} + extra := polytypes.Extra{Validators: delta, BlockMetaData: &polytypes.BlockMetaData{}} return extra.MarshalRLPTo(nil), nil } diff --git a/command/validator/helper/helper.go b/command/validator/helper/helper.go index 6fb8b1c335..43a4f01138 100644 --- a/command/validator/helper/helper.go +++ b/command/validator/helper/helper.go @@ -6,8 +6,8 @@ import ( "os" polybftsecrets "github.com/0xPolygon/polygon-edge/command/secrets/init" - "github.com/0xPolygon/polygon-edge/consensus/polybft" "github.com/0xPolygon/polygon-edge/consensus/polybft/contractsapi" + systemstate "github.com/0xPolygon/polygon-edge/consensus/polybft/system_state" "github.com/0xPolygon/polygon-edge/consensus/polybft/wallet" "github.com/0xPolygon/polygon-edge/contracts" "github.com/0xPolygon/polygon-edge/helper/common" @@ -58,7 +58,7 @@ func GetAccountFromDir(accountDir string) (*wallet.Account, error) { // GetValidatorInfo queries CustomSupernetManager, StakeManager and RewardPool smart contracts // to retrieve validator info for given address -func GetValidatorInfo(validatorAddr types.Address, childRelayer txrelayer.TxRelayer) (*polybft.ValidatorInfo, error) { +func GetValidatorInfo(validatorAddr types.Address, childRelayer txrelayer.TxRelayer) (*systemstate.ValidatorInfo, error) { getValidatorMethod := contractsapi.StakeManager.Abi.GetMethod("getValidator") encode, err := getValidatorMethod.Encode([]interface{}{validatorAddr}) @@ -92,7 +92,7 @@ func GetValidatorInfo(validatorAddr types.Address, childRelayer txrelayer.TxRela } //nolint:forcetypeassert - validatorInfo := &polybft.ValidatorInfo{ + validatorInfo := &systemstate.ValidatorInfo{ Address: validatorAddr, IsActive: innerMap["isActive"].(bool), IsWhitelisted: innerMap["isWhitelisted"].(bool), diff --git a/consensus/polybft/block_builder.go b/consensus/polybft/blockchain/block_builder.go similarity index 85% rename from consensus/polybft/block_builder.go rename to consensus/polybft/blockchain/block_builder.go index 8c0eee2552..ceff2ee765 100644 --- a/consensus/polybft/block_builder.go +++ b/consensus/polybft/blockchain/block_builder.go @@ -1,4 +1,4 @@ -package polybft +package blockchain import ( "bytes" @@ -6,15 +6,13 @@ import ( "time" "github.com/0xPolygon/polygon-edge/consensus" + polytypes "github.com/0xPolygon/polygon-edge/consensus/polybft/types" "github.com/0xPolygon/polygon-edge/state" "github.com/0xPolygon/polygon-edge/txpool" "github.com/0xPolygon/polygon-edge/types" hcf "github.com/hashicorp/go-hclog" ) -//nolint:godox -// TODO: Add opentracing (to be fixed in EVM-540) - // 
BlockBuilderParams are fields for the block that cannot be changed type BlockBuilderParams struct { // Parent block @@ -36,21 +34,21 @@ type BlockBuilderParams struct { Logger hcf.Logger // txPoolInterface implementation - TxPool txPoolInterface + TxPool polytypes.TxPool // BaseFee is the base fee BaseFee uint64 } -func NewBlockBuilder(params *BlockBuilderParams) *BlockBuilder { - return &BlockBuilder{ +func NewBlockBuilder(params *BlockBuilderParams) polytypes.BlockBuilder { + return &BlockBuilderImpl{ params: params, } } -var _ blockBuilder = &BlockBuilder{} +var _ polytypes.BlockBuilder = &BlockBuilderImpl{} -type BlockBuilder struct { +type BlockBuilderImpl struct { // input params for the block params *BlockBuilderParams @@ -68,7 +66,7 @@ type BlockBuilder struct { } // Init initializes block builder before adding transactions and actual block building -func (b *BlockBuilder) Reset() error { +func (b *BlockBuilderImpl) Reset() error { // set the timestamp parentTime := time.Unix(int64(b.params.Parent.Timestamp), 0) headerTime := parentTime.Add(b.params.BlockTime) @@ -104,12 +102,12 @@ func (b *BlockBuilder) Reset() error { } // Block returns the built block if nil, it is not built yet -func (b *BlockBuilder) Block() *types.Block { +func (b *BlockBuilderImpl) Block() *types.Block { return b.block } // Build creates the state and the final block -func (b *BlockBuilder) Build(handler func(h *types.Header)) (*types.FullBlock, error) { +func (b *BlockBuilderImpl) Build(handler func(h *types.Header)) (*types.FullBlock, error) { if handler != nil { handler(b.header) } @@ -139,7 +137,7 @@ func (b *BlockBuilder) Build(handler func(h *types.Header)) (*types.FullBlock, e } // WriteTx applies given transaction to the state. If transaction apply fails, it reverts the saved snapshot. 
-func (b *BlockBuilder) WriteTx(tx *types.Transaction) error { +func (b *BlockBuilderImpl) WriteTx(tx *types.Transaction) error { if tx.Gas() > b.params.GasLimit { b.params.Logger.Info("Transaction gas limit exceedes block gas limit", "hash", tx.Hash(), "tx gas limit", tx.Gas(), "block gas limt", b.params.GasLimit) @@ -157,7 +155,7 @@ func (b *BlockBuilder) WriteTx(tx *types.Transaction) error { } // Fill fills the block with transactions from the txpool -func (b *BlockBuilder) Fill() { +func (b *BlockBuilderImpl) Fill() { var buf bytes.Buffer blockTimer := time.NewTimer(b.params.BlockTime) @@ -196,11 +194,11 @@ write: } // Receipts returns the collection of transaction receipts for given block -func (b *BlockBuilder) Receipts() []*types.Receipt { +func (b *BlockBuilderImpl) Receipts() []*types.Receipt { return b.state.Receipts() } -func (b *BlockBuilder) writeTxPoolTransaction(tx *types.Transaction) (bool, error) { +func (b *BlockBuilderImpl) writeTxPoolTransaction(tx *types.Transaction) (bool, error) { if tx == nil { return true, nil } @@ -227,6 +225,6 @@ func (b *BlockBuilder) writeTxPoolTransaction(tx *types.Transaction) (bool, erro } // GetState returns Transition reference -func (b *BlockBuilder) GetState() *state.Transition { +func (b *BlockBuilderImpl) GetState() *state.Transition { return b.state } diff --git a/consensus/polybft/block_builder_test.go b/consensus/polybft/blockchain/block_builder_test.go similarity index 95% rename from consensus/polybft/block_builder_test.go rename to consensus/polybft/blockchain/block_builder_test.go index 29efff6c11..2f8820cf11 100644 --- a/consensus/polybft/block_builder_test.go +++ b/consensus/polybft/blockchain/block_builder_test.go @@ -1,4 +1,4 @@ -package polybft +package blockchain import ( "math/big" @@ -6,6 +6,7 @@ import ( "time" "github.com/0xPolygon/polygon-edge/chain" + "github.com/0xPolygon/polygon-edge/consensus/polybft/helpers" "github.com/0xPolygon/polygon-edge/consensus/polybft/wallet" "github.com/0xPolygon/polygon-edge/crypto" "github.com/0xPolygon/polygon-edge/helper/common" @@ -75,7 +76,7 @@ func TestBlockBuilder_BuildBlockTxOneFailedTxAndOneTakesTooMuchGas(t *testing.T) // Gas Limit is important to be high for tx pool parentHeader := &types.Header{StateRoot: hash, GasLimit: 1e15} - txPool := &txPoolMock{} + txPool := &helpers.TxPoolMock{} txPool.On("Prepare").Once() for i, acc := range accounts { @@ -113,7 +114,7 @@ func TestBlockBuilder_BuildBlockTxOneFailedTxAndOneTakesTooMuchGas(t *testing.T) } } - bb := NewBlockBuilder(&BlockBuilderParams{ + b := NewBlockBuilder(&BlockBuilderParams{ BlockTime: time.Millisecond * 200, Parent: parentHeader, Coinbase: types.ZeroAddress, @@ -123,6 +124,8 @@ func TestBlockBuilder_BuildBlockTxOneFailedTxAndOneTakesTooMuchGas(t *testing.T) Logger: logger, }) + bb := b.(*BlockBuilderImpl) + require.NoError(t, bb.Reset()) bb.Fill() diff --git a/consensus/polybft/blockchain_wrapper.go b/consensus/polybft/blockchain/blockchain_wrapper.go similarity index 60% rename from consensus/polybft/blockchain_wrapper.go rename to consensus/polybft/blockchain/blockchain_wrapper.go index 09b290f5d9..106a755079 100644 --- a/consensus/polybft/blockchain_wrapper.go +++ b/consensus/polybft/blockchain/blockchain_wrapper.go @@ -1,4 +1,4 @@ -package polybft +package blockchain import ( "errors" @@ -10,6 +10,9 @@ import ( "github.com/0xPolygon/polygon-edge/blockchain" "github.com/0xPolygon/polygon-edge/consensus" + "github.com/0xPolygon/polygon-edge/consensus/polybft/metrics" + systemstate 
"github.com/0xPolygon/polygon-edge/consensus/polybft/system_state" + polytypes "github.com/0xPolygon/polygon-edge/consensus/polybft/types" "github.com/0xPolygon/polygon-edge/contracts" "github.com/0xPolygon/polygon-edge/state" "github.com/0xPolygon/polygon-edge/types" @@ -25,69 +28,36 @@ var ( errSendTxnUnsupported = errors.New("system state does not support send transactions") ) -// blockchain is an interface that wraps the methods called on blockchain -type blockchainBackend interface { - // CurrentHeader returns the header of blockchain block head - CurrentHeader() *types.Header +var _ polytypes.Blockchain = &BlockchainWrapper{} - // CommitBlock commits a block to the chain. - CommitBlock(block *types.FullBlock) error - - // NewBlockBuilder is a factory method that returns a block builder on top of 'parent'. - NewBlockBuilder(parent *types.Header, coinbase types.Address, - txPool txPoolInterface, blockTime time.Duration, logger hclog.Logger) (blockBuilder, error) - - // ProcessBlock builds a final block from given 'block' on top of 'parent'. - ProcessBlock(parent *types.Header, block *types.Block) (*types.FullBlock, error) - - // GetStateProviderForBlock returns a reference to make queries to the state at 'block'. - GetStateProviderForBlock(block *types.Header) (contract.Provider, error) - - // GetStateProvider returns a reference to make queries to the provided state. - GetStateProvider(transition *state.Transition) contract.Provider - - // GetHeaderByNumber returns a reference to block header for the given block number. - GetHeaderByNumber(number uint64) (*types.Header, bool) - - // GetHeaderByHash returns a reference to block header for the given block hash - GetHeaderByHash(hash types.Hash) (*types.Header, bool) - - // GetSystemState creates a new instance of SystemState interface - GetSystemState(provider contract.Provider) SystemState - - // SubscribeEvents subscribes to blockchain events - SubscribeEvents() blockchain.Subscription - - // UnubscribeEvents unsubscribes from blockchain events - UnubscribeEvents(subscription blockchain.Subscription) - - // GetChainID returns chain id of the current blockchain - GetChainID() uint64 - - // GetReceiptsByHash retrieves receipts by hash - GetReceiptsByHash(hash types.Hash) ([]*types.Receipt, error) -} - -var _ blockchainBackend = &blockchainWrapper{} - -type blockchainWrapper struct { +type BlockchainWrapper struct { logger hclog.Logger executor *state.Executor blockchain *blockchain.Blockchain } +func NewBlockchainWrapper(logger hclog.Logger, + blockchain *blockchain.Blockchain, + executor *state.Executor) *BlockchainWrapper { + return &BlockchainWrapper{ + logger: logger, + executor: executor, + blockchain: blockchain, + } +} + // CurrentHeader returns the header of blockchain block head -func (p *blockchainWrapper) CurrentHeader() *types.Header { +func (p *BlockchainWrapper) CurrentHeader() *types.Header { return p.blockchain.Header() } // CommitBlock commits a block to the chain -func (p *blockchainWrapper) CommitBlock(block *types.FullBlock) error { +func (p *BlockchainWrapper) CommitBlock(block *types.FullBlock) error { return p.blockchain.WriteFullBlock(block, consensusSource) } // ProcessBlock builds a final block from given 'block' on top of 'parent' -func (p *blockchainWrapper) ProcessBlock(parent *types.Header, block *types.Block) (*types.FullBlock, error) { +func (p *BlockchainWrapper) ProcessBlock(parent *types.Header, block *types.Block) (*types.FullBlock, error) { header := block.Header.Copy() start := time.Now().UTC() @@ 
-101,7 +71,7 @@ func (p *blockchainWrapper) ProcessBlock(parent *types.Header, block *types.Bloc return nil, fmt.Errorf("failed to commit the state changes: %w", err) } - updateBlockExecutionMetric(start) + metrics.UpdateBlockExecutionMetric(start) if root != block.Header.StateRoot { return nil, fmt.Errorf("incorrect state root: (%s, %s)", root, block.Header.StateRoot) @@ -126,7 +96,7 @@ func (p *blockchainWrapper) ProcessBlock(parent *types.Header, block *types.Bloc } // GetStateProviderForBlock is an implementation of blockchainBackend interface -func (p *blockchainWrapper) GetStateProviderForBlock(header *types.Header) (contract.Provider, error) { +func (p *BlockchainWrapper) GetStateProviderForBlock(header *types.Header) (contract.Provider, error) { transition, err := p.executor.BeginTxn(header.StateRoot, header, types.ZeroAddress) if err != nil { return nil, err @@ -136,24 +106,24 @@ func (p *blockchainWrapper) GetStateProviderForBlock(header *types.Header) (cont } // GetStateProvider returns a reference to make queries to the provided state -func (p *blockchainWrapper) GetStateProvider(transition *state.Transition) contract.Provider { +func (p *BlockchainWrapper) GetStateProvider(transition *state.Transition) contract.Provider { return NewStateProvider(transition) } // GetHeaderByNumber is an implementation of blockchainBackend interface -func (p *blockchainWrapper) GetHeaderByNumber(number uint64) (*types.Header, bool) { +func (p *BlockchainWrapper) GetHeaderByNumber(number uint64) (*types.Header, bool) { return p.blockchain.GetHeaderByNumber(number) } // GetHeaderByHash is an implementation of blockchainBackend interface -func (p *blockchainWrapper) GetHeaderByHash(hash types.Hash) (*types.Header, bool) { +func (p *BlockchainWrapper) GetHeaderByHash(hash types.Hash) (*types.Header, bool) { return p.blockchain.GetHeaderByHash(hash) } // NewBlockBuilder is an implementation of blockchainBackend interface -func (p *blockchainWrapper) NewBlockBuilder( +func (p *BlockchainWrapper) NewBlockBuilder( parent *types.Header, coinbase types.Address, - txPool txPoolInterface, blockTime time.Duration, logger hclog.Logger) (blockBuilder, error) { + txPool polytypes.TxPool, blockTime time.Duration, logger hclog.Logger) (polytypes.BlockBuilder, error) { gasLimit, err := p.blockchain.CalculateGasLimit(parent.Number + 1) if err != nil { return nil, err @@ -172,23 +142,23 @@ func (p *blockchainWrapper) NewBlockBuilder( } // GetSystemState is an implementation of blockchainBackend interface -func (p *blockchainWrapper) GetSystemState(provider contract.Provider) SystemState { - return NewSystemState(contracts.EpochManagerContract, contracts.BridgeStorageContract, provider) +func (p *BlockchainWrapper) GetSystemState(provider contract.Provider) systemstate.SystemState { + return systemstate.NewSystemState(contracts.EpochManagerContract, contracts.BridgeStorageContract, provider) } -func (p *blockchainWrapper) SubscribeEvents() blockchain.Subscription { +func (p *BlockchainWrapper) SubscribeEvents() blockchain.Subscription { return p.blockchain.SubscribeEvents() } -func (p *blockchainWrapper) UnubscribeEvents(subscription blockchain.Subscription) { +func (p *BlockchainWrapper) UnubscribeEvents(subscription blockchain.Subscription) { p.blockchain.UnsubscribeEvents(subscription) } -func (p *blockchainWrapper) GetChainID() uint64 { +func (p *BlockchainWrapper) GetChainID() uint64 { return uint64(p.blockchain.Config().ChainID) } -func (p *blockchainWrapper) GetReceiptsByHash(hash types.Hash) 
([]*types.Receipt, error) { +func (p *BlockchainWrapper) GetReceiptsByHash(hash types.Hash) ([]*types.Receipt, error) { return p.blockchain.GetReceiptsByHash(hash) } diff --git a/consensus/polybft/bridge.go b/consensus/polybft/bridge/bridge.go similarity index 55% rename from consensus/polybft/bridge.go rename to consensus/polybft/bridge/bridge.go index ef8f848087..f7862ea4d7 100644 --- a/consensus/polybft/bridge.go +++ b/consensus/polybft/bridge/bridge.go @@ -1,61 +1,92 @@ -package polybft +package bridge import ( "fmt" + "github.com/0xPolygon/polygon-edge/consensus/polybft/config" + "github.com/0xPolygon/polygon-edge/consensus/polybft/state" + polytypes "github.com/0xPolygon/polygon-edge/consensus/polybft/types" "github.com/hashicorp/go-hclog" + "github.com/libp2p/go-libp2p/core/peer" bolt "go.etcd.io/bbolt" + "google.golang.org/protobuf/proto" ) +// topic is an interface for p2p message gossiping +type Topic interface { + Publish(obj proto.Message) error + Subscribe(handler func(obj interface{}, from peer.ID)) error +} + var _ Bridge = (*bridge)(nil) // bridge is a struct that manages different bridges type bridge struct { bridgeManagers map[uint64]BridgeManager - state *State + state *BridgeManagerStore internalChainID uint64 relayer BridgeEventRelayer + logger hclog.Logger } // Bridge is an interface that defines functions that a bridge must implement type Bridge interface { Close() - PostBlock(req *PostBlockRequest) error - PostEpoch(req *PostEpochRequest) error + PostBlock(req *polytypes.PostBlockRequest) error + PostEpoch(req *polytypes.PostEpochRequest) error BridgeBatch(pendingBlockNumber uint64) ([]*BridgeBatchSigned, error) } -var _ Bridge = (*dummyBridge)(nil) +var _ Bridge = (*DummyBridge)(nil) -type dummyBridge map[uint64]BridgeManager +type DummyBridge struct{} -func (d *dummyBridge) Close() {} -func (d *dummyBridge) PostBlock(req *PostBlockRequest) error { return nil } -func (d *dummyBridge) PostEpoch(req *PostEpochRequest) error { return nil } -func (d *dummyBridge) BridgeBatch(pendingBlockNumber uint64) ([]*BridgeBatchSigned, error) { +func (d *DummyBridge) Close() {} +func (d *DummyBridge) PostBlock(req *polytypes.PostBlockRequest) error { return nil } +func (d *DummyBridge) PostEpoch(req *polytypes.PostEpochRequest) error { return nil } +func (d *DummyBridge) BridgeBatch(pendingBlockNumber uint64) ([]*BridgeBatchSigned, error) { return nil, nil } -func (d *dummyBridge) InsertEpoch(epoch uint64, tx *bolt.Tx) error { return nil } +func (d *DummyBridge) InsertEpoch(epoch uint64, tx *bolt.Tx) error { return nil } // newBridge creates a new instance of bridge -func newBridge(runtime Runtime, - runtimeConfig *runtimeConfig, - eventProvider *EventProvider, +func NewBridge(runtime Runtime, + state *state.State, + runtimeConfig *config.Runtime, + bridgeTopic Topic, + eventProvider *state.EventProvider, + blockchain polytypes.Blockchain, logger hclog.Logger) (Bridge, error) { - internalChainID := runtimeConfig.blockchain.GetChainID() + if len(runtimeConfig.GenesisConfig.Bridge) == 0 { + return &DummyBridge{}, nil + } + + internalChainID := blockchain.GetChainID() + chainIDs := make([]uint64, 0, len(runtimeConfig.GenesisConfig.Bridge)+1) + chainIDs = append(chainIDs, internalChainID) + + for chainID := range runtimeConfig.GenesisConfig.Bridge { + chainIDs = append(chainIDs, chainID) + } + + store, err := newBridgeManagerStore(state.DB(), chainIDs) + if err != nil { + return nil, fmt.Errorf("error creating bridge manager store, err: %w", err) + } bridge := &bridge{ bridgeManagers: 
make(map[uint64]BridgeManager), - state: runtimeConfig.State, + state: store, internalChainID: internalChainID, + logger: logger, } for externalChainID, cfg := range runtimeConfig.GenesisConfig.Bridge { - bridgeManager := newBridgeManager(logger, runtimeConfig.State, &bridgeEventManagerConfig{ + bridgeManager := newBridgeManager(logger, store, &bridgeEventManagerConfig{ bridgeCfg: cfg, - topic: runtimeConfig.bridgeTopic, + topic: bridgeTopic, key: runtimeConfig.Key, - maxNumberOfEvents: maxNumberOfEvents, + maxNumberOfEvents: maxNumberOfBatchEvents, }, runtime, externalChainID, internalChainID) bridge.bridgeManagers[externalChainID] = bridgeManager @@ -64,7 +95,7 @@ func newBridge(runtime Runtime, } } - relayer, err := newBridgeEventRelayer(runtimeConfig, logger) + relayer, err := newBridgeEventRelayer(blockchain, runtimeConfig, logger) if err != nil { return nil, err } @@ -89,7 +120,7 @@ func (b *bridge) Close() { // PostBlock is a function executed on every block finalization (either by consensus or syncer) // and calls PostBlock in each bridge manager -func (b bridge) PostBlock(req *PostBlockRequest) error { +func (b bridge) PostBlock(req *polytypes.PostBlockRequest) error { for chainID, bridgeManager := range b.bridgeManagers { if err := bridgeManager.PostBlock(); err != nil { return fmt.Errorf("erorr bridge post block, chainID: %d, err: %w", chainID, err) @@ -101,8 +132,13 @@ func (b bridge) PostBlock(req *PostBlockRequest) error { // PostEpoch is a function executed on epoch ending / start of new epoch // and calls PostEpoch in each bridge manager -func (b *bridge) PostEpoch(req *PostEpochRequest) error { - if err := b.state.EpochStore.insertEpoch(req.NewEpochID, req.DBTx, b.internalChainID); err != nil { +func (b *bridge) PostEpoch(req *polytypes.PostEpochRequest) error { + if err := b.state.cleanEpochsFromDB(req.DBTx); err != nil { + // we just log this, as it is not critical + b.logger.Error("error cleaning epochs from db", "err", err) + } + + if err := b.state.insertEpoch(req.NewEpochID, req.DBTx, b.internalChainID); err != nil { return fmt.Errorf("error inserting epoch to internal, err: %w", err) } diff --git a/consensus/polybft/bridge_batch.go b/consensus/polybft/bridge/bridge_batch.go similarity index 93% rename from consensus/polybft/bridge_batch.go rename to consensus/polybft/bridge/bridge_batch.go index 45ce1bc88c..7d8b22a907 100644 --- a/consensus/polybft/bridge_batch.go +++ b/consensus/polybft/bridge/bridge_batch.go @@ -1,10 +1,12 @@ -package polybft +package bridge import ( "fmt" "github.com/0xPolygon/polygon-edge/bls" "github.com/0xPolygon/polygon-edge/consensus/polybft/contractsapi" + "github.com/0xPolygon/polygon-edge/consensus/polybft/helpers" + polytypes "github.com/0xPolygon/polygon-edge/consensus/polybft/types" "github.com/0xPolygon/polygon-edge/crypto" "github.com/0xPolygon/polygon-edge/types" ) @@ -60,7 +62,7 @@ var _ contractsapi.StateTransactionInput = &BridgeBatchSigned{} // BridgeBatchSigned encapsulates bridge batch with aggregated signatures type BridgeBatchSigned struct { MessageBatch *contractsapi.BridgeMessageBatch - AggSignature Signature + AggSignature polytypes.Signature } // Hash calculates hash value for BridgeBatchSigned object. 
@@ -107,7 +109,7 @@ func (bbs *BridgeBatchSigned) EncodeAbi() ([]byte, error) { // DecodeAbi contains logic for decoding given ABI data func (bbs *BridgeBatchSigned) DecodeAbi(txData []byte) error { - if len(txData) < abiMethodIDLength { + if len(txData) < helpers.AbiMethodIDLength { return fmt.Errorf("invalid batch data, len = %d", len(txData)) } @@ -125,7 +127,7 @@ func (bbs *BridgeBatchSigned) DecodeAbi(txData []byte) error { *bbs = BridgeBatchSigned{ MessageBatch: commit.Batch, - AggSignature: Signature{ + AggSignature: polytypes.Signature{ AggregatedSignature: signature, Bitmap: commit.Bitmap, }, diff --git a/consensus/polybft/bridge_batch_test.go b/consensus/polybft/bridge/bridge_batch_test.go similarity index 68% rename from consensus/polybft/bridge_batch_test.go rename to consensus/polybft/bridge/bridge_batch_test.go index d8a96c33f5..e3ce839fe8 100644 --- a/consensus/polybft/bridge_batch_test.go +++ b/consensus/polybft/bridge/bridge_batch_test.go @@ -1,4 +1,4 @@ -package polybft +package bridge import ( "math/big" @@ -6,6 +6,7 @@ import ( "github.com/0xPolygon/polygon-edge/bls" "github.com/0xPolygon/polygon-edge/consensus/polybft/contractsapi" + polytypes "github.com/0xPolygon/polygon-edge/consensus/polybft/types" "github.com/stretchr/testify/require" ) @@ -36,7 +37,7 @@ func TestBridgeBatch_BridgeBatchEncodeDecode(t *testing.T) { t.Parallel() const epoch, eventsCount = uint64(100), 11 - pendingBridgeBatch, _, _ := buildBridgeBatchAndBridgeEvents(t, eventsCount, epoch, uint64(2)) + pendingBridgeBatch, _, _ := BuildBridgeBatchAndBridgeEvents(t, eventsCount, epoch, uint64(2)) blsKey1, err := bls.GenerateBlsKey() require.NoError(t, err) @@ -46,10 +47,10 @@ func TestBridgeBatch_BridgeBatchEncodeDecode(t *testing.T) { data, err := pendingBridgeBatch.BridgeMessageBatch.EncodeAbi() require.NoError(t, err) - signature1, err := blsKey1.Sign(data, domain) + signature1, err := blsKey1.Sign(data, TestDomain) require.NoError(t, err) - signature2, err := blsKey2.Sign(data, domain) + signature2, err := blsKey2.Sign(data, TestDomain) require.NoError(t, err) signatures := bls.Signatures{signature1, signature2} @@ -59,7 +60,7 @@ func TestBridgeBatch_BridgeBatchEncodeDecode(t *testing.T) { expectedSignedBridgeBatchMsg := &BridgeBatchSigned{ MessageBatch: pendingBridgeBatch.BridgeMessageBatch, - AggSignature: Signature{ + AggSignature: polytypes.Signature{ Bitmap: []byte{5, 1}, AggregatedSignature: aggSig, }, @@ -86,39 +87,6 @@ func newTestBridgeBatchSigned(t *testing.T, sourceChainID, destinationChainID ui SourceChainID: new(big.Int).SetUint64(sourceChainID), DestinationChainID: new(big.Int).SetUint64(destinationChainID), }, - AggSignature: Signature{}, + AggSignature: polytypes.Signature{}, } } - -func buildBridgeBatchAndBridgeEvents(t *testing.T, bridgeMessageCount int, - epoch, startIdx uint64) (*PendingBridgeBatch, *BridgeBatchSigned, []*contractsapi.BridgeMsgEvent) { - t.Helper() - - bridgeMessageEvents := generateBridgeMessageEvents(t, bridgeMessageCount, startIdx) - pendingBridgeBatch, err := NewPendingBridgeBatch(epoch, bridgeMessageEvents) - require.NoError(t, err) - - blsKey, err := bls.GenerateBlsKey() - require.NoError(t, err) - - data, err := pendingBridgeBatch.BridgeMessageBatch.EncodeAbi() - require.NoError(t, err) - - signature, err := blsKey.Sign(data, domain) - require.NoError(t, err) - - signatures := bls.Signatures{signature} - - aggSig, err := signatures.Aggregate().Marshal() - require.NoError(t, err) - - bridgeBatchSigned := &BridgeBatchSigned{ - MessageBatch: 
pendingBridgeBatch.BridgeMessageBatch, - AggSignature: Signature{ - AggregatedSignature: aggSig, - Bitmap: []byte{}, - }, - } - - return pendingBridgeBatch, bridgeBatchSigned, bridgeMessageEvents -} diff --git a/consensus/polybft/bridge_event_relayer.go b/consensus/polybft/bridge/bridge_event_relayer.go similarity index 89% rename from consensus/polybft/bridge_event_relayer.go rename to consensus/polybft/bridge/bridge_event_relayer.go index c96eb80f63..cc51b3cff0 100644 --- a/consensus/polybft/bridge_event_relayer.go +++ b/consensus/polybft/bridge/bridge_event_relayer.go @@ -1,4 +1,4 @@ -package polybft +package bridge import ( "errors" @@ -8,7 +8,11 @@ import ( "path" "strings" + "github.com/0xPolygon/polygon-edge/consensus/polybft/config" "github.com/0xPolygon/polygon-edge/consensus/polybft/contractsapi" + "github.com/0xPolygon/polygon-edge/consensus/polybft/state" + systemstate "github.com/0xPolygon/polygon-edge/consensus/polybft/system_state" + polytypes "github.com/0xPolygon/polygon-edge/consensus/polybft/types" "github.com/0xPolygon/polygon-edge/consensus/polybft/wallet" "github.com/0xPolygon/polygon-edge/contracts" "github.com/0xPolygon/polygon-edge/crypto" @@ -29,10 +33,10 @@ var ( // BridgeEventRelayer is an interface that defines functions for bridge event relayer type BridgeEventRelayer interface { - EventSubscriber + state.EventSubscriber AddLog(chainID *big.Int, eventLog *ethgo.Log) error Close() - Start(runtimeCfg *runtimeConfig, eventProvider *EventProvider) error + Start(runtimeCfg *config.Runtime, eventProvider *state.EventProvider) error } var _ BridgeEventRelayer = (*dummyBridgeEventRelayer)(nil) @@ -48,7 +52,7 @@ func (d *dummyBridgeEventRelayer) ProcessLog(header *types.Header, log *ethgo.Lo return nil } func (d *dummyBridgeEventRelayer) Close() {} -func (d *dummyBridgeEventRelayer) Start(runtimeCfg *runtimeConfig, eventProvider *EventProvider) error { +func (d *dummyBridgeEventRelayer) Start(runtimeCfg *config.Runtime, eventProvider *state.EventProvider) error { return nil } @@ -58,15 +62,15 @@ type bridgeEventRelayerImpl struct { key crypto.Key logger hclog.Logger - state *BridgeMessageStore + state *BridgeManagerStore externalTxRelayers map[uint64]txrelayer.TxRelayer internalTxRelayer txrelayer.TxRelayer internalChainID *big.Int - blockchain blockchainBackend + blockchain polytypes.Blockchain - bridgeConfig map[uint64]*BridgeConfig + bridgeConfig map[uint64]*config.Bridge eventTrackers []*tracker.EventTracker eventCh chan contractsapi.StructAbi @@ -76,18 +80,19 @@ type bridgeEventRelayerImpl struct { // newBridgeEventRelayer creates a new instance of bridge event relayer // if the node is not a relayer, it will return a dummy bridge event relayer func newBridgeEventRelayer( - runtimeConfig *runtimeConfig, + blockchain polytypes.Blockchain, + runtimeConfig *config.Runtime, logger hclog.Logger, ) (BridgeEventRelayer, error) { - if !runtimeConfig.consensusConfig.IsRelayer { + if !runtimeConfig.ConsensusConfig.IsRelayer { return &dummyBridgeEventRelayer{}, nil } relayer := &bridgeEventRelayerImpl{ key: wallet.NewEcdsaSigner(runtimeConfig.Key), logger: logger.Named("bridge-relayer"), - internalChainID: big.NewInt(runtimeConfig.genesisParams.ChainID), - blockchain: runtimeConfig.blockchain, + internalChainID: big.NewInt(runtimeConfig.ChainParams.ChainID), + blockchain: blockchain, eventCh: make(chan contractsapi.StructAbi, eventChBuffer), quitCh: make(chan struct{}), } @@ -235,12 +240,12 @@ func (ber *bridgeEventRelayerImpl) sendCommitValidatorSet(event *contractsapi.Si } 
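Before the Start changes below, a minimal wiring sketch for the relayer, assuming the caller already holds a polytypes.Blockchain, a *config.Runtime and a *state.EventProvider. The startRelayerSketch name and its error handling are illustrative only; the constructor and Start signatures are the ones introduced in this patch.

func startRelayerSketch(blockchain polytypes.Blockchain, runtimeCfg *config.Runtime,
	eventProvider *state.EventProvider, logger hclog.Logger) (BridgeEventRelayer, error) {
	// returns the no-op dummy relayer when ConsensusConfig.IsRelayer is false
	relayer, err := newBridgeEventRelayer(blockchain, runtimeCfg, logger)
	if err != nil {
		return nil, err
	}

	// Start sets up the internal/external tx relayers and one event tracker per bridge
	if err := relayer.Start(runtimeCfg, eventProvider); err != nil {
		relayer.Close()

		return nil, err
	}

	return relayer, nil
}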
// Start starts the bridge relayer -func (ber *bridgeEventRelayerImpl) Start(runtimeCfg *runtimeConfig, eventProvider *EventProvider) error { +func (ber *bridgeEventRelayerImpl) Start(runtimeCfg *config.Runtime, eventProvider *state.EventProvider) error { txRelayers := make(map[uint64]txrelayer.TxRelayer, len(runtimeCfg.GenesisConfig.Bridge)) trackers := make([]*tracker.EventTracker, 0, len(runtimeCfg.GenesisConfig.Bridge)) // create tx relayer for internal chain - internalChainTxRelayer, err := createBridgeTxRelayer(runtimeCfg.consensusConfig.RPCEndpoint, ber.logger) + internalChainTxRelayer, err := createBridgeTxRelayer(runtimeCfg.ConsensusConfig.RPCEndpoint, ber.logger) if err != nil { return fmt.Errorf("failed to create tx relayer for internal chain: %w", err) } @@ -278,9 +283,9 @@ func (ber *bridgeEventRelayerImpl) Start(runtimeCfg *runtimeConfig, eventProvide // startTrackerForChain starts a new instance of tracker.EventTracker // for listening to the events from an external chain func (ber *bridgeEventRelayerImpl) startTrackerForChain(chainID uint64, - bridgeCfg *BridgeConfig, runtimeCfg *runtimeConfig) (*tracker.EventTracker, error) { + bridgeCfg *config.Bridge, runtimeCfg *config.Runtime) (*tracker.EventTracker, error) { store, err := store.NewBoltDBEventTrackerStore( - path.Join(runtimeCfg.DataDir, fmt.Sprintf("/bridge-event-relayer%d.db", chainID))) + path.Join(runtimeCfg.StateDataDir, fmt.Sprintf("/bridge-event-relayer%d.db", chainID))) if err != nil { return nil, err } @@ -290,9 +295,9 @@ func (ber *bridgeEventRelayerImpl) startTrackerForChain(chainID uint64, EventSubscriber: ber, Logger: ber.logger, RPCEndpoint: bridgeCfg.JSONRPCEndpoint, - SyncBatchSize: runtimeCfg.eventTracker.SyncBatchSize, - NumBlockConfirmations: runtimeCfg.eventTracker.NumBlockConfirmations, - NumOfBlocksToReconcile: runtimeCfg.eventTracker.NumOfBlocksToReconcile, + SyncBatchSize: runtimeCfg.EventTracker.SyncBatchSize, + NumBlockConfirmations: runtimeCfg.EventTracker.NumBlockConfirmations, + NumOfBlocksToReconcile: runtimeCfg.EventTracker.NumOfBlocksToReconcile, PollInterval: runtimeCfg.GenesisConfig.BlockTrackerPollInterval.Duration, LogFilter: map[ethgo.Address][]ethgo.Hash{ ethgo.Address(bridgeCfg.ExternalGatewayAddr): { @@ -346,7 +351,7 @@ func (ber *bridgeEventRelayerImpl) ProcessLog(header *types.Header, log *ethgo.L return err } - systemState := NewSystemState(contracts.EpochManagerContract, contracts.BridgeStorageContract, provider) + systemState := systemstate.NewSystemState(contracts.EpochManagerContract, contracts.BridgeStorageContract, provider) switch log.Topics[0] { case bridgeMessageResultEvent.Sig(): @@ -441,20 +446,3 @@ func createBridgeTxRelayer(rpcEndpoint string, logger hclog.Logger) (txrelayer.T txrelayer.WithIPAddress(rpcEndpoint), txrelayer.WithNoWaiting(), txrelayer.WithWriter(logger.StandardWriter(&hclog.StandardLoggerOptions{}))) } - -// convertLog converts types.Log to ethgo.Log -func convertLog(log *types.Log) *ethgo.Log { - l := ðgo.Log{ - Address: ethgo.Address(log.Address), - Data: make([]byte, len(log.Data)), - Topics: make([]ethgo.Hash, len(log.Topics)), - } - - copy(l.Data, log.Data) - - for i, topic := range log.Topics { - l.Topics[i] = ethgo.Hash(topic) - } - - return l -} diff --git a/consensus/polybft/bridge_manager.go b/consensus/polybft/bridge/bridge_manager.go similarity index 88% rename from consensus/polybft/bridge_manager.go rename to consensus/polybft/bridge/bridge_manager.go index a709e10820..02ba04a407 100644 --- a/consensus/polybft/bridge_manager.go +++ 
b/consensus/polybft/bridge/bridge_manager.go @@ -1,4 +1,4 @@ -package polybft +package bridge import ( "encoding/hex" @@ -15,35 +15,41 @@ import ( "github.com/hashicorp/go-hclog" "github.com/libp2p/go-libp2p/core/peer" bolt "go.etcd.io/bbolt" - "google.golang.org/protobuf/proto" "github.com/0xPolygon/polygon-edge/bls" "github.com/0xPolygon/polygon-edge/consensus/polybft/bitmap" + "github.com/0xPolygon/polygon-edge/consensus/polybft/config" "github.com/0xPolygon/polygon-edge/consensus/polybft/contractsapi" polybftProto "github.com/0xPolygon/polygon-edge/consensus/polybft/proto" "github.com/0xPolygon/polygon-edge/consensus/polybft/signer" + "github.com/0xPolygon/polygon-edge/consensus/polybft/state" + systemstate "github.com/0xPolygon/polygon-edge/consensus/polybft/system_state" + polytypes "github.com/0xPolygon/polygon-edge/consensus/polybft/types" "github.com/0xPolygon/polygon-edge/consensus/polybft/validator" "github.com/0xPolygon/polygon-edge/consensus/polybft/wallet" "github.com/0xPolygon/polygon-edge/types" ) var ( - errUnknownBridgeEvent = errors.New("unknown bridge event") bridgeMessageEventSig = new(contractsapi.BridgeMsgEvent).Sig() + errUnknownBridgeEvent = errors.New("unknown bridge event") + errQuorumNotReached = errors.New("quorum not reached for batch") ) +const maxNumberOfBatchEvents = 10 + type Runtime interface { IsActiveValidator() bool } // BridgeManager is an interface that defines functions for bridge workflow type BridgeManager interface { - EventSubscriber - Start(runtimeCfg *runtimeConfig) error + state.EventSubscriber + Start(runtimeCfg *config.Runtime) error AddLog(chainID *big.Int, eventLog *ethgo.Log) error BridgeBatch(blockNumber uint64) (*BridgeBatchSigned, error) PostBlock() error - PostEpoch(req *PostEpochRequest) error + PostEpoch(req *polytypes.PostEpochRequest) error Close() } @@ -52,13 +58,13 @@ var _ BridgeManager = (*dummyBridgeEventManager)(nil) // dummyBridgeEventManager is used when bridge is not enabled type dummyBridgeEventManager struct{} -func (d *dummyBridgeEventManager) Start(runtimeCfg *runtimeConfig) error { return nil } +func (d *dummyBridgeEventManager) Start(runtimeCfg *config.Runtime) error { return nil } func (d *dummyBridgeEventManager) AddLog(chainID *big.Int, eventLog *ethgo.Log) error { return nil } func (d *dummyBridgeEventManager) BridgeBatch(blockNumber uint64) (*BridgeBatchSigned, error) { return nil, nil } func (d *dummyBridgeEventManager) PostBlock() error { return nil } -func (d *dummyBridgeEventManager) PostEpoch(req *PostEpochRequest) error { +func (d *dummyBridgeEventManager) PostEpoch(req *polytypes.PostEpochRequest) error { return nil } @@ -74,8 +80,8 @@ func (d *dummyBridgeEventManager) Close() {} // bridgeEventManagerConfig holds the configuration data of bridge event manager type bridgeEventManagerConfig struct { - bridgeCfg *BridgeConfig - topic topic + bridgeCfg *config.Bridge + topic Topic key *wallet.Key maxNumberOfEvents uint64 } @@ -86,7 +92,7 @@ var _ BridgeManager = (*bridgeEventManager)(nil) // saving and querying bridge message events, and creating, and submitting new batches type bridgeEventManager struct { logger hclog.Logger - state *State + state *BridgeManagerStore config *bridgeEventManagerConfig @@ -104,16 +110,10 @@ type bridgeEventManager struct { tracker *tracker.EventTracker } -// topic is an interface for p2p message gossiping -type topic interface { - Publish(obj proto.Message) error - Subscribe(handler func(obj interface{}, from peer.ID)) error -} - // newBridgeManager creates a new instance of 
bridge event manager func newBridgeManager( logger hclog.Logger, - state *State, + state *BridgeManagerStore, config *bridgeEventManagerConfig, runtime Runtime, externalChainID, internalChainID uint64) *bridgeEventManager { @@ -128,7 +128,7 @@ func newBridgeManager( } // Start starts the bridge event manager -func (b *bridgeEventManager) Start(runtimeConfig *runtimeConfig) error { +func (b *bridgeEventManager) Start(runtimeConfig *config.Runtime) error { if err := b.initTransport(); err != nil { return fmt.Errorf("failed to initialize bridge event transport layer. Error: %w", err) } @@ -149,8 +149,8 @@ func (b *bridgeEventManager) Close() { } // initTracker starts a new event tracker (to receive bridge events from external chain) -func (b *bridgeEventManager) initTracker(runtimeCfg *runtimeConfig) (*tracker.EventTracker, error) { - store, err := store.NewBoltDBEventTrackerStore(path.Join(runtimeCfg.DataDir, "/bridge.db")) +func (b *bridgeEventManager) initTracker(runtimeCfg *config.Runtime) (*tracker.EventTracker, error) { + store, err := store.NewBoltDBEventTrackerStore(path.Join(runtimeCfg.StateDataDir, "/bridge.db")) if err != nil { return nil, err } @@ -160,9 +160,9 @@ func (b *bridgeEventManager) initTracker(runtimeCfg *runtimeConfig) (*tracker.Ev EventSubscriber: b, Logger: b.logger, RPCEndpoint: b.config.bridgeCfg.JSONRPCEndpoint, - SyncBatchSize: runtimeCfg.eventTracker.SyncBatchSize, - NumBlockConfirmations: runtimeCfg.eventTracker.NumBlockConfirmations, - NumOfBlocksToReconcile: runtimeCfg.eventTracker.NumOfBlocksToReconcile, + SyncBatchSize: runtimeCfg.EventTracker.SyncBatchSize, + NumBlockConfirmations: runtimeCfg.EventTracker.NumBlockConfirmations, + NumOfBlocksToReconcile: runtimeCfg.EventTracker.NumOfBlocksToReconcile, PollInterval: runtimeCfg.GenesisConfig.BlockTrackerPollInterval.Duration, LogFilter: map[ethgo.Address][]ethgo.Hash{ ethgo.Address(b.config.bridgeCfg.ExternalGatewayAddr): {bridgeMessageEventSig}, @@ -224,7 +224,7 @@ func (b *bridgeEventManager) saveVote(vote *BridgeBatchVote) error { } if vote.EpochNumber == epoch+1 { - if err := b.state.EpochStore.insertEpoch(epoch+1, nil, vote.SourceChainID); err != nil { + if err := b.state.insertEpoch(epoch+1, nil, vote.SourceChainID); err != nil { return fmt.Errorf("error saving msg vote from a future epoch: %d. 
Error: %w", epoch+1, err) } } @@ -238,7 +238,7 @@ func (b *bridgeEventManager) saveVote(vote *BridgeBatchVote) error { Signature: vote.Signature, } - numSignatures, err := b.state.BridgeMessageStore.insertConsensusData( + numSignatures, err := b.state.insertConsensusData( vote.EpochNumber, vote.Hash, msgVote, @@ -309,7 +309,7 @@ func (b *bridgeEventManager) AddLog(chainID *big.Int, eventLog *ethgo.Log) error return err } - if err := b.state.BridgeMessageStore.insertBridgeMessageEvent(event); err != nil { + if err := b.state.insertBridgeMessageEvent(event); err != nil { b.logger.Error("could not save bridge message event to boltDb", "err", err) return err @@ -367,7 +367,7 @@ func (b *bridgeEventManager) BridgeBatch(blockNumber uint64) (*BridgeBatchSigned // getAggSignatureForBridgeBatchMessage checks if pending batch has quorum, // and if it does, aggregates the signatures func (b *bridgeEventManager) getAggSignatureForBridgeBatchMessage(blockNumber uint64, - pendingBridgeBatch *PendingBridgeBatch) (Signature, error) { + pendingBridgeBatch *PendingBridgeBatch) (polytypes.Signature, error) { validatorSet := b.validatorSet validatorAddrToIndex := make(map[string]int, validatorSet.Len()) @@ -379,16 +379,16 @@ func (b *bridgeEventManager) getAggSignatureForBridgeBatchMessage(blockNumber ui bridgeBatchHash, err := pendingBridgeBatch.Hash() if err != nil { - return Signature{}, err + return polytypes.Signature{}, err } // get all the votes from the database for batch - votes, err := b.state.BridgeMessageStore.getMessageVotes( + votes, err := b.state.getMessageVotes( pendingBridgeBatch.Epoch, bridgeBatchHash.Bytes(), pendingBridgeBatch.SourceChainID.Uint64()) if err != nil { - return Signature{}, err + return polytypes.Signature{}, err } var ( @@ -405,7 +405,7 @@ func (b *bridgeEventManager) getAggSignatureForBridgeBatchMessage(blockNumber ui signature, err := bls.UnmarshalSignature(vote.Signature) if err != nil { - return Signature{}, err + return polytypes.Signature{}, err } bmap.Set(uint64(index)) //nolint:gosec @@ -415,15 +415,15 @@ func (b *bridgeEventManager) getAggSignatureForBridgeBatchMessage(blockNumber ui } if !validatorSet.HasQuorum(blockNumber, signers) { - return Signature{}, errQuorumNotReached + return polytypes.Signature{}, errQuorumNotReached } aggregatedSignature, err := signatures.Aggregate().Marshal() if err != nil { - return Signature{}, err + return polytypes.Signature{}, err } - result := Signature{ + result := polytypes.Signature{ AggregatedSignature: aggregatedSignature, Bitmap: bmap, } @@ -433,8 +433,8 @@ func (b *bridgeEventManager) getAggSignatureForBridgeBatchMessage(blockNumber ui // PostEpoch notifies the bridge event manager that an epoch has changed, // so that it can discard any previous epoch bridge batch, and build a new one (since validator set changed) -func (b *bridgeEventManager) PostEpoch(req *PostEpochRequest) error { - if err := b.state.EpochStore.insertEpoch(req.NewEpochID, req.DBTx, b.externalChainID); err != nil { +func (b *bridgeEventManager) PostEpoch(req *polytypes.PostEpochRequest) error { + if err := b.state.insertEpoch(req.NewEpochID, req.DBTx, b.externalChainID); err != nil { return fmt.Errorf("an error occurred while inserting new epoch in db, chainID: %d. 
Reason: %w", b.externalChainID, err) } @@ -448,14 +448,14 @@ func (b *bridgeEventManager) PostEpoch(req *PostEpochRequest) error { b.epoch = req.NewEpochID // build a new batch at the end of the epoch - b.nextEventIDExternal, err = req.SystemState.GetNextCommittedIndex(b.externalChainID, External) + b.nextEventIDExternal, err = req.SystemState.GetNextCommittedIndex(b.externalChainID, systemstate.External) if err != nil { b.lock.Unlock() return err } - b.nextEventIDInternal, err = req.SystemState.GetNextCommittedIndex(b.internalChainID, Internal) + b.nextEventIDInternal, err = req.SystemState.GetNextCommittedIndex(b.internalChainID, systemstate.Internal) if err != nil { b.lock.Unlock() @@ -506,7 +506,7 @@ func (b *bridgeEventManager) buildBridgeBatch( // Since lock is reduced grab original values into local variables in order to keep them epoch := b.epoch - bridgeMessageEvents, err := b.state.BridgeMessageStore.getBridgeMessageEventsForBridgeBatch( + bridgeMessageEvents, err := b.state.getBridgeMessageEventsForBridgeBatch( nextBridgeEventIDIndex, nextBridgeEventIDIndex+b.config.maxNumberOfEvents-1, dbTx, @@ -559,7 +559,7 @@ func (b *bridgeEventManager) buildBridgeBatch( Signature: signature, } - if _, err = b.state.BridgeMessageStore.insertConsensusData( + if _, err = b.state.insertConsensusData( epoch, hashBytes, sig, @@ -654,7 +654,7 @@ func (b *bridgeEventManager) ProcessLog(header *types.Header, log *ethgo.Log, db } if bridgeMessageResultEvent.Status { - return b.state.BridgeMessageStore.removeBridgeEvents(&bridgeMessageResultEvent) + return b.state.removeBridgeEvents(&bridgeMessageResultEvent) } return nil @@ -668,7 +668,7 @@ func (b *bridgeEventManager) ProcessLog(header *types.Header, log *ethgo.Log, db return nil } - return b.state.BridgeMessageStore.insertBridgeMessageEvent(&bridgeMsgEvent) + return b.state.insertBridgeMessageEvent(&bridgeMsgEvent) default: return errUnknownBridgeEvent } diff --git a/consensus/polybft/bridge_manager_test.go b/consensus/polybft/bridge/bridge_manager_test.go similarity index 87% rename from consensus/polybft/bridge_manager_test.go rename to consensus/polybft/bridge/bridge_manager_test.go index 10002a6040..8be74e4eac 100644 --- a/consensus/polybft/bridge_manager_test.go +++ b/consensus/polybft/bridge/bridge_manager_test.go @@ -1,10 +1,13 @@ -package polybft +package bridge import ( + "fmt" "math/big" "math/rand" "os" + "path" "testing" + "time" "github.com/Ethernal-Tech/ethgo" "github.com/Ethernal-Tech/ethgo/abi" @@ -14,21 +17,58 @@ import ( bolt "go.etcd.io/bbolt" "google.golang.org/protobuf/proto" + "github.com/0xPolygon/polygon-edge/consensus/polybft/config" "github.com/0xPolygon/polygon-edge/consensus/polybft/contractsapi" "github.com/0xPolygon/polygon-edge/consensus/polybft/signer" "github.com/0xPolygon/polygon-edge/consensus/polybft/validator" "github.com/0xPolygon/polygon-edge/helper/common" "github.com/0xPolygon/polygon-edge/types" + + polytypes "github.com/0xPolygon/polygon-edge/consensus/polybft/types" ) +var bigZero = big.NewInt(0) + +func newTestState(tb *testing.T) *BridgeManagerStore { + tb.Helper() + + dir := fmt.Sprintf("/tmp/consensus-temp_%v", time.Now().UTC().Format(time.RFC3339Nano)) + err := os.Mkdir(dir, 0775) + + if err != nil { + tb.Fatal(err) + } + + tb.Cleanup(func() { + if err := os.RemoveAll(dir); err != nil { + tb.Fatal(err) + } + }) + + db, err := bolt.Open(path.Join(dir, "my.db"), 0666, nil) + if err != nil { + tb.Fatal(err) + } + + numOfBridges := uint64(1000) + chainIds := make([]uint64, 0, numOfBridges) + for i := 
uint64(0); i < numOfBridges; i++ { + chainIds = append(chainIds, i) + } + + store, err := newBridgeManagerStore(db, chainIds) + if err != nil { + tb.Fatal(err) + } + + return store +} + func newTestBridgeManager(t *testing.T, key *validator.TestValidator, runtime Runtime) *bridgeEventManager { t.Helper() - tmpDir, err := os.MkdirTemp("/tmp", "test-data-dir-state-sync") - require.NoError(t, err) - state := newTestState(t) - require.NoError(t, state.EpochStore.insertEpoch(0, nil, 1)) + require.NoError(t, state.insertEpoch(0, nil, 1)) topic := &mockTopic{} @@ -38,13 +78,9 @@ func newTestBridgeManager(t *testing.T, key *validator.TestValidator, runtime Ru &bridgeEventManagerConfig{ topic: topic, key: key.Key(), - maxNumberOfEvents: maxNumberOfEvents, + maxNumberOfEvents: maxNumberOfBatchEvents, }, runtime, 1, 0) - t.Cleanup(func() { - os.RemoveAll(tmpDir) - }) - return s } @@ -66,7 +102,7 @@ func TestBridgeEventManager_PostEpoch_BuildBridgeBatch(t *testing.T) { // add 5 bridge messages starting in index 0, it will generate one smaller batch for i := 0; i < 5; i++ { - require.NoError(t, s.state.BridgeMessageStore.insertBridgeMessageEvent(bridgeMessages10[i])) + require.NoError(t, s.state.insertBridgeMessageEvent(bridgeMessages10[i])) } require.NoError(t, s.buildExternalBridgeBatch(nil)) @@ -78,7 +114,7 @@ func TestBridgeEventManager_PostEpoch_BuildBridgeBatch(t *testing.T) { // add the next 5 bridge messages, at that point, so that it generates a larger batch for i := 5; i < 10; i++ { - require.NoError(t, s.state.BridgeMessageStore.insertBridgeMessageEvent(bridgeMessages10[i])) + require.NoError(t, s.state.insertBridgeMessageEvent(bridgeMessages10[i])) } require.NoError(t, s.buildExternalBridgeBatch(nil)) @@ -101,7 +137,7 @@ func TestBridgeEventManager_PostEpoch_BuildBridgeBatch(t *testing.T) { // add 5 bridge messages starting in index 0, they will be saved to db for i := 0; i < 5; i++ { - require.NoError(t, s.state.BridgeMessageStore.insertBridgeMessageEvent(bridgeMessages10[i])) + require.NoError(t, s.state.insertBridgeMessageEvent(bridgeMessages10[i])) } // I am not a validator so no batches should be built @@ -160,12 +196,12 @@ func TestBridgeEventManager_MessagePool(t *testing.T) { require.NoError(t, s.saveVote(msg)) // no votes for the current epoch - votes, err := s.state.BridgeMessageStore.getMessageVotes(0, msg.Hash, 1) + votes, err := s.state.getMessageVotes(0, msg.Hash, 1) require.NoError(t, err) require.Len(t, votes, 0) // returns an error for the invalid epoch - _, err = s.state.BridgeMessageStore.getMessageVotes(1, msg.Hash, 0) + _, err = s.state.getMessageVotes(1, msg.Hash, 0) require.Error(t, err) }) @@ -216,18 +252,18 @@ func TestBridgeEventManager_MessagePool(t *testing.T) { // vote with validator 1 require.NoError(t, s.saveVote(val1signed)) - votes, err := s.state.BridgeMessageStore.getMessageVotes(0, msg.hash, 1) + votes, err := s.state.getMessageVotes(0, msg.hash, 1) require.NoError(t, err) require.Len(t, votes, 1) // vote with validator 1 again (the votes do not increase) require.NoError(t, s.saveVote(val1signed)) - votes, _ = s.state.BridgeMessageStore.getMessageVotes(0, msg.hash, 1) + votes, _ = s.state.getMessageVotes(0, msg.hash, 1) require.Len(t, votes, 1) // vote with validator 2 require.NoError(t, s.saveVote(val2signed)) - votes, _ = s.state.BridgeMessageStore.getMessageVotes(0, msg.hash, 1) + votes, _ = s.state.getMessageVotes(0, msg.hash, 1) require.Len(t, votes, 2) }) } @@ -305,10 +341,10 @@ func TestBridgeEventManager_RemoveProcessedEventsAndProofs(t 
*testing.T) { bridgeMessageEvents := generateBridgeMessageEvents(t, bridgeMessageEventsCount, 0) for _, event := range bridgeMessageEvents { - require.NoError(t, s.state.BridgeMessageStore.insertBridgeMessageEvent(event)) + require.NoError(t, s.state.insertBridgeMessageEvent(event)) } - bridgeMessageEventsBefore, err := s.state.BridgeMessageStore.list() + bridgeMessageEventsBefore, err := s.state.list() require.NoError(t, err) require.Equal(t, bridgeMessageEventsCount, len(bridgeMessageEventsBefore)) @@ -318,7 +354,7 @@ func TestBridgeEventManager_RemoveProcessedEventsAndProofs(t *testing.T) { } // all bridge message events and their proofs should be removed from the store - stateSyncEventsAfter, err := s.state.BridgeMessageStore.list() + stateSyncEventsAfter, err := s.state.list() require.NoError(t, err) require.Equal(t, 0, len(stateSyncEventsAfter)) } @@ -339,7 +375,7 @@ func TestBridgeEventManager_AddLog_BuildBridgeBatches(t *testing.T) { // empty log which is not an bridge message require.NoError(t, s.AddLog(big.NewInt(1), ðgo.Log{Data: bridgeMsgData})) - bridgeEvents, err := s.state.BridgeMessageStore.list() + bridgeEvents, err := s.state.list() require.NoError(t, err) require.Len(t, bridgeEvents, 0) @@ -350,7 +386,7 @@ func TestBridgeEventManager_AddLog_BuildBridgeBatches(t *testing.T) { // log with the bridge message topic but incorrect content require.Error(t, s.AddLog(big.NewInt(1), ðgo.Log{Topics: []ethgo.Hash{bridgeMessageEventID}, Data: bridgeMsgData})) - bridgeEvents, err = s.state.BridgeMessageStore.list() + bridgeEvents, err = s.state.list() require.NoError(t, err) require.Len(t, bridgeEvents, 0) @@ -371,7 +407,7 @@ func TestBridgeEventManager_AddLog_BuildBridgeBatches(t *testing.T) { require.NoError(t, s.AddLog(big.NewInt(1), goodLog)) - bridgeEvents, err = s.state.BridgeMessageStore.getBridgeMessageEventsForBridgeBatch(0, 0, nil, 1, 0) + bridgeEvents, err = s.state.getBridgeMessageEventsForBridgeBatch(0, 0, nil, 1, 0) require.NoError(t, err) require.Len(t, bridgeEvents, 1) require.Len(t, s.pendingBridgeBatches, 1) @@ -429,7 +465,7 @@ func TestBridgeEventManager_AddLog_BuildBridgeBatches(t *testing.T) { require.NoError(t, s.AddLog(big.NewInt(1), goodLog)) // node should have inserted given bridgeMsg event, but it shouldn't build any batch - bridgeMessages, err := s.state.BridgeMessageStore.getBridgeMessageEventsForBridgeBatch(0, 0, nil, 1, 0) + bridgeMessages, err := s.state.getBridgeMessageEventsForBridgeBatch(0, 0, nil, 1, 0) require.NoError(t, err) require.Len(t, bridgeMessages, 1) require.Equal(t, uint64(0), bridgeMessages[0].ID.Uint64()) @@ -521,7 +557,7 @@ var _ BridgeManager = (*mockBridgeManager)(nil) type mockBridgeManager struct { chainID uint64 - state *State + state *BridgeManagerStore } func (*mockBridgeManager) AddLog(chainID *big.Int, eventLog *ethgo.Log) error { @@ -540,8 +576,8 @@ func (*mockBridgeManager) PostBlock() error { } // PostEpoch implements BridgeManager. -func (mbm *mockBridgeManager) PostEpoch(req *PostEpochRequest) error { - if err := mbm.state.EpochStore.insertEpoch(req.NewEpochID, req.DBTx, mbm.chainID); err != nil { +func (mbm *mockBridgeManager) PostEpoch(req *polytypes.PostEpochRequest) error { + if err := mbm.state.insertEpoch(req.NewEpochID, req.DBTx, mbm.chainID); err != nil { return err } @@ -554,7 +590,7 @@ func (*mockBridgeManager) ProcessLog(header *types.Header, log *ethgo.Log, dbTx } // Start implements BridgeManager. 
-func (*mockBridgeManager) Start(runtimeCfg *runtimeConfig) error { +func (*mockBridgeManager) Start(runtimeCfg *config.Runtime) error { return nil } diff --git a/consensus/polybft/state_store_bridge_message.go b/consensus/polybft/bridge/state_store_bridge_message.go similarity index 59% rename from consensus/polybft/state_store_bridge_message.go rename to consensus/polybft/bridge/state_store_bridge_message.go index c0482a3145..f8a05919f4 100644 --- a/consensus/polybft/state_store_bridge_message.go +++ b/consensus/polybft/bridge/state_store_bridge_message.go @@ -1,4 +1,4 @@ -package polybft +package bridge import ( "encoding/json" @@ -18,6 +18,8 @@ var ( bridgeBatchBucket = []byte("bridgeBatches") // bucket to store message votes (signatures) messageVotesBucket = []byte("votes") + // bucket to store epochs and all its nested buckets (message votes and message pool events) + epochsBucket = []byte("epochs") // errNotEnoughBridgeEvents error message errNotEnoughBridgeEvents = errors.New("there is either a gap or not enough bridge events") @@ -25,6 +27,27 @@ var ( errNoBridgeBatchForBridgeEvent = errors.New("no bridge batch found for given bridge message events") ) +// BridgeBatchVoteConsensusData encapsulates sender identifier and its signature +type BridgeBatchVoteConsensusData struct { + // Signer of the vote + Sender string + // Signature of the message + Signature []byte +} + +// BridgeBatchVote represents the payload which is gossiped across the network +type BridgeBatchVote struct { + *BridgeBatchVoteConsensusData + // Hash is encoded data + Hash []byte + // Number of epoch + EpochNumber uint64 + // SourceChainID from bridge batch + SourceChainID uint64 + // DestinationChainID from bridge batch + DestinationChainID uint64 +} + /* Bolt DB schema: @@ -34,46 +57,57 @@ bridge message events/ bridge batches/ |--> chainId --> bridgeBatches.Message[last].Id -> *BridgeBatchSigned (json marshalled) -relayerEvents/ -|--> chainId --> RelayerEventData.EventID -> *RelayerEventData (json marshalled) +bridge message votes / +|--> chainId --> epoch -> hash -> *BridgeBatchVote (json marshalled) */ -type BridgeMessageStore struct { +type BridgeManagerStore struct { db *bolt.DB chainIDs []uint64 } -// initialize creates necessary buckets in DB if they don't already exist -func (bms *BridgeMessageStore) initialize(tx *bolt.Tx) error { +func newBridgeManagerStore(db *bolt.DB, chainIDs []uint64) (*BridgeManagerStore, error) { var err error - var bridgeMessageBucket, bridgeBatchesBucket *bolt.Bucket + store := &BridgeManagerStore{db: db, chainIDs: chainIDs} - if bridgeMessageBucket, err = tx.CreateBucketIfNotExists(bridgeMessageEventsBucket); err != nil { - return fmt.Errorf("failed to create bucket=%s: %w", string(bridgeMessageEventsBucket), err) - } + return store, store.db.Update(func(tx *bolt.Tx) error { + var bridgeMessageBucket, bridgeBatchesBucket, epochBucket *bolt.Bucket - if bridgeBatchesBucket, err = tx.CreateBucketIfNotExists(bridgeBatchBucket); err != nil { - return fmt.Errorf("failed to create bucket=%s: %w", string(bridgeBatchBucket), err) - } + if bridgeMessageBucket, err = tx.CreateBucketIfNotExists(bridgeMessageEventsBucket); err != nil { + return fmt.Errorf("failed to create bucket=%s: %w", string(bridgeMessageEventsBucket), err) + } - for _, chainID := range bms.chainIDs { - chainIDBytes := common.EncodeUint64ToBytes(chainID) + if bridgeBatchesBucket, err = tx.CreateBucketIfNotExists(bridgeBatchBucket); err != nil { + return fmt.Errorf("failed to create bucket=%s: %w", 
string(bridgeBatchBucket), err) + } - if _, err := bridgeMessageBucket.CreateBucketIfNotExists(chainIDBytes); err != nil { - return fmt.Errorf("failed to create bucket chainID=%s: %w", string(bridgeMessageEventsBucket), err) + if epochBucket, err = tx.CreateBucketIfNotExists(epochsBucket); err != nil { + return fmt.Errorf("failed to create bucket=%s: %w", string(epochsBucket), err) } - if _, err := bridgeBatchesBucket.CreateBucketIfNotExists(chainIDBytes); err != nil { - return fmt.Errorf("failed to create bucket chainID=%s: %w", string(bridgeBatchBucket), err) + for _, chainID := range chainIDs { + chainIDBytes := common.EncodeUint64ToBytes(chainID) + + if _, err := bridgeMessageBucket.CreateBucketIfNotExists(chainIDBytes); err != nil { + return fmt.Errorf("failed to create bucket chainID=%s: %w", string(bridgeMessageEventsBucket), err) + } + + if _, err := bridgeBatchesBucket.CreateBucketIfNotExists(chainIDBytes); err != nil { + return fmt.Errorf("failed to create bucket chainID=%s: %w", string(bridgeBatchBucket), err) + } + + if _, err := epochBucket.CreateBucketIfNotExists(chainIDBytes); err != nil { + return fmt.Errorf("failed to create bucket chainID=%s: %w", string(epochsBucket), err) + } } - } - return nil + return nil + }) } // insertBridgeMessageEvent inserts a new bridge message event to state event bucket in db -func (bms *BridgeMessageStore) insertBridgeMessageEvent(event *contractsapi.BridgeMsgEvent) error { +func (bms *BridgeManagerStore) insertBridgeMessageEvent(event *contractsapi.BridgeMsgEvent) error { return bms.db.Update(func(tx *bolt.Tx) error { raw, err := json.Marshal(event) if err != nil { @@ -87,7 +121,7 @@ func (bms *BridgeMessageStore) insertBridgeMessageEvent(event *contractsapi.Brid } // removeBridgeEvents removes bridge events and their proofs from the buckets in db -func (bms *BridgeMessageStore) removeBridgeEvents( +func (bms *BridgeManagerStore) removeBridgeEvents( bridgeMessageEventIDs *contractsapi.BridgeMessageResultEvent) error { return bms.db.Update(func(tx *bolt.Tx) error { eventsBucket := tx.Bucket(bridgeMessageEventsBucket). 
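A short, self-contained sketch of the new BridgeManagerStore round trip (constructor, insert, list), using only calls visible in this file and the accompanying tests; the database path and chain ID are illustrative assumptions.

func storeRoundTripSketch(dbPath string) error {
	// open a bolt database; the path is illustrative
	db, err := bolt.Open(dbPath, 0666, nil)
	if err != nil {
		return err
	}
	defer db.Close()

	// the constructor creates the events/batches/epochs buckets per chain ID up front
	store, err := newBridgeManagerStore(db, []uint64{1})
	if err != nil {
		return err
	}

	event := &contractsapi.BridgeMsgEvent{
		ID:                 big.NewInt(0),
		Data:               []byte{1, 2},
		SourceChainID:      big.NewInt(1),
		DestinationChainID: big.NewInt(0),
	}

	if err := store.insertBridgeMessageEvent(event); err != nil {
		return err
	}

	// list returns every stored BridgeMsgEvent across the configured chain IDs
	events, err := store.list()
	if err != nil {
		return err
	}

	_ = events // a single event is expected here

	return nil
}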
@@ -105,7 +139,7 @@ func (bms *BridgeMessageStore) removeBridgeEvents( } // list iterates through all events in events bucket in db, un-marshals them, and returns as array -func (bms *BridgeMessageStore) list() ([]*contractsapi.BridgeMsgEvent, error) { +func (bms *BridgeManagerStore) list() ([]*contractsapi.BridgeMsgEvent, error) { events := []*contractsapi.BridgeMsgEvent{} for _, chainID := range bms.chainIDs { @@ -131,7 +165,7 @@ func (bms *BridgeMessageStore) list() ([]*contractsapi.BridgeMsgEvent, error) { } // getBridgeMessageEventsForBridgeBatch returns bridge events for bridge batch -func (bms *BridgeMessageStore) getBridgeMessageEventsForBridgeBatch( +func (bms *BridgeManagerStore) getBridgeMessageEventsForBridgeBatch( fromIndex, toIndex uint64, dbTx *bolt.Tx, sourceChainID, destinationChainID uint64) ( []*contractsapi.BridgeMsgEvent, error) { var ( @@ -173,7 +207,7 @@ func (bms *BridgeMessageStore) getBridgeMessageEventsForBridgeBatch( } // getBridgeBatchForBridgeEvents returns the bridgeBatch that contains given bridge event if it exists -func (bms *BridgeMessageStore) getBridgeBatchForBridgeEvents( +func (bms *BridgeManagerStore) getBridgeBatchForBridgeEvents( bridgeMessageID, chainID uint64) (*BridgeBatchSigned, error) { var signedBridgeBatch *BridgeBatchSigned @@ -201,7 +235,7 @@ func (bms *BridgeMessageStore) getBridgeBatchForBridgeEvents( } // insertBridgeBatchMessage inserts signed batch to db -func (bms *BridgeMessageStore) insertBridgeBatchMessage(signedBridgeBatch *BridgeBatchSigned, +func (bms *BridgeManagerStore) insertBridgeBatchMessage(signedBridgeBatch *BridgeBatchSigned, dbTx *bolt.Tx) error { insertFn := func(tx *bolt.Tx) error { raw, err := json.Marshal(signedBridgeBatch) @@ -236,7 +270,7 @@ func (bms *BridgeMessageStore) insertBridgeBatchMessage(signedBridgeBatch *Bridg } // insertConsensusData inserts given batch consensus data to corresponding bucket of given epoch -func (bms *BridgeMessageStore) insertConsensusData(epoch uint64, key []byte, +func (bms *BridgeManagerStore) insertConsensusData(epoch uint64, key []byte, vote *BridgeBatchVoteConsensusData, dbTx *bolt.Tx, sourceChainID uint64) (int, error) { var ( numOfSignatures int @@ -289,7 +323,7 @@ func (bms *BridgeMessageStore) insertConsensusData(epoch uint64, key []byte, } // getMessageVotes gets all signatures from db associated with given epoch and hash -func (bms *BridgeMessageStore) getMessageVotes( +func (bms *BridgeManagerStore) getMessageVotes( epoch uint64, hash []byte, sourceChainID uint64) ([]*BridgeBatchVoteConsensusData, error) { @@ -314,7 +348,7 @@ func (bms *BridgeMessageStore) getMessageVotes( } // getMessageVotesLocked gets all signatures from db associated with given epoch and hash -func (bms *BridgeMessageStore) getMessageVotesLocked(tx *bolt.Tx, epoch uint64, +func (bms *BridgeManagerStore) getMessageVotesLocked(tx *bolt.Tx, epoch uint64, hash []byte, sourceChainID uint64) ([]*BridgeBatchVoteConsensusData, error) { bucket, err := getNestedBucketInEpoch(tx, epoch, messageVotesBucket, sourceChainID) if err != nil { @@ -333,3 +367,90 @@ func (bms *BridgeMessageStore) getMessageVotesLocked(tx *bolt.Tx, epoch uint64, return signatures, nil } + +// getNestedBucketInEpoch returns a nested (child) bucket from db associated with given epoch +func getNestedBucketInEpoch(tx *bolt.Tx, epoch uint64, bucketKey []byte, chainID uint64) (*bolt.Bucket, error) { + epochBucket, err := getEpochBucket(tx, epoch, chainID) + if err != nil { + return nil, err + } + + bucket := epochBucket.Bucket(bucketKey) + if 
bucket == nil { + return nil, fmt.Errorf("could not find %v bucket for epoch: %v", string(bucketKey), epoch) + } + + return bucket, nil +} + +// getEpochBucket returns bucket from db associated with given epoch +func getEpochBucket(tx *bolt.Tx, epoch uint64, chainID uint64) (*bolt.Bucket, error) { + epochBucket := tx.Bucket(epochsBucket). + Bucket(common.EncodeUint64ToBytes(chainID)). + Bucket(common.EncodeUint64ToBytes(epoch)) + if epochBucket == nil { + return nil, fmt.Errorf("could not find bucket for epoch: %v", epoch) + } + + return epochBucket, nil +} + +// insertEpoch inserts a new epoch to db with its meta data +func (s *BridgeManagerStore) insertEpoch(epoch uint64, dbTx *bolt.Tx, chainID uint64) error { + insertFn := func(tx *bolt.Tx) error { + chainIDBucket, err := tx.Bucket(epochsBucket).CreateBucketIfNotExists(common.EncodeUint64ToBytes(chainID)) + if err != nil { + return err + } + + epochBucket, err := chainIDBucket.CreateBucketIfNotExists(common.EncodeUint64ToBytes(epoch)) + if err != nil { + return err + } + + _, err = epochBucket.CreateBucketIfNotExists(messageVotesBucket) + if err != nil { + return err + } + + return err + } + + if dbTx == nil { + return s.db.Update(func(tx *bolt.Tx) error { + return insertFn(tx) + }) + } + + return insertFn(dbTx) +} + +// cleanEpochsFromDB cleans epoch buckets from db +func (s *BridgeManagerStore) cleanEpochsFromDB(dbTx *bolt.Tx) error { + cleanFn := func(tx *bolt.Tx) error { + if err := tx.DeleteBucket(epochsBucket); err != nil { + return err + } + + epochBucket, err := tx.CreateBucket(epochsBucket) + if err != nil { + return err + } + + for _, chainID := range s.chainIDs { + if _, err := epochBucket.CreateBucket(common.EncodeUint64ToBytes(chainID)); err != nil { + return err + } + } + + return nil + } + + if dbTx == nil { + return s.db.Update(func(tx *bolt.Tx) error { + return cleanFn(tx) + }) + } + + return cleanFn(dbTx) +} diff --git a/consensus/polybft/state_store_bridge_message_test.go b/consensus/polybft/bridge/state_store_bridge_message_test.go similarity index 57% rename from consensus/polybft/state_store_bridge_message_test.go rename to consensus/polybft/bridge/state_store_bridge_message_test.go index eb70e9097f..e12e6874a7 100644 --- a/consensus/polybft/state_store_bridge_message_test.go +++ b/consensus/polybft/bridge/state_store_bridge_message_test.go @@ -1,4 +1,4 @@ -package polybft +package bridge import ( "bytes" @@ -6,17 +6,13 @@ import ( "math/big" "testing" - "github.com/0xPolygon/polygon-edge/bls" "github.com/0xPolygon/polygon-edge/consensus/polybft/contractsapi" - "github.com/0xPolygon/polygon-edge/crypto" "github.com/0xPolygon/polygon-edge/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.etcd.io/bbolt" ) -var domain = crypto.Keccak256([]byte("DOMAIN")) - func TestState_InsertEvent(t *testing.T) { t.Parallel() @@ -30,10 +26,10 @@ func TestState_InsertEvent(t *testing.T) { DestinationChainID: bigZero, } - err := state.BridgeMessageStore.insertBridgeMessageEvent(event1) + err := state.insertBridgeMessageEvent(event1) assert.NoError(t, err) - events, err := state.BridgeMessageStore.list() + events, err := state.list() assert.NoError(t, err) assert.Len(t, events, 1) } @@ -43,17 +39,17 @@ func TestState_Insert_And_Get_MessageVotes(t *testing.T) { state := newTestState(t) epoch := uint64(1) - assert.NoError(t, state.EpochStore.insertEpoch(epoch, nil, 0)) + assert.NoError(t, state.insertEpoch(epoch, nil, 0)) hash := []byte{1, 2} - _, err := state.BridgeMessageStore.insertConsensusData(1, 
hash, &BridgeBatchVoteConsensusData{ + _, err := state.insertConsensusData(1, hash, &BridgeBatchVoteConsensusData{ Sender: "NODE_1", Signature: []byte{1, 2}, }, nil, 0) assert.NoError(t, err) - votes, err := state.BridgeMessageStore.getMessageVotes(epoch, hash, 0) + votes, err := state.getMessageVotes(epoch, hash, 0) assert.NoError(t, err) assert.Equal(t, 1, len(votes)) assert.Equal(t, "NODE_1", votes[0].Sender) @@ -65,8 +61,8 @@ func TestState_getBridgeEventsForBridgeBatch_NotEnoughEvents(t *testing.T) { state := newTestState(t) - for i := 0; i < maxNumberOfEvents-2; i++ { - assert.NoError(t, state.BridgeMessageStore.insertBridgeMessageEvent(&contractsapi.BridgeMsgEvent{ + for i := 0; i < maxNumberOfBatchEvents-2; i++ { + assert.NoError(t, state.insertBridgeMessageEvent(&contractsapi.BridgeMsgEvent{ ID: big.NewInt(int64(i)), Data: []byte{1, 2}, SourceChainID: big.NewInt(1), @@ -74,7 +70,7 @@ func TestState_getBridgeEventsForBridgeBatch_NotEnoughEvents(t *testing.T) { })) } - _, err := state.BridgeMessageStore.getBridgeMessageEventsForBridgeBatch(0, maxNumberOfEvents-1, nil, 0, 0) + _, err := state.getBridgeMessageEventsForBridgeBatch(0, maxNumberOfBatchEvents-1, nil, 0, 0) assert.ErrorIs(t, err, errNotEnoughBridgeEvents) } @@ -83,8 +79,8 @@ func TestState_getBridgeEventsForBridgeBatch(t *testing.T) { state := newTestState(t) - for i := 0; i < maxNumberOfEvents; i++ { - assert.NoError(t, state.BridgeMessageStore.insertBridgeMessageEvent(&contractsapi.BridgeMsgEvent{ + for i := 0; i < maxNumberOfBatchEvents; i++ { + assert.NoError(t, state.insertBridgeMessageEvent(&contractsapi.BridgeMsgEvent{ ID: big.NewInt(int64(i)), Data: []byte{1, 2}, SourceChainID: big.NewInt(1), @@ -95,32 +91,32 @@ func TestState_getBridgeEventsForBridgeBatch(t *testing.T) { t.Run("Return all - forced. Enough events", func(t *testing.T) { t.Parallel() - events, err := state.BridgeMessageStore.getBridgeMessageEventsForBridgeBatch(0, maxNumberOfEvents-1, nil, 1, 0) + events, err := state.getBridgeMessageEventsForBridgeBatch(0, maxNumberOfBatchEvents-1, nil, 1, 0) require.NoError(t, err) - require.Equal(t, maxNumberOfEvents, len(events)) + require.Equal(t, maxNumberOfBatchEvents, len(events)) }) t.Run("Return all - forced. Not enough events", func(t *testing.T) { t.Parallel() - _, err := state.BridgeMessageStore.getBridgeMessageEventsForBridgeBatch(0, maxNumberOfEvents+1, nil, 1, 0) + _, err := state.getBridgeMessageEventsForBridgeBatch(0, maxNumberOfBatchEvents+1, nil, 1, 0) require.ErrorIs(t, err, errNotEnoughBridgeEvents) }) t.Run("Return all you can. Enough events", func(t *testing.T) { t.Parallel() - events, err := state.BridgeMessageStore.getBridgeMessageEventsForBridgeBatch(0, maxNumberOfEvents-1, nil, 1, 0) + events, err := state.getBridgeMessageEventsForBridgeBatch(0, maxNumberOfBatchEvents-1, nil, 1, 0) assert.NoError(t, err) - assert.Equal(t, maxNumberOfEvents, len(events)) + assert.Equal(t, maxNumberOfBatchEvents, len(events)) }) t.Run("Return all you can. 
Not enough events", func(t *testing.T) { t.Parallel() - events, err := state.BridgeMessageStore.getBridgeMessageEventsForBridgeBatch(0, maxNumberOfEvents+1, nil, 1, 0) + events, err := state.getBridgeMessageEventsForBridgeBatch(0, maxNumberOfBatchEvents+1, nil, 1, 0) assert.ErrorIs(t, err, errNotEnoughBridgeEvents) - assert.Equal(t, maxNumberOfEvents, len(events)) + assert.Equal(t, maxNumberOfBatchEvents, len(events)) }) } @@ -157,7 +153,7 @@ func TestState_getBridgeBatchForBridgeEvents(t *testing.T) { } for _, c := range cases { - signedBridgeBatch, err := state.BridgeMessageStore.getBridgeBatchForBridgeEvents(c.bridgeMessageID, 1) + signedBridgeBatch, err := state.getBridgeBatchForBridgeEvents(c.bridgeMessageID, 1) if c.hasBatch { require.NoError(t, err, fmt.Sprintf("bridge event %v", c.bridgeMessageID)) @@ -202,7 +198,7 @@ func TestState_GetNestedBucketInEpoch(t *testing.T) { ) s := newTestState(t) - require.NoError(t, s.EpochStore.insertEpoch(c.epochNumber, nil, 0)) + require.NoError(t, s.insertEpoch(c.epochNumber, nil, 0)) err = s.db.View(func(tx *bbolt.Tx) error { nestedBucket, err = getNestedBucketInEpoch(tx, c.epochNumber, c.bucketName, 0) @@ -220,51 +216,11 @@ func TestState_GetNestedBucketInEpoch(t *testing.T) { } } -func createTestBridgeBatchMessage(t *testing.T, numberOfMessages, firstIndex uint64) *BridgeBatchSigned { - t.Helper() - - messages := make([]*contractsapi.BridgeMessage, numberOfMessages) - - for i := firstIndex; i < firstIndex+numberOfMessages; i++ { - messages[i-firstIndex] = &contractsapi.BridgeMessage{ - ID: new(big.Int).SetUint64(i), - SourceChainID: big.NewInt(1), - DestinationChainID: bigZero, - Sender: types.Address{}, - Receiver: types.Address{}, - Payload: []byte{}} - } - - msg := contractsapi.BridgeMessageBatch{ - Messages: messages, - SourceChainID: big.NewInt(1), - DestinationChainID: big.NewInt(0), - } - - blsKey, err := bls.GenerateBlsKey() - require.NoError(t, err) - - data := generateRandomBytes(t) - - signature, err := blsKey.Sign(data, domain) - require.NoError(t, err) - - signatures := bls.Signatures{signature} - - aggSig, err := signatures.Aggregate().Marshal() - require.NoError(t, err) - - return &BridgeBatchSigned{ - MessageBatch: &msg, - AggSignature: Signature{AggregatedSignature: aggSig}, - } -} - -func insertTestBridgeBatches(t *testing.T, state *State, numberOfBatches uint64) { +func insertTestBridgeBatches(t *testing.T, state *BridgeManagerStore, numberOfBatches uint64) { t.Helper() for i := uint64(0); i <= numberOfBatches; i++ { - signedBridgeBatch := createTestBridgeBatchMessage(t, 10, 10*i) - require.NoError(t, state.BridgeMessageStore.insertBridgeBatchMessage(signedBridgeBatch, nil)) + signedBridgeBatch := CreateTestBridgeBatchMessage(t, 10, 10*i) + require.NoError(t, state.insertBridgeBatchMessage(signedBridgeBatch, nil)) } } diff --git a/consensus/polybft/bridge/test_helpers.go b/consensus/polybft/bridge/test_helpers.go new file mode 100644 index 0000000000..951fa0f55f --- /dev/null +++ b/consensus/polybft/bridge/test_helpers.go @@ -0,0 +1,108 @@ +package bridge + +import ( + "fmt" + "math/big" + "testing" + + "github.com/0xPolygon/polygon-edge/bls" + "github.com/0xPolygon/polygon-edge/consensus/polybft/contractsapi" + "github.com/0xPolygon/polygon-edge/consensus/polybft/helpers" + polytypes "github.com/0xPolygon/polygon-edge/consensus/polybft/types" + "github.com/0xPolygon/polygon-edge/crypto" + "github.com/0xPolygon/polygon-edge/types" + "github.com/stretchr/testify/require" +) + +var TestDomain = 
crypto.Keccak256([]byte("DOMAIN")) + +func BuildBridgeBatchAndBridgeEvents(t *testing.T, bridgeMessageCount int, + epoch, startIdx uint64) (*PendingBridgeBatch, *BridgeBatchSigned, []*contractsapi.BridgeMsgEvent) { + t.Helper() + + bridgeMessageEvents := generateBridgeMessageEvents(t, bridgeMessageCount, startIdx) + pendingBridgeBatch, err := NewPendingBridgeBatch(epoch, bridgeMessageEvents) + require.NoError(t, err) + + blsKey, err := bls.GenerateBlsKey() + require.NoError(t, err) + + data, err := pendingBridgeBatch.BridgeMessageBatch.EncodeAbi() + require.NoError(t, err) + + signature, err := blsKey.Sign(data, TestDomain) + require.NoError(t, err) + + signatures := bls.Signatures{signature} + + aggSig, err := signatures.Aggregate().Marshal() + require.NoError(t, err) + + bridgeBatchSigned := &BridgeBatchSigned{ + MessageBatch: pendingBridgeBatch.BridgeMessageBatch, + AggSignature: polytypes.Signature{ + AggregatedSignature: aggSig, + Bitmap: []byte{}, + }, + } + + return pendingBridgeBatch, bridgeBatchSigned, bridgeMessageEvents +} + +func generateBridgeMessageEvents(t *testing.T, eventsCount int, startIdx uint64) []*contractsapi.BridgeMsgEvent { + t.Helper() + + bridgeMessageEvents := make([]*contractsapi.BridgeMsgEvent, eventsCount) + for i := 0; i < eventsCount; i++ { + bridgeMessageEvents[i] = &contractsapi.BridgeMsgEvent{ + ID: big.NewInt(int64(startIdx + uint64(i))), + Sender: types.StringToAddress(fmt.Sprintf("0x5%d", i)), + Receiver: types.StringToAddress(fmt.Sprintf("0x4%d", i)), + Data: helpers.GenerateRandomBytes(t), + SourceChainID: big.NewInt(1), + DestinationChainID: big.NewInt(0), + } + } + + return bridgeMessageEvents +} + +func CreateTestBridgeBatchMessage(t *testing.T, numberOfMessages, firstIndex uint64) *BridgeBatchSigned { + t.Helper() + + messages := make([]*contractsapi.BridgeMessage, numberOfMessages) + + for i := firstIndex; i < firstIndex+numberOfMessages; i++ { + messages[i-firstIndex] = &contractsapi.BridgeMessage{ + ID: new(big.Int).SetUint64(i), + SourceChainID: big.NewInt(1), + DestinationChainID: big.NewInt(0), + Sender: types.Address{}, + Receiver: types.Address{}, + Payload: []byte{}} + } + + msg := contractsapi.BridgeMessageBatch{ + Messages: messages, + SourceChainID: big.NewInt(1), + DestinationChainID: big.NewInt(0), + } + + blsKey, err := bls.GenerateBlsKey() + require.NoError(t, err) + + data := helpers.GenerateRandomBytes(t) + + signature, err := blsKey.Sign(data, TestDomain) + require.NoError(t, err) + + signatures := bls.Signatures{signature} + + aggSig, err := signatures.Aggregate().Marshal() + require.NoError(t, err) + + return &BridgeBatchSigned{ + MessageBatch: &msg, + AggSignature: polytypes.Signature{AggregatedSignature: aggSig}, + } +} diff --git a/consensus/polybft/bridge_event_relayer_test.go b/consensus/polybft/bridge_event_relayer_test.go deleted file mode 100644 index 7820da1be7..0000000000 --- a/consensus/polybft/bridge_event_relayer_test.go +++ /dev/null @@ -1,109 +0,0 @@ -package polybft - -/* func TestStateSyncRelayer_FullWorkflow(t *testing.T) { - t.Skip() - t.Parallel() - - testKey := createTestKey(t) - bridgeMessageAddr := types.StringToAddress("0x56563") - - headers := []*types.Header{ - {Number: 2}, {Number: 3}, {Number: 4}, {Number: 5}, - } - - blockhainMock := &blockchainMock{} - dummyTxRelayer := newDummyStakeTxRelayer(t, nil) - state := newTestState(t) - - stateSyncRelayer := newBridgeEventRelayer( - map[uint64]txrelayer.TxRelayer{1: dummyTxRelayer}, - state.BridgeMessageStore, - blockhainMock, - testKey, - 
&relayerConfig{ - maxAttemptsToSend: 6, - maxBlocksToWaitForResend: 1, - maxEventsPerBatch: 1, - eventExecutionAddr: bridgeMessageAddr, - }, - hclog.Default(), - ) - - for _, h := range headers { - blockhainMock.On("CurrentHeader").Return(h).Once() - } - - // send first two events without errors - dummyTxRelayer.On("SendTransaction", mock.Anything, testKey).Return((*ethgo.Receipt)(nil), nil).Times(2) - // fail 3rd time - dummyTxRelayer.On("SendTransaction", mock.Anything, testKey).Return( - (*ethgo.Receipt)(nil), errors.New("e")).Once() - // send 3 events all at once at the end - dummyTxRelayer.On("SendTransaction", mock.Anything, testKey).Return((*ethgo.Receipt)(nil), nil).Once() - - require.NoError(t, stateSyncRelayer.Init()) - - // post 1st block - require.NoError(t, stateSyncRelayer.PostBlock(&PostBlockRequest{})) - - time.Sleep(time.Second * 2) // wait for some time - - events, err := state.BridgeMessageStore.GetAllAvailableRelayerEvents(0) - - require.NoError(t, err) - require.Len(t, events, 3) - require.Equal(t, uint64(1), events[0].EventID) - require.True(t, events[0].SentStatus) - require.False(t, events[1].SentStatus) - require.False(t, events[2].SentStatus) - - require.NoError(t, stateSyncRelayer.PostBlock(&PostBlockRequest{})) - - time.Sleep(time.Second * 2) // wait for some time - - events, err = state.BridgeMessageStore.GetAllAvailableRelayerEvents(0) - - require.NoError(t, err) - require.Len(t, events, 4) - require.True(t, events[0].SentStatus) - require.Equal(t, uint64(2), events[0].EventID) - require.False(t, events[1].SentStatus) - require.False(t, events[2].SentStatus) - - time.Sleep(time.Second * 2) // wait for some time - - events, err = state.BridgeMessageStore.GetAllAvailableRelayerEvents(0) - - require.NoError(t, err) - require.Len(t, events, 3) - require.True(t, events[0].SentStatus) - require.Equal(t, uint64(3), events[0].EventID) - require.False(t, events[1].SentStatus) - - // post 4th block - will not provide result, so one more SendTransaction will be triggered - stateSyncRelayer.config.maxEventsPerBatch = 3 // send all 3 left events at once - - require.NoError(t, stateSyncRelayer.PostBlock(&PostBlockRequest{})) - - time.Sleep(time.Second * 2) // wait for some time - - events, err = state.BridgeMessageStore.GetAllAvailableRelayerEvents(0) - - require.NoError(t, err) - require.Len(t, events, 3) - require.True(t, events[0].SentStatus && events[1].SentStatus && events[2].SentStatus) - - time.Sleep(time.Second * 2) // wait for some time - - events, err = state.BridgeMessageStore.GetAllAvailableRelayerEvents(0) - - require.NoError(t, err) - require.Len(t, events, 0) - - stateSyncRelayer.Close() - time.Sleep(time.Second) - - blockhainMock.AssertExpectations(t) - dummyTxRelayer.AssertExpectations(t) -} -*/ diff --git a/consensus/polybft/polybft_config.go b/consensus/polybft/config/polybft_config.go similarity index 89% rename from consensus/polybft/polybft_config.go rename to consensus/polybft/config/polybft_config.go index 801db85afb..db05c0d1aa 100644 --- a/consensus/polybft/polybft_config.go +++ b/consensus/polybft/config/polybft_config.go @@ -1,4 +1,4 @@ -package polybft +package config import ( "encoding/json" @@ -24,7 +24,7 @@ const ( ) var ( - DefaultTokenConfig = &TokenConfig{ + DefaultTokenConfig = &Token{ Name: defaultNativeTokenName, Symbol: defaultNativeTokenSymbol, Decimals: defaultNativeTokenDecimals, @@ -35,13 +35,13 @@ var ( "()") ) -// PolyBFTConfig is the configuration file for the Polybft consensus protocol. 
-type PolyBFTConfig struct { +// PolyBFT is the configuration file for the Polybft consensus protocol. +type PolyBFT struct { // InitialValidatorSet are the genesis validators InitialValidatorSet []*validator.GenesisValidator `json:"initialValidatorSet"` // Bridge represent configuration for external bridges - Bridge map[uint64]*BridgeConfig `json:"bridge"` + Bridge map[uint64]*Bridge `json:"bridge"` // EpochSize is size of epoch EpochSize uint64 `json:"epochSize"` @@ -59,7 +59,7 @@ type PolyBFTConfig struct { Governance types.Address `json:"governance"` // NativeTokenConfig defines name, symbol and decimal count of the native token - NativeTokenConfig *TokenConfig `json:"nativeTokenConfig"` + NativeTokenConfig *Token `json:"nativeTokenConfig"` // InitialTrieRoot corresponds to pre-existing state root in case data gets migrated from a legacy system InitialTrieRoot types.Hash `json:"initialTrieRoot"` @@ -77,7 +77,7 @@ type PolyBFTConfig struct { WithdrawalWaitPeriod uint64 `json:"withdrawalWaitPeriod"` // RewardConfig defines rewards configuration - RewardConfig *RewardsConfig `json:"rewardConfig"` + RewardConfig *Rewards `json:"rewardConfig"` // BlockTimeDrift defines the time slot in which a new block can be created BlockTimeDrift uint64 `json:"blockTimeDrift"` @@ -95,44 +95,44 @@ type PolyBFTConfig struct { BladeAdmin types.Address `json:"bladeAdmin"` // GovernanceConfig defines on chain governance configuration - GovernanceConfig *GovernanceConfig `json:"governanceConfig"` + GovernanceConfig *Governance `json:"governanceConfig"` // StakeTokenAddr represents the stake token contract address StakeTokenAddr types.Address `json:"stakeTokenAddr"` } // LoadPolyBFTConfig loads chain config from provided path and unmarshals PolyBFTConfig -func LoadPolyBFTConfig(chainConfigFile string) (PolyBFTConfig, error) { +func LoadPolyBFTConfig(chainConfigFile string) (PolyBFT, error) { chainCfg, err := chain.ImportFromFile(chainConfigFile) if err != nil { - return PolyBFTConfig{}, err + return PolyBFT{}, err } polybftConfig, err := GetPolyBFTConfig(chainCfg.Params) if err != nil { - return PolyBFTConfig{}, err + return PolyBFT{}, err } return polybftConfig, err } // GetPolyBFTConfig deserializes provided chain config and returns PolyBFTConfig -func GetPolyBFTConfig(chainParams *chain.Params) (PolyBFTConfig, error) { +func GetPolyBFTConfig(chainParams *chain.Params) (PolyBFT, error) { consensusConfigJSON, err := json.Marshal(chainParams.Engine[ConsensusName]) if err != nil { - return PolyBFTConfig{}, err + return PolyBFT{}, err } - var polyBFTConfig PolyBFTConfig + var polyBFTConfig PolyBFT if err = json.Unmarshal(consensusConfigJSON, &polyBFTConfig); err != nil { - return PolyBFTConfig{}, err + return PolyBFT{}, err } return polyBFTConfig, nil } -// BridgeConfig is the external chain configuration, needed for bridging -type BridgeConfig struct { +// Bridge is the external chain configuration, needed for bridging +type Bridge struct { // External chain bridge contracts ExternalGatewayAddr types.Address `json:"externalGatewayAddress"` ExternalERC20PredicateAddr types.Address `json:"externalERC20PredicateAddress"` @@ -165,7 +165,7 @@ type BridgeConfig struct { } // GetHighestInternalAddress returns the highest address among all internal bridge contracts -func (b *BridgeConfig) GetHighestInternalAddress() types.Address { +func (b *Bridge) GetHighestInternalAddress() types.Address { internalAddrs := b.getInternalContractAddrs() if len(internalAddrs) == 0 { @@ -184,7 +184,7 @@ func (b *BridgeConfig) 
GetHighestInternalAddress() types.Address { } // getInternalContractAddrs enumerates all the Internal bridge contract addresses -func (b *BridgeConfig) getInternalContractAddrs() []types.Address { +func (b *Bridge) getInternalContractAddrs() []types.Address { return []types.Address{ b.InternalGatewayAddr, b.InternalERC20PredicateAddr, @@ -196,12 +196,12 @@ func (b *BridgeConfig) getInternalContractAddrs() []types.Address { } } -func (p *PolyBFTConfig) IsBridgeEnabled() bool { +func (p *PolyBFT) IsBridgeEnabled() bool { return len(p.Bridge) > 0 } -// TokenConfig is the configuration of native token used by edge network -type TokenConfig struct { +// Token is the configuration of native token used by edge network +type Token struct { Name string `json:"name"` Symbol string `json:"symbol"` Decimals uint8 `json:"decimals"` @@ -209,7 +209,7 @@ type TokenConfig struct { ChainID uint64 `json:"chainID"` } -func ParseRawTokenConfig(rawConfig string) (*TokenConfig, error) { +func ParseRawTokenConfig(rawConfig string) (*Token, error) { if rawConfig == "" { return DefaultTokenConfig, nil } @@ -256,7 +256,7 @@ func ParseRawTokenConfig(rawConfig string) (*TokenConfig, error) { } } - return &TokenConfig{ + return &Token{ Name: name, Symbol: symbol, Decimals: uint8(decimals), @@ -265,7 +265,7 @@ func ParseRawTokenConfig(rawConfig string) (*TokenConfig, error) { }, nil } -type RewardsConfig struct { +type Rewards struct { // TokenAddress is the address of reward token on child chain TokenAddress types.Address @@ -276,7 +276,7 @@ type RewardsConfig struct { WalletAmount *big.Int } -func (r *RewardsConfig) MarshalJSON() ([]byte, error) { +func (r *Rewards) MarshalJSON() ([]byte, error) { raw := &rewardsConfigRaw{ TokenAddress: r.TokenAddress, WalletAddress: r.WalletAddress, @@ -286,7 +286,7 @@ func (r *RewardsConfig) MarshalJSON() ([]byte, error) { return json.Marshal(raw) } -func (r *RewardsConfig) UnmarshalJSON(data []byte) error { +func (r *Rewards) UnmarshalJSON(data []byte) error { var ( raw rewardsConfigRaw err error @@ -307,7 +307,7 @@ func (r *RewardsConfig) UnmarshalJSON(data []byte) error { return nil } -type GovernanceConfig struct { +type Governance struct { // VotingDelay indicates number of blocks after proposal is submitted before voting starts VotingDelay *big.Int // VotingPeriod indicates number of blocks that the voting period for a proposal lasts @@ -327,7 +327,7 @@ type GovernanceConfig struct { ForkParamsAddr types.Address } -func (g *GovernanceConfig) MarshalJSON() ([]byte, error) { +func (g *Governance) MarshalJSON() ([]byte, error) { raw := &governanceConfigRaw{ VotingDelay: common.EncodeBigInt(g.VotingDelay), VotingPeriod: common.EncodeBigInt(g.VotingPeriod), @@ -342,7 +342,7 @@ func (g *GovernanceConfig) MarshalJSON() ([]byte, error) { return json.Marshal(raw) } -func (g *GovernanceConfig) UnmarshalJSON(data []byte) error { +func (g *Governance) UnmarshalJSON(data []byte) error { var ( raw governanceConfigRaw err error diff --git a/consensus/polybft/polybft_config_test.go b/consensus/polybft/config/polybft_config_test.go similarity index 97% rename from consensus/polybft/polybft_config_test.go rename to consensus/polybft/config/polybft_config_test.go index 415c8b5ffe..136368a72a 100644 --- a/consensus/polybft/polybft_config_test.go +++ b/consensus/polybft/config/polybft_config_test.go @@ -1,4 +1,4 @@ -package polybft +package config import ( "reflect" @@ -16,7 +16,7 @@ func TestBridgeConfig_getInternalContractAddrs(t *testing.T) { mockAddr := types.Address{0x1} // Initialize a 
BridgeConfig struct with all Internal fields set to mockAddr - config := &BridgeConfig{ + config := &Bridge{ InternalGatewayAddr: mockAddr, InternalERC20PredicateAddr: mockAddr, InternalERC721PredicateAddr: mockAddr, diff --git a/consensus/polybft/config/runtime_config.go b/consensus/polybft/config/runtime_config.go new file mode 100644 index 0000000000..09a011c6b5 --- /dev/null +++ b/consensus/polybft/config/runtime_config.go @@ -0,0 +1,18 @@ +package config + +import ( + "github.com/0xPolygon/polygon-edge/chain" + "github.com/0xPolygon/polygon-edge/consensus" + "github.com/0xPolygon/polygon-edge/consensus/polybft/wallet" +) + +// Runtime is a struct that holds configuration data for a given consensus runtime +type Runtime struct { + ChainParams *chain.Params + GenesisConfig *PolyBFT + Forks *chain.Forks + Key *wallet.Key + ConsensusConfig *consensus.Config + EventTracker *consensus.EventTracker + StateDataDir string +} diff --git a/consensus/polybft/consensus_metrics.go b/consensus/polybft/consensus_metrics.go deleted file mode 100644 index dc7ef5a10f..0000000000 --- a/consensus/polybft/consensus_metrics.go +++ /dev/null @@ -1,58 +0,0 @@ -package polybft - -import ( - "time" - - "github.com/0xPolygon/polygon-edge/types" - "github.com/armon/go-metrics" -) - -const ( - // consensusMetricsPrefix is a consensus-related metrics prefix - consensusMetricsPrefix = "consensus" -) - -// updateBlockMetrics updates various metrics based on the given block -// (such as block interval, number of transactions and block rounds metrics) -func updateBlockMetrics(currentBlock *types.Block, parentHeader *types.Header) error { - if currentBlock.Number() > 1 { - parentTime := time.Unix(int64(parentHeader.Timestamp), 0) - headerTime := time.Unix(int64(currentBlock.Header.Timestamp), 0) - // update the block interval metric - metrics.SetGauge([]string{consensusMetricsPrefix, "block_interval"}, float32(headerTime.Sub(parentTime).Seconds())) - } - - // update the number of transactions in the block metric - metrics.SetGauge([]string{consensusMetricsPrefix, "num_txs"}, float32(len(currentBlock.Transactions))) - - extra, err := GetIbftExtra(currentBlock.Header.ExtraData) - if err != nil { - return err - } - - // number of rounds needed to seal a block - metrics.SetGauge([]string{consensusMetricsPrefix, "rounds"}, float32(extra.BlockMetaData.BlockRound)) - metrics.SetGauge([]string{consensusMetricsPrefix, "chain_head"}, float32(currentBlock.Number())) - metrics.IncrCounter([]string{consensusMetricsPrefix, "block_counter"}, float32(1)) - metrics.SetGauge([]string{consensusMetricsPrefix, "block_space_used"}, float32(currentBlock.Header.GasUsed)) - - // Update the base fee metric - metrics.SetGauge([]string{consensusMetricsPrefix, "base_fee"}, float32(currentBlock.Header.BaseFee)) - - return nil -} - -// updateEpochMetrics updates epoch-related metrics -// (e.g.
epoch number, validator set length) -func updateEpochMetrics(epoch epochMetadata) { - // update epoch number metrics - metrics.SetGauge([]string{consensusMetricsPrefix, "epoch_number"}, float32(epoch.Number)) - // update number of validators metrics - metrics.SetGauge([]string{consensusMetricsPrefix, "validators"}, float32(epoch.Validators.Len())) -} - -// updateBlockExecutionMetric updates the block execution metric -func updateBlockExecutionMetric(start time.Time) { - metrics.SetGauge([]string{consensusMetricsPrefix, "block_execution_time"}, - float32(time.Now().UTC().Sub(start).Seconds())) -} diff --git a/consensus/polybft/consensus_runtime.go b/consensus/polybft/consensus_runtime.go index 004853e835..83109f93ba 100644 --- a/consensus/polybft/consensus_runtime.go +++ b/consensus/polybft/consensus_runtime.go @@ -17,27 +17,28 @@ import ( protobuf "google.golang.org/protobuf/proto" "github.com/0xPolygon/polygon-edge/chain" - "github.com/0xPolygon/polygon-edge/consensus" + "github.com/0xPolygon/polygon-edge/consensus/polybft/bridge" + "github.com/0xPolygon/polygon-edge/consensus/polybft/config" "github.com/0xPolygon/polygon-edge/consensus/polybft/contractsapi" + "github.com/0xPolygon/polygon-edge/consensus/polybft/governance" + "github.com/0xPolygon/polygon-edge/consensus/polybft/helpers" + polymetrics "github.com/0xPolygon/polygon-edge/consensus/polybft/metrics" + "github.com/0xPolygon/polygon-edge/consensus/polybft/proposer" "github.com/0xPolygon/polygon-edge/consensus/polybft/signer" + "github.com/0xPolygon/polygon-edge/consensus/polybft/stake" + "github.com/0xPolygon/polygon-edge/consensus/polybft/state" + systemstate "github.com/0xPolygon/polygon-edge/consensus/polybft/system_state" + polytypes "github.com/0xPolygon/polygon-edge/consensus/polybft/types" "github.com/0xPolygon/polygon-edge/consensus/polybft/validator" - "github.com/0xPolygon/polygon-edge/consensus/polybft/wallet" "github.com/0xPolygon/polygon-edge/contracts" "github.com/0xPolygon/polygon-edge/forkmanager" "github.com/0xPolygon/polygon-edge/helper/common" "github.com/0xPolygon/polygon-edge/types" ) -const ( - maxNumberOfEvents = 10 - stateFileName = "consensusState.db" -) - var ( // errNotAValidator represents "node is not a validator" error message errNotAValidator = errors.New("node is not a validator") - // errQuorumNotReached represents "quorum not reached for batch" error message - errQuorumNotReached = errors.New("quorum not reached for batch") ) // txPoolInterface is an abstraction of transaction pool @@ -66,7 +67,7 @@ type epochMetadata struct { // CurrentClientConfig is the current client configuration for current epoch // that is updated by governance proposals - CurrentClientConfig *PolyBFTConfig + CurrentClientConfig *config.PolyBFT } type guardedDataDTO struct { @@ -77,32 +78,22 @@ type guardedDataDTO struct { epoch *epochMetadata // proposerSnapshot at the time of collecting data - proposerSnapshot *ProposerSnapshot -} - -// runtimeConfig is a struct that holds configuration data for given consensus runtime -type runtimeConfig struct { - genesisParams *chain.Params - GenesisConfig *PolyBFTConfig - Forks *chain.Forks - DataDir string - Key *wallet.Key - State *State - blockchain blockchainBackend - polybftBackend polybftBackend - txPool txPoolInterface - bridgeTopic topic - consensusConfig *consensus.Config - eventTracker *consensus.EventTracker + proposerSnapshot *proposer.ProposerSnapshot } // consensusRuntime is a struct that provides consensus runtime features like epoch, state and event management type 
consensusRuntime struct { // config represents wrapper around required parameters which are received from the outside - config *runtimeConfig + config *config.Runtime + + blockchain polytypes.Blockchain + + backend polytypes.Polybft + + txPool polytypes.TxPool // state is reference to the struct which encapsulates bridge events persistence logic - state *State + state *state.State // fsm instance which is created for each `runSequence` fsm *fsm @@ -120,57 +111,67 @@ type consensusRuntime struct { activeValidatorFlag atomic.Bool // proposerCalculator is the object which manipulates with ProposerSnapshot - proposerCalculator *ProposerCalculator + proposerCalculator *proposer.ProposerCalculator // manager for handling validator stake change and updating validator set - stakeManager StakeManager + stakeManager stake.StakeManager - eventProvider *EventProvider + eventProvider *state.EventProvider // bridgeManagers handles storing, processing and executing bridge events - bridge Bridge + bridge bridge.Bridge // governanceManager is used for handling governance events gotten from proposals execution // also handles updating client configuration based on governance proposals - governanceManager GovernanceManager + governanceManager governance.GovernanceManager // logger instance logger hcf.Logger } // newConsensusRuntime creates and starts a new consensus runtime instance with event tracking -func newConsensusRuntime(log hcf.Logger, config *runtimeConfig) (*consensusRuntime, error) { - dbTx, err := config.State.beginDBTransaction(true) +func newConsensusRuntime(log hcf.Logger, config *config.Runtime, + st *state.State, + backend polytypes.Polybft, + blockchain polytypes.Blockchain, + txPool polytypes.TxPool, + bridgeTopic bridge.Topic, +) (*consensusRuntime, error) { + dbTx, err := st.BeginDBTransaction(true) if err != nil { return nil, fmt.Errorf("could not begin dbTx to init consensus runtime: %w", err) } defer dbTx.Rollback() //nolint:errcheck - proposerCalculator, err := NewProposerCalculator(config, log.Named("proposer_calculator"), dbTx) + proposerCalculator, err := proposer.NewProposerCalculator( + config, log.Named("proposer_calculator"), + st, backend, blockchain, dbTx) if err != nil { return nil, fmt.Errorf("failed to create consensus runtime, error while creating proposer calculator %w", err) } runtime := &consensusRuntime{ - state: config.State, + state: st, config: config, - lastBuiltBlock: config.blockchain.CurrentHeader(), + lastBuiltBlock: blockchain.CurrentHeader(), proposerCalculator: proposerCalculator, logger: log.Named("consensus_runtime"), - eventProvider: NewEventProvider(config.blockchain), - } - - if runtime.IsBridgeEnabled() { - if runtime.bridge, err = newBridge( - runtime, - runtime.config, - runtime.eventProvider, - log.Named("bridge")); err != nil { - return nil, err - } - } else { - runtime.bridge = &dummyBridge{} + eventProvider: state.NewEventProvider(blockchain), + backend: backend, + blockchain: blockchain, + txPool: txPool, + } + + if runtime.bridge, err = bridge.NewBridge( + runtime, + runtime.state, + runtime.config, + bridgeTopic, + runtime.eventProvider, + runtime.blockchain, + log.Named("bridge")); err != nil { + return nil, err } if err := runtime.initStakeManager(log, dbTx); err != nil { @@ -203,12 +204,12 @@ func (c *consensusRuntime) close() { func (c *consensusRuntime) initStakeManager(logger hcf.Logger, dbTx *bolt.Tx) error { var err error - c.stakeManager, err = newStakeManager( + c.stakeManager, err = stake.NewStakeManager( 
logger.Named("stake-manager"), c.state, contracts.StakeManagerContract, - c.config.blockchain, - c.config.polybftBackend, + c.blockchain, + c.backend, dbTx, ) @@ -219,11 +220,11 @@ func (c *consensusRuntime) initStakeManager(logger hcf.Logger, dbTx *bolt.Tx) er // initGovernanceManager initializes governance manager func (c *consensusRuntime) initGovernanceManager(logger hcf.Logger, dbTx *bolt.Tx) error { - governanceManager, err := newGovernanceManager( - c.config.genesisParams, + governanceManager, err := governance.NewGovernanceManager( + c.config.ChainParams, logger.Named("governance-manager"), c.state, - c.config.blockchain, + c.blockchain, dbTx, ) @@ -278,12 +279,12 @@ func (c *consensusRuntime) OnBlockInserted(fullBlock *types.FullBlock) { return } - if err := updateBlockMetrics(fullBlock.Block, c.lastBuiltBlock); err != nil { + if err := polymetrics.UpdateBlockMetrics(fullBlock.Block, c.lastBuiltBlock); err != nil { c.logger.Error("failed to update block metrics", "error", err) } // after the block has been written we reset the txpool so that the old transactions are removed - c.config.txPool.ResetWithBlock(fullBlock.Block) + c.txPool.ResetWithBlock(fullBlock.Block) var ( epoch = c.epoch @@ -294,7 +295,7 @@ func (c *consensusRuntime) OnBlockInserted(fullBlock *types.FullBlock) { ) // begin DB transaction - dbTx, err := c.state.beginDBTransaction(true) + dbTx, err := c.state.BeginDBTransaction(true) if err != nil { c.logger.Error("failed to begin db transaction on block finalization", "block", fullBlock.Block.Number(), "err", err) @@ -304,7 +305,7 @@ func (c *consensusRuntime) OnBlockInserted(fullBlock *types.FullBlock) { defer dbTx.Rollback() //nolint:errcheck - lastProcessedEventsBlock, err := c.state.getLastProcessedEventsBlock(dbTx) + lastProcessedEventsBlock, err := c.state.GetLastProcessedEventsBlock(dbTx) if err != nil { c.logger.Error("failed to get last processed events block on block finalization", "block", fullBlock.Block.Number(), "err", err) @@ -318,7 +319,7 @@ func (c *consensusRuntime) OnBlockInserted(fullBlock *types.FullBlock) { return } - postBlock := &PostBlockRequest{ + postBlock := &polytypes.PostBlockRequest{ FullBlock: fullBlock, Epoch: epoch.Number, IsEpochEndingBlock: isEndOfEpoch, @@ -360,7 +361,7 @@ func (c *consensusRuntime) OnBlockInserted(fullBlock *types.FullBlock) { } } - if err := c.state.insertLastProcessedEventsBlock(fullBlock.Block.Number(), dbTx); err != nil { + if err := c.state.InsertLastProcessedEventsBlock(fullBlock.Block.Number(), dbTx); err != nil { c.logger.Error("failed to update the last processed events block in db", "error", err) return @@ -397,10 +398,10 @@ func (c *consensusRuntime) FSM() error { return errNotAValidator } - blockBuilder, err := c.config.blockchain.NewBlockBuilder( + blockBuilder, err := c.blockchain.NewBlockBuilder( parent, c.config.Key.Address(), - c.config.txPool, + c.txPool, epoch.CurrentClientConfig.BlockTime.Duration, c.logger, ) @@ -421,8 +422,8 @@ func (c *consensusRuntime) FSM() error { config: epoch.CurrentClientConfig, forks: c.config.Forks, parent: parent, - backend: c.config.blockchain, - polybftBackend: c.config.polybftBackend, + blockchain: c.blockchain, + polybftBackend: c.backend, epochNumber: epoch.Number, blockBuilder: blockBuilder, validators: valSet, @@ -492,25 +493,18 @@ func (c *consensusRuntime) restartEpoch(header *types.Header, dbTx *bolt.Tx) (*e } } - validatorSet, err := c.config.polybftBackend.GetValidatorsWithTx(header.Number, nil, dbTx) + validatorSet, err := 
c.backend.GetValidatorsWithTx(header.Number, nil, dbTx) if err != nil { return nil, fmt.Errorf("restart epoch - cannot get validators: %w", err) } - updateEpochMetrics(epochMetadata{ - Number: epochNumber, - Validators: validatorSet, - }) + polymetrics.UpdateEpochMetrics(epochNumber, len(validatorSet)) firstBlockInEpoch, err := c.getFirstBlockOfEpoch(epochNumber, header) if err != nil { return nil, err } - if err := c.state.EpochStore.cleanEpochsFromDB(dbTx); err != nil { - c.logger.Error("Could not clean previous epochs from db.", "error", err) - } - c.logger.Info( "restartEpoch", "block number", header.Number, @@ -519,7 +513,7 @@ func (c *consensusRuntime) restartEpoch(header *types.Header, dbTx *bolt.Tx) (*e "firstBlockInEpoch", firstBlockInEpoch, ) - reqObj := &PostEpochRequest{ + reqObj := &polytypes.PostEpochRequest{ SystemState: systemState, NewEpochID: epochNumber, FirstBlockOfEpoch: firstBlockInEpoch, @@ -541,12 +535,12 @@ func (c *consensusRuntime) restartEpoch(header *types.Header, dbTx *bolt.Tx) (*e return nil, err } - currentPolyConfig, err := GetPolyBFTConfig(currentParams) + currentPolyConfig, err := config.GetPolyBFTConfig(currentParams) if err != nil { return nil, err } - c.config.polybftBackend.SetBlockTime(currentPolyConfig.BlockTime.Duration) + c.backend.SetBlockTime(currentPolyConfig.BlockTime.Duration) return &epochMetadata{ Number: epochNumber, @@ -577,7 +571,7 @@ func (c *consensusRuntime) calculateDistributeRewardsInput( lastFinalizedBlock *types.Header, epochID uint64, ) (*contractsapi.DistributeRewardForEpochManagerFn, error) { - if !isRewardDistributionBlock(c.config.Forks, isFirstBlockOfEpoch, isEndOfEpoch, pendingBlockNumber) { + if !governance.IsRewardDistributionBlock(c.config.Forks, isFirstBlockOfEpoch, isEndOfEpoch, pendingBlockNumber) { // we don't have to distribute rewards at this block return nil, nil } @@ -596,7 +590,7 @@ func (c *consensusRuntime) calculateDistributeRewardsInput( epochID-- } - getSealersForBlock := func(blockExtra *Extra, validators validator.AccountSet) error { + getSealersForBlock := func(blockExtra *polytypes.Extra, validators validator.AccountSet) error { signers, err := validators.GetFilteredValidators(blockExtra.Parent.Bitmap) if err != nil { return err @@ -611,19 +605,19 @@ func (c *consensusRuntime) calculateDistributeRewardsInput( return nil } - blockExtra, err := GetIbftExtra(blockHeader.ExtraData) + blockExtra, err := polytypes.GetIbftExtra(blockHeader.ExtraData) if err != nil { return nil, err } - previousBlockHeader, previousBlockExtra, err := getBlockData(blockHeader.Number-1, c.config.blockchain) + previousBlockHeader, previousBlockExtra, err := helpers.GetBlockData(blockHeader.Number-1, c.blockchain) if err != nil { return nil, err } // calculate uptime starting from last block - 1 in epoch until first block in given epoch for previousBlockExtra.BlockMetaData.EpochNumber == blockExtra.BlockMetaData.EpochNumber { - validators, err := c.config.polybftBackend.GetValidators(blockHeader.Number-1, nil) + validators, err := c.backend.GetValidators(blockHeader.Number-1, nil) if err != nil { return nil, err } @@ -632,24 +626,24 @@ func (c *consensusRuntime) calculateDistributeRewardsInput( return nil, err } - blockHeader, blockExtra, err = getBlockData(blockHeader.Number-1, c.config.blockchain) + blockHeader, blockExtra, err = helpers.GetBlockData(blockHeader.Number-1, c.blockchain) if err != nil { return nil, err } - previousBlockHeader, previousBlockExtra, err = getBlockData(previousBlockHeader.Number-1, c.config.blockchain) 
+ previousBlockHeader, previousBlockExtra, err = helpers.GetBlockData(previousBlockHeader.Number-1, c.blockchain) if err != nil { return nil, err } } - lookbackSize := getLookbackSizeForRewardDistribution(c.config.Forks, pendingBlockNumber) + lookbackSize := governance.GetLookbackSizeForRewardDistribution(c.config.Forks, pendingBlockNumber) // calculate uptime for blocks from previous epoch that were not processed in previous uptime // since we can not calculate uptime for the last block in epoch (because of parent signatures) if blockHeader.Number > lookbackSize { for i := uint64(0); i < lookbackSize; i++ { - validators, err := c.config.polybftBackend.GetValidators(blockHeader.Number-2, nil) + validators, err := c.backend.GetValidators(blockHeader.Number-2, nil) if err != nil { return nil, err } @@ -658,7 +652,7 @@ func (c *consensusRuntime) calculateDistributeRewardsInput( return nil, err } - blockHeader, blockExtra, err = getBlockData(blockHeader.Number-1, c.config.blockchain) + blockHeader, blockExtra, err = helpers.GetBlockData(blockHeader.Number-1, c.blockchain) if err != nil { return nil, err } @@ -716,13 +710,13 @@ func (c *consensusRuntime) isFixedSizeOfSprintMet(blockNumber uint64, epoch *epo } // getSystemState builds SystemState instance for the most current block header -func (c *consensusRuntime) getSystemState(header *types.Header) (SystemState, error) { - provider, err := c.config.blockchain.GetStateProviderForBlock(header) +func (c *consensusRuntime) getSystemState(header *types.Header) (systemstate.SystemState, error) { + provider, err := c.blockchain.GetStateProviderForBlock(header) if err != nil { return nil, err } - return c.config.blockchain.GetSystemState(provider), nil + return c.blockchain.GetSystemState(provider), nil } func (c *consensusRuntime) IsValidProposal(rawProposal []byte) bool { @@ -784,7 +778,7 @@ func (c *consensusRuntime) IsValidProposalHash(proposal *proto.Proposal, hash [] return false } - extra, err := GetIbftExtra(block.Header.ExtraData) + extra, err := polytypes.GetIbftExtra(block.Header.ExtraData) if err != nil { c.logger.Error("failed to retrieve extra", "block number", block.Number(), "error", err) @@ -890,7 +884,7 @@ func (c *consensusRuntime) BuildPrePrepareMessage( return nil } - extra, err := GetIbftExtra(block.Header.ExtraData) + extra, err := polytypes.GetIbftExtra(block.Header.ExtraData) if err != nil { c.logger.Error("failed to retrieve extra for block %d: %w", block.Number(), err) @@ -993,9 +987,9 @@ func (c *consensusRuntime) RoundStarts(view *proto.View) error { c.logger.Info("RoundStarts", "height", view.Height, "round", view.Round) if view.Round > 0 { - c.config.txPool.ReinsertProposed() + c.txPool.ReinsertProposed() } else { - c.config.txPool.ClearProposed() + c.txPool.ClearProposed() } return nil @@ -1004,7 +998,7 @@ func (c *consensusRuntime) RoundStarts(view *proto.View) error { // SequenceCancelled represents sequence cancelled callback func (c *consensusRuntime) SequenceCancelled(view *proto.View) error { c.logger.Info("SequenceCancelled", "height", view.Height, "round", view.Round) - c.config.txPool.ReinsertProposed() + c.txPool.ReinsertProposed() return nil } @@ -1047,7 +1041,7 @@ func (c *consensusRuntime) getFirstBlockOfEpoch(epochNumber uint64, latestHeader blockHeader := latestHeader - blockExtra, err := GetIbftExtra(latestHeader.ExtraData) + blockExtra, err := polytypes.GetIbftExtra(latestHeader.ExtraData) if err != nil { return 0, err } @@ -1064,7 +1058,7 @@ func (c *consensusRuntime) getFirstBlockOfEpoch(epochNumber 
uint64, latestHeader for blockExtra.BlockMetaData.EpochNumber == epoch { firstBlockInEpoch = blockHeader.Number - blockHeader, blockExtra, err = getBlockData(blockHeader.Number-1, c.config.blockchain) + blockHeader, blockExtra, err = helpers.GetBlockData(blockHeader.Number-1, c.blockchain) if err != nil { return 0, err diff --git a/consensus/polybft/consensus_runtime_test.go b/consensus/polybft/consensus_runtime_test.go index 1218cd4a94..f51409f9e3 100644 --- a/consensus/polybft/consensus_runtime_test.go +++ b/consensus/polybft/consensus_runtime_test.go @@ -8,11 +8,19 @@ import ( "testing" "time" - "github.com/0xPolygon/go-ibft/messages/proto" + ibftproto "github.com/0xPolygon/go-ibft/messages/proto" "github.com/0xPolygon/polygon-edge/chain" "github.com/0xPolygon/polygon-edge/consensus" "github.com/0xPolygon/polygon-edge/consensus/polybft/bitmap" + "github.com/0xPolygon/polygon-edge/consensus/polybft/bridge" + "github.com/0xPolygon/polygon-edge/consensus/polybft/config" + "github.com/0xPolygon/polygon-edge/consensus/polybft/governance" + "github.com/0xPolygon/polygon-edge/consensus/polybft/helpers" + "github.com/0xPolygon/polygon-edge/consensus/polybft/proposer" "github.com/0xPolygon/polygon-edge/consensus/polybft/signer" + "github.com/0xPolygon/polygon-edge/consensus/polybft/stake" + "github.com/0xPolygon/polygon-edge/consensus/polybft/state" + polytypes "github.com/0xPolygon/polygon-edge/consensus/polybft/types" "github.com/0xPolygon/polygon-edge/consensus/polybft/validator" "github.com/0xPolygon/polygon-edge/consensus/polybft/wallet" "github.com/0xPolygon/polygon-edge/contracts" @@ -20,9 +28,11 @@ import ( "github.com/0xPolygon/polygon-edge/helper/common" "github.com/0xPolygon/polygon-edge/types" "github.com/hashicorp/go-hclog" + "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" ) func init() { @@ -48,7 +58,7 @@ func TestConsensusRuntime_isFixedSizeOfEpochMet_NotReachedEnd(t *testing.T) { {10, 1, 1}, } - config := &runtimeConfig{GenesisConfig: &PolyBFTConfig{}} + config := &config.Runtime{GenesisConfig: &config.PolyBFT{}} runtime := &consensusRuntime{ config: config, lastBuiltBlock: &types.Header{}, @@ -86,7 +96,7 @@ func TestConsensusRuntime_isFixedSizeOfEpochMet_ReachedEnd(t *testing.T) { {10, 1, 10}, } - config := &runtimeConfig{GenesisConfig: &PolyBFTConfig{}} + config := &config.Runtime{GenesisConfig: &config.PolyBFT{}} runtime := &consensusRuntime{ config: config, epoch: &epochMetadata{CurrentClientConfig: config.GenesisConfig}, @@ -123,7 +133,7 @@ func TestConsensusRuntime_isFixedSizeOfSprintMet_NotReachedEnd(t *testing.T) { {10, 1, 1}, } - config := &runtimeConfig{GenesisConfig: &PolyBFTConfig{}} + config := &config.Runtime{GenesisConfig: &config.PolyBFT{}} runtime := &consensusRuntime{ config: config, epoch: &epochMetadata{CurrentClientConfig: config.GenesisConfig}, @@ -161,7 +171,7 @@ func TestConsensusRuntime_isFixedSizeOfSprintMet_ReachedEnd(t *testing.T) { {3, 3, 5}, } - config := &runtimeConfig{GenesisConfig: &PolyBFTConfig{}} + config := &config.Runtime{GenesisConfig: &config.PolyBFT{}} runtime := &consensusRuntime{ config: config, epoch: &epochMetadata{CurrentClientConfig: config.GenesisConfig}, @@ -196,59 +206,62 @@ func TestConsensusRuntime_OnBlockInserted_EndOfEpoch(t *testing.T) { }) newEpochNumber := currentEpochNumber + 1 - systemStateMock := new(systemStateMock) + systemStateMock := new(helpers.SystemStateMock) 
systemStateMock.On("GetEpoch").Return(newEpochNumber).Once() - blockchainMock := new(blockchainMock) - blockchainMock.On("GetStateProviderForBlock", mock.Anything).Return(new(stateProviderMock)).Once() + blockchainMock := new(helpers.BlockchainMock) + blockchainMock.On("GetStateProviderForBlock", mock.Anything).Return(new(helpers.StateProviderMock)).Once() blockchainMock.On("GetSystemState", mock.Anything, mock.Anything).Return(systemStateMock) - blockchainMock.On("GetHeaderByNumber", mock.Anything).Return(headerMap.getHeader) + blockchainMock.On("GetHeaderByNumber", mock.Anything).Return(headerMap.GetHeader) - polybftBackendMock := new(polybftBackendMock) + polybftBackendMock := new(helpers.PolybftBackendMock) polybftBackendMock.On("GetValidatorsWithTx", mock.Anything, mock.Anything, mock.Anything).Return(validatorSet).Times(3) polybftBackendMock.On("SetBlockTime", mock.Anything).Once() - txPool := new(txPoolMock) - txPool.On("ResetWithBlock", mock.Anything).Once() + txPoolMock := new(helpers.TxPoolMock) + txPoolMock.On("ResetWithBlock", mock.Anything).Once() - snapshot := NewProposerSnapshot(epochSize-1, validatorSet) - polybftCfg := &PolyBFTConfig{EpochSize: epochSize} - config := &runtimeConfig{ - GenesisConfig: &PolyBFTConfig{ + snapshot := proposer.NewProposerSnapshot(epochSize-1, validatorSet) + polybftCfg := &config.PolyBFT{EpochSize: epochSize} + config := &config.Runtime{ + GenesisConfig: &config.PolyBFT{ EpochSize: epochSize, }, - genesisParams: &chain.Params{Engine: map[string]interface{}{ConsensusName: polybftCfg}}, - blockchain: blockchainMock, - polybftBackend: polybftBackendMock, - txPool: txPool, - State: newTestState(t), + ChainParams: &chain.Params{Engine: map[string]interface{}{config.ConsensusName: polybftCfg}}, } - require.NoError(t, config.State.insertLastProcessedEventsBlock(builtBlock.Number()-1, nil)) + st := state.NewTestState(t) + + require.NoError(t, st.InsertLastProcessedEventsBlock(builtBlock.Number()-1, nil)) + + proposerCalculator, err := proposer.NewProposerCalculatorFromSnapshot(snapshot, config, st, + polybftBackendMock, blockchainMock, hclog.NewNullLogger()) + require.NoError(t, err) runtime := &consensusRuntime{ - proposerCalculator: NewProposerCalculatorFromSnapshot(snapshot, config, hclog.NewNullLogger()), + proposerCalculator: proposerCalculator, logger: hclog.NewNullLogger(), - state: config.State, + state: st, config: config, + blockchain: blockchainMock, + backend: polybftBackendMock, + txPool: txPoolMock, epoch: &epochMetadata{ Number: currentEpochNumber, FirstBlockInEpoch: header.Number - epochSize + 1, CurrentClientConfig: config.GenesisConfig, }, lastBuiltBlock: &types.Header{Number: header.Number - 1}, - stakeManager: &dummyStakeManager{}, - eventProvider: NewEventProvider(blockchainMock), - governanceManager: &dummyGovernanceManager{ - getClientConfigFn: func() (*chain.Params, error) { - return config.genesisParams, nil + stakeManager: &stake.DummyStakeManager{}, + eventProvider: state.NewEventProvider(blockchainMock), + governanceManager: &governance.DummyGovernanceManager{ + GetClientConfigFn: func() (*chain.Params, error) { + return config.ChainParams, nil }}, } - runtime.bridge = createTestBridge(t, runtime.state) + runtime.bridge = &bridge.DummyBridge{} runtime.OnBlockInserted(&types.FullBlock{Block: builtBlock}) - - require.True(t, runtime.state.EpochStore.isEpochInserted(currentEpochNumber+1, 1)) require.Equal(t, newEpochNumber, runtime.epoch.Number) blockchainMock.AssertExpectations(t) @@ -270,35 +283,35 @@ func 
TestConsensusRuntime_OnBlockInserted_MiddleOfEpoch(t *testing.T) { Header: header, }) - blockchainMock := new(blockchainMock) + blockchainMock := new(helpers.BlockchainMock) blockchainMock.On("GetHeaderByNumber", mock.Anything).Return(builtBlock.Header, true).Once() - polybftBackendMock := new(polybftBackendMock) + polybftBackendMock := new(helpers.PolybftBackendMock) polybftBackendMock.On("GetValidatorsWithTx", mock.Anything, mock.Anything, mock.Anything).Return(nil).Once() - txPool := new(txPoolMock) - txPool.On("ResetWithHeaders", mock.Anything).Once() + txPoolMock := new(helpers.TxPoolMock) + txPoolMock.On("ResetWithHeaders", mock.Anything).Once() - snapshot := NewProposerSnapshot(blockNumber, []*validator.ValidatorMetadata{}) - config := &runtimeConfig{ - GenesisConfig: &PolyBFTConfig{EpochSize: epochSize}, - blockchain: blockchainMock, - txPool: txPool, + snapshot := proposer.NewProposerSnapshot(blockNumber, []*validator.ValidatorMetadata{}) + config := &config.Runtime{ + GenesisConfig: &config.PolyBFT{EpochSize: epochSize}, } + proposerCalculator, err := proposer.NewProposerCalculatorFromSnapshot(snapshot, config, state.NewTestState(t), + polybftBackendMock, blockchainMock, hclog.NewNullLogger()) + require.NoError(t, err) + runtime := &consensusRuntime{ lastBuiltBlock: header, - config: &runtimeConfig{ - GenesisConfig: &PolyBFTConfig{EpochSize: epochSize}, - blockchain: blockchainMock, - txPool: txPool, - }, + blockchain: blockchainMock, + txPool: txPoolMock, + config: config, epoch: &epochMetadata{ Number: epoch, FirstBlockInEpoch: firstBlockInEpoch, }, logger: hclog.NewNullLogger(), - proposerCalculator: NewProposerCalculatorFromSnapshot(snapshot, config, hclog.NewNullLogger()), + proposerCalculator: proposerCalculator, } runtime.OnBlockInserted(&types.FullBlock{Block: builtBlock}) @@ -310,15 +323,20 @@ func TestConsensusRuntime_FSM_NotInValidatorSet(t *testing.T) { validators := validator.NewTestValidatorsWithAliases(t, []string{"A", "B", "C", "D"}) - snapshot := NewProposerSnapshot(1, nil) - config := &runtimeConfig{ - GenesisConfig: &PolyBFTConfig{ + snapshot := proposer.NewProposerSnapshot(1, nil) + config := &config.Runtime{ + GenesisConfig: &config.PolyBFT{ EpochSize: 1, }, - Key: createTestKey(t), + Key: helpers.CreateTestKey(t), } + + proposerCalculator, err := proposer.NewProposerCalculatorFromSnapshot(snapshot, config, state.NewTestState(t), + new(helpers.PolybftBackendMock), new(helpers.BlockchainMock), hclog.NewNullLogger()) + require.NoError(t, err) + runtime := &consensusRuntime{ - proposerCalculator: NewProposerCalculatorFromSnapshot(snapshot, config, hclog.NewNullLogger()), + proposerCalculator: proposerCalculator, config: config, epoch: &epochMetadata{ Number: 1, @@ -328,15 +346,14 @@ func TestConsensusRuntime_FSM_NotInValidatorSet(t *testing.T) { } runtime.setIsActiveValidator(true) - err := runtime.FSM() - assert.ErrorIs(t, err, errNotAValidator) + assert.ErrorIs(t, runtime.FSM(), errNotAValidator) } func TestConsensusRuntime_FSM_NotEndOfEpoch_NotEndOfSprint(t *testing.T) { t.Parallel() - extra := &Extra{ - BlockMetaData: &BlockMetaData{}, + extra := &polytypes.Extra{ + BlockMetaData: &polytypes.BlockMetaData{}, } lastBlock := &types.Header{ Number: 1, @@ -344,21 +361,25 @@ func TestConsensusRuntime_FSM_NotEndOfEpoch_NotEndOfSprint(t *testing.T) { } validators := validator.NewTestValidators(t, 3) - blockchainMock := new(blockchainMock) - blockchainMock.On("NewBlockBuilder", mock.Anything).Return(&BlockBuilder{}, nil).Once() + blockchainMock := 
new(helpers.BlockchainMock) + blockchainMock.On("NewBlockBuilder", mock.Anything).Return(new(helpers.BlockBuilderMock), nil).Once() - snapshot := NewProposerSnapshot(1, nil) - config := &runtimeConfig{ - GenesisConfig: &PolyBFTConfig{ + snapshot := proposer.NewProposerSnapshot(1, nil) + config := &config.Runtime{ + GenesisConfig: &config.PolyBFT{ EpochSize: 10, SprintSize: 5, }, - Key: wallet.NewKey(validators.GetPrivateIdentities()[0]), - blockchain: blockchainMock, - Forks: chain.AllForksEnabled, + Key: wallet.NewKey(validators.GetPrivateIdentities()[0]), + Forks: chain.AllForksEnabled, } + + proposerCalculator, err := proposer.NewProposerCalculatorFromSnapshot(snapshot, config, state.NewTestState(t), + new(helpers.PolybftBackendMock), blockchainMock, hclog.NewNullLogger()) + require.NoError(t, err) + runtime := &consensusRuntime{ - proposerCalculator: NewProposerCalculatorFromSnapshot(snapshot, config, hclog.NewNullLogger()), + proposerCalculator: proposerCalculator, logger: hclog.NewNullLogger(), config: config, epoch: &epochMetadata{ @@ -367,14 +388,14 @@ func TestConsensusRuntime_FSM_NotEndOfEpoch_NotEndOfSprint(t *testing.T) { FirstBlockInEpoch: 1, CurrentClientConfig: config.GenesisConfig, }, + blockchain: blockchainMock, lastBuiltBlock: lastBlock, - state: newTestState(t), - bridge: &dummyBridge{}, + state: state.NewTestState(t), + bridge: &bridge.DummyBridge{}, } runtime.setIsActiveValidator(true) - err := runtime.FSM() - require.NoError(t, err) + require.NoError(t, runtime.FSM()) assert.True(t, runtime.IsActiveValidator()) assert.False(t, runtime.fsm.isEndOfEpoch) @@ -385,7 +406,7 @@ func TestConsensusRuntime_FSM_NotEndOfEpoch_NotEndOfSprint(t *testing.T) { assert.True(t, runtime.fsm.ValidatorSet().Includes(address)) assert.NotNil(t, runtime.fsm.blockBuilder) - assert.NotNil(t, runtime.fsm.backend) + assert.NotNil(t, runtime.fsm.blockchain) blockchainMock.AssertExpectations(t) } @@ -405,20 +426,18 @@ func TestConsensusRuntime_FSM_EndOfEpoch_BuildCommitEpoch(t *testing.T) { validatorAccounts := validator.NewTestValidatorsWithAliases(t, []string{"A", "B", "C", "D", "E", "F"}) validators := validatorAccounts.GetPublicIdentities() - blockchainMock := new(blockchainMock) - blockchainMock.On("NewBlockBuilder", mock.Anything).Return(&BlockBuilder{}, nil).Once() + blockchainMock := new(helpers.BlockchainMock) + blockchainMock.On("NewBlockBuilder", mock.Anything).Return(new(helpers.BlockBuilderMock), nil).Once() - state := newTestState(t) - require.NoError(t, state.EpochStore.insertEpoch(epoch, nil, 0)) + state := state.NewTestState(t) - config := &runtimeConfig{ - GenesisConfig: &PolyBFTConfig{ + config := &config.Runtime{ + GenesisConfig: &config.PolyBFT{ EpochSize: epochSize, SprintSize: sprintSize, }, - Key: validatorAccounts.GetValidator("A").Key(), - blockchain: blockchainMock, - Forks: chain.AllForksEnabled, + Key: validatorAccounts.GetValidator("A").Key(), + Forks: chain.AllForksEnabled, } metadata := &epochMetadata{ @@ -428,22 +447,26 @@ func TestConsensusRuntime_FSM_EndOfEpoch_BuildCommitEpoch(t *testing.T) { CurrentClientConfig: config.GenesisConfig, } - snapshot := NewProposerSnapshot(1, nil) + snapshot := proposer.NewProposerSnapshot(1, nil) + proposerCalculator, err := proposer.NewProposerCalculatorFromSnapshot(snapshot, config, state, + new(helpers.PolybftBackendMock), blockchainMock, hclog.NewNullLogger()) + require.NoError(t, err) + runtime := &consensusRuntime{ - proposerCalculator: NewProposerCalculatorFromSnapshot(snapshot, config, hclog.NewNullLogger()), + 
proposerCalculator: proposerCalculator, logger: hclog.NewNullLogger(), state: state, epoch: metadata, config: config, lastBuiltBlock: &types.Header{Number: 9}, - stakeManager: &dummyStakeManager{}, - bridge: &dummyBridge{}, + stakeManager: &stake.DummyStakeManager{}, + bridge: &bridge.DummyBridge{}, + blockchain: blockchainMock, } - err := runtime.FSM() - fsm := runtime.fsm + assert.NoError(t, runtime.FSM()) - assert.NoError(t, err) + fsm := runtime.fsm assert.True(t, fsm.isEndOfEpoch) assert.NotNil(t, fsm.commitEpochInput) assert.NotEmpty(t, fsm.commitEpochInput) @@ -457,7 +480,7 @@ func Test_NewConsensusRuntime(t *testing.T) { _, err := os.Create("/tmp/consensusState.db") require.NoError(t, err) - polyBftConfig := &PolyBFTConfig{ + polyBftConfig := &config.PolyBFT{ /* Bridge: map[uint64]*BridgeConfig{0: { StateSenderAddr: types.Address{0x13}, CheckpointManagerAddr: types.Address{0x10}, @@ -470,52 +493,41 @@ func Test_NewConsensusRuntime(t *testing.T) { validators := validator.NewTestValidators(t, 3).GetPublicIdentities() - systemStateMock := new(systemStateMock) + systemStateMock := new(helpers.SystemStateMock) systemStateMock.On("GetEpoch").Return(uint64(1)).Once() systemStateMock.On("GetNextCommittedIndex").Return(uint64(1)).Once() - blockchainMock := &blockchainMock{} + blockchainMock := new(helpers.BlockchainMock) blockchainMock.On("CurrentHeader").Return(&types.Header{Number: 1, ExtraData: createTestExtraForAccounts(t, 1, validators, nil)}) - blockchainMock.On("GetStateProviderForBlock", mock.Anything).Return(new(stateProviderMock)).Once() + blockchainMock.On("GetStateProviderForBlock", mock.Anything).Return(new(helpers.StateProviderMock)).Once() blockchainMock.On("GetSystemState", mock.Anything, mock.Anything).Return(systemStateMock).Once() blockchainMock.On("GetHeaderByNumber", uint64(0)).Return(&types.Header{Number: 0, ExtraData: createTestExtraForAccounts(t, 0, validators, nil)}) blockchainMock.On("GetHeaderByNumber", uint64(1)).Return(&types.Header{Number: 1, ExtraData: createTestExtraForAccounts(t, 1, validators, nil)}) - polybftBackendMock := new(polybftBackendMock) - polybftBackendMock.On("GetValidatorsWithTx", mock.Anything, mock.Anything, mock.Anything).Return(validators).Times(3) + polybftBackendMock := new(helpers.PolybftBackendMock) + polybftBackendMock.On("GetValidatorsWithTx", mock.Anything, mock.Anything, mock.Anything).Return(validators).Times(4) polybftBackendMock.On("SetBlockTime", mock.Anything).Once() tmpDir := t.TempDir() - config := &runtimeConfig{ - polybftBackend: polybftBackendMock, - State: newTestState(t), - genesisParams: &chain.Params{Engine: map[string]interface{}{ConsensusName: polyBftConfig}}, - GenesisConfig: polyBftConfig, - DataDir: tmpDir, - Key: createTestKey(t), - blockchain: blockchainMock, - bridgeTopic: &mockTopic{}, - consensusConfig: &consensus.Config{}, - eventTracker: &consensus.EventTracker{}, - Forks: chain.AllForksEnabled, - } - - require.NoError(t, config.State.StakeStore.insertFullValidatorSet(validatorSetState{ - BlockNumber: 1, - }, nil)) - - runtime, err := newConsensusRuntime(hclog.NewNullLogger(), config) + st := state.NewTestState(t) + + config := &config.Runtime{ + ChainParams: &chain.Params{Engine: map[string]interface{}{config.ConsensusName: polyBftConfig}}, + GenesisConfig: polyBftConfig, + StateDataDir: tmpDir, + Key: helpers.CreateTestKey(t), + EventTracker: &consensus.EventTracker{}, + Forks: chain.AllForksEnabled, + } + + runtime, err := newConsensusRuntime(hclog.NewNullLogger(), config, st, polybftBackendMock, 
blockchainMock, nil, &mockTopic{}) require.NoError(t, err) assert.False(t, runtime.IsActiveValidator()) - assert.Equal(t, runtime.config.DataDir, tmpDir) + assert.Equal(t, runtime.config.StateDataDir, tmpDir) assert.Equal(t, uint64(10), runtime.config.GenesisConfig.SprintSize) assert.Equal(t, uint64(10), runtime.config.GenesisConfig.EpochSize) assert.Equal(t, "0x0000000000000000000000000000000000000101", contracts.EpochManagerContract.String()) - // assert.Equal(t, "0x1300000000000000000000000000000000000000", runtime.config.GenesisConfig.Bridge[0].StateSenderAddr.String()) - // assert.Equal(t, "0x1000000000000000000000000000000000000000", runtime.config.GenesisConfig.Bridge[0].CheckpointManagerAddr.String()) - // assert.True(t, runtime.IsBridgeEnabled()) - // systemStateMock.AssertExpectations(t) blockchainMock.AssertExpectations(t) polybftBackendMock.AssertExpectations(t) } @@ -528,19 +540,22 @@ func TestConsensusRuntime_restartEpoch_SameEpochNumberAsTheLastOne(t *testing.T) newCurrentHeader := &types.Header{Number: originalBlockNumber + 1} validatorSet := validator.NewTestValidators(t, 3).GetPublicIdentities() - systemStateMock := new(systemStateMock) + systemStateMock := new(helpers.SystemStateMock) systemStateMock.On("GetEpoch").Return(uint64(1), nil).Once() - blockchainMock := new(blockchainMock) - blockchainMock.On("GetStateProviderForBlock", mock.Anything).Return(new(stateProviderMock)).Once() + blockchainMock := new(helpers.BlockchainMock) + blockchainMock.On("GetStateProviderForBlock", mock.Anything).Return(new(helpers.StateProviderMock)).Once() blockchainMock.On("GetSystemState", mock.Anything, mock.Anything).Return(systemStateMock).Once() - snapshot := NewProposerSnapshot(1, nil) - config := &runtimeConfig{ - blockchain: blockchainMock, - } + snapshot := proposer.NewProposerSnapshot(1, nil) + config := &config.Runtime{} + + proposerCalculator, err := proposer.NewProposerCalculatorFromSnapshot(snapshot, config, state.NewTestState(t), + new(helpers.PolybftBackendMock), blockchainMock, hclog.NewNullLogger()) + require.NoError(t, err) + runtime := &consensusRuntime{ - proposerCalculator: NewProposerCalculatorFromSnapshot(snapshot, config, hclog.NewNullLogger()), + proposerCalculator: proposerCalculator, config: config, epoch: &epochMetadata{ Number: 1, @@ -550,6 +565,7 @@ func TestConsensusRuntime_restartEpoch_SameEpochNumberAsTheLastOne(t *testing.T) lastBuiltBlock: &types.Header{ Number: originalBlockNumber, }, + blockchain: blockchainMock, } runtime.setIsActiveValidator(true) @@ -577,29 +593,29 @@ func TestConsensusRuntime_calculateCommitEpochInput_SecondEpoch(t *testing.T) { ) validators := validator.NewTestValidatorsWithAliases(t, []string{"A", "B", "C", "D", "E"}) - polybftConfig := &PolyBFTConfig{ + polybftConfig := &config.PolyBFT{ EpochSize: epochSize, SprintSize: sprintSize, } lastBuiltBlock, headerMap := createTestBlocks(t, 20, epochSize, validators.GetPublicIdentities()) - blockchainMock := new(blockchainMock) - blockchainMock.On("GetHeaderByNumber", mock.Anything).Return(headerMap.getHeader) + blockchainMock := new(helpers.BlockchainMock) + blockchainMock.On("GetHeaderByNumber", mock.Anything).Return(headerMap.GetHeader) - polybftBackendMock := new(polybftBackendMock) + polybftBackendMock := new(helpers.PolybftBackendMock) polybftBackendMock.On("GetValidators", mock.Anything, mock.Anything).Return(validators.GetPublicIdentities()).Times(10) - config := &runtimeConfig{ - GenesisConfig: polybftConfig, - blockchain: blockchainMock, - polybftBackend: polybftBackendMock, - 
Key: validators.GetValidator("A").Key(), - Forks: chain.AllForksEnabled, + config := &config.Runtime{ + GenesisConfig: polybftConfig, + Key: validators.GetValidator("A").Key(), + Forks: chain.AllForksEnabled, } consensusRuntime := &consensusRuntime{ - config: config, + config: config, + blockchain: blockchainMock, + backend: polybftBackendMock, epoch: &epochMetadata{ Number: currentEpoch, Validators: validators.GetPublicIdentities(), @@ -672,7 +688,7 @@ func TestConsensusRuntime_IsValidValidator_BasicCases(t *testing.T) { runtime, validatorAccounts := setupFn(t) signer := validatorAccounts.GetValidator(c.signerAlias) sender := validatorAccounts.GetValidator(c.senderAlias) - msg, err := signer.Key().SignIBFTMessage(&proto.IbftMessage{From: sender.Address().Bytes()}) + msg, err := signer.Key().SignIBFTMessage(&ibftproto.IbftMessage{From: sender.Address().Bytes()}) require.NoError(t, err) require.Equal(t, c.isValidSender, runtime.IsValidValidator(msg)) @@ -695,7 +711,7 @@ func TestConsensusRuntime_IsValidValidator_TamperSignature(t *testing.T) { // provide invalid signature sender := validatorAccounts.GetValidator("A") - msg := &proto.IbftMessage{ + msg := &ibftproto.IbftMessage{ From: sender.Address().Bytes(), Signature: []byte{1, 2, 3, 4, 5}, } @@ -719,12 +735,12 @@ func TestConsensusRuntime_TamperMessageContent(t *testing.T) { proposalSignature, err := sender.Key().SignWithDomain(proposalHash, signer.DomainBridge) require.NoError(t, err) - msg := &proto.IbftMessage{ - View: &proto.View{}, + msg := &ibftproto.IbftMessage{ + View: &ibftproto.View{}, From: sender.Address().Bytes(), - Type: proto.MessageType_COMMIT, - Payload: &proto.IbftMessage_CommitData{ - CommitData: &proto.CommitMessage{ + Type: ibftproto.MessageType_COMMIT, + Payload: &ibftproto.IbftMessage_CommitData{ + CommitData: &ibftproto.CommitMessage{ ProposalHash: proposalHash, CommittedSeal: proposalSignature, }, @@ -737,8 +753,8 @@ func TestConsensusRuntime_TamperMessageContent(t *testing.T) { assert.True(t, runtime.IsValidValidator(msg)) // modify message without signing it again - msg.Payload = &proto.IbftMessage_CommitData{ - CommitData: &proto.CommitMessage{ + msg.Payload = &ibftproto.IbftMessage_CommitData{ + CommitData: &ibftproto.CommitMessage{ ProposalHash: []byte{1, 3, 5, 7, 9}, // modification CommittedSeal: proposalSignature, }, @@ -750,8 +766,8 @@ func TestConsensusRuntime_TamperMessageContent(t *testing.T) { func TestConsensusRuntime_IsValidProposalHash(t *testing.T) { t.Parallel() - extra := &Extra{ - BlockMetaData: &BlockMetaData{ + extra := &polytypes.Extra{ + BlockMetaData: &polytypes.BlockMetaData{ EpochNumber: 1, BlockRound: 1, }, @@ -768,18 +784,19 @@ func TestConsensusRuntime_IsValidProposalHash(t *testing.T) { require.NoError(t, err) runtime := &consensusRuntime{ - logger: hclog.NewNullLogger(), - config: &runtimeConfig{blockchain: new(blockchainMock)}, + logger: hclog.NewNullLogger(), + config: &config.Runtime{}, + blockchain: new(helpers.BlockchainMock), } - require.True(t, runtime.IsValidProposalHash(&proto.Proposal{RawProposal: block.MarshalRLP()}, proposalHash.Bytes())) + require.True(t, runtime.IsValidProposalHash(&ibftproto.Proposal{RawProposal: block.MarshalRLP()}, proposalHash.Bytes())) } func TestConsensusRuntime_IsValidProposalHash_InvalidProposalHash(t *testing.T) { t.Parallel() - extra := &Extra{ - BlockMetaData: &BlockMetaData{ + extra := &polytypes.Extra{ + BlockMetaData: &polytypes.BlockMetaData{ EpochNumber: 1, BlockRound: 1, }, @@ -800,18 +817,19 @@ func 
TestConsensusRuntime_IsValidProposalHash_InvalidProposalHash(t *testing.T) block.Header.ComputeHash() runtime := &consensusRuntime{ - logger: hclog.NewNullLogger(), - config: &runtimeConfig{blockchain: new(blockchainMock)}, + logger: hclog.NewNullLogger(), + config: &config.Runtime{}, + blockchain: new(helpers.BlockchainMock), } - require.False(t, runtime.IsValidProposalHash(&proto.Proposal{RawProposal: block.MarshalRLP()}, proposalHash.Bytes())) + require.False(t, runtime.IsValidProposalHash(&ibftproto.Proposal{RawProposal: block.MarshalRLP()}, proposalHash.Bytes())) } func TestConsensusRuntime_IsValidProposalHash_InvalidExtra(t *testing.T) { t.Parallel() - extra := &Extra{ - BlockMetaData: &BlockMetaData{ + extra := &polytypes.Extra{ + BlockMetaData: &polytypes.BlockMetaData{ EpochNumber: 1, BlockRound: 1, }, @@ -829,33 +847,39 @@ func TestConsensusRuntime_IsValidProposalHash_InvalidExtra(t *testing.T) { require.NoError(t, err) runtime := &consensusRuntime{ - logger: hclog.NewNullLogger(), - config: &runtimeConfig{blockchain: new(blockchainMock)}, + logger: hclog.NewNullLogger(), + config: &config.Runtime{}, + blockchain: new(helpers.BlockchainMock), } - require.False(t, runtime.IsValidProposalHash(&proto.Proposal{RawProposal: block.MarshalRLP()}, proposalHash.Bytes())) + require.False(t, runtime.IsValidProposalHash(&ibftproto.Proposal{RawProposal: block.MarshalRLP()}, proposalHash.Bytes())) } func TestConsensusRuntime_BuildProposal_InvalidParent(t *testing.T) { - config := &runtimeConfig{} - snapshot := NewProposerSnapshot(1, nil) + config := &config.Runtime{} + snapshot := proposer.NewProposerSnapshot(1, nil) + + proposerCalculator, err := proposer.NewProposerCalculatorFromSnapshot(snapshot, config, state.NewTestState(t), + new(helpers.PolybftBackendMock), new(helpers.BlockchainMock), hclog.NewNullLogger()) + require.NoError(t, err) + runtime := &consensusRuntime{ logger: hclog.NewNullLogger(), lastBuiltBlock: &types.Header{Number: 2}, epoch: &epochMetadata{Number: 1}, config: config, - proposerCalculator: NewProposerCalculatorFromSnapshot(snapshot, config, hclog.NewNullLogger()), + proposerCalculator: proposerCalculator, } - require.Nil(t, runtime.BuildProposal(&proto.View{Round: 5})) + require.Nil(t, runtime.BuildProposal(&ibftproto.View{Round: 5})) } func TestConsensusRuntime_ID(t *testing.T) { t.Parallel() - key1, key2 := createTestKey(t), createTestKey(t) + key1, key2 := helpers.CreateTestKey(t), helpers.CreateTestKey(t) runtime := &consensusRuntime{ - config: &runtimeConfig{Key: key1}, + config: &config.Runtime{Key: key1}, } require.Equal(t, runtime.ID(), key1.Address().Bytes()) @@ -896,26 +920,26 @@ func TestConsensusRuntime_GetVotingPowers(t *testing.T) { func TestConsensusRuntime_BuildRoundChangeMessage(t *testing.T) { t.Parallel() - key := createTestKey(t) - view, rawProposal, certificate := &proto.View{}, []byte{1}, &proto.PreparedCertificate{} + key := helpers.CreateTestKey(t) + view, rawProposal, certificate := &ibftproto.View{}, []byte{1}, &ibftproto.PreparedCertificate{} runtime := &consensusRuntime{ - config: &runtimeConfig{ + config: &config.Runtime{ Key: key, }, logger: hclog.NewNullLogger(), } - proposal := &proto.Proposal{ + proposal := &ibftproto.Proposal{ RawProposal: rawProposal, Round: view.Round, } - expected := proto.IbftMessage{ + expected := ibftproto.IbftMessage{ View: view, From: key.Address().Bytes(), - Type: proto.MessageType_ROUND_CHANGE, - Payload: &proto.IbftMessage_RoundChangeData{RoundChangeData: &proto.RoundChangeMessage{ + Type: 
ibftproto.MessageType_ROUND_CHANGE, + Payload: &ibftproto.IbftMessage_RoundChangeData{RoundChangeData: &ibftproto.RoundChangeMessage{ LatestPreparedCertificate: certificate, LastPreparedProposal: proposal, }}, @@ -930,11 +954,11 @@ func TestConsensusRuntime_BuildRoundChangeMessage(t *testing.T) { func TestConsensusRuntime_BuildCommitMessage(t *testing.T) { t.Parallel() - key := createTestKey(t) - view, proposalHash := &proto.View{}, []byte{1, 2, 4} + key := helpers.CreateTestKey(t) + view, proposalHash := &ibftproto.View{}, []byte{1, 2, 4} runtime := &consensusRuntime{ - config: &runtimeConfig{ + config: &config.Runtime{ Key: key, }, } @@ -942,12 +966,12 @@ func TestConsensusRuntime_BuildCommitMessage(t *testing.T) { committedSeal, err := key.SignWithDomain(proposalHash, signer.DomainBridge) require.NoError(t, err) - expected := proto.IbftMessage{ + expected := ibftproto.IbftMessage{ View: view, From: key.Address().Bytes(), - Type: proto.MessageType_COMMIT, - Payload: &proto.IbftMessage_CommitData{ - CommitData: &proto.CommitMessage{ + Type: ibftproto.MessageType_COMMIT, + Payload: &ibftproto.IbftMessage_CommitData{ + CommitData: &ibftproto.CommitMessage{ ProposalHash: proposalHash, CommittedSeal: committedSeal, }, @@ -965,7 +989,8 @@ func TestConsensusRuntime_BuildPrePrepareMessage_EmptyProposal(t *testing.T) { runtime := &consensusRuntime{logger: hclog.NewNullLogger()} - assert.Nil(t, runtime.BuildPrePrepareMessage(nil, &proto.RoundChangeCertificate{}, &proto.View{Height: 1, Round: 0})) + assert.Nil(t, runtime.BuildPrePrepareMessage(nil, &ibftproto.RoundChangeCertificate{}, + &ibftproto.View{Height: 1, Round: 0})) } func TestConsensusRuntime_IsValidProposalHash_EmptyProposal(t *testing.T) { @@ -973,28 +998,28 @@ func TestConsensusRuntime_IsValidProposalHash_EmptyProposal(t *testing.T) { runtime := &consensusRuntime{logger: hclog.NewNullLogger()} - assert.False(t, runtime.IsValidProposalHash(&proto.Proposal{}, []byte("hash"))) + assert.False(t, runtime.IsValidProposalHash(&ibftproto.Proposal{}, []byte("hash"))) } func TestConsensusRuntime_BuildPrepareMessage(t *testing.T) { t.Parallel() - key := createTestKey(t) - view, proposalHash := &proto.View{}, []byte{1, 2, 4} + key := helpers.CreateTestKey(t) + view, proposalHash := &ibftproto.View{}, []byte{1, 2, 4} runtime := &consensusRuntime{ - config: &runtimeConfig{ + config: &config.Runtime{ Key: key, }, logger: hclog.NewNullLogger(), } - expected := proto.IbftMessage{ + expected := ibftproto.IbftMessage{ View: view, From: key.Address().Bytes(), - Type: proto.MessageType_PREPARE, - Payload: &proto.IbftMessage_PrepareData{ - PrepareData: &proto.PrepareMessage{ + Type: ibftproto.MessageType_PREPARE, + Payload: &ibftproto.IbftMessage_PrepareData{ + PrepareData: &ibftproto.PrepareMessage{ ProposalHash: proposalHash, }, }, @@ -1024,17 +1049,16 @@ func TestConsensusRuntime_RoundStarts(t *testing.T) { for _, c := range cases { c := c t.Run(c.funcName, func(t *testing.T) { - txPool := new(txPoolMock) + txPool := new(helpers.TxPoolMock) txPool.On(c.funcName).Once() runtime := &consensusRuntime{ - config: &runtimeConfig{ - txPool: txPool, - }, + config: &config.Runtime{}, logger: hclog.NewNullLogger(), + txPool: txPool, } - view := &proto.View{Round: c.round} + view := &ibftproto.View{Round: c.round} require.NoError(t, runtime.RoundStarts(view)) txPool.AssertExpectations(t) }) @@ -1042,30 +1066,29 @@ func TestConsensusRuntime_RoundStarts(t *testing.T) { } func TestConsensusRuntime_SequenceCancelled(t *testing.T) { - txPool := new(txPoolMock) + txPool 
:= new(helpers.TxPoolMock) txPool.On("ReinsertProposed").Once() runtime := &consensusRuntime{ - config: &runtimeConfig{ - txPool: txPool, - }, + config: &config.Runtime{}, logger: hclog.NewNullLogger(), + txPool: txPool, } - view := &proto.View{} + view := &ibftproto.View{} require.NoError(t, runtime.SequenceCancelled(view)) txPool.AssertExpectations(t) } func createTestBlocks(t *testing.T, numberOfBlocks, defaultEpochSize uint64, - validatorSet validator.AccountSet) (*types.Header, *testHeadersMap) { + validatorSet validator.AccountSet) (*types.Header, *helpers.TestHeadersMap) { t.Helper() - headerMap := &testHeadersMap{} + headerMap := &helpers.TestHeadersMap{} bitmaps := createTestBitmaps(t, validatorSet, numberOfBlocks) - extra := &Extra{ - BlockMetaData: &BlockMetaData{EpochNumber: 0}, + extra := &polytypes.Extra{ + BlockMetaData: &polytypes.BlockMetaData{EpochNumber: 0}, } genesisBlock := &types.Header{ @@ -1074,7 +1097,7 @@ func createTestBlocks(t *testing.T, numberOfBlocks, defaultEpochSize uint64, } parentHash := types.BytesToHash(big.NewInt(0).Bytes()) - headerMap.addHeader(genesisBlock) + headerMap.AddHeader(genesisBlock) var hash types.Hash @@ -1091,7 +1114,7 @@ func createTestBlocks(t *testing.T, numberOfBlocks, defaultEpochSize uint64, GasLimit: types.StateTransactionGasLimit, } - headerMap.addHeader(header) + headerMap.AddHeader(header) parentHash = hash blockHeader = header @@ -1132,27 +1155,45 @@ func createTestExtraForAccounts(t *testing.T, epoch uint64, validators validator t.Helper() dummySignature := [64]byte{} - extraData := Extra{ + extraData := polytypes.Extra{ Validators: &validator.ValidatorSetDelta{ Added: validators, Removed: bitmap.Bitmap{}, }, - Parent: &Signature{Bitmap: b, AggregatedSignature: dummySignature[:]}, - Committed: &Signature{Bitmap: b, AggregatedSignature: dummySignature[:]}, - BlockMetaData: &BlockMetaData{EpochNumber: epoch}, + Parent: &polytypes.Signature{Bitmap: b, AggregatedSignature: dummySignature[:]}, + Committed: &polytypes.Signature{Bitmap: b, AggregatedSignature: dummySignature[:]}, + BlockMetaData: &polytypes.BlockMetaData{EpochNumber: epoch}, } return extraData.MarshalRLPTo(nil) } -func createTestBridge(t *testing.T, state *State) Bridge { - t.Helper() +var _ bridge.Topic = &mockTopic{} - manager := &mockBridgeManager{state: state, chainID: 1} +type mockTopic struct { + published proto.Message +} + +func (m *mockTopic) Publish(obj proto.Message) error { + m.published = obj - return &bridge{ - bridgeManagers: map[uint64]BridgeManager{1: manager}, - state: state, - relayer: &dummyBridgeEventRelayer{}, + return nil +} + +func (m *mockTopic) Subscribe(handler func(obj interface{}, from peer.ID)) error { + return nil +} + +// getEpochNumber returns epoch number for given blockNumber and epochSize. +// Epoch number is derived as a result of division of block number and epoch size. +// Since epoch number is 1-based (0 block represents special case zero epoch), +// we are incrementing result by one for non epoch-ending blocks. 
+func getEpochNumber(t *testing.T, blockNumber, epochSize uint64) uint64 { + t.Helper() + + if blockNumber%epochSize == 0 { // is end of period + return blockNumber / epochSize } + + return blockNumber/epochSize + 1 } diff --git a/consensus/polybft/contracts_initializer.go b/consensus/polybft/contracts_initializer.go index e64a463d63..0499e066e3 100644 --- a/consensus/polybft/contracts_initializer.go +++ b/consensus/polybft/contracts_initializer.go @@ -6,6 +6,7 @@ import ( "github.com/0xPolygon/polygon-edge/bls" "github.com/0xPolygon/polygon-edge/chain" + "github.com/0xPolygon/polygon-edge/consensus/polybft/config" "github.com/0xPolygon/polygon-edge/consensus/polybft/contractsapi" "github.com/0xPolygon/polygon-edge/consensus/polybft/signer" "github.com/0xPolygon/polygon-edge/consensus/polybft/validator" @@ -20,7 +21,7 @@ const ( contractCallGasLimit = 100_000_000 ) -func initStakeManager(polyBFTConfig PolyBFTConfig, transition *state.Transition) error { +func initStakeManager(polyBFTConfig config.PolyBFT, transition *state.Transition) error { startValidators := make([]*contractsapi.GenesisValidator, len(polyBFTConfig.InitialValidatorSet)) for i, validator := range polyBFTConfig.InitialValidatorSet { @@ -76,7 +77,7 @@ func initStakeManager(polyBFTConfig PolyBFTConfig, transition *state.Transition) } // initEpochManager initializes EpochManager SC -func initEpochManager(polyBFTConfig PolyBFTConfig, transition *state.Transition) error { +func initEpochManager(polyBFTConfig config.PolyBFT, transition *state.Transition) error { initFn := &contractsapi.InitializeEpochManagerFn{ NewRewardToken: polyBFTConfig.RewardConfig.TokenAddress, NewRewardWallet: polyBFTConfig.RewardConfig.WalletAddress, @@ -95,7 +96,7 @@ func initEpochManager(polyBFTConfig PolyBFTConfig, transition *state.Transition) // getInitERC20PredicateInput builds initialization input parameters for child chain ERC20Predicate SC func getInitERC20PredicateInput( - config *BridgeConfig, + config *config.Bridge, internalChainMintable bool, destinationChainID *big.Int) ([]byte, error) { var params contractsapi.StateTransactionInput @@ -120,7 +121,7 @@ func getInitERC20PredicateInput( } // getInitERC20PredicateACLInput builds initialization input parameters for child chain ERC20PredicateAccessList SC -func getInitERC20PredicateACLInput(config *BridgeConfig, owner types.Address, +func getInitERC20PredicateACLInput(config *config.Bridge, owner types.Address, useAllowList, useBlockList, internalChainMintable bool, destinationChainID *big.Int) ([]byte, error) { var params contractsapi.StateTransactionInput if internalChainMintable { @@ -151,7 +152,7 @@ func getInitERC20PredicateACLInput(config *BridgeConfig, owner types.Address, // getInitERC721PredicateInput builds initialization input parameters for child chain ERC721Predicate SC func getInitERC721PredicateInput( - config *BridgeConfig, + config *config.Bridge, childOriginatedTokens bool, destinationChainID *big.Int) ([]byte, error) { var params contractsapi.StateTransactionInput @@ -176,7 +177,7 @@ func getInitERC721PredicateInput( // getInitERC721PredicateACLInput builds initialization input parameters // for child chain ERC721PredicateAccessList SC -func getInitERC721PredicateACLInput(config *BridgeConfig, owner types.Address, +func getInitERC721PredicateACLInput(config *config.Bridge, owner types.Address, useAllowList, useBlockList, internalChainMintable bool, destinationChainID *big.Int) ([]byte, error) { var params contractsapi.StateTransactionInput if internalChainMintable { @@ -206,7 
+207,7 @@ func getInitERC721PredicateACLInput(config *BridgeConfig, owner types.Address, // getInitERC1155PredicateInput builds initialization input parameters for child chain ERC1155Predicate SC func getInitERC1155PredicateInput( - config *BridgeConfig, + config *config.Bridge, internalChainMintable bool, destinationChainID *big.Int) ([]byte, error) { var params contractsapi.StateTransactionInput @@ -231,7 +232,7 @@ func getInitERC1155PredicateInput( // getInitERC1155PredicateACLInput builds initialization input parameters // for child chain ERC1155PredicateAccessList SC -func getInitERC1155PredicateACLInput(config *BridgeConfig, owner types.Address, +func getInitERC1155PredicateACLInput(config *config.Bridge, owner types.Address, useAllowList, useBlockList, internalChainMintable bool, destinationChainID *big.Int) ([]byte, error) { var params contractsapi.StateTransactionInput if internalChainMintable { @@ -260,7 +261,7 @@ func getInitERC1155PredicateACLInput(config *BridgeConfig, owner types.Address, } // initNetworkParamsContract initializes NetworkParams contract on child chain -func initNetworkParamsContract(baseFeeChangeDenom uint64, cfg PolyBFTConfig, +func initNetworkParamsContract(baseFeeChangeDenom uint64, cfg config.PolyBFT, transition *state.Transition) error { initFn := &contractsapi.InitializeNetworkParamsFn{ InitParams: &contractsapi.InitParams{ @@ -293,7 +294,7 @@ func initNetworkParamsContract(baseFeeChangeDenom uint64, cfg PolyBFTConfig, } // initForkParamsContract initializes ForkParams contract on child chain -func initForkParamsContract(cfg PolyBFTConfig, transition *state.Transition) error { +func initForkParamsContract(cfg config.PolyBFT, transition *state.Transition) error { initFn := &contractsapi.InitializeForkParamsFn{ NewOwner: cfg.GovernanceConfig.ChildTimelockAddr, } @@ -308,7 +309,7 @@ func initForkParamsContract(cfg PolyBFTConfig, transition *state.Transition) err } // initChildTimelock initializes ChildTimelock contract on child chain -func initChildTimelock(cfg PolyBFTConfig, transition *state.Transition) error { +func initChildTimelock(cfg config.PolyBFT, transition *state.Transition) error { addresses := make([]types.Address, len(cfg.InitialValidatorSet)+1) // we need to add child governor to list of proposers and executors as well addresses[0] = cfg.GovernanceConfig.ChildGovernorAddr @@ -334,7 +335,7 @@ func initChildTimelock(cfg PolyBFTConfig, transition *state.Transition) error { } // initChildGovernor initializes ChildGovernor contract on child chain -func initChildGovernor(cfg PolyBFTConfig, transition *state.Transition) error { +func initChildGovernor(cfg config.PolyBFT, transition *state.Transition) error { addresses := make([]types.Address, len(cfg.InitialValidatorSet)) for i := 0; i < len(cfg.InitialValidatorSet); i++ { addresses[i] = cfg.InitialValidatorSet[i].Address @@ -357,7 +358,7 @@ func initChildGovernor(cfg PolyBFTConfig, transition *state.Transition) error { } // initBridgeStorageContract initializes BridgeStorage contract on blade chain -func initBridgeStorageContract(cfg PolyBFTConfig, transition *state.Transition) error { +func initBridgeStorageContract(cfg config.PolyBFT, transition *state.Transition) error { validators, err := getValidatorStorageValidators(cfg.InitialValidatorSet) if err != nil { return fmt.Errorf("error while converting validators for bridge storage contract: %w", err) @@ -379,7 +380,7 @@ func initBridgeStorageContract(cfg PolyBFTConfig, transition *state.Transition) } // initGatewayContract initializes Gateway 
contract on blade chain -func initGatewayContract(cfg PolyBFTConfig, bridgeCfg *BridgeConfig, +func initGatewayContract(cfg config.PolyBFT, bridgeCfg *config.Bridge, transition *state.Transition, alloc map[types.Address]*chain.GenesisAccount) error { implementationAddr := bridgeCfg.InternalGatewayAddr.IncrementBy(1) if _, exists := alloc[implementationAddr]; !exists { @@ -411,7 +412,7 @@ func initGatewayContract(cfg PolyBFTConfig, bridgeCfg *BridgeConfig, // initERC20ACLPredicateContract initializes ChildERC20Predicate with access list contract on blade chain func initERC20ACLPredicateContract( transition *state.Transition, - bcfg *BridgeConfig, + bcfg *config.Bridge, alloc map[types.Address]*chain.GenesisAccount, owner types.Address, useBridgeAllowList, useBridgeBlockList, childMintable bool, @@ -441,7 +442,7 @@ func initERC20ACLPredicateContract( // initERC721ACLPredicateContract initializes ChildERC721Predicate with access list contract on blade chain func initERC721ACLPredicateContract( transition *state.Transition, - bcfg *BridgeConfig, + bcfg *config.Bridge, alloc map[types.Address]*chain.GenesisAccount, owner types.Address, useBridgeAllowList, useBridgeBlockList, childMintable bool, @@ -471,7 +472,7 @@ func initERC721ACLPredicateContract( // initERC1155ACLPredicateContract initializes ChildERC1155Predicate with access list contract on blade chain func initERC1155ACLPredicateContract( transition *state.Transition, - bcfg *BridgeConfig, + bcfg *config.Bridge, alloc map[types.Address]*chain.GenesisAccount, owner types.Address, useBridgeAllowList, useBridgeBlockList, childMintable bool, @@ -501,7 +502,7 @@ func initERC1155ACLPredicateContract( // initERC20PredicateContract initializes ChildERC20Predicate contract on blade chain func initERC20PredicateContract( transition *state.Transition, - bcfg *BridgeConfig, + bcfg *config.Bridge, alloc map[types.Address]*chain.GenesisAccount, childMintable bool, destinationChainID *big.Int, @@ -529,7 +530,7 @@ func initERC20PredicateContract( // initERC721PredicateContract initializes ChildERC721Predicate contract on blade chain func initERC721PredicateContract( transition *state.Transition, - bcfg *BridgeConfig, + bcfg *config.Bridge, alloc map[types.Address]*chain.GenesisAccount, childMintable bool, destinationChainID *big.Int, @@ -557,7 +558,7 @@ func initERC721PredicateContract( // initERC1155PredicateContract initializes ChildERC1155Predicate contract on blade chain func initERC1155PredicateContract( transition *state.Transition, - bcfg *BridgeConfig, + bcfg *config.Bridge, alloc map[types.Address]*chain.GenesisAccount, childMintable bool, destinationChainID *big.Int, @@ -583,7 +584,7 @@ func initERC1155PredicateContract( } // mintRewardTokensToWallet mints configured amount of reward tokens to reward wallet address -func mintRewardTokensToWallet(polyBFTConfig PolyBFTConfig, transition *state.Transition) error { +func mintRewardTokensToWallet(polyBFTConfig config.PolyBFT, transition *state.Transition) error { if isNativeRewardToken(polyBFTConfig.RewardConfig.TokenAddress) { // if reward token is a native erc20 token, we don't need to mint an amount of tokens // for given wallet address to it since this is done in premine @@ -605,7 +606,7 @@ func mintRewardTokensToWallet(polyBFTConfig PolyBFTConfig, transition *state.Tra } // mintStakeToken mints configured amount of stake token to stake token address -func mintStakeToken(polyBFTConfig PolyBFTConfig, transition *state.Transition) error { +func mintStakeToken(polyBFTConfig config.PolyBFT, 
transition *state.Transition) error { if IsNativeStakeToken(polyBFTConfig.StakeTokenAddr) { return nil } @@ -632,7 +633,7 @@ func mintStakeToken(polyBFTConfig PolyBFTConfig, transition *state.Transition) e // approveEpochManagerAsSpender approves EpochManager contract as reward token spender // since EpochManager distributes rewards -func approveEpochManagerAsSpender(polyBFTConfig PolyBFTConfig, transition *state.Transition) error { +func approveEpochManagerAsSpender(polyBFTConfig config.PolyBFT, transition *state.Transition) error { approveFn := &contractsapi.ApproveRootERC20Fn{ Spender: contracts.EpochManagerContract, Amount: polyBFTConfig.RewardConfig.WalletAmount, diff --git a/consensus/polybft/extra_test.go b/consensus/polybft/extra_test.go deleted file mode 100644 index d64e432b30..0000000000 --- a/consensus/polybft/extra_test.go +++ /dev/null @@ -1,665 +0,0 @@ -package polybft - -import ( - "crypto/rand" - "errors" - "fmt" - "math/big" - mrand "math/rand" - "testing" - - "github.com/0xPolygon/polygon-edge/bls" - "github.com/0xPolygon/polygon-edge/chain" - "github.com/0xPolygon/polygon-edge/consensus/polybft/bitmap" - "github.com/0xPolygon/polygon-edge/consensus/polybft/signer" - "github.com/0xPolygon/polygon-edge/consensus/polybft/validator" - "github.com/0xPolygon/polygon-edge/consensus/polybft/wallet" - "github.com/0xPolygon/polygon-edge/crypto" - "github.com/0xPolygon/polygon-edge/types" - "github.com/hashicorp/go-hclog" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - "github.com/umbracle/fastrlp" -) - -func TestExtra_Encoding(t *testing.T) { - t.Parallel() - - digest := crypto.Keccak256([]byte("Dummy content to sign")) - keys := createRandomTestKeys(t, 2) - parentSig, err := keys[0].Sign(digest) - require.NoError(t, err) - - committedSig, err := keys[1].Sign(digest) - require.NoError(t, err) - - bmp := bitmap.Bitmap{} - bmp.Set(1) - bmp.Set(4) - - addedValidators := validator.NewTestValidatorsWithAliases(t, []string{"A", "B", "C"}).GetPublicIdentities() - - removedValidators := bitmap.Bitmap{} - removedValidators.Set(2) - - // different extra data for marshall/unmarshall - var cases = []struct { - extra *Extra - }{ - { - &Extra{}, - }, - { - &Extra{ - Validators: &validator.ValidatorSetDelta{}, - Parent: &Signature{}, - Committed: &Signature{}, - }, - }, - { - &Extra{ - Validators: &validator.ValidatorSetDelta{}, - }, - }, - { - &Extra{ - Validators: &validator.ValidatorSetDelta{ - Added: addedValidators, - }, - Parent: &Signature{}, - Committed: &Signature{}, - }, - }, - { - &Extra{ - Validators: &validator.ValidatorSetDelta{ - Removed: removedValidators, - }, - Parent: &Signature{AggregatedSignature: parentSig, Bitmap: bmp}, - Committed: &Signature{}, - }, - }, - { - &Extra{ - Validators: &validator.ValidatorSetDelta{ - Added: addedValidators, - Updated: addedValidators[1:], - Removed: removedValidators, - }, - Parent: &Signature{}, - Committed: &Signature{AggregatedSignature: committedSig, Bitmap: bmp}, - }, - }, - { - &Extra{ - Parent: &Signature{AggregatedSignature: parentSig, Bitmap: bmp}, - Committed: &Signature{AggregatedSignature: committedSig, Bitmap: bmp}, - }, - }, - { - &Extra{ - Parent: &Signature{AggregatedSignature: parentSig, Bitmap: bmp}, - Committed: &Signature{AggregatedSignature: committedSig, Bitmap: bmp}, - BlockMetaData: &BlockMetaData{ - BlockRound: 0, - EpochNumber: 3, - }, - }, - }, - } - - for _, c := range cases { - data := c.extra.MarshalRLPTo(nil) - extra := &Extra{} - 
assert.NoError(t, extra.UnmarshalRLP(data)) - assert.Equal(t, c.extra, extra) - } -} - -func TestExtra_UnmarshalRLPWith_NegativeCases(t *testing.T) { - t.Parallel() - - t.Run("Incorrect RLP marshalled data type", func(t *testing.T) { - t.Parallel() - - extra := &Extra{} - ar := &fastrlp.Arena{} - require.Error(t, extra.UnmarshalRLPWith(ar.NewBool(false))) - }) - - t.Run("Incorrect count of RLP marshalled array elements", func(t *testing.T) { - t.Parallel() - - extra := &Extra{} - ar := &fastrlp.Arena{} - require.ErrorContains(t, extra.UnmarshalRLPWith(ar.NewArray()), "incorrect elements count to decode Extra, expected 4 but found 0") - }) - - t.Run("Incorrect ValidatorSetDelta marshalled", func(t *testing.T) { - t.Parallel() - - extra := &Extra{} - ar := &fastrlp.Arena{} - extraMarshalled := ar.NewArray() - deltaMarshalled := ar.NewArray() - deltaMarshalled.Set(ar.NewBytes([]byte{0x73})) - extraMarshalled.Set(deltaMarshalled) // ValidatorSetDelta - extraMarshalled.Set(ar.NewBytes([]byte{})) // Seal - extraMarshalled.Set(ar.NewBytes([]byte{})) // Parent - extraMarshalled.Set(ar.NewBytes([]byte{})) // Committed - require.Error(t, extra.UnmarshalRLPWith(extraMarshalled)) - }) - - t.Run("Incorrect Seal marshalled", func(t *testing.T) { - t.Parallel() - - extra := &Extra{} - ar := &fastrlp.Arena{} - extraMarshalled := ar.NewArray() - deltaMarshalled := new(validator.ValidatorSetDelta).MarshalRLPWith(ar) - extraMarshalled.Set(deltaMarshalled) // ValidatorSetDelta - extraMarshalled.Set(ar.NewBytes([]byte{})) // Parent - extraMarshalled.Set(ar.NewBytes([]byte{})) // Committed - require.Error(t, extra.UnmarshalRLPWith(extraMarshalled)) - }) - - t.Run("Incorrect Parent signatures marshalled", func(t *testing.T) { - t.Parallel() - - extra := &Extra{} - ar := &fastrlp.Arena{} - extraMarshalled := ar.NewArray() - deltaMarshalled := new(validator.ValidatorSetDelta).MarshalRLPWith(ar) - extraMarshalled.Set(deltaMarshalled) // ValidatorSetDelta - extraMarshalled.Set(ar.NewBytes([]byte{})) // Seal - // Parent - parentArr := ar.NewArray() - parentArr.Set(ar.NewBytes([]byte{})) - extraMarshalled.Set(parentArr) - extraMarshalled.Set(ar.NewBytes([]byte{})) // Committed - require.Error(t, extra.UnmarshalRLPWith(extraMarshalled)) - }) - - t.Run("Incorrect Committed signatures marshalled", func(t *testing.T) { - t.Parallel() - - extra := &Extra{} - ar := &fastrlp.Arena{} - extraMarshalled := ar.NewArray() - deltaMarshalled := new(validator.ValidatorSetDelta).MarshalRLPWith(ar) - extraMarshalled.Set(deltaMarshalled) // ValidatorSetDelta - extraMarshalled.Set(ar.NewBytes([]byte{})) // Seal - - // Parent - key, err := wallet.GenerateAccount() - require.NoError(t, err) - - parentSignature := createSignature(t, []*wallet.Account{key}, types.BytesToHash([]byte("This is test hash")), signer.DomainBridge) - extraMarshalled.Set(parentSignature.MarshalRLPWith(ar)) - - // Committed - committedArr := ar.NewArray() - committedArr.Set(ar.NewBytes([]byte{})) - extraMarshalled.Set(committedArr) - require.Error(t, extra.UnmarshalRLPWith(extraMarshalled)) - }) - - t.Run("Incorrect BlockMeta data marshalled", func(t *testing.T) { - t.Parallel() - - ar := &fastrlp.Arena{} - extraMarshalled := ar.NewArray() - deltaMarshalled := new(validator.ValidatorSetDelta).MarshalRLPWith(ar) - extraMarshalled.Set(deltaMarshalled) // ValidatorSetDelta - extraMarshalled.Set(ar.NewBytes([]byte{})) // Seal - - // Parent - key, err := wallet.GenerateAccount() - require.NoError(t, err) - - parentSignature := createSignature(t, []*wallet.Account{key}, 
types.BytesToHash(generateRandomBytes(t)), signer.DomainBridge) - extraMarshalled.Set(parentSignature.MarshalRLPWith(ar)) - - // Committed - committedSignature := createSignature(t, []*wallet.Account{key}, types.BytesToHash(generateRandomBytes(t)), signer.DomainBridge) - extraMarshalled.Set(committedSignature.MarshalRLPWith(ar)) - - // Block meta data - BlockMetaArr := ar.NewArray() - BlockMetaArr.Set(ar.NewBytes(generateRandomBytes(t))) - extraMarshalled.Set(BlockMetaArr) - - extra := &Extra{} - require.Error(t, extra.UnmarshalRLPWith(extraMarshalled)) - }) -} - -func TestExtra_ValidateFinalizedData_UnhappyPath(t *testing.T) { - t.Parallel() - - const ( - headerNum = 10 - chainID = uint64(20) - ) - - header := &types.Header{ - Number: headerNum, - Hash: types.BytesToHash(generateRandomBytes(t)), - } - parent := &types.Header{ - Number: headerNum - 1, - Hash: types.BytesToHash(generateRandomBytes(t)), - } - - validators := validator.NewTestValidators(t, 6) - - polyBackendMock := new(polybftBackendMock) - polyBackendMock.On("GetValidators", mock.Anything, mock.Anything).Return(nil, errors.New("validators not found")) - - // missing Committed field - extra := &Extra{} - err := extra.ValidateFinalizedData( - header, parent, nil, chainID, nil, signer.DomainBridge, hclog.NewNullLogger()) - require.ErrorContains(t, err, fmt.Sprintf("failed to verify signatures for block %d, because signatures are not present", headerNum)) - - // missing Block field - extra = &Extra{Committed: &Signature{}} - err = extra.ValidateFinalizedData( - header, parent, nil, chainID, polyBackendMock, signer.DomainBridge, hclog.NewNullLogger()) - require.ErrorContains(t, err, fmt.Sprintf("failed to verify signatures for block %d, because block meta data are not present", headerNum)) - - blockMeta := &BlockMetaData{ - EpochNumber: 10, - BlockRound: 2, - } - extra = &Extra{Committed: &Signature{}, BlockMetaData: blockMeta} - err = extra.ValidateFinalizedData( - header, parent, nil, chainID, polyBackendMock, signer.DomainBridge, hclog.NewNullLogger()) - require.ErrorContains(t, err, - fmt.Sprintf("failed to validate header for block %d. 
could not retrieve block validators:validators not found", headerNum)) - - // failed to verify signatures (quorum not reached) - polyBackendMock = new(polybftBackendMock) - polyBackendMock.On("GetValidators", mock.Anything, mock.Anything).Return(validators.GetPublicIdentities()) - - noQuorumSignature := createSignature(t, validators.GetPrivateIdentities("0", "1"), types.BytesToHash([]byte("FooBar")), signer.DomainBridge) - extra = &Extra{Committed: noQuorumSignature, BlockMetaData: blockMeta} - blockMetaHash, err := blockMeta.Hash(header.Hash) - require.NoError(t, err) - - err = extra.ValidateFinalizedData( - header, parent, nil, chainID, polyBackendMock, signer.DomainBridge, hclog.NewNullLogger()) - require.ErrorContains(t, err, - fmt.Sprintf("failed to verify signatures for block %d (proposal hash %s): quorum not reached", headerNum, blockMetaHash)) - - // incorrect parent extra size - validSignature := createSignature(t, validators.GetPrivateIdentities(), blockMetaHash, signer.DomainBridge) - extra = &Extra{Committed: validSignature} - err = extra.ValidateFinalizedData( - header, parent, nil, chainID, polyBackendMock, signer.DomainBridge, hclog.NewNullLogger()) - require.ErrorContains(t, err, - fmt.Sprintf("failed to verify signatures for block %d, because block meta data are not present", headerNum)) -} - -func TestExtra_ValidateParentSignatures(t *testing.T) { - t.Parallel() - - const ( - chainID = 15 - headerNum = 23 - ) - - polyBackendMock := new(polybftBackendMock) - polyBackendMock.On("GetValidators", mock.Anything, mock.Anything).Return(nil, errors.New("no validators")) - - // validation is skipped for blocks 0 and 1 - extra := &Extra{} - err := extra.ValidateParentSignatures( - 1, polyBackendMock, nil, nil, nil, signer.DomainBridge, hclog.NewNullLogger()) - require.NoError(t, err) - - // parent signatures not present - err = extra.ValidateParentSignatures( - headerNum, polyBackendMock, nil, nil, nil, signer.DomainBridge, hclog.NewNullLogger()) - require.ErrorContains(t, err, fmt.Sprintf("failed to verify signatures for parent of block %d because signatures are not present", headerNum)) - - // validators not found - validators := validator.NewTestValidators(t, 5) - incorrectHash := types.BytesToHash([]byte("Hello World")) - invalidSig := createSignature(t, validators.GetPrivateIdentities(), incorrectHash, signer.DomainBridge) - extra = &Extra{Parent: invalidSig} - err = extra.ValidateParentSignatures( - headerNum, polyBackendMock, nil, nil, nil, signer.DomainBridge, hclog.NewNullLogger()) - require.ErrorContains(t, err, - fmt.Sprintf("failed to validate header for block %d. 
could not retrieve parent validators: no validators", headerNum)) - - // incorrect hash is signed - polyBackendMock = new(polybftBackendMock) - polyBackendMock.On("GetValidators", mock.Anything, mock.Anything).Return(validators.GetPublicIdentities()) - - parent := &types.Header{Number: headerNum - 1, Hash: types.BytesToHash(generateRandomBytes(t))} - parentBlockMeta := &BlockMetaData{EpochNumber: 3, BlockRound: 5} - parentExtra := &Extra{BlockMetaData: parentBlockMeta} - - parentBlockMetaHash, err := parentBlockMeta.Hash(parent.Hash) - require.NoError(t, err) - - err = extra.ValidateParentSignatures( - headerNum, polyBackendMock, nil, parent, parentExtra, signer.DomainBridge, hclog.NewNullLogger()) - require.ErrorContains(t, err, - fmt.Sprintf("failed to verify signatures for parent of block %d (proposal hash: %s): could not verify aggregated signature", headerNum, parentBlockMetaHash)) - - // valid signature provided - validSig := createSignature(t, validators.GetPrivateIdentities(), parentBlockMetaHash, signer.DomainBridge) - extra = &Extra{Parent: validSig} - err = extra.ValidateParentSignatures( - headerNum, polyBackendMock, nil, parent, parentExtra, signer.DomainBridge, hclog.NewNullLogger()) - require.NoError(t, err) -} - -func TestSignature_Verify(t *testing.T) { - t.Parallel() - - t.Run("Valid signatures", func(t *testing.T) { - t.Parallel() - - numValidators := 100 - msgHash := types.Hash{0x1} - - vals := validator.NewTestValidators(t, numValidators) - validatorsMetadata := vals.GetPublicIdentities() - validatorSet := vals.ToValidatorSet() - - var signatures bls.Signatures - - bitmap := bitmap.Bitmap{} - signers := make(map[types.Address]struct{}, len(validatorsMetadata)) - - for i, val := range vals.GetValidators() { - bitmap.Set(uint64(i)) - - tempSign, err := val.Account.Bls.Sign(msgHash[:], signer.DomainBridge) - require.NoError(t, err) - - signatures = append(signatures, tempSign) - aggs, err := signatures.Aggregate().Marshal() - assert.NoError(t, err) - - s := &Signature{ - AggregatedSignature: aggs, - Bitmap: bitmap, - } - - err = s.Verify(10, validatorsMetadata, msgHash, signer.DomainBridge, hclog.NewNullLogger()) - signers[val.Address()] = struct{}{} - - if !validatorSet.HasQuorum(10, signers) { - assert.ErrorContains(t, err, "quorum not reached", "failed for %d", i) - } else { - assert.NoError(t, err) - } - } - }) - - t.Run("Invalid bitmap provided", func(t *testing.T) { - t.Parallel() - - validatorSet := validator.NewTestValidators(t, 3).GetPublicIdentities() - bmp := bitmap.Bitmap{} - - // Make bitmap invalid, by setting some flag larger than length of validator set to 1 - bmp.Set(uint64(validatorSet.Len() + 1)) - s := &Signature{Bitmap: bmp} - - err := s.Verify(0, validatorSet, types.Hash{0x1}, signer.DomainBridge, hclog.NewNullLogger()) - require.Error(t, err) - }) -} - -func TestSignature_UnmarshalRLPWith_NegativeCases(t *testing.T) { - t.Parallel() - - t.Run("Incorrect RLP marshalled data type", func(t *testing.T) { - t.Parallel() - - ar := &fastrlp.Arena{} - signature := Signature{} - require.ErrorContains(t, signature.UnmarshalRLPWith(ar.NewNull()), "array type expected for signature struct") - }) - - t.Run("Incorrect AggregatedSignature field data type", func(t *testing.T) { - t.Parallel() - - ar := &fastrlp.Arena{} - signature := Signature{} - signatureMarshalled := ar.NewArray() - signatureMarshalled.Set(ar.NewNull()) - signatureMarshalled.Set(ar.NewNull()) - require.ErrorContains(t, signature.UnmarshalRLPWith(signatureMarshalled), "value is not of type 
bytes") - }) - - t.Run("Incorrect Bitmap field data type", func(t *testing.T) { - ar := &fastrlp.Arena{} - signature := Signature{} - signatureMarshalled := ar.NewArray() - signatureMarshalled.Set(ar.NewBytes([]byte{0x5, 0x90})) - signatureMarshalled.Set(ar.NewNull()) - require.ErrorContains(t, signature.UnmarshalRLPWith(signatureMarshalled), "value is not of type bytes") - }) -} - -func TestSignature_VerifyRandom(t *testing.T) { - t.Parallel() - - numValidators := 100 - vals := validator.NewTestValidators(t, numValidators) - msgHash := types.Hash{0x1} - - var signature bls.Signatures - - bitmap := bitmap.Bitmap{} - valIndxsRnd := mrand.Perm(numValidators)[:numValidators*2/3+1] - - accounts := vals.GetValidators() - - for _, index := range valIndxsRnd { - bitmap.Set(uint64(index)) - - tempSign, err := accounts[index].Account.Bls.Sign(msgHash[:], signer.DomainBridge) - require.NoError(t, err) - - signature = append(signature, tempSign) - } - - aggs, err := signature.Aggregate().Marshal() - require.NoError(t, err) - - s := &Signature{ - AggregatedSignature: aggs, - Bitmap: bitmap, - } - - err = s.Verify(1, vals.GetPublicIdentities(), msgHash, signer.DomainBridge, hclog.NewNullLogger()) - assert.NoError(t, err) -} - -func TestExtra_InitGenesisValidatorsDelta(t *testing.T) { - t.Parallel() - - t.Run("Happy path", func(t *testing.T) { - t.Parallel() - - const validatorsCount = 7 - vals := validator.NewTestValidators(t, validatorsCount) - - delta := &validator.ValidatorSetDelta{ - Added: make(validator.AccountSet, validatorsCount), - Removed: bitmap.Bitmap{}, - } - - i := 0 - - for _, val := range vals.Validators { - delta.Added[i] = &validator.ValidatorMetadata{ - Address: val.Account.Ecdsa.Address(), - BlsKey: val.Account.Bls.PublicKey(), - VotingPower: new(big.Int).SetUint64(val.VotingPower), - } - - i++ - } - - extra := Extra{Validators: delta} - - genesis := &chain.Genesis{ - ExtraData: extra.MarshalRLPTo(nil), - } - - genesisExtra, err := GetIbftExtra(genesis.ExtraData) - assert.NoError(t, err) - assert.Len(t, genesisExtra.Validators.Added, validatorsCount) - assert.Empty(t, genesisExtra.Validators.Removed) - }) - - t.Run("Invalid Extra data", func(t *testing.T) { - t.Parallel() - - genesis := &chain.Genesis{ - ExtraData: append(make([]byte, ExtraVanity), []byte{0x2, 0x3}...), - } - - _, err := GetIbftExtra(genesis.ExtraData) - - require.Error(t, err) - }) -} - -func Test_GetIbftExtraClean(t *testing.T) { - t.Parallel() - - key, err := wallet.GenerateAccount() - require.NoError(t, err) - - extra := &Extra{ - Validators: &validator.ValidatorSetDelta{ - Added: validator.AccountSet{ - &validator.ValidatorMetadata{ - Address: types.BytesToAddress([]byte{11, 22}), - BlsKey: key.Bls.PublicKey(), - VotingPower: new(big.Int).SetUint64(1000), - IsActive: true, - }, - }, - }, - Committed: &Signature{ - AggregatedSignature: []byte{23, 24}, - Bitmap: []byte{11}, - }, - Parent: &Signature{ - AggregatedSignature: []byte{0, 1}, - Bitmap: []byte{1}, - }, - } - - extraClean, err := GetIbftExtraClean(extra.MarshalRLPTo(nil)) - require.NoError(t, err) - - extraTwo := &Extra{} - require.NoError(t, extraTwo.UnmarshalRLP(extraClean)) - require.True(t, extra.Validators.Equals(extra.Validators)) - require.Equal(t, extra.Parent.AggregatedSignature, extraTwo.Parent.AggregatedSignature) - require.Equal(t, extra.Parent.Bitmap, extraTwo.Parent.Bitmap) - - require.Nil(t, extraTwo.Committed.AggregatedSignature) - require.Nil(t, extraTwo.Committed.Bitmap) -} - -func Test_GetIbftExtraClean_Fail(t *testing.T) { - 
t.Parallel() - - randomBytes := [ExtraVanity]byte{} - _, err := rand.Read(randomBytes[:]) - require.NoError(t, err) - - extra, err := GetIbftExtraClean(append(randomBytes[:], []byte{0x12, 0x6}...)) - require.Error(t, err) - require.Nil(t, extra) -} - -func TestBlockMetaData_Hash(t *testing.T) { - blockHash := types.BytesToHash(generateRandomBytes(t)) - origBlockMeta := &BlockMetaData{ - BlockRound: 0, - EpochNumber: 3, - } - copyBlockMeta := &BlockMetaData{} - *copyBlockMeta = *origBlockMeta - - origHash, err := origBlockMeta.Hash(blockHash) - require.NoError(t, err) - - copyHash, err := copyBlockMeta.Hash(blockHash) - require.NoError(t, err) - - require.Equal(t, origHash, copyHash) -} - -func TestBlockMetaData_Validate(t *testing.T) { - t.Parallel() - - cases := []struct { - name string - parentEpochNumber uint64 - epochNumber uint64 - errString string - }{ - { - name: "Invalid (gap in epoch numbers)", - parentEpochNumber: 2, - epochNumber: 6, - errString: "invalid epoch number for epoch-beginning block", - }, - { - name: "Invalid (validator set and epoch numbers change)", - parentEpochNumber: 2, - epochNumber: 3, - }, - } - - for _, c := range cases { - c := c - t.Run(c.name, func(t *testing.T) { - t.Parallel() - - blockMeta := &BlockMetaData{ - EpochNumber: c.epochNumber, - } - parentBlockMeta := &BlockMetaData{EpochNumber: c.parentEpochNumber} - err := blockMeta.Validate(parentBlockMeta) - - if c.errString != "" { - require.ErrorContains(t, err, c.errString) - } else { - require.NoError(t, err) - } - }) - } -} - -func TestBlockMetaData_Copy(t *testing.T) { - t.Parallel() - - original := &BlockMetaData{ - BlockRound: 1, - EpochNumber: 5, - } - - copied := original.Copy() - require.Equal(t, original, copied) - require.NotSame(t, original, copied) - - // alter arbitrary field on copied instance - copied.BlockRound = 10 - require.NotEqual(t, original.BlockRound, copied.BlockRound) -} diff --git a/consensus/polybft/fsm.go b/consensus/polybft/fsm.go index 2719547305..bfbd20223c 100644 --- a/consensus/polybft/fsm.go +++ b/consensus/polybft/fsm.go @@ -15,8 +15,14 @@ import ( "github.com/0xPolygon/polygon-edge/bls" "github.com/0xPolygon/polygon-edge/chain" "github.com/0xPolygon/polygon-edge/consensus/polybft/bitmap" + "github.com/0xPolygon/polygon-edge/consensus/polybft/bridge" + "github.com/0xPolygon/polygon-edge/consensus/polybft/config" "github.com/0xPolygon/polygon-edge/consensus/polybft/contractsapi" + "github.com/0xPolygon/polygon-edge/consensus/polybft/governance" + polymetrics "github.com/0xPolygon/polygon-edge/consensus/polybft/metrics" + "github.com/0xPolygon/polygon-edge/consensus/polybft/proposer" "github.com/0xPolygon/polygon-edge/consensus/polybft/signer" + polytypes "github.com/0xPolygon/polygon-edge/consensus/polybft/types" "github.com/0xPolygon/polygon-edge/consensus/polybft/validator" "github.com/0xPolygon/polygon-edge/consensus/polybft/wallet" "github.com/0xPolygon/polygon-edge/contracts" @@ -61,7 +67,7 @@ var ( type fsm struct { // PolyBFT consensus protocol configuration - config *PolyBFTConfig + config *config.PolyBFT // forks holds forks configuration forks *chain.Forks @@ -69,17 +75,17 @@ type fsm struct { // parent block header parent *types.Header - // backend implements methods for retrieving data from block chain - backend blockchainBackend + // blockchain implements methods for retrieving data from block chain + blockchain polytypes.Blockchain // polybftBackend implements methods needed from the polybft - polybftBackend polybftBackend + polybftBackend 
polytypes.Polybft // validators is the list of validators for this round validators validator.ValidatorSet // proposerSnapshot keeps information about new proposer - proposerSnapshot *ProposerSnapshot + proposerSnapshot *proposer.ProposerSnapshot // blockBuilder is the block builder for proposers blockBuilder blockBuilder @@ -106,7 +112,7 @@ type fsm struct { isFirstBlockOfEpoch bool // proposerBridgeBatchToRegister is a batch that is registered via state transaction by proposer - proposerBridgeBatchToRegister []*BridgeBatchSigned + proposerBridgeBatchToRegister []*bridge.BridgeBatchSigned // logger instance logger hclog.Logger @@ -121,17 +127,17 @@ type fsm struct { // BuildProposal builds a proposal for the current round (used if proposer) func (f *fsm) BuildProposal(currentRound uint64) ([]byte, error) { start := time.Now().UTC() - defer metrics.SetGauge([]string{consensusMetricsPrefix, "block_building_time"}, + defer metrics.SetGauge([]string{polymetrics.ConsensusMetricsPrefix, "block_building_time"}, float32(time.Now().UTC().Sub(start).Seconds())) parent := f.parent - extraParent, err := GetIbftExtra(parent.ExtraData) + extraParent, err := polytypes.GetIbftExtra(parent.ExtraData) if err != nil { return nil, err } - extra := &Extra{Parent: extraParent.Committed} + extra := &polytypes.Extra{Parent: extraParent.Committed} // for non-epoch ending blocks, currentValidatorsHash is the same as the nextValidatorsHash nextValidators := f.validators.Accounts() @@ -163,7 +169,7 @@ func (f *fsm) BuildProposal(currentRound uint64) ([]byte, error) { } } - if isRewardDistributionBlock(f.forks, f.isFirstBlockOfEpoch, f.isEndOfEpoch, f.Height()) { + if governance.IsRewardDistributionBlock(f.forks, f.isFirstBlockOfEpoch, f.isEndOfEpoch, f.Height()) { tx, err := f.createDistributeRewardsTx() if err != nil { return nil, err @@ -183,14 +189,14 @@ func (f *fsm) BuildProposal(currentRound uint64) ([]byte, error) { // fill the block with transactions f.blockBuilder.Fill() - extra.BlockMetaData = &BlockMetaData{ + extra.BlockMetaData = &polytypes.BlockMetaData{ BlockRound: currentRound, EpochNumber: f.epochNumber, } stateBlock, err := f.blockBuilder.Build(func(h *types.Header) { h.ExtraData = extra.MarshalRLPTo(nil) - h.MixHash = PolyBFTMixDigest + h.MixHash = polytypes.PolyBFTMixDigest }) if err != nil { @@ -250,7 +256,7 @@ func (f *fsm) applyBridgeBatchTx() error { } // createBridgeBatchTx builds bridge batch registration transaction -func (f *fsm) createBridgeBatchTx(signedBridgeBatch *BridgeBatchSigned) (*types.Transaction, error) { +func (f *fsm) createBridgeBatchTx(signedBridgeBatch *bridge.BridgeBatchSigned) (*types.Transaction, error) { inputData, err := signedBridgeBatch.EncodeAbi() if err != nil { return nil, fmt.Errorf("failed to encode input data for bridge batch registration: %w", err) @@ -260,7 +266,7 @@ func (f *fsm) createBridgeBatchTx(signedBridgeBatch *BridgeBatchSigned) (*types. 
} // applyValidatorSetCommitTx build validator set commit transaction and apply it -func (f *fsm) applyValidatorSetCommitTx(nextValidators validator.AccountSet, extra *Extra) error { +func (f *fsm) applyValidatorSetCommitTx(nextValidators validator.AccountSet, extra *polytypes.Extra) error { commitValidatorSetInput, err := createCommitValidatorSetInput(nextValidators, extra) if err != nil { return err @@ -338,12 +344,12 @@ func (f *fsm) Validate(proposal []byte) error { ) } - extra, err := GetIbftExtra(block.Header.ExtraData) + extra, err := polytypes.GetIbftExtra(block.Header.ExtraData) if err != nil { return fmt.Errorf("cannot get extra data:%w", err) } - parentExtra, err := GetIbftExtra(f.parent.ExtraData) + parentExtra, err := polytypes.GetIbftExtra(f.parent.ExtraData) if err != nil { return err } @@ -392,7 +398,7 @@ func (f *fsm) Validate(proposal []byte) error { f.logger.Trace("[FSM.Validate]", "block num", block.Number(), "parent validators", validators) } - stateBlock, err := f.backend.ProcessBlock(f.parent, &block) + stateBlock, err := f.blockchain.ProcessBlock(f.parent, &block) if err != nil { return err } @@ -461,7 +467,7 @@ func (f *fsm) VerifyStateTransactions(transactions []*types.Transaction) error { } switch stateTxData := decodedStateTx.(type) { - case *BridgeBatchSigned: + case *bridge.BridgeBatchSigned: if !f.isEndOfSprint { return errBridgeBatchTxInNonSprintBlock } @@ -533,7 +539,7 @@ func (f *fsm) VerifyStateTransactions(transactions []*types.Transaction) error { } } - if isRewardDistributionBlock(f.forks, f.isFirstBlockOfEpoch, f.isEndOfEpoch, f.Height()) { + if governance.IsRewardDistributionBlock(f.forks, f.isFirstBlockOfEpoch, f.isEndOfEpoch, f.Height()) { if !distributeRewardsTxExists { // this is a check if distribute rewards transaction is not in the list of transactions at all // but it should be @@ -561,7 +567,7 @@ func (f *fsm) Insert(proposal []byte, committedSeals []*messages.CommittedSeal) // In this function we should try to return little to no errors since // at this point everything we have to do is just commit something that // we should have already computed beforehand. - extra, err := GetIbftExtra(newBlock.Block.Header.ExtraData) + extra, err := polytypes.GetIbftExtra(newBlock.Block.Header.ExtraData) if err != nil { return nil, fmt.Errorf("failed to insert proposal, due to not being able to extract extra data: %w", err) } @@ -602,7 +608,7 @@ func (f *fsm) Insert(proposal []byte, committedSeals []*messages.CommittedSeal) // include aggregated signature of all committed seals // also includes bitmap which contains all indexes from validator set which provides there seals - extra.Committed = &Signature{ + extra.Committed = &polytypes.Signature{ AggregatedSignature: aggregatedSignature, Bitmap: bitmap, } @@ -610,7 +616,7 @@ func (f *fsm) Insert(proposal []byte, committedSeals []*messages.CommittedSeal) // Write extra data to header newBlock.Block.Header.ExtraData = extra.MarshalRLPTo(nil) - if err := f.backend.CommitBlock(newBlock); err != nil { + if err := f.blockchain.CommitBlock(newBlock); err != nil { return nil, err } @@ -653,7 +659,7 @@ func (f *fsm) verifyCommitEpochTx(commitEpochTx *types.Transaction) error { // and compares its hash with the one extracted from the block. 
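Note on the fsm.go hunks above: Extra, BlockMetaData, Signature, GetIbftExtra and PolyBFTMixDigest move into the consensus/polybft/types package (imported as polytypes), and the fsm field backend is renamed to blockchain. The following is a minimal sketch, not part of the diff, of how a caller reads the epoch number from a header under the relocated packages introduced here; names outside the diff (package example, headerEpoch) are illustrative only.

package example

import (
	"fmt"

	polytypes "github.com/0xPolygon/polygon-edge/consensus/polybft/types"
	"github.com/0xPolygon/polygon-edge/types"
)

// headerEpoch decodes the PolyBFT extra data with polytypes.GetIbftExtra
// (moved out of the polybft package by this change) and returns the epoch
// number carried in BlockMetaData.
func headerEpoch(header *types.Header) (uint64, error) {
	extra, err := polytypes.GetIbftExtra(header.ExtraData)
	if err != nil {
		return 0, fmt.Errorf("cannot get extra data: %w", err)
	}

	if extra.BlockMetaData == nil {
		return 0, fmt.Errorf("block meta data not present")
	}

	return extra.BlockMetaData.EpochNumber, nil
}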
func (f *fsm) verifyDistributeRewardsTx(distributeRewardsTx *types.Transaction) error { // we don't have distribute rewards tx if we just started the chain - if isRewardDistributionBlock(f.forks, f.isFirstBlockOfEpoch, f.isEndOfEpoch, f.Height()) { + if governance.IsRewardDistributionBlock(f.forks, f.isFirstBlockOfEpoch, f.isEndOfEpoch, f.Height()) { localDistributeRewardsTx, err := f.createDistributeRewardsTx() if err != nil { return err @@ -718,7 +724,7 @@ func (f *fsm) verifyCommitValidatorSetTx(commitValidatorSetTx *contractsapi.Comm // verifyBridgeBatchTx validates bridge batch transaction func verifyBridgeBatchTx(blockNumber uint64, txHash types.Hash, - signedBridgeBatch *BridgeBatchSigned, + signedBridgeBatch *bridge.BridgeBatchSigned, validators validator.ValidatorSet) error { signers, err := validators.Accounts().GetFilteredValidators(signedBridgeBatch.AggSignature.Bitmap) if err != nil { @@ -749,8 +755,8 @@ func verifyBridgeBatchTx(blockNumber uint64, txHash types.Hash, func validateHeaderFields(parent *types.Header, header *types.Header, blockTimeDrift uint64) error { // header extra data must be higher or equal to ExtraVanity = 32 in order to be compliant with Ethereum blocks - if len(header.ExtraData) < ExtraVanity { - return fmt.Errorf("extra-data shorter than %d bytes (%d)", ExtraVanity, len(header.ExtraData)) + if len(header.ExtraData) < polytypes.ExtraVanity { + return fmt.Errorf("extra-data shorter than %d bytes (%d)", polytypes.ExtraVanity, len(header.ExtraData)) } // verify parent hash if parent.Hash != header.ParentHash { @@ -778,7 +784,7 @@ func validateHeaderFields(parent *types.Header, header *types.Header, blockTimeD return fmt.Errorf("timestamp older than parent") } // verify mix digest - if header.MixHash != PolyBFTMixDigest { + if header.MixHash != polytypes.PolyBFTMixDigest { return fmt.Errorf("mix digest is not correct") } // difficulty must be > 0 @@ -796,7 +802,7 @@ func validateHeaderFields(parent *types.Header, header *types.Header, blockTimeD // createCommitValidatorSetInput creates input for valdidatoeSetCommit func createCommitValidatorSetInput( validators validator.AccountSet, - extra *Extra) (*contractsapi.CommitValidatorSetBridgeStorageFn, error) { + extra *polytypes.Extra) (*contractsapi.CommitValidatorSetBridgeStorageFn, error) { signature, err := bls.UnmarshalSignature(extra.Committed.AggregatedSignature) if err != nil { return nil, err diff --git a/consensus/polybft/fsm_test.go b/consensus/polybft/fsm_test.go index 2afd8d8848..61360e94b2 100644 --- a/consensus/polybft/fsm_test.go +++ b/consensus/polybft/fsm_test.go @@ -14,8 +14,12 @@ import ( "github.com/0xPolygon/polygon-edge/chain" "github.com/0xPolygon/polygon-edge/consensus" "github.com/0xPolygon/polygon-edge/consensus/polybft/bitmap" + "github.com/0xPolygon/polygon-edge/consensus/polybft/bridge" + "github.com/0xPolygon/polygon-edge/consensus/polybft/config" "github.com/0xPolygon/polygon-edge/consensus/polybft/contractsapi" + "github.com/0xPolygon/polygon-edge/consensus/polybft/helpers" "github.com/0xPolygon/polygon-edge/consensus/polybft/signer" + polytypes "github.com/0xPolygon/polygon-edge/consensus/polybft/types" "github.com/0xPolygon/polygon-edge/consensus/polybft/validator" "github.com/0xPolygon/polygon-edge/consensus/polybft/wallet" "github.com/0xPolygon/polygon-edge/contracts" @@ -65,7 +69,7 @@ func TestFSM_ValidateHeader(t *testing.T) { // mix digest require.ErrorContains(t, validateHeaderFields(parent, header, blockTimeDrift), "mix digest is not correct") - header.MixHash = 
PolyBFTMixDigest + header.MixHash = polytypes.PolyBFTMixDigest // difficulty header.Difficulty = 0 @@ -137,20 +141,18 @@ func TestFSM_BuildProposal_WithoutCommitEpochTxGood(t *testing.T) { stateBlock := createDummyStateBlock(parentBlockNumber+1, parent.Hash, extra) mBlockBuilder := newBlockBuilderMock(stateBlock) - blockchainMock := &blockchainMock{} runtime := &consensusRuntime{ logger: hclog.NewNullLogger(), - config: &runtimeConfig{ - Key: wallet.NewKey(validators.GetPrivateIdentities()[0]), - blockchain: blockchainMock, + config: &config.Runtime{ + Key: wallet.NewKey(validators.GetPrivateIdentities()[0]), }, } fsm := &fsm{ parent: parent, blockBuilder: mBlockBuilder, - config: &PolyBFTConfig{}, - backend: blockchainMock, + config: &config.PolyBFT{}, + blockchain: &helpers.BlockchainMock{}, validators: validators.ToValidatorSet(), logger: hclog.NewNullLogger(), forks: &chain.Forks{chain.Governance: chain.NewFork(0)}, @@ -166,7 +168,7 @@ func TestFSM_BuildProposal_WithoutCommitEpochTxGood(t *testing.T) { block := types.Block{} require.NoError(t, block.UnmarshalRLP(proposal)) - blockMeta := &BlockMetaData{ + blockMeta := &polytypes.BlockMetaData{ BlockRound: currentRound, EpochNumber: fsm.epochNumber, } @@ -202,17 +204,19 @@ func TestFSM_BuildProposal_WithCommitEpochTxGood(t *testing.T) { mBlockBuilder := newBlockBuilderMock(stateBlock) mBlockBuilder.On("WriteTx", mock.Anything).Return(error(nil)).Once() - blockChainMock := new(blockchainMock) + blockChainMock := new(helpers.BlockchainMock) runtime := &consensusRuntime{ logger: hclog.NewNullLogger(), - config: &runtimeConfig{ - Key: wallet.NewKey(validators.GetPrivateIdentities()[0]), - blockchain: blockChainMock, + config: &config.Runtime{ + Key: wallet.NewKey(validators.GetPrivateIdentities()[0]), }, } - fsm := &fsm{parent: parent, blockBuilder: mBlockBuilder, config: &PolyBFTConfig{}, backend: blockChainMock, + fsm := &fsm{parent: parent, + blockBuilder: mBlockBuilder, + config: &config.PolyBFT{}, + blockchain: blockChainMock, isEndOfEpoch: true, validators: validators.ToValidatorSet(), commitEpochInput: createTestCommitEpochInput(t, 0, 10), @@ -229,7 +233,7 @@ func TestFSM_BuildProposal_WithCommitEpochTxGood(t *testing.T) { assert.Equal(t, stateBlock.Block.MarshalRLP(), proposal) - blockMeta := &BlockMetaData{ + blockMeta := &polytypes.BlockMetaData{ BlockRound: currentRound, EpochNumber: fsm.epochNumber, } @@ -258,13 +262,13 @@ func TestFSM_BuildProposal_EpochEndingBlock_FailedToApplyStateTx(t *testing.T) { parent := &types.Header{Number: parentBlockNumber, ExtraData: extra} - mBlockBuilder := new(blockBuilderMock) + mBlockBuilder := new(helpers.BlockBuilderMock) mBlockBuilder.On("WriteTx", mock.Anything).Return(errors.New("error")).Once() mBlockBuilder.On("Reset").Return(error(nil)).Once() validatorSet := validator.NewValidatorSet(validators.GetPublicIdentities(), hclog.NewNullLogger()) - fsm := &fsm{parent: parent, blockBuilder: mBlockBuilder, backend: &blockchainMock{}, + fsm := &fsm{parent: parent, blockBuilder: mBlockBuilder, blockchain: &helpers.BlockchainMock{}, isEndOfEpoch: true, validators: validatorSet, commitEpochInput: createTestCommitEpochInput(t, 0, 10), @@ -311,15 +315,15 @@ func TestFSM_BuildProposal_EpochEndingBlock_ValidatorsDeltaExists(t *testing.T) Removed: *removedBitmap, } - blockChainMock := new(blockchainMock) + blockChainMock := new(helpers.BlockchainMock) validatorSet := validator.NewValidatorSet(validators, hclog.NewNullLogger()) fsm := &fsm{ parent: parent, blockBuilder: blockBuilderMock, - config: 
&PolyBFTConfig{}, - backend: blockChainMock, + config: &config.PolyBFT{}, + blockchain: blockChainMock, isEndOfEpoch: true, validators: validatorSet, commitEpochInput: createTestCommitEpochInput(t, 0, 10), @@ -332,7 +336,7 @@ func TestFSM_BuildProposal_EpochEndingBlock_ValidatorsDeltaExists(t *testing.T) assert.NoError(t, err) assert.NotNil(t, proposal) - blockExtra, err := GetIbftExtra(stateBlock.Block.Header.ExtraData) + blockExtra, err := polytypes.GetIbftExtra(stateBlock.Block.Header.ExtraData) assert.NoError(t, err) assert.Len(t, blockExtra.Validators.Added, 2) assert.False(t, blockExtra.Validators.IsEmpty()) @@ -368,15 +372,15 @@ func TestFSM_BuildProposal_NonEpochEndingBlock_ValidatorsDeltaNil(t *testing.T) parent.ComputeHash() stateBlock := createDummyStateBlock(parentBlockNumber+1, parent.Hash, extra) - blockBuilderMock := &blockBuilderMock{} + blockBuilderMock := new(helpers.BlockBuilderMock) blockBuilderMock.On("Build", mock.Anything).Return(stateBlock).Once() blockBuilderMock.On("Fill").Once() blockBuilderMock.On("Reset").Return(error(nil)).Once() fsm := &fsm{parent: parent, blockBuilder: blockBuilderMock, - config: &PolyBFTConfig{}, - backend: &blockchainMock{}, + config: &config.PolyBFT{}, + blockchain: new(helpers.BlockchainMock), isEndOfEpoch: false, validators: testValidators.ToValidatorSet(), logger: hclog.NewNullLogger(), @@ -387,7 +391,7 @@ func TestFSM_BuildProposal_NonEpochEndingBlock_ValidatorsDeltaNil(t *testing.T) assert.NoError(t, err) assert.NotNil(t, proposal) - blockExtra, err := GetIbftExtra(stateBlock.Block.Header.ExtraData) + blockExtra, err := polytypes.GetIbftExtra(stateBlock.Block.Header.ExtraData) assert.NoError(t, err) assert.Nil(t, blockExtra.Validators) @@ -415,13 +419,13 @@ func TestFSM_BuildProposal_EpochEndingBlock_FailToGetNextValidatorsHash(t *testi parent := &types.Header{Number: parentBlockNumber, ExtraData: extra.MarshalRLPTo(nil)} - blockBuilderMock := new(blockBuilderMock) + blockBuilderMock := new(helpers.BlockBuilderMock) blockBuilderMock.On("WriteTx", mock.Anything).Return(error(nil)).Once() blockBuilderMock.On("Reset").Return(error(nil)).Once() fsm := &fsm{parent: parent, blockBuilder: blockBuilderMock, - config: &PolyBFTConfig{}, + config: &config.PolyBFT{}, isEndOfEpoch: true, validators: testValidators.ToValidatorSet(), commitEpochInput: createTestCommitEpochInput(t, 0, 10), @@ -628,21 +632,21 @@ func TestFSM_VerifyStateTransaction_BridgeBatches(t *testing.T) { t.Parallel() var ( - pendingBridgeBatches [2]*PendingBridgeBatch + pendingBridgeBatches [2]*bridge.PendingBridgeBatch bridgeMessageEvents [2][]*contractsapi.BridgeMsgEvent - signedBridgeBatches [2]*BridgeBatchSigned + signedBridgeBatches [2]*bridge.BridgeBatchSigned ) validators := validator.NewTestValidatorsWithAliases(t, []string{"A", "B", "C", "D", "E"}) - pendingBridgeBatches[0], signedBridgeBatches[0], bridgeMessageEvents[0] = buildBridgeBatchAndBridgeEvents(t, 10, uint64(3), 2) - pendingBridgeBatches[1], signedBridgeBatches[1], bridgeMessageEvents[1] = buildBridgeBatchAndBridgeEvents(t, 10, uint64(3), 12) + pendingBridgeBatches[0], signedBridgeBatches[0], bridgeMessageEvents[0] = bridge.BuildBridgeBatchAndBridgeEvents(t, 10, uint64(3), 2) + pendingBridgeBatches[1], signedBridgeBatches[1], bridgeMessageEvents[1] = bridge.BuildBridgeBatchAndBridgeEvents(t, 10, uint64(3), 12) executeForValidators := func(aliases ...string) error { for _, sc := range signedBridgeBatches { // add register batches state transaction hash, err := sc.Hash() require.NoError(t, err) - signature := 
createSignature(t, validators.GetPrivateIdentities(aliases...), hash, signer.DomainBridge) + signature := helpers.CreateSignature(t, validators.GetPrivateIdentities(aliases...), hash, signer.DomainBridge) sc.AggSignature = *signature } @@ -695,7 +699,7 @@ func TestFSM_VerifyStateTransaction_BridgeBatches(t *testing.T) { isEndOfSprint: true, parent: &types.Header{Number: 9}, validators: validatorSet, - proposerBridgeBatchToRegister: []*BridgeBatchSigned{bridgeBatch}, + proposerBridgeBatchToRegister: []*bridge.BridgeBatchSigned{bridgeBatch}, logger: hclog.NewNullLogger(), } @@ -712,9 +716,9 @@ func TestFSM_VerifyStateTransaction_BridgeBatches(t *testing.T) { blsKey, err := bls.GenerateBlsKey() require.NoError(t, err) - data := generateRandomBytes(t) + data := helpers.GenerateRandomBytes(t) - signature, err := blsKey.Sign(data, domain) + signature, err := blsKey.Sign(data, bridge.TestDomain) require.NoError(t, err) signatures := bls.Signatures{signature} @@ -724,7 +728,7 @@ func TestFSM_VerifyStateTransaction_BridgeBatches(t *testing.T) { validators := validator.NewTestValidators(t, 5) bridgeBatch := createTestBridgeBatch(t, validators.GetPrivateIdentities()) - bridgeBatch.AggSignature = Signature{ + bridgeBatch.AggSignature = polytypes.Signature{ AggregatedSignature: aggSig, Bitmap: []byte{}, } @@ -736,7 +740,7 @@ func TestFSM_VerifyStateTransaction_BridgeBatches(t *testing.T) { isEndOfSprint: true, parent: &types.Header{Number: 9}, validators: validatorSet, - proposerBridgeBatchToRegister: []*BridgeBatchSigned{bridgeBatch}, + proposerBridgeBatchToRegister: []*bridge.BridgeBatchSigned{bridgeBatch}, commitEpochInput: createTestCommitEpochInput(t, 0, 10), distributeRewardsInput: createTestDistributeRewardsInput(t, 0, nil, 10), logger: hclog.NewNullLogger(), @@ -760,7 +764,7 @@ func TestFSM_VerifyStateTransaction_BridgeBatches(t *testing.T) { fsm := &fsm{} - encodedBatch, err := createTestBridgeBatchMessage(t, 0, 0).EncodeAbi() + encodedBatch, err := bridge.CreateTestBridgeBatchMessage(t, 0, 0).EncodeAbi() require.NoError(t, err) tx := createStateTransactionWithData(contracts.BridgeStorageContract, encodedBatch) @@ -772,7 +776,7 @@ func TestFSM_VerifyStateTransaction_BridgeBatches(t *testing.T) { t.Parallel() validators := validator.NewTestValidatorsWithAliases(t, []string{"A", "B", "C", "D", "E", "F"}) - _, bridgeBatchSigned, _ := buildBridgeBatchAndBridgeEvents(t, 10, uint64(3), 2) + _, bridgeBatchSigned, _ := bridge.BuildBridgeBatchAndBridgeEvents(t, 10, uint64(3), 2) validatorSet := validator.NewValidatorSet(validators.GetPublicIdentities(), hclog.NewNullLogger()) @@ -787,7 +791,7 @@ func TestFSM_VerifyStateTransaction_BridgeBatches(t *testing.T) { var txns []*types.Transaction - signature := createSignature(t, validators.GetPrivateIdentities("A", "B", "C", "D", "E"), hash, signer.DomainBridge) + signature := helpers.CreateSignature(t, validators.GetPrivateIdentities("A", "B", "C", "D", "E"), hash, signer.DomainBridge) bridgeBatchSigned.AggSignature = *signature inputData, err := bridgeBatchSigned.EncodeAbi() @@ -827,8 +831,8 @@ func TestFSM_ValidateCommit_WrongValidator(t *testing.T) { fsm := &fsm{ parent: parent, blockBuilder: mBlockBuilder, - config: &PolyBFTConfig{}, - backend: &blockchainMock{}, + config: &config.PolyBFT{}, + blockchain: &helpers.BlockchainMock{}, validators: validators.ToValidatorSet(), logger: hclog.NewNullLogger(), forks: &chain.Forks{chain.Governance: chain.NewFork(0)}, @@ -862,8 +866,8 @@ func TestFSM_ValidateCommit_InvalidHash(t *testing.T) { fsm := &fsm{ parent: 
parent, blockBuilder: mBlockBuilder, - config: &PolyBFTConfig{}, - backend: &blockchainMock{}, + config: &config.PolyBFT{}, + blockchain: &helpers.BlockchainMock{}, validators: validators.ToValidatorSet(), logger: hclog.NewNullLogger(), forks: &chain.Forks{chain.Governance: chain.NewFork(0)}, @@ -898,8 +902,8 @@ func TestFSM_ValidateCommit_Good(t *testing.T) { fsm := &fsm{ parent: parent, blockBuilder: mBlockBuilder, - config: &PolyBFTConfig{}, - backend: &blockchainMock{}, + config: &config.PolyBFT{}, + blockchain: &helpers.BlockchainMock{}, validators: validatorSet, logger: hclog.NewNullLogger(), forks: &chain.Forks{chain.Governance: chain.NewFork(0)}, @@ -935,7 +939,7 @@ func TestFSM_Validate_EpochEndingBlock_MismatchInDeltas(t *testing.T) { } parent.ComputeHash() - polybftBackendMock := new(polybftBackendMock) + polybftBackendMock := new(helpers.PolybftBackendMock) polybftBackendMock.On("GetValidators", mock.Anything, mock.Anything).Return(validators.GetPublicIdentities(), nil).Once() extra := createTestExtraObject(validators.GetPublicIdentities(), validator.AccountSet{}, 4, signaturesCount, signaturesCount) @@ -943,11 +947,11 @@ func TestFSM_Validate_EpochEndingBlock_MismatchInDeltas(t *testing.T) { require.NoError(t, err) extra.Validators = &validator.ValidatorSetDelta{} // this will cause test to fail - extra.Parent = createSignature(t, validators.GetPrivateIdentities(), parentBlockMetaHash, signer.DomainBridge) + extra.Parent = helpers.CreateSignature(t, validators.GetPrivateIdentities(), parentBlockMetaHash, signer.DomainBridge) stateBlock := createDummyStateBlock(parent.Number+1, types.Hash{100, 15}, extra.MarshalRLPTo(nil)) - proposalHash, err := new(BlockMetaData).Hash(stateBlock.Block.Hash()) + proposalHash, err := new(polytypes.BlockMetaData).Hash(stateBlock.Block.Hash()) require.NoError(t, err) commitEpoch := createTestCommitEpochInput(t, 1, 10) @@ -999,21 +1003,21 @@ func TestFSM_Validate_EpochEndingBlock_MismatchInDeltas(t *testing.T) { proposal := stateBlock.Block.MarshalRLP() - blockchainMock := new(blockchainMock) + blockchainMock := new(helpers.BlockchainMock) blockchainMock.On("ProcessBlock", mock.Anything, mock.Anything). Return(stateBlock, error(nil)). 
Maybe() fsm := &fsm{ parent: parent, - backend: blockchainMock, + blockchain: blockchainMock, validators: validators.ToValidatorSet(), logger: hclog.NewNullLogger(), isEndOfEpoch: true, commitEpochInput: commitEpoch, polybftBackend: polybftBackendMock, newValidatorsDelta: newValidatorDelta, - config: &PolyBFTConfig{BlockTimeDrift: 1}, + config: &config.PolyBFT{BlockTimeDrift: 1}, forks: &chain.Forks{chain.Governance: chain.NewFork(0)}, } @@ -1043,7 +1047,7 @@ func TestFSM_Validate_EpochEndingBlock_UpdatingValidatorSetInNonEpochEndingBlock } parent.ComputeHash() - polybftBackendMock := new(polybftBackendMock) + polybftBackendMock := new(helpers.PolybftBackendMock) polybftBackendMock.On("GetValidators", mock.Anything, mock.Anything).Return(validators.GetPublicIdentities(), nil).Once() // a new validator is added to delta which proposers block does not have @@ -1064,11 +1068,11 @@ func TestFSM_Validate_EpochEndingBlock_UpdatingValidatorSetInNonEpochEndingBlock require.NoError(t, err) extra.Validators = newValidatorDelta // this will cause test to fail - extra.Parent = createSignature(t, validators.GetPrivateIdentities(), parentBlockMetaHash, signer.DomainBridge) + extra.Parent = helpers.CreateSignature(t, validators.GetPrivateIdentities(), parentBlockMetaHash, signer.DomainBridge) stateBlock := createDummyStateBlock(parent.Number+1, types.Hash{100, 15}, extra.MarshalRLPTo(nil)) - proposalHash, err := new(BlockMetaData).Hash(stateBlock.Block.Hash()) + proposalHash, err := new(polytypes.BlockMetaData).Hash(stateBlock.Block.Hash()) require.NoError(t, err) stateBlock.Block.Header.Hash = proposalHash @@ -1077,18 +1081,18 @@ func TestFSM_Validate_EpochEndingBlock_UpdatingValidatorSetInNonEpochEndingBlock proposal := stateBlock.Block.MarshalRLP() - blockchainMock := new(blockchainMock) + blockchainMock := new(helpers.BlockchainMock) blockchainMock.On("ProcessBlock", mock.Anything, mock.Anything). Return(stateBlock, error(nil)). 
Maybe() fsm := &fsm{ parent: parent, - backend: blockchainMock, + blockchain: blockchainMock, validators: validators.ToValidatorSet(), logger: hclog.NewNullLogger(), polybftBackend: polybftBackendMock, - config: &PolyBFTConfig{BlockTimeDrift: 1}, + config: &config.PolyBFT{BlockTimeDrift: 1}, forks: &chain.Forks{chain.Governance: chain.NewFork(0)}, } @@ -1117,17 +1121,17 @@ func TestFSM_Validate_IncorrectHeaderParentHash(t *testing.T) { fsm := &fsm{ parent: parent, - backend: &blockchainMock{}, + blockchain: &helpers.BlockchainMock{}, validators: validators.ToValidatorSet(), logger: hclog.NewNullLogger(), - config: &PolyBFTConfig{ + config: &config.PolyBFT{ BlockTimeDrift: 1, }, } stateBlock := createDummyStateBlock(parent.Number+1, types.Hash{100, 15}, parent.ExtraData) - hash, err := new(BlockMetaData).Hash(stateBlock.Block.Hash()) + hash, err := new(polytypes.BlockMetaData).Hash(stateBlock.Block.Hash()) require.NoError(t, err) stateBlock.Block.Header.Hash = hash @@ -1160,13 +1164,13 @@ func TestFSM_Validate_InvalidNumber(t *testing.T) { fsm := &fsm{ parent: parent, blockBuilder: mBlockBuilder, - backend: &blockchainMock{}, + blockchain: &helpers.BlockchainMock{}, validators: validators.ToValidatorSet(), logger: hclog.NewNullLogger(), - config: &PolyBFTConfig{BlockTimeDrift: 1}, + config: &config.PolyBFT{BlockTimeDrift: 1}, } - proposalHash, err := new(BlockMetaData).Hash(stateBlock.Block.Hash()) + proposalHash, err := new(polytypes.BlockMetaData).Hash(stateBlock.Block.Hash()) require.NoError(t, err) stateBlock.Block.Header.Hash = proposalHash @@ -1201,14 +1205,14 @@ func TestFSM_Validate_TimestampOlder(t *testing.T) { stateBlock := &types.FullBlock{Block: consensus.BuildBlock(consensus.BuildBlockParams{Header: header})} fsm := &fsm{ parent: parent, - backend: &blockchainMock{}, + blockchain: &helpers.BlockchainMock{}, validators: validators.ToValidatorSet(), logger: hclog.NewNullLogger(), - config: &PolyBFTConfig{ + config: &config.PolyBFT{ BlockTimeDrift: 1, }} - blocMetaHash, err := new(BlockMetaData).Hash(header.Hash) + blocMetaHash, err := new(polytypes.BlockMetaData).Hash(header.Hash) require.NoError(t, err) stateBlock.Block.Header.Hash = blocMetaHash @@ -1244,16 +1248,16 @@ func TestFSM_Validate_IncorrectMixHash(t *testing.T) { fsm := &fsm{ parent: parent, - backend: &blockchainMock{}, + blockchain: &helpers.BlockchainMock{}, validators: validators.ToValidatorSet(), logger: hclog.NewNullLogger(), - config: &PolyBFTConfig{ + config: &config.PolyBFT{ BlockTimeDrift: 1, }, } rlpBlock := buildBlock.Block.MarshalRLP() - _, err := new(BlockMetaData).Hash(header.Hash) + _, err := new(polytypes.BlockMetaData).Hash(header.Hash) require.NoError(t, err) err = fsm.Validate(rlpBlock) @@ -1269,7 +1273,7 @@ func TestFSM_Insert_Good(t *testing.T) { signaturesCount = 3 ) - setupFn := func() (*fsm, []*messages.CommittedSeal, *types.FullBlock, *blockchainMock) { + setupFn := func() (*fsm, []*messages.CommittedSeal, *types.FullBlock, *helpers.BlockchainMock) { validators := validator.NewTestValidators(t, accountCount) allAccounts := validators.GetPrivateIdentities() validatorsMetadata := validators.GetPublicIdentities() @@ -1285,7 +1289,7 @@ func TestFSM_Insert_Good(t *testing.T) { builtBlock := &types.FullBlock{Block: block} builderMock := newBlockBuilderMock(builtBlock) - chainMock := &blockchainMock{} + chainMock := new(helpers.BlockchainMock) chainMock.On("CommitBlock", mock.Anything).Return(error(nil)).Once() chainMock.On("ProcessBlock", mock.Anything, mock.Anything). 
Return(builtBlock, error(nil)). @@ -1295,7 +1299,7 @@ func TestFSM_Insert_Good(t *testing.T) { parent: parent, blockBuilder: builderMock, target: builtBlock, - backend: chainMock, + blockchain: chainMock, validators: validator.NewValidatorSet(validatorsMetadata[0:len(validatorsMetadata)-1], hclog.NewNullLogger()), logger: hclog.NewNullLogger(), } @@ -1346,7 +1350,7 @@ func TestFSM_Insert_Good(t *testing.T) { fsm, seals, builtBlock, _ := setupFn() proposal := builtBlock.Block.MarshalRLP() fsm.target = builtBlock - fsm.target.Block.Header.Hash = types.BytesToHash(generateRandomBytes(t)) + fsm.target.Block.Header.Hash = types.BytesToHash(helpers.GenerateRandomBytes(t)) _, err := fsm.Insert(proposal, seals) require.ErrorIs(t, err, errProposalDontMatch) @@ -1379,7 +1383,7 @@ func TestFSM_Insert_InvalidNode(t *testing.T) { validatorSet := validator.NewValidatorSet(validatorsMetadata[0:len(validatorsMetadata)-1], hclog.NewNullLogger()) - fsm := &fsm{parent: parent, blockBuilder: mBlockBuilder, backend: &blockchainMock{}, + fsm := &fsm{parent: parent, blockBuilder: mBlockBuilder, blockchain: &helpers.BlockchainMock{}, validators: validatorSet, } @@ -1427,10 +1431,10 @@ func TestFSM_DecodeBridgeBatchStateTxs(t *testing.T) { eventsSize = 40 ) - _, signedBridgeBatch, _ := buildBridgeBatchAndBridgeEvents(t, eventsSize, uint64(3), from) + _, signedBridgeBatch, _ := bridge.BuildBridgeBatchAndBridgeEvents(t, eventsSize, uint64(3), from) f := &fsm{ - proposerBridgeBatchToRegister: []*BridgeBatchSigned{signedBridgeBatch}, + proposerBridgeBatchToRegister: []*bridge.BridgeBatchSigned{signedBridgeBatch}, commitEpochInput: createTestCommitEpochInput(t, 0, 10), distributeRewardsInput: createTestDistributeRewardsInput(t, 0, nil, 10), logger: hclog.NewNullLogger(), @@ -1443,7 +1447,7 @@ func TestFSM_DecodeBridgeBatchStateTxs(t *testing.T) { decodedData, err := decodeStateTransaction(bridgeBatchTx.Input()) require.NoError(t, err) - decodedBridgeBatchMsg, ok := decodedData.(*BridgeBatchSigned) + decodedBridgeBatchMsg, ok := decodedData.(*bridge.BridgeBatchSigned) require.True(t, ok) numberOfMessages := len(signedBridgeBatch.MessageBatch.Messages) @@ -1500,25 +1504,25 @@ func TestFSM_Validate_FailToVerifySignatures(t *testing.T) { extra := createTestExtraObject(validatorsMetadata, validator.AccountSet{}, 4, signaturesCount, signaturesCount) - extra.BlockMetaData = &BlockMetaData{} + extra.BlockMetaData = &polytypes.BlockMetaData{} parent := &types.Header{ Number: parentBlockNumber, ExtraData: extra.MarshalRLPTo(nil), } parent.ComputeHash() - polybftBackendMock := new(polybftBackendMock) + polybftBackendMock := new(helpers.PolybftBackendMock) polybftBackendMock.On("GetValidators", mock.Anything, mock.Anything).Return(validatorsMetadata, nil).Once() validatorSet := validator.NewValidatorSet(validatorsMetadata, hclog.NewNullLogger()) fsm := &fsm{ parent: parent, - backend: &blockchainMock{}, + blockchain: &helpers.BlockchainMock{}, polybftBackend: polybftBackendMock, validators: validatorSet, logger: hclog.NewNullLogger(), - config: &PolyBFTConfig{ + config: &config.PolyBFT{ BlockTimeDrift: 1, }, } @@ -1528,13 +1532,13 @@ func TestFSM_Validate_FailToVerifySignatures(t *testing.T) { Number: parentBlockNumber + 1, ParentHash: parent.Hash, Timestamp: parent.Timestamp + 1, - MixHash: PolyBFTMixDigest, + MixHash: polytypes.PolyBFTMixDigest, Difficulty: 1, ExtraData: parent.ExtraData, }, }) - blockMetaHash, err := new(BlockMetaData).Hash(finalBlock.Hash()) + blockMetaHash, err := 
new(polytypes.BlockMetaData).Hash(finalBlock.Hash()) require.NoError(t, err) finalBlock.Header.Hash = blockMetaHash @@ -1552,7 +1556,7 @@ func createDummyStateBlock(blockNumber uint64, parentHash types.Hash, extraData ParentHash: parentHash, Difficulty: 1, ExtraData: extraData, - MixHash: PolyBFTMixDigest, + MixHash: polytypes.PolyBFTMixDigest, }, }) @@ -1571,7 +1575,7 @@ func createTestExtra( return extraData.MarshalRLPTo(nil) } -func createTestBridgeBatch(t *testing.T, accounts []*wallet.Account) *BridgeBatchSigned { +func createTestBridgeBatch(t *testing.T, accounts []*wallet.Account) *bridge.BridgeBatchSigned { t.Helper() bitmap := bitmap.Bitmap{} @@ -1583,14 +1587,14 @@ func createTestBridgeBatch(t *testing.T, accounts []*wallet.Account) *BridgeBatc Sender: accounts[i].Ecdsa.Address(), Receiver: accounts[0].Ecdsa.Address(), Data: []byte{}, - SourceChainID: bigZero, + SourceChainID: big.NewInt(0), DestinationChainID: big.NewInt(1), } bitmap.Set(uint64(i)) } - newPendingBridgeBatch, err := NewPendingBridgeBatch(1, bridgeMessageEvents) + newPendingBridgeBatch, err := bridge.NewPendingBridgeBatch(1, bridgeMessageEvents) require.NoError(t, err) hash, err := newPendingBridgeBatch.Hash() @@ -1608,21 +1612,21 @@ func createTestBridgeBatch(t *testing.T, accounts []*wallet.Account) *BridgeBatc aggregatedSignature, err := signatures.Aggregate().Marshal() assert.NoError(t, err) - signature := Signature{ + signature := polytypes.Signature{ AggregatedSignature: aggregatedSignature, Bitmap: bitmap, } assert.NoError(t, err) - return &BridgeBatchSigned{ + return &bridge.BridgeBatchSigned{ MessageBatch: newPendingBridgeBatch.BridgeMessageBatch, AggSignature: signature, } } -func newBlockBuilderMock(stateBlock *types.FullBlock) *blockBuilderMock { - mBlockBuilder := new(blockBuilderMock) +func newBlockBuilderMock(stateBlock *types.FullBlock) *helpers.BlockBuilderMock { + mBlockBuilder := new(helpers.BlockBuilderMock) mBlockBuilder.On("Build", mock.Anything).Return(stateBlock).Once() mBlockBuilder.On("Fill", mock.Anything).Once() mBlockBuilder.On("Reset", mock.Anything).Return(error(nil)).Once() @@ -1634,11 +1638,11 @@ func createTestExtraObject(allAccounts, previousValidatorSet validator.AccountSet, validatorsCount, committedSignaturesCount, - parentSignaturesCount int) *Extra { + parentSignaturesCount int) *polytypes.Extra { accountCount := len(allAccounts) dummySignature := [64]byte{} bitmapCommitted, bitmapParent := bitmap.Bitmap{}, bitmap.Bitmap{} - extraData := &Extra{} + extraData := &polytypes.Extra{} extraData.Validators = generateValidatorDelta(validatorsCount, allAccounts, previousValidatorSet) for j := range rand.Perm(accountCount)[:committedSignaturesCount] { @@ -1649,9 +1653,9 @@ func createTestExtraObject(allAccounts, bitmapParent.Set(uint64(j)) } - extraData.Parent = &Signature{Bitmap: bitmapCommitted, AggregatedSignature: dummySignature[:]} - extraData.Committed = &Signature{Bitmap: bitmapParent, AggregatedSignature: dummySignature[:]} - extraData.BlockMetaData = &BlockMetaData{} + extraData.Parent = &polytypes.Signature{Bitmap: bitmapCommitted, AggregatedSignature: dummySignature[:]} + extraData.Committed = &polytypes.Signature{Bitmap: bitmapParent, AggregatedSignature: dummySignature[:]} + extraData.BlockMetaData = &polytypes.BlockMetaData{} return extraData } @@ -1680,3 +1684,16 @@ func generateValidatorDelta(validatorCount int, allAccounts, previousValidatorSe return } + +func createTestCommitValidatorSetBridgeStorageInput(t *testing.T, validatorSet []*contractsapi.Validator, + 
signature [2]*big.Int, bitmap []byte) *contractsapi.CommitValidatorSetBridgeStorageFn { + t.Helper() + + commitValidatorSet := &contractsapi.CommitValidatorSetBridgeStorageFn{ + NewValidatorSet: validatorSet, + Signature: signature, + Bitmap: bitmap, + } + + return commitValidatorSet +} diff --git a/consensus/polybft/governance_manager.go b/consensus/polybft/governance/governance_manager.go similarity index 88% rename from consensus/polybft/governance_manager.go rename to consensus/polybft/governance/governance_manager.go index 0c7cd21448..4d0c24ddfc 100644 --- a/consensus/polybft/governance_manager.go +++ b/consensus/polybft/governance/governance_manager.go @@ -1,4 +1,4 @@ -package polybft +package governance import ( "encoding/json" @@ -13,7 +13,10 @@ import ( bolt "go.etcd.io/bbolt" "github.com/0xPolygon/polygon-edge/chain" + "github.com/0xPolygon/polygon-edge/consensus/polybft/config" "github.com/0xPolygon/polygon-edge/consensus/polybft/contractsapi" + "github.com/0xPolygon/polygon-edge/consensus/polybft/state" + polytypes "github.com/0xPolygon/polygon-edge/consensus/polybft/types" "github.com/0xPolygon/polygon-edge/contracts" "github.com/0xPolygon/polygon-edge/crypto" "github.com/0xPolygon/polygon-edge/forkmanager" @@ -31,12 +34,12 @@ var ( stringABIType = abi.MustNewType("tuple(string)") ) -// isRewardDistributionBlock indicates if reward distribution transaction +// IsRewardDistributionBlock indicates if reward distribution transaction // should happen in given block // if governance fork is enabled, reward distribution is only present on the first block of epoch // and if we are not at the start of chain // if governance fork is not enabled, reward distribution is only present at the epoch ending block -func isRewardDistributionBlock(forks *chain.Forks, isFirstBlockOfEpoch, isEndOfEpoch bool, +func IsRewardDistributionBlock(forks *chain.Forks, isFirstBlockOfEpoch, isEndOfEpoch bool, pendingBlockNumber uint64) bool { if forks.IsActive(chain.Governance, pendingBlockNumber) { return isFirstBlockOfEpoch && pendingBlockNumber > 1 @@ -45,9 +48,9 @@ func isRewardDistributionBlock(forks *chain.Forks, isFirstBlockOfEpoch, isEndOfE return isEndOfEpoch } -// getLookbackSizeForRewardDistribution returns lookback size for reward distribution +// GetLookbackSizeForRewardDistribution returns lookback size for reward distribution // based on if governance fork is enabled or not -func getLookbackSizeForRewardDistribution(forks *chain.Forks, blockNumber uint64) uint64 { +func GetLookbackSizeForRewardDistribution(forks *chain.Forks, blockNumber uint64) uint64 { if forks.IsActive(chain.Governance, blockNumber) { return newRewardLookbackSize } @@ -58,36 +61,36 @@ func getLookbackSizeForRewardDistribution(forks *chain.Forks, blockNumber uint64 // GovernanceManager interface provides functions for handling governance events // and updating client configuration based on executed governance proposals type GovernanceManager interface { - EventSubscriber - PostBlock(req *PostBlockRequest) error - PostEpoch(req *PostEpochRequest) error + state.EventSubscriber + PostBlock(req *polytypes.PostBlockRequest) error + PostEpoch(req *polytypes.PostEpochRequest) error GetClientConfig(dbTx *bolt.Tx) (*chain.Params, error) } -var _ GovernanceManager = (*dummyGovernanceManager)(nil) +var _ GovernanceManager = (*DummyGovernanceManager)(nil) // dummyStakeManager is a dummy implementation of GovernanceManager interface // used only for unit testing -type dummyGovernanceManager struct { - getClientConfigFn func() 
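The two renamed reward helpers above decide, per pending block, whether the distribute-rewards state transaction belongs in it and how far back uptime should be read. A minimal sketch of a caller combining them follows; shouldDistributeRewards and its inputs are hypothetical names used only for illustration and are not part of this patch.

package example // hypothetical consumer package, for illustration only

import (
	"github.com/0xPolygon/polygon-edge/chain"
	"github.com/0xPolygon/polygon-edge/consensus/polybft/governance"
)

// shouldDistributeRewards is a hypothetical helper, not part of this patch.
func shouldDistributeRewards(forks *chain.Forks, firstBlockOfEpoch, endOfEpoch bool,
	pendingBlock uint64) (bool, uint64) {
	if !governance.IsRewardDistributionBlock(forks, firstBlockOfEpoch, endOfEpoch, pendingBlock) {
		return false, 0
	}

	// the lookback decides how far back uptime data is read; it differs
	// depending on whether the Governance fork is active at this height
	return true, governance.GetLookbackSizeForRewardDistribution(forks, pendingBlock)
}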
(*chain.Params, error) +type DummyGovernanceManager struct { + GetClientConfigFn func() (*chain.Params, error) } -func (d *dummyGovernanceManager) PostBlock(req *PostBlockRequest) error { return nil } -func (d *dummyGovernanceManager) PostEpoch(req *PostEpochRequest) error { return nil } -func (d *dummyGovernanceManager) GetClientConfig(dbTx *bolt.Tx) (*chain.Params, error) { - if d.getClientConfigFn != nil { - return d.getClientConfigFn() +func (d *DummyGovernanceManager) PostBlock(req *polytypes.PostBlockRequest) error { return nil } +func (d *DummyGovernanceManager) PostEpoch(req *polytypes.PostEpochRequest) error { return nil } +func (d *DummyGovernanceManager) GetClientConfig(dbTx *bolt.Tx) (*chain.Params, error) { + if d.GetClientConfigFn != nil { + return d.GetClientConfigFn() } return nil, nil } // EventSubscriber implementation -func (d *dummyGovernanceManager) GetLogFilters() map[types.Address][]types.Hash { +func (d *DummyGovernanceManager) GetLogFilters() map[types.Address][]types.Hash { return make(map[types.Address][]types.Hash) } -func (d *dummyGovernanceManager) ProcessLog(header *types.Header, log *ethgo.Log, dbTx *bolt.Tx) error { +func (d *DummyGovernanceManager) ProcessLog(header *types.Header, log *ethgo.Log, dbTx *bolt.Tx) error { return nil } @@ -97,20 +100,25 @@ var _ GovernanceManager = (*governanceManager)(nil) // and updates the client configuration based on executed governance proposals type governanceManager struct { logger hclog.Logger - state *State + state *GovernanceStore allForksHashes map[types.Hash]string } -// newGovernanceManager is a constructor function for governance manager -func newGovernanceManager(genesisParams *chain.Params, +// NewGovernanceManager is a constructor function for governance manager +func NewGovernanceManager(genesisParams *chain.Params, logger hclog.Logger, - state *State, - blockhain blockchainBackend, - dbTx *bolt.Tx) (*governanceManager, error) { - config, err := state.GovernanceStore.getClientConfig(dbTx) + state *state.State, + blockhain polytypes.Blockchain, + dbTx *bolt.Tx) (GovernanceManager, error) { + store, err := newGovernanceStoreWithTx(state.DB(), dbTx) + if err != nil { + return nil, fmt.Errorf("could not create governance store. Error: %w", err) + } + + config, err := store.getClientConfig(dbTx) if config == nil || errors.Is(err, errClientConfigNotFound) { // insert initial config to db if not already inserted - if err = state.GovernanceStore.insertClientConfig(genesisParams, dbTx); err != nil { + if err = store.insertClientConfig(genesisParams, dbTx); err != nil { return nil, err } } else if err != nil { @@ -132,12 +140,12 @@ func newGovernanceManager(genesisParams *chain.Params, g := &governanceManager{ logger: logger, - state: state, + state: store, allForksHashes: allForkNameHashes, } // get all forks we already have in db and activate them on startup - forksInDB, err := state.GovernanceStore.getAllForkEvents(dbTx) + forksInDB, err := store.getAllForkEvents(dbTx) if err != nil { return nil, fmt.Errorf("could not activate forks from db on startup. 
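Exporting DummyGovernanceManager and its GetClientConfigFn field lets tests outside the governance package stub the manager with only the behaviour they need. A short sketch with illustrative values; the helper name is hypothetical and not part of this patch.

package example

import (
	"github.com/0xPolygon/polygon-edge/chain"
	"github.com/0xPolygon/polygon-edge/consensus/polybft/governance"
)

// newStubGovernanceManager is a hypothetical test helper, not part of this patch.
func newStubGovernanceManager() governance.GovernanceManager {
	return &governance.DummyGovernanceManager{
		GetClientConfigFn: func() (*chain.Params, error) {
			// return whatever chain params the test under construction needs
			return &chain.Params{BaseFeeChangeDenom: 8}, nil
		},
	}
}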
Error: %w", err) } @@ -153,11 +161,11 @@ func newGovernanceManager(genesisParams *chain.Params, // GetClientConfig returns latest client configuration from boltdb func (g *governanceManager) GetClientConfig(dbTx *bolt.Tx) (*chain.Params, error) { - return g.state.GovernanceStore.getClientConfig(dbTx) + return g.state.getClientConfig(dbTx) } // PostEpoch notifies the governance manager that an epoch has changed -func (g *governanceManager) PostEpoch(req *PostEpochRequest) error { +func (g *governanceManager) PostEpoch(req *polytypes.PostEpochRequest) error { if !req.Forks.IsActive(chain.Governance, req.FirstBlockOfEpoch) { // if governance fork is not enabled, do nothing return nil @@ -169,7 +177,7 @@ func (g *governanceManager) PostEpoch(req *PostEpochRequest) error { "epoch", previousEpoch) // get events that happened in the previous epoch - eventsRaw, err := g.state.GovernanceStore.getNetworkParamsEvents(previousEpoch, req.DBTx) + eventsRaw, err := g.state.getNetworkParamsEvents(previousEpoch, req.DBTx) if err != nil { return fmt.Errorf("could not get governance events on start of epoch: %d. %w", req.NewEpochID, err) @@ -184,12 +192,12 @@ func (g *governanceManager) PostEpoch(req *PostEpochRequest) error { } // get last saved config - latestChainParams, err := g.state.GovernanceStore.getClientConfig(req.DBTx) + latestChainParams, err := g.state.getClientConfig(req.DBTx) if err != nil { return err } - latestPolybftConfig, err := GetPolyBFTConfig(latestChainParams) + latestPolybftConfig, err := config.GetPolyBFTConfig(latestChainParams) if err != nil { return err } @@ -348,15 +356,15 @@ func (g *governanceManager) PostEpoch(req *PostEpochRequest) error { } } - latestChainParams.Engine[ConsensusName] = latestPolybftConfig + latestChainParams.Engine[config.ConsensusName] = latestPolybftConfig // save updated config to db - return g.state.GovernanceStore.insertClientConfig(latestChainParams, req.DBTx) + return g.state.insertClientConfig(latestChainParams, req.DBTx) } // PostBlock notifies governance manager that a block was finalized // so that he can extract governance events and save them to bolt db -func (g *governanceManager) PostBlock(req *PostBlockRequest) error { +func (g *governanceManager) PostBlock(req *polytypes.PostBlockRequest) error { if !req.Forks.IsActive(chain.Governance, req.FullBlock.Block.Number()) { // if governance fork is not enabled, do nothing return nil @@ -367,7 +375,7 @@ func (g *governanceManager) PostBlock(req *PostBlockRequest) error { currentBlock := req.FullBlock.Block.Number() - forkEvents, err := g.state.GovernanceStore.getAllForkEvents(req.DBTx) + forkEvents, err := g.state.getAllForkEvents(req.DBTx) if err != nil { g.logger.Debug("Post block - Getting fork events failed.", "epoch", req.Epoch, "block", currentBlock) @@ -555,7 +563,7 @@ func (g *governanceManager) ProcessLog(header *types.Header, log *ethgo.Log, dbT return nil } - extra, err := GetIbftExtra(header.ExtraData) + extra, err := polytypes.GetIbftExtra(header.ExtraData) if err != nil { return err } @@ -566,7 +574,7 @@ func (g *governanceManager) ProcessLog(header *types.Header, log *ethgo.Log, dbT "event", event, ) - return g.state.GovernanceStore.insertGovernanceEvent( + return g.state.insertGovernanceEvent( extra.BlockMetaData.EpochNumber, event, dbTx) } diff --git a/consensus/polybft/state_store_governance.go b/consensus/polybft/governance/state_store_governance.go similarity index 89% rename from consensus/polybft/state_store_governance.go rename to 
consensus/polybft/governance/state_store_governance.go index 325c7bddd7..8511170a8d 100644 --- a/consensus/polybft/state_store_governance.go +++ b/consensus/polybft/governance/state_store_governance.go @@ -1,4 +1,4 @@ -package polybft +package governance import ( "bytes" @@ -35,24 +35,42 @@ type GovernanceStore struct { db *bolt.DB } -// initialize creates necessary buckets in DB if they don't already exist -func (g *GovernanceStore) initialize(tx *bolt.Tx) error { +func newGovernanceStore(db *bolt.DB) (*GovernanceStore, error) { + var store *GovernanceStore + + err := db.Update(func(tx *bolt.Tx) error { + s, err := newGovernanceStoreWithTx(db, tx) + if err != nil { + return err + } + + store = s + + return nil + }) + + return store, err +} + +func newGovernanceStoreWithTx(db *bolt.DB, tx *bolt.Tx) (*GovernanceStore, error) { + store := &GovernanceStore{db: db} + if _, err := tx.CreateBucketIfNotExists(networkParamsEventsBucket); err != nil { - return fmt.Errorf("failed to create bucket=%s: %w", + return nil, fmt.Errorf("failed to create bucket=%s: %w", string(networkParamsEventsBucket), err) } if _, err := tx.CreateBucketIfNotExists(forkParamsEventsBucket); err != nil { - return fmt.Errorf("failed to create bucket=%s: %w", + return nil, fmt.Errorf("failed to create bucket=%s: %w", string(forkParamsEventsBucket), err) } if _, err := tx.CreateBucketIfNotExists(clientConfigBucket); err != nil { - return fmt.Errorf("failed to create bucket=%s: %w", + return nil, fmt.Errorf("failed to create bucket=%s: %w", string(clientConfigBucket), err) } - return nil + return store, nil } // insertGovernanceEvent inserts governance event to bolt db diff --git a/consensus/polybft/state_store_governance_test.go b/consensus/polybft/governance/state_store_governance_test.go similarity index 80% rename from consensus/polybft/state_store_governance_test.go rename to consensus/polybft/governance/state_store_governance_test.go index bd043bf0e0..668807c56e 100644 --- a/consensus/polybft/state_store_governance_test.go +++ b/consensus/polybft/governance/state_store_governance_test.go @@ -1,19 +1,54 @@ -package polybft +package governance import ( + "fmt" "math/big" + "os" + "path" "testing" "time" "github.com/0xPolygon/polygon-edge/chain" + "github.com/0xPolygon/polygon-edge/consensus/polybft/config" "github.com/0xPolygon/polygon-edge/consensus/polybft/contractsapi" "github.com/0xPolygon/polygon-edge/consensus/polybft/validator" "github.com/0xPolygon/polygon-edge/contracts" "github.com/0xPolygon/polygon-edge/helper/common" "github.com/0xPolygon/polygon-edge/types" "github.com/stretchr/testify/require" + bolt "go.etcd.io/bbolt" ) +// newTestState creates new instance of state used by tests. +func newTestState(tb testing.TB) *GovernanceStore { + tb.Helper() + + dir := fmt.Sprintf("/tmp/consensus-temp_%v", time.Now().UTC().Format(time.RFC3339Nano)) + err := os.Mkdir(dir, 0775) + + if err != nil { + tb.Fatal(err) + } + + tb.Cleanup(func() { + if err := os.RemoveAll(dir); err != nil { + tb.Fatal(err) + } + }) + + db, err := bolt.Open(path.Join(dir, "my.db"), 0666, nil) + if err != nil { + tb.Fatal(err) + } + + governanceStore, err := newGovernanceStore(db) + if err != nil { + tb.Fatal(err) + } + + return governanceStore +} + func TestGovernanceStore_InsertAndGetEvents(t *testing.T) { t.Parallel() @@ -60,25 +95,25 @@ func TestGovernanceStore_InsertAndGetEvents(t *testing.T) { allEvents = append(allEvents, forkParamsEvents...) 
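The newTestState helper above hand-rolls a temp directory and its cleanup. A slightly more compact in-package variant is sketched below using the testing package's TempDir, purely for illustration; the helper name is hypothetical and behaviour is intended to be the same.

package governance // in-package sketch, since newGovernanceStore is unexported

import (
	"path"
	"testing"

	bolt "go.etcd.io/bbolt"
)

// newTestStateCompact is a hypothetical alternative to newTestState; not part of this patch.
func newTestStateCompact(tb testing.TB) *GovernanceStore {
	tb.Helper()

	// tb.TempDir is removed automatically, so only the bolt handle needs closing
	db, err := bolt.Open(path.Join(tb.TempDir(), "my.db"), 0666, nil)
	if err != nil {
		tb.Fatal(err)
	}

	tb.Cleanup(func() {
		if err := db.Close(); err != nil {
			tb.Fatal(err)
		}
	})

	store, err := newGovernanceStore(db)
	if err != nil {
		tb.Fatal(err)
	}

	return store
}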
for _, e := range allEvents { - require.NoError(t, state.GovernanceStore.insertGovernanceEvent(epoch, e, nil)) + require.NoError(t, state.insertGovernanceEvent(epoch, e, nil)) } // test for an epoch that didn't have any events - eventsRaw, err := state.GovernanceStore.getNetworkParamsEvents(10, nil) + eventsRaw, err := state.getNetworkParamsEvents(10, nil) require.NoError(t, err) require.Len(t, eventsRaw, 0) // fork events are not saved per epoch so we should have 2 - forksInDB, err := state.GovernanceStore.getAllForkEvents(nil) + forksInDB, err := state.getAllForkEvents(nil) require.NoError(t, err) require.Len(t, forksInDB, len(forkParamsEvents)) // test for the epoch that had events - eventsRaw, err = state.GovernanceStore.getNetworkParamsEvents(epoch, nil) + eventsRaw, err = state.getNetworkParamsEvents(epoch, nil) require.NoError(t, err) require.Len(t, eventsRaw, len(networkParamsEvents)) - forksInDB, err = state.GovernanceStore.getAllForkEvents(nil) + forksInDB, err = state.getAllForkEvents(nil) require.NoError(t, err) require.Len(t, forksInDB, len(forkParamsEvents)) @@ -86,14 +121,14 @@ func TestGovernanceStore_InsertAndGetEvents(t *testing.T) { newFeatureEventTwo := &contractsapi.UpdatedFeatureEvent{Feature: types.BytesToHash([]byte("OxSomeFeature3")), Block: big.NewInt(130_000)} - require.NoError(t, state.GovernanceStore.insertGovernanceEvent(epoch, sprintSizeEvent, nil)) - require.NoError(t, state.GovernanceStore.insertGovernanceEvent(epoch, newFeatureEventTwo, nil)) + require.NoError(t, state.insertGovernanceEvent(epoch, sprintSizeEvent, nil)) + require.NoError(t, state.insertGovernanceEvent(epoch, newFeatureEventTwo, nil)) - eventsRaw, err = state.GovernanceStore.getNetworkParamsEvents(epoch, nil) + eventsRaw, err = state.getNetworkParamsEvents(epoch, nil) require.NoError(t, err) require.Len(t, eventsRaw, len(networkParamsEvents)+1) - forksInDB, err = state.GovernanceStore.getAllForkEvents(nil) + forksInDB, err = state.getAllForkEvents(nil) require.NoError(t, err) require.Len(t, forksInDB, len(forkParamsEvents)+1) } @@ -103,23 +138,23 @@ func TestGovernanceStore_InsertAndGetClientConfig(t *testing.T) { initialPolyConfig := createTestPolybftConfig() initialConfig := &chain.Params{ - Engine: map[string]interface{}{ConsensusName: initialPolyConfig}, + Engine: map[string]interface{}{config.ConsensusName: initialPolyConfig}, BaseFeeChangeDenom: 16, } state := newTestState(t) // try get config when there is none - _, err := state.GovernanceStore.getClientConfig(nil) + _, err := state.getClientConfig(nil) require.ErrorIs(t, err, errClientConfigNotFound) // insert config - require.NoError(t, state.GovernanceStore.insertClientConfig(initialConfig, nil)) + require.NoError(t, state.insertClientConfig(initialConfig, nil)) // now config should exist - configFromDB, err := state.GovernanceStore.getClientConfig(nil) + configFromDB, err := state.getClientConfig(nil) require.NoError(t, err) - polyConfigFromDB, err := GetPolyBFTConfig(configFromDB) + polyConfigFromDB, err := config.GetPolyBFTConfig(configFromDB) require.NoError(t, err) // check some fields to make sure they are as expected @@ -133,8 +168,8 @@ func TestGovernanceStore_InsertAndGetClientConfig(t *testing.T) { require.Equal(t, configFromDB.BaseFeeChangeDenom, initialConfig.BaseFeeChangeDenom) } -func createTestPolybftConfig() *PolyBFTConfig { - return &PolyBFTConfig{ +func createTestPolybftConfig() *config.PolyBFT { + return &config.PolyBFT{ InitialValidatorSet: []*validator.GenesisValidator{ { Address: 
types.BytesToAddress([]byte{0, 1, 2}), @@ -153,7 +188,7 @@ func createTestPolybftConfig() *PolyBFTConfig { Stake: big.NewInt(100), }, }, - Bridge: map[uint64]*BridgeConfig{0: { + Bridge: map[uint64]*config.Bridge{0: { ExternalGatewayAddr: types.StringToAddress("0xGatewayAddr"), ExternalERC20PredicateAddr: types.StringToAddress("0xRootERC20PredicateAddr"), ExternalMintableERC20PredicateAddr: types.StringToAddress("0xChildMintableERC20PredicateAddr"), @@ -180,19 +215,19 @@ func createTestPolybftConfig() *PolyBFTConfig { CheckpointInterval: 900, BlockTimeDrift: 10, Governance: types.ZeroAddress, - NativeTokenConfig: &TokenConfig{ + NativeTokenConfig: &config.Token{ Name: "Polygon_MATIC", Symbol: "MATIC", Decimals: 18, }, InitialTrieRoot: types.ZeroHash, WithdrawalWaitPeriod: 1, - RewardConfig: &RewardsConfig{ + RewardConfig: &config.Rewards{ TokenAddress: types.StringToAddress("0xRewardTokenAddr"), WalletAddress: types.StringToAddress("0xRewardWalletAddr"), WalletAmount: big.NewInt(1_000_000), }, - GovernanceConfig: &GovernanceConfig{ + GovernanceConfig: &config.Governance{ VotingDelay: big.NewInt(1000), VotingPeriod: big.NewInt(10_0000), ProposalThreshold: big.NewInt(1000), diff --git a/consensus/polybft/governance_manager_test.go b/consensus/polybft/governance_manager_test.go deleted file mode 100644 index b0187c6ea9..0000000000 --- a/consensus/polybft/governance_manager_test.go +++ /dev/null @@ -1,143 +0,0 @@ -package polybft - -import ( - "math/big" - "testing" - - "github.com/0xPolygon/polygon-edge/chain" - "github.com/0xPolygon/polygon-edge/consensus/polybft/contractsapi" - "github.com/0xPolygon/polygon-edge/forkmanager" - "github.com/0xPolygon/polygon-edge/types" - "github.com/hashicorp/go-hclog" - "github.com/stretchr/testify/require" -) - -func TestGovernanceManager_PostEpoch(t *testing.T) { - t.Parallel() - - state := newTestState(t) - governanceManager := &governanceManager{ - state: state, - logger: hclog.NewNullLogger(), - } - - // insert some governance event - baseFeeChangeDenomEvent := &contractsapi.NewBaseFeeChangeDenomEvent{BaseFeeChangeDenom: big.NewInt(100)} - epochRewardEvent := &contractsapi.NewEpochRewardEvent{Reward: big.NewInt(10000)} - - require.NoError(t, state.GovernanceStore.insertGovernanceEvent(1, baseFeeChangeDenomEvent, nil)) - require.NoError(t, state.GovernanceStore.insertGovernanceEvent(1, epochRewardEvent, nil)) - - // no initial config was saved, so we expect an error - require.ErrorIs(t, governanceManager.PostEpoch(&PostEpochRequest{ - NewEpochID: 2, - FirstBlockOfEpoch: 21, - Forks: &chain.Forks{chain.Governance: chain.NewFork(0)}, - }), - errClientConfigNotFound) - - params := &chain.Params{ - BaseFeeChangeDenom: 8, - Engine: map[string]interface{}{ConsensusName: createTestPolybftConfig()}, - } - - // insert initial config - require.NoError(t, state.GovernanceStore.insertClientConfig(params, nil)) - - // PostEpoch will now update config with new epoch reward value - require.NoError(t, governanceManager.PostEpoch(&PostEpochRequest{ - NewEpochID: 2, - FirstBlockOfEpoch: 21, - Forks: &chain.Forks{chain.Governance: chain.NewFork(0)}, - })) - - updatedConfig, err := state.GovernanceStore.getClientConfig(nil) - require.NoError(t, err) - require.Equal(t, baseFeeChangeDenomEvent.BaseFeeChangeDenom.Uint64(), updatedConfig.BaseFeeChangeDenom) - - pbftConfig, err := GetPolyBFTConfig(updatedConfig) - require.NoError(t, err) - - require.Equal(t, epochRewardEvent.Reward.Uint64(), pbftConfig.EpochReward) -} - -func TestGovernanceManager_PostBlock(t *testing.T) { - 
t.Parallel() - - genesisPolybftConfig := createTestPolybftConfig() - - t.Run("Has no events in block", func(t *testing.T) { - t.Parallel() - - state := newTestState(t) - - // no governance events in receipts - req := &PostBlockRequest{ - FullBlock: &types.FullBlock{Block: &types.Block{Header: &types.Header{Number: 5}}, - Receipts: []*types.Receipt{}, - }, - Epoch: 1, - Forks: &chain.Forks{chain.Governance: chain.NewFork(0)}, - } - - blockchainMock := new(blockchainMock) - blockchainMock.On("CurrentHeader").Return(&types.Header{ - Number: 0, - }) - - chainParams := &chain.Params{Engine: map[string]interface{}{ConsensusName: genesisPolybftConfig}} - governanceManager, err := newGovernanceManager(chainParams, - hclog.NewNullLogger(), state, blockchainMock, nil) - require.NoError(t, err) - - require.NoError(t, governanceManager.PostBlock(req)) - - eventsRaw, err := state.GovernanceStore.getNetworkParamsEvents(1, nil) - require.NoError(t, err) - require.Len(t, eventsRaw, 0) - }) - - t.Run("Has new fork", func(t *testing.T) { - t.Parallel() - - var ( - newForkHash = types.StringToHash("0xNewForkHash") - newForkBlock = big.NewInt(5) - newForkName = "newFork" - ) - - state := newTestState(t) - - req := &PostBlockRequest{ - FullBlock: &types.FullBlock{Block: &types.Block{Header: &types.Header{Number: 5}}}, - Epoch: 1, - Forks: &chain.Forks{chain.Governance: chain.NewFork(0)}, - } - - blockchainMock := new(blockchainMock) - blockchainMock.On("CurrentHeader").Return(&types.Header{ - Number: 4, - }) - - chainParams := &chain.Params{Engine: map[string]interface{}{ConsensusName: genesisPolybftConfig}} - governanceManager, err := newGovernanceManager(chainParams, - hclog.NewNullLogger(), state, blockchainMock, nil) - require.NoError(t, err) - - // this cheats that we have this fork in code - governanceManager.allForksHashes[newForkHash] = newForkName - - require.NoError(t, state.GovernanceStore.insertGovernanceEvent(1, - &contractsapi.NewFeatureEvent{ - Feature: newForkHash, Block: newForkBlock, - }, nil)) - - // new fork should not be registered and enabled before PostBlock - require.False(t, forkmanager.GetInstance().IsForkEnabled(newForkName, newForkBlock.Uint64())) - - require.NoError(t, governanceManager.PostBlock(req)) - - // new fork should be registered and enabled before PostBlock - require.True(t, forkmanager.GetInstance().IsForkEnabled(newForkName, newForkBlock.Uint64())) - }) -} diff --git a/consensus/polybft/hash.go b/consensus/polybft/hash.go index 7f769bc07c..54f039ffe2 100644 --- a/consensus/polybft/hash.go +++ b/consensus/polybft/hash.go @@ -3,6 +3,7 @@ package polybft import ( "sync" + polytypes "github.com/0xPolygon/polygon-edge/consensus/polybft/types" "github.com/0xPolygon/polygon-edge/types" ) @@ -17,7 +18,7 @@ func setupHeaderHashFunc() { types.HeaderHash = func(h *types.Header) types.Hash { // when hashing the block for signing we have to remove from // the extra field the seal and committed seal items - extra, err := GetIbftExtraClean(h.ExtraData) + extra, err := polytypes.GetIbftExtraClean(h.ExtraData) if err != nil { return types.ZeroHash } diff --git a/consensus/polybft/hash_test.go b/consensus/polybft/hash_test.go index efebf09d3c..5b371a8e05 100644 --- a/consensus/polybft/hash_test.go +++ b/consensus/polybft/hash_test.go @@ -4,18 +4,20 @@ import ( "testing" "github.com/0xPolygon/polygon-edge/consensus/polybft/bitmap" + "github.com/0xPolygon/polygon-edge/consensus/polybft/helpers" "github.com/0xPolygon/polygon-edge/consensus/polybft/signer" + polytypes 
"github.com/0xPolygon/polygon-edge/consensus/polybft/types" "github.com/0xPolygon/polygon-edge/consensus/polybft/validator" "github.com/0xPolygon/polygon-edge/consensus/polybft/wallet" "github.com/0xPolygon/polygon-edge/types" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func Test_setupHeaderHashFunc(t *testing.T) { - extra := &Extra{ + extra := &polytypes.Extra{ Validators: &validator.ValidatorSetDelta{Removed: bitmap.Bitmap{1}}, - Parent: createSignature(t, []*wallet.Account{generateTestAccount(t)}, types.ZeroHash, signer.DomainBridge), - Committed: &Signature{}, + Parent: helpers.CreateSignature(t, []*wallet.Account{helpers.GenerateTestAccount(t)}, types.ZeroHash, signer.DomainBridge), + Committed: &polytypes.Signature{}, } header := &types.Header{ @@ -27,12 +29,17 @@ func Test_setupHeaderHashFunc(t *testing.T) { header.ExtraData = extra.MarshalRLPTo(nil) notFullExtraHash := types.HeaderHash(header) - extra.Committed = createSignature(t, []*wallet.Account{generateTestAccount(t)}, types.ZeroHash, signer.DomainBridge) + extra.Committed = helpers.CreateSignature(t, []*wallet.Account{helpers.GenerateTestAccount(t)}, types.ZeroHash, signer.DomainBridge) header.ExtraData = extra.MarshalRLPTo(nil) fullExtraHash := types.HeaderHash(header) - assert.Equal(t, notFullExtraHash, fullExtraHash) + require.Equal(t, notFullExtraHash, fullExtraHash) header.ExtraData = []byte{1, 2, 3, 4, 100, 200, 255} - assert.Equal(t, types.ZeroHash, types.HeaderHash(header)) // to small extra data + require.Equal(t, types.ZeroHash, types.HeaderHash(header)) // to small extra data +} + +func init() { + // setup custom hash header func + setupHeaderHashFunc() } diff --git a/consensus/polybft/mocks_test.go b/consensus/polybft/helpers/mocks.go similarity index 56% rename from consensus/polybft/mocks_test.go rename to consensus/polybft/helpers/mocks.go index 09d01349d3..5d0b3e9c52 100644 --- a/consensus/polybft/mocks_test.go +++ b/consensus/polybft/helpers/mocks.go @@ -1,4 +1,4 @@ -package polybft +package helpers import ( "math/big" @@ -6,6 +6,8 @@ import ( "github.com/0xPolygon/polygon-edge/blockchain" "github.com/0xPolygon/polygon-edge/consensus/polybft/contractsapi" + systemstate "github.com/0xPolygon/polygon-edge/consensus/polybft/system_state" + polytypes "github.com/0xPolygon/polygon-edge/consensus/polybft/types" "github.com/0xPolygon/polygon-edge/consensus/polybft/validator" "github.com/0xPolygon/polygon-edge/helper/progress" "github.com/0xPolygon/polygon-edge/state" @@ -18,52 +20,52 @@ import ( bolt "go.etcd.io/bbolt" ) -var _ blockchainBackend = (*blockchainMock)(nil) +var _ polytypes.Blockchain = (*BlockchainMock)(nil) -type blockchainMock struct { +type BlockchainMock struct { mock.Mock } -func (m *blockchainMock) CurrentHeader() *types.Header { +func (m *BlockchainMock) CurrentHeader() *types.Header { args := m.Called() return args.Get(0).(*types.Header) } -func (m *blockchainMock) CommitBlock(block *types.FullBlock) error { +func (m *BlockchainMock) CommitBlock(block *types.FullBlock) error { args := m.Called(block) return args.Error(0) } -func (m *blockchainMock) NewBlockBuilder(parent *types.Header, coinbase types.Address, - txPool txPoolInterface, blockTime time.Duration, logger hclog.Logger) (blockBuilder, error) { +func (m *BlockchainMock) NewBlockBuilder(parent *types.Header, coinbase types.Address, + txPool polytypes.TxPool, blockTime time.Duration, logger hclog.Logger) (polytypes.BlockBuilder, error) { args := m.Called() - return args.Get(0).(blockBuilder), 
args.Error(1) + return args.Get(0).(polytypes.BlockBuilder), args.Error(1) } -func (m *blockchainMock) ProcessBlock(parent *types.Header, block *types.Block) (*types.FullBlock, error) { +func (m *BlockchainMock) ProcessBlock(parent *types.Header, block *types.Block) (*types.FullBlock, error) { args := m.Called(parent, block) return args.Get(0).(*types.FullBlock), args.Error(1) } -func (m *blockchainMock) GetStateProviderForBlock(block *types.Header) (contract.Provider, error) { +func (m *BlockchainMock) GetStateProviderForBlock(block *types.Header) (contract.Provider, error) { args := m.Called(block) stateProvider, _ := args.Get(0).(contract.Provider) return stateProvider, nil } -func (m *blockchainMock) GetStateProvider(transition *state.Transition) contract.Provider { +func (m *BlockchainMock) GetStateProvider(transition *state.Transition) contract.Provider { args := m.Called() stateProvider, _ := args.Get(0).(contract.Provider) return stateProvider } -func (m *blockchainMock) GetHeaderByNumber(number uint64) (*types.Header, bool) { +func (m *BlockchainMock) GetHeaderByNumber(number uint64) (*types.Header, bool) { args := m.Called(number) if len(args) == 1 { @@ -86,7 +88,7 @@ func (m *blockchainMock) GetHeaderByNumber(number uint64) (*types.Header, bool) panic("Unsupported mock for GetHeaderByNumber") //nolint:gocritic } -func (m *blockchainMock) GetHeaderByHash(hash types.Hash) (*types.Header, bool) { +func (m *BlockchainMock) GetHeaderByHash(hash types.Hash) (*types.Header, bool) { args := m.Called(hash) header, ok := args.Get(0).(*types.Header) @@ -104,41 +106,41 @@ func (m *blockchainMock) GetHeaderByHash(hash types.Hash) (*types.Header, bool) panic("Unsupported mock for GetHeaderByHash") //nolint:gocritic } -func (m *blockchainMock) GetSystemState(provider contract.Provider) SystemState { +func (m *BlockchainMock) GetSystemState(provider contract.Provider) systemstate.SystemState { args := m.Called(provider) - return args.Get(0).(SystemState) + return args.Get(0).(systemstate.SystemState) } -func (m *blockchainMock) SubscribeEvents() blockchain.Subscription { +func (m *BlockchainMock) SubscribeEvents() blockchain.Subscription { return nil } -func (m *blockchainMock) UnubscribeEvents(blockchain.Subscription) { +func (m *BlockchainMock) UnubscribeEvents(blockchain.Subscription) { } -func (m *blockchainMock) CalculateGasLimit(number uint64) (uint64, error) { +func (m *BlockchainMock) CalculateGasLimit(number uint64) (uint64, error) { return 0, nil } -func (m *blockchainMock) GetChainID() uint64 { +func (m *BlockchainMock) GetChainID() uint64 { return 0 } -func (m *blockchainMock) GetReceiptsByHash(hash types.Hash) ([]*types.Receipt, error) { +func (m *BlockchainMock) GetReceiptsByHash(hash types.Hash) ([]*types.Receipt, error) { args := m.Called(hash) return args.Get(0).([]*types.Receipt), args.Error(1) } -var _ polybftBackend = (*polybftBackendMock)(nil) +var _ polytypes.Polybft = (*PolybftBackendMock)(nil) -type polybftBackendMock struct { +type PolybftBackendMock struct { mock.Mock } // GetValidators retrieves validator set for the given block -func (p *polybftBackendMock) GetValidators(blockNumber uint64, parents []*types.Header) (validator.AccountSet, error) { +func (p *PolybftBackendMock) GetValidators(blockNumber uint64, parents []*types.Header) (validator.AccountSet, error) { args := p.Called(blockNumber, parents) if len(args) == 1 { accountSet, _ := args.Get(0).(validator.AccountSet) @@ -153,7 +155,7 @@ func (p *polybftBackendMock) GetValidators(blockNumber uint64, parents 
[]*types. panic("polybftBackendMock.GetValidators doesn't support such combination of arguments") //nolint:gocritic } -func (p *polybftBackendMock) GetValidatorsWithTx(blockNumber uint64, parents []*types.Header, +func (p *PolybftBackendMock) GetValidatorsWithTx(blockNumber uint64, parents []*types.Header, dbTx *bolt.Tx) (validator.AccountSet, error) { args := p.Called(blockNumber, parents, dbTx) if len(args) == 1 { @@ -169,17 +171,17 @@ func (p *polybftBackendMock) GetValidatorsWithTx(blockNumber uint64, parents []* panic("polybftBackendMock.GetValidatorsWithTx doesn't support such combination of arguments") //nolint:gocritic } -func (p *polybftBackendMock) SetBlockTime(blockTime time.Duration) { +func (p *PolybftBackendMock) SetBlockTime(blockTime time.Duration) { p.Called(blockTime) } -var _ blockBuilder = (*blockBuilderMock)(nil) +var _ polytypes.BlockBuilder = (*BlockBuilderMock)(nil) -type blockBuilderMock struct { +type BlockBuilderMock struct { mock.Mock } -func (m *blockBuilderMock) Reset() error { +func (m *BlockBuilderMock) Reset() error { args := m.Called() if len(args) == 0 { return nil @@ -188,7 +190,7 @@ func (m *blockBuilderMock) Reset() error { return args.Error(0) } -func (m *blockBuilderMock) WriteTx(tx *types.Transaction) error { +func (m *BlockBuilderMock) WriteTx(tx *types.Transaction) error { args := m.Called(tx) if len(args) == 0 { return nil @@ -197,18 +199,18 @@ func (m *blockBuilderMock) WriteTx(tx *types.Transaction) error { return args.Error(0) } -func (m *blockBuilderMock) Fill() { +func (m *BlockBuilderMock) Fill() { m.Called() } // Receipts returns the collection of transaction receipts for given block -func (m *blockBuilderMock) Receipts() []*types.Receipt { +func (m *BlockBuilderMock) Receipts() []*types.Receipt { args := m.Called() return args.Get(0).([]*types.Receipt) } -func (m *blockBuilderMock) Build(handler func(*types.Header)) (*types.FullBlock, error) { +func (m *BlockBuilderMock) Build(handler func(*types.Header)) (*types.FullBlock, error) { args := m.Called(handler) builtBlock := args.Get(0).(*types.FullBlock) @@ -217,19 +219,19 @@ func (m *blockBuilderMock) Build(handler func(*types.Header)) (*types.FullBlock, return builtBlock, nil } -func (m *blockBuilderMock) GetState() *state.Transition { +func (m *BlockBuilderMock) GetState() *state.Transition { args := m.Called() return args.Get(0).(*state.Transition) } -var _ SystemState = (*systemStateMock)(nil) +var _ systemstate.SystemState = (*SystemStateMock)(nil) -type systemStateMock struct { +type SystemStateMock struct { mock.Mock } -func (m *systemStateMock) GetNextCommittedIndex(chainID uint64, chainType ChainType) (uint64, error) { +func (m *SystemStateMock) GetNextCommittedIndex(chainID uint64, chainType systemstate.ChainType) (uint64, error) { args := m.Called() if len(args) == 1 { @@ -245,7 +247,7 @@ func (m *systemStateMock) GetNextCommittedIndex(chainID uint64, chainType ChainT return 0, nil } -func (m *systemStateMock) GetBridgeBatchByNumber(numberOfBatch *big.Int) ( +func (m *SystemStateMock) GetBridgeBatchByNumber(numberOfBatch *big.Int) ( *contractsapi.SignedBridgeMessageBatch, error) { args := m.Called() if len(args) == 1 { @@ -261,7 +263,7 @@ func (m *systemStateMock) GetBridgeBatchByNumber(numberOfBatch *big.Int) ( return &contractsapi.SignedBridgeMessageBatch{}, nil } -func (m *systemStateMock) GetValidatorSetByNumber(numberOfValidatorSet *big.Int) ( +func (m *SystemStateMock) GetValidatorSetByNumber(numberOfValidatorSet *big.Int) ( *contractsapi.SignedValidatorSet, error) { args 
:= m.Called() if len(args) == 1 { @@ -277,7 +279,7 @@ func (m *systemStateMock) GetValidatorSetByNumber(numberOfValidatorSet *big.Int) return &contractsapi.SignedValidatorSet{}, nil } -func (m *systemStateMock) GetEpoch() (uint64, error) { +func (m *SystemStateMock) GetEpoch() (uint64, error) { args := m.Called() if len(args) == 1 { epochNumber, _ := args.Get(0).(uint64) @@ -297,156 +299,106 @@ func (m *systemStateMock) GetEpoch() (uint64, error) { return 0, nil } -var _ contract.Provider = (*stateProviderMock)(nil) +var _ contract.Provider = (*StateProviderMock)(nil) -type stateProviderMock struct { +type StateProviderMock struct { mock.Mock } -func (s *stateProviderMock) Call(ethgo.Address, []byte, *contract.CallOpts) ([]byte, error) { +func (s *StateProviderMock) Call(ethgo.Address, []byte, *contract.CallOpts) ([]byte, error) { return nil, nil } -func (s *stateProviderMock) Txn(ethgo.Address, ethgo.Key, []byte) (contract.Txn, error) { +func (s *StateProviderMock) Txn(ethgo.Address, ethgo.Key, []byte) (contract.Txn, error) { return nil, nil } -var _ BridgeTransport = (*transportMock)(nil) +var _ polytypes.TxPool = (*TxPoolMock)(nil) -type transportMock struct { +type TxPoolMock struct { mock.Mock } -func (t *transportMock) Multicast(msg interface{}) { - _ = t.Called(msg) -} - -type testHeadersMap struct { - headersByNumber map[uint64]*types.Header -} - -func (t *testHeadersMap) addHeader(header *types.Header) { - if t.headersByNumber == nil { - t.headersByNumber = map[uint64]*types.Header{} - } - - t.headersByNumber[header.Number] = header -} - -func (t *testHeadersMap) getHeader(number uint64) *types.Header { - return t.headersByNumber[number] -} - -func (t *testHeadersMap) getHeaderByHash(hash types.Hash) *types.Header { - for _, header := range t.headersByNumber { - if header.Hash == hash { - return header - } - } - - return nil -} - -func (t *testHeadersMap) getHeaders() []*types.Header { - headers := make([]*types.Header, 0, len(t.headersByNumber)) - for _, header := range t.headersByNumber { - headers = append(headers, header) - } - - return headers -} - -var _ txPoolInterface = (*txPoolMock)(nil) - -type txPoolMock struct { - mock.Mock -} - -func (tp *txPoolMock) Prepare() { +func (tp *TxPoolMock) Prepare() { tp.Called() } -func (tp *txPoolMock) Length() uint64 { +func (tp *TxPoolMock) Length() uint64 { args := tp.Called() return args[0].(uint64) } -func (tp *txPoolMock) Peek() *types.Transaction { +func (tp *TxPoolMock) Peek() *types.Transaction { args := tp.Called() return args[0].(*types.Transaction) } -func (tp *txPoolMock) Pop(tx *types.Transaction) { +func (tp *TxPoolMock) Pop(tx *types.Transaction) { tp.Called(tx) } -func (tp *txPoolMock) Drop(tx *types.Transaction) { +func (tp *TxPoolMock) Drop(tx *types.Transaction) { tp.Called(tx) } -func (tp *txPoolMock) Demote(tx *types.Transaction) { +func (tp *TxPoolMock) Demote(tx *types.Transaction) { tp.Called(tx) } -func (tp *txPoolMock) SetSealing(v bool) { +func (tp *TxPoolMock) SetSealing(v bool) { tp.Called(v) } -func (tp *txPoolMock) ResetWithBlock(fullBlock *types.Block) { +func (tp *TxPoolMock) ResetWithBlock(fullBlock *types.Block) { tp.Called(fullBlock) } -func (tp *txPoolMock) ReinsertProposed() { +func (tp *TxPoolMock) ReinsertProposed() { tp.Called() } -func (tp *txPoolMock) ClearProposed() { +func (tp *TxPoolMock) ClearProposed() { tp.Called() } -var _ syncer.Syncer = (*syncerMock)(nil) +var _ syncer.Syncer = (*SyncerMock)(nil) -type syncerMock struct { +type SyncerMock struct { mock.Mock } -func (tp *syncerMock) 
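With the mocks moved into the exported helpers package, tests in other polybft sub-packages can reuse them directly through the usual testify flow. A minimal sketch of such a test; the test name and header values are illustrative, not taken from the patch.

package example

import (
	"testing"

	"github.com/0xPolygon/polygon-edge/consensus/polybft/helpers"
	"github.com/0xPolygon/polygon-edge/types"
	"github.com/stretchr/testify/require"
)

// TestCurrentHeader_ExportedMock is a hypothetical test, not part of this patch.
func TestCurrentHeader_ExportedMock(t *testing.T) {
	blockchainMock := new(helpers.BlockchainMock)
	blockchainMock.On("CurrentHeader").Return(&types.Header{Number: 10}).Once()

	require.Equal(t, uint64(10), blockchainMock.CurrentHeader().Number)

	blockchainMock.AssertExpectations(t)
}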
Start() error { +func (tp *SyncerMock) Start() error { args := tp.Called() return args.Error(0) } -func (tp *syncerMock) Close() error { +func (tp *SyncerMock) Close() error { args := tp.Called() return args.Error(0) } -func (tp *syncerMock) GetSyncProgression() *progress.Progression { +func (tp *SyncerMock) GetSyncProgression() *progress.Progression { args := tp.Called() return args[0].(*progress.Progression) } -func (tp *syncerMock) HasSyncPeer() bool { +func (tp *SyncerMock) HasSyncPeer() bool { args := tp.Called() return args[0].(bool) } -func (tp *syncerMock) Sync(func(*types.FullBlock) bool) error { +func (tp *SyncerMock) Sync(func(*types.FullBlock) bool) error { args := tp.Called() return args.Error(0) } -func (tp *syncerMock) UpdateBlockTimeout(time.Duration) { +func (tp *SyncerMock) UpdateBlockTimeout(time.Duration) { tp.Called() } - -func init() { - // setup custom hash header func - setupHeaderHashFunc() -} diff --git a/consensus/polybft/helpers/runtime_helpers.go b/consensus/polybft/helpers/runtime_helpers.go new file mode 100644 index 0000000000..364cda3ec5 --- /dev/null +++ b/consensus/polybft/helpers/runtime_helpers.go @@ -0,0 +1,24 @@ +package helpers + +import ( + "github.com/0xPolygon/polygon-edge/blockchain" + polytypes "github.com/0xPolygon/polygon-edge/consensus/polybft/types" + "github.com/0xPolygon/polygon-edge/types" +) + +const AbiMethodIDLength = 4 + +// GetBlockData returns block header and extra +func GetBlockData(blockNumber uint64, blockchainBackend polytypes.Blockchain) (*types.Header, *polytypes.Extra, error) { + blockHeader, found := blockchainBackend.GetHeaderByNumber(blockNumber) + if !found { + return nil, nil, blockchain.ErrNoBlock + } + + blockExtra, err := polytypes.GetIbftExtra(blockHeader.ExtraData) + if err != nil { + return nil, nil, err + } + + return blockHeader, blockExtra, nil +} diff --git a/consensus/polybft/helpers/testing_helpers.go b/consensus/polybft/helpers/testing_helpers.go new file mode 100644 index 0000000000..2cd9ddb258 --- /dev/null +++ b/consensus/polybft/helpers/testing_helpers.go @@ -0,0 +1,107 @@ +package helpers + +import ( + "crypto/rand" + "testing" + + "github.com/0xPolygon/polygon-edge/bls" + "github.com/0xPolygon/polygon-edge/consensus/polybft/bitmap" + polytypes "github.com/0xPolygon/polygon-edge/consensus/polybft/types" + "github.com/0xPolygon/polygon-edge/consensus/polybft/wallet" + "github.com/0xPolygon/polygon-edge/types" + "github.com/stretchr/testify/require" +) + +// GenerateRandomBytes generates byte array with random data of 32 bytes length +func GenerateRandomBytes(t *testing.T) (result []byte) { + t.Helper() + + result = make([]byte, types.HashLength) + _, err := rand.Reader.Read(result) + require.NoError(t, err, "Cannot generate random byte array content.") + + return +} + +type TestHeadersMap struct { + HeadersByNumber map[uint64]*types.Header +} + +func (t *TestHeadersMap) AddHeader(header *types.Header) { + if t.HeadersByNumber == nil { + t.HeadersByNumber = map[uint64]*types.Header{} + } + + t.HeadersByNumber[header.Number] = header +} + +func (t *TestHeadersMap) GetHeader(number uint64) *types.Header { + return t.HeadersByNumber[number] +} + +func (t *TestHeadersMap) GetHeaderByHash(hash types.Hash) *types.Header { + for _, header := range t.HeadersByNumber { + if header.Hash == hash { + return header + } + } + + return nil +} + +func (t *TestHeadersMap) GetHeaders() []*types.Header { + headers := make([]*types.Header, 0, len(t.HeadersByNumber)) + for _, header := range t.HeadersByNumber { + headers = 
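GetBlockData bundles the header lookup with decoding of the ibft Extra, which is the common call pattern across the runtime. A sketch of a typical consumer that reads the epoch number from the decoded block meta data; epochOfBlock is a hypothetical name, not part of this patch.

package example

import (
	"github.com/0xPolygon/polygon-edge/consensus/polybft/helpers"
	polytypes "github.com/0xPolygon/polygon-edge/consensus/polybft/types"
)

// epochOfBlock is a hypothetical helper, not part of this patch.
func epochOfBlock(blockNumber uint64, backend polytypes.Blockchain) (uint64, error) {
	// one call returns the header and its decoded Extra, or blockchain.ErrNoBlock
	_, extra, err := helpers.GetBlockData(blockNumber, backend)
	if err != nil {
		return 0, err
	}

	return extra.BlockMetaData.EpochNumber, nil
}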
append(headers, header) + } + + return headers +} + +func CreateTestKey(t *testing.T) *wallet.Key { + t.Helper() + + return wallet.NewKey(GenerateTestAccount(t)) +} + +func CreateRandomTestKeys(t *testing.T, numberOfKeys int) []*wallet.Key { + t.Helper() + + result := make([]*wallet.Key, numberOfKeys) + + for i := 0; i < numberOfKeys; i++ { + result[i] = wallet.NewKey(GenerateTestAccount(t)) + } + + return result +} + +func CreateSignature(t *testing.T, accounts []*wallet.Account, hash types.Hash, domain []byte) *polytypes.Signature { + t.Helper() + + var signatures bls.Signatures + + var bmp bitmap.Bitmap + for i, x := range accounts { + bmp.Set(uint64(i)) + + src, err := x.Bls.Sign(hash[:], domain) + require.NoError(t, err) + + signatures = append(signatures, src) + } + + aggs, err := signatures.Aggregate().Marshal() + require.NoError(t, err) + + return &polytypes.Signature{AggregatedSignature: aggs, Bitmap: bmp} +} + +func GenerateTestAccount(tb testing.TB) *wallet.Account { + tb.Helper() + + acc, err := wallet.GenerateAccount() + require.NoError(tb, err) + + return acc +} diff --git a/consensus/polybft/helpers_test.go b/consensus/polybft/helpers_test.go deleted file mode 100644 index 6f56ef7f95..0000000000 --- a/consensus/polybft/helpers_test.go +++ /dev/null @@ -1,209 +0,0 @@ -package polybft - -import ( - "crypto/rand" - "fmt" - "math/big" - "os" - "path" - "testing" - "time" - - "github.com/0xPolygon/polygon-edge/bls" - "github.com/0xPolygon/polygon-edge/consensus/polybft/bitmap" - "github.com/0xPolygon/polygon-edge/consensus/polybft/contractsapi" - "github.com/0xPolygon/polygon-edge/consensus/polybft/validator" - "github.com/0xPolygon/polygon-edge/consensus/polybft/wallet" - "github.com/0xPolygon/polygon-edge/types" - "github.com/stretchr/testify/require" -) - -func createTestKey(t *testing.T) *wallet.Key { - t.Helper() - - return wallet.NewKey(generateTestAccount(t)) -} - -func createRandomTestKeys(t *testing.T, numberOfKeys int) []*wallet.Key { - t.Helper() - - result := make([]*wallet.Key, numberOfKeys) - - for i := 0; i < numberOfKeys; i++ { - result[i] = wallet.NewKey(generateTestAccount(t)) - } - - return result -} - -func createSignature(t *testing.T, accounts []*wallet.Account, hash types.Hash, domain []byte) *Signature { - t.Helper() - - var signatures bls.Signatures - - var bmp bitmap.Bitmap - for i, x := range accounts { - bmp.Set(uint64(i)) - - src, err := x.Bls.Sign(hash[:], domain) - require.NoError(t, err) - - signatures = append(signatures, src) - } - - aggs, err := signatures.Aggregate().Marshal() - require.NoError(t, err) - - return &Signature{AggregatedSignature: aggs, Bitmap: bmp} -} - -func createTestCommitEpochInput(t *testing.T, epochID uint64, - epochSize uint64) *contractsapi.CommitEpochEpochManagerFn { - t.Helper() - - var startBlock uint64 = 0 - if epochID > 1 { - startBlock = (epochID - 1) * epochSize - } - - commitEpoch := &contractsapi.CommitEpochEpochManagerFn{ - ID: new(big.Int).SetUint64(epochID), - Epoch: &contractsapi.Epoch{ - StartBlock: new(big.Int).SetUint64(startBlock + 1), - EndBlock: new(big.Int).SetUint64(epochSize * epochID), - EpochRoot: types.Hash{}, - }, - EpochSize: new(big.Int).SetUint64(epochSize), - } - - return commitEpoch -} - -func createTestCommitValidatorSetBridgeStorageInput(t *testing.T, validatorSet []*contractsapi.Validator, - signature [2]*big.Int, bitmap []byte) *contractsapi.CommitValidatorSetBridgeStorageFn { - t.Helper() - - commitValidatorSet := &contractsapi.CommitValidatorSetBridgeStorageFn{ - NewValidatorSet: 
validatorSet, - Signature: signature, - Bitmap: bitmap, - } - - return commitValidatorSet -} - -func createTestDistributeRewardsInput(t *testing.T, epochID uint64, - validatorSet validator.AccountSet, epochSize uint64) *contractsapi.DistributeRewardForEpochManagerFn { - t.Helper() - - if validatorSet == nil { - validatorSet = validator.NewTestValidators(t, 5).GetPublicIdentities() - } - - uptime := make([]*contractsapi.Uptime, len(validatorSet)) - - for i, v := range validatorSet { - uptime[i] = &contractsapi.Uptime{ - Validator: v.Address, - SignedBlocks: new(big.Int).SetUint64(epochSize), - } - } - - return &contractsapi.DistributeRewardForEpochManagerFn{ - EpochID: new(big.Int).SetUint64(epochID), - Uptime: uptime, - EpochSize: new(big.Int).SetUint64(epochSize), - } -} - -func generateBridgeMessageEvents(t *testing.T, eventsCount int, startIdx uint64) []*contractsapi.BridgeMsgEvent { - t.Helper() - - bridgeMessageEvents := make([]*contractsapi.BridgeMsgEvent, eventsCount) - for i := 0; i < eventsCount; i++ { - bridgeMessageEvents[i] = &contractsapi.BridgeMsgEvent{ - ID: big.NewInt(int64(startIdx + uint64(i))), - Sender: types.StringToAddress(fmt.Sprintf("0x5%d", i)), - Receiver: types.StringToAddress(fmt.Sprintf("0x4%d", i)), - Data: generateRandomBytes(t), - SourceChainID: big.NewInt(1), - DestinationChainID: big.NewInt(0), - } - } - - return bridgeMessageEvents -} - -// generateRandomBytes generates byte array with random data of 32 bytes length -func generateRandomBytes(t *testing.T) (result []byte) { - t.Helper() - - result = make([]byte, types.HashLength) - _, err := rand.Reader.Read(result) - require.NoError(t, err, "Cannot generate random byte array content.") - - return -} - -// getEpochNumber returns epoch number for given blockNumber and epochSize. -// Epoch number is derived as a result of division of block number and epoch size. -// Since epoch number is 1-based (0 block represents special case zero epoch), -// we are incrementing result by one for non epoch-ending blocks. -func getEpochNumber(t *testing.T, blockNumber, epochSize uint64) uint64 { - t.Helper() - - if isEndOfPeriod(blockNumber, epochSize) { - return blockNumber / epochSize - } - - return blockNumber/epochSize + 1 -} - -// newTestState creates new instance of state used by tests. 
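The 1-based epoch numbering described in getEpochNumber's comment is easiest to see with concrete values; a small worked example, assuming epochSize = 10 (values are illustrative only).

// assuming epochSize = 10:
//   block  9 -> 9/10 + 1  = epoch 1 (not epoch-ending)
//   block 10 -> 10/10     = epoch 1 (epoch-ending)
//   block 11 -> 11/10 + 1 = epoch 2 (first block of the next epoch)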
-func newTestState(tb testing.TB) *State { - tb.Helper() - - dir := fmt.Sprintf("/tmp/consensus-temp_%v", time.Now().UTC().Format(time.RFC3339Nano)) - err := os.Mkdir(dir, 0775) - - if err != nil { - tb.Fatal(err) - } - - state, err := newState(path.Join(dir, "my.db"), make(chan struct{}), []uint64{0, 1}) - if err != nil { - tb.Fatal(err) - } - - tb.Cleanup(func() { - if err := os.RemoveAll(dir); err != nil { - tb.Fatal(err) - } - }) - - return state -} - -func generateTestAccount(tb testing.TB) *wallet.Account { - tb.Helper() - - acc, err := wallet.GenerateAccount() - require.NoError(tb, err) - - return acc -} - -// createTestBridgeConfig creates test bridge configuration with hard-coded addresses -func createTestBridgeConfig() *BridgeConfig { - return &BridgeConfig{ - ExternalGatewayAddr: types.StringToAddress("1"), - ExternalERC20PredicateAddr: types.StringToAddress("2"), - ExternalMintableERC20PredicateAddr: types.StringToAddress("3"), - ExternalNativeERC20Addr: types.StringToAddress("4"), - ExternalERC721PredicateAddr: types.StringToAddress("5"), - ExternalMintableERC721PredicateAddr: types.StringToAddress("6"), - ExternalERC1155PredicateAddr: types.StringToAddress("7"), - ExternalMintableERC1155PredicateAddr: types.StringToAddress("8"), - JSONRPCEndpoint: "http://localhost:8545", - } -} diff --git a/consensus/polybft/metrics/consensus_metrics.go b/consensus/polybft/metrics/consensus_metrics.go new file mode 100644 index 0000000000..c178ba6e2b --- /dev/null +++ b/consensus/polybft/metrics/consensus_metrics.go @@ -0,0 +1,59 @@ +package metrics + +import ( + "time" + + polytypes "github.com/0xPolygon/polygon-edge/consensus/polybft/types" + "github.com/0xPolygon/polygon-edge/types" + "github.com/armon/go-metrics" +) + +const ( + // ConsensusMetricsPrefix is a consensus-related metrics prefix + ConsensusMetricsPrefix = "consensus" +) + +// UpdateBlockMetrics updates various metrics based on the given block +// (such as block interval, number of transactions and block rounds metrics) +func UpdateBlockMetrics(currentBlock *types.Block, parentHeader *types.Header) error { + if currentBlock.Number() > 1 { + parentTime := time.Unix(int64(parentHeader.Timestamp), 0) + headerTime := time.Unix(int64(currentBlock.Header.Timestamp), 0) + // update the block interval metric + metrics.SetGauge([]string{ConsensusMetricsPrefix, "block_interval"}, float32(headerTime.Sub(parentTime).Seconds())) + } + + // update the number of transactions in the block metric + metrics.SetGauge([]string{ConsensusMetricsPrefix, "num_txs"}, float32(len(currentBlock.Transactions))) + + extra, err := polytypes.GetIbftExtra(currentBlock.Header.ExtraData) + if err != nil { + return err + } + + // number of rounds needed to seal a block + metrics.SetGauge([]string{ConsensusMetricsPrefix, "rounds"}, float32(extra.BlockMetaData.BlockRound)) + metrics.SetGauge([]string{ConsensusMetricsPrefix, "chain_head"}, float32(currentBlock.Number())) + metrics.IncrCounter([]string{ConsensusMetricsPrefix, "block_counter"}, float32(1)) + metrics.SetGauge([]string{ConsensusMetricsPrefix, "block_space_used"}, float32(currentBlock.Header.GasUsed)) + + // Update the base fee metric + metrics.SetGauge([]string{ConsensusMetricsPrefix, "base_fee"}, float32(currentBlock.Header.BaseFee)) + + return nil +} + +// UpdateEpochMetrics updates epoch-related metrics +// (e.g. 
epoch number, validator set length) +func UpdateEpochMetrics(epoch uint64, validatorsLen int) { + // update epoch number metrics + metrics.SetGauge([]string{ConsensusMetricsPrefix, "epoch_number"}, float32(epoch)) + // update number of validators metrics + metrics.SetGauge([]string{ConsensusMetricsPrefix, "validators"}, float32(validatorsLen)) +} + +// UpdateBlockExecutionMetric updates the block execution metric +func UpdateBlockExecutionMetric(start time.Time) { + metrics.SetGauge([]string{ConsensusMetricsPrefix, "block_execution_time"}, + float32(time.Now().UTC().Sub(start).Seconds())) +} diff --git a/consensus/polybft/polybft.go b/consensus/polybft/polybft.go index 5ae188f9ae..ea01fd2cdf 100644 --- a/consensus/polybft/polybft.go +++ b/consensus/polybft/polybft.go @@ -15,9 +15,15 @@ import ( "github.com/0xPolygon/go-ibft/core" "github.com/0xPolygon/polygon-edge/chain" "github.com/0xPolygon/polygon-edge/consensus" + "github.com/0xPolygon/polygon-edge/consensus/polybft/blockchain" + "github.com/0xPolygon/polygon-edge/consensus/polybft/bridge" + "github.com/0xPolygon/polygon-edge/consensus/polybft/config" "github.com/0xPolygon/polygon-edge/consensus/polybft/contractsapi" "github.com/0xPolygon/polygon-edge/consensus/polybft/signer" + polystate "github.com/0xPolygon/polygon-edge/consensus/polybft/state" + polytypes "github.com/0xPolygon/polygon-edge/consensus/polybft/types" "github.com/0xPolygon/polygon-edge/consensus/polybft/validator" + vs "github.com/0xPolygon/polygon-edge/consensus/polybft/validator-snapshot" "github.com/0xPolygon/polygon-edge/consensus/polybft/wallet" "github.com/0xPolygon/polygon-edge/contracts" "github.com/0xPolygon/polygon-edge/forkmanager" @@ -38,20 +44,6 @@ const ( baseRoundTimeoutScaleFactor = 2 ) -// polybftBackend is an interface defining polybft methods needed by fsm and sync tracker -type polybftBackend interface { - // GetValidators retrieves validator set for the given block - GetValidators(blockNumber uint64, parents []*types.Header) (validator.AccountSet, error) - - // GetValidators retrieves validator set for the given block - // Function expects that db tx is already open - GetValidatorsWithTx(blockNumber uint64, parents []*types.Header, - dbTx *bolt.Tx) (validator.AccountSet, error) - - // SetBlockTime updates the block time - SetBlockTime(blockTime time.Duration) -} - // Factory is the factory function to create a discovery consensus func Factory(params *consensus.Params) (consensus.Consensus, error) { logger := params.Logger.Named("polybft") @@ -87,16 +79,16 @@ type Polybft struct { ibft *IBFTConsensusWrapper // state is reference to the struct which encapsulates consensus data persistence logic - state *State + state *polystate.State // consensus parameters config *consensus.Params // genesisClientConfig is genesis configuration for polybft consensus protocol - genesisClientConfig *PolyBFTConfig + genesisClientConfig *config.PolyBFT // blockchain is a reference to the blockchain object - blockchain blockchainBackend + blockchain polytypes.Blockchain // runtime handles consensus runtime features like epoch, state and event management runtime *consensusRuntime @@ -117,7 +109,7 @@ type Polybft struct { key *wallet.Key // validatorsCache represents cache of validators snapshots - validatorsCache *validatorsSnapshotCache + validatorsCache *vs.ValidatorsSnapshotCache // logger logger hclog.Logger @@ -126,9 +118,9 @@ type Polybft struct { txPool txPoolInterface } -func GenesisPostHookFactory(config *chain.Chain, engineName string) func(txn 
*state.Transition) error { +func GenesisPostHookFactory(cfg *chain.Chain, engineName string) func(txn *state.Transition) error { return func(transition *state.Transition) error { - polyBFTConfig, err := GetPolyBFTConfig(config.Params) + polyBFTConfig, err := config.GetPolyBFTConfig(cfg.Params) if err != nil { return err } @@ -138,7 +130,7 @@ func GenesisPostHookFactory(config *chain.Chain, engineName string) func(txn *st // that is used for minting and burning native token initialTotalSupply := big.NewInt(0) - for addr, alloc := range config.Genesis.Alloc { + for addr, alloc := range cfg.Genesis.Alloc { if addr == types.ZeroAddress { continue } @@ -148,12 +140,12 @@ func GenesisPostHookFactory(config *chain.Chain, engineName string) func(txn *st proxyAddrMapping := contracts.GetProxyImplementationMapping() - burnContractAddress, isBurnContractSet := getBurnContractAddress(config, polyBFTConfig) + burnContractAddress, isBurnContractSet := getBurnContractAddress(cfg, polyBFTConfig) if isBurnContractSet { proxyAddrMapping[contracts.DefaultBurnContract] = burnContractAddress } - if _, ok := config.Genesis.Alloc[contracts.RewardTokenContract]; ok { + if _, ok := cfg.Genesis.Alloc[contracts.RewardTokenContract]; ok { proxyAddrMapping[contracts.RewardTokenContract] = contracts.RewardTokenContractV1 } @@ -162,7 +154,7 @@ func GenesisPostHookFactory(config *chain.Chain, engineName string) func(txn *st } // initialize NetworkParams SC - if err = initNetworkParamsContract(config.Params.BaseFeeChangeDenom, polyBFTConfig, transition); err != nil { + if err = initNetworkParamsContract(cfg.Params.BaseFeeChangeDenom, polyBFTConfig, transition); err != nil { return err } @@ -212,13 +204,13 @@ func GenesisPostHookFactory(config *chain.Chain, engineName string) func(txn *st } bridgeCfgMap := polyBFTConfig.Bridge - isBridgeAllowListEnabled := config.Params.IsBridgeAllowListEnabled() - isBridgeBlockListEnabled := config.Params.IsBridgeBlockListEnabled() + isBridgeAllowListEnabled := cfg.Params.IsBridgeAllowListEnabled() + isBridgeBlockListEnabled := cfg.Params.IsBridgeBlockListEnabled() // initialize Predicate SCs if isBridgeAllowListEnabled || isBridgeBlockListEnabled { // The owner of the contract will be the allow list admin or the block list admin, if any of them is set. 
- owner := config.Params.GetBridgeOwner() + owner := cfg.Params.GetBridgeOwner() useBridgeAllowList := isBridgeAllowListEnabled useBridgeBlockList := isBridgeBlockListEnabled @@ -226,47 +218,47 @@ func GenesisPostHookFactory(config *chain.Chain, engineName string) func(txn *st chainIDBig := new(big.Int).SetUint64(chainID) // initialize Gateway SC - if err = initGatewayContract(polyBFTConfig, chainBridgeCfg, transition, config.Genesis.Alloc); err != nil { + if err = initGatewayContract(polyBFTConfig, chainBridgeCfg, transition, cfg.Genesis.Alloc); err != nil { return err } // initialize ChildERC20PredicateAccessList SC - if err = initERC20ACLPredicateContract(transition, chainBridgeCfg, config.Genesis.Alloc, + if err = initERC20ACLPredicateContract(transition, chainBridgeCfg, cfg.Genesis.Alloc, owner, useBridgeAllowList, useBridgeBlockList, false, chainIDBig, "ChildERC20PredicateAccessList"); err != nil { return err } // initialize ChildERC721PredicateAccessList SC - if err = initERC721ACLPredicateContract(transition, chainBridgeCfg, config.Genesis.Alloc, + if err = initERC721ACLPredicateContract(transition, chainBridgeCfg, cfg.Genesis.Alloc, owner, useBridgeAllowList, useBridgeBlockList, false, chainIDBig, "ChildERC721PredicateAccessList"); err != nil { return err } // initialize ChildERC1155PredicateAccessList SC - if err = initERC1155ACLPredicateContract(transition, chainBridgeCfg, config.Genesis.Alloc, + if err = initERC1155ACLPredicateContract(transition, chainBridgeCfg, cfg.Genesis.Alloc, owner, useBridgeAllowList, useBridgeBlockList, false, chainIDBig, "ChildERC1155PredicateAccessList"); err != nil { return err } // initialize RootMintableERC20PredicateAccessList SC - if err = initERC20ACLPredicateContract(transition, chainBridgeCfg, config.Genesis.Alloc, + if err = initERC20ACLPredicateContract(transition, chainBridgeCfg, cfg.Genesis.Alloc, owner, useBridgeAllowList, useBridgeBlockList, true, chainIDBig, "RootERC20PredicateAccessList"); err != nil { return err } // initialize RootMintableERC721PredicateAccessList SC - if err = initERC721ACLPredicateContract(transition, chainBridgeCfg, config.Genesis.Alloc, + if err = initERC721ACLPredicateContract(transition, chainBridgeCfg, cfg.Genesis.Alloc, owner, useBridgeAllowList, useBridgeBlockList, true, chainIDBig, "RootERC721PredicateAccessList"); err != nil { return err } // initialize RootMintableERC1155PredicateAccessList SC - if err = initERC1155ACLPredicateContract(transition, chainBridgeCfg, config.Genesis.Alloc, + if err = initERC1155ACLPredicateContract(transition, chainBridgeCfg, cfg.Genesis.Alloc, owner, useBridgeAllowList, useBridgeBlockList, true, chainIDBig, "RootERC1155PredicateAccessList"); err != nil { return err @@ -277,42 +269,42 @@ func GenesisPostHookFactory(config *chain.Chain, engineName string) func(txn *st destChainIDBig := new(big.Int).SetUint64(destChainID) // initialize Gateway SC - if err = initGatewayContract(polyBFTConfig, chainBridgeCfg, transition, config.Genesis.Alloc); err != nil { + if err = initGatewayContract(polyBFTConfig, chainBridgeCfg, transition, cfg.Genesis.Alloc); err != nil { return err } // initialize ChildERC20Predicate SC - if err = initERC20PredicateContract(transition, chainBridgeCfg, config.Genesis.Alloc, + if err = initERC20PredicateContract(transition, chainBridgeCfg, cfg.Genesis.Alloc, false, destChainIDBig, "ChildERC20Predicate"); err != nil { return err } // initialize ChildERC721Predicate SC - if err = initERC721PredicateContract(transition, chainBridgeCfg, config.Genesis.Alloc, + if 
err = initERC721PredicateContract(transition, chainBridgeCfg, cfg.Genesis.Alloc, false, destChainIDBig, "ChildERC721Predicate"); err != nil { return err } // initialize ChildERC1155Predicate SC - if err = initERC1155PredicateContract(transition, chainBridgeCfg, config.Genesis.Alloc, + if err = initERC1155PredicateContract(transition, chainBridgeCfg, cfg.Genesis.Alloc, false, destChainIDBig, "ChildERC1155Predicate"); err != nil { return err } // initialize RootMintableERC20Predicate SC - if err = initERC20PredicateContract(transition, chainBridgeCfg, config.Genesis.Alloc, + if err = initERC20PredicateContract(transition, chainBridgeCfg, cfg.Genesis.Alloc, true, destChainIDBig, "RootERC20Predicate"); err != nil { return err } // initialize RootMintableERC721Predicate SC - if err = initERC721PredicateContract(transition, chainBridgeCfg, config.Genesis.Alloc, + if err = initERC721PredicateContract(transition, chainBridgeCfg, cfg.Genesis.Alloc, true, destChainIDBig, "RootERC721Predicate"); err != nil { return err } // initialize RootMintableERC1155Predicate SC - if err = initERC1155PredicateContract(transition, chainBridgeCfg, config.Genesis.Alloc, + if err = initERC1155PredicateContract(transition, chainBridgeCfg, cfg.Genesis.Alloc, true, destChainIDBig, "RootERC1155Predicate"); err != nil { return err } @@ -371,7 +363,7 @@ func GenesisPostHookFactory(config *chain.Chain, engineName string) func(txn *st if isBurnContractSet { burnParams := &contractsapi.InitializeEIP1559BurnFn{ NewChildERC20Predicate: polyBFTConfig.Bridge[polyBFTConfig.NativeTokenConfig.ChainID].InternalERC20PredicateAddr, - NewBurnDestination: config.Params.BurnContractDestinationAddress, + NewBurnDestination: cfg.Params.BurnContractDestinationAddress, } input, err = burnParams.EncodeAbi() @@ -397,8 +389,8 @@ func ForkManagerFactory(forks *chain.Forks) error { } // IsL1OriginatedTokenCheck checks if the token is originated from L1 -func IsL1OriginatedTokenCheck(config *chain.Params) (bool, error) { - polyBFTConfig, err := GetPolyBFTConfig(config) +func IsL1OriginatedTokenCheck(cfg *chain.Params) (bool, error) { + polyBFTConfig, err := config.GetPolyBFTConfig(cfg) if err != nil { return false, err } @@ -428,11 +420,11 @@ func (p *Polybft) Initialize() error { ) // set blockchain backend - p.blockchain = &blockchainWrapper{ - logger: p.logger.Named("blockchain_wrapper"), - blockchain: p.config.Blockchain, - executor: p.config.Executor, - } + p.blockchain = blockchain.NewBlockchainWrapper( + p.logger.Named("blockchain_wrapper"), + p.config.Blockchain, + p.config.Executor, + ) // create bridge and consensus topics if err = p.createTopics(); err != nil { @@ -446,13 +438,15 @@ func (p *Polybft) Initialize() error { return fmt.Errorf("failed to create data directory. Error: %w", err) } - stt, err := newState(filepath.Join(p.dataDir, stateFileName), p.closeCh, p.getSupportedBridgeChainIDs()) + p.state, err = polystate.NewState(p.dataDir, p.closeCh) if err != nil { return fmt.Errorf("failed to create state instance. Error: %w", err) } - p.state = stt - p.validatorsCache = newValidatorsSnapshotCache(p.config.Logger, stt, p.blockchain) + p.validatorsCache, err = vs.NewValidatorsSnapshotCache(p.config.Logger, p.state, p.blockchain) + if err != nil { + return fmt.Errorf("failed to create validators cache. 
Error: %w", err) + } // create runtime if err := p.initRuntime(); err != nil { @@ -468,8 +462,8 @@ func (p *Polybft) Initialize() error { return nil } -func ForkManagerInitialParamsFactory(config *chain.Chain) (*forkmanager.ForkParams, error) { - pbftConfig, err := GetPolyBFTConfig(config.Params) +func ForkManagerInitialParamsFactory(cfg *chain.Chain) (*forkmanager.ForkParams, error) { + pbftConfig, err := config.GetPolyBFTConfig(cfg.Params) if err != nil { return nil, err } @@ -514,7 +508,7 @@ func (p *Polybft) Start() error { } // start state DB process - go p.state.startStatsReleasing() + go p.state.StartStatsReleasing() /* // polybft rootchain metrics go p.publishRootchainMetrics(p.logger.Named("rootchain_metrics")) */ @@ -524,22 +518,18 @@ func (p *Polybft) Start() error { // initRuntime creates consensus runtime func (p *Polybft) initRuntime() error { - runtimeConfig := &runtimeConfig{ - genesisParams: p.config.Config.Params, + runtimeConfig := &config.Runtime{ + ChainParams: p.config.Config.Params, GenesisConfig: p.genesisClientConfig, Forks: p.config.Config.Params.Forks, Key: p.key, - DataDir: p.dataDir, - State: p.state, - blockchain: p.blockchain, - polybftBackend: p, - txPool: p.txPool, - bridgeTopic: p.bridgeTopic, - consensusConfig: p.config.Config, - eventTracker: p.config.EventTracker, + StateDataDir: p.dataDir, + ConsensusConfig: p.config.Config, + EventTracker: p.config.EventTracker, } - runtime, err := newConsensusRuntime(p.logger, runtimeConfig) + runtime, err := newConsensusRuntime(p.logger, runtimeConfig, p.state, p, + p.blockchain, p.txPool, p.bridgeTopic) if err != nil { return err } @@ -668,9 +658,8 @@ func (p *Polybft) Close() error { close(p.closeCh) p.runtime.close() - p.state.db.Close() - return nil + return p.state.Close() } // GetSyncProgression retrieves the current sync progression, if any @@ -703,7 +692,7 @@ func (p *Polybft) verifyHeaderImpl(parent, header *types.Header, blockTimeDrift } // decode the extra data - extra, err := GetIbftExtra(header.ExtraData) + extra, err := polytypes.GetIbftExtra(header.ExtraData) if err != nil { return fmt.Errorf("failed to verify header for block %d. 
get extra error = %w", header.Number, err) } @@ -765,7 +754,7 @@ func (p *Polybft) PreCommitState(block *types.Block, _ *state.Transition) error return fmt.Errorf("unknown state transaction: tx=%v, error: %w", tx.Hash(), err) } - if signedBridgeBatch, ok := decodedStateTx.(*BridgeBatchSigned); ok { + if signedBridgeBatch, ok := decodedStateTx.(*bridge.BridgeBatchSigned); ok { if bridgeBatchTxExists { return fmt.Errorf("only one bridge batch state tx is allowed per block: %v", tx.Hash()) } @@ -796,18 +785,7 @@ func (p *Polybft) GetLatestChainConfig() (*chain.Params, error) { // FilterExtra is an implementation of Consensus interface func (p *Polybft) FilterExtra(extra []byte) ([]byte, error) { - return GetIbftExtraClean(extra) -} - -// getSupportedBridgeChainIDs return list of all chainIDs -func (p *Polybft) getSupportedBridgeChainIDs() []uint64 { - chainIDs := make([]uint64, 0, len(p.genesisClientConfig.Bridge)) - - for chainID := range p.genesisClientConfig.Bridge { - chainIDs = append(chainIDs, chainID) - } - - return chainIDs + return polytypes.GetIbftExtraClean(extra) } // initProxies initializes proxy contracts, that allow upgradeability of contracts implementation @@ -846,7 +824,7 @@ func initProxies(transition *state.Transition, admin types.Address, return nil } -func getBurnContractAddress(config *chain.Chain, polyBFTConfig PolyBFTConfig) (types.Address, bool) { +func getBurnContractAddress(config *chain.Chain, polyBFTConfig config.PolyBFT) (types.Address, bool) { if config.Params.BurnContract != nil && len(config.Params.BurnContract) == 1 && !polyBFTConfig.NativeTokenConfig.IsMintable { diff --git a/consensus/polybft/polybft_test.go b/consensus/polybft/polybft_test.go index 4a185fa97a..827c1ce0fc 100644 --- a/consensus/polybft/polybft_test.go +++ b/consensus/polybft/polybft_test.go @@ -8,8 +8,15 @@ import ( "github.com/0xPolygon/polygon-edge/chain" "github.com/0xPolygon/polygon-edge/consensus" + "github.com/0xPolygon/polygon-edge/consensus/polybft/bridge" + "github.com/0xPolygon/polygon-edge/consensus/polybft/config" + "github.com/0xPolygon/polygon-edge/consensus/polybft/helpers" "github.com/0xPolygon/polygon-edge/consensus/polybft/signer" + "github.com/0xPolygon/polygon-edge/consensus/polybft/state" + systemstate "github.com/0xPolygon/polygon-edge/consensus/polybft/system_state" + polytypes "github.com/0xPolygon/polygon-edge/consensus/polybft/types" "github.com/0xPolygon/polygon-edge/consensus/polybft/validator" + vs "github.com/0xPolygon/polygon-edge/consensus/polybft/validator-snapshot" "github.com/0xPolygon/polygon-edge/consensus/polybft/wallet" "github.com/0xPolygon/polygon-edge/helper/progress" "github.com/0xPolygon/polygon-edge/txpool" @@ -19,7 +26,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - "go.etcd.io/bbolt" ) // the test initializes polybft and chain mock (map of headers) after which a new header is verified @@ -37,18 +43,18 @@ func TestPolybft_VerifyHeader(t *testing.T) { updateHeaderExtra := func(header *types.Header, validators *validator.ValidatorSetDelta, - parentSignature *Signature, - blockMeta *BlockMetaData, - committedAccounts []*wallet.Account) *Signature { - extra := &Extra{ + parentSignature *polytypes.Signature, + blockMeta *polytypes.BlockMetaData, + committedAccounts []*wallet.Account) *polytypes.Signature { + extra := &polytypes.Extra{ Validators: validators, Parent: parentSignature, BlockMetaData: blockMeta, - Committed: &Signature{}, + Committed: &polytypes.Signature{}, } 
if extra.BlockMetaData == nil { - extra.BlockMetaData = &BlockMetaData{} + extra.BlockMetaData = &polytypes.BlockMetaData{} } header.ExtraData = extra.MarshalRLPTo(nil) @@ -58,7 +64,7 @@ func TestPolybft_VerifyHeader(t *testing.T) { blockMetaHash, err := extra.BlockMetaData.Hash(header.Hash) require.NoError(t, err) - extra.Committed = createSignature(t, committedAccounts, blockMetaHash, signer.DomainBridge) + extra.Committed = helpers.CreateSignature(t, committedAccounts, blockMetaHash, signer.DomainBridge) header.ExtraData = extra.MarshalRLPTo(nil) } @@ -69,7 +75,7 @@ func TestPolybft_VerifyHeader(t *testing.T) { validators := validator.NewTestValidators(t, allValidatorsSize) // create configuration - polyBftConfig := PolyBFTConfig{ + polyBftConfig := config.PolyBFT{ InitialValidatorSet: validators.GetParamValidators(), EpochSize: fixedEpochSize, SprintSize: 5, @@ -84,7 +90,7 @@ func TestPolybft_VerifyHeader(t *testing.T) { accountSetParent, accountSetCurrent := accounts[:len(accounts)-1], accounts[1:] // create header map to simulate blockchain - headersMap := &testHeadersMap{} + headersMap := &helpers.TestHeadersMap{} // create genesis header genesisDelta, err := validator.CreateValidatorSetDelta(nil, validatorSetParent) @@ -94,7 +100,7 @@ func TestPolybft_VerifyHeader(t *testing.T) { updateHeaderExtra(genesisHeader, genesisDelta, nil, nil, nil) // add genesis header to map - headersMap.addHeader(genesisHeader) + headersMap.AddHeader(genesisHeader) // create headers from 1 to 9 for i := uint64(1); i < polyBftConfig.EpochSize; i++ { @@ -102,28 +108,31 @@ func TestPolybft_VerifyHeader(t *testing.T) { require.NoError(t, err) header := &types.Header{Number: i} - updateHeaderExtra(header, delta, nil, &BlockMetaData{EpochNumber: 1}, nil) + updateHeaderExtra(header, delta, nil, &polytypes.BlockMetaData{EpochNumber: 1}, nil) // add headers from 1 to 9 to map (blockchain imitation) - headersMap.addHeader(header) + headersMap.AddHeader(header) } // mock blockchain - blockchainMock := new(blockchainMock) - blockchainMock.On("GetHeaderByNumber", mock.Anything).Return(headersMap.getHeader) - blockchainMock.On("GetHeaderByHash", mock.Anything).Return(headersMap.getHeaderByHash) + blockchainMock := new(helpers.BlockchainMock) + blockchainMock.On("GetHeaderByNumber", mock.Anything).Return(headersMap.GetHeader) + blockchainMock.On("GetHeaderByHash", mock.Anything).Return(headersMap.GetHeaderByHash) // create polybft with appropriate mocks + validatorSnapCache, err := vs.NewValidatorsSnapshotCache( + hclog.NewNullLogger(), + state.NewTestState(t), + blockchainMock, + ) + require.NoError(t, err) + polybft := &Polybft{ closeCh: make(chan struct{}), logger: hclog.NewNullLogger(), genesisClientConfig: &polyBftConfig, blockchain: blockchainMock, - validatorsCache: newValidatorsSnapshotCache( - hclog.NewNullLogger(), - newTestState(t), - blockchainMock, - ), + validatorsCache: validatorSnapCache, runtime: &consensusRuntime{ epoch: &epochMetadata{ CurrentClientConfig: &polyBftConfig, @@ -139,10 +148,10 @@ func TestPolybft_VerifyHeader(t *testing.T) { Number: polyBftConfig.EpochSize, Timestamp: uint64(time.Now().UTC().Unix()), } - parentCommitment := updateHeaderExtra(parentHeader, parentDelta, nil, &BlockMetaData{EpochNumber: 1}, accountSetParent) + parentCommitment := updateHeaderExtra(parentHeader, parentDelta, nil, &polytypes.BlockMetaData{EpochNumber: 1}, accountSetParent) // add parent header to map - headersMap.addHeader(parentHeader) + headersMap.AddHeader(parentHeader) // create current header (block 
11) with all appropriate fields required for validation currentDelta, err := validator.CreateValidatorSetDelta(validatorSetCurrent, validatorSetCurrent) @@ -152,11 +161,11 @@ func TestPolybft_VerifyHeader(t *testing.T) { Number: polyBftConfig.EpochSize + 1, ParentHash: parentHeader.Hash, Timestamp: parentHeader.Timestamp + 1, - MixHash: PolyBFTMixDigest, + MixHash: polytypes.PolyBFTMixDigest, Difficulty: 1, } updateHeaderExtra(currentHeader, currentDelta, nil, - &BlockMetaData{ + &polytypes.BlockMetaData{ EpochNumber: 2, }, nil) @@ -165,7 +174,7 @@ func TestPolybft_VerifyHeader(t *testing.T) { // omit Parent field (parent signature) intentionally updateHeaderExtra(currentHeader, currentDelta, nil, - &BlockMetaData{ + &polytypes.BlockMetaData{ EpochNumber: 1}, accountSetCurrent) @@ -173,46 +182,52 @@ func TestPolybft_VerifyHeader(t *testing.T) { assert.ErrorContains(t, polybft.VerifyHeader(currentHeader), "failed to verify signatures for parent of block") updateHeaderExtra(currentHeader, currentDelta, parentCommitment, - &BlockMetaData{ + &polytypes.BlockMetaData{ EpochNumber: 1}, accountSetCurrent) assert.NoError(t, polybft.VerifyHeader(currentHeader)) + validatorSnapCache, err = vs.NewValidatorsSnapshotCache(hclog.NewNullLogger(), state.NewTestState(t), blockchainMock) + require.NoError(t, err) + // clean validator snapshot cache (re-instantiate it), submit invalid validator set for parent signature and expect the following error - polybft.validatorsCache = newValidatorsSnapshotCache(hclog.NewNullLogger(), newTestState(t), blockchainMock) - assert.NoError(t, polybft.validatorsCache.storeSnapshot( - &validatorSnapshot{Epoch: 0, Snapshot: validatorSetCurrent}, nil)) // invalid validator set is submitted - assert.NoError(t, polybft.validatorsCache.storeSnapshot( - &validatorSnapshot{Epoch: 1, Snapshot: validatorSetCurrent}, nil)) + polybft.validatorsCache = validatorSnapCache + assert.NoError(t, polybft.validatorsCache.StoreSnapshot( + &vs.ValidatorSnapshot{Epoch: 0, Snapshot: validatorSetCurrent}, nil)) // invalid validator set is submitted + assert.NoError(t, polybft.validatorsCache.StoreSnapshot( + &vs.ValidatorSnapshot{Epoch: 1, Snapshot: validatorSetCurrent}, nil)) assert.ErrorContains(t, polybft.VerifyHeader(currentHeader), "failed to verify signatures for parent of block") // clean validators cache again and set valid snapshots - polybft.validatorsCache = newValidatorsSnapshotCache(hclog.NewNullLogger(), newTestState(t), blockchainMock) - assert.NoError(t, polybft.validatorsCache.storeSnapshot( - &validatorSnapshot{Epoch: 0, Snapshot: validatorSetParent}, nil)) - assert.NoError(t, polybft.validatorsCache.storeSnapshot( - &validatorSnapshot{Epoch: 1, Snapshot: validatorSetCurrent}, nil)) + validatorSnapCache, err = vs.NewValidatorsSnapshotCache(hclog.NewNullLogger(), state.NewTestState(t), blockchainMock) + require.NoError(t, err) + + polybft.validatorsCache = validatorSnapCache + assert.NoError(t, polybft.validatorsCache.StoreSnapshot( + &vs.ValidatorSnapshot{Epoch: 0, Snapshot: validatorSetParent}, nil)) + assert.NoError(t, polybft.validatorsCache.StoreSnapshot( + &vs.ValidatorSnapshot{Epoch: 1, Snapshot: validatorSetCurrent}, nil)) assert.NoError(t, polybft.VerifyHeader(currentHeader)) // add current header to the blockchain (headersMap) and try validating again - headersMap.addHeader(currentHeader) + headersMap.AddHeader(currentHeader) assert.NoError(t, polybft.VerifyHeader(currentHeader)) } func TestPolybft_Close(t *testing.T) { t.Parallel() - syncer := &syncerMock{} + syncer := 
&helpers.SyncerMock{} syncer.On("Close", mock.Anything).Return(error(nil)).Once() polybft := Polybft{ closeCh: make(chan struct{}), syncer: syncer, runtime: &consensusRuntime{ - bridge: &dummyBridge{}, + bridge: &bridge.DummyBridge{}, }, - state: &State{db: &bbolt.DB{}}, + state: state.NewTestState(t), } assert.NoError(t, polybft.Close()) @@ -242,7 +257,7 @@ func TestPolybft_GetSyncProgression(t *testing.T) { result := &progress.Progression{} - syncer := &syncerMock{} + syncer := &helpers.SyncerMock{} syncer.On("GetSyncProgression", mock.Anything).Return(result).Once() polybft := Polybft{ @@ -295,22 +310,22 @@ func Test_GenesisPostHookFactory(t *testing.T) { bridgeCfg := createTestBridgeConfig() cases := []struct { name string - config *PolyBFTConfig + config *config.PolyBFT bridgeAllowList *chain.AddressListConfig expectedErr error }{ { name: "access lists disabled", - config: &PolyBFTConfig{ + config: &config.PolyBFT{ InitialValidatorSet: validators.GetParamValidators(), - Bridge: map[uint64]*BridgeConfig{0: bridgeCfg}, + Bridge: map[uint64]*config.Bridge{0: bridgeCfg}, EpochSize: epochSize, - RewardConfig: &RewardsConfig{WalletAmount: ethgo.Ether(1000)}, - NativeTokenConfig: &TokenConfig{Name: "Test", Symbol: "TEST", Decimals: 18}, + RewardConfig: &config.Rewards{WalletAmount: ethgo.Ether(1000)}, + NativeTokenConfig: &config.Token{Name: "Test", Symbol: "TEST", Decimals: 18}, MaxValidatorSetSize: maxValidators, BladeAdmin: admin, - GovernanceConfig: &GovernanceConfig{ - VotingDelay: bigZero, + GovernanceConfig: &config.Governance{ + VotingDelay: big.NewInt(0), VotingPeriod: big.NewInt(10), ProposalThreshold: big.NewInt(25), ProposalQuorumPercentage: 67, @@ -319,16 +334,16 @@ func Test_GenesisPostHookFactory(t *testing.T) { }, { name: "access lists enabled", - config: &PolyBFTConfig{ + config: &config.PolyBFT{ InitialValidatorSet: validators.GetParamValidators(), - Bridge: map[uint64]*BridgeConfig{0: bridgeCfg}, + Bridge: map[uint64]*config.Bridge{0: bridgeCfg}, EpochSize: epochSize, - RewardConfig: &RewardsConfig{WalletAmount: ethgo.Ether(1000)}, - NativeTokenConfig: &TokenConfig{Name: "Test Mintable", Symbol: "TEST_MNT", Decimals: 18}, + RewardConfig: &config.Rewards{WalletAmount: ethgo.Ether(1000)}, + NativeTokenConfig: &config.Token{Name: "Test Mintable", Symbol: "TEST_MNT", Decimals: 18}, MaxValidatorSetSize: maxValidators, BladeAdmin: admin, - GovernanceConfig: &GovernanceConfig{ - VotingDelay: bigZero, + GovernanceConfig: &config.Governance{ + VotingDelay: big.NewInt(0), VotingPeriod: big.NewInt(10), ProposalThreshold: big.NewInt(25), ProposalQuorumPercentage: 67, @@ -347,14 +362,14 @@ func Test_GenesisPostHookFactory(t *testing.T) { t.Parallel() params := &chain.Params{ - Engine: map[string]interface{}{ConsensusName: tc.config}, + Engine: map[string]interface{}{config.ConsensusName: tc.config}, BridgeAllowList: tc.bridgeAllowList, } chainConfig := &chain.Chain{Params: params, Genesis: &chain.Genesis{Alloc: make(map[types.Address]*chain.GenesisAccount)}} - initHandler := GenesisPostHookFactory(chainConfig, ConsensusName) + initHandler := GenesisPostHookFactory(chainConfig, config.ConsensusName) require.NotNil(t, initHandler) - transition := newTestTransition(t, nil) + transition := systemstate.NewTestTransition(t, nil) if tc.expectedErr == nil { require.NoError(t, initHandler(transition)) } else { @@ -363,3 +378,18 @@ func Test_GenesisPostHookFactory(t *testing.T) { }) } } + +// createTestBridgeConfig creates test bridge configuration with hard-coded addresses +func 
createTestBridgeConfig() *config.Bridge { + return &config.Bridge{ + ExternalGatewayAddr: types.StringToAddress("1"), + ExternalERC20PredicateAddr: types.StringToAddress("2"), + ExternalMintableERC20PredicateAddr: types.StringToAddress("3"), + ExternalNativeERC20Addr: types.StringToAddress("4"), + ExternalERC721PredicateAddr: types.StringToAddress("5"), + ExternalMintableERC721PredicateAddr: types.StringToAddress("6"), + ExternalERC1155PredicateAddr: types.StringToAddress("7"), + ExternalMintableERC1155PredicateAddr: types.StringToAddress("8"), + JSONRPCEndpoint: "http://localhost:8545", + } +} diff --git a/consensus/polybft/proposer_calculator.go b/consensus/polybft/proposer/proposer_calculator.go similarity index 87% rename from consensus/polybft/proposer_calculator.go rename to consensus/polybft/proposer/proposer_calculator.go index 115fa79e4b..a96df3ca2b 100644 --- a/consensus/polybft/proposer_calculator.go +++ b/consensus/polybft/proposer/proposer_calculator.go @@ -1,10 +1,14 @@ -package polybft +package proposer import ( "bytes" "fmt" "math/big" + "github.com/0xPolygon/polygon-edge/consensus/polybft/config" + "github.com/0xPolygon/polygon-edge/consensus/polybft/helpers" + "github.com/0xPolygon/polygon-edge/consensus/polybft/state" + polytypes "github.com/0xPolygon/polygon-edge/consensus/polybft/types" "github.com/0xPolygon/polygon-edge/consensus/polybft/validator" "github.com/0xPolygon/polygon-edge/helper/common" "github.com/0xPolygon/polygon-edge/types" @@ -39,15 +43,16 @@ type ProposerSnapshot struct { } // NewProposerSnapshotFromState create ProposerSnapshot from state if possible or from genesis block -func NewProposerSnapshotFromState(config *runtimeConfig, dbTx *bolt.Tx) (*ProposerSnapshot, error) { - snapshot, err := config.State.ProposerSnapshotStore.getProposerSnapshot(dbTx) +func NewProposerSnapshotFromState(proposerStore *ProposerSnapshotStore, + backend polytypes.Polybft, dbTx *bolt.Tx) (*ProposerSnapshot, error) { + snapshot, err := proposerStore.getProposerSnapshot(dbTx) if err != nil { return nil, err } if snapshot == nil { // pick validator set from genesis block if snapshot is not saved in db - genesisValidatorsSet, err := config.polybftBackend.GetValidatorsWithTx(0, nil, dbTx) + genesisValidatorsSet, err := backend.GetValidatorsWithTx(0, nil, dbTx) if err != nil { return nil, err } @@ -156,34 +161,47 @@ type ProposerCalculator struct { snapshot *ProposerSnapshot // runtime configuration - config *runtimeConfig + config *config.Runtime // state to save snapshot - state *State + state *ProposerSnapshotStore + + // blockchain instance + blockchain polytypes.Blockchain + + // polybft backend + backend polytypes.Polybft // logger instance logger hclog.Logger } // NewProposerCalculator creates a new proposer calculator object -func NewProposerCalculator(config *runtimeConfig, logger hclog.Logger, +func NewProposerCalculator(cfg *config.Runtime, logger hclog.Logger, + state *state.State, backend polytypes.Polybft, blockchain polytypes.Blockchain, dbTx *bolt.Tx) (*ProposerCalculator, error) { - snap, err := NewProposerSnapshotFromState(config, dbTx) + proposerStore, err := newProposerSnapshotStoreWithDB(state.DB(), dbTx) + if err != nil { + return nil, fmt.Errorf("cannot create proposer snapshot store: %w", err) + } + snap, err := NewProposerSnapshotFromState(proposerStore, backend, dbTx) if err != nil { return nil, err } pc := &ProposerCalculator{ - snapshot: snap, - config: config, - state: config.State, - logger: logger, + snapshot: snap, + config: cfg, + state: 
proposerStore, + logger: logger, + backend: backend, + blockchain: blockchain, } // If the node was previously stopped, leaving the proposer calculator in an inconsistent state, // proposer calculator needs to be updated. - blockNumber := config.blockchain.CurrentHeader().Number + blockNumber := blockchain.CurrentHeader().Number if pc.snapshot.Height <= blockNumber { if err = pc.update(blockNumber, dbTx); err != nil { return nil, err @@ -194,14 +212,22 @@ func NewProposerCalculator(config *runtimeConfig, logger hclog.Logger, } // NewProposerCalculator creates a new proposer calculator object -func NewProposerCalculatorFromSnapshot(pcs *ProposerSnapshot, config *runtimeConfig, - logger hclog.Logger) *ProposerCalculator { - return &ProposerCalculator{ - snapshot: pcs.Copy(), - config: config, - state: config.State, - logger: logger, +func NewProposerCalculatorFromSnapshot(pcs *ProposerSnapshot, config *config.Runtime, + state *state.State, backend polytypes.Polybft, blockchain polytypes.Blockchain, + logger hclog.Logger) (*ProposerCalculator, error) { + store, err := newProposerSnapshotStore(state.DB()) + if err != nil { + return nil, fmt.Errorf("cannot create proposer snapshot store: %w", err) } + + return &ProposerCalculator{ + snapshot: pcs.Copy(), + config: config, + state: store, + logger: logger, + backend: backend, + blockchain: blockchain, + }, nil } // Get copy of the proposers' snapshot @@ -215,7 +241,7 @@ func (pc *ProposerCalculator) GetSnapshot() (*ProposerSnapshot, bool) { // PostBlock is called on every insert of finalized block (either from consensus or syncer) // It will update priorities and save the updated snapshot to db -func (pc *ProposerCalculator) PostBlock(req *PostBlockRequest) error { +func (pc *ProposerCalculator) PostBlock(req *polytypes.PostBlockRequest) error { return pc.update(req.FullBlock.Block.Number(), req.DBTx) } @@ -236,7 +262,7 @@ func (pc *ProposerCalculator) update(blockNumber uint64, dbTx *bolt.Tx) error { "block", height, "validators", pc.snapshot.Validators) } - if err := pc.state.ProposerSnapshotStore.writeProposerSnapshot(pc.snapshot, dbTx); err != nil { + if err := pc.state.writeProposerSnapshot(pc.snapshot, dbTx); err != nil { return fmt.Errorf("cannot save proposers snapshot for block %d: %w", blockNumber, err) } @@ -253,7 +279,7 @@ func (pc *ProposerCalculator) updatePerBlock(blockNumber uint64, dbTx *bolt.Tx) blockNumber, pc.snapshot.Height) } - _, extra, err := getBlockData(blockNumber, pc.config.blockchain) + _, extra, err := helpers.GetBlockData(blockNumber, pc.blockchain) if err != nil { return fmt.Errorf("cannot get block header and extra while updating proposers snapshot %d: %w", blockNumber, err) } @@ -261,7 +287,7 @@ func (pc *ProposerCalculator) updatePerBlock(blockNumber uint64, dbTx *bolt.Tx) var newValidatorSet validator.AccountSet = nil if extra.Validators != nil && !extra.Validators.IsEmpty() { - newValidatorSet, err = pc.config.polybftBackend.GetValidatorsWithTx(blockNumber, nil, dbTx) + newValidatorSet, err = pc.backend.GetValidatorsWithTx(blockNumber, nil, dbTx) if err != nil { return fmt.Errorf("cannot get validators for block %d: %w", blockNumber, err) } diff --git a/consensus/polybft/state_store_proposer_snapshot.go b/consensus/polybft/proposer/state_store_proposer_snapshot.go similarity index 71% rename from consensus/polybft/state_store_proposer_snapshot.go rename to consensus/polybft/proposer/state_store_proposer_snapshot.go index abcae8456c..6f9b797123 100644 --- a/consensus/polybft/state_store_proposer_snapshot.go 
+++ b/consensus/polybft/proposer/state_store_proposer_snapshot.go @@ -1,4 +1,4 @@ -package polybft +package proposer import ( "encoding/json" @@ -26,12 +26,31 @@ type ProposerSnapshotStore struct { } // initialize creates necessary buckets in DB if they don't already exist -func (s *ProposerSnapshotStore) initialize(tx *bolt.Tx) error { - if _, err := tx.CreateBucketIfNotExists(proposerSnapshotBucket); err != nil { - return fmt.Errorf("failed to create bucket=%s: %w", string(validatorSnapshotsBucket), err) +func newProposerSnapshotStore(db *bolt.DB) (*ProposerSnapshotStore, error) { + var store *ProposerSnapshotStore + + err := db.Update(func(tx *bolt.Tx) error { + s, err := newProposerSnapshotStoreWithDB(db, tx) + if err != nil { + return err + } + + store = s + + return nil + }) + + return store, err +} + +func newProposerSnapshotStoreWithDB(db *bolt.DB, dbTx *bolt.Tx) (*ProposerSnapshotStore, error) { + store := &ProposerSnapshotStore{db: db} + + if _, err := dbTx.CreateBucketIfNotExists(proposerSnapshotBucket); err != nil { + return nil, fmt.Errorf("failed to create bucket=%s: %w", string(proposerSnapshotBucket), err) } - return nil + return store, nil } // getProposerSnapshot gets latest proposer snapshot diff --git a/consensus/polybft/proposer/state_store_proposer_snapshot_test.go b/consensus/polybft/proposer/state_store_proposer_snapshot_test.go new file mode 100644 index 0000000000..351a197f3d --- /dev/null +++ b/consensus/polybft/proposer/state_store_proposer_snapshot_test.go @@ -0,0 +1,64 @@ +package proposer + +import ( + "fmt" + "os" + "path" + "testing" + "time" + + "github.com/stretchr/testify/require" + bolt "go.etcd.io/bbolt" +) + +// newTestState creates a new instance of ProposerSnapshotStore used by tests. +func newTestState(tb testing.TB) *ProposerSnapshotStore { + tb.Helper() + + dir := fmt.Sprintf("/tmp/consensus-temp_%v", time.Now().UTC().Format(time.RFC3339Nano)) + err := os.Mkdir(dir, 0775) + + if err != nil { + tb.Fatal(err) + } + + tb.Cleanup(func() { + if err := os.RemoveAll(dir); err != nil { + tb.Fatal(err) + } + }) + + db, err := bolt.Open(path.Join(dir, "my.db"), 0666, nil) + if err != nil { + tb.Fatal(err) + } + + snapshotStore, err := newProposerSnapshotStore(db) + if err != nil { + tb.Fatal(err) + } + + return snapshotStore +} + +func TestState_getProposerSnapshot_writeProposerSnapshot(t *testing.T) { + t.Parallel() + + const ( + height = uint64(100) + round = uint64(5) + ) + + state := newTestState(t) + + snap, err := state.getProposerSnapshot(nil) + require.NoError(t, err) + require.Nil(t, snap) + + newSnapshot := &ProposerSnapshot{Height: height, Round: round} + require.NoError(t, state.writeProposerSnapshot(newSnapshot, nil)) + + snap, err = state.getProposerSnapshot(nil) + require.NoError(t, err) + require.Equal(t, newSnapshot, snap) +} diff --git a/consensus/polybft/proposer_calculator_test.go b/consensus/polybft/proposer_calculator_test.go deleted file mode 100644 index 83018701c6..0000000000 --- a/consensus/polybft/proposer_calculator_test.go +++ /dev/null @@ -1,712 +0,0 @@ -package polybft - -import ( - "bytes" - "math/big" - "testing" - - "github.com/0xPolygon/polygon-edge/bls" - "github.com/0xPolygon/polygon-edge/consensus/polybft/validator" - "github.com/0xPolygon/polygon-edge/types" - "github.com/hashicorp/go-hclog" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestProposerCalculator_SetIndex(t *testing.T) { - t.Parallel() - - validators := validator.NewTestValidatorsWithAliases(t, []string{"A", "B", "C", "D", "E"},
[]uint64{10, 100, 1, 50, 30}) - metadata := validators.GetPublicIdentities() - - vs := validators.ToValidatorSet() - - snapshot := NewProposerSnapshot(1, metadata) - - // validate no changes to validator set positions - for i, v := range vs.Accounts() { - assert.Equal(t, metadata[i].Address, v.Address) - } - - proposer, err := snapshot.CalcProposer(0, 1) - require.NoError(t, err) - assert.Equal(t, proposer, metadata[1].Address) - // validate no changes to validator set positions - for i, v := range vs.Accounts() { - assert.Equal(t, metadata[i].Address, v.Address) - } -} - -func TestProposerCalculator_RegularFlow(t *testing.T) { - t.Parallel() - - validators := validator.NewTestValidatorsWithAliases(t, []string{"A", "B", "C", "D", "E"}, []uint64{1, 2, 3, 4, 5}) - metadata := validators.GetPublicIdentities() - - snapshot := NewProposerSnapshot(0, metadata) - - currProposerAddress, err := snapshot.CalcProposer(0, 0) - require.NoError(t, err) - assert.Equal(t, metadata[4].Address, currProposerAddress) - - proposerAddressR1, err := snapshot.CalcProposer(1, 0) - require.NoError(t, err) - assert.Equal(t, metadata[3].Address, proposerAddressR1) - - proposerAddressR2, err := snapshot.CalcProposer(2, 0) - require.NoError(t, err) - assert.Equal(t, metadata[2].Address, proposerAddressR2) - - proposerAddressR3, err := snapshot.CalcProposer(3, 0) - require.NoError(t, err) - assert.Equal(t, metadata[1].Address, proposerAddressR3) - - proposerAddressR4, err := snapshot.CalcProposer(4, 0) - require.NoError(t, err) - assert.Equal(t, metadata[4].Address, proposerAddressR4) - - proposerAddressR5, err := snapshot.CalcProposer(5, 0) - require.NoError(t, err) - assert.Equal(t, metadata[3].Address, proposerAddressR5) - - proposerAddressR6, err := snapshot.CalcProposer(6, 0) - require.NoError(t, err) - assert.Equal(t, metadata[0].Address, proposerAddressR6) -} - -func TestProposerCalculator_SamePriority(t *testing.T) { - t.Parallel() - - keys, err := bls.CreateRandomBlsKeys(5) - require.NoError(t, err) - - // at some point priorities will be the same and bytes address will be compared - vs := validator.NewValidatorSet([]*validator.ValidatorMetadata{ - { - BlsKey: keys[0].PublicKey(), - Address: types.Address{0x1}, - VotingPower: big.NewInt(1), - }, - { - BlsKey: keys[1].PublicKey(), - Address: types.Address{0x2}, - VotingPower: big.NewInt(2), - }, - { - BlsKey: keys[2].PublicKey(), - Address: types.Address{0x3}, - VotingPower: big.NewInt(3), - }, - }, hclog.NewNullLogger()) - - snapshot := NewProposerSnapshot(0, vs.Accounts()) - - proposerR0, err := snapshot.CalcProposer(0, 0) - require.NoError(t, err) - assert.Equal(t, types.Address{0x3}, proposerR0) - - proposerR1, err := snapshot.CalcProposer(1, 0) - require.NoError(t, err) - assert.Equal(t, types.Address{0x2}, proposerR1) - - proposerR2, err := snapshot.CalcProposer(2, 0) - require.NoError(t, err) - assert.Equal(t, types.Address{0x1}, proposerR2) - - proposerR2, err = snapshot.CalcProposer(2, 0) // call again same round - require.NoError(t, err) - assert.Equal(t, types.Address{0x1}, proposerR2) -} - -func TestProposerCalculator_InversePriorityOrderWithExpectedListOfSelection(t *testing.T) { - t.Parallel() - - const numberOfIteration = 99 - - keys, err := bls.CreateRandomBlsKeys(3) - require.NoError(t, err) - - // priorities are from high to low vp in validator set - vset := validator.NewValidatorSet([]*validator.ValidatorMetadata{ - { - BlsKey: keys[0].PublicKey(), - Address: types.Address{0x1}, - VotingPower: big.NewInt(1000), - }, - { - BlsKey: 
keys[1].PublicKey(), - Address: types.Address{0x2}, - VotingPower: big.NewInt(300), - }, - { - BlsKey: keys[2].PublicKey(), - Address: types.Address{0x3}, - VotingPower: big.NewInt(330), - }, - }, hclog.NewNullLogger()) - - snapshot := NewProposerSnapshot(4, vset.Accounts()) - - var proposers = make([]types.Address, numberOfIteration) - - for i := uint64(0); i < numberOfIteration; i++ { - proposers[i], err = snapshot.CalcProposer(i, 4) - require.NoError(t, err) - } - - // list of addresses in order that should be selected - expectedValidatorAddresses := []types.Address{ - {0x1}, {0x3}, {0x1}, {0x2}, {0x1}, {0x1}, {0x3}, {0x1}, {0x2}, {0x1}, {0x1}, {0x3}, {0x1}, {0x1}, {0x2}, {0x1}, - {0x3}, {0x1}, {0x1}, {0x2}, {0x1}, {0x1}, {0x3}, {0x1}, {0x2}, {0x1}, {0x1}, {0x3}, {0x1}, {0x2}, {0x1}, {0x1}, - {0x3}, {0x1}, {0x1}, {0x2}, {0x1}, {0x3}, {0x1}, {0x1}, {0x2}, {0x1}, {0x3}, {0x1}, {0x1}, {0x2}, {0x1}, {0x3}, - {0x1}, {0x1}, {0x2}, {0x1}, {0x3}, {0x1}, {0x1}, {0x1}, {0x3}, {0x2}, {0x1}, {0x1}, {0x1}, {0x3}, {0x1}, {0x2}, - {0x1}, {0x1}, {0x3}, {0x1}, {0x2}, {0x1}, {0x1}, {0x3}, {0x1}, {0x2}, {0x1}, {0x1}, {0x3}, {0x1}, {0x2}, {0x1}, - {0x1}, {0x3}, {0x1}, {0x1}, {0x2}, {0x1}, {0x3}, {0x1}, {0x1}, {0x2}, {0x1}, {0x3}, {0x1}, {0x1}, {0x2}, {0x1}, - {0x3}, {0x1}, {0x1}, - } - - for i, p := range proposers { - assert.True(t, bytes.Equal(expectedValidatorAddresses[i].Bytes(), p.Bytes())) - } -} - -func TestProposerCalculator_IncrementProposerPrioritySameVotingPower(t *testing.T) { - t.Parallel() - - keys, err := bls.CreateRandomBlsKeys(3) - require.NoError(t, err) - - vs := validator.NewValidatorSet([]*validator.ValidatorMetadata{ - { - BlsKey: keys[0].PublicKey(), - Address: types.Address{0x1}, - VotingPower: big.NewInt(1), - }, - { - BlsKey: keys[1].PublicKey(), - Address: types.Address{0x2}, - VotingPower: big.NewInt(1), - }, - { - BlsKey: keys[2].PublicKey(), - Address: types.Address{0x3}, - VotingPower: big.NewInt(1), - }, - }, hclog.NewNullLogger()) - - snapshot := NewProposerSnapshot(0, vs.Accounts()) - - // when voting power is the same order is by address - currProposerAddress, err := snapshot.CalcProposer(0, 0) - require.NoError(t, err) - assert.Equal(t, types.Address{0x1}, currProposerAddress) - - proposerAddresR1, err := snapshot.CalcProposer(1, 0) - require.NoError(t, err) - assert.Equal(t, types.Address{0x2}, proposerAddresR1) - - proposerAddressR2, err := snapshot.CalcProposer(2, 0) - require.NoError(t, err) - assert.Equal(t, types.Address{0x3}, proposerAddressR2) - - proposerAddressR3, err := snapshot.CalcProposer(3, 0) - require.NoError(t, err) - assert.Equal(t, types.Address{0x1}, proposerAddressR3) - - proposerAddressR4, err := snapshot.CalcProposer(4, 0) - require.NoError(t, err) - assert.Equal(t, types.Address{0x2}, proposerAddressR4) -} - -func TestProposerCalculator_AveragingInIncrementProposerPriorityWithVotingPower(t *testing.T) { - t.Parallel() - - keys, err := bls.CreateRandomBlsKeys(3) - require.NoError(t, err) - - // Other than TestAveragingInIncrementProposerPriority this is a more complete test showing - // how each ProposerPriority changes in relation to the validator's voting power respectively. 
- // average is zero in each round: - vp0 := int64(10) - vp1 := int64(1) - vp2 := int64(1) - total := vp0 + vp1 + vp2 - avg := (vp0 + vp1 + vp2 - total) / 3 - valz := []*validator.ValidatorMetadata{ - { - BlsKey: keys[0].PublicKey(), - Address: types.Address{0x1}, - VotingPower: big.NewInt(vp0), - }, - { - BlsKey: keys[1].PublicKey(), - Address: types.Address{0x2}, - VotingPower: big.NewInt(vp1), - }, - { - BlsKey: keys[2].PublicKey(), - Address: types.Address{0x3}, - VotingPower: big.NewInt(vp2), - }, - } - - tcs := []struct { - wantProposerPriority []int64 - times uint64 - wantProposerIndex int64 - }{ - - 0: { - []int64{ - // Acumm+VotingPower-Avg: - 0 + vp0 - total - avg, // mostest will be subtracted by total voting power (12) - 0 + vp1, - 0 + vp2}, - 1, - 0, - }, - 1: { - []int64{ - (0 + vp0 - total) + vp0 - total - avg, // this will be mostest on 2nd iter, too - (0 + vp1) + vp1, - (0 + vp2) + vp2}, - 2, - 0, - }, // increment twice -> expect average to be subtracted twice - 2: { - []int64{ - 0 + 3*(vp0-total) - avg, // still mostest - 0 + 3*vp1, - 0 + 3*vp2}, - 3, - 0, - }, - 3: { - []int64{ - 0 + 4*(vp0-total), // still mostest - 0 + 4*vp1, - 0 + 4*vp2}, - 4, - 0, - }, - 4: { - []int64{ - 0 + 4*(vp0-total) + vp0, // 4 iters was mostest - 0 + 5*vp1 - total, // now this val is mostest for the 1st time (hence -12==totalVotingPower) - 0 + 5*vp2}, - 5, - 1, - }, - 5: { - []int64{ - 0 + 6*vp0 - 5*total, // mostest again - 0 + 6*vp1 - total, // mostest once up to here - 0 + 6*vp2}, - 6, - 0, - }, - 6: { - []int64{ - 0 + 7*vp0 - 6*total, // in 7 iters this val is mostest 6 times - 0 + 7*vp1 - total, // in 7 iters this val is mostest 1 time - 0 + 7*vp2}, - 7, - 0, - }, - 7: { - []int64{ - 0 + 8*vp0 - 7*total, // mostest again - 0 + 8*vp1 - total, - 0 + 8*vp2}, - 8, - 0, - }, - 8: { - []int64{ - 0 + 9*vp0 - 7*total, - 0 + 9*vp1 - total, - 0 + 9*vp2 - total}, // mostest - 9, - 2, - }, - 9: { - []int64{ - 0 + 10*vp0 - 8*total, // after 10 iters this is mostest again - 0 + 10*vp1 - total, // after 6 iters this val is "mostest" once and not in between - 0 + 10*vp2 - total}, // in between 10 iters this val is "mostest" once - 10, - 0, - }, - 10: { - []int64{ - 0 + 11*vp0 - 9*total, - 0 + 11*vp1 - total, // after 6 iters this val is "mostest" once and not in between - 0 + 11*vp2 - total}, // after 10 iters this val is "mostest" once - 11, - 0, - }, - } - - for i, tc := range tcs { - snap := NewProposerSnapshot(1, valz) - - _, err := incrementProposerPriorityNTimes(snap, tc.times) - require.NoError(t, err) - - address, _ := snap.GetLatestProposer(tc.times-1, 1) - - assert.Equal(t, snap.Validators[tc.wantProposerIndex].Metadata.Address, address, - "test case: %v", - i) - - for valIdx, val := range snap.Validators { - assert.Equal(t, - tc.wantProposerPriority[valIdx], - val.ProposerPriority.Int64(), - "test case: %v, validator: %v", - i, - valIdx) - } - } -} - -func TestProposerCalculator_UpdatesForNewValidatorSet(t *testing.T) { - t.Parallel() - - keys, err := bls.CreateRandomBlsKeys(2) - require.NoError(t, err) - - v1 := &validator.ValidatorMetadata{Address: types.Address{0x1}, BlsKey: keys[0].PublicKey(), VotingPower: big.NewInt(100)} - v2 := &validator.ValidatorMetadata{Address: types.Address{0x2}, BlsKey: keys[1].PublicKey(), VotingPower: big.NewInt(100)} - - accountSet := []*validator.ValidatorMetadata{v1, v2} - vs := validator.NewValidatorSet(accountSet, hclog.NewNullLogger()) - - snapshot := NewProposerSnapshot(0, vs.Accounts()) - - _, err = snapshot.CalcProposer(1, 0) - require.NoError(t, 
err) - - // verify that the capacity and length of validators is the same - assert.Equal(t, len(vs.Accounts()), cap(snapshot.Validators)) - // verify that validator priorities are centered - valsCount := int64(len(snapshot.Validators)) - - sum := big.NewInt(0) - for _, val := range snapshot.Validators { - // mind overflow - sum = new(big.Int).Add(sum, val.ProposerPriority) - } - - assert.True(t, sum.Cmp(big.NewInt(valsCount)) < 0 && sum.Cmp(big.NewInt(-valsCount)) > 0, - "expected total priority in (-%d, %d). Got %d", valsCount, valsCount, sum) - - // verify that priorities are scaled - diff := computeMaxMinPriorityDiff(snapshot.Validators) - totalVotingPower := vs.TotalVotingPower() - diffMax := new(big.Int).Mul(priorityWindowSizeFactor, &totalVotingPower) - assert.True(t, diff.Cmp(diffMax) <= 0, "expected priority distance < %d. Got %d", diffMax, diff) -} - -func TestProposerCalculator_GetLatestProposer(t *testing.T) { - t.Parallel() - - const ( - bestIdx = 5 - count = 10 - ) - - validatorSet := validator.NewTestValidators(t, count).GetPublicIdentities() - snapshot := NewProposerSnapshot(0, validatorSet) - snapshot.Validators[bestIdx].ProposerPriority = big.NewInt(1000000) - - // not set - _, err := snapshot.GetLatestProposer(0, 0) - assert.Error(t, err) - - _, err = snapshot.CalcProposer(0, 0) - assert.NoError(t, err) - - // wrong round - _, err = snapshot.GetLatestProposer(1, 0) - assert.Error(t, err) - - // wrong height - _, err = snapshot.GetLatestProposer(0, 1) - assert.Error(t, err) - - // ok - address, err := snapshot.GetLatestProposer(0, 0) - assert.NoError(t, err) - - proposerAddress := validatorSet[bestIdx].Address - assert.Equal(t, proposerAddress, address) -} - -func TestProposerCalculator_UpdateValidatorsSameVpUpdatedAndNewAdded(t *testing.T) { - t.Parallel() - - keys, err := bls.CreateRandomBlsKeys(8) - require.NoError(t, err) - - v1 := &validator.ValidatorMetadata{Address: types.Address{0x1}, BlsKey: keys[0].PublicKey(), VotingPower: big.NewInt(100)} - v2 := &validator.ValidatorMetadata{Address: types.Address{0x2}, BlsKey: keys[1].PublicKey(), VotingPower: big.NewInt(100)} - v3 := &validator.ValidatorMetadata{Address: types.Address{0x3}, BlsKey: keys[2].PublicKey(), VotingPower: big.NewInt(100)} - v4 := &validator.ValidatorMetadata{Address: types.Address{0x4}, BlsKey: keys[3].PublicKey(), VotingPower: big.NewInt(100)} - v5 := &validator.ValidatorMetadata{Address: types.Address{0x5}, BlsKey: keys[4].PublicKey(), VotingPower: big.NewInt(100)} - - vs := validator.NewValidatorSet([]*validator.ValidatorMetadata{v1, v2, v3, v4, v5}, hclog.NewNullLogger()) - - snapshot := NewProposerSnapshot(0, vs.Accounts()) - - // iterate one cycle should bring back priority to 0 - _, err = incrementProposerPriorityNTimes(snapshot, 5) - require.NoError(t, err) - - for _, v := range snapshot.Validators { - assert.True(t, v.ProposerPriority.Cmp(big.NewInt(0)) == 0) - } - - // updated old validators - u1 := &validator.ValidatorMetadata{Address: types.Address{0x1}, BlsKey: keys[1].PublicKey(), VotingPower: big.NewInt(10)} - u2 := &validator.ValidatorMetadata{Address: types.Address{0x2}, BlsKey: keys[2].PublicKey(), VotingPower: big.NewInt(10)} - // added new validator - a1 := &validator.ValidatorMetadata{Address: types.Address{0x9}, BlsKey: keys[7].PublicKey(), VotingPower: big.NewInt(100)} - - newAccountSet := []*validator.ValidatorMetadata{u1, u2, a1} - - require.NoError(t, updateValidators(snapshot, newAccountSet)) - assert.Equal(t, 3, len(snapshot.Validators)) - - // removedVp := sum(v3, v4, 
v5) = 300 - // newVp := sum(u1, u2, a1) = 120 - // sum(removedVp, newVp) = 420; priority(a1) = -1.125*420 = -472 - // scale: difMax = 2 * 120; diff(-475, 0); ratio ~ 2 - // priority(a1) = -472/2 = 236; u1 = 0, u2 = 0 - // center: avg = 236/3 = 79; priority(a1)= 236 - 79 - - // check voting power after update - assert.Equal(t, big.NewInt(10), snapshot.Validators[0].Metadata.VotingPower) - assert.Equal(t, big.NewInt(10), snapshot.Validators[1].Metadata.VotingPower) - assert.Equal(t, big.NewInt(100), snapshot.Validators[2].Metadata.VotingPower) - // newly added validator - assert.Equal(t, big.NewInt(100), snapshot.Validators[2].Metadata.VotingPower) - assert.Equal(t, types.Address{0x9}, snapshot.Validators[2].Metadata.Address) - assert.Equal(t, big.NewInt(-157), snapshot.Validators[2].ProposerPriority) // a1 - // check priority - assert.Equal(t, big.NewInt(79), snapshot.Validators[0].ProposerPriority) // u1 - assert.Equal(t, big.NewInt(79), snapshot.Validators[1].ProposerPriority) // u2 - - _, err = incrementProposerPriorityNTimes(snapshot, 1) - require.NoError(t, err) - - // 79 + 10 - (100+10+10) - assert.Equal(t, big.NewInt(-31), snapshot.Validators[0].ProposerPriority) - // 79 + 10 - assert.Equal(t, big.NewInt(89), snapshot.Validators[1].ProposerPriority) - // -157+100 - assert.Equal(t, big.NewInt(-57), snapshot.Validators[2].ProposerPriority) -} - -func TestProposerCalculator_UpdateValidators(t *testing.T) { - t.Parallel() - - keys, err := bls.CreateRandomBlsKeys(4) - require.NoError(t, err) - - v1 := &validator.ValidatorMetadata{Address: types.Address{0x1}, BlsKey: keys[0].PublicKey(), VotingPower: big.NewInt(10)} - v2 := &validator.ValidatorMetadata{Address: types.Address{0x2}, BlsKey: keys[1].PublicKey(), VotingPower: big.NewInt(20)} - v3 := &validator.ValidatorMetadata{Address: types.Address{0x3}, BlsKey: keys[2].PublicKey(), VotingPower: big.NewInt(30)} - - vs := validator.NewValidatorSet([]*validator.ValidatorMetadata{v1, v2, v3}, hclog.NewNullLogger()) - - snapshot := NewProposerSnapshot(0, vs.Accounts()) - require.Equal(t, big.NewInt(60), snapshot.GetTotalVotingPower()) - // init priority must be 0 - require.Zero(t, snapshot.Validators[0].ProposerPriority.Int64()) - require.Zero(t, snapshot.Validators[1].ProposerPriority.Int64()) - require.Zero(t, snapshot.Validators[2].ProposerPriority.Int64()) - // vp must be initialized - require.Equal(t, big.NewInt(10), snapshot.Validators[0].Metadata.VotingPower) - require.Equal(t, big.NewInt(20), snapshot.Validators[1].Metadata.VotingPower) - require.Equal(t, big.NewInt(30), snapshot.Validators[2].Metadata.VotingPower) - - // increment once - _, err = incrementProposerPriorityNTimes(snapshot, 1) - require.NoError(t, err) - - // updated - u1 := &validator.ValidatorMetadata{Address: types.Address{0x1}, BlsKey: keys[0].PublicKey(), VotingPower: big.NewInt(100)} - u2 := &validator.ValidatorMetadata{Address: types.Address{0x2}, BlsKey: keys[1].PublicKey(), VotingPower: big.NewInt(200)} - u3 := &validator.ValidatorMetadata{Address: types.Address{0x3}, BlsKey: keys[2].PublicKey(), VotingPower: big.NewInt(300)} - // added - a1 := &validator.ValidatorMetadata{Address: types.Address{0x4}, BlsKey: keys[3].PublicKey(), VotingPower: big.NewInt(400)} - - require.NoError(t, updateValidators(snapshot, []*validator.ValidatorMetadata{u1, u2, u3, a1})) - - require.Equal(t, 4, len(snapshot.Validators)) - // priorities are from previous iteration - require.Equal(t, big.NewInt(292), snapshot.Validators[0].ProposerPriority) - require.Equal(t, big.NewInt(302), 
snapshot.Validators[1].ProposerPriority) - require.Equal(t, big.NewInt(252), snapshot.Validators[2].ProposerPriority) - // new added a1 - require.Equal(t, types.Address{0x4}, snapshot.Validators[3].Metadata.Address) - require.Equal(t, big.NewInt(-843), snapshot.Validators[3].ProposerPriority) - // total vp is updated - require.Equal(t, big.NewInt(1000), snapshot.GetTotalVotingPower()) -} - -func TestProposerCalculator_ScaleAfterDelete(t *testing.T) { - t.Parallel() - - keys, err := bls.CreateRandomBlsKeys(3) - require.NoError(t, err) - - v1 := &validator.ValidatorMetadata{Address: types.Address{0x1}, BlsKey: keys[0].PublicKey(), VotingPower: big.NewInt(10)} - v2 := &validator.ValidatorMetadata{Address: types.Address{0x2}, BlsKey: keys[1].PublicKey(), VotingPower: big.NewInt(10)} - v3 := &validator.ValidatorMetadata{Address: types.Address{0x3}, BlsKey: keys[2].PublicKey(), VotingPower: big.NewInt(80000)} - - vs := validator.NewValidatorSet([]*validator.ValidatorMetadata{v1, v2, v3}, hclog.NewNullLogger()) - - snapshot := NewProposerSnapshot(0, vs.Accounts()) - assert.Equal(t, big.NewInt(80020), snapshot.GetTotalVotingPower()) - - _, err = incrementProposerPriorityNTimes(snapshot, 1) - require.NoError(t, err) - - // priorities are from previous iteration - require.Equal(t, big.NewInt(10), snapshot.Validators[0].ProposerPriority) - require.Equal(t, big.NewInt(10), snapshot.Validators[1].ProposerPriority) - require.Equal(t, big.NewInt(-20), snapshot.Validators[2].ProposerPriority) - - // another increment - proposer, err := incrementProposerPriorityNTimes(snapshot, 4000) - require.NoError(t, err) - // priorities are from previous iteration - assert.Equal(t, types.Address{0x3}, proposer.Metadata.Address) - - // reduce validator voting power from 8k to 1 - u1 := &validator.ValidatorMetadata{Address: types.Address{0x1}, BlsKey: keys[0].PublicKey(), VotingPower: big.NewInt(10)} - u2 := &validator.ValidatorMetadata{Address: types.Address{0x2}, BlsKey: keys[1].PublicKey(), VotingPower: big.NewInt(10)} - - require.Equal(t, big.NewInt(-40010), snapshot.Validators[0].ProposerPriority) - require.Equal(t, big.NewInt(40010), snapshot.Validators[1].ProposerPriority) - - require.NoError(t, updateValidators(snapshot, []*validator.ValidatorMetadata{u1, u2})) - - // maxdiff = 2*tvp = 40 - // diff(min,max) (-40010, 40010) = 80020 - // ratio := (diff + diffMax - 1) / diffMax; (80020 + 20 - 1)/20 = 2001 - // priority = priority / ratio; u1 = -40010 / 4001 ~ -19; u2 = 40010 / 4001 ~ 19 - require.Equal(t, big.NewInt(-19), snapshot.Validators[0].ProposerPriority) - require.Equal(t, big.NewInt(19), snapshot.Validators[1].ProposerPriority) - require.Equal(t, big.NewInt(20), snapshot.GetTotalVotingPower()) -} - -func TestProposerCalculator_ShiftAfterUpdate(t *testing.T) { - t.Parallel() - - keys, err := bls.CreateRandomBlsKeys(3) - require.NoError(t, err) - - v1 := &validator.ValidatorMetadata{Address: types.Address{0x1}, BlsKey: keys[0].PublicKey(), VotingPower: big.NewInt(50)} - v2 := &validator.ValidatorMetadata{Address: types.Address{0x2}, BlsKey: keys[1].PublicKey(), VotingPower: big.NewInt(80)} - v3 := &validator.ValidatorMetadata{Address: types.Address{0x3}, BlsKey: keys[2].PublicKey(), VotingPower: big.NewInt(100000)} - - vs := validator.NewValidatorSet([]*validator.ValidatorMetadata{v1, v2, v3}, hclog.NewNullLogger()) - - snapshot := NewProposerSnapshot(0, vs.Accounts()) - assert.Equal(t, big.NewInt(100130), snapshot.GetTotalVotingPower()) - - _, err = incrementProposerPriorityNTimes(snapshot, 4000) - 
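The expected priorities in the scaling tests above come from a scale-then-center step that is only described in comments: the spread between the highest and lowest priority is capped at diffMax = 2 * totalVotingPower by dividing every priority by ratio = (diff + diffMax - 1) / diffMax, and the values are then shifted by their average. The standalone sketch below reproduces that arithmetic on plain big.Int values so the asserted numbers can be checked by hand; for the ScaleAfterDelete case the values -19 and 19 follow from diffMax = 40 and ratio = 2001. The function names and setup are illustrative only, not part of the package.

```go
// Illustrative sketch of the scale-and-center arithmetic the proposer
// calculator tests describe in comments. Names are hypothetical; only the
// formulas mirror the test comments.
package main

import (
	"fmt"
	"math/big"
)

// scale divides every priority by ratio = ceil(diff / diffMax) whenever the
// spread between the highest and lowest priority exceeds diffMax = 2 * totalVotingPower.
func scale(priorities []*big.Int, totalVotingPower *big.Int) {
	lo, hi := new(big.Int).Set(priorities[0]), new(big.Int).Set(priorities[0])
	for _, p := range priorities {
		if p.Cmp(lo) < 0 {
			lo.Set(p)
		}
		if p.Cmp(hi) > 0 {
			hi.Set(p)
		}
	}

	diff := new(big.Int).Sub(hi, lo)
	diffMax := new(big.Int).Mul(big.NewInt(2), totalVotingPower)

	if diff.Cmp(diffMax) <= 0 {
		return // spread already within the allowed window, nothing to do
	}

	// ratio := (diff + diffMax - 1) / diffMax (integer ceiling division)
	ratio := new(big.Int).Add(diff, new(big.Int).Sub(diffMax, big.NewInt(1)))
	ratio.Div(ratio, diffMax)

	for _, p := range priorities {
		p.Quo(p, ratio) // truncated division, as in the asserted values
	}
}

// center shifts every priority by the average so values stay centered around zero.
// The rounding of the average here is truncated division; the real
// implementation's rounding detail may differ.
func center(priorities []*big.Int) {
	sum := big.NewInt(0)
	for _, p := range priorities {
		sum.Add(sum, p)
	}

	avg := new(big.Int).Quo(sum, big.NewInt(int64(len(priorities))))
	for _, p := range priorities {
		p.Sub(p, avg)
	}
}

func main() {
	// Numbers taken from TestProposerCalculator_ScaleAfterDelete:
	// priorities (-40010, 40010) with total voting power 20 after the update.
	priorities := []*big.Int{big.NewInt(-40010), big.NewInt(40010)}
	scale(priorities, big.NewInt(20))
	center(priorities)
	fmt.Println(priorities) // [-19 19], matching the assertions above
}
```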
require.NoError(t, err) - - // updates - u1 := &validator.ValidatorMetadata{Address: types.Address{0x1}, BlsKey: keys[0].PublicKey(), VotingPower: big.NewInt(5)} - u2 := &validator.ValidatorMetadata{Address: types.Address{0x2}, BlsKey: keys[1].PublicKey(), VotingPower: big.NewInt(8)} - - require.NoError(t, updateValidators(snapshot, []*validator.ValidatorMetadata{u1, u2})) - - // maxdiff = 2*tvp = 26 - // diff(min,max) (-260, 19610) = 19870 - // ratio := (diff + diffMax - 1) / diffMax; (19870 + 26 - 1)/26 =765 - // scale priority = priority / ratio; p1 = 0; p2 = 25 - // shift with avg=(25+0)/2=12; p = priority - avg; u1 = -12; u2= 13 - require.Equal(t, big.NewInt(-12), snapshot.Validators[0].ProposerPriority) - require.Equal(t, big.NewInt(13), snapshot.Validators[1].ProposerPriority) - require.Equal(t, big.NewInt(13), snapshot.GetTotalVotingPower()) -} - -func TestProposerCalculator_UpdateValidatorSet(t *testing.T) { - t.Parallel() - - keys, err := bls.CreateRandomBlsKeys(3) - require.NoError(t, err) - - v1 := &validator.ValidatorMetadata{Address: types.Address{0x1}, BlsKey: keys[0].PublicKey(), VotingPower: big.NewInt(1)} - v2 := &validator.ValidatorMetadata{Address: types.Address{0x2}, BlsKey: keys[1].PublicKey(), VotingPower: big.NewInt(8)} - v3 := &validator.ValidatorMetadata{Address: types.Address{0x3}, BlsKey: keys[2].PublicKey(), VotingPower: big.NewInt(15)} - - vs := validator.NewValidatorSet([]*validator.ValidatorMetadata{v1, v2, v3}, hclog.NewNullLogger()) - - snapshot := NewProposerSnapshot(0, vs.Accounts()) - assert.Equal(t, big.NewInt(24), snapshot.GetTotalVotingPower()) - - _, err = incrementProposerPriorityNTimes(snapshot, 2) - require.NoError(t, err) - - // modified validator - u1 := &validator.ValidatorMetadata{Address: types.Address{0x1}, BlsKey: keys[0].PublicKey(), VotingPower: big.NewInt(5)} - // added validator - a1 := &validator.ValidatorMetadata{Address: types.Address{0x4}, BlsKey: keys[1].PublicKey(), VotingPower: big.NewInt(8)} - - require.NoError(t, updateValidators(snapshot, []*validator.ValidatorMetadata{u1, a1})) - // expecting 2 validators with updated voting power and total voting power - require.Equal(t, 2, len(snapshot.Validators)) - require.Equal(t, types.Address{0x1}, snapshot.Validators[0].Metadata.Address) - require.Equal(t, big.NewInt(5), snapshot.Validators[0].Metadata.VotingPower) - require.Equal(t, big.NewInt(11), snapshot.Validators[0].ProposerPriority) - - require.Equal(t, types.Address{0x4}, snapshot.Validators[1].Metadata.Address) - require.Equal(t, big.NewInt(8), snapshot.Validators[1].Metadata.VotingPower) - require.Equal(t, big.NewInt(-10), snapshot.Validators[1].ProposerPriority) - require.Equal(t, big.NewInt(13), snapshot.GetTotalVotingPower()) -} - -func TestProposerCalculator_AddValidator(t *testing.T) { - t.Parallel() - - keys, err := bls.CreateRandomBlsKeys(3) - require.NoError(t, err) - - v1 := &validator.ValidatorMetadata{Address: types.Address{0x1}, BlsKey: keys[0].PublicKey(), VotingPower: big.NewInt(3)} - v2 := &validator.ValidatorMetadata{Address: types.Address{0x2}, BlsKey: keys[1].PublicKey(), VotingPower: big.NewInt(1)} - - vs := validator.NewValidatorSet([]*validator.ValidatorMetadata{v1, v2}, hclog.NewNullLogger()) - - snapshot := NewProposerSnapshot(0, vs.Accounts()) - assert.Equal(t, big.NewInt(4), snapshot.GetTotalVotingPower()) - proposer, err := incrementProposerPriorityNTimes(snapshot, 1) - require.NoError(t, err) - require.Equal(t, types.Address{0x1}, proposer.Metadata.Address) - require.Equal(t, big.NewInt(-1), 
snapshot.Validators[0].ProposerPriority) - require.Equal(t, big.NewInt(1), snapshot.Validators[1].ProposerPriority) - - _, err = incrementProposerPriorityNTimes(snapshot, 1) - require.NoError(t, err) - - require.Equal(t, big.NewInt(-2), snapshot.Validators[0].ProposerPriority) - require.Equal(t, big.NewInt(2), snapshot.Validators[1].ProposerPriority) - - a1 := &validator.ValidatorMetadata{Address: types.Address{0x3}, BlsKey: keys[2].PublicKey(), VotingPower: big.NewInt(8)} - - require.NoError(t, updateValidators(snapshot, []*validator.ValidatorMetadata{v1, v2, a1})) - - // updated vp: 8+3+1 = 12 - // added validator priority = -1.125*8 ~ -13 - // scaling: max(-13, 3) = 16 < 2* 12; no scaling - // centring: avg = (13+3+1)/3=5; v1=-2+5, v2=2+5; u3=-13+5 - require.Equal(t, big.NewInt(3), snapshot.Validators[0].ProposerPriority) - require.Equal(t, big.NewInt(7), snapshot.Validators[1].ProposerPriority) - require.Equal(t, big.NewInt(-8), snapshot.Validators[2].ProposerPriority) -} diff --git a/consensus/polybft/runtime_helpers.go b/consensus/polybft/runtime_helpers.go deleted file mode 100644 index 2dc21f967e..0000000000 --- a/consensus/polybft/runtime_helpers.go +++ /dev/null @@ -1,50 +0,0 @@ -package polybft - -import ( - "github.com/0xPolygon/polygon-edge/blockchain" - "github.com/0xPolygon/polygon-edge/types" -) - -// isEndOfPeriod checks if an end of a period (either it be sprint or epoch) -// is reached with the current block (the parent block of the current fsm iteration) -func isEndOfPeriod(blockNumber, periodSize uint64) bool { - return blockNumber%periodSize == 0 -} - -// getBlockData returns block header and extra -func getBlockData(blockNumber uint64, blockchainBackend blockchainBackend) (*types.Header, *Extra, error) { - blockHeader, found := blockchainBackend.GetHeaderByNumber(blockNumber) - if !found { - return nil, nil, blockchain.ErrNoBlock - } - - blockExtra, err := GetIbftExtra(blockHeader.ExtraData) - if err != nil { - return nil, nil, err - } - - return blockHeader, blockExtra, nil -} - -// isEpochEndingBlock checks if given block is an epoch ending block -func isEpochEndingBlock(blockNumber uint64, extra *Extra, blockchain blockchainBackend) (bool, error) { - if extra.Validators == nil { - // non epoch ending blocks have validator set delta as nil - return false, nil - } - - if !extra.Validators.IsEmpty() { - // if validator set delta is not empty, the validator set was changed in this block - // meaning the epoch changed as well - return true, nil - } - - _, nextBlockExtra, err := getBlockData(blockNumber+1, blockchain) - if err != nil { - return false, err - } - - // validator set delta can be empty (no change in validator set happened) - // so we need to check if their epoch numbers are different - return extra.BlockMetaData.EpochNumber != nextBlockExtra.BlockMetaData.EpochNumber, nil -} diff --git a/consensus/polybft/runtime_helpers_test.go b/consensus/polybft/runtime_helpers_test.go deleted file mode 100644 index 89bbd7b3e7..0000000000 --- a/consensus/polybft/runtime_helpers_test.go +++ /dev/null @@ -1,89 +0,0 @@ -package polybft - -import ( - "testing" - - "github.com/0xPolygon/polygon-edge/blockchain" - "github.com/0xPolygon/polygon-edge/consensus/polybft/bitmap" - "github.com/0xPolygon/polygon-edge/consensus/polybft/validator" - "github.com/0xPolygon/polygon-edge/types" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" -) - -func TestHelpers_isEpochEndingBlock_DeltaNotEmpty(t *testing.T) { - t.Parallel() - - validators := 
validator.NewTestValidators(t, 3).GetPublicIdentities() - - bitmap := bitmap.Bitmap{} - bitmap.Set(0) - - delta := &validator.ValidatorSetDelta{ - Added: validators[1:], - Removed: bitmap, - } - - extra := &Extra{Validators: delta} - blockNumber := uint64(20) - - isEndOfEpoch, err := isEpochEndingBlock(blockNumber, extra, new(blockchainMock)) - require.NoError(t, err) - require.True(t, isEndOfEpoch) -} - -func TestHelpers_isEpochEndingBlock_NoBlock(t *testing.T) { - t.Parallel() - - blockchainMock := new(blockchainMock) - blockchainMock.On("GetHeaderByNumber", mock.Anything).Return(&types.Header{}, false) - - extra := &Extra{Validators: &validator.ValidatorSetDelta{}} - blockNumber := uint64(20) - - isEndOfEpoch, err := isEpochEndingBlock(blockNumber, extra, blockchainMock) - require.ErrorIs(t, blockchain.ErrNoBlock, err) - require.False(t, isEndOfEpoch) -} - -func TestHelpers_isEpochEndingBlock_EpochsNotTheSame(t *testing.T) { - t.Parallel() - - blockchainMock := new(blockchainMock) - - nextBlockExtra := &Extra{Validators: &validator.ValidatorSetDelta{}, BlockMetaData: &BlockMetaData{EpochNumber: 3}} - nextBlock := &types.Header{ - Number: 21, - ExtraData: nextBlockExtra.MarshalRLPTo(nil), - } - - blockchainMock.On("GetHeaderByNumber", mock.Anything).Return(nextBlock, true) - - extra := &Extra{Validators: &validator.ValidatorSetDelta{}, BlockMetaData: &BlockMetaData{EpochNumber: 2}} - blockNumber := uint64(20) - - isEndOfEpoch, err := isEpochEndingBlock(blockNumber, extra, blockchainMock) - require.NoError(t, err) - require.True(t, isEndOfEpoch) -} - -func TestHelpers_isEpochEndingBlock_EpochsAreTheSame(t *testing.T) { - t.Parallel() - - blockchainMock := new(blockchainMock) - - nextBlockExtra := &Extra{Validators: &validator.ValidatorSetDelta{}, BlockMetaData: &BlockMetaData{EpochNumber: 2}} - nextBlock := &types.Header{ - Number: 16, - ExtraData: nextBlockExtra.MarshalRLPTo(nil), - } - - blockchainMock.On("GetHeaderByNumber", mock.Anything).Return(nextBlock, true) - - extra := &Extra{Validators: &validator.ValidatorSetDelta{}, BlockMetaData: &BlockMetaData{EpochNumber: 2}} - blockNumber := uint64(15) - - isEndOfEpoch, err := isEpochEndingBlock(blockNumber, extra, blockchainMock) - require.NoError(t, err) - require.False(t, isEndOfEpoch) -} diff --git a/consensus/polybft/sc_integration_test.go b/consensus/polybft/sc_integration_test.go index eb074ca544..b51303f0b2 100644 --- a/consensus/polybft/sc_integration_test.go +++ b/consensus/polybft/sc_integration_test.go @@ -10,7 +10,9 @@ import ( "github.com/stretchr/testify/require" "github.com/0xPolygon/polygon-edge/chain" + "github.com/0xPolygon/polygon-edge/consensus/polybft/config" "github.com/0xPolygon/polygon-edge/consensus/polybft/contractsapi" + systemstate "github.com/0xPolygon/polygon-edge/consensus/polybft/system_state" "github.com/0xPolygon/polygon-edge/consensus/polybft/validator" "github.com/0xPolygon/polygon-edge/contracts" "github.com/0xPolygon/polygon-edge/helper/common" @@ -82,7 +84,7 @@ func TestIntegration_CommitEpoch(t *testing.T) { } } - polyBFTConfig := PolyBFTConfig{ + polyBFTConfig := config.PolyBFT{ InitialValidatorSet: initValidators, EpochSize: 24 * 60 * 60 / 2, SprintSize: 5, @@ -96,12 +98,12 @@ func TestIntegration_CommitEpoch(t *testing.T) { BladeAdmin: accSet.GetAddresses()[0], // use 1st account as governance address Governance: currentValidators.ToValidatorSet().Accounts().GetAddresses()[0], - RewardConfig: &RewardsConfig{ + RewardConfig: &config.Rewards{ TokenAddress: contracts.NativeERC20TokenContract, 
WalletAddress: walletAddress, WalletAmount: new(big.Int).SetUint64(initialBalance), }, - GovernanceConfig: &GovernanceConfig{ + GovernanceConfig: &config.Governance{ VotingDelay: big.NewInt(10), VotingPeriod: big.NewInt(10), ProposalThreshold: big.NewInt(25), @@ -114,7 +116,7 @@ func TestIntegration_CommitEpoch(t *testing.T) { StakeTokenAddr: contracts.NativeERC20TokenContract, } - transition := newTestTransition(t, alloc) + transition := systemstate.NewTestTransition(t, alloc) // init NetworkParams require.NoError(t, initNetworkParamsContract(2, polyBFTConfig, transition)) @@ -148,3 +150,49 @@ func TestIntegration_CommitEpoch(t *testing.T) { t.Logf("Number of validators %d on commit epoch, Gas used %+v\n", accSet.Len(), result.GasUsed) } } + +func createTestCommitEpochInput(t *testing.T, epochID uint64, + epochSize uint64) *contractsapi.CommitEpochEpochManagerFn { + t.Helper() + + var startBlock uint64 = 0 + if epochID > 1 { + startBlock = (epochID - 1) * epochSize + } + + commitEpoch := &contractsapi.CommitEpochEpochManagerFn{ + ID: new(big.Int).SetUint64(epochID), + Epoch: &contractsapi.Epoch{ + StartBlock: new(big.Int).SetUint64(startBlock + 1), + EndBlock: new(big.Int).SetUint64(epochSize * epochID), + EpochRoot: types.Hash{}, + }, + EpochSize: new(big.Int).SetUint64(epochSize), + } + + return commitEpoch +} + +func createTestDistributeRewardsInput(t *testing.T, epochID uint64, + validatorSet validator.AccountSet, epochSize uint64) *contractsapi.DistributeRewardForEpochManagerFn { + t.Helper() + + if validatorSet == nil { + validatorSet = validator.NewTestValidators(t, 5).GetPublicIdentities() + } + + uptime := make([]*contractsapi.Uptime, len(validatorSet)) + + for i, v := range validatorSet { + uptime[i] = &contractsapi.Uptime{ + Validator: v.Address, + SignedBlocks: new(big.Int).SetUint64(epochSize), + } + } + + return &contractsapi.DistributeRewardForEpochManagerFn{ + EpochID: new(big.Int).SetUint64(epochID), + Uptime: uptime, + EpochSize: new(big.Int).SetUint64(epochSize), + } +} diff --git a/consensus/polybft/stake_manager.go b/consensus/polybft/stake/stake_manager.go similarity index 66% rename from consensus/polybft/stake_manager.go rename to consensus/polybft/stake/stake_manager.go index 0bfc993d30..a12d86cdcf 100644 --- a/consensus/polybft/stake_manager.go +++ b/consensus/polybft/stake/stake_manager.go @@ -1,62 +1,58 @@ -package polybft +package stake import ( - "bytes" - "encoding/json" "errors" "fmt" "math/big" - "sort" - "strings" "github.com/0xPolygon/polygon-edge/bls" "github.com/0xPolygon/polygon-edge/consensus/polybft/bitmap" "github.com/0xPolygon/polygon-edge/consensus/polybft/contractsapi" + "github.com/0xPolygon/polygon-edge/consensus/polybft/state" + polytypes "github.com/0xPolygon/polygon-edge/consensus/polybft/types" "github.com/0xPolygon/polygon-edge/consensus/polybft/validator" - "github.com/0xPolygon/polygon-edge/helper/hex" "github.com/0xPolygon/polygon-edge/types" "github.com/Ethernal-Tech/ethgo" - "github.com/Ethernal-Tech/ethgo/abi" "github.com/Ethernal-Tech/ethgo/contract" "github.com/hashicorp/go-hclog" bolt "go.etcd.io/bbolt" ) var ( - bigZero = big.NewInt(0) - validatorTypeABI = abi.MustNewType("tuple(uint256[4] blsKey," + - "uint256 stake, bool isWhitelisted, bool isActive)") + bigZero = big.NewInt(0) errUnknownStakeManagerEvent = errors.New("unknown event from stake manager contract") + // error returned if full validator set does not exists in db + errNoFullValidatorSet = errors.New("full validator set not in db") ) // StakeManager interface 
provides functions for handling stake change of validators // and updating validator set based on changed stake type StakeManager interface { - EventSubscriber - PostBlock(req *PostBlockRequest) error + state.EventSubscriber + PostBlock(req *polytypes.PostBlockRequest) error UpdateValidatorSet(epoch uint64, maxValidatorSetSize uint64, currentValidatorSet validator.AccountSet) (*validator.ValidatorSetDelta, error) } -var _ StakeManager = (*dummyStakeManager)(nil) +var _ StakeManager = (*DummyStakeManager)(nil) -// dummyStakeManager is a dummy implementation of StakeManager interface +// DummyStakeManager is a dummy implementation of StakeManager interface // used only for unit testing -type dummyStakeManager struct{} +type DummyStakeManager struct{} -func (d *dummyStakeManager) PostBlock(req *PostBlockRequest) error { return nil } +func (d *DummyStakeManager) PostBlock(req *polytypes.PostBlockRequest) error { return nil } -func (d *dummyStakeManager) UpdateValidatorSet(epoch uint64, maxValidatorSetSize uint64, +func (d *DummyStakeManager) UpdateValidatorSet(epoch uint64, maxValidatorSetSize uint64, currentValidatorSet validator.AccountSet) (*validator.ValidatorSetDelta, error) { return &validator.ValidatorSetDelta{}, nil } // EventSubscriber implementation -func (d *dummyStakeManager) GetLogFilters() map[types.Address][]types.Hash { +func (d *DummyStakeManager) GetLogFilters() map[types.Address][]types.Hash { return make(map[types.Address][]types.Hash) } -func (d *dummyStakeManager) ProcessLog(header *types.Header, log *ethgo.Log, dbTx *bolt.Tx) error { +func (d *DummyStakeManager) ProcessLog(header *types.Header, log *ethgo.Log, dbTx *bolt.Tx) error { return nil } @@ -66,24 +62,29 @@ var _ StakeManager = (*stakeManager)(nil) // and calculates updated validator set based on changed stake type stakeManager struct { logger hclog.Logger - state *State + state *StakeStore stakeManagerContractAddr types.Address - polybftBackend polybftBackend - blockchain blockchainBackend + polybftBackend polytypes.Polybft + blockchain polytypes.Blockchain } -// newStakeManager returns a new instance of stake manager -func newStakeManager( +// NewStakeManager returns a new instance of stake manager +func NewStakeManager( logger hclog.Logger, - state *State, + state *state.State, stakeManagerAddr types.Address, - blockchain blockchainBackend, - polybftBackend polybftBackend, + blockchain polytypes.Blockchain, + polybftBackend polytypes.Polybft, dbTx *bolt.Tx, -) (*stakeManager, error) { +) (StakeManager, error) { + stakeStore, err := newStakeStoreWithTx(state.DB(), dbTx) + if err != nil { + return nil, err + } + sm := &stakeManager{ logger: logger, - state: state, + state: stakeStore, stakeManagerContractAddr: stakeManagerAddr, polybftBackend: polybftBackend, blockchain: blockchain, @@ -104,7 +105,7 @@ func newStakeManager( // PostBlock is called on every insert of finalized block (either from consensus or syncer) // It will update the fullValidatorSet in db to the current block number // Note that EventSubscriber - AddLog will get all the transfer events that happened in block -func (s *stakeManager) PostBlock(req *PostBlockRequest) error { +func (s *stakeManager) PostBlock(req *polytypes.PostBlockRequest) error { fullValidatorSet, err := s.getOrInitValidatorSet(req.DBTx) if err != nil { return err @@ -122,30 +123,30 @@ func (s *stakeManager) PostBlock(req *PostBlockRequest) error { fullValidatorSet.EpochID = req.Epoch fullValidatorSet.BlockNumber = blockNumber - return 
s.state.StakeStore.insertFullValidatorSet(fullValidatorSet, req.DBTx) + return s.state.insertFullValidatorSet(fullValidatorSet, req.DBTx) } -func (s *stakeManager) getOrInitValidatorSet(dbTx *bolt.Tx) (validatorSetState, error) { - validatorSet, err := s.state.StakeStore.getFullValidatorSet(dbTx) +func (s *stakeManager) getOrInitValidatorSet(dbTx *bolt.Tx) (validator.ValidatorSetState, error) { + validatorSet, err := s.state.getFullValidatorSet(dbTx) if err != nil { if !errors.Is(err, errNoFullValidatorSet) { - return validatorSetState{}, err + return validator.ValidatorSetState{}, err } validators, err := s.polybftBackend.GetValidatorsWithTx(0, nil, dbTx) if err != nil { - return validatorSetState{}, err + return validator.ValidatorSetState{}, err } - validatorSet = validatorSetState{ + validatorSet = validator.ValidatorSetState{ BlockNumber: 0, EpochID: 0, UpdatedAtBlockNumber: 0, - Validators: newValidatorStakeMap(validators), + Validators: validator.NewValidatorStakeMap(validators), } - if err = s.state.StakeStore.insertFullValidatorSet(validatorSet, dbTx); err != nil { - return validatorSetState{}, err + if err = s.state.insertFullValidatorSet(validatorSet, dbTx); err != nil { + return validator.ValidatorSetState{}, err } } @@ -153,7 +154,7 @@ func (s *stakeManager) getOrInitValidatorSet(dbTx *bolt.Tx) (validatorSetState, } func (s *stakeManager) updateWithReceipts( - fullValidatorSet *validatorSetState, + fullValidatorSet *validator.ValidatorSetState, events []contractsapi.EventAbi, blockNumber uint64) error { if len(events) == 0 { @@ -165,11 +166,11 @@ func (s *stakeManager) updateWithReceipts( case *contractsapi.StakeAddedEvent: s.logger.Debug("Stake added event", "to", stakeEvent.Validator, "amount", stakeEvent.Amount) - fullValidatorSet.Validators.addStake(stakeEvent.Validator, stakeEvent.Amount) + fullValidatorSet.Validators.AddStake(stakeEvent.Validator, stakeEvent.Amount) case *contractsapi.StakeRemovedEvent: s.logger.Debug("Stake removed event", "from", stakeEvent.Validator, "value", stakeEvent.Amount) - fullValidatorSet.Validators.removeStake(stakeEvent.Validator, stakeEvent.Amount) + fullValidatorSet.Validators.RemoveStake(stakeEvent.Validator, stakeEvent.Amount) default: // this should not happen, but lets log it if it does s.logger.Warn("Found a stake event that represents neither stake nor unstake") @@ -204,7 +205,7 @@ func (s *stakeManager) UpdateValidatorSet(epoch uint64, maxValidatorSetSize uint oldValidatorSet validator.AccountSet) (*validator.ValidatorSetDelta, error) { s.logger.Info("Calculating validators set update...", "epoch", epoch) - fullValidatorSet, err := s.state.StakeStore.getFullValidatorSet(nil) + fullValidatorSet, err := s.state.getFullValidatorSet(nil) if err != nil { return nil, fmt.Errorf("failed to get full validators set. Epoch: %d. 
Error: %w", epoch, err) } @@ -213,7 +214,7 @@ func (s *stakeManager) UpdateValidatorSet(epoch uint64, maxValidatorSetSize uint stakeMap := fullValidatorSet.Validators // slice of all validator set - newValidatorSet := stakeMap.getSorted(int(maxValidatorSetSize)) + newValidatorSet := stakeMap.GetSorted(int(maxValidatorSetSize)) // set of all addresses that will be in next validator set addressesSet := make(map[types.Address]struct{}, len(newValidatorSet)) @@ -368,101 +369,5 @@ func (s *stakeManager) ProcessLog(header *types.Header, log *ethgo.Log, dbTx *bo return err } - return s.state.StakeStore.insertFullValidatorSet(fullValidatorSet, dbTx) -} - -type validatorSetState struct { - BlockNumber uint64 `json:"block"` - EpochID uint64 `json:"epoch"` - UpdatedAtBlockNumber uint64 `json:"updated_at_block"` - Validators validatorStakeMap `json:"validators"` -} - -func (vs validatorSetState) Marshal() ([]byte, error) { - return json.Marshal(vs) -} - -func (vs *validatorSetState) Unmarshal(b []byte) error { - return json.Unmarshal(b, vs) -} - -// validatorStakeMap holds ValidatorMetadata for each validator address -type validatorStakeMap map[types.Address]*validator.ValidatorMetadata - -// newValidatorStakeMap returns a new instance of validatorStakeMap -func newValidatorStakeMap(validatorSet validator.AccountSet) validatorStakeMap { - stakeMap := make(validatorStakeMap, len(validatorSet)) - - for _, v := range validatorSet { - stakeMap[v.Address] = v.Copy() - } - - return stakeMap -} - -// addStake adds given amount to a validator defined by address -func (sc *validatorStakeMap) addStake(address types.Address, amount *big.Int) { - if metadata, exists := (*sc)[address]; exists { - metadata.VotingPower.Add(metadata.VotingPower, amount) - metadata.IsActive = metadata.VotingPower.Cmp(bigZero) > 0 - } else { - (*sc)[address] = &validator.ValidatorMetadata{ - VotingPower: new(big.Int).Set(amount), - Address: address, - IsActive: amount.Cmp(bigZero) > 0, - } - } -} - -// removeStake removes given amount from validator defined by address -func (sc *validatorStakeMap) removeStake(address types.Address, amount *big.Int) { - stakeData := (*sc)[address] - stakeData.VotingPower.Sub(stakeData.VotingPower, amount) - stakeData.IsActive = stakeData.VotingPower.Cmp(bigZero) > 0 -} - -// getSorted returns validators (*ValidatorMetadata) in sorted order -func (sc validatorStakeMap) getSorted(maxValidatorSetSize int) validator.AccountSet { - activeValidators := make(validator.AccountSet, 0, len(sc)) - - for _, v := range sc { - if v.VotingPower.Cmp(bigZero) > 0 { - activeValidators = append(activeValidators, v) - } - } - - sort.Slice(activeValidators, func(i, j int) bool { - v1, v2 := activeValidators[i], activeValidators[j] - - switch v1.VotingPower.Cmp(v2.VotingPower) { - case 1: - return true - case 0: - return bytes.Compare(v1.Address[:], v2.Address[:]) < 0 - default: - return false - } - }) - - if len(activeValidators) <= maxValidatorSetSize { - return activeValidators - } - - return activeValidators[:maxValidatorSetSize] -} - -func (sc validatorStakeMap) String() string { - var sb strings.Builder - - for _, x := range sc.getSorted(len(sc)) { - bls := "" - if x.BlsKey != nil { - bls = hex.EncodeToString(x.BlsKey.Marshal()) - } - - sb.WriteString(fmt.Sprintf("%s:%s:%s:%t\n", - x.Address, x.VotingPower, bls, x.IsActive)) - } - - return sb.String() + return s.state.insertFullValidatorSet(fullValidatorSet, dbTx) } diff --git a/consensus/polybft/state_store_stake.go b/consensus/polybft/stake/state_store_stake.go 
similarity index 69% rename from consensus/polybft/state_store_stake.go rename to consensus/polybft/stake/state_store_stake.go index 1734b63760..23d6564a58 100644 --- a/consensus/polybft/state_store_stake.go +++ b/consensus/polybft/stake/state_store_stake.go @@ -1,9 +1,9 @@ -package polybft +package stake import ( - "errors" "fmt" + "github.com/0xPolygon/polygon-edge/consensus/polybft/validator" bolt "go.etcd.io/bbolt" ) @@ -12,27 +12,42 @@ var ( validatorSetBucket = []byte("fullValidatorSetBucket") // key of the full validator set in bucket fullValidatorSetKey = []byte("fullValidatorSet") - // error returned if full validator set does not exists in db - errNoFullValidatorSet = errors.New("full validator set not in db") ) type StakeStore struct { db *bolt.DB } -// initialize creates necessary buckets in DB if they don't already exist -func (s *StakeStore) initialize(tx *bolt.Tx) error { - if _, err := tx.CreateBucketIfNotExists(validatorSetBucket); err != nil { - return fmt.Errorf("failed to create bucket=%s: %w", string(epochsBucket), err) +func newStakeStore(db *bolt.DB) (*StakeStore, error) { + var store *StakeStore + err := db.Update(func(tx *bolt.Tx) error { + s, err := newStakeStoreWithTx(db, tx) + if err != nil { + return err + } + + store = s + + return nil + }) + + return store, err +} + +func newStakeStoreWithTx(db *bolt.DB, dbTx *bolt.Tx) (*StakeStore, error) { + store := &StakeStore{db: db} + + if _, err := dbTx.CreateBucketIfNotExists(validatorSetBucket); err != nil { + return nil, fmt.Errorf("failed to create bucket=%s: %w", string(validatorSetBucket), err) } - return nil + return store, nil } // insertFullValidatorSet inserts full validator set to its bucket (or updates it if exists) // If the passed tx is already open (not nil), it will use it to insert full validator set // If the passed tx is not open (it is nil), it will open a new transaction on db and insert full validator set -func (s *StakeStore) insertFullValidatorSet(fullValidatorSet validatorSetState, dbTx *bolt.Tx) error { +func (s *StakeStore) insertFullValidatorSet(fullValidatorSet validator.ValidatorSetState, dbTx *bolt.Tx) error { insertFn := func(tx *bolt.Tx) error { raw, err := fullValidatorSet.Marshal() if err != nil { @@ -54,9 +69,9 @@ func (s *StakeStore) insertFullValidatorSet(fullValidatorSet validatorSetState, // getFullValidatorSet returns full validator set from its bucket if exists // If the passed tx is already open (not nil), it will use it to get full validator set // If the passed tx is not open (it is nil), it will open a new transaction on db and get full validator set -func (s *StakeStore) getFullValidatorSet(dbTx *bolt.Tx) (validatorSetState, error) { +func (s *StakeStore) getFullValidatorSet(dbTx *bolt.Tx) (validator.ValidatorSetState, error) { var ( - fullValidatorSet validatorSetState + fullValidatorSet validator.ValidatorSetState err error ) diff --git a/consensus/polybft/state_store_stake_test.go b/consensus/polybft/stake/state_store_stake_test.go similarity index 51% rename from consensus/polybft/state_store_stake_test.go rename to consensus/polybft/stake/state_store_stake_test.go index 758f9f9b14..b4089701f3 100644 --- a/consensus/polybft/state_store_stake_test.go +++ b/consensus/polybft/stake/state_store_stake_test.go @@ -1,18 +1,53 @@ -package polybft +package stake import ( + "fmt" + "os" + "path" "testing" + "time" "github.com/0xPolygon/polygon-edge/consensus/polybft/validator" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + bolt "go.etcd.io/bbolt" ) 
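Both StakeStore methods in this diff accept an optional *bolt.Tx: when the caller passes nil they open their own read-write transaction, otherwise the write joins the caller's transaction so several stores can commit atomically. A minimal sketch of that pattern, assuming a placeholder bucket and key rather than the package's real ones:

```go
// Minimal sketch of the "reuse the caller's transaction or open one" pattern
// used by insertFullValidatorSet and getFullValidatorSet. Bucket and key
// names are placeholders.
package main

import (
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
)

var (
	exampleBucket = []byte("exampleBucket")
	exampleKey    = []byte("exampleKey")
)

// putValue writes raw under exampleKey. If dbTx is nil, a new read-write
// transaction is opened; otherwise the write joins the caller's transaction.
func putValue(db *bolt.DB, raw []byte, dbTx *bolt.Tx) error {
	putFn := func(tx *bolt.Tx) error {
		return tx.Bucket(exampleBucket).Put(exampleKey, raw)
	}

	if dbTx == nil {
		return db.Update(putFn)
	}

	return putFn(dbTx)
}

func main() {
	db, err := bolt.Open("/tmp/optional-tx-example.db", 0666, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// create the bucket up front, mirroring newStakeStoreWithTx
	if err := db.Update(func(tx *bolt.Tx) error {
		_, err := tx.CreateBucketIfNotExists(exampleBucket)
		return err
	}); err != nil {
		log.Fatal(err)
	}

	// standalone write: putValue opens its own transaction
	if err := putValue(db, []byte("standalone"), nil); err != nil {
		log.Fatal(err)
	}

	// write as part of a larger, caller-managed transaction
	if err := db.Update(func(tx *bolt.Tx) error {
		return putValue(db, []byte("joined"), tx)
	}); err != nil {
		log.Fatal(err)
	}

	fmt.Println("both writes committed")
}
```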
+// newTestState creates new instance of state used by tests. +func newTestState(tb testing.TB) *StakeStore { + tb.Helper() + + dir := fmt.Sprintf("/tmp/consensus-temp_%v", time.Now().UTC().Format(time.RFC3339Nano)) + err := os.Mkdir(dir, 0775) + + if err != nil { + tb.Fatal(err) + } + + tb.Cleanup(func() { + if err := os.RemoveAll(dir); err != nil { + tb.Fatal(err) + } + }) + + db, err := bolt.Open(path.Join(dir, "my.db"), 0666, nil) + if err != nil { + tb.Fatal(err) + } + + stakeSTore, err := newStakeStore(db) + if err != nil { + tb.Fatal(err) + } + + return stakeSTore +} + func TestState_Insert_And_Get_FullValidatorSet(t *testing.T) { state := newTestState(t) t.Run("No full validator set", func(t *testing.T) { - _, err := state.StakeStore.getFullValidatorSet(nil) + _, err := state.getFullValidatorSet(nil) require.ErrorIs(t, err, errNoFullValidatorSet) }) @@ -20,13 +55,13 @@ func TestState_Insert_And_Get_FullValidatorSet(t *testing.T) { t.Run("Insert validator set", func(t *testing.T) { validators := validator.NewTestValidators(t, 5).GetPublicIdentities() - assert.NoError(t, state.StakeStore.insertFullValidatorSet(validatorSetState{ + assert.NoError(t, state.insertFullValidatorSet(validator.ValidatorSetState{ BlockNumber: 100, EpochID: 10, - Validators: newValidatorStakeMap(validators), + Validators: validator.NewValidatorStakeMap(validators), }, nil)) - fullValidatorSet, err := state.StakeStore.getFullValidatorSet(nil) + fullValidatorSet, err := state.getFullValidatorSet(nil) require.NoError(t, err) assert.Equal(t, uint64(100), fullValidatorSet.BlockNumber) assert.Equal(t, uint64(10), fullValidatorSet.EpochID) @@ -36,13 +71,13 @@ func TestState_Insert_And_Get_FullValidatorSet(t *testing.T) { t.Run("Update validator set", func(t *testing.T) { validators := validator.NewTestValidators(t, 10).GetPublicIdentities() - assert.NoError(t, state.StakeStore.insertFullValidatorSet(validatorSetState{ + assert.NoError(t, state.insertFullValidatorSet(validator.ValidatorSetState{ BlockNumber: 40, EpochID: 4, - Validators: newValidatorStakeMap(validators), + Validators: validator.NewValidatorStakeMap(validators), }, nil)) - fullValidatorSet, err := state.StakeStore.getFullValidatorSet(nil) + fullValidatorSet, err := state.getFullValidatorSet(nil) require.NoError(t, err) assert.Len(t, fullValidatorSet.Validators, len(validators)) assert.Equal(t, uint64(40), fullValidatorSet.BlockNumber) diff --git a/consensus/polybft/stake_manager_fuzz_test.go b/consensus/polybft/stake_manager_fuzz_test.go deleted file mode 100644 index 4129de43f2..0000000000 --- a/consensus/polybft/stake_manager_fuzz_test.go +++ /dev/null @@ -1,237 +0,0 @@ -package polybft - -import ( - "encoding/json" - "errors" - "fmt" - "math/big" - "reflect" - "testing" - - "github.com/0xPolygon/polygon-edge/consensus/polybft/validator" - "github.com/0xPolygon/polygon-edge/types" - "github.com/hashicorp/go-hclog" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" -) - -type postBlockStructF struct { - EpochID uint64 - ValidatorID uint64 - BlockID uint64 - StakeValue uint64 -} - -type updateValidatorSetF struct { - EpochID uint64 - Index uint64 - VotingPower int64 -} - -func FuzzTestStakeManagerPostBlock(f *testing.F) { - var ( - allAliases = []string{"A", "B", "C", "D", "E", "F"} - initialSetAliases = []string{"A", "B", "C", "D", "E"} - validators = validator.NewTestValidatorsWithAliases(f, allAliases) - state = newTestState(f) - ) - - seeds := []postBlockStructF{ - { - EpochID: 0, - ValidatorID: 1, - BlockID: 1, - 
StakeValue: 30, - }, - { - EpochID: 5, - ValidatorID: 30, - BlockID: 4, - StakeValue: 60, - }, - { - EpochID: 1, - ValidatorID: 42, - BlockID: 11, - StakeValue: 70, - }, - { - EpochID: 7, - ValidatorID: 1, - BlockID: 2, - StakeValue: 10, - }, - } - - for _, seed := range seeds { - data, err := json.Marshal(seed) - if err != nil { - return - } - - f.Add(data) - } - - f.Fuzz(func(t *testing.T, input []byte) { - t.Parallel() - - var data postBlockStructF - if err := json.Unmarshal(input, &data); err != nil { - t.Skip(err) - } - - if err := ValidateStruct(data); err != nil { - t.Skip(err) - } - - if data.ValidatorID > uint64(len(initialSetAliases)-1) { - t.Skip() - } - - validatorSetAddr := types.StringToAddress("0x0001") - - bcMock := new(blockchainMock) - for i := 0; i < int(data.BlockID); i++ { - bcMock.On("CurrentHeader").Return(&types.Header{Number: 0}) - bcMock.On("GetHeaderByNumber", mock.Anything).Return(&types.Header{Hash: types.Hash{6, 4}}, true).Once() - bcMock.On("GetReceiptsByHash", mock.Anything).Return([]*types.Receipt{{}}, error(nil)).Once() - } - - // insert initial full validator set - require.NoError(t, state.StakeStore.insertFullValidatorSet(validatorSetState{ - Validators: newValidatorStakeMap(validators.GetPublicIdentities(initialSetAliases...)), - }, nil)) - - stakeManager, err := newStakeManager( - hclog.NewNullLogger(), - state, - types.StringToAddress("0x0002"), - bcMock, - nil, - nil, - ) - require.NoError(t, err) - - header := &types.Header{Number: data.BlockID} - require.NoError(t, stakeManager.ProcessLog(header, convertLog(createTestLogForStakeAddedEvent( - t, - validatorSetAddr, - validators.GetValidator(initialSetAliases[data.ValidatorID]).Address(), - data.StakeValue, - )), nil)) - - require.NoError(t, stakeManager.PostBlock(&PostBlockRequest{ - FullBlock: &types.FullBlock{Block: &types.Block{Header: &types.Header{Number: data.BlockID}}}, - Epoch: data.EpochID, - })) - }) -} - -func FuzzTestStakeManagerUpdateValidatorSet(f *testing.F) { - var ( - aliases = []string{"A", "B", "C", "D", "E"} - stakes = []uint64{10, 10, 10, 10, 10} - maxValidatorSetSize = uint64(10) - ) - - validators := validator.NewTestValidatorsWithAliases(f, aliases, stakes) - state := newTestState(f) - - bcMock := new(blockchainMock) - bcMock.On("CurrentHeader").Return(&types.Header{Number: 0}) - - err := state.StakeStore.insertFullValidatorSet(validatorSetState{ - Validators: newValidatorStakeMap(validators.GetPublicIdentities())}, nil) - require.NoError(f, err) - - stakeManager, err := newStakeManager( - hclog.NewNullLogger(), - state, - types.StringToAddress("0x0001"), - bcMock, - nil, - nil, - ) - require.NoError(f, err) - - seeds := []updateValidatorSetF{ - { - EpochID: 0, - Index: 1, - VotingPower: 30, - }, - { - EpochID: 1, - Index: 4, - VotingPower: 1, - }, - { - EpochID: 2, - Index: 3, - VotingPower: -2, - }, - } - - for _, seed := range seeds { - data, err := json.Marshal(seed) - if err != nil { - return - } - - f.Add(data) - } - - f.Fuzz(func(t *testing.T, input []byte) { - var data updateValidatorSetF - if err := json.Unmarshal(input, &data); err != nil { - t.Skip(err) - } - - if err := ValidateStruct(data); err != nil { - t.Skip(err) - } - - if data.Index > uint64(len(aliases)-1) { - t.Skip() - } - - err := state.StakeStore.insertFullValidatorSet(validatorSetState{ - Validators: newValidatorStakeMap(validators.GetPublicIdentities())}, nil) - require.NoError(t, err) - - _, err = stakeManager.UpdateValidatorSet(data.EpochID, maxValidatorSetSize, - 
validators.GetPublicIdentities(aliases[data.Index:]...)) - require.NoError(t, err) - - fullValidatorSet := validators.GetPublicIdentities().Copy() - validatorToUpdate := fullValidatorSet[data.Index] - validatorToUpdate.VotingPower = big.NewInt(data.VotingPower) - - _, err = stakeManager.UpdateValidatorSet(data.EpochID, maxValidatorSetSize, - validators.GetPublicIdentities()) - require.NoError(t, err) - }) -} - -func ValidateStruct(s interface{}) (err error) { - structType := reflect.TypeOf(s) - if structType.Kind() != reflect.Struct { - return errors.New("input param should be a struct") - } - - structVal := reflect.ValueOf(s) - fieldNum := structVal.NumField() - - for i := 0; i < fieldNum; i++ { - field := structVal.Field(i) - fieldName := structType.Field(i).Name - - isSet := field.IsValid() && !field.IsZero() - - if !isSet { - err = fmt.Errorf("%w%s is not set; ", err, fieldName) - } - } - - return err -} diff --git a/consensus/polybft/stake_manager_test.go b/consensus/polybft/stake_manager_test.go deleted file mode 100644 index 50a9beb5dd..0000000000 --- a/consensus/polybft/stake_manager_test.go +++ /dev/null @@ -1,543 +0,0 @@ -package polybft - -import ( - "math/big" - "testing" - - "github.com/0xPolygon/polygon-edge/consensus/polybft/contractsapi" - "github.com/0xPolygon/polygon-edge/consensus/polybft/validator" - "github.com/0xPolygon/polygon-edge/crypto" - "github.com/0xPolygon/polygon-edge/helper/hex" - "github.com/0xPolygon/polygon-edge/jsonrpc" - "github.com/0xPolygon/polygon-edge/txrelayer" - "github.com/0xPolygon/polygon-edge/types" - "github.com/Ethernal-Tech/ethgo" - "github.com/Ethernal-Tech/ethgo/abi" - "github.com/hashicorp/go-hclog" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" -) - -func TestStakeManager_PostBlock(t *testing.T) { - t.Parallel() - - var ( - allAliases = []string{"A", "B", "C", "D", "E", "F"} - initialSetAliases = []string{"A", "B", "C", "D", "E"} - epoch = uint64(1) - block = uint64(10) - newStake = uint64(100) - firstValidator = uint64(0) - secondValidator = uint64(1) - stakeManagerAddr = types.StringToAddress("0x0001") - ) - - t.Run("PostBlock - unstake to zero", func(t *testing.T) { - t.Parallel() - - state := newTestState(t) - - bcMock := new(blockchainMock) - bcMock.On("CurrentHeader").Return(&types.Header{Number: block - 1}, true).Once() - bcMock.On("GetStateProviderForBlock", mock.Anything).Return(nil).Times(len(allAliases)) - - validators := validator.NewTestValidatorsWithAliases(t, allAliases) - - // insert initial full validator set - require.NoError(t, state.StakeStore.insertFullValidatorSet(validatorSetState{ - Validators: newValidatorStakeMap(validators.GetPublicIdentities(initialSetAliases...)), - BlockNumber: block - 1, - }, nil)) - - stakeManager, err := newStakeManager( - hclog.NewNullLogger(), - state, - stakeManagerAddr, - bcMock, - nil, - nil, - ) - require.NoError(t, err) - - header := &types.Header{Number: block} - - require.NoError(t, stakeManager.ProcessLog(header, convertLog(createTestLogForStakeRemovedEvent( - t, - stakeManagerAddr, - validators.GetValidator(initialSetAliases[firstValidator]).Address(), - 1, // initial validator stake was 1 - )), nil)) - - req := &PostBlockRequest{ - FullBlock: &types.FullBlock{Block: &types.Block{Header: header}}, - Epoch: epoch, - } - - require.NoError(t, stakeManager.PostBlock(req)) - - fullValidatorSet, err := state.StakeStore.getFullValidatorSet(nil) - require.NoError(t, err) - - var firstValidatorMeta *validator.ValidatorMetadata - - for _, validator := range 
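The deleted fuzz tests above seed the corpus with JSON-encoded structs, decode each fuzz input back into the struct, and skip inputs that fail to decode or validate. A minimal, self-contained sketch of that pattern with a hypothetical input struct:

```go
// Hedged sketch of the JSON-seeded fuzzing pattern from the deleted fuzz
// tests. The struct and validation rule here are hypothetical.
package fuzzexample

import (
	"encoding/json"
	"testing"
)

type seedInput struct {
	EpochID uint64
	BlockID uint64
}

func FuzzEpochInput(f *testing.F) {
	// seed the corpus with well-formed JSON payloads
	for _, seed := range []seedInput{{EpochID: 1, BlockID: 10}, {EpochID: 5, BlockID: 4}} {
		data, err := json.Marshal(seed)
		if err != nil {
			f.Fatal(err)
		}

		f.Add(data)
	}

	f.Fuzz(func(t *testing.T, input []byte) {
		var data seedInput
		if err := json.Unmarshal(input, &data); err != nil {
			t.Skip(err) // not valid JSON for the struct; ignore this input
		}

		if data.BlockID == 0 {
			t.Skip("zero block id is not a meaningful case")
		}

		// exercise the code under test with the decoded values here
		_ = data
	})
}
```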
fullValidatorSet.Validators { - if validator.Address.String() == validators.GetValidator(initialSetAliases[firstValidator]).Address().String() { - firstValidatorMeta = validator - } - } - - require.NotNil(t, firstValidatorMeta) - require.Equal(t, bigZero, firstValidatorMeta.VotingPower) - require.False(t, firstValidatorMeta.IsActive) - }) - t.Run("PostBlock - add stake to one validator", func(t *testing.T) { - t.Parallel() - - state := newTestState(t) - - bcMock := new(blockchainMock) - bcMock.On("CurrentHeader").Return(&types.Header{Number: block - 1}, true).Once() - - validators := validator.NewTestValidatorsWithAliases(t, allAliases) - - // insert initial full validator set - require.NoError(t, state.StakeStore.insertFullValidatorSet(validatorSetState{ - Validators: newValidatorStakeMap(validators.GetPublicIdentities(initialSetAliases...)), - BlockNumber: block - 1, - }, nil)) - - stakeManager, err := newStakeManager( - hclog.NewNullLogger(), - state, - types.StringToAddress("0x0001"), - bcMock, - nil, - nil, - ) - require.NoError(t, err) - - header := &types.Header{Number: block} - require.NoError(t, stakeManager.ProcessLog(header, convertLog(createTestLogForStakeAddedEvent( - t, - stakeManagerAddr, - validators.GetValidator(initialSetAliases[secondValidator]).Address(), - 250, - )), nil)) - - req := &PostBlockRequest{ - FullBlock: &types.FullBlock{Block: &types.Block{Header: header}}, - Epoch: epoch, - } - - require.NoError(t, stakeManager.PostBlock(req)) - - fullValidatorSet, err := state.StakeStore.getFullValidatorSet(nil) - require.NoError(t, err) - - var firstValidator *validator.ValidatorMetadata - - for _, validator := range fullValidatorSet.Validators { - if validator.Address.String() == validators.GetValidator(initialSetAliases[secondValidator]).Address().String() { - firstValidator = validator - } - } - - require.NotNil(t, firstValidator) - require.Equal(t, big.NewInt(251), firstValidator.VotingPower) // 250 + initial 1 - require.True(t, firstValidator.IsActive) - }) - - t.Run("PostBlock - add validator and stake", func(t *testing.T) { - t.Parallel() - - state := newTestState(t) - validators := validator.NewTestValidatorsWithAliases(t, allAliases, []uint64{1, 2, 3, 4, 5, 6}) - - txRelayerMock := newDummyStakeTxRelayer(t, func() *validator.ValidatorMetadata { - return validators.GetValidator("F").ValidatorMetadata() - }) - // just mock the call however, the dummy relayer should do its magic - txRelayerMock.On("Call", mock.Anything, mock.Anything, mock.Anything). 
- Return(nil, error(nil)) - - bcMock := new(blockchainMock) - bcMock.On("CurrentHeader").Return(&types.Header{Number: block - 1}, true) - bcMock.On("GetStateProviderForBlock", mock.Anything).Return(nil).Times(len(allAliases)) - - // insert initial full validator set - require.NoError(t, state.StakeStore.insertFullValidatorSet(validatorSetState{ - Validators: newValidatorStakeMap(validators.GetPublicIdentities(initialSetAliases...)), - BlockNumber: block - 1, - }, nil)) - - stakeManager, err := newStakeManager( - hclog.NewNullLogger(), - state, - types.StringToAddress("0x0001"), - bcMock, - nil, - nil, - ) - require.NoError(t, err) - - header := &types.Header{Number: block} - - for i := 0; i < len(allAliases); i++ { - require.NoError(t, stakeManager.ProcessLog(header, convertLog(createTestLogForStakeAddedEvent( - t, - stakeManagerAddr, - validators.GetValidator(allAliases[i]).Address(), - newStake, - )), nil)) - } - - req := &PostBlockRequest{ - FullBlock: &types.FullBlock{Block: &types.Block{Header: header}}, - Epoch: epoch, - } - - require.NoError(t, stakeManager.PostBlock(req)) - - fullValidatorSet, err := state.StakeStore.getFullValidatorSet(nil) - require.NoError(t, err) - require.Len(t, fullValidatorSet.Validators, len(allAliases)) - - validatorsCount := validators.ToValidatorSet().Len() - for i, v := range fullValidatorSet.Validators.getSorted(validatorsCount) { - require.Equal(t, newStake+uint64(validatorsCount)-uint64(i)-1, v.VotingPower.Uint64()) - } - }) -} - -func TestStakeManager_UpdateValidatorSet(t *testing.T) { - var ( - aliases = []string{"A", "B", "C", "D", "E"} - stakes = []uint64{10, 10, 10, 10, 10} - epoch = uint64(1) - maxValidatorSetSize = uint64(10) - ) - - validators := validator.NewTestValidatorsWithAliases(t, aliases, stakes) - state := newTestState(t) - - bcMock := new(blockchainMock) - bcMock.On("CurrentHeader").Return(&types.Header{Number: 0}, true).Once() - - require.NoError(t, state.StakeStore.insertFullValidatorSet(validatorSetState{ - Validators: newValidatorStakeMap(validators.ToValidatorSet().Accounts()), - }, nil)) - - stakeManager, err := newStakeManager( - hclog.NewNullLogger(), - state, - types.StringToAddress("0x0001"), - bcMock, - nil, - nil, - ) - require.NoError(t, err) - - t.Run("UpdateValidatorSet - only update", func(t *testing.T) { - fullValidatorSet := validators.GetPublicIdentities().Copy() - validatorToUpdate := fullValidatorSet[0] - validatorToUpdate.VotingPower = big.NewInt(11) - - require.NoError(t, state.StakeStore.insertFullValidatorSet(validatorSetState{ - Validators: newValidatorStakeMap(fullValidatorSet), - }, nil)) - - updateDelta, err := stakeManager.UpdateValidatorSet(epoch, maxValidatorSetSize, - validators.GetPublicIdentities()) - require.NoError(t, err) - - require.Len(t, updateDelta.Added, 0) - require.Len(t, updateDelta.Updated, 1) - require.Len(t, updateDelta.Removed, 0) - require.Equal(t, updateDelta.Updated[0].Address, validatorToUpdate.Address) - require.Equal(t, updateDelta.Updated[0].VotingPower.Uint64(), validatorToUpdate.VotingPower.Uint64()) - }) - - t.Run("UpdateValidatorSet - one unstake", func(t *testing.T) { - fullValidatorSet := validators.GetPublicIdentities(aliases[1:]...) 
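The UpdateValidatorSet assertions above and below check only the shape of the returned delta (Added, Updated, Removed). As a reading aid, here is a hedged sketch of how such a delta can be derived by diffing two sets keyed by address; it uses local stand-in types and does not reproduce the package's actual implementation:

```go
// Hedged sketch of deriving an Added/Updated/Removed delta between two
// validator sets, keyed by address. Types are local stand-ins, not the
// package's ValidatorSetDelta or ValidatorMetadata.
package main

import "fmt"

type member struct {
	Address     string
	VotingPower uint64
}

type delta struct {
	Added   []member
	Updated []member
	Removed []string
}

func computeDelta(oldSet, newSet []member) delta {
	oldByAddr := make(map[string]member, len(oldSet))
	for _, m := range oldSet {
		oldByAddr[m.Address] = m
	}

	var d delta

	newAddrs := make(map[string]struct{}, len(newSet))
	for _, m := range newSet {
		newAddrs[m.Address] = struct{}{}

		prev, existed := oldByAddr[m.Address]
		switch {
		case !existed:
			d.Added = append(d.Added, m)
		case prev.VotingPower != m.VotingPower:
			d.Updated = append(d.Updated, m)
		}
	}

	// anything present before but missing from the new set is removed
	for _, m := range oldSet {
		if _, stillThere := newAddrs[m.Address]; !stillThere {
			d.Removed = append(d.Removed, m.Address)
		}
	}

	return d
}

func main() {
	oldSet := []member{{"A", 10}, {"B", 10}, {"C", 10}}
	newSet := []member{{"A", 11}, {"B", 10}, {"D", 8}}

	d := computeDelta(oldSet, newSet)
	fmt.Printf("added=%d updated=%d removed=%d\n", len(d.Added), len(d.Updated), len(d.Removed))
	// Output: added=1 updated=1 removed=1
}
```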
- - require.NoError(t, state.StakeStore.insertFullValidatorSet(validatorSetState{ - Validators: newValidatorStakeMap(fullValidatorSet), - }, nil)) - - updateDelta, err := stakeManager.UpdateValidatorSet(epoch+1, maxValidatorSetSize, - validators.GetPublicIdentities()) - - require.NoError(t, err) - require.Len(t, updateDelta.Added, 0) - require.Len(t, updateDelta.Updated, 0) - require.Len(t, updateDelta.Removed, 1) - }) - - t.Run("UpdateValidatorSet - one new validator", func(t *testing.T) { - addedValidator := validators.GetValidator("A") - - require.NoError(t, state.StakeStore.insertFullValidatorSet(validatorSetState{ - Validators: newValidatorStakeMap(validators.GetPublicIdentities()), - }, nil)) - - updateDelta, err := stakeManager.UpdateValidatorSet(epoch+2, maxValidatorSetSize, - validators.GetPublicIdentities(aliases[1:]...)) - - require.NoError(t, err) - require.Len(t, updateDelta.Added, 1) - require.Len(t, updateDelta.Updated, 0) - require.Len(t, updateDelta.Removed, 0) - require.Equal(t, addedValidator.Address(), updateDelta.Added[0].Address) - require.Equal(t, addedValidator.VotingPower, updateDelta.Added[0].VotingPower.Uint64()) - }) - t.Run("UpdateValidatorSet - remove some stake", func(t *testing.T) { - fullValidatorSet := validators.GetPublicIdentities().Copy() - validatorToUpdate := fullValidatorSet[2] - validatorToUpdate.VotingPower = big.NewInt(5) - - require.NoError(t, state.StakeStore.insertFullValidatorSet(validatorSetState{ - Validators: newValidatorStakeMap(fullValidatorSet), - }, nil)) - - updateDelta, err := stakeManager.UpdateValidatorSet(epoch+3, maxValidatorSetSize, - validators.GetPublicIdentities()) - - require.NoError(t, err) - require.Len(t, updateDelta.Added, 0) - require.Len(t, updateDelta.Updated, 1) - require.Len(t, updateDelta.Removed, 0) - require.Equal(t, updateDelta.Updated[0].Address, validatorToUpdate.Address) - require.Equal(t, updateDelta.Updated[0].VotingPower.Uint64(), validatorToUpdate.VotingPower.Uint64()) - }) - t.Run("UpdateValidatorSet - remove entire stake", func(t *testing.T) { - fullValidatorSet := validators.GetPublicIdentities().Copy() - validatorToUpdate := fullValidatorSet[3] - validatorToUpdate.VotingPower = bigZero - - require.NoError(t, state.StakeStore.insertFullValidatorSet(validatorSetState{ - Validators: newValidatorStakeMap(fullValidatorSet), - }, nil)) - - updateDelta, err := stakeManager.UpdateValidatorSet(epoch+4, maxValidatorSetSize, - validators.GetPublicIdentities()) - - require.NoError(t, err) - require.Len(t, updateDelta.Added, 0) - require.Len(t, updateDelta.Updated, 0) - require.Len(t, updateDelta.Removed, 1) - }) - t.Run("UpdateValidatorSet - voting power negative", func(t *testing.T) { - fullValidatorSet := validators.GetPublicIdentities().Copy() - validatorsToUpdate := fullValidatorSet[4] - validatorsToUpdate.VotingPower = bigZero - - require.NoError(t, state.StakeStore.insertFullValidatorSet(validatorSetState{ - Validators: newValidatorStakeMap(fullValidatorSet), - }, nil)) - - updateDelta, err := stakeManager.UpdateValidatorSet(epoch+5, maxValidatorSetSize, - validators.GetPublicIdentities()) - require.NoError(t, err) - require.Len(t, updateDelta.Added, 0) - require.Len(t, updateDelta.Updated, 0) - require.Len(t, updateDelta.Removed, 1) - }) - - t.Run("UpdateValidatorSet - max validator set size reached", func(t *testing.T) { - // because we now have 5 validators, and the new validator has more stake - fullValidatorSet := validators.GetPublicIdentities().Copy() - validatorToAdd := fullValidatorSet[0] - 
validatorToAdd.VotingPower = big.NewInt(11) - - require.NoError(t, state.StakeStore.insertFullValidatorSet(validatorSetState{ - Validators: newValidatorStakeMap(fullValidatorSet), - }, nil)) - - updateDelta, err := stakeManager.UpdateValidatorSet(epoch+6, 4, - validators.GetPublicIdentities(aliases[1:]...)) - - require.NoError(t, err) - require.Len(t, updateDelta.Added, 1) - require.Len(t, updateDelta.Updated, 0) - require.Len(t, updateDelta.Removed, 1) - require.Equal(t, validatorToAdd.Address, updateDelta.Added[0].Address) - require.Equal(t, validatorToAdd.VotingPower.Uint64(), updateDelta.Added[0].VotingPower.Uint64()) - }) -} - -func TestStakeCounter_ShouldBeDeterministic(t *testing.T) { - t.Parallel() - - const timesToExecute = 100 - - stakes := [][]uint64{ - {103, 102, 101, 51, 50, 30, 10}, - {100, 100, 100, 50, 50, 30, 10}, - {103, 102, 101, 51, 50, 30, 10}, - {100, 100, 100, 50, 50, 30, 10}, - } - maxValidatorSetSizes := []int{1000, 1000, 5, 6} - - for ind, stake := range stakes { - maxValidatorSetSize := maxValidatorSetSizes[ind] - - aliases := []string{"A", "B", "C", "D", "E", "F", "G"} - validators := validator.NewTestValidatorsWithAliases(t, aliases, stake) - - test := func() []*validator.ValidatorMetadata { - stakeCounter := newValidatorStakeMap(validators.GetPublicIdentities("A", "B", "C", "D", "E")) - - return stakeCounter.getSorted(maxValidatorSetSize) - } - - initialSlice := test() - - // stake counter and stake map should always be deterministic - for i := 0; i < timesToExecute; i++ { - currentSlice := test() - - require.Len(t, currentSlice, len(initialSlice)) - - for i, si := range currentSlice { - initialSi := initialSlice[i] - require.Equal(t, si.Address, initialSi.Address) - require.Equal(t, si.VotingPower.Uint64(), initialSi.VotingPower.Uint64()) - } - } - } -} - -func TestStakeManager_UpdateOnInit(t *testing.T) { - t.Parallel() - - var ( - allAliases = []string{"A", "B", "C", "D", "E", "F"} - stakeManagerAddr = types.StringToAddress("0xf001") - ) - - votingPowers := []uint64{1, 1, 1, 1, 5, 7} - validators := validator.NewTestValidatorsWithAliases(t, allAliases, votingPowers) - accountSet := validators.GetPublicIdentities(allAliases...) 
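TestStakeCounter_ShouldBeDeterministic depends on getSorted ordering validators by descending voting power and breaking ties by address bytes, which removes any dependence on map iteration order. A standalone sketch of that ordering rule, with local stand-in types:

```go
// Standalone sketch of the ordering rule behind getSorted: descending voting
// power, ties broken by ascending address bytes, then truncation to the
// maximum validator-set size. Types are local stand-ins for illustration.
package main

import (
	"bytes"
	"fmt"
	"sort"
)

type entry struct {
	Address     [20]byte
	VotingPower uint64
}

func sortedActive(entries []entry, maxSize int) []entry {
	active := make([]entry, 0, len(entries))
	for _, e := range entries {
		if e.VotingPower > 0 { // validators with zero stake are dropped
			active = append(active, e)
		}
	}

	sort.Slice(active, func(i, j int) bool {
		if active[i].VotingPower != active[j].VotingPower {
			return active[i].VotingPower > active[j].VotingPower
		}
		// equal stake: the address decides, so the result never depends on map iteration
		return bytes.Compare(active[i].Address[:], active[j].Address[:]) < 0
	})

	if len(active) > maxSize {
		active = active[:maxSize]
	}

	return active
}

func main() {
	entries := []entry{
		{Address: [20]byte{0x3}, VotingPower: 100},
		{Address: [20]byte{0x1}, VotingPower: 100},
		{Address: [20]byte{0x2}, VotingPower: 50},
		{Address: [20]byte{0x4}, VotingPower: 0},
	}

	for _, e := range sortedActive(entries, 3) {
		fmt.Printf("%x %d\n", e.Address[:1], e.VotingPower)
	}
	// Output: 01 100, 03 100, 02 50 -- stable regardless of input order
}
```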
- state := newTestState(t) - - polyBackendMock := new(polybftBackendMock) - polyBackendMock.On("GetValidatorsWithTx", uint64(0), []*types.Header(nil), mock.Anything).Return(accountSet, nil).Once() - - _, err := newStakeManager( - hclog.NewNullLogger(), - state, - stakeManagerAddr, - nil, - polyBackendMock, - nil, - ) - require.NoError(t, err) - - fullValidatorSet, err := state.StakeStore.getFullValidatorSet(nil) - require.NoError(t, err) - - require.Equal(t, uint64(0), fullValidatorSet.BlockNumber) - require.Equal(t, uint64(0), fullValidatorSet.UpdatedAtBlockNumber) - require.Equal(t, uint64(0), fullValidatorSet.EpochID) - - for i, addr := range accountSet.GetAddresses() { - v, exists := fullValidatorSet.Validators[addr] - - require.True(t, exists) - require.Equal(t, big.NewInt(int64(votingPowers[i])), v.VotingPower) - } -} - -func createTestLogForStakeAddedEvent(t *testing.T, validatorSet, to types.Address, stake uint64) *types.Log { - t.Helper() - - var stakeAddedEvent contractsapi.StakeAddedEvent - - topics := make([]types.Hash, 2) - topics[0] = types.Hash(stakeAddedEvent.Sig()) - topics[1] = types.BytesToHash(to.Bytes()) - encodedData, err := abi.MustNewType("uint256").Encode(new(big.Int).SetUint64(stake)) - require.NoError(t, err) - - return &types.Log{ - Address: validatorSet, - Topics: topics, - Data: encodedData, - } -} - -func createTestLogForStakeRemovedEvent(t *testing.T, validatorSet, to types.Address, unstake uint64) *types.Log { - t.Helper() - - var stakeRemovedEvent contractsapi.StakeRemovedEvent - - topics := make([]types.Hash, 2) - topics[0] = types.Hash(stakeRemovedEvent.Sig()) - topics[1] = types.BytesToHash(to.Bytes()) - encodedData, err := abi.MustNewType("uint256").Encode(new(big.Int).SetUint64(unstake)) - require.NoError(t, err) - - return &types.Log{ - Address: validatorSet, - Topics: topics, - Data: encodedData, - } -} - -var _ txrelayer.TxRelayer = (*dummyStakeTxRelayer)(nil) - -type dummyStakeTxRelayer struct { - mock.Mock - callback func() *validator.ValidatorMetadata - t *testing.T -} - -func newDummyStakeTxRelayer(t *testing.T, callback func() *validator.ValidatorMetadata) *dummyStakeTxRelayer { - t.Helper() - - return &dummyStakeTxRelayer{ - t: t, - callback: callback, - } -} - -func (d *dummyStakeTxRelayer) Call(from types.Address, to types.Address, input []byte) (string, error) { - args := d.Called(from, to, input) - - if d.callback != nil { - validatorMetaData := d.callback() - encoded, err := validatorTypeABI.Encode(map[string]interface{}{ - "blsKey": validatorMetaData.BlsKey.ToBigInt(), - "stake": validatorMetaData.VotingPower, - "isWhitelisted": true, - "isActive": true, - }) - - require.NoError(d.t, err) - - return hex.EncodeToHex(encoded), nil - } - - return args.String(0), args.Error(1) -} - -func (d *dummyStakeTxRelayer) SendTransaction(transaction *types.Transaction, key crypto.Key) (*ethgo.Receipt, error) { - args := d.Called(transaction, key) - - return args.Get(0).(*ethgo.Receipt), args.Error(1) -} - -// SendTransactionLocal sends non-signed transaction (this is only for testing purposes) -func (d *dummyStakeTxRelayer) SendTransactionLocal(txn *types.Transaction) (*ethgo.Receipt, error) { - args := d.Called(txn) - - return args.Get(0).(*ethgo.Receipt), args.Error(1) -} - -func (d *dummyStakeTxRelayer) Client() *jsonrpc.EthClient { - return nil -} - -func (d *dummyStakeTxRelayer) GetTxnHashes() []types.Hash { - return nil -} diff --git a/consensus/polybft/state.go b/consensus/polybft/state/state.go similarity index 53% rename from 
consensus/polybft/state.go rename to consensus/polybft/state/state.go index 075d4702a5..5dffca91b6 100644 --- a/consensus/polybft/state.go +++ b/consensus/polybft/state/state.go @@ -1,4 +1,4 @@ -package polybft +package state import ( "fmt" @@ -37,69 +37,33 @@ type BridgeBatchVote struct { type State struct { db *bolt.DB close chan struct{} - - BridgeMessageStore *BridgeMessageStore - EpochStore *EpochStore - ProposerSnapshotStore *ProposerSnapshotStore - StakeStore *StakeStore - GovernanceStore *GovernanceStore } -// newState creates new instance of State -func newState(path string, closeCh chan struct{}, chainIDs []uint64) (*State, error) { +// NewState creates new instance of State +func NewState(path string, closeCh chan struct{}) (*State, error) { db, err := bolt.Open(path, 0666, nil) if err != nil { return nil, err } s := &State{ - db: db, - close: closeCh, - BridgeMessageStore: &BridgeMessageStore{db: db, chainIDs: chainIDs}, - EpochStore: &EpochStore{db: db, chainIDs: chainIDs}, - ProposerSnapshotStore: &ProposerSnapshotStore{db: db}, - StakeStore: &StakeStore{db: db}, - GovernanceStore: &GovernanceStore{db: db}, + db: db, + close: closeCh, } - if err = s.initStorages(); err != nil { - return nil, err - } - - return s, nil -} - -// initStorages initializes data storages -func (s *State) initStorages() error { - // init the buckets - return s.db.Update(func(tx *bolt.Tx) error { - if err := s.BridgeMessageStore.initialize(tx); err != nil { - return err - } - - if err := s.EpochStore.initialize(tx); err != nil { - return err - } - - if err := s.ProposerSnapshotStore.initialize(tx); err != nil { - return err + err = s.db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucketIfNotExists(edgeEventsLastProcessedBlockBucket); err != nil { + return fmt.Errorf("cannot create bucket: %w", err) } - if err := s.StakeStore.initialize(tx); err != nil { - return err - } - - _, err := tx.CreateBucketIfNotExists(edgeEventsLastProcessedBlockBucket) - if err != nil { - return fmt.Errorf("failed to create bucket=%s: %w", string(edgeEventsLastProcessedBlockBucket), err) - } - - return s.GovernanceStore.initialize(tx) + return nil }) + + return s, err } -// insertLastProcessedEventsBlock inserts the last processed block for events on Edge -func (s *State) insertLastProcessedEventsBlock(block uint64, dbTx *bolt.Tx) error { +// InsertLastProcessedEventsBlock inserts the last processed block for events on Edge +func (s *State) InsertLastProcessedEventsBlock(block uint64, dbTx *bolt.Tx) error { insertFn := func(tx *bolt.Tx) error { return tx.Bucket(edgeEventsLastProcessedBlockBucket).Put( edgeEventsLastProcessedBlockKey, common.EncodeUint64ToBytes(block)) @@ -114,8 +78,8 @@ func (s *State) insertLastProcessedEventsBlock(block uint64, dbTx *bolt.Tx) erro return insertFn(dbTx) } -// getLastProcessedEventsBlock gets the last processed block for events on Edge -func (s *State) getLastProcessedEventsBlock(dbTx *bolt.Tx) (uint64, error) { +// GetLastProcessedEventsBlock gets the last processed block for events on Edge +func (s *State) GetLastProcessedEventsBlock(dbTx *bolt.Tx) (uint64, error) { var ( lastProcessed uint64 err error @@ -141,14 +105,23 @@ func (s *State) getLastProcessedEventsBlock(dbTx *bolt.Tx) (uint64, error) { return lastProcessed, err } -// beginDBTransaction creates and begins a transaction on BoltDB +func (s *State) DB() *bolt.DB { + return s.db +} + +// Close closes the state +func (s *State) Close() error { + return s.db.Close() +} + +// BeginDBTransaction creates and begins a 
transaction on BoltDB // Note that transaction needs to be manually rollback or committed -func (s *State) beginDBTransaction(isWriteTx bool) (*bolt.Tx, error) { +func (s *State) BeginDBTransaction(isWriteTx bool) (*bolt.Tx, error) { return s.db.Begin(isWriteTx) } -// bucketStats returns stats for the given bucket in db -func bucketStats(bucketName []byte, db *bolt.DB) (*bolt.BucketStats, error) { +// BucketStats returns stats for the given bucket in db +func BucketStats(bucketName []byte, db *bolt.DB) (*bolt.BucketStats, error) { var stats *bolt.BucketStats err := db.View(func(tx *bolt.Tx) error { diff --git a/consensus/polybft/state_event_getter.go b/consensus/polybft/state/state_event_getter.go similarity index 91% rename from consensus/polybft/state_event_getter.go rename to consensus/polybft/state/state_event_getter.go index a972ebabf2..94ef36fe39 100644 --- a/consensus/polybft/state_event_getter.go +++ b/consensus/polybft/state/state_event_getter.go @@ -1,7 +1,8 @@ -package polybft +package state import ( "github.com/0xPolygon/polygon-edge/blockchain" + polytypes "github.com/0xPolygon/polygon-edge/consensus/polybft/types" "github.com/0xPolygon/polygon-edge/types" "github.com/Ethernal-Tech/ethgo" bolt "go.etcd.io/bbolt" @@ -35,7 +36,7 @@ type EventProvider struct { } // NewEventProvider returns a new instance of eventProvider -func NewEventProvider(blockchain blockchainBackend) *EventProvider { +func NewEventProvider(blockchain polytypes.Blockchain) *EventProvider { return &EventProvider{ receiptsGetter: receiptsGetter{ blockchain: blockchain, @@ -145,7 +146,7 @@ func (e *EventProvider) getEventsFromReceipts(blockHeader *types.Header, type receiptsGetter struct { // blockchain is an abstraction of blockchain that provides necessary functions // for querying blockchain data (blocks, receipts, etc.) - blockchain blockchainBackend + blockchain polytypes.Blockchain } func (r *receiptsGetter) getReceiptsFromBlocksRange(from, to uint64, @@ -168,3 +169,20 @@ func (r *receiptsGetter) getReceiptsFromBlocksRange(from, to uint64, return nil } + +// convertLog converts types.Log to ethgo.Log +func convertLog(log *types.Log) *ethgo.Log { + l := &ethgo.Log{ + Address: ethgo.Address(log.Address), + Data: make([]byte, len(log.Data)), + Topics: make([]ethgo.Hash, len(log.Topics)), + } + + copy(l.Data, log.Data) + + for i, topic := range log.Topics { + l.Topics[i] = ethgo.Hash(topic) + } + + return l +} diff --git a/consensus/polybft/stats.go b/consensus/polybft/state/stats.go similarity index 97% rename from consensus/polybft/stats.go rename to consensus/polybft/state/stats.go index 02ae335e0c..c6e43bd56b 100644 --- a/consensus/polybft/stats.go +++ b/consensus/polybft/state/stats.go @@ -1,4 +1,4 @@ -package polybft +package state import ( "time" @@ -7,8 +7,8 @@ import ( "github.com/prometheus/client_golang/prometheus" ) -// startStatsReleasing starts the process that releases BoltDB stats into prometheus periodically.
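The hunks above move State into its own consensus/polybft/state package and export its lifecycle helpers. A minimal usage sketch (not part of the patch) based only on the signatures visible in this diff; the database path and block number are example values:

package main

import (
	"fmt"

	polystate "github.com/0xPolygon/polygon-edge/consensus/polybft/state"
)

func main() {
	// NewState now only opens BoltDB and creates the shared bucket;
	// the individual stores create their own buckets separately.
	st, err := polystate.NewState("consensus.db", make(chan struct{}))
	if err != nil {
		panic(err)
	}
	defer st.Close()

	// Passing a nil *bolt.Tx lets the helper run in its own transaction.
	if err := st.InsertLastProcessedEventsBlock(42, nil); err != nil {
		panic(err)
	}

	last, err := st.GetLastProcessedEventsBlock(nil)
	if err != nil {
		panic(err)
	}

	fmt.Println("last processed events block:", last)
}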
+func (s *State) StartStatsReleasing() { const ( statUpdatePeriod = 10 * time.Second dbSubsystem = "db" diff --git a/consensus/polybft/state/test_helpers.go b/consensus/polybft/state/test_helpers.go new file mode 100644 index 0000000000..d1ab568e1a --- /dev/null +++ b/consensus/polybft/state/test_helpers.go @@ -0,0 +1,34 @@ +package state + +import ( + "fmt" + "os" + "path" + "testing" + "time" +) + +// newTestState creates new instance of state used by tests. +func NewTestState(tb testing.TB) *State { + tb.Helper() + + dir := fmt.Sprintf("/tmp/consensus-temp_%v", time.Now().UTC().Format(time.RFC3339Nano)) + err := os.Mkdir(dir, 0775) + + if err != nil { + tb.Fatal(err) + } + + state, err := NewState(path.Join(dir, "my.db"), make(chan struct{})) + if err != nil { + tb.Fatal(err) + } + + tb.Cleanup(func() { + if err := os.RemoveAll(dir); err != nil { + tb.Fatal(err) + } + }) + + return state +} diff --git a/consensus/polybft/state_store_epoch.go b/consensus/polybft/state_store_epoch.go deleted file mode 100644 index a9d6a115b7..0000000000 --- a/consensus/polybft/state_store_epoch.go +++ /dev/null @@ -1,316 +0,0 @@ -package polybft - -import ( - "encoding/json" - "fmt" - - "github.com/0xPolygon/polygon-edge/helper/common" - bolt "go.etcd.io/bbolt" -) - -const ( - // validatorSnapshotLimit defines a maximum number of validator snapshots - // that can be stored in cache (both memory and db) - validatorSnapshotLimit = 100 - // numberOfSnapshotsToLeaveInMemory defines a number of validator snapshots to leave in memory - numberOfSnapshotsToLeaveInMemory = 12 - // numberOfSnapshotsToLeaveInDB defines a number of validator snapshots to leave in db - numberOfSnapshotsToLeaveInDB = 20 -) - -var ( - // bucket to store epochs and all its nested buckets (message votes and message pool events) - epochsBucket = []byte("epochs") - - // bucket to store validator snapshots - validatorSnapshotsBucket = []byte("validatorSnapshots") -) - -/* -Bolt DB schema: - -epochs/ -|--> chainID --> epochNumber --> messageVote - -validatorSnapshots/ -|--> epochNumber -> *AccountSet (json marshalled) -*/ - -type EpochStore struct { - db *bolt.DB - chainIDs []uint64 -} - -// initialize creates necessary buckets in DB if they don't already exist -func (s *EpochStore) initialize(tx *bolt.Tx) error { - var ( - epochBucket *bolt.Bucket - err error - ) - - if epochBucket, err = tx.CreateBucketIfNotExists(epochsBucket); err != nil { - return fmt.Errorf("failed to create bucket=%s: %w", string(epochsBucket), err) - } - - if _, err := tx.CreateBucketIfNotExists(validatorSnapshotsBucket); err != nil { - return fmt.Errorf("failed to create bucket=%s: %w", string(validatorSnapshotsBucket), err) - } - - for _, chainID := range s.chainIDs { - if _, err := epochBucket.CreateBucketIfNotExists(common.EncodeUint64ToBytes(chainID)); err != nil { - return fmt.Errorf("failed to create epoch bucket for chainID=%d err: %w", chainID, err) - } - } - - return nil -} - -// insertValidatorSnapshot inserts a validator snapshot for the given block to its bucket in db -func (s *EpochStore) insertValidatorSnapshot(validatorSnapshot *validatorSnapshot, dbTx *bolt.Tx) error { - insertFn := func(tx *bolt.Tx) error { - raw, err := json.Marshal(validatorSnapshot) - if err != nil { - return err - } - - return tx.Bucket(validatorSnapshotsBucket).Put(common.EncodeUint64ToBytes(validatorSnapshot.Epoch), raw) - } - - if dbTx == nil { - return s.db.Update(func(tx *bolt.Tx) error { - return insertFn(tx) - }) - } - - return insertFn(dbTx) -} - -// getValidatorSnapshot 
queries the validator snapshot for given block from db -func (s *EpochStore) getValidatorSnapshot(epoch uint64) (*validatorSnapshot, error) { - var validatorSnapshot *validatorSnapshot - - err := s.db.View(func(tx *bolt.Tx) error { - v := tx.Bucket(validatorSnapshotsBucket).Get(common.EncodeUint64ToBytes(epoch)) - if v != nil { - return json.Unmarshal(v, &validatorSnapshot) - } - - return nil - }) - - return validatorSnapshot, err -} - -// getNearestOrEpochSnapshot returns the nearest or the exact epoch snapshot from db -func (s *EpochStore) getNearestOrEpochSnapshot(epoch uint64, dbTx *bolt.Tx) (*validatorSnapshot, error) { - var ( - snapshot *validatorSnapshot - err error - ) - - getFn := func(tx *bolt.Tx) error { - for { - v := tx.Bucket(validatorSnapshotsBucket).Get(common.EncodeUint64ToBytes(epoch)) - if v != nil { - return json.Unmarshal(v, &snapshot) - } - - if epoch == 0 { // prevent uint64 underflow - break - } - - epoch-- - } - - return nil - } - - if dbTx == nil { - err = s.db.View(func(tx *bolt.Tx) error { - return getFn(tx) - }) - } else { - err = getFn(dbTx) - } - - return snapshot, err -} - -// insertEpoch inserts a new epoch to db with its meta data -func (s *EpochStore) insertEpoch(epoch uint64, dbTx *bolt.Tx, chainID uint64) error { - insertFn := func(tx *bolt.Tx) error { - chainIDBucket, err := tx.Bucket(epochsBucket).CreateBucketIfNotExists(common.EncodeUint64ToBytes(chainID)) - if err != nil { - return err - } - - epochBucket, err := chainIDBucket.CreateBucketIfNotExists(common.EncodeUint64ToBytes(epoch)) - if err != nil { - return err - } - - _, err = epochBucket.CreateBucketIfNotExists(messageVotesBucket) - if err != nil { - return err - } - - return err - } - - if dbTx == nil { - return s.db.Update(func(tx *bolt.Tx) error { - return insertFn(tx) - }) - } - - return insertFn(dbTx) -} - -// isEpochInserted checks if given epoch is present in db -func (s *EpochStore) isEpochInserted(epoch uint64, chainID uint64) bool { - return s.db.View(func(tx *bolt.Tx) error { - _, err := getEpochBucket(tx, epoch, chainID) - - return err - }) == nil -} - -// getEpochBucket returns bucket from db associated with given epoch -func getEpochBucket(tx *bolt.Tx, epoch uint64, chainID uint64) (*bolt.Bucket, error) { - epochBucket := tx.Bucket(epochsBucket). - Bucket(common.EncodeUint64ToBytes(chainID)). 
- Bucket(common.EncodeUint64ToBytes(epoch)) - if epochBucket == nil { - return nil, fmt.Errorf("could not find bucket for epoch: %v", epoch) - } - - return epochBucket, nil -} - -// cleanEpochsFromDB cleans epoch buckets from db -func (s *EpochStore) cleanEpochsFromDB(dbTx *bolt.Tx) error { - cleanFn := func(tx *bolt.Tx) error { - if err := tx.DeleteBucket(epochsBucket); err != nil { - return err - } - - epochBucket, err := tx.CreateBucket(epochsBucket) - if err != nil { - return err - } - - for _, chainID := range s.chainIDs { - if _, err := epochBucket.CreateBucket(common.EncodeUint64ToBytes(chainID)); err != nil { - return err - } - } - - return nil - } - - if dbTx == nil { - return s.db.Update(func(tx *bolt.Tx) error { - return cleanFn(tx) - }) - } - - return cleanFn(dbTx) -} - -// cleanValidatorSnapshotsFromDB cleans the validator snapshots bucket if a limit is reached, -// but it leaves the latest (n) number of snapshots -func (s *EpochStore) cleanValidatorSnapshotsFromDB(epoch uint64, dbTx *bolt.Tx) error { - cleanFn := func(tx *bolt.Tx) error { - bucket := tx.Bucket(validatorSnapshotsBucket) - - // paired list - keys := make([][]byte, 0) - values := make([][]byte, 0) - - for i := 0; i < numberOfSnapshotsToLeaveInDB; i++ { // exclude the last inserted we already appended - key := common.EncodeUint64ToBytes(epoch) - value := bucket.Get(key) - - if value == nil { - continue - } - - keys = append(keys, key) - values = append(values, value) - epoch-- - } - - // removing an entire bucket is much faster than removing all keys - // look at thread https://github.com/boltdb/bolt/issues/667 - err := tx.DeleteBucket(validatorSnapshotsBucket) - if err != nil { - return err - } - - bucket, err = tx.CreateBucket(validatorSnapshotsBucket) - if err != nil { - return err - } - - // we start the loop in reverse so that the oldest of snapshots get inserted first in db - for i := len(keys) - 1; i >= 0; i-- { - if err := bucket.Put(keys[i], values[i]); err != nil { - return err - } - } - - return nil - } - - if dbTx == nil { - return s.db.Update(func(tx *bolt.Tx) error { - return cleanFn(tx) - }) - } - - return cleanFn(dbTx) -} - -// removeAllValidatorSnapshots drops a validator snapshot bucket and re-creates it in bolt database -func (s *EpochStore) removeAllValidatorSnapshots() error { - return s.db.Update(func(tx *bolt.Tx) error { - // removing an entire bucket is much faster than removing all keys - // look at thread https://github.com/boltdb/bolt/issues/667 - err := tx.DeleteBucket(validatorSnapshotsBucket) - if err != nil { - return err - } - - _, err = tx.CreateBucket(validatorSnapshotsBucket) - if err != nil { - return err - } - - return nil - }) -} - -// epochsDBStats returns stats of epochs bucket in db -func (s *EpochStore) epochsDBStats() (*bolt.BucketStats, error) { - return bucketStats(epochsBucket, s.db) -} - -// validatorSnapshotsDBStats returns stats of validators snapshot bucket in db -func (s *EpochStore) validatorSnapshotsDBStats() (*bolt.BucketStats, error) { - return bucketStats(validatorSnapshotsBucket, s.db) -} - -// getNestedBucketInEpoch returns a nested (child) bucket from db associated with given epoch -func getNestedBucketInEpoch(tx *bolt.Tx, epoch uint64, bucketKey []byte, chainID uint64) (*bolt.Bucket, error) { - epochBucket, err := getEpochBucket(tx, epoch, chainID) - if err != nil { - return nil, err - } - - bucket := epochBucket.Bucket(bucketKey) - if bucket == nil { - return nil, fmt.Errorf("could not find %v bucket for epoch: %v", string(bucketKey), epoch) - } - - 
return bucket, nil -} diff --git a/consensus/polybft/state_store_epoch_test.go b/consensus/polybft/state_store_epoch_test.go deleted file mode 100644 index 511af4c416..0000000000 --- a/consensus/polybft/state_store_epoch_test.go +++ /dev/null @@ -1,236 +0,0 @@ -package polybft - -import ( - "fmt" - "sync" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/0xPolygon/polygon-edge/bls" - "github.com/0xPolygon/polygon-edge/consensus/polybft/validator" - "github.com/0xPolygon/polygon-edge/types" -) - -func TestState_insertAndGetValidatorSnapshot(t *testing.T) { - t.Parallel() - - const ( - epoch = uint64(1) - epochEndingBlock = uint64(100) - ) - - state := newTestState(t) - keys, err := bls.CreateRandomBlsKeys(3) - - require.NoError(t, err) - - snapshot := validator.AccountSet{ - &validator.ValidatorMetadata{Address: types.BytesToAddress([]byte{0x18}), BlsKey: keys[0].PublicKey()}, - &validator.ValidatorMetadata{Address: types.BytesToAddress([]byte{0x23}), BlsKey: keys[1].PublicKey()}, - &validator.ValidatorMetadata{Address: types.BytesToAddress([]byte{0x37}), BlsKey: keys[2].PublicKey()}, - } - - assert.NoError(t, state.EpochStore.insertValidatorSnapshot( - &validatorSnapshot{epoch, epochEndingBlock, snapshot}, nil)) - - snapshotFromDB, err := state.EpochStore.getValidatorSnapshot(epoch) - - assert.NoError(t, err) - assert.Equal(t, snapshot.Len(), snapshotFromDB.Snapshot.Len()) - assert.Equal(t, epoch, snapshotFromDB.Epoch) - assert.Equal(t, epochEndingBlock, snapshotFromDB.EpochEndingBlock) - - for i, v := range snapshot { - assert.Equal(t, v.Address, snapshotFromDB.Snapshot[i].Address) - assert.Equal(t, v.BlsKey, snapshotFromDB.Snapshot[i].BlsKey) - } -} - -func TestState_cleanValidatorSnapshotsFromDb(t *testing.T) { - t.Parallel() - - fixedEpochSize := uint64(10) - state := newTestState(t) - keys, err := bls.CreateRandomBlsKeys(3) - require.NoError(t, err) - - snapshot := validator.AccountSet{ - &validator.ValidatorMetadata{Address: types.BytesToAddress([]byte{0x18}), BlsKey: keys[0].PublicKey()}, - &validator.ValidatorMetadata{Address: types.BytesToAddress([]byte{0x23}), BlsKey: keys[1].PublicKey()}, - &validator.ValidatorMetadata{Address: types.BytesToAddress([]byte{0x37}), BlsKey: keys[2].PublicKey()}, - } - - var epoch uint64 - // add a couple of more snapshots above limit just to make sure we reached it - for i := 1; i <= validatorSnapshotLimit+2; i++ { - epoch = uint64(i) - assert.NoError(t, state.EpochStore.insertValidatorSnapshot( - &validatorSnapshot{epoch, epoch * fixedEpochSize, snapshot}, nil)) - } - - snapshotFromDB, err := state.EpochStore.getValidatorSnapshot(epoch) - - assert.NoError(t, err) - assert.Equal(t, snapshot.Len(), snapshotFromDB.Snapshot.Len()) - assert.Equal(t, epoch, snapshotFromDB.Epoch) - assert.Equal(t, epoch*fixedEpochSize, snapshotFromDB.EpochEndingBlock) - - for i, v := range snapshot { - assert.Equal(t, v.Address, snapshotFromDB.Snapshot[i].Address) - assert.Equal(t, v.BlsKey, snapshotFromDB.Snapshot[i].BlsKey) - } - - assert.NoError(t, state.EpochStore.cleanValidatorSnapshotsFromDB(epoch, nil)) - - // test that last (numberOfSnapshotsToLeaveInDb) of snapshots are left in db after cleanup - validatorSnapshotsBucketStats, err := state.EpochStore.validatorSnapshotsDBStats() - require.NoError(t, err) - - assert.Equal(t, numberOfSnapshotsToLeaveInDB, validatorSnapshotsBucketStats.KeyN) - - for i := 0; i < numberOfSnapshotsToLeaveInDB; i++ { - snapshotFromDB, err = 
state.EpochStore.getValidatorSnapshot(epoch) - assert.NoError(t, err) - assert.NotNil(t, snapshotFromDB) - - epoch-- - } -} - -func TestState_InsertVoteConcurrent(t *testing.T) { - t.Parallel() - - state := newTestState(t) - epoch := uint64(1) - assert.NoError(t, state.EpochStore.insertEpoch(epoch, nil, 0)) - - hash := []byte{1, 2} - - var wg sync.WaitGroup - - for i := 0; i < 100; i++ { - wg.Add(1) - - go func(i int) { - defer wg.Done() - - _, _ = state.BridgeMessageStore.insertConsensusData(epoch, hash, &BridgeBatchVoteConsensusData{ - Sender: fmt.Sprintf("NODE_%d", i), - Signature: []byte{1, 2}, - }, nil, 0) - }(i) - } - - wg.Wait() - - signatures, err := state.BridgeMessageStore.getMessageVotes(epoch, hash, 0) - assert.NoError(t, err) - assert.Len(t, signatures, 100) -} - -func TestState_Insert_And_Cleanup(t *testing.T) { - t.Parallel() - - state := newTestState(t) - hash1 := []byte{1, 2} - - for i := uint64(1); i <= 500; i++ { - epoch := i - err := state.EpochStore.insertEpoch(epoch, nil, 0) - - assert.NoError(t, err) - - _, _ = state.BridgeMessageStore.insertConsensusData(epoch, hash1, - &BridgeBatchVoteConsensusData{ - Sender: "NODE_1", - Signature: []byte{1, 2}, - }, nil, 0) - } - - stats, err := state.EpochStore.epochsDBStats() - require.NoError(t, err) - - // BucketN returns number of all buckets inside root bucket (including nested buckets) + the root itself - // Since we inserted 500 epochs we expect to have 1000 buckets inside epochs root bucket - // (500 buckets for epochs + each epoch has 1 nested bucket for message votes) - assert.Equal(t, 1002, stats.BucketN-1) - - assert.NoError(t, state.EpochStore.cleanEpochsFromDB(nil)) - - stats, err = state.EpochStore.epochsDBStats() - require.NoError(t, err) - - assert.Equal(t, 2, stats.BucketN-1) - - // there should be no votes for given epoch since we cleaned the db - votes, _ := state.BridgeMessageStore.getMessageVotes(1, hash1, 0) - assert.Nil(t, votes) - - for i := uint64(501); i <= 1000; i++ { - epoch := i - err := state.EpochStore.insertEpoch(epoch, nil, 0) - assert.NoError(t, err) - - _, _ = state.BridgeMessageStore.insertConsensusData(epoch, hash1, - &BridgeBatchVoteConsensusData{ - Sender: "NODE_1", - Signature: []byte{1, 2}, - }, nil, 0) - } - - stats, err = state.EpochStore.epochsDBStats() - require.NoError(t, err) - - assert.Equal(t, 1002, stats.BucketN-1) - - votes, _ = state.BridgeMessageStore.getMessageVotes(1000, hash1, 0) - assert.Equal(t, 1, len(votes)) -} - -func TestEpochStore_getNearestOrEpochSnapshot(t *testing.T) { - t.Parallel() - - state := newTestState(t) - epoch := uint64(1) - tv := validator.NewTestValidators(t, 3) - - // Insert a snapshot for epoch 1 - snapshot := &validatorSnapshot{ - Epoch: epoch, - EpochEndingBlock: 100, - Snapshot: tv.GetPublicIdentities(), - } - - require.NoError(t, state.EpochStore.insertValidatorSnapshot(snapshot, nil)) - - t.Run("with existing dbTx", func(t *testing.T) { - t.Parallel() - - dbTx, err := state.EpochStore.db.Begin(false) - require.NoError(t, err) - - result, err := state.EpochStore.getNearestOrEpochSnapshot(epoch, dbTx) - assert.NoError(t, err) - assert.Equal(t, snapshot, result) - - require.NoError(t, dbTx.Rollback()) - }) - - t.Run("without existing dbTx", func(t *testing.T) { - t.Parallel() - - result, err := state.EpochStore.getNearestOrEpochSnapshot(epoch, nil) - assert.NoError(t, err) - assert.Equal(t, snapshot, result) - }) - - t.Run("with non-existing epoch", func(t *testing.T) { - t.Parallel() - - result, err := state.EpochStore.getNearestOrEpochSnapshot(2, 
nil) - assert.NoError(t, err) - assert.Equal(t, snapshot, result) - }) -} diff --git a/consensus/polybft/state_store_proposer_snapshot_test.go b/consensus/polybft/state_store_proposer_snapshot_test.go deleted file mode 100644 index e8657f2203..0000000000 --- a/consensus/polybft/state_store_proposer_snapshot_test.go +++ /dev/null @@ -1,29 +0,0 @@ -package polybft - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestState_getProposerSnapshot_writeProposerSnapshot(t *testing.T) { - t.Parallel() - - const ( - height = uint64(100) - round = uint64(5) - ) - - state := newTestState(t) - - snap, err := state.ProposerSnapshotStore.getProposerSnapshot(nil) - require.NoError(t, err) - require.Nil(t, snap) - - newSnapshot := &ProposerSnapshot{Height: height, Round: round} - require.NoError(t, state.ProposerSnapshotStore.writeProposerSnapshot(newSnapshot, nil)) - - snap, err = state.ProposerSnapshotStore.getProposerSnapshot(nil) - require.NoError(t, err) - require.Equal(t, newSnapshot, snap) -} diff --git a/consensus/polybft/state_transaction.go b/consensus/polybft/state_transaction.go index 0039f6d321..154a8e782a 100644 --- a/consensus/polybft/state_transaction.go +++ b/consensus/polybft/state_transaction.go @@ -4,17 +4,17 @@ import ( "bytes" "fmt" + "github.com/0xPolygon/polygon-edge/consensus/polybft/bridge" "github.com/0xPolygon/polygon-edge/consensus/polybft/contractsapi" + "github.com/0xPolygon/polygon-edge/consensus/polybft/helpers" ) -const abiMethodIDLength = 4 - func decodeStateTransaction(txData []byte) (contractsapi.StateTransactionInput, error) { - if len(txData) < abiMethodIDLength { + if len(txData) < helpers.AbiMethodIDLength { return nil, fmt.Errorf("state transactions have input") } - sig := txData[:abiMethodIDLength] + sig := txData[:helpers.AbiMethodIDLength] var ( commitBridgeTxFn contractsapi.CommitBatchBridgeStorageFn @@ -26,7 +26,7 @@ func decodeStateTransaction(txData []byte) (contractsapi.StateTransactionInput, if bytes.Equal(sig, commitBridgeTxFn.Sig()) { // bridge batch - obj = &BridgeBatchSigned{} + obj = &bridge.BridgeBatchSigned{} } else if bytes.Equal(sig, commitEpochFn.Sig()) { // commit epoch obj = &contractsapi.CommitEpochEpochManagerFn{} diff --git a/consensus/polybft/system_state.go b/consensus/polybft/system_state/system_state.go similarity index 76% rename from consensus/polybft/system_state.go rename to consensus/polybft/system_state/system_state.go index 4c68cd1cf7..281b0fe441 100644 --- a/consensus/polybft/system_state.go +++ b/consensus/polybft/system_state/system_state.go @@ -1,15 +1,22 @@ -package polybft +package systemstate import ( + "errors" "fmt" "math/big" "github.com/0xPolygon/polygon-edge/consensus/polybft/contractsapi" + "github.com/0xPolygon/polygon-edge/contracts" + "github.com/0xPolygon/polygon-edge/state" "github.com/0xPolygon/polygon-edge/types" "github.com/Ethernal-Tech/ethgo" "github.com/Ethernal-Tech/ethgo/contract" ) +var ( + errSendTxnUnsupported = errors.New("system state does not support send transactions") +) + // ValidatorInfo is data transfer object which holds validator information, // provided by smart contract type ValidatorInfo struct { @@ -143,3 +150,37 @@ func (s *SystemStateImpl) GetValidatorSetByNumber(numberOfValidatorSet *big.Int) return validatorSet, err } + +var _ contract.Provider = &stateProvider{} + +type stateProvider struct { + transition *state.Transition +} + +// NewStateProvider initializes EVM against given state and chain config and returns stateProvider instance +// which is an 
abstraction for smart contract calls +func NewStateProvider(transition *state.Transition) contract.Provider { + return &stateProvider{transition: transition} +} + +// Call implements the contract.Provider interface to make contract calls directly to the state +func (s *stateProvider) Call(addr ethgo.Address, input []byte, opts *contract.CallOpts) ([]byte, error) { + result := s.transition.Call2( + contracts.SystemCaller, + types.Address(addr), + input, + big.NewInt(0), + 10000000, + ) + if result.Failed() { + return nil, result.Err + } + + return result.ReturnValue, nil +} + +// Txn is part of the contract.Provider interface to make Ethereum transactions. We disable this function +// since the system state does not make any transaction +func (s *stateProvider) Txn(_ ethgo.Address, _ ethgo.Key, _ []byte) (contract.Txn, error) { + return nil, errSendTxnUnsupported +} diff --git a/consensus/polybft/system_state_test.go b/consensus/polybft/system_state/system_state_test.go similarity index 75% rename from consensus/polybft/system_state_test.go rename to consensus/polybft/system_state/system_state_test.go index f3bbd32e18..4041e9d486 100644 --- a/consensus/polybft/system_state_test.go +++ b/consensus/polybft/system_state/system_state_test.go @@ -1,21 +1,17 @@ -package polybft +package systemstate import ( "encoding/hex" "math/big" "testing" - "github.com/0xPolygon/polygon-edge/chain" "github.com/0xPolygon/polygon-edge/contracts" - "github.com/0xPolygon/polygon-edge/state" - itrie "github.com/0xPolygon/polygon-edge/state/immutable-trie" "github.com/0xPolygon/polygon-edge/types" "github.com/Ethernal-Tech/ethgo" "github.com/Ethernal-Tech/ethgo/abi" "github.com/Ethernal-Tech/ethgo/contract" "github.com/Ethernal-Tech/ethgo/testutil" "github.com/Ethernal-Tech/ethgo/wallet" - "github.com/hashicorp/go-hclog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -44,7 +40,7 @@ func TestSystemState_GetNextCommittedIndex(t *testing.T) { bin, err := hex.DecodeString(solcContract.Bin) require.NoError(t, err) - transition := newTestTransition(t, nil) + transition := NewTestTransition(t, nil) // deploy a contract result := transition.Create2(types.Address{}, bin, big.NewInt(0), 1000000000) @@ -92,7 +88,7 @@ func TestSystemState_GetEpoch(t *testing.T) { bin, err := hex.DecodeString(solcContract.Bin) require.NoError(t, err) - transition := newTestTransition(t, nil) + transition := NewTestTransition(t, nil) // deploy a contract result := transition.Create2(types.Address{}, bin, big.NewInt(0), 1000000000) @@ -120,7 +116,7 @@ func TestStateProvider_Txn_NotSupported(t *testing.T) { t.Parallel() provider := &stateProvider{ - transition: newTestTransition(t, nil), + transition: NewTestTransition(t, nil), } key, err := wallet.GenerateKey() @@ -129,34 +125,3 @@ func TestStateProvider_Txn_NotSupported(t *testing.T) { _, err = provider.Txn(ethgo.ZeroAddress, key, []byte{0x1}) require.ErrorIs(t, err, errSendTxnUnsupported) } - -func newTestTransition(t *testing.T, alloc map[types.Address]*chain.GenesisAccount) *state.Transition { - t.Helper() - - st := itrie.NewState(itrie.NewMemoryStorage()) - - ex := state.NewExecutor(&chain.Params{ - Forks: chain.AllForksEnabled, - BurnContract: map[uint64]types.Address{ - 0: types.ZeroAddress, - }, - }, st, hclog.NewNullLogger()) - - rootHash, err := ex.WriteGenesis(alloc, types.Hash{}) - require.NoError(t, err) - - ex.GetHash = func(h *types.Header) state.GetHashByNumber { - return func(i uint64) types.Hash { - return rootHash - } - } - - transition, err := 
ex.BeginTxn( - rootHash, - &types.Header{}, - types.ZeroAddress, - ) - assert.NoError(t, err) - - return transition -} diff --git a/consensus/polybft/system_state/test_helpers.go b/consensus/polybft/system_state/test_helpers.go new file mode 100644 index 0000000000..e6f9bfb5c9 --- /dev/null +++ b/consensus/polybft/system_state/test_helpers.go @@ -0,0 +1,43 @@ +package systemstate + +import ( + "testing" + + "github.com/0xPolygon/polygon-edge/chain" + "github.com/0xPolygon/polygon-edge/state" + itrie "github.com/0xPolygon/polygon-edge/state/immutable-trie" + "github.com/0xPolygon/polygon-edge/types" + "github.com/hashicorp/go-hclog" + "github.com/stretchr/testify/require" +) + +func NewTestTransition(t *testing.T, alloc map[types.Address]*chain.GenesisAccount) *state.Transition { + t.Helper() + + st := itrie.NewState(itrie.NewMemoryStorage()) + + ex := state.NewExecutor(&chain.Params{ + Forks: chain.AllForksEnabled, + BurnContract: map[uint64]types.Address{ + 0: types.ZeroAddress, + }, + }, st, hclog.NewNullLogger()) + + rootHash, err := ex.WriteGenesis(alloc, types.Hash{}) + require.NoError(t, err) + + ex.GetHash = func(h *types.Header) state.GetHashByNumber { + return func(i uint64) types.Hash { + return rootHash + } + } + + transition, err := ex.BeginTxn( + rootHash, + &types.Header{}, + types.ZeroAddress, + ) + require.NoError(t, err) + + return transition +} diff --git a/consensus/polybft/extra.go b/consensus/polybft/types/extra.go similarity index 98% rename from consensus/polybft/extra.go rename to consensus/polybft/types/extra.go index 1bbfd34144..eb20d2ebe6 100644 --- a/consensus/polybft/extra.go +++ b/consensus/polybft/types/extra.go @@ -1,4 +1,4 @@ -package polybft +package types import ( "fmt" @@ -129,7 +129,7 @@ func (i *Extra) UnmarshalRLPWith(v *fastrlp.Value) error { // ValidateFinalizedData contains extra data validations for finalized headers func (i *Extra) ValidateFinalizedData(header *types.Header, parent *types.Header, parents []*types.Header, - chainID uint64, consensusBackend polybftBackend, domain []byte, logger hclog.Logger) error { + chainID uint64, consensusBackend Polybft, domain []byte, logger hclog.Logger) error { // validate committed signatures blockNumber := header.Number if i.Committed == nil { @@ -171,7 +171,7 @@ func (i *Extra) ValidateFinalizedData(header *types.Header, parent *types.Header } // ValidateParentSignatures validates signatures for parent block -func (i *Extra) ValidateParentSignatures(blockNumber uint64, consensusBackend polybftBackend, parents []*types.Header, +func (i *Extra) ValidateParentSignatures(blockNumber uint64, consensusBackend Polybft, parents []*types.Header, parent *types.Header, parentExtra *Extra, domain []byte, logger hclog.Logger) error { // skip block 1 because genesis does not have committed signatures if blockNumber <= 1 { diff --git a/consensus/polybft/handlers.go b/consensus/polybft/types/handlers.go similarity index 84% rename from consensus/polybft/handlers.go rename to consensus/polybft/types/handlers.go index f048daec49..a9e288dae4 100644 --- a/consensus/polybft/handlers.go +++ b/consensus/polybft/types/handlers.go @@ -1,7 +1,9 @@ -package polybft +package types import ( "github.com/0xPolygon/polygon-edge/chain" + "github.com/0xPolygon/polygon-edge/consensus/polybft/config" + systemstate "github.com/0xPolygon/polygon-edge/consensus/polybft/system_state" "github.com/0xPolygon/polygon-edge/consensus/polybft/validator" "github.com/0xPolygon/polygon-edge/types" bolt "go.etcd.io/bbolt" @@ -18,7 +20,7 @@ type 
PostBlockRequest struct { // used to save necessary data on PostBlock DBTx *bolt.Tx // CurrentClientConfig is the latest client configuration - CurrentClientConfig *PolyBFTConfig + CurrentClientConfig *config.PolyBFT // Forks holds forks configuration Forks *chain.Forks } @@ -32,7 +34,7 @@ type PostEpochRequest struct { // SystemState is the state of the governance smart contracts // after this block - SystemState SystemState + SystemState systemstate.SystemState // ValidatorSet is the validator set for the new epoch ValidatorSet validator.ValidatorSet diff --git a/consensus/polybft/types/types.go b/consensus/polybft/types/types.go new file mode 100644 index 0000000000..2485fd076d --- /dev/null +++ b/consensus/polybft/types/types.go @@ -0,0 +1,94 @@ +package types + +import ( + "time" + + "github.com/0xPolygon/polygon-edge/blockchain" + systemstate "github.com/0xPolygon/polygon-edge/consensus/polybft/system_state" + "github.com/0xPolygon/polygon-edge/consensus/polybft/validator" + "github.com/0xPolygon/polygon-edge/state" + "github.com/0xPolygon/polygon-edge/types" + "github.com/Ethernal-Tech/ethgo/contract" + "github.com/hashicorp/go-hclog" + bolt "go.etcd.io/bbolt" +) + +// Polybft is the interface that provides the necessary functions +// to interact with the polybft consensus +type Polybft interface { + // GetValidators retrieves validator set for the given block + GetValidators(blockNumber uint64, parents []*types.Header) (validator.AccountSet, error) + + // GetValidators retrieves validator set for the given block + // Function expects that db tx is already open + GetValidatorsWithTx(blockNumber uint64, parents []*types.Header, + dbTx *bolt.Tx) (validator.AccountSet, error) + + // SetBlockTime updates the block time + SetBlockTime(blockTime time.Duration) +} + +type BlockBuilder interface { + Reset() error + WriteTx(*types.Transaction) error + Fill() + Build(func(h *types.Header)) (*types.FullBlock, error) + GetState() *state.Transition + Receipts() []*types.Receipt +} + +// blockchain is an interface that wraps the methods called on blockchain +type Blockchain interface { + // CurrentHeader returns the header of blockchain block head + CurrentHeader() *types.Header + + // CommitBlock commits a block to the chain. + CommitBlock(block *types.FullBlock) error + + // NewBlockBuilder is a factory method that returns a block builder on top of 'parent'. + NewBlockBuilder(parent *types.Header, coinbase types.Address, + txPool TxPool, blockTime time.Duration, logger hclog.Logger) (BlockBuilder, error) + + // ProcessBlock builds a final block from given 'block' on top of 'parent'. + ProcessBlock(parent *types.Header, block *types.Block) (*types.FullBlock, error) + + // GetStateProviderForBlock returns a reference to make queries to the state at 'block'. + GetStateProviderForBlock(block *types.Header) (contract.Provider, error) + + // GetStateProvider returns a reference to make queries to the provided state. + GetStateProvider(transition *state.Transition) contract.Provider + + // GetHeaderByNumber returns a reference to block header for the given block number. 
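The Blockchain interface introduced above (its method set continues right below) replaces the package-private blockchainBackend, so subpackages can depend on it without importing polybft itself. A small consumer sketch, assuming only identifiers shown in this diff; the helper name headOf is hypothetical:

package example

import (
	polystate "github.com/0xPolygon/polygon-edge/consensus/polybft/state"
	polytypes "github.com/0xPolygon/polygon-edge/consensus/polybft/types"
)

// headOf is a hypothetical helper: any polytypes.Blockchain implementation
// (the real node backend or a test mock) can be handed to the relocated EventProvider.
func headOf(chain polytypes.Blockchain) uint64 {
	// NewEventProvider now accepts the interface instead of blockchainBackend;
	// event subscribers would be attached elsewhere (API not shown in this hunk).
	_ = polystate.NewEventProvider(chain)

	return chain.CurrentHeader().Number
}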
+ GetHeaderByNumber(number uint64) (*types.Header, bool) + + // GetHeaderByHash returns a reference to block header for the given block hash + GetHeaderByHash(hash types.Hash) (*types.Header, bool) + + // GetSystemState creates a new instance of SystemState interface + GetSystemState(provider contract.Provider) systemstate.SystemState + + // SubscribeEvents subscribes to blockchain events + SubscribeEvents() blockchain.Subscription + + // UnubscribeEvents unsubscribes from blockchain events + UnubscribeEvents(subscription blockchain.Subscription) + + // GetChainID returns chain id of the current blockchain + GetChainID() uint64 + + // GetReceiptsByHash retrieves receipts by hash + GetReceiptsByHash(hash types.Hash) ([]*types.Receipt, error) +} + +type TxPool interface { + Prepare() + Length() uint64 + Peek() *types.Transaction + Pop(*types.Transaction) + Drop(*types.Transaction) + Demote(*types.Transaction) + SetSealing(bool) + ResetWithBlock(*types.Block) + ReinsertProposed() + ClearProposed() +} diff --git a/consensus/polybft/validator-snapshot/validator_snapshot_store.go b/consensus/polybft/validator-snapshot/validator_snapshot_store.go new file mode 100644 index 0000000000..c164130257 --- /dev/null +++ b/consensus/polybft/validator-snapshot/validator_snapshot_store.go @@ -0,0 +1,186 @@ +package validator_snapshot + +import ( + "encoding/json" + "fmt" + + "github.com/0xPolygon/polygon-edge/consensus/polybft/state" + "github.com/0xPolygon/polygon-edge/helper/common" + bolt "go.etcd.io/bbolt" +) + +var ( + // bucket to store validator snapshots + validatorSnapshotsBucket = []byte("validatorSnapshots") +) + +type validatorSnapshotStore struct { + db *bolt.DB +} + +func newValidatorSnapshotStore(db *bolt.DB) (*validatorSnapshotStore, error) { + store := &validatorSnapshotStore{db: db} + + return store, store.db.Update(func(tx *bolt.Tx) error { + if _, err := tx.CreateBucketIfNotExists(validatorSnapshotsBucket); err != nil { + return fmt.Errorf("failed to create bucket=%s: %w", string(validatorSnapshotsBucket), err) + } + + return nil + }) +} + +// insertValidatorSnapshot inserts a validator snapshot for the given block to its bucket in db +func (s *validatorSnapshotStore) insertValidatorSnapshot(validatorSnapshot *ValidatorSnapshot, dbTx *bolt.Tx) error { + insertFn := func(tx *bolt.Tx) error { + raw, err := json.Marshal(validatorSnapshot) + if err != nil { + return err + } + + return tx.Bucket(validatorSnapshotsBucket).Put(common.EncodeUint64ToBytes(validatorSnapshot.Epoch), raw) + } + + if dbTx == nil { + return s.db.Update(func(tx *bolt.Tx) error { + return insertFn(tx) + }) + } + + return insertFn(dbTx) +} + +// getValidatorSnapshot queries the validator snapshot for given block from db +func (s *validatorSnapshotStore) getValidatorSnapshot(epoch uint64) (*ValidatorSnapshot, error) { + var validatorSnapshot *ValidatorSnapshot + + err := s.db.View(func(tx *bolt.Tx) error { + v := tx.Bucket(validatorSnapshotsBucket).Get(common.EncodeUint64ToBytes(epoch)) + if v != nil { + return json.Unmarshal(v, &validatorSnapshot) + } + + return nil + }) + + return validatorSnapshot, err +} + +// getNearestOrEpochSnapshot returns the nearest or the exact epoch snapshot from db +func (s *validatorSnapshotStore) getNearestOrEpochSnapshot(epoch uint64, dbTx *bolt.Tx) (*ValidatorSnapshot, error) { + var ( + snapshot *ValidatorSnapshot + err error + ) + + getFn := func(tx *bolt.Tx) error { + for { + v := tx.Bucket(validatorSnapshotsBucket).Get(common.EncodeUint64ToBytes(epoch)) + if v != nil { + return 
json.Unmarshal(v, &snapshot) + } + + if epoch == 0 { // prevent uint64 underflow + break + } + + epoch-- + } + + return nil + } + + if dbTx == nil { + err = s.db.View(func(tx *bolt.Tx) error { + return getFn(tx) + }) + } else { + err = getFn(dbTx) + } + + return snapshot, err +} + +// cleanValidatorSnapshotsFromDB cleans the validator snapshots bucket if a limit is reached, +// but it leaves the latest (n) number of snapshots +func (s *validatorSnapshotStore) cleanValidatorSnapshotsFromDB(epoch uint64, dbTx *bolt.Tx) error { + cleanFn := func(tx *bolt.Tx) error { + bucket := tx.Bucket(validatorSnapshotsBucket) + + // paired list + keys := make([][]byte, 0) + values := make([][]byte, 0) + + for i := 0; i < NumberOfSnapshotsToLeaveInDB; i++ { // exclude the last inserted we already appended + key := common.EncodeUint64ToBytes(epoch) + value := bucket.Get(key) + + if value == nil { + continue + } + + keys = append(keys, key) + values = append(values, value) + epoch-- + } + + // removing an entire bucket is much faster than removing all keys + // look at thread https://github.com/boltdb/bolt/issues/667 + err := tx.DeleteBucket(validatorSnapshotsBucket) + if err != nil { + return err + } + + bucket, err = tx.CreateBucket(validatorSnapshotsBucket) + if err != nil { + return err + } + + // we start the loop in reverse so that the oldest of snapshots get inserted first in db + for i := len(keys) - 1; i >= 0; i-- { + if err := bucket.Put(keys[i], values[i]); err != nil { + return err + } + } + + return nil + } + + if dbTx == nil { + return s.db.Update(func(tx *bolt.Tx) error { + return cleanFn(tx) + }) + } + + return cleanFn(dbTx) +} + +// removeAllValidatorSnapshots drops a validator snapshot bucket and re-creates it in bolt database +func (s *validatorSnapshotStore) removeAllValidatorSnapshots() error { + return s.db.Update(func(tx *bolt.Tx) error { + // removing an entire bucket is much faster than removing all keys + // look at thread https://github.com/boltdb/bolt/issues/667 + err := tx.DeleteBucket(validatorSnapshotsBucket) + if err != nil { + return err + } + + _, err = tx.CreateBucket(validatorSnapshotsBucket) + if err != nil { + return err + } + + return nil + }) +} + +// validatorSnapshotsDBStats returns stats of validators snapshot bucket in db +func (s *validatorSnapshotStore) validatorSnapshotsDBStats() (*bolt.BucketStats, error) { + return state.BucketStats(validatorSnapshotsBucket, s.db) +} + +// beginDBTransaction creates and begins a transaction on BoltDB +// Note that transaction needs to be manually rollback or committed +func (s *validatorSnapshotStore) beginDBTransaction(isWriteTx bool) (*bolt.Tx, error) { + return s.db.Begin(isWriteTx) +} diff --git a/consensus/polybft/validator-snapshot/validator_snapshot_store_test.go b/consensus/polybft/validator-snapshot/validator_snapshot_store_test.go new file mode 100644 index 0000000000..df849acd69 --- /dev/null +++ b/consensus/polybft/validator-snapshot/validator_snapshot_store_test.go @@ -0,0 +1,186 @@ +package validator_snapshot + +import ( + "fmt" + "os" + "path" + "testing" + "time" + + "github.com/0xPolygon/polygon-edge/bls" + "github.com/0xPolygon/polygon-edge/consensus/polybft/validator" + "github.com/0xPolygon/polygon-edge/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + bolt "go.etcd.io/bbolt" +) + +// newTestState creates new instance of state used by tests. 
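cleanValidatorSnapshotsFromDB above keeps only the newest NumberOfSnapshotsToLeaveInDB snapshots, walking epoch numbers downwards from the last inserted one before dropping and re-creating the bucket. A standalone sketch of that retention walk (not from the patch), assuming the constant value of 20 introduced later in this diff and a snapshot present for every epoch:

package main

import "fmt"

func main() {
	const numberOfSnapshotsToLeaveInDB = 20 // value taken from the constants added later in this diff

	lastEpoch := uint64(102)
	kept := make([]uint64, 0, numberOfSnapshotsToLeaveInDB)

	// Same downward walk as cleanValidatorSnapshotsFromDB, ignoring the missing-value check:
	// collect the newest N epochs, newest first; everything older is dropped with the bucket.
	epoch := lastEpoch
	for i := 0; i < numberOfSnapshotsToLeaveInDB; i++ {
		kept = append(kept, epoch)
		epoch--
	}

	fmt.Println(kept) // epochs 102 down to 83 survive the cleanup
}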
+func newTestState(tb testing.TB) *validatorSnapshotStore { + tb.Helper() + + dir := fmt.Sprintf("/tmp/consensus-temp_%v", time.Now().UTC().Format(time.RFC3339Nano)) + err := os.Mkdir(dir, 0775) + + if err != nil { + tb.Fatal(err) + } + + tb.Cleanup(func() { + if err := os.RemoveAll(dir); err != nil { + tb.Fatal(err) + } + }) + + db, err := bolt.Open(path.Join(dir, "my.db"), 0666, nil) + if err != nil { + tb.Fatal(err) + } + + validatorSnapshotStore, err := newValidatorSnapshotStore(db) + if err != nil { + tb.Fatal(err) + } + + return validatorSnapshotStore +} + +func TestState_insertAndGetValidatorSnapshot(t *testing.T) { + t.Parallel() + + const ( + epoch = uint64(1) + epochEndingBlock = uint64(100) + ) + + state := newTestState(t) + keys, err := bls.CreateRandomBlsKeys(3) + + require.NoError(t, err) + + snapshot := validator.AccountSet{ + &validator.ValidatorMetadata{Address: types.BytesToAddress([]byte{0x18}), BlsKey: keys[0].PublicKey()}, + &validator.ValidatorMetadata{Address: types.BytesToAddress([]byte{0x23}), BlsKey: keys[1].PublicKey()}, + &validator.ValidatorMetadata{Address: types.BytesToAddress([]byte{0x37}), BlsKey: keys[2].PublicKey()}, + } + + assert.NoError(t, state.insertValidatorSnapshot( + &ValidatorSnapshot{ + Epoch: epoch, + EpochEndingBlock: epochEndingBlock, + Snapshot: snapshot, + }, nil)) + + snapshotFromDB, err := state.getValidatorSnapshot(epoch) + + assert.NoError(t, err) + assert.Equal(t, snapshot.Len(), snapshotFromDB.Snapshot.Len()) + assert.Equal(t, epoch, snapshotFromDB.Epoch) + assert.Equal(t, epochEndingBlock, snapshotFromDB.EpochEndingBlock) + + for i, v := range snapshot { + assert.Equal(t, v.Address, snapshotFromDB.Snapshot[i].Address) + assert.Equal(t, v.BlsKey, snapshotFromDB.Snapshot[i].BlsKey) + } +} + +func TestState_cleanValidatorSnapshotsFromDb(t *testing.T) { + t.Parallel() + + fixedEpochSize := uint64(10) + state := newTestState(t) + keys, err := bls.CreateRandomBlsKeys(3) + require.NoError(t, err) + + snapshot := validator.AccountSet{ + &validator.ValidatorMetadata{Address: types.BytesToAddress([]byte{0x18}), BlsKey: keys[0].PublicKey()}, + &validator.ValidatorMetadata{Address: types.BytesToAddress([]byte{0x23}), BlsKey: keys[1].PublicKey()}, + &validator.ValidatorMetadata{Address: types.BytesToAddress([]byte{0x37}), BlsKey: keys[2].PublicKey()}, + } + + var epoch uint64 + // add a couple of more snapshots above limit just to make sure we reached it + for i := 1; i <= ValidatorSnapshotLimit+2; i++ { + epoch = uint64(i) + assert.NoError(t, state.insertValidatorSnapshot( + &ValidatorSnapshot{ + Epoch: epoch, + EpochEndingBlock: epoch * fixedEpochSize, + Snapshot: snapshot, + }, nil)) + } + + snapshotFromDB, err := state.getValidatorSnapshot(epoch) + + assert.NoError(t, err) + assert.Equal(t, snapshot.Len(), snapshotFromDB.Snapshot.Len()) + assert.Equal(t, epoch, snapshotFromDB.Epoch) + assert.Equal(t, epoch*fixedEpochSize, snapshotFromDB.EpochEndingBlock) + + for i, v := range snapshot { + assert.Equal(t, v.Address, snapshotFromDB.Snapshot[i].Address) + assert.Equal(t, v.BlsKey, snapshotFromDB.Snapshot[i].BlsKey) + } + + assert.NoError(t, state.cleanValidatorSnapshotsFromDB(epoch, nil)) + + // test that last (numberOfSnapshotsToLeaveInDb) of snapshots are left in db after cleanup + validatorSnapshotsBucketStats, err := state.validatorSnapshotsDBStats() + require.NoError(t, err) + + assert.Equal(t, NumberOfSnapshotsToLeaveInDB, validatorSnapshotsBucketStats.KeyN) + + for i := 0; i < NumberOfSnapshotsToLeaveInDB; i++ { + snapshotFromDB, err = 
state.getValidatorSnapshot(epoch) + assert.NoError(t, err) + assert.NotNil(t, snapshotFromDB) + + epoch-- + } +} + +func TestEpochStore_getNearestOrEpochSnapshot(t *testing.T) { + t.Parallel() + + state := newTestState(t) + epoch := uint64(1) + tv := validator.NewTestValidators(t, 3) + + // Insert a snapshot for epoch 1 + snapshot := &ValidatorSnapshot{ + Epoch: epoch, + EpochEndingBlock: 100, + Snapshot: tv.GetPublicIdentities(), + } + + require.NoError(t, state.insertValidatorSnapshot(snapshot, nil)) + + t.Run("with existing dbTx", func(t *testing.T) { + t.Parallel() + + dbTx, err := state.beginDBTransaction(false) + require.NoError(t, err) + + result, err := state.getNearestOrEpochSnapshot(epoch, dbTx) + assert.NoError(t, err) + assert.Equal(t, snapshot, result) + + require.NoError(t, dbTx.Rollback()) + }) + + t.Run("without existing dbTx", func(t *testing.T) { + t.Parallel() + + result, err := state.getNearestOrEpochSnapshot(epoch, nil) + assert.NoError(t, err) + assert.Equal(t, snapshot, result) + }) + + t.Run("with non-existing epoch", func(t *testing.T) { + t.Parallel() + + result, err := state.getNearestOrEpochSnapshot(2, nil) + assert.NoError(t, err) + assert.Equal(t, snapshot, result) + }) +} diff --git a/consensus/polybft/validators_snapshot.go b/consensus/polybft/validator-snapshot/validators_snapshot.go similarity index 71% rename from consensus/polybft/validators_snapshot.go rename to consensus/polybft/validator-snapshot/validators_snapshot.go index e409e9424a..a8ae64cda1 100644 --- a/consensus/polybft/validators_snapshot.go +++ b/consensus/polybft/validator-snapshot/validators_snapshot.go @@ -1,4 +1,4 @@ -package polybft +package validator_snapshot import ( "errors" @@ -6,52 +6,70 @@ import ( "sync" "github.com/0xPolygon/polygon-edge/blockchain" + "github.com/0xPolygon/polygon-edge/consensus/polybft/helpers" + "github.com/0xPolygon/polygon-edge/consensus/polybft/state" + polytypes "github.com/0xPolygon/polygon-edge/consensus/polybft/types" "github.com/0xPolygon/polygon-edge/consensus/polybft/validator" "github.com/0xPolygon/polygon-edge/types" "github.com/hashicorp/go-hclog" bolt "go.etcd.io/bbolt" ) -type validatorSnapshot struct { +const ( + // ValidatorSnapshotLimit defines a maximum number of validator snapshots + // that can be stored in cache (both memory and db) + ValidatorSnapshotLimit = 100 + // NumberOfSnapshotsToLeaveInMemory defines a number of validator snapshots to leave in memory + NumberOfSnapshotsToLeaveInMemory = 12 + // NumberOfSnapshotsToLeaveInDB defines a number of validator snapshots to leave in db + NumberOfSnapshotsToLeaveInDB = 20 +) + +type ValidatorSnapshot struct { Epoch uint64 `json:"epoch"` EpochEndingBlock uint64 `json:"epochEndingBlock"` Snapshot validator.AccountSet `json:"snapshot"` } -func (vs *validatorSnapshot) copy() *validatorSnapshot { +func (vs *ValidatorSnapshot) copy() *ValidatorSnapshot { copiedAccountSet := vs.Snapshot.Copy() - return &validatorSnapshot{ + return &ValidatorSnapshot{ Epoch: vs.Epoch, EpochEndingBlock: vs.EpochEndingBlock, Snapshot: copiedAccountSet, } } -type validatorsSnapshotCache struct { - snapshots map[uint64]*validatorSnapshot - state *State - blockchain blockchainBackend +type ValidatorsSnapshotCache struct { + snapshots map[uint64]*ValidatorSnapshot + state *validatorSnapshotStore + blockchain polytypes.Blockchain lock sync.Mutex logger hclog.Logger } -// newValidatorsSnapshotCache initializes a new instance of validatorsSnapshotCache -func newValidatorsSnapshotCache( - logger hclog.Logger, state *State, 
blockchain blockchainBackend, -) *validatorsSnapshotCache { - return &validatorsSnapshotCache{ - snapshots: map[uint64]*validatorSnapshot{}, - state: state, +// NewValidatorsSnapshotCache initializes a new instance of validatorsSnapshotCache +func NewValidatorsSnapshotCache( + logger hclog.Logger, state *state.State, blockchain polytypes.Blockchain, +) (*ValidatorsSnapshotCache, error) { + vss, err := newValidatorSnapshotStore(state.DB()) + if err != nil { + return nil, fmt.Errorf("failed to create validator snapshot store: %w", err) + } + + return &ValidatorsSnapshotCache{ + snapshots: map[uint64]*ValidatorSnapshot{}, + state: vss, blockchain: blockchain, logger: logger.Named("validators_snapshot"), - } + }, nil } // GetSnapshot tries to retrieve the most recent cached snapshot (if any) and // applies pending validator set deltas to it. // Otherwise, it builds a snapshot from scratch and applies pending validator set deltas. -func (v *validatorsSnapshotCache) GetSnapshot( +func (v *ValidatorsSnapshotCache) GetSnapshot( blockNumber uint64, parents []*types.Header, dbTx *bolt.Tx) (validator.AccountSet, error) { tx := dbTx isPassedTxNil := dbTx == nil @@ -78,7 +96,7 @@ func (v *validatorsSnapshotCache) GetSnapshot( v.lock.Unlock() }() - _, extra, err := getBlockData(blockNumber, v.blockchain) + _, extra, err := helpers.GetBlockData(blockNumber, v.blockchain) if err != nil { return nil, err } @@ -116,7 +134,7 @@ func (v *validatorsSnapshotCache) GetSnapshot( return nil, fmt.Errorf("failed to compute snapshot for epoch 0: %w", err) } - err = v.storeSnapshot(genesisBlockSnapshot, tx) + err = v.StoreSnapshot(genesisBlockSnapshot, tx) if err != nil { return nil, fmt.Errorf("failed to store validators snapshot for epoch 0: %w", err) } @@ -146,7 +164,7 @@ func (v *validatorsSnapshotCache) GetSnapshot( } latestValidatorSnapshot = intermediateSnapshot - if err = v.storeSnapshot(latestValidatorSnapshot, tx); err != nil { + if err = v.StoreSnapshot(latestValidatorSnapshot, tx); err != nil { return nil, fmt.Errorf("failed to store validators snapshot for epoch %d: %w", latestValidatorSnapshot.Epoch, err) } @@ -173,11 +191,11 @@ func (v *validatorsSnapshotCache) GetSnapshot( } // computeSnapshot gets desired block header by block number, extracts its extra and applies given delta to the snapshot -func (v *validatorsSnapshotCache) computeSnapshot( - existingSnapshot *validatorSnapshot, +func (v *ValidatorsSnapshotCache) computeSnapshot( + existingSnapshot *ValidatorSnapshot, nextEpochEndBlockNumber uint64, parents []*types.Header, -) (*validatorSnapshot, error) { +) (*ValidatorSnapshot, error) { var header *types.Header v.logger.Trace("Compute snapshot started...", "BlockNumber", nextEpochEndBlockNumber) @@ -203,7 +221,7 @@ func (v *validatorsSnapshotCache) computeSnapshot( } } - extra, err := GetIbftExtra(header.ExtraData) + extra, err := polytypes.GetIbftExtra(header.ExtraData) if err != nil { return nil, fmt.Errorf("failed to decode extra from the block#%d: %w", header.Number, err) } @@ -234,17 +252,17 @@ func (v *validatorsSnapshotCache) computeSnapshot( "snapshot", snapshot.String(), "delta", extra.Validators) - return &validatorSnapshot{ + return &ValidatorSnapshot{ Epoch: snapshotEpoch, EpochEndingBlock: nextEpochEndBlockNumber, Snapshot: snapshot, }, nil } -// storeSnapshot stores given snapshot to the in-memory cache and database -func (v *validatorsSnapshotCache) storeSnapshot(snapshot *validatorSnapshot, dbTx *bolt.Tx) error { +// StoreSnapshot stores given snapshot to the in-memory cache and 
database +func (v *ValidatorsSnapshotCache) StoreSnapshot(snapshot *ValidatorSnapshot, dbTx *bolt.Tx) error { v.snapshots[snapshot.Epoch] = snapshot - if err := v.state.EpochStore.insertValidatorSnapshot(snapshot, dbTx); err != nil { + if err := v.state.insertValidatorSnapshot(snapshot, dbTx); err != nil { return fmt.Errorf("failed to insert validator snapshot for epoch %d to the database: %w", snapshot.Epoch, err) } @@ -254,8 +272,8 @@ func (v *validatorsSnapshotCache) storeSnapshot(snapshot *validatorSnapshot, dbT } // Cleanup cleans the validators cache in memory and db -func (v *validatorsSnapshotCache) cleanup(dbTx *bolt.Tx) error { - if len(v.snapshots) >= validatorSnapshotLimit { +func (v *ValidatorsSnapshotCache) cleanup(dbTx *bolt.Tx) error { + if len(v.snapshots) >= ValidatorSnapshotLimit { latestEpoch := uint64(0) for e := range v.snapshots { @@ -265,9 +283,9 @@ func (v *validatorsSnapshotCache) cleanup(dbTx *bolt.Tx) error { } startEpoch := latestEpoch - cache := make(map[uint64]*validatorSnapshot, numberOfSnapshotsToLeaveInMemory) + cache := make(map[uint64]*ValidatorSnapshot, NumberOfSnapshotsToLeaveInMemory) - for i := 0; i < numberOfSnapshotsToLeaveInMemory; i++ { + for i := 0; i < NumberOfSnapshotsToLeaveInMemory; i++ { if snapshot, exists := v.snapshots[startEpoch]; exists { cache[startEpoch] = snapshot } @@ -277,7 +295,7 @@ func (v *validatorsSnapshotCache) cleanup(dbTx *bolt.Tx) error { v.snapshots = cache - return v.state.EpochStore.cleanValidatorSnapshotsFromDB(latestEpoch, dbTx) + return v.state.cleanValidatorSnapshotsFromDB(latestEpoch, dbTx) } return nil @@ -285,8 +303,8 @@ func (v *validatorsSnapshotCache) cleanup(dbTx *bolt.Tx) error { // getLastCachedSnapshot gets the latest snapshot cached // If it doesn't have snapshot cached for desired epoch, it will return the latest one it has -func (v *validatorsSnapshotCache) getLastCachedSnapshot(currentEpoch uint64, - dbTx *bolt.Tx) (*validatorSnapshot, error) { +func (v *ValidatorsSnapshotCache) getLastCachedSnapshot(currentEpoch uint64, + dbTx *bolt.Tx) (*ValidatorSnapshot, error) { epochToQuery := currentEpoch cachedSnapshot := v.snapshots[currentEpoch] @@ -310,7 +328,7 @@ func (v *validatorsSnapshotCache) getLastCachedSnapshot(currentEpoch uint64, currentEpoch-- } - dbSnapshot, err := v.state.EpochStore.getNearestOrEpochSnapshot(epochToQuery, dbTx) + dbSnapshot, err := v.state.getNearestOrEpochSnapshot(epochToQuery, dbTx) if err != nil { return nil, err } @@ -330,10 +348,10 @@ func (v *validatorsSnapshotCache) getLastCachedSnapshot(currentEpoch uint64, // getNextEpochEndingBlock gets the epoch ending block of a newer epoch // It start checking the blocks from the provided epoch ending block of the previous epoch -func (v *validatorsSnapshotCache) getNextEpochEndingBlock(latestEpochEndingBlock uint64) (uint64, error) { +func (v *ValidatorsSnapshotCache) getNextEpochEndingBlock(latestEpochEndingBlock uint64) (uint64, error) { blockNumber := latestEpochEndingBlock + 1 // get next block - _, extra, err := getBlockData(blockNumber, v.blockchain) + _, extra, err := helpers.GetBlockData(blockNumber, v.blockchain) if err != nil { return 0, err } @@ -344,7 +362,7 @@ func (v *validatorsSnapshotCache) getNextEpochEndingBlock(latestEpochEndingBlock for startEpoch == epoch { blockNumber++ - _, extra, err = getBlockData(blockNumber, v.blockchain) + _, extra, err := helpers.GetBlockData(blockNumber, v.blockchain) if err != nil { if errors.Is(err, blockchain.ErrNoBlock) { return blockNumber - 1, nil @@ -358,3 +376,26 @@ func (v 
*validatorsSnapshotCache) getNextEpochEndingBlock(latestEpochEndingBlock return blockNumber - 1, nil } + +// isEpochEndingBlock checks if given block is an epoch ending block +func isEpochEndingBlock(blockNumber uint64, extra *polytypes.Extra, blockchain polytypes.Blockchain) (bool, error) { + if extra.Validators == nil { + // non epoch ending blocks have validator set delta as nil + return false, nil + } + + if !extra.Validators.IsEmpty() { + // if validator set delta is not empty, the validator set was changed in this block + // meaning the epoch changed as well + return true, nil + } + + _, nextBlockExtra, err := helpers.GetBlockData(blockNumber+1, blockchain) + if err != nil { + return false, err + } + + // validator set delta can be empty (no change in validator set happened) + // so we need to check if their epoch numbers are different + return extra.BlockMetaData.EpochNumber != nextBlockExtra.BlockMetaData.EpochNumber, nil +} diff --git a/consensus/polybft/validator-snapshot/validators_snapshot_test.go b/consensus/polybft/validator-snapshot/validators_snapshot_test.go new file mode 100644 index 0000000000..e3b0d5f365 --- /dev/null +++ b/consensus/polybft/validator-snapshot/validators_snapshot_test.go @@ -0,0 +1,425 @@ +package validator_snapshot + +// import ( +// "fmt" +// "testing" +// "time" + +// "github.com/0xPolygon/polygon-edge/consensus/polybft/helpers" +// polytypes "github.com/0xPolygon/polygon-edge/consensus/polybft/types" +// "github.com/0xPolygon/polygon-edge/consensus/polybft/validator" +// "github.com/0xPolygon/polygon-edge/types" +// "github.com/hashicorp/go-hclog" +// "github.com/stretchr/testify/assert" +// "github.com/stretchr/testify/mock" +// "github.com/stretchr/testify/require" +// ) + +// func TestValidatorsSnapshotCache_GetSnapshot_Build(t *testing.T) { +// t.Parallel() +// assertions := require.New(t) + +// const ( +// totalValidators = 10 +// validatorSetSize = 5 +// epochSize = uint64(10) +// ) + +// allValidators := validator.NewTestValidators(t, totalValidators).GetPublicIdentities() + +// var oddValidators, evenValidators validator.AccountSet + +// for i := 0; i < totalValidators; i++ { +// if i%2 == 0 { +// evenValidators = append(evenValidators, allValidators[i]) +// } else { +// oddValidators = append(oddValidators, allValidators[i]) +// } +// } + +// headersMap := &helpers.TestHeadersMap{HeadersByNumber: make(map[uint64]*types.Header)} + +// createHeaders(t, headersMap, 0, epochSize-1, 1, nil, allValidators[:validatorSetSize]) +// createHeaders(t, headersMap, epochSize, 2*epochSize-1, 2, allValidators[:validatorSetSize], allValidators[validatorSetSize:]) +// createHeaders(t, headersMap, 2*epochSize, 3*epochSize-1, 3, allValidators[validatorSetSize:], oddValidators) +// createHeaders(t, headersMap, 3*epochSize, 4*epochSize-1, 4, oddValidators, evenValidators) + +// var cases = []struct { +// blockNumber uint64 +// expectedSnapshot validator.AccountSet +// validatorsOverlap bool +// parents []*types.Header +// }{ +// {4, allValidators[:validatorSetSize], false, nil}, +// {1 * epochSize, allValidators[validatorSetSize:], false, nil}, +// {13, allValidators[validatorSetSize:], false, nil}, +// {27, oddValidators, true, nil}, +// {36, evenValidators, true, nil}, +// {4, allValidators[:validatorSetSize], false, headersMap.GetHeaders()}, +// {13, allValidators[validatorSetSize:], false, headersMap.GetHeaders()}, +// {27, oddValidators, true, headersMap.GetHeaders()}, +// {36, evenValidators, true, headersMap.GetHeaders()}, +// } + +// blockchainMock := 
new(helpers.BlockchainMock) +// blockchainMock.On("GetHeaderByNumber", mock.Anything).Return(headersMap.GetHeader) + +// testValidatorsCache := &testValidatorsCache{ +// validatorsSnapshotCache: newValidatorsSnapshotCache(hclog.NewNullLogger(), newTestState(t), blockchainMock), +// } + +// for _, c := range cases { +// snapshot, err := testValidatorsCache.GetSnapshot(c.blockNumber, c.parents, nil) + +// assertions.NoError(err) +// assertions.Len(snapshot, c.expectedSnapshot.Len()) + +// if c.validatorsOverlap { +// for _, validator := range c.expectedSnapshot { +// // Order of validators is not preserved, because there are overlapping between validators set. +// // In that case, at the beginning of the set are the ones preserved from the previous validator set. +// // Newly validators are added to the end after the one from previous validator set. +// assertions.True(snapshot.ContainsAddress(validator.Address)) +// } +// } else { +// assertions.Equal(c.expectedSnapshot, snapshot) +// } + +// assertions.NoError(testValidatorsCache.cleanValidatorsCache()) + +// if c.parents != nil { +// blockchainMock.AssertNotCalled(t, "GetHeaderByNumber") +// } +// } +// } + +// func TestValidatorsSnapshotCache_GetSnapshot_FetchFromCache(t *testing.T) { +// t.Parallel() +// require := require.New(t) + +// const ( +// totalValidators = 10 +// validatorSetSize = 5 +// ) + +// allValidators := validator.NewTestValidators(t, totalValidators).GetPublicIdentities() +// epochOneValidators := validator.AccountSet{allValidators[0], allValidators[len(allValidators)-1]} +// epochTwoValidators := allValidators[1 : len(allValidators)-2] + +// headersMap := &helpers.TestHeadersMap{HeadersByNumber: make(map[uint64]*types.Header)} +// createHeaders(t, headersMap, 0, 9, 1, nil, allValidators) +// createHeaders(t, headersMap, 10, 19, 2, allValidators, epochOneValidators) +// createHeaders(t, headersMap, 20, 29, 3, epochOneValidators, epochTwoValidators) + +// blockchainMock := new(helpers.BlockchainMock) +// blockchainMock.On("GetHeaderByNumber", mock.Anything).Return(headersMap.GetHeader) + +// testValidatorsCache := &testValidatorsCache{ +// validatorsSnapshotCache: newValidatorsSnapshotCache(hclog.NewNullLogger(), newTestState(t), blockchainMock), +// } + +// require.NoError(testValidatorsCache.storeSnapshot(&ValidatorSnapshot{1, 10, epochOneValidators}, nil)) +// require.NoError(testValidatorsCache.storeSnapshot(&ValidatorSnapshot{2, 20, epochTwoValidators}, nil)) + +// // Fetch snapshot from in memory cache +// snapshot, err := testValidatorsCache.GetSnapshot(10, nil, nil) +// require.NoError(err) +// require.Equal(epochOneValidators, snapshot) + +// // Invalidate in memory cache +// testValidatorsCache.snapshots = map[uint64]*ValidatorSnapshot{} +// require.NoError(testValidatorsCache.state.RemoveAllValidatorSnapshots()) +// // Fetch snapshot from database +// snapshot, err = testValidatorsCache.GetSnapshot(10, nil, nil) +// require.NoError(err) +// require.Equal(epochOneValidators, snapshot) + +// snapshot, err = testValidatorsCache.GetSnapshot(20, nil, nil) +// require.NoError(err) +// require.Equal(epochTwoValidators, snapshot) +// } + +// func TestValidatorsSnapshotCache_Cleanup(t *testing.T) { +// t.Parallel() +// require := require.New(t) + +// blockchainMock := new(helpers.BlockchainMock) +// cache := &testValidatorsCache{ +// validatorsSnapshotCache: newValidatorsSnapshotCache(hclog.NewNullLogger(), newTestState(t), blockchainMock), +// } +// snapshot := validator.NewTestValidators(t, 3).GetPublicIdentities() 
+// maxEpoch := uint64(0) + +// for i := uint64(0); i < ValidatorSnapshotLimit; i++ { +// require.NoError(cache.storeSnapshot(&ValidatorSnapshot{i, i * 10, snapshot}, nil)) + +// maxEpoch++ +// } + +// require.NoError(cache.cleanup(nil)) + +// // assertions for remaining snapshots in the in memory cache +// require.Len(cache.snapshots, NumberOfSnapshotsToLeaveInMemory) + +// currentEpoch := maxEpoch + +// for i := 0; i < NumberOfSnapshotsToLeaveInMemory; i++ { +// currentEpoch-- +// currentSnapshot, snapExists := cache.snapshots[currentEpoch] +// require.True(snapExists, fmt.Sprintf("failed to fetch in memory snapshot for epoch %d", currentEpoch)) +// require.Equal(snapshot, currentSnapshot.Snapshot, fmt.Sprintf("snapshots for epoch %d are not equal", currentEpoch)) +// } + +// stats, err := cache.state.ValidatorSnapshotsDBStats() +// require.NoError(err) + +// // assertions for remaining snapshots in database +// require.Equal(stats.KeyN, NumberOfSnapshotsToLeaveInDB) + +// currentEpoch = maxEpoch + +// for i := 0; i < NumberOfSnapshotsToLeaveInDB; i++ { +// currentEpoch-- +// currentSnapshot, err := cache.state.GetValidatorSnapshot(currentEpoch) +// require.NoError(err, fmt.Sprintf("failed to fetch database snapshot for epoch %d", currentEpoch)) +// require.Equal(snapshot, currentSnapshot.Snapshot, fmt.Sprintf("snapshots for epoch %d are not equal", currentEpoch)) +// } +// } + +// func TestValidatorsSnapshotCache_ComputeSnapshot_UnknownBlock(t *testing.T) { +// t.Parallel() +// assertions := assert.New(t) + +// const ( +// totalValidators = 15 +// validatorSetSize = totalValidators / 2 +// epochSize = uint64(10) +// ) + +// allValidators := validator.NewTestValidators(t, totalValidators).GetPublicIdentities() +// headersMap := &helpers.TestHeadersMap{} +// headersMap.AddHeader(createValidatorDeltaHeader(t, 0, 0, nil, allValidators[:validatorSetSize])) +// headersMap.AddHeader(createValidatorDeltaHeader(t, 1*epochSize, 1, allValidators[:validatorSetSize], allValidators[validatorSetSize:])) + +// blockchainMock := new(helpers.BlockchainMock) +// blockchainMock.On("GetHeaderByNumber", mock.Anything).Return(headersMap.GetHeader) + +// testValidatorsCache := &testValidatorsCache{ +// validatorsSnapshotCache: newValidatorsSnapshotCache(hclog.NewNullLogger(), newTestState(t), blockchainMock), +// } + +// snapshot, err := testValidatorsCache.computeSnapshot(nil, 5*epochSize, nil) +// assertions.Nil(snapshot) +// assertions.ErrorContains(err, "unknown block. 
Block number=50") +// } + +// func TestValidatorsSnapshotCache_ComputeSnapshot_IncorrectExtra(t *testing.T) { +// t.Parallel() +// assertions := assert.New(t) + +// const ( +// totalValidators = 6 +// validatorSetSize = totalValidators / 2 +// epochSize = uint64(10) +// ) + +// allValidators := validator.NewTestValidators(t, totalValidators).GetPublicIdentities() +// headersMap := &helpers.TestHeadersMap{} +// invalidHeader := createValidatorDeltaHeader(t, 1*epochSize, 1, allValidators[:validatorSetSize], allValidators[validatorSetSize:]) +// invalidHeader.ExtraData = []byte{0x2, 0x7} +// headersMap.AddHeader(invalidHeader) + +// blockchainMock := new(helpers.BlockchainMock) +// blockchainMock.On("GetHeaderByNumber", mock.Anything).Return(headersMap.GetHeader) + +// testValidatorsCache := &testValidatorsCache{ +// validatorsSnapshotCache: newValidatorsSnapshotCache(hclog.NewNullLogger(), newTestState(t), blockchainMock), +// } + +// snapshot, err := testValidatorsCache.computeSnapshot(nil, 1*epochSize, nil) +// assertions.Nil(snapshot) +// assertions.ErrorContains(err, "failed to decode extra from the block#10: wrong extra size: 2") +// } + +// func TestValidatorsSnapshotCache_ComputeSnapshot_ApplyDeltaFail(t *testing.T) { +// t.Parallel() +// assertions := assert.New(t) + +// const ( +// totalValidators = 6 +// validatorSetSize = totalValidators / 2 +// epochSize = uint64(10) +// ) + +// allValidators := validator.NewTestValidators(t, totalValidators).GetPublicIdentities() +// headersMap := &helpers.TestHeadersMap{} +// headersMap.AddHeader(createValidatorDeltaHeader(t, 0, 0, nil, allValidators[:validatorSetSize])) +// headersMap.AddHeader(createValidatorDeltaHeader(t, 1*epochSize, 1, nil, allValidators[:validatorSetSize])) + +// blockchainMock := new(helpers.BlockchainMock) +// blockchainMock.On("GetHeaderByNumber", mock.Anything).Return(headersMap.GetHeader) + +// testValidatorsCache := &testValidatorsCache{ +// validatorsSnapshotCache: newValidatorsSnapshotCache(hclog.NewNullLogger(), newTestState(t), blockchainMock), +// } + +// snapshot, err := testValidatorsCache.computeSnapshot(&ValidatorSnapshot{0, 0, allValidators}, 1*epochSize, nil) +// assertions.Nil(snapshot) +// assertions.ErrorContains(err, "failed to apply delta to the validators snapshot, block#10") +// } + +// func TestValidatorsSnapshotCache_Empty(t *testing.T) { +// t.Parallel() + +// headersMap := &helpers.TestHeadersMap{HeadersByNumber: make(map[uint64]*types.Header)} + +// createHeaders(t, headersMap, 0, 1, 1, nil, nil) + +// blockchainMock := new(helpers.BlockchainMock) +// blockchainMock.On("GetHeaderByNumber", mock.Anything).Return(headersMap.GetHeader) + +// testValidatorsCache := &testValidatorsCache{ +// validatorsSnapshotCache: newValidatorsSnapshotCache(hclog.NewNullLogger(), newTestState(t), blockchainMock), +// } + +// _, err := testValidatorsCache.GetSnapshot(1, nil, nil) +// assert.ErrorContains(t, err, "validator snapshot is empty for block") +// } + +// func TestValidatorsSnapshotCache_HugeBuild(t *testing.T) { +// t.Parallel() + +// type epochValidatorSetIndexes struct { +// firstValIndex int +// lastValIndex int +// } + +// const ( +// epochSize = uint64(10) +// lastBlock = uint64(100_000) +// numOfEpochsToChangeValSet = 50 +// totalValidators = 20 +// validatorSetSize = 5 +// ) + +// allValidators := validator.NewTestValidators(t, totalValidators).GetPublicIdentities() +// headersMap := &helpers.TestHeadersMap{HeadersByNumber: make(map[uint64]*types.Header)} + +// oldValidators := 
allValidators[:validatorSetSize] +// newValidators := oldValidators +// firstValIndex := 0 +// lastValIndex := validatorSetSize +// epochValidators := map[uint64]epochValidatorSetIndexes{} + +// // create headers for the first epoch separately +// createHeaders(t, headersMap, 0, epochSize-1, 1, nil, newValidators) + +// for i := epochSize; i < lastBlock; i += epochSize { +// from := i +// to := i + epochSize - 1 +// epoch := i/epochSize + 1 + +// oldValidators = newValidators + +// if epoch%numOfEpochsToChangeValSet == 0 { +// // every n epochs, change validators +// firstValIndex = lastValIndex +// lastValIndex += validatorSetSize + +// if lastValIndex > totalValidators { +// firstValIndex = 0 +// lastValIndex = validatorSetSize +// } + +// newValidators = allValidators[firstValIndex:lastValIndex] +// } + +// epochValidators[epoch] = epochValidatorSetIndexes{firstValIndex, lastValIndex} + +// createHeaders(t, headersMap, from, to, epoch, oldValidators, newValidators) +// } + +// blockchainMock := new(helpers.BlockchainMock) +// blockchainMock.On("GetHeaderByNumber", mock.Anything).Return(headersMap.getHeader) + +// validatorsSnapshotCache := newValidatorsSnapshotCache(hclog.NewNullLogger(), newTestState(t), blockchainMock) + +// s := time.Now().UTC() + +// snapshot, err := validatorsSnapshotCache.GetSnapshot(lastBlock-epochSize, nil, nil) + +// t.Log("Time needed to calculate snapshot:", time.Since(s)) + +// require.NoError(t, err) +// require.NotNil(t, snapshot) +// require.NotEmpty(t, snapshot) + +// // check if the validators of random epochs are as expected +// snapshot, err = validatorsSnapshotCache.GetSnapshot(46, nil, nil) // epoch 5 where validator set did not change +// require.NoError(t, err) + +// epochValIndexes, ok := epochValidators[5] +// require.True(t, ok) +// require.True(t, allValidators[epochValIndexes.firstValIndex:epochValIndexes.lastValIndex].Equals(snapshot)) + +// snapshot, err = validatorsSnapshotCache.GetSnapshot(numOfEpochsToChangeValSet*epochSize, nil, nil) // epoch 50 where validator set was changed +// require.NoError(t, err) + +// epochValIndexes, ok = epochValidators[numOfEpochsToChangeValSet] +// require.True(t, ok) +// require.True(t, allValidators[epochValIndexes.firstValIndex:epochValIndexes.lastValIndex].Equals(snapshot)) + +// snapshot, err = validatorsSnapshotCache.GetSnapshot(2*numOfEpochsToChangeValSet*epochSize, nil, nil) // epoch 100 where validator set was changed +// require.NoError(t, err) + +// epochValIndexes, ok = epochValidators[2*numOfEpochsToChangeValSet] +// require.True(t, ok) +// require.True(t, allValidators[epochValIndexes.firstValIndex:epochValIndexes.lastValIndex].Equals(snapshot)) + +// snapshot, err = validatorsSnapshotCache.GetSnapshot(57903, nil, nil) // epoch 5790 where validator set did not change +// require.NoError(t, err) + +// epochValIndexes, ok = epochValidators[57903/epochSize+1] +// require.True(t, ok) +// require.True(t, allValidators[epochValIndexes.firstValIndex:epochValIndexes.lastValIndex].Equals(snapshot)) + +// snapshot, err = validatorsSnapshotCache.GetSnapshot(99991, nil, nil) // epoch 10000 where validator set did not change +// require.NoError(t, err) + +// epochValIndexes, ok = epochValidators[99991/epochSize+1] +// require.True(t, ok) +// require.True(t, allValidators[epochValIndexes.firstValIndex:epochValIndexes.lastValIndex].Equals(snapshot)) +// } + +// func createHeaders(t *testing.T, headersMap *helpers.TestHeadersMap, +// fromBlock, toBlock, epoch uint64, oldValidators, newValidators 
validator.AccountSet) { +// t.Helper() + +// headersMap.AddHeader(createValidatorDeltaHeader(t, fromBlock, epoch-1, oldValidators, newValidators)) + +// for i := fromBlock + 1; i <= toBlock; i++ { +// headersMap.AddHeader(createValidatorDeltaHeader(t, i, epoch, nil, nil)) +// } +// } + +// func createValidatorDeltaHeader(t *testing.T, blockNumber, epoch uint64, oldValidatorSet, newValidatorSet validator.AccountSet) *types.Header { +// t.Helper() + +// delta, _ := validator.CreateValidatorSetDelta(oldValidatorSet, newValidatorSet) +// extra := &polytypes.Extra{Validators: delta, BlockMetaData: &polytypes.BlockMetaData{EpochNumber: epoch}} + +// return &types.Header{ +// Number: blockNumber, +// ExtraData: extra.MarshalRLPTo(nil), +// } +// } + +// type testValidatorsCache struct { +// *validatorsSnapshotCache +// } + +// func (c *testValidatorsCache) cleanValidatorsCache() error { +// c.snapshots = make(map[uint64]*ValidatorSnapshot) + +// return c.state.RemoveAllValidatorSnapshots() +// } diff --git a/consensus/polybft/validator/validator_set_map.go b/consensus/polybft/validator/validator_set_map.go new file mode 100644 index 0000000000..32da04ea5b --- /dev/null +++ b/consensus/polybft/validator/validator_set_map.go @@ -0,0 +1,111 @@ +package validator + +import ( + "bytes" + "encoding/hex" + "encoding/json" + "fmt" + "math/big" + "sort" + "strings" + + "github.com/0xPolygon/polygon-edge/types" +) + +var bigZero = big.NewInt(0) + +type ValidatorSetState struct { + BlockNumber uint64 `json:"block"` + EpochID uint64 `json:"epoch"` + UpdatedAtBlockNumber uint64 `json:"updated_at_block"` + Validators ValidatorStakeMap `json:"validators"` +} + +func (vs ValidatorSetState) Marshal() ([]byte, error) { + return json.Marshal(vs) +} + +func (vs *ValidatorSetState) Unmarshal(b []byte) error { + return json.Unmarshal(b, vs) +} + +// ValidatorStakeMap holds ValidatorMetadata for each validator address +type ValidatorStakeMap map[types.Address]*ValidatorMetadata + +// NewValidatorStakeMap returns a new instance of validatorStakeMap +func NewValidatorStakeMap(validatorSet AccountSet) ValidatorStakeMap { + stakeMap := make(ValidatorStakeMap, len(validatorSet)) + + for _, v := range validatorSet { + stakeMap[v.Address] = v.Copy() + } + + return stakeMap +} + +// AddStake adds given amount to a validator defined by address +func (sc *ValidatorStakeMap) AddStake(address types.Address, amount *big.Int) { + if metadata, exists := (*sc)[address]; exists { + metadata.VotingPower.Add(metadata.VotingPower, amount) + metadata.IsActive = metadata.VotingPower.Cmp(bigZero) > 0 + } else { + (*sc)[address] = &ValidatorMetadata{ + VotingPower: new(big.Int).Set(amount), + Address: address, + IsActive: amount.Cmp(bigZero) > 0, + } + } +} + +// RemoveStake removes given amount from validator defined by address +func (sc *ValidatorStakeMap) RemoveStake(address types.Address, amount *big.Int) { + stakeData := (*sc)[address] + stakeData.VotingPower.Sub(stakeData.VotingPower, amount) + stakeData.IsActive = stakeData.VotingPower.Cmp(bigZero) > 0 +} + +// GetSorted returns validators (*ValidatorMetadata) in sorted order +func (sc ValidatorStakeMap) GetSorted(maxValidatorSetSize int) AccountSet { + activeValidators := make(AccountSet, 0, len(sc)) + + for _, v := range sc { + if v.VotingPower.Cmp(bigZero) > 0 { + activeValidators = append(activeValidators, v) + } + } + + sort.Slice(activeValidators, func(i, j int) bool { + v1, v2 := activeValidators[i], activeValidators[j] + + switch v1.VotingPower.Cmp(v2.VotingPower) { + case 1: + 
return true + case 0: + return bytes.Compare(v1.Address[:], v2.Address[:]) < 0 + default: + return false + } + }) + + if len(activeValidators) <= maxValidatorSetSize { + return activeValidators + } + + return activeValidators[:maxValidatorSetSize] +} + +func (sc ValidatorStakeMap) String() string { + var sb strings.Builder + + for _, x := range sc.GetSorted(len(sc)) { + bls := "" + if x.BlsKey != nil { + bls = hex.EncodeToString(x.BlsKey.Marshal()) + } + + sb.WriteString(fmt.Sprintf("%s:%s:%s:%t\n", + x.Address, x.VotingPower, bls, x.IsActive)) + } + + return sb.String() +} diff --git a/consensus/polybft/validators_snapshot_test.go b/consensus/polybft/validators_snapshot_test.go deleted file mode 100644 index b1b12cabce..0000000000 --- a/consensus/polybft/validators_snapshot_test.go +++ /dev/null @@ -1,423 +0,0 @@ -package polybft - -import ( - "fmt" - "testing" - "time" - - "github.com/0xPolygon/polygon-edge/consensus/polybft/validator" - "github.com/0xPolygon/polygon-edge/types" - "github.com/hashicorp/go-hclog" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" -) - -func TestValidatorsSnapshotCache_GetSnapshot_Build(t *testing.T) { - t.Parallel() - assertions := require.New(t) - - const ( - totalValidators = 10 - validatorSetSize = 5 - epochSize = uint64(10) - ) - - allValidators := validator.NewTestValidators(t, totalValidators).GetPublicIdentities() - - var oddValidators, evenValidators validator.AccountSet - - for i := 0; i < totalValidators; i++ { - if i%2 == 0 { - evenValidators = append(evenValidators, allValidators[i]) - } else { - oddValidators = append(oddValidators, allValidators[i]) - } - } - - headersMap := &testHeadersMap{headersByNumber: make(map[uint64]*types.Header)} - - createHeaders(t, headersMap, 0, epochSize-1, 1, nil, allValidators[:validatorSetSize]) - createHeaders(t, headersMap, epochSize, 2*epochSize-1, 2, allValidators[:validatorSetSize], allValidators[validatorSetSize:]) - createHeaders(t, headersMap, 2*epochSize, 3*epochSize-1, 3, allValidators[validatorSetSize:], oddValidators) - createHeaders(t, headersMap, 3*epochSize, 4*epochSize-1, 4, oddValidators, evenValidators) - - var cases = []struct { - blockNumber uint64 - expectedSnapshot validator.AccountSet - validatorsOverlap bool - parents []*types.Header - }{ - {4, allValidators[:validatorSetSize], false, nil}, - {1 * epochSize, allValidators[validatorSetSize:], false, nil}, - {13, allValidators[validatorSetSize:], false, nil}, - {27, oddValidators, true, nil}, - {36, evenValidators, true, nil}, - {4, allValidators[:validatorSetSize], false, headersMap.getHeaders()}, - {13, allValidators[validatorSetSize:], false, headersMap.getHeaders()}, - {27, oddValidators, true, headersMap.getHeaders()}, - {36, evenValidators, true, headersMap.getHeaders()}, - } - - blockchainMock := new(blockchainMock) - blockchainMock.On("GetHeaderByNumber", mock.Anything).Return(headersMap.getHeader) - - testValidatorsCache := &testValidatorsCache{ - validatorsSnapshotCache: newValidatorsSnapshotCache(hclog.NewNullLogger(), newTestState(t), blockchainMock), - } - - for _, c := range cases { - snapshot, err := testValidatorsCache.GetSnapshot(c.blockNumber, c.parents, nil) - - assertions.NoError(err) - assertions.Len(snapshot, c.expectedSnapshot.Len()) - - if c.validatorsOverlap { - for _, validator := range c.expectedSnapshot { - // Order of validators is not preserved, because there are overlapping between validators set. 
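
For readers skimming the new consensus/polybft/validator/validator_set_map.go introduced above, a minimal usage sketch of ValidatorStakeMap follows. It is illustrative only and not part of this patch; the addresses and stake amounts are invented, and only the identifiers visible in the diff (NewValidatorStakeMap, AddStake, RemoveStake, GetSorted, AccountSet) are assumed.

package main

import (
	"fmt"
	"math/big"

	"github.com/0xPolygon/polygon-edge/consensus/polybft/validator"
	"github.com/0xPolygon/polygon-edge/types"
)

func main() {
	// Example-only addresses; not taken from the patch.
	a := types.StringToAddress("0x1")
	b := types.StringToAddress("0x2")

	// Start from an empty stake map and add stake for two validators.
	stakeMap := validator.NewValidatorStakeMap(validator.AccountSet{})
	stakeMap.AddStake(a, big.NewInt(150))
	stakeMap.AddStake(b, big.NewInt(100))

	// Removing the full stake drops the voting power to zero, which marks
	// the validator as inactive, so GetSorted excludes it.
	stakeMap.RemoveStake(b, big.NewInt(100))

	// GetSorted returns active validators ordered by voting power
	// (ties broken by address) and truncated to the given set size.
	active := stakeMap.GetSorted(10)
	fmt.Println(len(active)) // 1: only the first validator is still active
}
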
- // In that case, at the beginning of the set are the ones preserved from the previous validator set. - // Newly validators are added to the end after the one from previous validator set. - assertions.True(snapshot.ContainsAddress(validator.Address)) - } - } else { - assertions.Equal(c.expectedSnapshot, snapshot) - } - - assertions.NoError(testValidatorsCache.cleanValidatorsCache()) - - if c.parents != nil { - blockchainMock.AssertNotCalled(t, "GetHeaderByNumber") - } - } -} - -func TestValidatorsSnapshotCache_GetSnapshot_FetchFromCache(t *testing.T) { - t.Parallel() - require := require.New(t) - - const ( - totalValidators = 10 - validatorSetSize = 5 - ) - - allValidators := validator.NewTestValidators(t, totalValidators).GetPublicIdentities() - epochOneValidators := validator.AccountSet{allValidators[0], allValidators[len(allValidators)-1]} - epochTwoValidators := allValidators[1 : len(allValidators)-2] - - headersMap := &testHeadersMap{headersByNumber: make(map[uint64]*types.Header)} - createHeaders(t, headersMap, 0, 9, 1, nil, allValidators) - createHeaders(t, headersMap, 10, 19, 2, allValidators, epochOneValidators) - createHeaders(t, headersMap, 20, 29, 3, epochOneValidators, epochTwoValidators) - - blockchainMock := new(blockchainMock) - blockchainMock.On("GetHeaderByNumber", mock.Anything).Return(headersMap.getHeader) - - testValidatorsCache := &testValidatorsCache{ - validatorsSnapshotCache: newValidatorsSnapshotCache(hclog.NewNullLogger(), newTestState(t), blockchainMock), - } - - require.NoError(testValidatorsCache.storeSnapshot(&validatorSnapshot{1, 10, epochOneValidators}, nil)) - require.NoError(testValidatorsCache.storeSnapshot(&validatorSnapshot{2, 20, epochTwoValidators}, nil)) - - // Fetch snapshot from in memory cache - snapshot, err := testValidatorsCache.GetSnapshot(10, nil, nil) - require.NoError(err) - require.Equal(epochOneValidators, snapshot) - - // Invalidate in memory cache - testValidatorsCache.snapshots = map[uint64]*validatorSnapshot{} - require.NoError(testValidatorsCache.state.EpochStore.removeAllValidatorSnapshots()) - // Fetch snapshot from database - snapshot, err = testValidatorsCache.GetSnapshot(10, nil, nil) - require.NoError(err) - require.Equal(epochOneValidators, snapshot) - - snapshot, err = testValidatorsCache.GetSnapshot(20, nil, nil) - require.NoError(err) - require.Equal(epochTwoValidators, snapshot) -} - -func TestValidatorsSnapshotCache_Cleanup(t *testing.T) { - t.Parallel() - require := require.New(t) - - blockchainMock := new(blockchainMock) - cache := &testValidatorsCache{ - validatorsSnapshotCache: newValidatorsSnapshotCache(hclog.NewNullLogger(), newTestState(t), blockchainMock), - } - snapshot := validator.NewTestValidators(t, 3).GetPublicIdentities() - maxEpoch := uint64(0) - - for i := uint64(0); i < validatorSnapshotLimit; i++ { - require.NoError(cache.storeSnapshot(&validatorSnapshot{i, i * 10, snapshot}, nil)) - - maxEpoch++ - } - - require.NoError(cache.cleanup(nil)) - - // assertions for remaining snapshots in the in memory cache - require.Len(cache.snapshots, numberOfSnapshotsToLeaveInMemory) - - currentEpoch := maxEpoch - - for i := 0; i < numberOfSnapshotsToLeaveInMemory; i++ { - currentEpoch-- - currentSnapshot, snapExists := cache.snapshots[currentEpoch] - require.True(snapExists, fmt.Sprintf("failed to fetch in memory snapshot for epoch %d", currentEpoch)) - require.Equal(snapshot, currentSnapshot.Snapshot, fmt.Sprintf("snapshots for epoch %d are not equal", currentEpoch)) - } - - stats, err := 
cache.state.EpochStore.validatorSnapshotsDBStats() - require.NoError(err) - - // assertions for remaining snapshots in database - require.Equal(stats.KeyN, numberOfSnapshotsToLeaveInDB) - - currentEpoch = maxEpoch - - for i := 0; i < numberOfSnapshotsToLeaveInDB; i++ { - currentEpoch-- - currentSnapshot, err := cache.state.EpochStore.getValidatorSnapshot(currentEpoch) - require.NoError(err, fmt.Sprintf("failed to fetch database snapshot for epoch %d", currentEpoch)) - require.Equal(snapshot, currentSnapshot.Snapshot, fmt.Sprintf("snapshots for epoch %d are not equal", currentEpoch)) - } -} - -func TestValidatorsSnapshotCache_ComputeSnapshot_UnknownBlock(t *testing.T) { - t.Parallel() - assertions := assert.New(t) - - const ( - totalValidators = 15 - validatorSetSize = totalValidators / 2 - epochSize = uint64(10) - ) - - allValidators := validator.NewTestValidators(t, totalValidators).GetPublicIdentities() - headersMap := &testHeadersMap{} - headersMap.addHeader(createValidatorDeltaHeader(t, 0, 0, nil, allValidators[:validatorSetSize])) - headersMap.addHeader(createValidatorDeltaHeader(t, 1*epochSize, 1, allValidators[:validatorSetSize], allValidators[validatorSetSize:])) - - blockchainMock := new(blockchainMock) - blockchainMock.On("GetHeaderByNumber", mock.Anything).Return(headersMap.getHeader) - - testValidatorsCache := &testValidatorsCache{ - validatorsSnapshotCache: newValidatorsSnapshotCache(hclog.NewNullLogger(), newTestState(t), blockchainMock), - } - - snapshot, err := testValidatorsCache.computeSnapshot(nil, 5*epochSize, nil) - assertions.Nil(snapshot) - assertions.ErrorContains(err, "unknown block. Block number=50") -} - -func TestValidatorsSnapshotCache_ComputeSnapshot_IncorrectExtra(t *testing.T) { - t.Parallel() - assertions := assert.New(t) - - const ( - totalValidators = 6 - validatorSetSize = totalValidators / 2 - epochSize = uint64(10) - ) - - allValidators := validator.NewTestValidators(t, totalValidators).GetPublicIdentities() - headersMap := &testHeadersMap{} - invalidHeader := createValidatorDeltaHeader(t, 1*epochSize, 1, allValidators[:validatorSetSize], allValidators[validatorSetSize:]) - invalidHeader.ExtraData = []byte{0x2, 0x7} - headersMap.addHeader(invalidHeader) - - blockchainMock := new(blockchainMock) - blockchainMock.On("GetHeaderByNumber", mock.Anything).Return(headersMap.getHeader) - - testValidatorsCache := &testValidatorsCache{ - validatorsSnapshotCache: newValidatorsSnapshotCache(hclog.NewNullLogger(), newTestState(t), blockchainMock), - } - - snapshot, err := testValidatorsCache.computeSnapshot(nil, 1*epochSize, nil) - assertions.Nil(snapshot) - assertions.ErrorContains(err, "failed to decode extra from the block#10: wrong extra size: 2") -} - -func TestValidatorsSnapshotCache_ComputeSnapshot_ApplyDeltaFail(t *testing.T) { - t.Parallel() - assertions := assert.New(t) - - const ( - totalValidators = 6 - validatorSetSize = totalValidators / 2 - epochSize = uint64(10) - ) - - allValidators := validator.NewTestValidators(t, totalValidators).GetPublicIdentities() - headersMap := &testHeadersMap{} - headersMap.addHeader(createValidatorDeltaHeader(t, 0, 0, nil, allValidators[:validatorSetSize])) - headersMap.addHeader(createValidatorDeltaHeader(t, 1*epochSize, 1, nil, allValidators[:validatorSetSize])) - - blockchainMock := new(blockchainMock) - blockchainMock.On("GetHeaderByNumber", mock.Anything).Return(headersMap.getHeader) - - testValidatorsCache := &testValidatorsCache{ - validatorsSnapshotCache: newValidatorsSnapshotCache(hclog.NewNullLogger(), 
newTestState(t), blockchainMock), - } - - snapshot, err := testValidatorsCache.computeSnapshot(&validatorSnapshot{0, 0, allValidators}, 1*epochSize, nil) - assertions.Nil(snapshot) - assertions.ErrorContains(err, "failed to apply delta to the validators snapshot, block#10") -} - -func TestValidatorsSnapshotCache_Empty(t *testing.T) { - t.Parallel() - - headersMap := &testHeadersMap{headersByNumber: make(map[uint64]*types.Header)} - - createHeaders(t, headersMap, 0, 1, 1, nil, nil) - - blockchainMock := new(blockchainMock) - blockchainMock.On("GetHeaderByNumber", mock.Anything).Return(headersMap.getHeader) - - testValidatorsCache := &testValidatorsCache{ - validatorsSnapshotCache: newValidatorsSnapshotCache(hclog.NewNullLogger(), newTestState(t), blockchainMock), - } - - _, err := testValidatorsCache.GetSnapshot(1, nil, nil) - assert.ErrorContains(t, err, "validator snapshot is empty for block") -} - -func TestValidatorsSnapshotCache_HugeBuild(t *testing.T) { - t.Parallel() - - type epochValidatorSetIndexes struct { - firstValIndex int - lastValIndex int - } - - const ( - epochSize = uint64(10) - lastBlock = uint64(100_000) - numOfEpochsToChangeValSet = 50 - totalValidators = 20 - validatorSetSize = 5 - ) - - allValidators := validator.NewTestValidators(t, totalValidators).GetPublicIdentities() - headersMap := &testHeadersMap{headersByNumber: make(map[uint64]*types.Header)} - - oldValidators := allValidators[:validatorSetSize] - newValidators := oldValidators - firstValIndex := 0 - lastValIndex := validatorSetSize - epochValidators := map[uint64]epochValidatorSetIndexes{} - - // create headers for the first epoch separately - createHeaders(t, headersMap, 0, epochSize-1, 1, nil, newValidators) - - for i := epochSize; i < lastBlock; i += epochSize { - from := i - to := i + epochSize - 1 - epoch := i/epochSize + 1 - - oldValidators = newValidators - - if epoch%numOfEpochsToChangeValSet == 0 { - // every n epochs, change validators - firstValIndex = lastValIndex - lastValIndex += validatorSetSize - - if lastValIndex > totalValidators { - firstValIndex = 0 - lastValIndex = validatorSetSize - } - - newValidators = allValidators[firstValIndex:lastValIndex] - } - - epochValidators[epoch] = epochValidatorSetIndexes{firstValIndex, lastValIndex} - - createHeaders(t, headersMap, from, to, epoch, oldValidators, newValidators) - } - - blockchainMock := new(blockchainMock) - blockchainMock.On("GetHeaderByNumber", mock.Anything).Return(headersMap.getHeader) - - validatorsSnapshotCache := newValidatorsSnapshotCache(hclog.NewNullLogger(), newTestState(t), blockchainMock) - - s := time.Now().UTC() - - snapshot, err := validatorsSnapshotCache.GetSnapshot(lastBlock-epochSize, nil, nil) - - t.Log("Time needed to calculate snapshot:", time.Since(s)) - - require.NoError(t, err) - require.NotNil(t, snapshot) - require.NotEmpty(t, snapshot) - - // check if the validators of random epochs are as expected - snapshot, err = validatorsSnapshotCache.GetSnapshot(46, nil, nil) // epoch 5 where validator set did not change - require.NoError(t, err) - - epochValIndexes, ok := epochValidators[5] - require.True(t, ok) - require.True(t, allValidators[epochValIndexes.firstValIndex:epochValIndexes.lastValIndex].Equals(snapshot)) - - snapshot, err = validatorsSnapshotCache.GetSnapshot(numOfEpochsToChangeValSet*epochSize, nil, nil) // epoch 50 where validator set was changed - require.NoError(t, err) - - epochValIndexes, ok = epochValidators[numOfEpochsToChangeValSet] - require.True(t, ok) - require.True(t, 
allValidators[epochValIndexes.firstValIndex:epochValIndexes.lastValIndex].Equals(snapshot)) - - snapshot, err = validatorsSnapshotCache.GetSnapshot(2*numOfEpochsToChangeValSet*epochSize, nil, nil) // epoch 100 where validator set was changed - require.NoError(t, err) - - epochValIndexes, ok = epochValidators[2*numOfEpochsToChangeValSet] - require.True(t, ok) - require.True(t, allValidators[epochValIndexes.firstValIndex:epochValIndexes.lastValIndex].Equals(snapshot)) - - snapshot, err = validatorsSnapshotCache.GetSnapshot(57903, nil, nil) // epoch 5790 where validator set did not change - require.NoError(t, err) - - epochValIndexes, ok = epochValidators[57903/epochSize+1] - require.True(t, ok) - require.True(t, allValidators[epochValIndexes.firstValIndex:epochValIndexes.lastValIndex].Equals(snapshot)) - - snapshot, err = validatorsSnapshotCache.GetSnapshot(99991, nil, nil) // epoch 10000 where validator set did not change - require.NoError(t, err) - - epochValIndexes, ok = epochValidators[99991/epochSize+1] - require.True(t, ok) - require.True(t, allValidators[epochValIndexes.firstValIndex:epochValIndexes.lastValIndex].Equals(snapshot)) -} - -func createHeaders(t *testing.T, headersMap *testHeadersMap, - fromBlock, toBlock, epoch uint64, oldValidators, newValidators validator.AccountSet) { - t.Helper() - - headersMap.addHeader(createValidatorDeltaHeader(t, fromBlock, epoch-1, oldValidators, newValidators)) - - for i := fromBlock + 1; i <= toBlock; i++ { - headersMap.addHeader(createValidatorDeltaHeader(t, i, epoch, nil, nil)) - } -} - -func createValidatorDeltaHeader(t *testing.T, blockNumber, epoch uint64, oldValidatorSet, newValidatorSet validator.AccountSet) *types.Header { - t.Helper() - - delta, _ := validator.CreateValidatorSetDelta(oldValidatorSet, newValidatorSet) - extra := &Extra{Validators: delta, BlockMetaData: &BlockMetaData{EpochNumber: epoch}} - - return &types.Header{ - Number: blockNumber, - ExtraData: extra.MarshalRLPTo(nil), - } -} - -type testValidatorsCache struct { - *validatorsSnapshotCache -} - -func (c *testValidatorsCache) cleanValidatorsCache() error { - c.snapshots = make(map[uint64]*validatorSnapshot) - - return c.state.EpochStore.removeAllValidatorSnapshots() -} diff --git a/e2e-polybft/e2e/bridge_test.go b/e2e-polybft/e2e/bridge_test.go index 16050bb764..680ac42ce2 100644 --- a/e2e-polybft/e2e/bridge_test.go +++ b/e2e-polybft/e2e/bridge_test.go @@ -16,8 +16,9 @@ import ( "github.com/0xPolygon/polygon-edge/command/bridge/common" bridgeHelper "github.com/0xPolygon/polygon-edge/command/bridge/helper" validatorHelper "github.com/0xPolygon/polygon-edge/command/validator/helper" - "github.com/0xPolygon/polygon-edge/consensus/polybft" + polycfg "github.com/0xPolygon/polygon-edge/consensus/polybft/config" "github.com/0xPolygon/polygon-edge/consensus/polybft/contractsapi" + polytypes "github.com/0xPolygon/polygon-edge/consensus/polybft/types" "github.com/0xPolygon/polygon-edge/contracts" "github.com/0xPolygon/polygon-edge/crypto" "github.com/0xPolygon/polygon-edge/e2e-polybft/framework" @@ -89,7 +90,7 @@ func TestE2E_Bridge_ExternalChainTokensTransfers(t *testing.T) { cluster.WaitForReady(t) - polybftCfg, err := polybft.LoadPolyBFTConfig(path.Join(cluster.Config.TmpDir, chainConfigFileName)) + polybftCfg, err := polycfg.LoadPolyBFTConfig(path.Join(cluster.Config.TmpDir, chainConfigFileName)) require.NoError(t, err) validatorSrv := cluster.Servers[0] @@ -329,7 +330,7 @@ func TestE2E_Bridge_ERC721Transfer(t *testing.T) { cluster.WaitForReady(t) - polybftCfg, err := 
polybft.LoadPolyBFTConfig(path.Join(cluster.Config.TmpDir, chainConfigFileName)) + polybftCfg, err := polycfg.LoadPolyBFTConfig(path.Join(cluster.Config.TmpDir, chainConfigFileName)) require.NoError(t, err) externalChainTxRelayer, err := txrelayer.NewTxRelayer(txrelayer.WithIPAddress(cluster.Bridges[bridgeOne].JSONRPCAddr())) @@ -437,7 +438,7 @@ func TestE2E_Bridge_ERC721Transfer(t *testing.T) { currentBlock, err := childEthEndpoint.GetBlockByNumber(jsonrpc.LatestBlockNumber, false) require.NoError(t, err) - currentExtra, err := polybft.GetIbftExtra(currentBlock.Header.ExtraData) + currentExtra, err := polytypes.GetIbftExtra(currentBlock.Header.ExtraData) require.NoError(t, err) t.Logf("Latest block number: %d, epoch number: %d\n", currentBlock.Number(), currentExtra.BlockMetaData.EpochNumber) @@ -498,7 +499,7 @@ func TestE2E_Bridge_ERC1155Transfer(t *testing.T) { cluster.WaitForReady(t) - polybftCfg, err := polybft.LoadPolyBFTConfig(path.Join(cluster.Config.TmpDir, chainConfigFileName)) + polybftCfg, err := polycfg.LoadPolyBFTConfig(path.Join(cluster.Config.TmpDir, chainConfigFileName)) require.NoError(t, err) externalChainTxRelayer, err := txrelayer.NewTxRelayer(txrelayer.WithIPAddress(cluster.Bridges[bridgeOne].JSONRPCAddr())) @@ -689,7 +690,7 @@ func TestE2E_Bridge_InternalChainTokensTransfer(t *testing.T) { bridgeOne := 0 - polybftCfg, err := polybft.LoadPolyBFTConfig(path.Join(cluster.Config.TmpDir, chainConfigFileName)) + polybftCfg, err := polycfg.LoadPolyBFTConfig(path.Join(cluster.Config.TmpDir, chainConfigFileName)) require.NoError(t, err) validatorSrv := cluster.Servers[0] @@ -973,7 +974,7 @@ func TestE2E_Bridge_Transfers_AccessLists(t *testing.T) { cluster.WaitForReady(t) - polybftCfg, err := polybft.LoadPolyBFTConfig(path.Join(cluster.Config.TmpDir, chainConfigFileName)) + polybftCfg, err := polycfg.LoadPolyBFTConfig(path.Join(cluster.Config.TmpDir, chainConfigFileName)) require.NoError(t, err) validatorSrv := cluster.Servers[0] @@ -1106,7 +1107,7 @@ func TestE2E_Bridge_Transfers_AccessLists(t *testing.T) { currentBlock, err := childEthEndpoint.GetBlockByNumber(jsonrpc.LatestBlockNumber, false) require.NoError(t, err) - currentExtra, err := polybft.GetIbftExtra(currentBlock.Header.ExtraData) + currentExtra, err := polytypes.GetIbftExtra(currentBlock.Header.ExtraData) require.NoError(t, err) t.Logf("Latest block number: %d, epoch number: %d\n", currentBlock.Number(), currentExtra.BlockMetaData.EpochNumber) @@ -1159,7 +1160,7 @@ func TestE2E_Bridge_NonMintableERC20Token_WithPremine(t *testing.T) { framework.WithNumBlockConfirmations(numBlockConfirmations), framework.WithNativeTokenConfig(nativeTokenNonMintableConfig), // this enables London (EIP-1559) fork - framework.WithBurnContract(&polybft.BurnContractInfo{ + framework.WithBurnContract(&polycfg.BurnContractInfo{ BlockNumber: 0, Address: types.StringToAddress("0xBurnContractAddress")}), framework.WithSecretsCallback(func(_ []types.Address, tcc *framework.TestClusterConfig) { @@ -1195,7 +1196,7 @@ func TestE2E_Bridge_NonMintableERC20Token_WithPremine(t *testing.T) { cluster.WaitForReady(t) - polybftCfg, err := polybft.LoadPolyBFTConfig(path.Join(cluster.Config.TmpDir, chainConfigFileName)) + polybftCfg, err := polycfg.LoadPolyBFTConfig(path.Join(cluster.Config.TmpDir, chainConfigFileName)) require.NoError(t, err) bridgeCfg := polybftCfg.Bridge[chainID.Uint64()] @@ -1295,7 +1296,7 @@ func TestE2E_Bridge_NonMintableERC20Token_WithPremine(t *testing.T) { currentBlock, err := 
childEthEndpoint.GetBlockByNumber(jsonrpc.LatestBlockNumber, false) require.NoError(t, err) - currentExtra, err := polybft.GetIbftExtra(currentBlock.Header.ExtraData) + currentExtra, err := polytypes.GetIbftExtra(currentBlock.Header.ExtraData) require.NoError(t, err) t.Logf("Latest block number: %d, epoch number: %d\n", currentBlock.Number(), currentExtra.BlockMetaData.EpochNumber) @@ -1431,7 +1432,7 @@ func TestE2E_Bridge_L1OriginatedNativeToken_ERC20StakingToken(t *testing.T) { cluster.WaitForReady(t) - polybftCfg, err := polybft.LoadPolyBFTConfig(path.Join(cluster.Config.TmpDir, chainConfigFileName)) + polybftCfg, err := polycfg.LoadPolyBFTConfig(path.Join(cluster.Config.TmpDir, chainConfigFileName)) require.NoError(t, err) // first validator server(minter) diff --git a/e2e-polybft/e2e/burn_contract_test.go b/e2e-polybft/e2e/burn_contract_test.go index 96626b7586..ca5824eb0a 100644 --- a/e2e-polybft/e2e/burn_contract_test.go +++ b/e2e-polybft/e2e/burn_contract_test.go @@ -3,7 +3,7 @@ package e2e import ( "testing" - "github.com/0xPolygon/polygon-edge/consensus/polybft" + polycfg "github.com/0xPolygon/polygon-edge/consensus/polybft/config" "github.com/0xPolygon/polygon-edge/crypto" "github.com/0xPolygon/polygon-edge/e2e-polybft/framework" "github.com/0xPolygon/polygon-edge/jsonrpc" @@ -21,7 +21,7 @@ func TestE2E_BurnContract_Deployed(t *testing.T) { framework.WithBridges(1), framework.WithNativeTokenConfig(nativeTokenNonMintableConfig), framework.WithTestRewardToken(), - framework.WithBurnContract(&polybft.BurnContractInfo{ + framework.WithBurnContract(&polycfg.BurnContractInfo{ Address: contractAddr, DestinationAddress: destinationAddr, }), diff --git a/e2e-polybft/e2e/consensus_test.go b/e2e-polybft/e2e/consensus_test.go index 6d39383406..e818417a5c 100644 --- a/e2e-polybft/e2e/consensus_test.go +++ b/e2e-polybft/e2e/consensus_test.go @@ -18,8 +18,10 @@ import ( "github.com/0xPolygon/polygon-edge/command" "github.com/0xPolygon/polygon-edge/command/genesis" validatorHelper "github.com/0xPolygon/polygon-edge/command/validator/helper" - "github.com/0xPolygon/polygon-edge/consensus/polybft" + polycfg "github.com/0xPolygon/polygon-edge/consensus/polybft/config" "github.com/0xPolygon/polygon-edge/consensus/polybft/contractsapi" + systemstate "github.com/0xPolygon/polygon-edge/consensus/polybft/system_state" + polytypes "github.com/0xPolygon/polygon-edge/consensus/polybft/types" "github.com/0xPolygon/polygon-edge/contracts" "github.com/0xPolygon/polygon-edge/crypto" "github.com/0xPolygon/polygon-edge/e2e-polybft/framework" @@ -174,7 +176,7 @@ func TestE2E_Consensus_RegisterValidator(t *testing.T) { relayer, err := txrelayer.NewTxRelayer(txrelayer.WithIPAddress(owner.JSONRPCAddr())) require.NoError(t, err) - polybftConfig, err := polybft.LoadPolyBFTConfig(path.Join(cluster.Config.TmpDir, chainConfigFileName)) + polybftConfig, err := polycfg.LoadPolyBFTConfig(path.Join(cluster.Config.TmpDir, chainConfigFileName)) require.NoError(t, err) // create the first account and extract the address @@ -197,7 +199,7 @@ func TestE2E_Consensus_RegisterValidator(t *testing.T) { genesisBlock, err := owner.JSONRPC().GetBlockByNumber(0, false) require.NoError(t, err) - _, err = polybft.GetIbftExtra(genesisBlock.Header.ExtraData) + _, err = polytypes.GetIbftExtra(genesisBlock.Header.ExtraData) require.NoError(t, err) // owner whitelists both new validators @@ -315,7 +317,7 @@ func TestE2E_Consensus_Validator_Unstake(t *testing.T) { }), ) - polybftCfg, err := 
polybft.LoadPolyBFTConfig(path.Join(cluster.Config.TmpDir, chainConfigFileName)) + polybftCfg, err := polycfg.LoadPolyBFTConfig(path.Join(cluster.Config.TmpDir, chainConfigFileName)) require.NoError(t, err) srv := cluster.Servers[0] @@ -546,7 +548,7 @@ func TestE2E_Consensus_EIP1559Check(t *testing.T) { cluster := framework.NewTestCluster(t, 5, framework.WithBridges(1), framework.WithNativeTokenConfig(nativeTokenNonMintableConfig), - framework.WithBurnContract(&polybft.BurnContractInfo{BlockNumber: 0, Address: burnContractAddr}), + framework.WithBurnContract(&polycfg.BurnContractInfo{BlockNumber: 0, Address: burnContractAddr}), framework.WithSecretsCallback(func(a []types.Address, config *framework.TestClusterConfig) { for range a { config.StakeAmounts = append(config.StakeAmounts, command.DefaultPremineBalance) @@ -692,7 +694,7 @@ func TestE2E_Consensus_ChangeVotingPowerByStakingPendingRewards(t *testing.T) { // waiting two epochs, so that some rewards get accumulated require.NoError(t, cluster.WaitForBlock(epochEndingBlock, 1*time.Minute)) - queryValidators := func(handler func(idx int, validatorInfo *polybft.ValidatorInfo)) { + queryValidators := func(handler func(idx int, validatorInfo *systemstate.ValidatorInfo)) { for i, validatorAddr := range votingPowerChangeValidators { // query validator info validatorInfo, err := validatorHelper.GetValidatorInfo( @@ -707,9 +709,9 @@ func TestE2E_Consensus_ChangeVotingPowerByStakingPendingRewards(t *testing.T) { bigZero := big.NewInt(0) // validatorsMap holds only changed validators - validatorsMap := make(map[types.Address]*polybft.ValidatorInfo, votingPowerChanges) + validatorsMap := make(map[types.Address]*systemstate.ValidatorInfo, votingPowerChanges) - queryValidators(func(idx int, validator *polybft.ValidatorInfo) { + queryValidators(func(idx int, validator *systemstate.ValidatorInfo) { t.Logf("[Validator#%d] Voting power (original)=%d, rewards=%d\n", idx+1, validator.Stake, validator.WithdrawableRewards) @@ -726,7 +728,7 @@ func TestE2E_Consensus_ChangeVotingPowerByStakingPendingRewards(t *testing.T) { require.NoError(t, validatorSrv.Stake(types.ZeroAddress, validator.WithdrawableRewards)) }) - queryValidators(func(idx int, validator *polybft.ValidatorInfo) { + queryValidators(func(idx int, validator *systemstate.ValidatorInfo) { t.Logf("[Validator#%d] Voting power (after stake)=%d\n", idx+1, validator.Stake) previousValidatorInfo := validatorsMap[validator.Address] @@ -752,7 +754,7 @@ func TestE2E_Consensus_ChangeVotingPowerByStakingPendingRewards(t *testing.T) { epochEndingBlock += epochSize - currentExtra, err := polybft.GetIbftExtra(latestBlock.Header.ExtraData) + currentExtra, err := polytypes.GetIbftExtra(latestBlock.Header.ExtraData) require.NoError(t, err) if currentExtra.Validators == nil || currentExtra.Validators.IsEmpty() { diff --git a/e2e-polybft/e2e/governance_test.go b/e2e-polybft/e2e/governance_test.go index d4f56d2725..e1a8ad8ceb 100644 --- a/e2e-polybft/e2e/governance_test.go +++ b/e2e-polybft/e2e/governance_test.go @@ -8,8 +8,9 @@ import ( "time" "github.com/0xPolygon/polygon-edge/command/validator/helper" - "github.com/0xPolygon/polygon-edge/consensus/polybft" + polycfg "github.com/0xPolygon/polygon-edge/consensus/polybft/config" "github.com/0xPolygon/polygon-edge/consensus/polybft/contractsapi" + polytypes "github.com/0xPolygon/polygon-edge/consensus/polybft/types" "github.com/0xPolygon/polygon-edge/crypto" "github.com/0xPolygon/polygon-edge/e2e-polybft/framework" "github.com/0xPolygon/polygon-edge/helper/common" @@ 
-61,7 +62,7 @@ func TestE2E_Governance_ProposeAndExecuteSimpleProposal(t *testing.T) { relayer, err := txrelayer.NewTxRelayer(txrelayer.WithClient(proposer.JSONRPC())) require.NoError(t, err) - polybftCfg, err := polybft.LoadPolyBFTConfig(path.Join(cluster.Config.TmpDir, chainConfigFileName)) + polybftCfg, err := polycfg.LoadPolyBFTConfig(path.Join(cluster.Config.TmpDir, chainConfigFileName)) require.NoError(t, err) executeSuccessfulProposalCycle := func(t *testing.T, @@ -161,7 +162,7 @@ func TestE2E_Governance_ProposeAndExecuteSimpleProposal(t *testing.T) { jsonrpc.BlockNumber(endOfPreviousEpoch), false) require.NoError(t, err) - extra, err := polybft.GetIbftExtra(block.Header.ExtraData) + extra, err := polytypes.GetIbftExtra(block.Header.ExtraData) require.NoError(t, err) oldEpoch := extra.BlockMetaData.EpochNumber @@ -170,7 +171,7 @@ func TestE2E_Governance_ProposeAndExecuteSimpleProposal(t *testing.T) { jsonrpc.BlockNumber(endOfNewEpoch), false) require.NoError(t, err) - extra, err = polybft.GetIbftExtra(block.Header.ExtraData) + extra, err = polytypes.GetIbftExtra(block.Header.ExtraData) require.NoError(t, err) newEpoch := extra.BlockMetaData.EpochNumber diff --git a/e2e-polybft/e2e/jsonrpc_test.go b/e2e-polybft/e2e/jsonrpc_test.go index 9432237dba..64ec82e98e 100644 --- a/e2e-polybft/e2e/jsonrpc_test.go +++ b/e2e-polybft/e2e/jsonrpc_test.go @@ -11,7 +11,7 @@ import ( "github.com/Ethernal-Tech/ethgo" "github.com/stretchr/testify/require" - "github.com/0xPolygon/polygon-edge/consensus/polybft" + polycfg "github.com/0xPolygon/polygon-edge/consensus/polybft/config" "github.com/0xPolygon/polygon-edge/consensus/polybft/contractsapi" "github.com/0xPolygon/polygon-edge/crypto" "github.com/0xPolygon/polygon-edge/e2e-polybft/framework" @@ -38,7 +38,7 @@ func TestE2E_JsonRPC(t *testing.T) { cluster := framework.NewTestCluster(t, 4, framework.WithEpochSize(int(epochSize)), framework.WithPremine(preminedAcctOne.Address(), preminedAcctTwo.Address()), - framework.WithBurnContract(&polybft.BurnContractInfo{BlockNumber: 0, Address: types.ZeroAddress}), + framework.WithBurnContract(&polycfg.BurnContractInfo{BlockNumber: 0, Address: types.ZeroAddress}), framework.WithHTTPS(), framework.WithTLSCertificate("/etc/ssl/certs/localhost.pem", "/etc/ssl/private/localhost.key"), ) diff --git a/e2e-polybft/e2e/storage_test.go b/e2e-polybft/e2e/storage_test.go index f95367646f..e3dddf13e2 100644 --- a/e2e-polybft/e2e/storage_test.go +++ b/e2e-polybft/e2e/storage_test.go @@ -12,7 +12,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/0xPolygon/polygon-edge/consensus/polybft" + polycfg "github.com/0xPolygon/polygon-edge/consensus/polybft/config" "github.com/0xPolygon/polygon-edge/crypto" "github.com/0xPolygon/polygon-edge/e2e-polybft/framework" "github.com/0xPolygon/polygon-edge/types" @@ -24,7 +24,7 @@ func TestE2E_Storage(t *testing.T) { cluster := framework.NewTestCluster(t, 5, framework.WithPremine(sender.Address()), - framework.WithBurnContract(&polybft.BurnContractInfo{BlockNumber: 0, Address: types.ZeroAddress}), + framework.WithBurnContract(&polycfg.BurnContractInfo{BlockNumber: 0, Address: types.ZeroAddress}), ) defer cluster.Stop() diff --git a/e2e-polybft/e2e/txpool_test.go b/e2e-polybft/e2e/txpool_test.go index 2620cae263..644385e453 100644 --- a/e2e-polybft/e2e/txpool_test.go +++ b/e2e-polybft/e2e/txpool_test.go @@ -11,7 +11,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - 
"github.com/0xPolygon/polygon-edge/consensus/polybft" + polycfg "github.com/0xPolygon/polygon-edge/consensus/polybft/config" "github.com/0xPolygon/polygon-edge/consensus/polybft/contractsapi" "github.com/0xPolygon/polygon-edge/crypto" "github.com/0xPolygon/polygon-edge/e2e-polybft/framework" @@ -26,7 +26,7 @@ func TestE2E_TxPool_Transfer(t *testing.T) { cluster := framework.NewTestCluster(t, 5, framework.WithPremine(sender.Address()), - framework.WithBurnContract(&polybft.BurnContractInfo{BlockNumber: 0, Address: types.ZeroAddress}), + framework.WithBurnContract(&polycfg.BurnContractInfo{BlockNumber: 0, Address: types.ZeroAddress}), ) defer cluster.Stop() @@ -112,7 +112,7 @@ func TestE2E_TxPool_Transfer_Linear(t *testing.T) { // first account should have some matics premined cluster := framework.NewTestCluster(t, 5, framework.WithPremine(premine.Address()), - framework.WithBurnContract(&polybft.BurnContractInfo{BlockNumber: 0, Address: types.ZeroAddress}), + framework.WithBurnContract(&polycfg.BurnContractInfo{BlockNumber: 0, Address: types.ZeroAddress}), ) defer cluster.Stop() @@ -251,7 +251,7 @@ func TestE2E_TxPool_BroadcastTransactions(t *testing.T) { // First account should have some matics premined cluster := framework.NewTestCluster(t, 5, framework.WithPremine(sender.Address()), - framework.WithBurnContract(&polybft.BurnContractInfo{BlockNumber: 0, Address: types.ZeroAddress}), + framework.WithBurnContract(&polycfg.BurnContractInfo{BlockNumber: 0, Address: types.ZeroAddress}), ) defer cluster.Stop() diff --git a/e2e-polybft/framework/test-bridge.go b/e2e-polybft/framework/test-bridge.go index 06984b4a40..08efbe557c 100644 --- a/e2e-polybft/framework/test-bridge.go +++ b/e2e-polybft/framework/test-bridge.go @@ -17,7 +17,7 @@ import ( "github.com/0xPolygon/polygon-edge/command/genesis" cmdHelper "github.com/0xPolygon/polygon-edge/command/helper" polybftsecrets "github.com/0xPolygon/polygon-edge/command/secrets/init" - "github.com/0xPolygon/polygon-edge/consensus/polybft" + polycfg "github.com/0xPolygon/polygon-edge/consensus/polybft/config" "github.com/0xPolygon/polygon-edge/consensus/polybft/wallet" "github.com/0xPolygon/polygon-edge/types" "golang.org/x/sync/errgroup" @@ -326,7 +326,7 @@ func (t *TestBridge) deployExternalChainContracts(genesisPath string) error { } // fundAddressesOnRoot sends predefined amount of tokens to external chain addresses -func (t *TestBridge) fundAddressesOnRoot(polybftConfig polybft.PolyBFTConfig) error { +func (t *TestBridge) fundAddressesOnRoot(polybftConfig polycfg.PolyBFT) error { validatorSecrets, err := genesis.GetValidatorKeyFiles(t.clusterConfig.TmpDir, t.clusterConfig.ValidatorPrefix) if err != nil { return fmt.Errorf("could not get validator secrets on initial external chain funding of genesis validators: %w", err) @@ -404,8 +404,8 @@ func (t *TestBridge) FundValidators(secretsPaths []string, amounts []*big.Int) e } // mintNativeRootToken mints native er20 token on root for provided validators and other accounts in premine flag -func (t *TestBridge) mintNativeRootToken(validatorAddresses []types.Address, tokenConfig *polybft.TokenConfig, - polybftConfig polybft.PolyBFTConfig) error { +func (t *TestBridge) mintNativeRootToken(validatorAddresses []types.Address, tokenConfig *polycfg.Token, + polybftConfig polycfg.PolyBFT) error { if tokenConfig.IsMintable { // if token is mintable, it is premined in genesis command, // so we just return here @@ -447,8 +447,8 @@ func (t *TestBridge) mintNativeRootToken(validatorAddresses []types.Address, tok } // 
premineNativeRootToken will premine token on root for every validator and other addresses in premine flag -func (t *TestBridge) premineNativeRootToken(genesisPath string, tokenConfig *polybft.TokenConfig, - polybftConfig polybft.PolyBFTConfig) error { +func (t *TestBridge) premineNativeRootToken(genesisPath string, tokenConfig *polycfg.Token, + polybftConfig polycfg.PolyBFT) error { if tokenConfig.IsMintable { // if token is mintable, it is premined in genesis command, // so we just return here @@ -550,7 +550,7 @@ func (t *TestBridge) calculatePort() uint64 { } // finalizeGenesis finalizes genesis on BladeManager contract on root -func (t *TestBridge) finalizeGenesis(genesisPath string, tokenConfig *polybft.TokenConfig) error { +func (t *TestBridge) finalizeGenesis(genesisPath string, tokenConfig *polycfg.Token) error { if tokenConfig.IsMintable { // we don't need to finalize anything when we have mintable (child originated) token return nil diff --git a/e2e-polybft/framework/test-cluster.go b/e2e-polybft/framework/test-cluster.go index 3974870189..0fe3aa50cf 100644 --- a/e2e-polybft/framework/test-cluster.go +++ b/e2e-polybft/framework/test-cluster.go @@ -19,7 +19,7 @@ import ( "github.com/0xPolygon/polygon-edge/command" "github.com/0xPolygon/polygon-edge/command/genesis" - "github.com/0xPolygon/polygon-edge/consensus/polybft" + polycfg "github.com/0xPolygon/polygon-edge/consensus/polybft/config" "github.com/0xPolygon/polygon-edge/consensus/polybft/contractsapi" "github.com/0xPolygon/polygon-edge/crypto" "github.com/0xPolygon/polygon-edge/helper/common" @@ -104,7 +104,7 @@ type TestClusterConfig struct { TmpDir string BlockGasLimit uint64 BlockTime time.Duration - BurnContract *polybft.BurnContractInfo + BurnContract *polycfg.BurnContractInfo ValidatorPrefix string Binary string ValidatorSetSize uint64 @@ -321,7 +321,7 @@ func WithBlockGasLimit(blockGasLimit uint64) ClusterOption { } } -func WithBurnContract(burnContract *polybft.BurnContractInfo) ClusterOption { +func WithBurnContract(burnContract *polycfg.BurnContractInfo) ClusterOption { return func(h *TestClusterConfig) { h.BurnContract = burnContract } @@ -616,7 +616,7 @@ func NewTestCluster(t *testing.T, validatorsCount int, opts ...ClusterOption) *T args = append(args, "--native-token-config", cluster.Config.NativeTokenConfigRaw) } - tokenConfig, err := polybft.ParseRawTokenConfig(cluster.Config.NativeTokenConfigRaw) + tokenConfig, err := polycfg.ParseRawTokenConfig(cluster.Config.NativeTokenConfigRaw) require.NoError(t, err) if len(cluster.Config.Premine) != 0 && tokenConfig.IsMintable { @@ -760,10 +760,10 @@ func NewTestCluster(t *testing.T, validatorsCount int, opts ...ClusterOption) *T err = bridge.deployExternalChainContracts(genesisPath) require.NoError(t, err) - polybftConfig, err := polybft.LoadPolyBFTConfig(genesisPath) + polybftConfig, err := polycfg.LoadPolyBFTConfig(genesisPath) require.NoError(t, err) - tokenConfig, err := polybft.ParseRawTokenConfig(cluster.Config.NativeTokenConfigRaw) + tokenConfig, err := polycfg.ParseRawTokenConfig(cluster.Config.NativeTokenConfigRaw) require.NoError(t, err) // fund addresses on the bridge chain diff --git a/e2e-polybft/framework/test-server.go b/e2e-polybft/framework/test-server.go index e283a722b9..8fb246ce15 100644 --- a/e2e-polybft/framework/test-server.go +++ b/e2e-polybft/framework/test-server.go @@ -14,7 +14,7 @@ import ( polybftsecrets "github.com/0xPolygon/polygon-edge/command/secrets/init" validatorHelper "github.com/0xPolygon/polygon-edge/command/validator/helper" - 
"github.com/0xPolygon/polygon-edge/consensus/polybft" + polytypes "github.com/0xPolygon/polygon-edge/consensus/polybft/types" "github.com/0xPolygon/polygon-edge/consensus/polybft/validator" "github.com/0xPolygon/polygon-edge/consensus/polybft/wallet" "github.com/0xPolygon/polygon-edge/jsonrpc" @@ -380,7 +380,7 @@ func (t *TestServer) HasValidatorSealed(firstBlock, lastBlock uint64, validators return false, err } - extra, err := polybft.GetIbftExtra(block.Header.ExtraData) + extra, err := polytypes.GetIbftExtra(block.Header.ExtraData) if err != nil { return false, err } diff --git a/loadtest/sanitycheck/sanity_check_register_validator.go b/loadtest/sanitycheck/sanity_check_register_validator.go index 07973531d0..33d8be4778 100644 --- a/loadtest/sanitycheck/sanity_check_register_validator.go +++ b/loadtest/sanitycheck/sanity_check_register_validator.go @@ -4,9 +4,9 @@ import ( "fmt" "math/big" - "github.com/0xPolygon/polygon-edge/consensus/polybft" "github.com/0xPolygon/polygon-edge/consensus/polybft/contractsapi" "github.com/0xPolygon/polygon-edge/consensus/polybft/signer" + polytypes "github.com/0xPolygon/polygon-edge/consensus/polybft/types" "github.com/0xPolygon/polygon-edge/consensus/polybft/wallet" "github.com/0xPolygon/polygon-edge/contracts" "github.com/0xPolygon/polygon-edge/crypto" @@ -95,7 +95,7 @@ func (t *RegisterValidatorTest) runTest() (*wallet.Account, error) { return nil, err } - extra, err := polybft.GetIbftExtra(epochEndingBlock.ExtraData) + extra, err := polytypes.GetIbftExtra(epochEndingBlock.ExtraData) if err != nil { return nil, fmt.Errorf("failed to get ibft extra data for epoch ending block. Error: %w", err) } diff --git a/loadtest/sanitycheck/sanity_check_stake.go b/loadtest/sanitycheck/sanity_check_stake.go index ed9730c4c2..d2be7746e8 100644 --- a/loadtest/sanitycheck/sanity_check_stake.go +++ b/loadtest/sanitycheck/sanity_check_stake.go @@ -5,8 +5,8 @@ import ( "math/big" "time" - "github.com/0xPolygon/polygon-edge/consensus/polybft" "github.com/0xPolygon/polygon-edge/consensus/polybft/contractsapi" + polytypes "github.com/0xPolygon/polygon-edge/consensus/polybft/types" "github.com/0xPolygon/polygon-edge/contracts" "github.com/0xPolygon/polygon-edge/crypto" "github.com/0xPolygon/polygon-edge/helper/common" @@ -95,7 +95,7 @@ func (t *StakeTest) Run() error { return err } - extra, err := polybft.GetIbftExtra(epochEndingBlock.ExtraData) + extra, err := polytypes.GetIbftExtra(epochEndingBlock.ExtraData) if err != nil { return fmt.Errorf("failed to get ibft extra data for epoch ending block. Error: %w", err) } diff --git a/loadtest/sanitycheck/sanity_check_unstake.go b/loadtest/sanitycheck/sanity_check_unstake.go index b80764b808..4343db66c9 100644 --- a/loadtest/sanitycheck/sanity_check_unstake.go +++ b/loadtest/sanitycheck/sanity_check_unstake.go @@ -5,8 +5,8 @@ import ( "math/big" "time" - "github.com/0xPolygon/polygon-edge/consensus/polybft" "github.com/0xPolygon/polygon-edge/consensus/polybft/contractsapi" + polytypes "github.com/0xPolygon/polygon-edge/consensus/polybft/types" "github.com/0xPolygon/polygon-edge/contracts" "github.com/0xPolygon/polygon-edge/crypto" "github.com/0xPolygon/polygon-edge/jsonrpc" @@ -99,7 +99,7 @@ func (t *UnstakeTest) Run() error { return err } - extra, err := polybft.GetIbftExtra(epochEndingBlock.ExtraData) + extra, err := polytypes.GetIbftExtra(epochEndingBlock.ExtraData) if err != nil { return fmt.Errorf("failed to get ibft extra data for epoch ending block. 
Error: %w", err) } diff --git a/loadtest/sanitycheck/sanity_check_unstake_all.go b/loadtest/sanitycheck/sanity_check_unstake_all.go index f96c5ac56d..647e52541f 100644 --- a/loadtest/sanitycheck/sanity_check_unstake_all.go +++ b/loadtest/sanitycheck/sanity_check_unstake_all.go @@ -3,7 +3,7 @@ package sanitycheck import ( "fmt" - "github.com/0xPolygon/polygon-edge/consensus/polybft" + polytypes "github.com/0xPolygon/polygon-edge/consensus/polybft/types" "github.com/0xPolygon/polygon-edge/crypto" "github.com/0xPolygon/polygon-edge/jsonrpc" "github.com/Ethernal-Tech/ethgo" @@ -71,7 +71,7 @@ func (t *UnstakeAllTest) Run() error { return err } - extra, err := polybft.GetIbftExtra(epochEndingBlock.ExtraData) + extra, err := polytypes.GetIbftExtra(epochEndingBlock.ExtraData) if err != nil { return fmt.Errorf("failed to get ibft extra data for epoch ending block. Error: %w", err) } diff --git a/server/builtin.go b/server/builtin.go index 411d4b35df..ec174e5d9a 100644 --- a/server/builtin.go +++ b/server/builtin.go @@ -6,6 +6,7 @@ import ( consensusDev "github.com/0xPolygon/polygon-edge/consensus/dev" consensusDummy "github.com/0xPolygon/polygon-edge/consensus/dummy" consensusPolyBFT "github.com/0xPolygon/polygon-edge/consensus/polybft" + consensusPolyBFTConfig "github.com/0xPolygon/polygon-edge/consensus/polybft/config" "github.com/0xPolygon/polygon-edge/forkmanager" "github.com/0xPolygon/polygon-edge/secrets" "github.com/0xPolygon/polygon-edge/secrets/awsssm" @@ -27,7 +28,7 @@ type IsL1OriginatedTokenCheck func(config *chain.Params) (bool, error) const ( DevConsensus ConsensusType = "dev" - PolyBFTConsensus ConsensusType = consensusPolyBFT.ConsensusName + PolyBFTConsensus ConsensusType = consensusPolyBFTConfig.ConsensusName DummyConsensus ConsensusType = "dummy" ) diff --git a/server/server.go b/server/server.go index 3126f0dd67..90d9836705 100644 --- a/server/server.go +++ b/server/server.go @@ -26,7 +26,7 @@ import ( "github.com/0xPolygon/polygon-edge/blockchain/storagev2/memory" "github.com/0xPolygon/polygon-edge/chain" "github.com/0xPolygon/polygon-edge/consensus" - consensusPolyBFT "github.com/0xPolygon/polygon-edge/consensus/polybft" + consensusPolyBFT "github.com/0xPolygon/polygon-edge/consensus/polybft/config" "github.com/0xPolygon/polygon-edge/contracts" "github.com/0xPolygon/polygon-edge/crypto" "github.com/0xPolygon/polygon-edge/forkmanager"
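
The remaining hunks in the e2e, loadtest and server packages are mechanical import moves: consensus configuration is now loaded through consensus/polybft/config and block extra decoding through consensus/polybft/types. Below is a minimal sketch of that read path, not part of this patch, relying only on identifiers visible in the diff (LoadPolyBFTConfig, the Bridge map keyed by external chain ID, GetIbftExtra, BlockMetaData.EpochNumber); the package name and helper functions are invented for the example.

package example

import (
	"fmt"

	polycfg "github.com/0xPolygon/polygon-edge/consensus/polybft/config"
	polytypes "github.com/0xPolygon/polygon-edge/consensus/polybft/types"
	"github.com/0xPolygon/polygon-edge/types"
)

// epochOf decodes the PolyBFT extra section of a header and returns the epoch
// number it was sealed in, mirroring the polytypes.GetIbftExtra calls above.
func epochOf(header *types.Header) (uint64, error) {
	extra, err := polytypes.GetIbftExtra(header.ExtraData)
	if err != nil {
		return 0, fmt.Errorf("failed to decode extra: %w", err)
	}

	return extra.BlockMetaData.EpochNumber, nil
}

// hasBridge loads the consensus configuration written to the genesis file and
// reports whether a bridge is configured for the given external chain ID.
func hasBridge(genesisPath string, chainID uint64) (bool, error) {
	polybftCfg, err := polycfg.LoadPolyBFTConfig(genesisPath)
	if err != nil {
		return false, err
	}

	_, exists := polybftCfg.Bridge[chainID]

	return exists, nil
}
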