diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index 22f6514c8e36c..72b51bad18079 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -1630,6 +1630,15 @@ impl UpwardMessageSender for Pallet { fn send_upward_message(message: UpwardMessage) -> Result<(u32, XcmHash), MessageSendError> { Self::send_upward_message(message) } + + fn check_size(size: usize) -> Result<(), ()> { + let cfg = HostConfiguration::::get().ok_or(())?; + if size > cfg.max_upward_message_size as usize { + Err(()) + } else { + Ok(()) + } + } } impl InspectMessageQueues for Pallet { diff --git a/cumulus/primitives/core/src/lib.rs b/cumulus/primitives/core/src/lib.rs index bc707bc251c87..09e4a2a7bffe1 100644 --- a/cumulus/primitives/core/src/lib.rs +++ b/cumulus/primitives/core/src/lib.rs @@ -155,11 +155,19 @@ pub trait UpwardMessageSender { /// be dispatched or an error if the message cannot be sent. /// return the hash of the message sent fn send_upward_message(msg: UpwardMessage) -> Result<(u32, XcmHash), MessageSendError>; + + /// Check whether the message size is acceptable for the channel. + fn check_size(size: usize) -> Result<(), ()>; } + impl UpwardMessageSender for () { fn send_upward_message(_msg: UpwardMessage) -> Result<(u32, XcmHash), MessageSendError> { Err(MessageSendError::NoChannel) } + + fn check_size(_size: usize) -> Result<(), ()> { + Err(()) + } } /// The status of a channel. 
diff --git a/cumulus/primitives/utility/src/lib.rs b/cumulus/primitives/utility/src/lib.rs index dd80335caaaab..0fa05470fa016 100644 --- a/cumulus/primitives/utility/src/lib.rs +++ b/cumulus/primitives/utility/src/lib.rs @@ -77,6 +77,9 @@ where .map_err(|()| SendError::ExceedsMaxMessageSize)?; let data = versioned_xcm.encode(); + // check if the `UpwardsMessageSender` may also complain about the size + T::check_size(data.len()).map_err(|_| SendError::ExceedsMaxMessageSize)?; + Ok((data, price)) } else { // Anything else is unhandled. This includes a message that is not meant for us. @@ -602,6 +605,9 @@ mod test_xcm_router { fn send_upward_message(_: UpwardMessage) -> Result<(u32, XcmHash), MessageSendError> { Err(MessageSendError::Other) } + fn check_size(size: usize) -> Result<(), ()> { + todo!("https://github.com/paritytech/polkadot-sdk/pull/8409") + } } #[test] diff --git a/prdoc/pr_8422.prdoc b/prdoc/pr_8422.prdoc new file mode 100644 index 0000000000000..6228dc80becb0 --- /dev/null +++ b/prdoc/pr_8422.prdoc @@ -0,0 +1,30 @@ +title: '[AHM] Staking async fixes for XCM and election planning' +doc: +- audience: Runtime Dev + description: |- + This PR brings a few small fixes related to the XCM messages of stkaing-async, among other small fixes: + + + * [x] Allows `xcm::validate` to check the message size, and we actually now act upon it in the `staking-async-rc/parachain-runtime`s. The code is a bit duplicate now, and there is a TOOD about how to better refactor it later. + * [x] Part of this work is backported separately as https://github.com/paritytech/polkadot-sdk/pull/8409 + * [x] It brings a default `EraElectionPlannerOf` which should be the right tool to use to ensure elections always happen in time, with an educated guess based on `ElectionProvider::duration` rather than a random number. + * [x] It adds a few unit tests about the above + * [x] It silences some logs that were needlessly `INFO`, and makes the printing of some types a bit more CLI friendly. 
+ * [x] Renames `type SessionDuration` in `staking-async` to `type RelaySessionDuration` for better clarity. +crates: +- name: cumulus-pallet-parachain-system + bump: minor +- name: cumulus-primitives-core + bump: minor +- name: cumulus-primitives-utility + bump: minor +- name: pallet-staking-async-ah-client + bump: minor +- name: pallet-staking-async-rc-client + bump: minor +- name: pallet-staking-async-parachain-runtime + bump: minor +- name: pallet-staking-async-rc-runtime + bump: minor +- name: pallet-staking-async + bump: minor diff --git a/substrate/frame/election-provider-multi-block/src/mock/signed.rs b/substrate/frame/election-provider-multi-block/src/mock/signed.rs index a11e737612c08..543e6021ea5fa 100644 --- a/substrate/frame/election-provider-multi-block/src/mock/signed.rs +++ b/substrate/frame/election-provider-multi-block/src/mock/signed.rs @@ -25,13 +25,13 @@ use crate::{ }; use frame_election_provider_support::PageIndex; use frame_support::{ - assert_ok, dispatch::PostDispatchInfo, parameter_types, traits::EstimateCallFee, BoundedVec, + assert_ok, dispatch::PostDispatchInfo, parameter_types, traits::EstimateCallFee, }; use sp_npos_elections::ElectionScore; use sp_runtime::{traits::Zero, Perbill}; parameter_types! 
{ - pub static MockSignedNextSolution: Option, Pages>> = None; + pub static MockSignedNextSolution: Option>> = None; pub static MockSignedNextScore: Option = Default::default(); pub static MockSignedResults: Vec = Default::default(); } diff --git a/substrate/frame/election-provider-multi-block/src/types.rs b/substrate/frame/election-provider-multi-block/src/types.rs index 53215c1f27de4..867c09ffb1a19 100644 --- a/substrate/frame/election-provider-multi-block/src/types.rs +++ b/substrate/frame/election-provider-multi-block/src/types.rs @@ -82,14 +82,13 @@ pub type AssignmentOf = CloneNoBound, EqNoBound, PartialEqNoBound, - MaxEncodedLen, DefaultNoBound, )] #[codec(mel_bound(T: crate::Config))] #[scale_info(skip_type_params(T))] pub struct PagedRawSolution { /// The individual pages. - pub solution_pages: BoundedVec, ::Pages>, + pub solution_pages: Vec>, /// The final claimed score post feasibility and concatenation of all pages. pub score: ElectionScore, /// The designated round. @@ -165,6 +164,23 @@ pub trait PadSolutionPages: Sized { fn pad_solution_pages(self, desired_pages: PageIndex) -> Self; } +impl PadSolutionPages for Vec { + fn pad_solution_pages(self, desired_pages: PageIndex) -> Self { + let desired_pages_usize = desired_pages as usize; + debug_assert!(self.len() <= desired_pages_usize); + if self.len() == desired_pages_usize { + return self + } + + // we basically need to prepend the list with this many items. 
+ let empty_slots = desired_pages_usize.saturating_sub(self.len()); + sp_std::iter::repeat(Default::default()) + .take(empty_slots) + .chain(self.into_iter()) + .collect::>() + } +} + impl> PadSolutionPages for BoundedVec { @@ -391,8 +407,6 @@ impl Phase { #[cfg(test)] mod pagify { use super::{PadSolutionPages, Pagify}; - use frame_support::{traits::ConstU32, BoundedVec}; - use sp_core::bounded_vec; #[test] fn pagify_works() { @@ -410,15 +424,11 @@ mod pagify { #[test] fn pad_solution_pages_works() { // noop if the solution is complete, as with pagify. - let solution: BoundedVec<_, ConstU32<3>> = bounded_vec![1u32, 2, 3]; - assert_eq!(solution.pad_solution_pages(3).into_inner(), vec![1, 2, 3]); + let solution = vec![1u32, 2, 3]; + assert_eq!(solution.pad_solution_pages(3), vec![1, 2, 3]); // pads the solution with default if partial.. - let solution: BoundedVec<_, ConstU32<3>> = bounded_vec![2, 3]; - assert_eq!(solution.pad_solution_pages(3).into_inner(), vec![0, 2, 3]); - - // behaves the same as `pad_solution_pages(3)`. - let solution: BoundedVec<_, ConstU32<3>> = bounded_vec![2, 3]; - assert_eq!(solution.pad_solution_pages(4).into_inner(), vec![0, 2, 3]); + let solution = vec![2, 3]; + assert_eq!(solution.pad_solution_pages(3), vec![0, 2, 3]); } } diff --git a/substrate/frame/election-provider-multi-block/src/unsigned/miner.rs b/substrate/frame/election-provider-multi-block/src/unsigned/miner.rs index bafc78cd9d6ed..6186a881e6a71 100644 --- a/substrate/frame/election-provider-multi-block/src/unsigned/miner.rs +++ b/substrate/frame/election-provider-multi-block/src/unsigned/miner.rs @@ -371,7 +371,7 @@ impl BaseMiner { } // convert each page to a compact struct -- no more change allowed. 
- let solution_pages: BoundedVec, T::Pages> = paged_assignments + let mut solution_pages: Vec> = paged_assignments .into_iter() .enumerate() .map(|(page_index, assignment_page)| { @@ -382,12 +382,11 @@ impl BaseMiner { .ok_or(MinerError::SnapshotUnAvailable(SnapshotType::Voters(page)))?; // one last trimming -- `MaxBackersPerWinner`, the per-page variant. - let trimmed_assignment_page = - Self::trim_supports_max_backers_per_winner_per_page( - assignment_page, - voter_snapshot_page, - page_index as u32, - )?; + let trimmed_assignment_page = Self::trim_supports_max_backers_per_winner_per_page( + assignment_page, + voter_snapshot_page, + page_index as u32, + )?; let voter_index_fn = { let cache = helpers::generate_voter_cache::(&voter_snapshot_page); @@ -401,17 +400,11 @@ impl BaseMiner { ) .map_err::, _>(Into::into) }) - .collect::, _>>()? - .try_into() - .expect("`paged_assignments` is bound by `T::Pages`; length cannot change in iter chain; qed"); + .collect::, _>>()?; // now do the length trim. - let mut solution_pages_unbounded = solution_pages.into_inner(); let _trim_length_weight = - Self::maybe_trim_weight_and_len(&mut solution_pages_unbounded, &voter_pages)?; - let solution_pages = solution_pages_unbounded - .try_into() - .expect("maybe_trim_weight_and_len cannot increase the length of its input; qed."); + Self::maybe_trim_weight_and_len(&mut solution_pages, &voter_pages)?; miner_log!(debug, "trimmed {} voters due to length restriction.", _trim_length_weight); // finally, wrap everything up. 
Assign a fake score here, since we might need to re-compute diff --git a/substrate/frame/election-provider-multi-block/src/unsigned/mod.rs b/substrate/frame/election-provider-multi-block/src/unsigned/mod.rs index 18ba2370683ce..0a0e5329cdeb4 100644 --- a/substrate/frame/election-provider-multi-block/src/unsigned/mod.rs +++ b/substrate/frame/election-provider-multi-block/src/unsigned/mod.rs @@ -165,7 +165,7 @@ mod pallet { // we select the most significant pages, based on `T::MinerPages`. let page_indices = crate::Pallet::::msp_range_for(T::MinerPages::get() as usize); ::verify_synchronous_multi( - paged_solution.solution_pages.into_inner(), + paged_solution.solution_pages, page_indices, claimed_score, ) @@ -235,7 +235,12 @@ mod pallet { assert!( UnsignedWeightsOf::::submit_unsigned().all_lte(T::BlockWeights::get().max_block), "weight of `submit_unsigned` is too high" - ) + ); + assert!( + ::MinerPages::get() as usize + <= ::Pages::get() as usize, + "number of pages in the unsigned phase is too high" + ); } #[cfg(feature = "try-runtime")] @@ -333,6 +338,10 @@ mod pallet { paged_solution.solution_pages.len() == T::MinerPages::get() as usize, CommonError::WrongPageCount ); + ensure!( + paged_solution.solution_pages.len() <= ::Pages::get() as usize, + CommonError::WrongPageCount + ); Ok(()) } diff --git a/substrate/frame/election-provider-multi-block/src/verifier/tests.rs b/substrate/frame/election-provider-multi-block/src/verifier/tests.rs index 9dfc056881417..c67568ba6fd83 100644 --- a/substrate/frame/election-provider-multi-block/src/verifier/tests.rs +++ b/substrate/frame/election-provider-multi-block/src/verifier/tests.rs @@ -881,7 +881,7 @@ mod multi_page_sync_verification { assert_eq!(::queued_score(), None); let _ = ::verify_synchronous_multi( - paged.solution_pages.clone().into_inner(), + paged.solution_pages.clone(), MultiBlock::msp_range_for(2), paged.score, ) @@ -909,7 +909,7 @@ mod multi_page_sync_verification { assert_eq!(::queued_score(), None); let _ = 
::verify_synchronous_multi( - paged.solution_pages.clone().into_inner(), + paged.solution_pages.clone(), MultiBlock::msp_range_for(3), paged.score, ) @@ -941,7 +941,7 @@ mod multi_page_sync_verification { assert_eq!( ::verify_synchronous_multi( - paged.solution_pages.clone().into_inner(), + paged.solution_pages.clone(), MultiBlock::msp_range_for(2), paged.score, ) @@ -975,7 +975,7 @@ mod multi_page_sync_verification { assert_eq!( ::verify_synchronous_multi( - paged.solution_pages.clone().into_inner(), + paged.solution_pages.clone(), MultiBlock::msp_range_for(2), paged.score, ) @@ -1009,7 +1009,7 @@ mod multi_page_sync_verification { hypothetically!({ assert_ok!(::verify_synchronous_multi( - paged.solution_pages.clone().into_inner(), + paged.solution_pages.clone(), MultiBlock::msp_range_for(2), paged.score, )); @@ -1052,7 +1052,7 @@ mod multi_page_sync_verification { assert_eq!( ::verify_synchronous_multi( - paged.solution_pages.clone().into_inner(), + paged.solution_pages.clone(), MultiBlock::msp_range_for(2), paged.score, ) diff --git a/substrate/frame/staking-async/ah-client/src/lib.rs b/substrate/frame/staking-async/ah-client/src/lib.rs index 910f0c7cb90a2..eedfc2f814298 100644 --- a/substrate/frame/staking-async/ah-client/src/lib.rs +++ b/substrate/frame/staking-async/ah-client/src/lib.rs @@ -430,7 +430,7 @@ pub mod pallet { report: rc_client::ValidatorSetReport, ) -> DispatchResult { // Ensure the origin is one of Root or whatever is representing AssetHub. - log!(info, "Received new validator set report {:?}", report); + log!(debug, "Received new validator set report {}", report); T::AssetHubOrigin::ensure_origin_or_root(origin)?; // Check the operating mode. 
diff --git a/substrate/frame/staking-async/ahm-test/src/ah/mock.rs b/substrate/frame/staking-async/ahm-test/src/ah/mock.rs index ab3c5d0392887..035068a6146c7 100644 --- a/substrate/frame/staking-async/ahm-test/src/ah/mock.rs +++ b/substrate/frame/staking-async/ahm-test/src/ah/mock.rs @@ -316,8 +316,8 @@ impl multi_block::signed::Config for Runtime { parameter_types! { pub static BondingDuration: u32 = 3; pub static SlashDeferredDuration: u32 = 2; - pub static SessionsPerEra: u32 = 6; - pub static PlanningEraOffset: u32 = 1; + pub static RelaySessionsPerEra: u32 = 6; + pub static PlanningEraOffset: u32 = 2; } impl pallet_staking_async::Config for Runtime { @@ -326,7 +326,7 @@ impl pallet_staking_async::Config for Runtime { type AdminOrigin = EnsureRoot; type BondingDuration = BondingDuration; - type SessionsPerEra = SessionsPerEra; + type RelaySessionsPerEra = RelaySessionsPerEra; type PlanningEraOffset = PlanningEraOffset; type Currency = Balances; diff --git a/substrate/frame/staking-async/ahm-test/src/lib.rs b/substrate/frame/staking-async/ahm-test/src/lib.rs index 55faa6f63bea2..59d1b51c076ff 100644 --- a/substrate/frame/staking-async/ahm-test/src/lib.rs +++ b/substrate/frame/staking-async/ahm-test/src/lib.rs @@ -185,7 +185,7 @@ mod tests { rc::roll_until_matches( || { pallet_session::CurrentIndex::::get() == - current_session + ah::SessionsPerEra::get() + 1 + current_session + ah::RelaySessionsPerEra::get() + 1 }, true, ); diff --git a/substrate/frame/staking-async/rc-client/src/lib.rs b/substrate/frame/staking-async/rc-client/src/lib.rs index db09a4baa388c..8518f3f7d03f4 100644 --- a/substrate/frame/staking-async/rc-client/src/lib.rs +++ b/substrate/frame/staking-async/rc-client/src/lib.rs @@ -152,7 +152,7 @@ pub trait SendToRelayChain { fn validator_set(report: ValidatorSetReport); } -#[derive(Encode, Decode, DecodeWithMemTracking, Debug, Clone, PartialEq, TypeInfo)] +#[derive(Encode, Decode, DecodeWithMemTracking, Clone, PartialEq, TypeInfo)] /// A report 
about a new validator set. This is sent from AH -> RC. pub struct ValidatorSetReport { /// The new validator set. @@ -174,6 +174,28 @@ pub struct ValidatorSetReport { pub leftover: bool, } +impl core::fmt::Debug for ValidatorSetReport { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("ValidatorSetReport") + .field("new_validator_set", &self.new_validator_set) + .field("id", &self.id) + .field("prune_up_to", &self.prune_up_to) + .field("leftover", &self.leftover) + .finish() + } +} + +impl core::fmt::Display for ValidatorSetReport { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("ValidatorSetReport") + .field("new_validator_set", &self.new_validator_set.len()) + .field("id", &self.id) + .field("prune_up_to", &self.prune_up_to) + .field("leftover", &self.leftover) + .finish() + } +} + impl ValidatorSetReport { /// A new instance of self that is terminal. This is useful when we want to send everything in /// one go. @@ -196,7 +218,7 @@ impl ValidatorSetReport { Ok(self) } - /// Split self into `count` number of pieces. + /// Split self into chunks of `chunk_size` element. pub fn split(self, chunk_size: usize) -> Vec where AccountId: Clone, @@ -213,9 +235,7 @@ impl ValidatorSetReport { } } -#[derive( - Encode, Decode, DecodeWithMemTracking, Debug, Clone, PartialEq, TypeInfo, MaxEncodedLen, -)] +#[derive(Encode, Decode, DecodeWithMemTracking, Clone, PartialEq, TypeInfo, MaxEncodedLen)] /// The information that is sent from RC -> AH on session end. pub struct SessionReport { /// The session that is ending. 
@@ -246,6 +266,28 @@ pub struct SessionReport { pub leftover: bool, } +impl core::fmt::Debug for SessionReport { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("SessionReport") + .field("end_index", &self.end_index) + .field("validator_points", &self.validator_points) + .field("activation_timestamp", &self.activation_timestamp) + .field("leftover", &self.leftover) + .finish() + } +} + +impl core::fmt::Display for SessionReport { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("SessionReport") + .field("end_index", &self.end_index) + .field("validator_points", &self.validator_points.len()) + .field("activation_timestamp", &self.activation_timestamp) + .field("leftover", &self.leftover) + .finish() + } +} + impl SessionReport { /// A new instance of self that is terminal. This is useful when we want to send everything in /// one go. @@ -435,7 +477,7 @@ pub mod pallet { origin: OriginFor, report: SessionReport, ) -> DispatchResult { - log!(info, "Received session report: {:?}", report); + log!(debug, "Received session report: {}", report); T::RelayChainOrigin::ensure_origin_or_root(origin)?; match LastSessionReportEndingIndex::::get() { diff --git a/substrate/frame/staking-async/runtimes/parachain/build-and-run-zn.sh b/substrate/frame/staking-async/runtimes/parachain/build-and-run-zn.sh index 721c672662215..def8659ba978d 100755 --- a/substrate/frame/staking-async/runtimes/parachain/build-and-run-zn.sh +++ b/substrate/frame/staking-async/runtimes/parachain/build-and-run-zn.sh @@ -19,10 +19,8 @@ RUST_LOG=${LOG} ../../../../../target/release/chain-spec-builder \ --runtime ../../../../../target/release/wbuild/pallet-staking-async-parachain-runtime/pallet_staking_async_parachain_runtime.compact.compressed.wasm \ --relay-chain rococo-local \ --para-id 1100 \ - named-preset dot_size - # named-preset ksm_size - # named-preset development - # change this as per your needs ^^^ + named-preset 
ksm_size + # change this as per your needs ^^^ options: development / dot_size / ksm_size mv ./chain_spec.json ./parachain.json echo "✅ creating rc chain specs" diff --git a/substrate/frame/staking-async/runtimes/parachain/src/lib.rs b/substrate/frame/staking-async/runtimes/parachain/src/lib.rs index cb2a33af15a0a..730762f29369e 100644 --- a/substrate/frame/staking-async/runtimes/parachain/src/lib.rs +++ b/substrate/frame/staking-async/runtimes/parachain/src/lib.rs @@ -38,7 +38,9 @@ extern crate alloc; use alloc::{vec, vec::Vec}; use assets_common::{ + foreign_creators::ForeignCreators, local_and_foreign_assets::{LocalFromLeft, TargetFromLeft}, + matching::{FromNetwork, FromSiblingParachain}, AssetIdForPoolAssets, AssetIdForPoolAssetsConvert, AssetIdForTrustBackedAssetsConvert, }; use codec::{Decode, DecodeWithMemTracking, Encode, MaxEncodedLen}; @@ -74,8 +76,11 @@ use parachains_common::{ BlockNumber, CollectionId, Hash, Header, ItemId, Nonce, Signature, AVERAGE_ON_INITIALIZE_RATIO, NORMAL_DISPATCH_RATIO, }; +use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; use sp_api::impl_runtime_apis; use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; +#[cfg(any(feature = "std", test))] +pub use sp_runtime::BuildStorage; use sp_runtime::{ generic, impl_opaque_keys, traits::{AccountIdConversion, BlakeTwo256, Block as BlockT, ConvertInto, Verify}, @@ -88,25 +93,16 @@ use sp_version::RuntimeVersion; use testnet_parachains_constants::westend::{ consensus::*, currency::*, fee::WeightToFee, snowbridge::EthereumNetwork, time::*, }; -use xcm_config::{ - ForeignAssetsConvertedConcreteId, LocationToAccountId, PoolAssetsConvertedConcreteId, - PoolAssetsPalletLocation, TrustBackedAssetsConvertedConcreteId, - TrustBackedAssetsPalletLocation, WestendLocation, XcmOriginToTransactDispatchOrigin, -}; - -#[cfg(any(feature = "std", test))] -pub use sp_runtime::BuildStorage; - -use assets_common::{ - foreign_creators::ForeignCreators, - matching::{FromNetwork, 
FromSiblingParachain}, -}; -use polkadot_runtime_common::{BlockHashCount, SlowAdjustingFeeUpdate}; use weights::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight}; use xcm::{ latest::prelude::AssetId, prelude::{VersionedAsset, VersionedAssetId, VersionedAssets, VersionedLocation, VersionedXcm}, }; +use xcm_config::{ + ForeignAssetsConvertedConcreteId, LocationToAccountId, PoolAssetsConvertedConcreteId, + PoolAssetsPalletLocation, TrustBackedAssetsConvertedConcreteId, + TrustBackedAssetsPalletLocation, WestendLocation, XcmOriginToTransactDispatchOrigin, +}; #[cfg(feature = "runtime-benchmarks")] use frame_support::traits::PalletInfoAccess; @@ -137,7 +133,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: alloc::borrow::Cow::Borrowed("asset-hub-next"), impl_name: alloc::borrow::Cow::Borrowed("asset-hub-next"), authoring_version: 1, - spec_version: 1_017_007, + spec_version: 1_000_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 16, diff --git a/substrate/frame/staking-async/runtimes/parachain/src/staking.rs b/substrate/frame/staking-async/runtimes/parachain/src/staking.rs index d1f2a71015e15..85cbe4e339272 100644 --- a/substrate/frame/staking-async/runtimes/parachain/src/staking.rs +++ b/substrate/frame/staking-async/runtimes/parachain/src/staking.rs @@ -32,12 +32,11 @@ use sp_runtime::{ }; parameter_types! { - pub storage SignedPhase: u32 = 3 * MINUTES; - pub storage UnsignedPhase: u32 = 1 * MINUTES; + pub storage SignedPhase: u32 = 3 * MINUTES / 2; + pub storage UnsignedPhase: u32 = 0 * MINUTES; pub storage SignedValidationPhase: u32 = Pages::get() + 1; - /// Compatible with Polkadot, we allow up to 22_500 nominators to be considered for election - pub storage MaxElectingVoters: u32 = 2000; + pub storage MaxElectingVoters: u32 = 1000; /// Maximum number of validators that we may want to elect. 1000 is the end target. pub const MaxValidatorSet: u32 = 1000; @@ -58,7 +57,7 @@ parameter_types! 
{ pub MaxBackersPerWinner: u32 = VoterSnapshotPerBlock::get(); /// Total number of backers per winner across all pages. This is not used in the code yet. - pub MaxBackersPerWinnerFinal: u32 = MaxBackersPerWinner::get(); + pub MaxBackersPerWinnerFinal: u32 = MaxElectingVoters::get(); /// Size of the exposures. This should be small enough to make the reward payouts feasible. pub const MaxExposurePageSize: u32 = 64; @@ -237,6 +236,8 @@ impl pallet_staking_async::EraPayout for EraPayout { parameter_types! { // Six sessions in an era (6 hours). pub const SessionsPerEra: SessionIndex = prod_or_fast!(6, 1); + /// Duration of a relay session in our blocks. Needs to be hardcoded per-runtime. + pub const RelaySessionDuration: BlockNumber = 10; // 2 eras for unbonding (12 hours). pub const BondingDuration: sp_staking::EraIndex = 2; // 1 era in which slashes can be cancelled (6 hours). @@ -258,7 +259,7 @@ impl pallet_staking_async::Config for Runtime { type RewardRemainder = (); type Slash = (); type Reward = (); - type SessionsPerEra = SessionsPerEra; + type RelaySessionsPerEra = SessionsPerEra; type BondingDuration = BondingDuration; type SlashDeferDuration = SlashDeferDuration; type AdminOrigin = EitherOf, StakingAdmin>; @@ -276,7 +277,8 @@ impl pallet_staking_async::Config for Runtime { type WeightInfo = weights::pallet_staking_async::WeightInfo; type MaxInvulnerables = frame_support::traits::ConstU32<20>; type MaxDisabledValidators = ConstU32<100>; - type PlanningEraOffset = ConstU32<2>; + type PlanningEraOffset = + pallet_staking_async::PlanningEraOffsetOf>; type RcClientInterface = StakingNextRcClient; } @@ -303,11 +305,89 @@ use pallet_staking_async_rc_client as rc_client; use xcm::latest::{prelude::*, SendXcm}; pub struct XcmToRelayChain(PhantomData); -impl rc_client::SendToRelayChain for XcmToRelayChain { - type AccountId = AccountId; - /// Send a new validator set report to relay chain. 
- fn validator_set(report: rc_client::ValidatorSetReport) { +impl XcmToRelayChain { + /// Splits a message until it can pass the validation step of `send_xcm`. + /// + /// It consumes a `ValidatorSetReport`, which should be in full, and possibly splits it into a + /// splitter vector thereof, returned as `Ok(vec)`. It also converts to results into + /// ready-to-send XCM messages. + /// + /// The maximum number of steps taken is optionally limited by `maybe_max_steps`. + /// + /// If validating still fails, due to any other error not taken into account, it return + /// `Err(reason)`. + /// + /// Notes: This is a UMP. Current values are: + /// + /// Polkadot: 65531 (64k) + /// Kusama: 65531 (64k) + /// Westend: 8388608 (8MB) + /// + /// + /// To test this, in the relay runtime's genesis config, tweak the `max_downward_message_size` + /// and `max_upward_message_size` values. + /// + /// TODO: good for now, but can be refactored and reused both in rc and in AH. What we need is: + /// + /// 1. `trait Splittable` over all types that can be sent over XCM and might be big + /// 2. `struct StakingXcmSender>` + fn split_until_validated( + report: rc_client::ValidatorSetReport, + destination: &Location, + maybe_max_steps: Option, + ) -> Result>, SendError> { + let mut chunk_size = report.new_validator_set.len(); + let mut steps = 0; + + loop { + let current_reports = report.clone().split(chunk_size); + + // the first report is the heaviest, the last one might be smaller. 
+ let first_report = if let Some(r) = current_reports.first() { + r + } else { + log::debug!(target: "runtime::rc-client", "📨 unexpected: no reports to send"); + return Ok(vec![]); + }; + + log::debug!( + target: "runtime::rc-client", + "📨 step: {:?}, chunk_size: {:?}, report_size: {:?}", + steps, + chunk_size, + first_report.encoded_size(), + ); + let message = Self::message_from_report(first_report.clone()); + match ::validate(&mut Some(destination.clone()), &mut Some(message)) { + Ok((_ticket, price)) => { + log::debug!(target: "runtime::rc-client", "📨 validated, price: {:?}", price); + return Ok(current_reports + .into_iter() + .map(Self::message_from_report) + .collect::>()); + }, + Err(SendError::ExceedsMaxMessageSize) => { + log::debug!(target: "runtime::rc-client", "📨 ExceedsMaxMessageSize -- reducing chunk_size"); + chunk_size = chunk_size.saturating_div(2); + steps += 1; + if maybe_max_steps.map_or(false, |max_steps| steps > max_steps) { + log::error!(target: "runtime::rc-client", "📨 Exceeded max steps"); + return Err(SendError::ExceedsMaxMessageSize); + } else { + // try again with the new `chunk_size` + continue; + } + }, + Err(other) => { + log::error!(target: "runtime::rc-client", "📨 other error -- cannot send XCM: {:?}", other); + return Err(other); + }, + } + } + } + + fn message_from_report(report: rc_client::ValidatorSetReport) -> Xcm<()> { let message = Xcm(vec![ Instruction::UnpaidExecution { weight_limit: WeightLimit::Unlimited, @@ -321,16 +401,34 @@ impl rc_client::SendToRelayChain for XcmToRelayChain { .into(), }, ]); - let dest = Location::parent(); - let result = send_xcm::(dest, message); + message + } +} - match result { - Ok(_) => { - log::info!(target: "runtime", "Successfully sent validator set report to relay chain") - }, - Err(e) => { - log::error!(target: "runtime", "Failed to send validator set report to relay chain: {:?}", e) - }, +impl rc_client::SendToRelayChain for XcmToRelayChain { + type AccountId = AccountId; + + /// Send a 
new validator set report to relay chain. + fn validator_set(report: rc_client::ValidatorSetReport) { + let dest = Location::parent(); + let messages = if let Ok(r) = Self::split_until_validated(report, &dest, Some(8)) { + r + } else { + log::error!(target: "runtime::rc-client", "📨 Failed to split validator set report"); + return; + }; + + for (idx, message) in messages.into_iter().enumerate() { + log::debug!(target: "runtime::rc-client", "📨 sending validator set report part {}, message size: {:?}", idx, message.encoded_size()); + let result = send_xcm::(dest.clone(), message); + match result { + Ok(_) => { + log::debug!(target: "runtime::rc-client", "📨 Successfully sent validator set report part {} to relay chain", idx) + }, + Err(e) => { + log::error!(target: "runtime::rc-client", "📨 Failed to send validator set report to relay chain: {:?}", e) + }, + } } } } diff --git a/substrate/frame/staking-async/runtimes/parachain/zombienet-staking-runtimes.toml b/substrate/frame/staking-async/runtimes/parachain/zombienet-staking-runtimes.toml index 8cf6c5946dcf4..5f85c10a18f4d 100644 --- a/substrate/frame/staking-async/runtimes/parachain/zombienet-staking-runtimes.toml +++ b/substrate/frame/staking-async/runtimes/parachain/zombienet-staking-runtimes.toml @@ -10,9 +10,9 @@ rpc_port = 9944 [[relaychain.nodes]] name = "bob" validator = true -rpc_port = 9955 +rpc_port = 9945 args = [ - "-lruntime::system=debug,runtime::session=trace,runtime::staking::ah-client=trace", + "-lruntime::system=debug,runtime::session=trace,runtime::staking::ah-client=trace,runtime::ah-client=debug" ] [[parachains]] @@ -21,7 +21,7 @@ chain_spec_path = "./parachain.json" [parachains.collator] name = "charlie" -rpc_port = 9966 +rpc_port = 9946 args = [ - "-lruntime::system=debug,runtime::multiblock-election=debug,runtime::staking=debug,runtime::staking::rc-client=trace", + 
"-lruntime::system=debug,runtime::multiblock-election=debug,runtime::staking=debug,runtime::staking::rc-client=trace,runtime::rc-client=debug", ] diff --git a/substrate/frame/staking-async/runtimes/rc/src/genesis_config_presets.rs b/substrate/frame/staking-async/runtimes/rc/src/genesis_config_presets.rs index 7140d53d29359..701085278f74a 100644 --- a/substrate/frame/staking-async/runtimes/rc/src/genesis_config_presets.rs +++ b/substrate/frame/staking-async/runtimes/rc/src/genesis_config_presets.rs @@ -107,6 +107,7 @@ fn default_parachains_host_configuration( max_head_data_size: 32 * 1024, max_upward_queue_count: 8, max_upward_queue_size: 1024 * 1024, + // NOTE: these can be tweaked to mimic the XCM message splitting. max_downward_message_size: 1024 * 1024, max_upward_message_size: 50 * 1024, max_upward_message_num_per_candidate: 5, diff --git a/substrate/frame/staking-async/runtimes/rc/src/lib.rs b/substrate/frame/staking-async/runtimes/rc/src/lib.rs index d7e8002b3b253..dd297efb4b546 100644 --- a/substrate/frame/staking-async/runtimes/rc/src/lib.rs +++ b/substrate/frame/staking-async/runtimes/rc/src/lib.rs @@ -606,19 +606,129 @@ enum RcClientCalls { } pub struct XcmToAssetHub>(PhantomData<(T, AssetHubId)>); -impl> ah_client::SendToAssetHub for XcmToAssetHub { - type AccountId = AccountId; - fn relay_session_report(session_report: rc_client::SessionReport) { - let message = Xcm(vec![ +impl> XcmToAssetHub { + /// This is a downward message. + /// Westend: `51200` (50K) + /// Polkadot: `51200` (50K) + /// Kusama: `51200` (50K) + fn split_until_validated_session_report( + report: rc_client::SessionReport, + destination: &Location, + maybe_max_steps: Option, + ) -> Result>, SendError> { + let mut chunk_size = report.validator_points.len(); + let mut steps = 0; + + loop { + let current_reports = report.clone().split(chunk_size); + + // the first report is the heaviest, the last one might be smaller. 
+ let first_report = if let Some(r) = current_reports.first() { + r + } else { + log::debug!(target: "runtime::ah-client", "📨 unexpected: no reports to send"); + return Ok(vec![]); + }; + + log::debug!( + target: "runtime::ah-client", + "📨 step: {:?}, chunk_size: {:?}, report_size: {:?}", + steps, + chunk_size, + first_report.encoded_size(), + ); + + let message = Self::session_message_from_report(first_report.clone()); + match ::validate(&mut Some(destination.clone()), &mut Some(message)) { + Ok((_ticket, price)) => { + log::debug!(target: "runtime::ah-client", "📨 validated, price: {:?}", price); + return Ok(current_reports + .into_iter() + .map(Self::session_message_from_report) + .collect::>()); + }, + Err(SendError::ExceedsMaxMessageSize) => { + log::debug!(target: "runtime::ah-client", "📨 ExceedsMaxMessageSize -- reducing chunk_size"); + chunk_size = chunk_size.saturating_div(2); + steps += 1; + if maybe_max_steps.map_or(false, |max_steps| steps > max_steps) { + log::error!(target: "runtime::ah-client", "📨 Exceeded max steps"); + return Err(SendError::ExceedsMaxMessageSize); + } else { + // try again with the new `chunk_size` + continue; + } + }, + Err(other) => { + log::error!(target: "runtime::ah-client", "📨 other error -- cannot send XCM: {:?}", other); + return Err(other); + }, + } + } + } + + fn session_message_from_report(report: rc_client::SessionReport) -> Xcm<()> { + Xcm(vec![ Instruction::UnpaidExecution { weight_limit: WeightLimit::Unlimited, check_origin: None, }, - Self::mk_asset_hub_call(RcClientCalls::RelaySessionReport(session_report)), - ]); - if let Err(err) = send_xcm::(AssetHubNextLocation::get(), message) { - log::error!(target: "runtime", "Failed to send relay session report message: {:?}", err); + Self::mk_asset_hub_call(RcClientCalls::RelaySessionReport(report)), + ]) + } + + fn mk_asset_hub_call( + call: RcClientCalls<::AccountId>, + ) -> Instruction<()> { + Instruction::Transact { + origin_kind: OriginKind::Superuser, + 
fallback_max_weight: None, + call: AssetHubRuntimePallets::RcClient(call).encode().into(), + } + } +} + +impl> ah_client::SendToAssetHub for XcmToAssetHub { + type AccountId = AccountId; + + fn relay_session_report(mut session_report: rc_client::SessionReport) { + // add some fake data to the session report to make it bigger + session_report.validator_points.extend( + (0..1000) + .into_iter() + .map(|i| { + let fake_validator = AccountId::decode( + &mut sp_runtime::traits::TrailingZeroInput::new(&[i as u8; 32]), + ) + .unwrap(); + (fake_validator, i) + }) + .collect::>(), + ); + + // then split and send + let dest = AssetHubNextLocation::get(); + let messages = if let Ok(r) = + Self::split_until_validated_session_report(session_report, &dest, Some(8)) + { + r + } else { + log::error!(target: "runtime::ah-client", "📨 Failed to split session report"); + return; + }; + + for (idx, message) in messages.into_iter().enumerate() { + log::debug!(target: "runtime::ah-client", "📨 sending session report part {}, message size: {:?}", idx, message.encoded_size()); + let result = send_xcm::(dest.clone(), message); + match result { + Ok(_) => { + log::debug!(target: "runtime::ah-client", "📨 Successfully sent session report part {} to relay chain", idx) + }, + Err(e) => { + log::error!(target: "runtime::ah-client", "📨 Failed to send session report to relay chain: {:?}", e) + }, + } } } @@ -634,19 +744,7 @@ impl> ah_client::SendToAssetHub for XcmToAssetH Self::mk_asset_hub_call(RcClientCalls::RelayNewOffence(session_index, offences)), ]); if let Err(err) = send_xcm::(AssetHubNextLocation::get(), message) { - log::error!(target: "runtime", "Failed to send relay offence message: {:?}", err); - } - } -} - -impl> XcmToAssetHub { - fn mk_asset_hub_call( - call: RcClientCalls<::AccountId>, - ) -> Instruction<()> { - Instruction::Transact { - origin_kind: OriginKind::Superuser, - fallback_max_weight: None, - call: AssetHubRuntimePallets::RcClient(call).encode().into(), + 
log::error!(target: "runtime::ah-client", "Failed to send relay offence message: {:?}", err); } } } diff --git a/substrate/frame/staking-async/src/lib.rs b/substrate/frame/staking-async/src/lib.rs index bed5c6ebefcb9..4fc31810782af 100644 --- a/substrate/frame/staking-async/src/lib.rs +++ b/substrate/frame/staking-async/src/lib.rs @@ -80,13 +80,14 @@ use frame_support::{ BoundedVec, DebugNoBound, DefaultNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound, WeakBoundedVec, }; +use frame_system::pallet_prelude::BlockNumberFor; use ledger::LedgerIntegrityState; use scale_info::TypeInfo; use sp_runtime::{ - traits::{AtLeast32BitUnsigned, StaticLookup}, - BoundedBTreeMap, Perbill, RuntimeDebug, + traits::{AtLeast32BitUnsigned, One, StaticLookup, UniqueSaturatedInto}, + BoundedBTreeMap, Perbill, RuntimeDebug, Saturating, }; -use sp_staking::{EraIndex, ExposurePage, PagedExposureMetadata}; +use sp_staking::{EraIndex, ExposurePage, PagedExposureMetadata, SessionIndex}; pub use sp_staking::{Exposure, IndividualExposure, StakerStatus}; pub use weights::WeightInfo; @@ -422,3 +423,27 @@ impl Contains for AllStakers { Ledger::::contains_key(account) } } + +/// A smart type to determine the [`Config::PlanningEraOffset`], given: +/// +/// * Expected relay session duration, `RS` +/// * Time taking into consideration for XCM sending, `S` +/// +/// It will use the estimated election duration, the relay session duration, and add one as it knows +/// the relay chain will want to buffer validators for one session. This is needed because we use +/// this in our calculation based on the "active era". 
+pub struct PlanningEraOffsetOf(core::marker::PhantomData<(T, RS, S)>); +impl>, S: Get>> Get + for PlanningEraOffsetOf +{ + fn get() -> SessionIndex { + let election_duration = ::duration_with_export(); + let sessions_needed = (election_duration + S::get()) / RS::get(); + // add one, because we know the RC session pallet wants to buffer for one session, and + // another one cause we will receive activation report one session after that. + sessions_needed + .saturating_add(One::one()) + .saturating_add(One::one()) + .unique_saturated_into() + } +} diff --git a/substrate/frame/staking-async/src/mock.rs b/substrate/frame/staking-async/src/mock.rs index a146e9f068344..66ad0653bfa59 100644 --- a/substrate/frame/staking-async/src/mock.rs +++ b/substrate/frame/staking-async/src/mock.rs @@ -34,7 +34,7 @@ use frame_support::{ }; use frame_system::{pallet_prelude::BlockNumberFor, EnsureRoot, EnsureSignedBy}; use pallet_staking_async_rc_client as rc_client; -use sp_core::ConstBool; +use sp_core::{ConstBool, ConstU64}; use sp_io; use sp_npos_elections::BalancingConfig; use sp_runtime::{traits::Zero, BuildStorage}; @@ -61,6 +61,22 @@ pub(crate) type AccountId = ::AccountId; pub(crate) type BlockNumber = BlockNumberFor; pub(crate) type Balance = ::Balance; +#[derive(Clone, Copy)] +pub enum PlanningEraMode { + Fixed(SessionIndex), + Smart, +} + +pub struct PlanningEraOffset; +impl Get for PlanningEraOffset { + fn get() -> SessionIndex { + match PlanningEraModeVal::get() { + PlanningEraMode::Fixed(value) => value, + PlanningEraMode::Smart => crate::PlanningEraOffsetOf::>::get(), + } + } +} + parameter_types! { pub static ExistentialDeposit: Balance = 1; pub static SlashDeferDuration: EraIndex = 0; @@ -73,9 +89,9 @@ parameter_types! 
{ pub static MaxValidatorSet: u32 = 100; pub static ElectionsBounds: ElectionBounds = ElectionBoundsBuilder::default().build(); pub static AbsoluteMaxNominations: u32 = 16; - pub static PlanningEraOffset: u32 = 1; + pub static PlanningEraModeVal: PlanningEraMode = PlanningEraMode::Fixed(2); // Session configs - pub static SessionsPerEra: SessionIndex = 3; + pub static RelaySessionsPerEra: SessionIndex = 3; pub static Period: BlockNumber = 5; pub static Offset: BlockNumber = 0; } @@ -129,7 +145,8 @@ parameter_types! { pub static Pages: PageIndex = 1; pub static MaxBackersPerWinner: u32 = 256; pub static MaxWinnersPerPage: u32 = MaxValidatorSet::get(); - pub static StartReceived: bool = false; + pub static StartReceived: Option = None; + pub static ElectionDelay: BlockNumber = 0; } pub type InnerElection = onchain::OnChainExecution; @@ -164,22 +181,23 @@ impl ElectionProvider for TestElectionProvider { fn elect(page: PageIndex) -> Result, Self::Error> { if page == 0 { - StartReceived::set(false); + StartReceived::set(None); } InnerElection::elect(page) } fn start() -> Result<(), Self::Error> { - StartReceived::set(true); + StartReceived::set(Some(System::block_number())); Ok(()) } fn duration() -> Self::BlockNumber { - InnerElection::duration() + InnerElection::duration() + ElectionDelay::get() } fn status() -> Result { - if StartReceived::get() { - Ok(true) - } else { - Err(()) + let now = System::block_number(); + match StartReceived::get() { + Some(at) if now - at >= ElectionDelay::get() => Ok(true), + Some(_) => Ok(false), + None => Err(()), } } } @@ -336,7 +354,7 @@ pub mod session_mock { id: u32, prune_up_to: Option, ) { - log::debug!(target: "runtime::session_mock", "Received validator set: {:?}", new_validator_set); + log::debug!(target: "runtime::staking-async::session_mock", "Received validator set: {:?}", new_validator_set); let now = System::block_number(); // store the report for further inspection. 
ReceivedValidatorSets::mutate(|reports| { @@ -388,7 +406,7 @@ impl crate::pallet::pallet::Config for Test { type Currency = Balances; type RewardRemainder = RewardRemainderMock; type Reward = MockReward; - type SessionsPerEra = SessionsPerEra; + type RelaySessionsPerEra = RelaySessionsPerEra; type SlashDeferDuration = SlashDeferDuration; type AdminOrigin = EitherOfDiverse, EnsureSignedBy>; type EraPayout = OneTokenPerMillisecond; @@ -485,7 +503,15 @@ impl ExtBuilder { self } pub(crate) fn planning_era_offset(self, offset: SessionIndex) -> Self { - PlanningEraOffset::set(offset); + PlanningEraModeVal::set(PlanningEraMode::Fixed(offset)); + self + } + pub fn smart_era_planner(self) -> Self { + PlanningEraModeVal::set(PlanningEraMode::Smart); + self + } + pub fn election_delay(self, delay: BlockNumber) -> Self { + ElectionDelay::set(delay); self } pub(crate) fn nominate(mut self, nominate: bool) -> Self { @@ -510,7 +536,7 @@ impl ExtBuilder { self } pub(crate) fn session_per_era(self, length: SessionIndex) -> Self { - SessionsPerEra::set(length); + RelaySessionsPerEra::set(length); self } pub(crate) fn period(self, length: BlockNumber) -> Self { @@ -739,7 +765,7 @@ pub(crate) fn time_per_session() -> u64 { /// Time it takes to finish an era. pub(crate) fn time_per_era() -> u64 { - time_per_session() * SessionsPerEra::get() as u64 + time_per_session() * RelaySessionsPerEra::get() as u64 } pub(crate) fn reward_all_elected() { diff --git a/substrate/frame/staking-async/src/pallet/impls.rs b/substrate/frame/staking-async/src/pallet/impls.rs index 83679103b5ad7..fc32d29a2f923 100644 --- a/substrate/frame/staking-async/src/pallet/impls.rs +++ b/substrate/frame/staking-async/src/pallet/impls.rs @@ -1057,7 +1057,7 @@ impl rc_client::AHStakingInterface for Pallet { /// implies a new validator set has been applied, and we must increment the active era to keep /// the systems in sync. 
fn on_relay_session_report(report: rc_client::SessionReport) { - log!(debug, "session report received\n{:?}", report,); + log!(debug, "session report received: {}", report,); let consumed_weight = T::WeightInfo::rc_on_session_report(); let rc_client::SessionReport { diff --git a/substrate/frame/staking-async/src/pallet/mod.rs b/substrate/frame/staking-async/src/pallet/mod.rs index 4e803b07442f1..2dc5a8fcc4cd1 100644 --- a/substrate/frame/staking-async/src/pallet/mod.rs +++ b/substrate/frame/staking-async/src/pallet/mod.rs @@ -180,9 +180,9 @@ pub mod pallet { #[pallet::no_default_bounds] type Reward: OnUnbalanced>; - /// Number of sessions per era. + /// Number of sessions per era, as per the preferences of the relay chain. #[pallet::constant] - type SessionsPerEra: Get; + type RelaySessionsPerEra: Get; /// Number of sessions before the end of an era when the election for the next era will /// start. @@ -190,11 +190,11 @@ pub mod pallet { /// - This determines how many sessions **before** the last session of the era the staking /// election process should begin. /// - The value is bounded between **1** (election starts at the beginning of the last - /// session) and `SessionsPerEra` (election starts at the beginning of the first session - /// of the era). + /// session) and `RelaySessionsPerEra` (election starts at the beginning of the first + /// session of the era). /// /// ### Example: - /// - If `SessionsPerEra = 6` and `PlanningEraOffset = 1`, the election starts at the + /// - If `RelaySessionsPerEra = 6` and `PlanningEraOffset = 1`, the election starts at the /// beginning of session `6 - 1 = 5`. /// - If `PlanningEraOffset = 6`, the election starts at the beginning of session `6 - 6 = /// 0`, meaning it starts at the very beginning of the era. @@ -348,7 +348,7 @@ pub mod pallet { impl frame_system::DefaultConfig for TestDefaultConfig {} parameter_types! 
{ - pub const SessionsPerEra: SessionIndex = 3; + pub const RelaySessionsPerEra: SessionIndex = 3; pub const BondingDuration: EraIndex = 3; } @@ -363,7 +363,7 @@ pub mod pallet { type RewardRemainder = (); type Slash = (); type Reward = (); - type SessionsPerEra = SessionsPerEra; + type RelaySessionsPerEra = RelaySessionsPerEra; type BondingDuration = BondingDuration; type PlanningEraOffset = ConstU32<1>; type SlashDeferDuration = (); diff --git a/substrate/frame/staking-async/src/session_rotation.rs b/substrate/frame/staking-async/src/session_rotation.rs index 288375ef19190..8f32537f4d7b7 100644 --- a/substrate/frame/staking-async/src/session_rotation.rs +++ b/substrate/frame/staking-async/src/session_rotation.rs @@ -719,14 +719,14 @@ impl Rotator { /// Returns whether we are at the session where we should plan the new era. fn is_plan_era_deadline(start_session: SessionIndex) -> bool { - let planning_era_offset = T::PlanningEraOffset::get().min(T::SessionsPerEra::get()); + let planning_era_offset = T::PlanningEraOffset::get().min(T::RelaySessionsPerEra::get()); // session at which we should plan the new era. - let target_plan_era_session = T::SessionsPerEra::get().saturating_sub(planning_era_offset); + let target_plan_era_session = + T::RelaySessionsPerEra::get().saturating_sub(planning_era_offset); let era_start_session = Self::active_era_start_session_index(); // progress of the active era in sessions. 
- let session_progress = - start_session.saturating_add(1).defensive_saturating_sub(era_start_session); + let session_progress = start_session.defensive_saturating_sub(era_start_session); log!( debug, diff --git a/substrate/frame/staking-async/src/tests/election_provider.rs b/substrate/frame/staking-async/src/tests/election_provider.rs index af9b9f8001837..a9b14b3ddbb51 100644 --- a/substrate/frame/staking-async/src/tests/election_provider.rs +++ b/substrate/frame/staking-async/src/tests/election_provider.rs @@ -24,7 +24,7 @@ use substrate_test_utils::assert_eq_uvec; use crate::tests::session_mock::ReceivedValidatorSets; #[test] -fn planning_era_offset_less_works() { +fn planning_era_offset_less_0() { // same as `basic_setup_sessions_per_era`, but notice how `PagedElectionProceeded` happens // one session later, and planning era is incremented one session later ExtBuilder::default() @@ -32,8 +32,62 @@ fn planning_era_offset_less_works() { .session_per_era(6) .planning_era_offset(0) .no_flush_events() .build_and_execute(|| { - // this essentially makes the session duration 7, because the mock session will buffer - // for one session before activating the era. + // this essentially makes the session duration 8. After 6 sessions we realize we have to + // start an election (since offset = 0), then it is queued for one session (7), and then + // activated (8). 
+ assert_eq!(Session::current_index(), 8); + assert_eq!(active_era(), 1); + + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::SessionRotated { starting_session: 1, active_era: 0, planned_era: 0 }, + Event::SessionRotated { starting_session: 2, active_era: 0, planned_era: 0 }, + Event::SessionRotated { starting_session: 3, active_era: 0, planned_era: 0 }, + Event::SessionRotated { starting_session: 4, active_era: 0, planned_era: 0 }, + Event::SessionRotated { starting_session: 5, active_era: 0, planned_era: 0 }, + Event::SessionRotated { starting_session: 6, active_era: 0, planned_era: 1 }, + Event::PagedElectionProceeded { page: 0, result: Ok(2) }, + Event::SessionRotated { starting_session: 7, active_era: 0, planned_era: 1 }, + Event::EraPaid { era_index: 0, validator_payout: 20000, remainder: 20000 }, + Event::SessionRotated { starting_session: 8, active_era: 1, planned_era: 1 } + ] + ); + + Session::roll_until_active_era(2); + assert_eq!(Session::current_index(), 16); + assert_eq!(active_era(), 2); + + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::SessionRotated { starting_session: 9, active_era: 1, planned_era: 1 }, + Event::SessionRotated { starting_session: 10, active_era: 1, planned_era: 1 }, + Event::SessionRotated { starting_session: 11, active_era: 1, planned_era: 1 }, + Event::SessionRotated { starting_session: 12, active_era: 1, planned_era: 1 }, + Event::SessionRotated { starting_session: 13, active_era: 1, planned_era: 1 }, + Event::SessionRotated { starting_session: 14, active_era: 1, planned_era: 2 }, + Event::PagedElectionProceeded { page: 0, result: Ok(2) }, + Event::SessionRotated { starting_session: 15, active_era: 1, planned_era: 2 }, + Event::EraPaid { era_index: 1, validator_payout: 20000, remainder: 20000 }, + Event::SessionRotated { starting_session: 16, active_era: 2, planned_era: 2 } + ] + ); + }); +} + +#[test] +fn planning_era_offset_works_1() { + // same as `basic_setup_sessions_per_era`, but notice 
how `PagedElectionProceeded` happens + one session later, and planning era is incremented one session later + ExtBuilder::default() + .session_per_era(6) + .planning_era_offset(1) + .no_flush_events() + .build_and_execute(|| { + // this essentially makes the session duration 7. After 5 sessions we realize we have to + // start an election (since offset = 1), then it is queued for one session (6), and then + // activated (7). assert_eq!(Session::current_index(), 7); assert_eq!(active_era(), 1); @@ -74,14 +128,15 @@ fn planning_era_offset_less_works() { } #[test] -fn planning_era_offset_more_works() { +fn planning_era_offset_works_2() { ExtBuilder::default() .session_per_era(6) .planning_era_offset(2) .no_flush_events() .build_and_execute(|| { - // This effectively makes the era one session shorter. - assert_eq!(Session::current_index(), 5); + // start election at 4, and send it over. Buffered at 6, activated at 6. This is the + // expected behavior, and the default in `mock.rs`. + assert_eq!(Session::current_index(), 6); assert_eq!(active_era(), 1); assert_eq!( @@ -89,28 +144,123 @@ fn planning_era_offset_more_works() { vec![ Event::SessionRotated { starting_session: 1, active_era: 0, planned_era: 0 }, Event::SessionRotated { starting_session: 2, active_era: 0, planned_era: 0 }, - Event::SessionRotated { starting_session: 3, active_era: 0, planned_era: 1 }, + Event::SessionRotated { starting_session: 3, active_era: 0, planned_era: 0 }, + Event::SessionRotated { starting_session: 4, active_era: 0, planned_era: 1 }, Event::PagedElectionProceeded { page: 0, result: Ok(2) }, + Event::SessionRotated { starting_session: 5, active_era: 0, planned_era: 1 }, + Event::EraPaid { era_index: 0, validator_payout: 15000, remainder: 15000 }, + Event::SessionRotated { starting_session: 6, active_era: 1, planned_era: 1 } + ] + ); + + Session::roll_until_active_era(2); + assert_eq!(Session::current_index(), 12); + assert_eq!(active_era(), 2); + + assert_eq!( + 
staking_events_since_last_call(), + vec![ + Event::SessionRotated { starting_session: 7, active_era: 1, planned_era: 1 }, + Event::SessionRotated { starting_session: 8, active_era: 1, planned_era: 1 }, + Event::SessionRotated { starting_session: 9, active_era: 1, planned_era: 1 }, + Event::SessionRotated { starting_session: 10, active_era: 1, planned_era: 2 }, + Event::PagedElectionProceeded { page: 0, result: Ok(2) }, + Event::SessionRotated { starting_session: 11, active_era: 1, planned_era: 2 }, + Event::EraPaid { era_index: 1, validator_payout: 15000, remainder: 15000 }, + Event::SessionRotated { starting_session: 12, active_era: 2, planned_era: 2 } + ] + ); + }); +} + +#[test] +fn planning_era_offset_works_smart() { + ExtBuilder::default() + .session_per_era(6) + .smart_era_planner() + .no_flush_events() + .build_and_execute(|| { + // This works exactly the same as offset = 2, which means we send and rotate validators + // such that the era duration remains 6 sessions. + assert_eq!(Session::current_index(), 6); + assert_eq!(active_era(), 1); + + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::SessionRotated { starting_session: 1, active_era: 0, planned_era: 0 }, + Event::SessionRotated { starting_session: 2, active_era: 0, planned_era: 0 }, + Event::SessionRotated { starting_session: 3, active_era: 0, planned_era: 0 }, Event::SessionRotated { starting_session: 4, active_era: 0, planned_era: 1 }, - Event::EraPaid { era_index: 0, validator_payout: 12500, remainder: 12500 }, - Event::SessionRotated { starting_session: 5, active_era: 1, planned_era: 1 } + Event::PagedElectionProceeded { page: 0, result: Ok(2) }, + Event::SessionRotated { starting_session: 5, active_era: 0, planned_era: 1 }, + Event::EraPaid { era_index: 0, validator_payout: 15000, remainder: 15000 }, + Event::SessionRotated { starting_session: 6, active_era: 1, planned_era: 1 } ] ); Session::roll_until_active_era(2); - assert_eq!(Session::current_index(), 10); + 
assert_eq!(Session::current_index(), 12); assert_eq!(active_era(), 2); assert_eq!( staking_events_since_last_call(), vec![ - Event::SessionRotated { starting_session: 6, active_era: 1, planned_era: 1 }, Event::SessionRotated { starting_session: 7, active_era: 1, planned_era: 1 }, - Event::SessionRotated { starting_session: 8, active_era: 1, planned_era: 2 }, + Event::SessionRotated { starting_session: 8, active_era: 1, planned_era: 1 }, + Event::SessionRotated { starting_session: 9, active_era: 1, planned_era: 1 }, + Event::SessionRotated { starting_session: 10, active_era: 1, planned_era: 2 }, + Event::PagedElectionProceeded { page: 0, result: Ok(2) }, + Event::SessionRotated { starting_session: 11, active_era: 1, planned_era: 2 }, + Event::EraPaid { era_index: 1, validator_payout: 15000, remainder: 15000 }, + Event::SessionRotated { starting_session: 12, active_era: 2, planned_era: 2 } + ] + ); + }); +} + +#[test] +fn planning_era_offset_works_smart_with_delay() { + ExtBuilder::default() + .session_per_era(6) + .election_delay(7) + .smart_era_planner() + .no_flush_events() + .build_and_execute(|| { + // Same as above, but now election takes more time, more than 1 session to be exact. + // Notice how the era duration is kept at 6. 
+ assert_eq!(Session::current_index(), 6); + assert_eq!(active_era(), 1); + + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::SessionRotated { starting_session: 1, active_era: 0, planned_era: 0 }, + Event::SessionRotated { starting_session: 2, active_era: 0, planned_era: 0 }, + Event::SessionRotated { starting_session: 3, active_era: 0, planned_era: 1 }, + Event::SessionRotated { starting_session: 4, active_era: 0, planned_era: 1 }, Event::PagedElectionProceeded { page: 0, result: Ok(2) }, + Event::SessionRotated { starting_session: 5, active_era: 0, planned_era: 1 }, + Event::EraPaid { era_index: 0, validator_payout: 15000, remainder: 15000 }, + Event::SessionRotated { starting_session: 6, active_era: 1, planned_era: 1 } + ] + ); + + Session::roll_until_active_era(2); + assert_eq!(Session::current_index(), 12); + assert_eq!(active_era(), 2); + + assert_eq!( + staking_events_since_last_call(), + vec![ + Event::SessionRotated { starting_session: 7, active_era: 1, planned_era: 1 }, + Event::SessionRotated { starting_session: 8, active_era: 1, planned_era: 1 }, Event::SessionRotated { starting_session: 9, active_era: 1, planned_era: 2 }, - Event::EraPaid { era_index: 1, validator_payout: 12500, remainder: 12500 }, - Event::SessionRotated { starting_session: 10, active_era: 2, planned_era: 2 } + Event::SessionRotated { starting_session: 10, active_era: 1, planned_era: 2 }, + Event::PagedElectionProceeded { page: 0, result: Ok(2) }, + Event::SessionRotated { starting_session: 11, active_era: 1, planned_era: 2 }, + Event::EraPaid { era_index: 1, validator_payout: 15000, remainder: 15000 }, + Event::SessionRotated { starting_session: 12, active_era: 2, planned_era: 2 } ] ); }); @@ -426,7 +576,7 @@ mod paged_on_initialize_era_election_planner { // we will start the next election at the start of block 20 assert_eq!(System::block_number(), 15); - assert_eq!(PlanningEraOffset::get(), 1); + assert_eq!(PlanningEraOffset::get(), 2); // genesis validators are now 
in place. assert_eq!(current_era(), 1); @@ -541,7 +691,7 @@ mod paged_on_initialize_era_election_planner { // we will start the next election at the start of block 20 assert_eq!(System::block_number(), 15); - assert_eq!(PlanningEraOffset::get(), 1); + assert_eq!(PlanningEraOffset::get(), 2); // 1. election signal is sent here, Session::roll_until(20);