From e1629e758b684e811ae5f4af4e05df421ca82d22 Mon Sep 17 00:00:00 2001 From: Ankan Date: Sun, 16 Mar 2025 22:43:24 +0100 Subject: [PATCH 01/38] Revert "Implementation of ah-client and rc-client staking pallets (#7582)" This reverts commit 669b88900206c4421068a079fdc57cc447546ece. --- Cargo.lock | 38 --- Cargo.toml | 4 - prdoc/pr_7582.prdoc | 17 - .../election-provider-multi-block/src/lib.rs | 2 +- substrate/frame/staking/ah-client/Cargo.toml | 66 ---- substrate/frame/staking/ah-client/src/lib.rs | 322 ------------------ substrate/frame/staking/rc-client/Cargo.toml | 45 --- substrate/frame/staking/rc-client/src/lib.rs | 181 ---------- substrate/frame/staking/src/pallet/impls.rs | 6 - umbrella/Cargo.toml | 18 - umbrella/src/lib.rs | 10 - 11 files changed, 1 insertion(+), 708 deletions(-) delete mode 100644 prdoc/pr_7582.prdoc delete mode 100644 substrate/frame/staking/ah-client/Cargo.toml delete mode 100644 substrate/frame/staking/ah-client/src/lib.rs delete mode 100644 substrate/frame/staking/rc-client/Cargo.toml delete mode 100644 substrate/frame/staking/rc-client/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 0664351fd508e..1502f1576061d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13205,42 +13205,6 @@ dependencies = [ "substrate-test-utils", ] -[[package]] -name = "pallet-staking-ah-client" -version = "0.1.0" -dependencies = [ - "frame-support", - "frame-system", - "log", - "pallet-authorship", - "pallet-session", - "pallet-staking", - "pallet-staking-rc-client", - "parity-scale-codec", - "polkadot-primitives", - "polkadot-runtime-parachains", - "scale-info", - "sp-core 28.0.0", - "sp-runtime 31.0.1", - "sp-staking", - "staging-xcm", -] - -[[package]] -name = "pallet-staking-rc-client" -version = "0.1.0" -dependencies = [ - "frame-support", - "frame-system", - "log", - "parity-scale-codec", - "scale-info", - "sp-core 28.0.0", - "sp-runtime 31.0.1", - "sp-staking", - "staging-xcm", -] - [[package]] name = "pallet-staking-reward-curve" version = "11.0.0" @@ 
-15933,8 +15897,6 @@ dependencies = [ "pallet-skip-feeless-payment", "pallet-society", "pallet-staking", - "pallet-staking-ah-client", - "pallet-staking-rc-client", "pallet-staking-reward-curve", "pallet-staking-reward-fn", "pallet-staking-runtime-api", diff --git a/Cargo.toml b/Cargo.toml index c355679143037..c0757ed3043fc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -421,8 +421,6 @@ members = [ "substrate/frame/session/benchmarking", "substrate/frame/society", "substrate/frame/staking", - "substrate/frame/staking/ah-client", - "substrate/frame/staking/rc-client", "substrate/frame/staking/reward-curve", "substrate/frame/staking/reward-fn", "substrate/frame/staking/runtime-api", @@ -1003,8 +1001,6 @@ pallet-session-benchmarking = { path = "substrate/frame/session/benchmarking", d pallet-skip-feeless-payment = { path = "substrate/frame/transaction-payment/skip-feeless-payment", default-features = false } pallet-society = { path = "substrate/frame/society", default-features = false } pallet-staking = { path = "substrate/frame/staking", default-features = false } -pallet-staking-ah-client = { path = "substrate/frame/staking/ah-client", default-features = false } -pallet-staking-rc-client = { path = "substrate/frame/staking/rc-client", default-features = false } pallet-staking-reward-curve = { path = "substrate/frame/staking/reward-curve", default-features = false } pallet-staking-reward-fn = { path = "substrate/frame/staking/reward-fn", default-features = false } pallet-staking-runtime-api = { path = "substrate/frame/staking/runtime-api", default-features = false } diff --git a/prdoc/pr_7582.prdoc b/prdoc/pr_7582.prdoc deleted file mode 100644 index 26e594c4373f2..0000000000000 --- a/prdoc/pr_7582.prdoc +++ /dev/null @@ -1,17 +0,0 @@ -title: Implementation of `ah-client` and `rc-client` staking pallets -doc: -- audience: Runtime Dev - description: |- - This PR introduces the initial structure for `pallet-ah-client` and `pallet-rc-client`. 
These - pallets will reside on the relay chain and AssetHub, respectively, and will manage the interaction - between `pallet-session` on the relay chain and `pallet-staking` on AssetHub. - Both pallets are experimental and not intended for production use. -crates: -- name: pallet-staking-ah-client - bump: major -- name: pallet-staking-rc-client - bump: major -- name: pallet-election-provider-multi-block - bump: minor -- name: pallet-staking - bump: major diff --git a/substrate/frame/election-provider-multi-block/src/lib.rs b/substrate/frame/election-provider-multi-block/src/lib.rs index 86a94c67c5844..547b99cfc5416 100644 --- a/substrate/frame/election-provider-multi-block/src/lib.rs +++ b/substrate/frame/election-provider-multi-block/src/lib.rs @@ -66,7 +66,7 @@ //! //! ## Pagination //! -//! Most of the external APIs of this pallet are paginated. All pagination follow a pattern where if +//! Most of the external APIs of this pallet are paginated. All pagination follow a patter where if //! `N` pages exist, the first paginated call is `function(N-1)` and the last one is `function(0)`. //! For example, with 3 pages, the `elect` of [`ElectionProvider`] is expected to be called as //! `elect(2) -> elect(1) -> elect(0)`. In essence, calling a paginated function with index 0 is diff --git a/substrate/frame/staking/ah-client/Cargo.toml b/substrate/frame/staking/ah-client/Cargo.toml deleted file mode 100644 index 4c41380e48edd..0000000000000 --- a/substrate/frame/staking/ah-client/Cargo.toml +++ /dev/null @@ -1,66 +0,0 @@ -[package] -name = "pallet-staking-ah-client" -description = "Pallet handling the communication with staking-rc-client. It's role is to glue the staking pallet (on AssetHub chain) and session pallet (on Relay Chain) in a transparent way." 
-license = "Apache-2.0" -version = "0.1.0" -edition.workspace = true -authors.workspace = true -repository.workspace = true - -[dependencies] -codec = { workspace = true, features = ["derive"] } -frame-support = { workspace = true } -frame-system = { workspace = true } -log = { workspace = true } -pallet-authorship = { workspace = true } -pallet-session = { features = ["historical"], workspace = true } -pallet-staking = { workspace = true } -pallet-staking-rc-client = { workspace = true } -polkadot-primitives = { workspace = true } -polkadot-runtime-parachains = { workspace = true } -scale-info = { workspace = true, features = ["derive"] } -sp-core = { workspace = true } -sp-runtime = { workspace = true } -sp-staking = { workspace = true } -xcm = { workspace = true } - -[features] -default = ["std"] -std = [ - "codec/std", - "frame-support/std", - "frame-system/std", - "log/std", - "pallet-authorship/std", - "pallet-session/std", - "pallet-staking-rc-client/std", - "pallet-staking/std", - "polkadot-primitives/std", - "polkadot-runtime-parachains/std", - "scale-info/std", - "sp-core/std", - "sp-runtime/std", - "sp-staking/std", - "xcm/std", -] -runtime-benchmarks = [ - "frame-support/runtime-benchmarks", - "frame-system/runtime-benchmarks", - "pallet-staking-rc-client/runtime-benchmarks", - "pallet-staking/runtime-benchmarks", - "polkadot-primitives/runtime-benchmarks", - "polkadot-runtime-parachains/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", - "sp-staking/runtime-benchmarks", - "xcm/runtime-benchmarks", -] -try-runtime = [ - "frame-support/try-runtime", - "frame-system/try-runtime", - "pallet-authorship/try-runtime", - "pallet-session/try-runtime", - "pallet-staking-rc-client/try-runtime", - "pallet-staking/try-runtime", - "polkadot-runtime-parachains/try-runtime", - "sp-runtime/try-runtime", -] diff --git a/substrate/frame/staking/ah-client/src/lib.rs b/substrate/frame/staking/ah-client/src/lib.rs deleted file mode 100644 index 
88aee9ee3e9da..0000000000000 --- a/substrate/frame/staking/ah-client/src/lib.rs +++ /dev/null @@ -1,322 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! This pallet is intended to be used on a relay chain and to communicate with its counterpart on -//! AssetHub (or a similar network) named `pallet-staking-rc-client`. -//! -//! This pallet serves as an interface between the staking pallet on AssetHub and the session pallet -//! on the relay chain. From the relay chain to AssetHub, its responsibilities are to send -//! information about session changes (start and end) and to report offenses. From AssetHub to the -//! relay chain, it receives information about the potentially new validator set for the session. -//! -//! All the communication between the two pallets is performed with XCM messages. - -#![cfg_attr(not(feature = "std"), no_std)] - -extern crate alloc; - -use alloc::vec::Vec; -use frame_support::pallet_prelude::*; -use pallet_staking_rc_client::Offence; -use sp_core::crypto::AccountId32; -use sp_runtime::traits::Convert; -use sp_staking::{offence::OffenceDetails, Exposure, SessionIndex}; -use xcm::prelude::*; - -const LOG_TARGET: &str = "runtime::staking::ah-client"; - -/// `pallet-staking-rc-client` pallet index on AssetHub. Used to construct remote calls. 
-/// -/// The codec index must correspond to the index of `pallet-staking-rc-client` in the -/// `construct_runtime` of AssetHub. -#[derive(Encode, Decode)] -enum AssetHubRuntimePallets { - #[codec(index = 50)] - RcClient(StakingCalls), -} - -/// Call encoding for the calls needed from the rc-client pallet. -#[derive(Encode, Decode)] -enum StakingCalls { - /// A session with the given index has started. - #[codec(index = 0)] - RelayChainSessionStart(SessionIndex), - // A session with the given index has ended. The block authors with their corresponding - // session points are provided. - #[codec(index = 1)] - RelayChainSessionEnd(SessionIndex, Vec<(AccountId32, u32)>), - /// Report one or more offences. - #[codec(index = 2)] - NewRelayChainOffences(SessionIndex, Vec), -} - -#[frame_support::pallet(dev_mode)] -pub mod pallet { - use crate::*; - use alloc::vec; - use core::result; - use frame_system::pallet_prelude::*; - use pallet_session::historical; - use pallet_staking::ExposureOf; - use polkadot_primitives::Id as ParaId; - use polkadot_runtime_parachains::origin::{ensure_parachain, Origin}; - use sp_runtime::Perbill; - use sp_staking::{offence::OnOffenceHandler, SessionIndex}; - - const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); - - /// The balance type of this pallet. - pub type BalanceOf = ::CurrencyBalance; - - // `Exposure>` will be removed. This type alias exists only to - // suppress clippy warnings. - type ElectedValidatorSet = Vec<( - ::AccountId, - Exposure<::AccountId, BalanceOf>, - )>; - - #[pallet::pallet] - #[pallet::storage_version(STORAGE_VERSION)] - pub struct Pallet(_); - - // TODO: should contain some initial state, otherwise starting from genesis won't work - #[pallet::storage] - pub type ValidatorSet = StorageValue<_, Option>, ValueQuery>; - - /// Keeps track of the session points for each block author in the current session. 
- #[pallet::storage] - pub type BlockAuthors = StorageMap<_, Twox64Concat, AccountId32, u32, ValueQuery>; - - #[pallet::config] - pub trait Config: frame_system::Config { - type RuntimeOrigin: From<::RuntimeOrigin> - + Into::RuntimeOrigin>>; - /// Just the `Currency::Balance` type; we have this item to allow us to constrain it to - /// `From`. - type CurrencyBalance: sp_runtime::traits::AtLeast32BitUnsigned - + codec::FullCodec - + Copy - + MaybeSerializeDeserialize - + core::fmt::Debug - + Default - + From - + TypeInfo - + Send - + Sync - + MaxEncodedLen; - /// The ParaId of the AssetHub. - #[pallet::constant] - type AssetHubId: Get; - /// The XCM sender. - type SendXcm: SendXcm; - } - - #[pallet::error] - pub enum Error { - /// The ParaId making the call is not AssetHub. - NotAssetHub, - } - - #[pallet::call] - impl Pallet { - #[pallet::call_index(0)] - // #[pallet::weight(T::WeightInfo::new_validators())] // TODO - pub fn new_validator_set( - origin: OriginFor, - new_validator_set: ElectedValidatorSet, - ) -> DispatchResult { - // Ignore requests not coming from the AssetHub or root. - Self::ensure_root_or_para(origin, ::AssetHubId::get().into())?; - - // Save the validator set. We don't care if there is a validator set which was not used. - ValidatorSet::::put(Some(new_validator_set)); - - Ok(()) - } - } - - impl historical::SessionManager>> - for Pallet - { - fn new_session(_: sp_staking::SessionIndex) -> Option> { - // If there is a new validator set - return it. Otherwise return `None`. 
- ValidatorSet::::take() - } - - fn new_session_genesis( - _: SessionIndex, - ) -> Option>)>> { - ValidatorSet::::take() - } - - fn start_session(start_index: SessionIndex) { - >::start_session(start_index) - } - - fn end_session(end_index: SessionIndex) { - >::end_session(end_index) - } - } - - impl pallet_session::SessionManager for Pallet { - fn new_session(_: u32) -> Option::AccountId>> { - // Doesn't do anything because all the logic is handled in `historical::SessionManager` - // implementation - defensive!("new_session should not be called"); - None - } - - fn end_session(session_index: u32) { - let authors = BlockAuthors::::iter().collect::>(); - // The maximum number of block authors is `num_cores * max_validators_per_core` (both - // are parameters from [`SchedulerParams`]). - let _ = BlockAuthors::::clear(u32::MAX, None); - - let message = Xcm(vec![ - Instruction::UnpaidExecution { - weight_limit: WeightLimit::Unlimited, - check_origin: None, - }, - mk_asset_hub_call(StakingCalls::RelayChainSessionEnd(session_index, authors)), - ]); - - if let Err(err) = send_xcm::( - Location::new(0, [Junction::Parachain(T::AssetHubId::get())]), - message, - ) { - log::error!(target: LOG_TARGET, "Sending `RelayChainSessionEnd` to AssetHub failed: {:?}", err); - } - } - - fn start_session(session_index: u32) { - let message = Xcm(vec![ - Instruction::UnpaidExecution { - weight_limit: WeightLimit::Unlimited, - check_origin: None, - }, - mk_asset_hub_call(StakingCalls::RelayChainSessionStart(session_index)), - ]); - if let Err(err) = send_xcm::( - Location::new(0, [Junction::Parachain(T::AssetHubId::get())]), - message, - ) { - log::error!(target: LOG_TARGET, "Sending `RelayChainSessionStart` to AssetHub failed: {:?}", err); - } - } - } - - impl pallet_authorship::EventHandler> for Pallet - where - T: Config + pallet_authorship::Config + pallet_session::Config + Config, - T::AccountId: Into, - { - // Notes the authored block in `BlockAuthors`. 
- fn note_author(author: T::AccountId) { - BlockAuthors::::mutate(author.into(), |block_count| { - *block_count += 1; - }); - } - } - - impl - OnOffenceHandler, Weight> - for Pallet - where - T: pallet_session::Config::AccountId>, - T: pallet_session::historical::Config< - FullIdentification = Exposure<::AccountId, BalanceOf>, - FullIdentificationOf = ExposureOf, - >, - T::SessionHandler: pallet_session::SessionHandler<::AccountId>, - T::SessionManager: pallet_session::SessionManager<::AccountId>, - T::ValidatorIdOf: Convert< - ::AccountId, - Option<::AccountId>, - >, - T::AccountId: Into, - { - fn on_offence( - offenders: &[OffenceDetails< - T::AccountId, - pallet_session::historical::IdentificationTuple, - >], - slash_fraction: &[Perbill], - slash_session: SessionIndex, - ) -> Weight { - let offenders_and_slashes = offenders - .iter() - .cloned() - .zip(slash_fraction) - .map(|(offence, fraction)| { - Offence::new( - offence.offender.0.into(), - offence.reporters.into_iter().map(|r| r.into()).collect(), - *fraction, - ) - }) - .collect::>(); - - // send the offender immediately over xcm - let message = Xcm(vec![ - Instruction::UnpaidExecution { - weight_limit: WeightLimit::Unlimited, - check_origin: None, - }, - mk_asset_hub_call(StakingCalls::NewRelayChainOffences( - slash_session, - offenders_and_slashes, - )), - ]); - if let Err(err) = send_xcm::( - Location::new(0, [Junction::Parachain(T::AssetHubId::get())]), - message, - ) { - log::error!(target: LOG_TARGET, "Sending `NewRelayChainOffences` to AssetHub failed: {:?}", - err); - } - - Weight::zero() - } - } - - impl Pallet { - /// Ensure the origin is one of Root or the `para` itself. - fn ensure_root_or_para( - origin: ::RuntimeOrigin, - id: ParaId, - ) -> DispatchResult { - if let Ok(caller_id) = - ensure_parachain(::RuntimeOrigin::from(origin.clone())) - { - // Check if matching para id... - ensure!(caller_id == id, Error::::NotAssetHub); - } else { - // Check if root... 
- ensure_root(origin.clone())?; - } - Ok(()) - } - } - - fn mk_asset_hub_call(call: StakingCalls) -> Instruction<()> { - Instruction::Transact { - origin_kind: OriginKind::Superuser, - fallback_max_weight: None, - call: AssetHubRuntimePallets::RcClient(call).encode().into(), - } - } -} diff --git a/substrate/frame/staking/rc-client/Cargo.toml b/substrate/frame/staking/rc-client/Cargo.toml deleted file mode 100644 index 5498cae777e12..0000000000000 --- a/substrate/frame/staking/rc-client/Cargo.toml +++ /dev/null @@ -1,45 +0,0 @@ -[package] -name = "pallet-staking-rc-client" -description = "Pallet handling the communication with staking-ah-client. It's role is to glue the staking pallet (on AssetHub chain) and session pallet (on Relay Chain) in a transparent way." -license = "Apache-2.0" -version = "0.1.0" -edition.workspace = true -authors.workspace = true -repository.workspace = true - -[dependencies] -codec = { workspace = true, features = ["derive"] } -frame-support = { workspace = true } -frame-system = { workspace = true } -log = { workspace = true } -scale-info = { workspace = true, features = ["derive"] } -sp-core = { workspace = true } -sp-runtime = { features = ["serde"], workspace = true } -sp-staking = { features = ["serde"], workspace = true } -xcm = { workspace = true } - -[features] -default = ["std"] -std = [ - "codec/std", - "frame-support/std", - "frame-system/std", - "log/std", - "scale-info/std", - "sp-core/std", - "sp-runtime/std", - "sp-staking/std", - "xcm/std", -] -runtime-benchmarks = [ - "frame-support/runtime-benchmarks", - "frame-system/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", - "sp-staking/runtime-benchmarks", - "xcm/runtime-benchmarks", -] -try-runtime = [ - "frame-support/try-runtime", - "frame-system/try-runtime", - "sp-runtime/try-runtime", -] diff --git a/substrate/frame/staking/rc-client/src/lib.rs b/substrate/frame/staking/rc-client/src/lib.rs deleted file mode 100644 index ab94df9c910ff..0000000000000 --- 
a/substrate/frame/staking/rc-client/src/lib.rs +++ /dev/null @@ -1,181 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! This pallet is intended to be used on AssetHub. It provides extrinsics used by -//! `pallet-staking-ah-client` and serves as an interface between the relay chain and the staking -//! pallet on AssetHub. - -#![cfg_attr(not(feature = "std"), no_std)] - -extern crate alloc; - -use alloc::vec::Vec; -use frame_support::pallet_prelude::*; -use sp_core::crypto::AccountId32; -use sp_runtime::Perbill; -use sp_staking::SessionIndex; -use xcm::prelude::*; - -const LOG_TARGET: &str = "runtime::staking::rc-client"; - -// Provides to the pallet a validator set produced by an election or other similar mechanism. -pub trait ElectionResultHandler { - fn handle_election_result(result: Vec); -} - -// API provided by the staking pallet. -pub trait StakingApi { - /// New session with index `start_index` has started on the relay chain. - fn on_relay_chain_session_start(start_index: SessionIndex); - /// A session with index `end_index` has ended on the relay chain. The block authors and their - /// corresponding session points are reported. - fn on_relay_chain_session_end(end_index: SessionIndex, block_authors: Vec<(AccountId32, u32)>); - /// Report one or more offences on the relay chain. 
- fn on_new_offences(offences: Vec); -} - -/// `pallet-staking-ah-client` pallet index on Relay chain. Used to construct remote calls. -/// -/// The codec index must correspond to the index of `pallet-staking-ah-client` in the -/// `construct_runtime` of the Relay chain. -#[derive(Encode, Decode)] -enum RelayChainRuntimePallets { - #[codec(index = 50)] - AhClient(SessionCalls), -} - -/// Call encoding for the calls needed from the pallet. -#[derive(Encode, Decode)] -enum SessionCalls { - #[codec(index = 0)] - NewValidatorSet(Vec), -} - -// An offence on the relay chain. Based on [`sp_staking::offence::OffenceDetails`]. -#[derive(Encode, Decode, DecodeWithMemTracking, Debug, Clone, PartialEq, TypeInfo)] -pub struct Offence { - offender: AccountId32, - reporters: Vec, - slash_fraction: Perbill, -} - -impl Offence { - pub fn new( - offender: AccountId32, - reporters: Vec, - slash_fraction: Perbill, - ) -> Self { - Self { offender, reporters, slash_fraction } - } -} - -#[frame_support::pallet(dev_mode)] -pub mod pallet { - use super::*; - use alloc::vec; - use frame_system::pallet_prelude::*; - - /// The in-code storage version. - const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); - - #[pallet::pallet] - #[pallet::storage_version(STORAGE_VERSION)] - pub struct Pallet(_); - - #[pallet::config] - pub trait Config: frame_system::Config { - type AdminOrigin: EnsureOrigin; - /// A stable ID for a validator. - type ValidatorId: Member - + Parameter - + MaybeSerializeDeserialize - + MaxEncodedLen - + TryFrom; - - /// Handler for staking calls - type StakingApi: StakingApi; - /// The XCM sender. 
- type SendXcm: SendXcm; - } - - impl> ElectionResultHandler for Pallet { - fn handle_election_result(result: Vec) { - let new_validator_set = result.into_iter().map(Into::into).collect::>(); - - let message = Xcm(vec![ - Instruction::UnpaidExecution { - weight_limit: WeightLimit::Unlimited, - check_origin: None, - }, - mk_relay_chain_call(SessionCalls::NewValidatorSet(new_validator_set)), - ]); - - if let Err(err) = send_xcm::(Location::new(1, Here), message) { - log::error!(target: LOG_TARGET, "Sending `NewValidators` to relay chain failed: {:?}", err); - } - } - } - - #[pallet::call] - impl Pallet { - /// Called to indicate the start of a new session on the relay chain. - #[pallet::call_index(0)] - // #[pallet::weight(T::WeightInfo::end_session())] // TODO - pub fn relay_chain_session_start( - origin: OriginFor, - start_index: SessionIndex, - ) -> DispatchResult { - T::AdminOrigin::ensure_origin_or_root(origin)?; - T::StakingApi::on_relay_chain_session_start(start_index); - Ok(()) - } - - /// Called to indicate the end of a session on the relay chain. Accepts the session id and - /// the block authors with their corresponding session points for the finished session. - #[pallet::call_index(1)] - // #[pallet::weight(T::WeightInfo::end_session())] // TODO - pub fn relay_chain_session_end( - origin: OriginFor, - end_index: SessionIndex, - block_authors: Vec<(AccountId32, u32)>, - ) -> DispatchResult { - T::AdminOrigin::ensure_origin_or_root(origin)?; - T::StakingApi::on_relay_chain_session_end(end_index, block_authors); - Ok(()) - } - - /// Called to report one or more new offenses on the relay chain. 
- #[pallet::call_index(2)] - // #[pallet::weight(T::WeightInfo::end_session())] // TODO - pub fn new_relay_chain_offence( - origin: OriginFor, - offences: Vec, - ) -> DispatchResult { - T::AdminOrigin::ensure_origin_or_root(origin)?; - T::StakingApi::on_new_offences(offences); - Ok(()) - } - } - - fn mk_relay_chain_call(call: SessionCalls) -> Instruction<()> { - Instruction::Transact { - origin_kind: OriginKind::Superuser, - fallback_max_weight: None, - call: RelayChainRuntimePallets::AhClient(call).encode().into(), - } - } -} diff --git a/substrate/frame/staking/src/pallet/impls.rs b/substrate/frame/staking/src/pallet/impls.rs index 0d4ec8c16e231..10e8c679fd6a1 100644 --- a/substrate/frame/staking/src/pallet/impls.rs +++ b/substrate/frame/staking/src/pallet/impls.rs @@ -521,12 +521,6 @@ impl Pallet { frame_support::print("Warning: A session appears to have been skipped."); Self::start_era(start_session); } - - // trigger election in the last session of the era - if start_session + 1 == next_active_era_start_session_index { - // TODO: trigger election - // Self::trigger_election(); - } } } diff --git a/umbrella/Cargo.toml b/umbrella/Cargo.toml index a1998e7bf2abd..9d010bd9b8e9d 100644 --- a/umbrella/Cargo.toml +++ b/umbrella/Cargo.toml @@ -136,8 +136,6 @@ std = [ "pallet-session?/std", "pallet-skip-feeless-payment?/std", "pallet-society?/std", - "pallet-staking-ah-client?/std", - "pallet-staking-rc-client?/std", "pallet-staking-reward-fn?/std", "pallet-staking-runtime-api?/std", "pallet-staking?/std", @@ -326,8 +324,6 @@ runtime-benchmarks = [ "pallet-session-benchmarking?/runtime-benchmarks", "pallet-skip-feeless-payment?/runtime-benchmarks", "pallet-society?/runtime-benchmarks", - "pallet-staking-ah-client?/runtime-benchmarks", - "pallet-staking-rc-client?/runtime-benchmarks", "pallet-staking?/runtime-benchmarks", "pallet-state-trie-migration?/runtime-benchmarks", "pallet-sudo?/runtime-benchmarks", @@ -468,8 +464,6 @@ try-runtime = [ 
"pallet-session?/try-runtime", "pallet-skip-feeless-payment?/try-runtime", "pallet-society?/try-runtime", - "pallet-staking-ah-client?/try-runtime", - "pallet-staking-rc-client?/try-runtime", "pallet-staking?/try-runtime", "pallet-state-trie-migration?/try-runtime", "pallet-statement?/try-runtime", @@ -690,8 +684,6 @@ runtime-full = [ "pallet-skip-feeless-payment", "pallet-society", "pallet-staking", - "pallet-staking-ah-client", - "pallet-staking-rc-client", "pallet-staking-reward-curve", "pallet-staking-reward-fn", "pallet-staking-runtime-api", @@ -1680,16 +1672,6 @@ default-features = false optional = true path = "../substrate/frame/staking" -[dependencies.pallet-staking-ah-client] -default-features = false -optional = true -path = "../substrate/frame/staking/ah-client" - -[dependencies.pallet-staking-rc-client] -default-features = false -optional = true -path = "../substrate/frame/staking/rc-client" - [dependencies.pallet-staking-reward-curve] default-features = false optional = true diff --git a/umbrella/src/lib.rs b/umbrella/src/lib.rs index 19f80aac4a451..89cd300b418f6 100644 --- a/umbrella/src/lib.rs +++ b/umbrella/src/lib.rs @@ -657,16 +657,6 @@ pub use pallet_society; #[cfg(feature = "pallet-staking")] pub use pallet_staking; -/// Pallet handling the communication with staking-rc-client. It's role is to glue the staking -/// pallet (on AssetHub chain) and session pallet (on Relay Chain) in a transparent way. -#[cfg(feature = "pallet-staking-ah-client")] -pub use pallet_staking_ah_client; - -/// Pallet handling the communication with staking-ah-client. It's role is to glue the staking -/// pallet (on AssetHub chain) and session pallet (on Relay Chain) in a transparent way. -#[cfg(feature = "pallet-staking-rc-client")] -pub use pallet_staking_rc_client; - /// Reward Curve for FRAME staking pallet. 
#[cfg(feature = "pallet-staking-reward-curve")] pub use pallet_staking_reward_curve; From 82513b0eb7213a40ca7d5d6bc4f83c42e61e5119 Mon Sep 17 00:00:00 2001 From: Ankan Date: Sun, 16 Mar 2025 23:22:31 +0100 Subject: [PATCH 02/38] Revert "[Staking] Bounded Slashing: Paginated Offence Processing & Slash Application (#7424)" This reverts commit dda2cb5969985ccbf67581e18eb7c579849e27bb. --- polkadot/runtime/test-runtime/src/lib.rs | 4 +- polkadot/runtime/westend/src/lib.rs | 1 - prdoc/pr_7424.prdoc | 37 - substrate/bin/node/runtime/src/lib.rs | 54 +- substrate/frame/babe/src/mock.rs | 4 +- substrate/frame/beefy/src/mock.rs | 4 +- .../test-staking-e2e/src/mock.rs | 9 +- substrate/frame/grandpa/src/mock.rs | 4 +- .../frame/offences/benchmarking/src/inner.rs | 15 +- .../frame/offences/benchmarking/src/mock.rs | 5 +- substrate/frame/root-offences/src/lib.rs | 17 +- substrate/frame/root-offences/src/mock.rs | 11 +- substrate/frame/root-offences/src/tests.rs | 12 +- .../frame/session/benchmarking/src/mock.rs | 6 +- substrate/frame/staking/src/benchmarking.rs | 70 +- substrate/frame/staking/src/lib.rs | 42 +- substrate/frame/staking/src/migrations.rs | 339 +++- substrate/frame/staking/src/mock.rs | 48 +- substrate/frame/staking/src/pallet/impls.rs | 386 ++--- substrate/frame/staking/src/pallet/mod.rs | 200 +-- substrate/frame/staking/src/slashing.rs | 393 +---- substrate/frame/staking/src/tests.rs | 1379 +++++++---------- substrate/frame/staking/src/weights.rs | 55 - 23 files changed, 1257 insertions(+), 1838 deletions(-) delete mode 100644 prdoc/pr_7424.prdoc diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index 226e22c078359..694077dd21c94 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ b/polkadot/runtime/test-runtime/src/lib.rs @@ -323,8 +323,8 @@ impl pallet_session::Config for Runtime { } impl pallet_session::historical::Config for Runtime { - type FullIdentification = (); - type FullIdentificationOf = 
pallet_staking::NullIdentity; + type FullIdentification = pallet_staking::Exposure; + type FullIdentificationOf = pallet_staking::ExposureOf; } pallet_staking_reward_curve::build! { diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index b5dc9b8f55cd1..86358afb23e51 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -1874,7 +1874,6 @@ pub mod migrations { parachains_shared::migration::MigrateToV1, parachains_scheduler::migration::MigrateV2ToV3, pallet_staking::migrations::v16::MigrateV15ToV16, - pallet_staking::migrations::v17::MigrateV16ToV17, pallet_session::migrations::v1::MigrateV0ToV1< Runtime, pallet_staking::migrations::v17::MigrateDisabledToSession, diff --git a/prdoc/pr_7424.prdoc b/prdoc/pr_7424.prdoc deleted file mode 100644 index e177f41371bc6..0000000000000 --- a/prdoc/pr_7424.prdoc +++ /dev/null @@ -1,37 +0,0 @@ -# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 -# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json - -title: 'Bounded Slashing: Paginated Offence Processing & Slash Application' - -doc: - - audience: Runtime Dev - description: | - This PR refactors the slashing mechanism in `pallet-staking` to be bounded by introducing paged offence processing and paged slash application. - - ### Key Changes - - Offences are queued instead of being processed immediately. - - Slashes are computed in pages, stored as a `StorageDoubleMap` with `(Validator, SlashFraction, PageIndex)` to uniquely identify them. - - Slashes are applied incrementally across multiple blocks instead of a single unbounded operation. - - New storage items: `OffenceQueue`, `ProcessingOffence`, `OffenceQueueEras`. - - Updated API for cancelling and applying slashes. - - Preliminary benchmarks added; further optimizations planned. - - This enables staking slashing to scale efficiently and removes a major blocker for staking migration to a parachain (AH). 
- -crates: -- name: pallet-babe - bump: patch -- name: pallet-staking - bump: major -- name: pallet-grandpa - bump: patch -- name: westend-runtime - bump: minor -- name: pallet-beefy - bump: patch -- name: pallet-offences-benchmarking - bump: patch -- name: pallet-session-benchmarking - bump: patch -- name: pallet-root-offences - bump: patch \ No newline at end of file diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index 94729a26b6d55..c618831c0a771 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -680,6 +680,8 @@ impl_opaque_keys! { #[cfg(feature = "staking-playground")] pub mod staking_playground { + use pallet_staking::Exposure; + use super::*; /// An adapter to make the chain work with --dev only, even though it is running a large staking @@ -714,43 +716,61 @@ pub mod staking_playground { } } - impl pallet_session::historical::SessionManager for AliceAsOnlyValidator { + impl pallet_session::historical::SessionManager> + for AliceAsOnlyValidator + { fn end_session(end_index: sp_staking::SessionIndex) { - >::end_session( - end_index, - ) + , + >>::end_session(end_index) } - fn new_session(new_index: sp_staking::SessionIndex) -> Option> { - >::new_session( - new_index, - ) + fn new_session( + new_index: sp_staking::SessionIndex, + ) -> Option)>> { + , + >>::new_session(new_index) .map(|_ignored| { // construct a fake exposure for alice. - vec![(sp_keyring::Sr25519Keyring::AliceStash.to_account_id().into(), ())] + vec![( + sp_keyring::Sr25519Keyring::AliceStash.to_account_id().into(), + pallet_staking::Exposure { + total: 1_000_000_000, + own: 1_000_000_000, + others: vec![], + }, + )] }) } fn new_session_genesis( new_index: sp_staking::SessionIndex, - ) -> Option> { + ) -> Option)>> { , >>::new_session_genesis(new_index) .map(|_ignored| { // construct a fake exposure for alice. 
vec![( sp_keyring::Sr25519Keyring::AliceStash.to_account_id().into(), - (), + pallet_staking::Exposure { + total: 1_000_000_000, + own: 1_000_000_000, + others: vec![], + }, )] }) } fn start_session(start_index: sp_staking::SessionIndex) { - >::start_session( - start_index, - ) + , + >>::start_session(start_index) } } } @@ -776,8 +796,8 @@ impl pallet_session::Config for Runtime { } impl pallet_session::historical::Config for Runtime { - type FullIdentification = (); - type FullIdentificationOf = pallet_staking::NullIdentity; + type FullIdentification = pallet_staking::Exposure; + type FullIdentificationOf = pallet_staking::ExposureOf; } pallet_staking_reward_curve::build! { diff --git a/substrate/frame/babe/src/mock.rs b/substrate/frame/babe/src/mock.rs index ea977a547fee8..eeaebe02d3e8b 100644 --- a/substrate/frame/babe/src/mock.rs +++ b/substrate/frame/babe/src/mock.rs @@ -105,8 +105,8 @@ impl pallet_session::Config for Test { } impl pallet_session::historical::Config for Test { - type FullIdentification = (); - type FullIdentificationOf = pallet_staking::NullIdentity; + type FullIdentification = pallet_staking::Exposure; + type FullIdentificationOf = pallet_staking::ExposureOf; } impl pallet_authorship::Config for Test { diff --git a/substrate/frame/beefy/src/mock.rs b/substrate/frame/beefy/src/mock.rs index 275bf18fe873d..46491996623fe 100644 --- a/substrate/frame/beefy/src/mock.rs +++ b/substrate/frame/beefy/src/mock.rs @@ -189,8 +189,8 @@ impl pallet_session::Config for Test { } impl pallet_session::historical::Config for Test { - type FullIdentification = (); - type FullIdentificationOf = pallet_staking::NullIdentity; + type FullIdentification = pallet_staking::Exposure; + type FullIdentificationOf = pallet_staking::ExposureOf; } impl pallet_authorship::Config for Test { diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs index 
e4b77975707c4..135a52fece67b 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs @@ -147,8 +147,8 @@ impl pallet_session::Config for Runtime { type WeightInfo = (); } impl pallet_session::historical::Config for Runtime { - type FullIdentification = (); - type FullIdentificationOf = pallet_staking::NullIdentity; + type FullIdentification = pallet_staking::Exposure; + type FullIdentificationOf = pallet_staking::ExposureOf; } frame_election_provider_support::generate_solution_type!( @@ -909,7 +909,10 @@ pub(crate) fn on_offence_now( // Add offence to validator, slash it. pub(crate) fn add_slash(who: &AccountId) { on_offence_now( - &[OffenceDetails { offender: (*who, ()), reporters: vec![] }], + &[OffenceDetails { + offender: (*who, Staking::eras_stakers(active_era(), who)), + reporters: vec![], + }], &[Perbill::from_percent(10)], ); } diff --git a/substrate/frame/grandpa/src/mock.rs b/substrate/frame/grandpa/src/mock.rs index 482e767d32fc0..2fd0cbb5ffdcb 100644 --- a/substrate/frame/grandpa/src/mock.rs +++ b/substrate/frame/grandpa/src/mock.rs @@ -109,8 +109,8 @@ impl pallet_session::Config for Test { } impl pallet_session::historical::Config for Test { - type FullIdentification = (); - type FullIdentificationOf = pallet_staking::NullIdentity; + type FullIdentification = pallet_staking::Exposure; + type FullIdentificationOf = pallet_staking::ExposureOf; } impl pallet_authorship::Config for Test { diff --git a/substrate/frame/offences/benchmarking/src/inner.rs b/substrate/frame/offences/benchmarking/src/inner.rs index fa4349d1d94c8..3d3cd470bc24c 100644 --- a/substrate/frame/offences/benchmarking/src/inner.rs +++ b/substrate/frame/offences/benchmarking/src/inner.rs @@ -170,13 +170,6 @@ fn make_offenders( Ok(id_tuples) } -#[cfg(test)] -fn run_staking_next_block() { - use frame_support::traits::Hooks; - 
System::::set_block_number(System::::block_number().saturating_add(1u32.into())); - Staking::::on_initialize(System::::block_number()); -} - #[cfg(test)] fn assert_all_slashes_applied(offender_count: usize) where @@ -189,10 +182,10 @@ where // make sure that all slashes have been applied // deposit to reporter + reporter account endowed. assert_eq!(System::::read_events_for_pallet::>().len(), 2); - // (n nominators + one validator) * slashed + Slash Reported + Slash Computed + // (n nominators + one validator) * slashed + Slash Reported assert_eq!( System::::read_events_for_pallet::>().len(), - 1 * (offender_count + 1) as usize + 2 + 1 * (offender_count + 1) as usize + 1 ); // offence assert_eq!(System::::read_events_for_pallet::().len(), 1); @@ -239,8 +232,6 @@ mod benchmarks { #[cfg(test)] { - // slashes applied at the next block. - run_staking_next_block::(); assert_all_slashes_applied::(n as usize); } @@ -275,8 +266,6 @@ mod benchmarks { } #[cfg(test)] { - // slashes applied at the next block. - run_staking_next_block::(); assert_all_slashes_applied::(n as usize); } diff --git a/substrate/frame/offences/benchmarking/src/mock.rs b/substrate/frame/offences/benchmarking/src/mock.rs index 63e440d9e0042..f37dbf55f52f7 100644 --- a/substrate/frame/offences/benchmarking/src/mock.rs +++ b/substrate/frame/offences/benchmarking/src/mock.rs @@ -33,6 +33,7 @@ use sp_runtime::{ }; type AccountId = u64; +type Balance = u64; #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { @@ -53,8 +54,8 @@ impl pallet_timestamp::Config for Test { type WeightInfo = (); } impl pallet_session::historical::Config for Test { - type FullIdentification = (); - type FullIdentificationOf = pallet_staking::NullIdentity; + type FullIdentification = pallet_staking::Exposure; + type FullIdentificationOf = pallet_staking::ExposureOf; } sp_runtime::impl_opaque_keys! 
{ diff --git a/substrate/frame/root-offences/src/lib.rs b/substrate/frame/root-offences/src/lib.rs index 8e91c4ecfd1cd..fd6ffc55e40c3 100644 --- a/substrate/frame/root-offences/src/lib.rs +++ b/substrate/frame/root-offences/src/lib.rs @@ -31,7 +31,7 @@ extern crate alloc; use alloc::vec::Vec; use pallet_session::historical::IdentificationTuple; -use pallet_staking::Pallet as Staking; +use pallet_staking::{BalanceOf, Exposure, ExposureOf, Pallet as Staking}; use sp_runtime::Perbill; use sp_staking::offence::OnOffenceHandler; @@ -49,8 +49,11 @@ pub mod pallet { + pallet_staking::Config + pallet_session::Config::AccountId> + pallet_session::historical::Config< - FullIdentification = (), - FullIdentificationOf = pallet_staking::NullIdentity, + FullIdentification = Exposure< + ::AccountId, + BalanceOf, + >, + FullIdentificationOf = ExposureOf, > { type RuntimeEvent: From> + IsType<::RuntimeEvent>; @@ -103,11 +106,15 @@ pub mod pallet { fn get_offence_details( offenders: Vec<(T::AccountId, Perbill)>, ) -> Result>, DispatchError> { + let now = pallet_staking::ActiveEra::::get() + .map(|e| e.index) + .ok_or(Error::::FailedToGetActiveEra)?; + Ok(offenders .clone() .into_iter() .map(|(o, _)| OffenceDetails:: { - offender: (o.clone(), ()), + offender: (o.clone(), Staking::::eras_stakers(now, &o)), reporters: Default::default(), }) .collect()) @@ -117,7 +124,7 @@ pub mod pallet { fn submit_offence(offenders: &[OffenceDetails], slash_fraction: &[Perbill]) { let session_index = as frame_support::traits::ValidatorSet>::session_index(); - as OnOffenceHandler< + as OnOffenceHandler< T::AccountId, IdentificationTuple, Weight, diff --git a/substrate/frame/root-offences/src/mock.rs b/substrate/frame/root-offences/src/mock.rs index ce55bdcbdd3c4..09223802f67d5 100644 --- a/substrate/frame/root-offences/src/mock.rs +++ b/substrate/frame/root-offences/src/mock.rs @@ -28,7 +28,7 @@ use frame_support::{ traits::{ConstU32, ConstU64, OneSessionHandler}, BoundedVec, }; -use 
pallet_staking::{BalanceOf, StakerStatus}; +use pallet_staking::StakerStatus; use sp_core::ConstBool; use sp_runtime::{curve::PiecewiseLinear, testing::UintAuthorityId, traits::Zero, BuildStorage}; use sp_staking::{EraIndex, SessionIndex}; @@ -148,8 +148,8 @@ impl pallet_staking::Config for Test { } impl pallet_session::historical::Config for Test { - type FullIdentification = (); - type FullIdentificationOf = pallet_staking::NullIdentity; + type FullIdentification = pallet_staking::Exposure; + type FullIdentificationOf = pallet_staking::ExposureOf; } sp_runtime::impl_opaque_keys! { @@ -298,11 +298,6 @@ pub(crate) fn run_to_block(n: BlockNumber) { ); } -/// Progress by n block. -pub(crate) fn advance_blocks(n: u64) { - run_to_block(System::block_number() + n); -} - pub(crate) fn active_era() -> EraIndex { pallet_staking::ActiveEra::::get().unwrap().index } diff --git a/substrate/frame/root-offences/src/tests.rs b/substrate/frame/root-offences/src/tests.rs index da6c49895bec1..289bb708efbbc 100644 --- a/substrate/frame/root-offences/src/tests.rs +++ b/substrate/frame/root-offences/src/tests.rs @@ -17,10 +17,7 @@ use super::*; use frame_support::{assert_err, assert_ok}; -use mock::{ - active_era, advance_blocks, start_session, ExtBuilder, RootOffences, RuntimeOrigin, System, - Test as T, -}; +use mock::{active_era, start_session, ExtBuilder, RootOffences, RuntimeOrigin, System, Test as T}; use pallet_staking::asset; #[test] @@ -45,10 +42,6 @@ fn create_offence_works_given_root_origin() { assert_ok!(RootOffences::create_offence(RuntimeOrigin::root(), offenders.clone())); System::assert_last_event(Event::OffenceCreated { offenders }.into()); - - // offence is processed in the following block. - advance_blocks(1); - // the slash should be applied right away. 
assert_eq!(asset::staked::(&11), 500); @@ -73,9 +66,6 @@ fn create_offence_wont_slash_non_active_validators() { System::assert_last_event(Event::OffenceCreated { offenders }.into()); - // advance to the next block so offence gets processed. - advance_blocks(1); - // so 31 didn't get slashed. assert_eq!(asset::staked::(&31), 500); diff --git a/substrate/frame/session/benchmarking/src/mock.rs b/substrate/frame/session/benchmarking/src/mock.rs index 746c3b12e972b..235209f14cad2 100644 --- a/substrate/frame/session/benchmarking/src/mock.rs +++ b/substrate/frame/session/benchmarking/src/mock.rs @@ -27,11 +27,11 @@ use frame_support::{ derive_impl, parameter_types, traits::{ConstU32, ConstU64}, }; -use pallet_staking::NullIdentity; use sp_runtime::{traits::IdentityLookup, BuildStorage, KeyTypeId}; type AccountId = u64; type Nonce = u32; +type Balance = u64; type Block = frame_system::mocking::MockBlock; @@ -68,8 +68,8 @@ impl pallet_timestamp::Config for Test { type WeightInfo = (); } impl pallet_session::historical::Config for Test { - type FullIdentification = (); - type FullIdentificationOf = NullIdentity; + type FullIdentification = pallet_staking::Exposure; + type FullIdentificationOf = pallet_staking::ExposureOf; } sp_runtime::impl_opaque_keys! 
{ diff --git a/substrate/frame/staking/src/benchmarking.rs b/substrate/frame/staking/src/benchmarking.rs index c4299449196e6..ce4f0178a2480 100644 --- a/substrate/frame/staking/src/benchmarking.rs +++ b/substrate/frame/staking/src/benchmarking.rs @@ -802,33 +802,21 @@ mod benchmarks { #[benchmark] fn cancel_deferred_slash(s: Linear<1, MAX_SLASHES>) { + let mut unapplied_slashes = Vec::new(); let era = EraIndex::one(); - let dummy_account = || T::AccountId::decode(&mut TrailingZeroInput::zeroes()).unwrap(); - - // Insert `s` unapplied slashes with the new key structure - for i in 0..s { - let slash_key = (dummy_account(), Perbill::from_percent(i as u32 % 100), i); - let unapplied_slash = UnappliedSlash:: { - validator: slash_key.0.clone(), - own: Zero::zero(), - others: WeakBoundedVec::default(), - reporter: Default::default(), - payout: Zero::zero(), - }; - UnappliedSlashes::::insert(era, slash_key.clone(), unapplied_slash); + let dummy = || T::AccountId::decode(&mut TrailingZeroInput::zeroes()).unwrap(); + for _ in 0..MAX_SLASHES { + unapplied_slashes + .push(UnappliedSlash::>::default_from(dummy())); } + UnappliedSlashes::::insert(era, &unapplied_slashes); - let slash_keys: Vec<_> = (0..s) - .map(|i| (dummy_account(), Perbill::from_percent(i as u32 % 100), i)) - .collect(); + let slash_indices: Vec = (0..s).collect(); #[extrinsic_call] - _(RawOrigin::Root, era, slash_keys.clone()); + _(RawOrigin::Root, era, slash_indices); - // Ensure all `s` slashes are removed - for key in &slash_keys { - assert!(UnappliedSlashes::::get(era, key).is_none()); - } + assert_eq!(UnappliedSlashes::::get(&era).len(), (MAX_SLASHES - s) as usize); } #[benchmark] @@ -1149,46 +1137,6 @@ mod benchmarks { Ok(()) } - #[benchmark] - fn apply_slash() -> Result<(), BenchmarkError> { - let era = EraIndex::one(); - ActiveEra::::put(ActiveEraInfo { index: era, start: None }); - let (validator, nominators) = create_validator_with_nominators::( - T::MaxExposurePageSize::get() as u32, - 
T::MaxExposurePageSize::get() as u32, - false, - true, - RewardDestination::Staked, - era, - )?; - let slash_fraction = Perbill::from_percent(10); - let page_index = 0; - let slashed_balance = BalanceOf::::from(10u32); - - let slash_key = (validator.clone(), slash_fraction, page_index); - let slashed_nominators = - nominators.iter().map(|(n, _)| (n.clone(), slashed_balance)).collect::>(); - - let unapplied_slash = UnappliedSlash:: { - validator: validator.clone(), - own: slashed_balance, - others: WeakBoundedVec::force_from(slashed_nominators, None), - reporter: Default::default(), - payout: Zero::zero(), - }; - - // Insert an unapplied slash to be processed. - UnappliedSlashes::::insert(era, slash_key.clone(), unapplied_slash); - - #[extrinsic_call] - _(RawOrigin::Signed(validator.clone()), era, slash_key.clone()); - - // Ensure the slash has been applied and removed. - assert!(UnappliedSlashes::::get(era, &slash_key).is_none()); - - Ok(()) - } - #[benchmark] fn manual_slash() -> Result<(), BenchmarkError> { let era = EraIndex::zero(); diff --git a/substrate/frame/staking/src/lib.rs b/substrate/frame/staking/src/lib.rs index 922df9f8c3289..1247470edf4ce 100644 --- a/substrate/frame/staking/src/lib.rs +++ b/substrate/frame/staking/src/lib.rs @@ -353,7 +353,7 @@ use frame_support::{ ConstU32, Contains, Defensive, DefensiveMax, DefensiveSaturating, Get, LockIdentifier, }, weights::Weight, - BoundedVec, CloneNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound, WeakBoundedVec, + BoundedVec, CloneNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound, }; use scale_info::TypeInfo; use sp_runtime::{ @@ -923,19 +923,31 @@ impl { +#[derive(Encode, Decode, RuntimeDebug, TypeInfo)] +pub struct UnappliedSlash { /// The stash ID of the offending validator. - validator: T::AccountId, + validator: AccountId, /// The validator's own slash. - own: BalanceOf, + own: Balance, /// All other slashed stakers and amounts. 
- others: WeakBoundedVec<(T::AccountId, BalanceOf), T::MaxExposurePageSize>, + others: Vec<(AccountId, Balance)>, /// Reporters of the offence; bounty payout recipients. - reporter: Option, + reporters: Vec, /// The amount of payout. - payout: BalanceOf, + payout: Balance, +} + +impl UnappliedSlash { + /// Initializes the default object using the given `validator`. + pub fn default_from(validator: AccountId) -> Self { + Self { + validator, + own: Zero::zero(), + others: vec![], + reporters: vec![], + payout: Zero::zero(), + } + } } /// Something that defines the maximum number of nominations per nominator based on a curve. @@ -983,7 +995,10 @@ pub trait SessionInterface { impl SessionInterface<::AccountId> for T where T: pallet_session::Config::AccountId>, - T: pallet_session::historical::Config, + T: pallet_session::historical::Config< + FullIdentification = Exposure<::AccountId, BalanceOf>, + FullIdentificationOf = ExposureOf, + >, T::SessionHandler: pallet_session::SessionHandler<::AccountId>, T::SessionManager: pallet_session::SessionManager<::AccountId>, T::ValidatorIdOf: Convert< @@ -1127,13 +1142,6 @@ impl Convert } } -pub struct NullIdentity; -impl Convert> for NullIdentity { - fn convert(_: T) -> Option<()> { - Some(()) - } -} - /// Filter historical offences out and only allow those from the bonding period. pub struct FilterHistoricalOffences { _inner: core::marker::PhantomData<(T, R)>, diff --git a/substrate/frame/staking/src/migrations.rs b/substrate/frame/staking/src/migrations.rs index 5b0118da67ef7..96c63a657da93 100644 --- a/substrate/frame/staking/src/migrations.rs +++ b/substrate/frame/staking/src/migrations.rs @@ -18,12 +18,12 @@ //! [CHANGELOG.md](https://github.com/paritytech/polkadot-sdk/blob/master/substrate/frame/staking/CHANGELOG.md). 
use super::*; +use frame_election_provider_support::SortedListProvider; use frame_support::{ migrations::VersionedMigration, pallet_prelude::ValueQuery, storage_alias, traits::{GetStorageVersion, OnRuntimeUpgrade, UncheckedOnRuntimeUpgrade}, - Twox64Concat, }; #[cfg(feature = "try-runtime")] @@ -36,6 +36,10 @@ use sp_runtime::TryRuntimeError; /// Obsolete from v13. Keeping around to make encoding/decoding of old migration code easier. #[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] enum ObsoleteReleases { + V1_0_0Ancient, + V2_0_0, + V3_0_0, + V4_0_0, V5_0_0, // blockable validators. V6_0_0, // removal of all storage associated with offchain phragmen. V7_0_0, // keep track of number of nominators / validators in map @@ -56,92 +60,13 @@ impl Default for ObsoleteReleases { #[storage_alias] type StorageVersion = StorageValue, ObsoleteReleases, ValueQuery>; -/// Migrates `UnappliedSlashes` to a new storage structure to support paged slashing. -/// This ensures that slashing can be processed in batches, preventing large storage operations in a -/// single block. pub mod v17 { use super::*; - #[derive(Encode, Decode, TypeInfo, MaxEncodedLen)] - struct OldUnappliedSlash { - validator: T::AccountId, - /// The validator's own slash. - own: BalanceOf, - /// All other slashed stakers and amounts. - others: Vec<(T::AccountId, BalanceOf)>, - /// Reporters of the offence; bounty payout recipients. - reporters: Vec, - /// The amount of payout. 
- payout: BalanceOf, - } - - #[frame_support::storage_alias] - pub type OldUnappliedSlashes = - StorageMap, Twox64Concat, EraIndex, Vec>, ValueQuery>; - #[frame_support::storage_alias] pub type DisabledValidators = StorageValue, BoundedVec<(u32, OffenceSeverity), ConstU32<100>>, ValueQuery>; - pub struct VersionUncheckedMigrateV16ToV17(core::marker::PhantomData); - impl UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateV16ToV17 { - fn on_runtime_upgrade() -> Weight { - let mut weight: Weight = Weight::zero(); - - OldUnappliedSlashes::::drain().for_each(|(era, slashes)| { - weight.saturating_accrue(T::DbWeight::get().reads(1)); - - for slash in slashes { - let validator = slash.validator.clone(); - let new_slash = UnappliedSlash { - validator: validator.clone(), - own: slash.own, - others: WeakBoundedVec::force_from(slash.others, None), - payout: slash.payout, - reporter: slash.reporters.first().cloned(), - }; - - // creating a slash key which is improbable to conflict with a new offence. 
- let slash_key = (validator, Perbill::from_percent(99), 9999); - UnappliedSlashes::::insert(era, slash_key, new_slash); - weight.saturating_accrue(T::DbWeight::get().writes(1)); - } - }); - - weight - } - - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, sp_runtime::TryRuntimeError> { - let mut expected_slashes: u32 = 0; - OldUnappliedSlashes::::iter().for_each(|(_, slashes)| { - expected_slashes += slashes.len() as u32; - }); - - Ok(expected_slashes.encode()) - } - - #[cfg(feature = "try-runtime")] - fn post_upgrade(state: Vec) -> Result<(), TryRuntimeError> { - let expected_slash_count = - u32::decode(&mut state.as_slice()).expect("Failed to decode state"); - - let actual_slash_count = UnappliedSlashes::::iter().count() as u32; - - ensure!(expected_slash_count == actual_slash_count, "Slash count mismatch"); - - Ok(()) - } - } - - pub type MigrateV16ToV17 = VersionedMigration< - 16, - 17, - VersionUncheckedMigrateV16ToV17, - Pallet, - ::DbWeight, - >; - pub struct MigrateDisabledToSession(core::marker::PhantomData); impl pallet_session::migrations::v1::MigrateDisabledValidators for MigrateDisabledToSession @@ -543,3 +468,257 @@ pub mod v11 { } } } + +pub mod v10 { + use super::*; + use frame_support::storage_alias; + + #[storage_alias] + type EarliestUnappliedSlash = StorageValue, EraIndex>; + + /// Apply any pending slashes that where queued. + /// + /// That means we might slash someone a bit too early, but we will definitely + /// won't forget to slash them. The cap of 512 is somewhat randomly taken to + /// prevent us from iterating over an arbitrary large number of keys `on_runtime_upgrade`. 
+ pub struct MigrateToV10(core::marker::PhantomData); + impl OnRuntimeUpgrade for MigrateToV10 { + fn on_runtime_upgrade() -> frame_support::weights::Weight { + if StorageVersion::::get() == ObsoleteReleases::V9_0_0 { + let pending_slashes = UnappliedSlashes::::iter().take(512); + for (era, slashes) in pending_slashes { + for slash in slashes { + // in the old slashing scheme, the slash era was the key at which we read + // from `UnappliedSlashes`. + log!(warn, "prematurely applying a slash ({:?}) for era {:?}", slash, era); + slashing::apply_slash::(slash, era); + } + } + + EarliestUnappliedSlash::::kill(); + StorageVersion::::put(ObsoleteReleases::V10_0_0); + + log!(info, "MigrateToV10 executed successfully"); + T::DbWeight::get().reads_writes(1, 2) + } else { + log!(warn, "MigrateToV10 should be removed."); + T::DbWeight::get().reads(1) + } + } + } +} + +pub mod v9 { + use super::*; + #[cfg(feature = "try-runtime")] + use alloc::vec::Vec; + #[cfg(feature = "try-runtime")] + use codec::{Decode, Encode}; + + /// Migration implementation that injects all validators into sorted list. + /// + /// This is only useful for chains that started their `VoterList` just based on nominators. 
+ pub struct InjectValidatorsIntoVoterList(core::marker::PhantomData); + impl OnRuntimeUpgrade for InjectValidatorsIntoVoterList { + fn on_runtime_upgrade() -> Weight { + if StorageVersion::::get() == ObsoleteReleases::V8_0_0 { + let prev_count = T::VoterList::count(); + let weight_of_cached = Pallet::::weight_of_fn(); + for (v, _) in Validators::::iter() { + let weight = weight_of_cached(&v); + let _ = T::VoterList::on_insert(v.clone(), weight).map_err(|err| { + log!(warn, "failed to insert {:?} into VoterList: {:?}", v, err) + }); + } + + log!( + info, + "injected a total of {} new voters, prev count: {} next count: {}, updating to version 9", + Validators::::count(), + prev_count, + T::VoterList::count(), + ); + + StorageVersion::::put(ObsoleteReleases::V9_0_0); + T::BlockWeights::get().max_block + } else { + log!( + warn, + "InjectValidatorsIntoVoterList being executed on the wrong storage \ + version, expected ObsoleteReleases::V8_0_0" + ); + T::DbWeight::get().reads(1) + } + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, TryRuntimeError> { + frame_support::ensure!( + StorageVersion::::get() == ObsoleteReleases::V8_0_0, + "must upgrade linearly" + ); + + let prev_count = T::VoterList::count(); + Ok(prev_count.encode()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(prev_count: Vec) -> Result<(), TryRuntimeError> { + let prev_count: u32 = Decode::decode(&mut prev_count.as_slice()).expect( + "the state parameter should be something that was generated by pre_upgrade", + ); + let post_count = T::VoterList::count(); + let validators = Validators::::count(); + ensure!( + post_count == prev_count + validators, + "`VoterList` count after the migration must equal to the sum of \ + previous count and the current number of validators" + ); + + frame_support::ensure!( + StorageVersion::::get() == ObsoleteReleases::V9_0_0, + "must upgrade" + ); + Ok(()) + } + } +} + +pub mod v8 { + use super::*; + use crate::{Config, Nominators, Pallet, 
Weight}; + use frame_election_provider_support::SortedListProvider; + use frame_support::traits::Get; + + #[cfg(feature = "try-runtime")] + pub fn pre_migrate() -> Result<(), &'static str> { + frame_support::ensure!( + StorageVersion::::get() == ObsoleteReleases::V7_0_0, + "must upgrade linearly" + ); + + crate::log!(info, "👜 staking bags-list migration passes PRE migrate checks ✅",); + Ok(()) + } + + /// Migration to sorted `VoterList`. + pub fn migrate() -> Weight { + if StorageVersion::::get() == ObsoleteReleases::V7_0_0 { + crate::log!(info, "migrating staking to ObsoleteReleases::V8_0_0"); + + let migrated = T::VoterList::unsafe_regenerate( + Nominators::::iter().map(|(id, _)| id), + Pallet::::weight_of_fn(), + ); + + StorageVersion::::put(ObsoleteReleases::V8_0_0); + crate::log!( + info, + "👜 completed staking migration to ObsoleteReleases::V8_0_0 with {} voters migrated", + migrated, + ); + + T::BlockWeights::get().max_block + } else { + T::DbWeight::get().reads(1) + } + } + + #[cfg(feature = "try-runtime")] + pub fn post_migrate() -> Result<(), &'static str> { + T::VoterList::try_state().map_err(|_| "VoterList is not in a sane state.")?; + crate::log!(info, "👜 staking bags-list migration passes POST migrate checks ✅",); + Ok(()) + } +} + +pub mod v7 { + use super::*; + use frame_support::storage_alias; + + #[storage_alias] + type CounterForValidators = StorageValue, u32>; + #[storage_alias] + type CounterForNominators = StorageValue, u32>; + + pub fn pre_migrate() -> Result<(), &'static str> { + assert!( + CounterForValidators::::get().unwrap().is_zero(), + "CounterForValidators already set." + ); + assert!( + CounterForNominators::::get().unwrap().is_zero(), + "CounterForNominators already set." 
+ ); + assert!(Validators::::count().is_zero(), "Validators already set."); + assert!(Nominators::::count().is_zero(), "Nominators already set."); + assert!(StorageVersion::::get() == ObsoleteReleases::V6_0_0); + Ok(()) + } + + pub fn migrate() -> Weight { + log!(info, "Migrating staking to ObsoleteReleases::V7_0_0"); + let validator_count = Validators::::iter().count() as u32; + let nominator_count = Nominators::::iter().count() as u32; + + CounterForValidators::::put(validator_count); + CounterForNominators::::put(nominator_count); + + StorageVersion::::put(ObsoleteReleases::V7_0_0); + log!(info, "Completed staking migration to ObsoleteReleases::V7_0_0"); + + T::DbWeight::get().reads_writes(validator_count.saturating_add(nominator_count).into(), 2) + } +} + +pub mod v6 { + use super::*; + use frame_support::{storage_alias, traits::Get, weights::Weight}; + + // NOTE: value type doesn't matter, we just set it to () here. + #[storage_alias] + type SnapshotValidators = StorageValue, ()>; + #[storage_alias] + type SnapshotNominators = StorageValue, ()>; + #[storage_alias] + type QueuedElected = StorageValue, ()>; + #[storage_alias] + type QueuedScore = StorageValue, ()>; + #[storage_alias] + type EraElectionStatus = StorageValue, ()>; + #[storage_alias] + type IsCurrentSessionFinal = StorageValue, ()>; + + /// check to execute prior to migration. + pub fn pre_migrate() -> Result<(), &'static str> { + // these may or may not exist. + log!(info, "SnapshotValidators.exits()? {:?}", SnapshotValidators::::exists()); + log!(info, "SnapshotNominators.exits()? {:?}", SnapshotNominators::::exists()); + log!(info, "QueuedElected.exits()? {:?}", QueuedElected::::exists()); + log!(info, "QueuedScore.exits()? {:?}", QueuedScore::::exists()); + // these must exist. + assert!( + IsCurrentSessionFinal::::exists(), + "IsCurrentSessionFinal storage item not found!" 
+ ); + assert!(EraElectionStatus::::exists(), "EraElectionStatus storage item not found!"); + Ok(()) + } + + /// Migrate storage to v6. + pub fn migrate() -> Weight { + log!(info, "Migrating staking to ObsoleteReleases::V6_0_0"); + + SnapshotValidators::::kill(); + SnapshotNominators::::kill(); + QueuedElected::::kill(); + QueuedScore::::kill(); + EraElectionStatus::::kill(); + IsCurrentSessionFinal::::kill(); + + StorageVersion::::put(ObsoleteReleases::V6_0_0); + + log!(info, "Done."); + T::DbWeight::get().writes(6 + 1) + } +} diff --git a/substrate/frame/staking/src/mock.rs b/substrate/frame/staking/src/mock.rs index cf1b2c7912aef..d3eb74a98b1a8 100644 --- a/substrate/frame/staking/src/mock.rs +++ b/substrate/frame/staking/src/mock.rs @@ -154,8 +154,8 @@ impl pallet_session::Config for Test { } impl pallet_session::historical::Config for Test { - type FullIdentification = (); - type FullIdentificationOf = NullIdentity; + type FullIdentification = crate::Exposure; + type FullIdentificationOf = crate::ExposureOf; } impl pallet_authorship::Config for Test { type FindAuthor = Author11; @@ -728,11 +728,6 @@ pub(crate) fn run_to_block(n: BlockNumber) { ); } -/// Progress by n block. -pub(crate) fn advance_blocks(n: u64) { - run_to_block(System::block_number() + n); -} - /// Progresses from the current block number (whatever that may be) to the `P * session_index + 1`. pub(crate) fn start_session(end_session_idx: SessionIndex) { let period = Period::get(); @@ -835,25 +830,11 @@ pub(crate) fn on_offence_in_era( >], slash_fraction: &[Perbill], era: EraIndex, - advance_processing_blocks: bool, ) { - // counter to keep track of how many blocks we need to advance to process all the offences. 
- let mut process_blocks = 0u32; - for detail in offenders { - process_blocks += EraInfo::::get_page_count(era, &detail.offender.0); - } - let bonded_eras = crate::BondedEras::::get(); for &(bonded_era, start_session) in bonded_eras.iter() { if bonded_era == era { - let _ = >::on_offence( - offenders, - slash_fraction, - start_session, - ); - if advance_processing_blocks { - advance_blocks(process_blocks as u64); - } + let _ = Staking::on_offence(offenders, slash_fraction, start_session); return } else if bonded_era > era { break @@ -866,9 +847,6 @@ pub(crate) fn on_offence_in_era( slash_fraction, pallet_staking::ErasStartSessionIndex::::get(era).unwrap(), ); - if advance_processing_blocks { - advance_blocks(process_blocks as u64); - } } else { panic!("cannot slash in era {}", era); } @@ -880,23 +858,19 @@ pub(crate) fn on_offence_now( pallet_session::historical::IdentificationTuple, >], slash_fraction: &[Perbill], - advance_processing_blocks: bool, ) { let now = pallet_staking::ActiveEra::::get().unwrap().index; - on_offence_in_era(offenders, slash_fraction, now, advance_processing_blocks); -} -pub(crate) fn offence_from( - offender: AccountId, - reporter: Option, -) -> OffenceDetails> { - OffenceDetails { - offender: (offender, ()), - reporters: reporter.map(|r| vec![(r)]).unwrap_or_default(), - } + on_offence_in_era(offenders, slash_fraction, now) } pub(crate) fn add_slash(who: &AccountId) { - on_offence_now(&[offence_from(*who, None)], &[Perbill::from_percent(10)], true); + on_offence_now( + &[OffenceDetails { + offender: (*who, Staking::eras_stakers(active_era(), who)), + reporters: vec![], + }], + &[Perbill::from_percent(10)], + ); } /// Make all validator and nominator request their payment diff --git a/substrate/frame/staking/src/pallet/impls.rs b/substrate/frame/staking/src/pallet/impls.rs index 10e8c679fd6a1..66bfdc16aea55 100644 --- a/substrate/frame/staking/src/pallet/impls.rs +++ b/substrate/frame/staking/src/pallet/impls.rs @@ -34,12 +34,14 @@ use 
frame_support::{ use frame_system::{pallet_prelude::BlockNumberFor, RawOrigin}; use pallet_session::historical; use sp_runtime::{ - traits::{Bounded, CheckedAdd, Convert, SaturatedConversion, Saturating, StaticLookup, Zero}, + traits::{ + Bounded, CheckedAdd, Convert, One, SaturatedConversion, Saturating, StaticLookup, Zero, + }, ArithmeticError, DispatchResult, Perbill, Percent, }; use sp_staking::{ currency_to_vote::CurrencyToVote, - offence::{OffenceDetails, OffenceSeverity, OnOffenceHandler}, + offence::{OffenceDetails, OnOffenceHandler}, EraIndex, OnStakingUpdate, Page, SessionIndex, Stake, StakingAccount::{self, Controller, Stash}, StakingInterface, @@ -47,16 +49,15 @@ use sp_staking::{ use crate::{ asset, election_size_tracker::StaticTracker, log, slashing, weights::WeightInfo, ActiveEraInfo, - BalanceOf, BoundedExposuresOf, EraInfo, EraPayout, Exposure, Forcing, IndividualExposure, - LedgerIntegrityState, MaxNominationsOf, MaxWinnersOf, MaxWinnersPerPageOf, Nominations, - NominationsQuota, PositiveImbalanceOf, RewardDestination, SessionInterface, SnapshotStatus, - StakingLedger, ValidatorPrefs, STAKING_ID, + BalanceOf, BoundedExposuresOf, EraInfo, EraPayout, Exposure, ExposureOf, Forcing, + IndividualExposure, LedgerIntegrityState, MaxNominationsOf, MaxWinnersOf, MaxWinnersPerPageOf, + Nominations, NominationsQuota, PositiveImbalanceOf, RewardDestination, SessionInterface, + SnapshotStatus, StakingLedger, ValidatorPrefs, STAKING_ID, }; use alloc::{boxed::Box, vec, vec::Vec}; use super::pallet::*; -use crate::slashing::OffenceRecord; #[cfg(feature = "try-runtime")] use frame_support::ensure; #[cfg(any(test, feature = "try-runtime"))] @@ -575,6 +576,8 @@ impl Pallet { } } }); + + Self::apply_unapplied_slashes(active_era); } /// Compute payout for era. @@ -976,19 +979,17 @@ impl Pallet { } /// Apply previously-unapplied slashes on the beginning of a new era, after a delay. 
- pub(crate) fn apply_unapplied_slashes(active_era: EraIndex) { - let mut slashes = UnappliedSlashes::::iter_prefix(&active_era).take(1); - if let Some((key, slash)) = slashes.next() { - log!( - debug, - "🦹 found slash {:?} scheduled to be executed in era {:?}", - slash, - active_era, - ); - let offence_era = active_era.saturating_sub(T::SlashDeferDuration::get()); - slashing::apply_slash::(slash, offence_era); - // remove the slash - UnappliedSlashes::::remove(&active_era, &key); + fn apply_unapplied_slashes(active_era: EraIndex) { + let era_slashes = UnappliedSlashes::::take(&active_era); + log!( + debug, + "found {} slashes scheduled to be executed in era {:?}", + era_slashes.len(), + active_era, + ); + for slash in era_slashes { + let slash_era = active_era.saturating_sub(T::SlashDeferDuration::get()); + slashing::apply_slash::(slash, slash_era); } } @@ -1414,6 +1415,129 @@ impl Pallet { Ok(()) } + + pub fn on_offence( + offenders: impl Iterator>, + slash_fractions: &[Perbill], + slash_session: SessionIndex, + ) -> Weight { + let reward_proportion = SlashRewardFraction::::get(); + let mut consumed_weight = Weight::from_parts(0, 0); + let mut add_db_reads_writes = |reads, writes| { + consumed_weight += T::DbWeight::get().reads_writes(reads, writes); + }; + + let active_era = { + let active_era = ActiveEra::::get(); + add_db_reads_writes(1, 0); + if active_era.is_none() { + // This offence need not be re-submitted. + return consumed_weight + } + active_era.expect("value checked not to be `None`; qed").index + }; + let active_era_start_session_index = ErasStartSessionIndex::::get(active_era) + .unwrap_or_else(|| { + frame_support::print("Error: start_session_index must be set for current_era"); + 0 + }); + add_db_reads_writes(1, 0); + + let window_start = active_era.saturating_sub(T::BondingDuration::get()); + + // Fast path for active-era report - most likely. + // `slash_session` cannot be in a future active era. It must be in `active_era` or before. 
+ let slash_era = if slash_session >= active_era_start_session_index { + active_era + } else { + let eras = BondedEras::::get(); + add_db_reads_writes(1, 0); + + // Reverse because it's more likely to find reports from recent eras. + match eras.iter().rev().find(|&(_, sesh)| sesh <= &slash_session) { + Some((slash_era, _)) => *slash_era, + // Before bonding period. defensive - should be filtered out. + None => return consumed_weight, + } + }; + + add_db_reads_writes(1, 1); + + let slash_defer_duration = T::SlashDeferDuration::get(); + + let invulnerables = Invulnerables::::get(); + add_db_reads_writes(1, 0); + + for (details, slash_fraction) in offenders.zip(slash_fractions) { + let stash = &details.offender; + let exposure = Self::eras_stakers(active_era, stash); + + // Skip if the validator is invulnerable. + if invulnerables.contains(stash) { + continue + } + + Self::deposit_event(Event::::SlashReported { + validator: stash.clone(), + fraction: *slash_fraction, + slash_era, + }); + + let unapplied = slashing::compute_slash::(slashing::SlashParams { + stash, + slash: *slash_fraction, + exposure: &exposure, + slash_era, + window_start, + now: active_era, + reward_proportion, + }); + + if let Some(mut unapplied) = unapplied { + let nominators_len = unapplied.others.len() as u64; + let reporters_len = details.reporters.len() as u64; + + { + let upper_bound = 1 /* Validator/NominatorSlashInEra */ + 2 /* fetch_spans */; + let rw = upper_bound + nominators_len * upper_bound; + add_db_reads_writes(rw, rw); + } + unapplied.reporters = details.reporters.clone(); + if slash_defer_duration == 0 { + // Apply right away. + slashing::apply_slash::(unapplied, slash_era); + { + let slash_cost = (6, 5); + let reward_cost = (2, 2); + add_db_reads_writes( + (1 + nominators_len) * slash_cost.0 + reward_cost.0 * reporters_len, + (1 + nominators_len) * slash_cost.1 + reward_cost.1 * reporters_len, + ); + } + } else { + // Defer to end of some `slash_defer_duration` from now. 
+ log!( + debug, + "deferring slash of {:?}% happened in {:?} (reported in {:?}) to {:?}", + slash_fraction, + slash_era, + active_era, + slash_era + slash_defer_duration + 1, + ); + UnappliedSlashes::::mutate( + slash_era.saturating_add(slash_defer_duration).saturating_add(One::one()), + move |for_later| for_later.push(unapplied), + ); + add_db_reads_writes(1, 1); + } + } else { + add_db_reads_writes(4 /* fetch_spans */, 5 /* kick_out_if_recent */) + } + } + + consumed_weight + } + } impl Pallet { @@ -1769,23 +1893,6 @@ impl historical::SessionManager historical::SessionManager for Pallet { - fn new_session(new_index: SessionIndex) -> Option> { - >::new_session(new_index) - .map(|validators| validators.into_iter().map(|v| (v, ())).collect()) - } - fn new_session_genesis(new_index: SessionIndex) -> Option> { - >::new_session_genesis(new_index) - .map(|validators| validators.into_iter().map(|v| (v, ())).collect()) - } - fn start_session(start_index: SessionIndex) { - >::start_session(start_index) - } - fn end_session(end_index: SessionIndex) { - >::end_session(end_index) - } -} - /// Add reward points to block authors: /// * 20 points to the block producer for producing a (non-uncle) block, impl pallet_authorship::EventHandler> for Pallet @@ -1803,7 +1910,10 @@ impl for Pallet where T: pallet_session::Config::AccountId>, - T: pallet_session::historical::Config, + T: pallet_session::historical::Config< + FullIdentification = Exposure<::AccountId, BalanceOf>, + FullIdentificationOf = ExposureOf, + >, T::SessionHandler: pallet_session::SessionHandler<::AccountId>, T::SessionManager: pallet_session::SessionManager<::AccountId>, T::ValidatorIdOf: Convert< @@ -1811,11 +1921,11 @@ where Option<::AccountId>, >, { - /// When an offence is reported, it is split into pages and put in the offence queue. - /// As offence queue is processed, computed slashes are queued to be applied after the - /// `SlashDeferDuration`. 
fn on_offence( - offenders: &[OffenceDetails>], + offenders: &[OffenceDetails< + T::AccountId, + pallet_session::historical::IdentificationTuple, + >], slash_fractions: &[Perbill], slash_session: SessionIndex, ) -> Weight { @@ -1832,197 +1942,8 @@ where let (ref offender, _) = details.offender; OffenceDetails { offender: offender.clone(), reporters: details.reporters.clone() } }); - Self::on_offence(offenders, slash_fractions, slash_session) - } -} -impl Pallet { - /// When an offence is reported, it is split into pages and put in the offence queue. - /// As offence queue is processed, computed slashes are queued to be applied after the - /// `SlashDeferDuration`. - pub fn on_offence( - offenders: impl Iterator>, - slash_fractions: &[Perbill], - slash_session: SessionIndex, - ) -> Weight { - // todo(ank4n): Needs to be properly benched. - let mut consumed_weight = Weight::zero(); - let mut add_db_reads_writes = |reads, writes| { - consumed_weight += T::DbWeight::get().reads_writes(reads, writes); - }; - - // Find the era to which offence belongs. - add_db_reads_writes(1, 0); - let Some(active_era) = ActiveEra::::get() else { - log!(warn, "🦹 on_offence: no active era; ignoring offence"); - return consumed_weight - }; - - add_db_reads_writes(1, 0); - let active_era_start_session = - ErasStartSessionIndex::::get(active_era.index).unwrap_or(0); - - // Fast path for active-era report - most likely. - // `slash_session` cannot be in a future active era. It must be in `active_era` or before. - let offence_era = if slash_session >= active_era_start_session { - active_era.index - } else { - add_db_reads_writes(1, 0); - match BondedEras::::get() - .iter() - // Reverse because it's more likely to find reports from recent eras. - .rev() - .find(|&(_, sesh)| sesh <= &slash_session) - .map(|(era, _)| *era) - { - Some(era) => era, - None => { - // defensive: this implies offence is for a discarded era, and should already be - // filtered out. 
- log!(warn, "🦹 on_offence: no era found for slash_session; ignoring offence"); - return Weight::default() - }, - } - }; - - add_db_reads_writes(1, 0); - let invulnerables = Invulnerables::::get(); - - for (details, slash_fraction) in offenders.zip(slash_fractions) { - let validator = &details.offender; - // Skip if the validator is invulnerable. - if invulnerables.contains(&validator) { - log!(debug, "🦹 on_offence: {:?} is invulnerable; ignoring offence", validator); - continue - } - - add_db_reads_writes(1, 0); - let Some(exposure_overview) = >::get(&offence_era, validator) - else { - // defensive: this implies offence is for a discarded era, and should already be - // filtered out. - log!( - warn, - "🦹 on_offence: no exposure found for {:?} in era {}; ignoring offence", - validator, - offence_era - ); - continue; - }; - - Self::deposit_event(Event::::OffenceReported { - validator: validator.clone(), - fraction: *slash_fraction, - offence_era, - }); - - if offence_era == active_era.index { - // offence is in the current active era. Report it to session to maybe disable the - // validator. - add_db_reads_writes(2, 2); - T::SessionInterface::report_offence( - validator.clone(), - OffenceSeverity(*slash_fraction), - ); - } - add_db_reads_writes(1, 0); - let prior_slash_fraction = ValidatorSlashInEra::::get(offence_era, validator) - .map_or(Zero::zero(), |(f, _)| f); - - add_db_reads_writes(1, 0); - if let Some(existing) = OffenceQueue::::get(offence_era, validator) { - if slash_fraction.deconstruct() > existing.slash_fraction.deconstruct() { - add_db_reads_writes(0, 2); - OffenceQueue::::insert( - offence_era, - validator, - OffenceRecord { - reporter: details.reporters.first().cloned(), - reported_era: active_era.index, - slash_fraction: *slash_fraction, - ..existing - }, - ); - - // update the slash fraction in the `ValidatorSlashInEra` storage. 
- ValidatorSlashInEra::::insert( - offence_era, - validator, - (slash_fraction, exposure_overview.own), - ); - - log!( - debug, - "🦹 updated slash for {:?}: {:?} (prior: {:?})", - validator, - slash_fraction, - prior_slash_fraction, - ); - } else { - log!( - debug, - "🦹 ignored slash for {:?}: {:?} (existing prior is larger: {:?})", - validator, - slash_fraction, - prior_slash_fraction, - ); - } - } else if slash_fraction.deconstruct() > prior_slash_fraction.deconstruct() { - add_db_reads_writes(0, 3); - ValidatorSlashInEra::::insert( - offence_era, - validator, - (slash_fraction, exposure_overview.own), - ); - - OffenceQueue::::insert( - offence_era, - validator, - OffenceRecord { - reporter: details.reporters.first().cloned(), - reported_era: active_era.index, - // there are cases of validator with no exposure, hence 0 page, so we - // saturate to avoid underflow. - exposure_page: exposure_overview.page_count.saturating_sub(1), - slash_fraction: *slash_fraction, - prior_slash_fraction, - }, - ); - - OffenceQueueEras::::mutate(|q| { - if let Some(eras) = q { - log!(debug, "🦹 inserting offence era {} into existing queue", offence_era); - eras.binary_search(&offence_era) - .err() - .map(|idx| eras.try_insert(idx, offence_era).defensive()); - } else { - let mut eras = BoundedVec::default(); - log!(debug, "🦹 inserting offence era {} into empty queue", offence_era); - let _ = eras.try_push(offence_era).defensive(); - *q = Some(eras); - } - }); - - log!( - debug, - "🦹 queued slash for {:?}: {:?} (prior: {:?})", - validator, - slash_fraction, - prior_slash_fraction, - ); - } else { - log!( - debug, - "🦹 ignored slash for {:?}: {:?} (already slashed in era with prior: {:?})", - validator, - slash_fraction, - prior_slash_fraction, - ); - } - } - - consumed_weight - } + Self::on_offence(offenders, slash_fractions, slash_session) } } impl ScoreProvider for Pallet { @@ -2454,7 +2375,6 @@ impl Pallet { /// /// -- SHOULD ONLY BE CALLED AT THE END OF A GIVEN BLOCK. 
pub fn ensure_snapshot_metadata_state(now: BlockNumberFor) -> Result<(), TryRuntimeError> { - use sp_runtime::traits::One; let next_election = Self::next_election_prediction(now); let pages = Self::election_pages().saturated_into::>(); let election_prep_start = next_election - pages; diff --git a/substrate/frame/staking/src/pallet/mod.rs b/substrate/frame/staking/src/pallet/mod.rs index bd035bbc0f0fd..2641d26969b91 100644 --- a/substrate/frame/staking/src/pallet/mod.rs +++ b/substrate/frame/staking/src/pallet/mod.rs @@ -77,7 +77,7 @@ pub mod pallet { use frame_election_provider_support::{ElectionDataProvider, PageIndex}; /// The in-code storage version. - const STORAGE_VERSION: StorageVersion = StorageVersion::new(17); + const STORAGE_VERSION: StorageVersion = StorageVersion::new(16); #[pallet::pallet] #[pallet::storage_version(STORAGE_VERSION)] @@ -649,67 +649,15 @@ pub mod pallet { #[pallet::storage] pub type CanceledSlashPayout = StorageValue<_, BalanceOf, ValueQuery>; - /// Stores reported offences in a queue until they are processed in subsequent blocks. - /// - /// Each offence is recorded under the corresponding era index and the offending validator's - /// account. If an offence spans multiple pages, only one page is processed at a time. Offences - /// are handled sequentially, with their associated slashes computed and stored in - /// `UnappliedSlashes`. These slashes are then applied in a future era as determined by - /// `SlashDeferDuration`. - /// - /// Any offences tied to an era older than `BondingDuration` are automatically dropped. - /// Processing always prioritizes the oldest era first. - #[pallet::storage] - pub type OffenceQueue = StorageDoubleMap< - _, - Twox64Concat, - EraIndex, - Twox64Concat, - T::AccountId, - slashing::OffenceRecord, - >; - - /// Tracks the eras that contain offences in `OffenceQueue`, sorted from **earliest to latest**. 
- /// - /// - This ensures efficient retrieval of the oldest offence without iterating through - /// `OffenceQueue`. - /// - When a new offence is added to `OffenceQueue`, its era is **inserted in sorted order** - /// if not already present. - /// - When all offences for an era are processed, it is **removed** from this list. - /// - The maximum length of this vector is bounded by `BondingDuration`. - /// - /// This eliminates the need for expensive iteration and sorting when fetching the next offence - /// to process. - #[pallet::storage] - pub type OffenceQueueEras = StorageValue<_, BoundedVec>; - - /// Tracks the currently processed offence record from the `OffenceQueue`. - /// - /// - When processing offences, an offence record is **popped** from the oldest era in - /// `OffenceQueue` and stored here. - /// - The function `process_offence` reads from this storage, processing one page of exposure at - /// a time. - /// - After processing a page, the `exposure_page` count is **decremented** until it reaches - /// zero. - /// - Once fully processed, the offence record is removed from this storage. - /// - /// This ensures that offences are processed incrementally, preventing excessive computation - /// in a single block while maintaining correct slashing behavior. - #[pallet::storage] - pub type ProcessingOffence = - StorageValue<_, (EraIndex, T::AccountId, slashing::OffenceRecord)>; - /// All unapplied slashes that are queued for later. #[pallet::storage] - pub type UnappliedSlashes = StorageDoubleMap< + #[pallet::unbounded] + pub type UnappliedSlashes = StorageMap< _, Twox64Concat, EraIndex, - Twox64Concat, - // Unique key for unapplied slashes: (validator, slash fraction, page index). - (T::AccountId, Perbill, u32), - UnappliedSlash, - OptionQuery, + Vec>>, + ValueQuery, >; /// A mapping from still-bonded eras to the first session index of that era. 
@@ -978,6 +926,13 @@ pub mod pallet {
 staker: T::AccountId,
 amount: BalanceOf,
 },
+ /// A slash for the given validator, for the given percentage of their stake, at the given
+ /// era has been reported.
+ SlashReported {
+ validator: T::AccountId,
+ fraction: Perbill,
+ slash_era: EraIndex,
+ },
 /// An old slashing report from a prior era was discarded because it could
 /// not be processed.
 OldSlashingReportDiscarded {
@@ -1061,26 +1016,6 @@ pub mod pallet {
 page: PageIndex,
 result: Result,
 },
- /// An offence for the given validator, for the given percentage of their stake, at the
- /// given era as been reported.
- OffenceReported {
- offence_era: EraIndex,
- validator: T::AccountId,
- fraction: Perbill,
- },
- /// An offence has been processed and the corresponding slash has been computed.
- SlashComputed {
- offence_era: EraIndex,
- slash_era: EraIndex,
- offender: T::AccountId,
- page: u32,
- },
- /// An unapplied slash has been cancelled.
- SlashCancelled {
- slash_era: EraIndex,
- slash_key: (T::AccountId, Perbill, u32),
- payout: BalanceOf,
- },
 }
 #[pallet::error]
@@ -1098,8 +1033,8 @@ pub mod pallet {
 EmptyTargets,
 /// Duplicate index.
 DuplicateIndex,
- /// Slash record not found.
- InvalidSlashRecord,
+ /// Slash record index out of bounds.
+ InvalidSlashIndex,
 /// Cannot have a validator or nominator role, with value less than the minimum defined by
 /// governance (see `MinValidatorBond` and `MinNominatorBond`). If unbonding is the
 /// intention, `chill` first to remove one's role as validator/nominator.
@@ -1114,6 +1049,8 @@ pub mod pallet {
 InvalidEraToReward,
 /// Invalid number of nominations.
 InvalidNumberOfNominations,
+ /// Items are not sorted and unique.
+ NotSortedAndUnique,
 /// Rewards for this era have already been claimed for this validator.
 AlreadyClaimed,
 /// No nominators exist on this page.
@@ -1154,8 +1091,6 @@ pub mod pallet {
 CannotReapStash,
 /// The stake of this account is already migrated to `Fungible` holds.
AlreadyMigrated, - /// Era not yet started. - EraNotStarted, /// Account is restricted from participation in staking. This may happen if the account is /// staking in another way already, such as via pool. Restricted, @@ -1167,21 +1102,6 @@ pub mod pallet { /// that the `ElectableStashes` has been populated with all validators from all pages at /// the time of the election. fn on_initialize(now: BlockNumberFor) -> Weight { - // todo(ank4n): Hacky bench. Do it properly. - let mut consumed_weight = slashing::process_offence::(); - - consumed_weight.saturating_accrue(T::DbWeight::get().reads(1)); - if let Some(active_era) = ActiveEra::::get() { - let max_slash_page_size = T::MaxExposurePageSize::get(); - consumed_weight.saturating_accrue( - T::DbWeight::get().reads_writes( - 3 * max_slash_page_size as u64, - 3 * max_slash_page_size as u64, - ), - ); - Self::apply_unapplied_slashes(active_era.index); - } - let pages = Self::election_pages(); // election ongoing, fetch the next page. @@ -1209,9 +1129,7 @@ pub mod pallet { } }; - consumed_weight.saturating_accrue(inner_weight); - - consumed_weight + T::WeightInfo::on_initialize_noop().saturating_add(inner_weight) } fn on_finalize(_n: BlockNumberFor) { @@ -1977,35 +1895,33 @@ pub mod pallet { Ok(()) } - /// Cancels scheduled slashes for a given era before they are applied. + /// Cancel enactment of a deferred slash. /// - /// This function allows `T::AdminOrigin` to selectively remove pending slashes from - /// the `UnappliedSlashes` storage, preventing their enactment. + /// Can be called by the `T::AdminOrigin`. /// - /// ## Parameters - /// - `era`: The staking era for which slashes were deferred. - /// - `slash_keys`: A list of slash keys identifying the slashes to remove. This is a tuple - /// of `(stash, slash_fraction, page_index)`. + /// Parameters: era and indices of the slashes for that era to kill. 
#[pallet::call_index(17)] - #[pallet::weight(T::WeightInfo::cancel_deferred_slash(slash_keys.len() as u32))] + #[pallet::weight(T::WeightInfo::cancel_deferred_slash(slash_indices.len() as u32))] pub fn cancel_deferred_slash( origin: OriginFor, era: EraIndex, - slash_keys: Vec<(T::AccountId, Perbill, u32)>, + slash_indices: Vec, ) -> DispatchResult { T::AdminOrigin::ensure_origin(origin)?; - ensure!(!slash_keys.is_empty(), Error::::EmptyTargets); - - // Remove the unapplied slashes. - slash_keys.into_iter().for_each(|i| { - UnappliedSlashes::::take(&era, &i).map(|unapplied_slash| { - Self::deposit_event(Event::::SlashCancelled { - slash_era: era, - slash_key: i, - payout: unapplied_slash.payout, - }); - }); - }); + + ensure!(!slash_indices.is_empty(), Error::::EmptyTargets); + ensure!(is_sorted_and_unique(&slash_indices), Error::::NotSortedAndUnique); + + let mut unapplied = UnappliedSlashes::::get(&era); + let last_item = slash_indices[slash_indices.len() - 1]; + ensure!((last_item as usize) < unapplied.len(), Error::::InvalidSlashIndex); + + for (removed, index) in slash_indices.into_iter().enumerate() { + let index = (index as usize) - removed; + unapplied.remove(index); + } + + UnappliedSlashes::::insert(&era, &unapplied); Ok(()) } @@ -2569,45 +2485,6 @@ pub mod pallet { Ok(Pays::No.into()) } - /// Manually applies a deferred slash for a given era. - /// - /// Normally, slashes are automatically applied shortly after the start of the `slash_era`. - /// This function exists as a **fallback mechanism** in case slashes were not applied due to - /// unexpected reasons. It allows anyone to manually apply an unapplied slash. - /// - /// ## Parameters - /// - `slash_era`: The staking era in which the slash was originally scheduled. - /// - `slash_key`: A unique identifier for the slash, represented as a tuple: - /// - `stash`: The stash account of the validator being slashed. - /// - `slash_fraction`: The fraction of the stake that was slashed. 
- /// - `page_index`: The index of the exposure page being processed. - /// - /// ## Behavior - /// - The function is **permissionless**—anyone can call it. - /// - The `slash_era` **must be the current era or a past era**. If it is in the future, the - /// call fails with `EraNotStarted`. - /// - The fee is waived if the slash is successfully applied. - /// - /// ## TODO: Future Improvement - /// - Implement an **off-chain worker (OCW) task** to automatically apply slashes when there - /// is unused block space, improving efficiency. - #[pallet::call_index(31)] - #[pallet::weight(T::WeightInfo::apply_slash())] - pub fn apply_slash( - origin: OriginFor, - slash_era: EraIndex, - slash_key: (T::AccountId, Perbill, u32), - ) -> DispatchResultWithPostInfo { - let _ = ensure_signed(origin)?; - let active_era = ActiveEra::::get().map(|a| a.index).unwrap_or_default(); - ensure!(slash_era <= active_era, Error::::EraNotStarted); - let unapplied_slash = UnappliedSlashes::::take(&slash_era, &slash_key) - .ok_or(Error::::InvalidSlashRecord)?; - slashing::apply_slash::(unapplied_slash, slash_era); - - Ok(Pays::No.into()) - } - /// This function allows governance to manually slash a validator and is a /// **fallback mechanism**. /// @@ -2667,3 +2544,8 @@ pub mod pallet { } } } + +/// Check that list is sorted and has no duplicates. +fn is_sorted_and_unique(list: &[u32]) -> bool { + list.windows(2).all(|w| w[0] < w[1]) +} diff --git a/substrate/frame/staking/src/slashing.rs b/substrate/frame/staking/src/slashing.rs index 30d4197a888a7..9352fda84a2f3 100644 --- a/substrate/frame/staking/src/slashing.rs +++ b/substrate/frame/staking/src/slashing.rs @@ -50,20 +50,20 @@ //! 
Based on research at use crate::{ - asset, log, BalanceOf, Config, EraInfo, Error, NegativeImbalanceOf, NominatorSlashInEra, - OffenceQueue, OffenceQueueEras, PagedExposure, Pallet, Perbill, ProcessingOffence, - SlashRewardFraction, SpanSlash, UnappliedSlash, UnappliedSlashes, ValidatorSlashInEra, + asset, BalanceOf, Config, Error, NegativeImbalanceOf, NominatorSlashInEra, + Pallet, Perbill, Exposure, + SpanSlash, UnappliedSlash, ValidatorSlashInEra, }; use alloc::vec::Vec; use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ ensure, - traits::{Defensive, DefensiveSaturating, Get, Imbalance, OnUnbalanced}, + traits::{Defensive, DefensiveSaturating, Imbalance, OnUnbalanced}, }; use scale_info::TypeInfo; use sp_runtime::{ traits::{Saturating, Zero}, - DispatchResult, RuntimeDebug, WeakBoundedVec, Weight, + DispatchResult, RuntimeDebug, }; use sp_staking::{EraIndex, StakingInterface}; @@ -209,12 +209,8 @@ pub(crate) struct SlashParams<'a, T: 'a + Config> { pub(crate) stash: &'a T::AccountId, /// The proportion of the slash. pub(crate) slash: Perbill, - /// The prior slash proportion of the validator if the validator has been reported multiple - /// times in the same era, and a new greater slash replaces the old one. - /// Invariant: slash > prior_slash - pub(crate) prior_slash: Perbill, /// The exposure of the stash and all nominators. - pub(crate) exposure: &'a PagedExposure>, + pub(crate) exposure: &'a Exposure>, /// The era where the offence occurred. pub(crate) slash_era: EraIndex, /// The first era in the current bonding period. @@ -226,248 +222,76 @@ pub(crate) struct SlashParams<'a, T: 'a + Config> { pub(crate) reward_proportion: Perbill, } -/// Represents an offence record within the staking system, capturing details about a slashing -/// event. -#[derive(Clone, Encode, Decode, TypeInfo, MaxEncodedLen, PartialEq, RuntimeDebug)] -pub struct OffenceRecord { - /// The account ID of the entity that reported the offence. 
- pub reporter: Option, - - /// Era at which the offence was reported. - pub reported_era: EraIndex, - - /// The specific page of the validator's exposure currently being processed. - /// - /// Since a validator's total exposure can span multiple pages, this field serves as a pointer - /// to the current page being evaluated. The processing order starts from the last page - /// and moves backward, decrementing this value with each processed page. - /// - /// This ensures that all pages are systematically handled, and it helps track when - /// the entire exposure has been processed. - pub exposure_page: u32, - - /// The fraction of the validator's stake to be slashed for this offence. - pub slash_fraction: Perbill, - - /// The previous slash fraction of the validator's stake before being updated. - /// If a new, higher slash fraction is reported, this field stores the prior fraction - /// that was overwritten. This helps in tracking changes in slashes across multiple reports for - /// the same era. - pub prior_slash_fraction: Perbill, -} - -/// Loads next offence in the processing offence and returns the offense record to be processed. +/// Computes a slash of a validator and nominators. It returns an unapplied +/// record to be applied at some later point. Slashing metadata is updated in storage, +/// since unapplied records are only rarely intended to be dropped. /// -/// Note: this can mutate the following storage -/// - `ProcessingOffence` -/// - `OffenceQueue` -/// - `OffenceQueueEras` -fn next_offence() -> Option<(EraIndex, T::AccountId, OffenceRecord)> { - let processing_offence = ProcessingOffence::::get(); - - if let Some((offence_era, offender, offence_record)) = processing_offence { - // If the exposure page is 0, then the offence has been processed. - if offence_record.exposure_page == 0 { - ProcessingOffence::::kill(); - return Some((offence_era, offender, offence_record)) - } - - // Update the next page. 
- ProcessingOffence::::put(( - offence_era, - &offender, - OffenceRecord { - // decrement the page index. - exposure_page: offence_record.exposure_page.defensive_saturating_sub(1), - ..offence_record.clone() - }, - )); - - return Some((offence_era, offender, offence_record)) - } - - // Nothing in processing offence. Try to enqueue the next offence. - let Some(mut eras) = OffenceQueueEras::::get() else { return None }; - let Some(&oldest_era) = eras.first() else { return None }; - - let mut offence_iter = OffenceQueue::::iter_prefix(oldest_era); - let next_offence = offence_iter.next(); - - if let Some((ref validator, ref offence_record)) = next_offence { - // Update the processing offence if the offence is multi-page. - if offence_record.exposure_page > 0 { - // update processing offence with the next page. - ProcessingOffence::::put(( - oldest_era, - validator.clone(), - OffenceRecord { - exposure_page: offence_record.exposure_page.defensive_saturating_sub(1), - ..offence_record.clone() - }, - )); - } - - // Remove from `OffenceQueue` - OffenceQueue::::remove(oldest_era, &validator); - } +/// The pending slash record returned does not have initialized reporters. Those have +/// to be set at a higher level, if any. +pub(crate) fn compute_slash( + params: SlashParams, +) -> Option>> { + let mut reward_payout = Zero::zero(); + let mut val_slashed = Zero::zero(); - // If there are no offences left for the era, remove the era from `OffenceQueueEras`. - if offence_iter.next().is_none() { - if eras.len() == 1 { - // If there is only one era left, remove the entire queue. - OffenceQueueEras::::kill(); - } else { - // Remove the oldest era - eras.remove(0); - OffenceQueueEras::::put(eras); - } + // is the slash amount here a maximum for the era? 
+ let own_slash = params.slash * params.exposure.own; + if params.slash * params.exposure.total == Zero::zero() { + // kick out the validator even if they won't be slashed, + // as long as the misbehavior is from their most recent slashing span. + kick_out_if_recent::(params); + return None } - next_offence.map(|(v, o)| (oldest_era, v, o)) -} - -/// Infallible function to process an offence. -pub(crate) fn process_offence() -> Weight { - // todo(ank4n): this needs to be properly benched. - let mut consumed_weight = Weight::from_parts(0, 0); - let mut add_db_reads_writes = |reads, writes| { - consumed_weight += T::DbWeight::get().reads_writes(reads, writes); - }; - - add_db_reads_writes(1, 1); - let Some((offence_era, offender, offence_record)) = next_offence::() else { - return consumed_weight - }; - - log!( - debug, - "🦹 Processing offence for {:?} in era {:?} with slash fraction {:?}", - offender, - offence_era, - offence_record.slash_fraction, - ); + let prior_slash_p = ValidatorSlashInEra::::get(¶ms.slash_era, params.stash) + .map_or(Zero::zero(), |(prior_slash_proportion, _)| prior_slash_proportion); - add_db_reads_writes(1, 0); - let reward_proportion = SlashRewardFraction::::get(); - - add_db_reads_writes(2, 0); - let Some(exposure) = - EraInfo::::get_paged_exposure(offence_era, &offender, offence_record.exposure_page) - else { - // this can only happen if the offence was valid at the time of reporting but became too old - // at the time of computing and should be discarded. 
- return consumed_weight - }; - - let slash_page = offence_record.exposure_page; - let slash_defer_duration = T::SlashDeferDuration::get(); - let slash_era = offence_era.saturating_add(slash_defer_duration); - let window_start = offence_record.reported_era.saturating_sub(T::BondingDuration::get()); - - add_db_reads_writes(3, 3); - let Some(mut unapplied) = compute_slash::(SlashParams { - stash: &offender, - slash: offence_record.slash_fraction, - prior_slash: offence_record.prior_slash_fraction, - exposure: &exposure, - slash_era: offence_era, - window_start, - now: offence_record.reported_era, - reward_proportion, - }) else { - log!( - debug, - "🦹 Slash of {:?}% happened in {:?} (reported in {:?}) is discarded, as could not compute slash", - offence_record.slash_fraction, - offence_era, - offence_record.reported_era, - ); - // No slash to apply. Discard. - return consumed_weight - }; - - >::deposit_event(super::Event::::SlashComputed { - offence_era, - slash_era, - offender: offender.clone(), - page: slash_page, - }); - - log!( - debug, - "🦹 Slash of {:?}% happened in {:?} (reported in {:?}) is computed", - offence_record.slash_fraction, - offence_era, - offence_record.reported_era, - ); - - // add the reporter to the unapplied slash. - unapplied.reporter = offence_record.reporter; - - if slash_defer_duration == 0 { - // Apply right away. - log!( - debug, - "🦹 applying slash instantly of {:?}% happened in {:?} (reported in {:?}) to {:?}", - offence_record.slash_fraction, - offence_era, - offence_record.reported_era, - offender, + // compare slash proportions rather than slash values to avoid issues due to rounding + // error. 
+ if params.slash.deconstruct() > prior_slash_p.deconstruct() { + ValidatorSlashInEra::::insert( + ¶ms.slash_era, + params.stash, + &(params.slash, own_slash), ); - - let accounts_slashed = unapplied.others.len() as u64 + 1; - add_db_reads_writes(3 * accounts_slashed, 3 * accounts_slashed); - apply_slash::(unapplied, offence_era); } else { - // Historical Note: Previously, with BondingDuration = 28 and SlashDeferDuration = 27, - // slashes were applied at the start of the 28th era from `offence_era`. - // However, with paged slashing, applying slashes now takes multiple blocks. - // To account for this delay, slashes are now applied at the start of the 27th era from - // `offence_era`. - log!( - debug, - "🦹 deferring slash of {:?}% happened in {:?} (reported in {:?}) to {:?}", - offence_record.slash_fraction, - offence_era, - offence_record.reported_era, - slash_era, - ); + // we slash based on the max in era - this new event is not the max, + // so neither the validator or any nominators will need an update. + // + // this does lead to a divergence of our system from the paper, which + // pays out some reward even if the latest report is not max-in-era. + // we opt to avoid the nominator lookups and edits and leave more rewards + // for more drastic misbehavior. + return None + } - add_db_reads_writes(0, 1); - UnappliedSlashes::::insert( - slash_era, - (offender, offence_record.slash_fraction, slash_page), - unapplied, + // apply slash to validator. + { + let mut spans = fetch_spans::( + params.stash, + params.window_start, + &mut reward_payout, + &mut val_slashed, + params.reward_proportion, ); - } - consumed_weight -} + let target_span = spans.compare_and_update_span_slash(params.slash_era, own_slash); -/// Computes a slash of a validator and nominators. It returns an unapplied -/// record to be applied at some later point. Slashing metadata is updated in storage, -/// since unapplied records are only rarely intended to be dropped. 
-/// -/// The pending slash record returned does not have initialized reporters. Those have -/// to be set at a higher level, if any. -/// -/// If `nomintors_only` is set to `true`, only the nominator slashes will be computed. -pub(crate) fn compute_slash(params: SlashParams) -> Option> { - let (val_slashed, mut reward_payout) = slash_validator::(params.clone()); + if target_span == Some(spans.span_index()) { + // misbehavior occurred within the current slashing span - end current span. + // Check for details. + spans.end_span(params.now); + } + } let mut nominators_slashed = Vec::new(); - let (nom_slashed, nom_reward_payout) = - slash_nominators::(params.clone(), &mut nominators_slashed); - reward_payout += nom_reward_payout; + reward_payout += slash_nominators::(params.clone(), prior_slash_p, &mut nominators_slashed); - (nom_slashed + val_slashed > Zero::zero()).then_some(UnappliedSlash { + Some(UnappliedSlash { validator: params.stash.clone(), own: val_slashed, - others: WeakBoundedVec::force_from( - nominators_slashed, - Some("slashed nominators not expected to be larger than the bounds"), - ), - reporter: None, + others: nominators_slashed, + reporters: Vec::new(), payout: reward_payout, }) } @@ -492,70 +316,25 @@ fn kick_out_if_recent(params: SlashParams) { } } -/// Compute the slash for a validator. Returns the amount slashed and the reward payout. -fn slash_validator(params: SlashParams) -> (BalanceOf, BalanceOf) { - let own_slash = params.slash * params.exposure.exposure_metadata.own; - log!( - warn, - "🦹 slashing validator {:?} of stake: {:?} with {:?}% for {:?} in era {:?}", - params.stash, - params.exposure.exposure_metadata.own, - params.slash, - own_slash, - params.slash_era, - ); - - if own_slash == Zero::zero() { - // kick out the validator even if they won't be slashed, - // as long as the misbehavior is from their most recent slashing span. - kick_out_if_recent::(params); - return (Zero::zero(), Zero::zero()) - } - - // apply slash to validator. 
- let mut reward_payout = Zero::zero(); - let mut val_slashed = Zero::zero(); - - { - let mut spans = fetch_spans::( - params.stash, - params.window_start, - &mut reward_payout, - &mut val_slashed, - params.reward_proportion, - ); - - let target_span = spans.compare_and_update_span_slash(params.slash_era, own_slash); - - if target_span == Some(spans.span_index()) { - // misbehavior occurred within the current slashing span - end current span. - // Check for details. - spans.end_span(params.now); - } - } - - (val_slashed, reward_payout) -} - /// Slash nominators. Accepts general parameters and the prior slash percentage of the validator. /// -/// Returns the total amount slashed and amount of reward to pay out. +/// Returns the amount of reward to pay out. fn slash_nominators( params: SlashParams, + prior_slash_p: Perbill, nominators_slashed: &mut Vec<(T::AccountId, BalanceOf)>, -) -> (BalanceOf, BalanceOf) { - let mut reward_payout = BalanceOf::::zero(); - let mut total_slashed = BalanceOf::::zero(); +) -> BalanceOf { + let mut reward_payout = Zero::zero(); - nominators_slashed.reserve(params.exposure.exposure_page.others.len()); - for nominator in ¶ms.exposure.exposure_page.others { + nominators_slashed.reserve(params.exposure.others.len()); + for nominator in ¶ms.exposure.others { let stash = &nominator.who; let mut nom_slashed = Zero::zero(); - // the era slash of a nominator always grows, if the validator had a new max slash for the - // era. + // the era slash of a nominator always grows, if the validator + // had a new max slash for the era. 
let era_slash = { - let own_slash_prior = params.prior_slash * nominator.value; + let own_slash_prior = prior_slash_p * nominator.value; let own_slash_by_validator = params.slash * nominator.value; let own_slash_difference = own_slash_by_validator.saturating_sub(own_slash_prior); @@ -585,10 +364,9 @@ fn slash_nominators( } } nominators_slashed.push((stash.clone(), nom_slashed)); - total_slashed.saturating_accrue(nom_slashed); } - (total_slashed, reward_payout) + reward_payout } // helper struct for managing a set of spans we are currently inspecting. @@ -802,25 +580,22 @@ pub fn do_slash( } /// Apply a previously-unapplied slash. -pub(crate) fn apply_slash(unapplied_slash: UnappliedSlash, slash_era: EraIndex) { +pub(crate) fn apply_slash( + unapplied_slash: UnappliedSlash>, + slash_era: EraIndex, +) { let mut slashed_imbalance = NegativeImbalanceOf::::zero(); let mut reward_payout = unapplied_slash.payout; - if unapplied_slash.own > Zero::zero() { - do_slash::( - &unapplied_slash.validator, - unapplied_slash.own, - &mut reward_payout, - &mut slashed_imbalance, - slash_era, - ); - } + do_slash::( + &unapplied_slash.validator, + unapplied_slash.own, + &mut reward_payout, + &mut slashed_imbalance, + slash_era, + ); for &(ref nominator, nominator_slash) in &unapplied_slash.others { - if nominator_slash.is_zero() { - continue - } - do_slash::( nominator, nominator_slash, @@ -830,11 +605,7 @@ pub(crate) fn apply_slash(unapplied_slash: UnappliedSlash, slash_e ); } - pay_reporters::( - reward_payout, - slashed_imbalance, - &unapplied_slash.reporter.map(|v| crate::vec![v]).unwrap_or_default(), - ); + pay_reporters::(reward_payout, slashed_imbalance, &unapplied_slash.reporters); } /// Apply a reward payout to some reporters, paying the rewards out of the slashed imbalance. 
diff --git a/substrate/frame/staking/src/tests.rs b/substrate/frame/staking/src/tests.rs index 554c705bfbb81..5e1b405f6d84e 100644 --- a/substrate/frame/staking/src/tests.rs +++ b/substrate/frame/staking/src/tests.rs @@ -49,7 +49,7 @@ use sp_runtime::{ }; use sp_staking::{ offence::{OffenceDetails, OnOffenceHandler}, - SessionIndex, StakingInterface, + SessionIndex, }; use substrate_test_utils::assert_eq_uvec; @@ -753,7 +753,10 @@ fn nominators_also_get_slashed_pro_rata() { let exposed_nominator = initial_exposure.others.first().unwrap().value; // 11 goes offline - on_offence_now(&[offence_from(11, None)], &[slash_percent], true); + on_offence_now( + &[OffenceDetails { offender: (11, initial_exposure.clone()), reporters: vec![] }], + &[slash_percent], + ); // both stakes must have been decreased. assert!(Staking::ledger(101.into()).unwrap().active < nominator_stake); @@ -2450,7 +2453,13 @@ fn reward_validator_slashing_validator_does_not_overflow() { ); // Check slashing - on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(100)], true); + on_offence_now( + &[OffenceDetails { + offender: (11, Staking::eras_stakers(active_era(), &11)), + reporters: vec![], + }], + &[Perbill::from_percent(100)], + ); assert_eq!(asset::stakeable_balance::(&11), stake - 1); assert_eq!(asset::stakeable_balance::(&2), 1); @@ -2543,7 +2552,13 @@ fn era_is_always_same_length() { #[test] fn offence_doesnt_force_new_era() { ExtBuilder::default().build_and_execute(|| { - on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(5)], true); + on_offence_now( + &[OffenceDetails { + offender: (11, Staking::eras_stakers(active_era(), &11)), + reporters: vec![], + }], + &[Perbill::from_percent(5)], + ); assert_eq!(ForceEra::::get(), Forcing::NotForcing); }); @@ -2555,7 +2570,13 @@ fn offence_ensures_new_era_without_clobbering() { assert_ok!(Staking::force_new_era_always(RuntimeOrigin::root())); assert_eq!(ForceEra::::get(), Forcing::ForceAlways); - 
on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(5)], true); + on_offence_now( + &[OffenceDetails { + offender: (11, Staking::eras_stakers(active_era(), &11)), + reporters: vec![], + }], + &[Perbill::from_percent(5)], + ); assert_eq!(ForceEra::::get(), Forcing::ForceAlways); }); @@ -2573,7 +2594,13 @@ fn offence_deselects_validator_even_when_slash_is_zero() { assert!(Session::validators().contains(&11)); assert!(>::contains_key(11)); - on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(0)], true); + on_offence_now( + &[OffenceDetails { + offender: (11, Staking::eras_stakers(active_era(), &11)), + reporters: vec![], + }], + &[Perbill::from_percent(0)], + ); assert_eq!(ForceEra::::get(), Forcing::NotForcing); assert!(is_disabled(11)); @@ -2593,10 +2620,16 @@ fn slashing_performed_according_exposure() { assert_eq!(Staking::eras_stakers(active_era(), &11).own, 1000); // Handle an offence with a historical exposure. - on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(50)], true); + on_offence_now( + &[OffenceDetails { + offender: (11, Exposure { total: 500, own: 500, others: vec![] }), + reporters: vec![], + }], + &[Perbill::from_percent(50)], + ); // The stash account should be slashed for 250 (50% of 500). 
- assert_eq!(asset::stakeable_balance::(&11), 1000 / 2); + assert_eq!(asset::stakeable_balance::(&11), 1000 - 250); }); } @@ -2611,7 +2644,13 @@ fn validator_is_not_disabled_for_an_offence_in_previous_era() { assert!(>::contains_key(11)); assert!(Session::validators().contains(&11)); - on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(0)], true); + on_offence_now( + &[OffenceDetails { + offender: (11, Staking::eras_stakers(active_era(), &11)), + reporters: vec![], + }], + &[Perbill::from_percent(0)], + ); assert_eq!(ForceEra::::get(), Forcing::NotForcing); assert!(is_disabled(11)); @@ -2627,7 +2666,14 @@ fn validator_is_not_disabled_for_an_offence_in_previous_era() { mock::start_active_era(3); // an offence committed in era 1 is reported in era 3 - on_offence_in_era(&[offence_from(11, None)], &[Perbill::from_percent(0)], 1, true); + on_offence_in_era( + &[OffenceDetails { + offender: (11, Staking::eras_stakers(active_era(), &11)), + reporters: vec![], + }], + &[Perbill::from_percent(0)], + 1, + ); // the validator doesn't get disabled for an old offence assert!(Validators::::iter().any(|(stash, _)| stash == 11)); @@ -2637,11 +2683,13 @@ fn validator_is_not_disabled_for_an_offence_in_previous_era() { assert_eq!(ForceEra::::get(), Forcing::NotForcing); on_offence_in_era( - &[offence_from(11, None)], + &[OffenceDetails { + offender: (11, Staking::eras_stakers(active_era(), &11)), + reporters: vec![], + }], // NOTE: A 100% slash here would clean up the account, causing de-registration. &[Perbill::from_percent(95)], 1, - true, ); // the validator doesn't get disabled again @@ -2653,9 +2701,9 @@ fn validator_is_not_disabled_for_an_offence_in_previous_era() { } #[test] -fn only_first_reporter_receive_the_slice() { - // This test verifies that the first reporter of the offence receive their slice from the - // slashed amount. 
+fn reporters_receive_their_slice() { + // This test verifies that the reporters of the offence receive their slice from the slashed + // amount. ExtBuilder::default().build_and_execute(|| { // The reporters' reward is calculated from the total exposure. let initial_balance = 1125; @@ -2663,16 +2711,19 @@ fn only_first_reporter_receive_the_slice() { assert_eq!(Staking::eras_stakers(active_era(), &11).total, initial_balance); on_offence_now( - &[OffenceDetails { offender: (11, ()), reporters: vec![1, 2] }], + &[OffenceDetails { + offender: (11, Staking::eras_stakers(active_era(), &11)), + reporters: vec![1, 2], + }], &[Perbill::from_percent(50)], - true, ); // F1 * (reward_proportion * slash - 0) // 50% * (10% * initial_balance / 2) let reward = (initial_balance / 20) / 2; - assert_eq!(asset::total_balance::(&1), 10 + reward); - assert_eq!(asset::total_balance::(&2), 20 + 0); + let reward_each = reward / 2; // split into two pieces. + assert_eq!(asset::total_balance::(&1), 10 + reward_each); + assert_eq!(asset::total_balance::(&2), 20 + reward_each); }); } @@ -2686,14 +2737,26 @@ fn subsequent_reports_in_same_span_pay_out_less() { assert_eq!(Staking::eras_stakers(active_era(), &11).total, initial_balance); - on_offence_now(&[offence_from(11, Some(1))], &[Perbill::from_percent(20)], true); + on_offence_now( + &[OffenceDetails { + offender: (11, Staking::eras_stakers(active_era(), &11)), + reporters: vec![1], + }], + &[Perbill::from_percent(20)], + ); // F1 * (reward_proportion * slash - 0) // 50% * (10% * initial_balance * 20%) let reward = (initial_balance / 5) / 20; assert_eq!(asset::total_balance::(&1), 10 + reward); - on_offence_now(&[offence_from(11, Some(1))], &[Perbill::from_percent(50)], true); + on_offence_now( + &[OffenceDetails { + offender: (11, Staking::eras_stakers(active_era(), &11)), + reporters: vec![1], + }], + &[Perbill::from_percent(50)], + ); let prior_payout = reward; @@ -2721,9 +2784,17 @@ fn invulnerables_are_not_slashed() { .collect(); 
on_offence_now( - &[offence_from(11, None), offence_from(21, None)], + &[ + OffenceDetails { + offender: (11, Staking::eras_stakers(active_era(), &11)), + reporters: vec![], + }, + OffenceDetails { + offender: (21, Staking::eras_stakers(active_era(), &21)), + reporters: vec![], + }, + ], &[Perbill::from_percent(50), Perbill::from_percent(20)], - true, ); // The validator 11 hasn't been slashed, but 21 has been. @@ -2747,7 +2818,13 @@ fn dont_slash_if_fraction_is_zero() { ExtBuilder::default().build_and_execute(|| { assert_eq!(asset::stakeable_balance::(&11), 1000); - on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(0)], true); + on_offence_now( + &[OffenceDetails { + offender: (11, Staking::eras_stakers(active_era(), &11)), + reporters: vec![], + }], + &[Perbill::from_percent(0)], + ); // The validator hasn't been slashed. The new era is not forced. assert_eq!(asset::stakeable_balance::(&11), 1000); @@ -2762,18 +2839,36 @@ fn only_slash_for_max_in_era() { ExtBuilder::default().build_and_execute(|| { assert_eq!(asset::stakeable_balance::(&11), 1000); - on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(50)], true); + on_offence_now( + &[OffenceDetails { + offender: (11, Staking::eras_stakers(active_era(), &11)), + reporters: vec![], + }], + &[Perbill::from_percent(50)], + ); // The validator has been slashed and has been force-chilled. assert_eq!(asset::stakeable_balance::(&11), 500); assert_eq!(ForceEra::::get(), Forcing::NotForcing); - on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(25)], true); + on_offence_now( + &[OffenceDetails { + offender: (11, Staking::eras_stakers(active_era(), &11)), + reporters: vec![], + }], + &[Perbill::from_percent(25)], + ); // The validator has not been slashed additionally. 
assert_eq!(asset::stakeable_balance::(&11), 500); - on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(60)], true); + on_offence_now( + &[OffenceDetails { + offender: (11, Staking::eras_stakers(active_era(), &11)), + reporters: vec![], + }], + &[Perbill::from_percent(60)], + ); // The validator got slashed 10% more. assert_eq!(asset::stakeable_balance::(&11), 400); @@ -2789,13 +2884,25 @@ fn garbage_collection_after_slashing() { .build_and_execute(|| { assert_eq!(asset::stakeable_balance::(&11), 2000); - on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(10)], true); + on_offence_now( + &[OffenceDetails { + offender: (11, Staking::eras_stakers(active_era(), &11)), + reporters: vec![], + }], + &[Perbill::from_percent(10)], + ); assert_eq!(asset::stakeable_balance::(&11), 2000 - 200); assert!(SlashingSpans::::get(&11).is_some()); assert_eq!(SpanSlash::::get(&(11, 0)).amount(), &200); - on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(100)], true); + on_offence_now( + &[OffenceDetails { + offender: (11, Staking::eras_stakers(active_era(), &11)), + reporters: vec![], + }], + &[Perbill::from_percent(100)], + ); // validator and nominator slash in era are garbage-collected by era change, // so we don't test those here. 
@@ -2833,7 +2940,13 @@ fn garbage_collection_on_window_pruning() { assert_eq!(asset::stakeable_balance::(&101), 2000); let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value; - add_slash(&11); + on_offence_now( + &[OffenceDetails { + offender: (11, Staking::eras_stakers(now, &11)), + reporters: vec![], + }], + &[Perbill::from_percent(10)], + ); assert_eq!(asset::stakeable_balance::(&11), 900); assert_eq!(asset::stakeable_balance::(&101), 2000 - (nominated_value / 10)); @@ -2871,7 +2984,14 @@ fn slashing_nominators_by_span_max() { let nominated_value_11 = exposure_11.others.iter().find(|o| o.who == 101).unwrap().value; let nominated_value_21 = exposure_21.others.iter().find(|o| o.who == 101).unwrap().value; - on_offence_in_era(&[offence_from(11, None)], &[Perbill::from_percent(10)], 2, true); + on_offence_in_era( + &[OffenceDetails { + offender: (11, Staking::eras_stakers(active_era(), &11)), + reporters: vec![], + }], + &[Perbill::from_percent(10)], + 2, + ); assert_eq!(asset::stakeable_balance::(&11), 900); @@ -2890,7 +3010,14 @@ fn slashing_nominators_by_span_max() { assert_eq!(get_span(101).iter().collect::>(), expected_spans); // second slash: higher era, higher value, same span. - on_offence_in_era(&[offence_from(21, None)], &[Perbill::from_percent(30)], 3, true); + on_offence_in_era( + &[OffenceDetails { + offender: (21, Staking::eras_stakers(active_era(), &21)), + reporters: vec![], + }], + &[Perbill::from_percent(30)], + 3, + ); // 11 was not further slashed, but 21 and 101 were. assert_eq!(asset::stakeable_balance::(&11), 900); @@ -2904,7 +3031,14 @@ fn slashing_nominators_by_span_max() { // third slash: in same era and on same validator as first, higher // in-era value, but lower slash value than slash 2. 
- on_offence_in_era(&[offence_from(11, None)], &[Perbill::from_percent(20)], 2, true); + on_offence_in_era( + &[OffenceDetails { + offender: (11, Staking::eras_stakers(active_era(), &11)), + reporters: vec![], + }], + &[Perbill::from_percent(20)], + 2, + ); // 11 was further slashed, but 21 and 101 were not. assert_eq!(asset::stakeable_balance::(&11), 800); @@ -2931,7 +3065,13 @@ fn slashes_are_summed_across_spans() { let get_span = |account| SlashingSpans::::get(&account).unwrap(); - on_offence_now(&[offence_from(21, None)], &[Perbill::from_percent(10)], true); + on_offence_now( + &[OffenceDetails { + offender: (21, Staking::eras_stakers(active_era(), &21)), + reporters: vec![], + }], + &[Perbill::from_percent(10)], + ); let expected_spans = vec![ slashing::SlashingSpan { index: 1, start: 4, length: None }, @@ -2948,7 +3088,13 @@ fn slashes_are_summed_across_spans() { assert_eq!(Staking::slashable_balance_of(&21), 900); - on_offence_now(&[offence_from(21, None)], &[Perbill::from_percent(10)], true); + on_offence_now( + &[OffenceDetails { + offender: (21, Staking::eras_stakers(active_era(), &21)), + reporters: vec![], + }], + &[Perbill::from_percent(10)], + ); let expected_spans = vec![ slashing::SlashingSpan { index: 2, start: 5, length: None }, @@ -2974,10 +3120,13 @@ fn deferred_slashes_are_deferred() { System::reset_events(); - // only 1 page of exposure, so slashes will be applied in one block. - assert_eq!(EraInfo::::get_page_count(1, &11), 1); - - on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(10)], true); + on_offence_now( + &[OffenceDetails { + offender: (11, Staking::eras_stakers(active_era(), &11)), + reporters: vec![], + }], + &[Perbill::from_percent(10)], + ); // nominations are not removed regardless of the deferring. 
assert_eq!(Nominators::::get(101).unwrap().targets, vec![11, 21]); @@ -2990,37 +3139,27 @@ fn deferred_slashes_are_deferred() { assert_eq!(asset::stakeable_balance::(&11), 1000); assert_eq!(asset::stakeable_balance::(&101), 2000); - assert!(matches!( - staking_events_since_last_call().as_slice(), - &[ - Event::OffenceReported { validator: 11, offence_era: 1, .. }, - Event::SlashComputed { offence_era: 1, slash_era: 3, page: 0, .. }, - Event::PagedElectionProceeded { page: 0, result: Ok(2) }, - Event::StakersElected, - .., - ] - )); - - // the slashes for era 1 will start applying in era 3, to end before era 4. mock::start_active_era(3); - // Slashes not applied yet. Will apply in the next block after era starts. + assert_eq!(asset::stakeable_balance::(&11), 1000); assert_eq!(asset::stakeable_balance::(&101), 2000); - // trigger slashing by advancing block. - advance_blocks(1); + + // at the start of era 4, slashes from era 1 are processed, + // after being deferred for at least 2 full eras. + mock::start_active_era(4); + assert_eq!(asset::stakeable_balance::(&11), 900); assert_eq!(asset::stakeable_balance::(&101), 2000 - (nominated_value / 10)); assert!(matches!( staking_events_since_last_call().as_slice(), &[ - // era 3 elections + Event::SlashReported { validator: 11, slash_era: 1, .. }, Event::PagedElectionProceeded { page: 0, result: Ok(2) }, Event::StakersElected, - Event::EraPaid { .. }, - // slashes applied from era 1 between era 3 and 4. 
+ .., Event::Slashed { staker: 11, amount: 100 }, - Event::Slashed { staker: 101, amount: 12 }, + Event::Slashed { staker: 101, amount: 12 } ] )); }) @@ -3032,26 +3171,25 @@ fn retroactive_deferred_slashes_two_eras_before() { assert_eq!(BondingDuration::get(), 3); mock::start_active_era(1); + let exposure_11_at_era1 = Staking::eras_stakers(active_era(), &11); + + mock::start_active_era(3); assert_eq!(Nominators::::get(101).unwrap().targets, vec![11, 21]); System::reset_events(); on_offence_in_era( - &[offence_from(11, None)], + &[OffenceDetails { offender: (11, exposure_11_at_era1), reporters: vec![] }], &[Perbill::from_percent(10)], - 1, // should be deferred for two eras, and applied at the beginning of era 3. - true, + 1, // should be deferred for two full eras, and applied at the beginning of era 4. ); - mock::start_active_era(3); - // Slashes not applied yet. Will apply in the next block after era starts. - advance_blocks(1); + mock::start_active_era(4); assert!(matches!( staking_events_since_last_call().as_slice(), &[ - Event::OffenceReported { validator: 11, offence_era: 1, .. }, - Event::SlashComputed { offence_era: 1, slash_era: 3, offender: 11, page: 0 }, + Event::SlashReported { validator: 11, slash_era: 1, .. }, .., Event::Slashed { staker: 11, amount: 100 }, Event::Slashed { staker: 101, amount: 12 } @@ -3065,6 +3203,9 @@ fn retroactive_deferred_slashes_one_before() { ExtBuilder::default().slash_defer_duration(2).build_and_execute(|| { assert_eq!(BondingDuration::get(), 3); + mock::start_active_era(1); + let exposure_11_at_era1 = Staking::eras_stakers(active_era(), &11); + // unbond at slash era. 
mock::start_active_era(2); assert_ok!(Staking::chill(RuntimeOrigin::signed(11))); @@ -3073,23 +3214,21 @@ fn retroactive_deferred_slashes_one_before() { mock::start_active_era(3); System::reset_events(); on_offence_in_era( - &[offence_from(11, None)], + &[OffenceDetails { offender: (11, exposure_11_at_era1), reporters: vec![] }], &[Perbill::from_percent(10)], - 2, // should be deferred for two eras, and applied before the beginning of era 4. - true, + 2, // should be deferred for two full eras, and applied at the beginning of era 5. ); mock::start_active_era(4); assert_eq!(Staking::ledger(11.into()).unwrap().total, 1000); - // slash happens at next blocks. - advance_blocks(1); + // slash happens after the next line. + mock::start_active_era(5); assert!(matches!( staking_events_since_last_call().as_slice(), &[ - Event::OffenceReported { validator: 11, offence_era: 2, .. }, - Event::SlashComputed { offence_era: 2, slash_era: 4, offender: 11, page: 0 }, + Event::SlashReported { validator: 11, slash_era: 2, .. }, .., Event::Slashed { staker: 11, amount: 100 }, Event::Slashed { staker: 101, amount: 12 } @@ -3115,7 +3254,13 @@ fn staker_cannot_bail_deferred_slash() { let exposure = Staking::eras_stakers(active_era(), &11); let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value; - on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(10)], true); + on_offence_now( + &[OffenceDetails { + offender: (11, Staking::eras_stakers(active_era(), &11)), + reporters: vec![], + }], + &[Perbill::from_percent(10)], + ); // now we chill assert_ok!(Staking::chill(RuntimeOrigin::signed(101))); @@ -3184,44 +3329,23 @@ fn remove_deferred() { assert_eq!(asset::stakeable_balance::(&101), 2000); let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value; - // deferred to start of era 3. 
- let slash_fraction_one = Perbill::from_percent(10); - on_offence_now(&[offence_from(11, None)], &[slash_fraction_one], true); + // deferred to start of era 4. + on_offence_now( + &[OffenceDetails { offender: (11, exposure.clone()), reporters: vec![] }], + &[Perbill::from_percent(10)], + ); assert_eq!(asset::stakeable_balance::(&11), 1000); assert_eq!(asset::stakeable_balance::(&101), 2000); mock::start_active_era(2); - // reported later, but deferred to start of era 3 as well. + // reported later, but deferred to start of era 4 as well. System::reset_events(); - let slash_fraction_two = Perbill::from_percent(15); - on_offence_in_era(&[offence_from(11, None)], &[slash_fraction_two], 1, true); - - assert_eq!( - UnappliedSlashes::::iter_prefix(&3).collect::>(), - vec![ - ( - (11, slash_fraction_one, 0), - UnappliedSlash { - validator: 11, - own: 100, - others: bounded_vec![(101, 12)], - reporter: None, - payout: 5 - } - ), - ( - (11, slash_fraction_two, 0), - UnappliedSlash { - validator: 11, - own: 50, - others: bounded_vec![(101, 7)], - reporter: None, - payout: 6 - } - ), - ] + on_offence_in_era( + &[OffenceDetails { offender: (11, exposure.clone()), reporters: vec![] }], + &[Perbill::from_percent(15)], + 1, ); // fails if empty @@ -3230,13 +3354,8 @@ fn remove_deferred() { Error::::EmptyTargets ); - // cancel the slash with 10%. - assert_ok!(Staking::cancel_deferred_slash( - RuntimeOrigin::root(), - 3, - vec![(11, slash_fraction_one, 0)] - )); - assert_eq!(UnappliedSlashes::::iter_prefix(&3).count(), 1); + // cancel one of them. 
+ assert_ok!(Staking::cancel_deferred_slash(RuntimeOrigin::root(), 4, vec![0])); assert_eq!(asset::stakeable_balance::(&11), 1000); assert_eq!(asset::stakeable_balance::(&101), 2000); @@ -3246,29 +3365,23 @@ fn remove_deferred() { assert_eq!(asset::stakeable_balance::(&11), 1000); assert_eq!(asset::stakeable_balance::(&101), 2000); - // at the next blocks, slashes from era 1 are processed, 1 page a block, - // after being deferred for 2 eras. - advance_blocks(1); + // at the start of era 4, slashes from era 1 are processed, + // after being deferred for at least 2 full eras. + mock::start_active_era(4); // the first slash for 10% was cancelled, but the 15% one not. assert!(matches!( staking_events_since_last_call().as_slice(), &[ - Event::OffenceReported { validator: 11, offence_era: 1, .. }, - Event::SlashComputed { offence_era: 1, slash_era: 3, offender: 11, page: 0 }, - Event::SlashCancelled { - slash_era: 3, - slash_key: (11, fraction, 0), - payout: 5 - }, + Event::SlashReported { validator: 11, slash_era: 1, .. 
}, .., Event::Slashed { staker: 11, amount: 50 }, Event::Slashed { staker: 101, amount: 7 } - ] if fraction == slash_fraction_one + ] )); let slash_10 = Perbill::from_percent(10); - let slash_15 = slash_fraction_two; + let slash_15 = Perbill::from_percent(15); let initial_slash = slash_10 * nominated_value; let total_slash = slash_15 * nominated_value; @@ -3282,48 +3395,67 @@ fn remove_deferred() { #[test] fn remove_multi_deferred() { - ExtBuilder::default() - .slash_defer_duration(2) - .validator_count(4) - .set_status(41, StakerStatus::Validator) - .set_status(51, StakerStatus::Validator) - .build_and_execute(|| { - mock::start_active_era(1); + ExtBuilder::default().slash_defer_duration(2).build_and_execute(|| { + mock::start_active_era(1); - assert_eq!(asset::stakeable_balance::(&11), 1000); - assert_eq!(asset::stakeable_balance::(&101), 2000); + assert_eq!(asset::stakeable_balance::(&11), 1000); - on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(10)], true); + let exposure = Staking::eras_stakers(active_era(), &11); + assert_eq!(asset::stakeable_balance::(&101), 2000); - on_offence_now(&[offence_from(21, None)], &[Perbill::from_percent(10)], true); + on_offence_now( + &[OffenceDetails { offender: (11, exposure.clone()), reporters: vec![] }], + &[Perbill::from_percent(10)], + ); - on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(25)], true); + on_offence_now( + &[OffenceDetails { + offender: (21, Staking::eras_stakers(active_era(), &21)), + reporters: vec![], + }], + &[Perbill::from_percent(10)], + ); - on_offence_now(&[offence_from(41, None)], &[Perbill::from_percent(25)], true); + on_offence_now( + &[OffenceDetails { offender: (11, exposure.clone()), reporters: vec![] }], + &[Perbill::from_percent(25)], + ); - on_offence_now(&[offence_from(51, None)], &[Perbill::from_percent(25)], true); + on_offence_now( + &[OffenceDetails { offender: (42, exposure.clone()), reporters: vec![] }], + &[Perbill::from_percent(25)], + ); - // 
there are 5 slashes to be applied in era 3. - assert_eq!(UnappliedSlashes::::iter_prefix(&3).count(), 5); + on_offence_now( + &[OffenceDetails { offender: (69, exposure.clone()), reporters: vec![] }], + &[Perbill::from_percent(25)], + ); - // lets cancel 3 of them. - assert_ok!(Staking::cancel_deferred_slash( - RuntimeOrigin::root(), - 3, - vec![ - (11, Perbill::from_percent(10), 0), - (11, Perbill::from_percent(25), 0), - (51, Perbill::from_percent(25), 0), - ] - )); + assert_eq!(UnappliedSlashes::::get(&4).len(), 5); - let slashes = UnappliedSlashes::::iter_prefix(&3).collect::>(); - assert_eq!(slashes.len(), 2); - // the first item in the remaining slashes belongs to validator 41. - assert_eq!(slashes[0].0, (41, Perbill::from_percent(25), 0)); - // the second and last item in the remaining slashes belongs to validator 21. - assert_eq!(slashes[1].0, (21, Perbill::from_percent(10), 0)); - }) + // fails if list is not sorted + assert_noop!( + Staking::cancel_deferred_slash(RuntimeOrigin::root(), 1, vec![2, 0, 4]), + Error::::NotSortedAndUnique + ); + // fails if list is not unique + assert_noop!( + Staking::cancel_deferred_slash(RuntimeOrigin::root(), 1, vec![0, 2, 2]), + Error::::NotSortedAndUnique + ); + // fails if bad index + assert_noop!( + Staking::cancel_deferred_slash(RuntimeOrigin::root(), 1, vec![1, 2, 3, 4, 5]), + Error::::InvalidSlashIndex + ); + + assert_ok!(Staking::cancel_deferred_slash(RuntimeOrigin::root(), 4, vec![0, 2, 4])); + + let slashes = UnappliedSlashes::::get(&4); + assert_eq!(slashes.len(), 2); + assert_eq!(slashes[0].validator, 21); + assert_eq!(slashes[1].validator, 42); + }) } #[test] @@ -3352,7 +3484,10 @@ fn slash_kicks_validators_not_nominators_and_disables_nominator_for_kicked_valid assert_eq!(exposure_11.total, 1000 + 125); assert_eq!(exposure_21.total, 1000 + 375); - on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(10)], true); + on_offence_now( + &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: 
vec![] }], + &[Perbill::from_percent(10)], + ); assert_eq!( staking_events_since_last_call(), @@ -3360,12 +3495,12 @@ fn slash_kicks_validators_not_nominators_and_disables_nominator_for_kicked_valid Event::PagedElectionProceeded { page: 0, result: Ok(7) }, Event::StakersElected, Event::EraPaid { era_index: 0, validator_payout: 11075, remainder: 33225 }, - Event::OffenceReported { + Event::SlashReported { validator: 11, fraction: Perbill::from_percent(10), - offence_era: 1 + slash_era: 1 }, - Event::SlashComputed { offence_era: 1, slash_era: 1, offender: 11, page: 0 }, + Event::ValidatorDisabled { stash: 11 }, Event::Slashed { staker: 11, amount: 100 }, Event::Slashed { staker: 101, amount: 12 }, ] @@ -3412,14 +3547,23 @@ fn non_slashable_offence_disables_validator() { mock::start_active_era(1); assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41, 51, 201, 202]); + let exposure_11 = Staking::eras_stakers(ActiveEra::::get().unwrap().index, &11); + let exposure_21 = Staking::eras_stakers(ActiveEra::::get().unwrap().index, &21); + // offence with no slash associated - on_offence_now(&[offence_from(11, None)], &[Perbill::zero()], true); + on_offence_now( + &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], + &[Perbill::zero()], + ); // it does NOT affect the nominator. assert_eq!(Nominators::::get(101).unwrap().targets, vec![11, 21]); // offence that slashes 25% of the bond - on_offence_now(&[offence_from(21, None)], &[Perbill::from_percent(25)], true); + on_offence_now( + &[OffenceDetails { offender: (21, exposure_21.clone()), reporters: vec![] }], + &[Perbill::from_percent(25)], + ); // it DOES NOT affect the nominator. 
assert_eq!(Nominators::::get(101).unwrap().targets, vec![11, 21]); @@ -3430,17 +3574,18 @@ fn non_slashable_offence_disables_validator() { Event::PagedElectionProceeded { page: 0, result: Ok(7) }, Event::StakersElected, Event::EraPaid { era_index: 0, validator_payout: 11075, remainder: 33225 }, - Event::OffenceReported { + Event::SlashReported { validator: 11, fraction: Perbill::from_percent(0), - offence_era: 1 + slash_era: 1 }, - Event::OffenceReported { + Event::ValidatorDisabled { stash: 11 }, + Event::SlashReported { validator: 21, fraction: Perbill::from_percent(25), - offence_era: 1 + slash_era: 1 }, - Event::SlashComputed { offence_era: 1, slash_era: 1, offender: 21, page: 0 }, + Event::ValidatorDisabled { stash: 21 }, Event::Slashed { staker: 21, amount: 250 }, Event::Slashed { staker: 101, amount: 94 } ] @@ -3472,11 +3617,18 @@ fn slashing_independent_of_disabling_validator() { mock::start_active_era(1); assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41, 51]); + let exposure_11 = Staking::eras_stakers(ActiveEra::::get().unwrap().index, &11); + let exposure_21 = Staking::eras_stakers(ActiveEra::::get().unwrap().index, &21); + let now = ActiveEra::::get().unwrap().index; // --- Disable without a slash --- // offence with no slash associated - on_offence_in_era(&[offence_from(11, None)], &[Perbill::zero()], now, true); + on_offence_in_era( + &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], + &[Perbill::zero()], + now, + ); // nomination remains untouched. 
assert_eq!(Nominators::::get(101).unwrap().targets, vec![11, 21]); @@ -3486,10 +3638,18 @@ fn slashing_independent_of_disabling_validator() { // --- Slash without disabling --- // offence that slashes 50% of the bond (setup for next slash) - on_offence_in_era(&[offence_from(11, None)], &[Perbill::from_percent(50)], now, true); + on_offence_in_era( + &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], + &[Perbill::from_percent(50)], + now, + ); // offence that slashes 25% of the bond but does not disable - on_offence_in_era(&[offence_from(21, None)], &[Perbill::from_percent(25)], now, true); + on_offence_in_era( + &[OffenceDetails { offender: (21, exposure_21.clone()), reporters: vec![] }], + &[Perbill::from_percent(25)], + now, + ); // nomination remains untouched. assert_eq!(Nominators::::get(101).unwrap().targets, vec![11, 21]); @@ -3504,25 +3664,24 @@ fn slashing_independent_of_disabling_validator() { Event::PagedElectionProceeded { page: 0, result: Ok(5) }, Event::StakersElected, Event::EraPaid { era_index: 0, validator_payout: 11075, remainder: 33225 }, - Event::OffenceReported { + Event::SlashReported { validator: 11, fraction: Perbill::from_percent(0), - offence_era: 1 + slash_era: 1 }, - Event::OffenceReported { + Event::ValidatorDisabled { stash: 11 }, + Event::SlashReported { validator: 11, fraction: Perbill::from_percent(50), - offence_era: 1 + slash_era: 1 }, - Event::SlashComputed { offence_era: 1, slash_era: 1, offender: 11, page: 0 }, Event::Slashed { staker: 11, amount: 500 }, Event::Slashed { staker: 101, amount: 62 }, - Event::OffenceReported { + Event::SlashReported { validator: 21, fraction: Perbill::from_percent(25), - offence_era: 1 + slash_era: 1 }, - Event::SlashComputed { offence_era: 1, slash_era: 1, offender: 21, page: 0 }, Event::Slashed { staker: 21, amount: 250 }, Event::Slashed { staker: 101, amount: 94 } ] @@ -3558,14 +3717,25 @@ fn offence_threshold_doesnt_plan_new_era() { // we have 4 validators and an 
offending validator threshold of 1/3, // even if the third validator commits an offence a new era should not be forced - on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(50)], true); + + let exposure_11 = Staking::eras_stakers(ActiveEra::::get().unwrap().index, &11); + let exposure_21 = Staking::eras_stakers(ActiveEra::::get().unwrap().index, &21); + let exposure_31 = Staking::eras_stakers(ActiveEra::::get().unwrap().index, &31); + + on_offence_now( + &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], + &[Perbill::from_percent(50)], + ); // 11 should be disabled because the byzantine threshold is 1 assert!(is_disabled(11)); assert_eq!(ForceEra::::get(), Forcing::NotForcing); - on_offence_now(&[offence_from(21, None)], &[Perbill::zero()], true); + on_offence_now( + &[OffenceDetails { offender: (21, exposure_21.clone()), reporters: vec![] }], + &[Perbill::zero()], + ); // 21 should not be disabled because the number of disabled validators will be above the // byzantine threshold @@ -3573,7 +3743,10 @@ fn offence_threshold_doesnt_plan_new_era() { assert_eq!(ForceEra::::get(), Forcing::NotForcing); - on_offence_now(&[offence_from(31, None)], &[Perbill::zero()], true); + on_offence_now( + &[OffenceDetails { offender: (31, exposure_31.clone()), reporters: vec![] }], + &[Perbill::zero()], + ); // same for 31 assert!(!is_disabled(31)); @@ -3595,7 +3768,13 @@ fn disabled_validators_are_kept_disabled_for_whole_era() { assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41, 51, 201, 202]); assert_eq!(::SessionsPerEra::get(), 3); - on_offence_now(&[offence_from(21, None)], &[Perbill::from_percent(25)], true); + let exposure_11 = Staking::eras_stakers(ActiveEra::::get().unwrap().index, &11); + let exposure_21 = Staking::eras_stakers(ActiveEra::::get().unwrap().index, &21); + + on_offence_now( + &[OffenceDetails { offender: (21, exposure_21.clone()), reporters: vec![] }], + &[Perbill::from_percent(25)], + ); // nominations are not 
updated. assert_eq!(Nominators::::get(101).unwrap().targets, vec![11, 21]); @@ -3609,7 +3788,10 @@ fn disabled_validators_are_kept_disabled_for_whole_era() { assert!(is_disabled(21)); // validator 11 commits an offence - on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(25)], true); + on_offence_now( + &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], + &[Perbill::from_percent(25)], + ); // nominations are not updated. assert_eq!(Nominators::::get(101).unwrap().targets, vec![11, 21]); @@ -3725,9 +3907,14 @@ fn zero_slash_keeps_nominators() { mock::start_active_era(1); assert_eq!(asset::stakeable_balance::(&11), 1000); + + let exposure = Staking::eras_stakers(active_era(), &11); assert_eq!(asset::stakeable_balance::(&101), 2000); - on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(0)], true); + on_offence_now( + &[OffenceDetails { offender: (11, exposure.clone()), reporters: vec![] }], + &[Perbill::from_percent(0)], + ); assert_eq!(asset::stakeable_balance::(&11), 1000); assert_eq!(asset::stakeable_balance::(&101), 2000); @@ -4720,7 +4907,6 @@ fn bond_during_era_does_not_populate_legacy_claimed_rewards() { } #[test] -#[ignore] fn offences_weight_calculated_correctly() { ExtBuilder::default().nominate(true).build_and_execute(|| { // On offence with zero offenders: 4 Reads, 1 Write @@ -4743,7 +4929,7 @@ fn offences_weight_calculated_correctly() { >, > = (1..10) .map(|i| OffenceDetails { - offender: (i, ()), + offender: (i, Staking::eras_stakers(active_era(), &i)), reporters: vec![], }) .collect(); @@ -4757,7 +4943,10 @@ fn offences_weight_calculated_correctly() { ); // On Offence with one offenders, Applied - let one_offender = [offence_from(11, Some(1))]; + let one_offender = [OffenceDetails { + offender: (11, Staking::eras_stakers(active_era(), &11)), + reporters: vec![1], + }]; let n = 1; // Number of offenders let rw = 3 + 3 * n; // rw reads and writes @@ -6850,7 +7039,13 @@ mod staking_interface { 
#[test] fn do_withdraw_unbonded_with_wrong_slash_spans_works_as_expected() { ExtBuilder::default().build_and_execute(|| { - on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(100)], true); + on_offence_now( + &[OffenceDetails { + offender: (11, Staking::eras_stakers(active_era(), &11)), + reporters: vec![], + }], + &[Perbill::from_percent(100)], + ); assert_eq!(Staking::bonded(&11), Some(11)); @@ -7134,7 +7329,13 @@ mod staking_unchecked { let exposed_nominator = initial_exposure.others.first().unwrap().value; // 11 goes offline - on_offence_now(&[offence_from(11, None)], &[slash_percent], true); + on_offence_now( + &[OffenceDetails { + offender: (11, initial_exposure.clone()), + reporters: vec![], + }], + &[slash_percent], + ); let slash_amount = slash_percent * exposed_stake; let validator_share = @@ -7200,7 +7401,13 @@ mod staking_unchecked { let nominator_stake = Staking::ledger(101.into()).unwrap().total; // 11 goes offline - on_offence_now(&[offence_from(11, None)], &[slash_percent], true); + on_offence_now( + &[OffenceDetails { + offender: (11, initial_exposure.clone()), + reporters: vec![], + }], + &[slash_percent], + ); // both stakes must have been decreased to 0. 
assert_eq!(Staking::ledger(101.into()).unwrap().active, 0); @@ -8340,9 +8547,19 @@ fn reenable_lower_offenders_mock() { mock::start_active_era(1); assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41, 51, 201, 202]); + let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, &11); + let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, &21); + let exposure_31 = Staking::eras_stakers(Staking::active_era().unwrap().index, &31); + // offence with a low slash - on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(10)], true); - on_offence_now(&[offence_from(21, None)], &[Perbill::from_percent(20)], true); + on_offence_now( + &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], + &[Perbill::from_percent(10)], + ); + on_offence_now( + &[OffenceDetails { offender: (21, exposure_21.clone()), reporters: vec![] }], + &[Perbill::from_percent(20)], + ); // it does NOT affect the nominator. assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); @@ -8352,7 +8569,10 @@ fn reenable_lower_offenders_mock() { assert!(is_disabled(21)); // offence with a higher slash - on_offence_now(&[offence_from(31, None)], &[Perbill::from_percent(50)], true); + on_offence_now( + &[OffenceDetails { offender: (31, exposure_31.clone()), reporters: vec![] }], + &[Perbill::from_percent(50)], + ); // First offender is no longer disabled assert!(!is_disabled(11)); @@ -8367,28 +8587,29 @@ fn reenable_lower_offenders_mock() { Event::PagedElectionProceeded { page: 0, result: Ok(7) }, Event::StakersElected, Event::EraPaid { era_index: 0, validator_payout: 11075, remainder: 33225 }, - Event::OffenceReported { + Event::SlashReported { validator: 11, fraction: Perbill::from_percent(10), - offence_era: 1 + slash_era: 1 }, - Event::SlashComputed { offence_era: 1, slash_era: 1, offender: 11, page: 0 }, + Event::ValidatorDisabled { stash: 11 }, Event::Slashed { staker: 11, amount: 100 }, Event::Slashed { staker: 
101, amount: 12 }, - Event::OffenceReported { + Event::SlashReported { validator: 21, fraction: Perbill::from_percent(20), - offence_era: 1 + slash_era: 1 }, - Event::SlashComputed { offence_era: 1, slash_era: 1, offender: 21, page: 0 }, + Event::ValidatorDisabled { stash: 21 }, Event::Slashed { staker: 21, amount: 200 }, Event::Slashed { staker: 101, amount: 75 }, - Event::OffenceReported { + Event::SlashReported { validator: 31, fraction: Perbill::from_percent(50), - offence_era: 1 + slash_era: 1 }, - Event::SlashComputed { offence_era: 1, slash_era: 1, offender: 31, page: 0 }, + Event::ValidatorDisabled { stash: 31 }, + Event::ValidatorReenabled { stash: 11 }, Event::Slashed { staker: 31, amount: 250 }, ] ); @@ -8418,17 +8639,33 @@ fn do_not_reenable_higher_offenders_mock() { mock::start_active_era(1); assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41, 51, 201, 202]); + let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, &11); + let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, &21); + let exposure_31 = Staking::eras_stakers(Staking::active_era().unwrap().index, &31); + // offence with a major slash on_offence_now( - &[offence_from(11, None), offence_from(21, None), offence_from(31, None)], - &[Perbill::from_percent(50), Perbill::from_percent(50), Perbill::from_percent(10)], - true, + &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], + &[Perbill::from_percent(50)], + ); + on_offence_now( + &[OffenceDetails { offender: (21, exposure_21.clone()), reporters: vec![] }], + &[Perbill::from_percent(50)], ); // both validators should be disabled assert!(is_disabled(11)); assert!(is_disabled(21)); + // offence with a minor slash + on_offence_now( + &[OffenceDetails { offender: (31, exposure_31.clone()), reporters: vec![] }], + &[Perbill::from_percent(10)], + ); + + // First and second offenders are still disabled + assert!(is_disabled(11)); + assert!(is_disabled(21)); // New 
offender is not disabled as limit is reached and his prio is lower assert!(!is_disabled(31)); @@ -8438,29 +8675,28 @@ fn do_not_reenable_higher_offenders_mock() { Event::PagedElectionProceeded { page: 0, result: Ok(7) }, Event::StakersElected, Event::EraPaid { era_index: 0, validator_payout: 11075, remainder: 33225 }, - Event::OffenceReported { + Event::SlashReported { validator: 11, fraction: Perbill::from_percent(50), - offence_era: 1 + slash_era: 1 }, - Event::OffenceReported { + Event::ValidatorDisabled { stash: 11 }, + Event::Slashed { staker: 11, amount: 500 }, + Event::Slashed { staker: 101, amount: 62 }, + Event::SlashReported { validator: 21, fraction: Perbill::from_percent(50), - offence_era: 1 + slash_era: 1 }, - Event::OffenceReported { + Event::ValidatorDisabled { stash: 21 }, + Event::Slashed { staker: 21, amount: 500 }, + Event::Slashed { staker: 101, amount: 187 }, + Event::SlashReported { validator: 31, fraction: Perbill::from_percent(10), - offence_era: 1 + slash_era: 1 }, - Event::SlashComputed { offence_era: 1, slash_era: 1, offender: 31, page: 0 }, Event::Slashed { staker: 31, amount: 50 }, - Event::SlashComputed { offence_era: 1, slash_era: 1, offender: 21, page: 0 }, - Event::Slashed { staker: 21, amount: 500 }, - Event::Slashed { staker: 101, amount: 187 }, - Event::SlashComputed { offence_era: 1, slash_era: 1, offender: 11, page: 0 }, - Event::Slashed { staker: 11, amount: 500 }, - Event::Slashed { staker: 101, amount: 62 }, ] ); @@ -9152,588 +9388,177 @@ mod hold_migration { } } -mod paged_slashing { - use super::*; - use crate::slashing::OffenceRecord; - - #[test] - fn offence_processed_in_multi_block() { - // Ensure each page is processed only once. - ExtBuilder::default() - .has_stakers(false) - .slash_defer_duration(3) - .build_and_execute(|| { - let base_stake = 1000; - - // Create a validator: - bond_validator(11, base_stake); - assert_eq!(Validators::::count(), 1); - - // Track the total exposure of 11. 
- let mut exposure_counter = base_stake; - - // Exposure page size is 64, hence it creates 4 pages of exposure. - let expected_page_count = 4; - for i in 0..200 { - let bond_amount = base_stake + i as Balance; - bond_nominator(1000 + i, bond_amount, vec![11]); - // with multi page reward payout, payout exposure is same as total exposure. - exposure_counter += bond_amount; - } - - mock::start_active_era(1); - - assert_eq!( - ErasStakersOverview::::get(1, 11).expect("exposure should exist"), - PagedExposureMetadata { - total: exposure_counter, - own: base_stake, - page_count: expected_page_count, - nominator_count: 200, - } - ); - - mock::start_active_era(2); - System::reset_events(); - - // report an offence for 11 in era 1. - on_offence_in_era( - &[offence_from(11, None)], - &[Perbill::from_percent(10)], - 1, - false, - ); - - // ensure offence is queued. - assert_eq!( - staking_events_since_last_call().as_slice(), - vec![Event::OffenceReported { - validator: 11, - fraction: Perbill::from_percent(10), - offence_era: 1 - }] - ); - - // ensure offence queue has items. - assert_eq!( - OffenceQueue::::get(1, 11).unwrap(), - slashing::OffenceRecord { - reporter: None, - reported_era: 2, - // first page to be marked for processing. - exposure_page: expected_page_count - 1, - slash_fraction: Perbill::from_percent(10), - prior_slash_fraction: Perbill::zero(), - } - ); - - // The offence era is noted in the queue. - assert_eq!(OffenceQueueEras::::get().unwrap(), vec![1]); - - // ensure Processing offence is empty yet. - assert_eq!(ProcessingOffence::::get(), None); - - // ensure no unapplied slashes for era 4 (offence_era + slash_defer_duration). - assert_eq!(UnappliedSlashes::::iter_prefix(&4).collect::>().len(), 0); - - // Checkpoint 1: advancing to next block will compute the first page of slash. - advance_blocks(1); - - // ensure the last page of offence is processed. 
- // (offence is processed in reverse order of pages) - assert_eq!( - staking_events_since_last_call().as_slice(), - vec![Event::SlashComputed { - offence_era: 1, - slash_era: 4, - offender: 11, - page: expected_page_count - 1 - },] - ); - - // offender is removed from offence queue - assert_eq!(OffenceQueue::::get(1, 11), None); - - // offence era is removed from queue. - assert_eq!(OffenceQueueEras::::get(), None); - - // this offence is not completely processed yet, so it should be in processing. - assert_eq!( - ProcessingOffence::::get(), - Some(( - 1, - 11, - OffenceRecord { - reporter: None, - reported_era: 2, - // page 3 is processed, next page to be processed is 2. - exposure_page: 2, - slash_fraction: Perbill::from_percent(10), - prior_slash_fraction: Perbill::zero(), - } - )) - ); - - // unapplied slashes for era 4. - let slashes = UnappliedSlashes::::iter_prefix(&4).collect::>(); - // only one unapplied slash exists. - assert_eq!(slashes.len(), 1); - let (slash_key, unapplied_slash) = &slashes[0]; - // this is a unique key to ensure unapplied slash is not overwritten for multiple - // offence by offender in the same era. - assert_eq!(*slash_key, (11, Perbill::from_percent(10), expected_page_count - 1)); - - // validator own stake is only included in the first page. Since this is page 3, - // only nominators are slashed. - assert_eq!(unapplied_slash.own, 0); - assert_eq!(unapplied_slash.validator, 11); - assert_eq!(unapplied_slash.others.len(), 200 % 64); - - // Checkpoint 2: advancing to next block will compute the second page of slash. - advance_blocks(1); - - // offence queue still empty - assert_eq!(OffenceQueue::::get(1, 11), None); - assert_eq!(OffenceQueueEras::::get(), None); - - // processing offence points to next page. - assert_eq!( - ProcessingOffence::::get(), - Some(( - 1, - 11, - OffenceRecord { - reporter: None, - reported_era: 2, - // page 2 is processed, next page to be processed is 1. 
- exposure_page: 1, - slash_fraction: Perbill::from_percent(10), - prior_slash_fraction: Perbill::zero(), - } - )) - ); - - // there are two unapplied slashes for era 4. - assert_eq!(UnappliedSlashes::::iter_prefix(&4).collect::>().len(), 2); - - // ensure the last page of offence is processed. - // (offence is processed in reverse order of pages) - assert_eq!( - staking_events_since_last_call().as_slice(), - vec![Event::SlashComputed { - offence_era: 1, - slash_era: 4, - offender: 11, - page: expected_page_count - 2 - },] - ); - - // Checkpoint 3: advancing to two more blocks will complete the processing of the - // reported offence - advance_blocks(2); - - // no processing offence. - assert!(ProcessingOffence::::get().is_none()); - // total of 4 unapplied slash. - assert_eq!(UnappliedSlashes::::iter_prefix(&4).collect::>().len(), 4); - - // Checkpoint 4: lets verify the application of slashes in multiple blocks. - // advance to era 4. - mock::start_active_era(4); - // slashes are not applied just yet. From next blocks, they will be applied. - assert_eq!(UnappliedSlashes::::iter_prefix(&4).collect::>().len(), 4); - - // advance to next block. - advance_blocks(1); - // 1 slash is applied. - assert_eq!(UnappliedSlashes::::iter_prefix(&4).collect::>().len(), 3); - - // advance two blocks. - advance_blocks(2); - // 2 more slashes are applied. - assert_eq!(UnappliedSlashes::::iter_prefix(&4).collect::>().len(), 1); - - // advance one more block. - advance_blocks(1); - // all slashes are applied. - assert_eq!(UnappliedSlashes::::iter_prefix(&4).collect::>().len(), 0); - - // ensure all stakers are slashed correctly. 
- assert_eq!(asset::staked::(&11), 1000 - 100); - - for i in 0..200 { - let original_stake = 1000 + i as Balance; - let expected_slash = Perbill::from_percent(10) * original_stake; - assert_eq!(asset::staked::(&(1000 + i)), original_stake - expected_slash); - } - }) - } - - #[test] - fn offence_discarded_correctly() { - ExtBuilder::default().slash_defer_duration(3).build_and_execute(|| { - start_active_era(2); - - // Scenario 1: 11 commits an offence in era 2. - on_offence_in_era(&[offence_from(11, None)], &[Perbill::from_percent(10)], 2, false); - - // offence is queued, not processed yet. - let queued_offence_one = OffenceQueue::::get(2, 11).unwrap(); - assert_eq!(queued_offence_one.slash_fraction, Perbill::from_percent(10)); - assert_eq!(queued_offence_one.prior_slash_fraction, Perbill::zero()); - assert_eq!(OffenceQueueEras::::get().unwrap(), vec![2]); - - // Scenario 1A: 11 commits a second offence in era 2 with **lower** slash fraction than - // the previous offence. - on_offence_in_era(&[offence_from(11, None)], &[Perbill::from_percent(5)], 2, false); - - // the second offence is discarded. No change in the queue. - assert_eq!(OffenceQueue::::get(2, 11).unwrap(), queued_offence_one); - - // Scenario 1B: 11 commits a second offence in era 2 with **higher** slash fraction than - // the previous offence. - on_offence_in_era(&[offence_from(11, None)], &[Perbill::from_percent(15)], 2, false); - - // the second offence overwrites the first offence. - let overwritten_offence = OffenceQueue::::get(2, 11).unwrap(); - assert!(overwritten_offence.slash_fraction > queued_offence_one.slash_fraction); - assert_eq!(overwritten_offence.slash_fraction, Perbill::from_percent(15)); - assert_eq!(overwritten_offence.prior_slash_fraction, Perbill::zero()); - assert_eq!(OffenceQueueEras::::get().unwrap(), vec![2]); - - // Scenario 2: 11 commits another offence in era 2, but after the previous offence is - // processed. 
- advance_blocks(1); - assert!(OffenceQueue::::get(2, 11).is_none()); - assert!(OffenceQueueEras::::get().is_none()); - // unapplied slash is created for the offence. - assert!(UnappliedSlashes::::contains_key( - 2 + 3, - (11, Perbill::from_percent(15), 0) - )); - - // Scenario 2A: offence has **lower** slash fraction than the previous offence. - on_offence_in_era(&[offence_from(11, None)], &[Perbill::from_percent(14)], 2, false); - // offence is discarded. - assert!(OffenceQueue::::get(2, 11).is_none()); - assert!(OffenceQueueEras::::get().is_none()); - - // Scenario 2B: offence has **higher** slash fraction than the previous offence. - on_offence_in_era(&[offence_from(11, None)], &[Perbill::from_percent(16)], 2, false); - // process offence - advance_blocks(1); - // there are now two slash records for 11, for era 5, with the newer one only slashing - // the diff between slash fractions of 16 and 15. - let slash_one = - UnappliedSlashes::::get(2 + 3, (11, Perbill::from_percent(15), 0)).unwrap(); - let slash_two = - UnappliedSlashes::::get(2 + 3, (11, Perbill::from_percent(16), 0)).unwrap(); - assert!(slash_one.own > slash_two.own); - }); - } - - #[test] - fn offence_eras_queued_correctly() { - ExtBuilder::default().build_and_execute(|| { - // 11 and 21 are validators. - assert_eq!(Staking::status(&11).unwrap(), StakerStatus::Validator); - assert_eq!(Staking::status(&21).unwrap(), StakerStatus::Validator); - - start_active_era(2); - - // 11 and 21 commits offence in era 2. - on_offence_in_era( - &[offence_from(11, None), offence_from(21, None)], - &[Perbill::from_percent(10), Perbill::from_percent(20)], - 2, - false, - ); - - // 11 and 21 commits offence in era 1 but reported after the era 2 offence. - on_offence_in_era( - &[offence_from(11, None), offence_from(21, None)], - &[Perbill::from_percent(10), Perbill::from_percent(20)], - 1, - false, - ); - - // queued offence eras are sorted. 
- assert_eq!(OffenceQueueEras::::get().unwrap(), vec![1, 2]); - - // next two blocks, the offence in era 1 is processed. - advance_blocks(2); - - // only era 2 is left in the queue. - assert_eq!(OffenceQueueEras::::get().unwrap(), vec![2]); - - // next block, the offence in era 2 is processed. - advance_blocks(1); +// Tests for manual_slash extrinsic +// Covers the following scenarios: +// 1. Basic slashing functionality - verifies root origin slashing works correctly +// 2. Slashing with a lower percentage - should have no effect +// 3. Slashing with a higher percentage - should increase the slash amount +// 4. Slashing in non-existent eras - should fail with an error +// 5. Slashing in previous eras - should work within history depth +#[test] +fn manual_slashing_works() { + ExtBuilder::default().validator_count(2).build_and_execute(|| { + // setup: Start with era 0 + start_active_era(0); + + let validator_stash = 11; + let initial_balance = Staking::slashable_balance_of(&validator_stash); + assert!(initial_balance > 0, "Validator must have stake to be slashed"); + + // scenario 1: basic slashing works + // this verifies that the manual_slash extrinsic properly slashes a validator when + // called with root origin + let current_era = CurrentEra::::get().unwrap(); + let slash_fraction_1 = Perbill::from_percent(25); - // era still exist in the queue. - assert_eq!(OffenceQueueEras::::get().unwrap(), vec![2]); + // only root can call this function + assert_noop!( + Staking::manual_slash( + RuntimeOrigin::signed(10), + validator_stash, + current_era, + slash_fraction_1 + ), + BadOrigin + ); - // next block, the era 2 is processed. - advance_blocks(1); + // root can slash + assert_ok!(Staking::manual_slash( + RuntimeOrigin::root(), + validator_stash, + current_era, + slash_fraction_1 + )); - // queue is empty. 
- assert_eq!(OffenceQueueEras::::get(), None); - }); - } - #[test] - fn non_deferred_slash_applied_instantly() { - ExtBuilder::default().build_and_execute(|| { - mock::start_active_era(2); - let validator_stake = asset::staked::(&11); - let slash_fraction = Perbill::from_percent(10); - let expected_slash = slash_fraction * validator_stake; - System::reset_events(); + // process offence + advance_blocks(1); - // report an offence for 11 in era 1. - on_offence_in_era(&[offence_from(11, None)], &[slash_fraction], 1, false); + // check if balance was slashed correctly (25%) + let balance_after_first_slash = Staking::slashable_balance_of(&validator_stash); + let expected_balance_1 = initial_balance - (initial_balance / 4); // 25% slash - // ensure offence is queued. - assert_eq!( - staking_events_since_last_call().as_slice(), - vec![Event::OffenceReported { - validator: 11, - fraction: Perbill::from_percent(10), - offence_era: 1 - }] - ); + assert!( + balance_after_first_slash <= expected_balance_1 && + balance_after_first_slash >= expected_balance_1 - 5, + "First slash was not applied correctly. Expected around {}, got {}", + expected_balance_1, + balance_after_first_slash + ); - // process offence - advance_blocks(1); + // clear events from first slash + System::reset_events(); - // ensure slash is computed and applied. - assert_eq!( - staking_events_since_last_call().as_slice(), - vec![ - Event::SlashComputed { offence_era: 1, slash_era: 1, offender: 11, page: 0 }, - Event::Slashed { staker: 11, amount: expected_slash }, - // this is the nominator of 11. 
- Event::Slashed { staker: 101, amount: 12 }, - ] - ); + // scenario 2: slashing with a smaller fraction has no effect + // when a validator has already been slashed by a higher percentage, + // attempting to slash with a lower percentage should have no effect + let slash_fraction_2 = Perbill::from_percent(10); // Smaller than 25% + assert_ok!(Staking::manual_slash( + RuntimeOrigin::root(), + validator_stash, + current_era, + slash_fraction_2 + )); - // ensure validator is slashed. - assert_eq!(asset::staked::(&11), validator_stake - expected_slash); + // balance should not change because we already slashed with a higher percentage + let balance_after_second_slash = Staking::slashable_balance_of(&validator_stash); + assert_eq!( + balance_after_first_slash, balance_after_second_slash, + "Balance changed after slashing with smaller fraction" + ); + + // with the new implementation, we should see an OffenceReported event + // but no Slashed event yet as the slash will be queued + let has_offence_reported = System::events().iter().any(|record| { + matches!( + record.event, + RuntimeEvent::Staking(Event::::OffenceReported { + validator, + fraction, + .. + }) if validator == validator_stash && fraction == slash_fraction_2 + ) }); - } + assert!(has_offence_reported, "No OffenceReported event was emitted"); - #[test] - fn validator_with_no_exposure_slashed() { - ExtBuilder::default().build_and_execute(|| { - let validator_stake = asset::staked::(&11); - let slash_fraction = Perbill::from_percent(10); - let expected_slash = slash_fraction * validator_stake; - - // only 101 nominates 11, lets remove them. - assert_ok!(Staking::nominate(RuntimeOrigin::signed(101), vec![21])); - - start_active_era(2); - // ensure validator has no exposure. - assert_eq!(ErasStakersOverview::::get(2, 11).unwrap().page_count, 0,); - - // clear events - System::reset_events(); - - // report an offence for 11. 
- on_offence_now(&[offence_from(11, None)], &[slash_fraction], true); - - // ensure validator is slashed. - assert_eq!(asset::staked::(&11), validator_stake - expected_slash); - assert_eq!( - staking_events_since_last_call().as_slice(), - vec![ - Event::OffenceReported { - offence_era: 2, - validator: 11, - fraction: slash_fraction - }, - Event::SlashComputed { offence_era: 2, slash_era: 2, offender: 11, page: 0 }, - Event::Slashed { staker: 11, amount: expected_slash }, - ] - ); + // verify no Slashed event was emitted yet (since it's queued for later processing) + let no_slashed_events = !System::events().iter().any(|record| { + matches!(record.event, RuntimeEvent::Staking(Event::::Slashed { .. })) }); - } - - // Tests for manual_slash extrinsic - // Covers the following scenarios: - // 1. Basic slashing functionality - verifies root origin slashing works correctly - // 2. Slashing with a lower percentage - should have no effect - // 3. Slashing with a higher percentage - should increase the slash amount - // 4. Slashing in non-existent eras - should fail with an error - // 5. 
Slashing in previous eras - should work within history depth - #[test] - fn manual_slashing_works() { - ExtBuilder::default().validator_count(2).build_and_execute(|| { - // setup: Start with era 0 - start_active_era(0); - - let validator_stash = 11; - let initial_balance = Staking::slashable_balance_of(&validator_stash); - assert!(initial_balance > 0, "Validator must have stake to be slashed"); - - // scenario 1: basic slashing works - // this verifies that the manual_slash extrinsic properly slashes a validator when - // called with root origin - let current_era = CurrentEra::::get().unwrap(); - let slash_fraction_1 = Perbill::from_percent(25); - - // only root can call this function - assert_noop!( - Staking::manual_slash( - RuntimeOrigin::signed(10), - validator_stash, - current_era, - slash_fraction_1 - ), - BadOrigin - ); - - // root can slash - assert_ok!(Staking::manual_slash( - RuntimeOrigin::root(), - validator_stash, - current_era, - slash_fraction_1 - )); + assert!(no_slashed_events, "A Slashed event was incorrectly emitted immediately"); - // process offence - advance_blocks(1); - - // check if balance was slashed correctly (25%) - let balance_after_first_slash = Staking::slashable_balance_of(&validator_stash); - let expected_balance_1 = initial_balance - (initial_balance / 4); // 25% slash + // clear events again + System::reset_events(); - assert!( - balance_after_first_slash <= expected_balance_1 && - balance_after_first_slash >= expected_balance_1 - 5, - "First slash was not applied correctly. 
Expected around {}, got {}", - expected_balance_1, - balance_after_first_slash - ); + // scenario 3: slashing with a larger fraction works + // when a validator is slashed with a higher percentage than previous slashes, + // their stake should be further reduced to match the new larger slash percentage + let slash_fraction_3 = Perbill::from_percent(50); // Larger than 25% + assert_ok!(Staking::manual_slash( + RuntimeOrigin::root(), + validator_stash, + current_era, + slash_fraction_3 + )); - // clear events from first slash - System::reset_events(); + // process offence + advance_blocks(1); - // scenario 2: slashing with a smaller fraction has no effect - // when a validator has already been slashed by a higher percentage, - // attempting to slash with a lower percentage should have no effect - let slash_fraction_2 = Perbill::from_percent(10); // Smaller than 25% - assert_ok!(Staking::manual_slash( - RuntimeOrigin::root(), - validator_stash, - current_era, - slash_fraction_2 - )); + // check if balance was further slashed (from 75% to 50% of original) + let balance_after_third_slash = Staking::slashable_balance_of(&validator_stash); + let expected_balance_3 = initial_balance / 2; // 50% of original - // balance should not change because we already slashed with a higher percentage - let balance_after_second_slash = Staking::slashable_balance_of(&validator_stash); - assert_eq!( - balance_after_first_slash, balance_after_second_slash, - "Balance changed after slashing with smaller fraction" - ); + assert!( + balance_after_third_slash <= expected_balance_3 && + balance_after_third_slash >= expected_balance_3 - 5, + "Third slash was not applied correctly. 
Expected around {}, got {}", + expected_balance_3, + balance_after_third_slash + ); - // with the new implementation, we should see an OffenceReported event - // but no Slashed event yet as the slash will be queued - let has_offence_reported = System::events().iter().any(|record| { + // verify a Slashed event was emitted + assert!( + System::events().iter().any(|record| { matches!( record.event, - RuntimeEvent::Staking(Event::::OffenceReported { - validator, - fraction, - .. - }) if validator == validator_stash && fraction == slash_fraction_2 + RuntimeEvent::Staking(Event::::Slashed { staker, .. }) + if staker == validator_stash ) - }); - assert!(has_offence_reported, "No OffenceReported event was emitted"); - - // verify no Slashed event was emitted yet (since it's queued for later processing) - let no_slashed_events = !System::events().iter().any(|record| { - matches!(record.event, RuntimeEvent::Staking(Event::::Slashed { .. })) - }); - assert!(no_slashed_events, "A Slashed event was incorrectly emitted immediately"); - - // clear events again - System::reset_events(); - - // scenario 3: slashing with a larger fraction works - // when a validator is slashed with a higher percentage than previous slashes, - // their stake should be further reduced to match the new larger slash percentage - let slash_fraction_3 = Perbill::from_percent(50); // Larger than 25% - assert_ok!(Staking::manual_slash( - RuntimeOrigin::root(), - validator_stash, - current_era, - slash_fraction_3 - )); - - // process offence - advance_blocks(1); - - // check if balance was further slashed (from 75% to 50% of original) - let balance_after_third_slash = Staking::slashable_balance_of(&validator_stash); - let expected_balance_3 = initial_balance / 2; // 50% of original - - assert!( - balance_after_third_slash <= expected_balance_3 && - balance_after_third_slash >= expected_balance_3 - 5, - "Third slash was not applied correctly. 
Expected around {}, got {}", - expected_balance_3, - balance_after_third_slash - ); - - // verify a Slashed event was emitted - assert!( - System::events().iter().any(|record| { - matches!( - record.event, - RuntimeEvent::Staking(Event::::Slashed { staker, .. }) - if staker == validator_stash - ) - }), - "No Slashed event was emitted after effective slash" - ); + }), + "No Slashed event was emitted after effective slash" + ); - // scenario 4: slashing in a non-existent era fails - // the manual_slash extrinsic should validate that the era exists within history depth - assert_noop!( - Staking::manual_slash( - RuntimeOrigin::root(), - validator_stash, - 999, - slash_fraction_1 - ), - Error::::InvalidEraToReward - ); + // scenario 4: slashing in a non-existent era fails + // the manual_slash extrinsic should validate that the era exists within history depth + assert_noop!( + Staking::manual_slash(RuntimeOrigin::root(), validator_stash, 999, slash_fraction_1), + Error::::InvalidEraToReward + ); - // move to next era - start_active_era(1); + // move to next era + start_active_era(1); - // scenario 5: slashing in previous era still works - // as long as the era is within history depth, validators can be slashed for past eras - assert_ok!(Staking::manual_slash( - RuntimeOrigin::root(), - validator_stash, - 0, - Perbill::from_percent(75) - )); + // scenario 5: slashing in previous era still works + // as long as the era is within history depth, validators can be slashed for past eras + assert_ok!(Staking::manual_slash( + RuntimeOrigin::root(), + validator_stash, + 0, + Perbill::from_percent(75) + )); - // process offence - advance_blocks(1); + // process offence + advance_blocks(1); - // check balance was further reduced - let balance_after_fifth_slash = Staking::slashable_balance_of(&validator_stash); - let expected_balance_5 = initial_balance / 4; // 25% of original (75% slashed) + // check balance was further reduced + let balance_after_fifth_slash = 
Staking::slashable_balance_of(&validator_stash); + let expected_balance_5 = initial_balance / 4; // 25% of original (75% slashed) - assert!( - balance_after_fifth_slash <= expected_balance_5 && - balance_after_fifth_slash >= expected_balance_5 - 5, - "Fifth slash was not applied correctly. Expected around {}, got {}", - expected_balance_5, - balance_after_fifth_slash - ); - }) - } + assert!( + balance_after_fifth_slash <= expected_balance_5 && + balance_after_fifth_slash >= expected_balance_5 - 5, + "Fifth slash was not applied correctly. Expected around {}, got {}", + expected_balance_5, + balance_after_fifth_slash + ); + }) } diff --git a/substrate/frame/staking/src/weights.rs b/substrate/frame/staking/src/weights.rs index 1ccb534e4c50f..660d817bf30e2 100644 --- a/substrate/frame/staking/src/weights.rs +++ b/substrate/frame/staking/src/weights.rs @@ -106,7 +106,6 @@ pub trait WeightInfo { fn set_min_commission() -> Weight; fn restore_ledger() -> Weight; fn migrate_currency() -> Weight; - fn apply_slash() -> Weight; fn manual_slash() -> Weight; } @@ -871,33 +870,6 @@ impl WeightInfo for SubstrateWeight { .saturating_add(T::DbWeight::get().reads(6_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)) } - /// Storage: `Staking::ActiveEra` (r:1 w:0) - /// Proof: `Staking::ActiveEra` (`max_values`: Some(1), `max_size`: Some(13), added: 508, mode: `MaxEncodedLen`) - /// Storage: `Staking::UnappliedSlashes` (r:1 w:1) - /// Proof: `Staking::UnappliedSlashes` (`max_values`: None, `max_size`: Some(1694), added: 4169, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:33 w:0) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) - /// Storage: `Staking::Ledger` (r:33 w:33) - /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) - /// Storage: `NominationPools::ReversePoolIdLookup` (r:33 w:0) - /// Proof: `NominationPools::ReversePoolIdLookup` 
(`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) - /// Storage: `DelegatedStaking::Agents` (r:33 w:33) - /// Proof: `DelegatedStaking::Agents` (`max_values`: None, `max_size`: Some(120), added: 2595, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:33 w:33) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Staking::VirtualStakers` (r:33 w:0) - /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) - /// Storage: `Balances::Holds` (r:33 w:33) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `MaxEncodedLen`) - fn apply_slash() -> Weight { - // Proof Size summary in bytes: - // Measured: `14542` - // Estimated: `118668` - // Minimum execution time: 1_628_472_000 picoseconds. - Weight::from_parts(1_647_487_000, 118668) - .saturating_add(T::DbWeight::get().reads(233_u64)) - .saturating_add(T::DbWeight::get().writes(133_u64)) - } /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Staking::ErasStartSessionIndex` (r:1 w:0) @@ -1689,33 +1661,6 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(6_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } - /// Storage: `Staking::ActiveEra` (r:1 w:0) - /// Proof: `Staking::ActiveEra` (`max_values`: Some(1), `max_size`: Some(13), added: 508, mode: `MaxEncodedLen`) - /// Storage: `Staking::UnappliedSlashes` (r:1 w:1) - /// Proof: `Staking::UnappliedSlashes` (`max_values`: None, `max_size`: Some(1694), added: 4169, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:33 w:0) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) - /// Storage: `Staking::Ledger` (r:33 w:33) - /// Proof: `Staking::Ledger` 
(`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) - /// Storage: `NominationPools::ReversePoolIdLookup` (r:33 w:0) - /// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) - /// Storage: `DelegatedStaking::Agents` (r:33 w:33) - /// Proof: `DelegatedStaking::Agents` (`max_values`: None, `max_size`: Some(120), added: 2595, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:33 w:33) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Staking::VirtualStakers` (r:33 w:0) - /// Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) - /// Storage: `Balances::Holds` (r:33 w:33) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `MaxEncodedLen`) - fn apply_slash() -> Weight { - // Proof Size summary in bytes: - // Measured: `14542` - // Estimated: `118668` - // Minimum execution time: 1_628_472_000 picoseconds. - Weight::from_parts(1_647_487_000, 118668) - .saturating_add(RocksDbWeight::get().reads(233_u64)) - .saturating_add(RocksDbWeight::get().writes(133_u64)) - } /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Staking::ErasStartSessionIndex` (r:1 w:0) From 1ecb0c4cdafb846a781a56ef6bf1e646efa1c3c6 Mon Sep 17 00:00:00 2001 From: Ankan Date: Sun, 16 Mar 2025 23:51:56 +0100 Subject: [PATCH 03/38] Revert "[AHM] Multi-block staking election pallet (#7282)" This reverts commit a025562b65f71dab8c2a16e027ba6efe4972818a. 
--- .github/workflows/runtimes-matrix.json | 2 +- .github/workflows/tests.yml | 2 +- Cargo.lock | 28 - Cargo.toml | 1 - .../chains/relays/westend/src/genesis.rs | 10 +- polkadot/runtime/common/src/try_runtime.rs | 2 +- .../src/disputes/slashing/benchmarking.rs | 4 - polkadot/runtime/test-runtime/src/lib.rs | 11 +- .../westend/src/genesis_config_presets.rs | 12 +- polkadot/runtime/westend/src/lib.rs | 23 +- .../src/weights/pallet_fast_unstake.rs | 28 +- .../westend/src/weights/pallet_staking.rs | 164 +- prdoc/pr_6034.prdoc | 25 - prdoc/pr_6689.prdoc | 7 +- prdoc/pr_7042.prdoc | 4 +- prdoc/pr_7282.prdoc | 72 - .../frame-umbrella-weight-template.hbs | 17 - substrate/.maintain/frame-weight-template.hbs | 17 - substrate/bin/node/cli/Cargo.toml | 3 - substrate/bin/node/cli/src/chain_spec.rs | 92 +- .../cli/tests/res/default_genesis_config.json | 1 - substrate/bin/node/runtime/Cargo.toml | 6 - substrate/bin/node/runtime/src/constants.rs | 3 +- substrate/bin/node/runtime/src/lib.rs | 333 +-- substrate/bin/node/testing/src/genesis.rs | 5 +- substrate/frame/babe/src/mock.rs | 10 +- .../bags-list/remote-tests/src/snapshot.rs | 8 +- substrate/frame/bags-list/src/benchmarks.rs | 119 - substrate/frame/bags-list/src/lib.rs | 2 +- substrate/frame/bags-list/src/list/mod.rs | 11 +- substrate/frame/beefy/Cargo.toml | 1 - substrate/frame/beefy/src/mock.rs | 15 +- substrate/frame/beefy/src/tests.rs | 2 - substrate/frame/benchmarking/src/lib.rs | 2 +- substrate/frame/delegated-staking/src/mock.rs | 10 +- .../election-provider-multi-block/Cargo.toml | 84 - .../src/benchmarking.rs | 170 -- .../src/helpers.rs | 227 -- .../src/mock/mod.rs | 700 ------ .../src/mock/signed.rs | 255 --- .../src/mock/staking.rs | 238 -- .../src/mock/weight_info.rs | 85 - .../src/signed/benchmarking.rs | 171 -- .../src/signed/mod.rs | 858 ------- .../src/signed/tests.rs | 554 ----- .../src/unsigned/benchmarking.rs | 79 - .../src/unsigned/miner.rs | 1972 ----------------- .../src/unsigned/mod.rs | 633 ------ 
.../src/verifier/benchmarking.rs | 234 -- .../src/verifier/impls.rs | 955 -------- .../src/verifier/tests.rs | 1266 ----------- .../src/weights/measured/mod.rs | 21 - .../pallet_election_provider_multi_block.rs | 364 --- ...et_election_provider_multi_block_signed.rs | 272 --- ..._election_provider_multi_block_unsigned.rs | 153 -- ..._election_provider_multi_block_verifier.rs | 361 --- .../src/weights/mel/mod.rs | 21 - .../pallet_election_provider_multi_block.rs | 362 --- ...et_election_provider_multi_block_signed.rs | 270 --- ..._election_provider_multi_block_unsigned.rs | 151 -- ..._election_provider_multi_block_verifier.rs | 359 --- .../src/weights/mod.rs | 22 - .../src/weights/zero.rs | 89 - .../src/benchmarking.rs | 21 +- .../election-provider-multi-phase/src/lib.rs | 315 ++- .../election-provider-multi-phase/src/mock.rs | 62 +- .../src/signed.rs | 8 +- .../src/unsigned.rs | 285 +-- .../test-staking-e2e/src/mock.rs | 19 +- .../election-provider-support/Cargo.toml | 3 - .../benchmarking/src/inner.rs | 2 +- .../solution-type/fuzzer/src/compact.rs | 3 +- .../solution-type/src/codec.rs | 1 - .../solution-type/src/single_page.rs | 90 +- .../election-provider-support/src/bounds.rs | 10 - .../election-provider-support/src/lib.rs | 624 +----- .../election-provider-support/src/onchain.rs | 315 +-- .../election-provider-support/src/tests.rs | 30 +- .../election-provider-support/src/traits.rs | 23 - .../elections-phragmen/src/benchmarking.rs | 12 +- substrate/frame/fast-unstake/src/mock.rs | 26 +- substrate/frame/grandpa/Cargo.toml | 1 - substrate/frame/grandpa/src/mock.rs | 14 +- .../nomination-pools/benchmarking/src/mock.rs | 2 +- .../test-delegate-stake/src/mock.rs | 2 +- .../frame/offences/benchmarking/src/mock.rs | 5 +- substrate/frame/root-offences/src/mock.rs | 10 +- .../frame/session/benchmarking/src/inner.rs | 2 - .../frame/session/benchmarking/src/mock.rs | 5 +- substrate/frame/session/src/lib.rs | 10 +- substrate/frame/staking/Cargo.toml | 9 +- 
substrate/frame/staking/src/benchmarking.rs | 316 +-- substrate/frame/staking/src/lib.rs | 259 +-- substrate/frame/staking/src/migrations.rs | 28 +- substrate/frame/staking/src/mock.rs | 135 +- substrate/frame/staking/src/pallet/impls.rs | 655 ++---- substrate/frame/staking/src/pallet/mod.rs | 391 ++-- substrate/frame/staking/src/tests.rs | 616 +++-- .../frame/staking/src/tests_paged_election.rs | 971 -------- substrate/frame/staking/src/weights.rs | 199 +- .../construct_runtime/expand/outer_enums.rs | 2 +- .../procedural/src/pallet/expand/event.rs | 2 +- substrate/frame/support/src/lib.rs | 1 - .../deprecated_where_block.stderr | 16 +- .../primitives/npos-elections/src/helpers.rs | 28 +- .../primitives/npos-elections/src/lib.rs | 78 +- .../primitives/npos-elections/src/phragmen.rs | 4 +- .../primitives/npos-elections/src/phragmms.rs | 2 +- substrate/primitives/staking/src/lib.rs | 190 +- substrate/primitives/staking/src/offence.rs | 12 +- .../benchmarking-cli/src/pallet/command.rs | 6 +- .../frame/benchmarking-cli/src/pallet/mod.rs | 2 +- umbrella/Cargo.toml | 245 +- umbrella/src/lib.rs | 4 - 114 files changed, 1859 insertions(+), 16190 deletions(-) delete mode 100644 prdoc/pr_6034.prdoc delete mode 100644 prdoc/pr_7282.prdoc delete mode 100644 substrate/frame/election-provider-multi-block/Cargo.toml delete mode 100644 substrate/frame/election-provider-multi-block/src/benchmarking.rs delete mode 100644 substrate/frame/election-provider-multi-block/src/helpers.rs delete mode 100644 substrate/frame/election-provider-multi-block/src/mock/mod.rs delete mode 100644 substrate/frame/election-provider-multi-block/src/mock/signed.rs delete mode 100644 substrate/frame/election-provider-multi-block/src/mock/staking.rs delete mode 100644 substrate/frame/election-provider-multi-block/src/mock/weight_info.rs delete mode 100644 substrate/frame/election-provider-multi-block/src/signed/benchmarking.rs delete mode 100644 
substrate/frame/election-provider-multi-block/src/signed/mod.rs delete mode 100644 substrate/frame/election-provider-multi-block/src/signed/tests.rs delete mode 100644 substrate/frame/election-provider-multi-block/src/unsigned/benchmarking.rs delete mode 100644 substrate/frame/election-provider-multi-block/src/unsigned/miner.rs delete mode 100644 substrate/frame/election-provider-multi-block/src/unsigned/mod.rs delete mode 100644 substrate/frame/election-provider-multi-block/src/verifier/benchmarking.rs delete mode 100644 substrate/frame/election-provider-multi-block/src/verifier/impls.rs delete mode 100644 substrate/frame/election-provider-multi-block/src/verifier/tests.rs delete mode 100644 substrate/frame/election-provider-multi-block/src/weights/measured/mod.rs delete mode 100644 substrate/frame/election-provider-multi-block/src/weights/measured/pallet_election_provider_multi_block.rs delete mode 100644 substrate/frame/election-provider-multi-block/src/weights/measured/pallet_election_provider_multi_block_signed.rs delete mode 100644 substrate/frame/election-provider-multi-block/src/weights/measured/pallet_election_provider_multi_block_unsigned.rs delete mode 100644 substrate/frame/election-provider-multi-block/src/weights/measured/pallet_election_provider_multi_block_verifier.rs delete mode 100644 substrate/frame/election-provider-multi-block/src/weights/mel/mod.rs delete mode 100644 substrate/frame/election-provider-multi-block/src/weights/mel/pallet_election_provider_multi_block.rs delete mode 100644 substrate/frame/election-provider-multi-block/src/weights/mel/pallet_election_provider_multi_block_signed.rs delete mode 100644 substrate/frame/election-provider-multi-block/src/weights/mel/pallet_election_provider_multi_block_unsigned.rs delete mode 100644 substrate/frame/election-provider-multi-block/src/weights/mel/pallet_election_provider_multi_block_verifier.rs delete mode 100644 substrate/frame/election-provider-multi-block/src/weights/mod.rs delete mode 
100644 substrate/frame/election-provider-multi-block/src/weights/zero.rs delete mode 100644 substrate/frame/staking/src/tests_paged_election.rs diff --git a/.github/workflows/runtimes-matrix.json b/.github/workflows/runtimes-matrix.json index e560ed9fc6dd3..ce206c0781572 100644 --- a/.github/workflows/runtimes-matrix.json +++ b/.github/workflows/runtimes-matrix.json @@ -6,7 +6,7 @@ "header": "substrate/HEADER-APACHE2", "template": "substrate/.maintain/frame-weight-template.hbs", "bench_features": "runtime-benchmarks", - "bench_flags": "--exclude-pallets=pallet_xcm,pallet_xcm_benchmarks::fungible,pallet_xcm_benchmarks::generic,pallet_nomination_pools,pallet_remark,pallet_transaction_storage,pallet_election_provider_multi_block,pallet_election_provider_multi_block::signed,pallet_election_provider_multi_block::unsigned,pallet_election_provider_multi_block::verifier", + "bench_flags": "--genesis-builder-policy=none --exclude-pallets=pallet_xcm,pallet_xcm_benchmarks::fungible,pallet_xcm_benchmarks::generic,pallet_nomination_pools,pallet_remark,pallet_transaction_storage", "uri": null, "is_relay": false }, diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index a178ea02f0c5f..7fd960c6471d7 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -35,7 +35,7 @@ jobs: - name: Checkout uses: actions/checkout@v4 - name: script - run: forklift cargo run --locked --release -p staging-node-cli --bin substrate-node --features runtime-benchmarks --quiet -- benchmark pallet --chain dev --pallet "*" --exclude-pallets=pallet_election_provider_multi_block,pallet_election_provider_multi_block::signed,pallet_election_provider_multi_block::unsigned,pallet_election_provider_multi_block::verifier --extrinsic "*" --steps 2 --repeat 1 --quiet + run: forklift cargo run --locked --release -p staging-node-cli --bin substrate-node --features runtime-benchmarks --quiet -- benchmark pallet --chain dev --pallet "*" --extrinsic "*" --steps 2 --repeat 1 
--quiet # cf https://github.com/paritytech/polkadot-sdk/issues/1652 test-syscalls: diff --git a/Cargo.lock b/Cargo.lock index 1502f1576061d..d8500328ef8fd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6615,7 +6615,6 @@ dependencies = [ "sp-io 30.0.0", "sp-npos-elections", "sp-runtime 31.0.1", - "sp-std 14.0.0", ] [[package]] @@ -11486,7 +11485,6 @@ dependencies = [ "sp-session", "sp-staking", "sp-state-machine 0.35.0", - "sp-tracing 16.0.0", ] [[package]] @@ -11978,29 +11976,6 @@ dependencies = [ "sp-tracing 16.0.0", ] -[[package]] -name = "pallet-election-provider-multi-block" -version = "0.9.0" -dependencies = [ - "frame-benchmarking", - "frame-election-provider-support", - "frame-support", - "frame-system", - "log", - "pallet-balances", - "parity-scale-codec", - "parking_lot 0.12.3", - "rand 0.8.5", - "scale-info", - "sp-arithmetic 23.0.0", - "sp-core 28.0.0", - "sp-io 30.0.0", - "sp-npos-elections", - "sp-runtime 31.0.1", - "sp-std 14.0.0", - "sp-tracing 16.0.0", -] - [[package]] name = "pallet-election-provider-multi-phase" version = "27.0.0" @@ -12296,7 +12271,6 @@ dependencies = [ "sp-runtime 31.0.1", "sp-session", "sp-staking", - "sp-tracing 16.0.0", ] [[package]] @@ -13191,7 +13165,6 @@ dependencies = [ "pallet-staking-reward-curve", "pallet-timestamp", "parity-scale-codec", - "rand 0.8.5", "rand_chacha 0.3.1", "scale-info", "serde", @@ -15845,7 +15818,6 @@ dependencies = [ "pallet-delegated-staking", "pallet-democracy", "pallet-dev-mode", - "pallet-election-provider-multi-block", "pallet-election-provider-multi-phase", "pallet-election-provider-support-benchmarking", "pallet-elections-phragmen", diff --git a/Cargo.toml b/Cargo.toml index c0757ed3043fc..65bf39b75e08b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -346,7 +346,6 @@ members = [ "substrate/frame/core-fellowship", "substrate/frame/delegated-staking", "substrate/frame/democracy", - "substrate/frame/election-provider-multi-block", "substrate/frame/election-provider-multi-phase", 
"substrate/frame/election-provider-multi-phase/test-staking-e2e", "substrate/frame/election-provider-support", diff --git a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/genesis.rs index 4dc45cf7aecb4..2f02ca5f1932f 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/genesis.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/relays/westend/src/genesis.rs @@ -19,7 +19,7 @@ use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; use sp_consensus_babe::AuthorityId as BabeId; use sp_consensus_beefy::ecdsa_crypto::AuthorityId as BeefyId; use sp_core::storage::Storage; -use sp_runtime::{BoundedVec, Perbill}; +use sp_runtime::Perbill; // Polkadot use polkadot_primitives::{AssignmentId, ValidatorId}; @@ -87,13 +87,7 @@ pub fn genesis() -> Storage { .iter() .map(|x| (x.0.clone(), x.1.clone(), STASH, pallet_staking::StakerStatus::Validator)) .collect(), - invulnerables: BoundedVec::try_from( - validators::initial_authorities() - .iter() - .map(|x| x.0.clone()) - .collect::>(), - ) - .expect("Limit for staking invulnerables must be less than initial authorities."), + invulnerables: validators::initial_authorities().iter().map(|x| x.0.clone()).collect(), force_era: pallet_staking::Forcing::ForceNone, slash_reward_fraction: Perbill::from_percent(10), ..Default::default() diff --git a/polkadot/runtime/common/src/try_runtime.rs b/polkadot/runtime/common/src/try_runtime.rs index 795249dde20b2..b22e170329206 100644 --- a/polkadot/runtime/common/src/try_runtime.rs +++ b/polkadot/runtime/common/src/try_runtime.rs @@ -36,7 +36,7 @@ where let all_stakers = Ledger::::iter().map(|(ctrl, l)| (ctrl, l.stash)).collect::>(); let mut all_exposed = BTreeSet::new(); - ErasStakersPaged::::iter().for_each(|((_era, val, _page), expo)| { + ErasStakers::::iter().for_each(|(_, val, expo)| { all_exposed.insert(val); 
all_exposed.extend(expo.others.iter().map(|ie| ie.who.clone())) }); diff --git a/polkadot/runtime/parachains/src/disputes/slashing/benchmarking.rs b/polkadot/runtime/parachains/src/disputes/slashing/benchmarking.rs index 68d9ee4452732..bfd46d7524385 100644 --- a/polkadot/runtime/parachains/src/disputes/slashing/benchmarking.rs +++ b/polkadot/runtime/parachains/src/disputes/slashing/benchmarking.rs @@ -82,12 +82,8 @@ where pallet_session::Pallet::::on_initialize(BlockNumberFor::::one()); initializer::Pallet::::on_initialize(BlockNumberFor::::one()); - // skip sessions until the new validator set is enacted while pallet_session::Pallet::::validators().len() < n as usize { - // initialize stakers in pallet_staking. This is suboptimal, but an easy way to avoid this - // being an infinite loop. - pallet_staking::Pallet::::populate_staking_election_testing_benchmarking_only().unwrap(); pallet_session::Pallet::::rotate_session(); } initializer::Pallet::::on_finalize(BlockNumberFor::::one()); diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index 694077dd21c94..65e20eccd71a5 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ b/polkadot/runtime/test-runtime/src/lib.rs @@ -79,7 +79,7 @@ use polkadot_runtime_common::{ use polkadot_runtime_parachains::reward_points::RewardValidatorsWithEraPoints; use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; use sp_consensus_beefy::ecdsa_crypto::{AuthorityId as BeefyId, Signature as BeefySignature}; -use sp_core::{ConstBool, ConstU32, OpaqueMetadata}; +use sp_core::{ConstU32, OpaqueMetadata}; use sp_mmr_primitives as mmr; use sp_runtime::{ curve::PiecewiseLinear, @@ -349,7 +349,7 @@ parameter_types! 
{ pub const MaxExposurePageSize: u32 = 64; pub const MaxNominators: u32 = 256; pub const MaxAuthorities: u32 = 100_000; - pub const OnChainMaxWinners: u32 = MaxAuthorities::get(); + pub const OnChainMaxWinners: u32 = u32::MAX; // Unbounded number of election targets and voters. pub ElectionBoundsOnChain: ElectionBounds = ElectionBoundsBuilder::default().build(); } @@ -362,9 +362,7 @@ impl onchain::Config for OnChainSeqPhragmen { type DataProvider = Staking; type WeightInfo = (); type Bounds = ElectionBoundsOnChain; - type MaxWinnersPerPage = OnChainMaxWinners; - type MaxBackersPerWinner = ConstU32<{ u32::MAX }>; - type Sort = ConstBool; + type MaxWinners = OnChainMaxWinners; } /// Upper limit on the number of NPOS nominations. @@ -402,9 +400,6 @@ impl pallet_staking::Config for Runtime { type BenchmarkingConfig = polkadot_runtime_common::StakingBenchmarkingConfig; type EventListeners = (); type WeightInfo = (); - type MaxValidatorSet = MaxAuthorities; - type MaxInvulnerables = ConstU32<20>; - type MaxDisabledValidators = ConstU32<100>; type Filter = frame_support::traits::Nothing; } diff --git a/polkadot/runtime/westend/src/genesis_config_presets.rs b/polkadot/runtime/westend/src/genesis_config_presets.rs index af5e3607df485..76c0ce015c0d8 100644 --- a/polkadot/runtime/westend/src/genesis_config_presets.rs +++ b/polkadot/runtime/westend/src/genesis_config_presets.rs @@ -33,7 +33,7 @@ use sp_consensus_grandpa::AuthorityId as GrandpaId; use sp_core::{crypto::get_public_from_string_or_panic, sr25519}; use sp_genesis_builder::PresetId; use sp_keyring::Sr25519Keyring; -use sp_runtime::{BoundedVec, Perbill}; +use sp_runtime::Perbill; use westend_runtime_constants::currency::UNITS as WND; /// Helper function to generate stash, controller and session key from seed @@ -202,10 +202,7 @@ fn westend_testnet_genesis( .iter() .map(|x| (x.0.clone(), x.0.clone(), STASH, StakerStatus::::Validator)) .collect::>(), - invulnerables: BoundedVec::try_from( - 
initial_authorities.iter().map(|x| x.0.clone()).collect::>() - ) - .expect("Too many invulnerable validators: upper limit is MaxInvulnerables from pallet staking config"), + invulnerables: initial_authorities.iter().map(|x| x.0.clone()).collect::>(), force_era: Forcing::NotForcing, slash_reward_fraction: Perbill::from_percent(10), }, @@ -376,10 +373,7 @@ fn westend_staging_testnet_config_genesis() -> serde_json::Value { .iter() .map(|x| (x.0.clone(), x.0.clone(), STASH, StakerStatus::::Validator)) .collect::>(), - invulnerables: BoundedVec::try_from( - initial_authorities.iter().map(|x| x.0.clone()).collect::>() - ) - .expect("Too many invulnerable validators: upper limit is MaxInvulnerables from pallet staking config"), + invulnerables: initial_authorities.iter().map(|x| x.0.clone()).collect::>(), force_era: Forcing::ForceNone, slash_reward_fraction: Perbill::from_percent(10), }, diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 86358afb23e51..6c2f44bb036b0 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -96,7 +96,7 @@ use sp_consensus_beefy::{ ecdsa_crypto::{AuthorityId as BeefyId, Signature as BeefySignature}, mmr::{BeefyDataProvider, MmrLeafVersion}, }; -use sp_core::{ConstBool, ConstU8, OpaqueMetadata, RuntimeDebug, H256}; +use sp_core::{ConstU8, OpaqueMetadata, RuntimeDebug, H256}; use sp_runtime::{ generic, impl_opaque_keys, traits::{ @@ -586,10 +586,7 @@ parameter_types! { ElectionBoundsBuilder::default().voters_count(MaxElectingVoters::get().into()).build(); // Maximum winners that can be chosen as active validators pub const MaxActiveValidators: u32 = 1000; - // One page only, fill the whole page with the `MaxActiveValidators`. - pub const MaxWinnersPerPage: u32 = MaxActiveValidators::get(); - // Unbonded, thus the max backers per winner maps to the max electing voters limit. 
- pub const MaxBackersPerWinner: u32 = MaxElectingVoters::get(); + } frame_election_provider_support::generate_solution_type!( @@ -604,14 +601,12 @@ frame_election_provider_support::generate_solution_type!( pub struct OnChainSeqPhragmen; impl onchain::Config for OnChainSeqPhragmen { - type Sort = ConstBool; type System = Runtime; type Solver = SequentialPhragmen; type DataProvider = Staking; type WeightInfo = weights::frame_election_provider_support::WeightInfo; + type MaxWinners = MaxActiveValidators; type Bounds = ElectionBounds; - type MaxBackersPerWinner = MaxBackersPerWinner; - type MaxWinnersPerPage = MaxWinnersPerPage; } impl pallet_election_provider_multi_phase::MinerConfig for Runtime { @@ -624,8 +619,7 @@ impl pallet_election_provider_multi_phase::MinerConfig for Runtime { as frame_election_provider_support::ElectionDataProvider >::MaxVotesPerVoter; - type MaxBackersPerWinner = MaxBackersPerWinner; - type MaxWinners = MaxWinnersPerPage; + type MaxWinners = MaxActiveValidators; // The unsigned submissions have to respect the weight of the submit_unsigned call, thus their // weight estimate function is wired to this call's weight. 
@@ -659,8 +653,6 @@ impl pallet_election_provider_multi_phase::Config for Runtime { type BetterSignedThreshold = (); type OffchainRepeat = OffchainRepeat; type MinerTxPriority = NposSolutionPriority; - type MaxWinners = MaxWinnersPerPage; - type MaxBackersPerWinner = MaxBackersPerWinner; type DataProvider = Staking; #[cfg(any(feature = "fast-runtime", feature = "runtime-benchmarks"))] type Fallback = onchain::OnChainExecution; @@ -669,8 +661,7 @@ impl pallet_election_provider_multi_phase::Config for Runtime { AccountId, BlockNumber, Staking, - MaxWinnersPerPage, - MaxBackersPerWinner, + MaxActiveValidators, )>; type GovernanceFallback = onchain::OnChainExecution; type Solver = SequentialPhragmen< @@ -681,6 +672,7 @@ impl pallet_election_provider_multi_phase::Config for Runtime { type BenchmarkingConfig = polkadot_runtime_common::elections::BenchmarkConfig; type ForceOrigin = EnsureRoot; type WeightInfo = weights::pallet_election_provider_multi_phase::WeightInfo; + type MaxWinners = MaxActiveValidators; type ElectionBounds = ElectionBounds; } @@ -762,7 +754,6 @@ impl pallet_staking::Config for Runtime { type GenesisElectionProvider = onchain::OnChainExecution; type VoterList = VoterList; type TargetList = UseValidatorsMap; - type MaxValidatorSet = MaxActiveValidators; type NominationsQuota = pallet_staking::FixedNominationsQuota<{ MaxNominations::get() }>; type MaxUnlockingChunks = frame_support::traits::ConstU32<32>; type HistoryDepth = frame_support::traits::ConstU32<84>; @@ -770,8 +761,6 @@ impl pallet_staking::Config for Runtime { type BenchmarkingConfig = polkadot_runtime_common::StakingBenchmarkingConfig; type EventListeners = (NominationPools, DelegatedStaking); type WeightInfo = weights::pallet_staking::WeightInfo; - type MaxInvulnerables = frame_support::traits::ConstU32<20>; - type MaxDisabledValidators = ConstU32<100>; type Filter = Nothing; } diff --git a/polkadot/runtime/westend/src/weights/pallet_fast_unstake.rs 
b/polkadot/runtime/westend/src/weights/pallet_fast_unstake.rs index 7caf815cf624a..406936c9788ee 100644 --- a/polkadot/runtime/westend/src/weights/pallet_fast_unstake.rs +++ b/polkadot/runtime/westend/src/weights/pallet_fast_unstake.rs @@ -97,20 +97,20 @@ impl pallet_fast_unstake::WeightInfo for WeightInfo .saturating_add(T::DbWeight::get().writes((6_u64).saturating_mul(b.into()))) .saturating_add(Weight::from_parts(0, 3566).saturating_mul(b.into())) } - /// Storage: `FastUnstake::ErasToCheckPerBlock` (r:1 w:0) - /// Proof: `FastUnstake::ErasToCheckPerBlock` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Staking::ValidatorCount` (r:1 w:0) - /// Proof: `Staking::ValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `FastUnstake::Head` (r:1 w:1) - /// Proof: `FastUnstake::Head` (`max_values`: Some(1), `max_size`: Some(3087), added: 3582, mode: `MaxEncodedLen`) - /// Storage: `FastUnstake::CounterForQueue` (r:1 w:0) - /// Proof: `FastUnstake::CounterForQueue` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `ElectionProviderMultiPhase::CurrentPhase` (r:1 w:0) - /// Proof: `ElectionProviderMultiPhase::CurrentPhase` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) - /// Storage: `Staking::CurrentEra` (r:1 w:0) - /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Staking::ErasStakersPaged` (r:257 w:0) - /// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: FastUnstake ErasToCheckPerBlock (r:1 w:0) + /// Proof: FastUnstake ErasToCheckPerBlock (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: Staking ValidatorCount (r:1 w:0) + /// Proof: Staking ValidatorCount (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// 
Storage: FastUnstake Head (r:1 w:1) + /// Proof: FastUnstake Head (max_values: Some(1), max_size: Some(3087), added: 3582, mode: MaxEncodedLen) + /// Storage: FastUnstake CounterForQueue (r:1 w:0) + /// Proof: FastUnstake CounterForQueue (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) + /// Proof Skipped: ElectionProviderMultiPhase CurrentPhase (max_values: Some(1), max_size: None, mode: Measured) + /// Storage: Staking CurrentEra (r:1 w:0) + /// Proof: Staking CurrentEra (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) + /// Storage: Staking ErasStakers (r:257 w:0) + /// Proof Skipped: Staking ErasStakers (max_values: None, max_size: None, mode: Measured) /// The range of component `v` is `[1, 256]`. /// The range of component `b` is `[1, 64]`. fn on_idle_check(v: u32, b: u32, ) -> Weight { diff --git a/polkadot/runtime/westend/src/weights/pallet_staking.rs b/polkadot/runtime/westend/src/weights/pallet_staking.rs index 496bc01e5e38d..3a99452cfd9fa 100644 --- a/polkadot/runtime/westend/src/weights/pallet_staking.rs +++ b/polkadot/runtime/westend/src/weights/pallet_staking.rs @@ -51,54 +51,6 @@ use core::marker::PhantomData; /// Weight functions for `pallet_staking`. 
pub struct WeightInfo(PhantomData); impl pallet_staking::WeightInfo for WeightInfo { - /// Storage: `Staking::ProcessingOffence` (r:1 w:0) - /// Proof: `Staking::ProcessingOffence` (`max_values`: Some(1), `max_size`: Some(85), added: 580, mode: `MaxEncodedLen`) - /// Storage: `Staking::OffenceQueueEras` (r:1 w:0) - /// Proof: `Staking::OffenceQueueEras` (`max_values`: Some(1), `max_size`: Some(9), added: 504, mode: `MaxEncodedLen`) - /// Storage: `Staking::ActiveEra` (r:1 w:0) - /// Proof: `Staking::ActiveEra` (`max_values`: Some(1), `max_size`: Some(13), added: 508, mode: `MaxEncodedLen`) - /// Storage: `Staking::UnappliedSlashes` (r:1 w:0) - /// Proof: `Staking::UnappliedSlashes` (`max_values`: None, `max_size`: Some(3231), added: 5706, mode: `MaxEncodedLen`) - /// Storage: `Staking::NextElectionPage` (r:1 w:0) - /// Proof: `Staking::NextElectionPage` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xd93c9708f5182731b2e90757fd7abf7a` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xd93c9708f5182731b2e90757fd7abf7a` (r:1 w:0) - /// Storage: `Staking::CurrentEra` (r:1 w:0) - /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Staking::CurrentPlannedSession` (r:1 w:0) - /// Proof: `Staking::CurrentPlannedSession` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Staking::ErasStartSessionIndex` (r:1 w:0) - /// Proof: `Staking::ErasStartSessionIndex` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) - /// Storage: `Babe::EpochIndex` (r:1 w:0) - /// Proof: `Babe::EpochIndex` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Babe::GenesisSlot` (r:1 w:0) - /// Proof: `Babe::GenesisSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Babe::CurrentSlot` (r:1 w:0) - /// Proof: 
`Babe::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Staking::ForceEra` (r:1 w:0) - /// Proof: `Staking::ForceEra` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `Staking::ElectableStashes` (r:1 w:0) - /// Proof: `Staking::ElectableStashes` (`max_values`: Some(1), `max_size`: Some(32002), added: 32497, mode: `MaxEncodedLen`) - fn on_initialize_noop() -> Weight { - // Proof Size summary in bytes: - // Measured: `890` - // Estimated: `33487` - // Minimum execution time: 35_166_000 picoseconds. - Weight::from_parts(35_901_000, 0) - .saturating_add(Weight::from_parts(0, 33487)) - .saturating_add(T::DbWeight::get().reads(14)) - } - /// The range of component `v` is `[1, 1000]`. - fn do_elect_paged_inner(v: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 185_000 picoseconds. - Weight::from_parts(236_633, 0) - .saturating_add(Weight::from_parts(0, 0)) - // Standard Error: 1 - .saturating_add(Weight::from_parts(2, 0).saturating_mul(v.into())) - } /// Storage: `VoterList::CounterForListNodes` (r:1 w:0) /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListBags` (r:178 w:0) @@ -592,6 +544,12 @@ impl pallet_staking::WeightInfo for WeightInfo { .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) .saturating_add(Weight::from_parts(0, 5706).saturating_mul(s.into())) } + /// Storage: `Staking::Bonded` (r:65 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:65 w:65) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStakersClipped` (r:1 w:0) + /// Proof: `Staking::ErasStakersClipped` 
(`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Staking::ErasStakersOverview` (r:1 w:0) /// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) /// Storage: `Staking::ClaimedRewards` (r:1 w:1) @@ -700,6 +658,116 @@ impl pallet_staking::WeightInfo for WeightInfo { .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into())) } + + /// Storage: `VoterList::CounterForListNodes` (r:1 w:0) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:178 w:0) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:110 w:0) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:110 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:110 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:110 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:11 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForValidators` (r:1 w:0) + /// Proof: `Staking::CounterForValidators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ValidatorCount` (r:1 w:0) + /// Proof: `Staking::ValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: 
`MaxEncodedLen`) + /// Storage: `Staking::MinimumValidatorCount` (r:1 w:0) + /// Proof: `Staking::MinimumValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:1) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasValidatorPrefs` (r:0 w:10) + /// Proof: `Staking::ErasValidatorPrefs` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStakersPaged` (r:0 w:20) + /// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ErasStakersOverview` (r:0 w:10) + /// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasTotalStake` (r:0 w:1) + /// Proof: `Staking::ErasTotalStake` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStartSessionIndex` (r:0 w:1) + /// Proof: `Staking::ErasStartSessionIndex` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinimumActiveStake` (r:0 w:1) + /// Proof: `Staking::MinimumActiveStake` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// The range of component `v` is `[1, 10]`. + /// The range of component `n` is `[0, 100]`. + fn new_era(v: u32, n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0 + n * (716 ±0) + v * (3594 ±0)` + // Estimated: `456136 + n * (3566 ±4) + v * (3566 ±40)` + // Minimum execution time: 654_756_000 picoseconds. 
+ Weight::from_parts(658_861_000, 0) + .saturating_add(Weight::from_parts(0, 456136)) + // Standard Error: 2_078_102 + .saturating_add(Weight::from_parts(67_775_668, 0).saturating_mul(v.into())) + // Standard Error: 207_071 + .saturating_add(Weight::from_parts(22_624_711, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(184)) + .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into()))) + .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes(8)) + .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(v.into()))) + .saturating_add(Weight::from_parts(0, 3566).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(0, 3566).saturating_mul(v.into())) + } + /// Storage: `VoterList::CounterForListNodes` (r:1 w:0) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:178 w:0) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:2000 w:0) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:2000 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:2000 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:2000 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1000 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinimumActiveStake` (r:0 w:1) + /// 
Proof: `Staking::MinimumActiveStake` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// The range of component `v` is `[500, 1000]`. + /// The range of component `n` is `[500, 1000]`. + fn get_npos_voters(v: u32, n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `3141 + n * (907 ±0) + v * (391 ±0)` + // Estimated: `456136 + n * (3566 ±0) + v * (3566 ±0)` + // Minimum execution time: 42_790_195_000 picoseconds. + Weight::from_parts(42_954_437_000, 0) + .saturating_add(Weight::from_parts(0, 456136)) + // Standard Error: 478_107 + .saturating_add(Weight::from_parts(6_744_044, 0).saturating_mul(v.into())) + // Standard Error: 478_107 + .saturating_add(Weight::from_parts(4_837_739, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(179)) + .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into()))) + .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes(1)) + .saturating_add(Weight::from_parts(0, 3566).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(0, 3566).saturating_mul(v.into())) + } + /// Storage: `Staking::CounterForValidators` (r:1 w:0) + /// Proof: `Staking::CounterForValidators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:1001 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// The range of component `v` is `[500, 1000]`. + fn get_npos_targets(v: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `979 + v * (50 ±0)` + // Estimated: `3510 + v * (2520 ±0)` + // Minimum execution time: 2_851_801_000 picoseconds. 
+ Weight::from_parts(4_477_533, 0) + .saturating_add(Weight::from_parts(0, 3510)) + // Standard Error: 8_644 + .saturating_add(Weight::from_parts(5_811_682, 0).saturating_mul(v.into())) + .saturating_add(T::DbWeight::get().reads(2)) + .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(v.into()))) + .saturating_add(Weight::from_parts(0, 2520).saturating_mul(v.into())) + } /// Storage: `Staking::MinCommission` (r:0 w:1) /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Staking::MinValidatorBond` (r:0 w:1) diff --git a/prdoc/pr_6034.prdoc b/prdoc/pr_6034.prdoc deleted file mode 100644 index e6ecd8aae5c8c..0000000000000 --- a/prdoc/pr_6034.prdoc +++ /dev/null @@ -1,25 +0,0 @@ -title: Adds multi-block election types and refactors current single logic to support it - -doc: - - audience: Runtime Dev - description: | - This PR adds election types and structs required to run a multi-block election. In addition, - it modifies EPM, staking pallet and all dependent pallets and logic to use the multi-block types. - -crates: - - name: frame-election-provider-support - bump: major - - name: pallet-election-provider-multi-phase - bump: major - - name: pallet-staking - bump: major - - name: pallet-fast-unstake - bump: minor - - name: pallet-delegated-staking - bump: minor - - name: sp-npos-elections - bump: major - - name: sp-staking - bump: major - - name: pallet-bags-list-remote-tests - bump: minor diff --git a/prdoc/pr_6689.prdoc b/prdoc/pr_6689.prdoc index 72e935e2e984a..2cbb49cd7dd24 100644 --- a/prdoc/pr_6689.prdoc +++ b/prdoc/pr_6689.prdoc @@ -1,12 +1,13 @@ title: '[pallet-revive] Update gas encoding' doc: - audience: Runtime Dev - description: | + description: |- Update the current approach to attach the `ref_time`, `pov` and `deposit` parameters to an Ethereum transaction. 
- Previously, these three parameters were passed along with the signed payload, and the fees resulting from gas × gas_price were checked to ensure they matched the actual fees paid by the user for the extrinsic +Previously, these three parameters were passed along with the signed payload, and the fees resulting from gas × gas_price were checked to ensure they matched the actual fees paid by the user for the extrinsic + This approach unfortunately can be attacked. A malicious actor could force such a transaction to fail by injecting low values for some of these extra parameters as they are not part of the signed payload. - The new approach encodes these 3 extra parameters in the lower digits of the transaction gas, using the log2 of the actual values to encode each components on 2 digits + The new approach encodes these 3 extra parameters in the lower digits of the transaction gas, using the log2 of the actual values to encode each components on 2 digits crates: - name: pallet-revive-eth-rpc bump: minor diff --git a/prdoc/pr_7042.prdoc b/prdoc/pr_7042.prdoc index 1c585f9dff0d6..00fb34c6af493 100644 --- a/prdoc/pr_7042.prdoc +++ b/prdoc/pr_7042.prdoc @@ -1,4 +1,4 @@ -title: networking::TransactionPool should accept Arc +title: `networking::TransactionPool` should accept `Arc` doc: - audience: Node Dev description: The `sc_network_transactions::config::TransactionPool` trait now returns an `Arc` for transactions. @@ -6,4 +6,4 @@ crates: - name: sc-network-transactions bump: minor - name: sc-service - bump: minor + bump: minor \ No newline at end of file diff --git a/prdoc/pr_7282.prdoc b/prdoc/pr_7282.prdoc deleted file mode 100644 index 3d12a8b184abd..0000000000000 --- a/prdoc/pr_7282.prdoc +++ /dev/null @@ -1,72 +0,0 @@ -title: AHM Multi-block staking election pallet -doc: -- audience: Runtime Dev - description: | - ## Multi Block Election Pallet - - This PR adds the first iteration of the multi-block staking pallet. 
- - From this point onwards, the staking and its election provider pallets are being customized to work in AssetHub. While usage in solo-chains is still possible, it is not longer the main focus of this pallet. For a safer usage, please fork and user an older version of this pallet. -crates: -- name: pallet-election-provider-multi-block - bump: major -- name: frame-election-provider-support - bump: major -- name: frame-election-provider-solution-type - bump: major -- name: sp-npos-elections - bump: major -- name: sp-staking - bump: major -- name: pallet-staking - bump: major -- name: pallet-election-provider-multi-phase - bump: major -- name: westend-runtime - bump: major -- name: pallet-delegated-staking - bump: major -- name: pallet-fast-unstake - bump: major -- name: pallet-session-benchmarking - bump: major -- name: sc-consensus-grandpa - bump: major -- name: pallet-babe - bump: major -- name: pallet-beefy - bump: major -- name: pallet-grandpa - bump: major -- name: pallet-nomination-pools - bump: major -- name: pallet-root-offences - bump: major -- name: pallet-nomination-pools-benchmarking - bump: major -- name: pallet-offences-benchmarking - bump: major -- name: cumulus-pov-validator - bump: major -- name: polkadot-sdk - bump: major -- name: asset-hub-rococo-runtime - bump: major -- name: pallet-bags-list - bump: major -- name: frame-benchmarking - bump: major -- name: frame-support-procedural - bump: major -- name: frame-support - bump: major -- name: frame-benchmarking-cli - bump: major -- name: polkadot-runtime-common - bump: major -- name: pallet-elections-phragmen - bump: major -- name: pallet-election-provider-support-benchmarking - bump: major -- name: pallet-session - bump: major diff --git a/substrate/.maintain/frame-umbrella-weight-template.hbs b/substrate/.maintain/frame-umbrella-weight-template.hbs index 6985944b0a3bd..c99758c41d9d7 100644 --- a/substrate/.maintain/frame-umbrella-weight-template.hbs +++ 
b/substrate/.maintain/frame-umbrella-weight-template.hbs @@ -1,20 +1,3 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - {{header}} //! Autogenerated weights for `{{pallet}}` //! diff --git a/substrate/.maintain/frame-weight-template.hbs b/substrate/.maintain/frame-weight-template.hbs index c2a22200dc99b..624fc57aa3295 100644 --- a/substrate/.maintain/frame-weight-template.hbs +++ b/substrate/.maintain/frame-weight-template.hbs @@ -1,20 +1,3 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - {{header}} //! Autogenerated weights for `{{pallet}}` //! 
diff --git a/substrate/bin/node/cli/Cargo.toml b/substrate/bin/node/cli/Cargo.toml index 46ccff34bf742..7b355074823c3 100644 --- a/substrate/bin/node/cli/Cargo.toml +++ b/substrate/bin/node/cli/Cargo.toml @@ -182,9 +182,6 @@ try-runtime = [ "polkadot-sdk/try-runtime", "substrate-cli-test-utils/try-runtime", ] -staking-playground = [ - "kitchensink-runtime/staking-playground", -] [[bench]] name = "transaction_pool" diff --git a/substrate/bin/node/cli/src/chain_spec.rs b/substrate/bin/node/cli/src/chain_spec.rs index 08efe2cf4bd61..1b0b29a1062c9 100644 --- a/substrate/bin/node/cli/src/chain_spec.rs +++ b/substrate/bin/node/cli/src/chain_spec.rs @@ -314,38 +314,66 @@ pub fn testnet_genesis( let (initial_authorities, endowed_accounts, stakers) = configure_accounts(initial_authorities, initial_nominators, endowed_accounts, STASH); - let staking_playground_config = if cfg!(feature = "staking-playground") { - Some(get_staking_playground_config()) - } else { - None - }; - - // Todo: After #7748 is done, we can refactor this to avoid - // calling into the native runtime. 
- kitchensink_runtime::genesis_config_presets::kitchensink_genesis( - initial_authorities - .iter() - .map(|x| { - ( - x.0.clone(), - // stash account is controller - x.0.clone(), - session_keys( - x.2.clone(), - x.3.clone(), - x.4.clone(), - x.5.clone(), - x.6.clone(), - x.7.clone(), - ), - ) - }) - .collect(), - root_key, - endowed_accounts, - stakers, - staking_playground_config, - ) + + serde_json::json!({ + "balances": { + "balances": endowed_accounts.iter().cloned().map(|x| (x, ENDOWMENT)).collect::>(), + }, + "session": { + "keys": initial_authorities + .iter() + .map(|x| { + ( + x.0.clone(), + x.0.clone(), + session_keys( + x.2.clone(), + x.3.clone(), + x.4.clone(), + x.5.clone(), + x.6.clone(), + x.7.clone(), + ), + ) + }) + .collect::>(), + }, + "staking": { + "validatorCount": initial_authorities.len() as u32, + "minimumValidatorCount": initial_authorities.len() as u32, + "invulnerables": initial_authorities.iter().map(|x| x.0.clone()).collect::>(), + "slashRewardFraction": Perbill::from_percent(10), + "stakers": stakers.clone(), + }, + "elections": { + "members": endowed_accounts + .iter() + .take((num_endowed_accounts + 1) / 2) + .cloned() + .map(|member| (member, STASH)) + .collect::>(), + }, + "technicalCommittee": { + "members": endowed_accounts + .iter() + .take((num_endowed_accounts + 1) / 2) + .cloned() + .collect::>(), + }, + "sudo": { "key": Some(root_key.clone()) }, + "babe": { + "epochConfig": Some(kitchensink_runtime::BABE_GENESIS_EPOCH_CONFIG), + }, + "society": { "pot": 0 }, + "assets": { + // This asset is used by the NIS pallet as counterpart currency. 
+ "assets": vec![(9, Sr25519Keyring::Alice.to_account_id(), true, 1)], + }, + "nominationPools": { + "minCreateBond": 10 * DOLLARS, + "minJoinBond": 1 * DOLLARS, + }, + }) } fn get_staking_playground_config() -> StakingPlaygroundConfig { diff --git a/substrate/bin/node/cli/tests/res/default_genesis_config.json b/substrate/bin/node/cli/tests/res/default_genesis_config.json index 4d8462520214c..8ad2428f78554 100644 --- a/substrate/bin/node/cli/tests/res/default_genesis_config.json +++ b/substrate/bin/node/cli/tests/res/default_genesis_config.json @@ -22,7 +22,6 @@ "multiplier": "1000000000000000000" }, "staking": { - "devStakers": null, "validatorCount": 0, "minimumValidatorCount": 0, "invulnerables": [], diff --git a/substrate/bin/node/runtime/Cargo.toml b/substrate/bin/node/runtime/Cargo.toml index c0eac973c7a46..3c776b21f3320 100644 --- a/substrate/bin/node/runtime/Cargo.toml +++ b/substrate/bin/node/runtime/Cargo.toml @@ -75,9 +75,3 @@ experimental = [ "pallet-example-tasks/experimental", ] metadata-hash = ["substrate-wasm-builder/metadata-hash"] -# Test temp feature to allow this chain to be used for swift testing of staking elections. should -# only be run by --dev chain. It will create a large staking election process as per the constants -# in `chain_spec.rs`, but `Alice` will be the only authority that is communicated to the node and -# ergo block production works fine with --dev and is independent of staking election. See ` pub -# struct AliceAsOnlyValidator`. -staking-playground = [] diff --git a/substrate/bin/node/runtime/src/constants.rs b/substrate/bin/node/runtime/src/constants.rs index 576ed5401802a..d13dca48d1f12 100644 --- a/substrate/bin/node/runtime/src/constants.rs +++ b/substrate/bin/node/runtime/src/constants.rs @@ -63,8 +63,7 @@ pub mod time { // NOTE: Currently it is not possible to change the epoch duration after the chain has started. // Attempting to do so will brick block production. 
- pub const EPOCH_DURATION_IN_BLOCKS: BlockNumber = 20 * MINUTES; - + pub const EPOCH_DURATION_IN_BLOCKS: BlockNumber = 10 * MINUTES; pub const EPOCH_DURATION_IN_SLOTS: u64 = { const SLOT_FILL_RATE: f64 = MILLISECS_PER_BLOCK as f64 / SLOT_DURATION as f64; diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index c618831c0a771..062672373b300 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -678,121 +678,18 @@ impl_opaque_keys! { } } -#[cfg(feature = "staking-playground")] -pub mod staking_playground { - use pallet_staking::Exposure; - - use super::*; - - /// An adapter to make the chain work with --dev only, even though it is running a large staking - /// election. - /// - /// It will ignore the staking election and just set the validator set to alice. - /// - /// Needs to be fed into `type SessionManager`. - pub struct AliceAsOnlyValidator; - impl pallet_session::SessionManager for AliceAsOnlyValidator { - fn end_session(end_index: sp_staking::SessionIndex) { - >::end_session(end_index) - } - - fn new_session(new_index: sp_staking::SessionIndex) -> Option> { - >::new_session(new_index).map( - |_ignored_validators| { - vec![sp_keyring::Sr25519Keyring::AliceStash.to_account_id().into()] - }, - ) - } - - fn new_session_genesis(new_index: sp_staking::SessionIndex) -> Option> { - >::new_session_genesis(new_index) - .map(|_ignored_validators| { - vec![sp_keyring::Sr25519Keyring::AliceStash.to_account_id().into()] - }) - } - - fn start_session(start_index: sp_staking::SessionIndex) { - >::start_session(start_index) - } - } - - impl pallet_session::historical::SessionManager> - for AliceAsOnlyValidator - { - fn end_session(end_index: sp_staking::SessionIndex) { - , - >>::end_session(end_index) - } - - fn new_session( - new_index: sp_staking::SessionIndex, - ) -> Option)>> { - , - >>::new_session(new_index) - .map(|_ignored| { - // construct a fake exposure for alice. 
- vec![( - sp_keyring::Sr25519Keyring::AliceStash.to_account_id().into(), - pallet_staking::Exposure { - total: 1_000_000_000, - own: 1_000_000_000, - others: vec![], - }, - )] - }) - } - - fn new_session_genesis( - new_index: sp_staking::SessionIndex, - ) -> Option)>> { - , - >>::new_session_genesis(new_index) - .map(|_ignored| { - // construct a fake exposure for alice. - vec![( - sp_keyring::Sr25519Keyring::AliceStash.to_account_id().into(), - pallet_staking::Exposure { - total: 1_000_000_000, - own: 1_000_000_000, - others: vec![], - }, - )] - }) - } - - fn start_session(start_index: sp_staking::SessionIndex) { - , - >>::start_session(start_index) - } - } -} - impl pallet_session::Config for Runtime { type RuntimeEvent = RuntimeEvent; type ValidatorId = ::AccountId; type ValidatorIdOf = pallet_staking::StashOf; type ShouldEndSession = Babe; type NextSessionRotation = Babe; + type SessionManager = pallet_session::historical::NoteHistoricalRoot; type SessionHandler = ::KeyTypeIdProviders; type Keys = SessionKeys; type DisablingStrategy = pallet_session::disabling::UpToLimitWithReEnablingDisablingStrategy; type WeightInfo = pallet_session::weights::SubstrateWeight; - #[cfg(not(feature = "staking-playground"))] - type SessionManager = pallet_session::historical::NoteHistoricalRoot; - #[cfg(feature = "staking-playground")] - type SessionManager = pallet_session::historical::NoteHistoricalRoot< - Self, - staking_playground::AliceAsOnlyValidator, - >; } impl pallet_session::historical::Config for Runtime { @@ -811,16 +708,8 @@ pallet_staking_reward_curve::build! { ); } -#[cfg(not(feature = "staking-playground"))] parameter_types! { pub const SessionsPerEra: sp_staking::SessionIndex = 6; -} -#[cfg(feature = "staking-playground")] -parameter_types! { - pub const SessionsPerEra: sp_staking::SessionIndex = 2; -} - -parameter_types! 
{ pub const BondingDuration: sp_staking::EraIndex = 24 * 28; pub const SlashDeferDuration: sp_staking::EraIndex = 24 * 7; // 1/4 the bonding duration. pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; @@ -835,35 +724,10 @@ const MAX_QUOTA_NOMINATIONS: u32 = 16; pub struct StakingBenchmarkingConfig; impl pallet_staking::BenchmarkingConfig for StakingBenchmarkingConfig { - type MaxNominators = ConstU32<5000>; + type MaxNominators = ConstU32<1000>; type MaxValidators = ConstU32<1000>; } -use frame_election_provider_support::{BoundedSupportsOf, ElectionProvider, PageIndex}; -pub struct MultiElectionProvider; -impl ElectionProvider for MultiElectionProvider { - type AccountId = ::AccountId; - type BlockNumber = ::BlockNumber; - type DataProvider = ::DataProvider; - type Error = ::Error; - type Pages = ::Pages; - type MaxBackersPerWinner = ::MaxBackersPerWinner; - type MaxWinnersPerPage = ::MaxWinnersPerPage; - - fn elect(page: PageIndex) -> Result, Self::Error> { - if page == 0 && !cfg!(feature = "runtime-benchmarks") { - // TODO: later on, we can even compare the results of the multi-page and multi-block - // election in here. 
- let _ = ElectionProviderMultiPhase::elect(page); - } - MultiBlock::elect(page) - } - - fn ongoing() -> bool { - MultiBlock::ongoing() - } -} - impl pallet_staking::Config for Runtime { type OldCurrency = Balances; type Currency = Balances; @@ -886,9 +750,8 @@ impl pallet_staking::Config for Runtime { type SessionInterface = Self; type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = Session; - type MaxExposurePageSize = multi_block_impls::MaxExposurePageSize; - type MaxValidatorSet = multi_block_impls::MaxWinnersPerPage; - type ElectionProvider = MultiElectionProvider; + type MaxExposurePageSize = ConstU32<256>; + type ElectionProvider = ElectionProviderMultiPhase; type GenesisElectionProvider = onchain::OnChainExecution; type VoterList = VoterList; type NominationsQuota = pallet_staking::FixedNominationsQuota; @@ -900,8 +763,6 @@ impl pallet_staking::Config for Runtime { type EventListeners = (NominationPools, DelegatedStaking); type WeightInfo = pallet_staking::weights::SubstrateWeight; type BenchmarkingConfig = StakingBenchmarkingConfig; - type MaxInvulnerables = ConstU32<20>; - type MaxDisabledValidators = ConstU32<100>; type Filter = Nothing; } @@ -916,132 +777,10 @@ impl pallet_fast_unstake::Config for Runtime { type WeightInfo = (); } -frame_election_provider_support::generate_solution_type!( - #[compact] - pub struct NposSolution16::< - VoterIndex = u32, - TargetIndex = u16, - Accuracy = sp_runtime::PerU16, - MaxVoters = ConstU32<22500>, - >(16) -); - -pub(crate) mod multi_block_impls { - use super::*; - use pallet_election_provider_multi_block as multi_block; - use pallet_election_provider_multi_phase as multi_phase; - - frame_election_provider_support::generate_solution_type!( - #[compact] - pub struct MultiBlockSolution::< - VoterIndex = u16, - TargetIndex = u16, - Accuracy = sp_runtime::Percent, - MaxVoters = ConstU32<{22500 / Pages::get()}>, - >(16) - ); - - parameter_types! 
{ - pub const Pages: u32 = 32; - // nominators snapshot size - pub VoterSnapshotPerBlock: u32 = 22500 / Pages::get(); - // validator snapshot size - pub TargetSnapshotPerBlock: u32 = 1000; - pub SignedPhase: u32 = 3 * EPOCH_DURATION_IN_BLOCKS / 4; - // 2 signed solutions to be validate - pub SignedValidation: u32 = Pages::get() * 2; - pub UnsignedPhase: u32 = EPOCH_DURATION_IN_BLOCKS / 4; - pub MaxWinnersPerPage: u32 = 1000; - pub MaxBackersPerWinner: u32 = 128; - pub MaxExposurePageSize: u32 = 32; - } - - impl multi_block::unsigned::miner::MinerConfig for Runtime { - type AccountId = AccountId; - type Hash = Hash; - type MaxBackersPerWinner = ::MaxBackersPerWinner; - type MaxBackersPerWinnerFinal = - ::MaxBackersPerWinnerFinal; - type MaxWinnersPerPage = ::MaxWinnersPerPage; - type MaxVotesPerVoter = - <::DataProvider as ElectionDataProvider>::MaxVotesPerVoter; - type MaxLength = MinerMaxLength; - type Solver = ::OffchainSolver; - type Pages = Pages; - type Solution = MultiBlockSolution; - type VoterSnapshotPerBlock = ::VoterSnapshotPerBlock; - type TargetSnapshotPerBlock = ::TargetSnapshotPerBlock; - } - - impl multi_block::Config for Runtime { - type AdminOrigin = EnsureRoot; - type RuntimeEvent = RuntimeEvent; - type DataProvider = Staking; - #[cfg(not(feature = "runtime-benchmarks"))] - type Fallback = multi_block::Continue; - #[cfg(feature = "runtime-benchmarks")] - type Fallback = onchain::OnChainExecution; - // prepare for election 5 blocks ahead of time - type Lookahead = ConstU32<5>; - // split election into 8 pages. - type Pages = Pages; - // allow 2 signed solutions to be verified. - type SignedValidationPhase = SignedValidation; - // TODO: sanity check that the length of all phases is within reason. 
- type SignedPhase = SignedPhase; - type UnsignedPhase = UnsignedPhase; - type TargetSnapshotPerBlock = TargetSnapshotPerBlock; - type VoterSnapshotPerBlock = VoterSnapshotPerBlock; - type Verifier = MultiBlockVerifier; - type MinerConfig = Self; - type WeightInfo = multi_block::weights::AllZeroWeights; - } - - impl multi_block::verifier::Config for Runtime { - type MaxBackersPerWinner = MaxBackersPerWinner; - type MaxWinnersPerPage = MaxWinnersPerPage; - type MaxBackersPerWinnerFinal = ConstU32<{ u32::MAX }>; - type RuntimeEvent = RuntimeEvent; - type SolutionDataProvider = MultiBlockSigned; - type SolutionImprovementThreshold = (); - type WeightInfo = multi_block::weights::AllZeroWeights; - } - - parameter_types! { - pub const BailoutGraceRatio: Perbill = Perbill::from_percent(50); - } - - impl multi_block::signed::Config for Runtime { - type BailoutGraceRatio = BailoutGraceRatio; - // TODO: we need an increase factor for this pallet as well. - type DepositBase = SignedFixedDeposit; - type DepositPerPage = SignedDepositByte; - type MaxSubmissions = ConstU32<8>; - type RewardBase = SignedRewardBase; - - type EstimateCallFee = TransactionPayment; - type Currency = Balances; - - type RuntimeEvent = RuntimeEvent; - type RuntimeHoldReason = RuntimeHoldReason; - type WeightInfo = multi_block::weights::AllZeroWeights; - } - - impl multi_block::unsigned::Config for Runtime { - type OffchainSolver = ::Solver; - // offchain usage of miner configs - type MinerTxPriority = ::MinerTxPriority; - // TODO: this needs to be an educated number: "estimate mining time per page * pages" - type OffchainRepeat = ConstU32<5>; - - type WeightInfo = multi_block::weights::AllZeroWeights; - } -} - parameter_types! { - // phase durations. 1/2 of the last session for each. - pub const SignedPhase: u32 = EPOCH_DURATION_IN_BLOCKS / 2; - pub const UnsignedPhase: u32 = EPOCH_DURATION_IN_BLOCKS / 2; + // phase durations. 1/4 of the last session for each. 
+ pub const SignedPhase: u32 = EPOCH_DURATION_IN_BLOCKS / 4; + pub const UnsignedPhase: u32 = EPOCH_DURATION_IN_BLOCKS / 4; // signed config pub const SignedRewardBase: Balance = 1 * DOLLARS; @@ -1062,15 +801,29 @@ parameter_types! { .get(DispatchClass::Normal); } +frame_election_provider_support::generate_solution_type!( + #[compact] + pub struct NposSolution16::< + VoterIndex = u32, + TargetIndex = u16, + Accuracy = sp_runtime::PerU16, + MaxVoters = MaxElectingVotersSolution, + >(16) +); + parameter_types! { - /// Note: the EPM in this runtime runs the election on-chain. The election bounds must be - /// carefully set so that an election round fits in one block. + // Note: the EPM in this runtime runs the election on-chain. The election bounds must be + // carefully set so that an election round fits in one block. pub ElectionBoundsMultiPhase: ElectionBounds = ElectionBoundsBuilder::default() .voters_count(10_000.into()).targets_count(1_500.into()).build(); pub ElectionBoundsOnChain: ElectionBounds = ElectionBoundsBuilder::default() .voters_count(5_000.into()).targets_count(1_250.into()).build(); pub MaxNominations: u32 = ::LIMIT as u32; + pub MaxElectingVotersSolution: u32 = 40_000; + // The maximum winners that can be elected by the Election pallet which is equivalent to the + // maximum active validators the staking pallet can have. 
+ pub MaxActiveValidators: u32 = 1000; } /// The numbers configured here could always be more than the the maximum limits of staking pallet @@ -1114,7 +867,6 @@ impl Get> for OffchainRandomBalancing { pub struct OnChainSeqPhragmen; impl onchain::Config for OnChainSeqPhragmen { - type Sort = ConstBool; type System = Runtime; type Solver = SequentialPhragmen< AccountId, @@ -1122,10 +874,8 @@ impl onchain::Config for OnChainSeqPhragmen { >; type DataProvider = ::DataProvider; type WeightInfo = frame_election_provider_support::weights::SubstrateWeight; + type MaxWinners = ::MaxWinners; type Bounds = ElectionBoundsOnChain; - type MaxBackersPerWinner = - ::MaxBackersPerWinner; - type MaxWinnersPerPage = multi_block_impls::MaxWinnersPerPage; } impl pallet_election_provider_multi_phase::MinerConfig for Runtime { @@ -1135,8 +885,7 @@ impl pallet_election_provider_multi_phase::MinerConfig for Runtime { type Solution = NposSolution16; type MaxVotesPerVoter = <::DataProvider as ElectionDataProvider>::MaxVotesPerVoter; - type MaxWinners = multi_block_impls::MaxWinnersPerPage; - type MaxBackersPerWinner = multi_block_impls::MaxBackersPerWinner; + type MaxWinners = MaxActiveValidators; // The unsigned submissions have to respect the weight of the submit_unsigned call, thus their // weight estimate function is wired to this call's weight. 
@@ -1170,21 +919,11 @@ impl pallet_election_provider_multi_phase::Config for Runtime { type SlashHandler = (); // burn slashes type RewardHandler = (); // rewards are minted from the void type DataProvider = Staking; - #[cfg(not(feature = "runtime-benchmarks"))] - type Fallback = frame_election_provider_support::NoElection<( - AccountId, - BlockNumber, - Staking, - multi_block_impls::MaxWinnersPerPage, - multi_block_impls::MaxBackersPerWinner, - )>; - #[cfg(feature = "runtime-benchmarks")] type Fallback = onchain::OnChainExecution; type GovernanceFallback = onchain::OnChainExecution; type Solver = SequentialPhragmen, OffchainRandomBalancing>; type ForceOrigin = EnsureRootOrHalfCouncil; - type MaxWinners = multi_block_impls::MaxWinnersPerPage; - type MaxBackersPerWinner = multi_block_impls::MaxBackersPerWinner; + type MaxWinners = MaxActiveValidators; type ElectionBounds = ElectionBoundsMultiPhase; type BenchmarkingConfig = ElectionProviderBenchmarkConfig; type WeightInfo = pallet_election_provider_multi_phase::weights::SubstrateWeight; @@ -1478,8 +1217,8 @@ parameter_types! { pub const DesiredMembers: u32 = 13; pub const DesiredRunnersUp: u32 = 7; pub const MaxVotesPerVoter: u32 = 16; - pub const MaxVoters: u32 = 256; - pub const MaxCandidates: u32 = 128; + pub const MaxVoters: u32 = 512; + pub const MaxCandidates: u32 = 64; pub const ElectionsPhragmenPalletId: LockIdentifier = *b"phrelect"; } @@ -1759,7 +1498,7 @@ parameter_types! { pub const ImOnlineUnsignedPriority: TransactionPriority = TransactionPriority::max_value(); /// We prioritize im-online heartbeats over election solution submission. 
pub const StakingUnsignedPriority: TransactionPriority = TransactionPriority::max_value() / 2; - pub const MaxAuthorities: u32 = 1000; + pub const MaxAuthorities: u32 = 100; pub const MaxKeys: u32 = 10_000; pub const MaxPeerInHeartbeats: u32 = 10_000; } @@ -3020,16 +2759,6 @@ mod runtime { #[runtime::pallet_index(84)] pub type AssetsFreezer = pallet_assets_freezer::Pallet; - - // Order is important! - #[runtime::pallet_index(85)] - pub type MultiBlock = pallet_election_provider_multi_block::Pallet; - #[runtime::pallet_index(86)] - pub type MultiBlockVerifier = pallet_election_provider_multi_block::verifier::Pallet; - #[runtime::pallet_index(87)] - pub type MultiBlockUnsigned = pallet_election_provider_multi_block::unsigned::Pallet; - #[runtime::pallet_index(88)] - pub type MultiBlockSigned = pallet_election_provider_multi_block::signed::Pallet; } impl TryFrom for pallet_revive::Call { @@ -3244,10 +2973,6 @@ mod benches { [pallet_asset_conversion_tx_payment, AssetConversionTxPayment] [pallet_transaction_payment, TransactionPayment] [pallet_election_provider_multi_phase, ElectionProviderMultiPhase] - [pallet_election_provider_multi_block, MultiBlock] - [pallet_election_provider_multi_block::verifier, MultiBlockVerifier] - [pallet_election_provider_multi_block::unsigned, MultiBlockUnsigned] - [pallet_election_provider_multi_block::signed, MultiBlockSigned] [pallet_election_provider_support_benchmarking, EPSBench::] [pallet_elections_phragmen, Elections] [pallet_fast_unstake, FastUnstake] diff --git a/substrate/bin/node/testing/src/genesis.rs b/substrate/bin/node/testing/src/genesis.rs index aaa19e15d07de..624b00b4d6c23 100644 --- a/substrate/bin/node/testing/src/genesis.rs +++ b/substrate/bin/node/testing/src/genesis.rs @@ -24,7 +24,7 @@ use kitchensink_runtime::{ RuntimeGenesisConfig, SessionConfig, SocietyConfig, StakerStatus, StakingConfig, }; use sp_keyring::Ed25519Keyring; -use sp_runtime::{BoundedVec, Perbill}; +use sp_runtime::Perbill; /// Create genesis 
runtime configuration for tests. pub fn config() -> RuntimeGenesisConfig { @@ -65,8 +65,7 @@ pub fn config_endowed(extra_endowed: Vec) -> RuntimeGenesisConfig { validator_count: 3, minimum_validator_count: 0, slash_reward_fraction: Perbill::from_percent(10), - invulnerables: BoundedVec::try_from(vec![alice(), bob(), charlie()]) - .expect("Too many invulnerable validators: upper limit is MaxInvulnerables from pallet staking config"), + invulnerables: vec![alice(), bob(), charlie()], ..Default::default() }, society: SocietyConfig { pot: 0 }, diff --git a/substrate/frame/babe/src/mock.rs b/substrate/frame/babe/src/mock.rs index eeaebe02d3e8b..20634704fb025 100644 --- a/substrate/frame/babe/src/mock.rs +++ b/substrate/frame/babe/src/mock.rs @@ -31,7 +31,7 @@ use pallet_session::historical as pallet_session_historical; use sp_consensus_babe::{AuthorityId, AuthorityPair, Randomness, Slot, VrfSignature}; use sp_core::{ crypto::{Pair, VrfSecret}, - ConstBool, U256, + U256, }; use sp_io; use sp_runtime::{ @@ -39,7 +39,7 @@ use sp_runtime::{ impl_opaque_keys, testing::{Digest, DigestItem, Header, TestXt}, traits::{Header as _, OpaqueKeys}, - BoundedVec, BuildStorage, Perbill, + BuildStorage, Perbill, }; use sp_staking::{EraIndex, SessionIndex}; @@ -152,9 +152,7 @@ impl onchain::Config for OnChainSeqPhragmen { type Solver = SequentialPhragmen; type DataProvider = Staking; type WeightInfo = (); - type MaxWinnersPerPage = ConstU32<100>; - type MaxBackersPerWinner = ConstU32<100>; - type Sort = ConstBool; + type MaxWinners = ConstU32<100>; type Bounds = ElectionsBounds; } @@ -346,7 +344,7 @@ pub fn new_test_ext_raw_authorities(authorities: Vec) -> sp_io::Tes validator_count: 8, force_era: pallet_staking::Forcing::ForceNew, minimum_validator_count: 0, - invulnerables: BoundedVec::new(), + invulnerables: vec![], ..Default::default() }; diff --git a/substrate/frame/bags-list/remote-tests/src/snapshot.rs b/substrate/frame/bags-list/remote-tests/src/snapshot.rs index 
f8ba7b8d02433..5f999aa0b8b75 100644 --- a/substrate/frame/bags-list/remote-tests/src/snapshot.rs +++ b/substrate/frame/bags-list/remote-tests/src/snapshot.rs @@ -22,10 +22,7 @@ use frame_election_provider_support::{ }; use frame_support::traits::PalletInfoAccess; use remote_externalities::{Builder, Mode, OnlineConfig}; -use sp_runtime::{ - traits::{Block as BlockT, Zero}, - DeserializeOwned, -}; +use sp_runtime::{traits::Block as BlockT, DeserializeOwned}; /// Execute create a snapshot from pallet-staking. pub async fn execute(voter_limit: Option, currency_unit: u64, ws_url: String) @@ -73,9 +70,8 @@ where Some(v) => DataProviderBounds { count: Some(CountBound(v as u32)), size: None }, }; - // single page voter snapshot, thus page index == 0. let voters = - as ElectionDataProvider>::electing_voters(bounds, Zero::zero()) + as ElectionDataProvider>::electing_voters(bounds) .unwrap(); let mut voters_nominator_only = voters diff --git a/substrate/frame/bags-list/src/benchmarks.rs b/substrate/frame/bags-list/src/benchmarks.rs index 7db4c4bb359f7..55f4c24835ea6 100644 --- a/substrate/frame/bags-list/src/benchmarks.rs +++ b/substrate/frame/bags-list/src/benchmarks.rs @@ -29,125 +29,6 @@ use frame_system::RawOrigin as SystemOrigin; use sp_runtime::traits::One; benchmarks_instance_pallet! { - // iteration of any number of items should only touch that many nodes and bags. - #[extra] - iter { - let n = 100; - - // clear any pre-existing storage. - List::::unsafe_clear(); - - // add n nodes, half to first bag and half to second bag. 
- let bag_thresh = T::BagThresholds::get()[0]; - let second_bag_thresh = T::BagThresholds::get()[1]; - - - for i in 0..n/2 { - let node: T::AccountId = account("node", i, 0); - assert_ok!(List::::insert(node.clone(), bag_thresh - One::one())); - } - for i in 0..n/2 { - let node: T::AccountId = account("node", i, 1); - assert_ok!(List::::insert(node.clone(), bag_thresh + One::one())); - } - assert_eq!( - List::::get_bags().into_iter().map(|(bag, nodes)| (bag, nodes.len())).collect::>(), - vec![ - (bag_thresh, (n / 2) as usize), - (second_bag_thresh, (n / 2) as usize), - ] - ); - }: { - let voters = List::::iter(); - let len = voters.collect::>().len(); - assert!(len as u32 == n, "len is {}, expected {}", len, n); - } - - // iteration of any number of items should only touch that many nodes and bags. - #[extra] - iter_take { - let n = 100; - - // clear any pre-existing storage. - List::::unsafe_clear(); - - // add n nodes, half to first bag and half to second bag. - let bag_thresh = T::BagThresholds::get()[0]; - let second_bag_thresh = T::BagThresholds::get()[1]; - - - for i in 0..n/2 { - let node: T::AccountId = account("node", i, 0); - assert_ok!(List::::insert(node.clone(), bag_thresh - One::one())); - } - for i in 0..n/2 { - let node: T::AccountId = account("node", i, 1); - assert_ok!(List::::insert(node.clone(), bag_thresh + One::one())); - } - assert_eq!( - List::::get_bags().into_iter().map(|(bag, nodes)| (bag, nodes.len())).collect::>(), - vec![ - (bag_thresh, (n / 2) as usize), - (second_bag_thresh, (n / 2) as usize), - ] - ); - }: { - // this should only go into one of the bags - let voters = List::::iter().take(n as usize / 4 ); - let len = voters.collect::>().len(); - assert!(len as u32 == n / 4, "len is {}, expected {}", len, n / 4); - } - - #[extra] - iter_from { - let n = 100; - - // clear any pre-existing storage. 
- List::::unsafe_clear(); - - // populate the first 4 bags with n/4 nodes each - let bag_thresh = T::BagThresholds::get()[0]; - - for i in 0..n/4 { - let node: T::AccountId = account("node", i, 0); - assert_ok!(List::::insert(node.clone(), bag_thresh - One::one())); - } - for i in 0..n/4 { - let node: T::AccountId = account("node", i, 1); - assert_ok!(List::::insert(node.clone(), bag_thresh + One::one())); - } - - let bag_thresh = T::BagThresholds::get()[2]; - - for i in 0..n/4 { - let node: T::AccountId = account("node", i, 2); - assert_ok!(List::::insert(node.clone(), bag_thresh - One::one())); - } - - for i in 0..n/4 { - let node: T::AccountId = account("node", i, 3); - assert_ok!(List::::insert(node.clone(), bag_thresh + One::one())); - } - - assert_eq!( - List::::get_bags().into_iter().map(|(bag, nodes)| (bag, nodes.len())).collect::>(), - vec![ - (T::BagThresholds::get()[0], (n / 4) as usize), - (T::BagThresholds::get()[1], (n / 4) as usize), - (T::BagThresholds::get()[2], (n / 4) as usize), - (T::BagThresholds::get()[3], (n / 4) as usize), - ] - ); - - // iter from someone in the 3rd bag, so this should touch ~75 nodes and 3 bags - let from: T::AccountId = account("node", 0, 2); - }: { - let voters = List::::iter_from(&from).unwrap(); - let len = voters.collect::>().len(); - assert!(len as u32 == 74, "len is {}, expected {}", len, 74); - } - - rebag_non_terminal { // An expensive case for rebag-ing (rebag a non-terminal node): // diff --git a/substrate/frame/bags-list/src/lib.rs b/substrate/frame/bags-list/src/lib.rs index 606b07b6e7b6f..37077cd2d4835 100644 --- a/substrate/frame/bags-list/src/lib.rs +++ b/substrate/frame/bags-list/src/lib.rs @@ -148,7 +148,7 @@ pub use list::{notional_bag_for, Bag, List, ListError, Node}; pub use pallet::*; pub use weights::WeightInfo; -pub(crate) const LOG_TARGET: &str = "runtime::bags-list"; +pub(crate) const LOG_TARGET: &str = "runtime::bags_list"; // syntactic sugar for logging. 
#[macro_export] diff --git a/substrate/frame/bags-list/src/list/mod.rs b/substrate/frame/bags-list/src/list/mod.rs index 1fe4ffffaa658..8344674fa1341 100644 --- a/substrate/frame/bags-list/src/list/mod.rs +++ b/substrate/frame/bags-list/src/list/mod.rs @@ -255,7 +255,7 @@ impl, I: 'static> List { /// Iterate over all nodes in all bags in the list. /// /// Full iteration can be expensive; it's recommended to limit the number of items with - /// `.take(n)`, or call `.next()` one by one. + /// `.take(n)`. pub(crate) fn iter() -> impl Iterator> { // We need a touch of special handling here: because we permit `T::BagThresholds` to // omit the final bound, we need to ensure that we explicitly include that threshold in the @@ -302,13 +302,6 @@ impl, I: 'static> List { .filter_map(Bag::get) .flat_map(|bag| bag.iter()); - crate::log!( - debug, - "starting to iterate from {:?}, who's bag is {:?}, and there are {:?} leftover bags", - &start, - start_node_upper, - idx - ); Ok(start_bag.chain(leftover_bags)) } @@ -348,7 +341,7 @@ impl, I: 'static> List { bag.put(); crate::log!( - trace, + debug, "inserted {:?} with score {:?} into bag {:?}, new count is {}", id, score, diff --git a/substrate/frame/beefy/Cargo.toml b/substrate/frame/beefy/Cargo.toml index 1cb4c41f41b09..b8e952dfbd66d 100644 --- a/substrate/frame/beefy/Cargo.toml +++ b/substrate/frame/beefy/Cargo.toml @@ -36,7 +36,6 @@ sp-core = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } sp-staking = { workspace = true, default-features = true } sp-state-machine = { workspace = true } -sp-tracing = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/beefy/src/mock.rs b/substrate/frame/beefy/src/mock.rs index 46491996623fe..ee84d9e5bbe40 100644 --- a/substrate/frame/beefy/src/mock.rs +++ b/substrate/frame/beefy/src/mock.rs @@ -16,6 +16,9 @@ // limitations under the License. 
use codec::{Decode, DecodeWithMemTracking, Encode}; +use scale_info::TypeInfo; +use std::vec; + use frame_election_provider_support::{ bounds::{ElectionBounds, ElectionBoundsBuilder}, onchain, SequentialPhragmen, Weight, @@ -26,15 +29,14 @@ use frame_support::{ }; use frame_system::pallet_prelude::HeaderFor; use pallet_session::historical as pallet_session_historical; -use scale_info::TypeInfo; -use sp_core::{crypto::KeyTypeId, ConstBool, ConstU128}; +use sp_core::{crypto::KeyTypeId, ConstU128}; use sp_runtime::{ app_crypto::ecdsa::Public, curve::PiecewiseLinear, impl_opaque_keys, testing::TestXt, traits::{Header as HeaderT, OpaqueKeys}, - BoundedVec, BuildStorage, Perbill, + BuildStorage, Perbill, }; use sp_staking::{EraIndex, SessionIndex}; use sp_state_machine::BasicExternalities; @@ -236,9 +238,7 @@ impl onchain::Config for OnChainSeqPhragmen { type Solver = SequentialPhragmen; type DataProvider = Staking; type WeightInfo = (); - type MaxWinnersPerPage = ConstU32<100>; - type MaxBackersPerWinner = ConstU32<100>; - type Sort = ConstBool; + type MaxWinners = ConstU32<100>; type Bounds = ElectionsBoundsOnChain; } @@ -278,7 +278,6 @@ impl ExtBuilder { } pub fn build(self) -> sp_io::TestExternalities { - sp_tracing::try_init_simple(); let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); let balances: Vec<_> = @@ -315,7 +314,7 @@ impl ExtBuilder { validator_count: 2, force_era: pallet_staking::Forcing::ForceNew, minimum_validator_count: 0, - invulnerables: BoundedVec::new(), + invulnerables: vec![], ..Default::default() }; diff --git a/substrate/frame/beefy/src/tests.rs b/substrate/frame/beefy/src/tests.rs index 5f713a41cafa5..1bd0a72b25ecd 100644 --- a/substrate/frame/beefy/src/tests.rs +++ b/substrate/frame/beefy/src/tests.rs @@ -39,8 +39,6 @@ use crate::{self as beefy, mock::*, Call, Config, Error, WeightInfoExt}; fn init_block(block: u64) { System::set_block_number(block); - // Staking has to also be initialized, and be the first, to 
have the new validator set ready. - Staking::on_initialize(block); Session::on_initialize(block); } diff --git a/substrate/frame/benchmarking/src/lib.rs b/substrate/frame/benchmarking/src/lib.rs index 0af02ccc1af40..6e21356e9d47a 100644 --- a/substrate/frame/benchmarking/src/lib.rs +++ b/substrate/frame/benchmarking/src/lib.rs @@ -381,7 +381,7 @@ pub use v1::*; /// /// #[extrinsic_call] /// _(RuntimeOrigin::Signed(caller), vec![0u8; l]); -/// +/// /// // Everything onwards will be treated as test. /// assert_last_event::(Event::FooExecuted { result: Ok(()) }.into()); /// Ok(()) diff --git a/substrate/frame/delegated-staking/src/mock.rs b/substrate/frame/delegated-staking/src/mock.rs index 44068ee5a7f31..a944d3808a237 100644 --- a/substrate/frame/delegated-staking/src/mock.rs +++ b/substrate/frame/delegated-staking/src/mock.rs @@ -24,7 +24,7 @@ use frame_support::{ PalletId, }; -use sp_runtime::{traits::IdentityLookup, BoundedVec, BuildStorage, Perbill}; +use sp_runtime::{traits::IdentityLookup, BuildStorage, Perbill}; use frame_election_provider_support::{ bounds::{ElectionBounds, ElectionBoundsBuilder}, @@ -32,7 +32,7 @@ use frame_election_provider_support::{ }; use frame_support::dispatch::RawOrigin; use pallet_staking::{ActiveEra, ActiveEraInfo, CurrentEra}; -use sp_core::{ConstBool, U256}; +use sp_core::U256; use sp_runtime::traits::Convert; use sp_staking::{Agent, Stake, StakingInterface}; @@ -96,9 +96,7 @@ impl onchain::Config for OnChainSeqPhragmen { type Solver = SequentialPhragmen; type DataProvider = Staking; type WeightInfo = (); - type MaxWinnersPerPage = ConstU32<100>; - type MaxBackersPerWinner = ConstU32<100>; - type Sort = ConstBool; + type MaxWinners = ConstU32<100>; type Bounds = ElectionsBoundsOnChain; } @@ -224,7 +222,7 @@ impl ExtBuilder { // ideal validator count validator_count: 2, minimum_validator_count: 1, - invulnerables: BoundedVec::new(), + invulnerables: vec![], slash_reward_fraction: Perbill::from_percent(10), min_nominator_bond: 
ExistentialDeposit::get(), min_validator_bond: ExistentialDeposit::get(), diff --git a/substrate/frame/election-provider-multi-block/Cargo.toml b/substrate/frame/election-provider-multi-block/Cargo.toml deleted file mode 100644 index 907523d288305..0000000000000 --- a/substrate/frame/election-provider-multi-block/Cargo.toml +++ /dev/null @@ -1,84 +0,0 @@ -[package] -name = "pallet-election-provider-multi-block" -version = "0.9.0" -authors.workspace = true -edition.workspace = true -license = "Apache-2.0" -homepage.workspace = true -repository.workspace = true -description = "PALLET multi phase+block election providers" - -[lints] -workspace = true - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -codec = { features = [ - "derive", -], workspace = true } -log = { workspace = true } -scale-info = { features = [ - "derive", -], workspace = true } - -frame-election-provider-support = { workspace = true } -frame-support = { workspace = true } -frame-system = { workspace = true } - -sp-arithmetic = { workspace = true } -sp-core = { workspace = true } -sp-io = { workspace = true } -sp-npos-elections = { workspace = true } -sp-runtime = { workspace = true } -sp-std = { workspace = true } - -# Optional imports for benchmarking -frame-benchmarking = { optional = true, workspace = true } -rand = { features = ["alloc", "small_rng"], optional = true, workspace = true } - -[dev-dependencies] -frame-benchmarking = { workspace = true, default-features = true } -pallet-balances = { workspace = true, default-features = true } -parking_lot = { workspace = true, default-features = true } -sp-core = { workspace = true } -sp-io = { workspace = true, default-features = true } -sp-tracing = { workspace = true, default-features = true } - -[features] -default = ["std"] -std = [ - "codec/std", - "frame-benchmarking?/std", - "frame-election-provider-support/std", - "frame-support/std", - "frame-system/std", - "log/std", - "pallet-balances/std", - 
"rand/std", - "scale-info/std", - "sp-arithmetic/std", - "sp-core/std", - "sp-io/std", - "sp-npos-elections/std", - "sp-runtime/std", - "sp-std/std", - "sp-tracing/std", -] -runtime-benchmarks = [ - "frame-benchmarking/runtime-benchmarks", - "frame-election-provider-support/runtime-benchmarks", - "frame-support/runtime-benchmarks", - "frame-system/runtime-benchmarks", - "pallet-balances/runtime-benchmarks", - "rand", - "sp-runtime/runtime-benchmarks", -] -try-runtime = [ - "frame-election-provider-support/try-runtime", - "frame-support/try-runtime", - "frame-system/try-runtime", - "pallet-balances/try-runtime", - "sp-runtime/try-runtime", -] diff --git a/substrate/frame/election-provider-multi-block/src/benchmarking.rs b/substrate/frame/election-provider-multi-block/src/benchmarking.rs deleted file mode 100644 index 2af6e6747a7f1..0000000000000 --- a/substrate/frame/election-provider-multi-block/src/benchmarking.rs +++ /dev/null @@ -1,170 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use crate::{Config, CurrentPhase, Pallet, Phase, Snapshot}; -use frame_benchmarking::v2::*; -use frame_election_provider_support::ElectionDataProvider; -use frame_support::pallet_prelude::*; -const SNAPSHOT_NOT_BIG_ENOUGH: &'static str = "Snapshot page is not full, you should run this \ -benchmark with enough genesis stakers in staking (DataProvider) to fill a page of voters/targets \ -as per VoterSnapshotPerBlock and TargetSnapshotPerBlock. Generate at least \ -2 * VoterSnapshotPerBlock) nominators and TargetSnapshotPerBlock validators"; - -#[benchmarks(where T: crate::signed::Config + crate::unsigned::Config + crate::verifier::Config)] -mod benchmarks { - use super::*; - - #[benchmark] - fn on_initialize_nothing() -> Result<(), BenchmarkError> { - T::DataProvider::set_next_election(Pallet::::reasonable_next_election()); - assert_eq!(CurrentPhase::::get(), Phase::Off); - - #[block] - { - Pallet::::roll_next(true, false); - } - - assert_eq!(CurrentPhase::::get(), Phase::Off); - Ok(()) - } - - #[benchmark] - fn on_initialize_into_snapshot_msp() -> Result<(), BenchmarkError> { - assert!(T::Pages::get() >= 2, "this benchmark only works in a runtime with 2 pages or more, set at least `type Pages = 2` for benchmark run"); - T::DataProvider::set_next_election(Pallet::::reasonable_next_election()); - // TODO: the results of this benchmark cause too many hits to voters bags list, why??? - - // roll to next block until we are about to go into the snapshot. - Pallet::::run_until_before_matches(|| { - matches!(CurrentPhase::::get(), Phase::Snapshot(_)) - }); - - // since we reverted the last page, we are still in phase Off. 
- assert_eq!(CurrentPhase::::get(), Phase::Off); - - #[block] - { - Pallet::::roll_next(true, false); - } - - assert_eq!(CurrentPhase::::get(), Phase::Snapshot(T::Pages::get() - 1)); - assert_eq!( - Snapshot::::voters_decode_len(T::Pages::get() - 1).unwrap() as u32, - T::VoterSnapshotPerBlock::get(), - "{}", - SNAPSHOT_NOT_BIG_ENOUGH - ); - assert_eq!( - Snapshot::::targets_decode_len().unwrap() as u32, - T::TargetSnapshotPerBlock::get(), - "{}", - SNAPSHOT_NOT_BIG_ENOUGH - ); - - Ok(()) - } - - #[benchmark] - fn on_initialize_into_snapshot_rest() -> Result<(), BenchmarkError> { - assert!(T::Pages::get() >= 2, "this benchmark only works in a runtime with 2 pages or more, set at least `type Pages = 2` for benchmark run"); - T::DataProvider::set_next_election(Pallet::::reasonable_next_election()); - - // roll to the first block of the snapshot. - Pallet::::roll_until_matches(|| matches!(CurrentPhase::::get(), Phase::Snapshot(_))); - - assert_eq!(CurrentPhase::::get(), Phase::Snapshot(T::Pages::get() - 1)); - - // take one more snapshot page. 
- #[block] - { - Pallet::::roll_next(true, false); - } - - assert_eq!(CurrentPhase::::get(), Phase::Snapshot(T::Pages::get() - 2)); - assert_eq!( - Snapshot::::voters_decode_len(T::Pages::get() - 2).unwrap() as u32, - T::VoterSnapshotPerBlock::get(), - "{}", - SNAPSHOT_NOT_BIG_ENOUGH - ); - Ok(()) - } - - #[benchmark] - fn on_initialize_into_signed() -> Result<(), BenchmarkError> { - T::DataProvider::set_next_election(Pallet::::reasonable_next_election()); - Pallet::::run_until_before_matches(|| matches!(CurrentPhase::::get(), Phase::Signed)); - - assert_eq!(CurrentPhase::::get(), Phase::Snapshot(0)); - - #[block] - { - Pallet::::roll_next(true, false); - } - - assert_eq!(CurrentPhase::::get(), Phase::Signed); - - Ok(()) - } - - #[benchmark] - fn on_initialize_into_signed_validation() -> Result<(), BenchmarkError> { - T::DataProvider::set_next_election(Pallet::::reasonable_next_election()); - Pallet::::run_until_before_matches(|| { - matches!(CurrentPhase::::get(), Phase::SignedValidation(_)) - }); - - assert_eq!(CurrentPhase::::get(), Phase::Signed); - - #[block] - { - Pallet::::roll_next(true, false); - } - - Ok(()) - } - - #[benchmark] - fn on_initialize_into_unsigned() -> Result<(), BenchmarkError> { - T::DataProvider::set_next_election(Pallet::::reasonable_next_election()); - Pallet::::run_until_before_matches(|| { - matches!(CurrentPhase::::get(), Phase::Unsigned(_)) - }); - assert!(matches!(CurrentPhase::::get(), Phase::SignedValidation(_))); - - #[block] - { - Pallet::::roll_next(true, false); - } - - assert!(matches!(CurrentPhase::::get(), Phase::Unsigned(_))); - Ok(()) - } - - #[benchmark] - fn manage() -> Result<(), BenchmarkError> { - #[block] - {} - Ok(()) - } - - impl_benchmark_test_suite!( - Pallet, - crate::mock::ExtBuilder::full().build_unchecked(), - crate::mock::Runtime - ); -} diff --git a/substrate/frame/election-provider-multi-block/src/helpers.rs b/substrate/frame/election-provider-multi-block/src/helpers.rs deleted file mode 100644 index 
20396ac97d224..0000000000000 --- a/substrate/frame/election-provider-multi-block/src/helpers.rs +++ /dev/null @@ -1,227 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Some helper functions/macros for this crate. - -use crate::{ - types::{PageIndex, VoterOf}, - unsigned::miner::MinerConfig, - AllVoterPagesOf, SolutionTargetIndexOf, SolutionVoterIndexOf, VoteWeight, -}; -use frame_support::{traits::Get, BoundedVec}; -use sp_runtime::SaturatedConversion; -use sp_std::{collections::btree_map::BTreeMap, convert::TryInto, prelude::*}; - -/// Emit a log specific to this pallet, setting the target to [`crate::LOG_PREFIX`] -#[macro_export] -macro_rules! log { - ($level:tt, $pattern:expr $(, $values:expr)* $(,)?) => { - log::$level!( - target: $crate::LOG_PREFIX, - concat!("[#{:?}] 🗳🗳🗳 ", $pattern), >::block_number() $(, $values)* - ) - }; -} - -/// Emit a log within a submodule of the pallet -#[macro_export] -macro_rules! sublog { - ($level:tt, $sub_pallet:tt, $pattern:expr $(, $values:expr)* $(,)?) => { - #[cfg(not(feature = "std"))] - log!($level, $pattern $(, $values )*); - #[cfg(feature = "std")] - log::$level!( - target: format!("{}::{}", $crate::LOG_PREFIX, $sub_pallet).as_ref(), - concat!("[#{:?}] 🗳🗳🗳 ", $pattern), >::block_number() $(, $values )* - ) - }; -} - -/// Emit a log from within the offchain miner. 
-#[macro_export] -macro_rules! miner_log { - ($level:tt, $pattern:expr $(, $values:expr)* $(,)?) => { - log::$level!( - target: $crate::LOG_PREFIX, - concat!("[⛏️miner] 🗳🗳🗳 ", $pattern) $(, $values)* - ) - }; -} - -/// Generate an `efficient closure of voters and the page in which they live in. -pub(crate) fn generate_voter_page_fn( - paged_snapshot: &AllVoterPagesOf, -) -> impl Fn(&T::AccountId) -> Option { - let mut cache: BTreeMap = BTreeMap::new(); - paged_snapshot - .iter() - .enumerate() - .map(|(page, whatever)| (page.saturated_into::(), whatever)) - .for_each(|(page, page_voters)| { - page_voters.iter().for_each(|(v, _, _)| { - let _existed = cache.insert(v.clone(), page); - // if a duplicate exists, we only consider the last one. Defensive only, should - // never happen. - debug_assert!(_existed.is_none()); - }); - }); - move |who| cache.get(who).copied() -} - -/// Generate a btree-map cache of the voters and their indices within the provided `snapshot`. -/// -/// This does not care about pagination. `snapshot` might be a single page or the entire blob of -/// voters. -/// -/// This can be used to efficiently build index getter closures. -pub(crate) fn generate_voter_cache>( - snapshot: &BoundedVec, AnyBound>, -) -> BTreeMap { - let mut cache: BTreeMap = BTreeMap::new(); - snapshot.iter().enumerate().for_each(|(i, (x, _, _))| { - let _existed = cache.insert(x.clone(), i); - // if a duplicate exists, we only consider the last one. Defensive only, should never - // happen. - debug_assert!(_existed.is_none()); - }); - - cache -} - -/// Create a function that returns the index of a voter in the snapshot. -/// -/// Same as [`voter_index_fn`] but the returned function owns all its necessary data; nothing is -/// borrowed. 
-pub(crate) fn voter_index_fn_owned( - cache: BTreeMap, -) -> impl Fn(&T::AccountId) -> Option> { - move |who| { - cache - .get(who) - .and_then(|i| >>::try_into(*i).ok()) - } -} - -/// Same as [`voter_index_fn`], but the returning index is converted into usize, if possible. -/// -/// ## Warning -/// -/// Note that this will represent the snapshot data from which the `cache` is generated. -pub(crate) fn voter_index_fn_usize( - cache: &BTreeMap, -) -> impl Fn(&T::AccountId) -> Option + '_ { - move |who| cache.get(who).cloned() -} - -/// A non-optimized, linear version of [`voter_index_fn`] that does not need a cache and does a -/// linear search. -/// -/// ## Warning -/// -/// Not meant to be used in production. -#[cfg(test)] -pub(crate) fn voter_index_fn_linear( - snapshot: &Vec>, -) -> impl Fn(&T::AccountId) -> Option> + '_ { - move |who| { - snapshot - .iter() - .position(|(x, _, _)| x == who) - .and_then(|i| >>::try_into(i).ok()) - } -} - -/// Create a function that returns the index of a target in the snapshot. -/// -/// The returned index type is the same as the one defined in `T::Solution::Target`. -/// -/// Note: to the extent possible, the returned function should be cached and reused. Producing that -/// function requires a `O(n log n)` data transform. Each invocation of that function completes -/// in `O(log n)`. -pub(crate) fn target_index_fn( - snapshot: &Vec, -) -> impl Fn(&T::AccountId) -> Option> + '_ { - let cache: BTreeMap<_, _> = - snapshot.iter().enumerate().map(|(idx, account_id)| (account_id, idx)).collect(); - move |who| { - cache - .get(who) - .and_then(|i| >>::try_into(*i).ok()) - } -} - -/// Create a function the returns the index to a target in the snapshot. -/// -/// The returned index type is the same as the one defined in `T::Solution::Target`. -/// -/// ## Warning -/// -/// Not meant to be used in production. 
-#[cfg(test)] -pub(crate) fn target_index_fn_linear( - snapshot: &Vec, -) -> impl Fn(&T::AccountId) -> Option> + '_ { - move |who| { - snapshot - .iter() - .position(|x| x == who) - .and_then(|i| >>::try_into(i).ok()) - } -} - -/// Create a function that can map a voter index ([`SolutionVoterIndexOf`]) to the actual voter -/// account using a linearly indexible snapshot. -pub(crate) fn voter_at_fn( - snapshot: &Vec>, -) -> impl Fn(SolutionVoterIndexOf) -> Option + '_ { - move |i| { - as TryInto>::try_into(i) - .ok() - .and_then(|i| snapshot.get(i).map(|(x, _, _)| x).cloned()) - } -} - -/// Create a function that can map a target index ([`SolutionTargetIndexOf`]) to the actual target -/// account using a linearly indexible snapshot. -pub(crate) fn target_at_fn( - snapshot: &Vec, -) -> impl Fn(SolutionTargetIndexOf) -> Option + '_ { - move |i| { - as TryInto>::try_into(i) - .ok() - .and_then(|i| snapshot.get(i).cloned()) - } -} - -/// Create a function to get the stake of a voter. -/// -/// ## Warning -/// -/// The cache need must be derived from the same snapshot. Zero is returned if a voter is -/// non-existent. -pub(crate) fn stake_of_fn<'a, T: MinerConfig, AnyBound: Get>( - snapshot: &'a BoundedVec, AnyBound>, - cache: &'a BTreeMap, -) -> impl Fn(&T::AccountId) -> VoteWeight + 'a { - move |who| { - if let Some(index) = cache.get(who) { - snapshot.get(*index).map(|(_, x, _)| x).cloned().unwrap_or_default() - } else { - 0 - } - } -} diff --git a/substrate/frame/election-provider-multi-block/src/mock/mod.rs b/substrate/frame/election-provider-multi-block/src/mock/mod.rs deleted file mode 100644 index 5c68494f66b57..0000000000000 --- a/substrate/frame/election-provider-multi-block/src/mock/mod.rs +++ /dev/null @@ -1,700 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -mod signed; -mod staking; -mod weight_info; - -use super::*; -use crate::{ - self as multi_block, - signed::{self as signed_pallet, HoldReason}, - unsigned::{ - self as unsigned_pallet, - miner::{MinerConfig, OffchainMinerError, OffchainWorkerMiner}, - }, - verifier::{self as verifier_pallet, AsynchronousVerifier, Status}, -}; -use codec::{Decode, Encode, MaxEncodedLen}; -use frame_election_provider_support::{ - bounds::{ElectionBounds, ElectionBoundsBuilder}, - InstantElectionProvider, NposSolution, SequentialPhragmen, -}; -pub use frame_support::{assert_noop, assert_ok}; -use frame_support::{ - derive_impl, parameter_types, - traits::{fungible::InspectHold, Hooks}, - weights::{constants, Weight}, -}; -use frame_system::EnsureRoot; -use parking_lot::RwLock; -pub use signed::*; -use sp_core::{ - offchain::{ - testing::{PoolState, TestOffchainExt, TestTransactionPoolExt}, - OffchainDbExt, OffchainWorkerExt, TransactionPoolExt, - }, - ConstBool, -}; -use sp_npos_elections::EvaluateSupport; -use sp_runtime::{ - bounded_vec, - traits::{BlakeTwo256, IdentityLookup}, - BuildStorage, PerU16, Perbill, -}; -pub use staking::*; -use std::{sync::Arc, vec}; - -pub type Extrinsic = sp_runtime::testing::TestXt; - -pub type Balance = u64; -pub type AccountId = u64; -pub type BlockNumber = u64; -pub type VoterIndex = u32; -pub type TargetIndex = u16; - 
-frame_support::construct_runtime!( - pub enum Runtime { - System: frame_system, - Balances: pallet_balances, - MultiBlock: multi_block, - SignedPallet: signed_pallet, - VerifierPallet: verifier_pallet, - UnsignedPallet: unsigned_pallet, - } -); - -frame_election_provider_support::generate_solution_type!( - pub struct TestNposSolution::< - VoterIndex = VoterIndex, - TargetIndex = TargetIndex, - Accuracy = PerU16, - MaxVoters = ConstU32::<2_000> - >(16) -); - -#[derive_impl(frame_system::config_preludes::TestDefaultConfig)] -impl frame_system::Config for Runtime { - type Hashing = BlakeTwo256; - type AccountId = AccountId; - type Lookup = IdentityLookup; - type BlockLength = (); - type BlockWeights = BlockWeights; - type AccountData = pallet_balances::AccountData; - type Block = frame_system::mocking::MockBlock; -} - -const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); -parameter_types! { - pub const ExistentialDeposit: Balance = 1; - pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights - ::with_sensible_defaults( - Weight::from_parts(2u64 * constants::WEIGHT_REF_TIME_PER_SECOND, u64::MAX), - NORMAL_DISPATCH_RATIO, - ); -} - -#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)] -impl pallet_balances::Config for Runtime { - type Balance = Balance; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; - type MaxLocks = (); - type MaxReserves = (); - type ReserveIdentifier = [u8; 8]; - type WeightInfo = (); -} - -#[allow(unused)] -#[derive(Clone)] -pub enum FallbackModes { - // TODO: test for this mode - Continue, - Emergency, - Onchain, -} - -parameter_types! 
{ - pub static Pages: PageIndex = 3; - pub static UnsignedPhase: BlockNumber = 5; - pub static SignedPhase: BlockNumber = 5; - pub static SignedValidationPhase: BlockNumber = 5; - - pub static FallbackMode: FallbackModes = FallbackModes::Emergency; - pub static MinerTxPriority: u64 = 100; - pub static SolutionImprovementThreshold: Perbill = Perbill::zero(); - pub static OffchainRepeat: BlockNumber = 5; - pub static MinerMaxLength: u32 = 256; - pub static MaxVotesPerVoter: u32 = ::LIMIT as u32; - - // by default we stick to 3 pages to host our 12 voters. - pub static VoterSnapshotPerBlock: VoterIndex = 4; - // and 4 targets, whom we fetch all. - pub static TargetSnapshotPerBlock: TargetIndex = 4; - pub static Lookahead: BlockNumber = 0; - - // we have 12 voters in the default setting, this should be enough to make sure they are not - // trimmed accidentally in any test. - #[derive(Encode, Decode, PartialEq, Eq, Debug, scale_info::TypeInfo, MaxEncodedLen)] - pub static MaxBackersPerWinner: u32 = 12; - pub static MaxBackersPerWinnerFinal: u32 = 12; - // we have 4 targets in total and we desire `Desired` thereof, no single page can represent more - // than the min of these two. 
- #[derive(Encode, Decode, PartialEq, Eq, Debug, scale_info::TypeInfo, MaxEncodedLen)] - pub static MaxWinnersPerPage: u32 = (staking::Targets::get().len() as u32).min(staking::DesiredTargets::get()); -} - -impl crate::verifier::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type SolutionImprovementThreshold = SolutionImprovementThreshold; - type MaxBackersPerWinnerFinal = MaxBackersPerWinnerFinal; - type MaxBackersPerWinner = MaxBackersPerWinner; - type MaxWinnersPerPage = MaxWinnersPerPage; - type SolutionDataProvider = signed::DualSignedPhase; - type WeightInfo = (); -} - -impl crate::unsigned::Config for Runtime { - type OffchainRepeat = OffchainRepeat; - type MinerTxPriority = MinerTxPriority; - type OffchainSolver = SequentialPhragmen; - type WeightInfo = (); -} - -impl MinerConfig for Runtime { - type AccountId = AccountId; - type Hash = ::Hash; - type MaxLength = MinerMaxLength; - type Pages = Pages; - type MaxVotesPerVoter = MaxVotesPerVoter; - type Solution = TestNposSolution; - type Solver = SequentialPhragmen; - type TargetSnapshotPerBlock = TargetSnapshotPerBlock; - type VoterSnapshotPerBlock = VoterSnapshotPerBlock; - type MaxBackersPerWinner = MaxBackersPerWinner; - type MaxBackersPerWinnerFinal = MaxBackersPerWinnerFinal; - type MaxWinnersPerPage = MaxWinnersPerPage; -} - -impl crate::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type SignedPhase = SignedPhase; - type SignedValidationPhase = SignedValidationPhase; - type UnsignedPhase = UnsignedPhase; - type DataProvider = staking::MockStaking; - type Fallback = MockFallback; - type TargetSnapshotPerBlock = TargetSnapshotPerBlock; - type VoterSnapshotPerBlock = VoterSnapshotPerBlock; - type Lookahead = Lookahead; - type MinerConfig = Self; - type WeightInfo = weight_info::DualMockWeightInfo; - type Verifier = VerifierPallet; - type AdminOrigin = EnsureRoot; - type Pages = Pages; -} - -parameter_types! 
{ - pub static OnChainElectionBounds: ElectionBounds = ElectionBoundsBuilder::default().build(); -} - -impl onchain::Config for Runtime { - type DataProvider = staking::MockStaking; - type MaxBackersPerWinner = MaxBackersPerWinner; - type MaxWinnersPerPage = MaxWinnersPerPage; - type Sort = ConstBool; - type Solver = SequentialPhragmen; - type System = Runtime; - type WeightInfo = (); - type Bounds = OnChainElectionBounds; -} - -pub struct MockFallback; -impl ElectionProvider for MockFallback { - type AccountId = AccountId; - type BlockNumber = u64; - type Error = String; - type DataProvider = staking::MockStaking; - type Pages = ConstU32<1>; - type MaxBackersPerWinner = MaxBackersPerWinner; - type MaxWinnersPerPage = MaxWinnersPerPage; - - fn elect(_remaining: PageIndex) -> Result, Self::Error> { - unreachable!() - } - - fn ongoing() -> bool { - false - } -} - -impl InstantElectionProvider for MockFallback { - fn instant_elect( - voters: Vec>, - targets: Vec, - desired_targets: u32, - ) -> Result, Self::Error> { - match FallbackMode::get() { - FallbackModes::Continue => - crate::Continue::::instant_elect(voters, targets, desired_targets) - .map_err(|x| x.to_string()), - FallbackModes::Emergency => crate::InitiateEmergencyPhase::::instant_elect( - voters, - targets, - desired_targets, - ) - .map_err(|x| x.to_string()), - FallbackModes::Onchain => onchain::OnChainExecution::::instant_elect( - voters, - targets, - desired_targets, - ) - .map_err(|e| format!("onchain fallback failed: {:?}", e)), - } - } - fn bother() -> bool { - matches!(FallbackMode::get(), FallbackModes::Onchain) - } -} - -impl frame_system::offchain::CreateTransactionBase for Runtime -where - RuntimeCall: From, -{ - type RuntimeCall = RuntimeCall; - type Extrinsic = Extrinsic; -} - -impl frame_system::offchain::CreateInherent for Runtime -where - RuntimeCall: From, -{ - fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { - Extrinsic::new_bare(call) - } -} - -pub struct ExtBuilder {} - 
-impl ExtBuilder { - pub fn full() -> Self { - Self {} - } - - pub fn verifier() -> Self { - SignedPhase::set(0); - SignedValidationPhase::set(0); - signed::SignedPhaseSwitch::set(signed::SignedSwitch::Mock); - Self {} - } - - pub fn unsigned() -> Self { - SignedPhase::set(0); - SignedValidationPhase::set(0); - signed::SignedPhaseSwitch::set(signed::SignedSwitch::Mock); - Self {} - } - - pub fn signed() -> Self { - UnsignedPhase::set(0); - Self {} - } -} - -impl ExtBuilder { - pub(crate) fn max_backers_per_winner(self, c: u32) -> Self { - MaxBackersPerWinner::set(c); - self - } - pub(crate) fn max_backers_per_winner_final(self, c: u32) -> Self { - MaxBackersPerWinnerFinal::set(c); - self - } - pub(crate) fn miner_tx_priority(self, p: u64) -> Self { - MinerTxPriority::set(p); - self - } - pub(crate) fn solution_improvement_threshold(self, p: Perbill) -> Self { - SolutionImprovementThreshold::set(p); - self - } - pub(crate) fn pages(self, pages: PageIndex) -> Self { - Pages::set(pages); - self - } - pub(crate) fn lookahead(self, lookahead: BlockNumber) -> Self { - Lookahead::set(lookahead); - self - } - pub(crate) fn voter_per_page(self, count: u32) -> Self { - VoterSnapshotPerBlock::set(count); - self - } - pub(crate) fn miner_max_length(self, len: u32) -> Self { - MinerMaxLength::set(len); - self - } - pub(crate) fn desired_targets(self, t: u32) -> Self { - staking::DesiredTargets::set(t); - self - } - pub(crate) fn signed_phase(self, d: BlockNumber, v: BlockNumber) -> Self { - SignedPhase::set(d); - SignedValidationPhase::set(v); - self - } - pub(crate) fn unsigned_phase(self, d: BlockNumber) -> Self { - UnsignedPhase::set(d); - self - } - pub(crate) fn signed_validation_phase(self, d: BlockNumber) -> Self { - SignedValidationPhase::set(d); - self - } - #[allow(unused)] - pub(crate) fn add_voter(self, who: AccountId, stake: Balance, targets: Vec) -> Self { - staking::VOTERS.with(|v| v.borrow_mut().push((who, stake, targets.try_into().unwrap()))); - self - } - 
pub(crate) fn fallback_mode(self, mode: FallbackModes) -> Self { - FallbackMode::set(mode); - self - } - pub(crate) fn build_unchecked(self) -> sp_io::TestExternalities { - sp_tracing::try_init_simple(); - let mut storage = - frame_system::GenesisConfig::::default().build_storage().unwrap(); - - let _ = pallet_balances::GenesisConfig:: { - balances: vec![ - // bunch of account for submitting stuff only. - (91, 100), - (92, 100), - (93, 100), - (94, 100), - (95, 100), - (96, 100), - (97, 100), - (99, 100), - (999, 100), - (9999, 100), - ], - ..Default::default() - } - .assimilate_storage(&mut storage); - - sp_io::TestExternalities::from(storage) - } - - /// Warning: this does not execute the post-sanity-checks. - pub(crate) fn build_offchainify(self) -> (sp_io::TestExternalities, Arc>) { - let mut ext = self.build_unchecked(); - let (offchain, _offchain_state) = TestOffchainExt::new(); - let (pool, pool_state) = TestTransactionPoolExt::new(); - - ext.register_extension(OffchainDbExt::new(offchain.clone())); - ext.register_extension(OffchainWorkerExt::new(offchain)); - ext.register_extension(TransactionPoolExt::new(pool)); - - (ext, pool_state) - } - - /// Build the externalities, and execute the given s`test` closure with it. 
- pub(crate) fn build_and_execute(self, test: impl FnOnce() -> ()) { - let mut ext = self.build_unchecked(); - ext.execute_with_sanity_checks(test); - } -} - -pub trait ExecuteWithSanityChecks { - fn execute_with_sanity_checks(&mut self, test: impl FnOnce() -> ()); -} - -impl ExecuteWithSanityChecks for sp_io::TestExternalities { - fn execute_with_sanity_checks(&mut self, test: impl FnOnce() -> ()) { - self.execute_with(test); - self.execute_with(all_pallets_sanity_checks) - } -} - -fn all_pallets_sanity_checks() { - let now = System::block_number(); - let _ = VerifierPallet::do_try_state(now).unwrap(); - let _ = UnsignedPallet::do_try_state(now).unwrap(); - let _ = MultiBlock::do_try_state(now).unwrap(); - let _ = SignedPallet::do_try_state(now).unwrap(); -} - -/// Fully verify a solution. -/// -/// This will progress the blocks until the verifier pallet is done verifying it. -/// -/// The solution must have already been loaded via `load_and_start_verification`. -/// -/// Return the final supports, which is the outcome. If this succeeds, then the valid variant of the -/// `QueuedSolution` form `verifier` is ready to be read. -pub fn roll_to_full_verification() -> Vec> { - // we must be ready to verify. - assert_eq!(VerifierPallet::status(), Status::Ongoing(Pages::get() - 1)); - - while matches!(VerifierPallet::status(), Status::Ongoing(_)) { - roll_to(System::block_number() + 1); - } - - (MultiBlock::lsp()..=MultiBlock::msp()) - .map(|p| VerifierPallet::get_queued_solution_page(p).unwrap_or_default()) - .collect::>() -} - -/// Generate a single page of `TestNposSolution` from the give supports. -/// -/// All of the voters in this support must live in a single page of the snapshot, noted by -/// `snapshot_page`. 
-pub fn solution_from_supports( - supports: sp_npos_elections::Supports, - snapshot_page: PageIndex, -) -> TestNposSolution { - let staked = sp_npos_elections::supports_to_staked_assignment(supports); - let assignments = sp_npos_elections::assignment_staked_to_ratio_normalized(staked).unwrap(); - - let voters = crate::Snapshot::::voters(snapshot_page).unwrap(); - let targets = crate::Snapshot::::targets().unwrap(); - let voter_index = helpers::voter_index_fn_linear::(&voters); - let target_index = helpers::target_index_fn_linear::(&targets); - - TestNposSolution::from_assignment(&assignments, &voter_index, &target_index).unwrap() -} - -/// Generate a raw paged solution from the given vector of supports. -/// -/// Given vector must be aligned with the snapshot, at most need to be 'pagified' which we do -/// internally. -pub fn raw_paged_from_supports( - paged_supports: Vec>, - round: u32, -) -> PagedRawSolution { - let score = { - let flattened = paged_supports.iter().cloned().flatten().collect::>(); - flattened.evaluate() - }; - - let solution_pages = paged_supports - .pagify(Pages::get()) - .map(|(page_index, page_support)| solution_from_supports(page_support.to_vec(), page_index)) - .collect::>(); - - let solution_pages = solution_pages.try_into().unwrap(); - PagedRawSolution { solution_pages, score, round } -} - -/// ensure that the snapshot fully exists. -/// -/// NOTE: this should not be used that often, because we check snapshot in sanity checks, which are -/// called ALL THE TIME. -pub fn assert_full_snapshot() { - assert_ok!(Snapshot::::ensure_snapshot(true, Pages::get())); -} - -/// ensure that the no snapshot exists. -/// -/// NOTE: this should not be used that often, because we check snapshot in sanity checks, which are -/// called ALL THE TIME. -pub fn assert_none_snapshot() { - assert_ok!(Snapshot::::ensure_snapshot(false, Pages::get())); -} - -/// Simple wrapper for mining a new solution. 
Just more handy in case the interface of mine solution -/// changes. -/// -/// For testing, we never want to do reduce. -pub fn mine_full_solution() -> Result, OffchainMinerError> { - OffchainWorkerMiner::::mine_solution(Pages::get(), false) -} - -/// Same as [`mine_full_solution`] but with custom pages. -pub fn mine_solution( - pages: PageIndex, -) -> Result, OffchainMinerError> { - OffchainWorkerMiner::::mine_solution(pages, false) -} - -/// Assert that `count` voters exist across `pages` number of pages. -pub fn ensure_voters(pages: PageIndex, count: usize) { - assert_eq!(crate::Snapshot::::voter_pages(), pages); - assert_eq!(crate::Snapshot::::voters_iter_flattened().count(), count); -} - -/// Assert that `count` targets exist across `pages` number of pages. -pub fn ensure_targets(pages: PageIndex, count: usize) { - assert_eq!(crate::Snapshot::::target_pages(), pages); - assert_eq!(crate::Snapshot::::targets().unwrap().len(), count); -} - -/// get the events of the multi-block pallet. -pub fn multi_block_events() -> Vec> { - System::events() - .into_iter() - .map(|r| r.event) - .filter_map(|e| if let RuntimeEvent::MultiBlock(inner) = e { Some(inner) } else { None }) - .collect::>() -} - -/// get the events of the verifier pallet. -pub fn verifier_events() -> Vec> { - System::events() - .into_iter() - .map(|r| r.event) - .filter_map( - |e| if let RuntimeEvent::VerifierPallet(inner) = e { Some(inner) } else { None }, - ) - .collect::>() -} - -/// proceed block number to `n`. -pub fn roll_to(n: BlockNumber) { - crate::Pallet::::roll_to( - n, - matches!(SignedPhaseSwitch::get(), SignedSwitch::Real), - true, - ); -} - -/// proceed block number to whenever the snapshot is fully created (`Phase::Snapshot(0)`). -pub fn roll_to_snapshot_created() { - while !matches!(MultiBlock::current_phase(), Phase::Snapshot(0)) { - roll_next() - } - assert_full_snapshot(); -} - -/// proceed block number to whenever the unsigned phase is open (`Phase::Unsigned(_)`). 
-pub fn roll_to_unsigned_open() { - while !matches!(MultiBlock::current_phase(), Phase::Unsigned(_)) { - roll_next() - } -} - -/// proceed block number to whenever the signed phase is open (`Phase::Signed(_)`). -pub fn roll_to_signed_open() { - while !matches!(MultiBlock::current_phase(), Phase::Signed) { - roll_next(); - } -} - -/// proceed block number to whenever the signed validation phase is open -/// (`Phase::SignedValidation(_)`). -pub fn roll_to_signed_validation_open() { - while !matches!(MultiBlock::current_phase(), Phase::SignedValidation(_)) { - roll_next() - } -} - -/// Proceed one block. -pub fn roll_next() { - roll_to(System::block_number() + 1); -} - -/// Proceed one block, and execute offchain workers as well. -pub fn roll_next_with_ocw(maybe_pool: Option>>) { - roll_to_with_ocw(System::block_number() + 1, maybe_pool) -} - -/// proceed block number to `n`, while running all offchain workers as well. -pub fn roll_to_with_ocw(n: BlockNumber, maybe_pool: Option>>) { - use sp_runtime::traits::Dispatchable; - let now = System::block_number(); - for i in now + 1..=n { - // check the offchain transaction pool, and if anything's there, submit it. - if let Some(ref pool) = maybe_pool { - pool.read() - .transactions - .clone() - .into_iter() - .map(|uxt| ::decode(&mut &*uxt).unwrap()) - .for_each(|xt| { - xt.function.dispatch(frame_system::RawOrigin::None.into()).unwrap(); - }); - pool.try_write().unwrap().transactions.clear(); - } - - System::set_block_number(i); - - MultiBlock::on_initialize(i); - VerifierPallet::on_initialize(i); - UnsignedPallet::on_initialize(i); - if matches!(SignedPhaseSwitch::get(), SignedSwitch::Real) { - SignedPallet::on_initialize(i); - } - - MultiBlock::offchain_worker(i); - VerifierPallet::offchain_worker(i); - UnsignedPallet::offchain_worker(i); - if matches!(SignedPhaseSwitch::get(), SignedSwitch::Real) { - SignedPallet::offchain_worker(i); - } - - // invariants must hold at the end of each block. 
- all_pallets_sanity_checks() - } -} - -/// An invalid solution with any score. -pub fn fake_solution(score: ElectionScore) -> PagedRawSolution { - PagedRawSolution { - score, - solution_pages: bounded_vec![Default::default()], - ..Default::default() - } -} - -/// A real solution that's valid, but has a really bad score. -/// -/// This is different from `solution_from_supports` in that it does not require the snapshot to -/// exist. -// TODO: probably deprecate this. -pub fn raw_paged_solution_low_score() -> PagedRawSolution { - PagedRawSolution { - solution_pages: vec![TestNposSolution { - // 2 targets, both voting for themselves - votes1: vec![(0, 0), (1, 2)], - ..Default::default() - }] - .try_into() - .unwrap(), - round: 0, - score: ElectionScore { minimal_stake: 10, sum_stake: 20, sum_stake_squared: 200 }, - } -} - -/// Get the free and held balance of `who`. -pub fn balances(who: AccountId) -> (Balance, Balance) { - ( - Balances::free_balance(who), - Balances::balance_on_hold(&HoldReason::SignedSubmission.into(), &who), - ) -} - -/// Election bounds based on just the given count. -pub fn bound_by_count(count: Option) -> DataProviderBounds { - DataProviderBounds { count: count.map(|x| x.into()), size: None } -} - -pub fn emergency_solution() -> (BoundedSupportsOf, ElectionScore) { - let supports = onchain::OnChainExecution::::elect(0).unwrap(); - let score = supports.evaluate(); - (supports, score) -} diff --git a/substrate/frame/election-provider-multi-block/src/mock/signed.rs b/substrate/frame/election-provider-multi-block/src/mock/signed.rs deleted file mode 100644 index 33436374cd1d4..0000000000000 --- a/substrate/frame/election-provider-multi-block/src/mock/signed.rs +++ /dev/null @@ -1,255 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use super::{Balance, Balances, Pages, Runtime, RuntimeEvent, SignedPallet, System}; -use crate::{ - mock::{ - balances, multi_block_events, roll_next, roll_to_signed_validation_open, verifier_events, - AccountId, RuntimeHoldReason, RuntimeOrigin, VerifierPallet, - }, - signed::{self as signed_pallet, Event as SignedEvent, Submissions}, - unsigned::miner::MinerConfig, - verifier::{self, AsynchronousVerifier, SolutionDataProvider, VerificationResult, Verifier}, - Event, PadSolutionPages, PagedRawSolution, Pagify, Phase, SolutionOf, -}; -use frame_election_provider_support::PageIndex; -use frame_support::{ - assert_ok, dispatch::PostDispatchInfo, parameter_types, traits::EstimateCallFee, BoundedVec, -}; -use sp_npos_elections::ElectionScore; -use sp_runtime::{traits::Zero, Perbill}; - -parameter_types! { - pub static MockSignedNextSolution: Option, Pages>> = None; - pub static MockSignedNextScore: Option = Default::default(); - pub static MockSignedResults: Vec = Default::default(); -} - -/// A simple implementation of the signed phase that can be controller by some static variables -/// directly. -/// -/// Useful for when you don't care too much about the signed phase. 
-pub struct MockSignedPhase; -impl SolutionDataProvider for MockSignedPhase { - type Solution = ::Solution; - fn get_page(page: PageIndex) -> Option { - MockSignedNextSolution::get().map(|i| i.get(page as usize).cloned().unwrap_or_default()) - } - - fn get_score() -> Option { - MockSignedNextScore::get() - } - - fn report_result(result: verifier::VerificationResult) { - MOCK_SIGNED_RESULTS.with(|r| r.borrow_mut().push(result)); - } -} - -pub struct FixedCallFee; -impl EstimateCallFee, Balance> for FixedCallFee { - fn estimate_call_fee(_: &signed_pallet::Call, _: PostDispatchInfo) -> Balance { - 1 - } -} - -parameter_types! { - pub static SignedDepositBase: Balance = 5; - pub static SignedDepositPerPage: Balance = 1; - pub static SignedMaxSubmissions: u32 = 3; - pub static SignedRewardBase: Balance = 3; - pub static SignedPhaseSwitch: SignedSwitch = SignedSwitch::Real; - pub static BailoutGraceRatio: Perbill = Perbill::from_percent(20); -} - -impl crate::signed::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type RuntimeHoldReason = RuntimeHoldReason; - type Currency = Balances; - type DepositBase = SignedDepositBase; - type DepositPerPage = SignedDepositPerPage; - type EstimateCallFee = FixedCallFee; - type MaxSubmissions = SignedMaxSubmissions; - type RewardBase = SignedRewardBase; - type BailoutGraceRatio = BailoutGraceRatio; - type WeightInfo = (); -} - -/// Control which signed phase is being used. 
-#[derive(Clone, Copy, PartialEq, Eq, Debug)] -pub enum SignedSwitch { - Mock, - Real, -} - -pub struct DualSignedPhase; -impl SolutionDataProvider for DualSignedPhase { - type Solution = ::Solution; - fn get_page(page: PageIndex) -> Option { - match SignedPhaseSwitch::get() { - SignedSwitch::Mock => MockSignedNextSolution::get() - .map(|i| i.get(page as usize).cloned().unwrap_or_default()), - SignedSwitch::Real => SignedPallet::get_page(page), - } - } - - fn get_score() -> Option { - match SignedPhaseSwitch::get() { - SignedSwitch::Mock => MockSignedNextScore::get(), - SignedSwitch::Real => SignedPallet::get_score(), - } - } - - fn report_result(result: verifier::VerificationResult) { - match SignedPhaseSwitch::get() { - SignedSwitch::Mock => MOCK_SIGNED_RESULTS.with(|r| r.borrow_mut().push(result)), - SignedSwitch::Real => SignedPallet::report_result(result), - } - } -} - -/// get the events of the verifier pallet. -pub fn signed_events() -> Vec> { - System::events() - .into_iter() - .map(|r| r.event) - .filter_map(|e| if let RuntimeEvent::SignedPallet(inner) = e { Some(inner) } else { None }) - .collect::>() -} - -/// Load a signed solution into its pallet. 
-pub fn load_signed_for_verification(who: AccountId, paged: PagedRawSolution) { - let initial_balance = Balances::free_balance(&who); - assert_eq!(balances(who), (initial_balance, 0)); - - assert_ok!(SignedPallet::register(RuntimeOrigin::signed(who), paged.score)); - - assert_eq!( - balances(who), - (initial_balance - SignedDepositBase::get(), SignedDepositBase::get()) - ); - - for (page_index, solution_page) in paged.solution_pages.pagify(Pages::get()) { - assert_ok!(SignedPallet::submit_page( - RuntimeOrigin::signed(who), - page_index, - Some(Box::new(solution_page.clone())) - )); - } - - let mut events = signed_events(); - for _ in 0..Pages::get() { - let event = events.pop().unwrap(); - assert!(matches!(event, SignedEvent::Stored(_, x, _) if x == who)) - } - assert!(matches!(events.pop().unwrap(), SignedEvent::Registered(_, x, _) if x == who)); - - let full_deposit = - SignedDepositBase::get() + (Pages::get() as Balance) * SignedDepositPerPage::get(); - assert_eq!(balances(who), (initial_balance - full_deposit, full_deposit)); -} - -/// Same as [`load_signed_for_verification`], but also goes forward to the beginning of the signed -/// verification phase. -pub fn load_signed_for_verification_and_start( - who: AccountId, - paged: PagedRawSolution, - _round: u32, -) { - load_signed_for_verification(who, paged); - - // now the solution should start being verified. - roll_to_signed_validation_open(); - assert_eq!( - multi_block_events(), - vec![ - Event::PhaseTransitioned { from: Phase::Off, to: Phase::Snapshot(2) }, - Event::PhaseTransitioned { from: Phase::Snapshot(0), to: Phase::Signed }, - Event::PhaseTransitioned { from: Phase::Signed, to: Phase::SignedValidation(20) } - ] - ); - assert_eq!(verifier_events(), vec![]); -} - -/// Same as [`load_signed_for_verification_and_start`], but also goes forward enough blocks for the -/// solution to be verified, assuming it is all correct. -/// -/// In other words, it goes [`Pages`] blocks forward. 
-pub fn load_signed_for_verification_and_start_and_roll_to_verified( - who: AccountId, - paged: PagedRawSolution, - _round: u32, -) { - load_signed_for_verification(who, paged.clone()); - - // now the solution should start being verified. - roll_to_signed_validation_open(); - assert_eq!( - multi_block_events(), - vec![ - Event::PhaseTransitioned { from: Phase::Off, to: Phase::Snapshot(2) }, - Event::PhaseTransitioned { from: Phase::Snapshot(0), to: Phase::Signed }, - Event::PhaseTransitioned { from: Phase::Signed, to: Phase::SignedValidation(20) } - ] - ); - assert_eq!(verifier_events(), vec![]); - - // there is no queued solution prior to the last page of the solution getting verified - assert_eq!(::Verifier::queued_score(), None); - - // roll to the block it is finalized. - for _ in 0..Pages::get() { - roll_next(); - } - - assert_eq!( - verifier_events(), - vec![ - // TODO: these are hardcoded for 3 page. - verifier::Event::Verified(2, 2), - verifier::Event::Verified(1, 2), - verifier::Event::Verified(0, 2), - verifier::Event::Queued(paged.score, None), - ] - ); - - // there is now a queued solution. - assert_eq!(::Verifier::queued_score(), Some(paged.score)); -} - -/// Load a full raw paged solution for verification. -/// -/// More or less the equivalent of `load_signed_for_verification_and_start`, but when -/// `SignedSwitch::Mock` is set. -pub fn load_mock_signed_and_start(raw_paged: PagedRawSolution) { - assert_eq!( - SignedPhaseSwitch::get(), - SignedSwitch::Mock, - "you should not use this if mock phase is not being mocked" - ); - MockSignedNextSolution::set(Some(raw_paged.solution_pages.pad_solution_pages(Pages::get()))); - MockSignedNextScore::set(Some(raw_paged.score)); - - // Let's gooooo! - assert_ok!(::start()); -} - -/// Ensure that no submission data exists in `round` for `who`. 
-pub fn assert_no_data_for(round: u32, who: AccountId) { - assert!(!Submissions::::leaderboard(round).into_iter().any(|(x, _)| x == who)); - assert!(Submissions::::metadata_of(round, who).is_none()); - assert!(Submissions::::pages_of(round, who).count().is_zero()); -} diff --git a/substrate/frame/election-provider-multi-block/src/mock/staking.rs b/substrate/frame/election-provider-multi-block/src/mock/staking.rs deleted file mode 100644 index bb4adb4d297a7..0000000000000 --- a/substrate/frame/election-provider-multi-block/src/mock/staking.rs +++ /dev/null @@ -1,238 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use super::{AccountId, MaxVotesPerVoter, Runtime}; -use crate::VoterOf; -use frame_election_provider_support::{ - data_provider, DataProviderBounds, ElectionDataProvider, PageIndex, VoteWeight, -}; -use frame_support::pallet_prelude::*; -use sp_core::bounded_vec; -use sp_std::prelude::*; - -pub type T = Runtime; - -frame_support::parameter_types! 
{ - pub static Targets: Vec = vec![10, 20, 30, 40]; - pub static Voters: Vec> = vec![ - // page 2: - (1, 10, bounded_vec![10, 20]), - (2, 10, bounded_vec![30, 40]), - (3, 10, bounded_vec![40]), - (4, 10, bounded_vec![10, 20, 40]), - // page 1: - (5, 10, bounded_vec![10, 30, 40]), - (6, 10, bounded_vec![20, 30, 40]), - (7, 10, bounded_vec![20, 30]), - (8, 10, bounded_vec![10]), - // page 0: (self-votes) - (10, 10, bounded_vec![10]), - (20, 20, bounded_vec![20]), - (30, 30, bounded_vec![30]), - (40, 40, bounded_vec![40]), - ]; - pub static DesiredTargets: u32 = 2; - pub static EpochLength: u64 = 30; - - pub static LastIteratedVoterIndex: Option = None; -} - -pub struct MockStaking; -impl ElectionDataProvider for MockStaking { - type AccountId = AccountId; - type BlockNumber = u64; - type MaxVotesPerVoter = MaxVotesPerVoter; - - fn electable_targets( - bounds: DataProviderBounds, - remaining: PageIndex, - ) -> data_provider::Result> { - let targets = Targets::get(); - - if remaining != 0 { - crate::log!( - warn, - "requesting targets for non-zero page, we will return the same page in any case" - ); - } - if bounds.slice_exhausted(&targets) { - return Err("Targets too big") - } - - Ok(targets) - } - - fn electing_voters( - bounds: DataProviderBounds, - remaining: PageIndex, - ) -> data_provider::Result< - Vec<(AccountId, VoteWeight, BoundedVec)>, - > { - let mut voters = Voters::get(); - - // jump to the first non-iterated, if this is a follow up. - if let Some(index) = LastIteratedVoterIndex::get() { - voters = voters.iter().skip(index).cloned().collect::>(); - } - - // take as many as you can. 
- if let Some(max_len) = bounds.count.map(|c| c.0 as usize) { - voters.truncate(max_len) - } - - if voters.is_empty() { - return Ok(vec![]) - } - - if remaining > 0 { - let last = voters.last().cloned().unwrap(); - LastIteratedVoterIndex::set(Some( - Voters::get().iter().position(|v| v == &last).map(|i| i + 1).unwrap(), - )); - } else { - LastIteratedVoterIndex::set(None) - } - - Ok(voters) - } - - fn desired_targets() -> data_provider::Result { - Ok(DesiredTargets::get()) - } - - fn next_election_prediction(now: u64) -> u64 { - now + EpochLength::get() - now % EpochLength::get() - } - - #[cfg(feature = "runtime-benchmarks")] - fn put_snapshot( - voters: Vec<(AccountId, VoteWeight, BoundedVec)>, - targets: Vec, - _target_stake: Option, - ) { - Targets::set(targets); - Voters::set(voters); - } - - #[cfg(feature = "runtime-benchmarks")] - fn clear() { - Targets::set(vec![]); - Voters::set(vec![]); - } - - #[cfg(feature = "runtime-benchmarks")] - fn add_voter( - voter: AccountId, - weight: VoteWeight, - targets: BoundedVec, - ) { - let mut current = Voters::get(); - current.push((voter, weight, targets)); - Voters::set(current); - } - - #[cfg(feature = "runtime-benchmarks")] - fn add_target(target: AccountId) { - use super::ExistentialDeposit; - - let mut current = Targets::get(); - current.push(target); - Targets::set(current); - - // to be on-par with staking, we add a self vote as well. the stake is really not that - // important. - let mut current = Voters::get(); - current.push((target, ExistentialDeposit::get() as u64, vec![target].try_into().unwrap())); - Voters::set(current); - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::mock::{bound_by_count, ExtBuilder}; - - #[test] - fn targets() { - ExtBuilder::full().build_and_execute(|| { - assert_eq!(Targets::get().len(), 4); - - // any non-zero page returns page zero. 
- assert_eq!(MockStaking::electable_targets(bound_by_count(None), 2).unwrap().len(), 4); - assert_eq!(MockStaking::electable_targets(bound_by_count(None), 1).unwrap().len(), 4); - - // 0 is also fine. - assert_eq!(MockStaking::electable_targets(bound_by_count(None), 0).unwrap().len(), 4); - - // fetch less targets is error, because targets cannot be sorted (both by MockStaking, - // and the real staking). - assert!(MockStaking::electable_targets(bound_by_count(Some(2)), 0).is_err()); - - // more targets is fine. - assert!(MockStaking::electable_targets(bound_by_count(Some(4)), 0).is_ok()); - assert!(MockStaking::electable_targets(bound_by_count(Some(5)), 0).is_ok()); - }); - } - - #[test] - fn multi_page_votes() { - ExtBuilder::full().build_and_execute(|| { - assert_eq!(MockStaking::electing_voters(bound_by_count(None), 0).unwrap().len(), 12); - assert!(LastIteratedVoterIndex::get().is_none()); - - assert_eq!( - MockStaking::electing_voters(bound_by_count(Some(4)), 0) - .unwrap() - .into_iter() - .map(|(x, _, _)| x) - .collect::>(), - vec![1, 2, 3, 4], - ); - assert!(LastIteratedVoterIndex::get().is_none()); - - assert_eq!( - MockStaking::electing_voters(bound_by_count(Some(4)), 2) - .unwrap() - .into_iter() - .map(|(x, _, _)| x) - .collect::>(), - vec![1, 2, 3, 4], - ); - assert_eq!(LastIteratedVoterIndex::get().unwrap(), 4); - - assert_eq!( - MockStaking::electing_voters(bound_by_count(Some(4)), 1) - .unwrap() - .into_iter() - .map(|(x, _, _)| x) - .collect::>(), - vec![5, 6, 7, 8], - ); - assert_eq!(LastIteratedVoterIndex::get().unwrap(), 8); - - assert_eq!( - MockStaking::electing_voters(bound_by_count(Some(4)), 0) - .unwrap() - .into_iter() - .map(|(x, _, _)| x) - .collect::>(), - vec![10, 20, 30, 40], - ); - assert!(LastIteratedVoterIndex::get().is_none()); - }) - } -} diff --git a/substrate/frame/election-provider-multi-block/src/mock/weight_info.rs b/substrate/frame/election-provider-multi-block/src/mock/weight_info.rs deleted file mode 100644 index 
a5f28f4fbd2d8..0000000000000 --- a/substrate/frame/election-provider-multi-block/src/mock/weight_info.rs +++ /dev/null @@ -1,85 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// TODO: would love to ditch this, too big to handle here. - -use crate::{self as multi_block}; -use frame_support::weights::Weight; -use sp_runtime::traits::Zero; - -frame_support::parameter_types! 
{ - pub static MockWeightInfo: bool = false; -} - -pub struct DualMockWeightInfo; -impl multi_block::WeightInfo for DualMockWeightInfo { - fn on_initialize_nothing() -> Weight { - if MockWeightInfo::get() { - Zero::zero() - } else { - <() as multi_block::WeightInfo>::on_initialize_nothing() - } - } - - fn on_initialize_into_snapshot_msp() -> Weight { - if MockWeightInfo::get() { - Zero::zero() - } else { - <() as multi_block::WeightInfo>::on_initialize_into_snapshot_msp() - } - } - - fn on_initialize_into_snapshot_rest() -> Weight { - if MockWeightInfo::get() { - Zero::zero() - } else { - <() as multi_block::WeightInfo>::on_initialize_into_snapshot_rest() - } - } - - fn on_initialize_into_signed() -> Weight { - if MockWeightInfo::get() { - Zero::zero() - } else { - <() as multi_block::WeightInfo>::on_initialize_into_signed() - } - } - - fn on_initialize_into_signed_validation() -> Weight { - if MockWeightInfo::get() { - Zero::zero() - } else { - <() as multi_block::WeightInfo>::on_initialize_into_signed_validation() - } - } - - fn on_initialize_into_unsigned() -> Weight { - if MockWeightInfo::get() { - Zero::zero() - } else { - <() as multi_block::WeightInfo>::on_initialize_into_unsigned() - } - } - - fn manage() -> Weight { - if MockWeightInfo::get() { - Zero::zero() - } else { - <() as multi_block::WeightInfo>::manage() - } - } -} diff --git a/substrate/frame/election-provider-multi-block/src/signed/benchmarking.rs b/substrate/frame/election-provider-multi-block/src/signed/benchmarking.rs deleted file mode 100644 index 1e9facd72fb67..0000000000000 --- a/substrate/frame/election-provider-multi-block/src/signed/benchmarking.rs +++ /dev/null @@ -1,171 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use crate::{ - signed::{Config, Pallet, Submissions}, - types::PagedRawSolution, - unsigned::miner::OffchainWorkerMiner, - CurrentPhase, Phase, Round, -}; -use frame_benchmarking::v2::*; -use frame_election_provider_support::ElectionDataProvider; -use frame_support::pallet_prelude::*; -use frame_system::RawOrigin; -use sp_npos_elections::ElectionScore; -use sp_std::boxed::Box; - -#[benchmarks(where T: crate::Config + crate::verifier::Config + crate::unsigned::Config)] -mod benchmarks { - use super::*; - - #[benchmark] - fn register_not_full() -> Result<(), BenchmarkError> { - CurrentPhase::::put(Phase::Signed); - let round = Round::::get(); - let alice = crate::Pallet::::funded_account("alice", 0); - let score = ElectionScore::default(); - - assert_eq!(Submissions::::sorted_submitters(round).len(), 0); - #[block] - { - Pallet::::register(RawOrigin::Signed(alice).into(), score)?; - } - - assert_eq!(Submissions::::sorted_submitters(round).len(), 1); - Ok(()) - } - - #[benchmark] - fn register_eject() -> Result<(), BenchmarkError> { - CurrentPhase::::put(Phase::Signed); - let round = Round::::get(); - - for i in 0..T::MaxSubmissions::get() { - let submitter = crate::Pallet::::funded_account("submitter", i); - let score = ElectionScore { minimal_stake: i.into(), ..Default::default() }; - Pallet::::register(RawOrigin::Signed(submitter.clone()).into(), score)?; - - // The first one, which will be ejected, has also submitted all pages - if i == 0 { - for p in 0..T::Pages::get() { - let page = Some(Default::default()); - 
Pallet::::submit_page(RawOrigin::Signed(submitter.clone()).into(), p, page)?; - } - } - } - - let who = crate::Pallet::::funded_account("who", 0); - let score = - ElectionScore { minimal_stake: T::MaxSubmissions::get().into(), ..Default::default() }; - - assert_eq!( - Submissions::::sorted_submitters(round).len(), - T::MaxSubmissions::get() as usize - ); - - #[block] - { - Pallet::::register(RawOrigin::Signed(who).into(), score)?; - } - - assert_eq!( - Submissions::::sorted_submitters(round).len(), - T::MaxSubmissions::get() as usize - ); - Ok(()) - } - - #[benchmark] - fn submit_page() -> Result<(), BenchmarkError> { - T::DataProvider::set_next_election(crate::Pallet::::reasonable_next_election()); - crate::Pallet::::roll_until_matches(|| { - matches!(CurrentPhase::::get(), Phase::Signed) - }); - - // mine a full solution - let PagedRawSolution { score, solution_pages, .. } = - OffchainWorkerMiner::::mine_solution(T::Pages::get(), false).unwrap(); - let page = Some(Box::new(solution_pages[0].clone())); - - // register alice - let alice = crate::Pallet::::funded_account("alice", 0); - Pallet::::register(RawOrigin::Signed(alice.clone()).into(), score)?; - - #[block] - { - Pallet::::submit_page(RawOrigin::Signed(alice).into(), 0, page)?; - } - - Ok(()) - } - - #[benchmark] - fn unset_page() -> Result<(), BenchmarkError> { - T::DataProvider::set_next_election(crate::Pallet::::reasonable_next_election()); - crate::Pallet::::roll_until_matches(|| { - matches!(CurrentPhase::::get(), Phase::Signed) - }); - - // mine a full solution - let PagedRawSolution { score, solution_pages, .. 
} = - OffchainWorkerMiner::::mine_solution(T::Pages::get(), false).unwrap(); - let page = Some(Box::new(solution_pages[0].clone())); - - // register alice - let alice = crate::Pallet::::funded_account("alice", 0); - Pallet::::register(RawOrigin::Signed(alice.clone()).into(), score)?; - - // submit page - Pallet::::submit_page(RawOrigin::Signed(alice.clone()).into(), 0, page)?; - - #[block] - { - Pallet::::submit_page(RawOrigin::Signed(alice).into(), 0, None)?; - } - - Ok(()) - } - - #[benchmark] - fn bail() -> Result<(), BenchmarkError> { - CurrentPhase::::put(Phase::Signed); - let alice = crate::Pallet::::funded_account("alice", 0); - - // register alice - let score = ElectionScore::default(); - Pallet::::register(RawOrigin::Signed(alice.clone()).into(), score)?; - - // submit all pages - for p in 0..T::Pages::get() { - let page = Some(Default::default()); - Pallet::::submit_page(RawOrigin::Signed(alice.clone()).into(), p, page)?; - } - - #[block] - { - Pallet::::bail(RawOrigin::Signed(alice).into())?; - } - - Ok(()) - } - - impl_benchmark_test_suite!( - Pallet, - crate::mock::ExtBuilder::signed().build_unchecked(), - crate::mock::Runtime - ); -} diff --git a/substrate/frame/election-provider-multi-block/src/signed/mod.rs b/substrate/frame/election-provider-multi-block/src/signed/mod.rs deleted file mode 100644 index 1784a87b22433..0000000000000 --- a/substrate/frame/election-provider-multi-block/src/signed/mod.rs +++ /dev/null @@ -1,858 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! The signed phase of the multi-block election system. -//! -//! Signed submissions work on the basis of keeping a queue of submissions from random signed -//! accounts, and sorting them based on the best claimed score to the worse. -//! -//! Once the time to evaluate the signed phase comes (`Phase::SignedValidation`), the solutions are -//! checked from best-to-worse claim, and they end up in either of the 3 buckets: -//! -//! 1. If they are the first, correct solution (and consequently the best one, since we start -//! evaluating from the best claim), they are rewarded. -//! 2. Any solution after the first correct solution is refunded in an unbiased way. -//! 3. Any invalid solution that wasted valuable blockchain time gets slashed for their deposit. -//! -//! ## Future Plans: -//! -//! **Lazy deletion**: -//! Overall, this pallet can avoid the need to delete any storage item, by: -//! 1. outsource the storage of solution data to some other pallet. -//! 2. keep it here, but make everything be also a map of the round number, so that we can keep old -//! storage, and it is ONLY EVER removed, when after that round number is over. This can happen -//! for more or less free by the submitter itself, and by anyone else as well, in which case they -//! get a share of the the sum deposit. The share increases as times goes on. -//! **Metadata update**: imagine you mis-computed your score. - -// TODO: we should delete this async and once the round is passed. 
-// Registration would consequently be as follows: -// - If you get ejected, and you are lazy removed, a percentage of your deposit is burned. If we set -// this to 100%, we will not have bad submissions after the queue is full. The queue can be made -// full by purely an attacker, in which case the sum of deposits should be large enough to cover -// the fact that we will have a bad election. -// - whitelisted accounts who will not pay deposits are needed. They can still be ejected, but for -// free. -// - Deposit should exponentially increase, and in general we should not allow for more than say 8 -// signed submissions. - -use crate::{ - types::SolutionOf, - verifier::{AsynchronousVerifier, SolutionDataProvider, Status, VerificationResult}, -}; -use codec::{Decode, Encode, MaxEncodedLen}; -use frame_election_provider_support::PageIndex; -use frame_support::{ - dispatch::DispatchResultWithPostInfo, - pallet_prelude::{StorageDoubleMap, ValueQuery, *}, - traits::{ - tokens::{ - fungible::{Inspect, Mutate, MutateHold}, - Fortitude, Precision, - }, - Defensive, DefensiveSaturating, EstimateCallFee, - }, - transactional, BoundedVec, Twox64Concat, -}; -use frame_system::{ensure_signed, pallet_prelude::*}; -use scale_info::TypeInfo; -use sp_io::MultiRemovalResults; -use sp_npos_elections::ElectionScore; -use sp_runtime::{traits::Saturating, Perbill}; -use sp_std::prelude::*; - -/// Explore all weights -pub use crate::weights::measured::pallet_election_provider_multi_block_signed::*; -/// Exports of this pallet -pub use pallet::*; - -#[cfg(feature = "runtime-benchmarks")] -mod benchmarking; - -pub(crate) type SignedWeightsOf = ::WeightInfo; - -#[cfg(test)] -mod tests; - -type BalanceOf = - <::Currency as Inspect<::AccountId>>::Balance; - -/// All of the (meta) data around a signed submission -#[derive(Encode, Decode, MaxEncodedLen, TypeInfo, Default, DebugNoBound)] -#[cfg_attr(test, derive(frame_support::PartialEqNoBound, frame_support::EqNoBound))] -#[codec(mel_bound(T: 
Config))] -#[scale_info(skip_type_params(T))] -pub struct SubmissionMetadata { - /// The amount of deposit that has been held in reserve. - deposit: BalanceOf, - /// The amount of transaction fee that this submission has cost for its submitter so far. - fee: BalanceOf, - /// The amount of rewards that we expect to give to this submission, if deemed worthy. - reward: BalanceOf, - /// The score that this submission is claiming to achieve. - claimed_score: ElectionScore, - /// A bounded-bool-vec of pages that have been submitted so far. - pages: BoundedVec, -} - -impl SolutionDataProvider for Pallet { - type Solution = SolutionOf; - - fn get_page(page: PageIndex) -> Option { - // note: a non-existing page will still be treated as merely an empty page. This could be - // re-considered. - let current_round = Self::current_round(); - Submissions::::leader(current_round).map(|(who, _score)| { - sublog!(info, "signed", "returning page {} of {:?}'s submission as leader.", page, who); - Submissions::::get_page_of(current_round, &who, page).unwrap_or_default() - }) - } - - fn get_score() -> Option { - Submissions::::leader(Self::current_round()).map(|(_who, score)| score) - } - - fn report_result(result: crate::verifier::VerificationResult) { - // assumption of the trait. - debug_assert!(matches!(::status(), Status::Nothing)); - let current_round = Self::current_round(); - - match result { - VerificationResult::Queued => { - // defensive: if there is a result to be reported, then we must have had some - // leader. - if let Some((winner, metadata)) = - Submissions::::take_leader_with_data(Self::current_round()).defensive() - { - // first, let's give them their reward. 
- let reward = metadata.reward.saturating_add(metadata.fee); - let _r = T::Currency::mint_into(&winner, reward); - debug_assert!(_r.is_ok()); - Self::deposit_event(Event::::Rewarded( - current_round, - winner.clone(), - reward, - )); - - // then, unreserve their deposit - let _res = T::Currency::release( - &HoldReason::SignedSubmission.into(), - &winner, - metadata.deposit, - Precision::BestEffort, - ); - debug_assert!(_res.is_ok()); - - // note: we could wipe this data either over time, or via transactions. - while let Some((discarded, metadata)) = - Submissions::::take_leader_with_data(Self::current_round()) - { - let _res = T::Currency::release( - &HoldReason::SignedSubmission.into(), - &discarded, - metadata.deposit, - Precision::BestEffort, - ); - debug_assert_eq!(_res, Ok(metadata.deposit)); - Self::deposit_event(Event::::Discarded(current_round, discarded)); - } - - // everything should have been clean. - #[cfg(debug_assertions)] - assert!(Submissions::::ensure_killed(current_round).is_ok()); - } - }, - VerificationResult::Rejected => { - // defensive: if there is a result to be reported, then we must have had some - // leader. - if let Some((loser, metadata)) = - Submissions::::take_leader_with_data(Self::current_round()).defensive() - { - // first, let's slash their deposit. - let slash = metadata.deposit; - let _res = T::Currency::burn_held( - &HoldReason::SignedSubmission.into(), - &loser, - slash, - Precision::BestEffort, - Fortitude::Force, - ); - debug_assert_eq!(_res, Ok(slash)); - Self::deposit_event(Event::::Slashed(current_round, loser.clone(), slash)); - - // inform the verifier that they can now try again, if we're still in the signed - // validation phase. - if crate::Pallet::::current_phase().is_signed_validation() && - Submissions::::has_leader(current_round) - { - // defensive: verifier just reported back a result, it must be in clear - // state. 
- let _ = ::start().defensive(); - } - } - }, - VerificationResult::DataUnavailable => { - unreachable!("TODO") - }, - } - } -} - -#[frame_support::pallet] -pub mod pallet { - use super::{WeightInfo, *}; - - #[pallet::config] - #[pallet::disable_frame_system_supertrait_check] - pub trait Config: crate::Config { - /// The overarching event type. - type RuntimeEvent: From> - + IsType<::RuntimeEvent> - + TryInto>; - - /// Handler to the currency. - type Currency: Inspect - + Mutate - + MutateHold; - - /// Base deposit amount for a submission. - type DepositBase: Get>; - - /// Extra deposit per-page. - type DepositPerPage: Get>; - - /// Base reward that is given to the winner. - type RewardBase: Get>; - - /// Maximum number of submissions. This, combined with `SignedValidationPhase` and `Pages` - /// dictates how many signed solutions we can verify. - type MaxSubmissions: Get; - - /// The ratio of the deposit to return in case a signed account submits a solution via - /// [`Pallet::register`], but later calls [`Pallet::bail`]. - /// - /// This should be large enough to cover for the deletion cost of possible all pages. To be - /// safe, you can put it to 100% to begin with to fully dis-incentivize bailing. - type BailoutGraceRatio: Get; - - /// Handler to estimate the fee of a call. Useful to refund the transaction fee of the - /// submitter for the winner. - type EstimateCallFee: EstimateCallFee, BalanceOf>; - - /// Overarching hold reason. - type RuntimeHoldReason: From; - - /// Provided weights of this pallet. - type WeightInfo: WeightInfo; - } - - /// The hold reason of this palelt. - #[pallet::composite_enum] - pub enum HoldReason { - /// Because of submitting a signed solution. - #[codec(index = 0)] - SignedSubmission, - } - - /// Wrapper type for signed submissions. - /// - /// It handles 3 storage items: - /// - /// 1. [`SortedScores`]: A flat vector of all submissions' `(submitter_id, claimed_score)`. - /// 2. 
[`SubmissionStorage`]: Paginated map of of all submissions, keyed by submitter and page. - /// 3. [`SubmissionMetadataStorage`]: Map from submitter to the metadata of their submission. - /// - /// All storage items in this group are mapped, and their first key is the `round` to which they - /// belong to. In essence, we are storing multiple versions of each group. - /// - /// ### Invariants: - /// - /// This storage group is sane, clean, and consistent if the following invariants are held: - /// - /// Among the submissions of each round: - /// - `SortedScores` should never contain duplicate account ids. - /// - For any account id in `SortedScores`, a corresponding value should exist in - /// `SubmissionMetadataStorage` under that account id's key. - /// - And the value of `metadata.score` must be equal to the score stored in - /// `SortedScores`. - /// - And visa versa: for any key existing in `SubmissionMetadataStorage`, an item must exist in - /// `SortedScores`. - /// - For any first key existing in `SubmissionStorage`, a key must exist in - /// `SubmissionMetadataStorage`. - /// - For any first key in `SubmissionStorage`, the number of second keys existing should be the - /// same as the `true` count of `pages` in [`SubmissionMetadata`] (this already implies the - /// former, since it uses the metadata). - /// - /// All mutating functions are only allowed to transition into states where all of the above - /// conditions are met. - /// - /// No particular invariant exists between data that related to different rounds. They are - /// purely independent. - pub(crate) struct Submissions(sp_std::marker::PhantomData); - - #[pallet::storage] - type SortedScores = StorageMap< - _, - Twox64Concat, - u32, - BoundedVec<(T::AccountId, ElectionScore), T::MaxSubmissions>, - ValueQuery, - >; - - /// Triple map from (round, account, page) to a solution page. 
- #[pallet::storage] - type SubmissionStorage = StorageNMap< - _, - ( - NMapKey, - NMapKey, - NMapKey, - ), - SolutionOf, - OptionQuery, - >; - - /// Map from account to the metadata of their submission. - /// - /// invariant: for any Key1 of type `AccountId` in [`Submissions`], this storage map also has a - /// value. - #[pallet::storage] - type SubmissionMetadataStorage = - StorageDoubleMap<_, Twox64Concat, u32, Twox64Concat, T::AccountId, SubmissionMetadata>; - - impl Submissions { - // -- mutating functions - - /// Generic checked mutation helper. - /// - /// All mutating functions must be fulled through this bad boy. The round at which the - /// mutation happens must be provided - fn mutate_checked R>(_round: u32, mutate: F) -> R { - let result = mutate(); - - #[cfg(debug_assertions)] - { - assert!(Self::sanity_check_round(_round).is_ok()); - assert!(Self::sanity_check_round(_round + 1).is_ok()); - assert!(Self::sanity_check_round(_round.saturating_sub(1)).is_ok()); - } - - result - } - - /// *Fully* **TAKE** (i.e. get and remove) the leader from storage, with all of its - /// associated data. - /// - /// This removes all associated data of the leader from storage, discarding the submission - /// data and score, returning the rest. - pub(crate) fn take_leader_with_data( - round: u32, - ) -> Option<(T::AccountId, SubmissionMetadata)> { - Self::mutate_checked(round, || { - SortedScores::::mutate(round, |sorted| sorted.pop()).and_then( - |(submitter, _score)| { - // NOTE: safe to remove unbounded, as at most `Pages` pages are stored. - let r: MultiRemovalResults = SubmissionStorage::::clear_prefix( - (round, &submitter), - u32::MAX, - None, - ); - debug_assert!(r.unique <= T::Pages::get()); - - SubmissionMetadataStorage::::take(round, &submitter) - .map(|metadata| (submitter, metadata)) - }, - ) - }) - } - - /// *Fully* **TAKE** (i.e. get and remove) a submission from storage, with all of its - /// associated data. 
- /// - /// This removes all associated data of the submitter from storage, discarding the - /// submission data and score, returning the metadata. - pub(crate) fn take_submission_with_data( - round: u32, - who: &T::AccountId, - ) -> Option> { - Self::mutate_checked(round, || { - SortedScores::::mutate(round, |sorted_scores| { - if let Some(index) = sorted_scores.iter().position(|(x, _)| x == who) { - sorted_scores.remove(index); - } - }); - // Note: safe to remove unbounded, as at most `Pages` pages are stored. - let r = SubmissionStorage::::clear_prefix((round, who), u32::MAX, None); - debug_assert!(r.unique <= T::Pages::get()); - - SubmissionMetadataStorage::::take(round, who) - }) - } - - /// Try and register a new solution. - /// - /// Registration can only happen for the current round. - /// - /// registration might fail if the queue is already full, and the solution is not good - /// enough to eject the weakest. - fn try_register( - round: u32, - who: &T::AccountId, - metadata: SubmissionMetadata, - ) -> Result { - Self::mutate_checked(round, || Self::try_register_inner(round, who, metadata)) - } - - fn try_register_inner( - round: u32, - who: &T::AccountId, - metadata: SubmissionMetadata, - ) -> Result { - let mut sorted_scores = SortedScores::::get(round); - - let discarded = if let Some(_) = sorted_scores.iter().position(|(x, _)| x == who) { - return Err(Error::::Duplicate.into()); - } else { - // must be new. - debug_assert!(!SubmissionMetadataStorage::::contains_key(round, who)); - - let pos = match sorted_scores - .binary_search_by_key(&metadata.claimed_score, |(_, y)| *y) - { - // an equal score exists, unlikely, but could very well happen. We just put them - // next to each other. - Ok(pos) => pos, - // new score, should be inserted in this pos. 
- Err(pos) => pos, - }; - - let record = (who.clone(), metadata.claimed_score); - match sorted_scores.force_insert_keep_right(pos, record) { - Ok(None) => false, - Ok(Some((discarded, _score))) => { - let metadata = SubmissionMetadataStorage::::take(round, &discarded); - // Note: safe to remove unbounded, as at most `Pages` pages are stored. - let _r = SubmissionStorage::::clear_prefix( - (round, &discarded), - u32::MAX, - None, - ); - debug_assert!(_r.unique <= T::Pages::get()); - let to_refund = metadata.map(|m| m.deposit).defensive_unwrap_or_default(); - let _released = T::Currency::release( - &HoldReason::SignedSubmission.into(), - &discarded, - to_refund, - Precision::BestEffort, - )?; - debug_assert_eq!(_released, to_refund); - Pallet::::deposit_event(Event::::Discarded(round, discarded)); - true - }, - Err(_) => return Err(Error::::QueueFull.into()), - } - }; - - SortedScores::::insert(round, sorted_scores); - SubmissionMetadataStorage::::insert(round, who, metadata); - Ok(discarded) - } - - /// Submit a page of `solution` to the `page` index of `who`'s submission. - /// - /// Updates the deposit in the metadata accordingly. - /// - /// - If `maybe_solution` is `None`, then the given page is deleted. - /// - `who` must have already registered their submission. - /// - If the page is duplicate, it will replaced. 
- pub(crate) fn try_mutate_page( - round: u32, - who: &T::AccountId, - page: PageIndex, - maybe_solution: Option>>, - ) -> DispatchResultWithPostInfo { - Self::mutate_checked(round, || { - Self::try_mutate_page_inner(round, who, page, maybe_solution) - }) - } - - fn try_mutate_page_inner( - round: u32, - who: &T::AccountId, - page: PageIndex, - maybe_solution: Option>>, - ) -> DispatchResultWithPostInfo { - let mut metadata = - SubmissionMetadataStorage::::get(round, who).ok_or(Error::::NotRegistered)?; - ensure!(page < T::Pages::get(), Error::::BadPageIndex); - - // defensive only: we resize `meta.pages` once to be `T::Pages` elements once, and never - // resize it again; `page` is checked here to be in bound; element must exist; qed. - if let Some(page_bit) = metadata.pages.get_mut(page as usize).defensive() { - *page_bit = maybe_solution.is_some(); - } - - // update deposit. - let new_pages: BalanceOf = - (metadata.pages.iter().filter(|x| **x).count() as u32).into(); - let new_deposit = T::DepositBase::get() + T::DepositPerPage::get() * new_pages; - let old_deposit = metadata.deposit; - if new_deposit > old_deposit { - let to_reserve = new_deposit - old_deposit; - T::Currency::hold(&HoldReason::SignedSubmission.into(), who, to_reserve)?; - } else { - let to_unreserve = old_deposit - new_deposit; - let _res = T::Currency::release( - &HoldReason::SignedSubmission.into(), - who, - to_unreserve, - Precision::BestEffort, - ); - debug_assert_eq!(_res, Ok(to_unreserve)); - }; - metadata.deposit = new_deposit; - - // If a page is being added, we record the fee as well. For removals, we ignore the fee - // as it is negligible, and we don't want to encourage anyone to submit and remove - // anyways. Note that fee is only refunded for the winner anyways. 
- if maybe_solution.is_some() { - let fee = T::EstimateCallFee::estimate_call_fee( - &Call::submit_page { page, maybe_solution: maybe_solution.clone() }, - None.into(), - ); - metadata.fee.saturating_accrue(fee); - } - - SubmissionStorage::::mutate_exists((round, who, page), |maybe_old_solution| { - *maybe_old_solution = maybe_solution.map(|s| *s) - }); - SubmissionMetadataStorage::::insert(round, who, metadata); - Ok(().into()) - } - - // -- getter functions - pub(crate) fn has_leader(round: u32) -> bool { - !SortedScores::::get(round).is_empty() - } - - pub(crate) fn leader(round: u32) -> Option<(T::AccountId, ElectionScore)> { - SortedScores::::get(round).last().cloned() - } - - pub(crate) fn get_page_of( - round: u32, - who: &T::AccountId, - page: PageIndex, - ) -> Option> { - SubmissionStorage::::get((round, who, &page)) - } - } - - #[allow(unused)] - #[cfg(any(feature = "try-runtime", test, feature = "runtime-benchmarks", debug_assertions))] - impl Submissions { - pub(crate) fn sorted_submitters(round: u32) -> BoundedVec { - use frame_support::traits::TryCollect; - SortedScores::::get(round).into_iter().map(|(x, _)| x).try_collect().unwrap() - } - - pub fn submissions_iter( - round: u32, - ) -> impl Iterator)> { - SubmissionStorage::::iter_prefix((round,)).map(|((x, y), z)| (x, y, z)) - } - - pub fn metadata_iter( - round: u32, - ) -> impl Iterator)> { - SubmissionMetadataStorage::::iter_prefix(round) - } - - pub fn metadata_of(round: u32, who: T::AccountId) -> Option> { - SubmissionMetadataStorage::::get(round, who) - } - - pub fn pages_of( - round: u32, - who: T::AccountId, - ) -> impl Iterator)> { - SubmissionStorage::::iter_prefix((round, who)) - } - - pub fn leaderboard( - round: u32, - ) -> BoundedVec<(T::AccountId, ElectionScore), T::MaxSubmissions> { - SortedScores::::get(round) - } - - /// Ensure that all the storage items associated with the given round are in `killed` state, - /// meaning that in the expect state after an election is OVER. 
- pub(crate) fn ensure_killed(round: u32) -> DispatchResult { - ensure!(Self::metadata_iter(round).count() == 0, "metadata_iter not cleared."); - ensure!(Self::submissions_iter(round).count() == 0, "submissions_iter not cleared."); - ensure!(Self::sorted_submitters(round).len() == 0, "sorted_submitters not cleared."); - - Ok(()) - } - - /// Perform all the sanity checks of this storage item group at the given round. - pub(crate) fn sanity_check_round(round: u32) -> DispatchResult { - use sp_std::collections::btree_set::BTreeSet; - let sorted_scores = SortedScores::::get(round); - assert_eq!( - sorted_scores.clone().into_iter().map(|(x, _)| x).collect::>().len(), - sorted_scores.len() - ); - - let _ = SubmissionMetadataStorage::::iter_prefix(round) - .map(|(submitter, meta)| { - let mut matches = SortedScores::::get(round) - .into_iter() - .filter(|(who, _score)| who == &submitter) - .collect::>(); - - ensure!( - matches.len() == 1, - "item existing in metadata but missing in sorted list.", - ); - - let (_, score) = matches.pop().expect("checked; qed"); - ensure!(score == meta.claimed_score, "score mismatch"); - Ok(()) - }) - .collect::, &'static str>>()?; - - ensure!( - SubmissionStorage::::iter_key_prefix((round,)).map(|(k1, _k2)| k1).all( - |submitter| SubmissionMetadataStorage::::contains_key(round, submitter) - ), - "missing metadata of submitter" - ); - - for submitter in SubmissionStorage::::iter_key_prefix((round,)).map(|(k1, _k2)| k1) { - let pages_count = - SubmissionStorage::::iter_key_prefix((round, &submitter)).count(); - let metadata = SubmissionMetadataStorage::::get(round, submitter) - .expect("metadata checked to exist for all keys; qed"); - let assumed_pages_count = metadata.pages.iter().filter(|x| **x).count(); - ensure!(pages_count == assumed_pages_count, "wrong page count"); - } - - Ok(()) - } - } - - #[pallet::pallet] - pub struct Pallet(PhantomData); - - #[pallet::event] - #[pallet::generate_deposit(pub(super) fn deposit_event)] - pub enum 
Event { - /// Upcoming submission has been registered for the given account, with the given score. - Registered(u32, T::AccountId, ElectionScore), - /// A page of solution solution with the given index has been stored for the given account. - Stored(u32, T::AccountId, PageIndex), - /// The given account has been rewarded with the given amount. - Rewarded(u32, T::AccountId, BalanceOf), - /// The given account has been slashed with the given amount. - Slashed(u32, T::AccountId, BalanceOf), - /// The given account has been discarded. - Discarded(u32, T::AccountId), - /// The given account has bailed. - Bailed(u32, T::AccountId), - } - - #[pallet::error] - pub enum Error { - /// The phase is not signed. - PhaseNotSigned, - /// The submission is a duplicate. - Duplicate, - /// The queue is full. - QueueFull, - /// The page index is out of bounds. - BadPageIndex, - /// The account is not registered. - NotRegistered, - /// No submission found. - NoSubmission, - } - - #[pallet::call] - impl Pallet { - /// Register oneself for an upcoming signed election. - #[pallet::weight(SignedWeightsOf::::register_eject())] - #[pallet::call_index(0)] - pub fn register( - origin: OriginFor, - claimed_score: ElectionScore, - ) -> DispatchResultWithPostInfo { - let who = ensure_signed(origin)?; - ensure!(crate::Pallet::::current_phase().is_signed(), Error::::PhaseNotSigned); - - // note: we could already check if this is a duplicate here, but prefer keeping the code - // simple for now. 
- - let deposit = T::DepositBase::get(); - let reward = T::RewardBase::get(); - let fee = T::EstimateCallFee::estimate_call_fee( - &Call::register { claimed_score }, - None.into(), - ); - let mut pages = BoundedVec::<_, _>::with_bounded_capacity(T::Pages::get() as usize); - pages.bounded_resize(T::Pages::get() as usize, false); - - let new_metadata = SubmissionMetadata { claimed_score, deposit, reward, fee, pages }; - - T::Currency::hold(&HoldReason::SignedSubmission.into(), &who, deposit)?; - let round = Self::current_round(); - let discarded = Submissions::::try_register(round, &who, new_metadata)?; - Self::deposit_event(Event::::Registered(round, who, claimed_score)); - - // maybe refund. - if discarded { - Ok(().into()) - } else { - Ok(Some(SignedWeightsOf::::register_not_full()).into()) - } - } - - /// Submit a single page of a solution. - /// - /// Must always come after [`Pallet::register`]. - /// - /// `maybe_solution` can be set to `None` to erase the page. - /// - /// Collects deposits from the signed origin based on [`Config::DepositBase`] and - /// [`Config::DepositPerPage`]. - #[pallet::weight(SignedWeightsOf::::submit_page())] - #[pallet::call_index(1)] - pub fn submit_page( - origin: OriginFor, - page: PageIndex, - maybe_solution: Option>>, - ) -> DispatchResultWithPostInfo { - let who = ensure_signed(origin)?; - ensure!(crate::Pallet::::current_phase().is_signed(), Error::::PhaseNotSigned); - let is_set = maybe_solution.is_some(); - - let round = Self::current_round(); - Submissions::::try_mutate_page(round, &who, page, maybe_solution)?; - Self::deposit_event(Event::::Stored(round, who, page)); - - // maybe refund. - if is_set { - Ok(().into()) - } else { - Ok(Some(SignedWeightsOf::::unset_page()).into()) - } - } - - /// Retract a submission. - /// - /// A portion of the deposit may be returned, based on the [`Config::BailoutGraceRatio`]. - /// - /// This will fully remove the solution from storage. 
- #[pallet::weight(SignedWeightsOf::::bail())] - #[pallet::call_index(2)] - #[transactional] - pub fn bail(origin: OriginFor) -> DispatchResultWithPostInfo { - let who = ensure_signed(origin)?; - ensure!(crate::Pallet::::current_phase().is_signed(), Error::::PhaseNotSigned); - let round = Self::current_round(); - let metadata = Submissions::::take_submission_with_data(round, &who) - .ok_or(Error::::NoSubmission)?; - - let deposit = metadata.deposit; - let to_refund = T::BailoutGraceRatio::get() * deposit; - let to_slash = deposit.defensive_saturating_sub(to_refund); - - let _res = T::Currency::release( - &HoldReason::SignedSubmission.into(), - &who, - to_refund, - Precision::BestEffort, - ) - .defensive(); - debug_assert_eq!(_res, Ok(to_refund)); - - let _res = T::Currency::burn_held( - &HoldReason::SignedSubmission.into(), - &who, - to_slash, - Precision::BestEffort, - Fortitude::Force, - ) - .defensive(); - debug_assert_eq!(_res, Ok(to_slash)); - - Self::deposit_event(Event::::Bailed(round, who)); - - Ok(None.into()) - } - } - - #[pallet::hooks] - impl Hooks> for Pallet { - fn on_initialize(now: BlockNumberFor) -> Weight { - // this code is only called when at the boundary of phase transition, which is already - // captured by the parent pallet. No need for weight. - let weight_taken_into_account: Weight = Default::default(); - - if crate::Pallet::::current_phase().is_signed_validation_open_at(now) { - let maybe_leader = Submissions::::leader(Self::current_round()); - sublog!( - info, - "signed", - "signed validation started, sending validation start signal? {:?}", - maybe_leader.is_some() - ); - - // start an attempt to verify our best thing. - if maybe_leader.is_some() { - // defensive: signed phase has just began, verifier should be in a clear state - // and ready to accept a solution. 
- let _ = ::start().defensive(); - } - } - - if crate::Pallet::::current_phase().is_unsigned_open_at(now) { - // signed validation phase just ended, make sure you stop any ongoing operation. - sublog!(info, "signed", "signed validation ended, sending validation stop signal",); - ::stop(); - } - - weight_taken_into_account - } - - #[cfg(feature = "try-runtime")] - fn try_state(n: BlockNumberFor) -> Result<(), sp_runtime::TryRuntimeError> { - Self::do_try_state(n) - } - } -} - -impl Pallet { - #[cfg(any(feature = "try-runtime", test, feature = "runtime-benchmarks"))] - pub(crate) fn do_try_state(_n: BlockNumberFor) -> Result<(), sp_runtime::TryRuntimeError> { - Submissions::::sanity_check_round(Self::current_round()) - } - - fn current_round() -> u32 { - crate::Pallet::::round() - } -} diff --git a/substrate/frame/election-provider-multi-block/src/signed/tests.rs b/substrate/frame/election-provider-multi-block/src/signed/tests.rs deleted file mode 100644 index 7d0b1652c1ed7..0000000000000 --- a/substrate/frame/election-provider-multi-block/src/signed/tests.rs +++ /dev/null @@ -1,554 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use super::{Event as SignedEvent, *}; -use crate::{mock::*, verifier::FeasibilityError}; -use sp_core::bounded_vec; - -pub type T = Runtime; - -mod calls { - use super::*; - use crate::Phase; - use sp_runtime::{DispatchError, TokenError::FundsUnavailable}; - - #[test] - fn cannot_register_with_insufficient_balance() { - ExtBuilder::signed().build_and_execute(|| { - roll_to_signed_open(); - // 777 is not funded. - assert_noop!( - SignedPallet::register(RuntimeOrigin::signed(777), Default::default()), - DispatchError::Token(FundsUnavailable) - ); - }); - - ExtBuilder::signed().build_and_execute(|| { - roll_to_signed_open(); - // 99 is funded but deposit is too high. - assert_eq!(balances(99), (100, 0)); - SignedDepositBase::set(101); - assert_noop!( - SignedPallet::register(RuntimeOrigin::signed(99), Default::default()), - DispatchError::Token(FundsUnavailable) - ); - }) - } - - #[test] - fn cannot_register_if_not_signed() { - ExtBuilder::signed().build_and_execute(|| { - assert!(crate::Pallet::::current_phase() != Phase::Signed); - assert_noop!( - SignedPallet::register(RuntimeOrigin::signed(99), Default::default()), - Error::::PhaseNotSigned - ); - }) - } - - #[test] - fn register_metadata_works() { - ExtBuilder::signed().build_and_execute(|| { - roll_to_signed_open(); - assert_full_snapshot(); - - assert_eq!(balances(99), (100, 0)); - let score = ElectionScore { minimal_stake: 100, ..Default::default() }; - - assert_ok!(SignedPallet::register(RuntimeOrigin::signed(99), score)); - assert_eq!(balances(99), (95, 5)); - - assert_eq!(Submissions::::metadata_iter(1).count(), 0); - assert_eq!(Submissions::::metadata_iter(0).count(), 1); - assert_eq!( - Submissions::::metadata_of(0, 99).unwrap(), - SubmissionMetadata { - claimed_score: score, - deposit: 5, - fee: 1, - pages: bounded_vec![false, false, false], - reward: 3 - } - ); - assert_eq!( - *Submissions::::leaderboard(0), - vec![(99, ElectionScore { minimal_stake: 100, ..Default::default() })] - ); - 
assert!(matches!(signed_events().as_slice(), &[ - SignedEvent::Registered(_, x, _), - ] if x == 99)); - - // second ones submits - assert_eq!(balances(999), (100, 0)); - let score = ElectionScore { minimal_stake: 90, ..Default::default() }; - assert_ok!(SignedPallet::register(RuntimeOrigin::signed(999), score)); - assert_eq!(balances(999), (95, 5)); - - assert_eq!( - Submissions::::metadata_of(0, 999).unwrap(), - SubmissionMetadata { - claimed_score: score, - deposit: 5, - fee: 1, - pages: bounded_vec![false, false, false], - reward: 3 - } - ); - assert!(matches!(signed_events().as_slice(), &[ - SignedEvent::Registered(..), - SignedEvent::Registered(_, x, _), - ] if x == 999)); - - assert_eq!( - *Submissions::::leaderboard(0), - vec![ - (999, ElectionScore { minimal_stake: 90, ..Default::default() }), - (99, ElectionScore { minimal_stake: 100, ..Default::default() }) - ] - ); - assert_eq!(Submissions::::metadata_iter(1).count(), 0); - assert_eq!(Submissions::::metadata_iter(0).count(), 2); - - // submit again with a new score. - assert_noop!( - SignedPallet::register( - RuntimeOrigin::signed(999), - ElectionScore { minimal_stake: 80, ..Default::default() } - ), - Error::::Duplicate, - ); - }) - } - - #[test] - fn page_submission_accumulates_fee() { - ExtBuilder::signed().build_and_execute(|| { - roll_to_signed_open(); - assert_full_snapshot(); - - let score = ElectionScore { minimal_stake: 100, ..Default::default() }; - assert_ok!(SignedPallet::register(RuntimeOrigin::signed(99), score)); - - // fee for register is recorded. - assert_eq!( - Submissions::::metadata_of(0, 99).unwrap(), - SubmissionMetadata { - claimed_score: score, - deposit: 5, - fee: 1, - pages: bounded_vec![false, false, false], - reward: 3 - } - ); - - // fee for page submission is recorded. 
- assert_ok!(SignedPallet::submit_page( - RuntimeOrigin::signed(99), - 0, - Some(Default::default()) - )); - assert_eq!( - Submissions::::metadata_of(0, 99).unwrap(), - SubmissionMetadata { - claimed_score: score, - deposit: 6, - fee: 2, - pages: bounded_vec![true, false, false], - reward: 3 - } - ); - - // another fee for page submission is recorded. - assert_ok!(SignedPallet::submit_page( - RuntimeOrigin::signed(99), - 1, - Some(Default::default()) - )); - assert_eq!( - Submissions::::metadata_of(0, 99).unwrap(), - SubmissionMetadata { - claimed_score: score, - deposit: 7, - fee: 3, - pages: bounded_vec![true, true, false], - reward: 3 - } - ); - - // removal updates deposit but not the fee - assert_ok!(SignedPallet::submit_page(RuntimeOrigin::signed(99), 1, None)); - - assert_eq!( - Submissions::::metadata_of(0, 99).unwrap(), - SubmissionMetadata { - claimed_score: score, - deposit: 6, - fee: 3, - pages: bounded_vec![true, false, false], - reward: 3 - } - ); - }); - } - - #[test] - fn metadata_submission_sorted_based_on_stake() { - ExtBuilder::signed().build_and_execute(|| { - roll_to_signed_open(); - assert_full_snapshot(); - - let score_from = |x| ElectionScore { minimal_stake: x, ..Default::default() }; - let assert_held = |x| assert_eq!(balances(x), (95, 5)); - let assert_unheld = |x| assert_eq!(balances(x), (100, 0)); - - assert_ok!(SignedPallet::register(RuntimeOrigin::signed(91), score_from(100))); - assert_eq!(*Submissions::::leaderboard(0), vec![(91, score_from(100))]); - assert_held(91); - assert!( - matches!(signed_events().as_slice(), &[SignedEvent::Registered(_, x, _)] if x == 91) - ); - - // weaker one comes while we have space. 
- assert_ok!(SignedPallet::register(RuntimeOrigin::signed(92), score_from(90))); - assert_eq!( - *Submissions::::leaderboard(0), - vec![(92, score_from(90)), (91, score_from(100))] - ); - assert_held(92); - assert!(matches!(signed_events().as_slice(), &[ - SignedEvent::Registered(..), - SignedEvent::Registered(_, x, _), - ] if x == 92)); - - // stronger one comes while we have have space. - assert_ok!(SignedPallet::register(RuntimeOrigin::signed(93), score_from(110))); - assert_eq!( - *Submissions::::leaderboard(0), - vec![(92, score_from(90)), (91, score_from(100)), (93, score_from(110))] - ); - assert_held(93); - assert!(matches!(signed_events().as_slice(), &[ - SignedEvent::Registered(..), - SignedEvent::Registered(..), - SignedEvent::Registered(_, x, _), - ] if x == 93)); - - // weaker one comes while we don't have space. - assert_noop!( - SignedPallet::register(RuntimeOrigin::signed(94), score_from(80)), - Error::::QueueFull - ); - assert_eq!( - *Submissions::::leaderboard(0), - vec![(92, score_from(90)), (91, score_from(100)), (93, score_from(110))] - ); - assert_unheld(94); - // no event has been emitted this time. - assert!(matches!( - signed_events().as_slice(), - &[ - SignedEvent::Registered(..), - SignedEvent::Registered(..), - SignedEvent::Registered(..), - ] - )); - - // stronger one comes while we don't have space. Eject the weakest - assert_ok!(SignedPallet::register(RuntimeOrigin::signed(94), score_from(120))); - assert_eq!( - *Submissions::::leaderboard(0), - vec![(91, score_from(100)), (93, score_from(110)), (94, score_from(120))] - ); - assert!(matches!( - signed_events().as_slice(), - &[ - SignedEvent::Registered(..), - SignedEvent::Registered(..), - SignedEvent::Registered(..), - SignedEvent::Discarded(_, 92), - SignedEvent::Registered(_, 94, _), - ] - )); - assert_held(94); - assert_unheld(92); - - // another stronger one comes, only replace the weakest. 
- assert_ok!(SignedPallet::register(RuntimeOrigin::signed(95), score_from(105))); - assert_eq!( - *Submissions::::leaderboard(0), - vec![(95, score_from(105)), (93, score_from(110)), (94, score_from(120))] - ); - assert_held(95); - assert_unheld(91); - assert!(matches!( - signed_events().as_slice(), - &[ - SignedEvent::Registered(..), - SignedEvent::Registered(..), - SignedEvent::Registered(..), - SignedEvent::Discarded(..), - SignedEvent::Registered(..), - SignedEvent::Discarded(_, 91), - SignedEvent::Registered(_, 95, _), - ] - )); - }) - } - - #[test] - fn can_bail_at_a_cost() { - ExtBuilder::signed().build_and_execute(|| { - roll_to_signed_open(); - assert_full_snapshot(); - - let score = ElectionScore { minimal_stake: 100, ..Default::default() }; - assert_ok!(SignedPallet::register(RuntimeOrigin::signed(99), score)); - assert_eq!(balances(99), (95, 5)); - - // not submitted, cannot bailout. - assert_noop!(SignedPallet::bail(RuntimeOrigin::signed(999)), Error::::NoSubmission); - - // can bail. - assert_ok!(SignedPallet::bail(RuntimeOrigin::signed(99))); - // 20% of the deposit returned, which is 1, 4 is slashed. - assert_eq!(balances(99), (96, 0)); - assert_no_data_for(0, 99); - - assert_eq!( - signed_events(), - vec![Event::Registered(0, 99, score), Event::Bailed(0, 99)] - ); - }); - } - - #[test] - fn can_submit_pages() { - ExtBuilder::signed().build_and_execute(|| { - roll_to_signed_open(); - assert_full_snapshot(); - - assert_noop!( - SignedPallet::submit_page(RuntimeOrigin::signed(99), 0, Default::default()), - Error::::NotRegistered - ); - - assert_ok!(SignedPallet::register( - RuntimeOrigin::signed(99), - ElectionScore { minimal_stake: 100, ..Default::default() } - )); - - assert_eq!(Submissions::::pages_of(0, 99).count(), 0); - assert_eq!(balances(99), (95, 5)); - - // indices 0, 1, 2 are valid. - assert_noop!( - SignedPallet::submit_page(RuntimeOrigin::signed(99), 3, Default::default()), - Error::::BadPageIndex - ); - - // add the first page. 
- assert_ok!(SignedPallet::submit_page( - RuntimeOrigin::signed(99), - 0, - Some(Default::default()) - )); - assert_eq!(Submissions::::pages_of(0, 99).count(), 1); - assert_eq!(balances(99), (94, 6)); - assert_eq!( - Submissions::::metadata_of(0, 99).unwrap().pages.into_inner(), - vec![true, false, false] - ); - - // replace it again, nada. - assert_ok!(SignedPallet::submit_page( - RuntimeOrigin::signed(99), - 0, - Some(Default::default()) - )); - assert_eq!(Submissions::::pages_of(0, 99).count(), 1); - assert_eq!(balances(99), (94, 6)); - - // add a new one. - assert_ok!(SignedPallet::submit_page( - RuntimeOrigin::signed(99), - 1, - Some(Default::default()) - )); - assert_eq!(Submissions::::pages_of(0, 99).count(), 2); - assert_eq!(balances(99), (93, 7)); - assert_eq!( - Submissions::::metadata_of(0, 99).unwrap().pages.into_inner(), - vec![true, true, false] - ); - - // remove one, deposit is back. - assert_ok!(SignedPallet::submit_page(RuntimeOrigin::signed(99), 0, None)); - assert_eq!(Submissions::::pages_of(0, 99).count(), 1); - assert_eq!(balances(99), (94, 6)); - assert_eq!( - Submissions::::metadata_of(0, 99).unwrap().pages.into_inner(), - vec![false, true, false] - ); - - assert!(matches!( - signed_events().as_slice(), - &[ - SignedEvent::Registered(..), - SignedEvent::Stored(.., 0), - SignedEvent::Stored(.., 0), - SignedEvent::Stored(.., 1), - SignedEvent::Stored(.., 0), - ] - )); - }); - } -} - -mod e2e { - use super::*; - #[test] - fn good_bad_evil() { - // an extensive scenario: 3 solutions submitted, once rewarded, one slashed, and one - // discarded. - ExtBuilder::signed().build_and_execute(|| { - roll_to_signed_open(); - assert_full_snapshot(); - - // an invalid, but weak solution. 
- { - let score = - ElectionScore { minimal_stake: 10, sum_stake: 10, sum_stake_squared: 100 }; - assert_ok!(SignedPallet::register(RuntimeOrigin::signed(99), score)); - assert_ok!(SignedPallet::submit_page( - RuntimeOrigin::signed(99), - 0, - Some(Default::default()) - )); - - assert_eq!(balances(99), (94, 6)); - } - - // a valid, strong solution. - let strong_score = { - let paged = mine_full_solution().unwrap(); - load_signed_for_verification(999, paged.clone()); - assert_eq!(balances(999), (92, 8)); - paged.score - }; - - // an invalid, strong solution. - { - let mut score = strong_score; - score.minimal_stake *= 2; - assert_ok!(SignedPallet::register(RuntimeOrigin::signed(92), score)); - assert_eq!(balances(92), (95, 5)); - // we don't even bother to submit a page.. - } - - assert_eq!( - Submissions::::leaderboard(0) - .into_iter() - .map(|(x, _)| x) - .collect::>(), - vec![99, 999, 92] - ); - - roll_to_signed_validation_open(); - - // 92 is slashed in 3 blocks, 999 becomes rewarded in 3 blocks, , and 99 is discarded. 
- roll_next(); - roll_next(); - roll_next(); - - assert_eq!( - Submissions::::leaderboard(0) - .into_iter() - .map(|(x, _)| x) - .collect::>(), - vec![99, 999] - ); - - roll_next(); - roll_next(); - roll_next(); - - assert_eq!( - signed_events(), - vec![ - Event::Registered( - 0, - 99, - ElectionScore { minimal_stake: 10, sum_stake: 10, sum_stake_squared: 100 } - ), - Event::Stored(0, 99, 0), - Event::Registered( - 0, - 999, - ElectionScore { - minimal_stake: 55, - sum_stake: 130, - sum_stake_squared: 8650 - } - ), - Event::Stored(0, 999, 0), - Event::Stored(0, 999, 1), - Event::Stored(0, 999, 2), - Event::Registered( - 0, - 92, - ElectionScore { - minimal_stake: 110, - sum_stake: 130, - sum_stake_squared: 8650 - } - ), - Event::Slashed(0, 92, 5), - Event::Rewarded(0, 999, 7), - Event::Discarded(0, 99) - ] - ); - - assert_eq!( - verifier_events(), - vec![ - crate::verifier::Event::Verified(2, 0), - crate::verifier::Event::Verified(1, 0), - crate::verifier::Event::Verified(0, 0), - crate::verifier::Event::VerificationFailed(0, FeasibilityError::InvalidScore), - crate::verifier::Event::Verified(2, 2), - crate::verifier::Event::Verified(1, 2), - crate::verifier::Event::Verified(0, 2), - crate::verifier::Event::Queued( - ElectionScore { - minimal_stake: 55, - sum_stake: 130, - sum_stake_squared: 8650 - }, - None - ) - ] - ); - - assert_eq!(balances(99), (100, 0)); - assert_eq!(balances(999), (107, 0)); - assert_eq!(balances(92), (95, 0)); - - // signed pallet should be in 100% clean state. - assert_ok!(Submissions::::ensure_killed(0)); - }) - } -} diff --git a/substrate/frame/election-provider-multi-block/src/unsigned/benchmarking.rs b/substrate/frame/election-provider-multi-block/src/unsigned/benchmarking.rs deleted file mode 100644 index 76efe9d9492f7..0000000000000 --- a/substrate/frame/election-provider-multi-block/src/unsigned/benchmarking.rs +++ /dev/null @@ -1,79 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use crate::{ - unsigned::{miner::OffchainWorkerMiner, Call, Config, Pallet}, - verifier::Verifier, - CurrentPhase, Phase, -}; -use frame_benchmarking::v2::*; -use frame_election_provider_support::ElectionDataProvider; -use frame_support::{assert_ok, pallet_prelude::*}; -use frame_system::RawOrigin; -use sp_std::boxed::Box; -#[benchmarks(where T: crate::Config + crate::signed::Config + crate::verifier::Config)] -mod benchmarks { - use super::*; - - #[benchmark] - fn validate_unsigned() -> Result<(), BenchmarkError> { - // TODO: for now we are not using this, maybe remove? 
- // roll to unsigned phase open - T::DataProvider::set_next_election(crate::Pallet::::reasonable_next_election()); - crate::Pallet::::roll_until_matches(|| { - matches!(CurrentPhase::::get(), Phase::Unsigned(_)) - }); - let call: Call = OffchainWorkerMiner::::mine_solution(1, false) - .map(|solution| Call::submit_unsigned { paged_solution: Box::new(solution) }) - .unwrap(); - - #[block] - { - assert_ok!(Pallet::::validate_unsigned(TransactionSource::Local, &call)); - } - - Ok(()) - } - - #[benchmark] - fn submit_unsigned() -> Result<(), BenchmarkError> { - // roll to unsigned phase open - T::DataProvider::set_next_election(crate::Pallet::::reasonable_next_election()); - crate::Pallet::::roll_until_matches(|| { - matches!(CurrentPhase::::get(), Phase::Unsigned(_)) - }); - // TODO: we need to better ensure that this is actually worst case - let solution = OffchainWorkerMiner::::mine_solution(1, false).unwrap(); - - // nothing is queued - assert!(T::Verifier::queued_score().is_none()); - #[block] - { - assert_ok!(Pallet::::submit_unsigned(RawOrigin::None.into(), Box::new(solution))); - } - - // something is queued - assert!(T::Verifier::queued_score().is_some()); - Ok(()) - } - - impl_benchmark_test_suite!( - Pallet, - crate::mock::ExtBuilder::full().build_unchecked(), - crate::mock::Runtime - ); -} diff --git a/substrate/frame/election-provider-multi-block/src/unsigned/miner.rs b/substrate/frame/election-provider-multi-block/src/unsigned/miner.rs deleted file mode 100644 index cccfef1398358..0000000000000 --- a/substrate/frame/election-provider-multi-block/src/unsigned/miner.rs +++ /dev/null @@ -1,1972 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use super::{Call, Config, Pallet}; -use crate::{ - helpers, - types::{PadSolutionPages, *}, - verifier::{self}, - CommonError, -}; -use codec::Encode; -use frame_election_provider_support::{ExtendedBalance, NposSolver, Support, VoteWeight}; -use frame_support::{traits::Get, BoundedVec}; -use frame_system::pallet_prelude::*; -use scale_info::TypeInfo; -use sp_npos_elections::EvaluateSupport; -use sp_runtime::{ - offchain::storage::{MutateStorageError, StorageValueRef}, - traits::{SaturatedConversion, Saturating, Zero}, -}; -use sp_std::{collections::btree_map::BTreeMap, prelude::*}; - -/// The type of the snapshot. -/// -/// Used to express errors. -#[derive(Debug, Eq, PartialEq)] -pub enum SnapshotType { - /// Voters at the given page missing. - Voters(PageIndex), - /// Targets missing. - Targets, - /// Metadata missing. - Metadata, - /// Desired targets missing. - DesiredTargets, -} - -pub(crate) type MinerSolverErrorOf = <::Solver as NposSolver>::Error; - -/// The errors related to the [`BaseMiner`]. -#[derive( - frame_support::DebugNoBound, frame_support::EqNoBound, frame_support::PartialEqNoBound, -)] -pub enum MinerError { - /// An internal error in the NPoS elections crate. - NposElections(sp_npos_elections::Error), - /// An internal error in the generic solver. - Solver(MinerSolverErrorOf), - /// Snapshot data was unavailable unexpectedly. - SnapshotUnAvailable(SnapshotType), - /// The base, common errors from the pallet. - Common(CommonError), - /// The solution generated from the miner is not feasible. 
- Feasibility(verifier::FeasibilityError), - /// Some page index has been invalid. - InvalidPage, - /// Too many winners were removed during trimming. - TooManyWinnersRemoved, - /// A defensive error has occurred. - Defensive(&'static str), -} - -impl From for MinerError { - fn from(e: sp_npos_elections::Error) -> Self { - MinerError::NposElections(e) - } -} - -impl From for MinerError { - fn from(e: verifier::FeasibilityError) -> Self { - MinerError::Feasibility(e) - } -} - -impl From for MinerError { - fn from(e: CommonError) -> Self { - MinerError::Common(e) - } -} - -/// The errors related to the `OffchainWorkerMiner`. -#[derive( - frame_support::DebugNoBound, frame_support::EqNoBound, frame_support::PartialEqNoBound, -)] -pub(crate) enum OffchainMinerError { - /// An error in the base miner. - BaseMiner(MinerError), - /// The base, common errors from the pallet. - Common(CommonError), - /// Something went wrong fetching the lock. - Lock(&'static str), - /// Submitting a transaction to the pool failed. - PoolSubmissionFailed, - /// Cannot restore a solution that was not stored. - NoStoredSolution, - /// Cached solution is not a `submit_unsigned` call. - SolutionCallInvalid, - /// Failed to store a solution. - FailedToStoreSolution, -} - -impl From> for OffchainMinerError { - fn from(e: MinerError) -> Self { - OffchainMinerError::BaseMiner(e) - } -} - -impl From for OffchainMinerError { - fn from(e: CommonError) -> Self { - OffchainMinerError::Common(e) - } -} - -/// Configurations for the miner. -/// -/// This is extracted from the main crate's config so that an offchain miner can readily use the -/// [`BaseMiner`] without needing to deal with the rest of the pallet's configuration. -pub trait MinerConfig { - /// The account id type. - type AccountId: Ord + Clone + codec::Codec + core::fmt::Debug; - /// The solution that the miner is mining. - /// The solution type. 
- type Solution: codec::FullCodec - + Default - + PartialEq - + Eq - + Clone - + sp_std::fmt::Debug - + Ord - + NposSolution - + TypeInfo - + codec::MaxEncodedLen; - /// The solver type. - type Solver: NposSolver; - /// The maximum length that the miner should use for a solution, per page. - type MaxLength: Get; - /// Maximum number of votes per voter. - /// - /// Must be the same as configured in the [`crate::Config::DataProvider`]. - type MaxVotesPerVoter: Get; - /// Maximum number of winners to select per page. - /// - /// The miner should respect this, it is used for trimming, and bounded data types. - /// - /// Should equal to the onchain value set in `Verifier::Config`. - type MaxWinnersPerPage: Get; - /// Maximum number of backers per winner, per page. - /// - /// The miner should respect this, it is used for trimming, and bounded data types. - /// - /// Should equal to the onchain value set in `Verifier::Config`. - type MaxBackersPerWinner: Get; - /// Maximum number of backers, per winner, across all pages. - /// - /// The miner should respect this, it is used for trimming, and bounded data types. - /// - /// Should equal to the onchain value set in `Verifier::Config`. - type MaxBackersPerWinnerFinal: Get; - /// Maximum number of backers, per winner, per page. - - /// Maximum number of pages that we may compute. - /// - /// Must be the same as configured in the [`crate::Config`]. - type Pages: Get; - /// Maximum number of voters per snapshot page. - /// - /// Must be the same as configured in the [`crate::Config`]. - type VoterSnapshotPerBlock: Get; - /// Maximum number of targets per snapshot page. - /// - /// Must be the same as configured in the [`crate::Config`]. - type TargetSnapshotPerBlock: Get; - /// The hash type of the runtime. - type Hash: Eq + PartialEq; -} - -/// A base miner that is only capable of mining a new solution and checking it against the state of -/// this pallet for feasibility, and trimming its length/weight. 
-pub struct BaseMiner(sp_std::marker::PhantomData); - -/// Parameterized `BoundedSupports` for the miner. -pub type SupportsOfMiner = frame_election_provider_support::BoundedSupports< - ::AccountId, - ::MaxWinnersPerPage, - ::MaxBackersPerWinner, ->; - -/// Aggregator for inputs to [`BaseMiner`]. -pub struct MineInput { - /// Number of winners to pick. - pub desired_targets: u32, - /// All of the targets. - pub all_targets: BoundedVec, - /// Paginated list of voters. - /// - /// Note for staking-miners: How this is calculated is rather delicate, and the order of the - /// nested vectors matter. See carefully how `OffchainWorkerMiner::mine_solution` is doing - /// this. - pub voter_pages: AllVoterPagesOf, - /// Number of pages to mind. - /// - /// Note for staking-miner: Always use [`MinerConfig::Pages`] unless explicitly wanted - /// otherwise. - pub pages: PageIndex, - /// Whether to reduce the solution. Almost always`` - pub do_reduce: bool, - /// The current round for which the solution is being calculated. - pub round: u32, -} - -impl BaseMiner { - /// Mine a new npos solution, with the given number of pages. - /// - /// This miner is only capable of mining a solution that either uses all of the pages of the - /// snapshot, or the top `pages` thereof. - /// - /// This always trims the solution to match a few parameters: - /// - /// [`MinerConfig::MaxWinnersPerPage`], [`MinerConfig::MaxBackersPerWinner`], - /// [`MinerConfig::MaxBackersPerWinnerFinal`] and [`MinerConfig::MaxLength`]. - /// - /// The order of pages returned is aligned with the snapshot. For example, the index 0 of the - /// returning solution pages corresponds to the page 0 of the snapshot. - /// - /// The only difference is, if the solution is partial, then [`Pagify`] must be used to properly - /// pad the results. 
- pub fn mine_solution( - MineInput { desired_targets, all_targets, voter_pages, mut pages, do_reduce, round }: MineInput< - T, - >, - ) -> Result, MinerError> { - pages = pages.min(T::Pages::get()); - - // we also build this closure early, so we can let `targets` be consumed. - let voter_page_fn = helpers::generate_voter_page_fn::(&voter_pages); - let target_index_fn = helpers::target_index_fn::(&all_targets); - - // now flatten the voters, ready to be used as if pagination did not existed. - let all_voters: AllVoterPagesFlattenedOf = voter_pages - .iter() - .cloned() - .flatten() - .collect::>() - .try_into() - .expect("Flattening the voters into `AllVoterPagesFlattenedOf` cannot fail; qed"); - - let ElectionResult { winners: _, assignments } = T::Solver::solve( - desired_targets as usize, - all_targets.clone().to_vec(), - all_voters.clone().into_inner(), - ) - .map_err(|e| MinerError::Solver(e))?; - - // reduce and trim supports. We don't trim length and weight here, since those are dependent - // on the final form of the solution ([`PagedRawSolution`]), thus we do it later. - let trimmed_assignments = { - // Implementation note: the overall code path is as follows: election_results -> - // assignments -> staked assignments -> reduce -> supports -> trim supports -> staked - // assignments -> final assignments - // This is by no means the most performant, but is the clear and correct. - use sp_npos_elections::{ - assignment_ratio_to_staked_normalized, assignment_staked_to_ratio_normalized, - reduce, supports_to_staked_assignment, to_supports, EvaluateSupport, - }; - - // These closures are of no use in the rest of these code, since they only deal with the - // overall list of voters. - let cache = helpers::generate_voter_cache::(&all_voters); - let stake_of = helpers::stake_of_fn::(&all_voters, &cache); - - // 1. 
convert to staked and reduce - let (reduced_count, staked) = { - let mut staked = assignment_ratio_to_staked_normalized(assignments, &stake_of) - .map_err::, _>(Into::into)?; - - // first, reduce the solution if requested. This will already remove a lot of - // "redundant" and reduce the chance for the need of any further trimming. - let count = if do_reduce { reduce(&mut staked) } else { 0 }; - (count, staked) - }; - - // 2. trim the supports by backing. - let (_pre_score, final_trimmed_assignments, winners_removed, backers_removed) = { - // these supports could very well be invalid for SCORE purposes. The reason is that - // you might trim out half of an account's stake, but we don't look for this - // account's other votes to fix it. - let supports_invalid_score = to_supports(&staked); - - let pre_score = (&supports_invalid_score).evaluate(); - let (bounded_invalid_score, winners_removed, backers_removed) = - SupportsOfMiner::::sorted_truncate_from(supports_invalid_score); - - // now recreated the staked assignments - let staked = supports_to_staked_assignment(bounded_invalid_score.into()); - let assignments = assignment_staked_to_ratio_normalized(staked) - .map_err::, _>(Into::into)?; - (pre_score, assignments, winners_removed, backers_removed) - }; - - miner_log!( - debug, - "initial score = {:?}, reduced {} edges, trimmed {} winners from supports, trimmed {} backers from support", - _pre_score, - reduced_count, - winners_removed, - backers_removed, - ); - - final_trimmed_assignments - }; - - // split the assignments into different pages. - let mut paged_assignments: BoundedVec>, T::Pages> = - BoundedVec::with_bounded_capacity(pages as usize); - paged_assignments.bounded_resize(pages as usize, Default::default()); - for assignment in trimmed_assignments { - // NOTE: this `page` index is LOCAL. It does not correspond to the actual page index of - // the snapshot map, but rather the index in the `voter_pages`. 
- let page = voter_page_fn(&assignment.who).ok_or(MinerError::InvalidPage)?; - let assignment_page = - paged_assignments.get_mut(page as usize).ok_or(MinerError::InvalidPage)?; - assignment_page.push(assignment); - } - - // convert each page to a compact struct - let solution_pages: BoundedVec, T::Pages> = paged_assignments - .into_iter() - .enumerate() - .map(|(page_index, assignment_page)| { - // get the page of the snapshot that corresponds to this page of the assignments. - let page: PageIndex = page_index.saturated_into(); - let voter_snapshot_page = voter_pages - .get(page as usize) - .ok_or(MinerError::SnapshotUnAvailable(SnapshotType::Voters(page)))?; - - let voter_index_fn = { - let cache = helpers::generate_voter_cache::(&voter_snapshot_page); - helpers::voter_index_fn_owned::(cache) - }; - >::from_assignment( - &assignment_page, - &voter_index_fn, - &target_index_fn, - ) - .map_err::, _>(Into::into) - }) - .collect::, _>>()? - .try_into() - .expect("`paged_assignments` is bound by `T::Pages`; length cannot change in iter chain; qed"); - - // now do the weight and length trim. - let mut solution_pages_unbounded = solution_pages.into_inner(); - let _trim_length_weight = - Self::maybe_trim_weight_and_len(&mut solution_pages_unbounded, &voter_pages)?; - let solution_pages = solution_pages_unbounded - .try_into() - .expect("maybe_trim_weight_and_len cannot increase the length of its input; qed."); - miner_log!(debug, "trimmed {} voters due to length restriction.", _trim_length_weight); - - // finally, wrap everything up. Assign a fake score here, since we might need to re-compute - // it. - let mut paged = PagedRawSolution { round, solution_pages, score: Default::default() }; - - // OPTIMIZATION: we do feasibility_check inside `compute_score`, and once later - // pre_dispatch. I think it is fine, but maybe we can improve it. 
- let score = Self::compute_score(&paged, &voter_pages, &all_targets, desired_targets) - .map_err::, _>(Into::into)?; - paged.score = score; - - miner_log!( - info, - "mined a solution with {} pages, score {:?}, {} winners, {} voters, {} edges, and {} bytes", - pages, - score, - paged.winner_count_single_page_target_snapshot(), - paged.voter_count(), - paged.edge_count(), - paged.using_encoded(|b| b.len()) - ); - - Ok(paged) - } - - /// perform the feasibility check on all pages of a solution, returning `Ok(())` if all good and - /// the corresponding error otherwise. - pub fn check_feasibility( - paged_solution: &PagedRawSolution, - paged_voters: &AllVoterPagesOf, - snapshot_targets: &BoundedVec, - desired_targets: u32, - solution_type: &str, - ) -> Result>, MinerError> { - // check every solution page for feasibility. - let padded_voters = paged_voters.clone().pad_solution_pages(T::Pages::get()); - paged_solution - .solution_pages - .pagify(T::Pages::get()) - .map(|(page_index, page_solution)| { - verifier::feasibility_check_page_inner_with_snapshot::( - page_solution.clone(), - &padded_voters[page_index as usize], - snapshot_targets, - desired_targets, - ) - }) - .collect::, _>>() - .map_err(|err| { - miner_log!( - warn, - "feasibility check failed for {} solution at: {:?}", - solution_type, - err - ); - MinerError::from(err) - }) - .and_then(|supports| { - // TODO: Check `MaxBackersPerWinnerFinal` - Ok(supports) - }) - } - - /// Take the given raw paged solution and compute its score. This will replicate what the chain - /// would do as closely as possible, and expects all the corresponding snapshot data to be - /// available. 
- fn compute_score( - paged_solution: &PagedRawSolution, - paged_voters: &AllVoterPagesOf, - all_targets: &BoundedVec, - desired_targets: u32, - ) -> Result> { - let all_supports = Self::check_feasibility( - paged_solution, - paged_voters, - all_targets, - desired_targets, - "mined", - )?; - let mut total_backings: BTreeMap = BTreeMap::new(); - all_supports.into_iter().flat_map(|x| x.0).for_each(|(who, support)| { - let backing = total_backings.entry(who).or_default(); - *backing = backing.saturating_add(support.total); - }); - - let all_supports = total_backings - .into_iter() - .map(|(who, total)| (who, Support { total, ..Default::default() })) - .collect::>(); - - Ok((&all_supports).evaluate()) - } - - /// Trim the given supports so that the count of backings in none of them exceeds - /// [`crate::verifier::Config::MaxBackersPerWinner`]. - /// - /// Note that this should only be called on the *global, non-paginated* supports. Calling this - /// on a single page of supports is essentially pointless and does not guarantee anything in - /// particular. - /// - /// Returns the count of supports trimmed. - pub fn trim_supports(supports: &mut sp_npos_elections::Supports) -> u32 { - let limit = T::MaxBackersPerWinner::get() as usize; - let mut count = 0; - supports - .iter_mut() - .filter_map( - |(_, support)| if support.voters.len() > limit { Some(support) } else { None }, - ) - .for_each(|support| { - support.voters.sort_unstable_by(|(_, b1), (_, b2)| b1.cmp(&b2).reverse()); - support.voters.truncate(limit); - support.total = support.voters.iter().fold(0, |acc, (_, x)| acc.saturating_add(*x)); - count.saturating_inc(); - }); - count - } - - /// Maybe tim the weight and length of the given multi-page solution. - /// - /// Returns the number of voters removed. - /// - /// If either of the bounds are not met, the trimming strategy is as follows: - /// - /// Start from the least significant page. Assume only this page is going to be trimmed. 
call - /// `page.sort()` on this page. This will make sure in each field (`votes1`, `votes2`, etc.) of - /// that page, the voters are sorted by descending stake. Then, we compare the last item of each - /// field. This is the process of removing the single least staked voter. - /// - /// We repeat this until satisfied, for both weight and length. If a full page is removed, but - /// the bound is not satisfied, we need to make sure that we sort the next least valuable page, - /// and repeat the same process. - /// - /// NOTE: this is a public function to be used by the `OffchainWorkerMiner` or any similar one, - /// based on the submission strategy. The length and weight bounds of a call are dependent on - /// the number of pages being submitted, the number of blocks over which we submit, and the type - /// of the transaction and its weight (e.g. signed or unsigned). - /// - /// NOTE: It could be that this function removes too many voters, and the solution becomes - /// invalid. This is not yet handled and only a warning is emitted. - pub fn maybe_trim_weight_and_len( - solution_pages: &mut Vec>, - paged_voters: &AllVoterPagesOf, - ) -> Result> { - debug_assert_eq!(solution_pages.len(), paged_voters.len()); - let size_limit = T::MaxLength::get(); - - let needs_any_trim = |solution_pages: &mut Vec>| { - let size = solution_pages.encoded_size() as u32; - let needs_len_trim = size > size_limit; - // a reminder that we used to have weight trimming here, but not more! - let needs_weight_trim = false; - needs_weight_trim || needs_len_trim - }; - - // Note the solution might be partial. In either case, this is its least significant page. 
- let mut current_trimming_page = 0; - let current_trimming_page_stake_of = |current_trimming_page: usize| { - Box::new(move |voter_index: &SolutionVoterIndexOf| -> VoteWeight { - paged_voters - .get(current_trimming_page) - .and_then(|page_voters| { - page_voters - .get((*voter_index).saturated_into::()) - .map(|(_, s, _)| *s) - }) - .unwrap_or_default() - }) - }; - - let sort_current_trimming_page = - |current_trimming_page: usize, solution_pages: &mut Vec>| { - solution_pages.get_mut(current_trimming_page).map(|solution_page| { - let stake_of_fn = current_trimming_page_stake_of(current_trimming_page); - solution_page.sort(stake_of_fn) - }); - }; - - let is_empty = |solution_pages: &Vec>| { - solution_pages.iter().all(|page| page.voter_count().is_zero()) - }; - - if needs_any_trim(solution_pages) { - sort_current_trimming_page(current_trimming_page, solution_pages) - } - - // Implementation note: we want `solution_pages` and `paged_voters` to remain in sync, so - // while one of the pages of `solution_pages` might become "empty" we prefer not removing - // it. This has a slight downside that even an empty pages consumes a few dozens of bytes, - // which we accept for code simplicity. - - let mut removed = 0; - while needs_any_trim(solution_pages) && !is_empty(solution_pages) { - if let Some(removed_idx) = - solution_pages.get_mut(current_trimming_page).and_then(|page| { - let stake_of_fn = current_trimming_page_stake_of(current_trimming_page); - page.remove_weakest_sorted(&stake_of_fn) - }) { - miner_log!( - trace, - "removed voter at index {:?} of (un-pagified) page {} as the weakest due to weight/length limits.", - removed_idx, - current_trimming_page - ); - // we removed one person, continue. - removed.saturating_inc(); - } else { - // this page cannot support remove anymore. Try and go to the next page. 
- miner_log!( - debug, - "page {} seems to be fully empty now, moving to the next one", - current_trimming_page - ); - let next_page = current_trimming_page.saturating_add(1); - if paged_voters.len() > next_page { - current_trimming_page = next_page; - sort_current_trimming_page(current_trimming_page, solution_pages); - } else { - miner_log!( - warn, - "no more pages to trim from at page {}, already trimmed", - current_trimming_page - ); - break - } - } - } - - Ok(removed) - } -} - -/// A miner that is suited to work inside offchain worker environment. -/// -/// This is parameterized by [`Config`], rather than [`MinerConfig`]. -pub(crate) struct OffchainWorkerMiner(sp_std::marker::PhantomData); - -impl OffchainWorkerMiner { - /// Storage key used to store the offchain worker running status. - pub(crate) const OFFCHAIN_LOCK: &'static [u8] = b"parity/multi-block-unsigned-election/lock"; - /// Storage key used to store the last block number at which offchain worker ran. - const OFFCHAIN_LAST_BLOCK: &'static [u8] = b"parity/multi-block-unsigned-election"; - /// Storage key used to cache the solution `call` and its snapshot fingerprint. - const OFFCHAIN_CACHED_CALL: &'static [u8] = b"parity/multi-block-unsigned-election/call"; - /// The number of pages that the offchain worker miner will try and mine. - const MINING_PAGES: PageIndex = 1; - - pub(crate) fn fetch_snapshot( - pages: PageIndex, - ) -> Result< - (AllVoterPagesOf, BoundedVec, u32), - OffchainMinerError, - > { - // read the appropriate snapshot pages. - let desired_targets = crate::Snapshot::::desired_targets() - .ok_or(MinerError::SnapshotUnAvailable(SnapshotType::DesiredTargets))?; - let all_targets = crate::Snapshot::::targets() - .ok_or(MinerError::SnapshotUnAvailable(SnapshotType::Targets))?; - - // This is the range of voters that we are interested in. Mind the second `.rev`, it is - // super critical. 
- let voter_pages_range = (crate::Pallet::::lsp()..crate::Pallet::::msp() + 1) - .rev() - .take(pages as usize) - .rev(); - - sublog!( - debug, - "unsigned::base-miner", - "mining a solution with {} pages, voter snapshot range will be: {:?}", - pages, - voter_pages_range.clone().collect::>() - ); - - // NOTE: if `pages (2) < T::Pages (3)`, at this point this vector will have length 2, - // with a layout of `[snapshot(1), snapshot(2)]`, namely the two most significant pages - // of the snapshot. - let voter_pages: BoundedVec<_, T::Pages> = voter_pages_range - .map(|p| { - crate::Snapshot::::voters(p) - .ok_or(MinerError::SnapshotUnAvailable(SnapshotType::Voters(p))) - }) - .collect::, _>>()? - .try_into() - .expect( - "`voter_pages_range` has `.take(pages)`; it must have length less than pages; it - must convert to `BoundedVec`; qed", - ); - - Ok((voter_pages, all_targets, desired_targets)) - } - - pub(crate) fn mine_solution( - pages: PageIndex, - do_reduce: bool, - ) -> Result, OffchainMinerError> { - let (voter_pages, all_targets, desired_targets) = Self::fetch_snapshot(pages)?; - let round = crate::Pallet::::round(); - BaseMiner::::mine_solution(MineInput { - desired_targets, - all_targets, - voter_pages, - pages, - do_reduce, - round, - }) - .map_err(Into::into) - } - - /// Get a checked solution from the base miner, ensure unsigned-specific checks also pass, then - /// return an submittable call. - fn mine_checked_call() -> Result, OffchainMinerError> { - // we always do reduce in the offchain worker miner. - let reduce = true; - - // NOTE: we don't run any checks in the base miner, and run all of them via - // `Self::full_checks`. - let paged_solution = Self::mine_solution(Self::MINING_PAGES, reduce) - .map_err::, _>(Into::into)?; - // check the call fully, no fingerprinting. 
- let _ = Self::check_solution(&paged_solution, None, true, "mined")?; - - let call: Call = - Call::::submit_unsigned { paged_solution: Box::new(paged_solution) }.into(); - - Ok(call) - } - - /// Mine a new checked solution, cache it, and submit it back to the chain as an unsigned - /// transaction. - pub fn mine_check_save_submit() -> Result<(), OffchainMinerError> { - sublog!(debug, "unsigned::ocw-miner", "miner attempting to compute an unsigned solution."); - let call = Self::mine_checked_call()?; - Self::save_solution(&call, crate::Snapshot::::fingerprint())?; - Self::submit_call(call) - } - - /// Check the solution, from the perspective of the offchain-worker miner: - /// - /// 1. unsigned-specific checks. - /// 2. full-checks of the base miner - /// 1. optionally feasibility check. - /// 2. snapshot-independent checks. - /// 1. optionally, snapshot fingerprint. - pub fn check_solution( - paged_solution: &PagedRawSolution, - maybe_snapshot_fingerprint: Option, - do_feasibility: bool, - solution_type: &str, - ) -> Result<(), OffchainMinerError> { - // NOTE: we prefer cheap checks first, so first run unsigned checks. - Pallet::::unsigned_specific_checks(paged_solution)?; - Self::base_check_solution( - paged_solution, - maybe_snapshot_fingerprint, - do_feasibility, - solution_type, - ) - } - - fn submit_call(call: Call) -> Result<(), OffchainMinerError> { - sublog!( - debug, - "unsigned::ocw-miner", - "miner submitting a solution as an unsigned transaction" - ); - let xt = T::create_inherent(call.into()); - frame_system::offchain::SubmitTransaction::>::submit_transaction(xt) - .map(|_| { - sublog!( - debug, - "unsigned::ocw-miner", - "miner submitted a solution as an unsigned transaction", - ); - }) - .map_err(|_| OffchainMinerError::PoolSubmissionFailed) - } - - /// Check the solution, from the perspective of the base miner: - /// - /// 1. snapshot-independent checks. - /// - with the fingerprint check being an optional step fo that. - /// 2. 
optionally, feasibility check. - /// - /// In most cases, you should always use this either with `do_feasibility = true` or - /// `maybe_snapshot_fingerprint.is_some()`. Doing both could be an overkill. The snapshot - /// staying constant (which can be checked via the hash) is a string guarantee that the - /// feasibility still holds. - /// - /// The difference between this and [`Self::check_solution`] is that this does not run unsigned - /// specific checks. - pub(crate) fn base_check_solution( - paged_solution: &PagedRawSolution, - maybe_snapshot_fingerprint: Option, - do_feasibility: bool, - solution_type: &str, // TODO: remove - ) -> Result<(), OffchainMinerError> { - let _ = crate::Pallet::::snapshot_independent_checks( - paged_solution, - maybe_snapshot_fingerprint, - )?; - - if do_feasibility { - let (voter_pages, all_targets, desired_targets) = - Self::fetch_snapshot(paged_solution.solution_pages.len() as PageIndex)?; - let _ = BaseMiner::::check_feasibility( - &paged_solution, - &voter_pages, - &all_targets, - desired_targets, - solution_type, - )?; - } - - Ok(()) - } - - /// Attempt to restore a solution from cache. Otherwise, compute it fresh. Either way, - /// submit if our call's score is greater than that of the cached solution. - pub fn restore_or_compute_then_maybe_submit() -> Result<(), OffchainMinerError> { - sublog!( - debug, - "unsigned::ocw-miner", - "miner attempting to restore or compute an unsigned solution." - ); - - let call = Self::restore_solution() - .and_then(|(call, snapshot_fingerprint)| { - // ensure the cached call is still current before submitting - if let Call::submit_unsigned { paged_solution, .. } = &call { - // we check the snapshot fingerprint instead of doing a full feasibility. 
- OffchainWorkerMiner::::check_solution( - paged_solution, - Some(snapshot_fingerprint), - false, - "restored" - ).map_err::, _>(Into::into)?; - Ok(call) - } else { - Err(OffchainMinerError::SolutionCallInvalid) - } - }) - .or_else::, _>(|error| { - use OffchainMinerError as OE; - use MinerError as ME; - use CommonError as CE; - match error { - OE::NoStoredSolution => { - // IFF, not present regenerate. - let call = Self::mine_checked_call()?; - Self::save_solution(&call, crate::Snapshot::::fingerprint())?; - Ok(call) - }, - OE::Common(ref e) => { - sublog!( - error, - "unsigned::ocw-miner", - "unsigned specific checks failed ({:?}) while restoring solution. This should never happen. clearing cache.", - e, - ); - Self::clear_offchain_solution_cache(); - Err(error) - }, - OE::BaseMiner(ME::Feasibility(_)) - | OE::BaseMiner(ME::Common(CE::WrongRound)) - | OE::BaseMiner(ME::Common(CE::WrongFingerprint)) - => { - // note that failing `Feasibility` can only mean that the solution was - // computed over a snapshot that has changed due to a fork. - sublog!(warn, "unsigned::ocw-miner", "wiping infeasible solution ({:?}).", error); - // kill the "bad" solution. - Self::clear_offchain_solution_cache(); - - // .. then return the error as-is. - Err(error) - }, - _ => { - sublog!(debug, "unsigned::ocw-miner", "unhandled error in restoring offchain solution {:?}", error); - // nothing to do. Return the error as-is. - Err(error) - }, - } - })?; - - Self::submit_call(call) - } - - /// Checks if an execution of the offchain worker is permitted at the given block number, or - /// not. - /// - /// This makes sure that - /// 1. we don't run on previous blocks in case of a re-org - /// 2. we don't run twice within a window of length `T::OffchainRepeat`. - /// - /// Returns `Ok(())` if offchain worker limit is respected, `Err(reason)` otherwise. If - /// `Ok()` is returned, `now` is written in storage and will be used in further calls as the - /// baseline. 
- pub fn ensure_offchain_repeat_frequency( - now: BlockNumberFor, - ) -> Result<(), OffchainMinerError> { - let threshold = T::OffchainRepeat::get(); - let last_block = StorageValueRef::persistent(&Self::OFFCHAIN_LAST_BLOCK); - - let mutate_stat = last_block.mutate::<_, &'static str, _>( - |maybe_head: Result>, _>| { - match maybe_head { - Ok(Some(head)) if now < head => Err("fork."), - Ok(Some(head)) if now >= head && now <= head + threshold => - Err("recently executed."), - Ok(Some(head)) if now > head + threshold => { - // we can run again now. Write the new head. - Ok(now) - }, - _ => { - // value doesn't exists. Probably this node just booted up. Write, and - // run - Ok(now) - }, - } - }, - ); - - match mutate_stat { - // all good - Ok(_) => Ok(()), - // failed to write. - Err(MutateStorageError::ConcurrentModification(_)) => Err(OffchainMinerError::Lock( - "failed to write to offchain db (concurrent modification).", - )), - // fork etc. - Err(MutateStorageError::ValueFunctionFailed(why)) => Err(OffchainMinerError::Lock(why)), - } - } - - /// Save a given call into OCW storage. - fn save_solution( - call: &Call, - snapshot_fingerprint: T::Hash, - ) -> Result<(), OffchainMinerError> { - sublog!(debug, "unsigned::ocw-miner", "saving a call to the offchain storage."); - let storage = StorageValueRef::persistent(&Self::OFFCHAIN_CACHED_CALL); - match storage.mutate::<_, (), _>(|_| Ok((call.clone(), snapshot_fingerprint))) { - Ok(_) => Ok(()), - Err(MutateStorageError::ConcurrentModification(_)) => - Err(OffchainMinerError::FailedToStoreSolution), - Err(MutateStorageError::ValueFunctionFailed(_)) => { - // this branch should be unreachable according to the definition of - // `StorageValueRef::mutate`: that function should only ever `Err` if the closure we - // pass it returns an error. however, for safety in case the definition changes, we - // do not optimize the branch away or panic. 
- Err(OffchainMinerError::FailedToStoreSolution) - }, - } - } - - /// Get a saved solution from OCW storage if it exists. - fn restore_solution() -> Result<(Call, T::Hash), OffchainMinerError> { - StorageValueRef::persistent(&Self::OFFCHAIN_CACHED_CALL) - .get() - .ok() - .flatten() - .ok_or(OffchainMinerError::NoStoredSolution) - } - - /// Clear a saved solution from OCW storage. - fn clear_offchain_solution_cache() { - sublog!(debug, "unsigned::ocw-miner", "clearing offchain call cache storage."); - let mut storage = StorageValueRef::persistent(&Self::OFFCHAIN_CACHED_CALL); - storage.clear(); - } - - #[cfg(test)] - fn cached_solution() -> Option> { - StorageValueRef::persistent(&Self::OFFCHAIN_CACHED_CALL) - .get::>() - .unwrap() - } -} - -// This will only focus on testing the internals of `maybe_trim_weight_and_len_works`. -#[cfg(test)] -mod trim_weight_length { - use super::*; - use crate::{mock::*, verifier::Verifier}; - use frame_election_provider_support::TryFromUnboundedPagedSupports; - use sp_npos_elections::Support; - - #[test] - fn trim_length() { - // This is just demonstration to show the normal election result with new votes, without any - // trimming. - ExtBuilder::unsigned().build_and_execute(|| { - let mut current_voters = Voters::get(); - current_voters.iter_mut().for_each(|(who, stake, ..)| *stake = *who); - Voters::set(current_voters); - - roll_to_snapshot_created(); - ensure_voters(3, 12); - - let solution = mine_full_solution().unwrap(); - - assert_eq!( - solution.solution_pages.iter().map(|page| page.voter_count()).sum::(), - 8 - ); - - assert_eq!(solution.solution_pages.encoded_size(), 105); - - load_mock_signed_and_start(solution); - let supports = roll_to_full_verification(); - - // a solution is queued. - assert!(VerifierPallet::queued_score().is_some()); - - assert_eq!( - supports, - vec![ - // if we set any limit less than 105, 30 will be the first to leave. 
- vec![ - (30, Support { total: 30, voters: vec![(30, 30)] }), - (40, Support { total: 40, voters: vec![(40, 40)] }) - ], - vec![ - (30, Support { total: 11, voters: vec![(7, 7), (5, 2), (6, 2)] }), - (40, Support { total: 7, voters: vec![(5, 3), (6, 4)] }) - ], - vec![(40, Support { total: 9, voters: vec![(2, 2), (3, 3), (4, 4)] })] - ] - .try_from_unbounded_paged() - .unwrap() - ); - }); - - ExtBuilder::unsigned().miner_max_length(104).build_and_execute(|| { - let mut current_voters = Voters::get(); - current_voters.iter_mut().for_each(|(who, stake, ..)| *stake = *who); - Voters::set(current_voters); - - roll_to_snapshot_created(); - ensure_voters(3, 12); - - let solution = mine_full_solution().unwrap(); - - assert_eq!( - solution.solution_pages.iter().map(|page| page.voter_count()).sum::(), - 7 - ); - - assert_eq!(solution.solution_pages.encoded_size(), 99); - - load_mock_signed_and_start(solution); - let supports = roll_to_full_verification(); - - // a solution is queued. - assert!(VerifierPallet::queued_score().is_some()); - - assert_eq!( - supports, - vec![ - // 30 is gone! 
- vec![(40, Support { total: 40, voters: vec![(40, 40)] })], - vec![ - (30, Support { total: 11, voters: vec![(7, 7), (5, 2), (6, 2)] }), - (40, Support { total: 7, voters: vec![(5, 3), (6, 4)] }) - ], - vec![(40, Support { total: 9, voters: vec![(2, 2), (3, 3), (4, 4)] })] - ] - .try_from_unbounded_paged() - .unwrap() - ); - }); - } -} - -#[cfg(test)] -mod base_miner { - use std::vec; - - use super::*; - use crate::{mock::*, Snapshot}; - use frame_election_provider_support::TryFromUnboundedPagedSupports; - use sp_npos_elections::Support; - use sp_runtime::PerU16; - - #[test] - fn pagination_does_not_affect_score() { - let score_1 = ExtBuilder::unsigned() - .pages(1) - .voter_per_page(12) - .build_unchecked() - .execute_with(|| { - roll_to_snapshot_created(); - mine_full_solution().unwrap().score - }); - let score_2 = ExtBuilder::unsigned() - .pages(2) - .voter_per_page(6) - .build_unchecked() - .execute_with(|| { - roll_to_snapshot_created(); - mine_full_solution().unwrap().score - }); - let score_3 = ExtBuilder::unsigned() - .pages(3) - .voter_per_page(4) - .build_unchecked() - .execute_with(|| { - roll_to_snapshot_created(); - mine_full_solution().unwrap().score - }); - - assert_eq!(score_1, score_2); - assert_eq!(score_2, score_3); - } - - #[test] - fn mine_solution_single_page_works() { - ExtBuilder::unsigned().pages(1).voter_per_page(8).build_and_execute(|| { - roll_to_snapshot_created(); - - ensure_voters(1, 8); - ensure_targets(1, 4); - - assert_eq!( - Snapshot::::voters(0) - .unwrap() - .into_iter() - .map(|(x, _, _)| x) - .collect::>(), - vec![1, 2, 3, 4, 5, 6, 7, 8] - ); - - let paged = mine_full_solution().unwrap(); - assert_eq!(paged.solution_pages.len(), 1); - - // this solution must be feasible and submittable. 
- OffchainWorkerMiner::::base_check_solution(&paged, None, true, "mined") - .unwrap(); - - // now do a realistic full verification - load_mock_signed_and_start(paged.clone()); - let supports = roll_to_full_verification(); - - assert_eq!( - supports, - vec![vec![ - (10, Support { total: 30, voters: vec![(1, 10), (8, 10), (4, 5), (5, 5)] }), - ( - 40, - Support { - total: 40, - voters: vec![(2, 10), (3, 10), (6, 10), (4, 5), (5, 5)] - } - ) - ]] - .try_from_unbounded_paged() - .unwrap() - ); - - // NOTE: this is the same as the score of any other test that contains the first 8 - // voters, we already test for this in `pagination_does_not_affect_score`. - assert_eq!( - paged.score, - ElectionScore { minimal_stake: 30, sum_stake: 70, sum_stake_squared: 2500 } - ); - }) - } - - #[test] - fn mine_solution_double_page_works() { - ExtBuilder::unsigned().pages(2).voter_per_page(4).build_and_execute(|| { - roll_to_snapshot_created(); - - // 2 pages of 8 voters - ensure_voters(2, 8); - // 1 page of 4 targets - ensure_targets(1, 4); - - // voters in pages. note the reverse page index. - assert_eq!( - Snapshot::::voters(0) - .unwrap() - .into_iter() - .map(|(x, _, _)| x) - .collect::>(), - vec![5, 6, 7, 8] - ); - assert_eq!( - Snapshot::::voters(1) - .unwrap() - .into_iter() - .map(|(x, _, _)| x) - .collect::>(), - vec![1, 2, 3, 4] - ); - // targets in pages. - assert_eq!(Snapshot::::targets().unwrap(), vec![10, 20, 30, 40]); - let paged = mine_full_solution().unwrap(); - - assert_eq!( - paged.solution_pages, - vec![ - TestNposSolution { - // voter 6 (index 1) is backing 40 (index 3). 
- // voter 8 (index 3) is backing 10 (index 0) - votes1: vec![(1, 3), (3, 0)], - // voter 5 (index 0) is backing 40 (index 10) and 10 (index 0) - votes2: vec![(0, [(0, PerU16::from_parts(32768))], 3)], - ..Default::default() - }, - TestNposSolution { - // voter 1 (index 0) is backing 10 (index 0) - // voter 2 (index 1) is backing 40 (index 3) - // voter 3 (index 2) is backing 40 (index 3) - votes1: vec![(0, 0), (1, 3), (2, 3)], - // voter 4 (index 3) is backing 40 (index 10) and 10 (index 0) - votes2: vec![(3, [(0, PerU16::from_parts(32768))], 3)], - ..Default::default() - }, - ] - ); - - // this solution must be feasible and submittable. - OffchainWorkerMiner::::base_check_solution(&paged, None, false, "mined") - .unwrap(); - - // it must also be verified in the verifier - load_mock_signed_and_start(paged.clone()); - let supports = roll_to_full_verification(); - - assert_eq!( - supports, - vec![ - // page0, supports from voters 5, 6, 7, 8 - vec![ - (10, Support { total: 15, voters: vec![(8, 10), (5, 5)] }), - (40, Support { total: 15, voters: vec![(6, 10), (5, 5)] }) - ], - // page1 supports from voters 1, 2, 3, 4 - vec![ - (10, Support { total: 15, voters: vec![(1, 10), (4, 5)] }), - (40, Support { total: 25, voters: vec![(2, 10), (3, 10), (4, 5)] }) - ] - ] - .try_from_unbounded_paged() - .unwrap() - ); - - assert_eq!( - paged.score, - ElectionScore { minimal_stake: 30, sum_stake: 70, sum_stake_squared: 2500 } - ); - }) - } - - #[test] - fn mine_solution_triple_page_works() { - ExtBuilder::unsigned().pages(3).voter_per_page(4).build_and_execute(|| { - roll_to_snapshot_created(); - - ensure_voters(3, 12); - ensure_targets(1, 4); - - // voters in pages. note the reverse page index. 
- assert_eq!( - Snapshot::::voters(2) - .unwrap() - .into_iter() - .map(|(x, _, _)| x) - .collect::>(), - vec![1, 2, 3, 4] - ); - assert_eq!( - Snapshot::::voters(1) - .unwrap() - .into_iter() - .map(|(x, _, _)| x) - .collect::>(), - vec![5, 6, 7, 8] - ); - assert_eq!( - Snapshot::::voters(0) - .unwrap() - .into_iter() - .map(|(x, _, _)| x) - .collect::>(), - vec![10, 20, 30, 40] - ); - - let paged = mine_full_solution().unwrap(); - assert_eq!( - paged.solution_pages, - vec![ - TestNposSolution { votes1: vec![(2, 2), (3, 3)], ..Default::default() }, - TestNposSolution { - votes1: vec![(2, 2)], - votes2: vec![ - (0, [(2, PerU16::from_parts(32768))], 3), - (1, [(2, PerU16::from_parts(32768))], 3) - ], - ..Default::default() - }, - TestNposSolution { - votes1: vec![(2, 3), (3, 3)], - votes2: vec![(1, [(2, PerU16::from_parts(32768))], 3)], - ..Default::default() - }, - ] - ); - - // this solution must be feasible and submittable. - OffchainWorkerMiner::::base_check_solution(&paged, None, true, "mined") - .unwrap(); - // now do a realistic full verification - load_mock_signed_and_start(paged.clone()); - let supports = roll_to_full_verification(); - - assert_eq!( - supports, - vec![ - // page 0: self-votes. 
- vec![ - (30, Support { total: 30, voters: vec![(30, 30)] }), - (40, Support { total: 40, voters: vec![(40, 40)] }) - ], - // page 1: 5, 6, 7, 8 - vec![ - (30, Support { total: 20, voters: vec![(7, 10), (5, 5), (6, 5)] }), - (40, Support { total: 10, voters: vec![(5, 5), (6, 5)] }) - ], - // page 2: 1, 2, 3, 4 - vec![ - (30, Support { total: 5, voters: vec![(2, 5)] }), - (40, Support { total: 25, voters: vec![(3, 10), (4, 10), (2, 5)] }) - ] - ] - .try_from_unbounded_paged() - .unwrap() - ); - - assert_eq!( - paged.score, - ElectionScore { minimal_stake: 55, sum_stake: 130, sum_stake_squared: 8650 } - ); - }) - } - - #[test] - fn mine_solution_choses_most_significant_pages() { - ExtBuilder::unsigned().pages(2).voter_per_page(4).build_and_execute(|| { - roll_to_snapshot_created(); - - ensure_voters(2, 8); - ensure_targets(1, 4); - - // these folks should be ignored safely. - assert_eq!( - Snapshot::::voters(0) - .unwrap() - .into_iter() - .map(|(x, _, _)| x) - .collect::>(), - vec![5, 6, 7, 8] - ); - // voters in pages 1, this is the most significant page. - assert_eq!( - Snapshot::::voters(1) - .unwrap() - .into_iter() - .map(|(x, _, _)| x) - .collect::>(), - vec![1, 2, 3, 4] - ); - - // now we ask for just 1 page of solution. - let paged = mine_solution(1).unwrap(); - - assert_eq!( - paged.solution_pages, - vec![TestNposSolution { - // voter 1 (index 0) is backing 10 (index 0) - // voter 2 (index 1) is backing 40 (index 3) - // voter 3 (index 2) is backing 40 (index 3) - votes1: vec![(0, 0), (1, 3), (2, 3)], - // voter 4 (index 3) is backing 40 (index 10) and 10 (index 0) - votes2: vec![(3, [(0, PerU16::from_parts(32768))], 3)], - ..Default::default() - }] - ); - - // this solution must be feasible and submittable. - OffchainWorkerMiner::::base_check_solution(&paged, None, true, "mined") - .unwrap(); - // now do a realistic full verification. 
- load_mock_signed_and_start(paged.clone()); - let supports = roll_to_full_verification(); - - assert_eq!( - supports, - vec![ - // page0: non existent. - vec![], - // page1 supports from voters 1, 2, 3, 4 - vec![ - (10, Support { total: 15, voters: vec![(1, 10), (4, 5)] }), - (40, Support { total: 25, voters: vec![(2, 10), (3, 10), (4, 5)] }) - ] - ] - .try_from_unbounded_paged() - .unwrap() - ); - - assert_eq!( - paged.score, - ElectionScore { minimal_stake: 15, sum_stake: 40, sum_stake_squared: 850 } - ); - }) - } - - #[test] - fn mine_solution_2_out_of_3_pages() { - ExtBuilder::unsigned().pages(3).voter_per_page(4).build_and_execute(|| { - roll_to_snapshot_created(); - - ensure_voters(3, 12); - ensure_targets(1, 4); - - assert_eq!( - Snapshot::::voters(0) - .unwrap() - .into_iter() - .map(|(x, _, _)| x) - .collect::>(), - vec![10, 20, 30, 40] - ); - assert_eq!( - Snapshot::::voters(1) - .unwrap() - .into_iter() - .map(|(x, _, _)| x) - .collect::>(), - vec![5, 6, 7, 8] - ); - assert_eq!( - Snapshot::::voters(2) - .unwrap() - .into_iter() - .map(|(x, _, _)| x) - .collect::>(), - vec![1, 2, 3, 4] - ); - - // now we ask for just 1 page of solution. - let paged = mine_solution(2).unwrap(); - - // this solution must be feasible and submittable. 
- OffchainWorkerMiner::::base_check_solution(&paged, None, true, "mined") - .unwrap(); - - assert_eq!( - paged.solution_pages, - vec![ - // this can be "pagified" to snapshot at index 1, which contains 5, 6, 7, 8 - // in which: - // 6 (index:1) votes for 40 (index:3) - // 8 (index:1) votes for 10 (index:0) - // 5 votes for both 10 and 40 - TestNposSolution { - votes1: vec![(1, 3), (3, 0)], - votes2: vec![(0, [(0, PerU16::from_parts(32768))], 3)], - ..Default::default() - }, - // this can be 'pagified" to snapshot at index 2, which contains 1, 2, 3, 4 - // in which: - // 1 (index:0) votes for 10 (index:0) - // 2 (index:1) votes for 40 (index:3) - // 3 (index:2) votes for 40 (index:3) - // 4 votes for both 10 and 40 - TestNposSolution { - votes1: vec![(0, 0), (1, 3), (2, 3)], - votes2: vec![(3, [(0, PerU16::from_parts(32768))], 3)], - ..Default::default() - } - ] - ); - - // this solution must be feasible and submittable. - OffchainWorkerMiner::::base_check_solution(&paged, None, true, "mined") - .unwrap(); - // now do a realistic full verification. - load_mock_signed_and_start(paged.clone()); - let supports = roll_to_full_verification(); - - assert_eq!( - supports, - vec![ - // empty page 0. 
- vec![], - // supports from voters 5, 6, 7, 8 - vec![ - (10, Support { total: 15, voters: vec![(8, 10), (5, 5)] }), - (40, Support { total: 15, voters: vec![(6, 10), (5, 5)] }) - ], - // supports from voters 1, 2, 3, 4 - vec![ - (10, Support { total: 15, voters: vec![(1, 10), (4, 5)] }), - (40, Support { total: 25, voters: vec![(2, 10), (3, 10), (4, 5)] }) - ] - ] - .try_from_unbounded_paged() - .unwrap() - ); - - assert_eq!( - paged.score, - ElectionScore { minimal_stake: 30, sum_stake: 70, sum_stake_squared: 2500 } - ); - }) - } - - #[test] - fn can_reduce_solution() { - ExtBuilder::unsigned().build_and_execute(|| { - roll_to_snapshot_created(); - let full_edges = OffchainWorkerMiner::::mine_solution(Pages::get(), false) - .unwrap() - .solution_pages - .iter() - .fold(0, |acc, x| acc + x.edge_count()); - let reduced_edges = OffchainWorkerMiner::::mine_solution(Pages::get(), true) - .unwrap() - .solution_pages - .iter() - .fold(0, |acc, x| acc + x.edge_count()); - - assert!(reduced_edges < full_edges, "{} < {} not fulfilled", reduced_edges, full_edges); - }) - } - - #[test] - fn trim_backers_per_page_works() { - ExtBuilder::unsigned() - .max_backers_per_winner(5) - .voter_per_page(8) - .build_and_execute(|| { - // 10 and 40 are the default winners, we add a lot more votes to them. - for i in 100..105 { - VOTERS.with(|v| v.borrow_mut().push((i, i - 96, vec![10].try_into().unwrap()))); - } - roll_to_snapshot_created(); - - ensure_voters(3, 17); - - // now we let the miner mine something for us.. - let paged = mine_full_solution().unwrap(); - load_mock_signed_and_start(paged.clone()); - - // this must be correct - let supports = roll_to_full_verification(); - - // 10 has no more than 5 backings, and from the new voters that we added in this - // test, the most staked ones stayed (103, 104) and the rest trimmed. 
- assert_eq!( - supports, - vec![ - // 1 backing for 10 - vec![(10, Support { total: 8, voters: vec![(104, 8)] })], - // 2 backings for 10 - vec![ - (10, Support { total: 17, voters: vec![(10, 10), (103, 7)] }), - (40, Support { total: 40, voters: vec![(40, 40)] }) - ], - // 20 backings for 10 - vec![ - (10, Support { total: 20, voters: vec![(1, 10), (8, 10)] }), - ( - 40, - Support { - total: 40, - voters: vec![(2, 10), (3, 10), (5, 10), (6, 10)] - } - ) - ] - ] - .try_from_unbounded_paged() - .unwrap() - ); - }) - } - - #[test] - #[should_panic] - fn trim_backers_final_works() { - ExtBuilder::unsigned() - .max_backers_per_winner_final(3) - .pages(3) - .build_and_execute(|| { - roll_to_snapshot_created(); - - let paged = mine_full_solution().unwrap(); - load_mock_signed_and_start(paged.clone()); - - // this must be correct - let _supports = roll_to_full_verification(); - - assert_eq!( - verifier_events(), - vec![ - verifier::Event::Verified(2, 2), - verifier::Event::Verified(1, 2), - verifier::Event::Verified(0, 2), - verifier::Event::VerificationFailed( - 0, - verifier::FeasibilityError::FailedToBoundSupport - ) - ] - ); - todo!("miner should trim max backers final, maybe"); - - // assert_eq!( - // supports, - // vec![ - // // 1 backing for 10 - // vec![(10, Support { total: 8, voters: vec![(104, 8)] })], - // // 2 backings for 10 - // vec![ - // (10, Support { total: 17, voters: vec![(10, 10), (103, 7)] }), - // (40, Support { total: 40, voters: vec![(40, 40)] }) - // ], - // // 20 backings for 10 - // vec![ - // (10, Support { total: 20, voters: vec![(1, 10), (8, 10)] }), - // ( - // 40, - // Support { - // total: 40, - // voters: vec![(2, 10), (3, 10), (4, 10), (6, 10)] - // } - // ) - // ] - // ] - // .try_from_unbounded_paged() - // .unwrap() - // ); - }); - } -} - -#[cfg(test)] -mod offchain_worker_miner { - use crate::{verifier::Verifier, CommonError}; - use frame_support::traits::Hooks; - use sp_runtime::offchain::storage_lock::{BlockAndTime, 
StorageLock}; - - use super::*; - use crate::mock::*; - - #[test] - fn lock_prevents_frequent_execution() { - let (mut ext, _) = ExtBuilder::unsigned().build_offchainify(); - ext.execute_with_sanity_checks(|| { - let offchain_repeat = ::OffchainRepeat::get(); - - // first execution -- okay. - assert!(OffchainWorkerMiner::::ensure_offchain_repeat_frequency(25).is_ok()); - - // next block: rejected. - assert_noop!( - OffchainWorkerMiner::::ensure_offchain_repeat_frequency(26), - OffchainMinerError::Lock("recently executed.") - ); - - // allowed after `OFFCHAIN_REPEAT` - assert!(OffchainWorkerMiner::::ensure_offchain_repeat_frequency( - (26 + offchain_repeat).into() - ) - .is_ok()); - - // a fork like situation: re-execute last 3. - assert!(OffchainWorkerMiner::::ensure_offchain_repeat_frequency( - (26 + offchain_repeat - 3).into() - ) - .is_err()); - assert!(OffchainWorkerMiner::::ensure_offchain_repeat_frequency( - (26 + offchain_repeat - 2).into() - ) - .is_err()); - assert!(OffchainWorkerMiner::::ensure_offchain_repeat_frequency( - (26 + offchain_repeat - 1).into() - ) - .is_err()); - }) - } - - #[test] - fn lock_released_after_successful_execution() { - // first, ensure that a successful execution releases the lock - let (mut ext, pool) = ExtBuilder::unsigned().build_offchainify(); - ext.execute_with_sanity_checks(|| { - let guard = StorageValueRef::persistent(&OffchainWorkerMiner::::OFFCHAIN_LOCK); - let last_block = - StorageValueRef::persistent(&OffchainWorkerMiner::::OFFCHAIN_LAST_BLOCK); - - roll_to(25); - assert!(MultiBlock::current_phase().is_unsigned()); - - // initially, the lock is not set. - assert!(guard.get::().unwrap().is_none()); - - // a successful a-z execution. - UnsignedPallet::offchain_worker(25); - assert_eq!(pool.read().transactions.len(), 1); - - // afterwards, the lock is not set either.. 
- assert!(guard.get::().unwrap().is_none()); - assert_eq!(last_block.get::().unwrap(), Some(25)); - }); - } - - #[test] - fn lock_prevents_overlapping_execution() { - // ensure that if the guard is in hold, a new execution is not allowed. - let (mut ext, pool) = ExtBuilder::unsigned().build_offchainify(); - ext.execute_with_sanity_checks(|| { - roll_to(25); - assert!(MultiBlock::current_phase().is_unsigned()); - - // artificially set the value, as if another thread is mid-way. - let mut lock = StorageLock::>::with_block_deadline( - OffchainWorkerMiner::::OFFCHAIN_LOCK, - UnsignedPhase::get().saturated_into(), - ); - let guard = lock.lock(); - - // nothing submitted. - UnsignedPallet::offchain_worker(25); - assert_eq!(pool.read().transactions.len(), 0); - UnsignedPallet::offchain_worker(26); - assert_eq!(pool.read().transactions.len(), 0); - - drop(guard); - - // 🎉 ! - UnsignedPallet::offchain_worker(25); - assert_eq!(pool.read().transactions.len(), 1); - }); - } - - #[test] - fn initial_ocw_runs_and_saves_new_cache() { - let (mut ext, pool) = ExtBuilder::unsigned().build_offchainify(); - ext.execute_with_sanity_checks(|| { - roll_to(25); - assert_eq!(MultiBlock::current_phase(), Phase::Unsigned(25)); - - let last_block = - StorageValueRef::persistent(&OffchainWorkerMiner::::OFFCHAIN_LAST_BLOCK); - let cache = - StorageValueRef::persistent(&OffchainWorkerMiner::::OFFCHAIN_CACHED_CALL); - - assert_eq!(last_block.get::(), Ok(None)); - assert_eq!(cache.get::>(), Ok(None)); - - // creates, caches, submits without expecting previous cache value - UnsignedPallet::offchain_worker(25); - assert_eq!(pool.read().transactions.len(), 1); - - assert_eq!(last_block.get::(), Ok(Some(25))); - assert!(matches!(cache.get::>(), Ok(Some(_)))); - }) - } - - #[test] - fn ocw_pool_submission_works() { - let (mut ext, pool) = ExtBuilder::unsigned().build_offchainify(); - ext.execute_with_sanity_checks(|| { - roll_to_with_ocw(25, None); - assert_eq!(MultiBlock::current_phase(), 
Phase::Unsigned(25)); - // OCW must have submitted now - - let encoded = pool.read().transactions[0].clone(); - let extrinsic: Extrinsic = codec::Decode::decode(&mut &*encoded).unwrap(); - let call = extrinsic.function; - assert!(matches!( - call, - crate::mock::RuntimeCall::UnsignedPallet( - crate::unsigned::Call::submit_unsigned { .. } - ) - )); - }) - } - - #[test] - fn resubmits_after_offchain_repeat() { - let (mut ext, pool) = ExtBuilder::unsigned().build_offchainify(); - ext.execute_with_sanity_checks(|| { - let offchain_repeat = ::OffchainRepeat::get(); - roll_to(25); - assert_eq!(MultiBlock::current_phase(), Phase::Unsigned(25)); - - assert!(OffchainWorkerMiner::::cached_solution().is_none()); - // creates, caches, submits without expecting previous cache value - UnsignedPallet::offchain_worker(25); - assert_eq!(pool.read().transactions.len(), 1); - let tx_cache = pool.read().transactions[0].clone(); - // assume that the tx has been processed - pool.try_write().unwrap().transactions.clear(); - - // attempts to resubmit the tx after the threshold has expired. - UnsignedPallet::offchain_worker(25 + 1 + offchain_repeat); - assert_eq!(pool.read().transactions.len(), 1); - - // resubmitted tx is identical to first submission - let tx = &pool.read().transactions[0]; - assert_eq!(&tx_cache, tx); - }) - } - - #[test] - fn regenerates_and_resubmits_after_offchain_repeat_if_no_cache() { - let (mut ext, pool) = ExtBuilder::unsigned().build_offchainify(); - ext.execute_with_sanity_checks(|| { - let offchain_repeat = ::OffchainRepeat::get(); - roll_to(25); - - assert!(OffchainWorkerMiner::::cached_solution().is_none()); - // creates, caches, submits without expecting previous cache value. - UnsignedPallet::offchain_worker(25); - assert_eq!(pool.read().transactions.len(), 1); - let tx_cache = pool.read().transactions[0].clone(); - // assume that the tx has been processed - pool.try_write().unwrap().transactions.clear(); - - // remove the cached submitted tx. 
- // this ensures that when the resubmit window rolls around, we're ready to regenerate - // from scratch if necessary - let mut call_cache = - StorageValueRef::persistent(&OffchainWorkerMiner::::OFFCHAIN_CACHED_CALL); - assert!(matches!(call_cache.get::>(), Ok(Some(_)))); - call_cache.clear(); - - // attempts to resubmit the tx after the threshold has expired - UnsignedPallet::offchain_worker(25 + 1 + offchain_repeat); - assert_eq!(pool.read().transactions.len(), 1); - - // resubmitted tx is identical to first submission - let tx = &pool.read().transactions[0]; - assert_eq!(&tx_cache, tx); - }) - } - - #[test] - fn altering_snapshot_invalidates_solution_cache() { - // by infeasible, we mean here that if the snapshot fingerprint has changed. - let (mut ext, pool) = ExtBuilder::unsigned().build_offchainify(); - ext.execute_with_sanity_checks(|| { - let offchain_repeat = ::OffchainRepeat::get(); - roll_to_with_ocw(25, None); - - // something is submitted.. - assert_eq!(pool.read().transactions.len(), 1); - pool.try_write().unwrap().transactions.clear(); - - // ..and cached - let call_cache = - StorageValueRef::persistent(&OffchainWorkerMiner::::OFFCHAIN_CACHED_CALL); - assert!(matches!(call_cache.get::>(), Ok(Some(_)))); - - // now change the snapshot, ofc this is rare in reality. This makes the cached call - // infeasible. - assert_eq!(crate::Snapshot::::targets().unwrap(), vec![10, 20, 30, 40]); - let pre_fingerprint = crate::Snapshot::::fingerprint(); - crate::Snapshot::::remove_target(0); - let post_fingerprint = crate::Snapshot::::fingerprint(); - assert_eq!(crate::Snapshot::::targets().unwrap(), vec![20, 30, 40]); - assert_ne!(pre_fingerprint, post_fingerprint); - - // now run ocw again - roll_to_with_ocw(25 + offchain_repeat + 1, None); - // nothing is submitted this time.. - assert_eq!(pool.read().transactions.len(), 0); - // .. and the cache is gone. 
- assert_eq!(call_cache.get::>(), Ok(None)); - - // upon the next run, we re-generate and submit something fresh again. - roll_to_with_ocw(25 + offchain_repeat + offchain_repeat + 2, None); - assert_eq!(pool.read().transactions.len(), 1); - assert!(matches!(call_cache.get::>(), Ok(Some(_)))); - }) - } - - #[test] - fn wont_resubmit_if_weak_score() { - // common case, if the score is weak, don't bother with anything, ideally check from the - // logs that we don't run feasibility in this call path. Score check must come before. - let (mut ext, pool) = ExtBuilder::unsigned().build_offchainify(); - ext.execute_with_sanity_checks(|| { - let offchain_repeat = ::OffchainRepeat::get(); - // unfortunately there's no pretty way to run the ocw code such that it generates a - // weak, but correct solution. We just write it to cache directly. - - roll_to_with_ocw(25, Some(pool.clone())); - - // something is submitted.. - assert_eq!(pool.read().transactions.len(), 1); - - // ..and cached - let call_cache = - StorageValueRef::persistent(&OffchainWorkerMiner::::OFFCHAIN_CACHED_CALL); - assert!(matches!(call_cache.get::>(), Ok(Some(_)))); - - // and replace it with something weak. - let weak_solution = raw_paged_from_supports( - vec![vec![(40, Support { total: 10, voters: vec![(3, 10)] })]], - 0, - ); - let weak_call = crate::unsigned::Call::::submit_unsigned { - paged_solution: Box::new(weak_solution), - }; - call_cache.set(&weak_call); - - // run again - roll_to_with_ocw(25 + offchain_repeat + 1, Some(pool.clone())); - // nothing is submitted this time.. - assert_eq!(pool.read().transactions.len(), 0); - // .. and the cache IS STILL THERE! 
- assert!(matches!(call_cache.get::>(), Ok(Some(_)))); - }) - } - - #[test] - fn ocw_submission_e2e_works() { - let (mut ext, pool) = ExtBuilder::unsigned().build_offchainify(); - ext.execute_with_sanity_checks(|| { - assert!(VerifierPallet::queued_score().is_none()); - roll_to_with_ocw(25 + 1, Some(pool.clone())); - assert!(VerifierPallet::queued_score().is_some()); - - // call is cached. - let call_cache = - StorageValueRef::persistent(&OffchainWorkerMiner::::OFFCHAIN_CACHED_CALL); - assert!(matches!(call_cache.get::>(), Ok(Some(_)))); - - // pool is empty - assert_eq!(pool.read().transactions.len(), 0); - }) - } - - #[test] - fn multi_page_ocw_e2e_submits_and_queued_msp_only() { - let (mut ext, pool) = ExtBuilder::unsigned().build_offchainify(); - ext.execute_with_sanity_checks(|| { - assert!(VerifierPallet::queued_score().is_none()); - - roll_to_with_ocw(25 + 1, Some(pool.clone())); - - assert_eq!( - multi_block_events(), - vec![ - crate::Event::PhaseTransitioned { from: Phase::Off, to: Phase::Snapshot(2) }, - crate::Event::PhaseTransitioned { - from: Phase::Snapshot(0), - to: Phase::Unsigned(25) - } - ] - ); - assert_eq!( - verifier_events(), - vec![ - crate::verifier::Event::Verified(2, 2), - crate::verifier::Event::Queued( - ElectionScore { minimal_stake: 15, sum_stake: 40, sum_stake_squared: 850 }, - None - ) - ] - ); - - assert!(VerifierPallet::queued_score().is_some()); - - // pool is empty - assert_eq!(pool.read().transactions.len(), 0); - }) - } - - #[test] - fn will_not_mine_if_not_enough_winners() { - // also see `trim_weight_too_much_makes_solution_invalid`. - let (mut ext, _) = ExtBuilder::unsigned().desired_targets(77).build_offchainify(); - ext.execute_with_sanity_checks(|| { - roll_to_unsigned_open(); - ensure_voters(3, 12); - - // beautiful errors, isn't it? 
- assert_eq!( - OffchainWorkerMiner::::mine_checked_call().unwrap_err(), - OffchainMinerError::Common(CommonError::WrongWinnerCount) - ); - }); - } -} diff --git a/substrate/frame/election-provider-multi-block/src/unsigned/mod.rs b/substrate/frame/election-provider-multi-block/src/unsigned/mod.rs deleted file mode 100644 index ca6766efd9062..0000000000000 --- a/substrate/frame/election-provider-multi-block/src/unsigned/mod.rs +++ /dev/null @@ -1,633 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! ## The unsigned phase, and its miner. -//! -//! This pallet deals with unsigned submissions. These are backup, single page submissions from -//! validators. -//! -//! This pallet has two miners: -//! -//! * [`unsigned::miner::BaseMiner`], which is the basis of how the mining works. It can be used by -//! a separate crate by providing an implementation of [`unsigned::miner::MinerConfig`]. And, it -//! is used in: -//! * `Miner::OffchainWorkerMiner`, which is a specialized miner for the single page mining by -//! validators in the `offchain_worker` hook. -//! -//! ## Future Idea: Multi-Page unsigned submission -//! -//! the following is the idea of how to implement multi-page unsigned, which we don't have. -//! -//! ## Multi-block unsigned submission -//! -//! 
The process of allowing validators to coordinate to submit a multi-page solution is new to this -//! pallet, and non-existent in the multi-phase pallet. The process is as follows: -//! -//! All validators will run their miners and compute the full paginated solution. They submit all -//! pages as individual unsigned transactions to their local tx-pool. -//! -//! Upon validation, if any page is now present the corresponding transaction is dropped. -//! -//! At each block, the first page that may be valid is included as a high priority operational -//! transaction. This page is validated on the fly to be correct. Since this transaction is sourced -//! from a validator, we can panic if they submit an invalid transaction. -//! -//! Then, once the final page is submitted, some extra checks are done, as explained in -//! [`crate::verifier`]: -//! -//! 1. bounds -//! 2. total score -//! -//! These checks might still fail. If they do, the solution is dropped. At this point, we don't know -//! which validator may have submitted a slightly-faulty solution. -//! -//! In order to prevent this, the validation process always includes a check to ensure all of the -//! previous pages that have been submitted match what the local validator has computed. If they -//! match, the validator knows that they are putting skin in a game that is valid. -//! -//! If any bad paged are detected, the next validator can bail. This process means: -//! -//! * As long as all validators are honest, and run the same miner code, a correct solution is -//! found. -//! * As little as one malicious validator can stall the process, but no one is accidentally -//! slashed, and no panic happens. -//! -//! A future improvement should keep track of submitters, and report a slash if it occurs. Or, if -//! the signed process is bullet-proof, we can be okay with the status quo. 
- -/// Export weights -pub use crate::weights::measured::pallet_election_provider_multi_block_unsigned::*; -/// Exports of this pallet -pub use pallet::*; -#[cfg(feature = "runtime-benchmarks")] -mod benchmarking; - -/// The miner. -pub mod miner; - -#[frame_support::pallet] -mod pallet { - use super::WeightInfo; - use crate::{ - types::*, - unsigned::miner::{self}, - verifier::Verifier, - CommonError, - }; - use frame_support::pallet_prelude::*; - use frame_system::{offchain::CreateInherent, pallet_prelude::*}; - use sp_runtime::traits::SaturatedConversion; - use sp_std::prelude::*; - - /// convert a [`crate::CommonError`] to a custom InvalidTransaction with the inner code being - /// the index of the variant. - fn base_error_to_invalid(error: CommonError) -> InvalidTransaction { - let index = error.encode().pop().unwrap_or(0); - InvalidTransaction::Custom(index) - } - - pub(crate) type UnsignedWeightsOf = ::WeightInfo; - - #[pallet::config] - #[pallet::disable_frame_system_supertrait_check] - pub trait Config: crate::Config + CreateInherent> { - /// The repeat threshold of the offchain worker. - /// - /// For example, if it is 5, that means that at least 5 blocks will elapse between attempts - /// to submit the worker's solution. - type OffchainRepeat: Get>; - - /// The solver used in hte offchain worker miner - type OffchainSolver: frame_election_provider_support::NposSolver< - AccountId = Self::AccountId, - >; - - /// The priority of the unsigned transaction submitted in the unsigned-phase - type MinerTxPriority: Get; - - /// Runtime weight information of this pallet. - type WeightInfo: WeightInfo; - } - - #[pallet::pallet] - pub struct Pallet(PhantomData); - - #[pallet::call] - impl Pallet { - /// Submit an unsigned solution. - /// - /// This works very much like an inherent, as only the validators are permitted to submit - /// anything. By default validators will compute this call in their `offchain_worker` hook - /// and try and submit it back. 
- /// - /// This is different from signed page submission mainly in that the solution page is - /// verified on the fly. - #[pallet::weight((UnsignedWeightsOf::::submit_unsigned(), DispatchClass::Operational))] - #[pallet::call_index(0)] - pub fn submit_unsigned( - origin: OriginFor, - paged_solution: Box>, - ) -> DispatchResultWithPostInfo { - ensure_none(origin)?; - // TODO: remove the panic from this function for now. - let error_message = "Invalid unsigned submission must produce invalid block and \ - deprive validator from their authoring reward."; - - // phase, round, claimed score, page-count and hash are checked in pre-dispatch. we - // don't check them here anymore. - debug_assert!(Self::validate_unsigned_checks(&paged_solution).is_ok()); - - let only_page = paged_solution - .solution_pages - .into_inner() - .pop() - .expect("length of `solution_pages` is always `1`, can be popped; qed."); - let claimed_score = paged_solution.score; - // `verify_synchronous` will internall queue and save the solution, we don't need to do - // it. - let _supports = ::verify_synchronous( - only_page, - claimed_score, - // must be valid against the msp - crate::Pallet::::msp(), - ) - .expect(error_message); - - sublog!( - info, - "unsigned", - "queued an unsigned solution with score {:?} and {} winners", - claimed_score, - _supports.len() - ); - - Ok(None.into()) - } - } - - #[pallet::validate_unsigned] - impl ValidateUnsigned for Pallet { - type Call = Call; - fn validate_unsigned(source: TransactionSource, call: &Self::Call) -> TransactionValidity { - if let Call::submit_unsigned { paged_solution, .. 
} = call { - match source { - TransactionSource::Local | TransactionSource::InBlock => { /* allowed */ }, - _ => return InvalidTransaction::Call.into(), - } - - let _ = Self::validate_unsigned_checks(paged_solution.as_ref()) - .map_err(|err| { - sublog!( - debug, - "unsigned", - "unsigned transaction validation failed due to {:?}", - err - ); - err - }) - .map_err(base_error_to_invalid)?; - - ValidTransaction::with_tag_prefix("OffchainElection") - // The higher the score.minimal_stake, the better a paged_solution is. - .priority( - T::MinerTxPriority::get() - .saturating_add(paged_solution.score.minimal_stake.saturated_into()), - ) - // Used to deduplicate unsigned solutions: each validator should produce one - // paged_solution per round at most, and solutions are not propagate. - .and_provides(paged_solution.round) - // Transaction should stay in the pool for the duration of the unsigned phase. - .longevity(T::UnsignedPhase::get().saturated_into::()) - // We don't propagate this. This can never be validated at a remote node. - .propagate(false) - .build() - } else { - InvalidTransaction::Call.into() - } - } - - fn pre_dispatch(call: &Self::Call) -> Result<(), TransactionValidityError> { - if let Call::submit_unsigned { paged_solution, .. 
} = call { - Self::validate_unsigned_checks(paged_solution.as_ref()) - .map_err(base_error_to_invalid) - .map_err(Into::into) - } else { - Err(InvalidTransaction::Call.into()) - } - } - } - - #[pallet::hooks] - impl Hooks> for Pallet { - fn integrity_test() { - assert!( - UnsignedWeightsOf::::submit_unsigned().all_lte(T::BlockWeights::get().max_block), - "weight of `submit_unsigned` is too high" - ) - } - - #[cfg(feature = "try-runtime")] - fn try_state(now: BlockNumberFor) -> Result<(), sp_runtime::TryRuntimeError> { - Self::do_try_state(now) - } - - fn offchain_worker(now: BlockNumberFor) { - use sp_runtime::offchain::storage_lock::{BlockAndTime, StorageLock}; - - // Create a lock with the maximum deadline of number of blocks in the unsigned phase. - // This should only come useful in an **abrupt** termination of execution, otherwise the - // guard will be dropped upon successful execution. - let mut lock = - StorageLock::>>::with_block_deadline( - miner::OffchainWorkerMiner::::OFFCHAIN_LOCK, - T::UnsignedPhase::get().saturated_into(), - ); - - match lock.try_lock() { - Ok(_guard) => { - Self::do_synchronized_offchain_worker(now); - }, - Err(deadline) => { - sublog!( - debug, - "unsigned", - "offchain worker lock not released, deadline is {:?}", - deadline - ); - }, - }; - } - } - - impl Pallet { - /// Internal logic of the offchain worker, to be executed only when the offchain lock is - /// acquired with success. - fn do_synchronized_offchain_worker(now: BlockNumberFor) { - use miner::OffchainWorkerMiner; - - let current_phase = crate::Pallet::::current_phase(); - sublog!( - trace, - "unsigned", - "lock for offchain worker acquired. 
Phase = {:?}", - current_phase - ); - match current_phase { - Phase::Unsigned(opened) if opened == now => { - // Mine a new solution, cache it, and attempt to submit it - let initial_output = - OffchainWorkerMiner::::ensure_offchain_repeat_frequency(now) - .and_then(|_| OffchainWorkerMiner::::mine_check_save_submit()); - sublog!( - debug, - "unsigned", - "initial offchain worker output: {:?}", - initial_output - ); - }, - Phase::Unsigned(opened) if opened < now => { - // Try and resubmit the cached solution, and recompute ONLY if it is not - // feasible. - let resubmit_output = - OffchainWorkerMiner::::ensure_offchain_repeat_frequency(now).and_then( - |_| OffchainWorkerMiner::::restore_or_compute_then_maybe_submit(), - ); - sublog!( - debug, - "unsigned", - "resubmit offchain worker output: {:?}", - resubmit_output - ); - }, - _ => {}, - } - } - - /// The checks that should happen in the `ValidateUnsigned`'s `pre_dispatch` and - /// `validate_unsigned` functions. - /// - /// These check both for snapshot independent checks, and some checks that are specific to - /// the unsigned phase. - pub(crate) fn validate_unsigned_checks( - paged_solution: &PagedRawSolution, - ) -> Result<(), CommonError> { - Self::unsigned_specific_checks(paged_solution) - .and(crate::Pallet::::snapshot_independent_checks(paged_solution, None)) - .map_err(Into::into) - } - - /// The checks that are specific to the (this) unsigned pallet. - /// - /// ensure solution has the correct phase, and it has only 1 page. 
- pub fn unsigned_specific_checks( - paged_solution: &PagedRawSolution, - ) -> Result<(), CommonError> { - ensure!( - crate::Pallet::::current_phase().is_unsigned(), - CommonError::EarlySubmission - ); - ensure!(paged_solution.solution_pages.len() == 1, CommonError::WrongPageCount); - - Ok(()) - } - - #[cfg(any(test, feature = "runtime-benchmarks", feature = "try-runtime"))] - pub(crate) fn do_try_state( - _now: BlockNumberFor, - ) -> Result<(), sp_runtime::TryRuntimeError> { - Ok(()) - } - } -} - -#[cfg(test)] -mod validate_unsigned { - use frame_election_provider_support::Support; - use frame_support::{ - pallet_prelude::InvalidTransaction, - unsigned::{TransactionSource, TransactionValidityError, ValidateUnsigned}, - }; - - use super::Call; - use crate::{mock::*, types::*, verifier::Verifier}; - - #[test] - fn retracts_weak_score_accepts_threshold_better() { - ExtBuilder::unsigned() - .solution_improvement_threshold(sp_runtime::Perbill::from_percent(10)) - .build_and_execute(|| { - roll_to_snapshot_created(); - - let solution = mine_full_solution().unwrap(); - load_mock_signed_and_start(solution.clone()); - roll_to_full_verification(); - - // Some good solution is queued now. - assert_eq!( - ::queued_score(), - Some(ElectionScore { - minimal_stake: 55, - sum_stake: 130, - sum_stake_squared: 8650 - }) - ); - - roll_to_unsigned_open(); - - // this is just worse - let attempt = - fake_solution(ElectionScore { minimal_stake: 20, ..Default::default() }); - let call = Call::submit_unsigned { paged_solution: Box::new(attempt) }; - assert_eq!( - UnsignedPallet::validate_unsigned(TransactionSource::Local, &call).unwrap_err(), - TransactionValidityError::Invalid(InvalidTransaction::Custom(2)), - ); - - // this is better, but not enough better. 
- let insufficient_improvement = 55 * 105 / 100; - let attempt = fake_solution(ElectionScore { - minimal_stake: insufficient_improvement, - ..Default::default() - }); - let call = Call::submit_unsigned { paged_solution: Box::new(attempt) }; - assert_eq!( - UnsignedPallet::validate_unsigned(TransactionSource::Local, &call).unwrap_err(), - TransactionValidityError::Invalid(InvalidTransaction::Custom(2)), - ); - - // note that we now have to use a solution with 2 winners, just to pass all of the - // snapshot independent checks. - let mut paged = raw_paged_from_supports( - vec![vec![ - (40, Support { total: 10, voters: vec![(3, 5)] }), - (30, Support { total: 10, voters: vec![(3, 5)] }), - ]], - 0, - ); - let sufficient_improvement = 55 * 115 / 100; - paged.score = - ElectionScore { minimal_stake: sufficient_improvement, ..Default::default() }; - let call = Call::submit_unsigned { paged_solution: Box::new(paged) }; - assert!(UnsignedPallet::validate_unsigned(TransactionSource::Local, &call).is_ok()); - }) - } - - #[test] - fn retracts_wrong_round() { - ExtBuilder::unsigned().build_and_execute(|| { - roll_to_unsigned_open(); - - let mut attempt = - fake_solution(ElectionScore { minimal_stake: 5, ..Default::default() }); - attempt.round += 1; - let call = Call::submit_unsigned { paged_solution: Box::new(attempt) }; - - assert_eq!( - UnsignedPallet::validate_unsigned(TransactionSource::Local, &call).unwrap_err(), - // WrongRound is index 1 - TransactionValidityError::Invalid(InvalidTransaction::Custom(1)), - ); - }) - } - - #[test] - fn retracts_too_many_pages_unsigned() { - ExtBuilder::unsigned().build_and_execute(|| { - // NOTE: unsigned solutions should have just 1 page, regardless of the configured - // page count. 
- roll_to_unsigned_open(); - let attempt = mine_full_solution().unwrap(); - let call = Call::submit_unsigned { paged_solution: Box::new(attempt) }; - - assert_eq!( - UnsignedPallet::validate_unsigned(TransactionSource::Local, &call).unwrap_err(), - // WrongPageCount is index 3 - TransactionValidityError::Invalid(InvalidTransaction::Custom(3)), - ); - - let attempt = mine_solution(2).unwrap(); - let call = Call::submit_unsigned { paged_solution: Box::new(attempt) }; - - assert_eq!( - UnsignedPallet::validate_unsigned(TransactionSource::Local, &call).unwrap_err(), - TransactionValidityError::Invalid(InvalidTransaction::Custom(3)), - ); - - let attempt = mine_solution(1).unwrap(); - let call = Call::submit_unsigned { paged_solution: Box::new(attempt) }; - - assert!(UnsignedPallet::validate_unsigned(TransactionSource::Local, &call).is_ok(),); - }) - } - - #[test] - fn retracts_wrong_winner_count() { - ExtBuilder::unsigned().desired_targets(2).build_and_execute(|| { - roll_to_unsigned_open(); - - let paged = raw_paged_from_supports( - vec![vec![(40, Support { total: 10, voters: vec![(3, 10)] })]], - 0, - ); - - let call = Call::submit_unsigned { paged_solution: Box::new(paged) }; - - assert_eq!( - UnsignedPallet::validate_unsigned(TransactionSource::Local, &call).unwrap_err(), - // WrongWinnerCount is index 4 - TransactionValidityError::Invalid(InvalidTransaction::Custom(4)), - ); - }); - } - - #[test] - fn retracts_wrong_phase() { - ExtBuilder::unsigned().signed_phase(5, 0).build_and_execute(|| { - let solution = raw_paged_solution_low_score(); - let call = Call::submit_unsigned { paged_solution: Box::new(solution.clone()) }; - - // initial - assert_eq!(MultiBlock::current_phase(), Phase::Off); - assert!(matches!( - ::validate_unsigned( - TransactionSource::Local, - &call - ) - .unwrap_err(), - // because EarlySubmission is index 0. 
- TransactionValidityError::Invalid(InvalidTransaction::Custom(0)) - )); - assert!(matches!( - ::pre_dispatch(&call).unwrap_err(), - TransactionValidityError::Invalid(InvalidTransaction::Custom(0)) - )); - - // signed - roll_to(20); - assert_eq!(MultiBlock::current_phase(), Phase::Signed); - assert!(matches!( - ::validate_unsigned( - TransactionSource::Local, - &call - ) - .unwrap_err(), - TransactionValidityError::Invalid(InvalidTransaction::Custom(0)) - )); - assert!(matches!( - ::pre_dispatch(&call).unwrap_err(), - TransactionValidityError::Invalid(InvalidTransaction::Custom(0)) - )); - - // unsigned - roll_to(25); - assert!(MultiBlock::current_phase().is_unsigned()); - - assert_ok!(::validate_unsigned( - TransactionSource::Local, - &call - )); - assert_ok!(::pre_dispatch(&call)); - }) - } - - #[test] - fn priority_is_set() { - ExtBuilder::unsigned() - .miner_tx_priority(20) - .desired_targets(0) - .build_and_execute(|| { - roll_to(25); - assert!(MultiBlock::current_phase().is_unsigned()); - - let solution = - fake_solution(ElectionScore { minimal_stake: 5, ..Default::default() }); - let call = Call::submit_unsigned { paged_solution: Box::new(solution.clone()) }; - - assert_eq!( - ::validate_unsigned( - TransactionSource::Local, - &call - ) - .unwrap() - .priority, - 25 - ); - }) - } -} - -#[cfg(test)] -mod call { - use crate::{mock::*, verifier::Verifier, Snapshot}; - - #[test] - fn unsigned_submission_e2e() { - let (mut ext, pool) = ExtBuilder::unsigned().build_offchainify(); - ext.execute_with_sanity_checks(|| { - roll_to_snapshot_created(); - - // snapshot is created.. - assert_full_snapshot(); - // ..txpool is empty.. - assert_eq!(pool.read().transactions.len(), 0); - // ..but nothing queued. - assert_eq!(::queued_score(), None); - - // now the OCW should submit something. - roll_next_with_ocw(Some(pool.clone())); - assert_eq!(pool.read().transactions.len(), 1); - assert_eq!(::queued_score(), None); - - // and now it should be applied. 
- roll_next_with_ocw(Some(pool.clone())); - assert_eq!(pool.read().transactions.len(), 0); - assert!(matches!(::queued_score(), Some(_))); - }) - } - - #[test] - #[should_panic( - expected = "Invalid unsigned submission must produce invalid block and deprive validator from their authoring reward." - )] - fn unfeasible_solution_panics() { - let (mut ext, pool) = ExtBuilder::unsigned().build_offchainify(); - ext.execute_with_sanity_checks(|| { - roll_to_snapshot_created(); - - // snapshot is created.. - assert_full_snapshot(); - // ..txpool is empty.. - assert_eq!(pool.read().transactions.len(), 0); - // ..but nothing queued. - assert_eq!(::queued_score(), None); - - // now the OCW should submit something. - roll_next_with_ocw(Some(pool.clone())); - assert_eq!(pool.read().transactions.len(), 1); - assert_eq!(::queued_score(), None); - - // now we change the snapshot -- this should ensure that the solution becomes invalid. - // Note that we don't change the known fingerprint of the solution. - Snapshot::::remove_target(2); - - // and now it should be applied. - roll_next_with_ocw(Some(pool.clone())); - assert_eq!(pool.read().transactions.len(), 0); - assert!(matches!(::queued_score(), Some(_))); - }) - } -} diff --git a/substrate/frame/election-provider-multi-block/src/verifier/benchmarking.rs b/substrate/frame/election-provider-multi-block/src/verifier/benchmarking.rs deleted file mode 100644 index 4884d24d1513e..0000000000000 --- a/substrate/frame/election-provider-multi-block/src/verifier/benchmarking.rs +++ /dev/null @@ -1,234 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use crate::{ - verifier::{Config, Event, FeasibilityError, Pallet, Status, StatusStorage}, - CurrentPhase, Phase, -}; -use frame_benchmarking::v2::*; -use frame_election_provider_support::{ElectionDataProvider, NposSolution}; -use frame_support::pallet_prelude::*; -use sp_std::prelude::*; - -#[benchmarks(where T: crate::Config + crate::signed::Config + crate::unsigned::Config)] -mod benchmarks { - use super::*; - - // TODO: this is the epitome of bad DevEx because of generics.. create a nice one that works in - // frame_system. - fn events_for() -> Vec> { - frame_system::Pallet::::events() - .into_iter() - .map(|e| e.event) // convert to inner event - .filter_map(|e| { - let e = ::RuntimeEvent::from_ref(&e); - if let Ok(ev) = - <::RuntimeEvent as TryInto>>::try_into((*e).clone()) - { - Some(ev) - } else { - None - } - }) - .collect() - } - - #[benchmark] - fn on_initialize_valid_non_terminal() -> Result<(), BenchmarkError> { - // roll to signed validation, with a solution stored in the signed pallet - T::DataProvider::set_next_election(crate::Pallet::::reasonable_next_election()); - - crate::Pallet::::roll_to_signed_and_submit_full_solution(); - // roll to verification - crate::Pallet::::roll_until_matches(|| { - matches!(CurrentPhase::::get(), Phase::SignedValidation(_)) - }); - - // start signal must have been sent by now - assert_eq!(StatusStorage::::get(), Status::Ongoing(crate::Pallet::::msp())); - - #[block] - { - crate::Pallet::::roll_next(true, false); - } - assert_eq!(StatusStorage::::get(), Status::Ongoing(crate::Pallet::::msp() - 1)); 
- - Ok(()) - } - - #[benchmark] - fn on_initialize_valid_terminal() -> Result<(), BenchmarkError> { - // roll to signed validation, with a solution stored in the signed pallet - T::DataProvider::set_next_election(crate::Pallet::::reasonable_next_election()); - assert!( - T::SignedValidationPhase::get() >= T::Pages::get().into(), - "Signed validation phase must be larger than the number of pages" - ); - - crate::Pallet::::roll_to_signed_and_submit_full_solution(); - // roll to before the last page of verification - crate::Pallet::::roll_until_matches(|| { - matches!(CurrentPhase::::get(), Phase::SignedValidation(_)) - }); - // start signal must have been sent by now - assert_eq!(StatusStorage::::get(), Status::Ongoing(crate::Pallet::::msp())); - for _ in 0..(T::Pages::get() - 1) { - crate::Pallet::::roll_next(true, false); - } - - // we must have verified all pages by now, minus the last one. - assert!(matches!( - &events_for::()[..], - [Event::Verified(_, _), .., Event::Verified(1, _)] - )); - - // verify the last page. - #[block] - { - crate::Pallet::::roll_next(true, false); - } - - // we are done - assert_eq!(StatusStorage::::get(), Status::Nothing); - // last event is success - assert!(matches!( - &events_for::()[..], - [Event::Verified(_, _), .., Event::Verified(0, _), Event::Queued(_, None)] - )); - - Ok(()) - } - - #[benchmark] - fn on_initialize_invalid_terminal() -> Result<(), BenchmarkError> { - // this is the verification of the current page + removing all of the previously valid - // pages. The worst case is therefore when the last page is invalid, for example the final - // score. 
- assert!(T::Pages::get() >= 2, "benchmark only works if we have more than 2 pages"); - - // roll to signed validation, with a solution stored in the signed pallet - T::DataProvider::set_next_election(crate::Pallet::::reasonable_next_election()); - - // but this solution is corrupt - let mut paged_solution = crate::Pallet::::roll_to_signed_and_mine_full_solution(); - paged_solution.score.minimal_stake -= 1; - crate::Pallet::::submit_full_solution(paged_solution); - - // roll to verification - crate::Pallet::::roll_until_matches(|| { - matches!(CurrentPhase::::get(), Phase::SignedValidation(_)) - }); - - assert_eq!(StatusStorage::::get(), Status::Ongoing(crate::Pallet::::msp())); - // verify all pages, except for the last one. - for i in 0..T::Pages::get() - 1 { - crate::Pallet::::roll_next(true, false); - assert_eq!( - StatusStorage::::get(), - Status::Ongoing(crate::Pallet::::msp() - 1 - i) - ); - } - - // next page to be verified is the last one - assert_eq!(StatusStorage::::get(), Status::Ongoing(crate::Pallet::::lsp())); - assert!(matches!( - &events_for::()[..], - [Event::Verified(_, _), .., Event::Verified(1, _)] - )); - - #[block] - { - crate::Pallet::::roll_next(true, false); - } - - // we are now reset. - assert_eq!(StatusStorage::::get(), Status::Nothing); - assert!(matches!( - &events_for::()[..], - [ - .., - Event::Verified(0, _), - Event::VerificationFailed(0, FeasibilityError::InvalidScore) - ] - )); - - Ok(()) - } - - #[benchmark] - fn on_initialize_invalid_non_terminal( - // number of valid pages that have been verified, before we verify the non-terminal invalid - // page. - v: Linear<0, { T::Pages::get() - 1 }>, - ) -> Result<(), BenchmarkError> { - assert!(T::Pages::get() >= 2, "benchmark only works if we have more than 2 pages"); - - T::DataProvider::set_next_election(crate::Pallet::::reasonable_next_election()); - - // roll to signed validation, with a solution stored in the signed pallet, but this solution - // is corrupt in its msp. 
- let mut paged_solution = crate::Pallet::::roll_to_signed_and_mine_full_solution(); - let page_to_corrupt = crate::Pallet::::msp() - v; - crate::log!( - info, - "pages of solution: {:?}, to corrupt {}, v {}", - paged_solution.solution_pages.len(), - page_to_corrupt, - v - ); - paged_solution.solution_pages[page_to_corrupt as usize].corrupt(); - crate::Pallet::::submit_full_solution(paged_solution); - - // roll to verification - crate::Pallet::::roll_until_matches(|| { - matches!(CurrentPhase::::get(), Phase::SignedValidation(_)) - }); - - // we should be ready to go - assert_eq!(StatusStorage::::get(), Status::Ongoing(crate::Pallet::::msp())); - - // validate the the parameterized number of valid pages. - for _ in 0..v { - crate::Pallet::::roll_next(true, false); - } - - // we are still ready to continue - assert_eq!(StatusStorage::::get(), Status::Ongoing(crate::Pallet::::msp() - v)); - - // verify one page, which will be invalid. - #[block] - { - crate::Pallet::::roll_next(true, false); - } - - // we are now reset, because this page was invalid. - assert_eq!(StatusStorage::::get(), Status::Nothing); - - assert!(matches!( - &events_for::()[..], - [.., Event::VerificationFailed(_, FeasibilityError::NposElection(_))] - )); - - Ok(()) - } - - impl_benchmark_test_suite!( - Pallet, - crate::mock::ExtBuilder::full().build_unchecked(), - crate::mock::Runtime - ); -} diff --git a/substrate/frame/election-provider-multi-block/src/verifier/impls.rs b/substrate/frame/election-provider-multi-block/src/verifier/impls.rs deleted file mode 100644 index 0f5f0fb911be8..0000000000000 --- a/substrate/frame/election-provider-multi-block/src/verifier/impls.rs +++ /dev/null @@ -1,955 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use super::*; -use crate::{ - helpers, - types::VoterOf, - unsigned::miner::{MinerConfig, SupportsOfMiner}, - verifier::Verifier, - SolutionOf, -}; -use codec::{Decode, Encode, MaxEncodedLen}; -use frame_election_provider_support::{ - ExtendedBalance, NposSolution, PageIndex, TryFromOtherBounds, -}; -use frame_support::{ - ensure, - pallet_prelude::{ValueQuery, *}, - traits::{defensive_prelude::*, Defensive, Get}, -}; -use frame_system::pallet_prelude::*; -use pallet::*; -use sp_npos_elections::{evaluate_support, ElectionScore, EvaluateSupport}; -use sp_runtime::Perbill; -use sp_std::{collections::btree_map::BTreeMap, prelude::*}; - -pub(crate) type SupportsOfVerifier = frame_election_provider_support::BoundedSupports< - ::AccountId, - ::MaxWinnersPerPage, - ::MaxBackersPerWinner, ->; - -pub(crate) type VerifierWeightsOf = ::WeightInfo; - -/// The status of this pallet. -#[derive( - Encode, Decode, scale_info::TypeInfo, Clone, Copy, MaxEncodedLen, Debug, PartialEq, Eq, -)] -pub enum Status { - /// A verification is ongoing, and the next page that will be verified is indicated with the - /// inner value. - Ongoing(PageIndex), - /// Nothing is happening. - Nothing, -} - -impl Default for Status { - fn default() -> Self { - Self::Nothing - } -} - -/// Enum to point to the valid variant of the [`QueuedSolution`]. 
-#[derive(Encode, Decode, scale_info::TypeInfo, Clone, Copy, MaxEncodedLen)] -enum ValidSolution { - X, - Y, -} - -impl Default for ValidSolution { - fn default() -> Self { - ValidSolution::Y - } -} - -impl ValidSolution { - fn other(&self) -> Self { - match *self { - ValidSolution::X => ValidSolution::Y, - ValidSolution::Y => ValidSolution::X, - } - } -} - -/// A simple newtype that represents the partial backing of a winner. It only stores the total -/// backing, and the sum of backings, as opposed to a [`sp_npos_elections::Support`] that also -/// stores all of the backers' individual contribution. -/// -/// This is mainly here to allow us to implement `Backings` for it. -#[derive(Default, Encode, Decode, MaxEncodedLen, scale_info::TypeInfo)] -pub struct PartialBackings { - /// The total backing of this particular winner. - pub total: ExtendedBalance, - /// The number of backers. - pub backers: u32, -} - -impl sp_npos_elections::Backings for PartialBackings { - fn total(&self) -> ExtendedBalance { - self.total - } -} - -#[frame_support::pallet] -pub(crate) mod pallet { - use super::*; - #[pallet::config] - #[pallet::disable_frame_system_supertrait_check] - pub trait Config: crate::Config { - /// The overarching event type. - type RuntimeEvent: From> - + IsType<::RuntimeEvent> - + TryInto> - + Clone; - - /// The minimum amount of improvement to the solution score that defines a solution as - /// "better". - #[pallet::constant] - type SolutionImprovementThreshold: Get; - - /// Maximum number of backers, per winner, among all pages of an election. - /// - /// This can only be checked at the very final step of verification. - type MaxBackersPerWinnerFinal: Get; - - /// Maximum number of backers, per winner, per page. - type MaxBackersPerWinner: Get; - - /// Maximum number of supports (aka. winners/validators/targets) that can be represented in - /// a page of results. 
- type MaxWinnersPerPage: Get; - - /// Something that can provide the solution data to the verifier. - /// - /// In reality, this will be fulfilled by the signed phase. - type SolutionDataProvider: crate::verifier::SolutionDataProvider< - Solution = SolutionOf, - >; - - /// The weight information of this pallet. - type WeightInfo: super::WeightInfo; - } - - #[pallet::event] - #[pallet::generate_deposit(pub(super) fn deposit_event)] - pub enum Event { - /// The verification data was unavailable and it could not continue. - VerificationDataUnavailable, - /// A verification failed at the given page. - /// - /// NOTE: if the index is 0, then this could mean either the feasibility of the last page - /// was wrong, or the final checks of `finalize_verification` failed. - VerificationFailed(PageIndex, FeasibilityError), - /// The given page of a solution has been verified, with the given number of winners being - /// found in it. - Verified(PageIndex, u32), - /// A solution with the given score has replaced our current best solution. - Queued(ElectionScore, Option), - } - - // TODO this has to be entirely re-done to take into account that for lazy deletions. We store - // the queued solutions per round and account id. if a solution is invalid, we just mark it as - // garbage and delete it later. - // we keep a pointer to (round, who) which stores the current best solution. - - /// A wrapper interface for the storage items related to the queued solution. - /// - /// It wraps the following: - /// - /// - `QueuedSolutionX` - /// - `QueuedSolutionY` - /// - `QueuedValidVariant` - /// - `QueuedSolutionScore` - /// - `QueuedSolutionBackings` - /// - /// As the name suggests, `QueuedValidVariant` points to the correct variant between - /// `QueuedSolutionX` and `QueuedSolutionY`. In the context of this pallet, by VALID and - /// INVALID variant we mean either of these two storage items, based on the value of - /// `QueuedValidVariant`. 
- /// - /// ### Invariants - /// - /// The following conditions must be met at all times for this group of storage items to be - /// sane. - /// - /// - `QueuedSolutionScore` must always be correct. In other words, it should correctly be the - /// score of `QueuedValidVariant`. - /// - `QueuedSolutionScore` must always be [`Config::SolutionImprovementThreshold`] better than - /// `MinimumScore`. - /// - The number of existing keys in `QueuedSolutionBackings` must always match that of the - /// INVALID variant. - /// - /// Moreover, the following conditions must be met when this pallet is in [`Status::Nothing`], - /// meaning that no ongoing asynchronous verification is ongoing. - /// - /// - No keys should exist in the INVALID variant. - /// - This implies that no data should exist in `QueuedSolutionBackings`. - /// - /// > Note that some keys *might* exist in the queued variant, but since partial solutions - /// > (having less than `T::Pages` pages) are in principle correct, we cannot assert anything on - /// > the number of keys in the VALID variant. In fact, an empty solution with score of [0, 0, - /// > 0] can also be correct. - /// - /// No additional conditions must be met when the pallet is in [`Status::Ongoing`]. The number - /// of pages in - pub struct QueuedSolution(sp_std::marker::PhantomData); - impl QueuedSolution { - /// Private helper for mutating the storage group. - fn mutate_checked(mutate: impl FnOnce() -> R) -> R { - let r = mutate(); - #[cfg(debug_assertions)] - assert!(Self::sanity_check().is_ok()); - r - } - - /// Finalize a correct solution. - /// - /// Should be called at the end of a verification process, once we are sure that a certain - /// solution is 100% correct. - /// - /// It stores its score, flips the pointer to it being the current best one, and clears all - /// the backings and the invalid variant. 
(note: in principle, we can skip clearing the - /// backings here) - pub(crate) fn finalize_correct(score: ElectionScore) { - sublog!( - info, - "verifier", - "finalizing verification a correct solution, replacing old score {:?} with {:?}", - QueuedSolutionScore::::get(), - score - ); - - Self::mutate_checked(|| { - QueuedValidVariant::::mutate(|v| *v = v.other()); - QueuedSolutionScore::::put(score); - - // Clear what was previously the valid variant. Also clears the partial backings. - Self::clear_invalid_and_backings_unchecked(); - }); - } - - /// Clear all relevant information of an invalid solution. - /// - /// Should be called at any step, if we encounter an issue which makes the solution - /// infeasible. - pub(crate) fn clear_invalid_and_backings() { - Self::mutate_checked(Self::clear_invalid_and_backings_unchecked) - } - - /// Same as [`clear_invalid_and_backings`], but without any checks for the integrity of the - /// storage item group. - pub(crate) fn clear_invalid_and_backings_unchecked() { - // clear is safe as we delete at most `Pages` entries, and `Pages` is bounded. - match Self::invalid() { - ValidSolution::X => clear_paged_map!(QueuedSolutionX::), - ValidSolution::Y => clear_paged_map!(QueuedSolutionY::), - }; - clear_paged_map!(QueuedSolutionBackings::); - } - - /// Write a single page of a valid solution into the `invalid` variant of the storage. - /// - /// This should only be called once we are sure that this particular page is 100% correct. - /// - /// This is called after *a page* has been validated, but the entire solution is not yet - /// known to be valid. At this stage, we write to the invalid variant. Once all pages are - /// verified, a call to [`finalize_correct`] will seal the correct pages and flip the - /// invalid/valid variants. 
- pub(crate) fn set_invalid_page(page: PageIndex, supports: SupportsOfVerifier>) { - use frame_support::traits::TryCollect; - Self::mutate_checked(|| { - let backings: BoundedVec<_, _> = supports - .iter() - .map(|(x, s)| (x.clone(), PartialBackings { total: s.total, backers: s.voters.len() as u32 } )) - .try_collect() - .expect("`SupportsOfVerifier` is bounded by as Verifier>::MaxWinnersPerPage, which is assured to be the same as `T::MaxWinnersPerPage` in an integrity test"); - QueuedSolutionBackings::::insert(page, backings); - - match Self::invalid() { - ValidSolution::X => QueuedSolutionX::::insert(page, supports), - ValidSolution::Y => QueuedSolutionY::::insert(page, supports), - } - }) - } - - /// Write a single page to the valid variant directly. - /// - /// This is not the normal flow of writing, and the solution is not checked. - /// - /// This is only useful to override the valid solution with a single (likely backup) - /// solution. - pub(crate) fn force_set_single_page_valid( - page: PageIndex, - supports: SupportsOfVerifier>, - score: ElectionScore, - ) { - Self::mutate_checked(|| { - // clear everything about valid solutions. - match Self::valid() { - ValidSolution::X => clear_paged_map!(QueuedSolutionX::), - ValidSolution::Y => clear_paged_map!(QueuedSolutionY::), - }; - QueuedSolutionScore::::kill(); - - // write a single new page. - match Self::valid() { - ValidSolution::X => QueuedSolutionX::::insert(page, supports), - ValidSolution::Y => QueuedSolutionY::::insert(page, supports), - } - - // write the score. - QueuedSolutionScore::::put(score); - }) - } - - /// Clear all storage items. - /// - /// Should only be called once everything is done. - pub(crate) fn kill() { - Self::mutate_checked(|| { - clear_paged_map!(QueuedSolutionX::); - clear_paged_map!(QueuedSolutionY::); - QueuedValidVariant::::kill(); - clear_paged_map!(QueuedSolutionBackings::); - QueuedSolutionScore::::kill(); - }) - } - - // -- non-mutating methods. 
- - /// Return the `score` and `winner_count` of verifying solution. - /// - /// Assumes that all the corresponding pages of `QueuedSolutionBackings` exist, then it - /// computes the final score of the solution that is currently at the end of its - /// verification process. - /// - /// This solution corresponds to whatever is stored in the INVALID variant of - /// `QueuedSolution`. Recall that the score of this solution is not yet verified, so it - /// should never become `valid`. - pub(crate) fn compute_invalid_score() -> Result<(ElectionScore, u32), FeasibilityError> { - // ensure that this is only called when all pages are verified individually. - // TODO: this is a very EXPENSIVE, and perhaps unreasonable check. A partial solution - // could very well be valid. - if QueuedSolutionBackings::::iter_keys().count() != T::Pages::get() as usize { - return Err(FeasibilityError::Incomplete) - } - - let mut total_supports: BTreeMap = Default::default(); - for (who, PartialBackings { backers, total }) in - QueuedSolutionBackings::::iter().flat_map(|(_, pb)| pb) - { - let entry = total_supports.entry(who).or_default(); - entry.total = entry.total.saturating_add(total); - entry.backers = entry.backers.saturating_add(backers); - - if entry.backers > T::MaxBackersPerWinnerFinal::get() { - return Err(FeasibilityError::FailedToBoundSupport) - } - } - - let winner_count = total_supports.len() as u32; - let score = evaluate_support(total_supports.into_values()); - - Ok((score, winner_count)) - } - - /// The score of the current best solution, if any. - pub(crate) fn queued_score() -> Option { - QueuedSolutionScore::::get() - } - - /// Get a page of the current queued (aka valid) solution. 
- pub(crate) fn get_queued_solution_page( - page: PageIndex, - ) -> Option>> { - match Self::valid() { - ValidSolution::X => QueuedSolutionX::::get(page), - ValidSolution::Y => QueuedSolutionY::::get(page), - } - } - - fn valid() -> ValidSolution { - QueuedValidVariant::::get() - } - - fn invalid() -> ValidSolution { - Self::valid().other() - } - } - - #[allow(unused)] - #[cfg(any(test, feature = "runtime-benchmarks", feature = "try-runtime", debug_assertions))] - impl QueuedSolution { - pub(crate) fn valid_iter( - ) -> impl Iterator>)> { - match Self::valid() { - ValidSolution::X => QueuedSolutionX::::iter(), - ValidSolution::Y => QueuedSolutionY::::iter(), - } - } - - pub(crate) fn invalid_iter( - ) -> impl Iterator>)> { - match Self::invalid() { - ValidSolution::X => QueuedSolutionX::::iter(), - ValidSolution::Y => QueuedSolutionY::::iter(), - } - } - - pub(crate) fn get_valid_page(page: PageIndex) -> Option>> { - match Self::valid() { - ValidSolution::X => QueuedSolutionX::::get(page), - ValidSolution::Y => QueuedSolutionY::::get(page), - } - } - - pub(crate) fn backing_iter() -> impl Iterator< - Item = (PageIndex, BoundedVec<(T::AccountId, PartialBackings), T::MaxWinnersPerPage>), - > { - QueuedSolutionBackings::::iter() - } - - /// Ensure that all the storage items managed by this struct are in `kill` state, meaning - /// that in the expect state after an election is OVER. - pub(crate) fn assert_killed() { - use frame_support::assert_storage_noop; - assert_storage_noop!(Self::kill()); - } - - /// Ensure this storage item group is in correct state. - pub(crate) fn sanity_check() -> Result<(), sp_runtime::DispatchError> { - // score is correct and better than min-score. 
- ensure!( - Pallet::::minimum_score() - .zip(Self::queued_score()) - .map_or(true, |(min_score, score)| score - .strict_threshold_better(min_score, Perbill::zero())), - "queued solution has weak score (min-score)" - ); - - if let Some(queued_score) = Self::queued_score() { - let mut backing_map: BTreeMap = BTreeMap::new(); - Self::valid_iter() - .flat_map(|(_, supports)| supports) - .for_each(|(who, support)| { - let entry = backing_map.entry(who).or_default(); - entry.total = entry.total.saturating_add(support.total); - }); - let real_score = evaluate_support(backing_map.into_values()); - ensure!(real_score == queued_score, "queued solution has wrong score"); - } - - // The number of existing keys in `QueuedSolutionBackings` must always match that of - // the INVALID variant. - ensure!( - QueuedSolutionBackings::::iter().count() == Self::invalid_iter().count(), - "incorrect number of backings pages", - ); - - if let Status::Nothing = StatusStorage::::get() { - ensure!(Self::invalid_iter().count() == 0, "dangling data in invalid variant"); - } - - Ok(()) - } - } - - // -- private storage items, managed by `QueuedSolution`. - - /// The `X` variant of the current queued solution. Might be the valid one or not. - /// - /// The two variants of this storage item is to avoid the need of copying. Recall that once a - /// `VerifyingSolution` is being processed, it needs to write its partial supports *somewhere*. - /// Writing theses supports on top of a *good* queued supports is wrong, since we might bail. - /// Writing them to a bugger and copying at the ned is slightly better, but expensive. This flag - /// system is best of both worlds. - #[pallet::storage] - type QueuedSolutionX = - StorageMap<_, Twox64Concat, PageIndex, SupportsOfVerifier>>; - #[pallet::storage] - /// The `Y` variant of the current queued solution. Might be the valid one or not. 
- type QueuedSolutionY = - StorageMap<_, Twox64Concat, PageIndex, SupportsOfVerifier>>; - /// Pointer to the variant of [`QueuedSolutionX`] or [`QueuedSolutionY`] that is currently - /// valid. - #[pallet::storage] - type QueuedValidVariant = StorageValue<_, ValidSolution, ValueQuery>; - /// The `(amount, count)` of backings, divided per page. - /// - /// This is stored because in the last block of verification we need them to compute the score, - /// and check `MaxBackersPerWinnerFinal`. - /// - /// This can only ever live for the invalid variant of the solution. Once it is valid, we don't - /// need this information anymore; the score is already computed once in - /// [`QueuedSolutionScore`], and the backing counts are checked. - #[pallet::storage] - type QueuedSolutionBackings = StorageMap< - _, - Twox64Concat, - PageIndex, - BoundedVec<(T::AccountId, PartialBackings), T::MaxWinnersPerPage>, - >; - /// The score of the valid variant of [`QueuedSolution`]. - /// - /// This only ever lives for the `valid` variant. - #[pallet::storage] - type QueuedSolutionScore = StorageValue<_, ElectionScore>; - // -- ^^ private storage items, managed by `QueuedSolution`. - - /// The minimum score that each solution must attain in order to be considered feasible. - #[pallet::storage] - #[pallet::getter(fn minimum_score)] - pub(crate) type MinimumScore = StorageValue<_, ElectionScore>; - - /// Storage item for [`Status`]. - #[pallet::storage] - #[pallet::getter(fn status_storage)] - pub(crate) type StatusStorage = StorageValue<_, Status, ValueQuery>; - - #[pallet::pallet] - pub struct Pallet(PhantomData); - - #[pallet::call] - impl Pallet {} - - #[pallet::hooks] - impl Hooks> for Pallet { - fn integrity_test() { - // ensure that we have funneled some of our type parameters EXACTLY as-is to the - // verifier trait interface we implement. 
- assert_eq!(T::MaxWinnersPerPage::get(), ::MaxWinnersPerPage::get()); - assert_eq!( - T::MaxBackersPerWinner::get(), - ::MaxBackersPerWinner::get() - ); - assert!(T::MaxBackersPerWinner::get() <= T::MaxBackersPerWinnerFinal::get()); - } - - fn on_initialize(_n: BlockNumberFor) -> Weight { - Self::do_on_initialize() - } - - #[cfg(feature = "try-runtime")] - fn try_state(_now: BlockNumberFor) -> Result<(), sp_runtime::TryRuntimeError> { - Self::do_try_state(_now) - } - } -} - -impl Pallet { - fn do_on_initialize() -> Weight { - if let Status::Ongoing(current_page) = Self::status_storage() { - let maybe_page_solution = - ::get_page(current_page); - - if maybe_page_solution.as_ref().is_none() { - // the data provider has zilch, revert to a clean state, waiting for a new `start`. - sublog!( - error, - "verifier", - "T::SolutionDataProvider failed to deliver page {}. This is an unexpected error.", - current_page, - ); - - QueuedSolution::::clear_invalid_and_backings(); - StatusStorage::::put(Status::Nothing); - T::SolutionDataProvider::report_result(VerificationResult::DataUnavailable); - - Self::deposit_event(Event::::VerificationDataUnavailable); - // TODO: weight - return Default::default(); - } - - let page_solution = maybe_page_solution.expect("Option checked to not be None; qed"); - let maybe_supports = Self::feasibility_check_page_inner(page_solution, current_page); - - sublog!( - debug, - "verifier", - "verified page {} of a solution, outcome = {:?}", - current_page, - maybe_supports.as_ref().map(|s| s.len()) - ); - - match maybe_supports { - Ok(supports) => { - Self::deposit_event(Event::::Verified(current_page, supports.len() as u32)); - QueuedSolution::::set_invalid_page(current_page, supports); - - if current_page > crate::Pallet::::lsp() { - // not last page, just tick forward. - StatusStorage::::put(Status::Ongoing(current_page.saturating_sub(1))); - VerifierWeightsOf::::on_initialize_valid_non_terminal() - } else { - // last page, finalize everything. 
Solution data provider must always have a - // score for us at this point. Not much point in reporting a result, we just - // assume default score, which will almost certainly fail and cause a proper - // cleanup of the pallet, which is what we want anyways. - let claimed_score = - T::SolutionDataProvider::get_score().defensive_unwrap_or_default(); - - // in both cases of the following match, we are not back to the nothing - // state. - StatusStorage::::put(Status::Nothing); - - match Self::finalize_async_verification(claimed_score) { - Ok(_) => { - T::SolutionDataProvider::report_result(VerificationResult::Queued); - }, - Err(_) => { - T::SolutionDataProvider::report_result( - VerificationResult::Rejected, - ); - // In case of any of the errors, kill the solution. - QueuedSolution::::clear_invalid_and_backings(); - }, - } - VerifierWeightsOf::::on_initialize_valid_terminal() - } - }, - Err(err) => { - // the page solution was invalid. - Self::deposit_event(Event::::VerificationFailed(current_page, err)); - StatusStorage::::put(Status::Nothing); - QueuedSolution::::clear_invalid_and_backings(); - T::SolutionDataProvider::report_result(VerificationResult::Rejected); - // TODO: use lower weight if non-terminal. - VerifierWeightsOf::::on_initialize_invalid_terminal() - }, - } - } else { - // TODO: weight for when nothing happens - Default::default() - } - } - - fn do_verify_synchronous( - partial_solution: SolutionOf, - claimed_score: ElectionScore, - page: PageIndex, - ) -> Result, FeasibilityError> { - // first, ensure this score will be good enough, even if valid.. - let _ = Self::ensure_score_quality(claimed_score)?; - - // then actually check feasibility... - // NOTE: `MaxBackersPerWinnerFinal` is also already checked here. - let supports = Self::feasibility_check_page_inner(partial_solution, page)?; - - // then check that the number of winners was exactly enough.. 
- let desired_targets = - crate::Snapshot::::desired_targets().ok_or(FeasibilityError::SnapshotUnavailable)?; - ensure!(supports.len() as u32 == desired_targets, FeasibilityError::WrongWinnerCount); - - // then check the score was truth.. - let truth_score = supports.evaluate(); - ensure!(truth_score == claimed_score, FeasibilityError::InvalidScore); - - // and finally queue the solution. - QueuedSolution::::force_set_single_page_valid(page, supports.clone(), truth_score); - - Ok(supports) - } - - /// Finalize an asynchronous verification. Checks the final score for correctness, and ensures - /// that it matches all of the criteria. - /// - /// This should only be called when all pages of an async verification are done. - /// - /// Returns: - /// - `Ok()` if everything is okay, at which point the valid variant of the queued solution will - /// be updated. Returns - /// - `Err(Feasibility)` if any of the last verification steps fail. - fn finalize_async_verification(claimed_score: ElectionScore) -> Result<(), FeasibilityError> { - let outcome = QueuedSolution::::compute_invalid_score() - .and_then(|(final_score, winner_count)| { - let desired_targets = crate::Snapshot::::desired_targets().unwrap(); - // claimed_score checked prior in seal_unverified_solution - match (final_score == claimed_score, winner_count == desired_targets) { - (true, true) => { - // all good, finalize this solution - // NOTE: must be before the call to `finalize_correct`. - Self::deposit_event(Event::::Queued( - final_score, - QueuedSolution::::queued_score(), /* the previous score, now - * ejected. */ - )); - QueuedSolution::::finalize_correct(final_score); - Ok(()) - }, - (false, true) => Err(FeasibilityError::InvalidScore), - (true, false) => Err(FeasibilityError::WrongWinnerCount), - (false, false) => Err(FeasibilityError::InvalidScore), - } - }) - .map_err(|err| { - sublog!(warn, "verifier", "Finalizing solution was invalid due to {:?}.", err); - // and deposit an event about it. 
- Self::deposit_event(Event::::VerificationFailed(0, err.clone())); - err - }); - sublog!(debug, "verifier", "finalize verification outcome: {:?}", outcome); - outcome - } - - /// Ensure that the given score is: - /// - /// - better than the queued solution, if one exists. - /// - greater than the minimum untrusted score. - pub(crate) fn ensure_score_quality(score: ElectionScore) -> Result<(), FeasibilityError> { - let is_improvement = ::queued_score().map_or(true, |best_score| { - score.strict_threshold_better(best_score, T::SolutionImprovementThreshold::get()) - }); - ensure!(is_improvement, FeasibilityError::ScoreTooLow); - - let is_greater_than_min_trusted = Self::minimum_score() - .map_or(true, |min_score| score.strict_threshold_better(min_score, Perbill::zero())); - ensure!(is_greater_than_min_trusted, FeasibilityError::ScoreTooLow); - - Ok(()) - } - - /// Do the full feasibility check: - /// - /// - check all edges. - /// - checks `MaxBackersPerWinner` to be respected IN THIS PAGE. - /// - checks the number of winners to be less than or equal to `DesiredTargets` IN THIS PAGE - /// ONLY. - pub(super) fn feasibility_check_page_inner( - partial_solution: SolutionOf, - page: PageIndex, - ) -> Result, FeasibilityError> { - // Read the corresponding snapshots. 
- let snapshot_targets = - crate::Snapshot::::targets().ok_or(FeasibilityError::SnapshotUnavailable)?; - let snapshot_voters = - crate::Snapshot::::voters(page).ok_or(FeasibilityError::SnapshotUnavailable)?; - let desired_targets = - crate::Snapshot::::desired_targets().ok_or(FeasibilityError::SnapshotUnavailable)?; - - feasibility_check_page_inner_with_snapshot::( - partial_solution, - &snapshot_voters, - &snapshot_targets, - desired_targets, - ) - .and_then(|miner_supports| { - SupportsOfVerifier::::try_from_other_bounds(miner_supports) - .defensive_map_err(|_| FeasibilityError::FailedToBoundSupport) - }) - } - - #[cfg(any(test, feature = "runtime-benchmarks", feature = "try-runtime"))] - pub(crate) fn do_try_state(_now: BlockNumberFor) -> Result<(), sp_runtime::TryRuntimeError> { - QueuedSolution::::sanity_check() - } -} - -/// Same as `feasibility_check_page_inner`, but with a snapshot. -/// -/// This is exported as a standalone function, relying on `MinerConfig` rather than `Config` so that -/// it can be used in any offchain miner. -pub fn feasibility_check_page_inner_with_snapshot( - partial_solution: SolutionOf, - snapshot_voters: &BoundedVec, T::VoterSnapshotPerBlock>, - snapshot_targets: &BoundedVec, - desired_targets: u32, -) -> Result, FeasibilityError> { - // ----- Start building. First, we need some closures. - let cache = helpers::generate_voter_cache::(snapshot_voters); - let voter_at = helpers::voter_at_fn::(snapshot_voters); - let target_at = helpers::target_at_fn::(snapshot_targets); - let voter_index = helpers::voter_index_fn_usize::(&cache); - - // Then convert solution -> assignment. This will fail if any of the indices are - // gibberish. - let assignments = partial_solution - .into_assignment(voter_at, target_at) - .map_err::(Into::into)?; - - // Ensure that assignments are all correct. - let _ = assignments - .iter() - .map(|ref assignment| { - // Check that assignment.who is actually a voter (defensive-only). 
NOTE: while - // using the index map from `voter_index` is better than a blind linear search, - // this *still* has room for optimization. Note that we had the index when we - // did `solution -> assignment` and we lost it. Ideal is to keep the index - // around. - - // Defensive-only: must exist in the snapshot. - let snapshot_index = - voter_index(&assignment.who).ok_or(FeasibilityError::InvalidVoter)?; - // Defensive-only: index comes from the snapshot, must exist. - let (_voter, _stake, targets) = - snapshot_voters.get(snapshot_index).ok_or(FeasibilityError::InvalidVoter)?; - debug_assert!(*_voter == assignment.who); - - // Check that all of the targets are valid based on the snapshot. - if assignment.distribution.iter().any(|(t, _)| !targets.contains(t)) { - return Err(FeasibilityError::InvalidVote) - } - Ok(()) - }) - .collect::>()?; - - // ----- Start building support. First, we need one more closure. - let stake_of = helpers::stake_of_fn::(&snapshot_voters, &cache); - - // This might fail if the normalization fails. Very unlikely. See `integrity_test`. - let staked_assignments = - sp_npos_elections::assignment_ratio_to_staked_normalized(assignments, stake_of) - .map_err::(Into::into)?; - - let supports = sp_npos_elections::to_supports(&staked_assignments); - - // Ensure some heuristics. These conditions must hold in the **entire** support, this is - // just a single page. But, they must hold in a single page as well. - ensure!((supports.len() as u32) <= desired_targets, FeasibilityError::WrongWinnerCount); - - // almost-defensive-only: `MaxBackersPerWinner` is already checked. A sane value of - // `MaxWinnersPerPage` should be more than any possible value of `desired_targets()`, which - // is ALSO checked, so this conversion can almost never fail. 
- let bounded_supports = - supports.try_into().map_err(|_| FeasibilityError::FailedToBoundSupport)?; - Ok(bounded_supports) -} - -impl Verifier for Pallet { - type AccountId = T::AccountId; - type Solution = SolutionOf; - type MaxBackersPerWinner = T::MaxBackersPerWinner; - type MaxWinnersPerPage = T::MaxWinnersPerPage; - type MaxBackersPerWinnerFinal = T::MaxBackersPerWinnerFinal; - - fn set_minimum_score(score: ElectionScore) { - MinimumScore::::put(score); - } - - fn ensure_claimed_score_improves(claimed_score: ElectionScore) -> bool { - Self::ensure_score_quality(claimed_score).is_ok() - } - - fn queued_score() -> Option { - QueuedSolution::::queued_score() - } - - fn kill() { - QueuedSolution::::kill(); - >::put(Status::Nothing); - } - - fn get_queued_solution_page(page: PageIndex) -> Option> { - QueuedSolution::::get_queued_solution_page(page) - } - - fn verify_synchronous( - partial_solution: Self::Solution, - claimed_score: ElectionScore, - page: PageIndex, - ) -> Result, FeasibilityError> { - let maybe_current_score = Self::queued_score(); - match Self::do_verify_synchronous(partial_solution, claimed_score, page) { - Ok(supports) => { - sublog!( - info, - "verifier", - "queued a sync solution with score {:?} for page {}", - claimed_score, - page - ); - Self::deposit_event(Event::::Verified(page, supports.len() as u32)); - Self::deposit_event(Event::::Queued(claimed_score, maybe_current_score)); - Ok(supports) - }, - Err(fe) => { - sublog!( - warn, - "verifier", - "sync verification of page {} failed due to {:?}.", - page, - fe - ); - Self::deposit_event(Event::::VerificationFailed(page, fe.clone())); - Err(fe) - }, - } - } - - fn force_set_single_page_valid( - partial_supports: SupportsOfVerifier, - page: PageIndex, - score: ElectionScore, - ) { - Self::deposit_event(Event::::Queued(score, QueuedSolution::::queued_score())); - QueuedSolution::::force_set_single_page_valid(page, partial_supports, score); - } -} - -impl AsynchronousVerifier for Pallet { - 
type SolutionDataProvider = T::SolutionDataProvider; - - fn status() -> Status { - Pallet::::status_storage() - } - - fn start() -> Result<(), &'static str> { - sublog!(info, "verifier", "start signal received."); - if let Status::Nothing = Self::status() { - let claimed_score = Self::SolutionDataProvider::get_score().unwrap_or_default(); - if Self::ensure_score_quality(claimed_score).is_err() { - // don't do anything, report back that this solution was garbage. - Self::deposit_event(Event::::VerificationFailed( - crate::Pallet::::msp(), - FeasibilityError::ScoreTooLow, - )); - T::SolutionDataProvider::report_result(VerificationResult::Rejected); - // Despite being an instant-reject, this was a successful `start` operation. - Ok(()) - } else { - // This solution is good enough to win, we start verifying it in the next block. - StatusStorage::::put(Status::Ongoing(crate::Pallet::::msp())); - Ok(()) - } - } else { - sublog!(warn, "verifier", "start signal received while busy. This will be ignored."); - Err("verification ongoing") - } - } - - fn stop() { - sublog!(warn, "verifier", "stop signal received. clearing everything."); - - // we clear any ongoing solution's no been verified in any case, although this should only - // exist if we were doing something. - #[cfg(debug_assertions)] - assert!( - !matches!(StatusStorage::::get(), Status::Ongoing(_)) || - (matches!(StatusStorage::::get(), Status::Ongoing(_)) && - QueuedSolution::::invalid_iter().count() > 0) - ); - QueuedSolution::::clear_invalid_and_backings_unchecked(); - - // we also mutate the status back to doing nothing. 
- StatusStorage::::mutate(|old| { - if matches!(old, Status::Ongoing(_)) { - T::SolutionDataProvider::report_result(VerificationResult::Rejected) - } - *old = Status::Nothing; - }); - } -} diff --git a/substrate/frame/election-provider-multi-block/src/verifier/tests.rs b/substrate/frame/election-provider-multi-block/src/verifier/tests.rs deleted file mode 100644 index 6fd06923284c2..0000000000000 --- a/substrate/frame/election-provider-multi-block/src/verifier/tests.rs +++ /dev/null @@ -1,1266 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -use crate::{ - mock::*, - types::*, - verifier::{impls::Status, *}, - *, -}; - -use frame_election_provider_support::Support; -use frame_support::{assert_noop, assert_ok}; -use sp_runtime::traits::Bounded; - -mod feasibility_check { - use super::*; - - #[test] - fn missing_snapshot() { - ExtBuilder::verifier().build_unchecked().execute_with(|| { - // create snapshot just so that we can create a solution.. - roll_to_snapshot_created(); - let paged = mine_full_solution().unwrap(); - - // ..remove the only page of the target snapshot. 
- crate::Snapshot::::remove_target_page(); - - assert_noop!( - VerifierPallet::feasibility_check_page_inner(paged.solution_pages[0].clone(), 0), - FeasibilityError::SnapshotUnavailable - ); - }); - - ExtBuilder::verifier().pages(2).build_unchecked().execute_with(|| { - roll_to_snapshot_created(); - let paged = mine_full_solution().unwrap(); - - // ..remove just one of the pages of voter snapshot that is relevant. - crate::Snapshot::::remove_voter_page(0); - - assert_noop!( - VerifierPallet::feasibility_check_page_inner(paged.solution_pages[0].clone(), 0), - FeasibilityError::SnapshotUnavailable - ); - }); - - ExtBuilder::verifier().pages(2).build_unchecked().execute_with(|| { - roll_to_snapshot_created(); - let paged = mine_full_solution().unwrap(); - - // ..removing this page is not important, because we check page 0. - crate::Snapshot::::remove_voter_page(1); - - assert_ok!(VerifierPallet::feasibility_check_page_inner( - paged.solution_pages[0].clone(), - 0 - )); - }); - - ExtBuilder::verifier().pages(2).build_unchecked().execute_with(|| { - roll_to_snapshot_created(); - let paged = mine_full_solution().unwrap(); - - // `DesiredTargets` missing is also an error - crate::Snapshot::::kill_desired_targets(); - - assert_noop!( - VerifierPallet::feasibility_check_page_inner(paged.solution_pages[0].clone(), 0), - FeasibilityError::SnapshotUnavailable - ); - }); - - ExtBuilder::verifier().pages(2).build_unchecked().execute_with(|| { - roll_to_snapshot_created(); - let paged = mine_full_solution().unwrap(); - - // `DesiredTargets` is not checked here. 
- crate::Snapshot::::remove_target_page(); - - assert_noop!( - VerifierPallet::feasibility_check_page_inner(paged.solution_pages[1].clone(), 0), - FeasibilityError::SnapshotUnavailable - ); - }); - } - - #[test] - fn winner_indices_single_page_must_be_in_bounds() { - ExtBuilder::verifier().pages(1).desired_targets(2).build_and_execute(|| { - roll_to_snapshot_created(); - let mut paged = mine_full_solution().unwrap(); - assert_eq!(crate::Snapshot::::targets().unwrap().len(), 4); - // ----------------------------------------------------^^ valid range is [0..3]. - - // Swap all votes from 3 to 4. here are only 4 targets, so index 4 is invalid. - paged.solution_pages[0] - .votes1 - .iter_mut() - .filter(|(_, t)| *t == TargetIndex::from(3u16)) - .for_each(|(_, t)| *t += 1); - - assert_noop!( - VerifierPallet::feasibility_check_page_inner(paged.solution_pages[0].clone(), 0), - FeasibilityError::NposElection(sp_npos_elections::Error::SolutionInvalidIndex) - ); - }) - } - - #[test] - fn voter_indices_per_page_must_be_in_bounds() { - ExtBuilder::verifier() - .pages(1) - .voter_per_page(Bounded::max_value()) - .desired_targets(2) - .build_and_execute(|| { - roll_to_snapshot_created(); - let mut paged = mine_full_solution().unwrap(); - - assert_eq!(crate::Snapshot::::voters(0).unwrap().len(), 12); - // ------------------------------------------------^^ valid range is [0..11] in page - // 0. - - // Check that there is an index 11 in votes1, and flip to 12. There are only 12 - // voters, so index 12 is invalid. 
- assert!( - paged.solution_pages[0] - .votes1 - .iter_mut() - .filter(|(v, _)| *v == VoterIndex::from(11u32)) - .map(|(v, _)| *v = 12) - .count() > 0 - ); - assert_noop!( - VerifierPallet::feasibility_check_page_inner( - paged.solution_pages[0].clone(), - 0 - ), - FeasibilityError::NposElection(sp_npos_elections::Error::SolutionInvalidIndex), - ); - }) - } - - #[test] - fn voter_must_have_same_targets_as_snapshot() { - ExtBuilder::verifier() - .pages(1) - .voter_per_page(Bounded::max_value()) - .desired_targets(2) - .build_and_execute(|| { - roll_to_snapshot_created(); - let mut paged = mine_full_solution().unwrap(); - - // First, check that voter at index 11 (40) actually voted for 3 (40) -- this is - // self vote. Then, change the vote to 2 (30). - assert_eq!( - paged.solution_pages[0] - .votes1 - .iter_mut() - .filter(|(v, t)| *v == 11 && *t == 3) - .map(|(_, t)| *t = 2) - .count(), - 1, - ); - assert_noop!( - VerifierPallet::feasibility_check_page_inner( - paged.solution_pages[0].clone(), - 0 - ), - FeasibilityError::InvalidVote, - ); - }) - } - - #[test] - fn heuristic_max_backers_per_winner_per_page() { - ExtBuilder::verifier().max_backers_per_winner(2).build_and_execute(|| { - roll_to_snapshot_created(); - - // these votes are all valid, but some dude has 3 supports in a single page. - let solution = solution_from_supports( - vec![(40, Support { total: 30, voters: vec![(2, 10), (3, 10), (4, 10)] })], - // all these voters are in page of the snapshot, the msp! - 2, - ); - - assert_noop!( - VerifierPallet::feasibility_check_page_inner(solution, 2), - FeasibilityError::FailedToBoundSupport, - ); - }) - } - - #[test] - fn heuristic_desired_target_check_per_page() { - ExtBuilder::verifier().desired_targets(2).build_and_execute(|| { - roll_to(25); - assert_full_snapshot(); - - // all of these votes are valid, but this solution is already presenting 3 winners, - // while we just one 2. 
- let solution = solution_from_supports( - vec![ - (10, Support { total: 30, voters: vec![(4, 2)] }), - (20, Support { total: 30, voters: vec![(4, 2)] }), - (40, Support { total: 30, voters: vec![(4, 6)] }), - ], - // all these voters are in page 2 of the snapshot, the msp! - 2, - ); - - assert_noop!( - VerifierPallet::feasibility_check_page_inner(solution, 2), - FeasibilityError::WrongWinnerCount, - ); - }) - } -} - -mod async_verification { - use sp_core::bounded_vec; - - use super::*; - // disambiguate event - use crate::verifier::Event; - - #[test] - fn basic_single_verification_works() { - ExtBuilder::verifier().pages(1).build_and_execute(|| { - // load a solution after the snapshot has been created. - roll_to_snapshot_created(); - - let solution = mine_full_solution().unwrap(); - load_mock_signed_and_start(solution.clone()); - - // now let it verify - roll_next(); - - // It done after just one block. - assert_eq!(VerifierPallet::status(), Status::Nothing); - assert_eq!( - verifier_events(), - vec![ - Event::::Verified(0, 2), - Event::::Queued(solution.score, None) - ] - ); - assert_eq!(MockSignedResults::get(), vec![VerificationResult::Queued]); - }); - } - - #[test] - fn basic_multi_verification_works() { - ExtBuilder::verifier().pages(3).build_and_execute(|| { - // load a solution after the snapshot has been created. - roll_to_snapshot_created(); - - let solution = mine_full_solution().unwrap(); - // ------------- ^^^^^^^^^^^^ - - load_mock_signed_and_start(solution.clone()); - assert_eq!(VerifierPallet::status(), Status::Ongoing(2)); - assert_eq!(QueuedSolution::::valid_iter().count(), 0); - - // now let it verify - roll_next(); - assert_eq!(VerifierPallet::status(), Status::Ongoing(1)); - assert_eq!(verifier_events(), vec![Event::::Verified(2, 2)]); - // 1 page verified, stored as invalid. 
- assert_eq!(QueuedSolution::::invalid_iter().count(), 1); - - roll_next(); - assert_eq!(VerifierPallet::status(), Status::Ongoing(0)); - assert_eq!( - verifier_events(), - vec![Event::::Verified(2, 2), Event::::Verified(1, 2),] - ); - // 2 pages verified, stored as invalid. - assert_eq!(QueuedSolution::::invalid_iter().count(), 2); - - // nothing is queued yet. - assert_eq!(MockSignedResults::get(), vec![]); - assert_eq!(QueuedSolution::::valid_iter().count(), 0); - assert!(QueuedSolution::::queued_score().is_none()); - - // last block. - roll_next(); - assert_eq!(VerifierPallet::status(), Status::Nothing); - assert_eq!( - verifier_events(), - vec![ - Event::::Verified(2, 2), - Event::::Verified(1, 2), - Event::::Verified(0, 2), - Event::::Queued(solution.score, None), - ] - ); - assert_eq!(MockSignedResults::get(), vec![VerificationResult::Queued]); - - // a solution has been queued - assert_eq!(QueuedSolution::::valid_iter().count(), 3); - assert!(QueuedSolution::::queued_score().is_some()); - }); - } - - #[test] - fn basic_multi_verification_partial() { - ExtBuilder::verifier().pages(3).build_and_execute(|| { - // load a solution after the snapshot has been created. - roll_to_snapshot_created(); - - let solution = mine_solution(2).unwrap(); - // -------------------------^^^ - - load_mock_signed_and_start(solution.clone()); - - assert_eq!(VerifierPallet::status(), Status::Ongoing(2)); - assert_eq!(QueuedSolution::::valid_iter().count(), 0); - - // now let it verify - roll_next(); - assert_eq!(VerifierPallet::status(), Status::Ongoing(1)); - assert_eq!(verifier_events(), vec![Event::::Verified(2, 2)]); - // 1 page verified, stored as invalid. - assert_eq!(QueuedSolution::::invalid_iter().count(), 1); - - roll_next(); - assert_eq!(VerifierPallet::status(), Status::Ongoing(0)); - assert_eq!( - verifier_events(), - vec![Event::::Verified(2, 2), Event::::Verified(1, 2),] - ); - // 2 page verified, stored as invalid. 
- assert_eq!(QueuedSolution::::invalid_iter().count(), 2); - - // nothing is queued yet. - assert_eq!(MockSignedResults::get(), vec![]); - assert_eq!(QueuedSolution::::valid_iter().count(), 0); - assert!(QueuedSolution::::queued_score().is_none()); - - roll_next(); - assert_eq!(VerifierPallet::status(), Status::Nothing); - - assert_eq!( - verifier_events(), - vec![ - Event::::Verified(2, 2), - Event::::Verified(1, 2), - // this is a partial solution, no one in this page (lsp). - Event::::Verified(0, 0), - Event::::Queued(solution.score, None), - ] - ); - - // a solution has been queued - assert_eq!(MockSignedResults::get(), vec![VerificationResult::Queued]); - assert_eq!(QueuedSolution::::valid_iter().count(), 3); - assert!(QueuedSolution::::queued_score().is_some()); - - // page 0 is empty.. - assert_eq!(QueuedSolution::::get_valid_page(0).unwrap().len(), 0); - // .. the other two are not. - assert_eq!(QueuedSolution::::get_valid_page(1).unwrap().len(), 2); - assert_eq!(QueuedSolution::::get_valid_page(2).unwrap().len(), 2); - }); - } - - #[test] - fn solution_data_provider_failing_initial() { - ExtBuilder::verifier().build_and_execute(|| { - // not super important, but anyways.. - roll_to_snapshot_created(); - - // The solution data provider is empty. - assert_eq!(SignedPhaseSwitch::get(), SignedSwitch::Mock); - assert_eq!(MockSignedNextSolution::get(), None); - - // nothing happens.. - assert_eq!(VerifierPallet::status(), Status::Nothing); - assert_ok!(::start()); - assert_eq!(VerifierPallet::status(), Status::Ongoing(2)); - - roll_next(); - - // we instantly stop. - assert_eq!(verifier_events(), vec![Event::::VerificationDataUnavailable]); - assert_eq!(VerifierPallet::status(), Status::Nothing); - assert!(QueuedSolution::::invalid_iter().count().is_zero()); - assert!(QueuedSolution::::backing_iter().count().is_zero()); - - // and we report invalid back. 
- assert_eq!(MockSignedResults::get(), vec![VerificationResult::DataUnavailable]); - }); - } - - #[test] - fn solution_data_provider_failing_midway() { - ExtBuilder::verifier().build_and_execute(|| { - roll_to_snapshot_created(); - - let solution = mine_full_solution().unwrap(); - load_mock_signed_and_start(solution.clone()); - - assert_eq!(VerifierPallet::status(), Status::Ongoing(2)); - - // now let it verify. first one goes fine. - roll_next(); - assert_eq!(VerifierPallet::status(), Status::Ongoing(1)); - assert_eq!(verifier_events(), vec![Event::::Verified(2, 2)]); - assert_eq!(MockSignedResults::get(), vec![]); - - // 1 page verified, stored as invalid. - assert_eq!(QueuedSolution::::invalid_iter().count(), 1); - assert_eq!(QueuedSolution::::backing_iter().count(), 1); - assert_eq!(QueuedSolution::::valid_iter().count(), 0); - - // suddenly clear this guy. - MockSignedNextSolution::set(None); - MockSignedNextScore::set(None); - - roll_next(); - - // we instantly stop. - assert_eq!( - verifier_events(), - vec![ - Event::::Verified(2, 2), - Event::::VerificationDataUnavailable - ] - ); - assert_eq!(VerifierPallet::status(), Status::Nothing); - assert_eq!(QueuedSolution::::invalid_iter().count(), 0); - assert_eq!(QueuedSolution::::valid_iter().count(), 0); - assert_eq!(QueuedSolution::::backing_iter().count(), 0); - - // and we report invalid back. - assert_eq!(MockSignedResults::get(), vec![VerificationResult::DataUnavailable]); - }) - } - - #[test] - fn rejects_new_verification_via_start_if_ongoing() { - ExtBuilder::verifier().build_and_execute(|| { - roll_to_snapshot_created(); - - let solution = mine_full_solution().unwrap(); - load_mock_signed_and_start(solution.clone()); - - assert_eq!(VerifierPallet::status(), Status::Ongoing(2)); - - // nada - assert_noop!(::start(), "verification ongoing"); - - // now let it verify. first one goes fine. 
- roll_next(); - assert_eq!(VerifierPallet::status(), Status::Ongoing(1)); - assert_eq!(verifier_events(), vec![Event::::Verified(2, 2)]); - assert_eq!(MockSignedResults::get(), vec![]); - - // retry, still nada. - assert_noop!(::start(), "verification ongoing"); - }) - } - - #[test] - fn stop_clears_everything() { - ExtBuilder::verifier().build_and_execute(|| { - roll_to_snapshot_created(); - - let solution = mine_full_solution().unwrap(); - load_mock_signed_and_start(solution.clone()); - - assert_eq!(VerifierPallet::status(), Status::Ongoing(2)); - - roll_next(); - assert_eq!(VerifierPallet::status(), Status::Ongoing(1)); - assert_eq!(verifier_events(), vec![Event::::Verified(2, 2)]); - - roll_next(); - assert_eq!(VerifierPallet::status(), Status::Ongoing(0)); - assert_eq!( - verifier_events(), - vec![Event::::Verified(2, 2), Event::::Verified(1, 2)] - ); - - // now suddenly, we stop - ::stop(); - assert_eq!(VerifierPallet::status(), Status::Nothing); - - // everything is cleared. - assert_eq!(QueuedSolution::::invalid_iter().count(), 0); - assert_eq!(QueuedSolution::::valid_iter().count(), 0); - assert_eq!(QueuedSolution::::backing_iter().count(), 0); - - // and we report invalid back that something was rejected. - assert_eq!(MockSignedResults::get(), vec![VerificationResult::Rejected]); - }) - } - - #[test] - fn weak_valid_solution_is_insta_rejected() { - ExtBuilder::verifier().build_and_execute(|| { - roll_to_snapshot_created(); - - let paged = mine_full_solution().unwrap(); - load_mock_signed_and_start(paged.clone()); - let _ = roll_to_full_verification(); - - assert_eq!( - verifier_events(), - vec![ - Event::Verified(2, 2), - Event::Verified(1, 2), - Event::Verified(0, 2), - Event::Queued(paged.score, None) - ] - ); - assert_eq!(MockSignedResults::get(), vec![VerificationResult::Queued]); - - // good boi, but you are too weak. This solution also does not have the full pages, - // which is also fine. See `basic_multi_verification_partial`. 
- let weak_page_partial = - solution_from_supports(vec![(10, Support { total: 10, voters: vec![(1, 10)] })], 2); - let weak_paged = PagedRawSolution:: { - solution_pages: bounded_vec![weak_page_partial], - score: ElectionScore { minimal_stake: 10, sum_stake: 10, sum_stake_squared: 100 }, - ..Default::default() - }; - - load_mock_signed_and_start(weak_paged.clone()); - // this is insta-rejected, no need to proceed any more blocks. - - assert_eq!( - verifier_events(), - vec![ - Event::Verified(2, 2), - Event::Verified(1, 2), - Event::Verified(0, 2), - Event::Queued(paged.score, None), - Event::VerificationFailed(2, FeasibilityError::ScoreTooLow) - ] - ); - - assert_eq!( - MockSignedResults::get(), - vec![VerificationResult::Queued, VerificationResult::Rejected] - ); - }) - } - - #[test] - fn better_valid_solution_replaces() { - ExtBuilder::verifier().build_and_execute(|| { - roll_to_snapshot_created(); - - // a weak one, which we will still accept. - let weak_page_partial = solution_from_supports( - vec![ - (10, Support { total: 10, voters: vec![(1, 10)] }), - (20, Support { total: 10, voters: vec![(4, 10)] }), - ], - 2, - ); - let weak_paged = PagedRawSolution:: { - solution_pages: bounded_vec![weak_page_partial], - score: ElectionScore { minimal_stake: 10, sum_stake: 20, sum_stake_squared: 200 }, - ..Default::default() - }; - - load_mock_signed_and_start(weak_paged.clone()); - let _ = roll_to_full_verification(); - - assert_eq!( - verifier_events(), - vec![ - Event::Verified(2, 2), - Event::Verified(1, 0), // note: partial solution! - Event::Verified(0, 0), // note: partial solution! 
- Event::Queued(weak_paged.score, None) - ] - ); - assert_eq!(MockSignedResults::get(), vec![VerificationResult::Queued]); - - let paged = mine_full_solution().unwrap(); - load_mock_signed_and_start(paged.clone()); - let _ = roll_to_full_verification(); - - assert_eq!( - verifier_events(), - vec![ - Event::Verified(2, 2), - Event::Verified(1, 0), - Event::Verified(0, 0), - Event::Queued(weak_paged.score, None), - Event::Verified(2, 2), - Event::Verified(1, 2), - Event::Verified(0, 2), - Event::Queued(paged.score, Some(weak_paged.score)) - ] - ); - assert_eq!( - MockSignedResults::get(), - vec![VerificationResult::Queued, VerificationResult::Queued] - ); - }) - } - - #[test] - fn invalid_solution_bad_score() { - ExtBuilder::verifier().build_and_execute(|| { - roll_to_snapshot_created(); - let mut paged = mine_full_solution().unwrap(); - - // just tweak score. - paged.score.minimal_stake += 1; - assert!(::queued_score().is_none()); - - load_mock_signed_and_start(paged); - roll_to_full_verification(); - - // nothing is verified. - assert!(::queued_score().is_none()); - assert_eq!( - verifier_events(), - vec![ - Event::::Verified(2, 2), - Event::::Verified(1, 2), - Event::::Verified(0, 2), - Event::::VerificationFailed(0, FeasibilityError::InvalidScore) - ] - ); - - assert_eq!(MockSignedResults::get(), vec![VerificationResult::Rejected]); - }) - } - - #[test] - fn invalid_solution_bad_minimum_score() { - ExtBuilder::verifier().build_and_execute(|| { - roll_to_snapshot_created(); - let paged = mine_full_solution().unwrap(); - - // our minimum score is our score, just a bit better. 
- let mut better_score = paged.score; - better_score.minimal_stake += 1; - ::set_minimum_score(better_score); - - load_mock_signed_and_start(paged); - - // note that we don't need to call to `roll_to_full_verification`, since this solution - // is pretty much insta-rejected; - assert_eq!( - verifier_events(), - vec![Event::::VerificationFailed(2, FeasibilityError::ScoreTooLow)] - ); - - // nothing is verified.. - assert!(::queued_score().is_none()); - - // result is reported back. - assert_eq!(MockSignedResults::get(), vec![VerificationResult::Rejected]); - }) - } - - #[test] - fn invalid_solution_bad_desired_targets() { - ExtBuilder::verifier().build_and_execute(|| { - roll_to_snapshot_created(); - assert_eq!(crate::Snapshot::::desired_targets().unwrap(), 2); - let paged = mine_full_solution().unwrap(); - - // tweak this, for whatever reason. - crate::Snapshot::::set_desired_targets(3); - - load_mock_signed_and_start(paged); - roll_to_full_verification(); - - // we detect this only in the last page. - assert_eq!( - verifier_events(), - vec![ - Event::Verified(2, 2), - Event::Verified(1, 2), - Event::Verified(0, 2), - Event::VerificationFailed(0, FeasibilityError::WrongWinnerCount) - ] - ); - - // nothing is verified.. - assert!(::queued_score().is_none()); - // result is reported back. - assert_eq!(MockSignedResults::get(), vec![VerificationResult::Rejected]); - }) - } - - #[test] - fn invalid_solution_bad_bounds_per_page() { - ExtBuilder::verifier() - .desired_targets(1) - .max_backers_per_winner(1) // in each page we allow 1 baker to be presented. - .max_backers_per_winner_final(12) - .build_and_execute(|| { - roll_to_snapshot_created(); - - // This is a sneaky custom solution where it will fail in the second page. 
- let page0 = solution_from_supports( - vec![(10, Support { total: 10, voters: vec![(1, 10)] })], - 2, - ); - let page1 = solution_from_supports( - vec![(10, Support { total: 20, voters: vec![(5, 10), (8, 10)] })], - 1, - ); - let page2 = solution_from_supports( - vec![(10, Support { total: 10, voters: vec![(10, 10)] })], - 0, - ); - let paged = PagedRawSolution { - solution_pages: bounded_vec![page0, page1, page2], - score: Default::default(), // score is never checked, so nada - ..Default::default() - }; - - load_mock_signed_and_start(paged); - roll_to_full_verification(); - - // we detect the bound issue in page 2. - assert_eq!( - verifier_events(), - vec![ - Event::Verified(2, 1), - Event::VerificationFailed(1, FeasibilityError::FailedToBoundSupport) - ] - ); - - // our state is fully cleaned. - QueuedSolution::::assert_killed(); - assert_eq!(StatusStorage::::get(), Status::Nothing); - // nothing is verified.. - assert!(::queued_score().is_none()); - // result is reported back. - assert_eq!(MockSignedResults::get(), vec![VerificationResult::Rejected]); - }) - } - - #[test] - fn invalid_solution_bad_bounds_final() { - ExtBuilder::verifier() - .desired_targets(1) - .max_backers_per_winner_final(2) - .build_and_execute(|| { - roll_to_snapshot_created(); - - // This is a sneaky custom solution where in each page 10 has 1 backers, so only in - // the last page we can catch the mfer. 
- let page0 = solution_from_supports( - vec![(10, Support { total: 10, voters: vec![(1, 10)] })], - 2, - ); - let page1 = solution_from_supports( - vec![(10, Support { total: 10, voters: vec![(5, 10)] })], - 1, - ); - let page2 = solution_from_supports( - vec![(10, Support { total: 10, voters: vec![(10, 10)] })], - 0, - ); - let paged = PagedRawSolution { - solution_pages: bounded_vec![page0, page1, page2], - score: ElectionScore { - minimal_stake: 30, - sum_stake: 30, - sum_stake_squared: 900, - }, - ..Default::default() - }; - - load_mock_signed_and_start(paged); - roll_to_full_verification(); - - // we detect this only in the last page. - assert_eq!( - verifier_events(), - vec![ - Event::Verified(2, 1), - Event::Verified(1, 1), - Event::Verified(0, 1), - Event::VerificationFailed(0, FeasibilityError::FailedToBoundSupport) - ] - ); - - // our state is fully cleaned. - QueuedSolution::::assert_killed(); - assert_eq!(StatusStorage::::get(), Status::Nothing); - - // nothing is verified.. - assert!(::queued_score().is_none()); - // result is reported back. - assert_eq!(MockSignedResults::get(), vec![VerificationResult::Rejected]); - }) - } - - #[test] - fn invalid_solution_does_not_alter_queue() { - ExtBuilder::verifier().build_and_execute(|| { - roll_to_snapshot_created(); - let mut paged = mine_full_solution().unwrap(); - let correct_score = paged.score; - - assert!(::queued_score().is_none()); - - load_mock_signed_and_start(paged.clone()); - roll_to_full_verification(); - - assert_eq!(::queued_score(), Some(correct_score)); - assert!(QueuedSolution::::invalid_iter().count().is_zero()); - assert!(QueuedSolution::::backing_iter().count().is_zero()); - - // just tweak score. Note that we tweak for a higher score, so the verifier will accept - // it. - paged.score.minimal_stake += 1; - load_mock_signed_and_start(paged.clone()); - roll_to_full_verification(); - - // nothing is verified. 
- assert_eq!(::queued_score(), Some(correct_score)); - assert_eq!( - verifier_events(), - vec![ - Event::::Verified(2, 2), - Event::::Verified(1, 2), - Event::::Verified(0, 2), - Event::::Queued(correct_score, None), - Event::::Verified(2, 2), - Event::::Verified(1, 2), - Event::::Verified(0, 2), - Event::::VerificationFailed(0, FeasibilityError::InvalidScore), - ] - ); - - // the verification results. - assert_eq!( - MockSignedResults::get(), - vec![VerificationResult::Queued, VerificationResult::Rejected] - ); - - // and the queue is still in good shape. - assert_eq!(::queued_score(), Some(correct_score)); - assert!(QueuedSolution::::invalid_iter().count().is_zero()); - assert!(QueuedSolution::::backing_iter().count().is_zero()); - }) - } -} - -mod sync_verification { - use frame_election_provider_support::Support; - use sp_core::bounded_vec; - use sp_npos_elections::ElectionScore; - use sp_runtime::Perbill; - - use crate::{ - mock::{ - fake_solution, mine_solution, roll_to_snapshot_created, solution_from_supports, - verifier_events, ExtBuilder, MaxBackersPerWinner, MaxWinnersPerPage, MultiBlock, - Runtime, VerifierPallet, - }, - verifier::{Event, FeasibilityError, Verifier}, - PagedRawSolution, Snapshot, - }; - - #[test] - fn basic_sync_verification_works() { - ExtBuilder::verifier().build_and_execute(|| { - roll_to_snapshot_created(); - let single_page = mine_solution(1).unwrap(); - - assert_eq!(verifier_events(), vec![]); - assert_eq!(::queued_score(), None); - - let _ = ::verify_synchronous( - single_page.solution_pages.first().cloned().unwrap(), - single_page.score, - MultiBlock::msp(), - ) - .unwrap(); - - assert_eq!( - verifier_events(), - vec![ - Event::::Verified(2, 2), - Event::::Queued(single_page.score, None) - ] - ); - assert_eq!(::queued_score(), Some(single_page.score)); - }) - } - - #[test] - fn winner_count_more() { - ExtBuilder::verifier().build_and_execute(|| { - roll_to_snapshot_created(); - let single_page = mine_solution(1).unwrap(); - - // 
change the snapshot, as if the desired targets is now 1. This solution is then valid, - // but has too many. - Snapshot::::set_desired_targets(1); - - assert_eq!(verifier_events(), vec![]); - assert_eq!(::queued_score(), None); - - // note: this is NOT a storage_noop! because we do emit events. - assert_eq!( - ::verify_synchronous( - single_page.solution_pages.first().cloned().unwrap(), - single_page.score, - MultiBlock::msp(), - ) - .unwrap_err(), - FeasibilityError::WrongWinnerCount - ); - - assert_eq!( - verifier_events(), - vec![Event::::VerificationFailed(2, FeasibilityError::WrongWinnerCount)] - ); - assert_eq!(::queued_score(), None); - }) - } - - #[test] - fn winner_count_less() { - ExtBuilder::verifier().build_and_execute(|| { - roll_to_snapshot_created(); - let single_page = mine_solution(1).unwrap(); - - assert_eq!(verifier_events(), vec![]); - assert_eq!(::queued_score(), None); - - // Valid solution, but has now too few. - Snapshot::::set_desired_targets(3); - - assert_eq!( - ::verify_synchronous( - single_page.solution_pages.first().cloned().unwrap(), - single_page.score, - MultiBlock::msp(), - ) - .unwrap_err(), - FeasibilityError::WrongWinnerCount - ); - - assert_eq!( - verifier_events(), - vec![Event::::VerificationFailed(2, FeasibilityError::WrongWinnerCount)] - ); - assert_eq!(::queued_score(), None); - }) - } - - #[test] - fn incorrect_score_is_rejected() { - ExtBuilder::verifier().build_and_execute(|| { - roll_to_snapshot_created(); - - let single_page = mine_solution(1).unwrap(); - let mut score_incorrect = single_page.score; - score_incorrect.minimal_stake += 1; - - assert_eq!( - ::verify_synchronous( - single_page.solution_pages.first().cloned().unwrap(), - score_incorrect, - MultiBlock::msp(), - ) - .unwrap_err(), - FeasibilityError::InvalidScore - ); - - assert_eq!( - verifier_events(), - vec![Event::::VerificationFailed(2, FeasibilityError::InvalidScore),] - ); - }) - } - - #[test] - fn minimum_untrusted_score_is_rejected() { - 
ExtBuilder::verifier().build_and_execute(|| { - roll_to_snapshot_created(); - - let single_page = mine_solution(1).unwrap(); - - // raise the bar such that we don't meet it. - let mut unattainable_score = single_page.score; - unattainable_score.minimal_stake += 1; - - ::set_minimum_score(unattainable_score); - - assert_eq!( - ::verify_synchronous( - single_page.solution_pages.first().cloned().unwrap(), - single_page.score, - MultiBlock::msp(), - ) - .unwrap_err(), - FeasibilityError::ScoreTooLow - ); - - assert_eq!( - verifier_events(), - vec![Event::::VerificationFailed(2, FeasibilityError::ScoreTooLow)] - ); - }) - } - - #[test] - fn bad_bounds_rejected() { - // MaxBackersPerWinner. - ExtBuilder::verifier().build_and_execute(|| { - roll_to_snapshot_created(); - - let single_page = mine_solution(1).unwrap(); - // note: change this after the miner is done, otherwise it is smart enough to trim. - MaxBackersPerWinner::set(1); - - assert_eq!( - ::verify_synchronous( - single_page.solution_pages.first().cloned().unwrap(), - single_page.score, - MultiBlock::msp(), - ) - .unwrap_err(), - FeasibilityError::FailedToBoundSupport - ); - - assert_eq!( - verifier_events(), - vec![Event::::VerificationFailed( - 2, - FeasibilityError::FailedToBoundSupport - )] - ); - }); - - // MaxWinnersPerPage. - ExtBuilder::verifier().build_and_execute(|| { - roll_to_snapshot_created(); - - let single_page = mine_solution(1).unwrap(); - // note: the miner does feasibility internally, change this parameter afterwards. 
- MaxWinnersPerPage::set(1); - - assert_eq!( - ::verify_synchronous( - single_page.solution_pages.first().cloned().unwrap(), - single_page.score, - MultiBlock::msp(), - ) - .unwrap_err(), - FeasibilityError::FailedToBoundSupport - ); - - assert_eq!( - verifier_events(), - vec![Event::::VerificationFailed( - 2, - FeasibilityError::FailedToBoundSupport - )] - ); - }); - } - - #[test] - fn solution_improvement_threshold_respected() { - ExtBuilder::verifier() - .solution_improvement_threshold(Perbill::from_percent(10)) - .build_and_execute(|| { - roll_to_snapshot_created(); - - // submit something good. - let single_page = mine_solution(1).unwrap(); - let _ = ::verify_synchronous( - single_page.solution_pages.first().cloned().unwrap(), - single_page.score, - MultiBlock::msp(), - ) - .unwrap(); - - // the slightly better solution need not even be correct. We improve it by 5%, but - // we need 10%. - let mut better_score = single_page.score; - let improvement = Perbill::from_percent(5) * better_score.minimal_stake; - better_score.minimal_stake += improvement; - let slightly_better = fake_solution(better_score); - - assert_eq!( - ::verify_synchronous( - slightly_better.solution_pages.first().cloned().unwrap(), - slightly_better.score, - MultiBlock::msp(), - ) - .unwrap_err(), - FeasibilityError::ScoreTooLow - ); - }); - } - - #[test] - fn weak_score_is_insta_rejected() { - ExtBuilder::verifier().build_and_execute(|| { - roll_to_snapshot_created(); - - // queue something useful. - let single_page = mine_solution(1).unwrap(); - let _ = ::verify_synchronous( - single_page.solution_pages.first().cloned().unwrap(), - single_page.score, - MultiBlock::msp(), - ) - .unwrap(); - assert_eq!(::queued_score(), Some(single_page.score)); - - // now try and submit that's really weak. Doesn't even need to be valid, since the score - // is checked first. 
- let mut bad_score = single_page.score; - bad_score.minimal_stake -= 1; - let weak = fake_solution(bad_score); - - assert_eq!( - ::verify_synchronous( - weak.solution_pages.first().cloned().unwrap(), - weak.score, - MultiBlock::msp(), - ) - .unwrap_err(), - FeasibilityError::ScoreTooLow - ); - - assert_eq!( - verifier_events(), - vec![ - Event::::Verified(2, 2), - Event::::Queued(single_page.score, None), - Event::::VerificationFailed(2, FeasibilityError::ScoreTooLow), - ] - ); - }) - } - - #[test] - fn good_solution_replaces() { - ExtBuilder::verifier().build_and_execute(|| { - roll_to_snapshot_created(); - - let weak_solution = solution_from_supports( - vec![ - (10, Support { total: 10, voters: vec![(1, 10)] }), - (20, Support { total: 10, voters: vec![(4, 10)] }), - ], - 2, - ); - - let weak_paged = PagedRawSolution:: { - solution_pages: bounded_vec![weak_solution], - score: ElectionScore { minimal_stake: 10, sum_stake: 20, sum_stake_squared: 200 }, - ..Default::default() - }; - - let _ = ::verify_synchronous( - weak_paged.solution_pages.first().cloned().unwrap(), - weak_paged.score, - MultiBlock::msp(), - ) - .unwrap(); - assert_eq!(::queued_score(), Some(weak_paged.score)); - - // now get a better solution. 
- let better = mine_solution(1).unwrap(); - - let _ = ::verify_synchronous( - better.solution_pages.first().cloned().unwrap(), - better.score, - MultiBlock::msp(), - ) - .unwrap(); - - assert_eq!(::queued_score(), Some(better.score)); - - assert_eq!( - verifier_events(), - vec![ - Event::::Verified(2, 2), - Event::::Queued(weak_paged.score, None), - Event::::Verified(2, 2), - Event::::Queued(better.score, Some(weak_paged.score)), - ] - ); - }) - } - - #[test] - fn weak_valid_is_discarded() { - ExtBuilder::verifier().build_and_execute(|| { - roll_to_snapshot_created(); - - // first, submit something good - let better = mine_solution(1).unwrap(); - let _ = ::verify_synchronous( - better.solution_pages.first().cloned().unwrap(), - better.score, - MultiBlock::msp(), - ) - .unwrap(); - assert_eq!(::queued_score(), Some(better.score)); - - // then try with something weaker. - let weak_solution = solution_from_supports( - vec![ - (10, Support { total: 10, voters: vec![(1, 10)] }), - (20, Support { total: 10, voters: vec![(4, 10)] }), - ], - 2, - ); - let weak_paged = PagedRawSolution:: { - solution_pages: bounded_vec![weak_solution], - score: ElectionScore { minimal_stake: 10, sum_stake: 20, sum_stake_squared: 200 }, - ..Default::default() - }; - - assert_eq!( - ::verify_synchronous( - weak_paged.solution_pages.first().cloned().unwrap(), - weak_paged.score, - MultiBlock::msp(), - ) - .unwrap_err(), - FeasibilityError::ScoreTooLow - ); - - // queued solution has not changed. 
- assert_eq!(::queued_score(), Some(better.score)); - - assert_eq!( - verifier_events(), - vec![ - Event::::Verified(2, 2), - Event::::Queued(better.score, None), - Event::::VerificationFailed(2, FeasibilityError::ScoreTooLow), - ] - ); - }) - } -} diff --git a/substrate/frame/election-provider-multi-block/src/weights/measured/mod.rs b/substrate/frame/election-provider-multi-block/src/weights/measured/mod.rs deleted file mode 100644 index 3050fc7e7f195..0000000000000 --- a/substrate/frame/election-provider-multi-block/src/weights/measured/mod.rs +++ /dev/null @@ -1,21 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -pub mod pallet_election_provider_multi_block; -pub mod pallet_election_provider_multi_block_signed; -pub mod pallet_election_provider_multi_block_unsigned; -pub mod pallet_election_provider_multi_block_verifier; diff --git a/substrate/frame/election-provider-multi-block/src/weights/measured/pallet_election_provider_multi_block.rs b/substrate/frame/election-provider-multi-block/src/weights/measured/pallet_election_provider_multi_block.rs deleted file mode 100644 index 8e0d9cf1d16e9..0000000000000 --- a/substrate/frame/election-provider-multi-block/src/weights/measured/pallet_election_provider_multi_block.rs +++ /dev/null @@ -1,364 +0,0 @@ -// This file is part of Substrate. 
- -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_election_provider_multi_block` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2025-02-13, STEPS: `2`, REPEAT: `3`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `toaster1`, CPU: `AMD Ryzen Threadripper 7980X 64-Cores` -//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` - -// Executed Command: -// target/release/substrate-node -// benchmark -// pallet -// --chain -// dev -// --pallet -// pallet_election_provider_multi_block -// --extrinsic -// all -// --steps -// 2 -// --repeat -// 3 -// --template -// substrate/.maintain/frame-weight-template.hbs -// --heap-pages -// 65000 -// --default-pov-mode -// measured -// --output -// ../measured - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] -#![allow(dead_code)] - -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use core::marker::PhantomData; - -/// Weight functions needed for `pallet_election_provider_multi_block`. 
-pub trait WeightInfo { - fn on_initialize_nothing() -> Weight; - fn on_initialize_into_snapshot_msp() -> Weight; - fn on_initialize_into_snapshot_rest() -> Weight; - fn on_initialize_into_signed() -> Weight; - fn on_initialize_into_signed_validation() -> Weight; - fn on_initialize_into_unsigned() -> Weight; - fn manage() -> Weight; -} - -/// Weights for `pallet_election_provider_multi_block` using the Substrate node and recommended hardware. -pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0) - /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - fn on_initialize_nothing() -> Weight { - // Proof Size summary in bytes: - // Measured: `156` - // Estimated: `1641` - // Minimum execution time: 9_254_000 picoseconds. 
- Weight::from_parts(10_145_000, 1641) - .saturating_add(T::DbWeight::get().reads(2_u64)) - } - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - /// Storage: `Staking::ValidatorCount` (r:1 w:0) - /// Proof: `Staking::ValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) - /// Storage: `Staking::CounterForValidators` (r:1 w:0) - /// Proof: `Staking::CounterForValidators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) - /// Storage: `Staking::Validators` (r:1002 w:0) - /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `Measured`) - /// Storage: `Staking::VoterSnapshotStatus` (r:1 w:1) - /// Proof: `Staking::VoterSnapshotStatus` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `Measured`) - /// Storage: `VoterList::CounterForListNodes` (r:1 w:0) - /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) - /// Storage: `VoterList::ListBags` (r:200 w:0) - /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `Measured`) - /// Storage: `VoterList::ListNodes` (r:26001 w:0) - /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `Measured`) - /// Storage: `Staking::Bonded` (r:703 w:0) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `Measured`) - /// Storage: `Staking::Ledger` (r:703 w:0) - /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `Measured`) - /// Storage: `Staking::Nominators` (r:703 w:0) - /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `Measured`) - /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0) - /// Proof: 
`MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - /// Storage: `MultiBlock::PagedVoterSnapshot` (r:0 w:1) - /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `Measured`) - /// Storage: `MultiBlock::DesiredTargets` (r:0 w:1) - /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) - /// Storage: `MultiBlock::PagedTargetSnapshotHash` (r:0 w:1) - /// Proof: `MultiBlock::PagedTargetSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `Measured`) - /// Storage: `MultiBlock::PagedTargetSnapshot` (r:0 w:1) - /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) - /// Storage: `MultiBlock::PagedVoterSnapshotHash` (r:0 w:1) - /// Proof: `MultiBlock::PagedVoterSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `Measured`) - /// Storage: `Staking::MinimumActiveStake` (r:0 w:1) - /// Proof: `Staking::MinimumActiveStake` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `Measured`) - fn on_initialize_into_snapshot_msp() -> Weight { - // Proof Size summary in bytes: - // Measured: `5151586` - // Estimated: `69505051` - // Minimum execution time: 201_905_061_000 picoseconds. 
- Weight::from_parts(203_148_720_000, 69505051) - .saturating_add(T::DbWeight::get().reads(29318_u64)) - .saturating_add(T::DbWeight::get().writes(8_u64)) - } - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - /// Storage: `Staking::VoterSnapshotStatus` (r:1 w:1) - /// Proof: `Staking::VoterSnapshotStatus` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `Measured`) - /// Storage: `VoterList::CounterForListNodes` (r:1 w:0) - /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) - /// Storage: `VoterList::ListNodes` (r:26001 w:0) - /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `Measured`) - /// Storage: `Staking::Bonded` (r:704 w:0) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `Measured`) - /// Storage: `Staking::Ledger` (r:704 w:0) - /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `Measured`) - /// Storage: `Staking::Nominators` (r:703 w:0) - /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `Measured`) - /// Storage: `VoterList::ListBags` (r:200 w:0) - /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `Measured`) - /// Storage: `Staking::Validators` (r:165 w:0) - /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `Measured`) - /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0) - /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - /// Storage: `MultiBlock::PagedVoterSnapshot` (r:0 w:1) - /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `Measured`) - /// 
Storage: `MultiBlock::PagedVoterSnapshotHash` (r:0 w:1) - /// Proof: `MultiBlock::PagedVoterSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `Measured`) - /// Storage: `Staking::MinimumActiveStake` (r:0 w:1) - /// Proof: `Staking::MinimumActiveStake` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `Measured`) - fn on_initialize_into_snapshot_rest() -> Weight { - // Proof Size summary in bytes: - // Measured: `5329975` - // Estimated: `69683440` - // Minimum execution time: 195_257_628_000 picoseconds. - Weight::from_parts(195_317_909_000, 69683440) - .saturating_add(T::DbWeight::get().reads(28481_u64)) - .saturating_add(T::DbWeight::get().writes(5_u64)) - } - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0) - /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - fn on_initialize_into_signed() -> Weight { - // Proof Size summary in bytes: - // Measured: `340` - // Estimated: `1825` - // Minimum execution time: 649_767_000 picoseconds. 
- Weight::from_parts(764_370_000, 1825) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0) - /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - /// Storage: `MultiBlock::Round` (r:1 w:0) - /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) - /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:0) - /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) - fn on_initialize_into_signed_validation() -> Weight { - // Proof Size summary in bytes: - // Measured: `340` - // Estimated: `3805` - // Minimum execution time: 657_218_000 picoseconds. - Weight::from_parts(674_575_000, 3805) - .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1) - /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) - /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) - fn on_initialize_into_unsigned() -> Weight { - // Proof Size summary in bytes: - // Measured: `340` - // Estimated: `1825` - // Minimum execution time: 866_827_000 picoseconds. 
- Weight::from_parts(890_863_000, 1825) - .saturating_add(T::DbWeight::get().reads(3_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) - } - fn manage() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 140_000 picoseconds. - Weight::from_parts(170_000, 0) - } -} - -// For backwards compatibility and tests. -impl WeightInfo for () { - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0) - /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - fn on_initialize_nothing() -> Weight { - // Proof Size summary in bytes: - // Measured: `156` - // Estimated: `1641` - // Minimum execution time: 9_254_000 picoseconds. - Weight::from_parts(10_145_000, 1641) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - } - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - /// Storage: `Staking::ValidatorCount` (r:1 w:0) - /// Proof: `Staking::ValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) - /// Storage: `Staking::CounterForValidators` (r:1 w:0) - /// Proof: `Staking::CounterForValidators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) - /// Storage: `Staking::Validators` (r:1002 w:0) - /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `Measured`) - /// Storage: `Staking::VoterSnapshotStatus` (r:1 w:1) - /// Proof: `Staking::VoterSnapshotStatus` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `Measured`) - /// Storage: `VoterList::CounterForListNodes` (r:1 w:0) - /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), 
`max_size`: Some(4), added: 499, mode: `Measured`) - /// Storage: `VoterList::ListBags` (r:200 w:0) - /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `Measured`) - /// Storage: `VoterList::ListNodes` (r:26001 w:0) - /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `Measured`) - /// Storage: `Staking::Bonded` (r:703 w:0) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `Measured`) - /// Storage: `Staking::Ledger` (r:703 w:0) - /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `Measured`) - /// Storage: `Staking::Nominators` (r:703 w:0) - /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `Measured`) - /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0) - /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - /// Storage: `MultiBlock::PagedVoterSnapshot` (r:0 w:1) - /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `Measured`) - /// Storage: `MultiBlock::DesiredTargets` (r:0 w:1) - /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) - /// Storage: `MultiBlock::PagedTargetSnapshotHash` (r:0 w:1) - /// Proof: `MultiBlock::PagedTargetSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `Measured`) - /// Storage: `MultiBlock::PagedTargetSnapshot` (r:0 w:1) - /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) - /// Storage: `MultiBlock::PagedVoterSnapshotHash` (r:0 w:1) - /// Proof: `MultiBlock::PagedVoterSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `Measured`) - /// Storage: `Staking::MinimumActiveStake` (r:0 w:1) - /// Proof: 
`Staking::MinimumActiveStake` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `Measured`) - fn on_initialize_into_snapshot_msp() -> Weight { - // Proof Size summary in bytes: - // Measured: `5151586` - // Estimated: `69505051` - // Minimum execution time: 201_905_061_000 picoseconds. - Weight::from_parts(203_148_720_000, 69505051) - .saturating_add(RocksDbWeight::get().reads(29318_u64)) - .saturating_add(RocksDbWeight::get().writes(8_u64)) - } - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - /// Storage: `Staking::VoterSnapshotStatus` (r:1 w:1) - /// Proof: `Staking::VoterSnapshotStatus` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `Measured`) - /// Storage: `VoterList::CounterForListNodes` (r:1 w:0) - /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) - /// Storage: `VoterList::ListNodes` (r:26001 w:0) - /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `Measured`) - /// Storage: `Staking::Bonded` (r:704 w:0) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `Measured`) - /// Storage: `Staking::Ledger` (r:704 w:0) - /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `Measured`) - /// Storage: `Staking::Nominators` (r:703 w:0) - /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `Measured`) - /// Storage: `VoterList::ListBags` (r:200 w:0) - /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `Measured`) - /// Storage: `Staking::Validators` (r:165 w:0) - /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `Measured`) - /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0) - /// Proof: 
`MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - /// Storage: `MultiBlock::PagedVoterSnapshot` (r:0 w:1) - /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `Measured`) - /// Storage: `MultiBlock::PagedVoterSnapshotHash` (r:0 w:1) - /// Proof: `MultiBlock::PagedVoterSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `Measured`) - /// Storage: `Staking::MinimumActiveStake` (r:0 w:1) - /// Proof: `Staking::MinimumActiveStake` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `Measured`) - fn on_initialize_into_snapshot_rest() -> Weight { - // Proof Size summary in bytes: - // Measured: `5329975` - // Estimated: `69683440` - // Minimum execution time: 195_257_628_000 picoseconds. - Weight::from_parts(195_317_909_000, 69683440) - .saturating_add(RocksDbWeight::get().reads(28481_u64)) - .saturating_add(RocksDbWeight::get().writes(5_u64)) - } - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0) - /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - fn on_initialize_into_signed() -> Weight { - // Proof Size summary in bytes: - // Measured: `340` - // Estimated: `1825` - // Minimum execution time: 649_767_000 picoseconds. 
- Weight::from_parts(764_370_000, 1825) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0) - /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - /// Storage: `MultiBlock::Round` (r:1 w:0) - /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) - /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:0) - /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) - fn on_initialize_into_signed_validation() -> Weight { - // Proof Size summary in bytes: - // Measured: `340` - // Estimated: `3805` - // Minimum execution time: 657_218_000 picoseconds. - Weight::from_parts(674_575_000, 3805) - .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1) - /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) - /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) - fn on_initialize_into_unsigned() -> Weight { - // Proof Size summary in bytes: - // Measured: `340` - // Estimated: `1825` - // Minimum execution time: 866_827_000 picoseconds. 
- Weight::from_parts(890_863_000, 1825) - .saturating_add(RocksDbWeight::get().reads(3_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) - } - fn manage() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 140_000 picoseconds. - Weight::from_parts(170_000, 0) - } -} diff --git a/substrate/frame/election-provider-multi-block/src/weights/measured/pallet_election_provider_multi_block_signed.rs b/substrate/frame/election-provider-multi-block/src/weights/measured/pallet_election_provider_multi_block_signed.rs deleted file mode 100644 index 3eb0e3ccd48ca..0000000000000 --- a/substrate/frame/election-provider-multi-block/src/weights/measured/pallet_election_provider_multi_block_signed.rs +++ /dev/null @@ -1,272 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_election_provider_multi_block::signed` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2025-02-13, STEPS: `2`, REPEAT: `3`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `toaster1`, CPU: `AMD Ryzen Threadripper 7980X 64-Cores` -//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` - -// Executed Command: -// target/release/substrate-node -// benchmark -// pallet -// --chain -// dev -// --pallet -// pallet_election_provider_multi_block::signed -// --extrinsic -// all -// --steps -// 2 -// --repeat -// 3 -// --template -// substrate/.maintain/frame-weight-template.hbs -// --heap-pages -// 65000 -// --default-pov-mode -// measured -// --output -// ../measured - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] -#![allow(dead_code)] - -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use core::marker::PhantomData; - -/// Weight functions needed for `pallet_election_provider_multi_block::signed`. -pub trait WeightInfo { - fn register_not_full() -> Weight; - fn register_eject() -> Weight; - fn submit_page() -> Weight; - fn unset_page() -> Weight; - fn bail() -> Weight; -} - -/// Weights for `pallet_election_provider_multi_block::signed` using the Substrate node and recommended hardware. 
-pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`) - /// Storage: `MultiBlock::Round` (r:1 w:0) - /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) - /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) - /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) - /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:0 w:1) - /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `Measured`) - fn register_not_full() -> Weight { - // Proof Size summary in bytes: - // Measured: `3043` - // Estimated: `6508` - // Minimum execution time: 62_425_000 picoseconds. 
- Weight::from_parts(63_507_000, 6508) - .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) - } - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - /// Storage: `Balances::Holds` (r:2 w:2) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`) - /// Storage: `MultiBlock::Round` (r:1 w:0) - /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) - /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) - /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) - /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:2) - /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `Measured`) - /// Storage: `MultiBlockSigned::SubmissionStorage` (r:32 w:32) - /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `Measured`) - fn register_eject() -> Weight { - // Proof Size summary in bytes: - // Measured: `7643` - // Estimated: `87833` - // Minimum execution time: 148_826_000 picoseconds. 
- Weight::from_parts(155_275_000, 87833) - .saturating_add(T::DbWeight::get().reads(38_u64)) - .saturating_add(T::DbWeight::get().writes(37_u64)) - } - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - /// Storage: `MultiBlock::Round` (r:1 w:0) - /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) - /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) - /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `Measured`) - /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`) - /// Storage: `MultiBlockSigned::SubmissionStorage` (r:1 w:1) - /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `Measured`) - fn submit_page() -> Weight { - // Proof Size summary in bytes: - // Measured: `3459` - // Estimated: `6924` - // Minimum execution time: 697_450_000 picoseconds. 
- Weight::from_parts(762_938_000, 6924) - .saturating_add(T::DbWeight::get().reads(5_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) - } - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - /// Storage: `MultiBlock::Round` (r:1 w:0) - /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) - /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) - /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `Measured`) - /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`) - /// Storage: `MultiBlockSigned::SubmissionStorage` (r:1 w:1) - /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `Measured`) - fn unset_page() -> Weight { - // Proof Size summary in bytes: - // Measured: `4287` - // Estimated: `7752` - // Minimum execution time: 681_035_000 picoseconds. 
- Weight::from_parts(711_671_000, 7752) - .saturating_add(T::DbWeight::get().reads(5_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) - } - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - /// Storage: `MultiBlock::Round` (r:1 w:0) - /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) - /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) - /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) - /// Storage: `MultiBlockSigned::SubmissionStorage` (r:32 w:32) - /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `Measured`) - /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) - /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `Measured`) - /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`) - fn bail() -> Weight { - // Proof Size summary in bytes: - // Measured: `4508` - // Estimated: `84698` - // Minimum execution time: 117_619_000 picoseconds. - Weight::from_parts(118_169_000, 84698) - .saturating_add(T::DbWeight::get().reads(37_u64)) - .saturating_add(T::DbWeight::get().writes(35_u64)) - } -} - -// For backwards compatibility and tests. 
-impl WeightInfo for () { - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`) - /// Storage: `MultiBlock::Round` (r:1 w:0) - /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) - /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) - /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) - /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:0 w:1) - /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `Measured`) - fn register_not_full() -> Weight { - // Proof Size summary in bytes: - // Measured: `3043` - // Estimated: `6508` - // Minimum execution time: 62_425_000 picoseconds. 
- Weight::from_parts(63_507_000, 6508) - .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - } - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - /// Storage: `Balances::Holds` (r:2 w:2) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`) - /// Storage: `MultiBlock::Round` (r:1 w:0) - /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) - /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) - /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) - /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:2) - /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `Measured`) - /// Storage: `MultiBlockSigned::SubmissionStorage` (r:32 w:32) - /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `Measured`) - fn register_eject() -> Weight { - // Proof Size summary in bytes: - // Measured: `7643` - // Estimated: `87833` - // Minimum execution time: 148_826_000 picoseconds. 
- Weight::from_parts(155_275_000, 87833) - .saturating_add(RocksDbWeight::get().reads(38_u64)) - .saturating_add(RocksDbWeight::get().writes(37_u64)) - } - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - /// Storage: `MultiBlock::Round` (r:1 w:0) - /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) - /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) - /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `Measured`) - /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`) - /// Storage: `MultiBlockSigned::SubmissionStorage` (r:1 w:1) - /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `Measured`) - fn submit_page() -> Weight { - // Proof Size summary in bytes: - // Measured: `3459` - // Estimated: `6924` - // Minimum execution time: 697_450_000 picoseconds. 
- Weight::from_parts(762_938_000, 6924) - .saturating_add(RocksDbWeight::get().reads(5_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - } - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - /// Storage: `MultiBlock::Round` (r:1 w:0) - /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) - /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) - /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `Measured`) - /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`) - /// Storage: `MultiBlockSigned::SubmissionStorage` (r:1 w:1) - /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `Measured`) - fn unset_page() -> Weight { - // Proof Size summary in bytes: - // Measured: `4287` - // Estimated: `7752` - // Minimum execution time: 681_035_000 picoseconds. 
- Weight::from_parts(711_671_000, 7752) - .saturating_add(RocksDbWeight::get().reads(5_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - } - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - /// Storage: `MultiBlock::Round` (r:1 w:0) - /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) - /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) - /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) - /// Storage: `MultiBlockSigned::SubmissionStorage` (r:32 w:32) - /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `Measured`) - /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) - /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `Measured`) - /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`) - fn bail() -> Weight { - // Proof Size summary in bytes: - // Measured: `4508` - // Estimated: `84698` - // Minimum execution time: 117_619_000 picoseconds. 
- Weight::from_parts(118_169_000, 84698) - .saturating_add(RocksDbWeight::get().reads(37_u64)) - .saturating_add(RocksDbWeight::get().writes(35_u64)) - } -} diff --git a/substrate/frame/election-provider-multi-block/src/weights/measured/pallet_election_provider_multi_block_unsigned.rs b/substrate/frame/election-provider-multi-block/src/weights/measured/pallet_election_provider_multi_block_unsigned.rs deleted file mode 100644 index 3fbe8099f8787..0000000000000 --- a/substrate/frame/election-provider-multi-block/src/weights/measured/pallet_election_provider_multi_block_unsigned.rs +++ /dev/null @@ -1,153 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_election_provider_multi_block::unsigned` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2025-02-13, STEPS: `2`, REPEAT: `3`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `toaster1`, CPU: `AMD Ryzen Threadripper 7980X 64-Cores` -//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` - -// Executed Command: -// target/release/substrate-node -// benchmark -// pallet -// --chain -// dev -// --pallet -// pallet_election_provider_multi_block::unsigned -// --extrinsic -// all -// --steps -// 2 -// --repeat -// 3 -// --template -// substrate/.maintain/frame-weight-template.hbs -// --heap-pages -// 65000 -// --default-pov-mode -// measured -// --output -// ../measured - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] -#![allow(dead_code)] - -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use core::marker::PhantomData; - -/// Weight functions needed for `pallet_election_provider_multi_block::unsigned`. -pub trait WeightInfo { - fn validate_unsigned() -> Weight; - fn submit_unsigned() -> Weight; -} - -/// Weights for `pallet_election_provider_multi_block::unsigned` using the Substrate node and recommended hardware. 
-pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - /// Storage: `MultiBlock::Round` (r:1 w:0) - /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) - /// Storage: `MultiBlockVerifier::QueuedSolutionScore` (r:1 w:0) - /// Proof: `MultiBlockVerifier::QueuedSolutionScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`) - /// Storage: `MultiBlockVerifier::MinimumScore` (r:1 w:0) - /// Proof: `MultiBlockVerifier::MinimumScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`) - /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) - /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) - fn validate_unsigned() -> Weight { - // Proof Size summary in bytes: - // Measured: `364` - // Estimated: `1849` - // Minimum execution time: 80_312_000 picoseconds. 
- Weight::from_parts(80_762_000, 1849) - .saturating_add(T::DbWeight::get().reads(5_u64)) - } - /// Storage: `MultiBlockVerifier::QueuedSolutionScore` (r:1 w:1) - /// Proof: `MultiBlockVerifier::QueuedSolutionScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`) - /// Storage: `MultiBlockVerifier::MinimumScore` (r:1 w:0) - /// Proof: `MultiBlockVerifier::MinimumScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`) - /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0) - /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) - /// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0) - /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `Measured`) - /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) - /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) - /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) - /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) - /// Storage: `MultiBlockVerifier::QueuedSolutionY` (r:0 w:1) - /// Proof: `MultiBlockVerifier::QueuedSolutionY` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `Measured`) - fn submit_unsigned() -> Weight { - // Proof Size summary in bytes: - // Measured: `157641` - // Estimated: `161106` - // Minimum execution time: 3_629_133_000 picoseconds. - Weight::from_parts(4_086_909_000, 161106) - .saturating_add(T::DbWeight::get().reads(6_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) - } -} - -// For backwards compatibility and tests. 
-impl WeightInfo for () { - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - /// Storage: `MultiBlock::Round` (r:1 w:0) - /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) - /// Storage: `MultiBlockVerifier::QueuedSolutionScore` (r:1 w:0) - /// Proof: `MultiBlockVerifier::QueuedSolutionScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`) - /// Storage: `MultiBlockVerifier::MinimumScore` (r:1 w:0) - /// Proof: `MultiBlockVerifier::MinimumScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`) - /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) - /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) - fn validate_unsigned() -> Weight { - // Proof Size summary in bytes: - // Measured: `364` - // Estimated: `1849` - // Minimum execution time: 80_312_000 picoseconds. 
- Weight::from_parts(80_762_000, 1849) - .saturating_add(RocksDbWeight::get().reads(5_u64)) - } - /// Storage: `MultiBlockVerifier::QueuedSolutionScore` (r:1 w:1) - /// Proof: `MultiBlockVerifier::QueuedSolutionScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`) - /// Storage: `MultiBlockVerifier::MinimumScore` (r:1 w:0) - /// Proof: `MultiBlockVerifier::MinimumScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`) - /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0) - /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) - /// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0) - /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `Measured`) - /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) - /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) - /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) - /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) - /// Storage: `MultiBlockVerifier::QueuedSolutionY` (r:0 w:1) - /// Proof: `MultiBlockVerifier::QueuedSolutionY` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `Measured`) - fn submit_unsigned() -> Weight { - // Proof Size summary in bytes: - // Measured: `157641` - // Estimated: `161106` - // Minimum execution time: 3_629_133_000 picoseconds. 
- Weight::from_parts(4_086_909_000, 161106) - .saturating_add(RocksDbWeight::get().reads(6_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) - } -} diff --git a/substrate/frame/election-provider-multi-block/src/weights/measured/pallet_election_provider_multi_block_verifier.rs b/substrate/frame/election-provider-multi-block/src/weights/measured/pallet_election_provider_multi_block_verifier.rs deleted file mode 100644 index cec05a6e08ad3..0000000000000 --- a/substrate/frame/election-provider-multi-block/src/weights/measured/pallet_election_provider_multi_block_verifier.rs +++ /dev/null @@ -1,361 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_election_provider_multi_block::verifier` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2025-02-13, STEPS: `2`, REPEAT: `3`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `toaster1`, CPU: `AMD Ryzen Threadripper 7980X 64-Cores` -//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` - -// Executed Command: -// target/release/substrate-node -// benchmark -// pallet -// --chain -// dev -// --pallet -// pallet_election_provider_multi_block::verifier -// --extrinsic -// all -// --steps -// 2 -// --repeat -// 3 -// --template -// substrate/.maintain/frame-weight-template.hbs -// --heap-pages -// 65000 -// --default-pov-mode -// measured -// --output -// ../measured - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] -#![allow(dead_code)] - -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use core::marker::PhantomData; - -/// Weight functions needed for `pallet_election_provider_multi_block::verifier`. -pub trait WeightInfo { - fn on_initialize_valid_non_terminal() -> Weight; - fn on_initialize_valid_terminal() -> Weight; - fn on_initialize_invalid_terminal() -> Weight; - fn on_initialize_invalid_non_terminal(v: u32, ) -> Weight; -} - -/// Weights for `pallet_election_provider_multi_block::verifier` using the Substrate node and recommended hardware. 
-pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1) - /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - /// Storage: `MultiBlock::Round` (r:1 w:0) - /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) - /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:0) - /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) - /// Storage: `MultiBlockSigned::SubmissionStorage` (r:1 w:0) - /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `Measured`) - /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0) - /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) - /// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0) - /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `Measured`) - /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) - /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) - /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) - /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) - /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:0 w:1) - /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `Measured`) - /// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:0 w:1) - /// Proof: 
`MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `Measured`) - fn on_initialize_valid_non_terminal() -> Weight { - // Proof Size summary in bytes: - // Measured: `160552` - // Estimated: `164017` - // Minimum execution time: 917_013_000 picoseconds. - Weight::from_parts(919_406_000, 164017) - .saturating_add(T::DbWeight::get().reads(9_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) - } - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1) - /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - /// Storage: `MultiBlock::Round` (r:1 w:0) - /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) - /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) - /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) - /// Storage: `MultiBlockSigned::SubmissionStorage` (r:32 w:32) - /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `Measured`) - /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0) - /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) - /// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0) - /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `Measured`) - /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) - /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) - /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:1) - /// Proof: `MultiBlockVerifier::QueuedValidVariant` 
(`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) - /// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:33 w:32) - /// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `Measured`) - /// Storage: `MultiBlockVerifier::QueuedSolutionScore` (r:1 w:1) - /// Proof: `MultiBlockVerifier::QueuedSolutionScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`) - /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) - /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `Measured`) - /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`) - /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:0 w:1) - /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `Measured`) - fn on_initialize_valid_terminal() -> Weight { - // Proof Size summary in bytes: - // Measured: `881924` - // Estimated: `964589` - // Minimum execution time: 1_932_757_000 picoseconds. 
- Weight::from_parts(1_961_530_000, 964589) - .saturating_add(T::DbWeight::get().reads(76_u64)) - .saturating_add(T::DbWeight::get().writes(71_u64)) - } - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1) - /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - /// Storage: `MultiBlock::Round` (r:1 w:0) - /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) - /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) - /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) - /// Storage: `MultiBlockSigned::SubmissionStorage` (r:32 w:32) - /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `Measured`) - /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0) - /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) - /// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0) - /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `Measured`) - /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) - /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) - /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) - /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) - /// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:33 w:32) - /// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `Measured`) - /// Storage: 
`MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) - /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `Measured`) - /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`) - /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:31 w:32) - /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `Measured`) - fn on_initialize_invalid_terminal() -> Weight { - // Proof Size summary in bytes: - // Measured: `882945` - // Estimated: `965610` - // Minimum execution time: 1_919_946_000 picoseconds. - Weight::from_parts(1_949_902_000, 965610) - .saturating_add(T::DbWeight::get().reads(106_u64)) - .saturating_add(T::DbWeight::get().writes(100_u64)) - } - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1) - /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - /// Storage: `MultiBlock::Round` (r:1 w:0) - /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) - /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) - /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) - /// Storage: `MultiBlockSigned::SubmissionStorage` (r:32 w:32) - /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `Measured`) - /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0) - /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) - /// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 
w:0) - /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `Measured`) - /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) - /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) - /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) - /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) - /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:31 w:31) - /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `Measured`) - /// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:31 w:31) - /// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `Measured`) - /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) - /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `Measured`) - /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`) - /// The range of component `v` is `[0, 31]`. - fn on_initialize_invalid_non_terminal(v: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `164728 + v * (8538 ±0)` - // Estimated: `244918 + v * (16343 ±0)` - // Minimum execution time: 572_970_000 picoseconds. 
- Weight::from_parts(886_325_333, 244918) - // Standard Error: 19_873_926 - .saturating_add(Weight::from_parts(27_871_795, 0).saturating_mul(v.into())) - .saturating_add(T::DbWeight::get().reads(42_u64)) - .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(v.into()))) - .saturating_add(T::DbWeight::get().writes(36_u64)) - .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(v.into()))) - .saturating_add(Weight::from_parts(0, 16343).saturating_mul(v.into())) - } -} - -// For backwards compatibility and tests. -impl WeightInfo for () { - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1) - /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - /// Storage: `MultiBlock::Round` (r:1 w:0) - /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) - /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:0) - /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) - /// Storage: `MultiBlockSigned::SubmissionStorage` (r:1 w:0) - /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `Measured`) - /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0) - /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) - /// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0) - /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `Measured`) - /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) - /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) - /// Storage: 
`MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) - /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) - /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:0 w:1) - /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `Measured`) - /// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:0 w:1) - /// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `Measured`) - fn on_initialize_valid_non_terminal() -> Weight { - // Proof Size summary in bytes: - // Measured: `160552` - // Estimated: `164017` - // Minimum execution time: 917_013_000 picoseconds. - Weight::from_parts(919_406_000, 164017) - .saturating_add(RocksDbWeight::get().reads(9_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - } - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1) - /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - /// Storage: `MultiBlock::Round` (r:1 w:0) - /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) - /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) - /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) - /// Storage: `MultiBlockSigned::SubmissionStorage` (r:32 w:32) - /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `Measured`) - /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0) - /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) - /// 
Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0) - /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `Measured`) - /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) - /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) - /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:1) - /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) - /// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:33 w:32) - /// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `Measured`) - /// Storage: `MultiBlockVerifier::QueuedSolutionScore` (r:1 w:1) - /// Proof: `MultiBlockVerifier::QueuedSolutionScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `Measured`) - /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) - /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `Measured`) - /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`) - /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:0 w:1) - /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `Measured`) - fn on_initialize_valid_terminal() -> Weight { - // Proof Size summary in bytes: - // Measured: `881924` - // Estimated: `964589` - // Minimum execution time: 1_932_757_000 picoseconds. 
- Weight::from_parts(1_961_530_000, 964589) - .saturating_add(RocksDbWeight::get().reads(76_u64)) - .saturating_add(RocksDbWeight::get().writes(71_u64)) - } - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1) - /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - /// Storage: `MultiBlock::Round` (r:1 w:0) - /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) - /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) - /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) - /// Storage: `MultiBlockSigned::SubmissionStorage` (r:32 w:32) - /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `Measured`) - /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0) - /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) - /// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0) - /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `Measured`) - /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) - /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) - /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) - /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) - /// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:33 w:32) - /// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `Measured`) - /// Storage: 
`MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) - /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `Measured`) - /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`) - /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:31 w:32) - /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `Measured`) - fn on_initialize_invalid_terminal() -> Weight { - // Proof Size summary in bytes: - // Measured: `882945` - // Estimated: `965610` - // Minimum execution time: 1_919_946_000 picoseconds. - Weight::from_parts(1_949_902_000, 965610) - .saturating_add(RocksDbWeight::get().reads(106_u64)) - .saturating_add(RocksDbWeight::get().writes(100_u64)) - } - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1) - /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `Measured`) - /// Storage: `MultiBlock::Round` (r:1 w:0) - /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) - /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) - /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `Measured`) - /// Storage: `MultiBlockSigned::SubmissionStorage` (r:32 w:32) - /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `Measured`) - /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0) - /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `Measured`) - /// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 
w:0) - /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `Measured`) - /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) - /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `Measured`) - /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) - /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `Measured`) - /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:31 w:31) - /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `Measured`) - /// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:31 w:31) - /// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `Measured`) - /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) - /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `Measured`) - /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `Measured`) - /// The range of component `v` is `[0, 31]`. - fn on_initialize_invalid_non_terminal(v: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `164728 + v * (8538 ±0)` - // Estimated: `244918 + v * (16343 ±0)` - // Minimum execution time: 572_970_000 picoseconds. 
- Weight::from_parts(886_325_333, 244918) - // Standard Error: 19_873_926 - .saturating_add(Weight::from_parts(27_871_795, 0).saturating_mul(v.into())) - .saturating_add(RocksDbWeight::get().reads(42_u64)) - .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(v.into()))) - .saturating_add(RocksDbWeight::get().writes(36_u64)) - .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(v.into()))) - .saturating_add(Weight::from_parts(0, 16343).saturating_mul(v.into())) - } -} diff --git a/substrate/frame/election-provider-multi-block/src/weights/mel/mod.rs b/substrate/frame/election-provider-multi-block/src/weights/mel/mod.rs deleted file mode 100644 index 3050fc7e7f195..0000000000000 --- a/substrate/frame/election-provider-multi-block/src/weights/mel/mod.rs +++ /dev/null @@ -1,21 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -pub mod pallet_election_provider_multi_block; -pub mod pallet_election_provider_multi_block_signed; -pub mod pallet_election_provider_multi_block_unsigned; -pub mod pallet_election_provider_multi_block_verifier; diff --git a/substrate/frame/election-provider-multi-block/src/weights/mel/pallet_election_provider_multi_block.rs b/substrate/frame/election-provider-multi-block/src/weights/mel/pallet_election_provider_multi_block.rs deleted file mode 100644 index 25b97d446cf47..0000000000000 --- a/substrate/frame/election-provider-multi-block/src/weights/mel/pallet_election_provider_multi_block.rs +++ /dev/null @@ -1,362 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_election_provider_multi_block` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2025-02-13, STEPS: `2`, REPEAT: `3`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `toaster1`, CPU: `AMD Ryzen Threadripper 7980X 64-Cores` -//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` - -// Executed Command: -// target/release/substrate-node -// benchmark -// pallet -// --chain -// dev -// --pallet -// pallet_election_provider_multi_block -// --extrinsic -// all -// --steps -// 2 -// --repeat -// 3 -// --template -// substrate/.maintain/frame-weight-template.hbs -// --heap-pages -// 65000 -// --output -// ../mel - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] -#![allow(dead_code)] - -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use core::marker::PhantomData; - -/// Weight functions needed for `pallet_election_provider_multi_block`. -pub trait WeightInfo { - fn on_initialize_nothing() -> Weight; - fn on_initialize_into_snapshot_msp() -> Weight; - fn on_initialize_into_snapshot_rest() -> Weight; - fn on_initialize_into_signed() -> Weight; - fn on_initialize_into_signed_validation() -> Weight; - fn on_initialize_into_unsigned() -> Weight; - fn manage() -> Weight; -} - -/// Weights for `pallet_election_provider_multi_block` using the Substrate node and recommended hardware. -pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0) - /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - fn on_initialize_nothing() -> Weight { - // Proof Size summary in bytes: - // Measured: `156` - // Estimated: `1490` - // Minimum execution time: 9_425_000 picoseconds. 
- Weight::from_parts(9_514_000, 1490) - .saturating_add(T::DbWeight::get().reads(2_u64)) - } - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `Staking::ValidatorCount` (r:1 w:0) - /// Proof: `Staking::ValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Staking::CounterForValidators` (r:1 w:0) - /// Proof: `Staking::CounterForValidators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Staking::Validators` (r:1002 w:0) - /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) - /// Storage: `Staking::VoterSnapshotStatus` (r:1 w:1) - /// Proof: `Staking::VoterSnapshotStatus` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) - /// Storage: `VoterList::CounterForListNodes` (r:1 w:0) - /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `VoterList::ListBags` (r:200 w:0) - /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) - /// Storage: `VoterList::ListNodes` (r:26001 w:0) - /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:703 w:0) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) - /// Storage: `Staking::Ledger` (r:703 w:0) - /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) - /// Storage: `Staking::Nominators` (r:703 w:0) - /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) - /// Storage: 
`MultiBlockVerifier::StatusStorage` (r:1 w:0) - /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::PagedVoterSnapshot` (r:0 w:1) - /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::DesiredTargets` (r:0 w:1) - /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::PagedTargetSnapshotHash` (r:0 w:1) - /// Proof: `MultiBlock::PagedTargetSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::PagedTargetSnapshot` (r:0 w:1) - /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::PagedVoterSnapshotHash` (r:0 w:1) - /// Proof: `MultiBlock::PagedVoterSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) - /// Storage: `Staking::MinimumActiveStake` (r:0 w:1) - /// Proof: `Staking::MinimumActiveStake` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - fn on_initialize_into_snapshot_msp() -> Weight { - // Proof Size summary in bytes: - // Measured: `5151586` - // Estimated: `68357619` - // Minimum execution time: 205_124_352_000 picoseconds. 
- Weight::from_parts(206_087_996_000, 68357619) - .saturating_add(T::DbWeight::get().reads(29318_u64)) - .saturating_add(T::DbWeight::get().writes(8_u64)) - } - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `Staking::VoterSnapshotStatus` (r:1 w:1) - /// Proof: `Staking::VoterSnapshotStatus` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) - /// Storage: `VoterList::CounterForListNodes` (r:1 w:0) - /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `VoterList::ListNodes` (r:26001 w:0) - /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:704 w:0) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) - /// Storage: `Staking::Ledger` (r:704 w:0) - /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) - /// Storage: `Staking::Nominators` (r:703 w:0) - /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) - /// Storage: `VoterList::ListBags` (r:200 w:0) - /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) - /// Storage: `Staking::Validators` (r:165 w:0) - /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0) - /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::PagedVoterSnapshot` (r:0 w:1) - /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: 
Some(388773), added: 391248, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::PagedVoterSnapshotHash` (r:0 w:1) - /// Proof: `MultiBlock::PagedVoterSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) - /// Storage: `Staking::MinimumActiveStake` (r:0 w:1) - /// Proof: `Staking::MinimumActiveStake` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - fn on_initialize_into_snapshot_rest() -> Weight { - // Proof Size summary in bytes: - // Measured: `5329975` - // Estimated: `68357619` - // Minimum execution time: 197_146_155_000 picoseconds. - Weight::from_parts(198_376_173_000, 68357619) - .saturating_add(T::DbWeight::get().reads(28481_u64)) - .saturating_add(T::DbWeight::get().writes(5_u64)) - } - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0) - /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - fn on_initialize_into_signed() -> Weight { - // Proof Size summary in bytes: - // Measured: `340` - // Estimated: `1490` - // Minimum execution time: 750_450_000 picoseconds. 
- Weight::from_parts(764_001_000, 1490) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0) - /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::Round` (r:1 w:0) - /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:0) - /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `MaxEncodedLen`) - fn on_initialize_into_signed_validation() -> Weight { - // Proof Size summary in bytes: - // Measured: `340` - // Estimated: `4118` - // Minimum execution time: 626_412_000 picoseconds. - Weight::from_parts(663_538_000, 4118) - .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1) - /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) - /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - fn on_initialize_into_unsigned() -> Weight { - // Proof Size summary in bytes: - // Measured: `340` - // Estimated: `1490` - // Minimum execution time: 734_786_000 picoseconds. 
- Weight::from_parts(882_059_000, 1490) - .saturating_add(T::DbWeight::get().reads(3_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) - } - fn manage() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 141_000 picoseconds. - Weight::from_parts(150_000, 0) - } -} - -// For backwards compatibility and tests. -impl WeightInfo for () { - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0) - /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - fn on_initialize_nothing() -> Weight { - // Proof Size summary in bytes: - // Measured: `156` - // Estimated: `1490` - // Minimum execution time: 9_425_000 picoseconds. - Weight::from_parts(9_514_000, 1490) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - } - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `Staking::ValidatorCount` (r:1 w:0) - /// Proof: `Staking::ValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Staking::CounterForValidators` (r:1 w:0) - /// Proof: `Staking::CounterForValidators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Staking::Validators` (r:1002 w:0) - /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) - /// Storage: `Staking::VoterSnapshotStatus` (r:1 w:1) - /// Proof: `Staking::VoterSnapshotStatus` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) - /// Storage: `VoterList::CounterForListNodes` (r:1 w:0) - /// Proof: 
`VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `VoterList::ListBags` (r:200 w:0) - /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) - /// Storage: `VoterList::ListNodes` (r:26001 w:0) - /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:703 w:0) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) - /// Storage: `Staking::Ledger` (r:703 w:0) - /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) - /// Storage: `Staking::Nominators` (r:703 w:0) - /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0) - /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::PagedVoterSnapshot` (r:0 w:1) - /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::DesiredTargets` (r:0 w:1) - /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::PagedTargetSnapshotHash` (r:0 w:1) - /// Proof: `MultiBlock::PagedTargetSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::PagedTargetSnapshot` (r:0 w:1) - /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::PagedVoterSnapshotHash` (r:0 w:1) - /// Proof: `MultiBlock::PagedVoterSnapshotHash` (`max_values`: None, 
`max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) - /// Storage: `Staking::MinimumActiveStake` (r:0 w:1) - /// Proof: `Staking::MinimumActiveStake` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - fn on_initialize_into_snapshot_msp() -> Weight { - // Proof Size summary in bytes: - // Measured: `5151586` - // Estimated: `68357619` - // Minimum execution time: 205_124_352_000 picoseconds. - Weight::from_parts(206_087_996_000, 68357619) - .saturating_add(RocksDbWeight::get().reads(29318_u64)) - .saturating_add(RocksDbWeight::get().writes(8_u64)) - } - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `Staking::VoterSnapshotStatus` (r:1 w:1) - /// Proof: `Staking::VoterSnapshotStatus` (`max_values`: Some(1), `max_size`: Some(33), added: 528, mode: `MaxEncodedLen`) - /// Storage: `VoterList::CounterForListNodes` (r:1 w:0) - /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `VoterList::ListNodes` (r:26001 w:0) - /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:704 w:0) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) - /// Storage: `Staking::Ledger` (r:704 w:0) - /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) - /// Storage: `Staking::Nominators` (r:703 w:0) - /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) - /// Storage: `VoterList::ListBags` (r:200 w:0) - /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) - /// Storage: `Staking::Validators` (r:165 w:0) - /// 
Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0) - /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::PagedVoterSnapshot` (r:0 w:1) - /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::PagedVoterSnapshotHash` (r:0 w:1) - /// Proof: `MultiBlock::PagedVoterSnapshotHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) - /// Storage: `Staking::MinimumActiveStake` (r:0 w:1) - /// Proof: `Staking::MinimumActiveStake` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - fn on_initialize_into_snapshot_rest() -> Weight { - // Proof Size summary in bytes: - // Measured: `5329975` - // Estimated: `68357619` - // Minimum execution time: 197_146_155_000 picoseconds. - Weight::from_parts(198_376_173_000, 68357619) - .saturating_add(RocksDbWeight::get().reads(28481_u64)) - .saturating_add(RocksDbWeight::get().writes(5_u64)) - } - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0) - /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - fn on_initialize_into_signed() -> Weight { - // Proof Size summary in bytes: - // Measured: `340` - // Estimated: `1490` - // Minimum execution time: 750_450_000 picoseconds. 
- Weight::from_parts(764_001_000, 1490) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:0) - /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::Round` (r:1 w:0) - /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:0) - /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `MaxEncodedLen`) - fn on_initialize_into_signed_validation() -> Weight { - // Proof Size summary in bytes: - // Measured: `340` - // Estimated: `4118` - // Minimum execution time: 626_412_000 picoseconds. - Weight::from_parts(663_538_000, 4118) - .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:1) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1) - /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) - /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - fn on_initialize_into_unsigned() -> Weight { - // Proof Size summary in bytes: - // Measured: `340` - // Estimated: `1490` - // Minimum execution time: 734_786_000 picoseconds. 
- Weight::from_parts(882_059_000, 1490) - .saturating_add(RocksDbWeight::get().reads(3_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) - } - fn manage() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 141_000 picoseconds. - Weight::from_parts(150_000, 0) - } -} diff --git a/substrate/frame/election-provider-multi-block/src/weights/mel/pallet_election_provider_multi_block_signed.rs b/substrate/frame/election-provider-multi-block/src/weights/mel/pallet_election_provider_multi_block_signed.rs deleted file mode 100644 index 98e238145ae50..0000000000000 --- a/substrate/frame/election-provider-multi-block/src/weights/mel/pallet_election_provider_multi_block_signed.rs +++ /dev/null @@ -1,270 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_election_provider_multi_block::signed` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2025-02-13, STEPS: `2`, REPEAT: `3`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `toaster1`, CPU: `AMD Ryzen Threadripper 7980X 64-Cores` -//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` - -// Executed Command: -// target/release/substrate-node -// benchmark -// pallet -// --chain -// dev -// --pallet -// pallet_election_provider_multi_block::signed -// --extrinsic -// all -// --steps -// 2 -// --repeat -// 3 -// --template -// substrate/.maintain/frame-weight-template.hbs -// --heap-pages -// 65000 -// --output -// ../mel - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] -#![allow(dead_code)] - -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use core::marker::PhantomData; - -/// Weight functions needed for `pallet_election_provider_multi_block::signed`. -pub trait WeightInfo { - fn register_not_full() -> Weight; - fn register_eject() -> Weight; - fn submit_page() -> Weight; - fn unset_page() -> Weight; - fn bail() -> Weight; -} - -/// Weights for `pallet_election_provider_multi_block::signed` using the Substrate node and recommended hardware. 
-pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::Round` (r:1 w:0) - /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) - /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:0 w:1) - /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `MaxEncodedLen`) - fn register_not_full() -> Weight { - // Proof Size summary in bytes: - // Measured: `3043` - // Estimated: `4118` - // Minimum execution time: 60_863_000 picoseconds. 
- Weight::from_parts(61_264_000, 4118) - .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) - } - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `Balances::Holds` (r:2 w:2) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::Round` (r:1 w:0) - /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) - /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:2) - /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockSigned::SubmissionStorage` (r:32 w:32) - /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `MaxEncodedLen`) - fn register_eject() -> Weight { - // Proof Size summary in bytes: - // Measured: `7643` - // Estimated: `1185054` - // Minimum execution time: 149_207_000 picoseconds. 
- Weight::from_parts(151_520_000, 1185054) - .saturating_add(T::DbWeight::get().reads(38_u64)) - .saturating_add(T::DbWeight::get().writes(37_u64)) - } - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::Round` (r:1 w:0) - /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) - /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `MaxEncodedLen`) - /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockSigned::SubmissionStorage` (r:1 w:1) - /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `MaxEncodedLen`) - fn submit_page() -> Weight { - // Proof Size summary in bytes: - // Measured: `3459` - // Estimated: `37992` - // Minimum execution time: 707_404_000 picoseconds. 
- Weight::from_parts(752_393_000, 37992) - .saturating_add(T::DbWeight::get().reads(5_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) - } - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::Round` (r:1 w:0) - /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) - /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `MaxEncodedLen`) - /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockSigned::SubmissionStorage` (r:1 w:1) - /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `MaxEncodedLen`) - fn unset_page() -> Weight { - // Proof Size summary in bytes: - // Measured: `4287` - // Estimated: `37992` - // Minimum execution time: 716_769_000 picoseconds. 
- Weight::from_parts(761_406_000, 37992) - .saturating_add(T::DbWeight::get().reads(5_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) - } - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::Round` (r:1 w:0) - /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) - /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockSigned::SubmissionStorage` (r:32 w:32) - /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) - /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `MaxEncodedLen`) - /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `MaxEncodedLen`) - fn bail() -> Weight { - // Proof Size summary in bytes: - // Measured: `4508` - // Estimated: `1185054` - // Minimum execution time: 117_038_000 picoseconds. - Weight::from_parts(117_468_000, 1185054) - .saturating_add(T::DbWeight::get().reads(37_u64)) - .saturating_add(T::DbWeight::get().writes(35_u64)) - } -} - -// For backwards compatibility and tests. 
-impl WeightInfo for () { - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::Round` (r:1 w:0) - /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) - /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:0 w:1) - /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `MaxEncodedLen`) - fn register_not_full() -> Weight { - // Proof Size summary in bytes: - // Measured: `3043` - // Estimated: `4118` - // Minimum execution time: 60_863_000 picoseconds. 
- Weight::from_parts(61_264_000, 4118) - .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - } - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `Balances::Holds` (r:2 w:2) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::Round` (r:1 w:0) - /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) - /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:2) - /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockSigned::SubmissionStorage` (r:32 w:32) - /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `MaxEncodedLen`) - fn register_eject() -> Weight { - // Proof Size summary in bytes: - // Measured: `7643` - // Estimated: `1185054` - // Minimum execution time: 149_207_000 picoseconds. 
- Weight::from_parts(151_520_000, 1185054) - .saturating_add(RocksDbWeight::get().reads(38_u64)) - .saturating_add(RocksDbWeight::get().writes(37_u64)) - } - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::Round` (r:1 w:0) - /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) - /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `MaxEncodedLen`) - /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockSigned::SubmissionStorage` (r:1 w:1) - /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `MaxEncodedLen`) - fn submit_page() -> Weight { - // Proof Size summary in bytes: - // Measured: `3459` - // Estimated: `37992` - // Minimum execution time: 707_404_000 picoseconds. 
- Weight::from_parts(752_393_000, 37992) - .saturating_add(RocksDbWeight::get().reads(5_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - } - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::Round` (r:1 w:0) - /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) - /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `MaxEncodedLen`) - /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockSigned::SubmissionStorage` (r:1 w:1) - /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `MaxEncodedLen`) - fn unset_page() -> Weight { - // Proof Size summary in bytes: - // Measured: `4287` - // Estimated: `37992` - // Minimum execution time: 716_769_000 picoseconds. 
- Weight::from_parts(761_406_000, 37992) - .saturating_add(RocksDbWeight::get().reads(5_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - } - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::Round` (r:1 w:0) - /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) - /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockSigned::SubmissionStorage` (r:32 w:32) - /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) - /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `MaxEncodedLen`) - /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `MaxEncodedLen`) - fn bail() -> Weight { - // Proof Size summary in bytes: - // Measured: `4508` - // Estimated: `1185054` - // Minimum execution time: 117_038_000 picoseconds. 
- Weight::from_parts(117_468_000, 1185054) - .saturating_add(RocksDbWeight::get().reads(37_u64)) - .saturating_add(RocksDbWeight::get().writes(35_u64)) - } -} diff --git a/substrate/frame/election-provider-multi-block/src/weights/mel/pallet_election_provider_multi_block_unsigned.rs b/substrate/frame/election-provider-multi-block/src/weights/mel/pallet_election_provider_multi_block_unsigned.rs deleted file mode 100644 index 7f05b13174a5b..0000000000000 --- a/substrate/frame/election-provider-multi-block/src/weights/mel/pallet_election_provider_multi_block_unsigned.rs +++ /dev/null @@ -1,151 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_election_provider_multi_block::unsigned` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2025-02-13, STEPS: `2`, REPEAT: `3`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `toaster1`, CPU: `AMD Ryzen Threadripper 7980X 64-Cores` -//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` - -// Executed Command: -// target/release/substrate-node -// benchmark -// pallet -// --chain -// dev -// --pallet -// pallet_election_provider_multi_block::unsigned -// --extrinsic -// all -// --steps -// 2 -// --repeat -// 3 -// --template -// substrate/.maintain/frame-weight-template.hbs -// --heap-pages -// 65000 -// --output -// ../mel - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] -#![allow(dead_code)] - -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use core::marker::PhantomData; - -/// Weight functions needed for `pallet_election_provider_multi_block::unsigned`. -pub trait WeightInfo { - fn validate_unsigned() -> Weight; - fn submit_unsigned() -> Weight; -} - -/// Weights for `pallet_election_provider_multi_block::unsigned` using the Substrate node and recommended hardware. -pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::Round` (r:1 w:0) - /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::QueuedSolutionScore` (r:1 w:0) - /// Proof: `MultiBlockVerifier::QueuedSolutionScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::MinimumScore` (r:1 w:0) - /// Proof: `MultiBlockVerifier::MinimumScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) - /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn validate_unsigned() -> Weight { - // Proof Size 
summary in bytes: - // Measured: `364` - // Estimated: `1533` - // Minimum execution time: 77_037_000 picoseconds. - Weight::from_parts(77_588_000, 1533) - .saturating_add(T::DbWeight::get().reads(5_u64)) - } - /// Storage: `MultiBlockVerifier::QueuedSolutionScore` (r:1 w:1) - /// Proof: `MultiBlockVerifier::QueuedSolutionScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::MinimumScore` (r:1 w:0) - /// Proof: `MultiBlockVerifier::MinimumScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0) - /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0) - /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) - /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) - /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::QueuedSolutionY` (r:0 w:1) - /// Proof: `MultiBlockVerifier::QueuedSolutionY` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `MaxEncodedLen`) - fn submit_unsigned() -> Weight { - // Proof Size summary in bytes: - // Measured: `157641` - // Estimated: `392238` - // Minimum execution time: 3_607_268_000 picoseconds. - Weight::from_parts(4_015_058_000, 392238) - .saturating_add(T::DbWeight::get().reads(6_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) - } -} - -// For backwards compatibility and tests. 
-impl WeightInfo for () { - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::Round` (r:1 w:0) - /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::QueuedSolutionScore` (r:1 w:0) - /// Proof: `MultiBlockVerifier::QueuedSolutionScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::MinimumScore` (r:1 w:0) - /// Proof: `MultiBlockVerifier::MinimumScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) - /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - fn validate_unsigned() -> Weight { - // Proof Size summary in bytes: - // Measured: `364` - // Estimated: `1533` - // Minimum execution time: 77_037_000 picoseconds. 
- Weight::from_parts(77_588_000, 1533) - .saturating_add(RocksDbWeight::get().reads(5_u64)) - } - /// Storage: `MultiBlockVerifier::QueuedSolutionScore` (r:1 w:1) - /// Proof: `MultiBlockVerifier::QueuedSolutionScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::MinimumScore` (r:1 w:0) - /// Proof: `MultiBlockVerifier::MinimumScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0) - /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0) - /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) - /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) - /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::QueuedSolutionY` (r:0 w:1) - /// Proof: `MultiBlockVerifier::QueuedSolutionY` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `MaxEncodedLen`) - fn submit_unsigned() -> Weight { - // Proof Size summary in bytes: - // Measured: `157641` - // Estimated: `392238` - // Minimum execution time: 3_607_268_000 picoseconds. 
- Weight::from_parts(4_015_058_000, 392238) - .saturating_add(RocksDbWeight::get().reads(6_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) - } -} diff --git a/substrate/frame/election-provider-multi-block/src/weights/mel/pallet_election_provider_multi_block_verifier.rs b/substrate/frame/election-provider-multi-block/src/weights/mel/pallet_election_provider_multi_block_verifier.rs deleted file mode 100644 index 55d359f5c283a..0000000000000 --- a/substrate/frame/election-provider-multi-block/src/weights/mel/pallet_election_provider_multi_block_verifier.rs +++ /dev/null @@ -1,359 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Autogenerated weights for `pallet_election_provider_multi_block::verifier` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2025-02-13, STEPS: `2`, REPEAT: `3`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `toaster1`, CPU: `AMD Ryzen Threadripper 7980X 64-Cores` -//! 
WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` - -// Executed Command: -// target/release/substrate-node -// benchmark -// pallet -// --chain -// dev -// --pallet -// pallet_election_provider_multi_block::verifier -// --extrinsic -// all -// --steps -// 2 -// --repeat -// 3 -// --template -// substrate/.maintain/frame-weight-template.hbs -// --heap-pages -// 65000 -// --output -// ../mel - -#![cfg_attr(rustfmt, rustfmt_skip)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] -#![allow(dead_code)] - -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use core::marker::PhantomData; - -/// Weight functions needed for `pallet_election_provider_multi_block::verifier`. -pub trait WeightInfo { - fn on_initialize_valid_non_terminal() -> Weight; - fn on_initialize_valid_terminal() -> Weight; - fn on_initialize_invalid_terminal() -> Weight; - fn on_initialize_invalid_non_terminal(v: u32, ) -> Weight; -} - -/// Weights for `pallet_election_provider_multi_block::verifier` using the Substrate node and recommended hardware. 
-pub struct SubstrateWeight(PhantomData); -impl WeightInfo for SubstrateWeight { - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1) - /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::Round` (r:1 w:0) - /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:0) - /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockSigned::SubmissionStorage` (r:1 w:0) - /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0) - /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0) - /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) - /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) - /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:0 w:1) - /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::QueuedSolutionBackings` 
(r:0 w:1) - /// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `MaxEncodedLen`) - fn on_initialize_valid_non_terminal() -> Weight { - // Proof Size summary in bytes: - // Measured: `160552` - // Estimated: `392238` - // Minimum execution time: 881_299_000 picoseconds. - Weight::from_parts(1_161_243_000, 392238) - .saturating_add(T::DbWeight::get().reads(9_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) - } - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1) - /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::Round` (r:1 w:0) - /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) - /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockSigned::SubmissionStorage` (r:32 w:32) - /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0) - /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0) - /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) - /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: 
`MultiBlockVerifier::QueuedValidVariant` (r:1 w:1) - /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:33 w:32) - /// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::QueuedSolutionScore` (r:1 w:1) - /// Proof: `MultiBlockVerifier::QueuedSolutionScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) - /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `MaxEncodedLen`) - /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:0 w:1) - /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `MaxEncodedLen`) - fn on_initialize_valid_terminal() -> Weight { - // Proof Size summary in bytes: - // Measured: `881924` - // Estimated: `1799127` - // Minimum execution time: 1_974_549_000 picoseconds. 
- Weight::from_parts(2_755_105_000, 1799127) - .saturating_add(T::DbWeight::get().reads(76_u64)) - .saturating_add(T::DbWeight::get().writes(71_u64)) - } - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1) - /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::Round` (r:1 w:0) - /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) - /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockSigned::SubmissionStorage` (r:32 w:32) - /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0) - /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0) - /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) - /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) - /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:33 w:32) - /// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 
54489, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) - /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `MaxEncodedLen`) - /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:31 w:32) - /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `MaxEncodedLen`) - fn on_initialize_invalid_terminal() -> Weight { - // Proof Size summary in bytes: - // Measured: `882945` - // Estimated: `192092149` - // Minimum execution time: 1_982_131_000 picoseconds. - Weight::from_parts(1_994_790_000, 192092149) - .saturating_add(T::DbWeight::get().reads(106_u64)) - .saturating_add(T::DbWeight::get().writes(100_u64)) - } - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1) - /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::Round` (r:1 w:0) - /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) - /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockSigned::SubmissionStorage` (r:32 w:32) - /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0) - /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: 
Some(32014), added: 34489, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0) - /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) - /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) - /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:31 w:31) - /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:31 w:31) - /// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) - /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `MaxEncodedLen`) - /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `MaxEncodedLen`) - /// The range of component `v` is `[0, 31]`. - fn on_initialize_invalid_non_terminal(v: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `164728 + v * (8538 ±0)` - // Estimated: `1185054 + v * (6190080 ±0)` - // Minimum execution time: 574_462_000 picoseconds. 
- Weight::from_parts(575_951_333, 1185054) - // Standard Error: 975_598 - .saturating_add(Weight::from_parts(9_099_741, 0).saturating_mul(v.into())) - .saturating_add(T::DbWeight::get().reads(42_u64)) - .saturating_add(T::DbWeight::get().reads((2_u64).saturating_mul(v.into()))) - .saturating_add(T::DbWeight::get().writes(36_u64)) - .saturating_add(T::DbWeight::get().writes((2_u64).saturating_mul(v.into()))) - .saturating_add(Weight::from_parts(0, 6190080).saturating_mul(v.into())) - } -} - -// For backwards compatibility and tests. -impl WeightInfo for () { - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1) - /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::Round` (r:1 w:0) - /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:0) - /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockSigned::SubmissionStorage` (r:1 w:0) - /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0) - /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0) - /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) - /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, 
mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) - /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:0 w:1) - /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:0 w:1) - /// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `MaxEncodedLen`) - fn on_initialize_valid_non_terminal() -> Weight { - // Proof Size summary in bytes: - // Measured: `160552` - // Estimated: `392238` - // Minimum execution time: 881_299_000 picoseconds. - Weight::from_parts(1_161_243_000, 392238) - .saturating_add(RocksDbWeight::get().reads(9_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - } - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1) - /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::Round` (r:1 w:0) - /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) - /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockSigned::SubmissionStorage` (r:32 w:32) - /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0) - /// Proof: `MultiBlock::PagedTargetSnapshot` 
(`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0) - /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) - /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:1) - /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:33 w:32) - /// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::QueuedSolutionScore` (r:1 w:1) - /// Proof: `MultiBlockVerifier::QueuedSolutionScore` (`max_values`: Some(1), `max_size`: Some(48), added: 543, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) - /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `MaxEncodedLen`) - /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:0 w:1) - /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `MaxEncodedLen`) - fn on_initialize_valid_terminal() -> Weight { - // Proof Size summary in bytes: - // Measured: `881924` - // Estimated: `1799127` - // Minimum execution time: 1_974_549_000 picoseconds. 
- Weight::from_parts(2_755_105_000, 1799127) - .saturating_add(RocksDbWeight::get().reads(76_u64)) - .saturating_add(RocksDbWeight::get().writes(71_u64)) - } - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1) - /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::Round` (r:1 w:0) - /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) - /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockSigned::SubmissionStorage` (r:32 w:32) - /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0) - /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: Some(32014), added: 34489, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0) - /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) - /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) - /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:33 w:32) - /// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 
54489, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) - /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `MaxEncodedLen`) - /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:31 w:32) - /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `MaxEncodedLen`) - fn on_initialize_invalid_terminal() -> Weight { - // Proof Size summary in bytes: - // Measured: `882945` - // Estimated: `192092149` - // Minimum execution time: 1_982_131_000 picoseconds. - Weight::from_parts(1_994_790_000, 192092149) - .saturating_add(RocksDbWeight::get().reads(106_u64)) - .saturating_add(RocksDbWeight::get().writes(100_u64)) - } - /// Storage: `MultiBlock::CurrentPhase` (r:1 w:0) - /// Proof: `MultiBlock::CurrentPhase` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::StatusStorage` (r:1 w:1) - /// Proof: `MultiBlockVerifier::StatusStorage` (`max_values`: Some(1), `max_size`: Some(5), added: 500, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::Round` (r:1 w:0) - /// Proof: `MultiBlock::Round` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockSigned::SortedScores` (r:1 w:1) - /// Proof: `MultiBlockSigned::SortedScores` (`max_values`: None, `max_size`: Some(653), added: 3128, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockSigned::SubmissionStorage` (r:32 w:32) - /// Proof: `MultiBlockSigned::SubmissionStorage` (`max_values`: None, `max_size`: Some(34527), added: 37002, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::PagedTargetSnapshot` (r:1 w:0) - /// Proof: `MultiBlock::PagedTargetSnapshot` (`max_values`: None, `max_size`: 
Some(32014), added: 34489, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::PagedVoterSnapshot` (r:1 w:0) - /// Proof: `MultiBlock::PagedVoterSnapshot` (`max_values`: None, `max_size`: Some(388773), added: 391248, mode: `MaxEncodedLen`) - /// Storage: `MultiBlock::DesiredTargets` (r:1 w:0) - /// Proof: `MultiBlock::DesiredTargets` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::QueuedValidVariant` (r:1 w:0) - /// Proof: `MultiBlockVerifier::QueuedValidVariant` (`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::QueuedSolutionX` (r:31 w:31) - /// Proof: `MultiBlockVerifier::QueuedSolutionX` (`max_values`: None, `max_size`: Some(6194014), added: 6196489, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockVerifier::QueuedSolutionBackings` (r:31 w:31) - /// Proof: `MultiBlockVerifier::QueuedSolutionBackings` (`max_values`: None, `max_size`: Some(52014), added: 54489, mode: `MaxEncodedLen`) - /// Storage: `MultiBlockSigned::SubmissionMetadataStorage` (r:1 w:1) - /// Proof: `MultiBlockSigned::SubmissionMetadataStorage` (`max_values`: None, `max_size`: Some(181), added: 2656, mode: `MaxEncodedLen`) - /// Storage: `Balances::Holds` (r:1 w:1) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(427), added: 2902, mode: `MaxEncodedLen`) - /// The range of component `v` is `[0, 31]`. - fn on_initialize_invalid_non_terminal(v: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `164728 + v * (8538 ±0)` - // Estimated: `1185054 + v * (6190080 ±0)` - // Minimum execution time: 574_462_000 picoseconds. 
- Weight::from_parts(575_951_333, 1185054) - // Standard Error: 975_598 - .saturating_add(Weight::from_parts(9_099_741, 0).saturating_mul(v.into())) - .saturating_add(RocksDbWeight::get().reads(42_u64)) - .saturating_add(RocksDbWeight::get().reads((2_u64).saturating_mul(v.into()))) - .saturating_add(RocksDbWeight::get().writes(36_u64)) - .saturating_add(RocksDbWeight::get().writes((2_u64).saturating_mul(v.into()))) - .saturating_add(Weight::from_parts(0, 6190080).saturating_mul(v.into())) - } -} diff --git a/substrate/frame/election-provider-multi-block/src/weights/mod.rs b/substrate/frame/election-provider-multi-block/src/weights/mod.rs deleted file mode 100644 index 89b3960098443..0000000000000 --- a/substrate/frame/election-provider-multi-block/src/weights/mod.rs +++ /dev/null @@ -1,22 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#![allow(unused)] -pub(crate) mod measured; -pub(crate) mod mel; -pub(crate) mod zero; -pub use zero::AllZeroWeights; diff --git a/substrate/frame/election-provider-multi-block/src/weights/zero.rs b/substrate/frame/election-provider-multi-block/src/weights/zero.rs deleted file mode 100644 index 38210adde7cc9..0000000000000 --- a/substrate/frame/election-provider-multi-block/src/weights/zero.rs +++ /dev/null @@ -1,89 +0,0 @@ -// This file is part of Substrate. 
- -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! A set of zero weights for all benchmarks of this pallet to be temporarily used in testing -//! runtimes while benchmarking is being finalized. - -/// A `WeightInfo` impl with all zero weights -pub struct AllZeroWeights; -use frame_support::weights::Weight; - -impl crate::WeightInfo for AllZeroWeights { - fn manage() -> Weight { - Default::default() - } - fn on_initialize_into_signed() -> Weight { - Default::default() - } - fn on_initialize_into_signed_validation() -> Weight { - Default::default() - } - fn on_initialize_into_snapshot_msp() -> Weight { - Default::default() - } - fn on_initialize_into_snapshot_rest() -> Weight { - Default::default() - } - fn on_initialize_into_unsigned() -> Weight { - Default::default() - } - fn on_initialize_nothing() -> Weight { - Default::default() - } -} - -impl crate::signed::WeightInfo for AllZeroWeights { - fn bail() -> Weight { - Default::default() - } - fn register_eject() -> Weight { - Default::default() - } - fn register_not_full() -> Weight { - Default::default() - } - fn submit_page() -> Weight { - Default::default() - } - fn unset_page() -> Weight { - Default::default() - } -} - -impl crate::unsigned::WeightInfo for AllZeroWeights { - fn submit_unsigned() -> Weight { - Default::default() - } - fn validate_unsigned() -> Weight { - Default::default() - } -} - -impl 
crate::verifier::WeightInfo for AllZeroWeights { - fn on_initialize_invalid_non_terminal(_: u32) -> Weight { - Default::default() - } - fn on_initialize_invalid_terminal() -> Weight { - Default::default() - } - fn on_initialize_valid_non_terminal() -> Weight { - Default::default() - } - fn on_initialize_valid_terminal() -> Weight { - Default::default() - } -} diff --git a/substrate/frame/election-provider-multi-phase/src/benchmarking.rs b/substrate/frame/election-provider-multi-phase/src/benchmarking.rs index 20984f11a447b..222e79ab99c6c 100644 --- a/substrate/frame/election-provider-multi-phase/src/benchmarking.rs +++ b/substrate/frame/election-provider-multi-phase/src/benchmarking.rs @@ -197,7 +197,6 @@ mod benchmarks { #[benchmark] fn on_initialize_nothing() { - T::DataProvider::set_next_election(sp_runtime::traits::Bounded::max_value()); assert!(CurrentPhase::::get().is_off()); #[block] @@ -289,11 +288,9 @@ mod benchmarks { ) -> Result<(), BenchmarkError> { // We don't directly need the data-provider to be populated, but it is just easy to use it. set_up_data_provider::(v, t); - // default bounds are unbounded. - let targets = - T::DataProvider::electable_targets(DataProviderBounds::default(), Zero::zero())?; - let voters = T::DataProvider::electing_voters(DataProviderBounds::default(), Zero::zero())?; - + // Default bounds are unbounded. 
+ let targets = T::DataProvider::electable_targets(DataProviderBounds::default())?; + let voters = T::DataProvider::electing_voters(DataProviderBounds::default())?; let desired_targets = T::DataProvider::desired_targets()?; assert!(Snapshot::::get().is_none()); @@ -303,9 +300,8 @@ mod benchmarks { } assert!(Snapshot::::get().is_some()); - // TODO: bring this back - // assert_eq!(SnapshotMetadata::::get().ok_or("metadata missing")?.voters, v); - // assert_eq!(SnapshotMetadata::::get().ok_or("metadata missing")?.targets, t); + assert_eq!(SnapshotMetadata::::get().ok_or("metadata missing")?.voters, v); + assert_eq!(SnapshotMetadata::::get().ok_or("metadata missing")?.targets, t); Ok(()) } @@ -347,7 +343,7 @@ mod benchmarks { #[block] { - result = as ElectionProvider>::elect(Zero::zero()); + result = as ElectionProvider>::elect(); } assert!(result.is_ok()); @@ -535,9 +531,8 @@ mod benchmarks { } assert!(Snapshot::::get().is_some()); - // TODO: bring this back - // assert_eq!(SnapshotMetadata::::get().ok_or("snapshot missing")?.voters, v); - // assert_eq!(SnapshotMetadata::::get().ok_or("snapshot missing")?.targets, t); + assert_eq!(SnapshotMetadata::::get().ok_or("snapshot missing")?.voters, v); + assert_eq!(SnapshotMetadata::::get().ok_or("snapshot missing")?.targets, t); Ok(()) } diff --git a/substrate/frame/election-provider-multi-phase/src/lib.rs b/substrate/frame/election-provider-multi-phase/src/lib.rs index c0e256c3e6525..1b43385bd6bc4 100644 --- a/substrate/frame/election-provider-multi-phase/src/lib.rs +++ b/substrate/frame/election-provider-multi-phase/src/lib.rs @@ -189,18 +189,6 @@ //! Note that there could be an overlap between these sub-errors. For example, A //! `SnapshotUnavailable` can happen in both miner and feasibility check phase. //! -//! ## Multi-page election support -//! -//! The [`frame_election_provider_support::ElectionDataProvider`] and -//! [`frame_election_provider_support::ElectionProvider`] traits used by this pallet can support a -//! 
multi-page election. -//! -//! However, this pallet only supports single-page election and data -//! provider and all the relevant trait implementation and configurations reflect that assumption. -//! -//! If external callers request the election of a page index higher than 0, the election will fail -//! with [`ElectionError::MultiPageNotSupported`]. -//! //! ## Future Plans //! //! **Emergency-phase recovery script**: This script should be taken out of staking-miner in @@ -246,14 +234,14 @@ extern crate alloc; use alloc::{boxed::Box, vec::Vec}; use codec::{Decode, DecodeWithMemTracking, Encode}; use frame_election_provider_support::{ - bounds::{CountBound, ElectionBounds, SizeBound}, - BoundedSupports, BoundedSupportsOf, ElectionDataProvider, ElectionProvider, - InstantElectionProvider, NposSolution, PageIndex, + bounds::{CountBound, ElectionBounds, ElectionBoundsBuilder, SizeBound}, + BoundedSupportsOf, DataProviderBounds, ElectionDataProvider, ElectionProvider, + ElectionProviderBase, InstantElectionProvider, NposSolution, }; use frame_support::{ dispatch::DispatchClass, ensure, - traits::{Currency, Get, OnUnbalanced, ReservableCurrency}, + traits::{Currency, DefensiveResult, Get, OnUnbalanced, ReservableCurrency}, weights::Weight, DefaultNoBound, EqNoBound, PartialEqNoBound, }; @@ -263,7 +251,7 @@ use sp_arithmetic::{ traits::{CheckedAdd, Zero}, UpperOf, }; -use sp_npos_elections::{ElectionScore, IdentifierT, Supports, VoteWeight}; +use sp_npos_elections::{BoundedSupports, ElectionScore, IdentifierT, Supports, VoteWeight}; use sp_runtime::{ transaction_validity::{ InvalidTransaction, TransactionPriority, TransactionSource, TransactionValidity, @@ -282,8 +270,6 @@ mod mock; #[macro_use] pub mod helpers; -/// This pallet only supports a single page election flow. 
-pub(crate) const SINGLE_PAGE: u32 = 0; const LOG_TARGET: &str = "runtime::election-provider"; pub mod migrations; @@ -301,6 +287,7 @@ pub use weights::WeightInfo; /// The solution type used by this crate. pub type SolutionOf = ::Solution; + /// The voter index. Derived from [`SolutionOf`]. pub type SolutionVoterIndexOf = as NposSolution>::VoterIndex; /// The target index. Derived from [`SolutionOf`]. @@ -308,14 +295,8 @@ pub type SolutionTargetIndexOf = as NposSolution>::TargetIndex /// The accuracy of the election, when submitted from offchain. Derived from [`SolutionOf`]. pub type SolutionAccuracyOf = ::MinerConfig> as NposSolution>::Accuracy; -/// A ready solution parameterized with this pallet's miner config. -pub type ReadySolutionOf = ReadySolution< - ::AccountId, - ::MaxWinners, - ::MaxBackersPerWinner, ->; /// The fallback election type. -pub type FallbackErrorOf = <::Fallback as ElectionProvider>::Error; +pub type FallbackErrorOf = <::Fallback as ElectionProviderBase>::Error; /// Configuration for the benchmarks of the pallet. pub trait BenchmarkingConfig { @@ -463,18 +444,17 @@ impl Default for RawSolution { DefaultNoBound, scale_info::TypeInfo, )] -#[scale_info(skip_type_params(AccountId, MaxWinners, MaxBackersPerWinner))] -pub struct ReadySolution +#[scale_info(skip_type_params(AccountId, MaxWinners))] +pub struct ReadySolution where AccountId: IdentifierT, MaxWinners: Get, - MaxBackersPerWinner: Get, { /// The final supports of the solution. /// /// This is target-major vector, storing each winners, total backing, and each individual /// backer. - pub supports: BoundedSupports, + pub supports: BoundedSupports, /// The score of the solution. /// /// This is needed to potentially challenge the solution. @@ -527,15 +507,13 @@ pub enum ElectionError { DataProvider(&'static str), /// An error nested in the fallback. Fallback(FallbackErrorOf), - /// An error occurred when requesting an election result. 
The caller expects a multi-paged - /// election, which this pallet does not support. - MultiPageNotSupported, /// No solution has been queued. NothingQueued, } // NOTE: we have to do this manually because of the additional where clause needed on // `FallbackErrorOf`. +#[cfg(test)] impl PartialEq for ElectionError where FallbackErrorOf: PartialEq, @@ -547,7 +525,6 @@ where (Miner(x), Miner(y)) if x == y => true, (DataProvider(x), DataProvider(y)) if x == y => true, (Fallback(x), Fallback(y)) if x == y => true, - (MultiPageNotSupported, MultiPageNotSupported) => true, _ => false, } } @@ -652,7 +629,6 @@ pub mod pallet { AccountId = Self::AccountId, MaxVotesPerVoter = ::MaxVotesPerVoter, MaxWinners = Self::MaxWinners, - MaxBackersPerWinner = Self::MaxBackersPerWinner, >; /// Maximum number of signed submissions that can be queued. @@ -689,23 +665,20 @@ pub mod pallet { #[pallet::constant] type SignedDepositWeight: Get>; - /// Maximum number of winners that an election supports. + /// The maximum number of winners that can be elected by this `ElectionProvider` + /// implementation. /// /// Note: This must always be greater or equal to `T::DataProvider::desired_targets()`. #[pallet::constant] type MaxWinners: Get; - /// Maximum number of voters that can support a winner in an election solution. - /// - /// This is needed to ensure election computation is bounded. - #[pallet::constant] - type MaxBackersPerWinner: Get; - /// Something that calculates the signed deposit base based on the signed submissions queue /// size. type SignedDepositBase: Convert>; /// The maximum number of electing voters and electable targets to put in the snapshot. + /// At the moment, snapshots are only over a single block, but once multi-block elections + /// are introduced they will take place over multiple blocks. type ElectionBounds: Get; /// Handler for the slashed deposits. 
@@ -725,8 +698,7 @@ pub mod pallet { AccountId = Self::AccountId, BlockNumber = BlockNumberFor, DataProvider = Self::DataProvider, - MaxBackersPerWinner = Self::MaxBackersPerWinner, - MaxWinnersPerPage = Self::MaxWinners, + MaxWinners = Self::MaxWinners, >; /// Configuration of the governance-only fallback. @@ -737,8 +709,7 @@ pub mod pallet { AccountId = Self::AccountId, BlockNumber = BlockNumberFor, DataProvider = Self::DataProvider, - MaxWinnersPerPage = Self::MaxWinners, - MaxBackersPerWinner = Self::MaxBackersPerWinner, + MaxWinners = Self::MaxWinners, >; /// OCW election solution miner algorithm implementation. @@ -792,10 +763,9 @@ pub mod pallet { log!( trace, - "current phase {:?}, next election {:?}, queued? {:?}, metadata: {:?}", + "current phase {:?}, next election {:?}, metadata: {:?}", current_phase, next_election, - QueuedSolution::::get().map(|rs| (rs.supports.len(), rs.compute, rs.score)), SnapshotMetadata::::get() ); match current_phase { @@ -1021,9 +991,8 @@ pub mod pallet { T::ForceOrigin::ensure_origin(origin)?; ensure!(CurrentPhase::::get().is_emergency(), Error::::CallNotAllowed); - // bound supports with T::MaxWinners. - let supports: BoundedSupportsOf = - supports.try_into().map_err(|_| Error::::TooManyWinners)?; + // bound supports with T::MaxWinners + let supports = supports.try_into().map_err(|_| Error::::TooManyWinners)?; // Note: we don't `rotate_round` at this point; the next call to // `ElectionProvider::elect` will succeed and take care of that. @@ -1126,21 +1095,35 @@ pub mod pallet { /// calling [`Call::set_emergency_election_result`]. 
#[pallet::call_index(4)] #[pallet::weight(T::DbWeight::get().reads_writes(1, 1))] - pub fn governance_fallback(origin: OriginFor) -> DispatchResult { + pub fn governance_fallback( + origin: OriginFor, + maybe_max_voters: Option, + maybe_max_targets: Option, + ) -> DispatchResult { T::ForceOrigin::ensure_origin(origin)?; ensure!(CurrentPhase::::get().is_emergency(), Error::::CallNotAllowed); - let RoundSnapshot { voters, targets } = - Snapshot::::get().ok_or(Error::::MissingSnapshotMetadata)?; - let desired_targets = - DesiredTargets::::get().ok_or(Error::::MissingSnapshotMetadata)?; + let election_bounds = ElectionBoundsBuilder::default() + .voters_count(maybe_max_voters.unwrap_or(u32::MAX).into()) + .targets_count(maybe_max_targets.unwrap_or(u32::MAX).into()) + .build(); - let supports = T::GovernanceFallback::instant_elect(voters, targets, desired_targets) - .map_err(|e| { + let supports = T::GovernanceFallback::instant_elect( + election_bounds.voters, + election_bounds.targets, + ) + .map_err(|e| { log!(error, "GovernanceFallback failed: {:?}", e); Error::::FallbackFailed })?; + // transform BoundedVec<_, T::GovernanceFallback::MaxWinners> into + // `BoundedVec<_, T::MaxWinners>` + let supports: BoundedVec<_, T::MaxWinners> = supports + .into_inner() + .try_into() + .defensive_map_err(|_| Error::::BoundNotMet)?; + let solution = ReadySolution { supports, score: Default::default(), @@ -1295,7 +1278,8 @@ pub mod pallet { /// /// Always sorted by score. #[pallet::storage] - pub type QueuedSolution = StorageValue<_, ReadySolutionOf>; + pub type QueuedSolution = + StorageValue<_, ReadySolution>; /// Snapshot data of the round. /// @@ -1427,7 +1411,7 @@ impl Pallet { /// Current best solution, signed or unsigned, queued to be returned upon `elect`. /// /// Always sorted by score. 
- pub fn queued_solution() -> Option> { + pub fn queued_solution() -> Option> { QueuedSolution::::get() } @@ -1533,12 +1517,11 @@ impl Pallet { /// Parts of [`create_snapshot`] that happen outside of this pallet. /// /// Extracted for easier weight calculation. - /// - /// Note: this pallet only supports one page of voter and target snapshots. fn create_snapshot_external( ) -> Result<(Vec, Vec>, u32), ElectionError> { let election_bounds = T::ElectionBounds::get(); - let targets = T::DataProvider::electable_targets_stateless(election_bounds.targets) + + let targets = T::DataProvider::electable_targets(election_bounds.targets) .and_then(|t| { election_bounds.ensure_targets_limits( CountBound(t.len() as u32), @@ -1548,7 +1531,7 @@ impl Pallet { }) .map_err(ElectionError::DataProvider)?; - let voters = T::DataProvider::electing_voters_stateless(election_bounds.voters) + let voters = T::DataProvider::electing_voters(election_bounds.voters) .and_then(|v| { election_bounds.ensure_voters_limits( CountBound(v.len() as u32), @@ -1558,7 +1541,7 @@ impl Pallet { }) .map_err(ElectionError::DataProvider)?; - let mut desired_targets = as ElectionProvider>::desired_targets_checked() + let mut desired_targets = as ElectionProviderBase>::desired_targets_checked() .map_err(|e| ElectionError::DataProvider(e))?; // If `desired_targets` > `targets.len()`, cap `desired_targets` to that level and emit a @@ -1613,7 +1596,7 @@ impl Pallet { pub fn feasibility_check( raw_solution: RawSolution>, compute: ElectionCompute, - ) -> Result, FeasibilityError> { + ) -> Result, FeasibilityError> { let desired_targets = DesiredTargets::::get().ok_or(FeasibilityError::SnapshotUnavailable)?; @@ -1660,42 +1643,40 @@ impl Pallet { QueuedSolution::::take() .ok_or(ElectionError::::NothingQueued) .or_else(|_| { - log!(warn, "No solution queued, falling back to instant fallback.",); - let (voters, targets, desired_targets) = if T::Fallback::bother() { - let RoundSnapshot { voters, targets } = 
Snapshot::::get().ok_or( - ElectionError::::Feasibility(FeasibilityError::SnapshotUnavailable), - )?; - let desired_targets = DesiredTargets::::get().ok_or( - ElectionError::::Feasibility(FeasibilityError::SnapshotUnavailable), - )?; - (voters, targets, desired_targets) - } else { - (Default::default(), Default::default(), Default::default()) - }; - T::Fallback::instant_elect(voters, targets, desired_targets) - .map_err(|fe| ElectionError::Fallback(fe)) - .and_then(|supports| { - Ok(ReadySolution { - supports, - score: Default::default(), - compute: ElectionCompute::Fallback, - }) + // default data provider bounds are unbounded. calling `instant_elect` with + // unbounded data provider bounds means that the on-chain `T:Bounds` configs will + // *not* be overwritten. + T::Fallback::instant_elect( + DataProviderBounds::default(), + DataProviderBounds::default(), + ) + .map_err(|fe| ElectionError::Fallback(fe)) + .and_then(|supports| { + Ok(ReadySolution { + supports, + score: Default::default(), + compute: ElectionCompute::Fallback, }) + }) }) .map(|ReadySolution { compute, score, supports }| { Self::deposit_event(Event::ElectionFinalized { compute, score }); - log!(info, "Finalized election round with compute {:?}.", compute); + if Round::::get() != 1 { + log!(info, "Finalized election round with compute {:?}.", compute); + } supports }) .map_err(|err| { Self::deposit_event(Event::ElectionFailed); - log!(warn, "Failed to finalize election round. reason {:?}", err); + if Round::::get() != 1 { + log!(warn, "Failed to finalize election round. reason {:?}", err); + } err }) } /// record the weight of the given `supports`. 
- fn weigh_supports(supports: &BoundedSupportsOf) { + fn weigh_supports(supports: &Supports) { let active_voters = supports .iter() .map(|(_, x)| x) @@ -1787,41 +1768,35 @@ impl Pallet { } } -impl ElectionProvider for Pallet { +impl ElectionProviderBase for Pallet { type AccountId = T::AccountId; type BlockNumber = BlockNumberFor; type Error = ElectionError; - type MaxWinnersPerPage = T::MaxWinners; - type MaxBackersPerWinner = T::MaxBackersPerWinner; - type Pages = sp_core::ConstU32<1>; + type MaxWinners = T::MaxWinners; type DataProvider = T::DataProvider; +} - fn elect(page: PageIndex) -> Result, Self::Error> { - // Note: this pallet **MUST** only by used in the single-page mode. - ensure!(page == SINGLE_PAGE, ElectionError::::MultiPageNotSupported); +impl ElectionProvider for Pallet { + fn ongoing() -> bool { + match CurrentPhase::::get() { + Phase::Off => false, + _ => true, + } + } - let res = match Self::do_elect() { - Ok(bounded_supports) => { + fn elect() -> Result, Self::Error> { + match Self::do_elect() { + Ok(supports) => { // All went okay, record the weight, put sign to be Off, clean snapshot, etc. - Self::weigh_supports(&bounded_supports); + Self::weigh_supports(&supports); Self::rotate_round(); - Ok(bounded_supports) + Ok(supports) }, Err(why) => { log!(error, "Entering emergency mode: {:?}", why); Self::phase_transition(Phase::Emergency); Err(why) }, - }; - - log!(info, "ElectionProvider::elect({}) => {:?}", page, res.as_ref().map(|s| s.len())); - res - } - - fn ongoing() -> bool { - match CurrentPhase::::get() { - Phase::Off => false, - _ => true, } } } @@ -1841,6 +1816,7 @@ mod feasibility_check { //! All of the tests here should be dedicated to only testing the feasibility check and nothing //! more. The best way to audit and review these tests is to try and come up with a solution //! that is invalid, but gets through the system as valid. 
+ use super::*; use crate::mock::{ raw_solution, roll_to, EpochLength, ExtBuilder, MultiPhase, Runtime, SignedPhase, @@ -2044,7 +2020,6 @@ mod tests { }, Phase, }; - use frame_election_provider_support::bounds::ElectionBoundsBuilder; use frame_support::{assert_noop, assert_ok}; use sp_npos_elections::{BalancingConfig, Support}; @@ -2106,7 +2081,7 @@ mod tests { assert_eq!(CurrentPhase::::get(), Phase::Unsigned((true, 25))); assert!(Snapshot::::get().is_some()); - assert_ok!(MultiPhase::elect(SINGLE_PAGE)); + assert_ok!(MultiPhase::elect()); assert!(CurrentPhase::::get().is_off()); assert!(Snapshot::::get().is_none()); @@ -2170,7 +2145,7 @@ mod tests { roll_to(30); assert!(CurrentPhase::::get().is_unsigned_open_at(20)); - assert_ok!(MultiPhase::elect(SINGLE_PAGE)); + assert_ok!(MultiPhase::elect()); assert!(CurrentPhase::::get().is_off()); assert!(Snapshot::::get().is_none()); @@ -2217,7 +2192,7 @@ mod tests { roll_to(30); assert!(CurrentPhase::::get().is_signed()); - assert_ok!(MultiPhase::elect(SINGLE_PAGE)); + assert_ok!(MultiPhase::elect()); assert!(CurrentPhase::::get().is_off()); assert!(Snapshot::::get().is_none()); @@ -2255,20 +2230,23 @@ mod tests { roll_to(30); assert!(CurrentPhase::::get().is_off()); - // This module is now cannot even do onchain fallback, as no snapshot is there - assert_eq!( - MultiPhase::elect(SINGLE_PAGE), - Err(ElectionError::::Feasibility(FeasibilityError::SnapshotUnavailable)) - ); + // This module is now only capable of doing on-chain backup. + assert_ok!(MultiPhase::elect()); - // this puts us in emergency now. 
- assert!(CurrentPhase::::get().is_emergency()); + assert!(CurrentPhase::::get().is_off()); assert_eq!( multi_phase_events(), vec![ - Event::ElectionFailed, - Event::PhaseTransitioned { from: Phase::Off, to: Phase::Emergency, round: 1 } + Event::ElectionFinalized { + compute: ElectionCompute::Fallback, + score: ElectionScore { + minimal_stake: 0, + sum_stake: 0, + sum_stake_squared: 0 + } + }, + Event::PhaseTransitioned { from: Phase::Off, to: Phase::Off, round: 2 }, ] ); }); @@ -2289,7 +2267,7 @@ mod tests { assert_eq!(Round::::get(), 1); // An unexpected call to elect. - assert_ok!(MultiPhase::elect(SINGLE_PAGE)); + assert_ok!(MultiPhase::elect()); // We surely can't have any feasible solutions. This will cause an on-chain election. assert_eq!( @@ -2340,7 +2318,7 @@ mod tests { } // an unexpected call to elect. - assert_ok!(MultiPhase::elect(SINGLE_PAGE)); + assert_ok!(MultiPhase::elect()); // all storage items must be cleared. assert_eq!(Round::::get(), 2); @@ -2411,7 +2389,7 @@ mod tests { )); roll_to(30); - assert_ok!(MultiPhase::elect(SINGLE_PAGE)); + assert_ok!(MultiPhase::elect()); assert_eq!( multi_phase_events(), @@ -2468,7 +2446,7 @@ mod tests { )); assert!(QueuedSolution::::get().is_some()); - assert_ok!(MultiPhase::elect(SINGLE_PAGE)); + assert_ok!(MultiPhase::elect()); assert_eq!( multi_phase_events(), @@ -2502,35 +2480,6 @@ mod tests { }) } - #[test] - fn try_elect_multi_page_fails() { - let prepare_election = || { - roll_to_signed(); - assert!(Snapshot::::get().is_some()); - - // submit solution and assert it is queued and ready for elect to be called. - let (solution, _, _) = MultiPhase::mine_solution().unwrap(); - assert_ok!(MultiPhase::submit( - crate::mock::RuntimeOrigin::signed(99), - Box::new(solution), - )); - roll_to(30); - assert!(QueuedSolution::::get().is_some()); - }; - - ExtBuilder::default().onchain_fallback(false).build_and_execute(|| { - prepare_election(); - // single page elect call works as expected. 
- assert_ok!(MultiPhase::elect(SINGLE_PAGE)); - }); - - ExtBuilder::default().onchain_fallback(false).build_and_execute(|| { - prepare_election(); - // multi page calls will fail with multipage not supported error. - assert_noop!(MultiPhase::elect(SINGLE_PAGE + 1), ElectionError::MultiPageNotSupported); - }) - } - #[test] fn fallback_strategy_works() { ExtBuilder::default().onchain_fallback(true).build_and_execute(|| { @@ -2539,16 +2488,15 @@ mod tests { // Zilch solutions thus far, but we get a result. assert!(QueuedSolution::::get().is_none()); - let supports = MultiPhase::elect(SINGLE_PAGE).unwrap(); + let supports = MultiPhase::elect().unwrap(); - let expected_supports = vec![ - (30, Support { total: 40, voters: vec![(2, 5), (4, 5), (30, 30)] }), - (40, Support { total: 60, voters: vec![(2, 5), (3, 10), (4, 5), (40, 40)] }), - ] - .try_into() - .unwrap(); - - assert_eq!(supports, expected_supports); + assert_eq!( + supports, + vec![ + (30, Support { total: 40, voters: vec![(2, 5), (4, 5), (30, 30)] }), + (40, Support { total: 60, voters: vec![(2, 5), (3, 10), (4, 5), (40, 40)] }) + ] + ); assert_eq!( multi_phase_events(), @@ -2582,10 +2530,7 @@ mod tests { // Zilch solutions thus far. assert!(QueuedSolution::::get().is_none()); - assert_eq!( - MultiPhase::elect(SINGLE_PAGE).unwrap_err(), - ElectionError::Fallback("NoFallback.") - ); + assert_eq!(MultiPhase::elect().unwrap_err(), ElectionError::Fallback("NoFallback.")); // phase is now emergency. assert_eq!(CurrentPhase::::get(), Phase::Emergency); // snapshot is still there until election finalizes. @@ -2619,10 +2564,7 @@ mod tests { // Zilch solutions thus far. assert!(QueuedSolution::::get().is_none()); - assert_eq!( - MultiPhase::elect(SINGLE_PAGE).unwrap_err(), - ElectionError::Fallback("NoFallback.") - ); + assert_eq!(MultiPhase::elect().unwrap_err(), ElectionError::Fallback("NoFallback.")); // phase is now emergency. 
assert_eq!(CurrentPhase::::get(), Phase::Emergency); @@ -2631,16 +2573,16 @@ mod tests { // no single account can trigger this assert_noop!( - MultiPhase::governance_fallback(RuntimeOrigin::signed(99)), + MultiPhase::governance_fallback(RuntimeOrigin::signed(99), None, None), DispatchError::BadOrigin ); // only root can - assert_ok!(MultiPhase::governance_fallback(RuntimeOrigin::root())); + assert_ok!(MultiPhase::governance_fallback(RuntimeOrigin::root(), None, None)); // something is queued now assert!(QueuedSolution::::get().is_some()); // next election call with fix everything.; - assert!(MultiPhase::elect(SINGLE_PAGE).is_ok()); + assert!(MultiPhase::elect().is_ok()); assert_eq!(CurrentPhase::::get(), Phase::Off); assert_eq!( @@ -2691,17 +2633,22 @@ mod tests { roll_to(25); assert_eq!(CurrentPhase::::get(), Phase::Off); - // On-chain backup will fail similarly. - assert_eq!( - MultiPhase::elect(SINGLE_PAGE).unwrap_err(), - ElectionError::::Feasibility(FeasibilityError::SnapshotUnavailable) - ); + // On-chain backup works though. 
+ let supports = MultiPhase::elect().unwrap(); + assert!(supports.len() > 0); assert_eq!( multi_phase_events(), vec![ - Event::ElectionFailed, - Event::PhaseTransitioned { from: Phase::Off, to: Phase::Emergency, round: 1 }, + Event::ElectionFinalized { + compute: ElectionCompute::Fallback, + score: ElectionScore { + minimal_stake: 0, + sum_stake: 0, + sum_stake_squared: 0 + } + }, + Event::PhaseTransitioned { from: Phase::Off, to: Phase::Off, round: 2 }, ] ); }); @@ -2726,7 +2673,7 @@ mod tests { assert_eq!(CurrentPhase::::get(), Phase::Off); roll_to(29); - let err = MultiPhase::elect(SINGLE_PAGE).unwrap_err(); + let err = MultiPhase::elect().unwrap_err(); assert_eq!(err, ElectionError::Fallback("NoFallback.")); assert_eq!(CurrentPhase::::get(), Phase::Emergency); diff --git a/substrate/frame/election-provider-multi-phase/src/mock.rs b/substrate/frame/election-provider-multi-phase/src/mock.rs index d244af0b40394..d0797e100fcdf 100644 --- a/substrate/frame/election-provider-multi-phase/src/mock.rs +++ b/substrate/frame/election-provider-multi-phase/src/mock.rs @@ -18,7 +18,7 @@ use super::*; use crate::{self as multi_phase, signed::GeometricDepositBase, unsigned::MinerConfig}; use frame_election_provider_support::{ - bounds::{DataProviderBounds, ElectionBounds, ElectionBoundsBuilder}, + bounds::{DataProviderBounds, ElectionBounds}, data_provider, onchain, ElectionDataProvider, NposSolution, SequentialPhragmen, }; pub use frame_support::derive_impl; @@ -35,7 +35,7 @@ use sp_core::{ testing::{PoolState, TestOffchainExt, TestTransactionPoolExt}, OffchainDbExt, OffchainWorkerExt, TransactionPoolExt, }, - ConstBool, H256, + H256, }; use sp_npos_elections::{ assignment_ratio_to_staked_normalized, seq_phragmen, to_supports, BalancingConfig, @@ -116,7 +116,7 @@ pub fn roll_to_round(n: u32) { while Round::::get() != n { roll_to_signed(); - frame_support::assert_ok!(MultiPhase::elect(Zero::zero())); + frame_support::assert_ok!(MultiPhase::elect()); } } @@ -296,8 +296,6 @@ 
parameter_types! { #[derive(Debug)] pub static MaxWinners: u32 = 200; - #[derive(Debug)] - pub static MaxBackersPerWinner: u32 = 200; // `ElectionBounds` and `OnChainElectionsBounds` are defined separately to set them independently in the tests. pub static ElectionsBounds: ElectionBounds = ElectionBoundsBuilder::default().build(); pub static OnChainElectionsBounds: ElectionBounds = ElectionBoundsBuilder::default().build(); @@ -311,52 +309,34 @@ impl onchain::Config for OnChainSeqPhragmen { type Solver = SequentialPhragmen, Balancing>; type DataProvider = StakingMock; type WeightInfo = (); - type MaxWinnersPerPage = MaxWinners; - type MaxBackersPerWinner = MaxBackersPerWinner; - type Sort = ConstBool; + type MaxWinners = MaxWinners; type Bounds = OnChainElectionsBounds; } pub struct MockFallback; -impl ElectionProvider for MockFallback { - type AccountId = AccountId; +impl ElectionProviderBase for MockFallback { type BlockNumber = BlockNumber; + type AccountId = AccountId; type Error = &'static str; - type MaxWinnersPerPage = MaxWinners; - type MaxBackersPerWinner = MaxBackersPerWinner; - type Pages = ConstU32<1>; type DataProvider = StakingMock; - - fn elect(_remaining: PageIndex) -> Result, Self::Error> { - unimplemented!() - } - - fn ongoing() -> bool { - false - } + type MaxWinners = MaxWinners; } impl InstantElectionProvider for MockFallback { fn instant_elect( - voters: Vec>, - targets: Vec, - desired_targets: u32, + voters_bounds: DataProviderBounds, + targets_bounds: DataProviderBounds, ) -> Result, Self::Error> { if OnChainFallback::get() { onchain::OnChainExecution::::instant_elect( - voters, - targets, - desired_targets, + voters_bounds, + targets_bounds, ) .map_err(|_| "onchain::OnChainExecution failed.") } else { Err("NoFallback.") } } - - fn bother() -> bool { - OnChainFallback::get() - } } parameter_types! 
{ @@ -382,7 +362,6 @@ impl MinerConfig for Runtime { type MaxWeight = MinerMaxWeight; type MaxVotesPerVoter = ::MaxVotesPerVoter; type MaxWinners = MaxWinners; - type MaxBackersPerWinner = MaxBackersPerWinner; type Solution = TestNposSolution; fn solution_weight(v: u32, t: u32, a: u32, d: u32) -> Weight { @@ -425,7 +404,6 @@ impl crate::Config for Runtime { frame_election_provider_support::onchain::OnChainExecution; type ForceOrigin = frame_system::EnsureRoot; type MaxWinners = MaxWinners; - type MaxBackersPerWinner = MaxBackersPerWinner; type MinerConfig = Self; type Solver = SequentialPhragmen, Balancing>; type ElectionBounds = ElectionsBounds; @@ -477,12 +455,7 @@ impl ElectionDataProvider for StakingMock { type AccountId = AccountId; type MaxVotesPerVoter = MaxNominations; - fn electable_targets( - bounds: DataProviderBounds, - remaining_pages: PageIndex, - ) -> data_provider::Result> { - assert!(remaining_pages.is_zero()); - + fn electable_targets(bounds: DataProviderBounds) -> data_provider::Result> { let targets = Targets::get(); if !DataProviderAllowBadData::get() && @@ -494,12 +467,7 @@ impl ElectionDataProvider for StakingMock { Ok(targets) } - fn electing_voters( - bounds: DataProviderBounds, - remaining_pages: PageIndex, - ) -> data_provider::Result>> { - assert!(remaining_pages.is_zero()); - + fn electing_voters(bounds: DataProviderBounds) -> data_provider::Result>> { let mut voters = Voters::get(); if !DataProviderAllowBadData::get() { @@ -614,10 +582,6 @@ impl ExtBuilder { ::set(weight); self } - pub fn max_backers_per_winner(self, max: u32) -> Self { - MaxBackersPerWinner::set(max); - self - } pub fn build(self) -> sp_io::TestExternalities { sp_tracing::try_init_simple(); let mut storage = diff --git a/substrate/frame/election-provider-multi-phase/src/signed.rs b/substrate/frame/election-provider-multi-phase/src/signed.rs index 5efe848c0e626..c685791bbdd9d 100644 --- a/substrate/frame/election-provider-multi-phase/src/signed.rs +++ 
b/substrate/frame/election-provider-multi-phase/src/signed.rs @@ -21,7 +21,7 @@ use core::marker::PhantomData; use crate::{ unsigned::MinerConfig, Config, ElectionCompute, Pallet, QueuedSolution, RawSolution, - ReadySolutionOf, SignedSubmissionIndices, SignedSubmissionNextIndex, SignedSubmissionsMap, + ReadySolution, SignedSubmissionIndices, SignedSubmissionNextIndex, SignedSubmissionsMap, SnapshotMetadata, SolutionOf, SolutionOrSnapshotSize, Weight, WeightInfo, }; use alloc::{ @@ -490,7 +490,7 @@ impl Pallet { /// /// Infallible pub fn finalize_signed_phase_accept_solution( - ready_solution: ReadySolutionOf, + ready_solution: ReadySolution, who: &T::AccountId, deposit: BalanceOf, call_fee: BalanceOf, @@ -566,9 +566,9 @@ impl Pallet { mod tests { use super::*; use crate::{ - mock::*, CurrentPhase, ElectionCompute, ElectionError, Error, Event, Perbill, Phase, Round, + mock::*, CurrentPhase, ElectionBoundsBuilder, ElectionCompute, ElectionError, Error, Event, + Perbill, Phase, Round, }; - use frame_election_provider_support::bounds::ElectionBoundsBuilder; use frame_support::{assert_noop, assert_ok, assert_storage_noop}; use sp_runtime::Percent; diff --git a/substrate/frame/election-provider-multi-phase/src/unsigned.rs b/substrate/frame/election-provider-multi-phase/src/unsigned.rs index 5aabc3454d4df..191131ed3acc3 100644 --- a/substrate/frame/election-provider-multi-phase/src/unsigned.rs +++ b/substrate/frame/election-provider-multi-phase/src/unsigned.rs @@ -19,8 +19,8 @@ use crate::{ helpers, Call, Config, CurrentPhase, DesiredTargets, ElectionCompute, Error, FeasibilityError, - Pallet, QueuedSolution, RawSolution, ReadySolution, ReadySolutionOf, Round, RoundSnapshot, - Snapshot, SolutionAccuracyOf, SolutionOf, SolutionOrSnapshotSize, Weight, + Pallet, QueuedSolution, RawSolution, ReadySolution, Round, RoundSnapshot, Snapshot, + SolutionAccuracyOf, SolutionOf, SolutionOrSnapshotSize, Weight, }; use alloc::{boxed::Box, vec::Vec}; use codec::Encode; @@ -98,8 +98,6 @@ 
pub enum MinerError { NoMoreVoters, /// An error from the solver. Solver, - /// Desired targets are mire than the maximum allowed winners. - TooManyDesiredTargets, } impl From for MinerError { @@ -114,20 +112,16 @@ impl From for MinerError { } } -/// Reports the trimming result of a mined solution. +/// Reports the trimming result of a mined solution #[derive(Debug, Clone)] pub struct TrimmingStatus { - /// Number of voters trimmed due to the solution weight limits. weight: usize, - /// Number of voters trimmed due to the solution length limits. length: usize, - /// Number of edges (voter -> target) trimmed due to the max backers per winner bound. - edges: usize, } impl TrimmingStatus { pub fn is_trimmed(&self) -> bool { - self.weight > 0 || self.length > 0 || self.edges > 0 + self.weight > 0 || self.length > 0 } pub fn trimmed_weight(&self) -> usize { @@ -137,10 +131,6 @@ impl TrimmingStatus { pub fn trimmed_length(&self) -> usize { self.length } - - pub fn trimmed_edges(&self) -> usize { - self.edges - } } /// Save a given call into OCW storage. @@ -204,7 +194,6 @@ impl>> Pallet { let RoundSnapshot { voters, targets } = Snapshot::::get().ok_or(MinerError::SnapshotUnAvailable)?; let desired_targets = DesiredTargets::::get().ok_or(MinerError::SnapshotUnAvailable)?; - ensure!(desired_targets <= T::MaxWinners::get(), MinerError::TooManyDesiredTargets); let (solution, score, size, is_trimmed) = Miner::::mine_solution_with_snapshot::( voters, @@ -273,17 +262,16 @@ impl>> Pallet { /// Mine a new solution as a call. Performs all checks. pub fn mine_checked_call() -> Result, MinerError> { // get the solution, with a load of checks to ensure if submitted, IT IS ABSOLUTELY VALID. 
- let (raw_solution, witness, _trimming) = Self::mine_and_check()?; + let (raw_solution, witness, _) = Self::mine_and_check()?; let score = raw_solution.score; let call: Call = Call::submit_unsigned { raw_solution: Box::new(raw_solution), witness }; log!( debug, - "mined a solution with score {:?} and size {} and trimming {:?}", + "mined a solution with score {:?} and size {}", score, - call.using_encoded(|b| b.len()), - _trimming + call.using_encoded(|b| b.len()) ); Ok(call) @@ -405,7 +393,7 @@ impl>> Pallet { // ensure score is being improved. Panic henceforth. ensure!( QueuedSolution::::get() - .map_or(true, |q: ReadySolution<_, _, _>| raw_solution.score > q.score), + .map_or(true, |q: ReadySolution<_, _>| raw_solution.score > q.score), Error::::PreDispatchWeakSubmission, ); @@ -439,11 +427,8 @@ pub trait MinerConfig { /// /// The weight is computed using `solution_weight`. type MaxWeight: Get; - /// The maximum number of winners that can be elected in the single page supported by this - /// pallet. + /// The maximum number of winners that can be elected. type MaxWinners: Get; - /// The maximum number of backers per winner in the last solution. - type MaxBackersPerWinner: Get; /// Something that can compute the weight of a solution. /// /// This weight estimate is then used to trim the solution, based on [`MinerConfig::MaxWeight`]. @@ -505,11 +490,7 @@ impl Miner { let ElectionResult { assignments, winners: _ } = election_result; - // keeps track of how many edges were trimmed out. - let mut edges_trimmed = 0; - - // Reduce (requires round-trip to staked form) and ensures the max backer per winner bound - // requirements are met. + // Reduce (requires round-trip to staked form) let sorted_assignments = { // convert to staked and reduce. 
let mut staked = assignment_ratio_to_staked_normalized(assignments, &stake_of)?; @@ -536,53 +517,6 @@ impl Miner { }, ); - // ensures that the max backers per winner bounds are respected given the supports - // generated from the assignments. We achieve that by removing edges (voter -> - // target) in the assignments with lower stake until the total number of backers per - // winner fits within the expected bounded supports. This should be performed *after* - // applying reduce over the assignments to avoid over-trimming. - // - // a potential trimming does not affect the desired targets of the solution as the - // targets have *too many* edges by definition if trimmed. - let max_backers_per_winner = T::MaxBackersPerWinner::get().saturated_into::(); - - let _ = sp_npos_elections::to_supports(&staked) - .iter_mut() - .filter(|(_, support)| support.voters.len() > max_backers_per_winner) - .for_each(|(target, ref mut support)| { - // first sort by support stake, lowest at the tail. - support.voters.sort_by(|a, b| b.1.cmp(&a.1)); - - // filter out lowest stake edge in this support. - // optimization note: collects edge voters to remove from assignments into a - // btree set to optimize the search in the next loop. - let filtered: alloc::collections::BTreeSet<_> = support - .voters - .split_off(max_backers_per_winner) - .into_iter() - .map(|(who, _stake)| who) - .collect(); - - // remove lowest stake edges calculated above from assignments. - staked.iter_mut().for_each(|assignment| { - if filtered.contains(&assignment.who) { - assignment.distribution.retain(|(t, _)| t != target); - } - }); - - edges_trimmed += filtered.len(); - }); - - debug_assert!({ - // at this point we expect the supports generated from the assignments to fit within - // the expected bounded supports. 
- let expected_ok: Result< - crate::BoundedSupports<_, T::MaxWinners, T::MaxBackersPerWinner>, - _, - > = sp_npos_elections::to_supports(&staked).try_into(); - expected_ok.is_ok() - }); - // convert back. assignment_staked_to_ratio_normalized(staked)? }; @@ -615,8 +549,7 @@ impl Miner { // re-calc score. let score = solution.clone().score(stake_of, voter_at, target_at)?; - let is_trimmed = - TrimmingStatus { weight: weight_trimmed, length: length_trimmed, edges: edges_trimmed }; + let is_trimmed = TrimmingStatus { weight: weight_trimmed, length: length_trimmed }; Ok((solution, score, size, is_trimmed)) } @@ -685,7 +618,7 @@ impl Miner { let remove = assignments.len().saturating_sub(maximum_allowed_voters); log_no_system!( - trace, + debug, "from {} assignments, truncating to {} for length, removing {}", assignments.len(), maximum_allowed_voters, @@ -814,7 +747,7 @@ impl Miner { snapshot: RoundSnapshot>, current_round: u32, minimum_untrusted_score: Option, - ) -> Result, FeasibilityError> { + ) -> Result, FeasibilityError> { let RawSolution { solution, score, round } = raw_solution; let RoundSnapshot { voters: snapshot_voters, targets: snapshot_targets } = snapshot; @@ -881,12 +814,9 @@ impl Miner { // Finally, check that the claimed score was indeed correct. let known_score = supports.evaluate(); - ensure!(known_score == score, FeasibilityError::InvalidScore); - // Size of winners in miner solution is equal to `desired_targets` <= `MaxWinners`. In - // addition, the miner should have ensured that the MaxBackerPerWinner bound in respected, - // thus this conversion should not fail. + // Size of winners in miner solution is equal to `desired_targets` <= `MaxWinners`. 
let supports = supports .try_into() .defensive_map_err(|_| FeasibilityError::BoundedConversionFailed)?; @@ -1932,193 +1862,6 @@ mod tests { }) } - #[test] - fn mine_solution_always_respects_max_backers_per_winner() { - use crate::mock::MaxBackersPerWinner; - use frame_election_provider_support::BoundedSupport; - - let targets = vec![10, 20, 30, 40]; - let voters = vec![ - (1, 11, bounded_vec![10, 20, 30]), - (2, 12, bounded_vec![10, 20, 30]), - (3, 13, bounded_vec![10, 20, 30]), - (4, 14, bounded_vec![10, 20, 30]), - (5, 15, bounded_vec![10, 20, 40]), - ]; - let snapshot = RoundSnapshot { voters: voters.clone(), targets: targets.clone() }; - let (round, desired_targets) = (1, 3); - - // election with unbounded max backers per winnner. - ExtBuilder::default().max_backers_per_winner(u32::MAX).build_and_execute(|| { - assert_eq!(MaxBackersPerWinner::get(), u32::MAX); - - let (solution, expected_score_unbounded, _, trimming_status) = - Miner::::mine_solution_with_snapshot::<::Solver>( - voters.clone(), - targets.clone(), - desired_targets, - ) - .unwrap(); - - let ready_solution = Miner::::feasibility_check( - RawSolution { solution, score: expected_score_unbounded, round }, - Default::default(), - desired_targets, - snapshot.clone(), - round, - Default::default(), - ) - .unwrap(); - - assert_eq!( - ready_solution.supports.into_iter().collect::>(), - vec![ - ( - 10, - BoundedSupport { total: 25, voters: bounded_vec![(1, 11), (5, 5), (4, 9)] } - ), - (20, BoundedSupport { total: 22, voters: bounded_vec![(2, 12), (5, 10)] }), - (30, BoundedSupport { total: 18, voters: bounded_vec![(3, 13), (4, 5)] }) - ] - ); - - // no trimmed edges. - assert_eq!(trimming_status.trimmed_edges(), 0); - }); - - // election with max 1 backer per winnner. 
- ExtBuilder::default().max_backers_per_winner(1).build_and_execute(|| { - assert_eq!(MaxBackersPerWinner::get(), 1); - - let (solution, expected_score_bounded, _, trimming_status) = - Miner::::mine_solution_with_snapshot::<::Solver>( - voters, - targets, - desired_targets, - ) - .unwrap(); - - let ready_solution = Miner::::feasibility_check( - RawSolution { solution, score: expected_score_bounded, round }, - Default::default(), - desired_targets, - snapshot, - round, - Default::default(), - ) - .unwrap(); - - for (_, supports) in ready_solution.supports.iter() { - assert!((supports.voters.len() as u32) <= MaxBackersPerWinner::get()); - } - - assert_eq!( - ready_solution.supports.into_iter().collect::>(), - vec![ - (10, BoundedSupport { total: 11, voters: bounded_vec![(1, 11)] }), - (20, BoundedSupport { total: 12, voters: bounded_vec![(2, 12)] }), - (30, BoundedSupport { total: 13, voters: bounded_vec![(3, 13)] }) - ] - ); - - // four trimmed edges. - assert_eq!(trimming_status.trimmed_edges(), 4); - }); - } - - #[test] - fn max_backers_edges_trims_lowest_stake() { - use crate::mock::MaxBackersPerWinner; - - ExtBuilder::default().build_and_execute(|| { - let targets = vec![10, 20, 30, 40]; - - let voters = vec![ - (1, 100, bounded_vec![10, 20]), - (2, 200, bounded_vec![10, 20, 30]), - (3, 300, bounded_vec![10, 30]), - (4, 400, bounded_vec![10, 30]), - (5, 500, bounded_vec![10, 20, 30]), - (6, 600, bounded_vec![10, 20, 30, 40]), - ]; - let snapshot = RoundSnapshot { voters: voters.clone(), targets: targets.clone() }; - let (round, desired_targets) = (1, 4); - - let max_backers_bound = u32::MAX; - let trim_backers_bound = 2; - - // election with unbounded max backers per winnner. 
- MaxBackersPerWinner::set(max_backers_bound); - let (solution, score, _, trimming_status) = - Miner::::mine_solution_with_snapshot::<::Solver>( - voters.clone(), - targets.clone(), - desired_targets, - ) - .unwrap(); - - assert_eq!(trimming_status.trimmed_edges(), 0); - - let ready_solution = Miner::::feasibility_check( - RawSolution { solution, score, round }, - Default::default(), - desired_targets, - snapshot.clone(), - round, - Default::default(), - ) - .unwrap(); - - let full_supports = ready_solution.supports.into_iter().collect::>(); - - // gather the expected trimmed supports (lowest stake from supports with more backers - // than expected when MaxBackersPerWinner is 2) from the full, unbounded supports. - let expected_trimmed_supports = full_supports - .into_iter() - .filter(|(_, s)| s.voters.len() as u32 > trim_backers_bound) - .map(|(t, s)| (t, s.voters.into_iter().min_by(|a, b| a.1.cmp(&b.1)).unwrap())) - .collect::>(); - - // election with bounded 2 max backers per winnner. - MaxBackersPerWinner::set(trim_backers_bound); - let (solution, score, _, trimming_status) = - Miner::::mine_solution_with_snapshot::<::Solver>( - voters.clone(), - targets.clone(), - desired_targets, - ) - .unwrap(); - - assert_eq!(trimming_status.trimmed_edges(), 2); - - let ready_solution = Miner::::feasibility_check( - RawSolution { solution, score, round }, - Default::default(), - desired_targets, - snapshot.clone(), - round, - Default::default(), - ) - .unwrap(); - - let trimmed_supports = ready_solution.supports.into_iter().collect::>(); - - // gather all trimmed_supports edges from the trimmed solution. - let mut trimmed_supports_edges_full = vec![]; - for (t, s) in trimmed_supports { - for v in s.voters { - trimmed_supports_edges_full.push((t, v)); - } - } - - // expected trimmed supports set should be disjoint to the trimmed_supports full set of - // edges. 
- for edge in trimmed_supports_edges_full { - assert!(!expected_trimmed_supports.contains(&edge)); - } - }) - } - #[test] fn trim_assignments_length_does_not_modify_when_short_enough() { ExtBuilder::default().build_and_execute(|| { diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs index 135a52fece67b..1a9bf7165511a 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs @@ -24,7 +24,7 @@ use frame_support::{ PalletId, }; use frame_system::EnsureRoot; -use sp_core::{ConstBool, ConstU32, Get}; +use sp_core::{ConstU32, Get}; use sp_npos_elections::{ElectionScore, VoteWeight}; use sp_runtime::{ offchain::{ @@ -174,8 +174,6 @@ parameter_types! { pub static TransactionPriority: transaction_validity::TransactionPriority = 1; #[derive(Debug)] pub static MaxWinners: u32 = 100; - #[derive(Debug)] - pub static MaxBackersPerWinner: u32 = 100; pub static MaxVotesPerVoter: u32 = 16; pub static SignedFixedDeposit: Balance = 1; pub static SignedDepositIncreaseFactor: Percent = Percent::from_percent(10); @@ -204,18 +202,12 @@ impl pallet_election_provider_multi_phase::Config for Runtime { type SlashHandler = (); type RewardHandler = (); type DataProvider = Staking; - type Fallback = frame_election_provider_support::NoElection<( - AccountId, - BlockNumber, - Staking, - MaxWinners, - MaxBackersPerWinner, - )>; + type Fallback = + frame_election_provider_support::NoElection<(AccountId, BlockNumber, Staking, MaxWinners)>; type GovernanceFallback = onchain::OnChainExecution; type Solver = SequentialPhragmen, ()>; type ForceOrigin = EnsureRoot; type MaxWinners = MaxWinners; - type MaxBackersPerWinner = MaxBackersPerWinner; type ElectionBounds = ElectionBounds; type BenchmarkingConfig = NoopElectionProviderBenchmarkConfig; type WeightInfo = (); @@ -229,7 +221,6 @@ 
impl MinerConfig for Runtime { type MaxLength = MinerMaxLength; type MaxWeight = MinerMaxWeight; type MaxWinners = MaxWinners; - type MaxBackersPerWinner = MaxBackersPerWinner; fn solution_weight(_v: u32, _t: u32, _a: u32, _d: u32) -> Weight { Weight::zero() @@ -366,9 +357,6 @@ parameter_types! { } impl onchain::Config for OnChainSeqPhragmen { - type MaxWinnersPerPage = MaxWinners; - type MaxBackersPerWinner = MaxBackersPerWinner; - type Sort = ConstBool; type System = Runtime; type Solver = SequentialPhragmen< AccountId, @@ -376,6 +364,7 @@ impl onchain::Config for OnChainSeqPhragmen { >; type DataProvider = Staking; type WeightInfo = (); + type MaxWinners = MaxWinners; type Bounds = ElectionBounds; } diff --git a/substrate/frame/election-provider-support/Cargo.toml b/substrate/frame/election-provider-support/Cargo.toml index aadf87edb0e6f..32fa381e1d274 100644 --- a/substrate/frame/election-provider-support/Cargo.toml +++ b/substrate/frame/election-provider-support/Cargo.toml @@ -24,8 +24,6 @@ sp-arithmetic = { workspace = true } sp-core = { workspace = true } sp-npos-elections = { workspace = true } sp-runtime = { workspace = true } -sp-std = { workspace = true } - [dev-dependencies] rand = { features = ["small_rng"], workspace = true, default-features = true } @@ -45,7 +43,6 @@ std = [ "sp-io/std", "sp-npos-elections/std", "sp-runtime/std", - "sp-std/std", ] runtime-benchmarks = [ "frame-support/runtime-benchmarks", diff --git a/substrate/frame/election-provider-support/benchmarking/src/inner.rs b/substrate/frame/election-provider-support/benchmarking/src/inner.rs index a7b969bb1cf9b..7fb8c1bdb7290 100644 --- a/substrate/frame/election-provider-support/benchmarking/src/inner.rs +++ b/substrate/frame/election-provider-support/benchmarking/src/inner.rs @@ -37,7 +37,7 @@ fn set_up_voters_targets( voters_len: u32, targets_len: u32, degree: usize, -) -> (Vec<(AccountId, u64, impl Clone + IntoIterator)>, Vec) { +) -> (Vec<(AccountId, u64, impl IntoIterator)>, Vec) { 
// fill targets. let mut targets = (0..targets_len) .map(|i| frame_benchmarking::account::("Target", i, SEED)) diff --git a/substrate/frame/election-provider-support/solution-type/fuzzer/src/compact.rs b/substrate/frame/election-provider-support/solution-type/fuzzer/src/compact.rs index c4ae7c8462347..90fd9509e6f29 100644 --- a/substrate/frame/election-provider-support/solution-type/fuzzer/src/compact.rs +++ b/substrate/frame/election-provider-support/solution-type/fuzzer/src/compact.rs @@ -21,8 +21,7 @@ use sp_arithmetic::Percent; use sp_runtime::codec::{Encode, Error}; fn main() { - generate_solution_type!( - #[compact] pub struct InnerTestSolutionCompact::< + generate_solution_type!(#[compact] pub struct InnerTestSolutionCompact::< VoterIndex = u32, TargetIndex = u32, Accuracy = Percent, diff --git a/substrate/frame/election-provider-support/solution-type/src/codec.rs b/substrate/frame/election-provider-support/solution-type/src/codec.rs index c1dd62fe55506..16d5f17469b7e 100644 --- a/substrate/frame/election-provider-support/solution-type/src/codec.rs +++ b/substrate/frame/election-provider-support/solution-type/src/codec.rs @@ -33,7 +33,6 @@ pub(crate) fn codec_and_info_impl( let scale_info = scale_info_impl(&ident, &voter_type, &target_type, &weight_type, count); quote! 
{ - impl _fepsp::codec::EncodeLike for #ident {} #encode #decode #scale_info diff --git a/substrate/frame/election-provider-support/solution-type/src/single_page.rs b/substrate/frame/election-provider-support/solution-type/src/single_page.rs index c921be34b3430..b496c349d8db8 100644 --- a/substrate/frame/election-provider-support/solution-type/src/single_page.rs +++ b/substrate/frame/election-provider-support/solution-type/src/single_page.rs @@ -84,8 +84,6 @@ pub(crate) fn generate(def: crate::SolutionDef) -> Result { Eq, Clone, Debug, - Ord, - PartialOrd, _fepsp::codec::Encode, _fepsp::codec::Decode, _fepsp::codec::DecodeWithMemTracking, @@ -99,8 +97,6 @@ pub(crate) fn generate(def: crate::SolutionDef) -> Result { let from_impl = from_impl(&struct_name, count); let into_impl = into_impl(&assignment_name, count, weight_type.clone()); let from_index_impl = crate::index_assignment::from_impl(&struct_name, count); - let sort_impl = sort_impl(count); - let remove_weakest_sorted_impl = remove_weakest_sorted_impl(count); Ok(quote! ( /// A struct to encode a election assignment in a compact way. 
@@ -183,29 +179,6 @@ pub(crate) fn generate(def: crate::SolutionDef) -> Result { all_targets.into_iter().collect() } - - fn sort(&mut self, mut voter_stake: F) - where - F: FnMut(&Self::VoterIndex) -> _feps::VoteWeight - { - #sort_impl - } - - fn remove_weakest_sorted(&mut self, mut voter_stake: F) -> Option - where - F: FnMut(&Self::VoterIndex) -> _feps::VoteWeight - { - #remove_weakest_sorted_impl - } - - fn corrupt(&mut self) { - self.votes1.push( - ( - _fepsp::sp_arithmetic::traits::Bounded::max_value(), - _fepsp::sp_arithmetic::traits::Bounded::max_value() - ) - ) - } } type __IndexAssignment = _feps::IndexAssignment< @@ -213,12 +186,11 @@ pub(crate) fn generate(def: crate::SolutionDef) -> Result { <#ident as _feps::NposSolution>::TargetIndex, <#ident as _feps::NposSolution>::Accuracy, >; - impl _fepsp::codec::MaxEncodedLen for #ident { fn max_encoded_len() -> usize { use frame_support::traits::Get; use _fepsp::codec::Encode; - let s: u32 = <#max_voters as _feps::Get>::get(); + let s: u32 = #max_voters::get(); let max_element_size = // the first voter.. #voter_type::max_encoded_len() @@ -235,7 +207,6 @@ pub(crate) fn generate(def: crate::SolutionDef) -> Result { .saturating_add((s as usize).saturating_mul(max_element_size)) } } - impl<'a> core::convert::TryFrom<&'a [__IndexAssignment]> for #ident { type Error = _feps::Error; fn try_from(index_assignments: &'a [__IndexAssignment]) -> Result { @@ -257,65 +228,6 @@ pub(crate) fn generate(def: crate::SolutionDef) -> Result { )) } -fn sort_impl(count: usize) -> TokenStream2 { - (1..=count) - .map(|c| { - let field = vote_field(c); - quote! { - // NOTE: self.filed here is sometimes `Vec<(voter, weight)>` and sometimes - // `Vec<(voter, weights, last_weight)>`, but Rust's great patter matching makes it - // all work super nice. - self.#field.sort_by(|(a, ..), (b, ..)| voter_stake(&b).cmp(&voter_stake(&a))); - // ---------------------------------^^ in all fields, the index 0 is the voter id. 
- } - }) - .collect::() -} - -fn remove_weakest_sorted_impl(count: usize) -> TokenStream2 { - // check minium from field 2 onwards. We assume 0 is minimum - let check_minimum = (2..=count).map(|c| { - let filed = vote_field(c); - quote! { - let filed_value = self.#filed - .last() - .map(|(x, ..)| voter_stake(x)) - .unwrap_or_else(|| _fepsp::sp_arithmetic::traits::Bounded::max_value()); - if filed_value < minimum { - minimum = filed_value; - minimum_filed = #c - } - } - }); - - let remove_minimum_match = (1..=count).map(|c| { - let filed = vote_field(c); - quote! { - #c => self.#filed.pop().map(|(x, ..)| x), - } - }); - - let first_filed = vote_field(1); - quote! { - // we assume first one is the minimum. No problem if it is empty. - let mut minimum_filed = 1; - let mut minimum = self.#first_filed - .last() - .map(|(x, ..)| voter_stake(x)) - .unwrap_or_else(|| _fepsp::sp_arithmetic::traits::Bounded::max_value()); - - #( #check_minimum )* - - match minimum_filed { - #( #remove_minimum_match )* - _ => { - debug_assert!(false); - None - } - } - } -} - fn remove_voter_impl(count: usize) -> TokenStream2 { let field_name = vote_field(1); let single = quote! { diff --git a/substrate/frame/election-provider-support/src/bounds.rs b/substrate/frame/election-provider-support/src/bounds.rs index 6ef0604cb4bef..6b2423b7fece6 100644 --- a/substrate/frame/election-provider-support/src/bounds.rs +++ b/substrate/frame/election-provider-support/src/bounds.rs @@ -54,7 +54,6 @@ //! A default or `None` bound means that no bounds are enforced (i.e. unlimited result size). In //! general, be careful when using unbounded election bounds in production. -use codec::Encode; use core::ops::Add; use sp_runtime::traits::Zero; @@ -155,15 +154,6 @@ impl DataProviderBounds { self.size_exhausted(given_size.unwrap_or(SizeBound::zero())) } - /// Ensures the given encode-able slice meets both the length and count bounds. - /// - /// Same as `exhausted` but a better syntax. 
- pub fn slice_exhausted(self, input: &[T]) -> bool { - let size = Some((input.encoded_size() as u32).into()); - let count = Some((input.len() as u32).into()); - self.exhausted(size, count) - } - /// Returns an instance of `Self` that is constructed by capping both the `count` and `size` /// fields. If `self` is None, overwrite it with the provided bounds. pub fn max(self, bounds: DataProviderBounds) -> Self { diff --git a/substrate/frame/election-provider-support/src/lib.rs b/substrate/frame/election-provider-support/src/lib.rs index 27f8d96e5e791..90966ec59346e 100644 --- a/substrate/frame/election-provider-support/src/lib.rs +++ b/substrate/frame/election-provider-support/src/lib.rs @@ -21,9 +21,10 @@ //! within FRAME pallets. //! //! Something that will provide the functionality of election will implement -//! [`ElectionProvider`], whilst needing an associated [`ElectionProvider::DataProvider`], which -//! needs to be fulfilled by an entity implementing [`ElectionDataProvider`]. Most often, *the data -//! provider is* the receiver of the election, resulting in a diagram as below: +//! [`ElectionProvider`] and its parent-trait [`ElectionProviderBase`], whilst needing an +//! associated [`ElectionProviderBase::DataProvider`], which needs to be +//! fulfilled by an entity implementing [`ElectionDataProvider`]. Most often, *the data provider is* +//! the receiver of the election, resulting in a diagram as below: //! //! ```ignore //! ElectionDataProvider @@ -55,15 +56,8 @@ //! //! To accommodate both type of elections in one trait, the traits lean toward **stateful //! election**, as it is more general than the stateless. This is why [`ElectionProvider::elect`] -//! does not receive election data as an input. All value and type parameter must be provided by the -//! [`ElectionDataProvider`] trait, even if the election happens immediately. -//! -//! ## Multi-page election support -//! -//! 
Both [`ElectionDataProvider`] and [`ElectionProvider`] traits are parameterized by page, -//! supporting an election to be performed over multiple pages. This enables the -//! [`ElectionDataProvider`] implementor to provide all the election data over multiple pages. -//! Similarly [`ElectionProvider::elect`] is parameterized by page index. +//! has no parameters. All value and type parameter must be provided by the [`ElectionDataProvider`] +//! trait, even if the election happens immediately. //! //! ## Election Data //! @@ -110,17 +104,17 @@ //! impl ElectionDataProvider for Pallet { //! type AccountId = AccountId; //! type BlockNumber = BlockNumber; -//! type MaxVotesPerVoter = ConstU32<100>; +//! type MaxVotesPerVoter = ConstU32<1>; //! //! fn desired_targets() -> data_provider::Result { //! Ok(1) //! } -//! fn electing_voters(bounds: DataProviderBounds, _page: PageIndex) +//! fn electing_voters(bounds: DataProviderBounds) //! -> data_provider::Result>> //! { //! Ok(Default::default()) //! } -//! fn electable_targets(bounds: DataProviderBounds, _page: PageIndex) -> data_provider::Result> { +//! fn electable_targets(bounds: DataProviderBounds) -> data_provider::Result> { //! Ok(vec![10, 20, 30]) //! } //! fn next_election_prediction(now: BlockNumber) -> BlockNumber { @@ -132,54 +126,40 @@ //! //! mod generic_election_provider { //! use super::*; -//! use sp_runtime::traits::Zero; //! //! pub struct GenericElectionProvider(std::marker::PhantomData); //! //! pub trait Config { //! type DataProvider: ElectionDataProvider; -//! type MaxWinnersPerPage: Get; -//! type MaxBackersPerWinner: Get; -//! type Pages: Get; //! } //! -//! impl ElectionProvider for GenericElectionProvider { +//! impl ElectionProviderBase for GenericElectionProvider { //! type AccountId = AccountId; //! type BlockNumber = BlockNumber; //! type Error = &'static str; -//! type MaxBackersPerWinner = T::MaxBackersPerWinner; -//! type MaxWinnersPerPage = T::MaxWinnersPerPage; -//! 
type Pages = T::Pages; //! type DataProvider = T::DataProvider; +//! type MaxWinners = ConstU32<{ u32::MAX }>; //! -//! fn elect(page: PageIndex) -> Result, Self::Error> { -//! unimplemented!() -//! } +//! } //! -//! fn ongoing() -> bool { -//! unimplemented!() +//! impl ElectionProvider for GenericElectionProvider { +//! fn ongoing() -> bool { false } +//! fn elect() -> Result, Self::Error> { +//! Self::DataProvider::electable_targets(DataProviderBounds::default()) +//! .map_err(|_| "failed to elect") +//! .map(|t| bounded_vec![(t[0], Support::default())]) //! } //! } //! } //! //! mod runtime { -//! use frame_support::parameter_types; //! use super::generic_election_provider; //! use super::data_provider_mod; //! use super::AccountId; //! -//! parameter_types! { -//! pub static MaxWinnersPerPage: u32 = 10; -//! pub static MaxBackersPerWinner: u32 = 20; -//! pub static Pages: u32 = 2; -//! } -//! //! struct Runtime; //! impl generic_election_provider::Config for Runtime { //! type DataProvider = data_provider_mod::Pallet; -//! type MaxWinnersPerPage = MaxWinnersPerPage; -//! type MaxBackersPerWinner = MaxBackersPerWinner; -//! type Pages = Pages; //! } //! //! impl data_provider_mod::Config for Runtime { @@ -201,8 +181,6 @@ extern crate alloc; use alloc::{boxed::Box, vec::Vec}; use core::fmt::Debug; -use frame_support::traits::{Defensive, DefensiveResult}; -use sp_core::ConstU32; use sp_runtime::{ traits::{Bounded, Saturating, Zero}, RuntimeDebug, @@ -210,15 +188,12 @@ use sp_runtime::{ pub use bounds::DataProviderBounds; pub use codec::{Decode, DecodeWithMemTracking, Encode, MaxEncodedLen}; -/// Re-export the solution generation macro. -pub use frame_election_provider_solution_type::generate_solution_type; -pub use frame_support::{traits::Get, weights::Weight, BoundedVec, DefaultNoBound}; -use scale_info::TypeInfo; +pub use frame_support::{traits::Get, weights::Weight, BoundedVec}; /// Re-export some type as they are used in the interface. 
pub use sp_arithmetic::PerThing; pub use sp_npos_elections::{ - Assignment, BalancingConfig, ElectionResult, Error, ExtendedBalance, IdentifierT, PerThing128, - Support, Supports, VoteWeight, + Assignment, BalancingConfig, BoundedSupports, ElectionResult, Error, ExtendedBalance, + IdentifierT, PerThing128, Support, Supports, VoteWeight, }; pub use traits::NposSolution; @@ -257,9 +232,6 @@ mod mock; #[cfg(test)] mod tests; -/// A page index for the multi-block elections pagination. -pub type PageIndex = u32; - /// The [`IndexAssignment`] type is an intermediate between the assignments list /// ([`&[Assignment]`][Assignment]) and `SolutionOf`. /// @@ -277,9 +249,7 @@ pub struct IndexAssignment { pub distribution: Vec<(TargetIndex, P)>, } -impl - IndexAssignment -{ +impl IndexAssignment { pub fn new( assignment: &Assignment, voter_index: impl Fn(&AccountId) -> Option, @@ -321,43 +291,21 @@ pub trait ElectionDataProvider { /// Maximum number of votes per voter that this data provider is providing. type MaxVotesPerVoter: Get; - /// Returns the possible targets for the election associated with the provided `page`, i.e. the - /// targets that could become elected, thus "electable". + /// All possible targets for the election, i.e. the targets that could become elected, thus + /// "electable". /// /// This should be implemented as a self-weighing function. The implementor should register its /// appropriate weight at the end of execution with the system pallet directly. - fn electable_targets( - bounds: DataProviderBounds, - page: PageIndex, - ) -> data_provider::Result>; + fn electable_targets(bounds: DataProviderBounds) + -> data_provider::Result>; - /// A state-less version of [`Self::electable_targets`]. - /// - /// An election-provider that only uses 1 page should use this. 
- fn electable_targets_stateless( - bounds: DataProviderBounds, - ) -> data_provider::Result> { - Self::electable_targets(bounds, 0) - } - - /// All the voters that participate in the election associated with page `page`, thus - /// "electing". + /// All the voters that participate in the election, thus "electing". /// /// Note that if a notion of self-vote exists, it should be represented here. /// /// This should be implemented as a self-weighing function. The implementor should register its /// appropriate weight at the end of execution with the system pallet directly. - fn electing_voters( - bounds: DataProviderBounds, - page: PageIndex, - ) -> data_provider::Result>>; - - /// A state-less version of [`Self::electing_voters`]. - fn electing_voters_stateless( - bounds: DataProviderBounds, - ) -> data_provider::Result>> { - Self::electing_voters(bounds, 0) - } + fn electing_voters(bounds: DataProviderBounds) -> data_provider::Result>>; /// The number of targets to elect. /// @@ -389,9 +337,6 @@ pub trait ElectionDataProvider { ) { } - #[cfg(any(feature = "runtime-benchmarks", test))] - fn set_next_election(_to: u32) {} - /// Utility function only to be used in benchmarking scenarios, to be implemented optionally, /// else a noop. /// @@ -414,38 +359,28 @@ pub trait ElectionDataProvider { /// Clear all voters and targets. #[cfg(any(feature = "runtime-benchmarks", test))] fn clear() {} - - #[cfg(any(feature = "runtime-benchmarks", test))] - fn set_desired_targets(_count: u32) {} } -/// Something that can compute the result of an election and pass it back to the caller in a paged -/// way. -pub trait ElectionProvider { - /// The account ID identifier; +/// Base trait for types that can provide election +pub trait ElectionProviderBase { + /// The account identifier type. type AccountId; /// The block number type. type BlockNumber; - /// The error type returned by the provider; - type Error: Debug + PartialEq; + /// The error type that is returned by the provider. 
+ type Error: Debug; - /// The maximum number of winners per page in results returned by this election provider. + /// The upper bound on election winners that can be returned. /// - /// A winner is an `AccountId` that is part of the final election result. - type MaxWinnersPerPage: Get; - - /// The maximum number of backers that a single page may have in results returned by this - /// election provider. + /// # WARNING /// - /// A backer is an `AccountId` that "backs" one or more winners. For example, in the context of - /// nominated proof of stake, a backer is a voter that nominates a winner validator in the - /// election result. - type MaxBackersPerWinner: Get; - - /// The number of pages that this election provider supports. - type Pages: Get; + /// when communicating with the data provider, one must ensure that + /// `DataProvider::desired_targets` returns a value less than this bound. An + /// implementation can chose to either return an error and/or sort and + /// truncate the output to meet this bound. + type MaxWinners: Get; /// The data provider of the election. type DataProvider: ElectionDataProvider< @@ -453,108 +388,92 @@ pub trait ElectionProvider { BlockNumber = Self::BlockNumber, >; - /// Elect a new set of winners. - /// - /// A complete election may require multiple calls to [`ElectionProvider::elect`] if - /// [`ElectionProvider::Pages`] is higher than one. - /// - /// The result is returned in a target major format, namely as vector of supports. - /// - /// This should be implemented as a self-weighing function. The implementor should register its - /// appropriate weight at the end of execution with the system pallet directly. - fn elect(page: PageIndex) -> Result, Self::Error>; - - /// The index of the *most* significant page that this election provider supports. - fn msp() -> PageIndex { - Self::Pages::get().saturating_sub(1) - } - - /// The index of the *least* significant page that this election provider supports. 
- fn lsp() -> PageIndex { - Zero::zero() - } - /// checked call to `Self::DataProvider::desired_targets()` ensuring the value never exceeds - /// [`Self::MaxWinnersPerPage`]. + /// [`Self::MaxWinners`]. fn desired_targets_checked() -> data_provider::Result { Self::DataProvider::desired_targets().and_then(|desired_targets| { - if desired_targets <= Self::MaxWinnersPerPage::get() { + if desired_targets <= Self::MaxWinners::get() { Ok(desired_targets) } else { Err("desired_targets must not be greater than MaxWinners.") } }) } +} - /// Indicate whether this election provider is currently ongoing an asynchronous election. +/// Elect a new set of winners, bounded by `MaxWinners`. +/// +/// It must always use [`ElectionProviderBase::DataProvider`] to fetch the data it needs. +/// +/// This election provider that could function asynchronously. This implies that this election might +/// needs data ahead of time (ergo, receives no arguments to `elect`), and might be `ongoing` at +/// times. +pub trait ElectionProvider: ElectionProviderBase { + /// Indicate if this election provider is currently ongoing an asynchronous election or not. fn ongoing() -> bool; + + /// Performs the election. This should be implemented as a self-weighing function. The + /// implementor should register its appropriate weight at the end of execution with the + /// system pallet directly. + fn elect() -> Result, Self::Error>; } /// A (almost) marker trait that signifies an election provider as working synchronously. i.e. being /// *instant*. /// -/// This must still use the same data provider as with [`ElectionProvider::DataProvider`]. +/// This must still use the same data provider as with [`ElectionProviderBase::DataProvider`]. /// However, it can optionally overwrite the amount of voters and targets that are fetched from the /// data provider at runtime via `forced_input_voters_bound` and `forced_input_target_bound`. 
-pub trait InstantElectionProvider: ElectionProvider { +pub trait InstantElectionProvider: ElectionProviderBase { fn instant_elect( - voters: Vec>, - targets: Vec, - desired_targets: u32, + forced_input_voters_bound: DataProviderBounds, + forced_input_target_bound: DataProviderBounds, ) -> Result, Self::Error>; - - // Sine many instant election provider, like [`NoElection`] are meant to do nothing, this is a - // hint for the caller to call before, and if `false` is returned, not bother with passing all - // the info to `instant_elect`. - fn bother() -> bool; } /// An election provider that does nothing whatsoever. pub struct NoElection(core::marker::PhantomData); -impl ElectionProvider - for NoElection<(AccountId, BlockNumber, DataProvider, MaxWinnersPerPage, MaxBackersPerWinner)> +impl ElectionProviderBase + for NoElection<(AccountId, BlockNumber, DataProvider, MaxWinners)> where DataProvider: ElectionDataProvider, - MaxWinnersPerPage: Get, - MaxBackersPerWinner: Get, + MaxWinners: Get, { type AccountId = AccountId; type BlockNumber = BlockNumber; type Error = &'static str; - type Pages = ConstU32<1>; + type MaxWinners = MaxWinners; type DataProvider = DataProvider; - type MaxWinnersPerPage = MaxWinnersPerPage; - type MaxBackersPerWinner = MaxBackersPerWinner; - - fn elect(_page: PageIndex) -> Result, Self::Error> { - Err("`NoElection` cannot do anything.") - } +} +impl ElectionProvider + for NoElection<(AccountId, BlockNumber, DataProvider, MaxWinners)> +where + DataProvider: ElectionDataProvider, + MaxWinners: Get, +{ fn ongoing() -> bool { false } + + fn elect() -> Result, Self::Error> { + Err("`NoElection` cannot do anything.") + } } -impl - InstantElectionProvider - for NoElection<(AccountId, BlockNumber, DataProvider, MaxWinnersPerPage, MaxBackersPerWinner)> +impl InstantElectionProvider + for NoElection<(AccountId, BlockNumber, DataProvider, MaxWinners)> where DataProvider: ElectionDataProvider, - MaxWinnersPerPage: Get, - MaxBackersPerWinner: Get, + 
MaxWinners: Get, { fn instant_elect( - _: Vec>, - _: Vec, - _: u32, + _: DataProviderBounds, + _: DataProviderBounds, ) -> Result, Self::Error> { Err("`NoElection` cannot do anything.") } - - fn bother() -> bool { - false - } } /// A utility trait for something to implement `ElectionDataProvider` in a sensible way. @@ -686,11 +605,7 @@ pub trait NposSolver { fn solve( to_elect: usize, targets: Vec, - voters: Vec<( - Self::AccountId, - VoteWeight, - impl Clone + IntoIterator, - )>, + voters: Vec<(Self::AccountId, VoteWeight, impl IntoIterator)>, ) -> Result, Self::Error>; /// Measure the weight used in the calculation of the solver. @@ -700,70 +615,6 @@ pub trait NposSolver { fn weight(voters: u32, targets: u32, vote_degree: u32) -> Weight; } -/// A quick and dirty solver, that produces a valid but probably worthless election result, but is -/// fast. -/// -/// It choses a random number of winners without any consideration. -/// -/// Then it iterates over the voters and assigns them to the winners. -/// -/// It is only meant to be used in benchmarking. 
-pub struct QuickDirtySolver(core::marker::PhantomData<(AccountId, Accuracy)>); -impl NposSolver - for QuickDirtySolver -{ - type AccountId = AccountId; - type Accuracy = Accuracy; - type Error = &'static str; - - fn solve( - to_elect: usize, - targets: Vec, - voters: Vec<( - Self::AccountId, - VoteWeight, - impl Clone + IntoIterator, - )>, - ) -> Result, Self::Error> { - use sp_std::collections::btree_map::BTreeMap; - - if to_elect > targets.len() { - return Err("to_elect is greater than the number of targets."); - } - - let winners = targets.into_iter().take(to_elect).collect::>(); - - let mut assignments = Vec::with_capacity(voters.len()); - let mut final_winners = BTreeMap::::new(); - - for (voter, weight, votes) in voters { - let our_winners = winners - .iter() - .filter(|w| votes.clone().into_iter().any(|v| v == **w)) - .collect::>(); - let our_winners_len = our_winners.len(); - let distribution = our_winners - .into_iter() - .map(|w| { - *final_winners.entry(w.clone()).or_default() += weight as u128; - (w.clone(), Self::Accuracy::from_rational(1, our_winners_len as u128)) - }) - .collect::>(); - - let mut assignment = Assignment { who: voter, distribution }; - assignment.try_normalize().unwrap(); - assignments.push(assignment); - } - - let winners = final_winners.into_iter().collect::>(); - Ok(ElectionResult { winners, assignments }) - } - - fn weight(_: u32, _: u32, _: u32) -> Weight { - Default::default() - } -} - /// A wrapper for [`sp_npos_elections::seq_phragmen`] that implements [`NposSolver`]. See the /// documentation of [`sp_npos_elections::seq_phragmen`] for more info. 
pub struct SequentialPhragmen( @@ -779,11 +630,7 @@ impl, - voters: Vec<( - Self::AccountId, - VoteWeight, - impl Clone + IntoIterator, - )>, + voters: Vec<(Self::AccountId, VoteWeight, impl IntoIterator)>, ) -> Result, Self::Error> { sp_npos_elections::seq_phragmen(winners, targets, voters, Balancing::get()) } @@ -808,11 +655,7 @@ impl, - voters: Vec<( - Self::AccountId, - VoteWeight, - impl Clone + IntoIterator, - )>, + voters: Vec<(Self::AccountId, VoteWeight, impl IntoIterator)>, ) -> Result, Self::Error> { sp_npos_elections::phragmms(winners, targets, voters, Balancing::get()) } @@ -829,303 +672,10 @@ pub type Voter = (AccountId, VoteWeight, BoundedVec = Voter<::AccountId, ::MaxVotesPerVoter>; -/// A bounded vector of supports. Bounded equivalent to [`sp_npos_elections::Supports`]. -#[derive( - Default, Debug, Encode, Decode, DecodeWithMemTracking, scale_info::TypeInfo, MaxEncodedLen, -)] -#[codec(mel_bound(AccountId: MaxEncodedLen, Bound: Get))] -#[scale_info(skip_type_params(Bound))] -pub struct BoundedSupport> { - /// Total support. - pub total: ExtendedBalance, - /// Support from voters. 
- pub voters: BoundedVec<(AccountId, ExtendedBalance), Bound>, -} - -impl> sp_npos_elections::Backings for &BoundedSupport { - fn total(&self) -> ExtendedBalance { - self.total - } -} - -impl> PartialEq for BoundedSupport { - fn eq(&self, other: &Self) -> bool { - self.total == other.total && self.voters == other.voters - } -} - -impl> From> for Support { - fn from(b: BoundedSupport) -> Self { - Support { total: b.total, voters: b.voters.into_inner() } - } -} - -impl> Clone for BoundedSupport { - fn clone(&self) -> Self { - Self { voters: self.voters.clone(), total: self.total } - } -} - -impl> TryFrom> - for BoundedSupport -{ - type Error = &'static str; - fn try_from(s: sp_npos_elections::Support) -> Result { - let voters = s.voters.try_into().map_err(|_| "voters bound not respected")?; - Ok(Self { voters, total: s.total }) - } -} - -impl> BoundedSupport { - pub fn sorted_truncate_from(mut support: sp_npos_elections::Support) -> (Self, u32) { - // If bounds meet, then short circuit. - if let Ok(bounded) = support.clone().try_into() { - return (bounded, 0) - } - - let pre_len = support.voters.len(); - // sort support based on stake of each backer, low to high. - // Note: we don't sort high to low and truncate because we would have to track `total` - // updates, so we need one iteration anyhow. - support.voters.sort_by(|a, b| a.1.cmp(&b.1)); - // then do the truncation. - let mut bounded = Self { voters: Default::default(), total: 0 }; - while let Some((voter, weight)) = support.voters.pop() { - if let Err(_) = bounded.voters.try_push((voter, weight)) { - break - } - bounded.total += weight; - } - let post_len = bounded.voters.len(); - (bounded, (pre_len - post_len) as u32) - } -} - -/// A bounded vector of [`BoundedSupport`]. -/// -/// A [`BoundedSupports`] is a set of [`sp_npos_elections::Supports`] which are bounded in two -/// dimensions. 
`BInner` corresponds to the bound of the maximum backers per voter and `BOuter` -/// corresponds to the bound of the maximum winners that the bounded supports may contain. -/// -/// With the bounds, we control the maximum size of a bounded supports instance. -#[derive(Encode, Decode, DecodeWithMemTracking, TypeInfo, DefaultNoBound, MaxEncodedLen)] -#[codec(mel_bound(AccountId: MaxEncodedLen, BOuter: Get, BInner: Get))] -#[scale_info(skip_type_params(BOuter, BInner))] -pub struct BoundedSupports, BInner: Get>( - pub BoundedVec<(AccountId, BoundedSupport), BOuter>, -); - -/// Try and build yourself from another `BoundedSupports` with a different set of types. -pub trait TryFromOtherBounds, BOtherInner: Get> { - fn try_from_other_bounds( - other: BoundedSupports, - ) -> Result - where - Self: Sized; -} - -impl< - AccountId, - BOuter: Get, - BInner: Get, - BOtherOuter: Get, - BOuterInner: Get, - > TryFromOtherBounds - for BoundedSupports -{ - fn try_from_other_bounds( - other: BoundedSupports, - ) -> Result { - // TODO: we might as well do this with unsafe rust and do it faster. - if BOtherOuter::get() <= BOuter::get() && BInner::get() <= BOuterInner::get() { - let supports = other - .into_iter() - .map(|(acc, b_support)| { - b_support - .try_into() - .defensive_map_err(|_| Error::BoundsExceeded) - .map(|b_support| (acc, b_support)) - }) - .collect::, _>>() - .defensive()?; - supports.try_into() - } else { - Err(crate::Error::BoundsExceeded) - } - } -} - -impl, BInner: Get> - BoundedSupports -{ - /// Two u32s returned are number of winners and backers removed respectively. - pub fn sorted_truncate_from(supports: Supports) -> (Self, u32, u32) { - // if bounds, meet, short circuit - if let Ok(bounded) = supports.clone().try_into() { - return (bounded, 0, 0) - } - - let pre_winners = supports.len(); - let mut backers_removed = 0; - // first, convert all inner supports. 
- let mut inner_supports = supports - .into_iter() - .map(|(account, support)| { - let (bounded, removed) = - BoundedSupport::::sorted_truncate_from(support); - backers_removed += removed; - (account, bounded) - }) - .collect::>(); - - // then sort outer supports based on total stake, high to low - inner_supports.sort_by(|a, b| b.1.total.cmp(&a.1.total)); - - // then take the first slice that can fit. - let bounded = BoundedSupports(BoundedVec::< - (AccountId, BoundedSupport), - BOuter, - >::truncate_from(inner_supports)); - let post_winners = bounded.len(); - (bounded, (pre_winners - post_winners) as u32, backers_removed) - } -} -pub trait TryFromUnboundedPagedSupports, BInner: Get> { - fn try_from_unbounded_paged( - self, - ) -> Result>, crate::Error> - where - Self: Sized; -} - -impl, BInner: Get> - TryFromUnboundedPagedSupports for Vec> -{ - fn try_from_unbounded_paged( - self, - ) -> Result>, crate::Error> { - self.into_iter() - .map(|s| s.try_into().map_err(|_| crate::Error::BoundsExceeded)) - .collect::, _>>() - } -} - -impl, BInner: Get> sp_npos_elections::EvaluateSupport - for BoundedSupports -{ - fn evaluate(&self) -> sp_npos_elections::ElectionScore { - sp_npos_elections::evaluate_support(self.iter().map(|(_, s)| s)) - } -} - -impl, BInner: Get> sp_std::ops::DerefMut - for BoundedSupports -{ - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 - } -} - -impl, BInner: Get> Debug - for BoundedSupports -{ - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - for s in self.0.iter() { - write!(f, "({:?}, {:?}, {:?}) ", s.0, s.1.total, s.1.voters)?; - } - Ok(()) - } -} - -impl, BInner: Get> PartialEq - for BoundedSupports -{ - fn eq(&self, other: &Self) -> bool { - self.0 == other.0 - } -} - -impl, BInner: Get> Into> - for BoundedSupports -{ - fn into(self) -> Supports { - // TODO: can be done faster with unsafe code. 
- self.0.into_iter().map(|(acc, b_support)| (acc, b_support.into())).collect() - } -} - -impl, BInner: Get> - From), BOuter>> - for BoundedSupports -{ - fn from(t: BoundedVec<(AccountId, BoundedSupport), BOuter>) -> Self { - Self(t) - } -} - -impl, BInner: Get> Clone - for BoundedSupports -{ - fn clone(&self) -> Self { - Self(self.0.clone()) - } -} - -impl, BInner: Get> sp_std::ops::Deref - for BoundedSupports -{ - type Target = BoundedVec<(AccountId, BoundedSupport), BOuter>; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl, BInner: Get> IntoIterator - for BoundedSupports -{ - type Item = (AccountId, BoundedSupport); - type IntoIter = sp_std::vec::IntoIter; - - fn into_iter(self) -> Self::IntoIter { - self.0.into_iter() - } -} - -impl, BInner: Get> TryFrom> - for BoundedSupports -{ - type Error = crate::Error; - - fn try_from(supports: Supports) -> Result { - // optimization note: pre-allocate outer bounded vec. - let mut outer_bounded_supports = BoundedVec::< - (AccountId, BoundedSupport), - BOuter, - >::with_bounded_capacity( - supports.len().min(BOuter::get() as usize) - ); - - // optimization note: avoid intermediate allocations. - supports - .into_iter() - .map(|(account, support)| (account, support.try_into().map_err(|_| ()))) - .try_for_each(|(account, maybe_bounded_supports)| { - outer_bounded_supports - .try_push((account, maybe_bounded_supports?)) - .map_err(|_| ()) - }) - .map_err(|_| crate::Error::BoundsExceeded)?; - - Ok(outer_bounded_supports.into()) - } -} - -/// Same as `BoundedSupports` but parameterized by an `ElectionProvider`. +/// Same as `BoundedSupports` but parameterized by a `ElectionProviderBase`. 
pub type BoundedSupportsOf = BoundedSupports< - ::AccountId, - ::MaxWinnersPerPage, - ::MaxBackersPerWinner, + ::AccountId, + ::MaxWinners, >; sp_core::generate_feature_enabled_macro!( diff --git a/substrate/frame/election-provider-support/src/onchain.rs b/substrate/frame/election-provider-support/src/onchain.rs index 3478eec6c9db6..1063d5d35aee7 100644 --- a/substrate/frame/election-provider-support/src/onchain.rs +++ b/substrate/frame/election-provider-support/src/onchain.rs @@ -20,27 +20,27 @@ //! careful when using it onchain. use crate::{ - bounds::{ElectionBounds, ElectionBoundsBuilder}, - BoundedSupportsOf, Debug, ElectionDataProvider, ElectionProvider, InstantElectionProvider, - NposSolver, PageIndex, VoterOf, WeightInfo, + bounds::{DataProviderBounds, ElectionBounds, ElectionBoundsBuilder}, + BoundedSupportsOf, Debug, ElectionDataProvider, ElectionProvider, ElectionProviderBase, + InstantElectionProvider, NposSolver, WeightInfo, }; -use alloc::{collections::btree_map::BTreeMap, vec::Vec}; +use alloc::collections::btree_map::BTreeMap; use core::marker::PhantomData; use frame_support::{dispatch::DispatchClass, traits::Get}; -use frame_system::pallet_prelude::BlockNumberFor; use sp_npos_elections::{ - assignment_ratio_to_staked_normalized, to_supports, ElectionResult, VoteWeight, + assignment_ratio_to_staked_normalized, to_supports, BoundedSupports, ElectionResult, VoteWeight, }; /// Errors of the on-chain election. -#[derive(Eq, PartialEq, Debug, Clone)] +#[derive(Eq, PartialEq, Debug)] pub enum Error { /// An internal error in the NPoS elections crate. NposElections(sp_npos_elections::Error), /// Errors from the data provider. DataProvider(&'static str), - /// Results failed to meet the bounds. - FailedToBound, + /// Configurational error caused by `desired_targets` requested by data provider exceeding + /// `MaxWinners`. 
+ TooManyWinners, } impl From for Error { @@ -62,12 +62,6 @@ pub type BoundedExecution = OnChainExecution; /// Configuration trait for an onchain election execution. pub trait Config { - /// Whether to try and sort or not. - /// - /// If `true`, the supports will be sorted by descending total support to meet the bounds. If - /// `false`, `FailedToBound` error may be returned. - type Sort: Get; - /// Needed for weight registration. type System: frame_system::Config; @@ -77,18 +71,6 @@ pub trait Config { Error = sp_npos_elections::Error, >; - /// Maximum number of backers allowed per target. - /// - /// If the bounds are exceeded due to the data returned by the data provider, the election will - /// fail. - type MaxBackersPerWinner: Get; - - /// Maximum number of winners in an election. - /// - /// If the bounds are exceeded due to the data returned by the data provider, the election will - /// fail. - type MaxWinnersPerPage: Get; - /// Something that provides the data for election. type DataProvider: ElectionDataProvider< AccountId = ::AccountId, @@ -98,107 +80,103 @@ pub trait Config { /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; + /// Upper bound on maximum winners from electable targets. + /// + /// As noted in the documentation of [`ElectionProviderBase::MaxWinners`], this value should + /// always be more than `DataProvider::desired_target`. + type MaxWinners: Get; + /// Elections bounds, to use when calling into [`Config::DataProvider`]. It might be overwritten /// in the `InstantElectionProvider` impl. type Bounds: Get; } -impl OnChainExecution { - fn elect_with_snapshot( - voters: Vec>, - targets: Vec<::AccountId>, - desired_targets: u32, - ) -> Result, Error> { - if (desired_targets > T::MaxWinnersPerPage::get()) && !T::Sort::get() { - // early exit what will fail in the last line anyways. - return Err(Error::FailedToBound) - } +/// Same as `BoundedSupportsOf` but for `onchain::Config`. 
+pub type OnChainBoundedSupportsOf = BoundedSupports< + <::System as frame_system::Config>::AccountId, + ::MaxWinners, +>; - let voters_len = voters.len() as u32; - let targets_len = targets.len() as u32; - - let stake_map: BTreeMap<_, _> = voters - .iter() - .map(|(validator, vote_weight, _)| (validator.clone(), *vote_weight)) - .collect(); - - let stake_of = |w: &::AccountId| -> VoteWeight { - stake_map.get(w).cloned().unwrap_or_default() - }; - - let ElectionResult { winners: _, assignments } = - T::Solver::solve(desired_targets as usize, targets, voters).map_err(Error::from)?; - - let staked = assignment_ratio_to_staked_normalized(assignments, &stake_of)?; - - let weight = T::Solver::weight::( - voters_len, - targets_len, - ::MaxVotesPerVoter::get(), - ); - frame_system::Pallet::::register_extra_weight_unchecked( - weight, - DispatchClass::Mandatory, - ); - - let unbounded = to_supports(&staked); - let bounded = if T::Sort::get() { - let (bounded, _winners_removed, _backers_removed) = - BoundedSupportsOf::::sorted_truncate_from(unbounded); - bounded - } else { - unbounded.try_into().map_err(|_| Error::FailedToBound)? 
- }; - Ok(bounded) - } +fn elect_with_input_bounds( + bounds: ElectionBounds, +) -> Result, Error> { + let (voters, targets) = T::DataProvider::electing_voters(bounds.voters) + .and_then(|voters| Ok((voters, T::DataProvider::electable_targets(bounds.targets)?))) + .map_err(Error::DataProvider)?; - fn elect_with( - bounds: ElectionBounds, - page: PageIndex, - ) -> Result, Error> { - let (voters, targets) = T::DataProvider::electing_voters(bounds.voters, page) - .and_then(|voters| { - Ok((voters, T::DataProvider::electable_targets(bounds.targets, page)?)) - }) - .map_err(Error::DataProvider)?; - let desired_targets = T::DataProvider::desired_targets().map_err(Error::DataProvider)?; - Self::elect_with_snapshot(voters, targets, desired_targets) - } -} + let desired_targets = T::DataProvider::desired_targets().map_err(Error::DataProvider)?; -impl InstantElectionProvider for OnChainExecution { - fn instant_elect( - voters: Vec>, - targets: Vec<::AccountId>, - desired_targets: u32, - ) -> Result, Self::Error> { - Self::elect_with_snapshot(voters, targets, desired_targets) + if desired_targets > T::MaxWinners::get() { + // early exit + return Err(Error::TooManyWinners) } - fn bother() -> bool { - true - } + let voters_len = voters.len() as u32; + let targets_len = targets.len() as u32; + + let stake_map: BTreeMap<_, _> = voters + .iter() + .map(|(validator, vote_weight, _)| (validator.clone(), *vote_weight)) + .collect(); + + let stake_of = |w: &::AccountId| -> VoteWeight { + stake_map.get(w).cloned().unwrap_or_default() + }; + + let ElectionResult { winners: _, assignments } = + T::Solver::solve(desired_targets as usize, targets, voters).map_err(Error::from)?; + + let staked = assignment_ratio_to_staked_normalized(assignments, &stake_of)?; + + let weight = T::Solver::weight::( + voters_len, + targets_len, + ::MaxVotesPerVoter::get(), + ); + frame_system::Pallet::::register_extra_weight_unchecked( + weight, + DispatchClass::Mandatory, + ); + + // defensive: Since npos 
solver returns a result always bounded by `desired_targets`, this is + // never expected to happen as long as npos solver does what is expected for it to do. + let supports: OnChainBoundedSupportsOf = + to_supports(&staked).try_into().map_err(|_| Error::TooManyWinners)?; + + Ok(supports) } -impl ElectionProvider for OnChainExecution { +impl ElectionProviderBase for OnChainExecution { type AccountId = ::AccountId; - type BlockNumber = BlockNumberFor; + type BlockNumber = frame_system::pallet_prelude::BlockNumberFor; type Error = Error; - type MaxWinnersPerPage = T::MaxWinnersPerPage; - type MaxBackersPerWinner = T::MaxBackersPerWinner; - // can support any number of pages, as this is meant to be called "instantly". We don't care - // about this value here. - type Pages = sp_core::ConstU32<1>; + type MaxWinners = T::MaxWinners; type DataProvider = T::DataProvider; +} - fn elect(page: PageIndex) -> Result, Self::Error> { - let election_bounds = ElectionBoundsBuilder::from(T::Bounds::get()).build(); - Self::elect_with(election_bounds, page) +impl InstantElectionProvider for OnChainExecution { + fn instant_elect( + forced_input_voters_bounds: DataProviderBounds, + forced_input_targets_bounds: DataProviderBounds, + ) -> Result, Self::Error> { + let elections_bounds = ElectionBoundsBuilder::from(T::Bounds::get()) + .voters_or_lower(forced_input_voters_bounds) + .targets_or_lower(forced_input_targets_bounds) + .build(); + + elect_with_input_bounds::(elections_bounds) } +} +impl ElectionProvider for OnChainExecution { fn ongoing() -> bool { false } + + fn elect() -> Result, Self::Error> { + let election_bounds = ElectionBoundsBuilder::from(T::Bounds::get()).build(); + elect_with_input_bounds::(election_bounds) + } } #[cfg(test)] @@ -206,7 +184,6 @@ mod tests { use super::*; use crate::{ElectionProvider, PhragMMS, SequentialPhragmen}; use frame_support::{assert_noop, derive_impl, parameter_types}; - use sp_io::TestExternalities; use sp_npos_elections::Support; use 
sp_runtime::Perbill; type AccountId = u64; @@ -254,50 +231,42 @@ mod tests { struct PhragMMSParams; parameter_types! { - pub static MaxWinnersPerPage: u32 = 10; - pub static MaxBackersPerWinner: u32 = 20; + pub static MaxWinners: u32 = 10; pub static DesiredTargets: u32 = 2; - pub static Sort: bool = false; pub static Bounds: ElectionBounds = ElectionBoundsBuilder::default().voters_count(600.into()).targets_count(400.into()).build(); } impl Config for PhragmenParams { - type Sort = Sort; type System = Runtime; type Solver = SequentialPhragmen; type DataProvider = mock_data_provider::DataProvider; - type MaxWinnersPerPage = MaxWinnersPerPage; - type MaxBackersPerWinner = MaxBackersPerWinner; - type Bounds = Bounds; type WeightInfo = (); + type MaxWinners = MaxWinners; + type Bounds = Bounds; } impl Config for PhragMMSParams { - type Sort = Sort; type System = Runtime; type Solver = PhragMMS; type DataProvider = mock_data_provider::DataProvider; - type MaxWinnersPerPage = MaxWinnersPerPage; - type MaxBackersPerWinner = MaxBackersPerWinner; type WeightInfo = (); + type MaxWinners = MaxWinners; type Bounds = Bounds; } mod mock_data_provider { - use super::*; - use crate::{data_provider, DataProviderBounds, PageIndex, VoterOf}; use frame_support::traits::ConstU32; use sp_runtime::bounded_vec; + use super::*; + use crate::{data_provider, VoterOf}; + pub struct DataProvider; impl ElectionDataProvider for DataProvider { type AccountId = AccountId; type BlockNumber = BlockNumber; type MaxVotesPerVoter = ConstU32<2>; - fn electing_voters( - _: DataProviderBounds, - _page: PageIndex, - ) -> data_provider::Result>> { + fn electing_voters(_: DataProviderBounds) -> data_provider::Result>> { Ok(vec![ (1, 10, bounded_vec![10, 20]), (2, 20, bounded_vec![30, 20]), @@ -305,10 +274,7 @@ mod tests { ]) } - fn electable_targets( - _: DataProviderBounds, - _page: PageIndex, - ) -> data_provider::Result> { + fn electable_targets(_: DataProviderBounds) -> data_provider::Result> { 
Ok(vec![10, 20, 30]) } @@ -324,101 +290,40 @@ mod tests { #[test] fn onchain_seq_phragmen_works() { - TestExternalities::new_empty().execute_with(|| { - let expected_supports = vec![ - ( - 10 as AccountId, - Support { total: 25, voters: vec![(1 as AccountId, 10), (3, 15)] }, - ), - (30, Support { total: 35, voters: vec![(2, 20), (3, 15)] }), - ] - .try_into() - .unwrap(); - + sp_io::TestExternalities::new_empty().execute_with(|| { assert_eq!( - as ElectionProvider>::elect(0).unwrap(), - expected_supports, + as ElectionProvider>::elect().unwrap(), + vec![ + (10, Support { total: 25, voters: vec![(1, 10), (3, 15)] }), + (30, Support { total: 35, voters: vec![(2, 20), (3, 15)] }) + ] ); }) } #[test] - fn sorting_false_works() { - TestExternalities::new_empty().execute_with(|| { - // Default results would have 3 targets, but we allow for only 2. - DesiredTargets::set(3); - MaxWinnersPerPage::set(2); - - assert_noop!( - as ElectionProvider>::elect(0), - Error::FailedToBound, - ); - }); - - TestExternalities::new_empty().execute_with(|| { - // Default results would have 2 backers per winner - MaxBackersPerWinner::set(1); + fn too_many_winners_when_desired_targets_exceed_max_winners() { + sp_io::TestExternalities::new_empty().execute_with(|| { + // given desired targets larger than max winners + DesiredTargets::set(10); + MaxWinners::set(9); assert_noop!( - as ElectionProvider>::elect(0), - Error::FailedToBound, - ); - }); - } - - #[test] - fn sorting_true_works_winners() { - Sort::set(true); - - TestExternalities::new_empty().execute_with(|| { - let expected_supports = - vec![(30, Support { total: 35, voters: vec![(2, 20), (3, 15)] })] - .try_into() - .unwrap(); - - // we want to allow 1 winner only, and allow sorting. 
- MaxWinnersPerPage::set(1); - - assert_eq!( - as ElectionProvider>::elect(0).unwrap(), - expected_supports, - ); - }); - - MaxWinnersPerPage::set(10); - - TestExternalities::new_empty().execute_with(|| { - let expected_supports = vec![ - (30, Support { total: 20, voters: vec![(2, 20)] }), - (10 as AccountId, Support { total: 15, voters: vec![(3 as AccountId, 15)] }), - ] - .try_into() - .unwrap(); - - // we want to allow 2 winners only but 1 backer each, and allow sorting. - MaxBackersPerWinner::set(1); - - assert_eq!( - as ElectionProvider>::elect(0).unwrap(), - expected_supports, + as ElectionProvider>::elect(), + Error::TooManyWinners, ); }) } #[test] fn onchain_phragmms_works() { - TestExternalities::new_empty().execute_with(|| { + sp_io::TestExternalities::new_empty().execute_with(|| { assert_eq!( - as ElectionProvider>::elect(0).unwrap(), + as ElectionProvider>::elect().unwrap(), vec![ - ( - 10 as AccountId, - Support { total: 25, voters: vec![(1 as AccountId, 10), (3, 15)] } - ), + (10, Support { total: 25, voters: vec![(1, 10), (3, 15)] }), (30, Support { total: 35, voters: vec![(2, 20), (3, 15)] }) ] - .try_into() - .unwrap() ); }) } diff --git a/substrate/frame/election-provider-support/src/tests.rs b/substrate/frame/election-provider-support/src/tests.rs index de4bac3664bdd..6e3deb9e38346 100644 --- a/substrate/frame/election-provider-support/src/tests.rs +++ b/substrate/frame/election-provider-support/src/tests.rs @@ -18,10 +18,10 @@ //! Tests for solution-type. 
#![cfg(test)] -use crate::{mock::*, BoundedSupports, IndexAssignment, NposSolution}; + +use crate::{mock::*, IndexAssignment, NposSolution}; use frame_support::traits::ConstU32; use rand::SeedableRng; -use sp_npos_elections::{Support, Supports}; mod solution_type { use super::*; @@ -452,29 +452,3 @@ fn index_assignments_generate_same_solution_as_plain_assignments() { assert_eq!(solution, index_compact); } - -#[test] -fn sorted_truncate_from_works() { - let supports: Supports = vec![ - (1, Support { total: 303, voters: vec![(100, 100), (101, 101), (102, 102)] }), - (2, Support { total: 201, voters: vec![(100, 100), (101, 101)] }), - (3, Support { total: 406, voters: vec![(100, 100), (101, 101), (102, 102), (103, 103)] }), - ]; - - let (bounded, winners_removed, backers_removed) = - BoundedSupports::, ConstU32<2>>::sorted_truncate_from(supports); - // we trim 2 as it has least total support, and trim backers based on stake. - assert_eq!( - bounded - .clone() - .into_iter() - .map(|(k, v)| (k, Support { total: v.total, voters: v.voters.into_inner() })) - .collect::>(), - vec![ - (3, Support { total: 205, voters: vec![(103, 103), (102, 102)] }), - (1, Support { total: 203, voters: vec![(102, 102), (101, 101)] }) - ] - ); - assert_eq!(winners_removed, 1); - assert_eq!(backers_removed, 3); -} diff --git a/substrate/frame/election-provider-support/src/traits.rs b/substrate/frame/election-provider-support/src/traits.rs index d8ffd41d8ae51..84fd57992d343 100644 --- a/substrate/frame/election-provider-support/src/traits.rs +++ b/substrate/frame/election-provider-support/src/traits.rs @@ -42,8 +42,6 @@ where + Clone + Bounded + Encode - + Ord - + PartialOrd + TypeInfo; /// The target type. Needs to be an index (convert to usize). @@ -55,8 +53,6 @@ where + Clone + Bounded + Encode - + Ord - + PartialOrd + TypeInfo; /// The weight/accuracy type of each vote. 
@@ -127,23 +123,4 @@ where voter_at: impl Fn(Self::VoterIndex) -> Option, target_at: impl Fn(Self::TargetIndex) -> Option, ) -> Result>, Error>; - - /// Sort self by the means of the given function. - /// - /// This might be helpful to allow for easier trimming. - fn sort(&mut self, voter_stake: F) - where - F: FnMut(&Self::VoterIndex) -> VoteWeight; - - /// Remove the least staked voter. - /// - /// This is ONLY sensible to do if [`Self::sort`] has been called on the struct at least once. - fn remove_weakest_sorted(&mut self, voter_stake: F) -> Option - where - F: FnMut(&Self::VoterIndex) -> VoteWeight; - - /// Make this solution corrupt. This should set the index of a voter to `Bounded::max_value()`. - /// - /// Obviously, this is only useful for testing. - fn corrupt(&mut self); } diff --git a/substrate/frame/elections-phragmen/src/benchmarking.rs b/substrate/frame/elections-phragmen/src/benchmarking.rs index 6e8850aca9d8d..60771fa89ad7e 100644 --- a/substrate/frame/elections-phragmen/src/benchmarking.rs +++ b/substrate/frame/elections-phragmen/src/benchmarking.rs @@ -71,10 +71,7 @@ fn submit_candidates( RawOrigin::Signed(account.clone()).into(), candidate_count::(), ) - .map_err(|e| { - log::error!(target: crate::LOG_TARGET, "failed to submit candidacy: {:?}", e); - "failed to submit candidacy" - })?; + .map_err(|_| "failed to submit candidacy")?; Ok(account) }) .collect::>() @@ -155,10 +152,6 @@ mod benchmarks { // -- Signed ones #[benchmark] fn vote_equal(v: Linear<1, { T::MaxVotesPerVoter::get() }>) -> Result<(), BenchmarkError> { - assert!( - T::MaxCandidates::get() > T::MaxVotesPerVoter::get(), - "MaxCandidates should be more than MaxVotesPerVoter" - ); clean::(); // create a bunch of candidates. 
@@ -466,9 +459,6 @@ mod benchmarks { let all_candidates = submit_candidates_with_self_vote::(c, "candidates")?; let _ = distribute_voters::(all_candidates, v.saturating_sub(c), votes_per_voter as usize)?; - log::info!(target: crate::LOG_TARGET, "[v = {:?}]voters: {:?}",v, v.saturating_sub(c)); - log::info!(target: crate::LOG_TARGET, "votes_per_voter: {:?}",votes_per_voter); - log::info!(target: crate::LOG_TARGET, "candidates: {:?}",c); #[block] { diff --git a/substrate/frame/fast-unstake/src/mock.rs b/substrate/frame/fast-unstake/src/mock.rs index 5d8aed59ff653..67f7ee21e6175 100644 --- a/substrate/frame/fast-unstake/src/mock.rs +++ b/substrate/frame/fast-unstake/src/mock.rs @@ -16,7 +16,6 @@ // limitations under the License. use crate::{self as fast_unstake}; -use frame_election_provider_support::PageIndex; use frame_support::{ assert_ok, derive_impl, pallet_prelude::*, @@ -83,28 +82,25 @@ parameter_types! { pub static BondingDuration: u32 = 3; pub static CurrentEra: u32 = 0; pub static Ongoing: bool = false; + pub static MaxWinners: u32 = 100; } pub struct MockElection; - -impl frame_election_provider_support::ElectionProvider for MockElection { - type BlockNumber = BlockNumber; +impl frame_election_provider_support::ElectionProviderBase for MockElection { type AccountId = AccountId; + type BlockNumber = BlockNumber; + type MaxWinners = MaxWinners; type DataProvider = Staking; - type MaxBackersPerWinner = ConstU32<100>; - type MaxWinnersPerPage = ConstU32<100>; - type Pages = ConstU32<1>; type Error = (); +} - fn elect( - _remaining_pages: PageIndex, - ) -> Result, Self::Error> { - Err(()) - } - +impl frame_election_provider_support::ElectionProvider for MockElection { fn ongoing() -> bool { Ongoing::get() } + fn elect() -> Result, Self::Error> { + Err(()) + } } #[derive_impl(pallet_staking::config_preludes::TestDefaultConfig)] @@ -204,7 +200,7 @@ impl ExtBuilder { (v, Exposure { total: 0, own: 0, others }) }) .for_each(|(validator, exposure)| { - 
pallet_staking::EraInfo::::upsert_exposure(era, &validator, exposure); + pallet_staking::EraInfo::::set_exposure(era, &validator, exposure); }); } @@ -304,7 +300,7 @@ pub fn create_exposed_nominator(exposed: AccountId, era: u32) { // create an exposed nominator in passed era let mut exposure = pallet_staking::EraInfo::::get_full_exposure(era, &VALIDATORS_PER_ERA); exposure.others.push(IndividualExposure { who: exposed, value: 0 as Balance }); - pallet_staking::EraInfo::::upsert_exposure(era, &VALIDATORS_PER_ERA, exposure); + pallet_staking::EraInfo::::set_exposure(era, &VALIDATORS_PER_ERA, exposure); Balances::make_free_balance_be(&exposed, 100); assert_ok!(Staking::bond( diff --git a/substrate/frame/grandpa/Cargo.toml b/substrate/frame/grandpa/Cargo.toml index 8fe651de43d99..4072d65b6267b 100644 --- a/substrate/frame/grandpa/Cargo.toml +++ b/substrate/frame/grandpa/Cargo.toml @@ -42,7 +42,6 @@ pallet-staking = { workspace = true, default-features = true } pallet-staking-reward-curve = { workspace = true, default-features = true } pallet-timestamp = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } -sp-tracing = { workspace = true, default-features = true } [features] default = ["std"] diff --git a/substrate/frame/grandpa/src/mock.rs b/substrate/frame/grandpa/src/mock.rs index 2fd0cbb5ffdcb..933aa6c3ea2fd 100644 --- a/substrate/frame/grandpa/src/mock.rs +++ b/substrate/frame/grandpa/src/mock.rs @@ -32,14 +32,14 @@ use frame_support::{ }; use pallet_session::historical as pallet_session_historical; use sp_consensus_grandpa::{RoundNumber, SetId, GRANDPA_ENGINE_ID}; -use sp_core::{ConstBool, H256}; +use sp_core::H256; use sp_keyring::Ed25519Keyring; use sp_runtime::{ curve::PiecewiseLinear, impl_opaque_keys, testing::{TestXt, UintAuthorityId}, traits::OpaqueKeys, - BoundedVec, BuildStorage, DigestItem, Perbill, + BuildStorage, DigestItem, Perbill, }; use sp_staking::{EraIndex, SessionIndex}; @@ -156,9 +156,7 @@ 
impl onchain::Config for OnChainSeqPhragmen { type Solver = SequentialPhragmen; type DataProvider = Staking; type WeightInfo = (); - type MaxWinnersPerPage = ConstU32<100>; - type MaxBackersPerWinner = ConstU32<100>; - type Sort = ConstBool; + type MaxWinners = ConstU32<100>; type Bounds = ElectionsBoundsOnChain; } @@ -225,7 +223,6 @@ pub fn new_test_ext(vec: Vec<(u64, u64)>) -> sp_io::TestExternalities { } pub fn new_test_ext_raw_authorities(authorities: AuthorityList) -> sp_io::TestExternalities { - sp_tracing::try_init_simple(); let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); let balances: Vec<_> = (0..authorities.len()).map(|i| (i as u64, 10_000_000)).collect(); @@ -263,7 +260,7 @@ pub fn new_test_ext_raw_authorities(authorities: AuthorityList) -> sp_io::TestEx validator_count: 8, force_era: pallet_staking::Forcing::ForceNew, minimum_validator_count: 0, - invulnerables: BoundedVec::new(), + invulnerables: vec![], ..Default::default() }; @@ -292,9 +289,8 @@ pub fn start_session(session_index: SessionIndex) { Timestamp::set_timestamp(System::block_number() * 6000); System::on_initialize(System::block_number()); - // staking has to be initialized before session as per the multi-block staking PR. 
- Staking::on_initialize(System::block_number()); Session::on_initialize(System::block_number()); + Staking::on_initialize(System::block_number()); Grandpa::on_initialize(System::block_number()); } diff --git a/substrate/frame/nomination-pools/benchmarking/src/mock.rs b/substrate/frame/nomination-pools/benchmarking/src/mock.rs index 1dcfb86b75cf6..c707af4842977 100644 --- a/substrate/frame/nomination-pools/benchmarking/src/mock.rs +++ b/substrate/frame/nomination-pools/benchmarking/src/mock.rs @@ -85,7 +85,7 @@ impl pallet_staking::Config for Runtime { type AdminOrigin = frame_system::EnsureRoot; type EraPayout = pallet_staking::ConvertCurve; type ElectionProvider = - frame_election_provider_support::NoElection<(AccountId, BlockNumber, Staking, (), ())>; + frame_election_provider_support::NoElection<(AccountId, BlockNumber, Staking, ())>; type GenesisElectionProvider = Self::ElectionProvider; type VoterList = VoterList; type TargetList = pallet_staking::UseValidatorsMap; diff --git a/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs b/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs index cc7ea7c029ba8..7eee16cd5a4ff 100644 --- a/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs +++ b/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs @@ -102,7 +102,7 @@ impl pallet_staking::Config for Runtime { type BondingDuration = BondingDuration; type EraPayout = pallet_staking::ConvertCurve; type ElectionProvider = - frame_election_provider_support::NoElection<(AccountId, BlockNumber, Staking, (), ())>; + frame_election_provider_support::NoElection<(AccountId, BlockNumber, Staking, ())>; type GenesisElectionProvider = Self::ElectionProvider; type VoterList = VoterList; type TargetList = pallet_staking::UseValidatorsMap; diff --git a/substrate/frame/offences/benchmarking/src/mock.rs b/substrate/frame/offences/benchmarking/src/mock.rs index f37dbf55f52f7..c87fe36ee23d4 100644 --- 
a/substrate/frame/offences/benchmarking/src/mock.rs +++ b/substrate/frame/offences/benchmarking/src/mock.rs @@ -112,7 +112,6 @@ pallet_staking_reward_curve::build! { parameter_types! { pub const RewardCurve: &'static sp_runtime::curve::PiecewiseLinear<'static> = &I_NPOS; pub static ElectionsBounds: ElectionBounds = ElectionBoundsBuilder::default().build(); - pub const Sort: bool = true; } pub struct OnChainSeqPhragmen; @@ -121,9 +120,7 @@ impl onchain::Config for OnChainSeqPhragmen { type Solver = SequentialPhragmen; type DataProvider = Staking; type WeightInfo = (); - type MaxWinnersPerPage = ConstU32<100>; - type MaxBackersPerWinner = ConstU32<100>; - type Sort = Sort; + type MaxWinners = ConstU32<100>; type Bounds = ElectionsBounds; } diff --git a/substrate/frame/root-offences/src/mock.rs b/substrate/frame/root-offences/src/mock.rs index 09223802f67d5..45f1fa8c2058c 100644 --- a/substrate/frame/root-offences/src/mock.rs +++ b/substrate/frame/root-offences/src/mock.rs @@ -26,10 +26,8 @@ use frame_election_provider_support::{ use frame_support::{ derive_impl, parameter_types, traits::{ConstU32, ConstU64, OneSessionHandler}, - BoundedVec, }; use pallet_staking::StakerStatus; -use sp_core::ConstBool; use sp_runtime::{curve::PiecewiseLinear, testing::UintAuthorityId, traits::Zero, BuildStorage}; use sp_staking::{EraIndex, SessionIndex}; @@ -112,9 +110,7 @@ impl onchain::Config for OnChainSeqPhragmen { type Solver = SequentialPhragmen; type DataProvider = Staking; type WeightInfo = (); - type MaxWinnersPerPage = ConstU32<100>; - type MaxBackersPerWinner = ConstU32<100>; - type Sort = ConstBool; + type MaxWinners = ConstU32<100>; type Bounds = ElectionsBounds; } @@ -185,7 +181,7 @@ impl Config for Test { pub struct ExtBuilder { validator_count: u32, minimum_validator_count: u32, - invulnerables: BoundedVec::MaxInvulnerables>, + invulnerables: Vec, balance_factor: Balance, } @@ -194,7 +190,7 @@ impl Default for ExtBuilder { Self { validator_count: 2, 
minimum_validator_count: 0, - invulnerables: BoundedVec::new(), + invulnerables: vec![], balance_factor: 1, } } diff --git a/substrate/frame/session/benchmarking/src/inner.rs b/substrate/frame/session/benchmarking/src/inner.rs index 8fda2bb4655b1..9789b6bb593d0 100644 --- a/substrate/frame/session/benchmarking/src/inner.rs +++ b/substrate/frame/session/benchmarking/src/inner.rs @@ -58,7 +58,6 @@ mod benchmarks { false, true, RewardDestination::Staked, - pallet_staking::CurrentEra::::get().unwrap_or_default(), )?; let v_controller = pallet_staking::Pallet::::bonded(&v_stash).ok_or("not stash")?; @@ -83,7 +82,6 @@ mod benchmarks { false, true, RewardDestination::Staked, - pallet_staking::CurrentEra::::get().unwrap_or_default(), )?; let v_controller = pallet_staking::Pallet::::bonded(&v_stash).ok_or("not stash")?; let keys = T::Keys::decode(&mut TrailingZeroInput::zeroes()).unwrap(); diff --git a/substrate/frame/session/benchmarking/src/mock.rs b/substrate/frame/session/benchmarking/src/mock.rs index 235209f14cad2..0be337c459d14 100644 --- a/substrate/frame/session/benchmarking/src/mock.rs +++ b/substrate/frame/session/benchmarking/src/mock.rs @@ -120,7 +120,6 @@ pallet_staking_reward_curve::build! { parameter_types! 
{ pub const RewardCurve: &'static sp_runtime::curve::PiecewiseLinear<'static> = &I_NPOS; pub static ElectionsBounds: ElectionBounds = ElectionBoundsBuilder::default().build(); - pub const Sort: bool = true; } pub struct OnChainSeqPhragmen; @@ -129,9 +128,7 @@ impl onchain::Config for OnChainSeqPhragmen { type Solver = SequentialPhragmen; type DataProvider = Staking; type WeightInfo = (); - type MaxWinnersPerPage = ConstU32<100>; - type MaxBackersPerWinner = ConstU32<100>; - type Sort = Sort; + type MaxWinners = ConstU32<100>; type Bounds = ElectionsBounds; } diff --git a/substrate/frame/session/src/lib.rs b/substrate/frame/session/src/lib.rs index a80a2b235757b..8c359a4bf665f 100644 --- a/substrate/frame/session/src/lib.rs +++ b/substrate/frame/session/src/lib.rs @@ -661,6 +661,8 @@ impl Pallet { /// punishment after a fork. pub fn rotate_session() { let session_index = CurrentIndex::::get(); + log::trace!(target: "runtime::session", "rotating session {:?}", session_index); + let changed = QueuedChanged::::get(); // Inform the session handlers that a session is going to end. @@ -682,17 +684,11 @@ impl Pallet { // Increment session index. let session_index = session_index + 1; CurrentIndex::::put(session_index); + T::SessionManager::start_session(session_index); - log::trace!(target: "runtime::session", "starting_session {:?}", session_index); // Get next validator set. 
let maybe_next_validators = T::SessionManager::new_session(session_index + 1); - log::trace!( - target: "runtime::session", - "planning_session {:?} with {:?} validators", - session_index + 1, - maybe_next_validators.as_ref().map(|v| v.len()) - ); let (next_validators, next_identities_changed) = if let Some(validators) = maybe_next_validators { // NOTE: as per the documentation on `OnSessionEnding`, we consider diff --git a/substrate/frame/staking/Cargo.toml b/substrate/frame/staking/Cargo.toml index ee69c29af1672..74b1c78e9cbee 100644 --- a/substrate/frame/staking/Cargo.toml +++ b/substrate/frame/staking/Cargo.toml @@ -27,18 +27,16 @@ pallet-authorship = { workspace = true } pallet-session = { features = [ "historical", ], workspace = true } -rand = { features = ["alloc"], workspace = true } -rand_chacha = { workspace = true } scale-info = { features = ["derive", "serde"], workspace = true } serde = { features = ["alloc", "derive"], workspace = true } sp-application-crypto = { features = ["serde"], workspace = true } -sp-core = { workspace = true } sp-io = { workspace = true } sp-runtime = { features = ["serde"], workspace = true } sp-staking = { features = ["serde"], workspace = true } # Optional imports for benchmarking frame-benchmarking = { optional = true, workspace = true } +rand_chacha = { optional = true, workspace = true } [dev-dependencies] frame-benchmarking = { workspace = true, default-features = true } @@ -49,6 +47,7 @@ pallet-balances = { workspace = true, default-features = true } pallet-staking-reward-curve = { workspace = true, default-features = true } pallet-timestamp = { workspace = true, default-features = true } rand_chacha = { workspace = true, default-features = true } +sp-core = { workspace = true, default-features = true } sp-npos-elections = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } substrate-test-utils = { workspace = true } @@ -67,13 +66,10 @@ std = [ 
"pallet-balances/std", "pallet-session/std", "pallet-timestamp/std", - "rand/std", - "rand_chacha/std", "scale-info/std", "serde/std", "sp-application-crypto/std", "sp-core/std", - "sp-core/std", "sp-io/std", "sp-npos-elections/std", "sp-runtime/std", @@ -88,6 +84,7 @@ runtime-benchmarks = [ "pallet-bags-list/runtime-benchmarks", "pallet-balances/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", + "rand_chacha", "sp-runtime/runtime-benchmarks", "sp-staking/runtime-benchmarks", ] diff --git a/substrate/frame/staking/src/benchmarking.rs b/substrate/frame/staking/src/benchmarking.rs index ce4f0178a2480..41bfeed5b6de6 100644 --- a/substrate/frame/staking/src/benchmarking.rs +++ b/substrate/frame/staking/src/benchmarking.rs @@ -19,32 +19,32 @@ use super::*; use crate::{asset, ConfigOp, Pallet as Staking}; +use testing_utils::*; + use codec::Decode; -pub use frame_benchmarking::{ - impl_benchmark_test_suite, v2::*, whitelist_account, whitelisted_caller, BenchmarkError, -}; use frame_election_provider_support::{bounds::DataProviderBounds, SortedListProvider}; use frame_support::{ pallet_prelude::*, storage::bounded_vec::BoundedVec, - traits::{Get, Imbalance}, + traits::{Get, Imbalance, UnfilteredDispatchable}, }; -use frame_system::RawOrigin; use sp_runtime::{ traits::{Bounded, One, StaticLookup, TrailingZeroInput, Zero}, Perbill, Percent, Saturating, }; use sp_staking::{currency_to_vote::CurrencyToVote, SessionIndex}; -use testing_utils::*; + +pub use frame_benchmarking::{ + impl_benchmark_test_suite, v2::*, whitelist_account, whitelisted_caller, BenchmarkError, +}; +use frame_system::RawOrigin; const SEED: u32 = 0; const MAX_SPANS: u32 = 100; const MAX_SLASHES: u32 = 1000; -type BenchMaxValidators = - <::BenchmarkingConfig as BenchmarkingConfig>::MaxValidators; -type BenchMaxNominators = - <::BenchmarkingConfig as BenchmarkingConfig>::MaxNominators; +type MaxValidators = <::BenchmarkingConfig as BenchmarkingConfig>::MaxValidators; +type MaxNominators = 
<::BenchmarkingConfig as BenchmarkingConfig>::MaxNominators; // Add slashing spans to a user account. Not relevant for actual use, only to benchmark // read and write operations. @@ -73,7 +73,6 @@ pub fn create_validator_with_nominators( dead_controller: bool, unique_controller: bool, destination: RewardDestination, - era: u32, ) -> Result<(T::AccountId, Vec<(T::AccountId, T::AccountId)>), &'static str> { // Clean up any existing state. clear_validators_and_nominators::(); @@ -114,16 +113,9 @@ pub fn create_validator_with_nominators( } ValidatorCount::::put(1); - MinimumValidatorCount::::put(1); - - // Start a new (genesis) Era - // populate electable stashes as it gets read within `try_plan_new_era` - // ElectableStashes::::put( - // BoundedBTreeSet::try_from(vec![v_stash.clone()].into_iter().collect::>()) - // .unwrap(), - // ); - let new_validators = Staking::::try_plan_new_era(SessionIndex::one(), true).unwrap(); + // Start a new Era + let new_validators = Staking::::try_trigger_new_era(SessionIndex::one(), true).unwrap(); assert_eq!(new_validators.len(), 1); assert_eq!(new_validators[0], v_stash, "Our validator was not selected!"); @@ -136,13 +128,14 @@ pub fn create_validator_with_nominators( individual: points_individual.into_iter().collect(), }; - ErasRewardPoints::::insert(era, reward); + let current_era = CurrentEra::::get().unwrap(); + ErasRewardPoints::::insert(current_era, reward); // Create reward pool let total_payout = asset::existential_deposit::() .saturating_mul(upper_bound.into()) .saturating_mul(1000u32.into()); - >::insert(era, total_payout); + >::insert(current_era, total_payout); Ok((v_stash, nominators)) } @@ -230,123 +223,6 @@ const USER_SEED: u32 = 999666; mod benchmarks { use super::*; - #[benchmark] - fn on_initialize_noop() { - assert!(ElectableStashes::::get().is_empty()); - assert_eq!(NextElectionPage::::get(), None); - - #[block] - { - Pallet::::on_initialize(1_u32.into()); - } - - assert!(ElectableStashes::::get().is_empty()); - 
assert_eq!(NextElectionPage::::get(), None); - } - - #[benchmark] - fn do_elect_paged_inner( - v: Linear<1, { T::MaxValidatorSet::get() }>, - ) -> Result<(), BenchmarkError> { - // TODO: re-benchmark this - // use frame_election_provider_support::{ - // BoundedSupport, BoundedSupportsOf, ElectionProvider, - // }; - // let mut bounded_random_supports = BoundedSupportsOf::::default(); - // for i in 0..v { - // let backed = account("validator", i, SEED); - // let mut total = 0; - // let voters = (0..::MaxBackersPerWinner::get()) - // .map(|j| { - // let voter = account("nominator", j, SEED); - // let support = 100000; - // total += support; - // (voter, support) - // }) - // .collect::>() - // .try_into() - // .unwrap(); - // bounded_random_supports - // .try_push((backed, BoundedSupport { total, voters })) - // .map_err(|_| "bound failed") - // .expect("map is over the correct bound"); - // } - - #[block] - { - // assert_eq!(Pallet::::do_elect_paged_inner(bounded_random_supports), Ok(v as - // usize)); - } - - // assert!(!ElectableStashes::::get().is_empty()); - - Ok(()) - } - - #[benchmark] - fn get_npos_voters( - // number of validator intention. we will iterate all of them. - v: Linear<{ BenchMaxValidators::::get() / 2 }, { BenchMaxValidators::::get() }>, - - // number of nominator intention. we will iterate all of them. - n: Linear<{ BenchMaxNominators::::get() / 2 }, { BenchMaxNominators::::get() }>, - ) -> Result<(), BenchmarkError> { - create_validators_with_nominators_for_era::( - v, - n, - MaxNominationsOf::::get() as usize, - false, - None, - )?; - - assert_eq!(Validators::::count(), v); - assert_eq!(Nominators::::count(), n); - - let num_voters = (v + n) as usize; - - // default bounds are unbounded. 
- let voters; - #[block] - { - voters = >::get_npos_voters( - DataProviderBounds::default(), - &SnapshotStatus::::Waiting, - ); - } - - assert_eq!(voters.len(), num_voters); - - Ok(()) - } - - #[benchmark] - fn get_npos_targets( - // number of validator intention. - v: Linear<{ BenchMaxValidators::::get() / 2 }, { BenchMaxValidators::::get() }>, - ) -> Result<(), BenchmarkError> { - // number of nominator intention. - let n = BenchMaxNominators::::get(); - create_validators_with_nominators_for_era::( - v, - n, - MaxNominationsOf::::get() as usize, - false, - None, - )?; - - let targets; - - #[block] - { - // default bounds are unbounded. - targets = >::get_npos_targets(DataProviderBounds::default()); - } - - assert_eq!(targets.len() as u32, v); - - Ok(()) - } - #[benchmark] fn bond() { let stash = create_funded_user::("stash", USER_SEED, 100); @@ -693,7 +569,7 @@ mod benchmarks { #[benchmark] fn set_validator_count() { - let validator_count = BenchMaxValidators::::get(); + let validator_count = MaxValidators::::get(); #[extrinsic_call] _(RawOrigin::Root, validator_count); @@ -727,7 +603,7 @@ mod benchmarks { #[benchmark] // Worst case scenario, the list of invulnerables is very long. - fn set_invulnerables(v: Linear<0, { T::MaxInvulnerables::get() }>) { + fn set_invulnerables(v: Linear<0, { MaxValidators::::get() }>) { let mut invulnerables = Vec::new(); for i in 0..v { invulnerables.push(account("invulnerable", i, SEED)); @@ -823,20 +699,15 @@ mod benchmarks { fn payout_stakers_alive_staked( n: Linear<0, { T::MaxExposurePageSize::get() as u32 }>, ) -> Result<(), BenchmarkError> { - // reset genesis era 0 so that triggering the new genesis era works as expected. 
- CurrentEra::::set(Some(0)); - let current_era = CurrentEra::::get().unwrap(); - Staking::::clear_era_information(current_era); - let (validator, nominators) = create_validator_with_nominators::( n, T::MaxExposurePageSize::get() as u32, false, true, RewardDestination::Staked, - current_era, )?; + let current_era = CurrentEra::::get().unwrap(); // set the commission for this particular era as well. >::insert( current_era, @@ -951,6 +822,91 @@ mod benchmarks { Ok(()) } + #[benchmark] + fn new_era(v: Linear<1, 10>, n: Linear<0, 100>) -> Result<(), BenchmarkError> { + create_validators_with_nominators_for_era::( + v, + n, + MaxNominationsOf::::get() as usize, + false, + None, + )?; + let session_index = SessionIndex::one(); + + let validators; + #[block] + { + validators = + Staking::::try_trigger_new_era(session_index, true).ok_or("`new_era` failed")?; + } + + assert!(validators.len() == v as usize); + + Ok(()) + } + + #[benchmark(extra)] + fn payout_all(v: Linear<1, 10>, n: Linear<0, 100>) -> Result<(), BenchmarkError> { + create_validators_with_nominators_for_era::( + v, + n, + MaxNominationsOf::::get() as usize, + false, + None, + )?; + // Start a new Era + let new_validators = Staking::::try_trigger_new_era(SessionIndex::one(), true).unwrap(); + assert!(new_validators.len() == v as usize); + + let current_era = CurrentEra::::get().unwrap(); + let mut points_total = 0; + let mut points_individual = Vec::new(); + let mut payout_calls_arg = Vec::new(); + + for validator in new_validators.iter() { + points_total += 10; + points_individual.push((validator.clone(), 10)); + payout_calls_arg.push((validator.clone(), current_era)); + } + + // Give Era Points + let reward = EraRewardPoints:: { + total: points_total, + individual: points_individual.into_iter().collect(), + }; + + ErasRewardPoints::::insert(current_era, reward); + + // Create reward pool + let total_payout = asset::existential_deposit::() * 1000u32.into(); + >::insert(current_era, total_payout); + + let 
caller: T::AccountId = whitelisted_caller(); + let origin = RawOrigin::Signed(caller); + let calls: Vec<_> = payout_calls_arg + .iter() + .map(|arg| { + Call::::payout_stakers_by_page { + validator_stash: arg.0.clone(), + era: arg.1, + page: 0, + } + .encode() + }) + .collect(); + + #[block] + { + for call in calls { + as Decode>::decode(&mut &*call) + .expect("call is encoded above, encoding must be correct") + .dispatch_bypass_filter(origin.clone().into())?; + } + } + + Ok(()) + } + #[benchmark(extra)] fn do_slash( l: Linear<1, { T::MaxUnlockingChunks::get() as u32 }>, @@ -983,6 +939,67 @@ mod benchmarks { Ok(()) } + #[benchmark] + fn get_npos_voters( + // number of validator intention. we will iterate all of them. + v: Linear<{ MaxValidators::::get() / 2 }, { MaxValidators::::get() }>, + + // number of nominator intention. we will iterate all of them. + n: Linear<{ MaxNominators::::get() / 2 }, { MaxNominators::::get() }>, + ) -> Result<(), BenchmarkError> { + create_validators_with_nominators_for_era::( + v, + n, + MaxNominationsOf::::get() as usize, + false, + None, + )?; + + assert_eq!(Validators::::count(), v); + assert_eq!(Nominators::::count(), n); + + let num_voters = (v + n) as usize; + + // default bounds are unbounded. + let voters; + #[block] + { + voters = >::get_npos_voters(DataProviderBounds::default()); + } + + assert_eq!(voters.len(), num_voters); + + Ok(()) + } + + #[benchmark] + fn get_npos_targets( + // number of validator intention. + v: Linear<{ MaxValidators::::get() / 2 }, { MaxValidators::::get() }>, + ) -> Result<(), BenchmarkError> { + // number of nominator intention. + let n = MaxNominators::::get(); + create_validators_with_nominators_for_era::( + v, + n, + MaxNominationsOf::::get() as usize, + false, + None, + )?; + + let targets; + + #[block] + { + // default bounds are unbounded. 
+ targets = >::get_npos_targets(DataProviderBounds::default()); + } + + assert_eq!(targets.len() as u32, v); + + Ok(()) + } + #[benchmark] fn set_staking_configs_all_set() { #[extrinsic_call] @@ -1209,19 +1226,19 @@ mod tests { ExtBuilder::default().build_and_execute(|| { let n = 10; - let current_era = CurrentEra::::get().unwrap(); let (validator_stash, nominators) = create_validator_with_nominators::( n, <::MaxExposurePageSize as Get<_>>::get(), false, false, RewardDestination::Staked, - current_era, ) .unwrap(); assert_eq!(nominators.len() as u32, n); + let current_era = CurrentEra::::get().unwrap(); + let original_stakeable_balance = asset::stakeable_balance::(&validator_stash); assert_ok!(Staking::payout_stakers_by_page( RuntimeOrigin::signed(1337), @@ -1247,7 +1264,6 @@ mod tests { false, false, RewardDestination::Staked, - CurrentEra::::get().unwrap(), ) .unwrap(); diff --git a/substrate/frame/staking/src/lib.rs b/substrate/frame/staking/src/lib.rs index 1247470edf4ce..d42c863592124 100644 --- a/substrate/frame/staking/src/lib.rs +++ b/substrate/frame/staking/src/lib.rs @@ -143,40 +143,6 @@ //! The pallet implement the trait `SessionManager`. Which is the only API to query new validator //! set and allowing these validator set to be rewarded once their era is ended. //! -//! ## Multi-page election support -//! -//! > Unless explicitly stated on the contrary, one page is the equivalent of one block. "Pages" and -//! "blocks" are used interchangibly across the documentation. -//! -//! The pallet supports a multi-page election. In a multi-page election, some key actions of the -//! staking pallet progress over multi pages/blocks. Most notably: -//! 1. **Snapshot creation**: The voter snapshot *may be* created over multi blocks. The -//! [`frame_election_provider_support::ElectionDataProvider`] trait supports that functionality -//! by parameterizing the electing voters by the page index. Even though the target snapshot -//! 
could be paged, this pallet implements a single-page target snapshot only. -//! 2. **Election**: The election is multi-block, where a set of supports is fetched per page/block. -//! This pallet keeps track of the elected stashes and their exposures as the paged election is -//! called. The [`frame_election_provider_support::ElectionProvider`] trait supports this -//! functionality by parameterizing the elect call with the page index. -//! -//! Note: [`frame_election_provider_support::ElectionDataProvider`] trait supports mulit-paged -//! target snaphsot. However, this pallet only supports and implements a single-page snapshot. -//! Calling `ElectionDataProvider::electable_targets` with a different index than 0 is redundant -//! and the single page idx 0 of targets be returned. -//! -//! ### Prepare an election ahead of time with `on_initialize` -//! -//! This pallet is expected to have a set of winners ready and their exposures collected and stored -//! at the time of a predicted election. In order to ensure that, it starts to fetch the paged -//! results of an election from the [`frame_election_provider_support::ElectionProvider`] `N` pages -//! ahead of the next election prediction. -//! -//! As the pages of winners are fetched, their exposures and era info are processed and stored so -//! that all the data is ready at the time of the next election. -//! -//! Even though this pallet supports mulit-page elections, it also can be used in a single page -//! context provided that the configs are set accordingly. -//! //! ## Interface //! //! 
### Dispatchable Functions @@ -328,8 +294,6 @@ pub mod testing_utils; pub(crate) mod mock; #[cfg(test)] mod tests; -#[cfg(test)] -mod tests_paged_election; pub mod asset; pub mod election_size_tracker; @@ -345,7 +309,6 @@ extern crate alloc; use alloc::{collections::btree_map::BTreeMap, vec, vec::Vec}; use codec::{Decode, DecodeWithMemTracking, Encode, HasCompact, MaxEncodedLen}; -use frame_election_provider_support::ElectionProvider; use frame_support::{ defensive, defensive_assert, traits::{ @@ -385,22 +348,9 @@ macro_rules! log { }; } -/// Alias for a bounded set of exposures behind a validator, parameterized by this pallet's -/// election provider. -pub type BoundedExposuresOf = BoundedVec< - ( - ::AccountId, - Exposure<::AccountId, BalanceOf>, - ), - MaxWinnersPerPageOf<::ElectionProvider>, ->; - -/// Alias for the maximum number of winners (aka. active validators), as defined in by this pallet's -/// config. -pub type MaxWinnersOf = ::MaxValidatorSet; - -/// Alias for the maximum number of winners per page, as expected by the election provider. -pub type MaxWinnersPerPageOf

=

::MaxWinnersPerPage; +/// Maximum number of winners (aka. active validators), as defined in the election provider of this +/// pallet. +pub type MaxWinnersOf = <::ElectionProvider as frame_election_provider_support::ElectionProviderBase>::MaxWinners; /// Maximum number of nominations per nominator. pub type MaxNominationsOf = @@ -520,18 +470,6 @@ pub struct UnlockChunk { era: EraIndex, } -/// Status of a paged snapshot progress. -#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, TypeInfo, MaxEncodedLen, Default)] -pub enum SnapshotStatus { - /// Paged snapshot is in progress, the `AccountId` was the last staker iterated in the list. - Ongoing(AccountId), - /// All the stakers in the system have been consumed since the snapshot started. - Consumed, - /// Waiting for a new snapshot to be requested. - #[default] - Waiting, -} - /// The ledger of a (bonded) stash. /// /// Note: All the reads and mutations to the [`Ledger`], [`Bonded`] and [`Payee`] storage items @@ -1180,12 +1118,44 @@ where pub struct EraInfo(core::marker::PhantomData); impl EraInfo { /// Returns true if validator has one or more page of era rewards not claimed yet. + // Also looks at legacy storage that can be cleaned up after #433. pub fn pending_rewards(era: EraIndex, validator: &T::AccountId) -> bool { - >::get(&era, validator) - .map(|overview| { - ClaimedRewards::::get(era, validator).len() < overview.page_count as usize - }) - .unwrap_or(false) + let page_count = if let Some(overview) = >::get(&era, validator) { + overview.page_count + } else { + if >::contains_key(era, validator) { + // this means non paged exposure, and we treat them as single paged. + 1 + } else { + // if no exposure, then no rewards to claim. + return false + } + }; + + // check if era is marked claimed in legacy storage. 
+ if >::get(validator) + .map(|l| l.legacy_claimed_rewards.contains(&era)) + .unwrap_or_default() + { + return false + } + + ClaimedRewards::::get(era, validator).len() < page_count as usize + } + + /// Temporary function which looks at both (1) passed param `T::StakingLedger` for legacy + /// non-paged rewards, and (2) `T::ClaimedRewards` for paged rewards. This function can be + /// removed once `T::HistoryDepth` eras have passed and none of the older non-paged rewards + /// are relevant/claimable. + // Refer tracker issue for cleanup: https://github.com/paritytech/polkadot-sdk/issues/433 + pub(crate) fn is_rewards_claimed_with_legacy_fallback( + era: EraIndex, + ledger: &StakingLedger, + validator: &T::AccountId, + page: Page, + ) -> bool { + ledger.legacy_claimed_rewards.binary_search(&era).is_ok() || + Self::is_rewards_claimed(era, validator, page) } /// Check if the rewards for the given era and page index have been claimed. @@ -1206,7 +1176,20 @@ impl EraInfo { validator: &T::AccountId, page: Page, ) -> Option>> { - let overview = >::get(&era, validator)?; + let overview = >::get(&era, validator); + + // return clipped exposure if page zero and paged exposure does not exist + // exists for backward compatibility and can be removed as part of #13034 + if overview.is_none() && page == 0 { + return Some(PagedExposure::from_clipped(>::get(era, validator))) + } + + // no exposure for this validator + if overview.is_none() { + return None + } + + let overview = overview.expect("checked above; qed"); // validator stake is added only in page zero let validator_stake = if page == 0 { overview.own } else { Zero::zero() }; @@ -1227,9 +1210,13 @@ impl EraInfo { era: EraIndex, validator: &T::AccountId, ) -> Exposure> { - let Some(overview) = >::get(&era, validator) else { - return Exposure::default(); - }; + let overview = >::get(&era, validator); + + if overview.is_none() { + return ErasStakers::::get(era, validator) + } + + let overview = overview.expect("checked 
above; qed"); let mut others = Vec::with_capacity(overview.nominator_count as usize); for page in 0..overview.page_count { @@ -1260,7 +1247,20 @@ impl EraInfo { } /// Returns the next page that can be claimed or `None` if nothing to claim. - pub(crate) fn get_next_claimable_page(era: EraIndex, validator: &T::AccountId) -> Option { + pub(crate) fn get_next_claimable_page( + era: EraIndex, + validator: &T::AccountId, + ledger: &StakingLedger, + ) -> Option { + if Self::is_non_paged_exposure(era, validator) { + return match ledger.legacy_claimed_rewards.binary_search(&era) { + // already claimed + Ok(_) => None, + // Non-paged exposure is considered as a single page + Err(_) => Some(0), + } + } + // Find next claimable page of paged exposure. let page_count = Self::get_page_count(era, validator); let all_claimable_pages: Vec = (0..page_count).collect(); @@ -1269,6 +1269,11 @@ impl EraInfo { all_claimable_pages.into_iter().find(|p| !claimed_pages.contains(p)) } + /// Checks if exposure is paged or not. + fn is_non_paged_exposure(era: EraIndex, validator: &T::AccountId) -> bool { + >::contains_key(&era, validator) + } + /// Returns validator commission for this era and page. pub(crate) fn get_validator_commission( era: EraIndex, @@ -1295,98 +1300,32 @@ impl EraInfo { } /// Store exposure for elected validators at start of an era. - /// - /// If the exposure does not exist yet for the tuple (era, validator), it sets it. Otherwise, - /// it updates the existing record by ensuring *intermediate* exposure pages are filled up with - /// `T::MaxExposurePageSize` number of backers per page and the remaining exposures are added - /// to new exposure pages. 
- pub fn upsert_exposure( + pub fn set_exposure( era: EraIndex, validator: &T::AccountId, - mut exposure: Exposure>, + exposure: Exposure>, ) { let page_size = T::MaxExposurePageSize::get().defensive_max(1); - if let Some(stored_overview) = ErasStakersOverview::::get(era, &validator) { - let last_page_idx = stored_overview.page_count.saturating_sub(1); - - let mut last_page = - ErasStakersPaged::::get((era, validator, last_page_idx)).unwrap_or_default(); - let last_page_empty_slots = - T::MaxExposurePageSize::get().saturating_sub(last_page.others.len() as u32); - - // splits the exposure so that `exposures_append` will fit within the last exposure - // page, up to the max exposure page size. The remaining individual exposures in - // `exposure` will be added to new pages. - let exposures_append = exposure.split_others(last_page_empty_slots); - - ErasStakersOverview::::mutate(era, &validator, |stored| { - // new metadata is updated based on 3 different set of exposures: the - // current one, the exposure split to be "fitted" into the current last page and - // the exposure set that will be appended from the new page onwards. - let new_metadata = - stored.defensive_unwrap_or_default().update_with::( - [&exposures_append, &exposure] - .iter() - .fold(Default::default(), |total, expo| { - total.saturating_add(expo.total.saturating_sub(expo.own)) - }), - [&exposures_append, &exposure] - .iter() - .fold(Default::default(), |count, expo| { - count.saturating_add(expo.others.len() as u32) - }), - ); - *stored = new_metadata.into(); - }); + let nominator_count = exposure.others.len(); + // expected page count is the number of nominators divided by the page size, rounded up. + let expected_page_count = nominator_count + .defensive_saturating_add((page_size as usize).defensive_saturating_sub(1)) + .saturating_div(page_size as usize); - // fill up last page with exposures. 
- last_page.page_total = last_page - .page_total - .saturating_add(exposures_append.total) - .saturating_sub(exposures_append.own); - last_page.others.extend(exposures_append.others); - ErasStakersPaged::::insert((era, &validator, last_page_idx), last_page); - - // now handle the remaining exposures and append the exposure pages. The metadata update - // has been already handled above. - let (_, exposure_pages) = exposure.into_pages(page_size); - - exposure_pages.iter().enumerate().for_each(|(idx, paged_exposure)| { - let append_at = - (last_page_idx.saturating_add(1).saturating_add(idx as u32)) as Page; - >::insert((era, &validator, append_at), &paged_exposure); - }); - } else { - // expected page count is the number of nominators divided by the page size, rounded up. - let expected_page_count = exposure - .others - .len() - .defensive_saturating_add((page_size as usize).defensive_saturating_sub(1)) - .saturating_div(page_size as usize); - - // no exposures yet for this (era, validator) tuple, calculate paged exposure pages and - // metadata from a blank slate. - let (exposure_metadata, exposure_pages) = exposure.into_pages(page_size); - defensive_assert!(exposure_pages.len() == expected_page_count, "unexpected page count"); - - // insert metadata. - ErasStakersOverview::::insert(era, &validator, exposure_metadata); - - // insert validator's overview. - exposure_pages.iter().enumerate().for_each(|(idx, paged_exposure)| { - let append_at = idx as Page; - >::insert((era, &validator, append_at), &paged_exposure); - }); - }; - } + let (exposure_metadata, exposure_pages) = exposure.into_pages(page_size); + defensive_assert!(exposure_pages.len() == expected_page_count, "unexpected page count"); - /// Update the total exposure for all the elected validators in the era. 
- pub(crate) fn add_total_stake(era: EraIndex, stake: BalanceOf) { - >::mutate(era, |total_stake| { - *total_stake += stake; + >::insert(era, &validator, &exposure_metadata); + exposure_pages.iter().enumerate().for_each(|(page, paged_exposure)| { + >::insert((era, &validator, page as Page), &paged_exposure); }); } + + /// Store total exposure for all the elected validators in the era. + pub(crate) fn set_total_stake(era: EraIndex, total_stake: BalanceOf) { + >::insert(era, total_stake); + } } /// A utility struct that provides a way to check if a given account is a staker. @@ -1409,9 +1348,9 @@ impl Contains for AllStakers { /// Configurations of the benchmarking of the pallet. pub trait BenchmarkingConfig { - /// The maximum number of validators to use for snapshot creation. + /// The maximum number of validators to use. type MaxValidators: Get; - /// The maximum number of nominators to use for snapshot creation, per page. + /// The maximum number of nominators to use. type MaxNominators: Get; } diff --git a/substrate/frame/staking/src/migrations.rs b/substrate/frame/staking/src/migrations.rs index 96c63a657da93..e17780308f426 100644 --- a/substrate/frame/staking/src/migrations.rs +++ b/substrate/frame/staking/src/migrations.rs @@ -86,38 +86,12 @@ pub mod v17 { /// severity for re-enabling purposes. 
pub mod v16 { use super::*; - use frame_support::Twox64Concat; use sp_staking::offence::OffenceSeverity; - #[frame_support::storage_alias] - pub(crate) type Invulnerables = - StorageValue, Vec<::AccountId>, ValueQuery>; - #[frame_support::storage_alias] pub(crate) type DisabledValidators = - StorageValue, Vec<(u32, OffenceSeverity)>, ValueQuery>; + StorageValue, Vec<(u32, OffenceSeverity)>, ValueQuery>; - #[frame_support::storage_alias] - pub(crate) type ErasStakers = StorageDoubleMap< - Pallet, - Twox64Concat, - EraIndex, - Twox64Concat, - ::AccountId, - Exposure<::AccountId, BalanceOf>, - ValueQuery, - >; - - #[frame_support::storage_alias] - pub(crate) type ErasStakersClipped = StorageDoubleMap< - Pallet, - Twox64Concat, - EraIndex, - Twox64Concat, - ::AccountId, - Exposure<::AccountId, BalanceOf>, - ValueQuery, - >; pub struct VersionUncheckedMigrateV15ToV16(core::marker::PhantomData); impl UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateV15ToV16 { diff --git a/substrate/frame/staking/src/mock.rs b/substrate/frame/staking/src/mock.rs index d3eb74a98b1a8..3915ee8d745e7 100644 --- a/substrate/frame/staking/src/mock.rs +++ b/substrate/frame/staking/src/mock.rs @@ -20,8 +20,7 @@ use crate::{self as pallet_staking, *}; use frame_election_provider_support::{ bounds::{ElectionBounds, ElectionBoundsBuilder}, - onchain, BoundedSupports, BoundedSupportsOf, ElectionProvider, PageIndex, SequentialPhragmen, - Support, VoteWeight, + onchain, SequentialPhragmen, VoteWeight, }; use frame_support::{ assert_ok, derive_impl, ord_parameter_types, parameter_types, @@ -31,17 +30,15 @@ use frame_support::{ weights::constants::RocksDbWeight, }; use frame_system::{EnsureRoot, EnsureSignedBy}; -use sp_core::ConstBool; use sp_io; use sp_runtime::{curve::PiecewiseLinear, testing::UintAuthorityId, traits::Zero, BuildStorage}; use sp_staking::{ offence::{OffenceDetails, OnOffenceHandler}, - OnStakingUpdate, StakingInterface, + OnStakingUpdate, }; -pub(crate) const INIT_TIMESTAMP: u64 = 
30_000; -pub(crate) const BLOCK_TIME: u64 = 1000; -pub(crate) const SINGLE_PAGE: u32 = 0; +pub const INIT_TIMESTAMP: u64 = 30_000; +pub const BLOCK_TIME: u64 = 1000; /// The AccountId alias in this test module. pub(crate) type AccountId = u64; @@ -208,10 +205,9 @@ parameter_types! { pub static MaxExposurePageSize: u32 = 64; pub static MaxUnlockingChunks: u32 = 32; pub static RewardOnUnbalanceWasCalled: bool = false; - pub static MaxValidatorSet: u32 = 100; + pub static MaxWinners: u32 = 100; pub static ElectionsBounds: ElectionBounds = ElectionBoundsBuilder::default().build(); pub static AbsoluteMaxNominations: u32 = 16; - pub static MaxWinnersPerPage: u32 = MaxValidatorSet::get(); } type VoterBagsListInstance = pallet_bags_list::Instance1; @@ -224,87 +220,14 @@ impl pallet_bags_list::Config for Test { type Score = VoteWeight; } -// multi-page types and controller. -parameter_types! { - // default is single page EP. - pub static Pages: PageIndex = 1; - // Should be large enough to pass all tests, but not too big to cause benchmarking tests to be too slow. - pub static MaxBackersPerWinner: u32 = 256; - // If set, the `SingleOrMultipageElectionProvider` will return these exact values, per page - // index. If not, it will behave is per the code. - pub static CustomElectionSupports: Option::ElectionProvider>, onchain::Error>>> = None; -} - -// An election provider wrapper that allows testing with single and multi page modes. -pub struct SingleOrMultipageElectionProvider(core::marker::PhantomData); -impl< - // single page EP. 
- SP: ElectionProvider< - AccountId = AccountId, - MaxWinnersPerPage = MaxWinnersPerPage, - MaxBackersPerWinner = MaxBackersPerWinner, - Error = onchain::Error, - >, - > ElectionProvider for SingleOrMultipageElectionProvider -{ - type AccountId = AccountId; - type BlockNumber = BlockNumber; - type MaxWinnersPerPage = MaxWinnersPerPage; - type MaxBackersPerWinner = MaxBackersPerWinner; - type Pages = Pages; - type DataProvider = Staking; - type Error = onchain::Error; - - fn elect(page: PageIndex) -> Result, Self::Error> { - if let Some(maybe_paged_supports) = CustomElectionSupports::get() { - maybe_paged_supports[page as usize].clone() - } else { - if Pages::get() == 1 { - SP::elect(page) - } else { - // will take first `MaxWinnersPerPage` in the validator set as winners. in this mock - // impl, we return an arbitrarily but deterministic nominator exposure per - // winner/page. - let supports: Vec<(AccountId, Support)> = - Validators::::iter_keys() - .filter(|x| Staking::status(x) == Ok(StakerStatus::Validator)) - .take(Self::MaxWinnersPerPage::get() as usize) - .map(|v| { - ( - v, - Support { - total: (100 + page).into(), - voters: vec![((page + 1) as AccountId, (100 + page).into())], - }, - ) - }) - .collect::>(); - - Ok(to_bounded_supports(supports)) - } - } - } - fn msp() -> PageIndex { - SP::msp() - } - fn lsp() -> PageIndex { - SP::lsp() - } - fn ongoing() -> bool { - SP::ongoing() - } -} - pub struct OnChainSeqPhragmen; impl onchain::Config for OnChainSeqPhragmen { type System = Test; type Solver = SequentialPhragmen; type DataProvider = Staking; type WeightInfo = (); + type MaxWinners = MaxWinners; type Bounds = ElectionsBounds; - type Sort = ConstBool; - type MaxBackersPerWinner = MaxBackersPerWinner; - type MaxWinnersPerPage = MaxWinnersPerPage; } pub struct MockReward {} @@ -362,10 +285,9 @@ impl crate::pallet::pallet::Config for Test { type EraPayout = ConvertCurve; type NextNewSession = Session; type MaxExposurePageSize = MaxExposurePageSize; - type 
MaxValidatorSet = MaxValidatorSet; - type ElectionProvider = - SingleOrMultipageElectionProvider>; - type GenesisElectionProvider = onchain::OnChainExecution; + type ElectionProvider = onchain::OnChainExecution; + type GenesisElectionProvider = Self::ElectionProvider; + // NOTE: consider a macro and use `UseNominatorsAndValidatorsMap` as well. type VoterList = VoterBagsList; type TargetList = UseValidatorsMap; type NominationsQuota = WeightedNominationsQuota<16>; @@ -373,8 +295,6 @@ impl crate::pallet::pallet::Config for Test { type HistoryDepth = HistoryDepth; type MaxControllersInDeprecationBatch = MaxControllersInDeprecationBatch; type EventListeners = EventListenerMock; - type MaxInvulnerables = ConstU32<20>; - type MaxDisabledValidators = ConstU32<100>; type Filter = MockedRestrictList; } @@ -409,7 +329,7 @@ pub struct ExtBuilder { nominate: bool, validator_count: u32, minimum_validator_count: u32, - invulnerables: BoundedVec::MaxInvulnerables>, + invulnerables: Vec, has_stakers: bool, initialize_first_session: bool, pub min_nominator_bond: Balance, @@ -427,7 +347,7 @@ impl Default for ExtBuilder { validator_count: 2, minimum_validator_count: 0, balance_factor: 1, - invulnerables: BoundedVec::new(), + invulnerables: vec![], has_stakers: true, initialize_first_session: true, min_nominator_bond: ExistentialDeposit::get(), @@ -461,8 +381,7 @@ impl ExtBuilder { self } pub fn invulnerables(mut self, invulnerables: Vec) -> Self { - self.invulnerables = BoundedVec::try_from(invulnerables) - .expect("Too many invulnerable validators: upper limit is MaxInvulnerables"); + self.invulnerables = invulnerables; self } pub fn session_per_era(self, length: SessionIndex) -> Self { @@ -511,22 +430,10 @@ impl ExtBuilder { self.stakers.push((stash, ctrl, stake, status)); self } - pub fn exposures_page_size(self, max: u32) -> Self { - MaxExposurePageSize::set(max); - self - } pub fn balance_factor(mut self, factor: Balance) -> Self { self.balance_factor = factor; self } - pub fn 
multi_page_election_provider(self, pages: PageIndex) -> Self { - Pages::set(pages); - self - } - pub fn max_winners_per_page(self, max: u32) -> Self { - MaxWinnersPerPage::set(max); - self - } pub fn try_state(self, enable: bool) -> Self { SkipTryStateCheck::set(!enable); self @@ -570,7 +477,6 @@ impl ExtBuilder { (71, self.balance_factor * 2000), (80, self.balance_factor), (81, self.balance_factor * 2000), - (91, self.balance_factor * 2000), // This allows us to have a total_payout different from 0. (999, 1_000_000_000_000), ], @@ -816,13 +722,6 @@ pub(crate) fn validator_controllers() -> Vec { .collect() } -pub(crate) fn era_exposures(era: u32) -> Vec<(AccountId, Exposure)> { - validator_controllers() - .into_iter() - .map(|v| (v, Staking::eras_stakers(era, &v))) - .collect::>() -} - pub(crate) fn on_offence_in_era( offenders: &[OffenceDetails< AccountId, @@ -1046,16 +945,6 @@ pub(crate) fn balances(who: &AccountId) -> (Balance, Balance) { (asset::stakeable_balance::(who), Balances::reserved_balance(who)) } -pub(crate) fn to_bounded_supports( - supports: Vec<(AccountId, Support)>, -) -> BoundedSupports< - AccountId, - <::ElectionProvider as ElectionProvider>::MaxWinnersPerPage, - <::ElectionProvider as ElectionProvider>::MaxBackersPerWinner, -> { - supports.try_into().unwrap() -} - pub(crate) fn restrict(who: &AccountId) { if !RestrictedAccounts::get().contains(who) { RestrictedAccounts::mutate(|l| l.push(*who)); diff --git a/substrate/frame/staking/src/pallet/impls.rs b/substrate/frame/staking/src/pallet/impls.rs index 66bfdc16aea55..eb028ba7410cf 100644 --- a/substrate/frame/staking/src/pallet/impls.rs +++ b/substrate/frame/staking/src/pallet/impls.rs @@ -18,8 +18,9 @@ //! Implementations for the Staking FRAME Pallet. 
use frame_election_provider_support::{ - bounds::CountBound, data_provider, BoundedSupportsOf, DataProviderBounds, ElectionDataProvider, - ElectionProvider, PageIndex, ScoreProvider, SortedListProvider, VoteWeight, VoterOf, + bounds::{CountBound, SizeBound}, + data_provider, BoundedSupportsOf, DataProviderBounds, ElectionDataProvider, ElectionProvider, + ScoreProvider, SortedListProvider, VoteWeight, VoterOf, }; use frame_support::{ defensive, @@ -49,10 +50,10 @@ use sp_staking::{ use crate::{ asset, election_size_tracker::StaticTracker, log, slashing, weights::WeightInfo, ActiveEraInfo, - BalanceOf, BoundedExposuresOf, EraInfo, EraPayout, Exposure, ExposureOf, Forcing, - IndividualExposure, LedgerIntegrityState, MaxNominationsOf, MaxWinnersOf, MaxWinnersPerPageOf, - Nominations, NominationsQuota, PositiveImbalanceOf, RewardDestination, SessionInterface, - SnapshotStatus, StakingLedger, ValidatorPrefs, STAKING_ID, + BalanceOf, EraInfo, EraPayout, Exposure, ExposureOf, Forcing, IndividualExposure, + LedgerIntegrityState, MaxNominationsOf, MaxWinnersOf, Nominations, NominationsQuota, + PositiveImbalanceOf, RewardDestination, SessionInterface, StakingLedger, ValidatorPrefs, + STAKING_ID, }; use alloc::{boxed::Box, vec, vec::Vec}; @@ -72,20 +73,6 @@ use sp_runtime::TryRuntimeError; const NPOS_MAX_ITERATIONS_COEFFICIENT: u32 = 2; impl Pallet { - /// Fetches the number of pages configured by the election provider. - pub fn election_pages() -> u32 { - <::ElectionProvider as ElectionProvider>::Pages::get() - } - - /// Clears up all election preparation metadata in storage. - pub(crate) fn clear_election_metadata() { - VoterSnapshotStatus::::kill(); - NextElectionPage::::kill(); - ElectableStashes::::kill(); - // TODO: crude weights, improve. - Self::register_weight(T::DbWeight::get().writes(3)); - } - /// Fetches the ledger associated with a controller or stash account, if any. 
pub fn ledger(account: StakingAccount) -> Result, Error> { StakingLedger::::get(account) @@ -248,8 +235,13 @@ impl Pallet { validator_stash: T::AccountId, era: EraIndex, ) -> DispatchResultWithPostInfo { - let page = - EraInfo::::get_next_claimable_page(era, &validator_stash).ok_or_else(|| { + let controller = Self::bonded(&validator_stash).ok_or_else(|| { + Error::::NotStash.with_weight(T::WeightInfo::payout_stakers_alive_staked(0)) + })?; + + let ledger = Self::ledger(StakingAccount::Controller(controller))?; + let page = EraInfo::::get_next_claimable_page(era, &validator_stash, &ledger) + .ok_or_else(|| { Error::::AlreadyClaimed .with_weight(T::WeightInfo::payout_stakers_alive_staked(0)) })?; @@ -269,7 +261,6 @@ impl Pallet { })?; let history_depth = T::HistoryDepth::get(); - ensure!( era <= current_era && era >= current_era.saturating_sub(history_depth), Error::::InvalidEraToReward @@ -305,13 +296,13 @@ impl Pallet { let stash = ledger.stash.clone(); - if EraInfo::::is_rewards_claimed(era, &stash, page) { + if EraInfo::::is_rewards_claimed_with_legacy_fallback(era, &ledger, &stash, page) { return Err(Error::::AlreadyClaimed .with_weight(T::WeightInfo::payout_stakers_alive_staked(0))) + } else { + EraInfo::::set_rewards_as_claimed(era, &stash, page); } - EraInfo::::set_rewards_as_claimed(era, &stash, page); - let exposure = EraInfo::::get_paged_exposure(era, &stash, page).ok_or_else(|| { Error::::InvalidEraToReward .with_weight(T::WeightInfo::payout_stakers_alive_staked(0)) @@ -361,7 +352,7 @@ impl Pallet { era_index: era, validator_stash: stash.clone(), page, - next: EraInfo::::get_next_claimable_page(era, &stash), + next: EraInfo::::get_next_claimable_page(era, &stash, &ledger), }); let mut total_imbalance = PositiveImbalanceOf::::zero(); @@ -457,10 +448,6 @@ impl Pallet { } /// Plan a new session potentially trigger a new era. - /// - /// Subsequent function calls in the happy path are as follows: - /// 1. `try_plan_new_era` - /// 2. 
`plan_new_era` fn new_session( session_index: SessionIndex, is_genesis: bool, @@ -478,9 +465,9 @@ impl Pallet { match ForceEra::::get() { // Will be set to `NotForcing` again if a new era has been triggered. Forcing::ForceNew => (), - // Short circuit to `try_plan_new_era`. + // Short circuit to `try_trigger_new_era`. Forcing::ForceAlways => (), - // Only go to `try_plan_new_era` if deadline reached. + // Only go to `try_trigger_new_era` if deadline reached. Forcing::NotForcing if era_length >= T::SessionsPerEra::get() => (), _ => { // Either `Forcing::ForceNone`, @@ -490,7 +477,7 @@ impl Pallet { } // New era. - let maybe_new_era_validators = Self::try_plan_new_era(session_index, is_genesis); + let maybe_new_era_validators = Self::try_trigger_new_era(session_index, is_genesis); if maybe_new_era_validators.is_some() && matches!(ForceEra::::get(), Forcing::ForceNew) { @@ -501,7 +488,7 @@ impl Pallet { } else { // Set initial era. log!(debug, "Starting the first era."); - Self::try_plan_new_era(session_index, is_genesis) + Self::try_trigger_new_era(session_index, is_genesis) } } @@ -545,7 +532,6 @@ impl Pallet { fn start_era(start_session: SessionIndex) { let active_era = ActiveEra::::mutate(|active_era| { let new_index = active_era.as_ref().map(|info| info.index + 1).unwrap_or(0); - log!(debug, "starting active era {:?}", new_index); *active_era = Some(ActiveEraInfo { index: new_index, // Set new active era start in next `on_finalize`. To guarantee usage of `Time` @@ -614,78 +600,69 @@ impl Pallet { } } - /// Helper function provided to other pallets that want to rely on pallet-stkaing for - /// testing/benchmarking, and wish to populate `ElectableStashes`, such that a next call (post - /// genesis) to `try_plan_new_era` works. + /// Plan a new era. /// - /// This uses `GenesisElectionProvider` which should always be set to something reasonable and - /// instant. 
- pub fn populate_staking_election_testing_benchmarking_only() -> Result<(), &'static str> { - let supports = ::elect(Zero::zero()).map_err(|e| { - log!(warn, "genesis election provider failed due to {:?}", e); - "election failed" - })?; - Self::do_elect_paged_inner(supports).map_err(|_| "do_elect_paged_inner")?; - Ok(()) + /// * Bump the current era storage (which holds the latest planned era). + /// * Store start session index for the new planned era. + /// * Clean old era information. + /// * Store staking information for the new planned era + /// + /// Returns the new validator set. + pub fn trigger_new_era( + start_session_index: SessionIndex, + exposures: BoundedVec< + (T::AccountId, Exposure>), + MaxWinnersOf, + >, + ) -> BoundedVec> { + // Increment or set current era. + let new_planned_era = CurrentEra::::mutate(|s| { + *s = Some(s.map(|s| s + 1).unwrap_or(0)); + s.unwrap() + }); + ErasStartSessionIndex::::insert(&new_planned_era, &start_session_index); + + // Clean old era information. + if let Some(old_era) = new_planned_era.checked_sub(T::HistoryDepth::get() + 1) { + Self::clear_era_information(old_era); + } + + // Set staking information for the new era. + Self::store_stakers_info(exposures, new_planned_era) } /// Potentially plan a new era. /// - /// The election results are either fetched directly from an election provider if it is the - /// "genesis" election or from a cached set of winners. - /// + /// Get election result from `T::ElectionProvider`. /// In case election result has more than [`MinimumValidatorCount`] validator trigger a new era. /// /// In case a new era is planned, the new validator set is returned. - pub(crate) fn try_plan_new_era( + pub(crate) fn try_trigger_new_era( start_session_index: SessionIndex, is_genesis: bool, ) -> Option>> { - // TODO: weights of this call path are rather crude, improve. - let validators: BoundedVec> = if is_genesis { - // genesis election only uses one election result page. 
- let result = ::elect(Zero::zero()).map_err(|e| { + let election_result: BoundedVec<_, MaxWinnersOf> = if is_genesis { + let result = ::elect().map_err(|e| { log!(warn, "genesis election provider failed due to {:?}", e); Self::deposit_event(Event::StakingElectionFailed); }); - let exposures = Self::collect_exposures(result.ok().unwrap_or_default()); - - let validators = exposures - .iter() - .map(|(validator, _)| validator) - .cloned() - .try_collect() - .unwrap_or_default(); - - // set stakers info for genesis era (0). - let _ = Self::store_stakers_info(exposures, Zero::zero()); - - // consume full block weight to be safe. - Self::register_weight(sp_runtime::traits::Bounded::max_value()); - validators - } else { - // note: exposures have already been processed and stored for each of the election - // solution page at the time of `elect_paged(page_index)`. - Self::register_weight(T::DbWeight::get().reads(1)); - ElectableStashes::::take() + result + .ok()? .into_inner() - .into_iter() - .collect::>() .try_into() - .expect("same bounds, will fit; qed.") + // both bounds checked in integrity test to be equal + .defensive_unwrap_or_default() + } else { + let result = ::elect().map_err(|e| { + log!(warn, "election provider failed due to {:?}", e); + Self::deposit_event(Event::StakingElectionFailed); + }); + result.ok()? }; - log!( - info, - "(is_genesis?: {:?}) electable validators count for session starting {:?}, era {:?}: {:?}", - is_genesis, - start_session_index, - CurrentEra::::get().unwrap_or_default() + 1, - validators.len() - ); - - if (validators.len() as u32) < MinimumValidatorCount::::get().max(1) { + let exposures = Self::collect_exposures(election_result); + if (exposures.len() as u32) < MinimumValidatorCount::::get().max(1) { // Session will panic if we ever return an empty validator set, thus max(1) ^^. 
match CurrentEra::::get() { Some(current_era) if current_era > 0 => log!( @@ -693,7 +670,7 @@ impl Pallet { "chain does not have enough staking candidates to operate for era {:?} ({} \ elected, minimum is {})", CurrentEra::::get().unwrap_or(0), - validators.len(), + exposures.len(), MinimumValidatorCount::::get(), ), None => { @@ -704,186 +681,69 @@ impl Pallet { CurrentEra::::put(0); ErasStartSessionIndex::::insert(&0, &start_session_index); }, - _ => {}, + _ => (), } - // election failed, clear election prep metadata. - Self::deposit_event(Event::StakingElectionFailed); - Self::clear_election_metadata(); - - None - } else { - Self::deposit_event(Event::StakersElected); - Self::clear_election_metadata(); - Self::plan_new_era(start_session_index); - - Some(validators) - } - } - /// Plan a new era. - /// - /// * Bump the current era storage (which holds the latest planned era). - /// * Store start session index for the new planned era. - /// * Clean old era information. - /// - /// The new validator set for this era is stored under `ElectableStashes`. - pub fn plan_new_era(start_session_index: SessionIndex) { - // Increment or set current era. - let new_planned_era = CurrentEra::::mutate(|s| { - *s = Some(s.map(|s| s + 1).unwrap_or(0)); - s.unwrap() - }); - ErasStartSessionIndex::::insert(&new_planned_era, &start_session_index); - - // Clean old era information. - if let Some(old_era) = new_planned_era.checked_sub(T::HistoryDepth::get() + 1) { - log!(trace, "Removing era information for {:?}", old_era); - Self::clear_era_information(old_era); + Self::deposit_event(Event::StakingElectionFailed); + return None } - } - /// Paginated elect. - /// - /// Fetches the election page with index `page` from the election provider. - /// - /// The results from the elect call should be stored in the `ElectableStashes` storage. In - /// addition, it stores stakers' information for next planned era based on the paged solution - /// data returned. 
- /// - /// If any new election winner does not fit in the electable stashes storage, it truncates the - /// result of the election. We ensure that only the winners that are part of the electable - /// stashes have exposures collected for the next era. - /// - /// If `T::ElectionProvider::elect(_)`, we don't raise an error just yet and continue until - /// `elect(0)`. IFF `elect(0)` is called, yet we have not collected enough validators (as per - /// `MinimumValidatorCount` storage), an error is raised in the next era rotation. - pub(crate) fn do_elect_paged(page: PageIndex) -> Weight { - match T::ElectionProvider::elect(page) { - Ok(supports) => { - let supports_len = supports.len() as u32; - let inner_processing_results = Self::do_elect_paged_inner(supports); - if let Err(not_included) = inner_processing_results { - defensive!( - "electable stashes exceeded limit, unexpected but election proceeds.\ - {} stashes from election result discarded", - not_included - ); - }; - - Self::deposit_event(Event::PagedElectionProceeded { - page, - result: inner_processing_results.map(|x| x as u32).map_err(|x| x as u32), - }); - T::WeightInfo::do_elect_paged_inner(supports_len) - }, - Err(e) => { - log!(warn, "election provider page failed due to {:?} (page: {})", e, page); - Self::deposit_event(Event::PagedElectionProceeded { page, result: Err(0) }); - // no-op -- no need to raise an error for now. - Default::default() - }, - } + Self::deposit_event(Event::StakersElected); + Some(Self::trigger_new_era(start_session_index, exposures)) } - /// Inner implementation of [`Self::do_elect_paged`]. + /// Process the output of the election. /// - /// Returns an error if adding election winners to the electable stashes storage fails due to - /// exceeded bounds. In case of error, it returns the index of the first stash that failed to be - /// included. - pub(crate) fn do_elect_paged_inner( - mut supports: BoundedSupportsOf, - ) -> Result { - // preparing the next era. 
Note: we expect `do_elect_paged` to be called *only* during a - // non-genesis era, thus current era should be set by now. - let planning_era = CurrentEra::::get().defensive_unwrap_or_default().saturating_add(1); - - match Self::add_electables(supports.iter().map(|(s, _)| s.clone())) { - Ok(added) => { - let exposures = Self::collect_exposures(supports); - let _ = Self::store_stakers_info(exposures, planning_era); - Ok(added) - }, - Err(not_included_idx) => { - let not_included = supports.len().saturating_sub(not_included_idx); - - log!( - warn, - "not all winners fit within the electable stashes, excluding {:?} accounts from solution.", - not_included, - ); - - // filter out supports of stashes that do not fit within the electable stashes - // storage bounds to prevent collecting their exposures. - supports.truncate(not_included_idx); - let exposures = Self::collect_exposures(supports); - let _ = Self::store_stakers_info(exposures, planning_era); - - Err(not_included) - }, - } - } - - /// Process the output of a paged election. - /// - /// Store staking information for the new planned era of a single election page. + /// Store staking information for the new planned era pub fn store_stakers_info( - exposures: BoundedExposuresOf, + exposures: BoundedVec< + (T::AccountId, Exposure>), + MaxWinnersOf, + >, new_planned_era: EraIndex, - ) -> BoundedVec> { - // populate elected stash, stakers, exposures, and the snapshot of validator prefs. - let mut total_stake_page: BalanceOf = Zero::zero(); - let mut elected_stashes_page = Vec::with_capacity(exposures.len()); - let mut total_backers = 0u32; + ) -> BoundedVec> { + // Populate elected stash, stakers, exposures, and the snapshot of validator prefs. 
+ let mut total_stake: BalanceOf = Zero::zero(); + let mut elected_stashes = Vec::with_capacity(exposures.len()); exposures.into_iter().for_each(|(stash, exposure)| { - log!( - trace, - "stored exposure for stash {:?} and {:?} backers", - stash, - exposure.others.len() - ); - // build elected stash. - elected_stashes_page.push(stash.clone()); - // accumulate total stake. - total_stake_page = total_stake_page.saturating_add(exposure.total); - // set or update staker exposure for this era. - total_backers += exposure.others.len() as u32; - EraInfo::::upsert_exposure(new_planned_era, &stash, exposure); + // build elected stash + elected_stashes.push(stash.clone()); + // accumulate total stake + total_stake = total_stake.saturating_add(exposure.total); + // store staker exposure for this era + EraInfo::::set_exposure(new_planned_era, &stash, exposure); }); - let elected_stashes: BoundedVec<_, MaxWinnersPerPageOf> = - elected_stashes_page - .try_into() - .expect("both types are bounded by MaxWinnersPerPageOf; qed"); + let elected_stashes: BoundedVec<_, MaxWinnersOf> = elected_stashes + .try_into() + .expect("elected_stashes.len() always equal to exposures.len(); qed"); - // adds to total stake in this era. - EraInfo::::add_total_stake(new_planned_era, total_stake_page); + EraInfo::::set_total_stake(new_planned_era, total_stake); - // collect or update the pref of all winners. + // Collect the pref of all winners. 
for stash in &elected_stashes { let pref = Validators::::get(stash); >::insert(&new_planned_era, stash, pref); } - log!( - info, - "stored a page of stakers with {:?} validators and {:?} total backers for era {:?}", - elected_stashes.len(), - total_backers, - new_planned_era, - ); + if new_planned_era > 0 { + log!( + info, + "new validator set of size {:?} has been processed for era {:?}", + elected_stashes.len(), + new_planned_era, + ); + } elected_stashes } /// Consume a set of [`BoundedSupports`] from [`sp_npos_elections`] and collect them into a /// [`Exposure`]. - /// - /// Returns vec of all the exposures of a validator in `paged_supports`, bounded by the number - /// of max winners per page returned by the election provider. - pub(crate) fn collect_exposures( + fn collect_exposures( supports: BoundedSupportsOf, - ) -> BoundedExposuresOf { + ) -> BoundedVec<(T::AccountId, Exposure>), MaxWinnersOf> { let total_issuance = asset::total_issuance::(); let to_currency = |e: frame_election_provider_support::ExtendedBalance| { T::CurrencyToVote::to_currency(e, total_issuance) @@ -902,7 +762,6 @@ impl Pallet { .map(|(nominator, weight)| (nominator, to_currency(weight))) .for_each(|(nominator, stake)| { if nominator == validator { - defensive_assert!(own == Zero::zero(), "own stake should be unique"); own = own.saturating_add(stake); } else { others.push(IndividualExposure { who: nominator, value: stake }); @@ -917,28 +776,6 @@ impl Pallet { .expect("we only map through support vector which cannot change the size; qed") } - /// Adds a new set of stashes to the electable stashes. - /// - /// Returns: - /// - /// `Ok(newly_added)` if all stashes were added successfully. - /// `Err(first_un_included)` if some stashes cannot be added due to bounds. 
- pub(crate) fn add_electables( - new_stashes: impl Iterator, - ) -> Result { - ElectableStashes::::mutate(|electable| { - let pre_size = electable.len(); - - for (idx, stash) in new_stashes.enumerate() { - if electable.try_insert(stash).is_err() { - return Err(idx); - } - } - - Ok(electable.len() - pre_size) - }) - } - /// Remove all associated data of a stash account from the staking system. /// /// Assumes storage is upgraded before calling. @@ -963,7 +800,11 @@ impl Pallet { pub(crate) fn clear_era_information(era_index: EraIndex) { // FIXME: We can possibly set a reasonable limit since we do this only once per era and // clean up state across multiple blocks. - let mut cursor = >::clear_prefix(era_index, u32::MAX, None); + let mut cursor = >::clear_prefix(era_index, u32::MAX, None); + debug_assert!(cursor.maybe_cursor.is_none()); + cursor = >::clear_prefix(era_index, u32::MAX, None); + debug_assert!(cursor.maybe_cursor.is_none()); + cursor = >::clear_prefix(era_index, u32::MAX, None); debug_assert!(cursor.maybe_cursor.is_none()); cursor = >::clear_prefix(era_index, u32::MAX, None); debug_assert!(cursor.maybe_cursor.is_none()); @@ -1028,7 +869,7 @@ impl Pallet { stash: T::AccountId, exposure: Exposure>, ) { - EraInfo::::upsert_exposure(current_era, &stash, exposure); + EraInfo::::set_exposure(current_era, &stash, exposure); } #[cfg(feature = "runtime-benchmarks")] @@ -1036,29 +877,23 @@ impl Pallet { SlashRewardFraction::::put(fraction); } - /// Get all the voters associated with `page` that are eligible for the npos election. + /// Get all of the voters that are eligible for the npos election. /// - /// `maybe_max_len` can impose a cap on the number of voters returned per page. + /// `maybe_max_len` can imposes a cap on the number of voters returned; /// /// Sets `MinimumActiveStake` to the minimum active nominator stake in the returned set of /// nominators. 
/// - /// Note: in the context of the multi-page snapshot, we expect the *order* of `VoterList` and - /// `TargetList` not to change while the pages are being processed. - /// /// This function is self-weighing as [`DispatchClass::Mandatory`]. - pub(crate) fn get_npos_voters( - bounds: DataProviderBounds, - status: &SnapshotStatus, - ) -> Vec> { + pub fn get_npos_voters(bounds: DataProviderBounds) -> Vec> { let mut voters_size_tracker: StaticTracker = StaticTracker::default(); - let page_len_prediction = { + let final_predicted_len = { let all_voter_count = T::VoterList::count(); bounds.count.unwrap_or(all_voter_count.into()).min(all_voter_count.into()).0 }; - let mut all_voters = Vec::<_>::with_capacity(page_len_prediction as usize); + let mut all_voters = Vec::<_>::with_capacity(final_predicted_len as usize); // cache a few things. let weight_of = Self::weight_of_fn(); @@ -1068,18 +903,9 @@ impl Pallet { let mut nominators_taken = 0u32; let mut min_active_stake = u64::MAX; - let mut sorted_voters = match status { - // start the snapshot processing from the beginning. - SnapshotStatus::Waiting => T::VoterList::iter(), - // snapshot continues, start from the last iterated voter in the list. - SnapshotStatus::Ongoing(account_id) => T::VoterList::iter_from(&account_id) - .defensive_unwrap_or(Box::new(vec![].into_iter())), - // all voters have been consumed already, return an empty iterator. 
- SnapshotStatus::Consumed => Box::new(vec![].into_iter()), - }; - - while all_voters.len() < page_len_prediction as usize && - voters_seen < (NPOS_MAX_ITERATIONS_COEFFICIENT * page_len_prediction as u32) + let mut sorted_voters = T::VoterList::iter(); + while all_voters.len() < final_predicted_len as usize && + voters_seen < (NPOS_MAX_ITERATIONS_COEFFICIENT * final_predicted_len as u32) { let voter = match sorted_voters.next() { Some(voter) => { @@ -1114,7 +940,6 @@ impl Pallet { all_voters.push(voter); nominators_taken.saturating_inc(); } else { - defensive!("non-nominator fetched from voter list: {:?}", voter); // technically should never happen, but not much we can do about it. } min_active_stake = @@ -1145,17 +970,15 @@ impl Pallet { // `T::NominationsQuota::get_quota`. The latter can rarely happen, and is not // really an emergency or bug if it does. defensive!( - "invalid item in `VoterList`: {:?}, this nominator probably has too many nominations now", + "DEFENSIVE: invalid item in `VoterList`: {:?}, this nominator probably has too many nominations now", voter, ); } } // all_voters should have not re-allocated. - debug_assert!(all_voters.capacity() == page_len_prediction as usize); + debug_assert!(all_voters.capacity() == final_predicted_len as usize); - // TODO remove this and further instances of this, it will now be recorded in the EPM-MB - // pallet. Self::register_weight(T::WeightInfo::get_npos_voters(validators_taken, nominators_taken)); let min_active_stake: T::CurrencyBalance = @@ -1163,12 +986,18 @@ impl Pallet { MinimumActiveStake::::put(min_active_stake); + log!( + info, + "generated {} npos voters, {} from validators and {} nominators", + all_voters.len(), + validators_taken, + nominators_taken + ); + all_voters } - /// Get all the targets associated are eligible for the npos election. - /// - /// The target snapshot is *always* single paged. + /// Get the targets for an upcoming npos election. 
/// /// This function is self-weighing as [`DispatchClass::Mandatory`]. pub fn get_npos_targets(bounds: DataProviderBounds) -> Vec { @@ -1196,7 +1025,6 @@ impl Pallet { if targets_size_tracker.try_register_target(target.clone(), &bounds).is_err() { // no more space left for the election snapshot, stop iterating. - log!(warn, "npos targets size exceeded, stopping iteration."); Self::deposit_event(Event::::SnapshotTargetsSizeExceeded { size: targets_size_tracker.size as u32, }); @@ -1209,7 +1037,7 @@ impl Pallet { } Self::register_weight(T::WeightInfo::get_npos_targets(all_targets.len() as u32)); - log!(info, "[bounds {:?}] generated {} npos targets", bounds, all_targets.len()); + log!(info, "generated {} npos targets", all_targets.len()); all_targets } @@ -1318,10 +1146,9 @@ impl Pallet { /// Returns full exposure of a validator for a given era. /// - /// History note: This used to be a getter for old storage item `ErasStakers` deprecated in v14 - /// and deleted in v17. Since this function is used in the codebase at various places, we kept - /// it as a custom getter that takes care of getting the full exposure of the validator in a - /// backward compatible way. + /// History note: This used to be a getter for old storage item `ErasStakers` deprecated in v14. + /// Since this function is used in the codebase at various places, we kept it as a custom getter + /// that takes care of getting the full exposure of the validator in a backward compatible way. pub fn eras_stakers( era: EraIndex, account: &T::AccountId, @@ -1564,13 +1391,6 @@ impl Pallet { } } -// TODO: this is a very bad design. A hack for now so we can do benchmarks. Once -// `next_election_prediction` is reworked based on rc-client, get rid of it. For now, just know that -// the only fn that can set this is only accessible in runtime benchmarks. -frame_support::parameter_types! 
{ - pub storage BenchmarkNextElection: Option = None; -} - impl ElectionDataProvider for Pallet { type AccountId = T::AccountId; type BlockNumber = BlockNumberFor; @@ -1581,92 +1401,36 @@ impl ElectionDataProvider for Pallet { Ok(ValidatorCount::::get()) } - fn electing_voters( - bounds: DataProviderBounds, - page: PageIndex, - ) -> data_provider::Result>> { - let mut status = VoterSnapshotStatus::::get(); - let voters = Self::get_npos_voters(bounds, &status); - - // update the voter snapshot status. - match (page, &status) { - // last page, reset status for next round. - (0, _) => status = SnapshotStatus::Waiting, - - (_, SnapshotStatus::Waiting) | (_, SnapshotStatus::Ongoing(_)) => { - let maybe_last = voters.last().map(|(x, _, _)| x).cloned(); - - if let Some(ref last) = maybe_last { - if maybe_last == T::VoterList::iter().last() { - // all voters in the voter list have been consumed. - status = SnapshotStatus::Consumed; - } else { - status = SnapshotStatus::Ongoing(last.clone()); - } - } - }, - // do nothing. - (_, SnapshotStatus::Consumed) => (), - } - log!( - info, - "[page {}, status {:?} (stake?: {:?}), bounds {:?}] generated {} npos voters", - page, - VoterSnapshotStatus::::get(), - if let SnapshotStatus::Ongoing(x) = VoterSnapshotStatus::::get() { - Self::weight_of(&x) - } else { - Zero::zero() - }, - bounds, - voters.len(), - ); - VoterSnapshotStatus::::put(status); - - debug_assert!(!bounds.slice_exhausted(&voters)); + fn electing_voters(bounds: DataProviderBounds) -> data_provider::Result>> { + // This can never fail -- if `maybe_max_len` is `Some(_)` we handle it. 
+ let voters = Self::get_npos_voters(bounds); - Ok(voters) - } + debug_assert!(!bounds.exhausted( + SizeBound(voters.encoded_size() as u32).into(), + CountBound(voters.len() as u32).into() + )); - fn electing_voters_stateless( - bounds: DataProviderBounds, - ) -> data_provider::Result>> { - let voters = Self::get_npos_voters(bounds, &SnapshotStatus::Waiting); - log!( - info, - "[stateless, status {:?}, bounds {:?}] generated {} npos voters", - VoterSnapshotStatus::::get(), - bounds, - voters.len(), - ); Ok(voters) } - fn electable_targets( - bounds: DataProviderBounds, - page: PageIndex, - ) -> data_provider::Result> { - if page > 0 { - log!(warn, "multi-page target snapshot not supported, returning page 0."); - } - + fn electable_targets(bounds: DataProviderBounds) -> data_provider::Result> { let targets = Self::get_npos_targets(bounds); + // We can't handle this case yet -- return an error. WIP to improve handling this case in // . - if bounds.exhausted(None, CountBound(targets.len() as u32).into()) { + if bounds.exhausted(None, CountBound(T::TargetList::count() as u32).into()) { return Err("Target snapshot too big") } - debug_assert!(!bounds.slice_exhausted(&targets)); + debug_assert!(!bounds.exhausted( + SizeBound(targets.encoded_size() as u32).into(), + CountBound(targets.len() as u32).into() + )); Ok(targets) } fn next_election_prediction(now: BlockNumberFor) -> BlockNumberFor { - if let Some(override_value) = BenchmarkNextElection::get() { - return override_value.into() - } - let current_era = CurrentEra::::get().unwrap_or(0); let current_session = CurrentPlannedSession::::get(); let current_era_start_session_index = @@ -1694,33 +1458,11 @@ impl ElectionDataProvider for Pallet { .into(), }; - // TODO: this is somewhat temp hack to fix this issue: - // in the new multi-block staking model, we finish the election one block before the session - // ends. 
In this very last block, we don't want to tell EP that the next election is in one - // blocks, but rather in a whole era from now. For simplification, while we are - // mid-election,we always point to one era later. - // - // This whole code path has to change when we move to the rc-client model. - if !ElectableStashes::::get().is_empty() { - log!(debug, "we are mid-election, pointing to next era as election prediction."); - return now.saturating_add( - BlockNumberFor::::from(T::SessionsPerEra::get()) * session_length, - ) - } - now.saturating_add( until_this_session_end.saturating_add(sessions_left.saturating_mul(session_length)), ) } - #[cfg(feature = "runtime-benchmarks")] - fn set_next_election(to: u32) { - frame_benchmarking::benchmarking::add_to_whitelist( - BenchmarkNextElection::key().to_vec().into(), - ); - BenchmarkNextElection::set(&Some(to)); - } - #[cfg(feature = "runtime-benchmarks")] fn add_voter( voter: T::AccountId, @@ -1738,7 +1480,7 @@ impl ElectionDataProvider for Pallet { #[cfg(feature = "runtime-benchmarks")] fn add_target(target: T::AccountId) { - let stake = (MinValidatorBond::::get() + 1u32.into()) * 100u32.into(); + let stake = MinValidatorBond::::get() * 100u32.into(); >::insert(target.clone(), target.clone()); >::insert(target.clone(), StakingLedger::::new(target.clone(), stake)); Self::do_add_validator( @@ -1791,11 +1533,6 @@ impl ElectionDataProvider for Pallet { ); }); } - - #[cfg(feature = "runtime-benchmarks")] - fn set_desired_targets(count: u32) { - ValidatorCount::::put(count); - } } /// In this implementation `new_session(session)` must be called before `end_session(session-1)` @@ -1804,15 +1541,6 @@ impl ElectionDataProvider for Pallet { /// Once the first new_session is planned, all session must start and then end in order, though /// some session can lag in between the newest session planned and the latest session started. 
impl pallet_session::SessionManager for Pallet { - // └── Self::new_session(new_index, false) - // └── Self::try_plan_new_era(session_index, is_genesis) - // └── T::GenesisElectionProvider::elect() OR ElectableStashes::::take() - // └── Self::collect_exposures() - // └── Self::store_stakers_info() - // └── Self::plan_new_era() - // └── CurrentEra increment - // └── ErasStartSessionIndex update - // └── Self::clear_era_information() fn new_session(new_index: SessionIndex) -> Option> { log!(trace, "planning new session {}", new_index); CurrentPlannedSession::::put(new_index); @@ -1823,19 +1551,6 @@ impl pallet_session::SessionManager for Pallet { CurrentPlannedSession::::put(new_index); Self::new_session(new_index, true).map(|v| v.into_inner()) } - // start_session(start_session: SessionIndex) - // └── Check if this is the start of next active era - // └── Self::start_era(start_session) - // └── Update active era index - // └── Set active era start timestamp - // └── Update BondedEras - // └── Self::apply_unapplied_slashes() - // └── Get slashes for era from UnappliedSlashes - // └── Apply each slash - // └── Clear slashes metadata - // └── Process disabled validators - // └── Get all disabled validators - // └── Call T::SessionInterface::disable_validator() for each fn start_session(start_index: SessionIndex) { log!(trace, "starting session {}", start_index); Self::start_session(start_index) @@ -2225,7 +1940,7 @@ impl StakingInterface for Pallet { } fn election_ongoing() -> bool { - ::ongoing() + T::ElectionProvider::ongoing() } fn force_unstake(who: Self::AccountId) -> sp_runtime::DispatchResult { @@ -2235,6 +1950,13 @@ impl StakingInterface for Pallet { } fn is_exposed_in_era(who: &Self::AccountId, era: &EraIndex) -> bool { + // look in the non paged exposures + // FIXME: Can be cleaned up once non paged exposures are cleared (https://github.com/paritytech/polkadot-sdk/issues/433) + ErasStakers::::iter_prefix(era).any(|(validator, exposures)| { + validator == *who 
|| exposures.others.iter().any(|i| i.who == *who) + }) + || + // look in the paged exposures ErasStakersPaged::::iter_prefix((era,)).any(|((validator, _), exposure_page)| { validator == *who || exposure_page.others.iter().any(|i| i.who == *who) }) @@ -2291,7 +2013,7 @@ impl StakingInterface for Pallet { .map(|(who, value)| IndividualExposure { who: who.clone(), value: *value }) .collect::>(); let exposure = Exposure { total: Default::default(), own: Default::default(), others }; - EraInfo::::upsert_exposure(*current_era, stash, exposure); + EraInfo::::set_exposure(*current_era, stash, exposure); } fn set_current_era(era: EraIndex) { @@ -2350,54 +2072,22 @@ impl sp_staking::StakingUnchecked for Pallet { #[cfg(any(test, feature = "try-runtime"))] impl Pallet { - pub(crate) fn do_try_state(now: BlockNumberFor) -> Result<(), TryRuntimeError> { + pub(crate) fn do_try_state(_: BlockNumberFor) -> Result<(), TryRuntimeError> { ensure!( T::VoterList::iter() .all(|x| >::contains_key(&x) || >::contains_key(&x)), "VoterList contains non-staker" ); - Self::ensure_snapshot_metadata_state(now)?; Self::check_ledgers()?; Self::check_bonded_consistency()?; Self::check_payees()?; Self::check_nominators()?; + Self::check_exposures()?; Self::check_paged_exposures()?; Self::check_count() } - /// Test invariants of: - /// - /// - `NextElectionPage`: should only be set if pages > 1 and if we are within `pages-election - /// -> election` - /// - `VoterSnapshotStatus`: cannot be argued about as we don't know when we get a call to data - /// provider, but we know it should never be set if we have 1 page. - /// - /// -- SHOULD ONLY BE CALLED AT THE END OF A GIVEN BLOCK. 
- pub fn ensure_snapshot_metadata_state(now: BlockNumberFor) -> Result<(), TryRuntimeError> { - let next_election = Self::next_election_prediction(now); - let pages = Self::election_pages().saturated_into::>(); - let election_prep_start = next_election - pages; - - if pages > One::one() && now >= election_prep_start { - ensure!( - NextElectionPage::::get().is_some() || next_election == now + One::one(), - "NextElectionPage should be set mid election, except for last block" - ); - } else if pages == One::one() { - ensure!( - NextElectionPage::::get().is_none(), - "NextElectionPage should not be set mid election" - ); - ensure!( - VoterSnapshotStatus::::get() == SnapshotStatus::Waiting, - "VoterSnapshotStatus should not be set mid election" - ); - } - - Ok(()) - } - /// Invariants: /// * A controller should not be associated with more than one ledger. /// * A bonded (stash, controller) pair should have only one associated ledger. I.e. if the @@ -2489,13 +2179,11 @@ impl Pallet { ::TargetList::count() == Validators::::count(), "wrong external count" ); - let max_validators_bound = MaxWinnersOf::::get(); - let max_winners_per_page_bound = MaxWinnersPerPageOf::::get(); ensure!( - max_validators_bound >= max_winners_per_page_bound, - "max validators should be higher than per page bounds" + ValidatorCount::::get() <= + ::MaxWinners::get(), + Error::::TooManyValidators ); - ensure!(ValidatorCount::::get() <= max_validators_bound, Error::::TooManyValidators); Ok(()) } @@ -2552,6 +2240,27 @@ impl Pallet { Ok(()) } + /// Invariants: + /// * For each era exposed validator, check if the exposure total is sane (exposure.total = + /// exposure.own + exposure.own). 
+ fn check_exposures() -> Result<(), TryRuntimeError> { + let era = ActiveEra::::get().unwrap().index; + ErasStakers::::iter_prefix_values(era) + .map(|expo| { + ensure!( + expo.total == + expo.own + + expo.others + .iter() + .map(|e| e.value) + .fold(Zero::zero(), |acc, x| acc + x), + "wrong total exposure.", + ); + Ok(()) + }) + .collect::>() + } + /// Invariants: /// * For each paged era exposed validator, check if the exposure total is sane (exposure.total /// = exposure.own + exposure.own). diff --git a/substrate/frame/staking/src/pallet/mod.rs b/substrate/frame/staking/src/pallet/mod.rs index 2641d26969b91..c0ec45b8de351 100644 --- a/substrate/frame/staking/src/pallet/mod.rs +++ b/substrate/frame/staking/src/pallet/mod.rs @@ -17,33 +17,28 @@ //! Staking FRAME Pallet. -use alloc::{format, vec::Vec}; +use alloc::vec::Vec; use codec::Codec; -use frame_election_provider_support::{ElectionProvider, SortedListProvider, VoteWeight}; +use frame_election_provider_support::{ + ElectionProvider, ElectionProviderBase, SortedListProvider, VoteWeight, +}; use frame_support::{ - assert_ok, pallet_prelude::*, traits::{ fungible::{ hold::{Balanced as FunHoldBalanced, Mutate as FunHoldMutate}, - Inspect, Mutate, Mutate as FunMutate, + Mutate as FunMutate, }, Contains, Defensive, DefensiveSaturating, EnsureOrigin, EstimateNextNewSession, Get, InspectLockableCurrency, Nothing, OnUnbalanced, UnixTime, }, weights::Weight, - BoundedBTreeSet, BoundedVec, + BoundedVec, }; use frame_system::{ensure_root, ensure_signed, pallet_prelude::*}; -use rand::seq::SliceRandom; -use rand_chacha::{ - rand_core::{RngCore, SeedableRng}, - ChaChaRng, -}; -use sp_core::{sr25519::Pair as SrPair, Pair}; use sp_runtime::{ traits::{SaturatedConversion, StaticLookup, Zero}, - ArithmeticError, Perbill, Percent, Saturating, + ArithmeticError, Perbill, Percent, }; use sp_staking::{ @@ -61,6 +56,7 @@ use crate::{ EraRewardPoints, ExposurePage, Forcing, LedgerIntegrityState, MaxNominationsOf, 
NegativeImbalanceOf, Nominations, NominationsQuota, PositiveImbalanceOf, RewardDestination, SessionInterface, StakingLedger, UnappliedSlash, UnlockChunk, ValidatorPrefs, + Exposure, }; // The speculative number of spans are used as an input of the weight annotation of @@ -72,9 +68,9 @@ pub(crate) const SPECULATIVE_NUM_SPANS: u32 = 32; pub mod pallet { use super::*; use codec::HasCompact; + use frame_election_provider_support::ElectionDataProvider; - use crate::{BenchmarkingConfig, PagedExposureMetadata, SnapshotStatus}; - use frame_election_provider_support::{ElectionDataProvider, PageIndex}; + use crate::{BenchmarkingConfig, PagedExposureMetadata}; /// The in-code storage version. const STORAGE_VERSION: StorageVersion = StorageVersion::new(16); @@ -162,8 +158,6 @@ pub mod pallet { AccountId = Self::AccountId, BlockNumber = BlockNumberFor, DataProvider = Pallet, - MaxWinnersPerPage = ::MaxWinnersPerPage, - MaxBackersPerWinner = ::MaxBackersPerWinner, >; /// Something that defines the maximum number of nominations per nominator. @@ -173,9 +167,10 @@ pub mod pallet { /// Number of eras to keep in history. /// /// Following information is kept for eras in `[current_era - - /// HistoryDepth, current_era]`: `ErasValidatorPrefs`, `ErasValidatorReward`, - /// `ErasRewardPoints`, `ErasTotalStake`, `ErasStartSessionIndex`, `ClaimedRewards`, - /// `ErasStakersPaged`, `ErasStakersOverview`. + /// HistoryDepth, current_era]`: `ErasStakers`, `ErasStakersClipped`, + /// `ErasValidatorPrefs`, `ErasValidatorReward`, `ErasRewardPoints`, + /// `ErasTotalStake`, `ErasStartSessionIndex`, `ClaimedRewards`, `ErasStakersPaged`, + /// `ErasStakersOverview`. /// /// Must be more than the number of eras delayed by session. /// I.e. active era must always be in history. I.e. `active_era > @@ -259,13 +254,6 @@ pub mod pallet { #[pallet::constant] type MaxExposurePageSize: Get; - /// The absolute maximum of winner validators this pallet should return. 
- /// - /// As this pallet supports multi-block election, the set of winner validators *per - /// election* is bounded by this type. - #[pallet::constant] - type MaxValidatorSet: Get; - /// Something that provides a best-effort sorted list of voters aka electing nominators, /// used for NPoS election. /// @@ -326,13 +314,6 @@ pub mod pallet { #[pallet::no_default_bounds] type EventListeners: sp_staking::OnStakingUpdate>; - /// Maximum number of invulnerable validators. - #[pallet::constant] - type MaxInvulnerables: Get; - - /// Maximum number of disabled validators. - #[pallet::constant] - type MaxDisabledValidators: Get; #[pallet::no_default_bounds] /// Filter some accounts from participating in staking. @@ -395,10 +376,7 @@ pub mod pallet { type NextNewSession = (); type MaxExposurePageSize = ConstU32<64>; type MaxUnlockingChunks = ConstU32<32>; - type MaxValidatorSet = ConstU32<100>; type MaxControllersInDeprecationBatch = ConstU32<100>; - type MaxInvulnerables = ConstU32<20>; - type MaxDisabledValidators = ConstU32<100>; type EventListeners = (); type Filter = Nothing; #[cfg(feature = "std")] @@ -419,8 +397,8 @@ pub mod pallet { /// easy to initialize and the performance hit is minimal (we expect no more than four /// invulnerables) and restricted to testnets. #[pallet::storage] - pub type Invulnerables = - StorageValue<_, BoundedVec, ValueQuery>; + #[pallet::unbounded] + pub type Invulnerables = StorageValue<_, Vec, ValueQuery>; /// Map from all locked "stash" accounts to the controller account. /// @@ -532,6 +510,26 @@ pub mod pallet { #[pallet::storage] pub type ErasStartSessionIndex = StorageMap<_, Twox64Concat, EraIndex, SessionIndex>; + /// Exposure of validator at era. + /// + /// This is keyed first by the era index to allow bulk deletion and then the stash account. + /// + /// Is it removed after [`Config::HistoryDepth`] eras. + /// If stakers hasn't been set or has been removed then empty exposure is returned. + /// + /// Note: Deprecated since v14. 
Use `EraInfo` instead to work with exposures. + #[pallet::storage] + #[pallet::unbounded] + pub type ErasStakers = StorageDoubleMap< + _, + Twox64Concat, + EraIndex, + Twox64Concat, + T::AccountId, + Exposure>, + ValueQuery, + >; + /// Summary of validator exposure at a given era. /// /// This contains the total stake in support of the validator and their own stake. In addition, @@ -555,6 +553,34 @@ pub mod pallet { OptionQuery, >; + /// Clipped Exposure of validator at era. + /// + /// Note: This is deprecated, should be used as read-only and will be removed in the future. + /// New `Exposure`s are stored in a paged manner in `ErasStakersPaged` instead. + /// + /// This is similar to [`ErasStakers`] but number of nominators exposed is reduced to the + /// `T::MaxExposurePageSize` biggest stakers. + /// (Note: the field `total` and `own` of the exposure remains unchanged). + /// This is used to limit the i/o cost for the nominator payout. + /// + /// This is keyed fist by the era index to allow bulk deletion and then the stash account. + /// + /// It is removed after [`Config::HistoryDepth`] eras. + /// If stakers hasn't been set or has been removed then empty exposure is returned. + /// + /// Note: Deprecated since v14. Use `EraInfo` instead to work with exposures. + #[pallet::storage] + #[pallet::unbounded] + pub type ErasStakersClipped = StorageDoubleMap< + _, + Twox64Concat, + EraIndex, + Twox64Concat, + T::AccountId, + Exposure>, + ValueQuery, + >; + /// Paginated exposure of a validator at given era. /// /// This is keyed first by the era index to allow bulk deletion, then stash account and finally @@ -592,7 +618,7 @@ pub mod pallet { ValueQuery, >; - /// Exposure of validator at era with the preferences of validators. + /// Similar to `ErasStakers`, this holds the preferences of validators. /// /// This is keyed first by the era index to allow bulk deletion and then the stash account. 
/// @@ -715,34 +741,12 @@ pub mod pallet { #[pallet::storage] pub(crate) type ChillThreshold = StorageValue<_, Percent, OptionQuery>; - /// Voter snapshot progress status. - /// - /// If the status is `Ongoing`, it keeps a cursor of the last voter retrieved to proceed when - /// creating the next snapshot page. - #[pallet::storage] - pub(crate) type VoterSnapshotStatus = - StorageValue<_, SnapshotStatus, ValueQuery>; - - /// Keeps track of an ongoing multi-page election solution request. - /// - /// If `Some(_)``, it is the next page that we intend to elect. If `None`, we are not in the - /// election process. - /// - /// This is only set in multi-block elections. Should always be `None` otherwise. - #[pallet::storage] - pub(crate) type NextElectionPage = StorageValue<_, PageIndex, OptionQuery>; - - /// A bounded list of the "electable" stashes that resulted from a successful election. - #[pallet::storage] - pub(crate) type ElectableStashes = - StorageValue<_, BoundedBTreeSet, ValueQuery>; - #[pallet::genesis_config] #[derive(frame_support::DefaultNoBound)] pub struct GenesisConfig { pub validator_count: u32, pub minimum_validator_count: u32, - pub invulnerables: BoundedVec, + pub invulnerables: Vec, pub force_era: Forcing, pub slash_reward_fraction: Perbill, pub canceled_payout: BalanceOf, @@ -752,39 +756,6 @@ pub mod pallet { pub min_validator_bond: BalanceOf, pub max_validator_count: Option, pub max_nominator_count: Option, - /// Create the given number of validators and nominators. - /// - /// These account need not be in the endowment list of balances, and are auto-topped up - /// here. - /// - /// Useful for testing genesis config. 
- pub dev_stakers: Option<(u32, u32)>, - } - - impl GenesisConfig { - fn generate_endowed_bonded_account( - derivation: &str, - rng: &mut ChaChaRng, - min_validator_bond: BalanceOf, - ) -> T::AccountId { - let pair: SrPair = Pair::from_string(&derivation, None) - .expect(&format!("Failed to parse derivation string: {derivation}")); - let who = T::AccountId::decode(&mut &pair.public().encode()[..]) - .expect(&format!("Failed to decode public key from pair: {:?}", pair.public())); - - let stake = BalanceOf::::from(rng.next_u64()) - .max(T::Currency::minimum_balance()) - .max(min_validator_bond); - let two: BalanceOf = 2u64.into(); - - assert_ok!(T::Currency::mint_into(&who, stake * two)); - assert_ok!(>::bond( - T::RuntimeOrigin::from(Some(who.clone()).into()), - stake, - RewardDestination::Staked, - )); - who - } } #[pallet::genesis_build] @@ -792,11 +763,7 @@ pub mod pallet { fn build(&self) { ValidatorCount::::put(self.validator_count); MinimumValidatorCount::::put(self.minimum_validator_count); - assert!( - self.invulnerables.len() as u32 <= T::MaxInvulnerables::get(), - "Too many invulnerable validators at genesis." - ); - >::put(&self.invulnerables); + Invulnerables::::put(&self.invulnerables); ForceEra::::put(self.force_era); CanceledSlashPayout::::put(self.canceled_payout); SlashRewardFraction::::put(self.slash_reward_fraction); @@ -821,12 +788,12 @@ pub mod pallet { asset::free_to_stake::(stash) >= balance, "Stash does not have enough balance to bond." 
); - assert_ok!(>::bond( + frame_support::assert_ok!(>::bond( T::RuntimeOrigin::from(Some(stash.clone()).into()), balance, RewardDestination::Staked, )); - assert_ok!(match status { + frame_support::assert_ok!(match status { crate::StakerStatus::Validator => >::validate( T::RuntimeOrigin::from(Some(stash.clone()).into()), Default::default(), @@ -839,8 +806,7 @@ pub mod pallet { }); assert!( ValidatorCount::::get() <= - ::MaxWinnersPerPage::get() * - ::Pages::get() + ::MaxWinners::get() ); } @@ -850,58 +816,6 @@ pub mod pallet { Nominators::::count() + Validators::::count(), "not all genesis stakers were inserted into sorted list provider, something is wrong." ); - - // now generate the dev stakers, after all else is setup - if let Some((validators, nominators)) = self.dev_stakers { - crate::log!( - debug, - "generating dev stakers: validators: {}, nominators: {}", - validators, - nominators - ); - let base_derivation = "//staker//{}"; - - // it is okay for the randomness to be the same on every call. If we want different, - // we can make `base_derivation` configurable. 
- let mut rng = - ChaChaRng::from_seed(base_derivation.using_encoded(sp_core::blake2_256)); - - let validators = (0..validators) - .map(|index| { - let derivation = - base_derivation.replace("{}", &format!("validator{}", index)); - let who = Self::generate_endowed_bonded_account( - &derivation, - &mut rng, - self.min_validator_bond, - ); - assert_ok!(>::validate( - T::RuntimeOrigin::from(Some(who.clone()).into()), - Default::default(), - )); - who - }) - .collect::>(); - - (0..nominators).for_each(|index| { - let derivation = base_derivation.replace("{}", &format!("nominator{}", index)); - let who = Self::generate_endowed_bonded_account( - &derivation, - &mut rng, - self.min_validator_bond, - ); - - let random_nominations = validators - .choose_multiple(&mut rng, MaxNominationsOf::::get() as usize) - .map(|v| v.clone()) - .collect::>(); - - assert_ok!(>::nominate( - T::RuntimeOrigin::from(Some(who.clone()).into()), - random_nominations.iter().map(|l| T::Lookup::unlookup(l.clone())).collect(), - )); - }) - } } } @@ -910,11 +824,7 @@ pub mod pallet { pub enum Event { /// The era payout has been set; the first balance is the validator-payout; the second is /// the remainder from the maximum amount of reward. - EraPaid { - era_index: EraIndex, - validator_payout: BalanceOf, - remainder: BalanceOf, - }, + EraPaid { era_index: EraIndex, validator_payout: BalanceOf, remainder: BalanceOf }, /// The nominator has been rewarded by this amount to this destination. Rewarded { stash: T::AccountId, @@ -922,54 +832,31 @@ pub mod pallet { amount: BalanceOf, }, /// A staker (validator or nominator) has been slashed by the given amount. - Slashed { - staker: T::AccountId, - amount: BalanceOf, - }, + Slashed { staker: T::AccountId, amount: BalanceOf }, /// A slash for the given validator, for the given percentage of their stake, at the given /// era as been reported. 
- SlashReported { - validator: T::AccountId, - fraction: Perbill, - slash_era: EraIndex, - }, + SlashReported { validator: T::AccountId, fraction: Perbill, slash_era: EraIndex }, /// An old slashing report from a prior era was discarded because it could /// not be processed. - OldSlashingReportDiscarded { - session_index: SessionIndex, - }, + OldSlashingReportDiscarded { session_index: SessionIndex }, /// A new set of stakers was elected. StakersElected, /// An account has bonded this amount. \[stash, amount\] /// /// NOTE: This event is only emitted when funds are bonded via a dispatchable. Notably, /// it will not be emitted for staking rewards when they are added to stake. - Bonded { - stash: T::AccountId, - amount: BalanceOf, - }, + Bonded { stash: T::AccountId, amount: BalanceOf }, /// An account has unbonded this amount. - Unbonded { - stash: T::AccountId, - amount: BalanceOf, - }, + Unbonded { stash: T::AccountId, amount: BalanceOf }, /// An account has called `withdraw_unbonded` and removed unbonding chunks worth `Balance` /// from the unlocking queue. - Withdrawn { - stash: T::AccountId, - amount: BalanceOf, - }, + Withdrawn { stash: T::AccountId, amount: BalanceOf }, /// A nominator has been kicked from a validator. - Kicked { - nominator: T::AccountId, - stash: T::AccountId, - }, + Kicked { nominator: T::AccountId, stash: T::AccountId }, /// The election failed. No new era is planned. StakingElectionFailed, /// An account has stopped participating as either a validator or nominator. - Chilled { - stash: T::AccountId, - }, + Chilled { stash: T::AccountId }, /// A Page of stakers rewards are getting paid. `next` is `None` if all pages are claimed. PayoutStarted { era_index: EraIndex, @@ -978,44 +865,20 @@ pub mod pallet { next: Option, }, /// A validator has set their preferences. - ValidatorPrefsSet { - stash: T::AccountId, - prefs: ValidatorPrefs, - }, + ValidatorPrefsSet { stash: T::AccountId, prefs: ValidatorPrefs }, /// Voters size limit reached. 
- SnapshotVotersSizeExceeded { - size: u32, - }, + SnapshotVotersSizeExceeded { size: u32 }, /// Targets size limit reached. - SnapshotTargetsSizeExceeded { - size: u32, - }, - ForceEra { - mode: Forcing, - }, + SnapshotTargetsSizeExceeded { size: u32 }, + /// A new force era mode was set. + ForceEra { mode: Forcing }, /// Report of a controller batch deprecation. ControllerBatchDeprecated { failures: u32, }, /// Staking balance migrated from locks to holds, with any balance that could not be held /// is force withdrawn. - CurrencyMigrated { - stash: T::AccountId, - force_withdraw: BalanceOf, - }, - /// A page from a multi-page election was fetched. A number of these are followed by - /// `StakersElected`. - /// - /// `Ok(count)` indicates the give number of stashes were added. - /// `Err(index)` indicates that the stashes after index were dropped. - /// `Err(0)` indicates that an error happened but no stashes were dropped nor added. - /// - /// The error indicates that a number of validators were dropped due to excess size, but - /// the overall election will continue. - PagedElectionProceeded { - page: PageIndex, - result: Result, - }, + CurrencyMigrated { stash: T::AccountId, force_withdraw: BalanceOf }, } #[pallet::error] @@ -1098,38 +961,9 @@ pub mod pallet { #[pallet::hooks] impl Hooks> for Pallet { - /// Start fetching the election pages `Pages` blocks before the election prediction, so - /// that the `ElectableStashes` has been populated with all validators from all pages at - /// the time of the election. - fn on_initialize(now: BlockNumberFor) -> Weight { - let pages = Self::election_pages(); - - // election ongoing, fetch the next page. - let inner_weight = if let Some(next_page) = NextElectionPage::::get() { - let next_next_page = next_page.checked_sub(1); - NextElectionPage::::set(next_next_page); - Self::do_elect_paged(next_page) - } else { - // election isn't ongoing yet, check if it should start. 
- let next_election = ::next_election_prediction(now); - - if now == (next_election.saturating_sub(pages.into())) { - crate::log!( - debug, - "elect(): start fetching solution pages. expected pages: {:?}", - pages - ); - - let current_page = pages.saturating_sub(1); - let next_page = current_page.checked_sub(1); - NextElectionPage::::set(next_page); - Self::do_elect_paged(current_page) - } else { - Weight::default() - } - }; - - T::WeightInfo::on_initialize_noop().saturating_add(inner_weight) + fn on_initialize(_now: BlockNumberFor) -> Weight { + // just return the weight of the on_finalize. + T::DbWeight::get().reads(1) } fn on_finalize(_n: BlockNumberFor) { @@ -1155,12 +989,18 @@ pub mod pallet { // and that MaxNominations is always greater than 1, since we count on this. assert!(!MaxNominationsOf::::get().is_zero()); + // ensure election results are always bounded with the same value + assert!( + ::MaxWinners::get() == + ::MaxWinners::get() + ); + assert!( T::SlashDeferDuration::get() < T::BondingDuration::get() || T::BondingDuration::get() == 0, "As per documentation, slash defer duration ({}) should be less than bonding duration ({}).", T::SlashDeferDuration::get(), T::BondingDuration::get(), - ); + ) } #[cfg(feature = "try-runtime")] @@ -1181,7 +1021,7 @@ pub mod pallet { } /// Get the validators that may never be slashed or forcibly kicked out. - pub fn invulnerables() -> BoundedVec { + pub fn invulnerables() -> Vec { Invulnerables::::get() } @@ -1224,6 +1064,18 @@ pub mod pallet { ErasStartSessionIndex::::get(era_index) } + /// Get the clipped exposure of a given validator at an era. + pub fn eras_stakers_clipped( + era_index: EncodeLikeEraIndex, + account_id: EncodeLikeAccountId, + ) -> Exposure> + where + EncodeLikeEraIndex: codec::EncodeLike, + EncodeLikeAccountId: codec::EncodeLike, + { + ErasStakersClipped::::get(era_index, account_id) + } + /// Get the paged history of claimed rewards by era for given validator. 
pub fn claimed_rewards( era_index: EncodeLikeEraIndex, @@ -1746,15 +1598,18 @@ pub mod pallet { #[pallet::compact] new: u32, ) -> DispatchResult { ensure_root(origin)?; - - ensure!(new <= T::MaxValidatorSet::get(), Error::::TooManyValidators); - + // ensure new validator count does not exceed maximum winners + // support by election provider. + ensure!( + new <= ::MaxWinners::get(), + Error::::TooManyValidators + ); ValidatorCount::::put(new); Ok(()) } /// Increments the ideal number of validators up to maximum of - /// `T::MaxValidatorSet`. + /// `ElectionProviderBase::MaxWinners`. /// /// The dispatch origin must be Root. /// @@ -1769,15 +1624,17 @@ pub mod pallet { ensure_root(origin)?; let old = ValidatorCount::::get(); let new = old.checked_add(additional).ok_or(ArithmeticError::Overflow)?; - - ensure!(new <= T::MaxValidatorSet::get(), Error::::TooManyValidators); + ensure!( + new <= ::MaxWinners::get(), + Error::::TooManyValidators + ); ValidatorCount::::put(new); Ok(()) } /// Scale up the ideal number of validators by a factor up to maximum of - /// `T::MaxValidatorSet`. + /// `ElectionProviderBase::MaxWinners`. /// /// The dispatch origin must be Root. 
/// @@ -1790,7 +1647,10 @@ pub mod pallet { let old = ValidatorCount::::get(); let new = old.checked_add(factor.mul_floor(old)).ok_or(ArithmeticError::Overflow)?; - ensure!(new <= T::MaxValidatorSet::get(), Error::::TooManyValidators); + ensure!( + new <= ::MaxWinners::get(), + Error::::TooManyValidators + ); ValidatorCount::::put(new); Ok(()) @@ -1849,8 +1709,6 @@ pub mod pallet { invulnerables: Vec, ) -> DispatchResult { ensure_root(origin)?; - let invulnerables = - BoundedVec::try_from(invulnerables).map_err(|_| Error::::BoundNotMet)?; >::put(invulnerables); Ok(()) } @@ -1946,7 +1804,6 @@ pub mod pallet { era: EraIndex, ) -> DispatchResultWithPostInfo { ensure_signed(origin)?; - Self::do_payout_stakers(validator_stash, era) } diff --git a/substrate/frame/staking/src/tests.rs b/substrate/frame/staking/src/tests.rs index 5e1b405f6d84e..15f7015049c23 100644 --- a/substrate/frame/staking/src/tests.rs +++ b/substrate/frame/staking/src/tests.rs @@ -32,7 +32,6 @@ use frame_support::{ fungible::Inspect, Currency, Get, InspectLockableCurrency, LockableCurrency, ReservableCurrency, WithdrawReasons, }, - BoundedVec, }; use mock::*; use pallet_balances::Error as BalancesError; @@ -1377,7 +1376,6 @@ fn bond_extra_and_withdraw_unbonded_works() { legacy_claimed_rewards: bounded_vec![], } ); - assert_eq!( Staking::eras_stakers(active_era(), &11), Exposure { total: 1000, own: 1000, others: vec![] } @@ -1928,11 +1926,7 @@ fn reward_to_stake_works() { let _ = asset::set_stakeable_balance::(&20, 1000); // Bypass logic and change current exposure - EraInfo::::upsert_exposure( - 0, - &21, - Exposure { total: 69, own: 69, others: vec![] }, - ); + EraInfo::::set_exposure(0, &21, Exposure { total: 69, own: 69, others: vec![] }); >::insert( &20, StakingLedgerInspect { @@ -2283,14 +2277,14 @@ fn bond_with_duplicate_vote_should_be_ignored_by_election_provider() { // winners should be 21 and 31. Otherwise this election is taking duplicates into // account. 
- let supports = ::ElectionProvider::elect(SINGLE_PAGE).unwrap(); - - let expected_supports = vec![ - (21, Support { total: 1800, voters: vec![(21, 1000), (1, 400), (3, 400)] }), - (31, Support { total: 2200, voters: vec![(31, 1000), (1, 600), (3, 600)] }), - ]; - - assert_eq!(supports, to_bounded_supports(expected_supports)); + let supports = ::ElectionProvider::elect().unwrap(); + assert_eq!( + supports, + vec![ + (21, Support { total: 1800, voters: vec![(21, 1000), (1, 400), (3, 400)] }), + (31, Support { total: 2200, voters: vec![(31, 1000), (1, 600), (3, 600)] }) + ], + ); }); } @@ -2335,13 +2329,14 @@ fn bond_with_duplicate_vote_should_be_ignored_by_election_provider_elected() { assert_ok!(Staking::nominate(RuntimeOrigin::signed(3), vec![21])); // winners should be 21 and 11. - let supports = ::ElectionProvider::elect(SINGLE_PAGE).unwrap(); - let expected_supports = vec![ - (11, Support { total: 1500, voters: vec![(11, 1000), (1, 500)] }), - (21, Support { total: 2500, voters: vec![(21, 1000), (1, 500), (3, 1000)] }), - ]; - - assert_eq!(supports, to_bounded_supports(expected_supports)); + let supports = ::ElectionProvider::elect().unwrap(); + assert_eq!( + supports, + vec![ + (11, Support { total: 1500, voters: vec![(11, 1000), (1, 500)] }), + (21, Support { total: 2500, voters: vec![(21, 1000), (1, 500), (3, 1000)] }) + ], + ); }); } @@ -2384,7 +2379,7 @@ fn phragmen_should_not_overflow() { #[test] fn reward_validator_slashing_validator_does_not_overflow() { - ExtBuilder::default().nominate(false).build_and_execute(|| { + ExtBuilder::default().build_and_execute(|| { let stake = u64::MAX as Balance * 2; let reward_slash = u64::MAX as Balance * 2; @@ -2394,6 +2389,7 @@ fn reward_validator_slashing_validator_does_not_overflow() { // Set staker let _ = asset::set_stakeable_balance::(&11, stake); + let exposure = Exposure:: { total: stake, own: stake, others: vec![] }; let reward = EraRewardPoints:: { total: 1, individual: vec![(11, 1)].into_iter().collect(), @@ 
-2401,19 +2397,7 @@ fn reward_validator_slashing_validator_does_not_overflow() { // Check reward ErasRewardPoints::::insert(0, reward); - - // force exposure metadata to account for the overflowing `stake`. - ErasStakersOverview::::insert( - current_era(), - 11, - PagedExposureMetadata { total: stake, own: stake, nominator_count: 0, page_count: 0 }, - ); - - // we want to slash only self-stake, confirm that no others exposed. - let full_exposure_after = EraInfo::::get_full_exposure(current_era(), &11); - assert_eq!(full_exposure_after.total, stake); - assert_eq!(full_exposure_after.others, vec![]); - + EraInfo::::set_exposure(0, &11, exposure); ErasValidatorReward::::insert(0, stake); assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 0, 0)); assert_eq!(asset::stakeable_balance::(&11), stake * 2); @@ -2435,19 +2419,13 @@ fn reward_validator_slashing_validator_does_not_overflow() { // only slashes out of bonded stake are applied. without this line, it is 0. Staking::bond(RuntimeOrigin::signed(2), stake - 1, RewardDestination::Staked).unwrap(); - - // Override metadata and exposures of 11 so that it exposes minmal self stake and `stake` - - // 1 from nominator 2. - ErasStakersOverview::::insert( - current_era(), - 11, - PagedExposureMetadata { total: stake, own: 1, nominator_count: 1, page_count: 1 }, - ); - - ErasStakersPaged::::insert( - (current_era(), &11, 0), - ExposurePage { - page_total: stake - 1, + // Override exposure of 11 + EraInfo::::set_exposure( + 0, + &11, + Exposure { + total: stake, + own: 1, others: vec![IndividualExposure { who: 2, value: stake - 1 }], }, ); @@ -3155,7 +3133,6 @@ fn deferred_slashes_are_deferred() { staking_events_since_last_call().as_slice(), &[ Event::SlashReported { validator: 11, slash_era: 1, .. 
}, - Event::PagedElectionProceeded { page: 0, result: Ok(2) }, Event::StakersElected, .., Event::Slashed { staker: 11, amount: 100 }, @@ -3492,7 +3469,6 @@ fn slash_kicks_validators_not_nominators_and_disables_nominator_for_kicked_valid assert_eq!( staking_events_since_last_call(), vec![ - Event::PagedElectionProceeded { page: 0, result: Ok(7) }, Event::StakersElected, Event::EraPaid { era_index: 0, validator_payout: 11075, remainder: 33225 }, Event::SlashReported { @@ -3571,7 +3547,6 @@ fn non_slashable_offence_disables_validator() { assert_eq!( staking_events_since_last_call(), vec![ - Event::PagedElectionProceeded { page: 0, result: Ok(7) }, Event::StakersElected, Event::EraPaid { era_index: 0, validator_payout: 11075, remainder: 33225 }, Event::SlashReported { @@ -3661,7 +3636,6 @@ fn slashing_independent_of_disabling_validator() { assert_eq!( staking_events_since_last_call(), vec![ - Event::PagedElectionProceeded { page: 0, result: Ok(5) }, Event::StakersElected, Event::EraPaid { era_index: 0, validator_payout: 11075, remainder: 33225 }, Event::SlashReported { @@ -3700,7 +3674,7 @@ fn slashing_independent_of_disabling_validator() { } #[test] -fn offence_threshold_doesnt_plan_new_era() { +fn offence_threshold_doesnt_trigger_new_era() { ExtBuilder::default() .validator_count(4) .set_status(41, StakerStatus::Validator) @@ -3938,17 +3912,12 @@ fn six_session_delay() { // pallet-session is delaying session by one, thus the next session to plan is +2. assert_eq!(>::new_session(init_session + 2), None); - - // note a new election happens independently of the call to `new_session`. 
- Staking::do_elect_paged(0); assert_eq!( >::new_session(init_session + 3), Some(val_set.clone()) ); assert_eq!(>::new_session(init_session + 4), None); assert_eq!(>::new_session(init_session + 5), None); - - Staking::do_elect_paged(0); assert_eq!( >::new_session(init_session + 6), Some(val_set.clone()) @@ -4189,8 +4158,17 @@ fn test_multi_page_payout_stakers_by_page() { ); // verify rewards are tracked to prevent double claims + let ledger = Staking::ledger(11.into()); for page in 0..EraInfo::::get_page_count(1, &11) { - assert_eq!(EraInfo::::is_rewards_claimed(1, &11, page), true); + assert_eq!( + EraInfo::::is_rewards_claimed_with_legacy_fallback( + 1, + ledger.as_ref().unwrap(), + &11, + page + ), + true + ); } for i in 3..16 { @@ -4212,7 +4190,15 @@ fn test_multi_page_payout_stakers_by_page() { // verify we track rewards for each era and page for page in 0..EraInfo::::get_page_count(i - 1, &11) { - assert_eq!(EraInfo::::is_rewards_claimed(i - 1, &11, page), true); + assert_eq!( + EraInfo::::is_rewards_claimed_with_legacy_fallback( + i - 1, + Staking::ledger(11.into()).as_ref().unwrap(), + &11, + page + ), + true + ); } } @@ -4371,6 +4357,7 @@ fn test_multi_page_payout_stakers_backward_compatible() { } // verify we no longer track rewards in `legacy_claimed_rewards` vec + let ledger = Staking::ledger(11.into()); assert_eq!( Staking::ledger(11.into()).unwrap(), StakingLedgerInspect { @@ -4384,7 +4371,15 @@ fn test_multi_page_payout_stakers_backward_compatible() { // verify rewards are tracked to prevent double claims for page in 0..EraInfo::::get_page_count(1, &11) { - assert_eq!(EraInfo::::is_rewards_claimed(1, &11, page), true); + assert_eq!( + EraInfo::::is_rewards_claimed_with_legacy_fallback( + 1, + ledger.as_ref().unwrap(), + &11, + page + ), + true + ); } for i in 3..16 { @@ -4406,7 +4401,15 @@ fn test_multi_page_payout_stakers_backward_compatible() { // verify we track rewards for each era and page for page in 0..EraInfo::::get_page_count(i - 1, &11) { - 
assert_eq!(EraInfo::::is_rewards_claimed(i - 1, &11, page), true); + assert_eq!( + EraInfo::::is_rewards_claimed_with_legacy_fallback( + i - 1, + Staking::ledger(11.into()).as_ref().unwrap(), + &11, + page + ), + true + ); } } @@ -4518,7 +4521,6 @@ fn test_page_count_and_size() { mock::start_active_era(1); // Since max exposure page size is 64, 2 pages of nominators are created. - assert_eq!(MaxExposurePageSize::get(), 64); assert_eq!(EraInfo::::get_page_count(1, &11), 2); // first page has 64 nominators @@ -5292,6 +5294,41 @@ mod election_data_provider { use super::*; use frame_election_provider_support::ElectionDataProvider; + #[test] + fn targets_2sec_block() { + let mut validators = 1000; + while ::WeightInfo::get_npos_targets(validators).all_lt(Weight::from_parts( + 2u64 * frame_support::weights::constants::WEIGHT_REF_TIME_PER_SECOND, + u64::MAX, + )) { + validators += 1; + } + + println!("Can create a snapshot of {} validators in 2sec block", validators); + } + + #[test] + fn voters_2sec_block() { + // we assume a network only wants up to 1000 validators in most cases, thus having 2000 + // candidates is as high as it gets. + let validators = 2000; + let mut nominators = 1000; + + while ::WeightInfo::get_npos_voters(validators, nominators).all_lt( + Weight::from_parts( + 2u64 * frame_support::weights::constants::WEIGHT_REF_TIME_PER_SECOND, + u64::MAX, + ), + ) { + nominators += 1; + } + + println!( + "Can create a snapshot of {} nominators [{} validators, each 1 slashing] in 2sec block", + nominators, validators + ); + } + #[test] fn set_minimum_active_stake_is_correct() { ExtBuilder::default() @@ -5302,15 +5339,14 @@ mod election_data_provider { .build_and_execute(|| { // default bounds are unbounded. 
assert_ok!(::electing_voters( - DataProviderBounds::default(), - 0 + DataProviderBounds::default() )); assert_eq!(MinimumActiveStake::::get(), 10); // remove staker with lower bond by limiting the number of voters and check // `MinimumActiveStake` again after electing voters. let bounds = ElectionBoundsBuilder::default().voters_count(5.into()).build(); - assert_ok!(::electing_voters(bounds.voters, 0)); + assert_ok!(::electing_voters(bounds.voters)); assert_eq!(MinimumActiveStake::::get(), 50); }); } @@ -5321,8 +5357,7 @@ mod election_data_provider { ExtBuilder::default().has_stakers(false).build_and_execute(|| { // default bounds are unbounded. assert_ok!(::electing_voters( - DataProviderBounds::default(), - 0 + DataProviderBounds::default() )); assert_eq!(::VoterList::count(), 0); assert_eq!(MinimumActiveStake::::get(), 0); @@ -5338,11 +5373,9 @@ mod election_data_provider { assert_ok!(Staking::nominate(RuntimeOrigin::signed(4), vec![1])); assert_eq!(::VoterList::count(), 5); - let voters_before = ::electing_voters( - DataProviderBounds::default(), - 0, - ) - .unwrap(); + let voters_before = + ::electing_voters(DataProviderBounds::default()) + .unwrap(); assert_eq!(MinimumActiveStake::::get(), 5); // update minimum nominator bond. @@ -5352,11 +5385,9 @@ mod election_data_provider { // lower than `MinNominatorBond`. assert_eq!(::VoterList::count(), 5); - let voters = ::electing_voters( - DataProviderBounds::default(), - 0, - ) - .unwrap(); + let voters = + ::electing_voters(DataProviderBounds::default()) + .unwrap(); assert_eq!(voters_before, voters); // minimum active stake is lower than `MinNominatorBond`. 
@@ -5374,7 +5405,6 @@ mod election_data_provider { assert_eq!(Staking::weight_of(&101), 500); let voters = ::electing_voters( DataProviderBounds::default(), - 0, ) .unwrap(); assert_eq!(voters.len(), 5); @@ -5390,7 +5420,6 @@ mod election_data_provider { let voters = ::electing_voters( DataProviderBounds::default(), - 0, ) .unwrap(); // number of returned voters decreases since ledger entry of stash 101 is now @@ -5412,8 +5441,7 @@ mod election_data_provider { ExtBuilder::default().nominate(false).build_and_execute(|| { // default bounds are unbounded. assert!(>::iter().map(|(x, _)| x).all(|v| Staking::electing_voters( - DataProviderBounds::default(), - 0 + DataProviderBounds::default() ) .unwrap() .into_iter() @@ -5467,15 +5495,12 @@ mod election_data_provider { // 11 is taken; // we finish since the 2x limit is reached. assert_eq!( - Staking::electing_voters( - bounds_builder.voters_count(2.into()).build().voters, - 0 - ) - .unwrap() - .iter() - .map(|(stash, _, _)| stash) - .copied() - .collect::>(), + Staking::electing_voters(bounds_builder.voters_count(2.into()).build().voters) + .unwrap() + .iter() + .map(|(stash, _, _)| stash) + .copied() + .collect::>(), vec![11], ); }); @@ -5493,42 +5518,32 @@ mod election_data_provider { // if voter count limit is less.. assert_eq!( - Staking::electing_voters( - bounds_builder.voters_count(1.into()).build().voters, - 0 - ) - .unwrap() - .len(), + Staking::electing_voters(bounds_builder.voters_count(1.into()).build().voters) + .unwrap() + .len(), 1 ); // if voter count limit is equal.. assert_eq!( - Staking::electing_voters( - bounds_builder.voters_count(5.into()).build().voters, - 0 - ) - .unwrap() - .len(), + Staking::electing_voters(bounds_builder.voters_count(5.into()).build().voters) + .unwrap() + .len(), 5 ); // if voter count limit is more. 
assert_eq!( - Staking::electing_voters( - bounds_builder.voters_count(55.into()).build().voters, - 0 - ) - .unwrap() - .len(), + Staking::electing_voters(bounds_builder.voters_count(55.into()).build().voters) + .unwrap() + .len(), 5 ); // if target count limit is more.. assert_eq!( Staking::electable_targets( - bounds_builder.targets_count(6.into()).build().targets, - 0, + bounds_builder.targets_count(6.into()).build().targets ) .unwrap() .len(), @@ -5538,8 +5553,7 @@ mod election_data_provider { // if target count limit is equal.. assert_eq!( Staking::electable_targets( - bounds_builder.targets_count(4.into()).build().targets, - 0, + bounds_builder.targets_count(4.into()).build().targets ) .unwrap() .len(), @@ -5549,12 +5563,10 @@ mod election_data_provider { // if target limit count is less, then we return an error. assert_eq!( Staking::electable_targets( - bounds_builder.targets_count(1.into()).build().targets, - 0 + bounds_builder.targets_count(1.into()).build().targets ) - .unwrap() - .len(), - 1, + .unwrap_err(), + "Target snapshot too big" ); }); } @@ -5564,25 +5576,25 @@ mod election_data_provider { ExtBuilder::default().build_and_execute(|| { // voters: set size bounds that allows only for 1 voter. let bounds = ElectionBoundsBuilder::default().voters_size(26.into()).build(); - let elected = Staking::electing_voters(bounds.voters, 0).unwrap(); + let elected = Staking::electing_voters(bounds.voters).unwrap(); assert!(elected.encoded_size() == 26 as usize); let prev_len = elected.len(); // larger size bounds means more quota for voters. let bounds = ElectionBoundsBuilder::default().voters_size(100.into()).build(); - let elected = Staking::electing_voters(bounds.voters, 0).unwrap(); + let elected = Staking::electing_voters(bounds.voters).unwrap(); assert!(elected.encoded_size() <= 100 as usize); assert!(elected.len() > 1 && elected.len() > prev_len); // targets: set size bounds that allows for only one target to fit in the snapshot. 
let bounds = ElectionBoundsBuilder::default().targets_size(10.into()).build(); - let elected = Staking::electable_targets(bounds.targets, 0).unwrap(); + let elected = Staking::electable_targets(bounds.targets).unwrap(); assert!(elected.encoded_size() == 9 as usize); let prev_len = elected.len(); // larger size bounds means more space for targets. let bounds = ElectionBoundsBuilder::default().targets_size(100.into()).build(); - let elected = Staking::electable_targets(bounds.targets, 0).unwrap(); + let elected = Staking::electable_targets(bounds.targets).unwrap(); assert!(elected.encoded_size() <= 100 as usize); assert!(elected.len() > 1 && elected.len() > prev_len); }); @@ -5626,7 +5638,7 @@ mod election_data_provider { // even through 61 has nomination quota of 2 at the time of the election, all the // nominations (5) will be used. assert_eq!( - Staking::electing_voters(DataProviderBounds::default(), 0) + Staking::electing_voters(DataProviderBounds::default()) .unwrap() .iter() .map(|(stash, _, targets)| (*stash, targets.len())) @@ -5650,7 +5662,7 @@ mod election_data_provider { // nominations of controller 70 won't be added due to voter size limit exceeded. let bounds = ElectionBoundsBuilder::default().voters_size(100.into()).build(); assert_eq!( - Staking::electing_voters(bounds.voters, 0) + Staking::electing_voters(bounds.voters) .unwrap() .iter() .map(|(stash, _, targets)| (*stash, targets.len())) @@ -5667,7 +5679,7 @@ mod election_data_provider { // include the electing voters of 70. 
let bounds = ElectionBoundsBuilder::default().voters_size(1_000.into()).build(); assert_eq!( - Staking::electing_voters(bounds.voters, 0) + Staking::electing_voters(bounds.voters) .unwrap() .iter() .map(|(stash, _, targets)| (*stash, targets.len())) @@ -5678,10 +5690,10 @@ mod election_data_provider { } #[test] - fn estimate_next_election_single_page_works() { + fn estimate_next_election_works() { ExtBuilder::default().session_per_era(5).period(5).build_and_execute(|| { // first session is always length 0. - for b in 1..19 { + for b in 1..20 { run_to_block(b); assert_eq!(Staking::next_election_prediction(System::block_number()), 20); } @@ -5689,9 +5701,10 @@ mod election_data_provider { // election run_to_block(20); assert_eq!(Staking::next_election_prediction(System::block_number()), 45); + assert_eq!(staking_events().len(), 1); assert_eq!(*staking_events().last().unwrap(), Event::StakersElected); - for b in 21..44 { + for b in 21..45 { run_to_block(b); assert_eq!(Staking::next_election_prediction(System::block_number()), 45); } @@ -5699,6 +5712,7 @@ mod election_data_provider { // election run_to_block(45); assert_eq!(Staking::next_election_prediction(System::block_number()), 70); + assert_eq!(staking_events().len(), 3); assert_eq!(*staking_events().last().unwrap(), Event::StakersElected); Staking::force_no_eras(RuntimeOrigin::root()).unwrap(); @@ -5721,6 +5735,7 @@ mod election_data_provider { MinimumValidatorCount::::put(2); run_to_block(55); assert_eq!(Staking::next_election_prediction(System::block_number()), 55 + 25); + assert_eq!(staking_events().len(), 10); assert_eq!( *staking_events().last().unwrap(), Event::ForceEra { mode: Forcing::NotForcing } @@ -6233,7 +6248,7 @@ fn change_of_absolute_max_nominations() { let bounds = DataProviderBounds::default(); // 3 validators and 3 nominators - assert_eq!(Staking::electing_voters(bounds, 0).unwrap().len(), 3 + 3); + assert_eq!(Staking::electing_voters(bounds).unwrap().len(), 3 + 3); // abrupt change from 16 to 
4, everyone should be fine. AbsoluteMaxNominations::set(4); @@ -6244,7 +6259,7 @@ fn change_of_absolute_max_nominations() { .collect::>(), vec![(101, 2), (71, 3), (61, 1)] ); - assert_eq!(Staking::electing_voters(bounds, 0).unwrap().len(), 3 + 3); + assert_eq!(Staking::electing_voters(bounds).unwrap().len(), 3 + 3); // No one can be chilled on account of non-decodable keys. for k in Nominators::::iter_keys() { @@ -6263,7 +6278,7 @@ fn change_of_absolute_max_nominations() { .collect::>(), vec![(101, 2), (71, 3), (61, 1)] ); - assert_eq!(Staking::electing_voters(bounds, 0).unwrap().len(), 3 + 3); + assert_eq!(Staking::electing_voters(bounds).unwrap().len(), 3 + 3); // As before, no one can be chilled on account of non-decodable keys. for k in Nominators::::iter_keys() { @@ -6297,7 +6312,7 @@ fn change_of_absolute_max_nominations() { // but its value cannot be decoded and default is returned. assert!(Nominators::::get(71).is_none()); - assert_eq!(Staking::electing_voters(bounds, 0).unwrap().len(), 3 + 2); + assert_eq!(Staking::electing_voters(bounds).unwrap().len(), 3 + 2); assert!(Nominators::::contains_key(101)); // abrupt change from 2 to 1, this should cause some nominators to be non-decodable, and @@ -6321,7 +6336,7 @@ fn change_of_absolute_max_nominations() { assert!(Nominators::::contains_key(61)); assert!(Nominators::::get(71).is_none()); assert!(Nominators::::get(61).is_some()); - assert_eq!(Staking::electing_voters(bounds, 0).unwrap().len(), 3 + 1); + assert_eq!(Staking::electing_voters(bounds).unwrap().len(), 3 + 1); // now one of them can revive themselves by re-nominating to a proper value. 
assert_ok!(Staking::nominate(RuntimeOrigin::signed(71), vec![1])); @@ -6364,7 +6379,7 @@ fn nomination_quota_max_changes_decoding() { vec![(70, 3), (101, 2), (50, 4), (30, 4), (60, 1)] ); // 4 validators and 4 nominators - assert_eq!(Staking::electing_voters(unbonded_election, 0).unwrap().len(), 4 + 4); + assert_eq!(Staking::electing_voters(unbonded_election).unwrap().len(), 4 + 4); }); } @@ -6765,8 +6780,7 @@ fn reducing_max_unlocking_chunks_abrupt() { #[test] fn cannot_set_unsupported_validator_count() { ExtBuilder::default().build_and_execute(|| { - MaxValidatorSet::set(50); - MaxWinnersPerPage::set(50); + MaxWinners::set(50); // set validator count works assert_ok!(Staking::set_validator_count(RuntimeOrigin::root(), 30)); assert_ok!(Staking::set_validator_count(RuntimeOrigin::root(), 50)); @@ -6781,8 +6795,7 @@ fn cannot_set_unsupported_validator_count() { #[test] fn increase_validator_count_errors() { ExtBuilder::default().build_and_execute(|| { - MaxValidatorSet::set(50); - MaxWinnersPerPage::set(50); + MaxWinners::set(50); assert_ok!(Staking::set_validator_count(RuntimeOrigin::root(), 40)); // increase works @@ -6800,8 +6813,7 @@ fn increase_validator_count_errors() { #[test] fn scale_validator_count_errors() { ExtBuilder::default().build_and_execute(|| { - MaxValidatorSet::set(50); - MaxWinnersPerPage::set(50); + MaxWinners::set(50); assert_ok!(Staking::set_validator_count(RuntimeOrigin::root(), 20)); // scale value works @@ -6939,6 +6951,218 @@ fn should_retain_era_info_only_upto_history_depth() { }); } +#[test] +fn test_legacy_claimed_rewards_is_checked_at_reward_payout() { + ExtBuilder::default().has_stakers(false).build_and_execute(|| { + // Create a validator: + bond_validator(11, 1000); + + // reward validator for next 2 eras + mock::start_active_era(1); + Pallet::::reward_by_ids(vec![(11, 1)]); + mock::start_active_era(2); + Pallet::::reward_by_ids(vec![(11, 1)]); + mock::start_active_era(3); + + //verify rewards are not claimed + assert_eq!( + 
EraInfo::::is_rewards_claimed_with_legacy_fallback( + 1, + Staking::ledger(11.into()).as_ref().unwrap(), + &11, + 0 + ), + false + ); + assert_eq!( + EraInfo::::is_rewards_claimed_with_legacy_fallback( + 2, + Staking::ledger(11.into()).as_ref().unwrap(), + &11, + 0 + ), + false + ); + + // assume reward claim for era 1 was stored in legacy storage + Ledger::::insert( + 11, + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 1000, + unlocking: Default::default(), + legacy_claimed_rewards: bounded_vec![1], + }, + ); + + // verify rewards for era 1 cannot be claimed + assert_noop!( + Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 1, 0), + Error::::AlreadyClaimed + .with_weight(::WeightInfo::payout_stakers_alive_staked(0)), + ); + assert_eq!( + EraInfo::::is_rewards_claimed_with_legacy_fallback( + 1, + Staking::ledger(11.into()).as_ref().unwrap(), + &11, + 0 + ), + true + ); + + // verify rewards for era 2 can be claimed + assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 2, 0)); + assert_eq!( + EraInfo::::is_rewards_claimed_with_legacy_fallback( + 2, + Staking::ledger(11.into()).as_ref().unwrap(), + &11, + 0 + ), + true + ); + // but the new claimed rewards for era 2 is not stored in legacy storage + assert_eq!( + Ledger::::get(11).unwrap(), + StakingLedgerInspect { + stash: 11, + total: 1000, + active: 1000, + unlocking: Default::default(), + legacy_claimed_rewards: bounded_vec![1], + }, + ); + // instead it is kept in `ClaimedRewards` + assert_eq!(ClaimedRewards::::get(2, 11), vec![0]); + }); +} + +#[test] +fn test_validator_exposure_is_backward_compatible_with_non_paged_rewards_payout() { + ExtBuilder::default().has_stakers(false).build_and_execute(|| { + // case 1: exposure exist in clipped. 
+ // set page cap to 10 + MaxExposurePageSize::set(10); + bond_validator(11, 1000); + let mut expected_individual_exposures: Vec> = vec![]; + let mut total_exposure: Balance = 0; + // 1st exposure page + for i in 0..10 { + let who = 1000 + i; + let value = 1000 + i as Balance; + bond_nominator(who, value, vec![11]); + expected_individual_exposures.push(IndividualExposure { who, value }); + total_exposure += value; + } + + for i in 10..15 { + let who = 1000 + i; + let value = 1000 + i as Balance; + bond_nominator(who, value, vec![11]); + expected_individual_exposures.push(IndividualExposure { who, value }); + total_exposure += value; + } + + mock::start_active_era(1); + // reward validator for current era + Pallet::::reward_by_ids(vec![(11, 1)]); + + // start new era + mock::start_active_era(2); + // verify exposure for era 1 is stored in paged storage, that each exposure is stored in + // one and only one page, and no exposure is repeated. + let actual_exposure_page_0 = ErasStakersPaged::::get((1, 11, 0)).unwrap(); + let actual_exposure_page_1 = ErasStakersPaged::::get((1, 11, 1)).unwrap(); + expected_individual_exposures.iter().for_each(|exposure| { + assert!( + actual_exposure_page_0.others.contains(exposure) || + actual_exposure_page_1.others.contains(exposure) + ); + }); + assert_eq!( + expected_individual_exposures.len(), + actual_exposure_page_0.others.len() + actual_exposure_page_1.others.len() + ); + // verify `EraInfo` returns page from paged storage + assert_eq!( + EraInfo::::get_paged_exposure(1, &11, 0).unwrap().others(), + &actual_exposure_page_0.others + ); + assert_eq!( + EraInfo::::get_paged_exposure(1, &11, 1).unwrap().others(), + &actual_exposure_page_1.others + ); + assert_eq!(EraInfo::::get_page_count(1, &11), 2); + + // validator is exposed + assert!(::is_exposed_in_era(&11, &1)); + // nominators are exposed + for i in 10..15 { + let who: AccountId = 1000 + i; + assert!(::is_exposed_in_era(&who, &1)); + } + + // case 2: exposure exist in 
ErasStakers and ErasStakersClipped (legacy). + // delete paged storage and add exposure to clipped storage + >::remove((1, 11, 0)); + >::remove((1, 11, 1)); + >::remove(1, 11); + + >::insert( + 1, + 11, + Exposure { + total: total_exposure, + own: 1000, + others: expected_individual_exposures.clone(), + }, + ); + let mut clipped_exposure = expected_individual_exposures.clone(); + clipped_exposure.sort_by(|a, b| b.who.cmp(&a.who)); + clipped_exposure.truncate(10); + >::insert( + 1, + 11, + Exposure { total: total_exposure, own: 1000, others: clipped_exposure.clone() }, + ); + + // verify `EraInfo` returns exposure from clipped storage + let actual_exposure_paged = EraInfo::::get_paged_exposure(1, &11, 0).unwrap(); + assert_eq!(actual_exposure_paged.others(), &clipped_exposure); + assert_eq!(actual_exposure_paged.own(), 1000); + assert_eq!(actual_exposure_paged.exposure_metadata.page_count, 1); + + let actual_exposure_full = EraInfo::::get_full_exposure(1, &11); + assert_eq!(actual_exposure_full.others, expected_individual_exposures); + assert_eq!(actual_exposure_full.own, 1000); + assert_eq!(actual_exposure_full.total, total_exposure); + + // validator is exposed + assert!(::is_exposed_in_era(&11, &1)); + // nominators are exposed + for i in 10..15 { + let who: AccountId = 1000 + i; + assert!(::is_exposed_in_era(&who, &1)); + } + + // for pages other than 0, clipped storage returns empty exposure + assert_eq!(EraInfo::::get_paged_exposure(1, &11, 1), None); + // page size is 1 for clipped storage + assert_eq!(EraInfo::::get_page_count(1, &11), 1); + + // payout for page 0 works + assert_ok!(Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 0, 0)); + // payout for page 1 fails + assert_noop!( + Staking::payout_stakers_by_page(RuntimeOrigin::signed(1337), 11, 0, 1), + Error::::InvalidPage + .with_weight(::WeightInfo::payout_stakers_alive_staked(0)) + ); + }); +} + #[test] fn test_runtime_api_pending_rewards() { ExtBuilder::default().build_and_execute(|| 
{ @@ -6979,36 +7203,70 @@ fn test_runtime_api_pending_rewards() { others: individual_exposures, }; - // add exposure for validators - EraInfo::::upsert_exposure(0, &validator_one, exposure.clone()); - EraInfo::::upsert_exposure(0, &validator_two, exposure.clone()); + // add non-paged exposure for one and two. + >::insert(0, validator_one, exposure.clone()); + >::insert(0, validator_two, exposure.clone()); + // add paged exposure for third validator + EraInfo::::set_exposure(0, &validator_three, exposure); // add some reward to be distributed ErasValidatorReward::::insert(0, 1000); - // SCENARIO: Validator with paged exposure (two pages). - // validators have not claimed rewards, so pending rewards is true. - assert!(EraInfo::::pending_rewards(0, &validator_one)); - assert!(EraInfo::::pending_rewards(0, &validator_two)); - // and payout works - assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), validator_one, 0)); - assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), validator_two, 0)); - // validators have two pages of exposure, so pending rewards is still true. - assert!(EraInfo::::pending_rewards(0, &validator_one)); - assert!(EraInfo::::pending_rewards(0, &validator_two)); - // payout again only for validator one - assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), validator_one, 0)); - // now pending rewards is false for validator one + // mark rewards claimed for validator_one in legacy claimed rewards + >::insert( + validator_one, + StakingLedgerInspect { + stash: validator_one, + total: stake, + active: stake, + unlocking: Default::default(), + legacy_claimed_rewards: bounded_vec![0], + }, + ); + + // SCENARIO ONE: rewards already marked claimed in legacy storage. + // runtime api should return false for pending rewards for validator_one. assert!(!EraInfo::::pending_rewards(0, &validator_one)); - // and payout fails for validator one + // and if we try to pay, we get an error. 
assert_noop!( Staking::payout_stakers(RuntimeOrigin::signed(1337), validator_one, 0), Error::::AlreadyClaimed.with_weight(err_weight) ); - // while pending reward is true for validator two + + // SCENARIO TWO: non-paged exposure + // validator two has not claimed rewards, so pending rewards is true. assert!(EraInfo::::pending_rewards(0, &validator_two)); - // and payout works again for validator two. + // and payout works assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), validator_two, 0)); + // now pending rewards is false. + assert!(!EraInfo::::pending_rewards(0, &validator_two)); + // and payout fails + assert_noop!( + Staking::payout_stakers(RuntimeOrigin::signed(1337), validator_two, 0), + Error::::AlreadyClaimed.with_weight(err_weight) + ); + + // SCENARIO THREE: validator with paged exposure (two pages). + // validator three has not claimed rewards, so pending rewards is true. + assert!(EraInfo::::pending_rewards(0, &validator_three)); + // and payout works + assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), validator_three, 0)); + // validator three has two pages of exposure, so pending rewards is still true. + assert!(EraInfo::::pending_rewards(0, &validator_three)); + // payout again + assert_ok!(Staking::payout_stakers(RuntimeOrigin::signed(1337), validator_three, 0)); + // now pending rewards is false. + assert!(!EraInfo::::pending_rewards(0, &validator_three)); + // and payout fails + assert_noop!( + Staking::payout_stakers(RuntimeOrigin::signed(1337), validator_three, 0), + Error::::AlreadyClaimed.with_weight(err_weight) + ); + + // for eras with no exposure, pending rewards is false. 
+ assert!(!EraInfo::::pending_rewards(0, &validator_one)); + assert!(!EraInfo::::pending_rewards(0, &validator_two)); + assert!(!EraInfo::::pending_rewards(0, &validator_three)); }); } @@ -7462,7 +7720,6 @@ mod staking_unchecked { }) } } - mod ledger { use super::*; @@ -8584,7 +8841,6 @@ fn reenable_lower_offenders_mock() { assert_eq!( staking_events_since_last_call(), vec![ - Event::PagedElectionProceeded { page: 0, result: Ok(7) }, Event::StakersElected, Event::EraPaid { era_index: 0, validator_payout: 11075, remainder: 33225 }, Event::SlashReported { @@ -8672,7 +8928,6 @@ fn do_not_reenable_higher_offenders_mock() { assert_eq!( staking_events_since_last_call(), vec![ - Event::PagedElectionProceeded { page: 0, result: Ok(7) }, Event::StakersElected, Event::EraPaid { era_index: 0, validator_payout: 11075, remainder: 33225 }, Event::SlashReported { @@ -8747,12 +9002,12 @@ mod getters { slashing, tests::{Staking, Test}, ActiveEra, ActiveEraInfo, BalanceOf, CanceledSlashPayout, ClaimedRewards, CurrentEra, - CurrentPlannedSession, EraRewardPoints, ErasRewardPoints, ErasStartSessionIndex, - ErasTotalStake, ErasValidatorPrefs, ErasValidatorReward, ForceEra, Forcing, Nominations, - Nominators, Perbill, SlashRewardFraction, SlashingSpans, ValidatorPrefs, Validators, + CurrentPlannedSession, EraRewardPoints, ErasRewardPoints, ErasStakersClipped, + ErasStartSessionIndex, ErasTotalStake, ErasValidatorPrefs, ErasValidatorReward, ForceEra, + Forcing, Nominations, Nominators, Perbill, SlashRewardFraction, SlashingSpans, + ValidatorPrefs, Validators, }; - use frame_support::BoundedVec; - use sp_staking::{EraIndex, Page, SessionIndex}; + use sp_staking::{EraIndex, Exposure, IndividualExposure, Page, SessionIndex}; #[test] fn get_validator_count_returns_value_from_storage() { @@ -8789,9 +9044,7 @@ mod getters { sp_io::TestExternalities::default().execute_with(|| { // given let v: Vec = vec![1, 2, 3]; - Invulnerables::::put( - BoundedVec::try_from(v.clone()).expect("Too many 
invulnerable validators!"), - ); + Invulnerables::::put(v.clone()); // when let result = Staking::invulnerables(); @@ -8890,6 +9143,27 @@ mod getters { }); } + #[test] + fn get_eras_stakers_clipped_returns_value_from_storage() { + sp_io::TestExternalities::default().execute_with(|| { + // given + let era: EraIndex = 12; + let account_id: mock::AccountId = 1; + let exposure: Exposure> = Exposure { + total: 1125, + own: 1000, + others: vec![IndividualExposure { who: 101, value: 125 }], + }; + ErasStakersClipped::::insert(era, account_id, exposure.clone()); + + // when + let result = Staking::eras_stakers_clipped(era, &account_id); + + // then + assert_eq!(result, exposure); + }); + } + #[test] fn get_claimed_rewards_returns_value_from_storage() { sp_io::TestExternalities::default().execute_with(|| { diff --git a/substrate/frame/staking/src/tests_paged_election.rs b/substrate/frame/staking/src/tests_paged_election.rs deleted file mode 100644 index 76be6819d113e..0000000000000 --- a/substrate/frame/staking/src/tests_paged_election.rs +++ /dev/null @@ -1,971 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -use crate::{mock::*, *}; -use frame_support::{assert_ok, testing_prelude::*}; -use substrate_test_utils::assert_eq_uvec; - -use frame_election_provider_support::{ - bounds::ElectionBoundsBuilder, ElectionDataProvider, SortedListProvider, Support, -}; -use sp_staking::StakingInterface; - -mod electable_stashes { - use super::*; - - #[test] - fn add_electable_stashes_work() { - ExtBuilder::default().try_state(false).build_and_execute(|| { - MaxValidatorSet::set(5); - assert_eq!(MaxValidatorSet::get(), 5); - assert!(ElectableStashes::::get().is_empty()); - - // adds stashes without duplicates, do not overflow bounds. - assert_ok!(Staking::add_electables(vec![1u64, 2, 3].into_iter())); - assert_eq!( - ElectableStashes::::get().into_inner().into_iter().collect::>(), - vec![1, 2, 3] - ); - - // adds with duplicates which are deduplicated implicitly, no not overflow bounds. - assert_ok!(Staking::add_electables(vec![1u64, 2, 4].into_iter())); - assert_eq!( - ElectableStashes::::get().into_inner().into_iter().collect::>(), - vec![1, 2, 3, 4] - ); - }) - } - - #[test] - fn add_electable_stashes_overflow_works() { - ExtBuilder::default().try_state(false).build_and_execute(|| { - MaxValidatorSet::set(5); - assert_eq!(MaxValidatorSet::get(), 5); - assert!(ElectableStashes::::get().is_empty()); - - // adds stashes so that bounds are overflown, fails and internal state changes so that - // all slots are filled. error will return the idx of the first account that was not - // included. - let expected_idx_not_included = 5; // stash 6. - assert_eq!( - Staking::add_electables(vec![1u64, 2, 3, 4, 5, 6, 7, 8].into_iter()), - Err(expected_idx_not_included) - ); - // the included were added to the electable stashes, despite the error. 
- assert_eq!( - ElectableStashes::::get().into_inner().into_iter().collect::>(), - vec![1, 2, 3, 4, 5] - ); - }) - } - - #[test] - fn overflow_electable_stashes_no_exposures_work() { - // ensures exposures are stored only for the electable stashes that fit within the - // electable stashes bounds in case of overflow. - ExtBuilder::default().try_state(false).build_and_execute(|| { - MaxValidatorSet::set(2); - assert_eq!(MaxValidatorSet::get(), 2); - assert!(ElectableStashes::::get().is_empty()); - - // current era is 0, preparing 1. - assert_eq!(current_era(), 0); - - let supports = to_bounded_supports(vec![ - (1, Support { total: 100, voters: vec![(10, 1_000)] }), - (2, Support { total: 200, voters: vec![(20, 2_000)] }), - (3, Support { total: 300, voters: vec![(30, 3_000)] }), - (4, Support { total: 400, voters: vec![(40, 4_000)] }), - ]); - - // error due to bounds. - let expected_not_included = 2; - assert_eq!(Staking::do_elect_paged_inner(supports), Err(expected_not_included)); - - // electable stashes have been collected to the max bounds despite the error. - assert_eq!(ElectableStashes::::get().into_iter().collect::>(), vec![1, 2]); - - let exposure_exists = - |acc, era| EraInfo::::get_full_exposure(era, &acc).total != 0; - - // exposures were only collected for electable stashes in bounds (1 and 2). - assert!(exposure_exists(1, 1)); - assert!(exposure_exists(2, 1)); - assert!(!exposure_exists(3, 1)); - assert!(!exposure_exists(4, 1)); - }) - } -} - -mod paged_on_initialize { - use super::*; - use frame_election_provider_support::onchain; - - #[test] - fn single_page_election_works() { - ExtBuilder::default() - // set desired targets to 3. - .validator_count(3) - .build_and_execute(|| { - let next_election = Staking::next_election_prediction(System::block_number()); - assert_eq!(next_election, 10); - - // single page. - let pages: BlockNumber = Staking::election_pages().into(); - assert_eq!(pages, 1); - - // genesis validators are now in place. 
- assert_eq!(current_era(), 0); - assert_eq_uvec!(Session::validators(), vec![11, 21, 31]); - - // force unstake of 31 to ensure the election results of the next era are - // different than genesis. - assert_ok!(Staking::force_unstake(RuntimeOrigin::root(), 31, 0)); - - let expected_elected = Validators::::iter_keys() - .filter(|x| Staking::status(x) == Ok(StakerStatus::Validator)) - .collect::>(); - // use all registered validators as potential targets. - ValidatorCount::::set(expected_elected.len() as u32); - assert_eq!(expected_elected.len(), 2); - - // 1. election prep hasn't started yet, election cursor and electable stashes are - // not set yet. - run_to_block(8); - assert_eq!(NextElectionPage::::get(), None); - assert!(ElectableStashes::::get().is_empty()); - assert_eq!(VoterSnapshotStatus::::get(), SnapshotStatus::Waiting); - - // try-state sanity check. - assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number())); - - // 2. starts preparing election at the (election_prediction - n_pages) block. - run_to_block(9); - assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number())); - - // electing started, but since single-page, we don't set `NextElectionPage` at all. - assert_eq!(NextElectionPage::::get(), None); - // now the electable stashes have been fetched and stored. - assert_eq_uvec!( - ElectableStashes::::get().into_iter().collect::>(), - expected_elected - ); - assert_eq!(VoterSnapshotStatus::::get(), SnapshotStatus::Waiting); - - // era is still 0. - assert_eq!(current_era(), 0); - - // 3. progress to election block, which matches with era rotation. - run_to_block(10); - assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number())); - assert_eq!(current_era(), 1); - // clears out election metadata for era. 
- assert!(NextElectionPage::::get().is_none()); - assert!(ElectableStashes::::get().into_iter().collect::>().is_empty()); - assert_eq!(VoterSnapshotStatus::::get(), SnapshotStatus::Waiting); - - // era progressed and electable stashes have been served to session pallet. - assert_eq_uvec!(Session::validators(), vec![11, 21, 31]); - - // 4. in the next era, the validator set does not include 31 anymore which was - // unstaked. - start_active_era(2); - assert_eq_uvec!(Session::validators(), vec![11, 21]); - }) - } - - #[test] - fn single_page_election_era_transition_exposures_work() { - ExtBuilder::default() - // set desired targets to 3. - .validator_count(3) - .build_and_execute(|| { - assert_eq!(current_era(), 0); - - // 3 sessions per era. - assert_eq!(SessionsPerEra::get(), 3); - - // genesis validators and exposures. - assert_eq!(current_era(), 0); - assert_eq_uvec!(validator_controllers(), vec![11, 21, 31]); - assert_eq!( - era_exposures(current_era()), - vec![ - ( - 11, - Exposure { - total: 1125, - own: 1000, - others: vec![IndividualExposure { who: 101, value: 125 }] - } - ), - ( - 21, - Exposure { - total: 1375, - own: 1000, - others: vec![IndividualExposure { who: 101, value: 375 }] - } - ), - (31, Exposure { total: 500, own: 500, others: vec![] }) - ] - ); - - // try-state sanity check. - assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number())); - - start_session(1); - assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number())); - assert_eq!(current_era(), 0); - // election haven't started yet. - assert_eq!(NextElectionPage::::get(), None); - assert!(ElectableStashes::::get().is_empty()); - - // progress to era rotation session. 
- start_session(SessionsPerEra::get()); - assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number())); - assert_eq!(current_era(), 1); - assert_eq_uvec!(Session::validators(), vec![11, 21, 31]); - assert_eq!( - era_exposures(current_era()), - vec![ - ( - 11, - Exposure { - total: 1125, - own: 1000, - others: vec![IndividualExposure { who: 101, value: 125 }] - } - ), - ( - 21, - Exposure { - total: 1375, - own: 1000, - others: vec![IndividualExposure { who: 101, value: 375 }] - } - ), - (31, Exposure { total: 500, own: 500, others: vec![] }) - ] - ); - - // force unstake validator 31 for next era. - assert_ok!(Staking::force_unstake(RuntimeOrigin::root(), 31, 0)); - - // progress session and rotate era. - start_session(SessionsPerEra::get() * 2); - assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number())); - assert_eq!(current_era(), 2); - assert_eq_uvec!(Session::validators(), vec![11, 21]); - - assert_eq!( - era_exposures(current_era()), - vec![ - ( - 11, - Exposure { - total: 1125, - own: 1000, - others: vec![IndividualExposure { who: 101, value: 125 }] - } - ), - ( - 21, - Exposure { - total: 1375, - own: 1000, - others: vec![IndividualExposure { who: 101, value: 375 }] - } - ), - ] - ); - - assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number())); - }) - } - - #[test] - fn multi_page_election_works() { - ExtBuilder::default() - .add_staker(61, 61, 1000, StakerStatus::Validator) - .add_staker(71, 71, 1000, StakerStatus::Validator) - .add_staker(81, 81, 1000, StakerStatus::Validator) - .add_staker(91, 91, 1000, StakerStatus::Validator) - .multi_page_election_provider(3) - .max_winners_per_page(5) - .build_and_execute(|| { - // we need this later. - let genesis_validators = Session::validators(); - - // election provider has 3 pages. - let pages: BlockNumber = - <::ElectionProvider as ElectionProvider>::Pages::get().into(); - assert_eq!(pages, 3); - - // 5 max winners per page. 
- let max_winners_page = <::ElectionProvider as ElectionProvider>::MaxWinnersPerPage::get(); - assert_eq!(max_winners_page, 5); - - // genesis era. - assert_eq!(current_era(), 0); - - // confirm the genesis validators. - assert_eq!(Session::validators(), vec![11, 21]); - - let next_election = ::next_election_prediction( - System::block_number(), - ); - assert_eq!(next_election, 10); - - let expected_elected = Validators::::iter_keys() - .filter(|x| Staking::status(x) == Ok(StakerStatus::Validator)) - // mock multi page election provider takes first `max_winners_page` - // validators as winners. - .take(max_winners_page as usize) - .collect::>(); - // adjust desired targets to number of winners per page. - ValidatorCount::::set(expected_elected.len() as u32); - assert_eq!(expected_elected.len(), 5); - - // try-state sanity check. - assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number())); - - // 1. election prep hasn't started yet, election cursor and electable stashes are - // not set yet. - run_to_block(6); - assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number())); - assert_eq!(NextElectionPage::::get(), None); - assert!(ElectableStashes::::get().is_empty()); - - // 2. starts preparing election at the (election_prediction - n_pages) block. - // fetches msp (i.e. 2). - run_to_block(7); - assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number())); - - // electing started at cursor is set once the election starts to be prepared. - assert_eq!(NextElectionPage::::get(), Some(1)); - // now the electable stashes started to be fetched and stored. - assert_eq_uvec!( - ElectableStashes::::get().into_iter().collect::>(), - expected_elected - ); - // exposures have been collected for all validators in the page. - // note that the mock election provider adds one exposures per winner for - // each page. - for s in expected_elected.iter() { - // 1 page fetched, 1 `other` exposure collected per electable stash. 
- assert_eq!(Staking::eras_stakers(current_era() + 1, s).others.len(), 1); - } - - // 3. progress one block to fetch page 1. - run_to_block(8); - assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number())); - // the electable stashes remain the same. - assert_eq_uvec!( - ElectableStashes::::get().into_iter().collect::>(), - expected_elected - ); - // election cursor moves along. - assert_eq!(NextElectionPage::::get(), Some(0)); - // exposures have been collected for all validators in the page. - for s in expected_elected.iter() { - // 2 pages fetched, 2 `other` exposures collected per electable stash. - assert_eq!(Staking::eras_stakers(current_era() + 1, s).others.len(), 2); - } - - // 4. progress one block to fetch lsp (i.e. 0). - run_to_block(9); - assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number())); - // the electable stashes remain the same. - assert_eq_uvec!( - ElectableStashes::::get().into_iter().collect::>(), - expected_elected - ); - // exposures have been collected for all validators in the page. - for s in expected_elected.iter() { - // 3 pages fetched, 3 `other` exposures collected per electable stash. - assert_eq!(Staking::eras_stakers(current_era() + 1, s).others.len(), 3); - } - assert_eq!(NextElectionPage::::get(), None); - assert_eq!(staking_events_since_last_call(), vec![ - Event::PagedElectionProceeded { page: 2, result: Ok(5) }, - Event::PagedElectionProceeded { page: 1, result: Ok(0) }, - Event::PagedElectionProceeded { page: 0, result: Ok(0) } - ]); - - // upon fetching page 0, the electing started will remain in storage until the - // era rotates. - assert_eq!(current_era(), 0); - - // Next block the era will rotate. - run_to_block(10); - assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number())); - // and all the metadata has been cleared up and ready for the next election. 
- assert!(NextElectionPage::::get().is_none()); - assert!(ElectableStashes::::get().is_empty()); - // events - assert_eq!(staking_events_since_last_call(), vec![ - Event::StakersElected - ]); - // session validators are not updated yet, these are genesis validators - assert_eq_uvec!(Session::validators(), genesis_validators); - - // next session they are updated. - advance_session(); - // the new era validators are the expected elected stashes. - assert_eq_uvec!(Session::validators(), expected_elected); - assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number())); - - }) - } - - #[test] - fn multi_page_election_with_mulit_page_exposures_rewards_work() { - ExtBuilder::default() - .add_staker(61, 61, 1000, StakerStatus::Validator) - .add_staker(71, 71, 1000, StakerStatus::Validator) - .add_staker(1, 1, 5, StakerStatus::Nominator(vec![21, 31, 71])) - .add_staker(2, 2, 5, StakerStatus::Nominator(vec![21, 31, 71])) - .add_staker(3, 3, 5, StakerStatus::Nominator(vec![21, 31, 71])) - .multi_page_election_provider(3) - .max_winners_per_page(3) - .exposures_page_size(2) - .build_and_execute(|| { - // election provider has 3 pages. - let pages: BlockNumber = - <::ElectionProvider as ElectionProvider>::Pages::get().into(); - assert_eq!(pages, 3); - // 3 max winners per page. - let max_winners_page = <::ElectionProvider as ElectionProvider>::MaxWinnersPerPage::get(); - assert_eq!(max_winners_page, 3); - - // setup validator payee prefs and 10% commission. - for s in vec![21, 31, 71] { - Payee::::insert(s, RewardDestination::Account(s)); - let prefs = ValidatorPrefs { commission: Perbill::from_percent(10), ..Default::default() }; - Validators::::insert(s, prefs.clone()); - } - - let init_balance_all = vec![21, 31, 71, 1, 2, 3].iter().fold(0, |mut acc, s| { - acc += asset::total_balance::(&s); - acc - }); - - // progress era. 
- assert_eq!(current_era(), 0); - start_active_era(1); - assert_eq!(current_era(), 1); - assert_eq!(Session::validators(), vec![21, 31, 71]); - - // distribute reward, - Pallet::::reward_by_ids(vec![(21, 50)]); - Pallet::::reward_by_ids(vec![(31, 50)]); - Pallet::::reward_by_ids(vec![(71, 50)]); - - let total_payout = current_total_payout_for_duration(reward_time_per_era()); - - start_active_era(2); - - // all the validators exposed in era 1 have two pages of exposures, since exposure - // page size is 2. - assert_eq!(MaxExposurePageSize::get(), 2); - assert_eq!(EraInfo::::get_page_count(1, &21), 2); - assert_eq!(EraInfo::::get_page_count(1, &31), 2); - assert_eq!(EraInfo::::get_page_count(1, &71), 2); - - make_all_reward_payment(1); - - let balance_all = vec![21, 31, 71, 1, 2, 3].iter().fold(0, |mut acc, s| { - acc += asset::total_balance::(&s); - acc - }); - - assert_eq_error_rate!( - total_payout, - balance_all - init_balance_all, - 4 - ); - }) - } - - #[test] - fn multi_page_election_is_graceful() { - // demonstrate that in a multi-page election, in some of the `elect(_)` calls fail we won't - // bail right away. - ExtBuilder::default().multi_page_election_provider(3).build_and_execute(|| { - // load some exact data into the election provider, some of which are error or empty. - let correct_results = ::GenesisElectionProvider::elect(0); - CustomElectionSupports::set(Some(vec![ - // page 0. - correct_results.clone(), - // page 1. - Err(onchain::Error::FailedToBound), - // page 2. - Ok(Default::default()), - ])); - - // genesis era. - assert_eq!(current_era(), 0); - - let next_election = - ::next_election_prediction(System::block_number()); - assert_eq!(next_election, 10); - - // try-state sanity check. - assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number())); - - // 1. election prep hasn't started yet, election cursor and electable stashes are - // not set yet. 
- run_to_block(6); - assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number())); - assert_eq!(NextElectionPage::::get(), None); - assert!(ElectableStashes::::get().is_empty()); - - // 2. starts preparing election at the (election_prediction - n_pages) block. - // fetches lsp (i.e. 2). - run_to_block(7); - assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number())); - - // electing started at cursor is set once the election starts to be prepared. - assert_eq!(NextElectionPage::::get(), Some(1)); - // in elect(2) we won't collect any stashes yet. - assert!(ElectableStashes::::get().is_empty()); - - // 3. progress one block to fetch page 1. - run_to_block(8); - assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number())); - - // in elect(1) we won't collect any stashes yet. - assert!(ElectableStashes::::get().is_empty()); - // election cursor is updated - assert_eq!(NextElectionPage::::get(), Some(0)); - - // 4. progress one block to fetch mps (i.e. 0). - run_to_block(9); - assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number())); - - // some stashes come in. - assert_eq!( - ElectableStashes::::get().into_iter().collect::>(), - vec![11 as AccountId, 21] - ); - // cursor is now none - assert_eq!(NextElectionPage::::get(), None); - - // events thus far - assert_eq!( - staking_events_since_last_call(), - vec![ - Event::PagedElectionProceeded { page: 2, result: Ok(0) }, - Event::PagedElectionProceeded { page: 1, result: Err(0) }, - Event::PagedElectionProceeded { page: 0, result: Ok(2) } - ] - ); - - // upon fetching page 0, the electing started will remain in storage until the - // era rotates. - assert_eq!(current_era(), 0); - - // Next block the era will rotate. - run_to_block(10); - assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number())); - - // and all the metadata has been cleared up and ready for the next election. 
- assert!(NextElectionPage::::get().is_none()); - assert!(ElectableStashes::::get().is_empty()); - - // and the overall staking worked fine. - assert_eq!(staking_events_since_last_call(), vec![Event::StakersElected]); - }) - } - - #[test] - fn multi_page_election_fails_if_not_enough_validators() { - // a graceful multi-page election still fails if not enough validators are provided. - ExtBuilder::default() - .multi_page_election_provider(3) - .minimum_validator_count(3) - .build_and_execute(|| { - // load some exact data into the election provider, some of which are error or - // empty. - let correct_results = ::GenesisElectionProvider::elect(0); - CustomElectionSupports::set(Some(vec![ - // page 0. - correct_results.clone(), - // page 1. - Err(onchain::Error::FailedToBound), - // page 2. - Ok(Default::default()), - ])); - - // genesis era. - assert_eq!(current_era(), 0); - - let next_election = ::next_election_prediction( - System::block_number(), - ); - assert_eq!(next_election, 10); - - // try-state sanity check. - assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number())); - - // 1. election prep hasn't started yet, election cursor and electable stashes are - // not set yet. - run_to_block(6); - assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number())); - assert_eq!(NextElectionPage::::get(), None); - assert!(ElectableStashes::::get().is_empty()); - - // 2. starts preparing election at the (election_prediction - n_pages) block. - // fetches lsp (i.e. 2). - run_to_block(7); - assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number())); - - // electing started at cursor is set once the election starts to be prepared. - assert_eq!(NextElectionPage::::get(), Some(1)); - // in elect(2) we won't collect any stashes yet. - assert!(ElectableStashes::::get().is_empty()); - - // 3. progress one block to fetch page 1. 
- run_to_block(8); - assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number())); - - // in elect(1) we won't collect any stashes yet. - assert!(ElectableStashes::::get().is_empty()); - // election cursor is updated - assert_eq!(NextElectionPage::::get(), Some(0)); - - // 4. progress one block to fetch mps (i.e. 0). - run_to_block(9); - assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number())); - - // some stashes come in. - assert_eq!( - ElectableStashes::::get().into_iter().collect::>(), - vec![11 as AccountId, 21] - ); - // cursor is now none - assert_eq!(NextElectionPage::::get(), None); - - // events thus far - assert_eq!( - staking_events_since_last_call(), - vec![ - Event::PagedElectionProceeded { page: 2, result: Ok(0) }, - Event::PagedElectionProceeded { page: 1, result: Err(0) }, - Event::PagedElectionProceeded { page: 0, result: Ok(2) } - ] - ); - - // upon fetching page 0, the electing started will remain in storage until the - // era rotates. - assert_eq!(current_era(), 0); - - // Next block the era will rotate. - run_to_block(10); - assert_ok!(Staking::ensure_snapshot_metadata_state(System::block_number())); - - // and all the metadata has been cleared up and ready for the next election. - assert!(NextElectionPage::::get().is_none()); - assert!(ElectableStashes::::get().is_empty()); - - // and the overall staking worked fine. - assert_eq!(staking_events_since_last_call(), vec![Event::StakingElectionFailed]); - }) - } -} - -mod paged_snapshot { - use super::*; - - #[test] - fn target_snapshot_works() { - ExtBuilder::default() - .nominate(true) - .set_status(41, StakerStatus::Validator) - .set_status(51, StakerStatus::Validator) - .set_status(101, StakerStatus::Idle) - .build_and_execute(|| { - // all registered validators. - let all_targets = vec![51, 31, 41, 21, 11]; - assert_eq_uvec!( - ::TargetList::iter().collect::>(), - all_targets, - ); - - // 3 targets per page. 
- let bounds = - ElectionBoundsBuilder::default().targets_count(3.into()).build().targets; - - let targets = - ::electable_targets(bounds, 0).unwrap(); - assert_eq_uvec!(targets, all_targets.iter().take(3).cloned().collect::>()); - - // emulates a no bounds target snapshot request. - let bounds = - ElectionBoundsBuilder::default().targets_count(u32::MAX.into()).build().targets; - - let single_page_targets = - ::electable_targets(bounds, 0).unwrap(); - - // complete set of paged targets is the same as single page, no bounds set of - // targets. - assert_eq_uvec!(all_targets, single_page_targets); - }) - } - - #[test] - fn target_snaposhot_multi_page_redundant() { - ExtBuilder::default().build_and_execute(|| { - let all_targets = vec![31, 21, 11]; - assert_eq_uvec!(::TargetList::iter().collect::>(), all_targets,); - - // no bounds. - let bounds = - ElectionBoundsBuilder::default().targets_count(u32::MAX.into()).build().targets; - - // target snapshot supports only single-page, thus it is redundant what's the page index - // requested. 
- let snapshot = Staking::electable_targets(bounds, 0).unwrap(); - assert!( - snapshot == all_targets && - snapshot == Staking::electable_targets(bounds, 1).unwrap() && - snapshot == Staking::electable_targets(bounds, 2).unwrap() && - snapshot == Staking::electable_targets(bounds, u32::MAX).unwrap(), - ); - }) - } - - #[test] - fn voter_snapshot_works() { - ExtBuilder::default() - .nominate(true) - .set_status(51, StakerStatus::Validator) - .set_status(41, StakerStatus::Nominator(vec![51])) - .set_status(101, StakerStatus::Validator) - .build_and_execute(|| { - let bounds = ElectionBoundsBuilder::default().voters_count(3.into()).build().voters; - assert_eq!( - ::VoterList::iter().collect::>(), - vec![11, 21, 31, 41, 51, 101], - ); - - let mut all_voters = vec![]; - - let voters_page_3 = ::electing_voters(bounds, 3) - .unwrap() - .into_iter() - .map(|(a, _, _)| a) - .collect::>(); - all_voters.extend(voters_page_3.clone()); - - assert_eq!(voters_page_3, vec![11, 21, 31]); - - let voters_page_2 = ::electing_voters(bounds, 2) - .unwrap() - .into_iter() - .map(|(a, _, _)| a) - .collect::>(); - all_voters.extend(voters_page_2.clone()); - - assert_eq!(voters_page_2, vec![41, 51, 101]); - - // all voters in the list have been consumed. - assert_eq!(VoterSnapshotStatus::::get(), SnapshotStatus::Consumed); - - // thus page 1 and 0 are empty. - assert!(::electing_voters(bounds, 1) - .unwrap() - .is_empty()); - assert!(::electing_voters(bounds, 0) - .unwrap() - .is_empty()); - - // last page has been requested, reset the snapshot status to waiting. - assert_eq!(VoterSnapshotStatus::::get(), SnapshotStatus::Waiting); - - // now request 1 page with bounds where all registered voters fit. u32::MAX - // emulates a no bounds request. 
- let bounds = - ElectionBoundsBuilder::default().voters_count(u32::MAX.into()).build().targets; - - let single_page_voters = - ::electing_voters(bounds, 0) - .unwrap() - .into_iter() - .map(|(a, _, _)| a) - .collect::>(); - - // complete set of paged voters is the same as single page, no bounds set of - // voters. - assert_eq!(all_voters, single_page_voters); - }) - } - - #[test] - #[should_panic] - fn voter_snapshot_starts_from_msp_to_lsp() { - todo!(); - } -} - -mod paged_exposures { - use super::*; - - #[test] - fn genesis_collect_exposures_works() { - ExtBuilder::default().multi_page_election_provider(3).build_and_execute(|| { - // first, clean up all the era data and metadata to mimic a genesis election next. - Staking::clear_era_information(current_era()); - - // genesis election is single paged. - let genesis_result = <::GenesisElectionProvider>::elect(0u32).unwrap(); - let expected_exposures = Staking::collect_exposures(genesis_result.clone()); - - Staking::try_plan_new_era(0u32, true); - - // expected exposures are stored for the expected genesis validators. 
- for exposure in expected_exposures { - assert_eq!(EraInfo::::get_full_exposure(0, &exposure.0), exposure.1); - } - }) - } - - #[test] - fn store_stakers_info_elect_works() { - ExtBuilder::default().exposures_page_size(2).build_and_execute(|| { - assert_eq!(MaxExposurePageSize::get(), 2); - - let exposure_one = Exposure { - total: 1000 + 700, - own: 1000, - others: vec![ - IndividualExposure { who: 101, value: 500 }, - IndividualExposure { who: 102, value: 100 }, - IndividualExposure { who: 103, value: 100 }, - ], - }; - - let exposure_two = Exposure { - total: 1000 + 1000, - own: 1000, - others: vec![ - IndividualExposure { who: 104, value: 500 }, - IndividualExposure { who: 105, value: 500 }, - ], - }; - - let exposure_three = Exposure { - total: 1000 + 500, - own: 1000, - others: vec![ - IndividualExposure { who: 110, value: 250 }, - IndividualExposure { who: 111, value: 250 }, - ], - }; - - let exposures_page_one = bounded_vec![(1, exposure_one), (2, exposure_two),]; - let exposures_page_two = bounded_vec![(1, exposure_three),]; - - // stores exposure page with exposures of validator 1 and 2, returns exposed validator - // account id. - assert_eq!( - Pallet::::store_stakers_info(exposures_page_one, current_era()).to_vec(), - vec![1, 2] - ); - // Stakers overview OK for validator 1 and 2. - assert_eq!( - ErasStakersOverview::::get(0, &1).unwrap(), - PagedExposureMetadata { total: 1700, own: 1000, nominator_count: 3, page_count: 2 }, - ); - assert_eq!( - ErasStakersOverview::::get(0, &2).unwrap(), - PagedExposureMetadata { total: 2000, own: 1000, nominator_count: 2, page_count: 1 }, - ); - - // stores exposure page with exposures of validator 1, returns exposed validator - // account id. - assert_eq!( - Pallet::::store_stakers_info(exposures_page_two, current_era()).to_vec(), - vec![1] - ); - - // Stakers overview OK for validator 1. 
- assert_eq!( - ErasStakersOverview::::get(0, &1).unwrap(), - PagedExposureMetadata { total: 2200, own: 1000, nominator_count: 5, page_count: 3 }, - ); - - // validator 1 has 3 paged exposures. - assert!( - ErasStakersPaged::::iter_prefix_values((0, &1)).count() as u32 == - EraInfo::::get_page_count(0, &1) && - EraInfo::::get_page_count(0, &1) == 3 - ); - assert!(ErasStakersPaged::::get((0, &1, 0)).is_some()); - assert!(ErasStakersPaged::::get((0, &1, 1)).is_some()); - assert!(ErasStakersPaged::::get((0, &1, 2)).is_some()); - assert!(ErasStakersPaged::::get((0, &1, 3)).is_none()); - - // validator 2 has 1 paged exposures. - assert!(ErasStakersPaged::::get((0, &2, 0)).is_some()); - assert!(ErasStakersPaged::::get((0, &2, 1)).is_none()); - assert_eq!(ErasStakersPaged::::iter_prefix_values((0, &2)).count(), 1); - - // exposures of validator 1 are the expected: - assert_eq!( - ErasStakersPaged::::get((0, &1, 0)).unwrap(), - ExposurePage { - page_total: 600, - others: vec![ - IndividualExposure { who: 101, value: 500 }, - IndividualExposure { who: 102, value: 100 } - ] - }, - ); - assert_eq!( - ErasStakersPaged::::get((0, &1, 1)).unwrap(), - ExposurePage { - page_total: 350, - others: vec![ - IndividualExposure { who: 103, value: 100 }, - IndividualExposure { who: 110, value: 250 } - ] - } - ); - assert_eq!( - ErasStakersPaged::::get((0, &1, 2)).unwrap(), - ExposurePage { - page_total: 250, - others: vec![IndividualExposure { who: 111, value: 250 }] - } - ); - - // exposures of validator 2. 
- assert_eq!( - ErasStakersPaged::::iter_prefix_values((0, &2)).collect::>(), - vec![ExposurePage { - page_total: 1000, - others: vec![ - IndividualExposure { who: 104, value: 500 }, - IndividualExposure { who: 105, value: 500 } - ] - }], - ); - }) - } -} diff --git a/substrate/frame/staking/src/weights.rs b/substrate/frame/staking/src/weights.rs index 660d817bf30e2..e1da847f06631 100644 --- a/substrate/frame/staking/src/weights.rs +++ b/substrate/frame/staking/src/weights.rs @@ -72,8 +72,6 @@ use core::marker::PhantomData; /// Weight functions needed for `pallet_staking`. pub trait WeightInfo { - fn on_initialize_noop() -> Weight; - fn do_elect_paged_inner(v: u32, ) -> Weight; fn get_npos_voters(v: u32, n: u32, ) -> Weight; fn get_npos_targets(v: u32, ) -> Weight; fn bond() -> Weight; @@ -99,6 +97,7 @@ pub trait WeightInfo { fn payout_stakers_alive_staked(n: u32, ) -> Weight; fn rebond(l: u32, ) -> Weight; fn reap_stash(s: u32, ) -> Weight; + fn new_era(v: u32, n: u32, ) -> Weight; fn set_staking_configs_all_set() -> Weight; fn set_staking_configs_all_remove() -> Weight; fn chill_other() -> Weight; @@ -112,50 +111,6 @@ pub trait WeightInfo { /// Weights for `pallet_staking` using the Substrate node and recommended hardware. 
pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { - /// Storage: `Staking::ProcessingOffence` (r:1 w:0) - /// Proof: `Staking::ProcessingOffence` (`max_values`: Some(1), `max_size`: Some(85), added: 580, mode: `MaxEncodedLen`) - /// Storage: `Staking::OffenceQueueEras` (r:1 w:0) - /// Proof: `Staking::OffenceQueueEras` (`max_values`: Some(1), `max_size`: Some(2690), added: 3185, mode: `MaxEncodedLen`) - /// Storage: `Staking::ActiveEra` (r:1 w:0) - /// Proof: `Staking::ActiveEra` (`max_values`: Some(1), `max_size`: Some(13), added: 508, mode: `MaxEncodedLen`) - /// Storage: `Staking::NextElectionPage` (r:1 w:0) - /// Proof: `Staking::NextElectionPage` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xd93c9708f5182731b2e90757fd7abf7a` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xd93c9708f5182731b2e90757fd7abf7a` (r:1 w:0) - /// Storage: `Staking::CurrentEra` (r:1 w:0) - /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Staking::CurrentPlannedSession` (r:1 w:0) - /// Proof: `Staking::CurrentPlannedSession` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Staking::ErasStartSessionIndex` (r:1 w:0) - /// Proof: `Staking::ErasStartSessionIndex` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) - /// Storage: `Babe::EpochIndex` (r:1 w:0) - /// Proof: `Babe::EpochIndex` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Babe::GenesisSlot` (r:1 w:0) - /// Proof: `Babe::GenesisSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Babe::CurrentSlot` (r:1 w:0) - /// Proof: `Babe::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Staking::ForceEra` (r:1 w:0) - /// Proof: `Staking::ForceEra` 
(`max_values`: Some(1), `max_size`: Some(1), added: 496, mode: `MaxEncodedLen`) - /// Storage: `Staking::ElectableStashes` (r:1 w:0) - /// Proof: `Staking::ElectableStashes` (`max_values`: Some(1), `max_size`: Some(32002), added: 32497, mode: `MaxEncodedLen`) - fn on_initialize_noop() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `33487` - // Minimum execution time: 8_949_000 picoseconds. - Weight::from_parts(9_194_000, 33487) - .saturating_add(T::DbWeight::get().reads(13_u64)) - } - /// The range of component `v` is `[1, 1000]`. - fn do_elect_paged_inner(v: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 139_000 picoseconds. - Weight::from_parts(165_527, 0) - // Standard Error: 1 - .saturating_add(Weight::from_parts(5, 0).saturating_mul(v.into())) - } /// Storage: `VoterList::CounterForListNodes` (r:1 w:0) /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListBags` (r:180 w:0) @@ -730,6 +685,60 @@ impl WeightInfo for SubstrateWeight { .saturating_add(T::DbWeight::get().writes((1_u64).saturating_mul(s.into()))) .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into())) } + /// Storage: `VoterList::CounterForListNodes` (r:1 w:0) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:200 w:0) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:110 w:0) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:110 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: 
`Staking::Ledger` (r:110 w:0) + /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:110 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:11 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForValidators` (r:1 w:0) + /// Proof: `Staking::CounterForValidators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ValidatorCount` (r:1 w:0) + /// Proof: `Staking::ValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinimumValidatorCount` (r:1 w:0) + /// Proof: `Staking::MinimumValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:1) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasValidatorPrefs` (r:0 w:10) + /// Proof: `Staking::ErasValidatorPrefs` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStakersPaged` (r:0 w:10) + /// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ErasStakersOverview` (r:0 w:10) + /// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasTotalStake` (r:0 w:1) + /// Proof: `Staking::ErasTotalStake` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStartSessionIndex` (r:0 w:1) + /// Proof: `Staking::ErasStartSessionIndex` (`max_values`: None, `max_size`: Some(16), added: 
2491, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinimumActiveStake` (r:0 w:1) + /// Proof: `Staking::MinimumActiveStake` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// The range of component `v` is `[1, 10]`. + /// The range of component `n` is `[0, 100]`. + fn new_era(v: u32, n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0 + n * (720 ±0) + v * (3598 ±0)` + // Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)` + // Minimum execution time: 692_301_000 picoseconds. + Weight::from_parts(708_732_000, 512390) + // Standard Error: 2_117_299 + .saturating_add(Weight::from_parts(70_087_600, 0).saturating_mul(v.into())) + // Standard Error: 210_977 + .saturating_add(Weight::from_parts(22_953_405, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(206_u64)) + .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into()))) + .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into()))) + .saturating_add(T::DbWeight::get().writes(3_u64)) + .saturating_add(T::DbWeight::get().writes((3_u64).saturating_mul(v.into()))) + .saturating_add(Weight::from_parts(0, 3566).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(0, 3566).saturating_mul(v.into())) + } /// Storage: `Staking::MinCommission` (r:0 w:1) /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Staking::MinValidatorBond` (r:0 w:1) @@ -903,50 +912,6 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests. 
impl WeightInfo for () { - /// Storage: `Staking::ProcessingOffence` (r:1 w:0) - /// Proof: `Staking::ProcessingOffence` (`max_values`: Some(1), `max_size`: Some(85), added: 580, mode: `MaxEncodedLen`) - /// Storage: `Staking::OffenceQueueEras` (r:1 w:0) - /// Proof: `Staking::OffenceQueueEras` (`max_values`: Some(1), `max_size`: Some(2690), added: 3185, mode: `MaxEncodedLen`) - /// Storage: `Staking::ActiveEra` (r:1 w:0) - /// Proof: `Staking::ActiveEra` (`max_values`: Some(1), `max_size`: Some(13), added: 508, mode: `MaxEncodedLen`) - /// Storage: `Staking::NextElectionPage` (r:1 w:0) - /// Proof: `Staking::NextElectionPage` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: UNKNOWN KEY `0xd93c9708f5182731b2e90757fd7abf7a` (r:1 w:0) - /// Proof: UNKNOWN KEY `0xd93c9708f5182731b2e90757fd7abf7a` (r:1 w:0) - /// Storage: `Staking::CurrentEra` (r:1 w:0) - /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Staking::CurrentPlannedSession` (r:1 w:0) - /// Proof: `Staking::CurrentPlannedSession` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Staking::ErasStartSessionIndex` (r:1 w:0) - /// Proof: `Staking::ErasStartSessionIndex` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) - /// Storage: `Babe::EpochIndex` (r:1 w:0) - /// Proof: `Babe::EpochIndex` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Babe::GenesisSlot` (r:1 w:0) - /// Proof: `Babe::GenesisSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Babe::CurrentSlot` (r:1 w:0) - /// Proof: `Babe::CurrentSlot` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) - /// Storage: `Staking::ForceEra` (r:1 w:0) - /// Proof: `Staking::ForceEra` (`max_values`: Some(1), `max_size`: Some(1), added: 496, 
mode: `MaxEncodedLen`) - /// Storage: `Staking::ElectableStashes` (r:1 w:0) - /// Proof: `Staking::ElectableStashes` (`max_values`: Some(1), `max_size`: Some(32002), added: 32497, mode: `MaxEncodedLen`) - fn on_initialize_noop() -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `33487` - // Minimum execution time: 8_949_000 picoseconds. - Weight::from_parts(9_194_000, 33487) - .saturating_add(RocksDbWeight::get().reads(13_u64)) - } - /// The range of component `v` is `[1, 1000]`. - fn do_elect_paged_inner(v: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `0` - // Estimated: `0` - // Minimum execution time: 139_000 picoseconds. - Weight::from_parts(165_527, 0) - // Standard Error: 1 - .saturating_add(Weight::from_parts(5, 0).saturating_mul(v.into())) - } /// Storage: `VoterList::CounterForListNodes` (r:1 w:0) /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `VoterList::ListBags` (r:180 w:0) @@ -1521,6 +1486,60 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().writes((1_u64).saturating_mul(s.into()))) .saturating_add(Weight::from_parts(0, 4).saturating_mul(s.into())) } + /// Storage: `VoterList::CounterForListNodes` (r:1 w:0) + /// Proof: `VoterList::CounterForListNodes` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListBags` (r:200 w:0) + /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) + /// Storage: `VoterList::ListNodes` (r:110 w:0) + /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) + /// Storage: `Staking::Bonded` (r:110 w:0) + /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) + /// Storage: `Staking::Ledger` (r:110 w:0) + /// Proof: `Staking::Ledger` (`max_values`: 
None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) + /// Storage: `Staking::Nominators` (r:110 w:0) + /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) + /// Storage: `Staking::Validators` (r:11 w:0) + /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) + /// Storage: `Staking::CounterForValidators` (r:1 w:0) + /// Proof: `Staking::CounterForValidators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ValidatorCount` (r:1 w:0) + /// Proof: `Staking::ValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinimumValidatorCount` (r:1 w:0) + /// Proof: `Staking::MinimumValidatorCount` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::CurrentEra` (r:1 w:1) + /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasValidatorPrefs` (r:0 w:10) + /// Proof: `Staking::ErasValidatorPrefs` (`max_values`: None, `max_size`: Some(57), added: 2532, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStakersPaged` (r:0 w:10) + /// Proof: `Staking::ErasStakersPaged` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Staking::ErasStakersOverview` (r:0 w:10) + /// Proof: `Staking::ErasStakersOverview` (`max_values`: None, `max_size`: Some(92), added: 2567, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasTotalStake` (r:0 w:1) + /// Proof: `Staking::ErasTotalStake` (`max_values`: None, `max_size`: Some(28), added: 2503, mode: `MaxEncodedLen`) + /// Storage: `Staking::ErasStartSessionIndex` (r:0 w:1) + /// Proof: `Staking::ErasStartSessionIndex` (`max_values`: None, `max_size`: Some(16), added: 2491, mode: `MaxEncodedLen`) + /// Storage: `Staking::MinimumActiveStake` 
(r:0 w:1) + /// Proof: `Staking::MinimumActiveStake` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) + /// The range of component `v` is `[1, 10]`. + /// The range of component `n` is `[0, 100]`. + fn new_era(v: u32, n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `0 + n * (720 ±0) + v * (3598 ±0)` + // Estimated: `512390 + n * (3566 ±0) + v * (3566 ±0)` + // Minimum execution time: 692_301_000 picoseconds. + Weight::from_parts(708_732_000, 512390) + // Standard Error: 2_117_299 + .saturating_add(Weight::from_parts(70_087_600, 0).saturating_mul(v.into())) + // Standard Error: 210_977 + .saturating_add(Weight::from_parts(22_953_405, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(206_u64)) + .saturating_add(RocksDbWeight::get().reads((5_u64).saturating_mul(v.into()))) + .saturating_add(RocksDbWeight::get().reads((4_u64).saturating_mul(n.into()))) + .saturating_add(RocksDbWeight::get().writes(3_u64)) + .saturating_add(RocksDbWeight::get().writes((3_u64).saturating_mul(v.into()))) + .saturating_add(Weight::from_parts(0, 3566).saturating_mul(n.into())) + .saturating_add(Weight::from_parts(0, 3566).saturating_mul(v.into())) + } /// Storage: `Staking::MinCommission` (r:0 w:1) /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Staking::MinValidatorBond` (r:0 w:1) diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/outer_enums.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/outer_enums.rs index 1495bd210127b..23e240d6dbe11 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/outer_enums.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/outer_enums.rs @@ -161,7 +161,7 @@ pub fn expand_outer_enum( #scrate::__private::codec::Decode, #scrate::__private::codec::DecodeWithMemTracking, #scrate::__private::scale_info::TypeInfo, - 
#scrate::__private::Debug, + #scrate::__private::RuntimeDebug, )] #[allow(non_camel_case_types)] pub enum #enum_name_ident { diff --git a/substrate/frame/support/procedural/src/pallet/expand/event.rs b/substrate/frame/support/procedural/src/pallet/expand/event.rs index 8ebf077d0925d..7759500a61c59 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/event.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/event.rs @@ -120,7 +120,7 @@ pub fn expand_event(def: &mut Def) -> proc_macro2::TokenStream { #frame_support::CloneNoBound, #frame_support::EqNoBound, #frame_support::PartialEqNoBound, - #frame_support::DebugNoBound, + #frame_support::RuntimeDebugNoBound, #frame_support::__private::codec::Encode, #frame_support::__private::codec::Decode, #frame_support::__private::codec::DecodeWithMemTracking, diff --git a/substrate/frame/support/src/lib.rs b/substrate/frame/support/src/lib.rs index 86c7330d275de..4074f4d440996 100644 --- a/substrate/frame/support/src/lib.rs +++ b/substrate/frame/support/src/lib.rs @@ -43,7 +43,6 @@ extern crate alloc; pub mod __private { pub use alloc::{ boxed::Box, - fmt::Debug, rc::Rc, string::String, vec, diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr index b468b8647ca19..13d94e542850c 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr @@ -237,14 +237,12 @@ error[E0277]: the trait bound `Runtime: Config` is not satisfied ... 
| 27 | | } 28 | | } - | |_^ the trait `Config` is not implemented for `Runtime` + | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `frame_system::Event: std::fmt::Debug` | = help: the trait `std::fmt::Debug` is implemented for `frame_system::Event` = note: required for `frame_system::Event` to implement `std::fmt::Debug` - = note: 1 redundant requirement hidden - = note: required for `&frame_system::Event` to implement `std::fmt::Debug` - = note: required for the cast from `&&frame_system::Event` to `&dyn std::fmt::Debug` - = note: this error originates in the derive macro `self::sp_api_hidden_includes_construct_runtime::hidden_include::__private::Debug` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: required for the cast from `&frame_system::Event` to `&dyn std::fmt::Debug` + = note: this error originates in the derive macro `self::sp_api_hidden_includes_construct_runtime::hidden_include::__private::RuntimeDebug` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 @@ -256,14 +254,12 @@ error[E0277]: the trait bound `Runtime: Config` is not satisfied ... 
| 27 | | } 28 | | } - | |_^ the trait `Config` is not implemented for `Runtime` + | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `frame_system::Error: std::fmt::Debug` | = help: the trait `std::fmt::Debug` is implemented for `frame_system::Error` = note: required for `frame_system::Error` to implement `std::fmt::Debug` - = note: 1 redundant requirement hidden - = note: required for `&frame_system::Error` to implement `std::fmt::Debug` - = note: required for the cast from `&&frame_system::Error` to `&dyn std::fmt::Debug` - = note: this error originates in the derive macro `self::sp_api_hidden_includes_construct_runtime::hidden_include::__private::Debug` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: required for the cast from `&frame_system::Error` to `&dyn std::fmt::Debug` + = note: this error originates in the derive macro `self::sp_api_hidden_includes_construct_runtime::hidden_include::__private::RuntimeDebug` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 diff --git a/substrate/primitives/npos-elections/src/helpers.rs b/substrate/primitives/npos-elections/src/helpers.rs index 04f8a5648af85..7df6ec9d9dbaa 100644 --- a/substrate/primitives/npos-elections/src/helpers.rs +++ b/substrate/primitives/npos-elections/src/helpers.rs @@ -17,11 +17,8 @@ //! Helper methods for npos-elections. -use crate::{ - Assignment, Error, ExtendedBalance, IdentifierT, PerThing128, StakedAssignment, Supports, - VoteWeight, -}; -use alloc::{collections::BTreeMap, vec::Vec}; +use crate::{Assignment, Error, IdentifierT, PerThing128, StakedAssignment, VoteWeight}; +use alloc::vec::Vec; use sp_arithmetic::PerThing; /// Converts a vector of ratio assignments into ones with absolute budget value. 
@@ -53,7 +50,7 @@ where { let mut staked = assignment_ratio_to_staked(ratio, &stake_of); staked.iter_mut().try_for_each(|a| { - a.try_normalize(stake_of(&a.who).into()).map_err(|_| Error::ArithmeticError) + a.try_normalize(stake_of(&a.who).into()).map_err(Error::ArithmeticError) })?; Ok(staked) } @@ -73,28 +70,11 @@ pub fn assignment_staked_to_ratio_normalized( ) -> Result>, Error> { let mut ratio = staked.into_iter().map(|a| a.into_assignment()).collect::>(); for assignment in ratio.iter_mut() { - assignment.try_normalize().map_err(|_| Error::ArithmeticError)?; + assignment.try_normalize().map_err(Error::ArithmeticError)?; } Ok(ratio) } -/// Convert some [`Supports`]s into vector of [`StakedAssignment`] -pub fn supports_to_staked_assignment( - supports: Supports, -) -> Vec> { - let mut staked: BTreeMap> = BTreeMap::new(); - for (target, support) in supports { - for (voter, amount) in support.voters { - staked.entry(voter).or_default().push((target.clone(), amount)) - } - } - - staked - .into_iter() - .map(|(who, distribution)| StakedAssignment { who, distribution }) - .collect::>() -} - #[cfg(test)] mod tests { use super::*; diff --git a/substrate/primitives/npos-elections/src/lib.rs b/substrate/primitives/npos-elections/src/lib.rs index 6fcdd38d40107..f5a8ccb4351a4 100644 --- a/substrate/primitives/npos-elections/src/lib.rs +++ b/substrate/primitives/npos-elections/src/lib.rs @@ -83,7 +83,7 @@ use scale_info::TypeInfo; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use sp_arithmetic::{traits::Zero, Normalizable, PerThing, Rational128, ThresholdOrd}; -use sp_core::RuntimeDebug; +use sp_core::{bounded::BoundedVec, RuntimeDebug}; #[cfg(test)] mod mock; @@ -110,16 +110,7 @@ pub use reduce::reduce; pub use traits::{IdentifierT, PerThing128}; /// The errors that might occur in this crate and `frame-election-provider-solution-type`. 
-#[derive( - Eq, - PartialEq, - RuntimeDebug, - Clone, - codec::Encode, - codec::Decode, - codec::DecodeWithMemTracking, - scale_info::TypeInfo, -)] +#[derive(Eq, PartialEq, RuntimeDebug)] pub enum Error { /// While going from solution indices to ratio, the weight of all the edges has gone above the /// total. @@ -131,13 +122,11 @@ pub enum Error { /// One of the page indices was invalid. SolutionInvalidPageIndex, /// An error occurred in some arithmetic operation. - ArithmeticError, + ArithmeticError(&'static str), /// The data provided to create support map was invalid. InvalidSupportEdge, /// The number of voters is bigger than the `MaxVoters` bound. TooManyVoters, - /// Some bounds were exceeded when converting election types. - BoundsExceeded, } /// A type which is used in the API of this crate as a numeric weight of a vote, most often the @@ -467,18 +456,6 @@ impl Default for Support { } } -impl Support { - pub fn self_vote_only(who: AccountId, amount: ExtendedBalance) -> (AccountId, Self) { - (who.clone(), Self { total: amount, voters: vec![(who, amount)] }) - } -} - -impl Backings for &Support { - fn total(&self) -> ExtendedBalance { - self.total - } -} - /// A target-major representation of the the election outcome. /// /// Essentially a flat variant of [`SupportMap`]. @@ -486,6 +463,11 @@ impl Backings for &Support { /// The main advantage of this is that it is encodable. pub type Supports = Vec<(A, Support)>; +/// Same as `Supports` but bounded by `B`. +/// +/// To note, the inner `Support` is still unbounded. +pub type BoundedSupports = BoundedVec<(A, Support), B>; + /// Linkage from a winner to their [`Support`]. /// /// This is more helpful than a normal [`Supports`] as it allows faster error checking. @@ -509,7 +491,8 @@ pub fn to_support_map( supports } -/// Same as [`to_support_map`] except it returns a flat vector. +/// Same as [`to_support_map`] except it returns a +/// flat vector. 
pub fn to_supports( assignments: &[StakedAssignment], ) -> Supports { @@ -528,34 +511,23 @@ pub trait EvaluateSupport { impl EvaluateSupport for Supports { fn evaluate(&self) -> ElectionScore { - evaluate_support(self.iter().map(|(_, s)| s)) - } -} - -/// Generic representation of a support. -pub trait Backings { - /// The total backing of an individual target. - fn total(&self) -> ExtendedBalance; -} - -/// General evaluation of a list of backings that returns an election score. -pub fn evaluate_support(backings: impl Iterator) -> ElectionScore { - let mut minimal_stake = ExtendedBalance::max_value(); - let mut sum_stake: ExtendedBalance = Zero::zero(); - // NOTE: The third element might saturate but fine for now since this will run on-chain and - // need to be fast. - let mut sum_stake_squared: ExtendedBalance = Zero::zero(); - - for support in backings { - sum_stake = sum_stake.saturating_add(support.total()); - let squared = support.total().saturating_mul(support.total()); - sum_stake_squared = sum_stake_squared.saturating_add(squared); - if support.total() < minimal_stake { - minimal_stake = support.total(); + let mut minimal_stake = ExtendedBalance::max_value(); + let mut sum_stake: ExtendedBalance = Zero::zero(); + // NOTE: The third element might saturate but fine for now since this will run on-chain and + // need to be fast. + let mut sum_stake_squared: ExtendedBalance = Zero::zero(); + + for (_, support) in self { + sum_stake = sum_stake.saturating_add(support.total); + let squared = support.total.saturating_mul(support.total); + sum_stake_squared = sum_stake_squared.saturating_add(squared); + if support.total < minimal_stake { + minimal_stake = support.total; + } } - } - ElectionScore { minimal_stake, sum_stake, sum_stake_squared } + ElectionScore { minimal_stake, sum_stake, sum_stake_squared } + } } /// Converts raw inputs to types used in this crate. 
diff --git a/substrate/primitives/npos-elections/src/phragmen.rs b/substrate/primitives/npos-elections/src/phragmen.rs index 404c2ff8e6b69..f331152e722a2 100644 --- a/substrate/primitives/npos-elections/src/phragmen.rs +++ b/substrate/primitives/npos-elections/src/phragmen.rs @@ -97,7 +97,7 @@ pub fn seq_phragmen( voters.into_iter().filter_map(|v| v.into_assignment()).collect::>(); let _ = assignments .iter_mut() - .try_for_each(|a| a.try_normalize().map_err(|_| crate::Error::ArithmeticError))?; + .try_for_each(|a| a.try_normalize().map_err(crate::Error::ArithmeticError))?; let winners = winners .into_iter() .map(|w_ptr| (w_ptr.borrow().who.clone(), w_ptr.borrow().backed_stake)) @@ -205,7 +205,7 @@ pub fn seq_phragmen_core( // edge of all candidates that eventually have a non-zero weight must be elected. debug_assert!(voter.edges.iter().all(|e| e.candidate.borrow().elected)); // inc budget to sum the budget. - voter.try_normalize_elected().map_err(|_| crate::Error::ArithmeticError)?; + voter.try_normalize_elected().map_err(crate::Error::ArithmeticError)?; } Ok((candidates, voters)) diff --git a/substrate/primitives/npos-elections/src/phragmms.rs b/substrate/primitives/npos-elections/src/phragmms.rs index 6a44bf8651254..9a17f0dfa7ce9 100644 --- a/substrate/primitives/npos-elections/src/phragmms.rs +++ b/substrate/primitives/npos-elections/src/phragmms.rs @@ -71,7 +71,7 @@ pub fn phragmms( let _ = assignments .iter_mut() .try_for_each(|a| a.try_normalize()) - .map_err(|_| crate::Error::ArithmeticError)?; + .map_err(crate::Error::ArithmeticError)?; let winners = winners .into_iter() .map(|w_ptr| (w_ptr.borrow().who.clone(), w_ptr.borrow().backed_stake)) diff --git a/substrate/primitives/staking/src/lib.rs b/substrate/primitives/staking/src/lib.rs index 863e6cbe2b20f..11eb916f702de 100644 --- a/substrate/primitives/staking/src/lib.rs +++ b/substrate/primitives/staking/src/lib.rs @@ -25,7 +25,7 @@ extern crate alloc; use crate::currency_to_vote::CurrencyToVote; use 
alloc::{collections::btree_map::BTreeMap, vec, vec::Vec}; use codec::{Decode, DecodeWithMemTracking, Encode, FullCodec, HasCompact, MaxEncodedLen}; -use core::ops::{Add, AddAssign, Sub, SubAssign}; +use core::ops::{AddAssign, Sub}; use scale_info::TypeInfo; use sp_runtime::{ traits::{AtLeast32BitUnsigned, Zero}, @@ -402,31 +402,7 @@ impl< Balance: HasCompact + AtLeast32BitUnsigned + Copy + codec::MaxEncodedLen, > Exposure { - /// Splits self into two instances of exposures. - /// - /// `n_others` individual exposures are consumed from self and returned as part of the new - /// exposure. - /// - /// Since this method splits `others` of a single exposure, `total.own` will be the same for - /// both `self` and the returned exposure. - pub fn split_others(&mut self, n_others: u32) -> Self { - let head_others: Vec<_> = - self.others.drain(..(n_others as usize).min(self.others.len())).collect(); - - let total_others_head: Balance = head_others - .iter() - .fold(Zero::zero(), |acc: Balance, o| acc.saturating_add(o.value)); - - self.total = self.total.saturating_sub(total_others_head); - - Self { - total: total_others_head.saturating_add(self.own), - own: self.own, - others: head_others, - } - } - - /// Converts an `Exposure` into `PagedExposureMetadata` and multiple chunks of + /// Splits an `Exposure` into `PagedExposureMetadata` and multiple chunks of /// `IndividualExposure` with each chunk having maximum of `page_size` elements. pub fn into_pages( self, @@ -447,6 +423,7 @@ impl< value: individual.value, }) } + exposure_pages.push(ExposurePage { page_total, others }); } @@ -478,19 +455,6 @@ impl Default for ExposurePage { } } -/// Returns an exposure page from a set of individual exposures. 
-impl From>> - for ExposurePage -{ - fn from(exposures: Vec>) -> Self { - exposures.into_iter().fold(ExposurePage::default(), |mut page, e| { - page.page_total += e.value.clone(); - page.others.push(e); - page - }) - } -} - /// Metadata for Paged Exposure of a validator such as total stake across pages and page count. /// /// In combination with the associated `ExposurePage`s, it can be used to reconstruct a full @@ -508,7 +472,6 @@ impl From { /// The total balance backing this validator. @@ -523,42 +486,6 @@ pub struct PagedExposureMetadata { pub page_count: Page, } -impl PagedExposureMetadata -where - Balance: HasCompact - + codec::MaxEncodedLen - + Add - + Sub - + sp_runtime::Saturating - + PartialEq - + Copy - + sp_runtime::traits::Debug, -{ - /// Consumes self and returns the result of the metadata updated with `other_balances` and - /// of adding `other_num` nominators to the metadata. - /// - /// `Max` is a getter of the maximum number of nominators per page. - pub fn update_with>( - self, - others_balance: Balance, - others_num: u32, - ) -> Self { - let page_limit = Max::get().max(1); - let new_nominator_count = self.nominator_count.saturating_add(others_num); - let new_page_count = new_nominator_count - .saturating_add(page_limit) - .saturating_sub(1) - .saturating_div(page_limit); - - Self { - total: self.total.saturating_add(others_balance), - own: self.own, - nominator_count: new_nominator_count, - page_count: new_page_count, - } - } -} - /// A type that belongs only in the context of an `Agent`. /// /// `Agent` is someone that manages delegated funds from [`Delegator`] accounts. 
It can @@ -719,114 +646,3 @@ pub trait DelegationMigrator { } sp_core::generate_feature_enabled_macro!(runtime_benchmarks_enabled, feature = "runtime-benchmarks", $); - -#[cfg(test)] -mod tests { - use sp_core::ConstU32; - - use super::*; - - #[test] - fn update_with_works() { - let metadata = PagedExposureMetadata:: { - total: 1000, - own: 0, // don't care - nominator_count: 10, - page_count: 1, - }; - - assert_eq!( - metadata.update_with::>(1, 1), - PagedExposureMetadata { total: 1001, own: 0, nominator_count: 11, page_count: 2 }, - ); - - assert_eq!( - metadata.update_with::>(1, 1), - PagedExposureMetadata { total: 1001, own: 0, nominator_count: 11, page_count: 3 }, - ); - - assert_eq!( - metadata.update_with::>(1, 1), - PagedExposureMetadata { total: 1001, own: 0, nominator_count: 11, page_count: 3 }, - ); - - assert_eq!( - metadata.update_with::>(1, 1), - PagedExposureMetadata { total: 1001, own: 0, nominator_count: 11, page_count: 11 }, - ); - } - - #[test] - fn individual_exposures_to_exposure_works() { - let exposure_1 = IndividualExposure { who: 1, value: 10u32 }; - let exposure_2 = IndividualExposure { who: 2, value: 20 }; - let exposure_3 = IndividualExposure { who: 3, value: 30 }; - - let exposure_page: ExposurePage = vec![exposure_1, exposure_2, exposure_3].into(); - - assert_eq!( - exposure_page, - ExposurePage { page_total: 60, others: vec![exposure_1, exposure_2, exposure_3] }, - ); - } - - #[test] - fn empty_individual_exposures_to_exposure_works() { - let empty_exposures: Vec> = vec![]; - - let exposure_page: ExposurePage = empty_exposures.into(); - assert_eq!(exposure_page, ExposurePage { page_total: 0, others: vec![] }); - } - - #[test] - fn exposure_split_others_works() { - let exposure = Exposure { - total: 100, - own: 20, - others: vec![ - IndividualExposure { who: 1, value: 20u32 }, - IndividualExposure { who: 2, value: 20 }, - IndividualExposure { who: 3, value: 20 }, - IndividualExposure { who: 4, value: 20 }, - ], - }; - - let mut 
exposure_0 = exposure.clone(); - // split others with with 0 `n_others` is a noop and returns an empty exposure (with `own` - // only). - let split_exposure = exposure_0.split_others(0); - assert_eq!(exposure_0, exposure); - assert_eq!(split_exposure, Exposure { total: 20, own: 20, others: vec![] }); - - let mut exposure_1 = exposure.clone(); - // split individual exposures so that the returned exposure has 1 individual exposure. - let split_exposure = exposure_1.split_others(1); - assert_eq!(exposure_1.own, 20); - assert_eq!(exposure_1.total, 20 + 3 * 20); - assert_eq!(exposure_1.others.len(), 3); - - assert_eq!(split_exposure.own, 20); - assert_eq!(split_exposure.total, 20 + 1 * 20); - assert_eq!(split_exposure.others.len(), 1); - - let mut exposure_3 = exposure.clone(); - // split individual exposures so that the returned exposure has 3 individual exposures, - // which are consumed from the original exposure. - let split_exposure = exposure_3.split_others(3); - assert_eq!(exposure_3.own, 20); - assert_eq!(exposure_3.total, 20 + 1 * 20); - assert_eq!(exposure_3.others.len(), 1); - - assert_eq!(split_exposure.own, 20); - assert_eq!(split_exposure.total, 20 + 3 * 20); - assert_eq!(split_exposure.others.len(), 3); - - let mut exposure_max = exposure.clone(); - // split others with with more `n_others` than the number of others in the exposure - // consumes all the individual exposures of the original Exposure and returns them in the - // new exposure. - let split_exposure = exposure_max.split_others(u32::MAX); - assert_eq!(split_exposure, exposure); - assert_eq!(exposure_max, Exposure { total: 20, own: 20, others: vec![] }); - } -} diff --git a/substrate/primitives/staking/src/offence.rs b/substrate/primitives/staking/src/offence.rs index 9e3c0e5a1946b..e73e8efe58396 100644 --- a/substrate/primitives/staking/src/offence.rs +++ b/substrate/primitives/staking/src/offence.rs @@ -19,7 +19,7 @@ //! that use staking. 
use alloc::vec::Vec; -use codec::{Decode, Encode, MaxEncodedLen}; +use codec::{Decode, Encode}; use sp_core::Get; use sp_runtime::{transaction_validity::TransactionValidityError, DispatchError, Perbill}; @@ -252,15 +252,7 @@ impl OffenceReportSystem for () { /// For instance used for the purposes of distinguishing who should be /// prioritized for disablement. #[derive( - Clone, - Copy, - PartialEq, - Eq, - Encode, - Decode, - MaxEncodedLen, - sp_runtime::RuntimeDebug, - scale_info::TypeInfo, + Clone, Copy, PartialEq, Eq, Encode, Decode, sp_runtime::RuntimeDebug, scale_info::TypeInfo, )] pub struct OffenceSeverity(pub Perbill); diff --git a/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs b/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs index af9118140d91c..5501f37103baa 100644 --- a/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs +++ b/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs @@ -600,7 +600,6 @@ impl PalletCmd { let benchmark_name = &benchmark.name; if extrinsic.is_empty() || extrinsic.as_bytes() == &b"*"[..] || - extrinsic.as_bytes() == &b"all"[..] 
|| extrinsics.contains(&&benchmark_name[..]) { benchmarks_to_run.push(( @@ -648,10 +647,7 @@ impl PalletCmd { fn pallet_selected(&self, pallet: &Vec) -> bool { let include = self.pallet.clone().unwrap_or_default(); - let included = include.is_empty() || - include == "*" || - include == "all" || - include.as_bytes() == pallet; + let included = include.is_empty() || include == "*" || include.as_bytes() == pallet; let excluded = self.exclude_pallets.iter().any(|p| p.as_bytes() == pallet); included && !excluded diff --git a/substrate/utils/frame/benchmarking-cli/src/pallet/mod.rs b/substrate/utils/frame/benchmarking-cli/src/pallet/mod.rs index caa999c3a6c58..54a055d4a33f9 100644 --- a/substrate/utils/frame/benchmarking-cli/src/pallet/mod.rs +++ b/substrate/utils/frame/benchmarking-cli/src/pallet/mod.rs @@ -50,7 +50,7 @@ pub struct PalletCmd { #[arg(short, long, value_parser = parse_pallet_name, required_unless_present_any = ["list", "json_input", "all"], default_value_if("all", "true", Some("*".into())))] pub pallet: Option, - /// Select an extrinsic inside the pallet to benchmark, or `*` or 'all' for all. + /// Select an extrinsic inside the pallet to benchmark, or `*` for all. 
#[arg(short, long, required_unless_present_any = ["list", "json_input", "all"], default_value_if("all", "true", Some("*".into())))] pub extrinsic: Option, diff --git a/umbrella/Cargo.toml b/umbrella/Cargo.toml index 9d010bd9b8e9d..e87b7ada6612f 100644 --- a/umbrella/Cargo.toml +++ b/umbrella/Cargo.toml @@ -88,7 +88,6 @@ std = [ "pallet-delegated-staking?/std", "pallet-democracy?/std", "pallet-dev-mode?/std", - "pallet-election-provider-multi-block?/std", "pallet-election-provider-multi-phase?/std", "pallet-election-provider-support-benchmarking?/std", "pallet-elections-phragmen?/std", @@ -284,7 +283,6 @@ runtime-benchmarks = [ "pallet-core-fellowship?/runtime-benchmarks", "pallet-delegated-staking?/runtime-benchmarks", "pallet-democracy?/runtime-benchmarks", - "pallet-election-provider-multi-block?/runtime-benchmarks", "pallet-election-provider-multi-phase?/runtime-benchmarks", "pallet-election-provider-support-benchmarking?/runtime-benchmarks", "pallet-elections-phragmen?/runtime-benchmarks", @@ -422,7 +420,6 @@ try-runtime = [ "pallet-delegated-staking?/try-runtime", "pallet-democracy?/try-runtime", "pallet-dev-mode?/try-runtime", - "pallet-election-provider-multi-block?/try-runtime", "pallet-election-provider-multi-phase?/try-runtime", "pallet-elections-phragmen?/try-runtime", "pallet-fast-unstake?/try-runtime", @@ -552,241 +549,8 @@ with-tracing = [ "sp-tracing?/with-tracing", "sp-tracing?/with-tracing", ] -runtime-full = [ - "assets-common", - "binary-merkle-tree", - "bp-header-chain", - "bp-messages", - "bp-parachains", - "bp-polkadot", - "bp-polkadot-core", - "bp-relayers", - "bp-runtime", - "bp-test-utils", - "bp-xcm-bridge-hub", - "bp-xcm-bridge-hub-router", - "bridge-hub-common", - "bridge-runtime-common", - "cumulus-pallet-aura-ext", - "cumulus-pallet-dmp-queue", - "cumulus-pallet-parachain-system", - "cumulus-pallet-parachain-system-proc-macro", - "cumulus-pallet-session-benchmarking", - "cumulus-pallet-solo-to-para", - "cumulus-pallet-weight-reclaim", 
- "cumulus-pallet-xcm", - "cumulus-pallet-xcmp-queue", - "cumulus-ping", - "cumulus-primitives-aura", - "cumulus-primitives-core", - "cumulus-primitives-parachain-inherent", - "cumulus-primitives-proof-size-hostfunction", - "cumulus-primitives-storage-weight-reclaim", - "cumulus-primitives-timestamp", - "cumulus-primitives-utility", - "frame-benchmarking", - "frame-benchmarking-pallet-pov", - "frame-election-provider-solution-type", - "frame-election-provider-support", - "frame-executive", - "frame-metadata-hash-extension", - "frame-support", - "frame-support-procedural", - "frame-support-procedural-tools-derive", - "frame-system", - "frame-system-benchmarking", - "frame-system-rpc-runtime-api", - "frame-try-runtime", - "pallet-alliance", - "pallet-asset-conversion", - "pallet-asset-conversion-ops", - "pallet-asset-conversion-tx-payment", - "pallet-asset-rate", - "pallet-asset-rewards", - "pallet-asset-tx-payment", - "pallet-assets", - "pallet-assets-freezer", - "pallet-assets-holder", - "pallet-atomic-swap", - "pallet-aura", - "pallet-authority-discovery", - "pallet-authorship", - "pallet-babe", - "pallet-bags-list", - "pallet-balances", - "pallet-beefy", - "pallet-beefy-mmr", - "pallet-bounties", - "pallet-bridge-grandpa", - "pallet-bridge-messages", - "pallet-bridge-parachains", - "pallet-bridge-relayers", - "pallet-broker", - "pallet-child-bounties", - "pallet-collator-selection", - "pallet-collective", - "pallet-collective-content", - "pallet-contracts", - "pallet-contracts-proc-macro", - "pallet-contracts-uapi", - "pallet-conviction-voting", - "pallet-core-fellowship", - "pallet-delegated-staking", - "pallet-democracy", - "pallet-dev-mode", - "pallet-election-provider-multi-block", - "pallet-election-provider-multi-phase", - "pallet-election-provider-support-benchmarking", - "pallet-elections-phragmen", - "pallet-fast-unstake", - "pallet-glutton", - "pallet-grandpa", - "pallet-identity", - "pallet-im-online", - "pallet-indices", - 
"pallet-insecure-randomness-collective-flip", - "pallet-lottery", - "pallet-membership", - "pallet-message-queue", - "pallet-migrations", - "pallet-mixnet", - "pallet-mmr", - "pallet-multisig", - "pallet-nft-fractionalization", - "pallet-nfts", - "pallet-nfts-runtime-api", - "pallet-nis", - "pallet-node-authorization", - "pallet-nomination-pools", - "pallet-nomination-pools-benchmarking", - "pallet-nomination-pools-runtime-api", - "pallet-offences", - "pallet-offences-benchmarking", - "pallet-paged-list", - "pallet-parameters", - "pallet-preimage", - "pallet-proxy", - "pallet-ranked-collective", - "pallet-recovery", - "pallet-referenda", - "pallet-remark", - "pallet-revive", - "pallet-revive-proc-macro", - "pallet-revive-uapi", - "pallet-root-offences", - "pallet-root-testing", - "pallet-safe-mode", - "pallet-salary", - "pallet-scheduler", - "pallet-scored-pool", - "pallet-session", - "pallet-session-benchmarking", - "pallet-skip-feeless-payment", - "pallet-society", - "pallet-staking", - "pallet-staking-reward-curve", - "pallet-staking-reward-fn", - "pallet-staking-runtime-api", - "pallet-state-trie-migration", - "pallet-statement", - "pallet-sudo", - "pallet-timestamp", - "pallet-tips", - "pallet-transaction-payment", - "pallet-transaction-payment-rpc-runtime-api", - "pallet-transaction-storage", - "pallet-treasury", - "pallet-tx-pause", - "pallet-uniques", - "pallet-utility", - "pallet-verify-signature", - "pallet-vesting", - "pallet-whitelist", - "pallet-xcm", - "pallet-xcm-benchmarks", - "pallet-xcm-bridge-hub", - "pallet-xcm-bridge-hub-router", - "parachains-common", - "polkadot-core-primitives", - "polkadot-parachain-primitives", - "polkadot-primitives", - "polkadot-runtime-common", - "polkadot-runtime-metrics", - "polkadot-runtime-parachains", - "polkadot-sdk-frame", - "sc-chain-spec-derive", - "sc-tracing-proc-macro", - "slot-range-helper", - "snowbridge-beacon-primitives", - "snowbridge-core", - "snowbridge-ethereum", - 
"snowbridge-outbound-queue-merkle-tree", - "snowbridge-outbound-queue-runtime-api", - "snowbridge-pallet-ethereum-client", - "snowbridge-pallet-ethereum-client-fixtures", - "snowbridge-pallet-inbound-queue", - "snowbridge-pallet-inbound-queue-fixtures", - "snowbridge-pallet-outbound-queue", - "snowbridge-pallet-system", - "snowbridge-router-primitives", - "snowbridge-runtime-common", - "snowbridge-system-runtime-api", - "sp-api", - "sp-api-proc-macro", - "sp-application-crypto", - "sp-arithmetic", - "sp-authority-discovery", - "sp-block-builder", - "sp-consensus-aura", - "sp-consensus-babe", - "sp-consensus-beefy", - "sp-consensus-grandpa", - "sp-consensus-pow", - "sp-consensus-slots", - "sp-core", - "sp-crypto-ec-utils", - "sp-crypto-hashing", - "sp-crypto-hashing-proc-macro", - "sp-debug-derive", - "sp-externalities", - "sp-genesis-builder", - "sp-inherents", - "sp-io", - "sp-keyring", - "sp-keystore", - "sp-metadata-ir", - "sp-mixnet", - "sp-mmr-primitives", - "sp-npos-elections", - "sp-offchain", - "sp-runtime", - "sp-runtime-interface", - "sp-runtime-interface-proc-macro", - "sp-session", - "sp-staking", - "sp-state-machine", - "sp-statement-store", - "sp-std", - "sp-storage", - "sp-timestamp", - "sp-tracing", - "sp-transaction-pool", - "sp-transaction-storage-proof", - "sp-trie", - "sp-version", - "sp-version-proc-macro", - "sp-wasm-interface", - "sp-weights", - "staging-parachain-info", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", - "substrate-bip39", - "testnet-parachains-constants", - "tracing-gum-proc-macro", - "xcm-procedural", - "xcm-runtime-apis", -] + +runtime-full = ["assets-common", "binary-merkle-tree", "bp-header-chain", "bp-messages", "bp-parachains", "bp-polkadot", "bp-polkadot-core", "bp-relayers", "bp-runtime", "bp-test-utils", "bp-xcm-bridge-hub", "bp-xcm-bridge-hub-router", "bridge-hub-common", "bridge-runtime-common", "cumulus-pallet-aura-ext", "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", 
"cumulus-pallet-parachain-system-proc-macro", "cumulus-pallet-session-benchmarking", "cumulus-pallet-solo-to-para", "cumulus-pallet-weight-reclaim", "cumulus-pallet-xcm", "cumulus-pallet-xcmp-queue", "cumulus-ping", "cumulus-primitives-aura", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", "cumulus-primitives-proof-size-hostfunction", "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-timestamp", "cumulus-primitives-utility", "frame-benchmarking", "frame-benchmarking-pallet-pov", "frame-election-provider-solution-type", "frame-election-provider-support", "frame-executive", "frame-metadata-hash-extension", "frame-support", "frame-support-procedural", "frame-support-procedural-tools-derive", "frame-system", "frame-system-benchmarking", "frame-system-rpc-runtime-api", "frame-try-runtime", "pallet-alliance", "pallet-asset-conversion", "pallet-asset-conversion-ops", "pallet-asset-conversion-tx-payment", "pallet-asset-rate", "pallet-asset-rewards", "pallet-asset-tx-payment", "pallet-assets", "pallet-assets-freezer", "pallet-atomic-swap", "pallet-aura", "pallet-authority-discovery", "pallet-authorship", "pallet-babe", "pallet-bags-list", "pallet-balances", "pallet-beefy", "pallet-beefy-mmr", "pallet-bounties", "pallet-bridge-grandpa", "pallet-bridge-messages", "pallet-bridge-parachains", "pallet-bridge-relayers", "pallet-broker", "pallet-child-bounties", "pallet-collator-selection", "pallet-collective", "pallet-collective-content", "pallet-contracts", "pallet-contracts-proc-macro", "pallet-contracts-uapi", "pallet-conviction-voting", "pallet-core-fellowship", "pallet-delegated-staking", "pallet-democracy", "pallet-dev-mode", "pallet-election-provider-multi-phase", "pallet-election-provider-support-benchmarking", "pallet-elections-phragmen", "pallet-fast-unstake", "pallet-glutton", "pallet-grandpa", "pallet-identity", "pallet-im-online", "pallet-indices", "pallet-insecure-randomness-collective-flip", "pallet-lottery", "pallet-membership", 
"pallet-message-queue", "pallet-migrations", "pallet-mixnet", "pallet-mmr", "pallet-multisig", "pallet-nft-fractionalization", "pallet-nfts", "pallet-nfts-runtime-api", "pallet-nis", "pallet-node-authorization", "pallet-nomination-pools", "pallet-nomination-pools-benchmarking", "pallet-nomination-pools-runtime-api", "pallet-offences", "pallet-offences-benchmarking", "pallet-paged-list", "pallet-parameters", "pallet-preimage", "pallet-proxy", "pallet-ranked-collective", "pallet-recovery", "pallet-referenda", "pallet-remark", "pallet-revive", "pallet-revive-proc-macro", "pallet-revive-uapi", "pallet-root-offences", "pallet-root-testing", "pallet-safe-mode", "pallet-salary", "pallet-scheduler", "pallet-scored-pool", "pallet-session", "pallet-session-benchmarking", "pallet-skip-feeless-payment", "pallet-society", "pallet-staking", "pallet-staking-reward-curve", "pallet-staking-reward-fn", "pallet-staking-runtime-api", "pallet-state-trie-migration", "pallet-statement", "pallet-sudo", "pallet-timestamp", "pallet-tips", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", "pallet-transaction-storage", "pallet-treasury", "pallet-tx-pause", "pallet-uniques", "pallet-utility", "pallet-verify-signature", "pallet-vesting", "pallet-whitelist", "pallet-xcm", "pallet-xcm-benchmarks", "pallet-xcm-bridge-hub", "pallet-xcm-bridge-hub-router", "parachains-common", "polkadot-core-primitives", "polkadot-parachain-primitives", "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-metrics", "polkadot-runtime-parachains", "polkadot-sdk-frame", "sc-chain-spec-derive", "sc-tracing-proc-macro", "slot-range-helper", "snowbridge-beacon-primitives", "snowbridge-core", "snowbridge-ethereum", "snowbridge-outbound-queue-merkle-tree", "snowbridge-outbound-queue-runtime-api", "snowbridge-pallet-ethereum-client", "snowbridge-pallet-ethereum-client-fixtures", "snowbridge-pallet-inbound-queue", "snowbridge-pallet-inbound-queue-fixtures", 
"snowbridge-pallet-outbound-queue", "snowbridge-pallet-system", "snowbridge-router-primitives", "snowbridge-runtime-common", "snowbridge-system-runtime-api", "sp-api", "sp-api-proc-macro", "sp-application-crypto", "sp-arithmetic", "sp-authority-discovery", "sp-block-builder", "sp-consensus-aura", "sp-consensus-babe", "sp-consensus-beefy", "sp-consensus-grandpa", "sp-consensus-pow", "sp-consensus-slots", "sp-core", "sp-crypto-ec-utils", "sp-crypto-hashing", "sp-crypto-hashing-proc-macro", "sp-debug-derive", "sp-externalities", "sp-genesis-builder", "sp-inherents", "sp-io", "sp-keyring", "sp-keystore", "sp-metadata-ir", "sp-mixnet", "sp-mmr-primitives", "sp-npos-elections", "sp-offchain", "sp-runtime", "sp-runtime-interface", "sp-runtime-interface-proc-macro", "sp-session", "sp-staking", "sp-state-machine", "sp-statement-store", "sp-std", "sp-storage", "sp-timestamp", "sp-tracing", "sp-transaction-pool", "sp-transaction-storage-proof", "sp-trie", "sp-version", "sp-version-proc-macro", "sp-wasm-interface", "sp-weights", "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", "substrate-bip39", "testnet-parachains-constants", "tracing-gum-proc-macro", "xcm-procedural", "xcm-runtime-apis"] runtime = [ "frame-benchmarking", "frame-benchmarking-pallet-pov", @@ -1422,11 +1186,6 @@ default-features = false optional = true path = "../substrate/frame/examples/dev-mode" -[dependencies.pallet-election-provider-multi-block] -default-features = false -optional = true -path = "../substrate/frame/election-provider-multi-block" - [dependencies.pallet-election-provider-multi-phase] default-features = false optional = true diff --git a/umbrella/src/lib.rs b/umbrella/src/lib.rs index 89cd300b418f6..4d28c7f542dff 100644 --- a/umbrella/src/lib.rs +++ b/umbrella/src/lib.rs @@ -447,10 +447,6 @@ pub use pallet_democracy; #[cfg(feature = "pallet-dev-mode")] pub use pallet_dev_mode; -/// PALLET multi phase+block election providers. 
-#[cfg(feature = "pallet-election-provider-multi-block")] -pub use pallet_election_provider_multi_block; - /// PALLET two phase election providers. #[cfg(feature = "pallet-election-provider-multi-phase")] pub use pallet_election_provider_multi_phase; From dcddac6ea38069c4d878214b3e3c56912670b1a0 Mon Sep 17 00:00:00 2001 From: Ankan Date: Mon, 17 Mar 2025 00:00:45 +0100 Subject: [PATCH 04/38] test compiles, 17 failure --- substrate/frame/staking/src/mock.rs | 11 ++++++++++- substrate/frame/staking/src/tests.rs | 17 ++--------------- substrate/primitives/staking/src/lib.rs | 2 +- 3 files changed, 13 insertions(+), 17 deletions(-) diff --git a/substrate/frame/staking/src/mock.rs b/substrate/frame/staking/src/mock.rs index 3915ee8d745e7..f74b3c48cc776 100644 --- a/substrate/frame/staking/src/mock.rs +++ b/substrate/frame/staking/src/mock.rs @@ -634,6 +634,11 @@ pub(crate) fn run_to_block(n: BlockNumber) { ); } +/// Progress by n blocks. +pub(crate) fn advance_blocks(n: u64) { + run_to_block(System::block_number() + n); +} + /// Progresses from the current block number (whatever that may be) to the `P * session_index + 1`. 
pub(crate) fn start_session(end_session_idx: SessionIndex) { let period = Period::get(); @@ -733,7 +738,11 @@ pub(crate) fn on_offence_in_era( let bonded_eras = crate::BondedEras::::get(); for &(bonded_era, start_session) in bonded_eras.iter() { if bonded_era == era { - let _ = Staking::on_offence(offenders, slash_fraction, start_session); + let _ = >::on_offence( + offenders, + slash_fraction, + start_session + ); return } else if bonded_era > era { break diff --git a/substrate/frame/staking/src/tests.rs b/substrate/frame/staking/src/tests.rs index 15f7015049c23..12ef73a2c201a 100644 --- a/substrate/frame/staking/src/tests.rs +++ b/substrate/frame/staking/src/tests.rs @@ -3476,7 +3476,6 @@ fn slash_kicks_validators_not_nominators_and_disables_nominator_for_kicked_valid fraction: Perbill::from_percent(10), slash_era: 1 }, - Event::ValidatorDisabled { stash: 11 }, Event::Slashed { staker: 11, amount: 100 }, Event::Slashed { staker: 101, amount: 12 }, ] @@ -3554,13 +3553,11 @@ fn non_slashable_offence_disables_validator() { fraction: Perbill::from_percent(0), slash_era: 1 }, - Event::ValidatorDisabled { stash: 11 }, Event::SlashReported { validator: 21, fraction: Perbill::from_percent(25), slash_era: 1 }, - Event::ValidatorDisabled { stash: 21 }, Event::Slashed { staker: 21, amount: 250 }, Event::Slashed { staker: 101, amount: 94 } ] @@ -3643,7 +3640,6 @@ fn slashing_independent_of_disabling_validator() { fraction: Perbill::from_percent(0), slash_era: 1 }, - Event::ValidatorDisabled { stash: 11 }, Event::SlashReported { validator: 11, fraction: Perbill::from_percent(50), @@ -8848,7 +8844,6 @@ fn reenable_lower_offenders_mock() { fraction: Perbill::from_percent(10), slash_era: 1 }, - Event::ValidatorDisabled { stash: 11 }, Event::Slashed { staker: 11, amount: 100 }, Event::Slashed { staker: 101, amount: 12 }, Event::SlashReported { @@ -8856,7 +8851,6 @@ fn reenable_lower_offenders_mock() { fraction: Perbill::from_percent(20), slash_era: 1 }, - Event::ValidatorDisabled 
{ stash: 21 }, Event::Slashed { staker: 21, amount: 200 }, Event::Slashed { staker: 101, amount: 75 }, Event::SlashReported { @@ -8864,8 +8858,6 @@ fn reenable_lower_offenders_mock() { fraction: Perbill::from_percent(50), slash_era: 1 }, - Event::ValidatorDisabled { stash: 31 }, - Event::ValidatorReenabled { stash: 11 }, Event::Slashed { staker: 31, amount: 250 }, ] ); @@ -8935,7 +8927,6 @@ fn do_not_reenable_higher_offenders_mock() { fraction: Perbill::from_percent(50), slash_era: 1 }, - Event::ValidatorDisabled { stash: 11 }, Event::Slashed { staker: 11, amount: 500 }, Event::Slashed { staker: 101, amount: 62 }, Event::SlashReported { @@ -8943,7 +8934,6 @@ fn do_not_reenable_higher_offenders_mock() { fraction: Perbill::from_percent(50), slash_era: 1 }, - Event::ValidatorDisabled { stash: 21 }, Event::Slashed { staker: 21, amount: 500 }, Event::Slashed { staker: 101, amount: 187 }, Event::SlashReported { @@ -9745,11 +9735,8 @@ fn manual_slashing_works() { let has_offence_reported = System::events().iter().any(|record| { matches!( record.event, - RuntimeEvent::Staking(Event::::OffenceReported { - validator, - fraction, - .. - }) if validator == validator_stash && fraction == slash_fraction_2 + RuntimeEvent::Staking(Event::::Slashed { staker, .. 
}) + if staker == validator_stash ) }); assert!(has_offence_reported, "No OffenceReported event was emitted"); diff --git a/substrate/primitives/staking/src/lib.rs b/substrate/primitives/staking/src/lib.rs index 11eb916f702de..024b93d1e0775 100644 --- a/substrate/primitives/staking/src/lib.rs +++ b/substrate/primitives/staking/src/lib.rs @@ -25,7 +25,7 @@ extern crate alloc; use crate::currency_to_vote::CurrencyToVote; use alloc::{collections::btree_map::BTreeMap, vec, vec::Vec}; use codec::{Decode, DecodeWithMemTracking, Encode, FullCodec, HasCompact, MaxEncodedLen}; -use core::ops::{AddAssign, Sub}; +use core::ops::Sub; use scale_info::TypeInfo; use sp_runtime::{ traits::{AtLeast32BitUnsigned, Zero}, From d4364a44804e04310bfd3fc0e4137dcdabdfe2be Mon Sep 17 00:00:00 2001 From: Ankan Date: Mon, 17 Mar 2025 00:10:45 +0100 Subject: [PATCH 05/38] ensure offence is reported to session --- substrate/frame/staking/src/pallet/impls.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/substrate/frame/staking/src/pallet/impls.rs b/substrate/frame/staking/src/pallet/impls.rs index eb028ba7410cf..9aaff61d79ccc 100644 --- a/substrate/frame/staking/src/pallet/impls.rs +++ b/substrate/frame/staking/src/pallet/impls.rs @@ -1310,6 +1310,16 @@ impl Pallet { slash_era, }); + if slash_era == active_era { + // offence is in the current active era. Report it to session to maybe disable the + // validator. 
+ add_db_reads_writes(2, 2); + T::SessionInterface::report_offence( + stash.clone(), + crate::OffenceSeverity(*slash_fraction), + ); + } + let unapplied = slashing::compute_slash::(slashing::SlashParams { stash, slash: *slash_fraction, From 42a0b9df6ba6b3c70fd2f5d119215fa505c874a7 Mon Sep 17 00:00:00 2001 From: Ankan Date: Mon, 17 Mar 2025 00:11:34 +0100 Subject: [PATCH 06/38] fmt --- substrate/bin/node/cli/src/chain_spec.rs | 1 - substrate/frame/staking/src/migrations.rs | 3 +-- substrate/frame/staking/src/mock.rs | 2 +- substrate/frame/staking/src/pallet/impls.rs | 4 ++-- substrate/frame/staking/src/pallet/mod.rs | 8 ++------ substrate/frame/staking/src/slashing.rs | 5 ++--- 6 files changed, 8 insertions(+), 15 deletions(-) diff --git a/substrate/bin/node/cli/src/chain_spec.rs b/substrate/bin/node/cli/src/chain_spec.rs index 1b0b29a1062c9..f1f1ef30fc91d 100644 --- a/substrate/bin/node/cli/src/chain_spec.rs +++ b/substrate/bin/node/cli/src/chain_spec.rs @@ -314,7 +314,6 @@ pub fn testnet_genesis( let (initial_authorities, endowed_accounts, stakers) = configure_accounts(initial_authorities, initial_nominators, endowed_accounts, STASH); - serde_json::json!({ "balances": { "balances": endowed_accounts.iter().cloned().map(|x| (x, ENDOWMENT)).collect::>(), diff --git a/substrate/frame/staking/src/migrations.rs b/substrate/frame/staking/src/migrations.rs index e17780308f426..2c67eec5a9070 100644 --- a/substrate/frame/staking/src/migrations.rs +++ b/substrate/frame/staking/src/migrations.rs @@ -90,8 +90,7 @@ pub mod v16 { #[frame_support::storage_alias] pub(crate) type DisabledValidators = - StorageValue, Vec<(u32, OffenceSeverity)>, ValueQuery>; - + StorageValue, Vec<(u32, OffenceSeverity)>, ValueQuery>; pub struct VersionUncheckedMigrateV15ToV16(core::marker::PhantomData); impl UncheckedOnRuntimeUpgrade for VersionUncheckedMigrateV15ToV16 { diff --git a/substrate/frame/staking/src/mock.rs b/substrate/frame/staking/src/mock.rs index f74b3c48cc776..717e1442d5645 100644 
--- a/substrate/frame/staking/src/mock.rs +++ b/substrate/frame/staking/src/mock.rs @@ -741,7 +741,7 @@ pub(crate) fn on_offence_in_era( let _ = >::on_offence( offenders, slash_fraction, - start_session + start_session, ); return } else if bonded_era > era { diff --git a/substrate/frame/staking/src/pallet/impls.rs b/substrate/frame/staking/src/pallet/impls.rs index 9aaff61d79ccc..3d8c8365cd45e 100644 --- a/substrate/frame/staking/src/pallet/impls.rs +++ b/substrate/frame/staking/src/pallet/impls.rs @@ -1374,7 +1374,6 @@ impl Pallet { consumed_weight } - } impl Pallet { @@ -1668,7 +1667,8 @@ where OffenceDetails { offender: offender.clone(), reporters: details.reporters.clone() } }); - Self::on_offence(offenders, slash_fractions, slash_session) } + Self::on_offence(offenders, slash_fractions, slash_session) + } } impl ScoreProvider for Pallet { diff --git a/substrate/frame/staking/src/pallet/mod.rs b/substrate/frame/staking/src/pallet/mod.rs index c0ec45b8de351..3a3f997bfe1af 100644 --- a/substrate/frame/staking/src/pallet/mod.rs +++ b/substrate/frame/staking/src/pallet/mod.rs @@ -53,10 +53,9 @@ pub use impls::*; use crate::{ asset, slashing, weights::WeightInfo, AccountIdLookupOf, ActiveEraInfo, BalanceOf, EraPayout, - EraRewardPoints, ExposurePage, Forcing, LedgerIntegrityState, MaxNominationsOf, + EraRewardPoints, Exposure, ExposurePage, Forcing, LedgerIntegrityState, MaxNominationsOf, NegativeImbalanceOf, Nominations, NominationsQuota, PositiveImbalanceOf, RewardDestination, SessionInterface, StakingLedger, UnappliedSlash, UnlockChunk, ValidatorPrefs, - Exposure, }; // The speculative number of spans are used as an input of the weight annotation of @@ -314,7 +313,6 @@ pub mod pallet { #[pallet::no_default_bounds] type EventListeners: sp_staking::OnStakingUpdate>; - #[pallet::no_default_bounds] /// Filter some accounts from participating in staking. /// @@ -873,9 +871,7 @@ pub mod pallet { /// A new force era mode was set. 
ForceEra { mode: Forcing }, /// Report of a controller batch deprecation. - ControllerBatchDeprecated { - failures: u32, - }, + ControllerBatchDeprecated { failures: u32 }, /// Staking balance migrated from locks to holds, with any balance that could not be held /// is force withdrawn. CurrencyMigrated { stash: T::AccountId, force_withdraw: BalanceOf }, diff --git a/substrate/frame/staking/src/slashing.rs b/substrate/frame/staking/src/slashing.rs index 9352fda84a2f3..66a2982111f48 100644 --- a/substrate/frame/staking/src/slashing.rs +++ b/substrate/frame/staking/src/slashing.rs @@ -50,9 +50,8 @@ //! Based on research at use crate::{ - asset, BalanceOf, Config, Error, NegativeImbalanceOf, NominatorSlashInEra, - Pallet, Perbill, Exposure, - SpanSlash, UnappliedSlash, ValidatorSlashInEra, + asset, BalanceOf, Config, Error, Exposure, NegativeImbalanceOf, NominatorSlashInEra, Pallet, + Perbill, SpanSlash, UnappliedSlash, ValidatorSlashInEra, }; use alloc::vec::Vec; use codec::{Decode, Encode, MaxEncodedLen}; From e14b22bdce6c587d894bd2e96af8980595e291cd Mon Sep 17 00:00:00 2001 From: Ankan Date: Mon, 17 Mar 2025 00:44:18 +0100 Subject: [PATCH 07/38] fix westend --- .../westend/src/weights/pallet_staking.rs | 82 ------------------- .../election-provider-support/src/lib.rs | 2 + substrate/frame/staking/src/tests.rs | 22 +---- 3 files changed, 3 insertions(+), 103 deletions(-) diff --git a/polkadot/runtime/westend/src/weights/pallet_staking.rs b/polkadot/runtime/westend/src/weights/pallet_staking.rs index 3a99452cfd9fa..b2dbff2b9e108 100644 --- a/polkadot/runtime/westend/src/weights/pallet_staking.rs +++ b/polkadot/runtime/westend/src/weights/pallet_staking.rs @@ -714,60 +714,6 @@ impl pallet_staking::WeightInfo for WeightInfo { .saturating_add(Weight::from_parts(0, 3566).saturating_mul(n.into())) .saturating_add(Weight::from_parts(0, 3566).saturating_mul(v.into())) } - /// Storage: `VoterList::CounterForListNodes` (r:1 w:0) - /// Proof: `VoterList::CounterForListNodes` 
(`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `VoterList::ListBags` (r:178 w:0) - /// Proof: `VoterList::ListBags` (`max_values`: None, `max_size`: Some(82), added: 2557, mode: `MaxEncodedLen`) - /// Storage: `VoterList::ListNodes` (r:2000 w:0) - /// Proof: `VoterList::ListNodes` (`max_values`: None, `max_size`: Some(154), added: 2629, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:2000 w:0) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) - /// Storage: `Staking::Ledger` (r:2000 w:0) - /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) - /// Storage: `Staking::Nominators` (r:2000 w:0) - /// Proof: `Staking::Nominators` (`max_values`: None, `max_size`: Some(558), added: 3033, mode: `MaxEncodedLen`) - /// Storage: `Staking::Validators` (r:1000 w:0) - /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) - /// Storage: `Staking::MinimumActiveStake` (r:0 w:1) - /// Proof: `Staking::MinimumActiveStake` (`max_values`: Some(1), `max_size`: Some(16), added: 511, mode: `MaxEncodedLen`) - /// The range of component `v` is `[500, 1000]`. - /// The range of component `n` is `[500, 1000]`. - fn get_npos_voters(v: u32, n: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `3141 + n * (907 ±0) + v * (391 ±0)` - // Estimated: `456136 + n * (3566 ±0) + v * (3566 ±0)` - // Minimum execution time: 42_790_195_000 picoseconds. 
- Weight::from_parts(42_954_437_000, 0) - .saturating_add(Weight::from_parts(0, 456136)) - // Standard Error: 478_107 - .saturating_add(Weight::from_parts(6_744_044, 0).saturating_mul(v.into())) - // Standard Error: 478_107 - .saturating_add(Weight::from_parts(4_837_739, 0).saturating_mul(n.into())) - .saturating_add(T::DbWeight::get().reads(179)) - .saturating_add(T::DbWeight::get().reads((5_u64).saturating_mul(v.into()))) - .saturating_add(T::DbWeight::get().reads((4_u64).saturating_mul(n.into()))) - .saturating_add(T::DbWeight::get().writes(1)) - .saturating_add(Weight::from_parts(0, 3566).saturating_mul(n.into())) - .saturating_add(Weight::from_parts(0, 3566).saturating_mul(v.into())) - } - /// Storage: `Staking::CounterForValidators` (r:1 w:0) - /// Proof: `Staking::CounterForValidators` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) - /// Storage: `Staking::Validators` (r:1001 w:0) - /// Proof: `Staking::Validators` (`max_values`: None, `max_size`: Some(45), added: 2520, mode: `MaxEncodedLen`) - /// The range of component `v` is `[500, 1000]`. - fn get_npos_targets(v: u32, ) -> Weight { - // Proof Size summary in bytes: - // Measured: `979 + v * (50 ±0)` - // Estimated: `3510 + v * (2520 ±0)` - // Minimum execution time: 2_851_801_000 picoseconds. 
- Weight::from_parts(4_477_533, 0) - .saturating_add(Weight::from_parts(0, 3510)) - // Standard Error: 8_644 - .saturating_add(Weight::from_parts(5_811_682, 0).saturating_mul(v.into())) - .saturating_add(T::DbWeight::get().reads(2)) - .saturating_add(T::DbWeight::get().reads((1_u64).saturating_mul(v.into()))) - .saturating_add(Weight::from_parts(0, 2520).saturating_mul(v.into())) - } /// Storage: `Staking::MinCommission` (r:0 w:1) /// Proof: `Staking::MinCommission` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Staking::MinValidatorBond` (r:0 w:1) @@ -915,34 +861,6 @@ impl pallet_staking::WeightInfo for WeightInfo { .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `Staking::ActiveEra` (r:1 w:0) - /// Proof: `Staking::ActiveEra` (`max_values`: Some(1), `max_size`: Some(13), added: 508, mode: `MaxEncodedLen`) - /// Storage: `Staking::UnappliedSlashes` (r:1 w:1) - /// Proof: `Staking::UnappliedSlashes` (`max_values`: None, `max_size`: Some(3231), added: 5706, mode: `MaxEncodedLen`) - /// Storage: `Staking::Bonded` (r:65 w:0) - /// Proof: `Staking::Bonded` (`max_values`: None, `max_size`: Some(72), added: 2547, mode: `MaxEncodedLen`) - /// Storage: `Staking::Ledger` (r:65 w:65) - /// Proof: `Staking::Ledger` (`max_values`: None, `max_size`: Some(1091), added: 3566, mode: `MaxEncodedLen`) - /// Storage: `NominationPools::ReversePoolIdLookup` (r:65 w:0) - /// Proof: `NominationPools::ReversePoolIdLookup` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) - /// Storage: `DelegatedStaking::Agents` (r:65 w:65) - /// Proof: `DelegatedStaking::Agents` (`max_values`: None, `max_size`: Some(120), added: 2595, mode: `MaxEncodedLen`) - /// Storage: `System::Account` (r:65 w:65) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - /// Storage: `Staking::VirtualStakers` (r:65 w:0) - /// 
Proof: `Staking::VirtualStakers` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) - /// Storage: `Balances::Holds` (r:65 w:65) - /// Proof: `Balances::Holds` (`max_values`: None, `max_size`: Some(103), added: 2578, mode: `MaxEncodedLen`) - fn apply_slash() -> Weight { - // Proof Size summary in bytes: - // Measured: `29228` - // Estimated: `232780` - // Minimum execution time: 3_571_461_000 picoseconds. - Weight::from_parts(3_638_696_000, 0) - .saturating_add(Weight::from_parts(0, 232780)) - .saturating_add(T::DbWeight::get().reads(457)) - .saturating_add(T::DbWeight::get().writes(261)) - } /// Storage: `Staking::CurrentEra` (r:1 w:0) /// Proof: `Staking::CurrentEra` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) /// Storage: `Staking::ErasStartSessionIndex` (r:1 w:0) diff --git a/substrate/frame/election-provider-support/src/lib.rs b/substrate/frame/election-provider-support/src/lib.rs index 90966ec59346e..ba081aa533ffd 100644 --- a/substrate/frame/election-provider-support/src/lib.rs +++ b/substrate/frame/election-provider-support/src/lib.rs @@ -188,6 +188,8 @@ use sp_runtime::{ pub use bounds::DataProviderBounds; pub use codec::{Decode, DecodeWithMemTracking, Encode, MaxEncodedLen}; +/// Re-export the solution generation macro. +pub use frame_election_provider_solution_type::generate_solution_type; pub use frame_support::{traits::Get, weights::Weight, BoundedVec}; /// Re-export some type as they are used in the interface. 
pub use sp_arithmetic::PerThing; diff --git a/substrate/frame/staking/src/tests.rs b/substrate/frame/staking/src/tests.rs index 12ef73a2c201a..9384aa900b899 100644 --- a/substrate/frame/staking/src/tests.rs +++ b/substrate/frame/staking/src/tests.rs @@ -9694,9 +9694,6 @@ fn manual_slashing_works() { slash_fraction_1 )); - // process offence - advance_blocks(1); - // check if balance was slashed correctly (25%) let balance_after_first_slash = Staking::slashable_balance_of(&validator_stash); let expected_balance_1 = initial_balance - (initial_balance / 4); // 25% slash @@ -9730,18 +9727,7 @@ fn manual_slashing_works() { "Balance changed after slashing with smaller fraction" ); - // with the new implementation, we should see an OffenceReported event - // but no Slashed event yet as the slash will be queued - let has_offence_reported = System::events().iter().any(|record| { - matches!( - record.event, - RuntimeEvent::Staking(Event::::Slashed { staker, .. }) - if staker == validator_stash - ) - }); - assert!(has_offence_reported, "No OffenceReported event was emitted"); - - // verify no Slashed event was emitted yet (since it's queued for later processing) + // verify no Slashed event since slash fraction is lower than previous let no_slashed_events = !System::events().iter().any(|record| { matches!(record.event, RuntimeEvent::Staking(Event::::Slashed { .. 
})) }); @@ -9761,9 +9747,6 @@ fn manual_slashing_works() { slash_fraction_3 )); - // process offence - advance_blocks(1); - // check if balance was further slashed (from 75% to 50% of original) let balance_after_third_slash = Staking::slashable_balance_of(&validator_stash); let expected_balance_3 = initial_balance / 2; // 50% of original @@ -9807,9 +9790,6 @@ fn manual_slashing_works() { Perbill::from_percent(75) )); - // process offence - advance_blocks(1); - // check balance was further reduced let balance_after_fifth_slash = Staking::slashable_balance_of(&validator_stash); let expected_balance_5 = initial_balance / 4; // 25% of original (75% slashed) From c15fb371d03fee381eff6bc5e31053fc7a3c6983 Mon Sep 17 00:00:00 2001 From: Ankan Date: Mon, 17 Mar 2025 00:57:15 +0100 Subject: [PATCH 08/38] unrevert some stuff that we need --- .github/workflows/runtimes-matrix.json | 2 +- polkadot/runtime/common/src/try_runtime.rs | 2 +- prdoc/pr_6034.prdoc | 25 +++++++ prdoc/pr_6689.prdoc | 7 +- prdoc/pr_7042.prdoc | 4 +- prdoc/pr_7282.prdoc | 72 +++++++++++++++++++ prdoc/pr_7424.prdoc | 37 ++++++++++ prdoc/pr_7582.prdoc | 17 +++++ .../frame-umbrella-weight-template.hbs | 17 +++++ substrate/.maintain/frame-weight-template.hbs | 17 +++++ substrate/frame/bags-list/src/lib.rs | 2 +- substrate/frame/bags-list/src/list/mod.rs | 11 ++- 12 files changed, 202 insertions(+), 11 deletions(-) create mode 100644 prdoc/pr_6034.prdoc create mode 100644 prdoc/pr_7282.prdoc create mode 100644 prdoc/pr_7424.prdoc create mode 100644 prdoc/pr_7582.prdoc diff --git a/.github/workflows/runtimes-matrix.json b/.github/workflows/runtimes-matrix.json index ce206c0781572..f47990217beb3 100644 --- a/.github/workflows/runtimes-matrix.json +++ b/.github/workflows/runtimes-matrix.json @@ -6,7 +6,7 @@ "header": "substrate/HEADER-APACHE2", "template": "substrate/.maintain/frame-weight-template.hbs", "bench_features": "runtime-benchmarks", - "bench_flags": "--genesis-builder-policy=none 
--exclude-pallets=pallet_xcm,pallet_xcm_benchmarks::fungible,pallet_xcm_benchmarks::generic,pallet_nomination_pools,pallet_remark,pallet_transaction_storage", + "bench_flags": "--exclude-pallets=pallet_xcm,pallet_xcm_benchmarks::fungible,pallet_xcm_benchmarks::generic,pallet_nomination_pools,pallet_remark,pallet_transaction_storage", "uri": null, "is_relay": false }, diff --git a/polkadot/runtime/common/src/try_runtime.rs b/polkadot/runtime/common/src/try_runtime.rs index b22e170329206..795249dde20b2 100644 --- a/polkadot/runtime/common/src/try_runtime.rs +++ b/polkadot/runtime/common/src/try_runtime.rs @@ -36,7 +36,7 @@ where let all_stakers = Ledger::::iter().map(|(ctrl, l)| (ctrl, l.stash)).collect::>(); let mut all_exposed = BTreeSet::new(); - ErasStakers::::iter().for_each(|(_, val, expo)| { + ErasStakersPaged::::iter().for_each(|((_era, val, _page), expo)| { all_exposed.insert(val); all_exposed.extend(expo.others.iter().map(|ie| ie.who.clone())) }); diff --git a/prdoc/pr_6034.prdoc b/prdoc/pr_6034.prdoc new file mode 100644 index 0000000000000..e6ecd8aae5c8c --- /dev/null +++ b/prdoc/pr_6034.prdoc @@ -0,0 +1,25 @@ +title: Adds multi-block election types and refactors current single logic to support it + +doc: + - audience: Runtime Dev + description: | + This PR adds election types and structs required to run a multi-block election. In addition, + it modifies EPM, staking pallet and all dependent pallets and logic to use the multi-block types. 
+ +crates: + - name: frame-election-provider-support + bump: major + - name: pallet-election-provider-multi-phase + bump: major + - name: pallet-staking + bump: major + - name: pallet-fast-unstake + bump: minor + - name: pallet-delegated-staking + bump: minor + - name: sp-npos-elections + bump: major + - name: sp-staking + bump: major + - name: pallet-bags-list-remote-tests + bump: minor diff --git a/prdoc/pr_6689.prdoc b/prdoc/pr_6689.prdoc index 2cbb49cd7dd24..72e935e2e984a 100644 --- a/prdoc/pr_6689.prdoc +++ b/prdoc/pr_6689.prdoc @@ -1,13 +1,12 @@ title: '[pallet-revive] Update gas encoding' doc: - audience: Runtime Dev - description: |- + description: | Update the current approach to attach the `ref_time`, `pov` and `deposit` parameters to an Ethereum transaction. -Previously, these three parameters were passed along with the signed payload, and the fees resulting from gas × gas_price were checked to ensure they matched the actual fees paid by the user for the extrinsic - + Previously, these three parameters were passed along with the signed payload, and the fees resulting from gas × gas_price were checked to ensure they matched the actual fees paid by the user for the extrinsic This approach unfortunately can be attacked. A malicious actor could force such a transaction to fail by injecting low values for some of these extra parameters as they are not part of the signed payload. 
- The new approach encodes these 3 extra parameters in the lower digits of the transaction gas, using the log2 of the actual values to encode each components on 2 digits + crates: - name: pallet-revive-eth-rpc bump: minor diff --git a/prdoc/pr_7042.prdoc b/prdoc/pr_7042.prdoc index 00fb34c6af493..1c585f9dff0d6 100644 --- a/prdoc/pr_7042.prdoc +++ b/prdoc/pr_7042.prdoc @@ -1,4 +1,4 @@ -title: `networking::TransactionPool` should accept `Arc` +title: networking::TransactionPool should accept Arc doc: - audience: Node Dev description: The `sc_network_transactions::config::TransactionPool` trait now returns an `Arc` for transactions. @@ -6,4 +6,4 @@ crates: - name: sc-network-transactions bump: minor - name: sc-service - bump: minor \ No newline at end of file + bump: minor diff --git a/prdoc/pr_7282.prdoc b/prdoc/pr_7282.prdoc new file mode 100644 index 0000000000000..3d12a8b184abd --- /dev/null +++ b/prdoc/pr_7282.prdoc @@ -0,0 +1,72 @@ +title: AHM Multi-block staking election pallet +doc: +- audience: Runtime Dev + description: | + ## Multi Block Election Pallet + + This PR adds the first iteration of the multi-block staking pallet. + + From this point onwards, the staking and its election provider pallets are being customized to work in AssetHub. While usage in solo-chains is still possible, it is not longer the main focus of this pallet. For a safer usage, please fork and user an older version of this pallet. 
+crates: +- name: pallet-election-provider-multi-block + bump: major +- name: frame-election-provider-support + bump: major +- name: frame-election-provider-solution-type + bump: major +- name: sp-npos-elections + bump: major +- name: sp-staking + bump: major +- name: pallet-staking + bump: major +- name: pallet-election-provider-multi-phase + bump: major +- name: westend-runtime + bump: major +- name: pallet-delegated-staking + bump: major +- name: pallet-fast-unstake + bump: major +- name: pallet-session-benchmarking + bump: major +- name: sc-consensus-grandpa + bump: major +- name: pallet-babe + bump: major +- name: pallet-beefy + bump: major +- name: pallet-grandpa + bump: major +- name: pallet-nomination-pools + bump: major +- name: pallet-root-offences + bump: major +- name: pallet-nomination-pools-benchmarking + bump: major +- name: pallet-offences-benchmarking + bump: major +- name: cumulus-pov-validator + bump: major +- name: polkadot-sdk + bump: major +- name: asset-hub-rococo-runtime + bump: major +- name: pallet-bags-list + bump: major +- name: frame-benchmarking + bump: major +- name: frame-support-procedural + bump: major +- name: frame-support + bump: major +- name: frame-benchmarking-cli + bump: major +- name: polkadot-runtime-common + bump: major +- name: pallet-elections-phragmen + bump: major +- name: pallet-election-provider-support-benchmarking + bump: major +- name: pallet-session + bump: major diff --git a/prdoc/pr_7424.prdoc b/prdoc/pr_7424.prdoc new file mode 100644 index 0000000000000..e177f41371bc6 --- /dev/null +++ b/prdoc/pr_7424.prdoc @@ -0,0 +1,37 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: 'Bounded Slashing: Paginated Offence Processing & Slash Application' + +doc: + - audience: Runtime Dev + description: | + This PR refactors the slashing mechanism in `pallet-staking` to be bounded by introducing paged offence 
processing and paged slash application. + + ### Key Changes + - Offences are queued instead of being processed immediately. + - Slashes are computed in pages, stored as a `StorageDoubleMap` with `(Validator, SlashFraction, PageIndex)` to uniquely identify them. + - Slashes are applied incrementally across multiple blocks instead of a single unbounded operation. + - New storage items: `OffenceQueue`, `ProcessingOffence`, `OffenceQueueEras`. + - Updated API for cancelling and applying slashes. + - Preliminary benchmarks added; further optimizations planned. + + This enables staking slashing to scale efficiently and removes a major blocker for staking migration to a parachain (AH). + +crates: +- name: pallet-babe + bump: patch +- name: pallet-staking + bump: major +- name: pallet-grandpa + bump: patch +- name: westend-runtime + bump: minor +- name: pallet-beefy + bump: patch +- name: pallet-offences-benchmarking + bump: patch +- name: pallet-session-benchmarking + bump: patch +- name: pallet-root-offences + bump: patch \ No newline at end of file diff --git a/prdoc/pr_7582.prdoc b/prdoc/pr_7582.prdoc new file mode 100644 index 0000000000000..26e594c4373f2 --- /dev/null +++ b/prdoc/pr_7582.prdoc @@ -0,0 +1,17 @@ +title: Implementation of `ah-client` and `rc-client` staking pallets +doc: +- audience: Runtime Dev + description: |- + This PR introduces the initial structure for `pallet-ah-client` and `pallet-rc-client`. These + pallets will reside on the relay chain and AssetHub, respectively, and will manage the interaction + between `pallet-session` on the relay chain and `pallet-staking` on AssetHub. + Both pallets are experimental and not intended for production use. 
+crates: +- name: pallet-staking-ah-client + bump: major +- name: pallet-staking-rc-client + bump: major +- name: pallet-election-provider-multi-block + bump: minor +- name: pallet-staking + bump: major diff --git a/substrate/.maintain/frame-umbrella-weight-template.hbs b/substrate/.maintain/frame-umbrella-weight-template.hbs index c99758c41d9d7..6985944b0a3bd 100644 --- a/substrate/.maintain/frame-umbrella-weight-template.hbs +++ b/substrate/.maintain/frame-umbrella-weight-template.hbs @@ -1,3 +1,20 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + {{header}} //! Autogenerated weights for `{{pallet}}` //! diff --git a/substrate/.maintain/frame-weight-template.hbs b/substrate/.maintain/frame-weight-template.hbs index 624fc57aa3295..c2a22200dc99b 100644 --- a/substrate/.maintain/frame-weight-template.hbs +++ b/substrate/.maintain/frame-weight-template.hbs @@ -1,3 +1,20 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + {{header}} //! Autogenerated weights for `{{pallet}}` //! diff --git a/substrate/frame/bags-list/src/lib.rs b/substrate/frame/bags-list/src/lib.rs index 37077cd2d4835..606b07b6e7b6f 100644 --- a/substrate/frame/bags-list/src/lib.rs +++ b/substrate/frame/bags-list/src/lib.rs @@ -148,7 +148,7 @@ pub use list::{notional_bag_for, Bag, List, ListError, Node}; pub use pallet::*; pub use weights::WeightInfo; -pub(crate) const LOG_TARGET: &str = "runtime::bags_list"; +pub(crate) const LOG_TARGET: &str = "runtime::bags-list"; // syntactic sugar for logging. #[macro_export] diff --git a/substrate/frame/bags-list/src/list/mod.rs b/substrate/frame/bags-list/src/list/mod.rs index 8344674fa1341..1fe4ffffaa658 100644 --- a/substrate/frame/bags-list/src/list/mod.rs +++ b/substrate/frame/bags-list/src/list/mod.rs @@ -255,7 +255,7 @@ impl, I: 'static> List { /// Iterate over all nodes in all bags in the list. /// /// Full iteration can be expensive; it's recommended to limit the number of items with - /// `.take(n)`. + /// `.take(n)`, or call `.next()` one by one. 
pub(crate) fn iter() -> impl Iterator> { // We need a touch of special handling here: because we permit `T::BagThresholds` to // omit the final bound, we need to ensure that we explicitly include that threshold in the @@ -302,6 +302,13 @@ impl, I: 'static> List { .filter_map(Bag::get) .flat_map(|bag| bag.iter()); + crate::log!( + debug, + "starting to iterate from {:?}, who's bag is {:?}, and there are {:?} leftover bags", + &start, + start_node_upper, + idx + ); Ok(start_bag.chain(leftover_bags)) } @@ -341,7 +348,7 @@ impl, I: 'static> List { bag.put(); crate::log!( - debug, + trace, "inserted {:?} with score {:?} into bag {:?}, new count is {}", id, score, From e6fc0c1a8aa00ca7398798eec7f2de0f05236046 Mon Sep 17 00:00:00 2001 From: Ankan Date: Mon, 17 Mar 2025 01:37:06 +0100 Subject: [PATCH 09/38] bring back some more changes --- polkadot/runtime/test-runtime/src/lib.rs | 4 +- substrate/bin/node/runtime/src/lib.rs | 4 +- substrate/frame/babe/src/mock.rs | 4 +- substrate/frame/beefy/src/tests.rs | 2 + substrate/frame/benchmarking/src/lib.rs | 2 +- .../election-provider-multi-block/src/lib.rs | 2 +- .../test-staking-e2e/src/mock.rs | 9 +- .../solution-type/fuzzer/src/compact.rs | 3 +- substrate/frame/grandpa/src/mock.rs | 4 +- .../frame/offences/benchmarking/src/mock.rs | 4 +- substrate/frame/root-offences/src/lib.rs | 17 +- substrate/frame/root-offences/src/mock.rs | 4 +- .../frame/session/benchmarking/src/mock.rs | 4 +- substrate/frame/session/src/lib.rs | 10 +- substrate/frame/staking/src/lib.rs | 12 +- substrate/frame/staking/src/migrations.rs | 259 ------------- substrate/frame/staking/src/mock.rs | 27 +- substrate/frame/staking/src/pallet/impls.rs | 29 +- substrate/frame/staking/src/tests.rs | 349 ++++-------------- .../construct_runtime/expand/outer_enums.rs | 2 +- .../procedural/src/pallet/expand/event.rs | 2 +- substrate/frame/support/src/lib.rs | 1 + .../deprecated_where_block.stderr | 16 +- .../frame/benchmarking-cli/src/pallet/mod.rs | 2 +- 24 files 
changed, 164 insertions(+), 608 deletions(-) diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index 65e20eccd71a5..eac53afc647ea 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ b/polkadot/runtime/test-runtime/src/lib.rs @@ -323,8 +323,8 @@ impl pallet_session::Config for Runtime { } impl pallet_session::historical::Config for Runtime { - type FullIdentification = pallet_staking::Exposure; - type FullIdentificationOf = pallet_staking::ExposureOf; + type FullIdentification = (); + type FullIdentificationOf = pallet_staking::NullIdentity; } pallet_staking_reward_curve::build! { diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index 062672373b300..2a3fba9f22aa8 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -693,8 +693,8 @@ impl pallet_session::Config for Runtime { } impl pallet_session::historical::Config for Runtime { - type FullIdentification = pallet_staking::Exposure; - type FullIdentificationOf = pallet_staking::ExposureOf; + type FullIdentification = (); + type FullIdentificationOf = pallet_staking::NullIdentity; } pallet_staking_reward_curve::build! 
{ diff --git a/substrate/frame/babe/src/mock.rs b/substrate/frame/babe/src/mock.rs index 20634704fb025..c51ddeb9ab9cd 100644 --- a/substrate/frame/babe/src/mock.rs +++ b/substrate/frame/babe/src/mock.rs @@ -105,8 +105,8 @@ impl pallet_session::Config for Test { } impl pallet_session::historical::Config for Test { - type FullIdentification = pallet_staking::Exposure; - type FullIdentificationOf = pallet_staking::ExposureOf; + type FullIdentification = (); + type FullIdentificationOf = pallet_staking::NullIdentity; } impl pallet_authorship::Config for Test { diff --git a/substrate/frame/beefy/src/tests.rs b/substrate/frame/beefy/src/tests.rs index 1bd0a72b25ecd..5f713a41cafa5 100644 --- a/substrate/frame/beefy/src/tests.rs +++ b/substrate/frame/beefy/src/tests.rs @@ -39,6 +39,8 @@ use crate::{self as beefy, mock::*, Call, Config, Error, WeightInfoExt}; fn init_block(block: u64) { System::set_block_number(block); + // Staking has to also be initialized, and be the first, to have the new validator set ready. + Staking::on_initialize(block); Session::on_initialize(block); } diff --git a/substrate/frame/benchmarking/src/lib.rs b/substrate/frame/benchmarking/src/lib.rs index 6e21356e9d47a..0af02ccc1af40 100644 --- a/substrate/frame/benchmarking/src/lib.rs +++ b/substrate/frame/benchmarking/src/lib.rs @@ -381,7 +381,7 @@ pub use v1::*; /// /// #[extrinsic_call] /// _(RuntimeOrigin::Signed(caller), vec![0u8; l]); -/// +/// /// // Everything onwards will be treated as test. /// assert_last_event::(Event::FooExecuted { result: Ok(()) }.into()); /// Ok(()) diff --git a/substrate/frame/election-provider-multi-block/src/lib.rs b/substrate/frame/election-provider-multi-block/src/lib.rs index 547b99cfc5416..86a94c67c5844 100644 --- a/substrate/frame/election-provider-multi-block/src/lib.rs +++ b/substrate/frame/election-provider-multi-block/src/lib.rs @@ -66,7 +66,7 @@ //! //! ## Pagination //! -//! Most of the external APIs of this pallet are paginated. 
All pagination follow a patter where if +//! Most of the external APIs of this pallet are paginated. All pagination follow a pattern where if //! `N` pages exist, the first paginated call is `function(N-1)` and the last one is `function(0)`. //! For example, with 3 pages, the `elect` of [`ElectionProvider`] is expected to be called as //! `elect(2) -> elect(1) -> elect(0)`. In essence, calling a paginated function with index 0 is diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs index 1a9bf7165511a..8619325d56b35 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs @@ -147,8 +147,8 @@ impl pallet_session::Config for Runtime { type WeightInfo = (); } impl pallet_session::historical::Config for Runtime { - type FullIdentification = pallet_staking::Exposure; - type FullIdentificationOf = pallet_staking::ExposureOf; + type FullIdentification = (); + type FullIdentificationOf = pallet_staking::NullIdentity; } frame_election_provider_support::generate_solution_type!( @@ -898,10 +898,7 @@ pub(crate) fn on_offence_now( // Add offence to validator, slash it. 
pub(crate) fn add_slash(who: &AccountId) { on_offence_now( - &[OffenceDetails { - offender: (*who, Staking::eras_stakers(active_era(), who)), - reporters: vec![], - }], + &[OffenceDetails { offender: (*who, ()), reporters: vec![] }], &[Perbill::from_percent(10)], ); } diff --git a/substrate/frame/election-provider-support/solution-type/fuzzer/src/compact.rs b/substrate/frame/election-provider-support/solution-type/fuzzer/src/compact.rs index 90fd9509e6f29..c4ae7c8462347 100644 --- a/substrate/frame/election-provider-support/solution-type/fuzzer/src/compact.rs +++ b/substrate/frame/election-provider-support/solution-type/fuzzer/src/compact.rs @@ -21,7 +21,8 @@ use sp_arithmetic::Percent; use sp_runtime::codec::{Encode, Error}; fn main() { - generate_solution_type!(#[compact] pub struct InnerTestSolutionCompact::< + generate_solution_type!( + #[compact] pub struct InnerTestSolutionCompact::< VoterIndex = u32, TargetIndex = u32, Accuracy = Percent, diff --git a/substrate/frame/grandpa/src/mock.rs b/substrate/frame/grandpa/src/mock.rs index 933aa6c3ea2fd..5de6107440d97 100644 --- a/substrate/frame/grandpa/src/mock.rs +++ b/substrate/frame/grandpa/src/mock.rs @@ -109,8 +109,8 @@ impl pallet_session::Config for Test { } impl pallet_session::historical::Config for Test { - type FullIdentification = pallet_staking::Exposure; - type FullIdentificationOf = pallet_staking::ExposureOf; + type FullIdentification = (); + type FullIdentificationOf = pallet_staking::NullIdentity; } impl pallet_authorship::Config for Test { diff --git a/substrate/frame/offences/benchmarking/src/mock.rs b/substrate/frame/offences/benchmarking/src/mock.rs index c87fe36ee23d4..7dab3da6ee365 100644 --- a/substrate/frame/offences/benchmarking/src/mock.rs +++ b/substrate/frame/offences/benchmarking/src/mock.rs @@ -54,8 +54,8 @@ impl pallet_timestamp::Config for Test { type WeightInfo = (); } impl pallet_session::historical::Config for Test { - type FullIdentification = pallet_staking::Exposure; - type 
FullIdentificationOf = pallet_staking::ExposureOf; + type FullIdentification = (); + type FullIdentificationOf = pallet_staking::NullIdentity; } sp_runtime::impl_opaque_keys! { diff --git a/substrate/frame/root-offences/src/lib.rs b/substrate/frame/root-offences/src/lib.rs index fd6ffc55e40c3..8e91c4ecfd1cd 100644 --- a/substrate/frame/root-offences/src/lib.rs +++ b/substrate/frame/root-offences/src/lib.rs @@ -31,7 +31,7 @@ extern crate alloc; use alloc::vec::Vec; use pallet_session::historical::IdentificationTuple; -use pallet_staking::{BalanceOf, Exposure, ExposureOf, Pallet as Staking}; +use pallet_staking::Pallet as Staking; use sp_runtime::Perbill; use sp_staking::offence::OnOffenceHandler; @@ -49,11 +49,8 @@ pub mod pallet { + pallet_staking::Config + pallet_session::Config::AccountId> + pallet_session::historical::Config< - FullIdentification = Exposure< - ::AccountId, - BalanceOf, - >, - FullIdentificationOf = ExposureOf, + FullIdentification = (), + FullIdentificationOf = pallet_staking::NullIdentity, > { type RuntimeEvent: From> + IsType<::RuntimeEvent>; @@ -106,15 +103,11 @@ pub mod pallet { fn get_offence_details( offenders: Vec<(T::AccountId, Perbill)>, ) -> Result>, DispatchError> { - let now = pallet_staking::ActiveEra::::get() - .map(|e| e.index) - .ok_or(Error::::FailedToGetActiveEra)?; - Ok(offenders .clone() .into_iter() .map(|(o, _)| OffenceDetails:: { - offender: (o.clone(), Staking::::eras_stakers(now, &o)), + offender: (o.clone(), ()), reporters: Default::default(), }) .collect()) @@ -124,7 +117,7 @@ pub mod pallet { fn submit_offence(offenders: &[OffenceDetails], slash_fraction: &[Perbill]) { let session_index = as frame_support::traits::ValidatorSet>::session_index(); - as OnOffenceHandler< + as OnOffenceHandler< T::AccountId, IdentificationTuple, Weight, diff --git a/substrate/frame/root-offences/src/mock.rs b/substrate/frame/root-offences/src/mock.rs index 45f1fa8c2058c..54ddbecaebfa0 100644 --- a/substrate/frame/root-offences/src/mock.rs 
+++ b/substrate/frame/root-offences/src/mock.rs @@ -144,8 +144,8 @@ impl pallet_staking::Config for Test { } impl pallet_session::historical::Config for Test { - type FullIdentification = pallet_staking::Exposure; - type FullIdentificationOf = pallet_staking::ExposureOf; + type FullIdentification = (); + type FullIdentificationOf = pallet_staking::NullIdentity; } sp_runtime::impl_opaque_keys! { diff --git a/substrate/frame/session/benchmarking/src/mock.rs b/substrate/frame/session/benchmarking/src/mock.rs index 0be337c459d14..8e775eaf011de 100644 --- a/substrate/frame/session/benchmarking/src/mock.rs +++ b/substrate/frame/session/benchmarking/src/mock.rs @@ -68,8 +68,8 @@ impl pallet_timestamp::Config for Test { type WeightInfo = (); } impl pallet_session::historical::Config for Test { - type FullIdentification = pallet_staking::Exposure; - type FullIdentificationOf = pallet_staking::ExposureOf; + type FullIdentification = (); + type FullIdentificationOf = pallet_staking::NullIdentity; } sp_runtime::impl_opaque_keys! { diff --git a/substrate/frame/session/src/lib.rs b/substrate/frame/session/src/lib.rs index 8c359a4bf665f..a80a2b235757b 100644 --- a/substrate/frame/session/src/lib.rs +++ b/substrate/frame/session/src/lib.rs @@ -661,8 +661,6 @@ impl Pallet { /// punishment after a fork. pub fn rotate_session() { let session_index = CurrentIndex::::get(); - log::trace!(target: "runtime::session", "rotating session {:?}", session_index); - let changed = QueuedChanged::::get(); // Inform the session handlers that a session is going to end. @@ -684,11 +682,17 @@ impl Pallet { // Increment session index. let session_index = session_index + 1; CurrentIndex::::put(session_index); - T::SessionManager::start_session(session_index); + log::trace!(target: "runtime::session", "starting_session {:?}", session_index); // Get next validator set. 
let maybe_next_validators = T::SessionManager::new_session(session_index + 1); + log::trace!( + target: "runtime::session", + "planning_session {:?} with {:?} validators", + session_index + 1, + maybe_next_validators.as_ref().map(|v| v.len()) + ); let (next_validators, next_identities_changed) = if let Some(validators) = maybe_next_validators { // NOTE: as per the documentation on `OnSessionEnding`, we consider diff --git a/substrate/frame/staking/src/lib.rs b/substrate/frame/staking/src/lib.rs index d42c863592124..6d712f1329148 100644 --- a/substrate/frame/staking/src/lib.rs +++ b/substrate/frame/staking/src/lib.rs @@ -933,10 +933,7 @@ pub trait SessionInterface { impl SessionInterface<::AccountId> for T where T: pallet_session::Config::AccountId>, - T: pallet_session::historical::Config< - FullIdentification = Exposure<::AccountId, BalanceOf>, - FullIdentificationOf = ExposureOf, - >, + T: pallet_session::historical::Config, T::SessionHandler: pallet_session::SessionHandler<::AccountId>, T::SessionManager: pallet_session::SessionManager<::AccountId>, T::ValidatorIdOf: Convert< @@ -1080,6 +1077,13 @@ impl Convert } } +pub struct NullIdentity; +impl Convert> for NullIdentity { + fn convert(_: T) -> Option<()> { + Some(()) + } +} + /// Filter historical offences out and only allow those from the bonding period. pub struct FilterHistoricalOffences { _inner: core::marker::PhantomData<(T, R)>, diff --git a/substrate/frame/staking/src/migrations.rs b/substrate/frame/staking/src/migrations.rs index 2c67eec5a9070..f6e75202b7015 100644 --- a/substrate/frame/staking/src/migrations.rs +++ b/substrate/frame/staking/src/migrations.rs @@ -18,7 +18,6 @@ //! [CHANGELOG.md](https://github.com/paritytech/polkadot-sdk/blob/master/substrate/frame/staking/CHANGELOG.md). 
use super::*; -use frame_election_provider_support::SortedListProvider; use frame_support::{ migrations::VersionedMigration, pallet_prelude::ValueQuery, @@ -36,10 +35,6 @@ use sp_runtime::TryRuntimeError; /// Obsolete from v13. Keeping around to make encoding/decoding of old migration code easier. #[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] enum ObsoleteReleases { - V1_0_0Ancient, - V2_0_0, - V3_0_0, - V4_0_0, V5_0_0, // blockable validators. V6_0_0, // removal of all storage associated with offchain phragmen. V7_0_0, // keep track of number of nominators / validators in map @@ -441,257 +436,3 @@ pub mod v11 { } } } - -pub mod v10 { - use super::*; - use frame_support::storage_alias; - - #[storage_alias] - type EarliestUnappliedSlash = StorageValue, EraIndex>; - - /// Apply any pending slashes that where queued. - /// - /// That means we might slash someone a bit too early, but we will definitely - /// won't forget to slash them. The cap of 512 is somewhat randomly taken to - /// prevent us from iterating over an arbitrary large number of keys `on_runtime_upgrade`. - pub struct MigrateToV10(core::marker::PhantomData); - impl OnRuntimeUpgrade for MigrateToV10 { - fn on_runtime_upgrade() -> frame_support::weights::Weight { - if StorageVersion::::get() == ObsoleteReleases::V9_0_0 { - let pending_slashes = UnappliedSlashes::::iter().take(512); - for (era, slashes) in pending_slashes { - for slash in slashes { - // in the old slashing scheme, the slash era was the key at which we read - // from `UnappliedSlashes`. 
- log!(warn, "prematurely applying a slash ({:?}) for era {:?}", slash, era); - slashing::apply_slash::(slash, era); - } - } - - EarliestUnappliedSlash::::kill(); - StorageVersion::::put(ObsoleteReleases::V10_0_0); - - log!(info, "MigrateToV10 executed successfully"); - T::DbWeight::get().reads_writes(1, 2) - } else { - log!(warn, "MigrateToV10 should be removed."); - T::DbWeight::get().reads(1) - } - } - } -} - -pub mod v9 { - use super::*; - #[cfg(feature = "try-runtime")] - use alloc::vec::Vec; - #[cfg(feature = "try-runtime")] - use codec::{Decode, Encode}; - - /// Migration implementation that injects all validators into sorted list. - /// - /// This is only useful for chains that started their `VoterList` just based on nominators. - pub struct InjectValidatorsIntoVoterList(core::marker::PhantomData); - impl OnRuntimeUpgrade for InjectValidatorsIntoVoterList { - fn on_runtime_upgrade() -> Weight { - if StorageVersion::::get() == ObsoleteReleases::V8_0_0 { - let prev_count = T::VoterList::count(); - let weight_of_cached = Pallet::::weight_of_fn(); - for (v, _) in Validators::::iter() { - let weight = weight_of_cached(&v); - let _ = T::VoterList::on_insert(v.clone(), weight).map_err(|err| { - log!(warn, "failed to insert {:?} into VoterList: {:?}", v, err) - }); - } - - log!( - info, - "injected a total of {} new voters, prev count: {} next count: {}, updating to version 9", - Validators::::count(), - prev_count, - T::VoterList::count(), - ); - - StorageVersion::::put(ObsoleteReleases::V9_0_0); - T::BlockWeights::get().max_block - } else { - log!( - warn, - "InjectValidatorsIntoVoterList being executed on the wrong storage \ - version, expected ObsoleteReleases::V8_0_0" - ); - T::DbWeight::get().reads(1) - } - } - - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, TryRuntimeError> { - frame_support::ensure!( - StorageVersion::::get() == ObsoleteReleases::V8_0_0, - "must upgrade linearly" - ); - - let prev_count = T::VoterList::count(); - 
Ok(prev_count.encode()) - } - - #[cfg(feature = "try-runtime")] - fn post_upgrade(prev_count: Vec) -> Result<(), TryRuntimeError> { - let prev_count: u32 = Decode::decode(&mut prev_count.as_slice()).expect( - "the state parameter should be something that was generated by pre_upgrade", - ); - let post_count = T::VoterList::count(); - let validators = Validators::::count(); - ensure!( - post_count == prev_count + validators, - "`VoterList` count after the migration must equal to the sum of \ - previous count and the current number of validators" - ); - - frame_support::ensure!( - StorageVersion::::get() == ObsoleteReleases::V9_0_0, - "must upgrade" - ); - Ok(()) - } - } -} - -pub mod v8 { - use super::*; - use crate::{Config, Nominators, Pallet, Weight}; - use frame_election_provider_support::SortedListProvider; - use frame_support::traits::Get; - - #[cfg(feature = "try-runtime")] - pub fn pre_migrate() -> Result<(), &'static str> { - frame_support::ensure!( - StorageVersion::::get() == ObsoleteReleases::V7_0_0, - "must upgrade linearly" - ); - - crate::log!(info, "👜 staking bags-list migration passes PRE migrate checks ✅",); - Ok(()) - } - - /// Migration to sorted `VoterList`. 
- pub fn migrate() -> Weight { - if StorageVersion::::get() == ObsoleteReleases::V7_0_0 { - crate::log!(info, "migrating staking to ObsoleteReleases::V8_0_0"); - - let migrated = T::VoterList::unsafe_regenerate( - Nominators::::iter().map(|(id, _)| id), - Pallet::::weight_of_fn(), - ); - - StorageVersion::::put(ObsoleteReleases::V8_0_0); - crate::log!( - info, - "👜 completed staking migration to ObsoleteReleases::V8_0_0 with {} voters migrated", - migrated, - ); - - T::BlockWeights::get().max_block - } else { - T::DbWeight::get().reads(1) - } - } - - #[cfg(feature = "try-runtime")] - pub fn post_migrate() -> Result<(), &'static str> { - T::VoterList::try_state().map_err(|_| "VoterList is not in a sane state.")?; - crate::log!(info, "👜 staking bags-list migration passes POST migrate checks ✅",); - Ok(()) - } -} - -pub mod v7 { - use super::*; - use frame_support::storage_alias; - - #[storage_alias] - type CounterForValidators = StorageValue, u32>; - #[storage_alias] - type CounterForNominators = StorageValue, u32>; - - pub fn pre_migrate() -> Result<(), &'static str> { - assert!( - CounterForValidators::::get().unwrap().is_zero(), - "CounterForValidators already set." - ); - assert!( - CounterForNominators::::get().unwrap().is_zero(), - "CounterForNominators already set." 
- ); - assert!(Validators::::count().is_zero(), "Validators already set."); - assert!(Nominators::::count().is_zero(), "Nominators already set."); - assert!(StorageVersion::::get() == ObsoleteReleases::V6_0_0); - Ok(()) - } - - pub fn migrate() -> Weight { - log!(info, "Migrating staking to ObsoleteReleases::V7_0_0"); - let validator_count = Validators::::iter().count() as u32; - let nominator_count = Nominators::::iter().count() as u32; - - CounterForValidators::::put(validator_count); - CounterForNominators::::put(nominator_count); - - StorageVersion::::put(ObsoleteReleases::V7_0_0); - log!(info, "Completed staking migration to ObsoleteReleases::V7_0_0"); - - T::DbWeight::get().reads_writes(validator_count.saturating_add(nominator_count).into(), 2) - } -} - -pub mod v6 { - use super::*; - use frame_support::{storage_alias, traits::Get, weights::Weight}; - - // NOTE: value type doesn't matter, we just set it to () here. - #[storage_alias] - type SnapshotValidators = StorageValue, ()>; - #[storage_alias] - type SnapshotNominators = StorageValue, ()>; - #[storage_alias] - type QueuedElected = StorageValue, ()>; - #[storage_alias] - type QueuedScore = StorageValue, ()>; - #[storage_alias] - type EraElectionStatus = StorageValue, ()>; - #[storage_alias] - type IsCurrentSessionFinal = StorageValue, ()>; - - /// check to execute prior to migration. - pub fn pre_migrate() -> Result<(), &'static str> { - // these may or may not exist. - log!(info, "SnapshotValidators.exits()? {:?}", SnapshotValidators::::exists()); - log!(info, "SnapshotNominators.exits()? {:?}", SnapshotNominators::::exists()); - log!(info, "QueuedElected.exits()? {:?}", QueuedElected::::exists()); - log!(info, "QueuedScore.exits()? {:?}", QueuedScore::::exists()); - // these must exist. - assert!( - IsCurrentSessionFinal::::exists(), - "IsCurrentSessionFinal storage item not found!" 
- ); - assert!(EraElectionStatus::::exists(), "EraElectionStatus storage item not found!"); - Ok(()) - } - - /// Migrate storage to v6. - pub fn migrate() -> Weight { - log!(info, "Migrating staking to ObsoleteReleases::V6_0_0"); - - SnapshotValidators::::kill(); - SnapshotNominators::::kill(); - QueuedElected::::kill(); - QueuedScore::::kill(); - EraElectionStatus::::kill(); - IsCurrentSessionFinal::::kill(); - - StorageVersion::::put(ObsoleteReleases::V6_0_0); - - log!(info, "Done."); - T::DbWeight::get().writes(6 + 1) - } -} diff --git a/substrate/frame/staking/src/mock.rs b/substrate/frame/staking/src/mock.rs index 717e1442d5645..384f23dc2c475 100644 --- a/substrate/frame/staking/src/mock.rs +++ b/substrate/frame/staking/src/mock.rs @@ -151,8 +151,8 @@ impl pallet_session::Config for Test { } impl pallet_session::historical::Config for Test { - type FullIdentification = crate::Exposure; - type FullIdentificationOf = crate::ExposureOf; + type FullIdentification = (); + type FullIdentificationOf = NullIdentity; } impl pallet_authorship::Config for Test { type FindAuthor = Author11; @@ -634,11 +634,6 @@ pub(crate) fn run_to_block(n: BlockNumber) { ); } -/// Progress by n block. -pub(crate) fn advance_blocks(n: u64) { - run_to_block(System::block_number() + n); -} - /// Progresses from the current block number (whatever that may be) to the `P * session_index + 1`. 
pub(crate) fn start_session(end_session_idx: SessionIndex) { let period = Period::get(); @@ -771,14 +766,18 @@ pub(crate) fn on_offence_now( on_offence_in_era(offenders, slash_fraction, now) } +pub(crate) fn offence_from( + offender: AccountId, + reporter: Option, +) -> OffenceDetails> { + OffenceDetails { + offender: (offender, ()), + reporters: reporter.map(|r| vec![(r)]).unwrap_or_default(), + } +} + pub(crate) fn add_slash(who: &AccountId) { - on_offence_now( - &[OffenceDetails { - offender: (*who, Staking::eras_stakers(active_era(), who)), - reporters: vec![], - }], - &[Perbill::from_percent(10)], - ); + on_offence_now(&[offence_from(*who, None)], &[Perbill::from_percent(10)]); } /// Make all validator and nominator request their payment diff --git a/substrate/frame/staking/src/pallet/impls.rs b/substrate/frame/staking/src/pallet/impls.rs index 3d8c8365cd45e..c60eb5524491e 100644 --- a/substrate/frame/staking/src/pallet/impls.rs +++ b/substrate/frame/staking/src/pallet/impls.rs @@ -50,10 +50,9 @@ use sp_staking::{ use crate::{ asset, election_size_tracker::StaticTracker, log, slashing, weights::WeightInfo, ActiveEraInfo, - BalanceOf, EraInfo, EraPayout, Exposure, ExposureOf, Forcing, IndividualExposure, - LedgerIntegrityState, MaxNominationsOf, MaxWinnersOf, Nominations, NominationsQuota, - PositiveImbalanceOf, RewardDestination, SessionInterface, StakingLedger, ValidatorPrefs, - STAKING_ID, + BalanceOf, EraInfo, EraPayout, Exposure, Forcing, IndividualExposure, LedgerIntegrityState, + MaxNominationsOf, MaxWinnersOf, Nominations, NominationsQuota, PositiveImbalanceOf, + RewardDestination, SessionInterface, StakingLedger, ValidatorPrefs, STAKING_ID, }; use alloc::{boxed::Box, vec, vec::Vec}; @@ -1617,6 +1616,23 @@ impl historical::SessionManager historical::SessionManager for Pallet { + fn new_session(new_index: SessionIndex) -> Option> { + >::new_session(new_index) + .map(|validators| validators.into_iter().map(|v| (v, ())).collect()) + } + fn 
new_session_genesis(new_index: SessionIndex) -> Option> { + >::new_session_genesis(new_index) + .map(|validators| validators.into_iter().map(|v| (v, ())).collect()) + } + fn start_session(start_index: SessionIndex) { + >::start_session(start_index) + } + fn end_session(end_index: SessionIndex) { + >::end_session(end_index) + } +} + /// Add reward points to block authors: /// * 20 points to the block producer for producing a (non-uncle) block, impl pallet_authorship::EventHandler> for Pallet @@ -1634,10 +1650,7 @@ impl for Pallet where T: pallet_session::Config::AccountId>, - T: pallet_session::historical::Config< - FullIdentification = Exposure<::AccountId, BalanceOf>, - FullIdentificationOf = ExposureOf, - >, + T: pallet_session::historical::Config, T::SessionHandler: pallet_session::SessionHandler<::AccountId>, T::SessionManager: pallet_session::SessionManager<::AccountId>, T::ValidatorIdOf: Convert< diff --git a/substrate/frame/staking/src/tests.rs b/substrate/frame/staking/src/tests.rs index 9384aa900b899..5ccea0df5db6c 100644 --- a/substrate/frame/staking/src/tests.rs +++ b/substrate/frame/staking/src/tests.rs @@ -390,9 +390,9 @@ fn rewards_should_work() { ); assert_eq_error_rate!( asset::total_balance::(&101), - init_balance_101 + - part_for_101_from_11 * total_payout_0 * 2 / 3 + - part_for_101_from_21 * total_payout_0 * 1 / 3, + init_balance_101 + + part_for_101_from_11 * total_payout_0 * 2 / 3 + + part_for_101_from_21 * total_payout_0 * 1 / 3, 2 ); @@ -430,9 +430,9 @@ fn rewards_should_work() { ); assert_eq_error_rate!( asset::total_balance::(&101), - init_balance_101 + - part_for_101_from_11 * (total_payout_0 * 2 / 3 + total_payout_1) + - part_for_101_from_21 * total_payout_0 * 1 / 3, + init_balance_101 + + part_for_101_from_11 * (total_payout_0 * 2 / 3 + total_payout_1) + + part_for_101_from_21 * total_payout_0 * 1 / 3, 2 ); }); @@ -752,10 +752,7 @@ fn nominators_also_get_slashed_pro_rata() { let exposed_nominator = 
initial_exposure.others.first().unwrap().value; // 11 goes offline - on_offence_now( - &[OffenceDetails { offender: (11, initial_exposure.clone()), reporters: vec![] }], - &[slash_percent], - ); + on_offence_now(&[offence_from(11, None)], &[slash_percent]); // both stakes must have been decreased. assert!(Staking::ledger(101.into()).unwrap().active < nominator_stake); @@ -2431,13 +2428,7 @@ fn reward_validator_slashing_validator_does_not_overflow() { ); // Check slashing - on_offence_now( - &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), &11)), - reporters: vec![], - }], - &[Perbill::from_percent(100)], - ); + on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(100)]); assert_eq!(asset::stakeable_balance::(&11), stake - 1); assert_eq!(asset::stakeable_balance::(&2), 1); @@ -2530,13 +2521,7 @@ fn era_is_always_same_length() { #[test] fn offence_doesnt_force_new_era() { ExtBuilder::default().build_and_execute(|| { - on_offence_now( - &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), &11)), - reporters: vec![], - }], - &[Perbill::from_percent(5)], - ); + on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(5)]); assert_eq!(ForceEra::::get(), Forcing::NotForcing); }); @@ -2548,13 +2533,7 @@ fn offence_ensures_new_era_without_clobbering() { assert_ok!(Staking::force_new_era_always(RuntimeOrigin::root())); assert_eq!(ForceEra::::get(), Forcing::ForceAlways); - on_offence_now( - &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), &11)), - reporters: vec![], - }], - &[Perbill::from_percent(5)], - ); + on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(5)]); assert_eq!(ForceEra::::get(), Forcing::ForceAlways); }); @@ -2572,13 +2551,7 @@ fn offence_deselects_validator_even_when_slash_is_zero() { assert!(Session::validators().contains(&11)); assert!(>::contains_key(11)); - on_offence_now( - &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), &11)), - 
reporters: vec![], - }], - &[Perbill::from_percent(0)], - ); + on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(0)]); assert_eq!(ForceEra::::get(), Forcing::NotForcing); assert!(is_disabled(11)); @@ -2598,13 +2571,7 @@ fn slashing_performed_according_exposure() { assert_eq!(Staking::eras_stakers(active_era(), &11).own, 1000); // Handle an offence with a historical exposure. - on_offence_now( - &[OffenceDetails { - offender: (11, Exposure { total: 500, own: 500, others: vec![] }), - reporters: vec![], - }], - &[Perbill::from_percent(50)], - ); + on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(50)]); // The stash account should be slashed for 250 (50% of 500). assert_eq!(asset::stakeable_balance::(&11), 1000 - 250); @@ -2622,13 +2589,7 @@ fn validator_is_not_disabled_for_an_offence_in_previous_era() { assert!(>::contains_key(11)); assert!(Session::validators().contains(&11)); - on_offence_now( - &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), &11)), - reporters: vec![], - }], - &[Perbill::from_percent(0)], - ); + on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(0)]); assert_eq!(ForceEra::::get(), Forcing::NotForcing); assert!(is_disabled(11)); @@ -2644,14 +2605,7 @@ fn validator_is_not_disabled_for_an_offence_in_previous_era() { mock::start_active_era(3); // an offence committed in era 1 is reported in era 3 - on_offence_in_era( - &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), &11)), - reporters: vec![], - }], - &[Perbill::from_percent(0)], - 1, - ); + on_offence_in_era(&[offence_from(11, None)], &[Perbill::from_percent(0)], 1); // the validator doesn't get disabled for an old offence assert!(Validators::::iter().any(|(stash, _)| stash == 11)); @@ -2661,10 +2615,7 @@ fn validator_is_not_disabled_for_an_offence_in_previous_era() { assert_eq!(ForceEra::::get(), Forcing::NotForcing); on_offence_in_era( - &[OffenceDetails { - offender: (11, 
Staking::eras_stakers(active_era(), &11)), - reporters: vec![], - }], + &[offence_from(11, None)], // NOTE: A 100% slash here would clean up the account, causing de-registration. &[Perbill::from_percent(95)], 1, @@ -2688,13 +2639,7 @@ fn reporters_receive_their_slice() { assert_eq!(Staking::eras_stakers(active_era(), &11).total, initial_balance); - on_offence_now( - &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), &11)), - reporters: vec![1, 2], - }], - &[Perbill::from_percent(50)], - ); + on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(50)]); // F1 * (reward_proportion * slash - 0) // 50% * (10% * initial_balance / 2) @@ -2715,26 +2660,14 @@ fn subsequent_reports_in_same_span_pay_out_less() { assert_eq!(Staking::eras_stakers(active_era(), &11).total, initial_balance); - on_offence_now( - &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), &11)), - reporters: vec![1], - }], - &[Perbill::from_percent(20)], - ); + on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(20)]); // F1 * (reward_proportion * slash - 0) // 50% * (10% * initial_balance * 20%) let reward = (initial_balance / 5) / 20; assert_eq!(asset::total_balance::(&1), 10 + reward); - on_offence_now( - &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), &11)), - reporters: vec![1], - }], - &[Perbill::from_percent(50)], - ); + on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(50)]); let prior_payout = reward; @@ -2762,16 +2695,7 @@ fn invulnerables_are_not_slashed() { .collect(); on_offence_now( - &[ - OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), &11)), - reporters: vec![], - }, - OffenceDetails { - offender: (21, Staking::eras_stakers(active_era(), &21)), - reporters: vec![], - }, - ], + &[offence_from(11, None), offence_from(21, None)], &[Perbill::from_percent(50), Perbill::from_percent(20)], ); @@ -2796,13 +2720,7 @@ fn dont_slash_if_fraction_is_zero() { 
ExtBuilder::default().build_and_execute(|| { assert_eq!(asset::stakeable_balance::(&11), 1000); - on_offence_now( - &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), &11)), - reporters: vec![], - }], - &[Perbill::from_percent(0)], - ); + on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(0)]); // The validator hasn't been slashed. The new era is not forced. assert_eq!(asset::stakeable_balance::(&11), 1000); @@ -2817,36 +2735,18 @@ fn only_slash_for_max_in_era() { ExtBuilder::default().build_and_execute(|| { assert_eq!(asset::stakeable_balance::(&11), 1000); - on_offence_now( - &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), &11)), - reporters: vec![], - }], - &[Perbill::from_percent(50)], - ); + on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(50)]); // The validator has been slashed and has been force-chilled. assert_eq!(asset::stakeable_balance::(&11), 500); assert_eq!(ForceEra::::get(), Forcing::NotForcing); - on_offence_now( - &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), &11)), - reporters: vec![], - }], - &[Perbill::from_percent(25)], - ); + on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(25)]); // The validator has not been slashed additionally. assert_eq!(asset::stakeable_balance::(&11), 500); - on_offence_now( - &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), &11)), - reporters: vec![], - }], - &[Perbill::from_percent(60)], - ); + on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(60)]); // The validator got slashed 10% more. 
assert_eq!(asset::stakeable_balance::(&11), 400); @@ -2862,25 +2762,13 @@ fn garbage_collection_after_slashing() { .build_and_execute(|| { assert_eq!(asset::stakeable_balance::(&11), 2000); - on_offence_now( - &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), &11)), - reporters: vec![], - }], - &[Perbill::from_percent(10)], - ); + on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(10)]); assert_eq!(asset::stakeable_balance::(&11), 2000 - 200); assert!(SlashingSpans::::get(&11).is_some()); assert_eq!(SpanSlash::::get(&(11, 0)).amount(), &200); - on_offence_now( - &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), &11)), - reporters: vec![], - }], - &[Perbill::from_percent(100)], - ); + on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(100)]); // validator and nominator slash in era are garbage-collected by era change, // so we don't test those here. @@ -2918,13 +2806,7 @@ fn garbage_collection_on_window_pruning() { assert_eq!(asset::stakeable_balance::(&101), 2000); let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value; - on_offence_now( - &[OffenceDetails { - offender: (11, Staking::eras_stakers(now, &11)), - reporters: vec![], - }], - &[Perbill::from_percent(10)], - ); + on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(10)]); assert_eq!(asset::stakeable_balance::(&11), 900); assert_eq!(asset::stakeable_balance::(&101), 2000 - (nominated_value / 10)); @@ -2962,14 +2844,7 @@ fn slashing_nominators_by_span_max() { let nominated_value_11 = exposure_11.others.iter().find(|o| o.who == 101).unwrap().value; let nominated_value_21 = exposure_21.others.iter().find(|o| o.who == 101).unwrap().value; - on_offence_in_era( - &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), &11)), - reporters: vec![], - }], - &[Perbill::from_percent(10)], - 2, - ); + on_offence_in_era(&[offence_from(11, None)], &[Perbill::from_percent(10)], 2); 
assert_eq!(asset::stakeable_balance::(&11), 900); @@ -2989,10 +2864,7 @@ fn slashing_nominators_by_span_max() { // second slash: higher era, higher value, same span. on_offence_in_era( - &[OffenceDetails { - offender: (21, Staking::eras_stakers(active_era(), &21)), - reporters: vec![], - }], + &[offence_from(21, None)], &[Perbill::from_percent(30)], 3, ); @@ -3009,14 +2881,7 @@ fn slashing_nominators_by_span_max() { // third slash: in same era and on same validator as first, higher // in-era value, but lower slash value than slash 2. - on_offence_in_era( - &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), &11)), - reporters: vec![], - }], - &[Perbill::from_percent(20)], - 2, - ); + on_offence_in_era(&[offence_from(11, None)], &[Perbill::from_percent(20)], 2); // 11 was further slashed, but 21 and 101 were not. assert_eq!(asset::stakeable_balance::(&11), 800); @@ -3044,10 +2909,7 @@ fn slashes_are_summed_across_spans() { let get_span = |account| SlashingSpans::::get(&account).unwrap(); on_offence_now( - &[OffenceDetails { - offender: (21, Staking::eras_stakers(active_era(), &21)), - reporters: vec![], - }], + &[offence_from(21, None)], &[Perbill::from_percent(10)], ); @@ -3067,10 +2929,7 @@ fn slashes_are_summed_across_spans() { assert_eq!(Staking::slashable_balance_of(&21), 900); on_offence_now( - &[OffenceDetails { - offender: (21, Staking::eras_stakers(active_era(), &21)), - reporters: vec![], - }], + &[offence_from(21, None)], &[Perbill::from_percent(10)], ); @@ -3098,13 +2957,7 @@ fn deferred_slashes_are_deferred() { System::reset_events(); - on_offence_now( - &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), &11)), - reporters: vec![], - }], - &[Perbill::from_percent(10)], - ); + on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(10)]); // nominations are not removed regardless of the deferring. 
assert_eq!(Nominators::::get(101).unwrap().targets, vec![11, 21]); @@ -3147,16 +3000,13 @@ fn retroactive_deferred_slashes_two_eras_before() { ExtBuilder::default().slash_defer_duration(2).build_and_execute(|| { assert_eq!(BondingDuration::get(), 3); - mock::start_active_era(1); - let exposure_11_at_era1 = Staking::eras_stakers(active_era(), &11); - mock::start_active_era(3); assert_eq!(Nominators::::get(101).unwrap().targets, vec![11, 21]); System::reset_events(); on_offence_in_era( - &[OffenceDetails { offender: (11, exposure_11_at_era1), reporters: vec![] }], + &[offence_from(11, None)], &[Perbill::from_percent(10)], 1, // should be deferred for two full eras, and applied at the beginning of era 4. ); @@ -3180,9 +3030,6 @@ fn retroactive_deferred_slashes_one_before() { ExtBuilder::default().slash_defer_duration(2).build_and_execute(|| { assert_eq!(BondingDuration::get(), 3); - mock::start_active_era(1); - let exposure_11_at_era1 = Staking::eras_stakers(active_era(), &11); - // unbond at slash era. mock::start_active_era(2); assert_ok!(Staking::chill(RuntimeOrigin::signed(11))); @@ -3191,7 +3038,7 @@ fn retroactive_deferred_slashes_one_before() { mock::start_active_era(3); System::reset_events(); on_offence_in_era( - &[OffenceDetails { offender: (11, exposure_11_at_era1), reporters: vec![] }], + &[offence_from(11, None)], &[Perbill::from_percent(10)], 2, // should be deferred for two full eras, and applied at the beginning of era 5. 
); @@ -3231,13 +3078,7 @@ fn staker_cannot_bail_deferred_slash() { let exposure = Staking::eras_stakers(active_era(), &11); let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value; - on_offence_now( - &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), &11)), - reporters: vec![], - }], - &[Perbill::from_percent(10)], - ); + on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(10)]); // now we chill assert_ok!(Staking::chill(RuntimeOrigin::signed(101))); @@ -3308,7 +3149,7 @@ fn remove_deferred() { // deferred to start of era 4. on_offence_now( - &[OffenceDetails { offender: (11, exposure.clone()), reporters: vec![] }], + &[offence_from(11, None)], &[Perbill::from_percent(10)], ); @@ -3320,7 +3161,7 @@ fn remove_deferred() { // reported later, but deferred to start of era 4 as well. System::reset_events(); on_offence_in_era( - &[OffenceDetails { offender: (11, exposure.clone()), reporters: vec![] }], + &[offence_from(11, None)], &[Perbill::from_percent(15)], 1, ); @@ -3376,35 +3217,30 @@ fn remove_multi_deferred() { mock::start_active_era(1); assert_eq!(asset::stakeable_balance::(&11), 1000); - - let exposure = Staking::eras_stakers(active_era(), &11); assert_eq!(asset::stakeable_balance::(&101), 2000); on_offence_now( - &[OffenceDetails { offender: (11, exposure.clone()), reporters: vec![] }], + &[offence_from(11, None)], &[Perbill::from_percent(10)], ); on_offence_now( - &[OffenceDetails { - offender: (21, Staking::eras_stakers(active_era(), &21)), - reporters: vec![], - }], + &[offence_from(21, None)], &[Perbill::from_percent(10)], ); on_offence_now( - &[OffenceDetails { offender: (11, exposure.clone()), reporters: vec![] }], + &[offence_from(11, None)], &[Perbill::from_percent(25)], ); on_offence_now( - &[OffenceDetails { offender: (42, exposure.clone()), reporters: vec![] }], + &[offence_from(42, None)], &[Perbill::from_percent(25)], ); on_offence_now( - &[OffenceDetails { offender: (69, 
exposure.clone()), reporters: vec![] }], + &[offence_from(69, None)], &[Perbill::from_percent(25)], ); @@ -3462,7 +3298,7 @@ fn slash_kicks_validators_not_nominators_and_disables_nominator_for_kicked_valid assert_eq!(exposure_21.total, 1000 + 375); on_offence_now( - &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], + &[offence_from(11, None)], &[Perbill::from_percent(10)], ); @@ -3522,12 +3358,9 @@ fn non_slashable_offence_disables_validator() { mock::start_active_era(1); assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41, 51, 201, 202]); - let exposure_11 = Staking::eras_stakers(ActiveEra::::get().unwrap().index, &11); - let exposure_21 = Staking::eras_stakers(ActiveEra::::get().unwrap().index, &21); - // offence with no slash associated on_offence_now( - &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], + &[offence_from(11, None)], &[Perbill::zero()], ); @@ -3536,7 +3369,7 @@ fn non_slashable_offence_disables_validator() { // offence that slashes 25% of the bond on_offence_now( - &[OffenceDetails { offender: (21, exposure_21.clone()), reporters: vec![] }], + &[offence_from(21, None)], &[Perbill::from_percent(25)], ); @@ -3589,15 +3422,12 @@ fn slashing_independent_of_disabling_validator() { mock::start_active_era(1); assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41, 51]); - let exposure_11 = Staking::eras_stakers(ActiveEra::::get().unwrap().index, &11); - let exposure_21 = Staking::eras_stakers(ActiveEra::::get().unwrap().index, &21); - let now = ActiveEra::::get().unwrap().index; // --- Disable without a slash --- // offence with no slash associated on_offence_in_era( - &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], + &[offence_from(11, None)], &[Perbill::zero()], now, ); @@ -3611,14 +3441,14 @@ fn slashing_independent_of_disabling_validator() { // --- Slash without disabling --- // offence that slashes 50% of the bond (setup for next slash) 
on_offence_in_era( - &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], + &[offence_from(11, None)], &[Perbill::from_percent(50)], now, ); // offence that slashes 25% of the bond but does not disable on_offence_in_era( - &[OffenceDetails { offender: (21, exposure_21.clone()), reporters: vec![] }], + &[offence_from(21, None)], &[Perbill::from_percent(25)], now, ); @@ -3687,13 +3517,8 @@ fn offence_threshold_doesnt_trigger_new_era() { // we have 4 validators and an offending validator threshold of 1/3, // even if the third validator commits an offence a new era should not be forced - - let exposure_11 = Staking::eras_stakers(ActiveEra::::get().unwrap().index, &11); - let exposure_21 = Staking::eras_stakers(ActiveEra::::get().unwrap().index, &21); - let exposure_31 = Staking::eras_stakers(ActiveEra::::get().unwrap().index, &31); - on_offence_now( - &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], + &[offence_from(11, None)], &[Perbill::from_percent(50)], ); @@ -3703,7 +3528,7 @@ fn offence_threshold_doesnt_trigger_new_era() { assert_eq!(ForceEra::::get(), Forcing::NotForcing); on_offence_now( - &[OffenceDetails { offender: (21, exposure_21.clone()), reporters: vec![] }], + &[offence_from(21, None)], &[Perbill::zero()], ); @@ -3714,7 +3539,7 @@ fn offence_threshold_doesnt_trigger_new_era() { assert_eq!(ForceEra::::get(), Forcing::NotForcing); on_offence_now( - &[OffenceDetails { offender: (31, exposure_31.clone()), reporters: vec![] }], + &[offence_from(31, None)], &[Perbill::zero()], ); @@ -3738,11 +3563,8 @@ fn disabled_validators_are_kept_disabled_for_whole_era() { assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41, 51, 201, 202]); assert_eq!(::SessionsPerEra::get(), 3); - let exposure_11 = Staking::eras_stakers(ActiveEra::::get().unwrap().index, &11); - let exposure_21 = Staking::eras_stakers(ActiveEra::::get().unwrap().index, &21); - on_offence_now( - &[OffenceDetails { offender: (21, 
exposure_21.clone()), reporters: vec![] }], + &[offence_from(21, None)], &[Perbill::from_percent(25)], ); @@ -3759,7 +3581,7 @@ fn disabled_validators_are_kept_disabled_for_whole_era() { // validator 11 commits an offence on_offence_now( - &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], + &[offence_from(11, None)], &[Perbill::from_percent(25)], ); @@ -3877,12 +3699,10 @@ fn zero_slash_keeps_nominators() { mock::start_active_era(1); assert_eq!(asset::stakeable_balance::(&11), 1000); - - let exposure = Staking::eras_stakers(active_era(), &11); assert_eq!(asset::stakeable_balance::(&101), 2000); on_offence_now( - &[OffenceDetails { offender: (11, exposure.clone()), reporters: vec![] }], + &[offence_from(11, None)], &[Perbill::from_percent(0)], ); @@ -4927,7 +4747,7 @@ fn offences_weight_calculated_correctly() { >, > = (1..10) .map(|i| OffenceDetails { - offender: (i, Staking::eras_stakers(active_era(), &i)), + offender: (i, ()), reporters: vec![], }) .collect(); @@ -4941,10 +4761,7 @@ fn offences_weight_calculated_correctly() { ); // On Offence with one offenders, Applied - let one_offender = [OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), &11)), - reporters: vec![1], - }]; + let one_offender = [offence_from(11, Some(1))]; let n = 1; // Number of offenders let rw = 3 + 3 * n; // rw reads and writes @@ -7073,8 +6890,8 @@ fn test_validator_exposure_is_backward_compatible_with_non_paged_rewards_payout( let actual_exposure_page_1 = ErasStakersPaged::::get((1, 11, 1)).unwrap(); expected_individual_exposures.iter().for_each(|exposure| { assert!( - actual_exposure_page_0.others.contains(exposure) || - actual_exposure_page_1.others.contains(exposure) + actual_exposure_page_0.others.contains(exposure) + || actual_exposure_page_1.others.contains(exposure) ); }); assert_eq!( @@ -7293,13 +7110,7 @@ mod staking_interface { #[test] fn do_withdraw_unbonded_with_wrong_slash_spans_works_as_expected() { 
ExtBuilder::default().build_and_execute(|| { - on_offence_now( - &[OffenceDetails { - offender: (11, Staking::eras_stakers(active_era(), &11)), - reporters: vec![], - }], - &[Perbill::from_percent(100)], - ); + on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(100)]); assert_eq!(Staking::bonded(&11), Some(11)); @@ -7584,10 +7395,7 @@ mod staking_unchecked { // 11 goes offline on_offence_now( - &[OffenceDetails { - offender: (11, initial_exposure.clone()), - reporters: vec![], - }], + &[offence_from(11, None)], &[slash_percent], ); @@ -7656,10 +7464,7 @@ mod staking_unchecked { // 11 goes offline on_offence_now( - &[OffenceDetails { - offender: (11, initial_exposure.clone()), - reporters: vec![], - }], + &[offence_from(11, None)], &[slash_percent], ); @@ -8800,17 +8605,13 @@ fn reenable_lower_offenders_mock() { mock::start_active_era(1); assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41, 51, 201, 202]); - let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, &11); - let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, &21); - let exposure_31 = Staking::eras_stakers(Staking::active_era().unwrap().index, &31); - // offence with a low slash on_offence_now( - &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], + &[offence_from(11, None)], &[Perbill::from_percent(10)], ); on_offence_now( - &[OffenceDetails { offender: (21, exposure_21.clone()), reporters: vec![] }], + &[offence_from(21, None)], &[Perbill::from_percent(20)], ); @@ -8823,7 +8624,7 @@ fn reenable_lower_offenders_mock() { // offence with a higher slash on_offence_now( - &[OffenceDetails { offender: (31, exposure_31.clone()), reporters: vec![] }], + &[offence_from(31, None)], &[Perbill::from_percent(50)], ); @@ -8887,17 +8688,13 @@ fn do_not_reenable_higher_offenders_mock() { mock::start_active_era(1); assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41, 51, 201, 202]); - let exposure_11 = 
Staking::eras_stakers(Staking::active_era().unwrap().index, &11); - let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, &21); - let exposure_31 = Staking::eras_stakers(Staking::active_era().unwrap().index, &31); - // offence with a major slash on_offence_now( - &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], + &[offence_from(11, None)], &[Perbill::from_percent(50)], ); on_offence_now( - &[OffenceDetails { offender: (21, exposure_21.clone()), reporters: vec![] }], + &[offence_from(21, None)], &[Perbill::from_percent(50)], ); @@ -8907,7 +8704,7 @@ fn do_not_reenable_higher_offenders_mock() { // offence with a minor slash on_offence_now( - &[OffenceDetails { offender: (31, exposure_31.clone()), reporters: vec![] }], + &[offence_from(31, None)], &[Perbill::from_percent(10)], ); @@ -9699,8 +9496,8 @@ fn manual_slashing_works() { let expected_balance_1 = initial_balance - (initial_balance / 4); // 25% slash assert!( - balance_after_first_slash <= expected_balance_1 && - balance_after_first_slash >= expected_balance_1 - 5, + balance_after_first_slash <= expected_balance_1 + && balance_after_first_slash >= expected_balance_1 - 5, "First slash was not applied correctly. Expected around {}, got {}", expected_balance_1, balance_after_first_slash @@ -9752,8 +9549,8 @@ fn manual_slashing_works() { let expected_balance_3 = initial_balance / 2; // 50% of original assert!( - balance_after_third_slash <= expected_balance_3 && - balance_after_third_slash >= expected_balance_3 - 5, + balance_after_third_slash <= expected_balance_3 + && balance_after_third_slash >= expected_balance_3 - 5, "Third slash was not applied correctly. 
Expected around {}, got {}", expected_balance_3, balance_after_third_slash @@ -9795,8 +9592,8 @@ fn manual_slashing_works() { let expected_balance_5 = initial_balance / 4; // 25% of original (75% slashed) assert!( - balance_after_fifth_slash <= expected_balance_5 && - balance_after_fifth_slash >= expected_balance_5 - 5, + balance_after_fifth_slash <= expected_balance_5 + && balance_after_fifth_slash >= expected_balance_5 - 5, "Fifth slash was not applied correctly. Expected around {}, got {}", expected_balance_5, balance_after_fifth_slash diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/outer_enums.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/outer_enums.rs index 23e240d6dbe11..1495bd210127b 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/outer_enums.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/outer_enums.rs @@ -161,7 +161,7 @@ pub fn expand_outer_enum( #scrate::__private::codec::Decode, #scrate::__private::codec::DecodeWithMemTracking, #scrate::__private::scale_info::TypeInfo, - #scrate::__private::RuntimeDebug, + #scrate::__private::Debug, )] #[allow(non_camel_case_types)] pub enum #enum_name_ident { diff --git a/substrate/frame/support/procedural/src/pallet/expand/event.rs b/substrate/frame/support/procedural/src/pallet/expand/event.rs index 7759500a61c59..8ebf077d0925d 100644 --- a/substrate/frame/support/procedural/src/pallet/expand/event.rs +++ b/substrate/frame/support/procedural/src/pallet/expand/event.rs @@ -120,7 +120,7 @@ pub fn expand_event(def: &mut Def) -> proc_macro2::TokenStream { #frame_support::CloneNoBound, #frame_support::EqNoBound, #frame_support::PartialEqNoBound, - #frame_support::RuntimeDebugNoBound, + #frame_support::DebugNoBound, #frame_support::__private::codec::Encode, #frame_support::__private::codec::Decode, #frame_support::__private::codec::DecodeWithMemTracking, diff --git a/substrate/frame/support/src/lib.rs 
b/substrate/frame/support/src/lib.rs index 4074f4d440996..86c7330d275de 100644 --- a/substrate/frame/support/src/lib.rs +++ b/substrate/frame/support/src/lib.rs @@ -43,6 +43,7 @@ extern crate alloc; pub mod __private { pub use alloc::{ boxed::Box, + fmt::Debug, rc::Rc, string::String, vec, diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr index 13d94e542850c..b468b8647ca19 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr @@ -237,12 +237,14 @@ error[E0277]: the trait bound `Runtime: Config` is not satisfied ... | 27 | | } 28 | | } - | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `frame_system::Event: std::fmt::Debug` + | |_^ the trait `Config` is not implemented for `Runtime` | = help: the trait `std::fmt::Debug` is implemented for `frame_system::Event` = note: required for `frame_system::Event` to implement `std::fmt::Debug` - = note: required for the cast from `&frame_system::Event` to `&dyn std::fmt::Debug` - = note: this error originates in the derive macro `self::sp_api_hidden_includes_construct_runtime::hidden_include::__private::RuntimeDebug` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: 1 redundant requirement hidden + = note: required for `&frame_system::Event` to implement `std::fmt::Debug` + = note: required for the cast from `&&frame_system::Event` to `&dyn std::fmt::Debug` + = note: this error originates in the derive macro `self::sp_api_hidden_includes_construct_runtime::hidden_include::__private::Debug` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 @@ -254,12 +256,14 @@ 
error[E0277]: the trait bound `Runtime: Config` is not satisfied ... | 27 | | } 28 | | } - | |_^ the trait `Config` is not implemented for `Runtime`, which is required by `frame_system::Error: std::fmt::Debug` + | |_^ the trait `Config` is not implemented for `Runtime` | = help: the trait `std::fmt::Debug` is implemented for `frame_system::Error` = note: required for `frame_system::Error` to implement `std::fmt::Debug` - = note: required for the cast from `&frame_system::Error` to `&dyn std::fmt::Debug` - = note: this error originates in the derive macro `self::sp_api_hidden_includes_construct_runtime::hidden_include::__private::RuntimeDebug` (in Nightly builds, run with -Z macro-backtrace for more info) + = note: 1 redundant requirement hidden + = note: required for `&frame_system::Error` to implement `std::fmt::Debug` + = note: required for the cast from `&&frame_system::Error` to `&dyn std::fmt::Debug` + = note: this error originates in the derive macro `self::sp_api_hidden_includes_construct_runtime::hidden_include::__private::Debug` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `Runtime: Config` is not satisfied --> tests/construct_runtime_ui/deprecated_where_block.rs:20:1 diff --git a/substrate/utils/frame/benchmarking-cli/src/pallet/mod.rs b/substrate/utils/frame/benchmarking-cli/src/pallet/mod.rs index 54a055d4a33f9..caa999c3a6c58 100644 --- a/substrate/utils/frame/benchmarking-cli/src/pallet/mod.rs +++ b/substrate/utils/frame/benchmarking-cli/src/pallet/mod.rs @@ -50,7 +50,7 @@ pub struct PalletCmd { #[arg(short, long, value_parser = parse_pallet_name, required_unless_present_any = ["list", "json_input", "all"], default_value_if("all", "true", Some("*".into())))] pub pallet: Option, - /// Select an extrinsic inside the pallet to benchmark, or `*` for all. + /// Select an extrinsic inside the pallet to benchmark, or `*` or 'all' for all. 
#[arg(short, long, required_unless_present_any = ["list", "json_input", "all"], default_value_if("all", "true", Some("*".into())))] pub extrinsic: Option, From 755aacbfd8ee5ad2f777e9ba4f6baaddaf4215f2 Mon Sep 17 00:00:00 2001 From: Ankan Date: Mon, 17 Mar 2025 01:55:30 +0100 Subject: [PATCH 10/38] fmt --- substrate/frame/staking/src/tests.rs | 183 +++++++-------------------- 1 file changed, 44 insertions(+), 139 deletions(-) diff --git a/substrate/frame/staking/src/tests.rs b/substrate/frame/staking/src/tests.rs index 5ccea0df5db6c..76e010da0f36e 100644 --- a/substrate/frame/staking/src/tests.rs +++ b/substrate/frame/staking/src/tests.rs @@ -390,9 +390,9 @@ fn rewards_should_work() { ); assert_eq_error_rate!( asset::total_balance::(&101), - init_balance_101 - + part_for_101_from_11 * total_payout_0 * 2 / 3 - + part_for_101_from_21 * total_payout_0 * 1 / 3, + init_balance_101 + + part_for_101_from_11 * total_payout_0 * 2 / 3 + + part_for_101_from_21 * total_payout_0 * 1 / 3, 2 ); @@ -430,9 +430,9 @@ fn rewards_should_work() { ); assert_eq_error_rate!( asset::total_balance::(&101), - init_balance_101 - + part_for_101_from_11 * (total_payout_0 * 2 / 3 + total_payout_1) - + part_for_101_from_21 * total_payout_0 * 1 / 3, + init_balance_101 + + part_for_101_from_11 * (total_payout_0 * 2 / 3 + total_payout_1) + + part_for_101_from_21 * total_payout_0 * 1 / 3, 2 ); }); @@ -2863,11 +2863,7 @@ fn slashing_nominators_by_span_max() { assert_eq!(get_span(101).iter().collect::>(), expected_spans); // second slash: higher era, higher value, same span. - on_offence_in_era( - &[offence_from(21, None)], - &[Perbill::from_percent(30)], - 3, - ); + on_offence_in_era(&[offence_from(21, None)], &[Perbill::from_percent(30)], 3); // 11 was not further slashed, but 21 and 101 were. 
assert_eq!(asset::stakeable_balance::(&11), 900); @@ -2908,10 +2904,7 @@ fn slashes_are_summed_across_spans() { let get_span = |account| SlashingSpans::::get(&account).unwrap(); - on_offence_now( - &[offence_from(21, None)], - &[Perbill::from_percent(10)], - ); + on_offence_now(&[offence_from(21, None)], &[Perbill::from_percent(10)]); let expected_spans = vec![ slashing::SlashingSpan { index: 1, start: 4, length: None }, @@ -2928,10 +2921,7 @@ fn slashes_are_summed_across_spans() { assert_eq!(Staking::slashable_balance_of(&21), 900); - on_offence_now( - &[offence_from(21, None)], - &[Perbill::from_percent(10)], - ); + on_offence_now(&[offence_from(21, None)], &[Perbill::from_percent(10)]); let expected_spans = vec![ slashing::SlashingSpan { index: 2, start: 5, length: None }, @@ -3148,10 +3138,7 @@ fn remove_deferred() { let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value; // deferred to start of era 4. - on_offence_now( - &[offence_from(11, None)], - &[Perbill::from_percent(10)], - ); + on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(10)]); assert_eq!(asset::stakeable_balance::(&11), 1000); assert_eq!(asset::stakeable_balance::(&101), 2000); @@ -3160,11 +3147,7 @@ fn remove_deferred() { // reported later, but deferred to start of era 4 as well. 
System::reset_events(); - on_offence_in_era( - &[offence_from(11, None)], - &[Perbill::from_percent(15)], - 1, - ); + on_offence_in_era(&[offence_from(11, None)], &[Perbill::from_percent(15)], 1); // fails if empty assert_noop!( @@ -3219,30 +3202,15 @@ fn remove_multi_deferred() { assert_eq!(asset::stakeable_balance::(&11), 1000); assert_eq!(asset::stakeable_balance::(&101), 2000); - on_offence_now( - &[offence_from(11, None)], - &[Perbill::from_percent(10)], - ); + on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(10)]); - on_offence_now( - &[offence_from(21, None)], - &[Perbill::from_percent(10)], - ); + on_offence_now(&[offence_from(21, None)], &[Perbill::from_percent(10)]); - on_offence_now( - &[offence_from(11, None)], - &[Perbill::from_percent(25)], - ); + on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(25)]); - on_offence_now( - &[offence_from(42, None)], - &[Perbill::from_percent(25)], - ); + on_offence_now(&[offence_from(42, None)], &[Perbill::from_percent(25)]); - on_offence_now( - &[offence_from(69, None)], - &[Perbill::from_percent(25)], - ); + on_offence_now(&[offence_from(69, None)], &[Perbill::from_percent(25)]); assert_eq!(UnappliedSlashes::::get(&4).len(), 5); @@ -3297,10 +3265,7 @@ fn slash_kicks_validators_not_nominators_and_disables_nominator_for_kicked_valid assert_eq!(exposure_11.total, 1000 + 125); assert_eq!(exposure_21.total, 1000 + 375); - on_offence_now( - &[offence_from(11, None)], - &[Perbill::from_percent(10)], - ); + on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(10)]); assert_eq!( staking_events_since_last_call(), @@ -3359,19 +3324,13 @@ fn non_slashable_offence_disables_validator() { assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41, 51, 201, 202]); // offence with no slash associated - on_offence_now( - &[offence_from(11, None)], - &[Perbill::zero()], - ); + on_offence_now(&[offence_from(11, None)], &[Perbill::zero()]); // it does NOT affect the nominator. 
assert_eq!(Nominators::::get(101).unwrap().targets, vec![11, 21]); // offence that slashes 25% of the bond - on_offence_now( - &[offence_from(21, None)], - &[Perbill::from_percent(25)], - ); + on_offence_now(&[offence_from(21, None)], &[Perbill::from_percent(25)]); // it DOES NOT affect the nominator. assert_eq!(Nominators::::get(101).unwrap().targets, vec![11, 21]); @@ -3426,11 +3385,7 @@ fn slashing_independent_of_disabling_validator() { // --- Disable without a slash --- // offence with no slash associated - on_offence_in_era( - &[offence_from(11, None)], - &[Perbill::zero()], - now, - ); + on_offence_in_era(&[offence_from(11, None)], &[Perbill::zero()], now); // nomination remains untouched. assert_eq!(Nominators::::get(101).unwrap().targets, vec![11, 21]); @@ -3440,18 +3395,10 @@ fn slashing_independent_of_disabling_validator() { // --- Slash without disabling --- // offence that slashes 50% of the bond (setup for next slash) - on_offence_in_era( - &[offence_from(11, None)], - &[Perbill::from_percent(50)], - now, - ); + on_offence_in_era(&[offence_from(11, None)], &[Perbill::from_percent(50)], now); // offence that slashes 25% of the bond but does not disable - on_offence_in_era( - &[offence_from(21, None)], - &[Perbill::from_percent(25)], - now, - ); + on_offence_in_era(&[offence_from(21, None)], &[Perbill::from_percent(25)], now); // nomination remains untouched. 
assert_eq!(Nominators::::get(101).unwrap().targets, vec![11, 21]); @@ -3517,20 +3464,14 @@ fn offence_threshold_doesnt_trigger_new_era() { // we have 4 validators and an offending validator threshold of 1/3, // even if the third validator commits an offence a new era should not be forced - on_offence_now( - &[offence_from(11, None)], - &[Perbill::from_percent(50)], - ); + on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(50)]); // 11 should be disabled because the byzantine threshold is 1 assert!(is_disabled(11)); assert_eq!(ForceEra::::get(), Forcing::NotForcing); - on_offence_now( - &[offence_from(21, None)], - &[Perbill::zero()], - ); + on_offence_now(&[offence_from(21, None)], &[Perbill::zero()]); // 21 should not be disabled because the number of disabled validators will be above the // byzantine threshold @@ -3538,10 +3479,7 @@ fn offence_threshold_doesnt_trigger_new_era() { assert_eq!(ForceEra::::get(), Forcing::NotForcing); - on_offence_now( - &[offence_from(31, None)], - &[Perbill::zero()], - ); + on_offence_now(&[offence_from(31, None)], &[Perbill::zero()]); // same for 31 assert!(!is_disabled(31)); @@ -3563,10 +3501,7 @@ fn disabled_validators_are_kept_disabled_for_whole_era() { assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41, 51, 201, 202]); assert_eq!(::SessionsPerEra::get(), 3); - on_offence_now( - &[offence_from(21, None)], - &[Perbill::from_percent(25)], - ); + on_offence_now(&[offence_from(21, None)], &[Perbill::from_percent(25)]); // nominations are not updated. assert_eq!(Nominators::::get(101).unwrap().targets, vec![11, 21]); @@ -3580,10 +3515,7 @@ fn disabled_validators_are_kept_disabled_for_whole_era() { assert!(is_disabled(21)); // validator 11 commits an offence - on_offence_now( - &[offence_from(11, None)], - &[Perbill::from_percent(25)], - ); + on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(25)]); // nominations are not updated. 
assert_eq!(Nominators::::get(101).unwrap().targets, vec![11, 21]); @@ -3701,10 +3633,7 @@ fn zero_slash_keeps_nominators() { assert_eq!(asset::stakeable_balance::(&11), 1000); assert_eq!(asset::stakeable_balance::(&101), 2000); - on_offence_now( - &[offence_from(11, None)], - &[Perbill::from_percent(0)], - ); + on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(0)]); assert_eq!(asset::stakeable_balance::(&11), 1000); assert_eq!(asset::stakeable_balance::(&101), 2000); @@ -6890,8 +6819,8 @@ fn test_validator_exposure_is_backward_compatible_with_non_paged_rewards_payout( let actual_exposure_page_1 = ErasStakersPaged::::get((1, 11, 1)).unwrap(); expected_individual_exposures.iter().for_each(|exposure| { assert!( - actual_exposure_page_0.others.contains(exposure) - || actual_exposure_page_1.others.contains(exposure) + actual_exposure_page_0.others.contains(exposure) || + actual_exposure_page_1.others.contains(exposure) ); }); assert_eq!( @@ -7394,10 +7323,7 @@ mod staking_unchecked { let exposed_nominator = initial_exposure.others.first().unwrap().value; // 11 goes offline - on_offence_now( - &[offence_from(11, None)], - &[slash_percent], - ); + on_offence_now(&[offence_from(11, None)], &[slash_percent]); let slash_amount = slash_percent * exposed_stake; let validator_share = @@ -7463,10 +7389,7 @@ mod staking_unchecked { let nominator_stake = Staking::ledger(101.into()).unwrap().total; // 11 goes offline - on_offence_now( - &[offence_from(11, None)], - &[slash_percent], - ); + on_offence_now(&[offence_from(11, None)], &[slash_percent]); // both stakes must have been decreased to 0. 
assert_eq!(Staking::ledger(101.into()).unwrap().active, 0); @@ -8606,14 +8529,8 @@ fn reenable_lower_offenders_mock() { assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41, 51, 201, 202]); // offence with a low slash - on_offence_now( - &[offence_from(11, None)], - &[Perbill::from_percent(10)], - ); - on_offence_now( - &[offence_from(21, None)], - &[Perbill::from_percent(20)], - ); + on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(10)]); + on_offence_now(&[offence_from(21, None)], &[Perbill::from_percent(20)]); // it does NOT affect the nominator. assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); @@ -8623,10 +8540,7 @@ fn reenable_lower_offenders_mock() { assert!(is_disabled(21)); // offence with a higher slash - on_offence_now( - &[offence_from(31, None)], - &[Perbill::from_percent(50)], - ); + on_offence_now(&[offence_from(31, None)], &[Perbill::from_percent(50)]); // First offender is no longer disabled assert!(!is_disabled(11)); @@ -8689,24 +8603,15 @@ fn do_not_reenable_higher_offenders_mock() { assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41, 51, 201, 202]); // offence with a major slash - on_offence_now( - &[offence_from(11, None)], - &[Perbill::from_percent(50)], - ); - on_offence_now( - &[offence_from(21, None)], - &[Perbill::from_percent(50)], - ); + on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(50)]); + on_offence_now(&[offence_from(21, None)], &[Perbill::from_percent(50)]); // both validators should be disabled assert!(is_disabled(11)); assert!(is_disabled(21)); // offence with a minor slash - on_offence_now( - &[offence_from(31, None)], - &[Perbill::from_percent(10)], - ); + on_offence_now(&[offence_from(31, None)], &[Perbill::from_percent(10)]); // First and second offenders are still disabled assert!(is_disabled(11)); @@ -9496,8 +9401,8 @@ fn manual_slashing_works() { let expected_balance_1 = initial_balance - (initial_balance / 4); // 25% slash assert!( - 
balance_after_first_slash <= expected_balance_1 - && balance_after_first_slash >= expected_balance_1 - 5, + balance_after_first_slash <= expected_balance_1 && + balance_after_first_slash >= expected_balance_1 - 5, "First slash was not applied correctly. Expected around {}, got {}", expected_balance_1, balance_after_first_slash @@ -9549,8 +9454,8 @@ fn manual_slashing_works() { let expected_balance_3 = initial_balance / 2; // 50% of original assert!( - balance_after_third_slash <= expected_balance_3 - && balance_after_third_slash >= expected_balance_3 - 5, + balance_after_third_slash <= expected_balance_3 && + balance_after_third_slash >= expected_balance_3 - 5, "Third slash was not applied correctly. Expected around {}, got {}", expected_balance_3, balance_after_third_slash @@ -9592,8 +9497,8 @@ fn manual_slashing_works() { let expected_balance_5 = initial_balance / 4; // 25% of original (75% slashed) assert!( - balance_after_fifth_slash <= expected_balance_5 - && balance_after_fifth_slash >= expected_balance_5 - 5, + balance_after_fifth_slash <= expected_balance_5 && + balance_after_fifth_slash >= expected_balance_5 - 5, "Fifth slash was not applied correctly. 
Expected around {}, got {}", expected_balance_5, balance_after_fifth_slash From 667d0590fc09de52c4cdc7a70ee739437db17511 Mon Sep 17 00:00:00 2001 From: Ankan Date: Mon, 17 Mar 2025 08:47:21 +0100 Subject: [PATCH 11/38] fix manual slash test --- substrate/frame/staking/src/pallet/impls.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/substrate/frame/staking/src/pallet/impls.rs b/substrate/frame/staking/src/pallet/impls.rs index c60eb5524491e..76bdb80647370 100644 --- a/substrate/frame/staking/src/pallet/impls.rs +++ b/substrate/frame/staking/src/pallet/impls.rs @@ -1296,7 +1296,7 @@ impl Pallet { for (details, slash_fraction) in offenders.zip(slash_fractions) { let stash = &details.offender; - let exposure = Self::eras_stakers(active_era, stash); + let exposure = Self::eras_stakers(slash_era, stash); // Skip if the validator is invulnerable. if invulnerables.contains(stash) { From 781e565babab4049ad01d5cf21c4d6d05a202bb5 Mon Sep 17 00:00:00 2001 From: Ankan Date: Mon, 17 Mar 2025 09:03:35 +0100 Subject: [PATCH 12/38] fix some more tests --- substrate/frame/staking/src/mock.rs | 7 +-- substrate/frame/staking/src/tests.rs | 71 +++++++++++++++------------- 2 files changed, 40 insertions(+), 38 deletions(-) diff --git a/substrate/frame/staking/src/mock.rs b/substrate/frame/staking/src/mock.rs index 384f23dc2c475..a759d4ab2c63f 100644 --- a/substrate/frame/staking/src/mock.rs +++ b/substrate/frame/staking/src/mock.rs @@ -768,12 +768,9 @@ pub(crate) fn on_offence_now( pub(crate) fn offence_from( offender: AccountId, - reporter: Option, + reporter: Option>, ) -> OffenceDetails> { - OffenceDetails { - offender: (offender, ()), - reporters: reporter.map(|r| vec![(r)]).unwrap_or_default(), - } + OffenceDetails { offender: (offender, ()), reporters: reporter.unwrap_or_default() } } pub(crate) fn add_slash(who: &AccountId) { diff --git a/substrate/frame/staking/src/tests.rs b/substrate/frame/staking/src/tests.rs index 76e010da0f36e..845dc446b87d0 100644 --- 
a/substrate/frame/staking/src/tests.rs +++ b/substrate/frame/staking/src/tests.rs @@ -2639,7 +2639,7 @@ fn reporters_receive_their_slice() { assert_eq!(Staking::eras_stakers(active_era(), &11).total, initial_balance); - on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(50)]); + on_offence_now(&[offence_from(11, Some(vec![1, 2]))], &[Perbill::from_percent(50)]); // F1 * (reward_proportion * slash - 0) // 50% * (10% * initial_balance / 2) @@ -3196,47 +3196,52 @@ fn remove_deferred() { #[test] fn remove_multi_deferred() { - ExtBuilder::default().slash_defer_duration(2).build_and_execute(|| { - mock::start_active_era(1); + ExtBuilder::default() + .slash_defer_duration(2) + .validator_count(4) + .set_status(41, StakerStatus::Validator) + .set_status(51, StakerStatus::Validator) + .build_and_execute(|| { + mock::start_active_era(1); - assert_eq!(asset::stakeable_balance::(&11), 1000); - assert_eq!(asset::stakeable_balance::(&101), 2000); + assert_eq!(asset::stakeable_balance::(&11), 1000); + assert_eq!(asset::stakeable_balance::(&101), 2000); - on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(10)]); + on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(10)]); - on_offence_now(&[offence_from(21, None)], &[Perbill::from_percent(10)]); + on_offence_now(&[offence_from(21, None)], &[Perbill::from_percent(10)]); - on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(25)]); + on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(25)]); - on_offence_now(&[offence_from(42, None)], &[Perbill::from_percent(25)]); + on_offence_now(&[offence_from(41, None)], &[Perbill::from_percent(25)]); - on_offence_now(&[offence_from(69, None)], &[Perbill::from_percent(25)]); + on_offence_now(&[offence_from(51, None)], &[Perbill::from_percent(25)]); - assert_eq!(UnappliedSlashes::::get(&4).len(), 5); + assert_eq!(UnappliedSlashes::::get(&4).len(), 5); - // fails if list is not sorted - assert_noop!( - 
Staking::cancel_deferred_slash(RuntimeOrigin::root(), 1, vec![2, 0, 4]), - Error::::NotSortedAndUnique - ); - // fails if list is not unique - assert_noop!( - Staking::cancel_deferred_slash(RuntimeOrigin::root(), 1, vec![0, 2, 2]), - Error::::NotSortedAndUnique - ); - // fails if bad index - assert_noop!( - Staking::cancel_deferred_slash(RuntimeOrigin::root(), 1, vec![1, 2, 3, 4, 5]), - Error::::InvalidSlashIndex - ); + // fails if list is not sorted + assert_noop!( + Staking::cancel_deferred_slash(RuntimeOrigin::root(), 1, vec![2, 0, 4]), + Error::::NotSortedAndUnique + ); + // fails if list is not unique + assert_noop!( + Staking::cancel_deferred_slash(RuntimeOrigin::root(), 1, vec![0, 2, 2]), + Error::::NotSortedAndUnique + ); + // fails if bad index + assert_noop!( + Staking::cancel_deferred_slash(RuntimeOrigin::root(), 1, vec![1, 2, 3, 4, 5]), + Error::::InvalidSlashIndex + ); - assert_ok!(Staking::cancel_deferred_slash(RuntimeOrigin::root(), 4, vec![0, 2, 4])); + assert_ok!(Staking::cancel_deferred_slash(RuntimeOrigin::root(), 4, vec![0, 2, 4])); - let slashes = UnappliedSlashes::::get(&4); - assert_eq!(slashes.len(), 2); - assert_eq!(slashes[0].validator, 21); - assert_eq!(slashes[1].validator, 42); - }) + let slashes = UnappliedSlashes::::get(&4); + assert_eq!(slashes.len(), 2); + assert_eq!(slashes[0].validator, 21); + assert_eq!(slashes[1].validator, 41); + }) } #[test] @@ -4690,7 +4695,7 @@ fn offences_weight_calculated_correctly() { ); // On Offence with one offenders, Applied - let one_offender = [offence_from(11, Some(1))]; + let one_offender = [offence_from(11, Some(vec![1]))]; let n = 1; // Number of offenders let rw = 3 + 3 * n; // rw reads and writes From d9ccb91342e4327bd4ee644ab17875ffbc072ef1 Mon Sep 17 00:00:00 2001 From: Tsvetomir Dimitrov Date: Mon, 17 Mar 2025 13:28:27 +0200 Subject: [PATCH 13/38] Fix the failing staking tests --- substrate/frame/staking/src/tests.rs | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) 
diff --git a/substrate/frame/staking/src/tests.rs b/substrate/frame/staking/src/tests.rs index 845dc446b87d0..d65e9fa066d4a 100644 --- a/substrate/frame/staking/src/tests.rs +++ b/substrate/frame/staking/src/tests.rs @@ -2573,8 +2573,8 @@ fn slashing_performed_according_exposure() { // Handle an offence with a historical exposure. on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(50)]); - // The stash account should be slashed for 250 (50% of 500). - assert_eq!(asset::stakeable_balance::(&11), 1000 - 250); + // The stash account should be slashed for 500 (50% of 1000). + assert_eq!(asset::stakeable_balance::(&11), 500); }); } @@ -2660,14 +2660,14 @@ fn subsequent_reports_in_same_span_pay_out_less() { assert_eq!(Staking::eras_stakers(active_era(), &11).total, initial_balance); - on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(20)]); + on_offence_now(&[offence_from(11, Some(vec![1]))], &[Perbill::from_percent(20)]); // F1 * (reward_proportion * slash - 0) // 50% * (10% * initial_balance * 20%) let reward = (initial_balance / 5) / 20; assert_eq!(asset::total_balance::(&1), 10 + reward); - on_offence_now(&[offence_from(11, None)], &[Perbill::from_percent(50)]); + on_offence_now(&[offence_from(11, Some(vec![1]))], &[Perbill::from_percent(50)]); let prior_payout = reward; @@ -4669,10 +4669,12 @@ fn offences_weight_calculated_correctly() { zero_offence_weight ); - // On Offence with N offenders, Unapplied: 4 Reads, 1 Write + 4 Reads, 5 Writes + // On Offence with N offenders, Unapplied: 4 Reads, 1 Write + 4 Reads, 5 Writes, 2 Reads + 2 + // Writes for `SessionInterface::report_offence` call. 
let n_offence_unapplied_weight = ::DbWeight::get() .reads_writes(4, 1) + - ::DbWeight::get().reads_writes(4, 5); + ::DbWeight::get().reads_writes(4, 5) + + ::DbWeight::get().reads_writes(2, 2); let offenders: Vec< OffenceDetails< @@ -4709,7 +4711,8 @@ fn offences_weight_calculated_correctly() { + ::DbWeight::get().reads_writes(6, 5) // `reward_cost` * reporters (1) + ::DbWeight::get().reads_writes(2, 2) - ; + // `SessionInterface::report_offence` + + ::DbWeight::get().reads_writes(2, 2); assert_eq!( >::on_offence( From 3ce2484cb6c357d401fd766a45782b9d250e1c04 Mon Sep 17 00:00:00 2001 From: Tsvetomir Dimitrov Date: Mon, 17 Mar 2025 13:56:04 +0200 Subject: [PATCH 14/38] Fix a compilation error in benchmarks --- substrate/frame/staking/src/benchmarking.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/substrate/frame/staking/src/benchmarking.rs b/substrate/frame/staking/src/benchmarking.rs index 41bfeed5b6de6..6551a8cfcf816 100644 --- a/substrate/frame/staking/src/benchmarking.rs +++ b/substrate/frame/staking/src/benchmarking.rs @@ -1168,7 +1168,6 @@ mod benchmarks { false, true, RewardDestination::Staked, - era, )?; let slash_fraction = Perbill::from_percent(10); From f3c83b7de1fafa5ea923566dacae1db82b4ee8a6 Mon Sep 17 00:00:00 2001 From: Tsvetomir Dimitrov Date: Mon, 17 Mar 2025 15:01:27 +0200 Subject: [PATCH 15/38] Fix a compilation warning --- substrate/frame/staking/src/migrations.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/substrate/frame/staking/src/migrations.rs b/substrate/frame/staking/src/migrations.rs index f6e75202b7015..891d0bb0377a4 100644 --- a/substrate/frame/staking/src/migrations.rs +++ b/substrate/frame/staking/src/migrations.rs @@ -25,8 +25,6 @@ use frame_support::{ traits::{GetStorageVersion, OnRuntimeUpgrade, UncheckedOnRuntimeUpgrade}, }; -#[cfg(feature = "try-runtime")] -use frame_support::ensure; #[cfg(feature = "try-runtime")] use sp_runtime::TryRuntimeError; From 82d58e3e0bc15b668621fec200b94e9f79195724 Mon Sep 17 00:00:00 2001 
From: Ankan Date: Mon, 17 Mar 2025 14:14:04 +0100 Subject: [PATCH 16/38] fix bench --- substrate/frame/staking/src/benchmarking.rs | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/substrate/frame/staking/src/benchmarking.rs b/substrate/frame/staking/src/benchmarking.rs index 6551a8cfcf816..0378b0dac3d22 100644 --- a/substrate/frame/staking/src/benchmarking.rs +++ b/substrate/frame/staking/src/benchmarking.rs @@ -1156,12 +1156,8 @@ mod benchmarks { #[benchmark] fn manual_slash() -> Result<(), BenchmarkError> { - let era = EraIndex::zero(); - CurrentEra::::put(era); - ErasStartSessionIndex::::insert(era, 0); - ActiveEra::::put(ActiveEraInfo { index: era, start: None }); - // Create a validator with nominators + // This will add exposure for our validator in the current era. let (validator_stash, _nominators) = create_validator_with_nominators::( T::MaxExposurePageSize::get() as u32, T::MaxExposurePageSize::get() as u32, @@ -1170,6 +1166,8 @@ mod benchmarks { RewardDestination::Staked, )?; + let era = CurrentEra::::get().unwrap(); + ActiveEra::::put(ActiveEraInfo { index: era, start: None }); let slash_fraction = Perbill::from_percent(10); #[extrinsic_call] From 0f7a0f47b5233f5b4f322c7bfc5567d729b21ea1 Mon Sep 17 00:00:00 2001 From: Ankan Date: Mon, 17 Mar 2025 14:18:59 +0100 Subject: [PATCH 17/38] fix root offences --- substrate/frame/root-offences/src/mock.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/substrate/frame/root-offences/src/mock.rs b/substrate/frame/root-offences/src/mock.rs index 54ddbecaebfa0..19119c5541e57 100644 --- a/substrate/frame/root-offences/src/mock.rs +++ b/substrate/frame/root-offences/src/mock.rs @@ -27,7 +27,7 @@ use frame_support::{ derive_impl, parameter_types, traits::{ConstU32, ConstU64, OneSessionHandler}, }; -use pallet_staking::StakerStatus; +use pallet_staking::{BalanceOf, StakerStatus}; use sp_runtime::{curve::PiecewiseLinear, testing::UintAuthorityId, traits::Zero, BuildStorage}; use 
sp_staking::{EraIndex, SessionIndex}; From aaa9fea34c66ea89e1ec5c352c67e9b79593bc96 Mon Sep 17 00:00:00 2001 From: Ankan Date: Mon, 17 Mar 2025 14:43:11 +0100 Subject: [PATCH 18/38] fix kitchensink genesis --- substrate/bin/node/runtime/src/genesis_config_presets.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/substrate/bin/node/runtime/src/genesis_config_presets.rs b/substrate/bin/node/runtime/src/genesis_config_presets.rs index 99ae8f1d83939..82769a70c66bb 100644 --- a/substrate/bin/node/runtime/src/genesis_config_presets.rs +++ b/substrate/bin/node/runtime/src/genesis_config_presets.rs @@ -58,7 +58,7 @@ pub fn kitchensink_genesis( stakers: Vec, staking_playground_config: Option, ) -> serde_json::Value { - let (validator_count, min_validator_count, dev_stakers) = match staking_playground_config { + let (validator_count, min_validator_count, _dev_stakers) = match staking_playground_config { Some(c) => (c.validator_count, c.minimum_validator_count, Some(c.dev_stakers)), None => { let authorities_count = initial_authorities.len() as u32; @@ -90,7 +90,6 @@ pub fn kitchensink_genesis( .expect("Too many invulnerable validators: upper limit is MaxInvulnerables from pallet staking config"), slash_reward_fraction: Perbill::from_percent(10), stakers, - dev_stakers }, elections: ElectionsConfig { members: collective.iter().cloned().map(|member| (member, STASH)).collect(), From e9a04a680644b043d345070f1cdfb477ce734f74 Mon Sep 17 00:00:00 2001 From: Ankan Date: Mon, 17 Mar 2025 14:49:14 +0100 Subject: [PATCH 19/38] taplo --- cumulus/bin/pov-validator/Cargo.toml | 2 +- umbrella/Cargo.toml | 234 ++++++++++++++++++++++++++- 2 files changed, 234 insertions(+), 2 deletions(-) diff --git a/cumulus/bin/pov-validator/Cargo.toml b/cumulus/bin/pov-validator/Cargo.toml index d7af29a6bcb25..a919e3f68eace 100644 --- a/cumulus/bin/pov-validator/Cargo.toml +++ b/cumulus/bin/pov-validator/Cargo.toml @@ -19,8 +19,8 @@ sc-executor.workspace = true sp-core.workspace = 
true sp-io.workspace = true sp-maybe-compressed-blob.workspace = true -tracing-subscriber.workspace = true tracing.workspace = true +tracing-subscriber.workspace = true [lints] workspace = true diff --git a/umbrella/Cargo.toml b/umbrella/Cargo.toml index e87b7ada6612f..9b1afb23d3239 100644 --- a/umbrella/Cargo.toml +++ b/umbrella/Cargo.toml @@ -550,7 +550,239 @@ with-tracing = [ "sp-tracing?/with-tracing", ] -runtime-full = ["assets-common", "binary-merkle-tree", "bp-header-chain", "bp-messages", "bp-parachains", "bp-polkadot", "bp-polkadot-core", "bp-relayers", "bp-runtime", "bp-test-utils", "bp-xcm-bridge-hub", "bp-xcm-bridge-hub-router", "bridge-hub-common", "bridge-runtime-common", "cumulus-pallet-aura-ext", "cumulus-pallet-dmp-queue", "cumulus-pallet-parachain-system", "cumulus-pallet-parachain-system-proc-macro", "cumulus-pallet-session-benchmarking", "cumulus-pallet-solo-to-para", "cumulus-pallet-weight-reclaim", "cumulus-pallet-xcm", "cumulus-pallet-xcmp-queue", "cumulus-ping", "cumulus-primitives-aura", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", "cumulus-primitives-proof-size-hostfunction", "cumulus-primitives-storage-weight-reclaim", "cumulus-primitives-timestamp", "cumulus-primitives-utility", "frame-benchmarking", "frame-benchmarking-pallet-pov", "frame-election-provider-solution-type", "frame-election-provider-support", "frame-executive", "frame-metadata-hash-extension", "frame-support", "frame-support-procedural", "frame-support-procedural-tools-derive", "frame-system", "frame-system-benchmarking", "frame-system-rpc-runtime-api", "frame-try-runtime", "pallet-alliance", "pallet-asset-conversion", "pallet-asset-conversion-ops", "pallet-asset-conversion-tx-payment", "pallet-asset-rate", "pallet-asset-rewards", "pallet-asset-tx-payment", "pallet-assets", "pallet-assets-freezer", "pallet-atomic-swap", "pallet-aura", "pallet-authority-discovery", "pallet-authorship", "pallet-babe", "pallet-bags-list", "pallet-balances", 
"pallet-beefy", "pallet-beefy-mmr", "pallet-bounties", "pallet-bridge-grandpa", "pallet-bridge-messages", "pallet-bridge-parachains", "pallet-bridge-relayers", "pallet-broker", "pallet-child-bounties", "pallet-collator-selection", "pallet-collective", "pallet-collective-content", "pallet-contracts", "pallet-contracts-proc-macro", "pallet-contracts-uapi", "pallet-conviction-voting", "pallet-core-fellowship", "pallet-delegated-staking", "pallet-democracy", "pallet-dev-mode", "pallet-election-provider-multi-phase", "pallet-election-provider-support-benchmarking", "pallet-elections-phragmen", "pallet-fast-unstake", "pallet-glutton", "pallet-grandpa", "pallet-identity", "pallet-im-online", "pallet-indices", "pallet-insecure-randomness-collective-flip", "pallet-lottery", "pallet-membership", "pallet-message-queue", "pallet-migrations", "pallet-mixnet", "pallet-mmr", "pallet-multisig", "pallet-nft-fractionalization", "pallet-nfts", "pallet-nfts-runtime-api", "pallet-nis", "pallet-node-authorization", "pallet-nomination-pools", "pallet-nomination-pools-benchmarking", "pallet-nomination-pools-runtime-api", "pallet-offences", "pallet-offences-benchmarking", "pallet-paged-list", "pallet-parameters", "pallet-preimage", "pallet-proxy", "pallet-ranked-collective", "pallet-recovery", "pallet-referenda", "pallet-remark", "pallet-revive", "pallet-revive-proc-macro", "pallet-revive-uapi", "pallet-root-offences", "pallet-root-testing", "pallet-safe-mode", "pallet-salary", "pallet-scheduler", "pallet-scored-pool", "pallet-session", "pallet-session-benchmarking", "pallet-skip-feeless-payment", "pallet-society", "pallet-staking", "pallet-staking-reward-curve", "pallet-staking-reward-fn", "pallet-staking-runtime-api", "pallet-state-trie-migration", "pallet-statement", "pallet-sudo", "pallet-timestamp", "pallet-tips", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", "pallet-transaction-storage", "pallet-treasury", "pallet-tx-pause", "pallet-uniques", 
"pallet-utility", "pallet-verify-signature", "pallet-vesting", "pallet-whitelist", "pallet-xcm", "pallet-xcm-benchmarks", "pallet-xcm-bridge-hub", "pallet-xcm-bridge-hub-router", "parachains-common", "polkadot-core-primitives", "polkadot-parachain-primitives", "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-metrics", "polkadot-runtime-parachains", "polkadot-sdk-frame", "sc-chain-spec-derive", "sc-tracing-proc-macro", "slot-range-helper", "snowbridge-beacon-primitives", "snowbridge-core", "snowbridge-ethereum", "snowbridge-outbound-queue-merkle-tree", "snowbridge-outbound-queue-runtime-api", "snowbridge-pallet-ethereum-client", "snowbridge-pallet-ethereum-client-fixtures", "snowbridge-pallet-inbound-queue", "snowbridge-pallet-inbound-queue-fixtures", "snowbridge-pallet-outbound-queue", "snowbridge-pallet-system", "snowbridge-router-primitives", "snowbridge-runtime-common", "snowbridge-system-runtime-api", "sp-api", "sp-api-proc-macro", "sp-application-crypto", "sp-arithmetic", "sp-authority-discovery", "sp-block-builder", "sp-consensus-aura", "sp-consensus-babe", "sp-consensus-beefy", "sp-consensus-grandpa", "sp-consensus-pow", "sp-consensus-slots", "sp-core", "sp-crypto-ec-utils", "sp-crypto-hashing", "sp-crypto-hashing-proc-macro", "sp-debug-derive", "sp-externalities", "sp-genesis-builder", "sp-inherents", "sp-io", "sp-keyring", "sp-keystore", "sp-metadata-ir", "sp-mixnet", "sp-mmr-primitives", "sp-npos-elections", "sp-offchain", "sp-runtime", "sp-runtime-interface", "sp-runtime-interface-proc-macro", "sp-session", "sp-staking", "sp-state-machine", "sp-statement-store", "sp-std", "sp-storage", "sp-timestamp", "sp-tracing", "sp-transaction-pool", "sp-transaction-storage-proof", "sp-trie", "sp-version", "sp-version-proc-macro", "sp-wasm-interface", "sp-weights", "staging-parachain-info", "staging-xcm", "staging-xcm-builder", "staging-xcm-executor", "substrate-bip39", "testnet-parachains-constants", "tracing-gum-proc-macro", "xcm-procedural", 
"xcm-runtime-apis"] +runtime-full = [ + "assets-common", + "binary-merkle-tree", + "bp-header-chain", + "bp-messages", + "bp-parachains", + "bp-polkadot", + "bp-polkadot-core", + "bp-relayers", + "bp-runtime", + "bp-test-utils", + "bp-xcm-bridge-hub", + "bp-xcm-bridge-hub-router", + "bridge-hub-common", + "bridge-runtime-common", + "cumulus-pallet-aura-ext", + "cumulus-pallet-dmp-queue", + "cumulus-pallet-parachain-system", + "cumulus-pallet-parachain-system-proc-macro", + "cumulus-pallet-session-benchmarking", + "cumulus-pallet-solo-to-para", + "cumulus-pallet-weight-reclaim", + "cumulus-pallet-xcm", + "cumulus-pallet-xcmp-queue", + "cumulus-ping", + "cumulus-primitives-aura", + "cumulus-primitives-core", + "cumulus-primitives-parachain-inherent", + "cumulus-primitives-proof-size-hostfunction", + "cumulus-primitives-storage-weight-reclaim", + "cumulus-primitives-timestamp", + "cumulus-primitives-utility", + "frame-benchmarking", + "frame-benchmarking-pallet-pov", + "frame-election-provider-solution-type", + "frame-election-provider-support", + "frame-executive", + "frame-metadata-hash-extension", + "frame-support", + "frame-support-procedural", + "frame-support-procedural-tools-derive", + "frame-system", + "frame-system-benchmarking", + "frame-system-rpc-runtime-api", + "frame-try-runtime", + "pallet-alliance", + "pallet-asset-conversion", + "pallet-asset-conversion-ops", + "pallet-asset-conversion-tx-payment", + "pallet-asset-rate", + "pallet-asset-rewards", + "pallet-asset-tx-payment", + "pallet-assets", + "pallet-assets-freezer", + "pallet-atomic-swap", + "pallet-aura", + "pallet-authority-discovery", + "pallet-authorship", + "pallet-babe", + "pallet-bags-list", + "pallet-balances", + "pallet-beefy", + "pallet-beefy-mmr", + "pallet-bounties", + "pallet-bridge-grandpa", + "pallet-bridge-messages", + "pallet-bridge-parachains", + "pallet-bridge-relayers", + "pallet-broker", + "pallet-child-bounties", + "pallet-collator-selection", + "pallet-collective", + 
"pallet-collective-content", + "pallet-contracts", + "pallet-contracts-proc-macro", + "pallet-contracts-uapi", + "pallet-conviction-voting", + "pallet-core-fellowship", + "pallet-delegated-staking", + "pallet-democracy", + "pallet-dev-mode", + "pallet-election-provider-multi-phase", + "pallet-election-provider-support-benchmarking", + "pallet-elections-phragmen", + "pallet-fast-unstake", + "pallet-glutton", + "pallet-grandpa", + "pallet-identity", + "pallet-im-online", + "pallet-indices", + "pallet-insecure-randomness-collective-flip", + "pallet-lottery", + "pallet-membership", + "pallet-message-queue", + "pallet-migrations", + "pallet-mixnet", + "pallet-mmr", + "pallet-multisig", + "pallet-nft-fractionalization", + "pallet-nfts", + "pallet-nfts-runtime-api", + "pallet-nis", + "pallet-node-authorization", + "pallet-nomination-pools", + "pallet-nomination-pools-benchmarking", + "pallet-nomination-pools-runtime-api", + "pallet-offences", + "pallet-offences-benchmarking", + "pallet-paged-list", + "pallet-parameters", + "pallet-preimage", + "pallet-proxy", + "pallet-ranked-collective", + "pallet-recovery", + "pallet-referenda", + "pallet-remark", + "pallet-revive", + "pallet-revive-proc-macro", + "pallet-revive-uapi", + "pallet-root-offences", + "pallet-root-testing", + "pallet-safe-mode", + "pallet-salary", + "pallet-scheduler", + "pallet-scored-pool", + "pallet-session", + "pallet-session-benchmarking", + "pallet-skip-feeless-payment", + "pallet-society", + "pallet-staking", + "pallet-staking-reward-curve", + "pallet-staking-reward-fn", + "pallet-staking-runtime-api", + "pallet-state-trie-migration", + "pallet-statement", + "pallet-sudo", + "pallet-timestamp", + "pallet-tips", + "pallet-transaction-payment", + "pallet-transaction-payment-rpc-runtime-api", + "pallet-transaction-storage", + "pallet-treasury", + "pallet-tx-pause", + "pallet-uniques", + "pallet-utility", + "pallet-verify-signature", + "pallet-vesting", + "pallet-whitelist", + "pallet-xcm", + 
"pallet-xcm-benchmarks", + "pallet-xcm-bridge-hub", + "pallet-xcm-bridge-hub-router", + "parachains-common", + "polkadot-core-primitives", + "polkadot-parachain-primitives", + "polkadot-primitives", + "polkadot-runtime-common", + "polkadot-runtime-metrics", + "polkadot-runtime-parachains", + "polkadot-sdk-frame", + "sc-chain-spec-derive", + "sc-tracing-proc-macro", + "slot-range-helper", + "snowbridge-beacon-primitives", + "snowbridge-core", + "snowbridge-ethereum", + "snowbridge-outbound-queue-merkle-tree", + "snowbridge-outbound-queue-runtime-api", + "snowbridge-pallet-ethereum-client", + "snowbridge-pallet-ethereum-client-fixtures", + "snowbridge-pallet-inbound-queue", + "snowbridge-pallet-inbound-queue-fixtures", + "snowbridge-pallet-outbound-queue", + "snowbridge-pallet-system", + "snowbridge-router-primitives", + "snowbridge-runtime-common", + "snowbridge-system-runtime-api", + "sp-api", + "sp-api-proc-macro", + "sp-application-crypto", + "sp-arithmetic", + "sp-authority-discovery", + "sp-block-builder", + "sp-consensus-aura", + "sp-consensus-babe", + "sp-consensus-beefy", + "sp-consensus-grandpa", + "sp-consensus-pow", + "sp-consensus-slots", + "sp-core", + "sp-crypto-ec-utils", + "sp-crypto-hashing", + "sp-crypto-hashing-proc-macro", + "sp-debug-derive", + "sp-externalities", + "sp-genesis-builder", + "sp-inherents", + "sp-io", + "sp-keyring", + "sp-keystore", + "sp-metadata-ir", + "sp-mixnet", + "sp-mmr-primitives", + "sp-npos-elections", + "sp-offchain", + "sp-runtime", + "sp-runtime-interface", + "sp-runtime-interface-proc-macro", + "sp-session", + "sp-staking", + "sp-state-machine", + "sp-statement-store", + "sp-std", + "sp-storage", + "sp-timestamp", + "sp-tracing", + "sp-transaction-pool", + "sp-transaction-storage-proof", + "sp-trie", + "sp-version", + "sp-version-proc-macro", + "sp-wasm-interface", + "sp-weights", + "staging-parachain-info", + "staging-xcm", + "staging-xcm-builder", + "staging-xcm-executor", + "substrate-bip39", + 
"testnet-parachains-constants", + "tracing-gum-proc-macro", + "xcm-procedural", + "xcm-runtime-apis", +] runtime = [ "frame-benchmarking", "frame-benchmarking-pallet-pov", From 202138e4c408f0dca7810239b222a71da4e80f4d Mon Sep 17 00:00:00 2001 From: Ankan Date: Mon, 17 Mar 2025 15:01:23 +0100 Subject: [PATCH 20/38] update storage version --- substrate/frame/staking/src/pallet/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/substrate/frame/staking/src/pallet/mod.rs b/substrate/frame/staking/src/pallet/mod.rs index 3a3f997bfe1af..7f45094d53621 100644 --- a/substrate/frame/staking/src/pallet/mod.rs +++ b/substrate/frame/staking/src/pallet/mod.rs @@ -72,7 +72,7 @@ pub mod pallet { use crate::{BenchmarkingConfig, PagedExposureMetadata}; /// The in-code storage version. - const STORAGE_VERSION: StorageVersion = StorageVersion::new(16); + const STORAGE_VERSION: StorageVersion = StorageVersion::new(17); #[pallet::pallet] #[pallet::storage_version(STORAGE_VERSION)] From e80b05cf4ffd645cb514140f84a2a19f6be43d72 Mon Sep 17 00:00:00 2001 From: Ankan Date: Mon, 17 Mar 2025 17:10:51 +0100 Subject: [PATCH 21/38] node cli --- substrate/bin/node/cli/src/chain_spec.rs | 91 +++++++++--------------- 1 file changed, 32 insertions(+), 59 deletions(-) diff --git a/substrate/bin/node/cli/src/chain_spec.rs b/substrate/bin/node/cli/src/chain_spec.rs index f1f1ef30fc91d..08efe2cf4bd61 100644 --- a/substrate/bin/node/cli/src/chain_spec.rs +++ b/substrate/bin/node/cli/src/chain_spec.rs @@ -314,65 +314,38 @@ pub fn testnet_genesis( let (initial_authorities, endowed_accounts, stakers) = configure_accounts(initial_authorities, initial_nominators, endowed_accounts, STASH); - serde_json::json!({ - "balances": { - "balances": endowed_accounts.iter().cloned().map(|x| (x, ENDOWMENT)).collect::>(), - }, - "session": { - "keys": initial_authorities - .iter() - .map(|x| { - ( - x.0.clone(), - x.0.clone(), - session_keys( - x.2.clone(), - x.3.clone(), - x.4.clone(), - x.5.clone(), 
- x.6.clone(), - x.7.clone(), - ), - ) - }) - .collect::>(), - }, - "staking": { - "validatorCount": initial_authorities.len() as u32, - "minimumValidatorCount": initial_authorities.len() as u32, - "invulnerables": initial_authorities.iter().map(|x| x.0.clone()).collect::>(), - "slashRewardFraction": Perbill::from_percent(10), - "stakers": stakers.clone(), - }, - "elections": { - "members": endowed_accounts - .iter() - .take((num_endowed_accounts + 1) / 2) - .cloned() - .map(|member| (member, STASH)) - .collect::>(), - }, - "technicalCommittee": { - "members": endowed_accounts - .iter() - .take((num_endowed_accounts + 1) / 2) - .cloned() - .collect::>(), - }, - "sudo": { "key": Some(root_key.clone()) }, - "babe": { - "epochConfig": Some(kitchensink_runtime::BABE_GENESIS_EPOCH_CONFIG), - }, - "society": { "pot": 0 }, - "assets": { - // This asset is used by the NIS pallet as counterpart currency. - "assets": vec![(9, Sr25519Keyring::Alice.to_account_id(), true, 1)], - }, - "nominationPools": { - "minCreateBond": 10 * DOLLARS, - "minJoinBond": 1 * DOLLARS, - }, - }) + let staking_playground_config = if cfg!(feature = "staking-playground") { + Some(get_staking_playground_config()) + } else { + None + }; + + // Todo: After #7748 is done, we can refactor this to avoid + // calling into the native runtime. 
+ kitchensink_runtime::genesis_config_presets::kitchensink_genesis( + initial_authorities + .iter() + .map(|x| { + ( + x.0.clone(), + // stash account is controller + x.0.clone(), + session_keys( + x.2.clone(), + x.3.clone(), + x.4.clone(), + x.5.clone(), + x.6.clone(), + x.7.clone(), + ), + ) + }) + .collect(), + root_key, + endowed_accounts, + stakers, + staking_playground_config, + ) } fn get_staking_playground_config() -> StakingPlaygroundConfig { From 0600e8d5e83539eecada6d0770ac771b14a5d591 Mon Sep 17 00:00:00 2001 From: Tsvetomir Dimitrov Date: Wed, 19 Mar 2025 10:56:32 +0200 Subject: [PATCH 22/38] Add `staking-playground` feature to `staging-node-cli` to suppress a warning --- substrate/bin/node/cli/Cargo.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/substrate/bin/node/cli/Cargo.toml b/substrate/bin/node/cli/Cargo.toml index 7b355074823c3..f64db3fec4502 100644 --- a/substrate/bin/node/cli/Cargo.toml +++ b/substrate/bin/node/cli/Cargo.toml @@ -182,6 +182,7 @@ try-runtime = [ "polkadot-sdk/try-runtime", "substrate-cli-test-utils/try-runtime", ] +staking-playground = [] [[bench]] name = "transaction_pool" From 0caa4acd1865b1be1df31dba16c284b537340ede Mon Sep 17 00:00:00 2001 From: Tsvetomir Dimitrov Date: Wed, 19 Mar 2025 14:08:22 +0200 Subject: [PATCH 23/38] Fix a warning --- substrate/frame/offences/benchmarking/src/mock.rs | 1 - substrate/frame/session/benchmarking/src/mock.rs | 1 - 2 files changed, 2 deletions(-) diff --git a/substrate/frame/offences/benchmarking/src/mock.rs b/substrate/frame/offences/benchmarking/src/mock.rs index 7dab3da6ee365..fe5ef8e172c81 100644 --- a/substrate/frame/offences/benchmarking/src/mock.rs +++ b/substrate/frame/offences/benchmarking/src/mock.rs @@ -33,7 +33,6 @@ use sp_runtime::{ }; type AccountId = u64; -type Balance = u64; #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] impl frame_system::Config for Test { diff --git a/substrate/frame/session/benchmarking/src/mock.rs
b/substrate/frame/session/benchmarking/src/mock.rs index 8e775eaf011de..da39ed4e1ffd0 100644 --- a/substrate/frame/session/benchmarking/src/mock.rs +++ b/substrate/frame/session/benchmarking/src/mock.rs @@ -31,7 +31,6 @@ use sp_runtime::{traits::IdentityLookup, BuildStorage, KeyTypeId}; type AccountId = u64; type Nonce = u32; -type Balance = u64; type Block = frame_system::mocking::MockBlock; From d8619fe3fde2dcf7b009c8dd9fb07281e26d3230 Mon Sep 17 00:00:00 2001 From: Tsvetomir Dimitrov Date: Wed, 19 Mar 2025 14:53:43 +0200 Subject: [PATCH 24/38] prdoc - initial --- prdoc/pr_7939.prdoc | 52 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) create mode 100644 prdoc/pr_7939.prdoc diff --git a/prdoc/pr_7939.prdoc b/prdoc/pr_7939.prdoc new file mode 100644 index 0000000000000..6a1ecaad8b608 --- /dev/null +++ b/prdoc/pr_7939.prdoc @@ -0,0 +1,52 @@ +title: Revert pallet-staking changes which should be released as a separate pallet +doc: +- audience: Runtime Dev + description: |- + Revert changes to staking pallet for AssetHubNext because they will be released as a separate pallet. 
+crates: +- name: sp-staking + bump: major +- name: polkadot-sdk + bump: major +- name: polkadot-runtime-parachains + bump: major +- name: pallet-babe + bump: major +- name: frame-election-provider-support + bump: major +- name: frame-election-provider-solution-type + bump: major +- name: sp-npos-elections + bump: major +- name: pallet-staking + bump: major +- name: pallet-bags-list + bump: major +- name: pallet-grandpa + bump: major +- name: pallet-election-provider-multi-phase + bump: major +- name: pallet-election-provider-support-benchmarking + bump: major +- name: pallet-fast-unstake + bump: major +- name: frame-benchmarking-cli + bump: major +- name: westend-runtime + bump: major +- name: pallet-beefy + bump: major +- name: pallet-delegated-staking + bump: major +- name: pallet-elections-phragmen + bump: major +- name: pallet-nomination-pools-benchmarking + bump: major +- name: pallet-offences-benchmarking + bump: major +- name: pallet-session-benchmarking + bump: major +- name: pallet-root-offences + bump: major +- name: cumulus-pov-validator + bump: major \ No newline at end of file From c20653ed127c536980f1fb8d064ef419af3d3991 Mon Sep 17 00:00:00 2001 From: Tsvetomir Dimitrov Date: Wed, 19 Mar 2025 15:15:49 +0200 Subject: [PATCH 25/38] Add `pallet-assets-holder` to umbrella --- umbrella/Cargo.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/umbrella/Cargo.toml b/umbrella/Cargo.toml index 882afb6170abf..09be174c8a050 100644 --- a/umbrella/Cargo.toml +++ b/umbrella/Cargo.toml @@ -608,6 +608,7 @@ runtime-full = [ "pallet-asset-tx-payment", "pallet-assets", "pallet-assets-freezer", + "pallet-assets-holder", "pallet-atomic-swap", "pallet-aura", "pallet-authority-discovery", From 39b755d87f9b56e2ec397632907196072cc096b4 Mon Sep 17 00:00:00 2001 From: Tsvetomir Dimitrov Date: Wed, 19 Mar 2025 15:31:08 +0200 Subject: [PATCH 26/38] no new lines in umbrella's cargo file!!! 
--- umbrella/Cargo.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/umbrella/Cargo.toml b/umbrella/Cargo.toml index 09be174c8a050..dc6440423cfaf 100644 --- a/umbrella/Cargo.toml +++ b/umbrella/Cargo.toml @@ -553,7 +553,6 @@ with-tracing = [ "sp-tracing?/with-tracing", "sp-tracing?/with-tracing", ] - runtime-full = [ "assets-common", "binary-merkle-tree", From ad6c73f1e4bf27f5926faff5edbe6c1d5ab30f12 Mon Sep 17 00:00:00 2001 From: Tsvetomir Dimitrov Date: Wed, 19 Mar 2025 15:32:42 +0200 Subject: [PATCH 27/38] taplo --- cumulus/bin/pov-validator/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cumulus/bin/pov-validator/Cargo.toml b/cumulus/bin/pov-validator/Cargo.toml index a919e3f68eace..d7af29a6bcb25 100644 --- a/cumulus/bin/pov-validator/Cargo.toml +++ b/cumulus/bin/pov-validator/Cargo.toml @@ -19,8 +19,8 @@ sc-executor.workspace = true sp-core.workspace = true sp-io.workspace = true sp-maybe-compressed-blob.workspace = true -tracing.workspace = true tracing-subscriber.workspace = true +tracing.workspace = true [lints] workspace = true From 419c26574dc8503247d15b6c0ec5a99f91df21b9 Mon Sep 17 00:00:00 2001 From: Tsvetomir Dimitrov Date: Wed, 19 Mar 2025 15:58:20 +0200 Subject: [PATCH 28/38] Code review feedback --- substrate/bin/node/cli/Cargo.toml | 1 - substrate/bin/node/cli/src/chain_spec.rs | 6 +----- .../primitives/npos-elections/src/helpers.rs | 17 +++++++++++++++++ .../benchmarking-cli/src/pallet/command.rs | 5 ++++- 4 files changed, 22 insertions(+), 7 deletions(-) diff --git a/substrate/bin/node/cli/Cargo.toml b/substrate/bin/node/cli/Cargo.toml index f64db3fec4502..7b355074823c3 100644 --- a/substrate/bin/node/cli/Cargo.toml +++ b/substrate/bin/node/cli/Cargo.toml @@ -182,7 +182,6 @@ try-runtime = [ "polkadot-sdk/try-runtime", "substrate-cli-test-utils/try-runtime", ] -staking-playground = [] [[bench]] name = "transaction_pool" diff --git a/substrate/bin/node/cli/src/chain_spec.rs b/substrate/bin/node/cli/src/chain_spec.rs 
index 08efe2cf4bd61..dea4d634e40f4 100644 --- a/substrate/bin/node/cli/src/chain_spec.rs +++ b/substrate/bin/node/cli/src/chain_spec.rs @@ -314,11 +314,7 @@ pub fn testnet_genesis( let (initial_authorities, endowed_accounts, stakers) = configure_accounts(initial_authorities, initial_nominators, endowed_accounts, STASH); - let staking_playground_config = if cfg!(feature = "staking-playground") { - Some(get_staking_playground_config()) - } else { - None - }; + let staking_playground_config = None; // Todo: After #7748 is done, we can refactor this to avoid // calling into the native runtime. diff --git a/substrate/primitives/npos-elections/src/helpers.rs b/substrate/primitives/npos-elections/src/helpers.rs index 7df6ec9d9dbaa..ef6fc613ab10c 100644 --- a/substrate/primitives/npos-elections/src/helpers.rs +++ b/substrate/primitives/npos-elections/src/helpers.rs @@ -75,6 +75,23 @@ pub fn assignment_staked_to_ratio_normalized( Ok(ratio) } +/// Convert some [`Supports`]s into vector of [`StakedAssignment`] +pub fn supports_to_staked_assignment( + supports: Supports, +) -> Vec> { + let mut staked: BTreeMap> = BTreeMap::new(); + for (target, support) in supports { + for (voter, amount) in support.voters { + staked.entry(voter).or_default().push((target.clone(), amount)) + } + } + + staked + .into_iter() + .map(|(who, distribution)| StakedAssignment { who, distribution }) + .collect::>() +} + #[cfg(test)] mod tests { use super::*; diff --git a/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs b/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs index 5501f37103baa..ec035fadbeba2 100644 --- a/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs +++ b/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs @@ -647,7 +647,10 @@ impl PalletCmd { fn pallet_selected(&self, pallet: &Vec) -> bool { let include = self.pallet.clone().unwrap_or_default(); - let included = include.is_empty() || include == "*" || include.as_bytes() == pallet; + let 
included = include.is_empty() || + include == "*" || + include == "all" || + include.as_bytes() == pallet; let excluded = self.exclude_pallets.iter().any(|p| p.as_bytes() == pallet); included && !excluded From 400933e2853f9a5e9923c19b5c16bd8036a53e9d Mon Sep 17 00:00:00 2001 From: Tsvetomir Dimitrov Date: Wed, 19 Mar 2025 16:46:44 +0200 Subject: [PATCH 29/38] fix compilation errors --- substrate/primitives/npos-elections/src/helpers.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/substrate/primitives/npos-elections/src/helpers.rs b/substrate/primitives/npos-elections/src/helpers.rs index ef6fc613ab10c..45455b42fb6ca 100644 --- a/substrate/primitives/npos-elections/src/helpers.rs +++ b/substrate/primitives/npos-elections/src/helpers.rs @@ -17,8 +17,11 @@ //! Helper methods for npos-elections. -use crate::{Assignment, Error, IdentifierT, PerThing128, StakedAssignment, VoteWeight}; -use alloc::vec::Vec; +use crate::{ + Assignment, Error, ExtendedBalance, IdentifierT, PerThing128, StakedAssignment, Supports, + VoteWeight, +}; +use alloc::{collections::BTreeMap, vec::Vec}; use sp_arithmetic::PerThing; /// Converts a vector of ratio assignments into ones with absolute budget value. 
From 083679a79434bc504f505f09b2e49e8f98cdf089 Mon Sep 17 00:00:00 2001 From: Tsvetomir Dimitrov Date: Wed, 19 Mar 2025 17:01:53 +0200 Subject: [PATCH 30/38] Remove `get_staking_playground_config` --- substrate/bin/node/cli/src/chain_spec.rs | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/substrate/bin/node/cli/src/chain_spec.rs b/substrate/bin/node/cli/src/chain_spec.rs index dea4d634e40f4..52d740623ba6f 100644 --- a/substrate/bin/node/cli/src/chain_spec.rs +++ b/substrate/bin/node/cli/src/chain_spec.rs @@ -344,24 +344,6 @@ pub fn testnet_genesis( ) } -fn get_staking_playground_config() -> StakingPlaygroundConfig { - let random_validators = - std::option_env!("VALIDATORS").map(|s| s.parse::().unwrap()).unwrap_or(100); - let random_nominators = std::option_env!("NOMINATORS") - .map(|s| s.parse::().unwrap()) - .unwrap_or(3000); - - let validator_count = std::option_env!("VALIDATOR_COUNT") - .map(|v| v.parse::().unwrap()) - .unwrap_or(100); - - StakingPlaygroundConfig { - dev_stakers: (random_validators, random_nominators), - validator_count, - minimum_validator_count: 10, - } -} - fn props() -> Properties { let mut properties = Properties::new(); properties.insert("tokenDecimals".to_string(), 12.into()); From 7c19410cf96255efe29627d4527077512bd1068b Mon Sep 17 00:00:00 2001 From: Tsvetomir Dimitrov Date: Wed, 19 Mar 2025 17:53:43 +0200 Subject: [PATCH 31/38] Remove unused import --- substrate/bin/node/cli/src/chain_spec.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/substrate/bin/node/cli/src/chain_spec.rs b/substrate/bin/node/cli/src/chain_spec.rs index 52d740623ba6f..fe2c2b780ea6f 100644 --- a/substrate/bin/node/cli/src/chain_spec.rs +++ b/substrate/bin/node/cli/src/chain_spec.rs @@ -22,7 +22,7 @@ use polkadot_sdk::*; use crate::chain_spec::sc_service::Properties; use kitchensink_runtime::{ - genesis_config_presets::{session_keys, Staker, StakingPlaygroundConfig, STASH}, + genesis_config_presets::{session_keys, Staker, 
STASH}, wasm_binary_unwrap, Block, MaxNominations, StakerStatus, }; use pallet_im_online::sr25519::AuthorityId as ImOnlineId; From 5e3ea96f9752e7edcd8054087ceb07a310df09eb Mon Sep 17 00:00:00 2001 From: Tsvetomir Dimitrov Date: Thu, 20 Mar 2025 11:32:50 +0200 Subject: [PATCH 32/38] newline at the end of the prdoc will make the semver check pass? --- prdoc/pr_7939.prdoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/prdoc/pr_7939.prdoc b/prdoc/pr_7939.prdoc index 6a1ecaad8b608..1425be47f2384 100644 --- a/prdoc/pr_7939.prdoc +++ b/prdoc/pr_7939.prdoc @@ -49,4 +49,4 @@ crates: - name: pallet-root-offences bump: major - name: cumulus-pov-validator - bump: major \ No newline at end of file + bump: major From e156b2b289e52a8d1d6502e439bd0e07df093094 Mon Sep 17 00:00:00 2001 From: Ankan <10196091+Ank4n@users.noreply.github.com> Date: Mon, 24 Mar 2025 09:32:16 +0100 Subject: [PATCH 33/38] Update substrate/frame/staking/src/migrations.rs Co-authored-by: Maciej --- substrate/frame/staking/src/migrations.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/substrate/frame/staking/src/migrations.rs b/substrate/frame/staking/src/migrations.rs index 891d0bb0377a4..49bd02323d6f0 100644 --- a/substrate/frame/staking/src/migrations.rs +++ b/substrate/frame/staking/src/migrations.rs @@ -53,6 +53,7 @@ impl Default for ObsoleteReleases { #[storage_alias] type StorageVersion = StorageValue, ObsoleteReleases, ValueQuery>; +/// Supports the migration of Validator Disabling from pallet-staking to pallet-session pub mod v17 { use super::*; From 0449188792069122ab670afde851cea127b0b991 Mon Sep 17 00:00:00 2001 From: Ankan Date: Mon, 24 Mar 2025 10:19:06 +0100 Subject: [PATCH 34/38] add a note to reverted PR docs. 
--- prdoc/pr_7282.prdoc | 1 + prdoc/pr_7424.prdoc | 3 ++- prdoc/pr_7582.prdoc | 1 + prdoc/pr_7939.prdoc | 7 ++++--- substrate/bin/node/runtime/src/genesis_config_presets.rs | 8 +++----- 5 files changed, 11 insertions(+), 9 deletions(-) diff --git a/prdoc/pr_7282.prdoc b/prdoc/pr_7282.prdoc index 3d12a8b184abd..12a2a6e4f7c1b 100644 --- a/prdoc/pr_7282.prdoc +++ b/prdoc/pr_7282.prdoc @@ -2,6 +2,7 @@ title: AHM Multi-block staking election pallet doc: - audience: Runtime Dev description: | + NOTE: This is reverted in #7939. ## Multi Block Election Pallet This PR adds the first iteration of the multi-block staking pallet. diff --git a/prdoc/pr_7424.prdoc b/prdoc/pr_7424.prdoc index e177f41371bc6..e90853df4aa08 100644 --- a/prdoc/pr_7424.prdoc +++ b/prdoc/pr_7424.prdoc @@ -6,6 +6,7 @@ title: 'Bounded Slashing: Paginated Offence Processing & Slash Application' doc: - audience: Runtime Dev description: | + NOTE: This is reverted in #7939. This PR refactors the slashing mechanism in `pallet-staking` to be bounded by introducing paged offence processing and paged slash application. ### Key Changes @@ -34,4 +35,4 @@ crates: - name: pallet-session-benchmarking bump: patch - name: pallet-root-offences - bump: patch \ No newline at end of file + bump: patch diff --git a/prdoc/pr_7582.prdoc b/prdoc/pr_7582.prdoc index 26e594c4373f2..23a35b1266526 100644 --- a/prdoc/pr_7582.prdoc +++ b/prdoc/pr_7582.prdoc @@ -2,6 +2,7 @@ title: Implementation of `ah-client` and `rc-client` staking pallets doc: - audience: Runtime Dev description: |- + NOTE: This is reverted in #7939. This PR introduces the initial structure for `pallet-ah-client` and `pallet-rc-client`. These pallets will reside on the relay chain and AssetHub, respectively, and will manage the interaction between `pallet-session` on the relay chain and `pallet-staking` on AssetHub. 
diff --git a/prdoc/pr_7939.prdoc b/prdoc/pr_7939.prdoc index 1425be47f2384..baef7116063fc 100644 --- a/prdoc/pr_7939.prdoc +++ b/prdoc/pr_7939.prdoc @@ -2,7 +2,10 @@ title: Revert pallet-staking changes which should be released as a separate pall doc: - audience: Runtime Dev description: |- - Revert changes to staking pallet for AssetHubNext because they will be released as a separate pallet. + Revert multi-block election, slashing and staking client pallets. + + Reverted PRs: #7582, #7424, #7282 + crates: - name: sp-staking bump: major @@ -32,8 +35,6 @@ crates: bump: major - name: frame-benchmarking-cli bump: major -- name: westend-runtime - bump: major - name: pallet-beefy bump: major - name: pallet-delegated-staking diff --git a/substrate/bin/node/runtime/src/genesis_config_presets.rs b/substrate/bin/node/runtime/src/genesis_config_presets.rs index 82769a70c66bb..49d2ca7e5293e 100644 --- a/substrate/bin/node/runtime/src/genesis_config_presets.rs +++ b/substrate/bin/node/runtime/src/genesis_config_presets.rs @@ -41,8 +41,6 @@ pub const ENDOWMENT: Balance = 10_000_000 * DOLLARS; pub const STASH: Balance = ENDOWMENT / 1000; pub struct StakingPlaygroundConfig { - /// (Validators, Nominators) - pub dev_stakers: (u32, u32), pub validator_count: u32, pub minimum_validator_count: u32, } @@ -58,11 +56,11 @@ pub fn kitchensink_genesis( stakers: Vec, staking_playground_config: Option, ) -> serde_json::Value { - let (validator_count, min_validator_count, _dev_stakers) = match staking_playground_config { - Some(c) => (c.validator_count, c.minimum_validator_count, Some(c.dev_stakers)), + let (validator_count, min_validator_count) = match staking_playground_config { + Some(c) => (c.validator_count, c.minimum_validator_count), None => { let authorities_count = initial_authorities.len() as u32; - (authorities_count, authorities_count, None) + (authorities_count, authorities_count) }, }; From ac255e22c205d7ada0e8d61a6b7a9802ce53e72f Mon Sep 17 00:00:00 2001 From: Ankan Date: Mon, 
24 Mar 2025 12:35:51 +0100 Subject: [PATCH 35/38] fix prdoc bumps --- prdoc/pr_7939.prdoc | 26 ++-- substrate/frame/bags-list/src/benchmarks.rs | 119 ++++++++++++++++++ .../solution-type/src/codec.rs | 1 + .../solution-type/src/single_page.rs | 90 ++++++++++++- .../benchmarking-cli/src/pallet/command.rs | 1 + 5 files changed, 221 insertions(+), 16 deletions(-) diff --git a/prdoc/pr_7939.prdoc b/prdoc/pr_7939.prdoc index baef7116063fc..16b732f06ee16 100644 --- a/prdoc/pr_7939.prdoc +++ b/prdoc/pr_7939.prdoc @@ -10,21 +10,19 @@ crates: - name: sp-staking bump: major - name: polkadot-sdk - bump: major + bump: patch - name: polkadot-runtime-parachains - bump: major + bump: patch - name: pallet-babe - bump: major + bump: patch - name: frame-election-provider-support bump: major -- name: frame-election-provider-solution-type - bump: major - name: sp-npos-elections bump: major - name: pallet-staking bump: major - name: pallet-bags-list - bump: major + bump: patch - name: pallet-grandpa bump: major - name: pallet-election-provider-multi-phase @@ -32,22 +30,20 @@ crates: - name: pallet-election-provider-support-benchmarking bump: major - name: pallet-fast-unstake - bump: major + bump: patch - name: frame-benchmarking-cli bump: major - name: pallet-beefy bump: major - name: pallet-delegated-staking - bump: major + bump: patch - name: pallet-elections-phragmen - bump: major + bump: patch - name: pallet-nomination-pools-benchmarking - bump: major + bump: patch - name: pallet-offences-benchmarking - bump: major + bump: patch - name: pallet-session-benchmarking - bump: major + bump: patch - name: pallet-root-offences - bump: major -- name: cumulus-pov-validator - bump: major + bump: patch diff --git a/substrate/frame/bags-list/src/benchmarks.rs b/substrate/frame/bags-list/src/benchmarks.rs index 55f4c24835ea6..7db4c4bb359f7 100644 --- a/substrate/frame/bags-list/src/benchmarks.rs +++ b/substrate/frame/bags-list/src/benchmarks.rs @@ -29,6 +29,125 @@ use frame_system::RawOrigin as 
SystemOrigin; use sp_runtime::traits::One; benchmarks_instance_pallet! { + // iteration of any number of items should only touch that many nodes and bags. + #[extra] + iter { + let n = 100; + + // clear any pre-existing storage. + List::::unsafe_clear(); + + // add n nodes, half to first bag and half to second bag. + let bag_thresh = T::BagThresholds::get()[0]; + let second_bag_thresh = T::BagThresholds::get()[1]; + + + for i in 0..n/2 { + let node: T::AccountId = account("node", i, 0); + assert_ok!(List::::insert(node.clone(), bag_thresh - One::one())); + } + for i in 0..n/2 { + let node: T::AccountId = account("node", i, 1); + assert_ok!(List::::insert(node.clone(), bag_thresh + One::one())); + } + assert_eq!( + List::::get_bags().into_iter().map(|(bag, nodes)| (bag, nodes.len())).collect::>(), + vec![ + (bag_thresh, (n / 2) as usize), + (second_bag_thresh, (n / 2) as usize), + ] + ); + }: { + let voters = List::::iter(); + let len = voters.collect::>().len(); + assert!(len as u32 == n, "len is {}, expected {}", len, n); + } + + // iteration of any number of items should only touch that many nodes and bags. + #[extra] + iter_take { + let n = 100; + + // clear any pre-existing storage. + List::::unsafe_clear(); + + // add n nodes, half to first bag and half to second bag. 
+ let bag_thresh = T::BagThresholds::get()[0]; + let second_bag_thresh = T::BagThresholds::get()[1]; + + + for i in 0..n/2 { + let node: T::AccountId = account("node", i, 0); + assert_ok!(List::::insert(node.clone(), bag_thresh - One::one())); + } + for i in 0..n/2 { + let node: T::AccountId = account("node", i, 1); + assert_ok!(List::::insert(node.clone(), bag_thresh + One::one())); + } + assert_eq!( + List::::get_bags().into_iter().map(|(bag, nodes)| (bag, nodes.len())).collect::>(), + vec![ + (bag_thresh, (n / 2) as usize), + (second_bag_thresh, (n / 2) as usize), + ] + ); + }: { + // this should only go into one of the bags + let voters = List::::iter().take(n as usize / 4 ); + let len = voters.collect::>().len(); + assert!(len as u32 == n / 4, "len is {}, expected {}", len, n / 4); + } + + #[extra] + iter_from { + let n = 100; + + // clear any pre-existing storage. + List::::unsafe_clear(); + + // populate the first 4 bags with n/4 nodes each + let bag_thresh = T::BagThresholds::get()[0]; + + for i in 0..n/4 { + let node: T::AccountId = account("node", i, 0); + assert_ok!(List::::insert(node.clone(), bag_thresh - One::one())); + } + for i in 0..n/4 { + let node: T::AccountId = account("node", i, 1); + assert_ok!(List::::insert(node.clone(), bag_thresh + One::one())); + } + + let bag_thresh = T::BagThresholds::get()[2]; + + for i in 0..n/4 { + let node: T::AccountId = account("node", i, 2); + assert_ok!(List::::insert(node.clone(), bag_thresh - One::one())); + } + + for i in 0..n/4 { + let node: T::AccountId = account("node", i, 3); + assert_ok!(List::::insert(node.clone(), bag_thresh + One::one())); + } + + assert_eq!( + List::::get_bags().into_iter().map(|(bag, nodes)| (bag, nodes.len())).collect::>(), + vec![ + (T::BagThresholds::get()[0], (n / 4) as usize), + (T::BagThresholds::get()[1], (n / 4) as usize), + (T::BagThresholds::get()[2], (n / 4) as usize), + (T::BagThresholds::get()[3], (n / 4) as usize), + ] + ); + + // iter from someone in the 3rd bag, so 
this should touch ~75 nodes and 3 bags + let from: T::AccountId = account("node", 0, 2); + }: { + let voters = List::::iter_from(&from).unwrap(); + let len = voters.collect::>().len(); + assert!(len as u32 == 74, "len is {}, expected {}", len, 74); + } + + rebag_non_terminal { // An expensive case for rebag-ing (rebag a non-terminal node): // diff --git a/substrate/frame/election-provider-support/solution-type/src/codec.rs b/substrate/frame/election-provider-support/solution-type/src/codec.rs index 16d5f17469b7e..c1dd62fe55506 100644 --- a/substrate/frame/election-provider-support/solution-type/src/codec.rs +++ b/substrate/frame/election-provider-support/solution-type/src/codec.rs @@ -33,6 +33,7 @@ pub(crate) fn codec_and_info_impl( let scale_info = scale_info_impl(&ident, &voter_type, &target_type, &weight_type, count); quote! { + impl _fepsp::codec::EncodeLike for #ident {} #encode #decode #scale_info diff --git a/substrate/frame/election-provider-support/solution-type/src/single_page.rs b/substrate/frame/election-provider-support/solution-type/src/single_page.rs index b496c349d8db8..c921be34b3430 100644 --- a/substrate/frame/election-provider-support/solution-type/src/single_page.rs +++ b/substrate/frame/election-provider-support/solution-type/src/single_page.rs @@ -84,6 +84,8 @@ pub(crate) fn generate(def: crate::SolutionDef) -> Result { Eq, Clone, Debug, + Ord, + PartialOrd, _fepsp::codec::Encode, _fepsp::codec::Decode, _fepsp::codec::DecodeWithMemTracking, @@ -97,6 +99,8 @@ pub(crate) fn generate(def: crate::SolutionDef) -> Result { let from_impl = from_impl(&struct_name, count); let into_impl = into_impl(&assignment_name, count, weight_type.clone()); let from_index_impl = crate::index_assignment::from_impl(&struct_name, count); + let sort_impl = sort_impl(count); + let remove_weakest_sorted_impl = remove_weakest_sorted_impl(count); Ok(quote! ( /// A struct to encode a election assignment in a compact way. 
@@ -179,6 +183,29 @@ pub(crate) fn generate(def: crate::SolutionDef) -> Result { all_targets.into_iter().collect() } + + fn sort(&mut self, mut voter_stake: F) + where + F: FnMut(&Self::VoterIndex) -> _feps::VoteWeight + { + #sort_impl + } + + fn remove_weakest_sorted(&mut self, mut voter_stake: F) -> Option + where + F: FnMut(&Self::VoterIndex) -> _feps::VoteWeight + { + #remove_weakest_sorted_impl + } + + fn corrupt(&mut self) { + self.votes1.push( + ( + _fepsp::sp_arithmetic::traits::Bounded::max_value(), + _fepsp::sp_arithmetic::traits::Bounded::max_value() + ) + ) + } } type __IndexAssignment = _feps::IndexAssignment< @@ -186,11 +213,12 @@ pub(crate) fn generate(def: crate::SolutionDef) -> Result { <#ident as _feps::NposSolution>::TargetIndex, <#ident as _feps::NposSolution>::Accuracy, >; + impl _fepsp::codec::MaxEncodedLen for #ident { fn max_encoded_len() -> usize { use frame_support::traits::Get; use _fepsp::codec::Encode; - let s: u32 = #max_voters::get(); + let s: u32 = <#max_voters as _feps::Get>::get(); let max_element_size = // the first voter.. #voter_type::max_encoded_len() @@ -207,6 +235,7 @@ pub(crate) fn generate(def: crate::SolutionDef) -> Result { .saturating_add((s as usize).saturating_mul(max_element_size)) } } + impl<'a> core::convert::TryFrom<&'a [__IndexAssignment]> for #ident { type Error = _feps::Error; fn try_from(index_assignments: &'a [__IndexAssignment]) -> Result { @@ -228,6 +257,65 @@ pub(crate) fn generate(def: crate::SolutionDef) -> Result { )) } +fn sort_impl(count: usize) -> TokenStream2 { + (1..=count) + .map(|c| { + let field = vote_field(c); + quote! { + // NOTE: self.field here is sometimes `Vec<(voter, weight)>` and sometimes + // `Vec<(voter, weights, last_weight)>`, but Rust's great pattern matching makes it + // all work super nice. + self.#field.sort_by(|(a, ..), (b, ..)| voter_stake(&b).cmp(&voter_stake(&a))); + // ---------------------------------^^ in all fields, the index 0 is the voter id. 
+ } + }) + .collect::() +} + +fn remove_weakest_sorted_impl(count: usize) -> TokenStream2 { + // check minimum from field 2 onwards. We assume 0 is minimum + let check_minimum = (2..=count).map(|c| { + let filed = vote_field(c); + quote! { + let filed_value = self.#filed + .last() + .map(|(x, ..)| voter_stake(x)) + .unwrap_or_else(|| _fepsp::sp_arithmetic::traits::Bounded::max_value()); + if filed_value < minimum { + minimum = filed_value; + minimum_filed = #c + } + } + }); + + let remove_minimum_match = (1..=count).map(|c| { + let filed = vote_field(c); + quote! { + #c => self.#filed.pop().map(|(x, ..)| x), + } + }); + + let first_filed = vote_field(1); + quote! { + // we assume first one is the minimum. No problem if it is empty. + let mut minimum_filed = 1; + let mut minimum = self.#first_filed + .last() + .map(|(x, ..)| voter_stake(x)) + .unwrap_or_else(|| _fepsp::sp_arithmetic::traits::Bounded::max_value()); + + #( #check_minimum )* + + match minimum_filed { + #( #remove_minimum_match )* + _ => { + debug_assert!(false); + None + } + } + } +} + fn remove_voter_impl(count: usize) -> TokenStream2 { let field_name = vote_field(1); let single = quote! { diff --git a/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs b/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs index ec035fadbeba2..af9118140d91c 100644 --- a/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs +++ b/substrate/utils/frame/benchmarking-cli/src/pallet/command.rs @@ -600,6 +600,7 @@ impl PalletCmd { let benchmark_name = &benchmark.name; if extrinsic.is_empty() || extrinsic.as_bytes() == &b"*"[..] || + extrinsic.as_bytes() == &b"all"[..] 
|| extrinsics.contains(&&benchmark_name[..]) { benchmarks_to_run.push(( From f49e2e4360f548852e2515a171e82a21ae6d720b Mon Sep 17 00:00:00 2001 From: Ankan Date: Mon, 24 Mar 2025 12:47:40 +0100 Subject: [PATCH 36/38] fix election provider --- .../benchmarking/src/inner.rs | 2 +- .../election-provider-support/src/traits.rs | 23 +++++++++++++++++++ 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/substrate/frame/election-provider-support/benchmarking/src/inner.rs b/substrate/frame/election-provider-support/benchmarking/src/inner.rs index 7fb8c1bdb7290..a7b969bb1cf9b 100644 --- a/substrate/frame/election-provider-support/benchmarking/src/inner.rs +++ b/substrate/frame/election-provider-support/benchmarking/src/inner.rs @@ -37,7 +37,7 @@ fn set_up_voters_targets( voters_len: u32, targets_len: u32, degree: usize, -) -> (Vec<(AccountId, u64, impl IntoIterator)>, Vec) { +) -> (Vec<(AccountId, u64, impl Clone + IntoIterator)>, Vec) { // fill targets. let mut targets = (0..targets_len) .map(|i| frame_benchmarking::account::("Target", i, SEED)) diff --git a/substrate/frame/election-provider-support/src/traits.rs b/substrate/frame/election-provider-support/src/traits.rs index 84fd57992d343..d8ffd41d8ae51 100644 --- a/substrate/frame/election-provider-support/src/traits.rs +++ b/substrate/frame/election-provider-support/src/traits.rs @@ -42,6 +42,8 @@ where + Clone + Bounded + Encode + + Ord + + PartialOrd + TypeInfo; /// The target type. Needs to be an index (convert to usize). @@ -53,6 +55,8 @@ where + Clone + Bounded + Encode + + Ord + + PartialOrd + TypeInfo; /// The weight/accuracy type of each vote. @@ -123,4 +127,23 @@ where voter_at: impl Fn(Self::VoterIndex) -> Option, target_at: impl Fn(Self::TargetIndex) -> Option, ) -> Result>, Error>; + + /// Sort self by the means of the given function. + /// + /// This might be helpful to allow for easier trimming. 
+ fn sort(&mut self, voter_stake: F) + where + F: FnMut(&Self::VoterIndex) -> VoteWeight; + + /// Remove the least staked voter. + /// + /// This is ONLY sensible to do if [`Self::sort`] has been called on the struct at least once. + fn remove_weakest_sorted(&mut self, voter_stake: F) -> Option + where + F: FnMut(&Self::VoterIndex) -> VoteWeight; + + /// Make this solution corrupt. This should set the index of a voter to `Bounded::max_value()`. + /// + /// Obviously, this is only useful for testing. + fn corrupt(&mut self); } From 2340bb9f73e4023d35981fb0242c6825bb5e344a Mon Sep 17 00:00:00 2001 From: Ankan Date: Mon, 24 Mar 2025 12:50:41 +0100 Subject: [PATCH 37/38] undo bounds --- .../frame/election-provider-support/src/bounds.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/substrate/frame/election-provider-support/src/bounds.rs b/substrate/frame/election-provider-support/src/bounds.rs index 6b2423b7fece6..6ef0604cb4bef 100644 --- a/substrate/frame/election-provider-support/src/bounds.rs +++ b/substrate/frame/election-provider-support/src/bounds.rs @@ -54,6 +54,7 @@ //! A default or `None` bound means that no bounds are enforced (i.e. unlimited result size). In //! general, be careful when using unbounded election bounds in production. +use codec::Encode; use core::ops::Add; use sp_runtime::traits::Zero; @@ -154,6 +155,15 @@ impl DataProviderBounds { self.size_exhausted(given_size.unwrap_or(SizeBound::zero())) } + /// Ensures the given encode-able slice meets both the length and count bounds. + /// + /// Same as `exhausted` but a better syntax. + pub fn slice_exhausted(self, input: &[T]) -> bool { + let size = Some((input.encoded_size() as u32).into()); + let count = Some((input.len() as u32).into()); + self.exhausted(size, count) + } + /// Returns an instance of `Self` that is constructed by capping both the `count` and `size` /// fields. If `self` is None, overwrite it with the provided bounds. 
pub fn max(self, bounds: DataProviderBounds) -> Self { From 670b95c33ae2931bde2ed6d1dc78a8c63c150649 Mon Sep 17 00:00:00 2001 From: Ankan Date: Mon, 24 Mar 2025 13:27:01 +0100 Subject: [PATCH 38/38] re-prdoc --- prdoc/pr_7939.prdoc | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/prdoc/pr_7939.prdoc b/prdoc/pr_7939.prdoc index 16b732f06ee16..5ed023382b760 100644 --- a/prdoc/pr_7939.prdoc +++ b/prdoc/pr_7939.prdoc @@ -21,20 +21,16 @@ crates: bump: major - name: pallet-staking bump: major -- name: pallet-bags-list - bump: patch - name: pallet-grandpa - bump: major + bump: patch - name: pallet-election-provider-multi-phase bump: major - name: pallet-election-provider-support-benchmarking - bump: major + bump: patch - name: pallet-fast-unstake bump: patch -- name: frame-benchmarking-cli - bump: major - name: pallet-beefy - bump: major + bump: patch - name: pallet-delegated-staking bump: patch - name: pallet-elections-phragmen @@ -47,3 +43,5 @@ crates: bump: patch - name: pallet-root-offences bump: patch +- name: westend-runtime + bump: major