Skip to content

Commit

Permalink
Add retry mechanics to pallet-scheduler (#3060)
Browse files Browse the repository at this point in the history
Fixes #3014 

This PR adds retry mechanics to `pallet-scheduler`, as described in the
issue above.

Users can now set a retry configuration for a task so that, if its
scheduled run fails, it will be retried after a given number of blocks,
either for a specified number of attempts or until it succeeds.

If a retried task runs successfully before running out of retries, its
remaining retry counter will be reset to the initial value. If a retried
task runs out of retries, it will be removed from the schedule.

Tasks which need to be scheduled for a retry are still subject to weight
metering and agenda space, same as a regular task. Periodic tasks will
have their periodic schedule put on hold while the task is retrying.

---------

Signed-off-by: georgepisaltu <george.pisaltu@parity.io>
Co-authored-by: command-bot <>
  • Loading branch information
georgepisaltu authored Feb 16, 2024
1 parent ad68a05 commit 9346019
Show file tree
Hide file tree
Showing 9 changed files with 2,292 additions and 377 deletions.
Original file line number Diff line number Diff line change
@@ -1,42 +1,41 @@
// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0
// This file is part of Cumulus.

// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Cumulus is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Cumulus is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Cumulus. If not, see <http://www.gnu.org/licenses/>.

//! Autogenerated weights for `pallet_scheduler`
//!
//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
//! DATE: 2023-07-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
//! DATE: 2024-01-25, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
//! WORST CASE MAP SIZE: `1000000`
//! HOSTNAME: `runner-ynta1nyy-project-238-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
//! EXECUTION: ``, WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-polkadot-dev")`, DB CACHE: 1024
//! HOSTNAME: `runner-grjcggob-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("collectives-westend-dev")`, DB CACHE: 1024

// Executed Command:
// ./target/production/polkadot-parachain
// target/production/polkadot-parachain
// benchmark
// pallet
// --chain=collectives-polkadot-dev
// --wasm-execution=compiled
// --pallet=pallet_scheduler
// --no-storage-info
// --no-median-slopes
// --no-min-squares
// --extrinsic=*
// --steps=50
// --repeat=20
// --json
// --header=./file_header.txt
// --output=./parachains/runtimes/collectives/collectives-polkadot/src/weights/
// --extrinsic=*
// --wasm-execution=compiled
// --heap-pages=4096
// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
// --pallet=pallet_scheduler
// --chain=collectives-westend-dev
// --header=./cumulus/file_header.txt
// --output=./cumulus/parachains/runtimes/collectives/collectives-westend/src/weights/

#![cfg_attr(rustfmt, rustfmt_skip)]
#![allow(unused_parens)]
Expand All @@ -55,8 +54,8 @@ impl<T: frame_system::Config> pallet_scheduler::WeightInfo for WeightInfo<T> {
// Proof Size summary in bytes:
// Measured: `31`
// Estimated: `1489`
// Minimum execution time: 3_441_000 picoseconds.
Weight::from_parts(3_604_000, 0)
// Minimum execution time: 2_475_000 picoseconds.
Weight::from_parts(2_644_000, 0)
.saturating_add(Weight::from_parts(0, 1489))
.saturating_add(T::DbWeight::get().reads(1))
.saturating_add(T::DbWeight::get().writes(1))
Expand All @@ -68,37 +67,39 @@ impl<T: frame_system::Config> pallet_scheduler::WeightInfo for WeightInfo<T> {
// Proof Size summary in bytes:
// Measured: `77 + s * (177 ±0)`
// Estimated: `159279`
// Minimum execution time: 2_879_000 picoseconds.
Weight::from_parts(2_963_000, 0)
// Minimum execution time: 2_898_000 picoseconds.
Weight::from_parts(1_532_342, 0)
.saturating_add(Weight::from_parts(0, 159279))
// Standard Error: 3_764
.saturating_add(Weight::from_parts(909_557, 0).saturating_mul(s.into()))
// Standard Error: 4_736
.saturating_add(Weight::from_parts(412_374, 0).saturating_mul(s.into()))
.saturating_add(T::DbWeight::get().reads(1))
.saturating_add(T::DbWeight::get().writes(1))
}
fn service_task_base() -> Weight {
// Proof Size summary in bytes:
// Measured: `0`
// Estimated: `0`
// Minimum execution time: 5_172_000 picoseconds.
Weight::from_parts(5_294_000, 0)
// Minimum execution time: 3_171_000 picoseconds.
Weight::from_parts(3_349_000, 0)
.saturating_add(Weight::from_parts(0, 0))
}
/// Storage: `Preimage::PreimageFor` (r:1 w:1)
/// Proof: `Preimage::PreimageFor` (`max_values`: None, `max_size`: Some(4194344), added: 4196819, mode: `Measured`)
/// Storage: `Preimage::StatusFor` (r:1 w:1)
/// Storage: `Preimage::StatusFor` (r:1 w:0)
/// Proof: `Preimage::StatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`)
/// Storage: `Preimage::RequestStatusFor` (r:1 w:1)
/// Proof: `Preimage::RequestStatusFor` (`max_values`: None, `max_size`: Some(91), added: 2566, mode: `MaxEncodedLen`)
/// The range of component `s` is `[128, 4194304]`.
fn service_task_fetched(s: u32, ) -> Weight {
// Proof Size summary in bytes:
// Measured: `213 + s * (1 ±0)`
// Estimated: `3678 + s * (1 ±0)`
// Minimum execution time: 19_704_000 picoseconds.
Weight::from_parts(19_903_000, 0)
.saturating_add(Weight::from_parts(0, 3678))
// Standard Error: 5
.saturating_add(Weight::from_parts(1_394, 0).saturating_mul(s.into()))
.saturating_add(T::DbWeight::get().reads(2))
// Measured: `246 + s * (1 ±0)`
// Estimated: `3711 + s * (1 ±0)`
// Minimum execution time: 17_329_000 picoseconds.
Weight::from_parts(17_604_000, 0)
.saturating_add(Weight::from_parts(0, 3711))
// Standard Error: 1
.saturating_add(Weight::from_parts(1_256, 0).saturating_mul(s.into()))
.saturating_add(T::DbWeight::get().reads(3))
.saturating_add(T::DbWeight::get().writes(2))
.saturating_add(Weight::from_parts(0, 1).saturating_mul(s.into()))
}
Expand All @@ -108,33 +109,33 @@ impl<T: frame_system::Config> pallet_scheduler::WeightInfo for WeightInfo<T> {
// Proof Size summary in bytes:
// Measured: `0`
// Estimated: `0`
// Minimum execution time: 6_359_000 picoseconds.
Weight::from_parts(6_599_000, 0)
// Minimum execution time: 4_503_000 picoseconds.
Weight::from_parts(4_677_000, 0)
.saturating_add(Weight::from_parts(0, 0))
.saturating_add(T::DbWeight::get().writes(1))
}
fn service_task_periodic() -> Weight {
// Proof Size summary in bytes:
// Measured: `0`
// Estimated: `0`
// Minimum execution time: 5_217_000 picoseconds.
Weight::from_parts(5_333_000, 0)
// Minimum execution time: 3_145_000 picoseconds.
Weight::from_parts(3_252_000, 0)
.saturating_add(Weight::from_parts(0, 0))
}
fn execute_dispatch_signed() -> Weight {
// Proof Size summary in bytes:
// Measured: `0`
// Estimated: `0`
// Minimum execution time: 2_406_000 picoseconds.
Weight::from_parts(2_541_000, 0)
// Minimum execution time: 1_804_000 picoseconds.
Weight::from_parts(1_891_000, 0)
.saturating_add(Weight::from_parts(0, 0))
}
fn execute_dispatch_unsigned() -> Weight {
// Proof Size summary in bytes:
// Measured: `0`
// Estimated: `0`
// Minimum execution time: 2_370_000 picoseconds.
Weight::from_parts(2_561_000, 0)
// Minimum execution time: 1_706_000 picoseconds.
Weight::from_parts(1_776_000, 0)
.saturating_add(Weight::from_parts(0, 0))
}
/// Storage: `Scheduler::Agenda` (r:1 w:1)
Expand All @@ -144,11 +145,11 @@ impl<T: frame_system::Config> pallet_scheduler::WeightInfo for WeightInfo<T> {
// Proof Size summary in bytes:
// Measured: `77 + s * (177 ±0)`
// Estimated: `159279`
// Minimum execution time: 11_784_000 picoseconds.
Weight::from_parts(5_574_404, 0)
// Minimum execution time: 8_629_000 picoseconds.
Weight::from_parts(6_707_232, 0)
.saturating_add(Weight::from_parts(0, 159279))
// Standard Error: 7_217
.saturating_add(Weight::from_parts(1_035_248, 0).saturating_mul(s.into()))
// Standard Error: 5_580
.saturating_add(Weight::from_parts(471_827, 0).saturating_mul(s.into()))
.saturating_add(T::DbWeight::get().reads(1))
.saturating_add(T::DbWeight::get().writes(1))
}
Expand All @@ -161,11 +162,11 @@ impl<T: frame_system::Config> pallet_scheduler::WeightInfo for WeightInfo<T> {
// Proof Size summary in bytes:
// Measured: `77 + s * (177 ±0)`
// Estimated: `159279`
// Minimum execution time: 16_373_000 picoseconds.
Weight::from_parts(3_088_135, 0)
// Minimum execution time: 12_675_000 picoseconds.
Weight::from_parts(7_791_682, 0)
.saturating_add(Weight::from_parts(0, 159279))
// Standard Error: 7_095
.saturating_add(Weight::from_parts(1_745_270, 0).saturating_mul(s.into()))
// Standard Error: 5_381
.saturating_add(Weight::from_parts(653_023, 0).saturating_mul(s.into()))
.saturating_add(T::DbWeight::get().reads(1))
.saturating_add(T::DbWeight::get().writes(2))
}
Expand All @@ -178,11 +179,11 @@ impl<T: frame_system::Config> pallet_scheduler::WeightInfo for WeightInfo<T> {
// Proof Size summary in bytes:
// Measured: `468 + s * (179 ±0)`
// Estimated: `159279`
// Minimum execution time: 14_822_000 picoseconds.
Weight::from_parts(9_591_402, 0)
// Minimum execution time: 11_908_000 picoseconds.
Weight::from_parts(11_833_059, 0)
.saturating_add(Weight::from_parts(0, 159279))
// Standard Error: 7_151
.saturating_add(Weight::from_parts(1_058_408, 0).saturating_mul(s.into()))
// Standard Error: 5_662
.saturating_add(Weight::from_parts(482_816, 0).saturating_mul(s.into()))
.saturating_add(T::DbWeight::get().reads(2))
.saturating_add(T::DbWeight::get().writes(2))
}
Expand All @@ -195,12 +196,91 @@ impl<T: frame_system::Config> pallet_scheduler::WeightInfo for WeightInfo<T> {
// Proof Size summary in bytes:
// Measured: `509 + s * (179 ±0)`
// Estimated: `159279`
// Minimum execution time: 18_541_000 picoseconds.
Weight::from_parts(6_522_239, 0)
// Minimum execution time: 15_506_000 picoseconds.
Weight::from_parts(11_372_975, 0)
.saturating_add(Weight::from_parts(0, 159279))
// Standard Error: 8_349
.saturating_add(Weight::from_parts(1_760_431, 0).saturating_mul(s.into()))
// Standard Error: 5_765
.saturating_add(Weight::from_parts(656_322, 0).saturating_mul(s.into()))
.saturating_add(T::DbWeight::get().reads(2))
.saturating_add(T::DbWeight::get().writes(2))
}
/// Storage: `Scheduler::Retries` (r:1 w:2)
/// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`)
/// Storage: `Scheduler::Agenda` (r:1 w:1)
/// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`)
/// Storage: `Scheduler::Lookup` (r:0 w:1)
/// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`)
/// The range of component `s` is `[1, 200]`.
fn schedule_retry(s: u32, ) -> Weight {
// Benchmarked cost of rescheduling a failed task for a retry.
// Per the generated storage annotations above: reads `Retries` and `Agenda`
// (2 reads); writes `Retries` (x2), `Agenda`, and `Lookup` (4 writes).
// `s` (range [1, 200] per the generator) scales the cost linearly.
// Proof Size summary in bytes:
// Measured: `159`
// Estimated: `159279`
// Minimum execution time: 14_069_000 picoseconds.
Weight::from_parts(14_868_345, 0)
.saturating_add(Weight::from_parts(0, 159279))
// Standard Error: 425
.saturating_add(Weight::from_parts(33_468, 0).saturating_mul(s.into()))
.saturating_add(T::DbWeight::get().reads(2))
.saturating_add(T::DbWeight::get().writes(4))
}
/// Storage: `Scheduler::Agenda` (r:1 w:0)
/// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`)
/// Storage: `Scheduler::Retries` (r:0 w:1)
/// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`)
fn set_retry() -> Weight {
// Benchmarked cost of setting a retry configuration for a scheduled task:
// one read of `Agenda`, one write of `Retries` (see annotations above).
// NOTE(review): the generated proof-size comment below references a component
// `s`, but this function takes no component parameter — presumably a
// benchmark-generator artifact; confirm on regeneration.
// Proof Size summary in bytes:
// Measured: `77 + s * (177 ±0)`
// Estimated: `159279`
// Minimum execution time: 7_550_000 picoseconds.
Weight::from_parts(6_735_955, 0)
.saturating_add(Weight::from_parts(0, 159279))
.saturating_add(T::DbWeight::get().reads(1))
.saturating_add(T::DbWeight::get().writes(1))
}
/// Storage: `Scheduler::Lookup` (r:1 w:0)
/// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`)
/// Storage: `Scheduler::Agenda` (r:1 w:0)
/// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`)
/// Storage: `Scheduler::Retries` (r:0 w:1)
/// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`)
fn set_retry_named() -> Weight {
// Benchmarked cost of setting a retry configuration for a *named* task:
// the extra `Lookup` read (vs `set_retry`) resolves the name to its agenda
// slot — 2 reads, 1 write total (see annotations above).
// NOTE(review): the generated proof-size comment below references a component
// `s`, but this function takes no component parameter — presumably a
// benchmark-generator artifact; confirm on regeneration.
// Proof Size summary in bytes:
// Measured: `513 + s * (179 ±0)`
// Estimated: `159279`
// Minimum execution time: 11_017_000 picoseconds.
Weight::from_parts(11_749_385, 0)
.saturating_add(Weight::from_parts(0, 159279))
.saturating_add(T::DbWeight::get().reads(2))
.saturating_add(T::DbWeight::get().writes(1))
}
/// Storage: `Scheduler::Agenda` (r:1 w:0)
/// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`)
/// Storage: `Scheduler::Retries` (r:0 w:1)
/// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`)
fn cancel_retry() -> Weight {
// Benchmarked cost of removing a task's retry configuration: one read of
// `Agenda`, one write of `Retries` (see annotations above).
// NOTE(review): weight constants are identical to `set_retry` — presumably
// both paths were measured as the same benchmark; verify on regeneration.
// Proof Size summary in bytes:
// Measured: `77 + s * (177 ±0)`
// Estimated: `159279`
// Minimum execution time: 7_550_000 picoseconds.
Weight::from_parts(6_735_955, 0)
.saturating_add(Weight::from_parts(0, 159279))
.saturating_add(T::DbWeight::get().reads(1))
.saturating_add(T::DbWeight::get().writes(1))
}
/// Storage: `Scheduler::Lookup` (r:1 w:0)
/// Proof: `Scheduler::Lookup` (`max_values`: None, `max_size`: Some(48), added: 2523, mode: `MaxEncodedLen`)
/// Storage: `Scheduler::Agenda` (r:1 w:0)
/// Proof: `Scheduler::Agenda` (`max_values`: None, `max_size`: Some(155814), added: 158289, mode: `MaxEncodedLen`)
/// Storage: `Scheduler::Retries` (r:0 w:1)
/// Proof: `Scheduler::Retries` (`max_values`: None, `max_size`: Some(30), added: 2505, mode: `MaxEncodedLen`)
fn cancel_retry_named() -> Weight {
// Benchmarked cost of removing a *named* task's retry configuration: the
// extra `Lookup` read (vs `cancel_retry`) resolves the name — 2 reads,
// 1 write total (see annotations above).
// NOTE(review): weight constants are identical to `set_retry_named` —
// presumably measured as the same benchmark; verify on regeneration.
// Proof Size summary in bytes:
// Measured: `513 + s * (179 ±0)`
// Estimated: `159279`
// Minimum execution time: 11_017_000 picoseconds.
Weight::from_parts(11_749_385, 0)
.saturating_add(Weight::from_parts(0, 159279))
.saturating_add(T::DbWeight::get().reads(2))
.saturating_add(T::DbWeight::get().writes(1))
}
}
Loading

0 comments on commit 9346019

Please sign in to comment.