From 92aca015ad1c0df32e57fac4cc27f223dcb02f87 Mon Sep 17 00:00:00 2001
From: Andriy Berestovskyy
Date: Thu, 3 Oct 2024 00:44:05 +0200
Subject: [PATCH] feat: EXC-1751: Charge idle canisters for full execution (#1806)

The idle canisters in front of the round schedule should be marked as
fully executed, as they were scheduled first in the round. This helps
to rotate the round schedule faster.
---
 rs/execution_environment/src/scheduler.rs     | 13 +++-
 .../src/scheduler/round_schedule.rs           | 38 ++++++++++-
 .../src/scheduler/tests.rs                    | 65 ++++++++++++++++++-
 3 files changed, 109 insertions(+), 7 deletions(-)

diff --git a/rs/execution_environment/src/scheduler.rs b/rs/execution_environment/src/scheduler.rs
index d4aff4789ec..c1f46ad8011 100644
--- a/rs/execution_environment/src/scheduler.rs
+++ b/rs/execution_environment/src/scheduler.rs
@@ -701,7 +701,14 @@
         // Update subnet available memory before taking out the canisters.
         round_limits.subnet_available_memory = self.exec_env.subnet_available_memory(&state);
 
-        let canisters = state.take_canister_states();
+        let mut canisters = state.take_canister_states();
+        round_schedule.charge_idle_canisters(
+            &mut canisters,
+            &mut round_fully_executed_canister_ids,
+            current_round,
+            is_first_iteration,
+        );
+
         // Obtain the active canisters and update the collection of heap delta rate-limited canisters.
         let (active_round_schedule, rate_limited_canister_ids) = round_schedule
             .filter_canisters(
@@ -1906,7 +1913,7 @@ fn observe_instructions_consumed_per_message(
 struct ExecutionThreadResult {
     canisters: Vec<CanisterState>,
     executed_canister_ids: BTreeSet<CanisterId>,
-    fully_executed_canister_ids: Vec<CanisterId>,
+    fully_executed_canister_ids: BTreeSet<CanisterId>,
     ingress_results: Vec<(MessageId, IngressStatus)>,
     slices_executed: NumSlices,
     messages_executed: NumMessages,
@@ -1944,7 +1951,7 @@ fn execute_canisters_on_thread(
     // These variables accumulate the results and will be returned at the end.
     let mut canisters = vec![];
     let mut executed_canister_ids = BTreeSet::new();
-    let mut fully_executed_canister_ids = vec![];
+    let mut fully_executed_canister_ids = BTreeSet::new();
     let mut ingress_results = vec![];
     let mut total_slices_executed = NumSlices::from(0);
     let mut total_messages_executed = NumMessages::from(0);
diff --git a/rs/execution_environment/src/scheduler/round_schedule.rs b/rs/execution_environment/src/scheduler/round_schedule.rs
index f8b30150a0d..fdb7e87ab8c 100644
--- a/rs/execution_environment/src/scheduler/round_schedule.rs
+++ b/rs/execution_environment/src/scheduler/round_schedule.rs
@@ -104,6 +104,40 @@
         }
     }
 
+    /// Marks idle canisters in front of the schedule as fully executed.
+    pub fn charge_idle_canisters(
+        &self,
+        canisters: &mut BTreeMap<CanisterId, CanisterState>,
+        fully_executed_canister_ids: &mut BTreeSet<CanisterId>,
+        round_id: ExecutionRound,
+        is_first_iteration: bool,
+    ) {
+        for canister_id in self.ordered_new_execution_canister_ids.iter() {
+            let canister = canisters.get_mut(canister_id);
+            if let Some(canister) = canister {
+                let next_execution = canister.next_execution();
+                match next_execution {
+                    NextExecution::None => {
+                        Self::finish_canister_execution(
+                            canister,
+                            fully_executed_canister_ids,
+                            round_id,
+                            is_first_iteration,
+                            0,
+                        );
+                    }
+                    // Skip install code canisters.
+                    NextExecution::ContinueInstallCode => {}
+
+                    NextExecution::StartNew | NextExecution::ContinueLong => {
+                        // Stop searching after the first non-idle canister.
+                        break;
+                    }
+                }
+            }
+        }
+    }
+
     /// Returns a round schedule covering active canisters only; and the set of
     /// rate limited canisters.
     pub fn filter_canisters(
@@ -237,7 +271,7 @@ impl RoundSchedule {
 
     pub fn finish_canister_execution(
         canister: &mut CanisterState,
-        fully_executed_canister_ids: &mut Vec<CanisterId>,
+        fully_executed_canister_ids: &mut BTreeSet<CanisterId>,
         round_id: ExecutionRound,
         is_first_iteration: bool,
         rank: usize,
@@ -258,7 +292,7 @@ impl RoundSchedule {
 
             // We schedule canisters (as opposed to individual messages),
             // and we charge for every full execution round.
-            fully_executed_canister_ids.push(canister.canister_id());
+            fully_executed_canister_ids.insert(canister.canister_id());
         }
     }
 
diff --git a/rs/execution_environment/src/scheduler/tests.rs b/rs/execution_environment/src/scheduler/tests.rs
index 9377b347341..ee4b0609258 100644
--- a/rs/execution_environment/src/scheduler/tests.rs
+++ b/rs/execution_environment/src/scheduler/tests.rs
@@ -1609,7 +1609,7 @@ fn can_execute_messages_with_just_enough_instructions() {
 }
 
 #[test]
-fn execute_only_canisters_with_messages() {
+fn execute_idle_and_canisters_with_messages() {
     let mut test = SchedulerTestBuilder::new()
         .with_scheduler_config(SchedulerConfig {
             scheduler_cores: 2,
@@ -1637,7 +1637,7 @@ fn execute_only_canisters_with_messages() {
     let idle = test.canister_state(idle);
     assert_eq!(
         idle.scheduler_state.last_full_execution_round,
-        ExecutionRound::from(0)
+        test.last_round()
     );
     assert_eq!(
         idle.system_state
@@ -6014,3 +6014,64 @@ fn charge_canisters_for_full_execution(#[strategy(2..10_usize)] scheduler_cores:
     }
     prop_assert_eq!(total_accumulated_priority - total_priority_credit, 0);
 }
+
+#[test]
+fn charge_idle_canisters_for_full_execution_round() {
+    let scheduler_cores = 2;
+    let num_rounds = 100;
+    let slice = 20;
+    let mut test = SchedulerTestBuilder::new()
+        .with_scheduler_config(SchedulerConfig {
+            scheduler_cores,
+            max_instructions_per_round: slice.into(),
+            max_instructions_per_message: slice.into(),
+            max_instructions_per_message_without_dts: slice.into(),
+            max_instructions_per_slice: slice.into(),
+            instruction_overhead_per_execution: NumInstructions::from(0),
+            instruction_overhead_per_canister: NumInstructions::from(0),
+            instruction_overhead_per_canister_for_finalization: NumInstructions::from(0),
+            ..SchedulerConfig::application_subnet()
+        })
+        .build();
+
+    // Bump up the round number.
+    test.execute_round(ExecutionRoundType::OrdinaryRound);
+
+    // Create many idle canisters.
+    for _ in 0..scheduler_cores * 2 {
+        test.create_canister();
+    }
+
+    // Create many busy canisters.
+    for _ in 0..scheduler_cores * 2 {
+        let canister_id = test.create_canister();
+        for _ in 0..num_rounds {
+            test.send_ingress(canister_id, ingress(slice));
+        }
+    }
+
+    let multiplier = scheduler_cores * test.state().canister_states.len();
+    for round in 0..num_rounds {
+        test.execute_round(ExecutionRoundType::OrdinaryRound);
+
+        let mut total_accumulated_priority = 0;
+        let mut total_priority_credit = 0;
+        for canister in test.state().canisters_iter() {
+            let scheduler_state = &canister.scheduler_state;
+            // Assert that we punished all idle canisters, not just top `scheduler_cores`.
+            if round == 0 && !canister.has_input() {
+                assert_ne!(test.last_round(), 0.into());
+                assert_eq!(scheduler_state.last_full_execution_round, test.last_round());
+            }
+            // Assert there is no divergency in accumulated priorities.
+            let priority = scheduler_state.accumulated_priority - scheduler_state.priority_credit;
+            assert!(priority.get() <= 100 * multiplier as i64);
+            assert!(priority.get() >= -100 * multiplier as i64);
+
+            total_accumulated_priority += scheduler_state.accumulated_priority.get();
+            total_priority_credit += scheduler_state.priority_credit.get();
+        }
+        // The accumulated priority invariant should be respected.
+        assert_eq!(total_accumulated_priority - total_priority_credit, 0);
+    }
+}