diff --git a/rs/execution_environment/src/scheduler.rs b/rs/execution_environment/src/scheduler.rs
index cb5be9e989e..9af14a02a37 100644
--- a/rs/execution_environment/src/scheduler.rs
+++ b/rs/execution_environment/src/scheduler.rs
@@ -701,7 +701,14 @@ impl SchedulerImpl {
             // Update subnet available memory before taking out the canisters.
             round_limits.subnet_available_memory = self.exec_env.subnet_available_memory(&state);
-            let canisters = state.take_canister_states();
+            let mut canisters = state.take_canister_states();
+            round_schedule.charge_idle_canisters(
+                &mut canisters,
+                &mut round_fully_executed_canister_ids,
+                current_round,
+                is_first_iteration,
+            );
+
             // Obtain the active canisters and update the collection of heap delta
             // rate-limited canisters.
             let (active_round_schedule, rate_limited_canister_ids) = round_schedule
                 .filter_canisters(
@@ -1962,7 +1969,7 @@ fn observe_instructions_consumed_per_message(
 struct ExecutionThreadResult {
     canisters: Vec<CanisterState>,
     executed_canister_ids: BTreeSet<CanisterId>,
-    fully_executed_canister_ids: Vec<CanisterId>,
+    fully_executed_canister_ids: BTreeSet<CanisterId>,
     ingress_results: Vec<(MessageId, IngressStatus)>,
     slices_executed: NumSlices,
     messages_executed: NumMessages,
@@ -2000,7 +2007,7 @@ fn execute_canisters_on_thread(
     // These variables accumulate the results and will be returned at the end.
     let mut canisters = vec![];
     let mut executed_canister_ids = BTreeSet::new();
-    let mut fully_executed_canister_ids = vec![];
+    let mut fully_executed_canister_ids = BTreeSet::new();
     let mut ingress_results = vec![];
     let mut total_slices_executed = NumSlices::from(0);
     let mut total_messages_executed = NumMessages::from(0);
diff --git a/rs/execution_environment/src/scheduler/round_schedule.rs b/rs/execution_environment/src/scheduler/round_schedule.rs
index f8b30150a0d..fdb7e87ab8c 100644
--- a/rs/execution_environment/src/scheduler/round_schedule.rs
+++ b/rs/execution_environment/src/scheduler/round_schedule.rs
@@ -104,6 +104,40 @@ impl RoundSchedule {
         }
     }
 
+    /// Marks idle canisters at the front of the schedule as fully executed.
+    pub fn charge_idle_canisters(
+        &self,
+        canisters: &mut BTreeMap<CanisterId, CanisterState>,
+        fully_executed_canister_ids: &mut BTreeSet<CanisterId>,
+        round_id: ExecutionRound,
+        is_first_iteration: bool,
+    ) {
+        for canister_id in self.ordered_new_execution_canister_ids.iter() {
+            let canister = canisters.get_mut(canister_id);
+            if let Some(canister) = canister {
+                let next_execution = canister.next_execution();
+                match next_execution {
+                    NextExecution::None => {
+                        Self::finish_canister_execution(
+                            canister,
+                            fully_executed_canister_ids,
+                            round_id,
+                            is_first_iteration,
+                            0,
+                        );
+                    }
+                    // Skip install code canisters.
+                    NextExecution::ContinueInstallCode => {}
+
+                    NextExecution::StartNew | NextExecution::ContinueLong => {
+                        // Stop searching after the first non-idle canister.
+                        break;
+                    }
+                }
+            }
+        }
+    }
+
     /// Returns a round schedule covering active canisters only; and the set of
     /// rate limited canisters.
     pub fn filter_canisters(
@@ -237,7 +271,7 @@ pub fn finish_canister_execution(
         canister: &mut CanisterState,
-        fully_executed_canister_ids: &mut Vec<CanisterId>,
+        fully_executed_canister_ids: &mut BTreeSet<CanisterId>,
         round_id: ExecutionRound,
         is_first_iteration: bool,
         rank: usize,
     ) {
@@ -258,7 +292,7 @@ impl RoundSchedule {
             canister.scheduler_state.last_full_execution_round = round_id;
 
             // We schedule canisters (as opposed to individual messages),
             // and we charge for every full execution round.
-            fully_executed_canister_ids.push(canister.canister_id());
+            fully_executed_canister_ids.insert(canister.canister_id());
         }
     }
diff --git a/rs/execution_environment/src/scheduler/tests.rs b/rs/execution_environment/src/scheduler/tests.rs
index b0328dbdaf7..1d6245e65e2 100644
--- a/rs/execution_environment/src/scheduler/tests.rs
+++ b/rs/execution_environment/src/scheduler/tests.rs
@@ -1609,7 +1609,7 @@ fn can_execute_messages_with_just_enough_instructions() {
 }
 
 #[test]
-fn execute_only_canisters_with_messages() {
+fn execute_idle_and_canisters_with_messages() {
     let mut test = SchedulerTestBuilder::new()
         .with_scheduler_config(SchedulerConfig {
             scheduler_cores: 2,
@@ -1637,7 +1637,7 @@
     let idle = test.canister_state(idle);
     assert_eq!(
         idle.scheduler_state.last_full_execution_round,
-        ExecutionRound::from(0)
+        test.last_round()
     );
     assert_eq!(
         idle.system_state
@@ -6029,3 +6029,64 @@ fn charge_canisters_for_full_execution(#[strategy(2..10_usize)] scheduler_cores:
     }
     prop_assert_eq!(total_accumulated_priority - total_priority_credit, 0);
 }
+
+#[test]
+fn charge_idle_canisters_for_full_execution_round() {
+    let scheduler_cores = 2;
+    let num_rounds = 100;
+    let slice = 20;
+    let mut test = SchedulerTestBuilder::new()
+        .with_scheduler_config(SchedulerConfig {
+            scheduler_cores,
+            max_instructions_per_round: slice.into(),
+            max_instructions_per_message: slice.into(),
+            max_instructions_per_message_without_dts: slice.into(),
+            max_instructions_per_slice: slice.into(),
+            instruction_overhead_per_execution: NumInstructions::from(0),
+            instruction_overhead_per_canister: NumInstructions::from(0),
+            instruction_overhead_per_canister_for_finalization: NumInstructions::from(0),
+            ..SchedulerConfig::application_subnet()
+        })
+        .build();
+
+    // Bump up the round number.
+    test.execute_round(ExecutionRoundType::OrdinaryRound);
+
+    // Create many idle canisters.
+    for _ in 0..scheduler_cores * 2 {
+        test.create_canister();
+    }
+
+    // Create many busy canisters.
+    for _ in 0..scheduler_cores * 2 {
+        let canister_id = test.create_canister();
+        for _ in 0..num_rounds {
+            test.send_ingress(canister_id, ingress(slice));
+        }
+    }
+
+    let multiplier = scheduler_cores * test.state().canister_states.len();
+    for round in 0..num_rounds {
+        test.execute_round(ExecutionRoundType::OrdinaryRound);
+
+        let mut total_accumulated_priority = 0;
+        let mut total_priority_credit = 0;
+        for canister in test.state().canisters_iter() {
+            let scheduler_state = &canister.scheduler_state;
+            // Assert that we punished all idle canisters, not just the top `scheduler_cores`.
+            if round == 0 && !canister.has_input() {
+                assert_ne!(test.last_round(), 0.into());
+                assert_eq!(scheduler_state.last_full_execution_round, test.last_round());
+            }
+            // Assert there is no divergence in accumulated priorities.
+            let priority = scheduler_state.accumulated_priority - scheduler_state.priority_credit;
+            assert!(priority.get() <= 100 * multiplier as i64);
+            assert!(priority.get() >= -100 * multiplier as i64);
+
+            total_accumulated_priority += scheduler_state.accumulated_priority.get();
+            total_priority_credit += scheduler_state.priority_credit.get();
+        }
+        // The accumulated priority invariant should be respected.
+        assert_eq!(total_accumulated_priority - total_priority_credit, 0);
+    }
+}
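
Note on the container change: `charge_idle_canisters` runs on every inner-round iteration (hence its `is_first_iteration` parameter), and `finish_canister_execution` is also reached from the execution threads, so the same canister id can in principle be reported as fully executed more than once per round; moving `fully_executed_canister_ids` from `Vec<CanisterId>` to `BTreeSet<CanisterId>` deduplicates those reports. A minimal standalone sketch of that property, using a plain `u64` stand-in for the real `CanisterId` type to keep it dependency-free:

use std::collections::BTreeSet;

// Hypothetical stand-in for `ic_types::CanisterId`; the real type wraps a
// principal, but a plain integer is enough to show the deduplication.
type CanisterId = u64;

fn main() {
    let mut fully_executed_canister_ids: BTreeSet<CanisterId> = BTreeSet::new();

    // An idle canister can be charged on every inner-round iteration, so the
    // same id may be inserted repeatedly within one round.
    for _iteration in 0..3 {
        fully_executed_canister_ids.insert(7);
    }

    // A `Vec` would now hold three entries; the set records the id exactly once.
    assert_eq!(fully_executed_canister_ids.len(), 1);
}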