Skip to content

Commit

Permalink
Revert "feat: EXC-1751: Charge idle canisters for full execution (#1806)"

Browse files Browse the repository at this point in the history

This reverts commit ebe9a62.
  • Loading branch information
berestovskyy authored and DFINITYManu committed Oct 8, 2024
1 parent 837588c commit dfc32fd
Show file tree
Hide file tree
Showing 3 changed files with 7 additions and 109 deletions.
13 changes: 3 additions & 10 deletions rs/execution_environment/src/scheduler.rs
Original file line number Diff line number Diff line change
Expand Up @@ -701,14 +701,7 @@ impl SchedulerImpl {

// Update subnet available memory before taking out the canisters.
round_limits.subnet_available_memory = self.exec_env.subnet_available_memory(&state);
let mut canisters = state.take_canister_states();
round_schedule.charge_idle_canisters(
&mut canisters,
&mut round_fully_executed_canister_ids,
current_round,
is_first_iteration,
);

let canisters = state.take_canister_states();
// Obtain the active canisters and update the collection of heap delta rate-limited canisters.
let (active_round_schedule, rate_limited_canister_ids) = round_schedule
.filter_canisters(
Expand Down Expand Up @@ -1969,7 +1962,7 @@ fn observe_instructions_consumed_per_message(
struct ExecutionThreadResult {
canisters: Vec<CanisterState>,
executed_canister_ids: BTreeSet<CanisterId>,
fully_executed_canister_ids: BTreeSet<CanisterId>,
fully_executed_canister_ids: Vec<CanisterId>,
ingress_results: Vec<(MessageId, IngressStatus)>,
slices_executed: NumSlices,
messages_executed: NumMessages,
Expand Down Expand Up @@ -2007,7 +2000,7 @@ fn execute_canisters_on_thread(
// These variables accumulate the results and will be returned at the end.
let mut canisters = vec![];
let mut executed_canister_ids = BTreeSet::new();
let mut fully_executed_canister_ids = BTreeSet::new();
let mut fully_executed_canister_ids = vec![];
let mut ingress_results = vec![];
let mut total_slices_executed = NumSlices::from(0);
let mut total_messages_executed = NumMessages::from(0);
Expand Down
38 changes: 2 additions & 36 deletions rs/execution_environment/src/scheduler/round_schedule.rs
Original file line number Diff line number Diff line change
Expand Up @@ -104,40 +104,6 @@ impl RoundSchedule {
}
}

/// Marks idle canisters in front of the schedule as fully executed.
///
/// Walks the new-execution order and charges every leading idle canister
/// (one with no pending execution) for a full round; the scan stops at the
/// first canister that actually has work, so only the idle prefix is charged.
pub fn charge_idle_canisters(
    &self,
    canisters: &mut BTreeMap<CanisterId, CanisterState>,
    fully_executed_canister_ids: &mut BTreeSet<CanisterId>,
    round_id: ExecutionRound,
    is_first_iteration: bool,
) {
    for id in self.ordered_new_execution_canister_ids.iter() {
        // A canister may have been removed from the map; just move on.
        if let Some(canister) = canisters.get_mut(id) {
            match canister.next_execution() {
                // Idle canister: charge it as if it ran a full round
                // (rank 0, zero instructions consumed).
                NextExecution::None => Self::finish_canister_execution(
                    canister,
                    fully_executed_canister_ids,
                    round_id,
                    is_first_iteration,
                    0,
                ),
                // Install-code canisters are neither charged nor do they
                // terminate the scan.
                NextExecution::ContinueInstallCode => {}
                // First non-idle canister ends the idle prefix.
                NextExecution::StartNew | NextExecution::ContinueLong => break,
            }
        }
    }
}

/// Returns a round schedule covering active canisters only; and the set of
/// rate limited canisters.
pub fn filter_canisters(
Expand Down Expand Up @@ -271,7 +237,7 @@ impl RoundSchedule {

pub fn finish_canister_execution(
canister: &mut CanisterState,
fully_executed_canister_ids: &mut BTreeSet<CanisterId>,
fully_executed_canister_ids: &mut Vec<CanisterId>,
round_id: ExecutionRound,
is_first_iteration: bool,
rank: usize,
Expand All @@ -292,7 +258,7 @@ impl RoundSchedule {

// We schedule canisters (as opposed to individual messages),
// and we charge for every full execution round.
fully_executed_canister_ids.insert(canister.canister_id());
fully_executed_canister_ids.push(canister.canister_id());
}
}

Expand Down
65 changes: 2 additions & 63 deletions rs/execution_environment/src/scheduler/tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1609,7 +1609,7 @@ fn can_execute_messages_with_just_enough_instructions() {
}

#[test]
fn execute_idle_and_canisters_with_messages() {
fn execute_only_canisters_with_messages() {
let mut test = SchedulerTestBuilder::new()
.with_scheduler_config(SchedulerConfig {
scheduler_cores: 2,
Expand Down Expand Up @@ -1637,7 +1637,7 @@ fn execute_idle_and_canisters_with_messages() {
let idle = test.canister_state(idle);
assert_eq!(
idle.scheduler_state.last_full_execution_round,
test.last_round()
ExecutionRound::from(0)
);
assert_eq!(
idle.system_state
Expand Down Expand Up @@ -6029,64 +6029,3 @@ fn charge_canisters_for_full_execution(#[strategy(2..10_usize)] scheduler_cores:
}
prop_assert_eq!(total_accumulated_priority - total_priority_credit, 0);
}

#[test]
// Verifies that idle canisters are charged for a full execution round:
// after the first round, every idle canister's `last_full_execution_round`
// is bumped, and the accumulated-priority invariant holds across rounds.
fn charge_idle_canisters_for_full_execution_round() {
let scheduler_cores = 2;
let num_rounds = 100;
let slice = 20;
// Configure the scheduler so each message consumes exactly one round's
// worth of instructions, with all per-canister overheads zeroed out.
let mut test = SchedulerTestBuilder::new()
.with_scheduler_config(SchedulerConfig {
scheduler_cores,
max_instructions_per_round: slice.into(),
max_instructions_per_message: slice.into(),
max_instructions_per_message_without_dts: slice.into(),
max_instructions_per_slice: slice.into(),
instruction_overhead_per_execution: NumInstructions::from(0),
instruction_overhead_per_canister: NumInstructions::from(0),
instruction_overhead_per_canister_for_finalization: NumInstructions::from(0),
..SchedulerConfig::application_subnet()
})
.build();

// Bump up the round number.
test.execute_round(ExecutionRoundType::OrdinaryRound);

// Create many idle canisters.
for _ in 0..scheduler_cores * 2 {
test.create_canister();
}

// Create many busy canisters.
// Each busy canister gets one full-slice ingress per round, so it stays
// busy for the entire test.
for _ in 0..scheduler_cores * 2 {
let canister_id = test.create_canister();
for _ in 0..num_rounds {
test.send_ingress(canister_id, ingress(slice));
}
}

// Upper bound used to check that priorities do not diverge over time.
let multiplier = scheduler_cores * test.state().canister_states.len();
for round in 0..num_rounds {
test.execute_round(ExecutionRoundType::OrdinaryRound);

let mut total_accumulated_priority = 0;
let mut total_priority_credit = 0;
for canister in test.state().canisters_iter() {
let scheduler_state = &canister.scheduler_state;
// Assert that we punished all idle canisters, not just top `scheduler_cores`.
if round == 0 && !canister.has_input() {
assert_ne!(test.last_round(), 0.into());
assert_eq!(scheduler_state.last_full_execution_round, test.last_round());
}
// Assert there is no divergency in accumulated priorities.
let priority = scheduler_state.accumulated_priority - scheduler_state.priority_credit;
assert!(priority.get() <= 100 * multiplier as i64);
assert!(priority.get() >= -100 * multiplier as i64);

total_accumulated_priority += scheduler_state.accumulated_priority.get();
total_priority_credit += scheduler_state.priority_credit.get();
}
// The accumulated priority invariant should be respected.
assert_eq!(total_accumulated_priority - total_priority_credit, 0);
}
}

0 comments on commit dfc32fd

Please sign in to comment.