diff --git a/Cargo.lock b/Cargo.lock
index d0233c278..71655f629 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2087,9 +2087,9 @@ dependencies = [
 [[package]]
 name = "fvm_ipld_amt"
-version = "0.6.1"
+version = "0.6.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7c0b0ee51ca8defa9717a72e1d35c8cbb85bd8320a835911410b63b9a63dffec"
+checksum = "5fea333475130094f27ce67809aae3f69eb5247541d835950b7c5da733dbbb34"
 dependencies = [
  "anyhow",
  "cid 0.10.1",
diff --git a/Cargo.toml b/Cargo.toml
index c83543736..eb75594d5 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -120,7 +120,7 @@ fvm_ipld_encoding = "0.4.0"
 fvm_ipld_blockstore = "0.2.0"
 fvm_ipld_hamt = "0.7.0"
 fvm_ipld_kamt = "0.3.0"
-fvm_ipld_amt = { version = "0.6.1" }
+fvm_ipld_amt = { version = "0.6.2" }
 fvm_ipld_bitfield = "0.6.0"
 
 # workspace
diff --git a/actors/miner/src/deadline_state.rs b/actors/miner/src/deadline_state.rs
index 824bef343..be6ed791f 100644
--- a/actors/miner/src/deadline_state.rs
+++ b/actors/miner/src/deadline_state.rs
@@ -23,6 +23,7 @@ use super::{
     BitFieldQueue, ExpirationSet, Partition, PartitionSectorMap, PoStPartition, PowerPair,
     SectorOnChainInfo, Sectors, TerminationResult,
 };
+
 use crate::SECTORS_AMT_BITWIDTH;
 
 // Bitwidth of AMTs determined empirically from mutation patterns and projections of mainnet data.
@@ -102,6 +103,102 @@ impl Deadlines {
         self.due[deadline_idx as usize] = store.put_cbor(deadline, Code::Blake2b256)?;
         Ok(())
     }
+
+    pub fn move_partitions<BS: Blockstore>(
+        policy: &Policy,
+        store: &BS,
+        orig_deadline: &mut Deadline,
+        dest_deadline: &mut Deadline,
+        partitions: &BitField,
+    ) -> anyhow::Result<()> {
+        let mut orig_partitions = orig_deadline.partitions_amt(store)?;
+        let mut dest_partitions = dest_deadline.partitions_amt(store)?;
+
+        // Even though we're moving partitions intact, we still need to update the
+        // orig/dest `Deadline` accordingly.
+
+        if dest_partitions.count() + partitions.len() > policy.max_partitions_per_deadline {
+            return Err(actor_error!(
+                forbidden,
+                "partitions in dest_deadline will exceed max_partitions_per_deadline"
+            ))?;
+        }
+
+        let first_dest_partition_idx = dest_partitions.count();
+        for (i, orig_partition_idx) in partitions.iter().enumerate() {
+            let moving_partition = orig_partitions
+                .get(orig_partition_idx)?
+                .ok_or_else(|| actor_error!(not_found, "no partition {}", orig_partition_idx))?
+                .clone();
+            if !moving_partition.faults.is_empty() || !moving_partition.unproven.is_empty() {
+                return Err(actor_error!(forbidden, "partition with faults or unproven sectors are not allowed to move, partition_idx {}", orig_partition_idx))?;
+            }
+            if orig_deadline.early_terminations.get(orig_partition_idx) {
+                return Err(actor_error!(forbidden, "partition with early terminated sectors are not allowed to move, partition_idx {}", orig_partition_idx))?;
+            }
+            if !moving_partition.faulty_power.is_zero() {
+                return Err(actor_error!(
+                    illegal_state,
+                    "partition faulty_power should be zero when faults is empty, partition_idx {}",
+                    orig_partition_idx
+                ))?;
+            }
+
+            let dest_partition_idx = first_dest_partition_idx + i as u64;
+
+            // sector_count is both the total sector count and the total live sector count,
+            // since no sector here is faulty.
+            let sector_count = moving_partition.sectors.len();
+
+            // start updating orig/dest `Deadline` here
+
+            orig_deadline.total_sectors -= sector_count;
+            orig_deadline.live_sectors -= sector_count;
+
+            dest_deadline.total_sectors += sector_count;
+            dest_deadline.live_sectors += sector_count;
+
+            orig_partitions.set(orig_partition_idx, Partition::new(store)?)?;
+            dest_partitions.set(dest_partition_idx, moving_partition)?;
+        }
+
+        // Update the `expirations_epochs` Cid of each Deadline.
+        // Note that when moving a partition from `orig_expirations_epochs` to
+        // `dest_expirations_epochs`, we explicitly keep `dest_epoch` the same as
+        // `orig_epoch`: by design, entries are not re-quantized.
+        {
+            let mut epochs_to_remove = Vec::<u64>::new();
+            let mut orig_expirations_epochs: Array<BitField, _> =
+                Array::load(&orig_deadline.expirations_epochs, store)?;
+            let mut dest_expirations_epochs: Array<BitField, _> =
+                Array::load(&dest_deadline.expirations_epochs, store)?;
+            orig_expirations_epochs.for_each_mut(|orig_epoch, orig_bitfield| {
+                let dest_epoch = orig_epoch;
+                let mut to_bitfield =
+                    dest_expirations_epochs.get(dest_epoch)?.cloned().unwrap_or_default();
+                for (i, partition_id) in partitions.iter().enumerate() {
+                    if orig_bitfield.get(partition_id) {
+                        orig_bitfield.unset(partition_id);
+                        to_bitfield.set(first_dest_partition_idx + i as u64);
+                    }
+                }
+                dest_expirations_epochs.set(dest_epoch, to_bitfield)?;
+
+                if orig_bitfield.is_empty() {
+                    epochs_to_remove.push(orig_epoch);
+                }
+
+                Ok(())
+            })?;
+            if !epochs_to_remove.is_empty() {
+                orig_expirations_epochs.batch_delete(epochs_to_remove, true)?;
+            }
+            orig_deadline.expirations_epochs = orig_expirations_epochs.flush()?;
+            dest_deadline.expirations_epochs = dest_expirations_epochs.flush()?;
+        }
+
+        orig_deadline.partitions = orig_partitions.flush()?;
+        dest_deadline.partitions = dest_partitions.flush()?;
+
+        Ok(())
+    }
 }
 
 /// Deadline holds the state for all sectors due at a specific deadline.
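For intuition, the expiration re-mapping above is a pure index translation: entries keep their (possibly un-quantized) epoch keys and only partition indices change. A minimal standalone sketch under that reading, with plain std collections standing in for the AMT and bitfield types (all names here are illustrative, not part of the patch):

use std::collections::{BTreeMap, BTreeSet};

// Stand-in: epoch -> set of partition indices expiring at that epoch.
type ExpirationsByEpoch = BTreeMap<i64, BTreeSet<u64>>;

/// Move partitions (old index -> new index in the destination deadline)
/// without re-quantizing: each entry keeps its original epoch key.
fn remap_expirations(
    orig: &mut ExpirationsByEpoch,
    dest: &mut ExpirationsByEpoch,
    moved: &BTreeMap<u64, u64>,
) {
    let mut emptied = Vec::new();
    for (&epoch, parts) in orig.iter_mut() {
        for (&old_idx, &new_idx) in moved {
            if parts.remove(&old_idx) {
                dest.entry(epoch).or_default().insert(new_idx);
            }
        }
        if parts.is_empty() {
            emptied.push(epoch);
        }
    }
    // Mirrors the batch_delete call that prunes epochs whose bitfield went empty.
    for epoch in emptied {
        orig.remove(&epoch);
    }
}

fn main() {
    let mut orig = ExpirationsByEpoch::from([(1000, BTreeSet::from([0u64, 2]))]);
    let mut dest = ExpirationsByEpoch::new();
    // Partition 2 in the source becomes partition 5 in the destination.
    let moved = BTreeMap::from([(2u64, 5u64)]);
    remap_expirations(&mut orig, &mut dest, &moved);
    assert_eq!(dest[&1000], BTreeSet::from([5])); // same epoch, new index
    assert_eq!(orig[&1000], BTreeSet::from([0])); // unmoved partition stays put
}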
diff --git a/actors/miner/src/deadlines.rs b/actors/miner/src/deadlines.rs
index 707bcdad1..686b70de3 100644
--- a/actors/miner/src/deadlines.rs
+++ b/actors/miner/src/deadlines.rs
@@ -127,6 +127,73 @@ pub fn deadline_available_for_compaction(
     )
 }
 
+/// The distance between `from_deadline` and `to_deadline`, measured clockwise in deadline units.
+fn deadline_distance(policy: &Policy, from_deadline: u64, to_deadline: u64) -> u64 {
+    if to_deadline >= from_deadline {
+        to_deadline - from_deadline
+    } else {
+        policy.wpost_period_deadlines - from_deadline + to_deadline
+    }
+}
+
+/// Only allow moving to a deadline that is nearer (clockwise) to the current one
+/// than the original deadline is.
+pub fn ensure_deadline_available_for_move(
+    policy: &Policy,
+    orig_deadline: u64,
+    dest_deadline: u64,
+    current_deadline: &DeadlineInfo,
+) -> Result<(), String> {
+    if !deadline_is_mutable(
+        policy,
+        current_deadline.period_start,
+        orig_deadline,
+        current_deadline.current_epoch,
+    ) {
+        return Err(format!(
+            "cannot move from a deadline {}, immutable at epoch {}",
+            orig_deadline, current_deadline.current_epoch
+        ));
+    }
+
+    if !deadline_is_mutable(
+        policy,
+        current_deadline.period_start,
+        dest_deadline,
+        current_deadline.current_epoch,
+    ) {
+        return Err(format!(
+            "cannot move to a deadline {}, immutable at epoch {}",
+            dest_deadline, current_deadline.current_epoch
+        ));
+    }
+
+    if deadline_distance(policy, current_deadline.index, dest_deadline)
+        >= deadline_distance(policy, current_deadline.index, orig_deadline)
+    {
+        return Err(format!(
+            "can only move to a deadline which is nearer from current deadline {}, dest_deadline {} is not nearer than orig_deadline {}",
+            current_deadline.index, dest_deadline, orig_deadline
+        ));
+    }
+
+    Ok(())
+}
+
+// Returns the nearest deadline info with index `target_deadline` that has already
+// occurred, from the point of view of the current deadline (including the current
+// deadline itself).
+pub fn nearest_occured_deadline_info(
+    policy: &Policy,
+    current_deadline: &DeadlineInfo,
+    target_deadline: u64,
+) -> DeadlineInfo {
+    // Find the proving period start for the deadline in question.
+    let mut pp_start = current_deadline.period_start;
+    if current_deadline.index < target_deadline {
+        pp_start -= policy.wpost_proving_period
+    }
+
+    new_deadline_info(policy, pp_start, target_deadline, current_deadline.current_epoch)
+}
+
 // Determine current period start and deadline index directly from current epoch and
 // the offset implied by the proving period. This works correctly even for the state
 // of a miner actor without an active deadline cron
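To make the distance rule concrete: a standalone sketch of the same arithmetic with worked values (48 is mainnet's `wpost_period_deadlines`; the indices are illustrative):

const WPOST_PERIOD_DEADLINES: u64 = 48; // mainnet policy value

// Clockwise distance, as in deadline_distance above.
fn deadline_distance(from_deadline: u64, to_deadline: u64) -> u64 {
    if to_deadline >= from_deadline {
        to_deadline - from_deadline
    } else {
        WPOST_PERIOD_DEADLINES - from_deadline + to_deadline
    }
}

fn main() {
    // With the current deadline at index 10, a partition in deadline 40
    // (distance 30) may only move to a deadline strictly nearer than 30.
    assert_eq!(deadline_distance(10, 40), 30); // orig
    assert_eq!(deadline_distance(10, 20), 10); // allowed: 10 < 30
    assert_eq!(deadline_distance(10, 5), 43); // rejected: 43 >= 30 (wraps around)
}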
diff --git a/actors/miner/src/expiration_queue.rs b/actors/miner/src/expiration_queue.rs
index d6777c9e9..207222431 100644
--- a/actors/miner/src/expiration_queue.rs
+++ b/actors/miner/src/expiration_queue.rs
@@ -6,6 +6,7 @@ use std::convert::TryInto;
 use anyhow::{anyhow, Context};
 use cid::Cid;
+use fil_actors_runtime::network::EPOCHS_IN_DAY;
 use fil_actors_runtime::runtime::Policy;
 use fil_actors_runtime::{ActorDowncast, Array};
 use fvm_ipld_amt::{Error as AmtError, ValueMut};
@@ -643,16 +644,16 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> {
         Ok(())
     }
 
+    /// Note that the `epoch` parameter is not quantized; the entry for this epoch is assumed to be non-empty.
     fn remove(
         &mut self,
-        raw_epoch: ChainEpoch,
+        epoch: ChainEpoch,
         on_time_sectors: &BitField,
         early_sectors: &BitField,
         active_power: &PowerPair,
         faulty_power: &PowerPair,
         pledge: &TokenAmount,
     ) -> anyhow::Result<()> {
-        let epoch = self.quant.quantize_up(raw_epoch);
         let mut expiration_set = self
             .amt
             .get(epoch.try_into()?)
@@ -776,33 +777,60 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> {
         sector_size: SectorSize,
         sectors: &[SectorOnChainInfo],
     ) -> anyhow::Result<Vec<SectorExpirationSet>> {
-        let mut declared_expirations = BTreeMap::<ChainEpoch, bool>::new();
+        if sectors.is_empty() {
+            return Ok(Vec::new());
+        }
+
         let mut sectors_by_number = BTreeMap::<u64, &SectorOnChainInfo>::new();
         let mut all_remaining = BTreeSet::<u64>::new();
+        let mut declared_expirations = BTreeSet::<ChainEpoch>::new();
 
         for sector in sectors {
-            let q_expiration = self.quant.quantize_up(sector.expiration);
-            declared_expirations.insert(q_expiration, true);
+            declared_expirations.insert(sector.expiration);
             all_remaining.insert(sector.sector_number);
             sectors_by_number.insert(sector.sector_number, sector);
         }
 
-        let mut expiration_groups =
-            Vec::<SectorExpirationSet>::with_capacity(declared_expirations.len());
-
-        for (&expiration, _) in declared_expirations.iter() {
-            let es = self.may_get(expiration)?;
-
-            let group = group_expiration_set(
-                sector_size,
-                &sectors_by_number,
-                &mut all_remaining,
-                es,
-                expiration,
-            );
-            if !group.sector_epoch_set.sectors.is_empty() {
-                expiration_groups.push(group);
-            }
+        let mut expiration_groups = Vec::<SectorExpirationSet>::with_capacity(sectors.len());
+
+        let mut old_end = 0i64;
+        for expiration in declared_expirations.iter() {
+            // Scan [sector.expiration, sector.expiration + EPOCHS_IN_DAY) for active
+            // sectors: a sector moved in from another deadline keeps its un-quantized
+            // expiration, so its entry may sit anywhere in that window.
+            //
+            // Choosing `start_at` this way also avoids scanning the same range twice.
+
+            let start_at = if *expiration > old_end {
+                *expiration
+            } else {
+                // +1 since the range is inclusive
+                old_end + 1
+            };
+            let new_end = (expiration + EPOCHS_IN_DAY - 1) as u64;
+
+            // scan range [start_at, new_end] for active sectors of interest
+            self.amt.for_each_while_ranged(Some(start_at as u64), None, |epoch, es| {
+                if epoch > new_end {
+                    // no need to scan any further
+                    return Ok(false);
+                }
+
+                let group = group_expiration_set(
+                    sector_size,
+                    &sectors_by_number,
+                    &mut all_remaining,
+                    es,
+                    epoch as ChainEpoch,
+                );
+
+                if !group.sector_epoch_set.sectors.is_empty() {
+                    expiration_groups.push(group);
+                }
+
+                Ok(epoch < new_end && !all_remaining.is_empty())
+            })?;
+
+            old_end = new_end as i64;
+        }
 
         // If sectors remain, traverse next in epoch order. Remaining sectors should be
@@ -810,12 +838,6 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> {
         if !all_remaining.is_empty() {
             self.amt.for_each_while(|epoch, es| {
                 let epoch = epoch as ChainEpoch;
-                // If this set's epoch is one of our declared epochs, we've already processed it
-                // in the loop above, so skip processing here. Sectors rescheduled to this epoch
-                // would have been included in the earlier processing.
-                if declared_expirations.contains_key(&epoch) {
-                    return Ok(true);
-                }
 
                 // Sector should not be found in EarlyExpirations which holds faults. An implicit assumption
                 // of grouping is that it only returns sectors with active power. ExpirationQueue should not
@@ -826,7 +848,7 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> {
                     sector_size,
                     &sectors_by_number,
                     &mut all_remaining,
-                    es.clone(),
+                    es,
                     epoch,
                 );
@@ -911,7 +933,7 @@ fn group_expiration_set(
     sector_size: SectorSize,
     sectors: &BTreeMap<u64, &SectorOnChainInfo>,
     include_set: &mut BTreeSet<u64>,
-    es: ExpirationSet,
+    es: &ExpirationSet,
     expiration: ChainEpoch,
 ) -> SectorExpirationSet {
     let mut sector_numbers = Vec::new();
@@ -927,14 +949,26 @@ fn group_expiration_set(
         }
     }
 
-    SectorExpirationSet {
-        sector_epoch_set: SectorEpochSet {
-            epoch: expiration,
-            sectors: sector_numbers,
-            power: total_power,
-            pledge: total_pledge,
-        },
-        expiration_set: es,
+    if sector_numbers.is_empty() {
+        SectorExpirationSet {
+            sector_epoch_set: SectorEpochSet {
+                epoch: expiration,
+                sectors: sector_numbers,
+                power: total_power,
+                pledge: total_pledge,
+            },
+            expiration_set: ExpirationSet::default(),
+        }
+    } else {
+        SectorExpirationSet {
+            sector_epoch_set: SectorEpochSet {
+                epoch: expiration,
+                sectors: sector_numbers,
+                power: total_power,
+                pledge: total_pledge,
+            },
+            expiration_set: es.clone(), // lazy clone
+        }
     }
 }
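The `start_at`/`new_end` bookkeeping above is the subtle part: each declared expiration opens a day-long scan window, and successive windows are clipped so no epoch is visited twice. The range arithmetic in isolation (a sketch with a plain sorted map in place of the AMT; `declared` must be sorted ascending, which the `BTreeSet` guarantees in the real code):

use std::collections::BTreeMap;

const EPOCHS_IN_DAY: i64 = 2880; // one day of 30-second epochs

// For a declared expiration `e`, the queue entry may sit anywhere in
// [e, e + EPOCHS_IN_DAY) because moved sectors are not re-quantized.
fn scan_windows(declared: &[i64], queue: &BTreeMap<i64, Vec<u64>>) -> Vec<i64> {
    let mut hit_epochs = Vec::new();
    let mut old_end = 0i64;
    for &expiration in declared {
        let start_at = if expiration > old_end { expiration } else { old_end + 1 };
        let new_end = expiration + EPOCHS_IN_DAY - 1;
        for (&epoch, _sectors) in queue.range(start_at..=new_end) {
            hit_epochs.push(epoch);
        }
        old_end = new_end;
    }
    hit_epochs
}

fn main() {
    // Entries at epochs 1000 and 1500: the first day-long window already covers
    // epoch 1500, and the second window is clipped to start past 3879, so
    // nothing is scanned twice.
    let queue = BTreeMap::from([(1000, vec![1u64]), (1500, vec![2u64])]);
    assert_eq!(scan_windows(&[1000, 1500], &queue), vec![1000, 1500]);
}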
diff --git a/actors/miner/src/lib.rs b/actors/miner/src/lib.rs
index 46f59e299..48537f84e 100644
--- a/actors/miner/src/lib.rs
+++ b/actors/miner/src/lib.rs
@@ -128,6 +128,7 @@ pub enum Method {
    ChangeBeneficiary = 30,
    GetBeneficiary = 31,
    ExtendSectorExpiration2 = 32,
+    MovePartitions = 33,
    // Method numbers derived from FRC-0042 standards
    ChangeWorkerAddressExported = frc42_dispatch::method_hash!("ChangeWorkerAddress"),
    ChangePeerIDExported = frc42_dispatch::method_hash!("ChangePeerID"),
@@ -1274,12 +1275,8 @@ impl Actor {
         // --- check proof ---
 
         // Find the proving period start for the deadline in question.
-        let mut pp_start = dl_info.period_start;
-        if dl_info.index < params.deadline {
-            pp_start -= policy.wpost_proving_period
-        }
         let target_deadline =
-            new_deadline_info(policy, pp_start, params.deadline, current_epoch);
+            nearest_occured_deadline_info(policy, &dl_info, params.deadline);
 
         // Load the target deadline
         let mut deadlines_current = st
             .load_deadlines(rt.store())
@@ -2695,6 +2692,196 @@ impl Actor {
         Ok(())
     }
 
+    /// Moves partitions intact between deadlines; see FIP-0070:
+    /// https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0070.md
+    fn move_partitions(rt: &impl Runtime, params: MovePartitionsParams) -> Result<(), ActorError> {
+        if params.orig_deadline == params.dest_deadline {
+            return Err(actor_error!(illegal_argument, "orig_deadline == dest_deadline"));
+        }
+        let policy = rt.policy();
+        if params.orig_deadline >= policy.wpost_period_deadlines
+            || params.dest_deadline >= policy.wpost_period_deadlines
+        {
+            return Err(actor_error!(
+                illegal_argument,
+                "invalid param, orig_deadline: {}, dest_deadline: {}",
+                params.orig_deadline,
+                params.dest_deadline
+            ));
+        }
+        if params.partitions.is_empty() {
+            return Err(actor_error!(illegal_argument, "empty partitions not allowed"));
+        }
+
+        rt.transaction(|state: &mut State, rt| {
+            let info = get_miner_info(rt.store(), state)?;
+
+            rt.validate_immediate_caller_is(
+                info.control_addresses.iter().chain(&[info.worker, info.owner]),
+            )?;
+
+            let store = rt.store();
+            let current_deadline = state.deadline_info(policy, rt.curr_epoch());
+            let mut deadlines = state
+                .load_deadlines(store)
+                .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load deadlines")?;
+
+            ensure_deadline_available_for_move(
+                policy,
+                params.orig_deadline,
+                params.dest_deadline,
+                &current_deadline,
+            )
+            .context_code(
+                ExitCode::USR_FORBIDDEN,
+                "conditions not satisfied for deadline_available_for_move",
+            )?;
+
+            let mut orig_deadline = deadlines
+                .load_deadline(store, params.orig_deadline)
+                .with_context_code(ExitCode::USR_ILLEGAL_STATE, || {
+                    format!("failed to load deadline {}", params.orig_deadline)
+                })?;
+
+            // Only attempt immediate (non-optimistic) Window PoSt verification if the orig
+            // deadline is still within its dispute window.
+            // Window PoSt is batched, so the best we can do is verify only those proofs that
+            // contain at least one partition being moved.
+            // After verification the corresponding PoSt proof is deleted, leaving the other
+            // proofs intact; each moved partition therefore leaves an empty partition in
+            // place so that the remaining partitions are not re-indexed.
+            if deadline_available_for_optimistic_post_dispute(
+                policy,
+                current_deadline.period_start,
+                params.orig_deadline,
+                rt.curr_epoch(),
+            ) {
+                let proofs_snapshot = orig_deadline
+                    .optimistic_proofs_snapshot_amt(store)
+                    .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load proofs snapshot")?;
+
+                let partitions_snapshot =
+                    orig_deadline.partitions_snapshot_amt(store).context_code(
+                        ExitCode::USR_ILLEGAL_STATE,
+                        "failed to load partitions snapshot",
+                    )?;
+
+                // Find the proving period start for the deadline in question.
+                let prev_orig_deadline =
+                    nearest_occured_deadline_info(policy, &current_deadline, params.orig_deadline);
+
+                // Load sectors for the dispute.
+                let sectors = Sectors::load(rt.store(), &orig_deadline.sectors_snapshot)
+                    .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load sectors array")?;
+
+                proofs_snapshot
+                    .for_each(|_, window_proof| {
+                        if !params.partitions.contains_any(&window_proof.partitions) {
+                            return Ok(());
+                        }
+
+                        let mut all_sectors =
+                            Vec::<BitField>::with_capacity(params.partitions.len() as usize);
+                        let mut all_ignored =
+                            Vec::<BitField>::with_capacity(params.partitions.len() as usize);
+
+                        for partition_idx in window_proof.partitions.iter() {
+                            let partition = partitions_snapshot
+                                .get(partition_idx)
+                                .context_code(
+                                    ExitCode::USR_ILLEGAL_STATE,
+                                    "failed to load partitions snapshot for proof",
+                                )?
+                                .ok_or_else(|| {
+                                    actor_error!(
+                                        illegal_state,
+                                        "failed to load partitions snapshot for proof"
+                                    )
+                                })?;
+                            if !partition.faults.is_empty() {
+                                return Err(actor_error!(
+                                    forbidden,
+                                    "cannot move partition {}: had faults at last Window PoST",
+                                    partition_idx
+                                ))?;
+                            }
+                            if !partition.unproven.is_empty() {
+                                return Err(actor_error!(
+                                    forbidden,
+                                    "cannot move partition {}: had unproven at last Window PoST",
+                                    partition_idx
+                                ))?;
+                            }
+                            all_sectors.push(partition.sectors.clone());
+                            all_ignored.push(partition.terminated.clone());
+                        }
+
+                        // Load sector infos for proof, substituting a known-good sector for known-faulty sectors.
+                        let sector_infos = sectors
+                            .load_for_proof(
+                                &BitField::union(&all_sectors),
+                                &BitField::union(&all_ignored),
+                            )
+                            .context_code(
+                                ExitCode::USR_ILLEGAL_STATE,
+                                "failed to load sectors for post verification",
+                            )?;
+
+                        if !verify_windowed_post(
+                            rt,
+                            prev_orig_deadline.challenge,
+                            &sector_infos,
+                            window_proof.proofs.clone(),
+                        )
+                        .context_code(ExitCode::USR_ILLEGAL_STATE, "window post failed")?
+                        {
+                            return Err(actor_error!(
+                                illegal_argument,
+                                "invalid post was submitted"
+                            ))?;
+                        }
+
+                        Ok(())
+                    })
+                    .context_code(ExitCode::USR_ILLEGAL_STATE, "while removing partitions")?;
+            }
+
+            let mut dest_deadline =
+                deadlines.load_deadline(store, params.dest_deadline).context_code(
+                    ExitCode::USR_ILLEGAL_STATE,
+                    format!("failed to load deadline {}", params.dest_deadline),
+                )?;
+
+            Deadlines::move_partitions(
+                policy,
+                store,
+                &mut orig_deadline,
+                &mut dest_deadline,
+                &params.partitions,
+            )
+            .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to move partitions")?;
+
+            deadlines
+                .update_deadline(policy, store, params.orig_deadline, &orig_deadline)
+                .context_code(
+                    ExitCode::USR_ILLEGAL_STATE,
+                    format!("failed to update deadline {}", params.orig_deadline),
+                )?;
+            deadlines
+                .update_deadline(policy, store, params.dest_deadline, &dest_deadline)
+                .context_code(
+                    ExitCode::USR_ILLEGAL_STATE,
+                    format!("failed to update deadline {}", params.dest_deadline),
+                )?;
+
+            state.save_deadlines(store, deadlines).context_code(
+                ExitCode::USR_ILLEGAL_STATE,
+                format!(
+                    "failed to save deadline when move_partitions from {} to {}",
+                    params.orig_deadline, params.dest_deadline
+                ),
+            )?;
+
+            Ok(())
+        })?;
+
+        Ok(())
+    }
+
     /// Compacts sector number allocations to reduce the size of the allocated sector
     /// number bitfield.
     ///
@@ -5196,6 +5383,7 @@ impl ActorCode for Actor {
         ConfirmSectorProofsValid => confirm_sector_proofs_valid,
         ChangeMultiaddrs|ChangeMultiaddrsExported => change_multiaddresses,
         CompactPartitions => compact_partitions,
+        MovePartitions => move_partitions,
         CompactSectorNumbers => compact_sector_numbers,
         ConfirmChangeWorkerAddress|ConfirmChangeWorkerAddressExported => confirm_change_worker_address,
         RepayDebt|RepayDebtExported => repay_debt,
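The re-verification path above folds per-partition bitfields together with `BitField::union`, which accepts any iterator of `&BitField`; a standalone sketch of that one step (values are arbitrary):

use fvm_ipld_bitfield::BitField;

fn main() {
    // Two partitions' sector sets, as assembled for one batched Window PoSt.
    let p0 = BitField::try_from_bits([1u64, 2, 3]).unwrap();
    let p1 = BitField::try_from_bits([10u64, 11]).unwrap();

    // One set covering every sector the proof commits to.
    let all = BitField::union([&p0, &p1]);
    assert_eq!(all.len(), 5);
}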
diff --git a/actors/miner/src/testing.rs b/actors/miner/src/testing.rs
index f1c33bc31..19b07c960 100644
--- a/actors/miner/src/testing.rs
+++ b/actors/miner/src/testing.rs
@@ -629,9 +629,6 @@ impl ExpirationQueueStateSummary {
         let ret = expiration_queue.amt.for_each(|epoch, expiration_set| {
             let epoch = epoch as i64;
             let acc = acc.with_prefix(format!("expiration epoch {epoch}: "));
-            let quant_up = quant.quantize_up(epoch);
-            acc.require(quant_up == epoch, format!("expiration queue key {epoch} is not quantized, expected {quant_up}"));
-
             expiration_epochs.push(epoch);
 
             let mut on_time_sectors_pledge = TokenAmount::zero();
@@ -643,8 +640,7 @@ impl ExpirationQueueStateSummary {
 
             // check expiring sectors are still alive
             if let Some(sector) = live_sectors.get(&sector_number) {
-                let target = quant.quantize_up(sector.expiration);
-                acc.require(epoch == target, format!("invalid expiration {epoch} for sector {sector_number}, expected {target}"));
+                acc.require(epoch >= sector.expiration, format!("invalid expiration {epoch} for sector {sector_number}"));
                 on_time_sectors_pledge += sector.initial_pledge.clone();
             } else {
                 acc.add(format!("on time expiration sector {sector_number} isn't live"));
diff --git a/actors/miner/src/types.rs b/actors/miner/src/types.rs
index 651273263..a7dc44c36 100644
--- a/actors/miner/src/types.rs
+++ b/actors/miner/src/types.rs
@@ -244,6 +244,13 @@ pub struct CompactPartitionsParams {
     pub partitions: BitField,
 }
 
+#[derive(Serialize_tuple, Deserialize_tuple)]
+pub struct MovePartitionsParams {
+    pub orig_deadline: u64,
+    pub dest_deadline: u64,
+    pub partitions: BitField,
+}
+
 #[derive(Serialize_tuple, Deserialize_tuple)]
 pub struct CompactSectorNumbersParams {
     pub mask_sector_numbers: BitField,
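Like its `CompactPartitionsParams` neighbour, `MovePartitionsParams` serializes as a CBOR tuple. A caller-side sketch of building the parameters (the deadline indices and partition numbers are arbitrary examples):

use fil_actor_miner::MovePartitionsParams;
use fvm_ipld_bitfield::BitField;
use fvm_ipld_encoding::ipld_block::IpldBlock;

// Illustrative only: move partitions 0 and 1 from deadline 3 to deadline 1.
fn build_move_params() -> Option<IpldBlock> {
    let params = MovePartitionsParams {
        orig_deadline: 3,
        dest_deadline: 1,
        partitions: BitField::try_from_bits([0u64, 1]).unwrap(),
    };
    IpldBlock::serialize_cbor(&params).unwrap()
}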
diff --git a/actors/miner/tests/move_partitions_test.rs b/actors/miner/tests/move_partitions_test.rs
new file mode 100644
index 000000000..04e75b71d
--- /dev/null
+++ b/actors/miner/tests/move_partitions_test.rs
@@ -0,0 +1,629 @@
+use fil_actor_miner::{
+    deadline_available_for_compaction, deadline_available_for_optimistic_post_dispute,
+    deadline_is_mutable, expected_reward_for_power, new_deadline_info,
+    pledge_penalty_for_termination, qa_power_for_sector, DeadlineInfo, SectorOnChainInfo, State,
+    INITIAL_PLEDGE_PROJECTION_PERIOD,
+};
+
+use fil_actors_runtime::network::EPOCHS_IN_DAY;
+use fil_actors_runtime::{
+    runtime::Runtime,
+    runtime::{DomainSeparationTag, RuntimePolicy},
+    test_utils::{expect_abort_contains_message, MockRuntime},
+};
+use fvm_ipld_bitfield::BitField;
+use fvm_ipld_encoding::RawBytes;
+use fvm_shared::econ::TokenAmount;
+use fvm_shared::randomness::Randomness;
+use fvm_shared::{clock::ChainEpoch, error::ExitCode};
+use num_traits::Zero;
+
+mod util;
+use util::*;
+
+const PERIOD_OFFSET: ChainEpoch = 100;
+
+fn setup() -> (ActorHarness, MockRuntime) {
+    let h = ActorHarness::new(PERIOD_OFFSET);
+    let rt = h.new_runtime();
+    h.construct_and_verify(&rt);
+    rt.balance.replace(BIG_BALANCE.clone());
+
+    (h, rt)
+}
+
+// returns the nearest epoch such that synchronous post verification is required
+fn nearest_unsafe_epoch(rt: &MockRuntime, h: &ActorHarness, orig_deadline_id: u64) -> i64 {
+    let current_ddl = h.current_deadline(rt);
+
+    for i in *rt.epoch.borrow().. {
+        if !deadline_available_for_compaction(
+            &rt.policy,
+            current_ddl.period_start,
+            orig_deadline_id,
+            i,
+        ) && deadline_available_for_optimistic_post_dispute(
+            &rt.policy,
+            current_ddl.period_start,
+            orig_deadline_id,
+            i,
+        ) {
+            return i;
+        }
+    }
+
+    panic!("impossible path");
+}
+
+// returns the nearest epoch such that no synchronous post verification is necessary
+fn nearest_safe_epoch(rt: &MockRuntime, h: &ActorHarness, orig_deadline_id: u64) -> i64 {
+    let current_ddl = h.current_deadline(rt);
+
+    for i in *rt.epoch.borrow().. {
+        if deadline_available_for_compaction(
+            &rt.policy,
+            current_ddl.period_start,
+            orig_deadline_id,
+            i,
+        ) {
+            return i;
+        }
+    }
+
+    panic!("impossible path");
+}
+
+// returns the farthest deadline from current that satisfies deadline_available_for_move
+fn farthest_possible_to_deadline(
+    rt: &MockRuntime,
+    orig_deadline_id: u64,
+    current_deadline: DeadlineInfo,
+) -> u64 {
+    assert_ne!(
+        orig_deadline_id, current_deadline.index,
+        "can't move nearer when the deadline_distance is 0"
+    );
+
+    if current_deadline.index < orig_deadline_id {
+        // the deadline distance can only be nearer
+        for i in (current_deadline.index..(orig_deadline_id)).rev() {
+            if deadline_is_mutable(&rt.policy, current_deadline.period_start, i, *rt.epoch.borrow())
+            {
+                return i;
+            }
+        }
+    } else {
+        for i in (0..(orig_deadline_id)).rev() {
+            if deadline_is_mutable(&rt.policy, current_deadline.period_start, i, *rt.epoch.borrow())
+            {
+                return i;
+            }
+        }
+
+        for i in (current_deadline.index..rt.policy.wpost_period_deadlines).rev() {
+            if deadline_is_mutable(&rt.policy, current_deadline.period_start, i, *rt.epoch.borrow())
+            {
+                return i;
+            }
+        }
+    }
+
+    panic!("no candidate to_deadline");
+}
+
+#[test]
+fn fail_to_move_partitions_with_faults_from_safe_epoch() {
+    let (mut h, rt) = setup();
+
+    // create 2 sectors in partition 0
+    let sectors_info = h.commit_and_prove_sectors(
+        &rt,
+        2,
+        DEFAULT_SECTOR_EXPIRATION,
+        vec![vec![10], vec![20]],
+        true,
+    );
+    h.advance_and_submit_posts(&rt, &sectors_info);
+
+    let st = h.get_state(&rt);
+    let (orig_deadline_id, partition_id) =
+        st.find_sector(&rt.store, sectors_info[0].sector_number).unwrap();
+
+    // fault sector 1
+    h.declare_faults(&rt, &sectors_info[0..1]);
+
+    h.advance_to_epoch_with_cron(&rt, nearest_safe_epoch(&rt, &h, orig_deadline_id));
+
+    let to_deadline_id =
+        farthest_possible_to_deadline(&rt, orig_deadline_id, h.current_deadline(&rt));
+
+    let result = h.move_partitions(
+        &rt,
+        orig_deadline_id,
+        to_deadline_id,
+        bitfield_from_slice(&[partition_id]),
+        || {},
+    );
+    expect_abort_contains_message(
+        ExitCode::USR_ILLEGAL_STATE,
+        "partition with faults or unproven sectors are not allowed to move",
+        result,
+    );
+
+    h.check_state(&rt);
+}
+
+#[test]
+fn fail_to_move_partitions_with_faults_from_unsafe_epoch() {
+    let (mut h, rt) = setup();
+
+    // create 2 sectors in partition 0
+    let sectors_info = h.commit_and_prove_sectors(
+        &rt,
+        2,
+        DEFAULT_SECTOR_EXPIRATION,
+        vec![vec![10], vec![20]],
+        true,
+    );
+    h.advance_and_submit_posts(&rt, &sectors_info);
+
+    let st = h.get_state(&rt);
+    let (orig_deadline_id, partition_id) =
+        st.find_sector(&rt.store, sectors_info[0].sector_number).unwrap();
+
+    // fault sector 1
+    h.declare_faults(&rt, &sectors_info[0..1]);
+
+    h.advance_to_epoch_with_cron(&rt, nearest_unsafe_epoch(&rt, &h, orig_deadline_id));
+
+    let dest_deadline_id =
+        farthest_possible_to_deadline(&rt, orig_deadline_id, h.current_deadline(&rt));
+
+    let result = h.move_partitions(
+        &rt,
+        orig_deadline_id,
+        dest_deadline_id,
+        bitfield_from_slice(&[partition_id]),
+        || {
+            let current_deadline = h.current_deadline(&rt);
+
+            let from_deadline = new_deadline_info(
+                rt.policy(),
+                if current_deadline.index < orig_deadline_id {
+                    current_deadline.period_start - rt.policy().wpost_proving_period
+                } else {
+                    current_deadline.period_start
+                },
+                orig_deadline_id,
+                *rt.epoch.borrow(),
+            );
+
+            let from_ddl = h.get_deadline(&rt, orig_deadline_id);
+
+            let entropy = RawBytes::serialize(h.receiver).unwrap();
+            rt.expect_get_randomness_from_beacon(
+                DomainSeparationTag::WindowedPoStChallengeSeed,
+                from_deadline.challenge,
+                entropy.to_vec(),
+                TEST_RANDOMNESS_ARRAY_FROM_ONE,
+            );
+
+            let post = h.get_submitted_proof(&rt, &from_ddl, 0);
+
+            let all_ignored = BitField::new();
+            let vi = h.make_window_post_verify_info(
+                &sectors_info,
+                &all_ignored,
+                sectors_info[1].clone(),
+                Randomness(TEST_RANDOMNESS_ARRAY_FROM_ONE.into()),
+                post.proofs,
+            );
+            rt.expect_verify_post(vi, ExitCode::OK);
+        },
+    );
+    expect_abort_contains_message(
+        ExitCode::USR_ILLEGAL_STATE,
+        "partition with faults or unproven sectors are not allowed to move",
+        result,
+    );
+
+    h.check_state(&rt);
+}
+
+#[test]
+fn ok_to_move_partitions_from_safe_epoch() {
+    let (mut h, rt) = setup();
+
+    // create 2 sectors in partition 0
+    let sectors_info = h.commit_and_prove_sectors(
+        &rt,
+        2,
+        DEFAULT_SECTOR_EXPIRATION,
+        vec![vec![10], vec![20]],
+        true,
+    );
+    h.advance_and_submit_posts(&rt, &sectors_info);
+
+    let st = h.get_state(&rt);
+    let (orig_deadline_id, partition_id) =
+        st.find_sector(&rt.store, sectors_info[0].sector_number).unwrap();
+
+    h.advance_to_epoch_with_cron(&rt, nearest_safe_epoch(&rt, &h, orig_deadline_id));
+
+    let dest_deadline_id =
+        farthest_possible_to_deadline(&rt, orig_deadline_id, h.current_deadline(&rt));
+
+    let result = h.move_partitions(
+        &rt,
+        orig_deadline_id,
+        dest_deadline_id,
+        bitfield_from_slice(&[partition_id]),
+        || {},
+    );
+    assert!(result.is_ok());
+
+    h.check_state(&rt);
+}
+
+#[test]
+fn ok_to_move_partitions_from_unsafe_epoch() {
+    let (mut h, rt) = setup();
+
+    // create 2 sectors in partition 0
+    let sectors_info = h.commit_and_prove_sectors(
+        &rt,
+        2,
+        DEFAULT_SECTOR_EXPIRATION,
+        vec![vec![10], vec![20]],
+        true,
+    );
+    h.advance_and_submit_posts(&rt, &sectors_info);
+
+    let st = h.get_state(&rt);
+    let (orig_deadline_id, partition_id) =
+        st.find_sector(&rt.store, sectors_info[0].sector_number).unwrap();
+
+    h.advance_to_epoch_with_cron(&rt, nearest_unsafe_epoch(&rt, &h, orig_deadline_id));
+
+    let dest_deadline_id =
+        farthest_possible_to_deadline(&rt, orig_deadline_id, h.current_deadline(&rt));
+
+    let result = h.move_partitions(
+        &rt,
+        orig_deadline_id,
+        dest_deadline_id,
+        bitfield_from_slice(&[partition_id]),
+        || {
+            let current_deadline = h.current_deadline(&rt);
+
+            let from_deadline = new_deadline_info(
+                rt.policy(),
+                if current_deadline.index < orig_deadline_id {
+                    current_deadline.period_start - rt.policy().wpost_proving_period
+                } else {
+                    current_deadline.period_start
+                },
+                orig_deadline_id,
+                *rt.epoch.borrow(),
+            );
+
+            let from_ddl = h.get_deadline(&rt, orig_deadline_id);
+
+            let entropy = RawBytes::serialize(h.receiver).unwrap();
+            rt.expect_get_randomness_from_beacon(
+                DomainSeparationTag::WindowedPoStChallengeSeed,
+                from_deadline.challenge,
+                entropy.to_vec(),
+                TEST_RANDOMNESS_ARRAY_FROM_ONE,
+            );
+
+            let post = h.get_submitted_proof(&rt, &from_ddl, 0);
+
+            let all_ignored = BitField::new();
+            let vi = h.make_window_post_verify_info(
+                &sectors_info,
+                &all_ignored,
+                sectors_info[1].clone(),
+                Randomness(TEST_RANDOMNESS_ARRAY_FROM_ONE.into()),
+                post.proofs,
+            );
+            rt.expect_verify_post(vi, ExitCode::OK);
+        },
+    );
+    assert!(result.is_ok());
+
+    h.check_state(&rt);
+}
+
+#[test]
+fn fault_and_recover_after_move() {
+    let (mut h, rt) = setup();
+
+    let sectors_info = h.commit_and_prove_sectors(
+        &rt,
+        2,
+        DEFAULT_SECTOR_EXPIRATION,
+        vec![vec![10], vec![20]],
+        true,
+    );
+    h.advance_and_submit_posts(&rt, &sectors_info);
+
+    let st = h.get_state(&rt);
+    let (orig_deadline_id, partition_id) =
+        st.find_sector(&rt.store, sectors_info[0].sector_number).unwrap();
+
+    h.advance_to_epoch_with_cron(&rt, nearest_safe_epoch(&rt, &h, orig_deadline_id));
+    let dest_deadline_id =
+        farthest_possible_to_deadline(&rt, orig_deadline_id, h.current_deadline(&rt));
+
+    let result = h.move_partitions(
+        &rt,
+        orig_deadline_id,
+        dest_deadline_id,
+        bitfield_from_slice(&[partition_id]),
+        || {},
+    );
+    assert!(result.is_ok());
+
+    let st = h.get_state(&rt);
+    let (dl_idx, p_idx) = st.find_sector(&rt.store, sectors_info[0].sector_number).unwrap();
+    assert!(dl_idx == dest_deadline_id);
+
+    h.check_state(&rt);
+
+    // fault and recover
+
+    h.declare_faults(&rt, &sectors_info);
+
+    h.declare_recoveries(
+        &rt,
+        dl_idx,
+        p_idx,
+        BitField::try_from_bits(sectors_info.iter().map(|s| s.sector_number)).unwrap(),
+        TokenAmount::zero(),
+    )
+    .unwrap();
+
+    let dl = h.get_deadline(&rt, dl_idx);
+    let p = dl.load_partition(&rt.store, p_idx).unwrap();
+    assert_eq!(p.faults, p.recoveries);
+    h.check_state(&rt);
+}
+
+#[test]
+fn fault_and_terminate_after_move() {
+    let (mut h, rt) = setup();
+
+    let sectors_info = h.commit_and_prove_sectors(
+        &rt,
+        1,
+        DEFAULT_SECTOR_EXPIRATION,
+        vec![vec![10], vec![20]],
+        true,
+    );
+    h.advance_and_submit_posts(&rt, &sectors_info);
+
+    let st = h.get_state(&rt);
+    let (orig_deadline_id, partition_id) =
+        st.find_sector(&rt.store, sectors_info[0].sector_number).unwrap();
+
+    h.advance_to_epoch_with_cron(&rt, nearest_safe_epoch(&rt, &h, orig_deadline_id));
+    let dest_deadline_id =
+        farthest_possible_to_deadline(&rt, orig_deadline_id, h.current_deadline(&rt));
+
+    let result = h.move_partitions(
+        &rt,
+        orig_deadline_id,
+        dest_deadline_id,
+        bitfield_from_slice(&[partition_id]),
+        || {},
+    );
+    assert!(result.is_ok());
+
+    let st = h.get_state(&rt);
+    let (dl_idx, _) = st.find_sector(&rt.store, sectors_info[0].sector_number).unwrap();
+    assert!(dl_idx == dest_deadline_id);
+
+    h.check_state(&rt);
+
+    // fault and terminate
+
+    h.declare_faults(&rt, &sectors_info);
+
+    // A miner will pay the minimum of termination fee and locked funds. Add some locked funds
+    // to ensure the correct fee calculation is used.
+    h.apply_rewards(&rt, BIG_REWARDS.clone(), TokenAmount::zero());
+    let state: State = rt.get_state();
+    let initial_locked_funds = state.locked_funds;
+
+    let expected_fee = calc_expected_fee_for_termination(&h, &rt, sectors_info[0].clone());
+    let sectors = bitfield_from_slice(&[sectors_info[0].sector_number]);
+    h.terminate_sectors(&rt, &sectors, expected_fee.clone());
+
+    // expect the sector to be marked as terminated and the early termination queue to be
+    // empty (having been fully processed)
+    let state: State = rt.get_state();
+    let (_, mut partition) = h.find_sector(&rt, sectors_info[0].sector_number);
+    let terminated = partition.terminated.get(sectors_info[0].sector_number);
+    assert!(terminated);
+
+    let (result, _) = partition.pop_early_terminations(rt.store(), 1000).unwrap();
+    assert!(result.is_empty());
+
+    // expect the fee to have been unlocked and burnt
+    assert_eq!(initial_locked_funds - expected_fee, state.locked_funds);
+
+    // expect the pledge requirement to have been decremented
+    assert!(state.initial_pledge.is_zero());
+
+    h.check_state(&rt);
+}
+
+fn calc_expected_fee_for_termination(
+    h: &ActorHarness,
+    rt: &MockRuntime,
+    sector: SectorOnChainInfo,
+) -> TokenAmount {
+    let sector_power = qa_power_for_sector(sector.seal_proof.sector_size().unwrap(), &sector);
+    let day_reward = expected_reward_for_power(
+        &h.epoch_reward_smooth,
+        &h.epoch_qa_power_smooth,
+        &sector_power,
+        EPOCHS_IN_DAY,
+    );
+    let twenty_day_reward = expected_reward_for_power(
+        &h.epoch_reward_smooth,
+        &h.epoch_qa_power_smooth,
+        &sector_power,
+        INITIAL_PLEDGE_PROJECTION_PERIOD,
+    );
+    let sector_age = *rt.epoch.borrow() - sector.activation;
+    pledge_penalty_for_termination(
+        &day_reward,
+        sector_age,
+        &twenty_day_reward,
+        &h.epoch_qa_power_smooth,
+        &sector_power,
+        &h.epoch_reward_smooth,
+        &TokenAmount::zero(),
+        0,
+    )
+}
+
+#[test]
+fn directly_terminate_after_move() {
+    let (mut h, rt) = setup();
+
+    let sectors_info = h.commit_and_prove_sectors(
+        &rt,
+        1,
+        DEFAULT_SECTOR_EXPIRATION,
+        vec![vec![10], vec![20]],
+        true,
+    );
+    h.advance_and_submit_posts(&rt, &sectors_info);
+
+    let st = h.get_state(&rt);
+    let (orig_deadline_id, partition_id) =
+        st.find_sector(&rt.store, sectors_info[0].sector_number).unwrap();
+
+    h.advance_to_epoch_with_cron(&rt, nearest_safe_epoch(&rt, &h, orig_deadline_id));
+    let dest_deadline_id =
+        farthest_possible_to_deadline(&rt, orig_deadline_id, h.current_deadline(&rt));
+
+    let result = h.move_partitions(
+        &rt,
+        orig_deadline_id,
+        dest_deadline_id,
+        bitfield_from_slice(&[partition_id]),
+        || {},
+    );
+    assert!(result.is_ok());
+
+    let st = h.get_state(&rt);
+    let (dl_idx, _) = st.find_sector(&rt.store, sectors_info[0].sector_number).unwrap();
+    assert!(dl_idx == dest_deadline_id);
+
+    h.check_state(&rt);
+
+    // directly terminate
+
+    // A miner will pay the minimum of termination fee and locked funds. Add some locked funds
+    // to ensure the correct fee calculation is used.
+    h.apply_rewards(&rt, BIG_REWARDS.clone(), TokenAmount::zero());
+    let state: State = rt.get_state();
+    let initial_locked_funds = state.locked_funds;
+
+    let expected_fee = calc_expected_fee_for_termination(&h, &rt, sectors_info[0].clone());
+    let sectors = bitfield_from_slice(&[sectors_info[0].sector_number]);
+    h.terminate_sectors(&rt, &sectors, expected_fee.clone());
+
+    // expect the sector to be marked as terminated and the early termination queue to be
+    // empty (having been fully processed)
+    let state: State = rt.get_state();
+    let (_, mut partition) = h.find_sector(&rt, sectors_info[0].sector_number);
+    let terminated = partition.terminated.get(sectors_info[0].sector_number);
+    assert!(terminated);
+
+    let (result, _) = partition.pop_early_terminations(rt.store(), 1000).unwrap();
+    assert!(result.is_empty());
+
+    // expect the fee to have been unlocked and burnt
+    assert_eq!(initial_locked_funds - expected_fee, state.locked_funds);
+
+    // expect the pledge requirement to have been decremented
+    assert!(state.initial_pledge.is_zero());
+
+    h.check_state(&rt);
+}
+
+#[test]
+fn fault_and_expire_after_move() {
+    let (mut h, rt) = setup();
+
+    let sectors_info = h.commit_and_prove_sectors(
+        &rt,
+        1,
+        DEFAULT_SECTOR_EXPIRATION,
+        vec![vec![10], vec![20]],
+        true,
+    );
+    h.advance_and_submit_posts(&rt, &sectors_info);
+
+    let st = h.get_state(&rt);
+    let (orig_deadline_id, partition_id) =
+        st.find_sector(&rt.store, sectors_info[0].sector_number).unwrap();
+
+    h.advance_to_epoch_with_cron(&rt, nearest_safe_epoch(&rt, &h, orig_deadline_id));
+    let dest_deadline_id =
+        farthest_possible_to_deadline(&rt, orig_deadline_id, h.current_deadline(&rt));
+
+    let result = h.move_partitions(
+        &rt,
+        orig_deadline_id,
+        dest_deadline_id,
+        bitfield_from_slice(&[partition_id]),
+        || {},
+    );
+    assert!(result.is_ok());
+
+    let st = h.get_state(&rt);
+    let (dl_idx, partition_id) = st.find_sector(&rt.store, sectors_info[0].sector_number).unwrap();
+    assert!(dl_idx == dest_deadline_id);
+
+    h.check_state(&rt);
+
+    // fault and expire
+
+    h.declare_faults(&rt, &sectors_info);
+
+    let st = h.get_state(&rt);
+    let quant = st.quant_spec_for_deadline(rt.policy(), dl_idx);
+
+    let current_deadline = h.current_deadline(&rt);
+
+    let target_deadline = new_deadline_info(
+        rt.policy(),
+        if current_deadline.index < orig_deadline_id {
+            current_deadline.period_start - rt.policy().wpost_proving_period
+        } else {
+            current_deadline.period_start
+        },
+        orig_deadline_id,
+        *rt.epoch.borrow(),
+    );
+    let fault_expiration_epoch = target_deadline.last() + rt.policy.fault_max_age;
+    let new_expiration = quant.quantize_up(fault_expiration_epoch);
+
+    // assert that the new expiration exists
+    let (_, mut partition) = h.get_deadline_and_partition(&rt, dl_idx, partition_id);
+    let expiration_set =
+        partition.pop_expired_sectors(rt.store(), new_expiration - 1, quant).unwrap();
+    assert!(expiration_set.is_empty());
+
+    let expiration_set = partition
+        .pop_expired_sectors(rt.store(), quant.quantize_up(new_expiration), quant)
+        .unwrap();
+    assert_eq!(expiration_set.len(), 1);
+    assert!(expiration_set.early_sectors.get(sectors_info[0].sector_number));
+
+    h.check_state(&rt);
+}
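The closures in the unsafe-epoch tests recompute the challenge deadline inline; the `period_start` rollback they apply is the same one `nearest_occured_deadline_info` performs. The arithmetic in isolation (epoch values are illustrative; 2880 is mainnet's one-day proving period):

// Pick the proving-period start for the instance of `target_idx` that has
// already occurred, viewed from the current deadline.
fn pp_start_for(target_idx: u64, current_idx: u64, period_start: i64, proving_period: i64) -> i64 {
    if current_idx < target_idx {
        // The target deadline hasn't opened yet this period: use last period's instance.
        period_start - proving_period
    } else {
        period_start
    }
}

fn main() {
    let proving_period = 2880; // one day of 30-second epochs
    assert_eq!(pp_start_for(7, 5, 10_000, proving_period), 7_120); // rolled back
    assert_eq!(pp_start_for(3, 5, 10_000, proving_period), 10_000); // this period
}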
diff --git a/actors/miner/tests/util.rs b/actors/miner/tests/util.rs
index c06ceadef..2926abc9d 100644
--- a/actors/miner/tests/util.rs
+++ b/actors/miner/tests/util.rs
@@ -67,7 +67,7 @@ use fil_actor_miner::{
     ExpirationQueue, ExpirationSet, ExtendSectorExpiration2Params, ExtendSectorExpirationParams,
     FaultDeclaration, GetAvailableBalanceReturn, GetBeneficiaryReturn, GetControlAddressesReturn,
     GetMultiaddrsReturn, GetPeerIDReturn, Method, MinerConstructorParams as ConstructorParams,
-    MinerInfo, Partition, PendingBeneficiaryChange, PoStPartition, PowerPair,
+    MinerInfo, MovePartitionsParams, Partition, PendingBeneficiaryChange, PoStPartition, PowerPair,
     PreCommitSectorBatchParams, PreCommitSectorBatchParams2, PreCommitSectorParams,
     ProveCommitSectorParams, RecoveryDeclaration, ReportConsensusFaultParams, SectorOnChainInfo,
     SectorPreCommitInfo, SectorPreCommitOnChainInfo, Sectors, State, SubmitWindowedPoStParams,
@@ -1475,7 +1475,7 @@ impl ActorHarness {
         )
     }
 
-    fn make_window_post_verify_info(
+    pub fn make_window_post_verify_info(
         &self,
         infos: &[SectorOnChainInfo],
         all_ignored: &BitField,
@@ -1635,7 +1635,12 @@ impl ActorHarness {
         rt.verify();
     }
 
-    fn get_submitted_proof(&self, rt: &MockRuntime, deadline: &Deadline, idx: u64) -> WindowedPoSt {
+    pub fn get_submitted_proof(
+        &self,
+        rt: &MockRuntime,
+        deadline: &Deadline,
+        idx: u64,
+    ) -> WindowedPoSt {
         amt_get::<WindowedPoSt>(rt, &deadline.optimistic_post_submissions_snapshot, idx)
     }
 
@@ -2052,8 +2057,18 @@ impl ActorHarness {
         let mut deal_ids: Vec<DealID> = Vec::new();
         let mut sector_infos: Vec<SectorOnChainInfo> = Vec::new();
+        let mut has_active_sector = false;
         for sector in sectors.iter() {
+            let (_, partition) = self.find_sector(&rt, sector);
+            let non_active = partition.terminated.get(sector)
+                || partition.faults.get(sector)
+                || partition.unproven.get(sector);
+            if !non_active {
+                has_active_sector = true;
+            }
+
             let sector = self.get_sector(rt, sector);
+
             deal_ids.extend(sector.deal_ids.iter());
             sector_infos.push(sector);
         }
@@ -2111,14 +2126,17 @@ impl ActorHarness {
             raw_byte_delta: -sector_power.raw.clone(),
             quality_adjusted_delta: -sector_power.qa.clone(),
         };
-        rt.expect_send_simple(
-            STORAGE_POWER_ACTOR_ADDR,
-            UPDATE_CLAIMED_POWER_METHOD,
-            IpldBlock::serialize_cbor(&params).unwrap(),
-            TokenAmount::zero(),
-            None,
-            ExitCode::OK,
-        );
+
+        if has_active_sector {
+            rt.expect_send_simple(
+                STORAGE_POWER_ACTOR_ADDR,
+                UPDATE_CLAIMED_POWER_METHOD,
+                IpldBlock::serialize_cbor(&params).unwrap(),
+                TokenAmount::zero(),
+                None,
+                ExitCode::OK,
+            );
+        }
 
         // create declarations
         let state: State = rt.get_state();
@@ -2593,6 +2611,28 @@ impl ActorHarness {
         Ok(())
     }
 
+    pub fn move_partitions(
+        &self,
+        rt: &MockRuntime,
+        orig_deadline: u64,
+        dest_deadline: u64,
+        partitions: BitField,
+        mut f: impl FnMut(),
+    ) -> Result<(), ActorError> {
+        f();
+
+        let params = MovePartitionsParams { orig_deadline, dest_deadline, partitions };
+
+        rt.expect_validate_caller_addr(self.caller_addrs());
+        rt.set_caller(*ACCOUNT_ACTOR_CODE_ID, self.worker);
+
+        rt.call::<Actor>(
+            Method::MovePartitions as u64,
+            IpldBlock::serialize_cbor(&params).unwrap(),
+        )?;
+        rt.verify();
+        Ok(())
+    }
+
     pub fn get_info(&self, rt: &MockRuntime) -> MinerInfo {
         let state: State = rt.get_state();
         state.get_info(rt.store()).unwrap()
diff --git a/test_vm/tests/move_partitions_test.rs b/test_vm/tests/move_partitions_test.rs
new file mode 100644
index 000000000..724561715
--- /dev/null
+++ b/test_vm/tests/move_partitions_test.rs
@@ -0,0 +1,213 @@
+use fvm_ipld_blockstore::MemoryBlockstore;
+use fvm_ipld_encoding::ipld_block::IpldBlock;
+use fvm_shared::address::Address;
+use fvm_shared::bigint::Zero;
+use fvm_shared::econ::TokenAmount;
+use fvm_shared::error::ExitCode;
+use fvm_shared::sector::{RegisteredSealProof, SectorNumber};
+
+use fil_actor_cron::Method as CronMethod;
+use fil_actor_market::Method as MarketMethod;
+use fil_actor_miner::{
+    power_for_sector, DeadlineInfo, Method as MinerMethod, MovePartitionsParams,
+    ProveCommitSectorParams, State as MinerState,
+};
+
+use fil_actor_power::{Method as PowerMethod, State as PowerState};
+use fil_actors_integration_tests::expects::Expect;
+use fil_actors_integration_tests::util::{
+    advance_by_deadline_to_epoch, advance_to_proving_deadline, assert_invariants, create_accounts,
+    create_miner, cron_tick, get_network_stats, make_bitfield, miner_balance, precommit_sectors,
+    submit_windowed_post,
+};
+use fil_actors_runtime::runtime::Policy;
+use fil_actors_runtime::{
+    CRON_ACTOR_ADDR, CRON_ACTOR_ID, STORAGE_MARKET_ACTOR_ADDR, STORAGE_POWER_ACTOR_ADDR,
+    STORAGE_POWER_ACTOR_ID, SYSTEM_ACTOR_ADDR,
+};
+use test_vm::TestVM;
+use vm_api::trace::ExpectInvocation;
+use vm_api::util::{apply_ok, get_state, DynBlockstore};
+use vm_api::VM;
+
+#[test]
+fn move_partitions_success() {
+    let store = MemoryBlockstore::new();
+    let (v, miner, sector) = setup(&store);
+
+    submit_post_succeeds_test(&v, miner.clone(), sector);
+
+    let prove_time = v.epoch() + Policy::default().wpost_dispute_window;
+    advance_by_deadline_to_epoch(&v, &miner.miner_id, prove_time);
+
+    let move_params = MovePartitionsParams {
+        orig_deadline: 0,
+        dest_deadline: 47,
+        partitions: make_bitfield(&[0u64]),
+    };
+    let prove_params_ser = IpldBlock::serialize_cbor(&move_params).unwrap();
+    apply_ok(
+        &v,
+        &miner.worker,
+        &miner.miner_robust,
+        &TokenAmount::zero(),
+        MinerMethod::MovePartitions as u64,
+        Some(move_params),
+    );
+    ExpectInvocation {
+        from: miner.worker.id().unwrap(),
+        to: miner.miner_id,
+        method: MinerMethod::MovePartitions as u64,
+        params: Some(prove_params_ser),
+        subinvocs: Some(vec![]),
+        ..Default::default()
+    }
+    .matches(v.take_invocations().last().unwrap());
+
+    cron_tick(&v);
+    v.set_epoch(v.epoch() + 1);
+    assert_invariants(&v, &Policy::default());
+}
+
+fn submit_post_succeeds_test(v: &dyn VM, miner_info: MinerInfo, sector_info: SectorInfo) {
+    // submit post
+    let st: MinerState = get_state(v, &miner_info.miner_id).unwrap();
+    let sector =
+        st.get_sector(&DynBlockstore::wrap(v.blockstore()), sector_info.number).unwrap().unwrap();
+    let sector_power = power_for_sector(miner_info.seal_proof.sector_size().unwrap(), &sector);
+    submit_windowed_post(
+        v,
+        &miner_info.worker,
+        &miner_info.miner_id,
+        sector_info.deadline_info,
+        sector_info.partition_index,
+        Some(sector_power.clone()),
+    );
+    let balances = miner_balance(v, &miner_info.miner_id);
+    assert!(balances.initial_pledge.is_positive());
+    let p_st: PowerState = get_state(v, &STORAGE_POWER_ACTOR_ADDR).unwrap();
+    assert_eq!(sector_power.raw, p_st.total_bytes_committed);
+
+    assert_invariants(v, &Policy::default());
+}
+
+struct SectorInfo {
+    number: SectorNumber,
+    deadline_info: DeadlineInfo,
+    partition_index: u64,
+}
+
+#[derive(Clone)]
+struct MinerInfo {
+    seal_proof: RegisteredSealProof,
+    worker: Address,
+    miner_id: Address,
+    miner_robust: Address,
+}
+
+fn setup(store: &'_ MemoryBlockstore) -> (TestVM<'_, MemoryBlockstore>, MinerInfo, SectorInfo) {
+    let v = TestVM::<MemoryBlockstore>::new_with_singletons(store);
+    let addrs = create_accounts(&v, 1, &TokenAmount::from_whole(10_000));
+    let seal_proof = RegisteredSealProof::StackedDRG32GiBV1P1;
+    let (owner, worker) = (addrs[0], addrs[0]);
+    let (id_addr, robust_addr) = create_miner(
+        &v,
+        &owner,
+        &worker,
+        seal_proof.registered_window_post_proof().unwrap(),
+        &TokenAmount::from_whole(10_000),
+    );
+    v.set_epoch(200);
+
+    // precommit and advance to prove commit time
+    let sector_number: SectorNumber = 100;
+    precommit_sectors(&v, 1, 1, &worker, &id_addr, seal_proof, sector_number, true, None);
+
+    let balances = miner_balance(&v, &id_addr);
+    assert!(balances.pre_commit_deposit.is_positive());
+
+    let prove_time = v.epoch() + Policy::default().pre_commit_challenge_delay + 1;
+    advance_by_deadline_to_epoch(&v, &id_addr, prove_time);
+
+    // prove commit, cron, advance to post time
+    let prove_params = ProveCommitSectorParams { sector_number, proof: vec![] };
+    let prove_params_ser = IpldBlock::serialize_cbor(&prove_params).unwrap();
+    apply_ok(
+        &v,
+        &worker,
+        &robust_addr,
+        &TokenAmount::zero(),
+        MinerMethod::ProveCommitSector as u64,
+        Some(prove_params),
+    );
+    ExpectInvocation {
+        from: worker.id().unwrap(),
+        to: id_addr,
+        method: MinerMethod::ProveCommitSector as u64,
+        params: Some(prove_params_ser),
+        subinvocs: Some(vec![Expect::power_submit_porep(id_addr.id().unwrap())]),
+        ..Default::default()
+    }
+    .matches(v.take_invocations().last().unwrap());
+    let res = v
+        .execute_message(
+            &SYSTEM_ACTOR_ADDR,
+            &CRON_ACTOR_ADDR,
+            &TokenAmount::zero(),
+            CronMethod::EpochTick as u64,
+            None,
+        )
+        .unwrap();
+    assert_eq!(ExitCode::OK, res.code);
+    ExpectInvocation {
+        to: CRON_ACTOR_ADDR,
+        method: CronMethod::EpochTick as u64,
+        subinvocs: Some(vec![
+            ExpectInvocation {
+                from: CRON_ACTOR_ID,
+                to: STORAGE_POWER_ACTOR_ADDR,
+                method: PowerMethod::OnEpochTickEnd as u64,
+                subinvocs: Some(vec![
+                    Expect::reward_this_epoch(STORAGE_POWER_ACTOR_ID),
+                    ExpectInvocation {
+                        from: STORAGE_POWER_ACTOR_ID,
+                        to: id_addr,
+                        method: MinerMethod::ConfirmSectorProofsValid as u64,
+                        subinvocs: Some(vec![Expect::power_update_pledge(
+                            id_addr.id().unwrap(),
+                            None,
+                        )]),
+                        ..Default::default()
+                    },
+                    Expect::reward_update_kpi(),
+                ]),
+                ..Default::default()
+            },
+            ExpectInvocation {
+                from: CRON_ACTOR_ID,
+                to: STORAGE_MARKET_ACTOR_ADDR,
+                method: MarketMethod::CronTick as u64,
+                ..Default::default()
+            },
+        ]),
+        ..Default::default()
+    }
+    .matches(v.take_invocations().last().unwrap());
+    // pcd is released; ip is added
+    let balances = miner_balance(&v, &id_addr);
+    assert!(balances.initial_pledge.is_positive());
+    assert!(balances.pre_commit_deposit.is_zero());
+
+    // power is unproven, so network stats are unchanged
+    let network_stats = get_network_stats(&v);
+    assert!(network_stats.total_bytes_committed.is_zero());
+    assert!(network_stats.total_pledge_collateral.is_positive());
+
+    let (deadline_info, partition_index) = advance_to_proving_deadline(&v, &id_addr, sector_number);
+    (
+        v,
+        MinerInfo { seal_proof, worker, miner_id: id_addr, miner_robust: robust_addr },
+        SectorInfo { number: sector_number, deadline_info, partition_index },
+    )
+}