Skip to content

Commit

Permalink
Add test coverage for holder commitment rebroadcast after reorg
Browse files Browse the repository at this point in the history
  • Loading branch information
wpaulino committed Dec 12, 2023
1 parent 90f24a6 commit 60bb39a
Showing 1 changed file with 119 additions and 0 deletions.
119 changes: 119 additions & 0 deletions lightning/src/ln/reorg_tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -759,3 +759,122 @@ fn test_htlc_preimage_claim_prev_counterparty_commitment_after_current_counterpa
// commitment (still unrevoked) is the currently confirmed closing transaction.
assert_eq!(htlc_preimage_tx.input[0].witness.second_to_last().unwrap(), &payment_preimage.0[..]);
}

/// Tests that a node (A) which saw a confirmed counterparty commitment transaction get reorged
/// out of the chain will retry broadcasting its own (holder) commitment transaction.
///
/// * `anchors` — negotiate zero-fee anchor outputs on the channel. With anchors, HTLC claims on
///   the holder commitment are not broadcast automatically by the broadcaster in this path, so
///   fewer transactions are expected below.
/// * `revoked_counterparty_commitment` — have B broadcast a *revoked* state by reloading B from
///   a snapshot taken before a fee update advanced the channel state.
fn do_test_retries_own_commitment_broadcast_after_reorg(anchors: bool, revoked_counterparty_commitment: bool) {
// Tests that a node will retry broadcasting its own commitment after seeing a confirmed
// counterparty commitment be reorged out.
let mut chanmon_cfgs = create_chanmon_cfgs(2);
if revoked_counterparty_commitment {
// B will later broadcast a stale (revoked) commitment; disable the signer's revocation
// policy check so the test signer lets it sign that state.
chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
}
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let mut config = test_default_channel_config();
if anchors {
config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
config.manually_accept_inbound_channels = true;
}
// NOTE: these reload artifacts are declared before `nodes` so they outlive it — required for
// the potential `reload_node!` below (drop order matters for the test harness).
let persister;
let new_chain_monitor;
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), Some(config)]);
let nodes_1_deserialized;
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);

// Route a payment so we have an HTLC to claim as well.
let _ = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);

if revoked_counterparty_commitment {
// Trigger a fee update such that we advance the state. We will have B broadcast its state
// without the fee update.
// Snapshot B's manager and monitor *before* the state advances; reloading from these makes
// B's latest-known commitment a revoked one from A's perspective.
let serialized_node = nodes[1].node.encode();
let serialized_monitor = get_monitor!(nodes[1], chan_id).encode();

*chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap() += 1;
nodes[0].node.timer_tick_occurred();
check_added_monitors!(nodes[0], 1);

let fee_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &fee_update.update_fee.unwrap());
commitment_signed_dance!(nodes[1], nodes[0], fee_update.commitment_signed, false);

// Roll B back to the pre-fee-update snapshot.
reload_node!(
nodes[1], config, &serialized_node, &[&serialized_monitor], persister, new_chain_monitor, nodes_1_deserialized
);
}

// Connect blocks until the HTLC expiry is met, prompting a commitment broadcast by A.
connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
check_closed_broadcast(&nodes[0], 1, true);
check_added_monitors(&nodes[0], 1);
check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100_000);

{
// A broadcasts its holder commitment; without anchors it also broadcasts the HTLC-timeout
// transaction spending it (with anchors, HTLC claims are handled separately).
let mut txn = nodes[0].tx_broadcaster.txn_broadcast();
if anchors {
assert_eq!(txn.len(), 1);
let commitment_tx_a = txn.pop().unwrap();
check_spends!(commitment_tx_a, funding_tx);
} else {
assert_eq!(txn.len(), 2);
let htlc_tx_a = txn.pop().unwrap();
let commitment_tx_a = txn.pop().unwrap();
check_spends!(commitment_tx_a, funding_tx);
check_spends!(htlc_tx_a, commitment_tx_a);
}
};

// B will also broadcast its own commitment.
nodes[1].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[0].node.get_our_node_id()).unwrap();
check_closed_broadcast(&nodes[1], 1, true);
check_added_monitors(&nodes[1], 1);
check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100_000);

// Capture B's commitment (possibly the revoked one, if B was reloaded above) so we can mine
// it on A's chain.
let commitment_b = {
let mut txn = nodes[1].tx_broadcaster.txn_broadcast();
assert_eq!(txn.len(), 1);
let tx = txn.pop().unwrap();
check_spends!(tx, funding_tx);
tx
};

// Confirm B's commitment, A should now broadcast an HTLC timeout for commitment B.
mine_transaction(&nodes[0], &commitment_b);
{
let mut txn = nodes[0].tx_broadcaster.txn_broadcast();
if nodes[0].connect_style.borrow().updates_best_block_first() {
// `commitment_a` and `htlc_timeout_a` are rebroadcast because the best block was
// updated prior to seeing `commitment_b`.
assert_eq!(txn.len(), if anchors { 2 } else { 3 });
check_spends!(txn.last().unwrap(), commitment_b);
} else {
assert_eq!(txn.len(), 1);
check_spends!(txn[0], commitment_b);
}
}

// Disconnect the block, allowing A to retry its own commitment. Note that we connect two
// blocks, one to get us back to the original height, and another to retry our pending claims.
disconnect_blocks(&nodes[0], 1);
connect_blocks(&nodes[0], 2);
{
// This is the behavior under test: after the reorg, A rebroadcasts its *own* commitment
// (and, without anchors, the HTLC-timeout spending it) rather than B's reorged-out one.
let mut txn = nodes[0].tx_broadcaster.unique_txn_broadcast();
if anchors {
assert_eq!(txn.len(), 1);
check_spends!(txn[0], funding_tx);
} else {
assert_eq!(txn.len(), 2);
check_spends!(txn[0], txn[1]); // HTLC timeout A
check_spends!(txn[1], funding_tx); // Commitment A
assert_ne!(txn[1].txid(), commitment_b.txid());
}
}
}

#[test]
fn test_retries_own_commitment_broadcast_after_reorg() {
	// Exercise every combination of (anchors, revoked_counterparty_commitment), in the same
	// order as before: (false, false), (false, true), (true, false), (true, true).
	for &anchors in &[false, true] {
		for &revoked_counterparty_commitment in &[false, true] {
			do_test_retries_own_commitment_broadcast_after_reorg(anchors, revoked_counterparty_commitment);
		}
	}
}

0 comments on commit 60bb39a

Please sign in to comment.