Skip to content

Commit

Permalink
Add test coverage for holder commitment rebroadcast after reorg
Browse files Browse the repository at this point in the history
  • Loading branch information
wpaulino committed Dec 5, 2023
1 parent d9422ca commit 569fd4a
Showing 1 changed file with 71 additions and 0 deletions.
71 changes: 71 additions & 0 deletions lightning/src/ln/reorg_tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -759,3 +759,74 @@ fn test_htlc_preimage_claim_prev_counterparty_commitment_after_current_counterpa
// commitment (still unrevoked) is the currently confirmed closing transaction.
assert_eq!(htlc_preimage_tx.input[0].witness.second_to_last().unwrap(), &payment_preimage.0[..]);
}

#[test]
fn test_retries_own_commitment_broadcast_after_reorg() {
	// Tests that a node will retry broadcasting its own commitment after seeing a confirmed
	// counterparty commitment be reorged out.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	// Two nodes in this test, so the per-node `UserConfig` override slice must have exactly
	// two entries (the original passed three, inconsistent with the node count).
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);

	// Route a payment so we have an HTLC to claim as well.
	let _ = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);

	// Connect blocks until the HTLC expiry is met, prompting a commitment broadcast by A.
	connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
	check_closed_broadcast(&nodes[0], 1, true);
	check_added_monitors(&nodes[0], 1);
	check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100_000);

	{
		// A should have broadcast its commitment and an HTLC-timeout spending it.
		let mut txn = nodes[0].tx_broadcaster.txn_broadcast();
		assert_eq!(txn.len(), 2);
		let htlc_tx_a = txn.pop().unwrap();
		let commitment_tx_a = txn.pop().unwrap();
		check_spends!(commitment_tx_a, funding_tx);
		check_spends!(htlc_tx_a, commitment_tx_a);
	}

	// B will also broadcast its own commitment.
	nodes[1].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[0].node.get_our_node_id()).unwrap();
	check_closed_broadcast(&nodes[1], 1, true);
	check_added_monitors(&nodes[1], 1);
	check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed, false, &[nodes[0].node.get_our_node_id()], 100_000);

	let commitment_b = {
		let mut txn = nodes[1].tx_broadcaster.txn_broadcast();
		assert_eq!(txn.len(), 1);
		let tx = txn.pop().unwrap();
		check_spends!(tx, funding_tx);
		tx
	};

	// Confirm B's commitment, A should now broadcast an HTLC timeout for commitment B.
	mine_transaction(&nodes[0], &commitment_b);
	{
		let mut txn = nodes[0].tx_broadcaster.txn_broadcast();
		if nodes[0].connect_style.borrow().updates_best_block_first() {
			// `commitment_a` and `htlc_timeout_a` are rebroadcast because the best block was
			// updated prior to seeing `commitment_b`.
			assert_eq!(txn.len(), 3);
			check_spends!(txn[2], commitment_b);
		} else {
			assert_eq!(txn.len(), 1);
			check_spends!(txn[0], commitment_b);
		}
	}

	// Disconnect the block, allowing A to retry its own commitment. Note that we connect two
	// blocks, one to get us back to the original height, and another to retry our pending claims.
	disconnect_blocks(&nodes[0], 1);
	connect_blocks(&nodes[0], 2);
	{
		// After the reorg, A must fall back to its own (holder) commitment rather than keep
		// claiming against B's now-unconfirmed commitment.
		let mut txn = nodes[0].tx_broadcaster.unique_txn_broadcast();
		assert_eq!(txn.len(), 2);
		check_spends!(txn[0], txn[1]); // HTLC timeout A
		check_spends!(txn[1], funding_tx); // Commitment A
		assert_ne!(txn[1].txid(), commitment_b.txid());
	}
}

0 comments on commit 569fd4a

Please sign in to comment.