
Commit 73abeda

Sometimes use new mon state when reloading in chanmon_consistency
When we reload a node in the `chanmon_consistency` fuzzer, we always reload with the latest `ChannelMonitor` state which was confirmed as persisted to the running `ChannelManager`. This is nice in that it tests losing the latest `ChannelMonitor`, but there may also be bugs in the on-startup `ChannelMonitor` replay. Thus, here, we optionally reload with a newer `ChannelMonitor` than the last-persisted one. Note that this breaks backwards compat for existing `chanmon_consistency` tests, requiring that 0x2c bytes be replaced with 0xb1, 0x2d with 0xb4 and 0x2e with 0xbd.
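
For reference, the new `reload_node!` argument can be read as a per-channel base-3 digit of the restart byte: each channel consumes one digit (0 = last-persisted monitor, 1 = second-oldest pending one, 2 = newest pending one), which is why node B, with its two channels, needs nine byte values. Below is a minimal standalone sketch of that decoding, assuming this reading of the diff; the `MonitorChoice` enum and `decode_choices` helper are illustrative names, not part of this commit.

// A minimal sketch (not from the commit) of how a restart byte picks which serialized
// `ChannelMonitor` each of a node's channels is reloaded with.
#[derive(Debug, PartialEq)]
enum MonitorChoice {
	Oldest,       // the monitor already confirmed persisted to the `ChannelManager`
	SecondOldest, // the first still-pending monitor update, when one exists
	Newest,       // the last still-pending monitor update, when one exists
}

// Each channel consumes one base-3 digit of the byte, mirroring the
// `use_old_mons % 3` / `use_old_mons /= 3` steps in `reload_node!`.
fn decode_choices(mut v: u8, num_channels: usize) -> Vec<MonitorChoice> {
	(0..num_channels)
		.map(|_| {
			let choice = match v % 3 {
				0 => MonitorChoice::Oldest,
				1 => MonitorChoice::SecondOldest,
				_ => MonitorChoice::Newest,
			};
			v /= 3;
			choice
		})
		.collect()
}

fn main() {
	// 0xb1 % 3 == 0: node A reloads with its last-persisted monitor, matching the
	// behaviour the old 0x2c command had.
	assert_eq!(decode_choices(0xb1, 1), vec![MonitorChoice::Oldest]);
	// 0xb4 decodes to (0, 0): both of node B's channels use their last-persisted monitor.
	assert_eq!(decode_choices(0xb4, 2), vec![MonitorChoice::Oldest, MonitorChoice::Oldest]);
}

Note that 0xb1, 0xb4, and 0xbd all decode to "oldest" for every channel, which is why those are the replacement bytes that keep existing test cases behaving as 0x2c, 0x2d, and 0x2e did.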
1 parent 0760f99 commit 73abeda

1 file changed (+46, -29 lines)
fuzz/src/chanmon_consistency.rs

+46 -29
@@ -80,7 +80,6 @@ use bitcoin::secp256k1::ecdsa::{RecoverableSignature, Signature};
 use bitcoin::secp256k1::schnorr;
 use bitcoin::secp256k1::{self, Message, PublicKey, Scalar, Secp256k1, SecretKey};
 
-use lightning::io::Cursor;
 use lightning::util::dyn_signer::DynSigner;
 
 use std::cell::RefCell;
@@ -253,7 +252,7 @@ impl chain::Watch<TestChannelSigner> for TestChainMonitor {
 			.unwrap_or(&map_entry.persisted_monitor);
 		let deserialized_monitor =
 			<(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
-				&mut Cursor::new(&latest_monitor_data),
+				&mut &latest_monitor_data[..],
 				(&*self.keys, &*self.keys),
 			)
 			.unwrap()
@@ -680,8 +679,8 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out, anchors: bool) {
 	let mon_style = [default_mon_style.clone(), default_mon_style.clone(), default_mon_style];
 
 	macro_rules! reload_node {
-		($ser: expr, $node_id: expr, $old_monitors: expr, $keys_manager: expr, $fee_estimator: expr) => {{
-			let keys_manager = Arc::clone(&$keys_manager);
+		($ser: expr, $node_id: expr, $old_monitors: expr, $use_old_mons: expr, $keys: expr, $fee_estimator: expr) => {{
+			let keys_manager = Arc::clone(&$keys);
 			let logger: Arc<dyn Logger> =
 				Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone()));
 			let chain_monitor = Arc::new(TestChainMonitor::new(
@@ -691,7 +690,7 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out, anchors: bool) {
 				Arc::new(TestPersister {
 					update_ret: Mutex::new(ChannelMonitorUpdateStatus::Completed),
 				}),
-				Arc::clone(&$keys_manager),
+				Arc::clone(&$keys),
 			));
 
 			let mut config = UserConfig::default();
@@ -704,16 +703,31 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out, anchors: bool) {
 
 			let mut monitors = new_hash_map();
 			let mut old_monitors = $old_monitors.latest_monitors.lock().unwrap();
+			let mut use_old_mons = $use_old_mons;
 			for (channel_id, mut prev_state) in old_monitors.drain() {
-				monitors.insert(
-					channel_id,
-					<(BlockHash, ChannelMonitor<TestChannelSigner>)>::read(
-						&mut Cursor::new(&prev_state.persisted_monitor),
-						(&*$keys_manager, &*$keys_manager),
-					)
-					.expect("Failed to read monitor")
-					.1,
-				);
+				let serialized_mon = if use_old_mons % 3 == 0 {
+					// Reload with the oldest `ChannelMonitor` (the one that we already told
+					// `ChannelManager` we finished persisting).
+					prev_state.persisted_monitor
+				} else if use_old_mons % 3 == 1 {
+					// Reload with the second-oldest `ChannelMonitor`
+					let old_mon = prev_state.persisted_monitor;
+					prev_state.pending_monitors.drain(..).next().map(|(_, v)| v).unwrap_or(old_mon)
+				} else {
+					// Reload with the newest `ChannelMonitor`
+					let old_mon = prev_state.persisted_monitor;
+					prev_state.pending_monitors.pop().map(|(_, v)| v).unwrap_or(old_mon)
+				};
+				// Use a different value of `use_old_mons` if we have another monitor (only node B)
+				use_old_mons /= 3;
+				let mon = <(BlockHash, ChannelMonitor<TestChannelSigner>)>::read(
+					&mut &serialized_mon[..],
+					(&*$keys, &*$keys),
+				)
+				.expect("Failed to read monitor");
+				monitors.insert(channel_id, mon.1);
+				// Update the latest `ChannelMonitor` state to match what we just told LDK.
+				prev_state.persisted_monitor = serialized_mon;
 				// Wipe any `ChannelMonitor`s which we never told LDK we finished persisting,
 				// considering them discarded. LDK should replay these for us as they're stored in
 				// the `ChannelManager`.
@@ -726,9 +740,9 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out, anchors: bool) {
 			}
 
 			let read_args = ChannelManagerReadArgs {
-				entropy_source: keys_manager.clone(),
-				node_signer: keys_manager.clone(),
-				signer_provider: keys_manager.clone(),
+				entropy_source: Arc::clone(&keys_manager),
+				node_signer: Arc::clone(&keys_manager),
+				signer_provider: keys_manager,
 				fee_estimator: $fee_estimator.clone(),
 				chain_monitor: chain_monitor.clone(),
 				tx_broadcaster: broadcast.clone(),
@@ -739,12 +753,9 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out, anchors: bool) {
 				channel_monitors: monitor_refs,
 			};
 
-			let res = (
-				<(BlockHash, ChanMan)>::read(&mut Cursor::new(&$ser.0), read_args)
-					.expect("Failed to read manager")
-					.1,
-				chain_monitor.clone(),
-			);
+			let manager = <(BlockHash, ChanMan)>::read(&mut &$ser.0[..], read_args)
+				.expect("Failed to read manager");
+			let res = (manager.1, chain_monitor.clone());
 			for (channel_id, mon) in monitors.drain() {
 				assert_eq!(
 					chain_monitor.chain_monitor.watch_channel(channel_id, mon),
@@ -1503,7 +1514,9 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out, anchors: bool) {
 			0x26 => process_ev_noret!(2, true),
 			0x27 => process_ev_noret!(2, false),
 
-			0x2c => {
+			0xb0 | 0xb1 | 0xb2 => {
+				// Restart node A, picking among the in-flight `ChannelMonitor`s to use based on
+				// the value of `v` we're matching.
 				if !chan_a_disconnected {
 					nodes[1].peer_disconnected(nodes[0].get_our_node_id());
 					chan_a_disconnected = true;
@@ -1515,11 +1528,13 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out, anchors: bool) {
 					ba_events.clear();
 				}
 				let (new_node_a, new_monitor_a) =
-					reload_node!(node_a_ser, 0, monitor_a, keys_manager_a, fee_est_a);
+					reload_node!(node_a_ser, 0, monitor_a, v, keys_manager_a, fee_est_a);
 				nodes[0] = new_node_a;
 				monitor_a = new_monitor_a;
 			},
-			0x2d => {
+			0xb3..=0xbb => {
+				// Restart node B, picking among the in-flight `ChannelMonitor`s to use based on
+				// the value of `v` we're matching.
 				if !chan_a_disconnected {
 					nodes[0].peer_disconnected(nodes[1].get_our_node_id());
 					chan_a_disconnected = true;
@@ -1535,11 +1550,13 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out, anchors: bool) {
 					cb_events.clear();
 				}
 				let (new_node_b, new_monitor_b) =
-					reload_node!(node_b_ser, 1, monitor_b, keys_manager_b, fee_est_b);
+					reload_node!(node_b_ser, 1, monitor_b, v, keys_manager_b, fee_est_b);
 				nodes[1] = new_node_b;
 				monitor_b = new_monitor_b;
 			},
-			0x2e => {
+			0xbc | 0xbd | 0xbe => {
+				// Restart node C, picking among the in-flight `ChannelMonitor`s to use based on
+				// the value of `v` we're matching.
 				if !chan_b_disconnected {
 					nodes[1].peer_disconnected(nodes[2].get_our_node_id());
 					chan_b_disconnected = true;
@@ -1551,7 +1568,7 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out, anchors: bool) {
 					cb_events.clear();
 				}
 				let (new_node_c, new_monitor_c) =
-					reload_node!(node_c_ser, 2, monitor_c, keys_manager_c, fee_est_c);
+					reload_node!(node_c_ser, 2, monitor_c, v, keys_manager_c, fee_est_c);
 				nodes[2] = new_node_c;
 				monitor_c = new_monitor_c;
 			},
