diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index 239ff3f0c98..ce3608bdbe8 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -38,6 +38,7 @@ use lightning::sign::ChangeDestinationSource; #[cfg(feature = "std")] use lightning::sign::ChangeDestinationSourceSync; use lightning::sign::OutputSpender; +use lightning::util::async_poll::FutureSpawner; use lightning::util::logger::Logger; use lightning::util::persist::{KVStore, Persister}; use lightning::util::sweep::OutputSweeper; @@ -374,7 +375,7 @@ macro_rules! define_run_body { if $channel_manager.get_cm().get_and_clear_needs_persistence() { log_trace!($logger, "Persisting ChannelManager..."); - $persister.persist_manager(&$channel_manager)?; + $persister.persist_manager(&$channel_manager).await?; log_trace!($logger, "Done persisting ChannelManager."); } if $timer_elapsed(&mut last_freshness_call, FRESHNESS_TIMER) { @@ -435,7 +436,7 @@ macro_rules! define_run_body { log_trace!($logger, "Persisting network graph."); } - if let Err(e) = $persister.persist_graph(network_graph) { + if let Err(e) = $persister.persist_graph(network_graph).await { log_error!($logger, "Error: Failed to persist network graph, check your disk and permissions {}", e) } @@ -463,7 +464,7 @@ macro_rules! define_run_body { } else { log_trace!($logger, "Persisting scorer"); } - if let Err(e) = $persister.persist_scorer(&scorer) { + if let Err(e) = $persister.persist_scorer(&scorer).await { log_error!($logger, "Error: Failed to persist scorer, check your disk and permissions {}", e) } } @@ -486,16 +487,16 @@ macro_rules! define_run_body { // After we exit, ensure we persist the ChannelManager one final time - this avoids // some races where users quit while channel updates were in-flight, with // ChannelMonitor update(s) persisted without a corresponding ChannelManager update. 
- $persister.persist_manager(&$channel_manager)?; + $persister.persist_manager(&$channel_manager).await?; // Persist Scorer on exit if let Some(ref scorer) = $scorer { - $persister.persist_scorer(&scorer)?; + $persister.persist_scorer(&scorer).await?; } // Persist NetworkGraph on exit if let Some(network_graph) = $gossip_sync.network_graph() { - $persister.persist_graph(network_graph)?; + $persister.persist_graph(network_graph).await?; } Ok(()) @@ -780,8 +781,9 @@ pub async fn process_events_async< EventHandlerFuture: core::future::Future>, EventHandler: Fn(Event) -> EventHandlerFuture, PS: 'static + Deref + Send, + FS: FutureSpawner, M: 'static - + Deref::Signer, CF, T, F, L, P>> + + Deref::Signer, CF, T, F, L, P, FS>> + Send + Sync, CM: 'static + Deref, @@ -838,7 +840,7 @@ where if let Some(duration_since_epoch) = fetch_time() { if update_scorer(scorer, &event, duration_since_epoch) { log_trace!(logger, "Persisting scorer after update"); - if let Err(e) = persister.persist_scorer(&*scorer) { + if let Err(e) = persister.persist_scorer(&*scorer).await { log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e); // We opt not to abort early on persistence failure here as persisting // the scorer is non-critical and we still hope that it will have @@ -977,7 +979,9 @@ impl BackgroundProcessor { EH: 'static + EventHandler + Send, PS: 'static + Deref + Send, M: 'static - + Deref::Signer, CF, T, F, L, P>> + + Deref< + Target = ChainMonitor<::Signer, CF, T, F, L, P, FS>, + > + Send + Sync, CM: 'static + Deref + Send, @@ -992,6 +996,7 @@ impl BackgroundProcessor { O: 'static + Deref, K: 'static + Deref, OS: 'static + Deref> + Send, + FS: FutureSpawner, >( persister: PS, event_handler: EH, chain_monitor: M, channel_manager: CM, onion_messenger: Option, gossip_sync: GossipSync, peer_manager: PM, @@ -1028,70 +1033,72 @@ impl BackgroundProcessor { .expect("Time should be sometime after 1970"); if update_scorer(scorer, &event, duration_since_epoch) { log_trace!(logger, "Persisting scorer after update"); - if let Err(e) = persister.persist_scorer(&scorer) { - log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e) - } + // if let Err(e) = persister.persist_scorer(&scorer).await { + // log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e) + // } } } event_handler.handle_event(event) }; - define_run_body!( - persister, - chain_monitor, - chain_monitor.process_pending_events(&event_handler), - channel_manager, - channel_manager.get_cm().process_pending_events(&event_handler), - onion_messenger, - if let Some(om) = &onion_messenger { - om.get_om().process_pending_events(&event_handler) - }, - peer_manager, - gossip_sync, - { - if let Some(ref sweeper) = sweeper { - let _ = sweeper.regenerate_and_broadcast_spend_if_necessary(); - } - }, - logger, - scorer, - stop_thread.load(Ordering::Acquire), - { - let sleeper = match (onion_messenger.as_ref(), liquidity_manager.as_ref()) { - (Some(om), Some(lm)) => Sleeper::from_four_futures( - &channel_manager.get_cm().get_event_or_persistence_needed_future(), - &chain_monitor.get_update_future(), - &om.get_om().get_update_future(), - &lm.get_lm().get_pending_msgs_future(), - ), - (Some(om), None) => Sleeper::from_three_futures( - &channel_manager.get_cm().get_event_or_persistence_needed_future(), - &chain_monitor.get_update_future(), - &om.get_om().get_update_future(), - ), - (None, Some(lm)) => Sleeper::from_three_futures( - 
&channel_manager.get_cm().get_event_or_persistence_needed_future(), - &chain_monitor.get_update_future(), - &lm.get_lm().get_pending_msgs_future(), - ), - (None, None) => Sleeper::from_two_futures( - &channel_manager.get_cm().get_event_or_persistence_needed_future(), - &chain_monitor.get_update_future(), - ), - }; - sleeper.wait_timeout(Duration::from_millis(100)); - }, - |_| Instant::now(), - |time: &Instant, dur| time.elapsed().as_secs() > dur, - false, - || { - use std::time::SystemTime; - Some( - SystemTime::now() - .duration_since(SystemTime::UNIX_EPOCH) - .expect("Time should be sometime after 1970"), - ) - }, - ) + // define_run_body!( + // persister, + // chain_monitor, + // chain_monitor.process_pending_events(&event_handler), + // channel_manager, + // channel_manager.get_cm().process_pending_events(&event_handler), + // onion_messenger, + // if let Some(om) = &onion_messenger { + // om.get_om().process_pending_events(&event_handler) + // }, + // peer_manager, + // gossip_sync, + // { + // if let Some(ref sweeper) = sweeper { + // let _ = sweeper.regenerate_and_broadcast_spend_if_necessary(); + // } + // }, + // logger, + // scorer, + // stop_thread.load(Ordering::Acquire), + // { + // let sleeper = match (onion_messenger.as_ref(), liquidity_manager.as_ref()) { + // (Some(om), Some(lm)) => Sleeper::from_four_futures( + // &channel_manager.get_cm().get_event_or_persistence_needed_future(), + // &chain_monitor.get_update_future(), + // &om.get_om().get_update_future(), + // &lm.get_lm().get_pending_msgs_future(), + // ), + // (Some(om), None) => Sleeper::from_three_futures( + // &channel_manager.get_cm().get_event_or_persistence_needed_future(), + // &chain_monitor.get_update_future(), + // &om.get_om().get_update_future(), + // ), + // (None, Some(lm)) => Sleeper::from_three_futures( + // &channel_manager.get_cm().get_event_or_persistence_needed_future(), + // &chain_monitor.get_update_future(), + // &lm.get_lm().get_pending_msgs_future(), + // ), + // (None, None) => Sleeper::from_two_futures( + // &channel_manager.get_cm().get_event_or_persistence_needed_future(), + // &chain_monitor.get_update_future(), + // ), + // }; + // sleeper.wait_timeout(Duration::from_millis(100)); + // }, + // |_| Instant::now(), + // |time: &Instant, dur| time.elapsed().as_secs() > dur, + // false, + // || { + // use std::time::SystemTime; + // Some( + // SystemTime::now() + // .duration_since(SystemTime::UNIX_EPOCH) + // .expect("Time should be sometime after 1970"), + // ) + // }, + // ) + + Ok(()) }); Self { stop_thread: stop_thread_clone, thread_handle: Some(handle) } } diff --git a/lightning-persister/src/fs_store.rs b/lightning-persister/src/fs_store.rs index 850a0786671..ea00615457d 100644 --- a/lightning-persister/src/fs_store.rs +++ b/lightning-persister/src/fs_store.rs @@ -1,6 +1,7 @@ //! Objects related to [`FilesystemStore`] live here. 
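As an illustration (not part of this diff): `process_events_async` and `ChainMonitor` are now generic over an `FS: FutureSpawner`, so callers must supply something that can drive persistence futures which do not complete inline. Assuming a tokio runtime is available, a spawner might look like the following; `TokioSpawner` is a hypothetical name, and any executor able to run `Send` futures would serve equally well.

use core::future::Future;
use lightning::util::async_poll::FutureSpawner;

// Hypothetical tokio-backed spawner (illustrative only).
struct TokioSpawner(tokio::runtime::Handle);

impl FutureSpawner for TokioSpawner {
	fn spawn<T: Future<Output = ()> + Send + 'static>(&self, future: T) {
		// Hand the still-pending persistence future to the runtime; the
		// ChainMonitor bundles its completion callback into `future` itself.
		self.0.spawn(future);
	}
}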
use crate::utils::{check_namespace_key_validity, is_valid_kvstore_str}; +use lightning::util::async_poll::{AsyncResult, AsyncResultType}; use lightning::util::persist::{KVStore, MigratableKVStore}; use lightning::util::string::PrintableString; @@ -92,35 +93,10 @@ impl FilesystemStore { } } -impl KVStore for FilesystemStore { - fn read( - &self, primary_namespace: &str, secondary_namespace: &str, key: &str, - ) -> lightning::io::Result> { - check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "read")?; - - let mut dest_file_path = self.get_dest_dir_path(primary_namespace, secondary_namespace)?; - dest_file_path.push(key); - - let mut buf = Vec::new(); - { - let inner_lock_ref = { - let mut outer_lock = self.locks.lock().unwrap(); - Arc::clone(&outer_lock.entry(dest_file_path.clone()).or_default()) - }; - let _guard = inner_lock_ref.read().unwrap(); - - let mut f = fs::File::open(dest_file_path)?; - f.read_to_end(&mut buf)?; - } - - self.garbage_collect_locks(); - - Ok(buf) - } - +impl FilesystemStore { fn write( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8], - ) -> lightning::io::Result<()> { + ) -> Result<(), lightning::io::Error> { check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "write")?; let mut dest_file_path = self.get_dest_dir_path(primary_namespace, secondary_namespace)?; @@ -204,6 +180,41 @@ impl KVStore for FilesystemStore { res } +} + +impl KVStore for FilesystemStore { + fn read( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + ) -> lightning::io::Result> { + check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "read")?; + + let mut dest_file_path = self.get_dest_dir_path(primary_namespace, secondary_namespace)?; + dest_file_path.push(key); + + let mut buf = Vec::new(); + { + let inner_lock_ref = { + let mut outer_lock = self.locks.lock().unwrap(); + Arc::clone(&outer_lock.entry(dest_file_path.clone()).or_default()) + }; + let _guard = inner_lock_ref.read().unwrap(); + + let mut f = fs::File::open(dest_file_path)?; + f.read_to_end(&mut buf)?; + } + + self.garbage_collect_locks(); + + Ok(buf) + } + + fn write_async( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8], + ) -> AsyncResultType<'static, (), lightning::io::Error> { + let res = self.write(primary_namespace, secondary_namespace, key, buf); + + Box::pin(async move { res }) + } fn remove( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs index 09d87b775be..a97ef8c6b5b 100644 --- a/lightning/src/chain/chainmonitor.rs +++ b/lightning/src/chain/chainmonitor.rs @@ -36,11 +36,13 @@ use crate::chain::transaction::{OutPoint, TransactionData}; use crate::ln::types::ChannelId; use crate::sign::ecdsa::EcdsaChannelSigner; use crate::events::{self, Event, EventHandler, ReplayEvent}; +use crate::util::async_poll::{poll_or_spawn, AsyncResult, AsyncVoid, FutureSpawner}; use crate::util::logger::{Logger, WithContext}; use crate::util::errors::APIError; use crate::util::persist::MonitorName; use crate::util::wakers::{Future, Notifier}; use crate::ln::channel_state::ChannelDetails; +use crate::sync::{Arc}; use crate::prelude::*; use crate::sync::{RwLock, RwLockReadGuard, Mutex, MutexGuard}; @@ -122,7 +124,7 @@ pub trait Persist { /// /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager /// [`Writeable::write`]: 
crate::util::ser::Writeable::write - fn persist_new_channel(&self, monitor_name: MonitorName, monitor: &ChannelMonitor) -> ChannelMonitorUpdateStatus; + fn persist_new_channel(&self, monitor_name: MonitorName, monitor: &ChannelMonitor) -> AsyncResult<'static, ()>; /// Update one channel's data. The provided [`ChannelMonitor`] has already applied the given /// update. @@ -161,7 +163,7 @@ pub trait Persist { /// [`ChannelMonitorUpdateStatus`] for requirements when returning errors. /// /// [`Writeable::write`]: crate::util::ser::Writeable::write - fn update_persisted_channel(&self, monitor_name: MonitorName, monitor_update: Option<&ChannelMonitorUpdate>, monitor: &ChannelMonitor) -> ChannelMonitorUpdateStatus; + fn update_persisted_channel(&self, monitor_name: MonitorName, monitor_update: Option<&ChannelMonitorUpdate>, monitor: &ChannelMonitor) -> AsyncResult<'static, ()>; /// Prevents the channel monitor from being loaded on startup. /// /// Archiving the data in a backup location (rather than deleting it fully) is useful for @@ -173,7 +175,7 @@ pub trait Persist { /// the archive process. Additionally, because the archive operation could be retried on /// restart, this method must in that case be idempotent, ensuring it can handle scenarios where /// the monitor already exists in the archive. - fn archive_persisted_channel(&self, monitor_name: MonitorName); + fn archive_persisted_channel(&self, monitor_name: MonitorName) -> AsyncVoid; } struct MonitorHolder { @@ -233,14 +235,14 @@ impl Deref for LockedChannelMonitor<'_, Chann /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager /// [module-level documentation]: crate::chain::chainmonitor /// [`rebroadcast_pending_claims`]: Self::rebroadcast_pending_claims -pub struct ChainMonitor +pub struct ChainMonitor where C::Target: chain::Filter, T::Target: BroadcasterInterface, F::Target: FeeEstimator, L::Target: Logger, P::Target: Persist, { - monitors: RwLock>>, + monitors: Arc>>>, chain_source: Option, broadcaster: T, logger: L, @@ -248,16 +250,18 @@ pub struct ChainMonitor, PublicKey)>>, + pending_monitor_events: Arc, PublicKey)>>>, /// The best block height seen, used as a proxy for the passage of time. highest_chain_height: AtomicUsize, /// A [`Notifier`] used to wake up the background processor in case we have any [`Event`]s for /// it to give to users (or [`MonitorEvent`]s for `ChannelManager` to process). - event_notifier: Notifier, + event_notifier: Arc, + + future_spawner: Arc, } -impl ChainMonitor +impl ChainMonitor where C::Target: chain::Filter, T::Target: BroadcasterInterface, F::Target: FeeEstimator, @@ -347,18 +351,31 @@ where C::Target: chain::Filter, // `ChannelMonitorUpdate` after a channel persist for a channel with the same // `latest_update_id`. 
let _pending_monitor_updates = monitor_state.pending_monitor_updates.lock().unwrap(); - match self.persister.update_persisted_channel(monitor.persistence_key(), None, monitor) { - ChannelMonitorUpdateStatus::Completed => - log_trace!(logger, "Finished syncing Channel Monitor for channel {} for block-data", - log_funding_info!(monitor) - ), - ChannelMonitorUpdateStatus::InProgress => { - log_trace!(logger, "Channel Monitor sync for channel {} in progress.", log_funding_info!(monitor)); - } - ChannelMonitorUpdateStatus::UnrecoverableError => { - return Err(()); + let max_update_id = _pending_monitor_updates.iter().copied().max().unwrap_or(0); + + let persist_res = self.persister.update_persisted_channel(monitor.persistence_key(), None, monitor); + + let monitors = self.monitors.clone(); + let pending_monitor_updates_cb = self.pending_monitor_events.clone(); + let event_notifier = self.event_notifier.clone(); + let future_spawner = self.future_spawner.clone(); + let channel_id = *channel_id; + + match poll_or_spawn(persist_res, move || { + // TODO: Log error if the monitor is not persisted. + let _ = ChainMonitor::::channel_monitor_updated_internal(&monitors, &pending_monitor_updates_cb, &event_notifier, + channel_id, max_update_id); + }, future_spawner.deref()) { + Ok(true) => { + // log + }, + Ok(false) => { + // log + } + Err(_) => { + return Err(()); + }, } - } } // Register any new outputs with the chain source for filtering, storing any dependent @@ -388,17 +405,18 @@ where C::Target: chain::Filter, /// pre-filter blocks or only fetch blocks matching a compact filter. Otherwise, clients may /// always need to fetch full blocks absent another means for determining which blocks contain /// transactions relevant to the watched channels. - pub fn new(chain_source: Option, broadcaster: T, logger: L, feeest: F, persister: P) -> Self { + pub fn new(chain_source: Option, broadcaster: T, logger: L, feeest: F, persister: P, future_spawner: FS) -> Self { Self { - monitors: RwLock::new(new_hash_map()), + monitors: Arc::new(RwLock::new(new_hash_map())), chain_source, broadcaster, logger, fee_estimator: feeest, persister, - pending_monitor_events: Mutex::new(Vec::new()), + pending_monitor_events: Arc::new(Mutex::new(Vec::new())), highest_chain_height: AtomicUsize::new(0), - event_notifier: Notifier::new(), + event_notifier: Arc::new(Notifier::new()), + future_spawner: Arc::new(future_spawner), } } @@ -531,6 +549,40 @@ where C::Target: chain::Filter, Ok(()) } + fn channel_monitor_updated_internal( + monitors: &RwLock>>, + pending_monitor_events: &Mutex, PublicKey)>>, + event_notifier: &Notifier, + channel_id: ChannelId, completed_update_id: u64) -> Result<(), APIError> { + let monitors = monitors.read().unwrap(); + let monitor_data = if let Some(mon) = monitors.get(&channel_id) { mon } else { + return Err(APIError::APIMisuseError { err: format!("No ChannelMonitor matching channel ID {} found", channel_id) }); + }; + let mut pending_monitor_updates = monitor_data.pending_monitor_updates.lock().unwrap(); + pending_monitor_updates.retain(|update_id| *update_id != completed_update_id); + + // Note that we only check for pending non-chainsync monitor updates and we don't track monitor + // updates resulting from chainsync in `pending_monitor_updates`. + let monitor_is_pending_updates = monitor_data.has_pending_updates(&pending_monitor_updates); + + // TODO: Add logger + + if monitor_is_pending_updates { + // If there are still monitor updates pending, we cannot yet construct a + // Completed event. 
+ return Ok(()); + } + let funding_txo = monitor_data.monitor.get_funding_txo(); + pending_monitor_events.lock().unwrap().push((funding_txo, channel_id, vec![MonitorEvent::Completed { + funding_txo, + channel_id, + monitor_update_id: monitor_data.monitor.get_latest_update_id(), + }], monitor_data.monitor.get_counterparty_node_id())); + + event_notifier.notify(); + Ok(()) + } + /// This wrapper avoids having to update some of our tests for now as they assume the direct /// chain::Watch API wherein we mark a monitor fully-updated by just calling /// channel_monitor_updated once with the highest ID. @@ -669,8 +721,8 @@ where C::Target: chain::Filter, } } -impl -chain::Listen for ChainMonitor +impl +chain::Listen for ChainMonitor where C::Target: chain::Filter, T::Target: BroadcasterInterface, @@ -698,8 +750,8 @@ where } } -impl -chain::Confirm for ChainMonitor +impl +chain::Confirm for ChainMonitor where C::Target: chain::Filter, T::Target: BroadcasterInterface, @@ -752,8 +804,8 @@ where } } -impl -chain::Watch for ChainMonitor +impl +chain::Watch for ChainMonitor where C::Target: chain::Filter, T::Target: BroadcasterInterface, F::Target: FeeEstimator, @@ -774,15 +826,28 @@ where C::Target: chain::Filter, let update_id = monitor.get_latest_update_id(); let mut pending_monitor_updates = Vec::new(); let persist_res = self.persister.persist_new_channel(monitor.persistence_key(), &monitor); - match persist_res { - ChannelMonitorUpdateStatus::InProgress => { - log_info!(logger, "Persistence of new ChannelMonitor for channel {} in progress", log_funding_info!(monitor)); - pending_monitor_updates.push(update_id); - }, - ChannelMonitorUpdateStatus::Completed => { + + let update_status; + let monitors = self.monitors.clone(); + let pending_monitor_updates_cb = self.pending_monitor_events.clone(); + let event_notifier = self.event_notifier.clone(); + let future_spawner = self.future_spawner.clone(); + + match poll_or_spawn(persist_res, move || { + // TODO: Log error if the monitor is not persisted. + let _ = ChainMonitor::::channel_monitor_updated_internal(&monitors, &pending_monitor_updates_cb, &event_notifier, + channel_id, update_id); + }, future_spawner.deref()) { + Ok(true) => { log_info!(logger, "Persistence of new ChannelMonitor for channel {} completed", log_funding_info!(monitor)); + update_status = ChannelMonitorUpdateStatus::Completed; }, - ChannelMonitorUpdateStatus::UnrecoverableError => { + Ok(false) => { + log_info!(logger, "Persistence of new ChannelMonitor for channel {} in progress", log_funding_info!(monitor)); + pending_monitor_updates.push(update_id); + update_status = ChannelMonitorUpdateStatus::InProgress; + } + Err(_) => { let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. 
This indicates we cannot continue normal operation and must shut down."; log_error!(logger, "{}", err_str); panic!("{}", err_str); @@ -795,7 +860,7 @@ where C::Target: chain::Filter, monitor, pending_monitor_updates: Mutex::new(pending_monitor_updates), }); - Ok(persist_res) + Ok(update_status) } fn update_channel(&self, channel_id: ChannelId, update: &ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus { @@ -840,27 +905,40 @@ where C::Target: chain::Filter, } else { self.persister.update_persisted_channel(monitor.persistence_key(), Some(update), monitor) }; - match persist_res { - ChannelMonitorUpdateStatus::InProgress => { - pending_monitor_updates.push(update_id); + + let monitors = self.monitors.clone(); + let pending_monitor_updates_cb = self.pending_monitor_events.clone(); + let event_notifier = self.event_notifier.clone(); + let future_spawner = self.future_spawner.clone(); + + let update_status; + match poll_or_spawn(persist_res, move || { + // TODO: Log error if the monitor is not persisted. + let _ = ChainMonitor::::channel_monitor_updated_internal(&monitors, &pending_monitor_updates_cb, &event_notifier, + channel_id, update_id); + }, future_spawner.deref()) { + Ok(true) => { log_debug!(logger, - "Persistence of ChannelMonitorUpdate id {:?} for channel {} in progress", + "Persistence of ChannelMonitorUpdate id {:?} for channel {} completed", update_id, log_funding_info!(monitor) ); + update_status = ChannelMonitorUpdateStatus::Completed; }, - ChannelMonitorUpdateStatus::Completed => { + Ok(false) => { log_debug!(logger, - "Persistence of ChannelMonitorUpdate id {:?} for channel {} completed", + "Persistence of ChannelMonitorUpdate id {:?} for channel {} in progress", update_id, log_funding_info!(monitor) ); - }, - ChannelMonitorUpdateStatus::UnrecoverableError => { + pending_monitor_updates.push(update_id); + update_status = ChannelMonitorUpdateStatus::InProgress; + } + Err(_) => { // Take the monitors lock for writing so that we poison it and any future // operations going forward fail immediately. core::mem::drop(pending_monitor_updates); - core::mem::drop(monitors); + // core::mem::drop(monitors); let _poison = self.monitors.write().unwrap(); let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down."; log_error!(logger, "{}", err_str); @@ -870,7 +948,7 @@ where C::Target: chain::Filter, if update_res.is_err() { ChannelMonitorUpdateStatus::InProgress } else { - persist_res + update_status } } } @@ -891,7 +969,7 @@ where C::Target: chain::Filter, } } -impl events::EventsProvider for ChainMonitor +impl events::EventsProvider for ChainMonitor where C::Target: chain::Filter, T::Target: BroadcasterInterface, F::Target: FeeEstimator, @@ -1138,4 +1216,3 @@ mod tests { }).is_err()); } } - diff --git a/lightning/src/sign/ecdsa.rs b/lightning/src/sign/ecdsa.rs index f9c330bbc4c..52c388bd511 100644 --- a/lightning/src/sign/ecdsa.rs +++ b/lightning/src/sign/ecdsa.rs @@ -33,7 +33,7 @@ use crate::sign::{ChannelSigner, ChannelTransactionParameters, HTLCDescriptor}; /// /// [`ChannelManager::signer_unblocked`]: crate::ln::channelmanager::ChannelManager::signer_unblocked /// [`ChainMonitor::signer_unblocked`]: crate::chain::chainmonitor::ChainMonitor::signer_unblocked -pub trait EcdsaChannelSigner: ChannelSigner { +pub trait EcdsaChannelSigner: ChannelSigner + Send { /// Create a signature for a counterparty's commitment transaction and associated HTLC transactions. 
/// /// Policy checks should be implemented in this function, including checking the amount diff --git a/lightning/src/util/anchor_channel_reserves.rs b/lightning/src/util/anchor_channel_reserves.rs index 968a60ada0b..aa910fda840 100644 --- a/lightning/src/util/anchor_channel_reserves.rs +++ b/lightning/src/util/anchor_channel_reserves.rs @@ -38,6 +38,8 @@ use bitcoin::Weight; use core::cmp::min; use core::ops::Deref; +use super::async_poll::FutureSpawner; + // Transaction weights based on: // https://github.com/lightning/bolts/blob/master/03-transactions.md#appendix-a-expected-weights const COMMITMENT_TRANSACTION_BASE_WEIGHT: u64 = 900 + 224; @@ -270,12 +272,13 @@ pub fn get_supportable_anchor_channels( /// [Event::OpenChannelRequest]: crate::events::Event::OpenChannelRequest pub fn can_support_additional_anchor_channel< AChannelManagerRef: Deref, - ChannelSigner: EcdsaChannelSigner, + ChannelSigner: EcdsaChannelSigner + Send + Sync + 'static, FilterRef: Deref, BroadcasterRef: Deref, EstimatorRef: Deref, LoggerRef: Deref, PersistRef: Deref, + FS: FutureSpawner, ChainMonitorRef: Deref< Target = ChainMonitor< ChannelSigner, @@ -284,6 +287,7 @@ pub fn can_support_additional_anchor_channel< EstimatorRef, LoggerRef, PersistRef, + FS, >, >, >( diff --git a/lightning/src/util/async_poll.rs b/lightning/src/util/async_poll.rs index a0034a6caae..9eaed995c26 100644 --- a/lightning/src/util/async_poll.rs +++ b/lightning/src/util/async_poll.rs @@ -96,5 +96,46 @@ pub(crate) fn dummy_waker() -> Waker { unsafe { Waker::from_raw(RawWaker::new(core::ptr::null(), &DUMMY_WAKER_VTABLE)) } } +/// A type alias for a future that returns nothing. +pub type AsyncVoid = Pin + 'static + Send>>; + /// A type alias for a future that returns a result of type T. pub type AsyncResult<'a, T> = Pin> + 'a + Send>>; + +/// A type alias for a future that returns a result of type T with error type V. +pub type AsyncResultType<'a, T, V> = Pin> + 'a + Send>>; + +/// A type alias for a future that returns a result of type T. +pub trait FutureSpawner: Send + Sync + 'static { + /// Spawns a future on a runtime. + fn spawn + Send + 'static>(&self, future: T); +} + +/// Polls a future and either returns true if it is ready or spawns it on the tokio runtime if it is not. 
+pub fn poll_or_spawn( + mut fut: Pin>, callback: C, future_spawner: &S, +) -> Result +where + F: Future> + Send + 'static + ?Sized, + C: FnOnce() + Send + 'static, + S: FutureSpawner, +{ + let waker = dummy_waker(); + let mut cx = Context::from_waker(&waker); + + match fut.as_mut().poll(&mut cx) { + Poll::Ready(Ok(())) => Ok(true), + Poll::Ready(Err(_)) => Err(()), + Poll::Pending => { + println!("Future not ready, using tokio runtime"); + + let callback = Box::new(callback); + future_spawner.spawn(async move { + fut.await; + callback(); + }); + + Ok(false) + }, + } +} diff --git a/lightning/src/util/mod.rs b/lightning/src/util/mod.rs index e4b8b0b4429..9f685785c94 100644 --- a/lightning/src/util/mod.rs +++ b/lightning/src/util/mod.rs @@ -32,7 +32,7 @@ pub mod ser; pub mod sweep; pub mod wakers; -pub(crate) mod async_poll; +pub mod async_poll; pub(crate) mod atomic_counter; pub(crate) mod byte_utils; pub mod hash_tables; diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs index 6f1f9d0862a..c6cec0c0bba 100644 --- a/lightning/src/util/persist.rs +++ b/lightning/src/util/persist.rs @@ -19,7 +19,6 @@ use core::str::FromStr; use crate::prelude::*; use crate::{io, log_error}; -use crate::chain; use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator}; use crate::chain::chainmonitor::Persist; use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate}; @@ -29,9 +28,12 @@ use crate::ln::types::ChannelId; use crate::routing::gossip::NetworkGraph; use crate::routing::scoring::WriteableScore; use crate::sign::{ecdsa::EcdsaChannelSigner, EntropySource, SignerProvider}; +use crate::sync::Arc; use crate::util::logger::Logger; use crate::util::ser::{Readable, ReadableArgs, Writeable}; +use super::async_poll::{AsyncResult, AsyncResultType, AsyncVoid}; + /// The alphabet of characters allowed for namespaces and keys. pub const KVSTORE_NAMESPACE_KEY_ALPHABET: &str = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-"; @@ -134,9 +136,9 @@ pub trait KVStore { /// /// Will create the given `primary_namespace` and `secondary_namespace` if not already present /// in the store. - fn write( + fn write_async( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8], - ) -> Result<(), io::Error>; + ) -> AsyncResultType<'static, (), io::Error>; /// Removes any data that had previously been persisted under the given `key`. /// /// If the `lazy` flag is set to `true`, the backend implementation might choose to lazily @@ -186,14 +188,22 @@ pub trait MigratableKVStore: KVStore { /// /// Will abort and return an error if any IO operation fails. Note that in this case the /// `target_store` might get left in an intermediate state. -pub fn migrate_kv_store_data( +pub async fn migrate_kv_store_data( source_store: &mut S, target_store: &mut T, ) -> Result<(), io::Error> { let keys_to_migrate = source_store.list_all_keys()?; for (primary_namespace, secondary_namespace, key) in &keys_to_migrate { let data = source_store.read(primary_namespace, secondary_namespace, key)?; - target_store.write(primary_namespace, secondary_namespace, key, &data)?; + target_store + .write_async(primary_namespace, secondary_namespace, key, &data) + .await + .map_err(|_| { + io::Error::new( + io::ErrorKind::Other, + "Failed to write data to target store during migration", + ) + })?; } Ok(()) @@ -211,32 +221,44 @@ where /// Persist the given ['ChannelManager'] to disk, returning an error if persistence failed. 
/// /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager - fn persist_manager(&self, channel_manager: &CM) -> Result<(), io::Error>; + fn persist_manager(&self, channel_manager: &CM) -> AsyncResultType<'static, (), io::Error>; /// Persist the given [`NetworkGraph`] to disk, returning an error if persistence failed. - fn persist_graph(&self, network_graph: &NetworkGraph) -> Result<(), io::Error>; + fn persist_graph( + &self, network_graph: &NetworkGraph, + ) -> AsyncResultType<'static, (), io::Error>; /// Persist the given [`WriteableScore`] to disk, returning an error if persistence failed. - fn persist_scorer(&self, scorer: &S) -> Result<(), io::Error>; + fn persist_scorer(&self, scorer: &S) -> AsyncResultType<'static, (), io::Error>; } -impl<'a, A: KVStore + ?Sized, CM: Deref, L: Deref, S: Deref> Persister<'a, CM, L, S> for A +impl<'a, A: KVStore + ?Sized + Send + Sync + 'static, CM: Deref, L: Deref, S: Deref> + Persister<'a, CM, L, S> for Arc where CM::Target: 'static + AChannelManager, L::Target: 'static + Logger, S::Target: WriteableScore<'a>, { - fn persist_manager(&self, channel_manager: &CM) -> Result<(), io::Error> { - self.write( - CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, - CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, - CHANNEL_MANAGER_PERSISTENCE_KEY, - &channel_manager.get_cm().encode(), - ) + fn persist_manager(&self, channel_manager: &CM) -> AsyncResultType<'static, (), io::Error> { + let encoded = channel_manager.get_cm().encode(); + let kv_store = self.clone(); + + Box::pin(async move { + kv_store + .write_async( + CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_KEY, + &encoded, + ) + .await + }) } - fn persist_graph(&self, network_graph: &NetworkGraph) -> Result<(), io::Error> { - self.write( + fn persist_graph( + &self, network_graph: &NetworkGraph, + ) -> AsyncResultType<'static, (), io::Error> { + self.write_async( NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_KEY, @@ -244,8 +266,8 @@ where ) } - fn persist_scorer(&self, scorer: &S) -> Result<(), io::Error> { - self.write( + fn persist_scorer(&self, scorer: &S) -> AsyncResultType<'static, (), io::Error> { + self.write_async( SCORER_PERSISTENCE_PRIMARY_NAMESPACE, SCORER_PERSISTENCE_SECONDARY_NAMESPACE, SCORER_PERSISTENCE_KEY, @@ -254,7 +276,9 @@ where } } -impl Persist for K { +impl + Persist for Arc +{ // TODO: We really need a way for the persister to inform the user that its time to crash/shut // down once these start returning failure. 
// Then we should return InProgress rather than UnrecoverableError, implying we should probably @@ -262,58 +286,75 @@ impl Persist, - ) -> chain::ChannelMonitorUpdateStatus { - match self.write( - CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, - CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, - &monitor_name.to_string(), - &monitor.encode(), - ) { - Ok(()) => chain::ChannelMonitorUpdateStatus::Completed, - Err(_) => chain::ChannelMonitorUpdateStatus::UnrecoverableError, - } + ) -> AsyncResult<'static, ()> { + let encoded = monitor.encode(); + let kv_store = self.clone(); + + Box::pin(async move { + kv_store + .write_async( + CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, + &monitor_name.to_string(), + &encoded, + ) + .await + .map_err(|_| ()) + }) } fn update_persisted_channel( &self, monitor_name: MonitorName, _update: Option<&ChannelMonitorUpdate>, monitor: &ChannelMonitor, - ) -> chain::ChannelMonitorUpdateStatus { - match self.write( - CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, - CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, - &monitor_name.to_string(), - &monitor.encode(), - ) { - Ok(()) => chain::ChannelMonitorUpdateStatus::Completed, - Err(_) => chain::ChannelMonitorUpdateStatus::UnrecoverableError, - } + ) -> AsyncResult<'static, ()> { + let encoded = monitor.encode(); + let kv_store = self.clone(); + + Box::pin(async move { + kv_store + .write_async( + CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, + &monitor_name.to_string(), + &encoded, + ) + .await + .map_err(|_| ()) + }) } - fn archive_persisted_channel(&self, monitor_name: MonitorName) { - let monitor_key = monitor_name.to_string(); - let monitor = match self.read( - CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, - CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, - monitor_key.as_str(), - ) { - Ok(monitor) => monitor, - Err(_) => return, - }; - match self.write( - ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, - ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, - monitor_key.as_str(), - &monitor, - ) { - Ok(()) => {}, - Err(_e) => return, - }; - let _ = self.remove( - CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, - CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, - monitor_key.as_str(), - true, - ); + fn archive_persisted_channel(&self, monitor_name: MonitorName) -> AsyncVoid { + let kv_store = self.clone(); + + Box::pin(async move { + let monitor_key = monitor_name.to_string(); + let monitor = match kv_store.read( + CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, + monitor_key.as_str(), + ) { + Ok(monitor) => monitor, + Err(_) => return, + }; + match kv_store + .write_async( + ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, + ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, + monitor_key.as_str(), + &monitor, + ) + .await + { + Ok(()) => {}, + Err(_e) => return, + }; + let _ = kv_store.remove( + CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, + monitor_key.as_str(), + true, + ); + }) } } @@ -447,6 +488,18 @@ where /// would like to get rid of them, consider using the /// [`MonitorUpdatingPersister::cleanup_stale_updates`] function. 
pub struct MonitorUpdatingPersister +where + K::Target: KVStore, + L::Target: Logger, + ES::Target: EntropySource + Sized, + SP::Target: SignerProvider + Sized, + BI::Target: BroadcasterInterface, + FE::Target: FeeEstimator, +{ + state: Arc>, +} + +struct MonitorUpdatingPersisterState where K::Target: KVStore, L::Target: Logger, @@ -466,7 +519,7 @@ where #[allow(dead_code)] impl - MonitorUpdatingPersister + MonitorUpdatingPersisterState where K::Target: KVStore, L::Target: Logger, @@ -495,7 +548,7 @@ where kv_store: K, logger: L, maximum_pending_updates: u64, entropy_source: ES, signer_provider: SP, broadcaster: BI, fee_estimator: FE, ) -> Self { - MonitorUpdatingPersister { + MonitorUpdatingPersisterState { kv_store, logger, maximum_pending_updates, @@ -687,19 +740,19 @@ where } impl< - ChannelSigner: EcdsaChannelSigner, - K: Deref, - L: Deref, - ES: Deref, - SP: Deref, - BI: Deref, - FE: Deref, + ChannelSigner: EcdsaChannelSigner + Send + Sync, + K: Deref + Send + Sync + 'static, + L: Deref + Send + Sync + 'static, + ES: Deref + Send + Sync + 'static, + SP: Deref + Send + Sync + 'static, + BI: Deref + Send + Sync + 'static, + FE: Deref + Send + Sync + 'static, > Persist for MonitorUpdatingPersister where - K::Target: KVStore, + K::Target: KVStore + Sync, L::Target: Logger, ES::Target: EntropySource + Sized, - SP::Target: SignerProvider + Sized, + SP::Target: SignerProvider + Sync + Sized, BI::Target: BroadcasterInterface, FE::Target: FeeEstimator, { @@ -707,34 +760,119 @@ where /// parametrized [`KVStore`]. fn persist_new_channel( &self, monitor_name: MonitorName, monitor: &ChannelMonitor, - ) -> chain::ChannelMonitorUpdateStatus { - // Determine the proper key for this monitor - let monitor_key = monitor_name.to_string(); + ) -> AsyncResult<'static, ()> { + let state = self.state.clone(); + + let encoded_monitor = Self::encode_monitor(monitor); + + Box::pin(async move { state.persist_new_channel(monitor_name, &encoded_monitor).await }) + } + + /// Persists a channel update, writing only the update to the parameterized [`KVStore`] if possible. + /// + /// In some cases, this will forward to [`MonitorUpdatingPersister::persist_new_channel`]: + /// + /// - No full monitor is found in [`KVStore`] + /// - The number of pending updates exceeds `maximum_pending_updates` as given to [`Self::new`] + /// - LDK commands re-persisting the entire monitor through this function, specifically when + /// `update` is `None`. + /// - The update is at [`u64::MAX`], indicating an update generated by pre-0.1 LDK. 
+ fn update_persisted_channel( + &self, monitor_name: MonitorName, update: Option<&ChannelMonitorUpdate>, + monitor: &ChannelMonitor, + ) -> AsyncResult<'static, ()> { + let state = self.state.clone(); + + let encoded_monitor = Self::encode_monitor(monitor); + let encoded_update = update.map(|update| (update.update_id, update.encode())); + let monitor_latest_update_id = monitor.get_latest_update_id(); + + Box::pin(async move { + state + .update_persisted_channel( + monitor_name, + encoded_update, + &encoded_monitor, + monitor_latest_update_id, + ) + .await + }) + } + + fn archive_persisted_channel(&self, monitor_name: MonitorName) -> AsyncVoid { + let monitor_name = monitor_name; + let state = self.state.clone(); + + Box::pin(async move { + state.archive_persisted_channel(monitor_name).await; + }) + } +} + +impl< + K: Deref + Send + Sync + 'static, + L: Deref + Send + Sync + 'static, + ES: Deref + Send + Sync + 'static, + SP: Deref + Send + Sync + 'static, + BI: Deref + Send + Sync + 'static, + FE: Deref + Send + Sync + 'static, + > MonitorUpdatingPersister +where + K::Target: KVStore + Sync, + L::Target: Logger, + ES::Target: EntropySource + Sized, + SP::Target: SignerProvider + Sync + Sized, + BI::Target: BroadcasterInterface, + FE::Target: FeeEstimator, +{ + fn encode_monitor( + monitor: &ChannelMonitor, + ) -> Vec { // Serialize and write the new monitor let mut monitor_bytes = Vec::with_capacity( MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL.len() + monitor.serialized_length(), ); monitor_bytes.extend_from_slice(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL); monitor.write(&mut monitor_bytes).unwrap(); - match self.kv_store.write( - CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, - CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, - monitor_key.as_str(), - &monitor_bytes, - ) { - Ok(_) => chain::ChannelMonitorUpdateStatus::Completed, - Err(e) => { - log_error!( - self.logger, - "Failed to write ChannelMonitor {}/{}/{} reason: {}", - CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, - CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, - monitor_key.as_str(), - e - ); - chain::ChannelMonitorUpdateStatus::UnrecoverableError - }, - } + + monitor_bytes + } +} + +impl< + K: Deref + Send + Sync + 'static, + L: Deref + Send + Sync + 'static, + ES: Deref + Send + Sync + 'static, + SP: Deref + Send + Sync + 'static, + BI: Deref + Send + Sync + 'static, + FE: Deref + Send + Sync + 'static, + > MonitorUpdatingPersisterState +where + K::Target: KVStore + Sync, + L::Target: Logger, + ES::Target: EntropySource + Sized, + SP::Target: SignerProvider + Sync + Sized, + BI::Target: BroadcasterInterface, + FE::Target: FeeEstimator, +{ + /// Persists a new channel. This means writing the entire monitor to the + /// parametrized [`KVStore`]. + async fn persist_new_channel( + self: Arc, monitor_name: MonitorName, monitor_bytes: &[u8], + ) -> Result<(), ()> { + // Determine the proper key for this monitor + let monitor_key = monitor_name.to_string(); + + // Serialize and write the new monitor + self.kv_store + .write_async( + CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, + monitor_key.as_str(), + &monitor_bytes, + ) + .await + .map_err(|_| ()) } /// Persists a channel update, writing only the update to the parameterized [`KVStore`] if possible. @@ -746,40 +884,30 @@ where /// - LDK commands re-persisting the entire monitor through this function, specifically when /// `update` is `None`. /// - The update is at [`u64::MAX`], indicating an update generated by pre-0.1 LDK. 
- fn update_persisted_channel( - &self, monitor_name: MonitorName, update: Option<&ChannelMonitorUpdate>, - monitor: &ChannelMonitor, - ) -> chain::ChannelMonitorUpdateStatus { + async fn update_persisted_channel( + self: Arc, monitor_name: MonitorName, update: Option<(u64, Vec)>, monitor: &[u8], + monitor_latest_update_id: u64, + ) -> Result<(), ()> { const LEGACY_CLOSED_CHANNEL_UPDATE_ID: u64 = u64::MAX; - if let Some(update) = update { - let persist_update = update.update_id != LEGACY_CLOSED_CHANNEL_UPDATE_ID - && update.update_id % self.maximum_pending_updates != 0; + if let Some((update_id, update)) = update { + let persist_update = update_id != LEGACY_CLOSED_CHANNEL_UPDATE_ID + && update_id % self.maximum_pending_updates != 0; if persist_update { let monitor_key = monitor_name.to_string(); - let update_name = UpdateName::from(update.update_id); - match self.kv_store.write( - CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, - monitor_key.as_str(), - update_name.as_str(), - &update.encode(), - ) { - Ok(()) => chain::ChannelMonitorUpdateStatus::Completed, - Err(e) => { - log_error!( - self.logger, - "Failed to write ChannelMonitorUpdate {}/{}/{} reason: {}", - CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, - monitor_key.as_str(), - update_name.as_str(), - e - ); - chain::ChannelMonitorUpdateStatus::UnrecoverableError - }, - } + let update_name = UpdateName::from(update_id); + self.kv_store + .write_async( + CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, + monitor_key.as_str(), + update_name.as_str(), + &update, + ) + .await + .map_err(|_| ()) } else { // In case of channel-close monitor update, we need to read old monitor before persisting // the new one in order to determine the cleanup range. - let maybe_old_monitor = match monitor.get_latest_update_id() { + let maybe_old_monitor = match monitor_latest_update_id { LEGACY_CLOSED_CHANNEL_UPDATE_ID => { let monitor_key = monitor_name.to_string(); self.read_monitor(&monitor_name, &monitor_key).ok() @@ -788,11 +916,12 @@ where }; // We could write this update, but it meets criteria of our design that calls for a full monitor write. - let monitor_update_status = self.persist_new_channel(monitor_name, monitor); + let monitor_update_status = + self.clone().persist_new_channel(monitor_name, &monitor).await; - if let chain::ChannelMonitorUpdateStatus::Completed = monitor_update_status { + if monitor_update_status.is_ok() { let channel_closed_legacy = - monitor.get_latest_update_id() == LEGACY_CLOSED_CHANNEL_UPDATE_ID; + monitor_latest_update_id == LEGACY_CLOSED_CHANNEL_UPDATE_ID; let cleanup_range = if channel_closed_legacy { // If there is an error while reading old monitor, we skip clean up. maybe_old_monitor.map(|(_, ref old_monitor)| { @@ -805,7 +934,7 @@ where (start, end) }) } else { - let end = monitor.get_latest_update_id(); + let end = monitor_latest_update_id; let start = end.saturating_sub(self.maximum_pending_updates); Some((start, end)) }; @@ -819,22 +948,26 @@ where } } else { // There is no update given, so we must persist a new monitor. 
- self.persist_new_channel(monitor_name, monitor) + self.persist_new_channel(monitor_name, &monitor).await } } - fn archive_persisted_channel(&self, monitor_name: MonitorName) { + async fn archive_persisted_channel(&self, monitor_name: MonitorName) { let monitor_key = monitor_name.to_string(); let monitor = match self.read_channel_monitor_with_updates(&monitor_key) { Ok((_block_hash, monitor)) => monitor, Err(_) => return, }; - match self.kv_store.write( - ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, - ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, - monitor_key.as_str(), - &monitor.encode(), - ) { + match self + .kv_store + .write_async( + ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, + ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, + monitor_key.as_str(), + &monitor.encode(), + ) + .await + { Ok(()) => {}, Err(_e) => return, }; @@ -848,7 +981,7 @@ where } impl - MonitorUpdatingPersister + MonitorUpdatingPersisterState where ES::Target: EntropySource + Sized, K::Target: KVStore, diff --git a/lightning/src/util/sweep.rs b/lightning/src/util/sweep.rs index 0fae91bebc2..a8e4a0a1ded 100644 --- a/lightning/src/util/sweep.rs +++ b/lightning/src/util/sweep.rs @@ -411,7 +411,7 @@ where /// Returns `Err` on persistence failure, in which case the call may be safely retried. /// /// [`Event::SpendableOutputs`]: crate::events::Event::SpendableOutputs - pub fn track_spendable_outputs( + pub async fn track_spendable_outputs( &self, output_descriptors: Vec, channel_id: Option, exclude_static_outputs: bool, delay_until_height: Option, ) -> Result<(), ()> { @@ -444,7 +444,7 @@ where state_lock.outputs.push(output_info); } - self.persist_state(&*state_lock).map_err(|e| { + self.persist_state(&*state_lock).await.map_err(|e| { log_error!(self.logger, "Error persisting OutputSweeper: {:?}", e); }) } @@ -560,7 +560,7 @@ where output_info.status.broadcast(cur_hash, cur_height, spending_tx.clone()); } - self.persist_state(&sweeper_state).map_err(|e| { + self.persist_state(&sweeper_state).await.map_err(|e| { log_error!(self.logger, "Error persisting OutputSweeper: {:?}", e); })?; @@ -590,14 +590,15 @@ where }); } - fn persist_state(&self, sweeper_state: &SweeperState) -> Result<(), io::Error> { + async fn persist_state(&self, sweeper_state: &SweeperState) -> Result<(), io::Error> { self.kv_store - .write( + .write_async( OUTPUT_SWEEPER_PERSISTENCE_PRIMARY_NAMESPACE, OUTPUT_SWEEPER_PERSISTENCE_SECONDARY_NAMESPACE, OUTPUT_SWEEPER_PERSISTENCE_KEY, &sweeper_state.encode(), ) + .await .map_err(|e| { log_error!( self.logger, @@ -674,9 +675,9 @@ where self.transactions_confirmed_internal(&mut *state_lock, header, txdata, height); self.best_block_updated_internal(&mut *state_lock, header, height); - let _ = self.persist_state(&*state_lock).map_err(|e| { - log_error!(self.logger, "Error persisting OutputSweeper: {:?}", e); - }); + // let _ = self.persist_state(&*state_lock).map_err(|e| { + // log_error!(self.logger, "Error persisting OutputSweeper: {:?}", e); + // }); } fn block_disconnected(&self, header: &Header, height: u32) { @@ -698,9 +699,9 @@ where } } - self.persist_state(&*state_lock).unwrap_or_else(|e| { - log_error!(self.logger, "Error persisting OutputSweeper: {:?}", e); - }); + // self.persist_state(&*state_lock).unwrap_or_else(|e| { + // log_error!(self.logger, "Error persisting OutputSweeper: {:?}", e); + // }); } } @@ -720,9 +721,9 @@ where ) { let mut state_lock = self.sweeper_state.lock().unwrap(); self.transactions_confirmed_internal(&mut *state_lock, header, 
txdata, height); - self.persist_state(&*state_lock).unwrap_or_else(|e| { - log_error!(self.logger, "Error persisting OutputSweeper: {:?}", e); - }); + // self.persist_state(&*state_lock).unwrap_or_else(|e| { + // log_error!(self.logger, "Error persisting OutputSweeper: {:?}", e); + // }); } fn transaction_unconfirmed(&self, txid: &Txid) { @@ -743,18 +744,18 @@ where .filter(|o| o.status.confirmation_height() >= Some(unconf_height)) .for_each(|o| o.status.unconfirmed()); - self.persist_state(&*state_lock).unwrap_or_else(|e| { - log_error!(self.logger, "Error persisting OutputSweeper: {:?}", e); - }); + // self.persist_state(&*state_lock).unwrap_or_else(|e| { + // log_error!(self.logger, "Error persisting OutputSweeper: {:?}", e); + // }); } } fn best_block_updated(&self, header: &Header, height: u32) { let mut state_lock = self.sweeper_state.lock().unwrap(); self.best_block_updated_internal(&mut *state_lock, header, height); - let _ = self.persist_state(&*state_lock).map_err(|e| { - log_error!(self.logger, "Error persisting OutputSweeper: {:?}", e); - }); + // let _ = self.persist_state(&*state_lock).map_err(|e| { + // log_error!(self.logger, "Error persisting OutputSweeper: {:?}", e); + // }); } fn get_relevant_txids(&self) -> Vec<(Txid, u32, Option)> { @@ -970,16 +971,18 @@ where } /// Tells the sweeper to track the given outputs descriptors. Wraps [`OutputSweeper::track_spendable_outputs`]. - pub fn track_spendable_outputs( + pub async fn track_spendable_outputs( &self, output_descriptors: Vec, channel_id: Option, exclude_static_outputs: bool, delay_until_height: Option, ) -> Result<(), ()> { - self.sweeper.track_spendable_outputs( - output_descriptors, - channel_id, - exclude_static_outputs, - delay_until_height, - ) + self.sweeper + .track_spendable_outputs( + output_descriptors, + channel_id, + exclude_static_outputs, + delay_until_height, + ) + .await } /// Returns a list of the currently tracked spendable outputs. Wraps [`OutputSweeper::tracked_spendable_outputs`].
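Existing `KVStore` implementations that are inherently blocking can migrate to the new `write_async` the same way `FilesystemStore` does in this patch: perform the write synchronously and return an already-resolved future. A minimal sketch of that pattern follows; `MemoryStore` and its field are hypothetical, and a real implementation would also provide `read`, `remove` and `list`.

use std::collections::HashMap;
use std::sync::Mutex;

use lightning::io;
use lightning::util::async_poll::AsyncResultType;

// Hypothetical in-memory store, used only to show the shape of `write_async`.
struct MemoryStore {
	entries: Mutex<HashMap<String, Vec<u8>>>,
}

impl MemoryStore {
	fn write_async(
		&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8],
	) -> AsyncResultType<'static, (), io::Error> {
		// Do the blocking work up front, then hand back a ready future,
		// the same pattern FilesystemStore::write_async uses above.
		let full_key = format!("{}/{}/{}", primary_namespace, secondary_namespace, key);
		self.entries.lock().unwrap().insert(full_key, buf.to_vec());
		let res: Result<(), io::Error> = Ok(());
		Box::pin(async move { res })
	}
}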
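To make the sync/async split concrete: `poll_or_spawn` polls the persistence future once with the dummy waker, returns `Ok(true)` if it resolved inline (which `ChainMonitor` reports as `ChannelMonitorUpdateStatus::Completed`), spawns it together with the completion callback and returns `Ok(false)` (`InProgress`) if it is still pending, and returns `Err(())` on an inline failure (the unrecoverable path). A rough sketch of the completed path, with the generic bounds reconstructed from this patch and a tokio-backed spawner assumed as earlier:

use core::future::Future;
use core::pin::Pin;
use lightning::util::async_poll::{poll_or_spawn, FutureSpawner};

struct TokioSpawner(tokio::runtime::Handle);
impl FutureSpawner for TokioSpawner {
	fn spawn<T: Future<Output = ()> + Send + 'static>(&self, future: T) {
		self.0.spawn(future);
	}
}

fn main() {
	let rt = tokio::runtime::Runtime::new().unwrap();
	let spawner = TokioSpawner(rt.handle().clone());

	// An already-resolved persistence future: the single poll sees
	// Ready(Ok(())), the callback is dropped unused, and Ok(true) comes back.
	let done: Pin<Box<dyn Future<Output = Result<(), ()>> + Send>> =
		Box::pin(async { Ok::<(), ()>(()) });
	assert_eq!(poll_or_spawn(done, || {}, &spawner), Ok(true));

	// A future that returns Poll::Pending would instead be handed to the
	// spawner along with the callback, and Ok(false) returned immediately.
}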