diff --git a/Cargo.lock b/Cargo.lock index 08c69832ce..3090c0b468 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2915,7 +2915,7 @@ dependencies = [ [[package]] name = "massa-proto-rs" version = "0.1.0" -source = "git+https://github.com/massalabs/massa-proto-rs?rev=b5267178eaf266ec724691d7de163e4c34343416#b5267178eaf266ec724691d7de163e4c34343416" +source = "git+https://github.com/massalabs/massa-proto-rs?rev=c9a7c55914f3d308996113df0936f15d96490313#c9a7c55914f3d308996113df0936f15d96490313" dependencies = [ "glob", "prost", @@ -2929,7 +2929,7 @@ dependencies = [ [[package]] name = "massa-sc-runtime" version = "0.10.0" -source = "git+https://github.com/massalabs/massa-sc-runtime?rev=f5a584b9f8050f332c9ed332bd0a40f8e0372807#f5a584b9f8050f332c9ed332bd0a40f8e0372807" +source = "git+https://github.com/massalabs/massa-sc-runtime?rev=cd14c775aff31b8c0f49064613118f428f9dd45f#cd14c775aff31b8c0f49064613118f428f9dd45f" dependencies = [ "anyhow", "as-ffi-bindings", diff --git a/Cargo.toml b/Cargo.toml index 4842ee90c7..92009e4a3d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -111,9 +111,8 @@ massa_event_cache = { path = "./massa-event-cache" } # Massa projects dependencies # massa-proto-rs = { git = "https://github.com/massalabs/massa-proto-rs", branch = "deferred_calls" } # massa-sc-runtime = { git = "https://github.com/massalabs/massa-sc-runtime", "branch" = "deferred_calls" } -massa-proto-rs = { git = "https://github.com/massalabs/massa-proto-rs", "rev" = "b5267178eaf266ec724691d7de163e4c34343416" } -massa-sc-runtime = { git = "https://github.com/massalabs/massa-sc-runtime", "rev" = "f5a584b9f8050f332c9ed332bd0a40f8e0372807" } - +massa-proto-rs = { git = "https://github.com/massalabs/massa-proto-rs", "rev" = "c9a7c55914f3d308996113df0936f15d96490313" } +massa-sc-runtime = { git = "https://github.com/massalabs/massa-sc-runtime", "rev" = "cd14c775aff31b8c0f49064613118f428f9dd45f" } peernet = { git = "https://github.com/massalabs/PeerNet", "rev" = 
"04b05ddd320fbe76cc858115af7b5fc28bdb8310" } # Dev only - use local dependencies diff --git a/massa-api-exports/src/node.rs b/massa-api-exports/src/node.rs index 9676aab2dd..117978a1a2 100644 --- a/massa-api-exports/src/node.rs +++ b/massa-api-exports/src/node.rs @@ -46,6 +46,8 @@ pub struct NodeStatus { pub chain_id: u64, /// minimal fees to include an operation in a block pub minimal_fees: Amount, + /// current mip version + pub current_mip_version: u32, } impl std::fmt::Display for NodeStatus { diff --git a/massa-api/src/public.rs b/massa-api/src/public.rs index 391120833d..1b0ea13e0d 100644 --- a/massa-api/src/public.rs +++ b/massa-api/src/public.rs @@ -458,6 +458,12 @@ impl MassaRpcServer for API { let config = CompactConfig::default(); let now = MassaTime::now(); + let current_mip_version = self + .0 + .keypair_factory + .mip_store + .get_network_version_current(); + let last_slot_result = get_latest_block_slot_at_timestamp( api_settings.thread_count, api_settings.t0, @@ -555,6 +561,7 @@ impl MassaRpcServer for API { current_cycle, chain_id: self.0.api_settings.chain_id, minimal_fees: self.0.api_settings.minimal_fees, + current_mip_version, }) } @@ -1160,6 +1167,16 @@ impl MassaRpcServer for API { &self, req: Vec, ) -> RpcResult> { + let current_network_version = self + .0 + .keypair_factory + .mip_store + .get_network_version_current(); + + if current_network_version < 1 { + return Err(ApiError::NotFound.into()); + } + if req.len() as u64 > self.0.api_settings.max_arguments { return Err(ApiError::BadRequest("too many arguments".into()).into()); } @@ -1205,6 +1222,16 @@ impl MassaRpcServer for API { &self, arg: Vec, ) -> RpcResult> { + let current_network_version = self + .0 + .keypair_factory + .mip_store + .get_network_version_current(); + + if current_network_version < 1 { + return Err(ApiError::NotFound.into()); + } + if arg.len() as u64 > self.0.api_settings.max_arguments { return Err(ApiError::BadRequest("too many arguments".into()).into()); } @@ 
-1245,6 +1272,16 @@ impl MassaRpcServer for API { &self, slots: Vec, ) -> RpcResult> { + let current_network_version = self + .0 + .keypair_factory + .mip_store + .get_network_version_current(); + + if current_network_version < 1 { + return Err(ApiError::NotFound.into()); + } + if slots.len() as u64 > self.0.api_settings.max_arguments { return Err(ApiError::BadRequest("too many arguments".into()).into()); } diff --git a/massa-bootstrap/src/tests/universe_server.rs b/massa-bootstrap/src/tests/universe_server.rs index 8ca827e1dd..3ec34d77e8 100644 --- a/massa-bootstrap/src/tests/universe_server.rs +++ b/massa-bootstrap/src/tests/universe_server.rs @@ -202,7 +202,7 @@ impl BootstrapServerTestUniverseBuilder { let mut batch = DBBatch::default(); let versioning_batch = DBBatch::default(); self.final_ledger - .apply_changes_to_batch(ledger_changes, &mut batch); + .apply_changes_to_batch(ledger_changes, &mut batch, 1); self.controllers .database .write() @@ -216,7 +216,7 @@ impl BootstrapServerTestUniverseBuilder { let mut batch = DBBatch::default(); let versioning_batch = DBBatch::default(); self.final_ledger - .apply_changes_to_batch(ledger_changes, &mut batch); + .apply_changes_to_batch(ledger_changes, &mut batch, 1); self.controllers .database .write() @@ -232,7 +232,7 @@ impl BootstrapServerTestUniverseBuilder { let mut batch = DBBatch::default(); let versioning_batch = DBBatch::default(); self.final_ledger - .apply_changes_to_batch(ledger_changes, &mut batch); + .apply_changes_to_batch(ledger_changes, &mut batch, 1); self.controllers .database .write() diff --git a/massa-client/src/display.rs b/massa-client/src/display.rs index 5973228ef5..f7bdc43301 100644 --- a/massa-client/src/display.rs +++ b/massa-client/src/display.rs @@ -275,6 +275,7 @@ impl Output for NodeStatus { println!(); println!("Chain id: {}", self.chain_id); + println!("Current MIP version: {}", self.current_mip_version); } } diff --git a/massa-event-cache/src/event_cache.rs 
b/massa-event-cache/src/event_cache.rs index fb51220235..0000ddc880 100644 --- a/massa-event-cache/src/event_cache.rs +++ b/massa-event-cache/src/event_cache.rs @@ -944,7 +944,7 @@ mod tests { use tempfile::TempDir; // internal use massa_models::config::{ - MAX_EVENT_DATA_SIZE, MAX_EVENT_PER_OPERATION, MAX_OPERATIONS_PER_BLOCK, + MAX_EVENT_DATA_SIZE_V1, MAX_EVENT_PER_OPERATION, MAX_OPERATIONS_PER_BLOCK, MAX_RECURSIVE_CALLS_DEPTH, THREAD_COUNT, }; use massa_models::operation::OperationId; @@ -959,7 +959,7 @@ mod tests { 300, THREAD_COUNT, MAX_RECURSIVE_CALLS_DEPTH, - MAX_EVENT_DATA_SIZE as u64, + MAX_EVENT_DATA_SIZE_V1 as u64, MAX_EVENT_PER_OPERATION as u64, MAX_OPERATIONS_PER_BLOCK as u64, 5000, // MAX_EVENTS_PER_QUERY, diff --git a/massa-execution-exports/src/mapping_grpc.rs b/massa-execution-exports/src/mapping_grpc.rs index 425d8d2942..0be923d81d 100644 --- a/massa-execution-exports/src/mapping_grpc.rs +++ b/massa-execution-exports/src/mapping_grpc.rs @@ -21,6 +21,7 @@ use massa_proto_rs::massa::model::v1 as grpc_model; /// Convert a `grpc_api::ScExecutionEventsRequest` to a `ScExecutionEventsRequest` pub fn to_querystate_filter( query: grpc_api::ExecutionQueryRequestItem, + network_version: u32, ) -> Result { if let Some(item) = query.request_item { match item { @@ -135,6 +136,12 @@ pub fn to_querystate_filter( Ok(ExecutionQueryRequestItem::Events(event_filter)) } exec::RequestItem::DeferredCallQuote(value) => { + if network_version < 1 { + return Err(ModelsError::InvalidVersionError( + "deferred call quote is not supported in this network version".to_string(), + )); + } + Ok(ExecutionQueryRequestItem::DeferredCallQuote { target_slot: value .target_slot @@ -147,10 +154,22 @@ pub fn to_querystate_filter( }) } exec::RequestItem::DeferredCallInfo(info) => { + if network_version < 1 { + return Err(ModelsError::InvalidVersionError( + "deferred call info is not supported in this network version".to_string(), + )); + } + let id =
DeferredCallId::from_str(&info.call_id)?; Ok(ExecutionQueryRequestItem::DeferredCallInfo(id)) } exec::RequestItem::DeferredCallsBySlot(value) => { + if network_version < 1 { + return Err(ModelsError::InvalidVersionError( + "deferred calls by slot is not supported in this network version".to_string(), + )); + } + Ok(ExecutionQueryRequestItem::DeferredCallsBySlot( value .slot diff --git a/massa-execution-exports/src/settings.rs b/massa-execution-exports/src/settings.rs index 7ddc042e19..24d91d7ad0 100644 --- a/massa-execution-exports/src/settings.rs +++ b/massa-execution-exports/src/settings.rs @@ -92,7 +92,9 @@ pub struct ExecutionConfig { /// slot execution outputs channel capacity pub broadcast_slot_execution_output_channel_capacity: usize, /// max size of event data, in bytes - pub max_event_size: usize, + pub max_event_size_v0: usize, + /// max size of event data, in bytes (execution component version 1 and later) + pub max_event_size_v1: usize, /// chain id pub chain_id: u64, /// whether slot execution traces broadcast is enabled diff --git a/massa-execution-exports/src/test_exports/config.rs b/massa-execution-exports/src/test_exports/config.rs index 06cb1cb3dc..718f555a16 100644 --- a/massa-execution-exports/src/test_exports/config.rs +++ b/massa-execution-exports/src/test_exports/config.rs @@ -93,7 +93,8 @@ impl Default for ExecutionConfig { denunciation_expire_periods: DENUNCIATION_EXPIRE_PERIODS, broadcast_enabled: true, broadcast_slot_execution_output_channel_capacity: 5000, - max_event_size: 512, + max_event_size_v0: 50_000, + max_event_size_v1: 512, max_event_per_operation: 25, max_function_length: 1000, max_parameter_length: 1000, diff --git a/massa-execution-worker/Cargo.toml b/massa-execution-worker/Cargo.toml index c4fc5cc791..b99db0e25e 100644 --- a/massa-execution-worker/Cargo.toml +++ b/massa-execution-worker/Cargo.toml @@ -28,6 +28,7 @@ test-exports = [ "massa_pos_worker", "massa_metrics/test-exports", "massa_metrics/test-exports", + "massa_versioning/test-exports", "massa_db_worker",
"massa_event_cache/test-exports", "tempfile", @@ -109,6 +110,7 @@ massa_metrics = { workspace = true, features = ["test-exports"] } massa_db_worker = { workspace = true } tempfile = { workspace = true } massa_test_framework = { workspace = true, "features" = ["test-exports"] } +massa_versioning = { workspace = true, "features" = ["test-exports"] } tokio = { workspace = true, features = ["sync"] } hex-literal = { workspace = true } mockall = { workspace = true } diff --git a/massa-execution-worker/src/active_history.rs b/massa-execution-worker/src/active_history.rs index db6079080b..1661adb05b 100644 --- a/massa-execution-worker/src/active_history.rs +++ b/massa-execution-worker/src/active_history.rs @@ -68,6 +68,7 @@ impl ActiveHistory { &self, message_id: &AsyncMessageId, mut current_updates: AsyncMessageUpdate, + execution_component_version: u32, ) -> HistorySearchResult> { for history_element in self.0.iter().rev() { match history_element @@ -81,11 +82,16 @@ impl ActiveHistory { msg.apply(current_updates); return HistorySearchResult::Present(SetUpdateOrDelete::Set(msg)); } - Some(SetUpdateOrDelete::Update(msg_update)) => { - let mut combined_message_update = msg_update.clone(); - combined_message_update.apply(current_updates); - current_updates = combined_message_update; - } + Some(SetUpdateOrDelete::Update(msg_update)) => match execution_component_version { + 0 => { + current_updates.apply(msg_update.clone()); + } + _ => { + let mut combined_message_update = msg_update.clone(); + combined_message_update.apply(current_updates); + current_updates = combined_message_update; + } + }, Some(SetUpdateOrDelete::Delete) => return HistorySearchResult::Absent, _ => (), } @@ -498,14 +504,14 @@ mod test { // Test fetch_message with message_id (expect HistorySearchResult::Absent) { let current_updates = AsyncMessageUpdate::default(); - let fetched = active_history.fetch_message(&message_id, current_updates); + let fetched = active_history.fetch_message(&message_id,
current_updates, 1); assert_matches!(fetched, HistorySearchResult::Absent); } // Test fetch_message with message_id_2 (expect HistorySearchResult::Set) { let current_updates = AsyncMessageUpdate::default(); - let fetched = active_history.fetch_message(&message_id_2, current_updates); + let fetched = active_history.fetch_message(&message_id_2, current_updates, 1); if let HistorySearchResult::Present(SetUpdateOrDelete::Set(msg)) = fetched { assert_eq!(msg, msg_2); @@ -526,7 +532,7 @@ mod test { validity_end: SetOrKeep::Set(validity_end_new), ..Default::default() }; - let fetched = active_history.fetch_message(&message_id_2, current_updates); + let fetched = active_history.fetch_message(&message_id_2, current_updates, 1); if let HistorySearchResult::Present(SetUpdateOrDelete::Set(msg)) = fetched { assert_ne!(msg, msg_2); @@ -542,7 +548,7 @@ mod test { // Test fetch_message with message_id_3 (expect HistorySearchResult::Present) { let current_updates = AsyncMessageUpdate::default(); - let fetched = active_history.fetch_message(&message_id_3, current_updates); + let fetched = active_history.fetch_message(&message_id_3, current_updates, 1); if let HistorySearchResult::Present(SetUpdateOrDelete::Set(msg)) = fetched { // Check the updates were applied correctly @@ -561,7 +567,7 @@ mod test { // Expect updates to be empty (or default) here { let current_updates = AsyncMessageUpdate::default(); - let fetched = active_history.fetch_message(&message_id_3_2, current_updates); + let fetched = active_history.fetch_message(&message_id_3_2, current_updates, 1); if let HistorySearchResult::Present(SetUpdateOrDelete::Update(updates)) = fetched { assert_eq!(updates, AsyncMessageUpdate::default()); } else { diff --git a/massa-execution-worker/src/context.rs b/massa-execution-worker/src/context.rs index 7ead166159..6ecaaeff50 100644 --- a/massa-execution-worker/src/context.rs +++ b/massa-execution-worker/src/context.rs @@ -43,9 +43,10 @@ use massa_models::{ }; use 
massa_module_cache::controller::ModuleCache; use massa_pos_exports::PoSChanges; +use massa_sc_runtime::CondomLimits; use massa_serialization::Serializer; use massa_versioning::address_factory::{AddressArgs, AddressFactory}; -use massa_versioning::versioning::MipStore; +use massa_versioning::versioning::{MipComponent, MipStore}; use massa_versioning::versioning_factory::{FactoryStrategy, VersioningFactory}; use parking_lot::RwLock; use rand::SeedableRng; @@ -196,6 +197,9 @@ pub struct ExecutionContext { /// so *excluding* the gas used by the last sc call. pub gas_remaining_before_subexecution: Option, + /// The version of the execution component + pub execution_component_version: u32, + /// recursion counter, incremented for each new nested call pub recursion_counter: u16, } @@ -221,6 +225,15 @@ impl ExecutionContext { mip_store: MipStore, execution_trail_hash: massa_hash::Hash, ) -> Self { + let slot = Slot::new(0, 0); + let ts = get_block_slot_timestamp( + config.thread_count, + config.t0, + config.genesis_timestamp, + slot, + ) + .expect("Time overflow when getting block slot timestamp for MIP"); + ExecutionContext { speculative_ledger: SpeculativeLedger::new( final_state.clone(), @@ -252,7 +265,7 @@ impl ExecutionContext { active_history, ), creator_min_balance: Default::default(), - slot: Slot::new(0, 0), + slot, created_addr_index: Default::default(), created_event_index: Default::default(), created_message_index: Default::default(), @@ -265,9 +278,13 @@ impl ExecutionContext { origin_operation_id: Default::default(), module_cache, config, - address_factory: AddressFactory { mip_store }, + address_factory: AddressFactory { + mip_store: mip_store.clone(), + }, execution_trail_hash, gas_remaining_before_subexecution: None, + execution_component_version: mip_store + .get_latest_component_version_at(&MipComponent::Execution, ts), recursion_counter: 0, } } @@ -309,8 +326,12 @@ impl ExecutionContext { serde_json::json!({ "massa_execution_error": format!("{}", error) 
}).to_string(), true, ); - if event.data.len() > self.config.max_event_size { - event.data.truncate(self.config.max_event_size); + let max_event_size = match self.execution_component_version { + 0 => self.config.max_event_size_v0, + _ => self.config.max_event_size_v1, + }; + if event.data.len() > max_event_size { + event.data.truncate(max_event_size); } self.event_emit(event); @@ -370,11 +391,21 @@ impl ExecutionContext { let execution_trail_hash = generate_execution_trail_hash(&prev_execution_trail_hash, &slot, None, true); + let ts = get_block_slot_timestamp( + config.thread_count, + config.t0, + config.genesis_timestamp, + slot, + ) + .expect("Time overflow when getting block slot timestamp for MIP"); + // return readonly context ExecutionContext { slot, stack: call_stack, read_only: true, + execution_component_version: mip_store + .get_latest_component_version_at(&MipComponent::Execution, ts), ..ExecutionContext::new( config, final_state, @@ -395,7 +426,19 @@ impl ExecutionContext { /// A vector of `(Option, AsyncMessage)` pairs where: /// * `Option` is the bytecode to execute (or `None` if not found) /// * `AsyncMessage` is the asynchronous message to execute - pub(crate) fn take_async_batch( + pub(crate) fn take_async_batch_v0( + &mut self, + max_gas: u64, + async_msg_cst_gas_cost: u64, + ) -> Vec<(Option, AsyncMessage)> { + self.speculative_async_pool + .take_batch_to_execute(self.slot, max_gas, async_msg_cst_gas_cost) + .into_iter() + .map(|(_id, msg)| (self.get_bytecode(&msg.destination), msg)) + .collect() + } + + pub(crate) fn take_async_batch_v1( &mut self, max_gas: u64, async_msg_cst_gas_cost: u64, @@ -440,10 +483,20 @@ impl ExecutionContext { false, ); + let ts = get_block_slot_timestamp( + config.thread_count, + config.t0, + config.genesis_timestamp, + slot, + ) + .expect("Time overflow when getting block slot timestamp for MIP"); + // return active slot execution context ExecutionContext { slot, opt_block_id, + execution_component_version: mip_store 
+ .get_latest_component_version_at(&MipComponent::Execution, ts), ..ExecutionContext::new( config, final_state, @@ -547,6 +600,7 @@ impl ExecutionContext { self.get_current_address()?, address, bytecode, + self.execution_component_version, )?; // add the address to owned addresses @@ -618,8 +672,13 @@ impl ExecutionContext { } // set data entry - self.speculative_ledger - .set_data_entry(&self.get_current_address()?, address, key, data) + self.speculative_ledger.set_data_entry( + &self.get_current_address()?, + address, + key, + data, + self.execution_component_version, + ) } /// Appends data to a datastore entry for an address in the speculative ledger. @@ -659,8 +718,13 @@ impl ExecutionContext { res_data.extend(data); // set data entry - self.speculative_ledger - .set_data_entry(&self.get_current_address()?, address, key, res_data) + self.speculative_ledger.set_data_entry( + &self.get_current_address()?, + address, + key, + res_data, + self.execution_component_version, + ) } /// Deletes a datastore entry for an address. @@ -683,8 +747,12 @@ impl ExecutionContext { } // delete entry - self.speculative_ledger - .delete_data_entry(&self.get_current_address()?, address, key) + self.speculative_ledger.delete_data_entry( + &self.get_current_address()?, + address, + key, + self.execution_component_version, + ) } /// Transfers coins from one address to another. 
@@ -703,20 +771,54 @@ impl ExecutionContext { amount: Amount, check_rights: bool, ) -> Result<(), ExecutionError> { + let execution_component_version = self.execution_component_version; + if let Some(from_addr) = &from_addr { // check access rights // ensure we can't spend from an address on which we have no write access - if check_rights && !self.has_write_rights_on(from_addr) { - return Err(ExecutionError::RuntimeError(format!( - "spending from address {} is not allowed in this context", - from_addr - ))); + // If execution component version is 0, we need to disallow sending to SC addresses + + match execution_component_version { + 0 => { + if check_rights { + if !self.has_write_rights_on(from_addr) { + return Err(ExecutionError::RuntimeError(format!( + "spending from address {} is not allowed in this context", + from_addr + ))); + } + + // ensure we can't transfer towards SC addresses on which we have no write access + if let Some(to_addr) = &to_addr { + if matches!(to_addr, Address::SC(..)) + && !self.has_write_rights_on(to_addr) + { + return Err(ExecutionError::RuntimeError(format!( + "crediting SC address {} is not allowed without write access to it", + to_addr + ))); + } + } + } + } + _ => { + if check_rights && !self.has_write_rights_on(from_addr) { + return Err(ExecutionError::RuntimeError(format!( + "spending from address {} is not allowed in this context", + from_addr + ))); + } + } } } // do the transfer - self.speculative_ledger - .transfer_coins(from_addr, to_addr, amount) + self.speculative_ledger.transfer_coins( + from_addr, + to_addr, + amount, + execution_component_version, + ) } /// Add a new asynchronous message to speculative pool @@ -796,6 +898,76 @@ impl ExecutionContext { &mut self, denounced_addr: &Address, roll_count: u64, + ) -> Result { + let execution_component_version = self.execution_component_version; + + match execution_component_version { + 0 => self.try_slash_rolls_v0(denounced_addr, roll_count), + _ => 
self.try_slash_rolls_v1(denounced_addr, roll_count), + } + } + + pub fn try_slash_rolls_v0( + &mut self, + denounced_addr: &Address, + roll_count: u64, + ) -> Result { + // try to slash as many roll as available + let slashed_rolls = self + .speculative_roll_state + .try_slash_rolls(denounced_addr, roll_count); + // convert slashed rolls to coins (as deferred credits => coins) + let mut slashed_coins = self + .config + .roll_price + .checked_mul_u64(slashed_rolls.unwrap_or_default()) + .ok_or_else(|| { + ExecutionError::RuntimeError(format!( + "Cannot multiply roll price by {}", + roll_count + )) + })?; + + // what remains to slash (then will try to slash as many deferred credits as avail/what remains to be slashed) + let amount_remaining_to_slash = self + .config + .roll_price + .checked_mul_u64(roll_count) + .ok_or_else(|| { + ExecutionError::RuntimeError(format!( + "Cannot multiply roll price by {}", + roll_count + )) + })? + .saturating_sub(slashed_coins); + + if amount_remaining_to_slash > Amount::zero() { + // There is still an amount to slash for this denunciation so we need to slash + // in deferred credits + let slashed_coins_in_deferred_credits = self + .speculative_roll_state + .try_slash_deferred_credits(&self.slot, denounced_addr, &amount_remaining_to_slash); + + slashed_coins = slashed_coins.saturating_add(slashed_coins_in_deferred_credits); + + let amount_remaining_to_slash_2 = + slashed_coins.saturating_sub(slashed_coins_in_deferred_credits); + if amount_remaining_to_slash_2 > Amount::zero() { + // Use saturating_mul_u64 to avoid an error (for just a warn!(..)) + warn!("Slashed {} coins (by selling rolls) and {} coins from deferred credits of address: {} but cumulative amount is lower than expected: {} coins", + slashed_coins, slashed_coins_in_deferred_credits, denounced_addr, + self.config.roll_price.saturating_mul_u64(roll_count) + ); + } + } + + Ok(slashed_coins) + } + + pub fn try_slash_rolls_v1( + &mut self, + denounced_addr: &Address, + 
roll_count: u64, ) -> Result { // try to slash as many roll as available let slashed_rolls = self @@ -907,23 +1079,92 @@ impl ExecutionContext { result } - /// Finishes a slot and generates the execution output. - /// Settles emitted asynchronous messages, reimburse the senders of deleted messages. - /// Moves the output of the execution out of the context, - /// resetting some context fields in the process. - /// - /// This is used to get the output of an execution before discarding the context. - /// Note that we are not taking self by value to consume it because the context is shared. - pub fn settle_slot(&mut self, block_info: Option) -> ExecutionOutput { + fn settle_slot_v0(&mut self, block_info: Option) -> ExecutionOutput { + let slot = self.slot; + // execute the deferred credits coming from roll sells + let deferred_credits_transfers = self.execute_deferred_credits(&slot); + + // take the ledger changes first as they are needed for async messages and cache + let ledger_changes = self.speculative_ledger.take(); + + // settle emitted async messages and reimburse the senders of deleted messages + let deleted_messages = + self.speculative_async_pool + .settle_slot(&slot, &ledger_changes, false); + + let mut cancel_async_message_transfers = vec![]; + for (_msg_id, msg) in deleted_messages { + if let Some(t) = self.cancel_async_message(&msg) { + cancel_async_message_transfers.push(t) + } + } + + // update module cache + let bc_updates = ledger_changes.get_bytecode_updates(); + + { + let mut cache_write_lock = self.module_cache.write(); + for bytecode in bc_updates { + cache_write_lock.save_module(&bytecode.0, CondomLimits::default()); + } + } + // if the current slot is last in cycle check the production stats and act accordingly + let auto_sell_rolls = if self + .slot + .is_last_of_cycle(self.config.periods_per_cycle, self.config.thread_count) + { + self.speculative_roll_state.settle_production_stats( + &slot, + self.config.periods_per_cycle, + 
self.config.thread_count, + self.config.roll_price, + self.config.max_miss_ratio, + ) + } else { + vec![] + }; + + // generate the execution output + let state_changes = StateChanges { + ledger_changes, + async_pool_changes: self.speculative_async_pool.take(), + pos_changes: self.speculative_roll_state.take(), + executed_ops_changes: self.speculative_executed_ops.take(), + executed_denunciations_changes: self.speculative_executed_denunciations.take(), + execution_trail_hash_change: SetOrKeep::Set(self.execution_trail_hash), + deferred_call_changes: self.speculative_deferred_calls.take(), + }; + std::mem::take(&mut self.opt_block_id); + ExecutionOutput { + slot, + block_info, + state_changes, + events: std::mem::take(&mut self.events), + #[cfg(feature = "execution-trace")] + slot_trace: None, + #[cfg(feature = "dump-block")] + storage: None, + deferred_credits_execution: deferred_credits_transfers, + cancel_async_message_execution: cancel_async_message_transfers, + auto_sell_execution: auto_sell_rolls, + } + } + + fn settle_slot_with_fixed_ledger_change_handling( + &mut self, + block_info: Option, + ) -> ExecutionOutput { let slot = self.slot; // execute the deferred credits coming from roll sells let deferred_credits_transfers = self.execute_deferred_credits(&slot); // settle emitted async messages and reimburse the senders of deleted messages - let deleted_messages = self - .speculative_async_pool - .settle_slot(&slot, &self.speculative_ledger.added_changes); + let deleted_messages = self.speculative_async_pool.settle_slot( + &slot, + &self.speculative_ledger.added_changes, + true, + ); let mut cancel_async_message_transfers = vec![]; for (_msg_id, msg) in deleted_messages { @@ -937,7 +1178,7 @@ impl ExecutionContext { { let mut cache_write_lock = self.module_cache.write(); for bytecode in bc_updates { - cache_write_lock.save_module(&bytecode.0); + cache_write_lock.save_module(&bytecode.0, self.config.condom_limits.clone()); } } @@ -984,6 +1225,20 @@ impl 
ExecutionContext { } } + /// Finishes a slot and generates the execution output. + /// Settles emitted asynchronous messages, reimburse the senders of deleted messages. + /// Moves the output of the execution out of the context, + /// resetting some context fields in the process. + /// + /// This is used to get the output of an execution before discarding the context. + /// Note that we are not taking self by value to consume it because the context is shared. + pub fn settle_slot(&mut self, block_info: Option) -> ExecutionOutput { + match self.execution_component_version { + 0 => self.settle_slot_v0(block_info), + _ => self.settle_slot_with_fixed_ledger_change_handling(block_info), + } + } + /// Sets a bytecode for an address in the speculative ledger. /// Fail if the address is absent from the ledger. /// @@ -1013,8 +1268,12 @@ impl ExecutionContext { } // set data entry - self.speculative_ledger - .set_bytecode(&self.get_current_address()?, address, bytecode) + self.speculative_ledger.set_bytecode( + &self.get_current_address()?, + address, + bytecode, + self.execution_component_version, + ) } /// Creates a new event but does not emit it. 
@@ -1205,8 +1464,12 @@ impl ExecutionContext { let mut event = self.event_create(format!("DeferredCall execution fail call_id:{}", id), true); - if event.data.len() > self.config.max_event_size { - event.data.truncate(self.config.max_event_size); + let max_event_size = match self.execution_component_version { + 0 => self.config.max_event_size_v0, + _ => self.config.max_event_size_v1, + }; + if event.data.len() > max_event_size { + event.data.truncate(max_event_size); } self.event_emit(event); @@ -1262,6 +1525,14 @@ impl ExecutionContext { .get_calls_by_slot(slot) .slot_calls } + + /// Get the condom limits to pass to the VM depending on the current execution component version + pub fn get_condom_limits(&self) -> CondomLimits { + match self.execution_component_version { + 0 => Default::default(), + _ => self.config.condom_limits.clone(), + } + } } /// Generate the execution trail hash diff --git a/massa-execution-worker/src/execution.rs b/massa-execution-worker/src/execution.rs index a11b8ddda7..076c4b46ff 100644 --- a/massa-execution-worker/src/execution.rs +++ b/massa-execution-worker/src/execution.rs @@ -45,7 +45,7 @@ use massa_models::{amount::Amount, slot::Slot}; use massa_module_cache::config::ModuleCacheConfig; use massa_module_cache::controller::ModuleCache; use massa_pos_exports::SelectorController; -use massa_sc_runtime::{Interface, Response, VMError}; +use massa_sc_runtime::{CondomLimits, Interface, Response, VMError}; use massa_versioning::versioning::MipStore; use massa_wallet::Wallet; use parking_lot::{Mutex, RwLock}; @@ -140,6 +140,7 @@ pub(crate) struct ExecutionState { pub(crate) execution_info: Arc>, #[cfg(feature = "dump-block")] block_storage_backend: Arc>, + cur_execution_version: u32, } impl ExecutionState { @@ -188,14 +189,16 @@ impl ExecutionState { }))); // Create an empty placeholder execution context, with shared atomic access - let execution_context = Arc::new(Mutex::new(ExecutionContext::new( + let execution_context = 
ExecutionContext::new( config.clone(), final_state.clone(), active_history.clone(), module_cache.clone(), mip_store.clone(), execution_trail_hash, - ))); + ); + let cur_execution_version = execution_context.execution_component_version; + let execution_context = Arc::new(Mutex::new(execution_context)); // Instantiate the interface providing ABI access to the VM, share the execution context with it let execution_interface = Box::new(InterfaceImpl::new( @@ -238,6 +241,7 @@ impl ExecutionState { config, #[cfg(feature = "dump-block")] block_storage_backend, + cur_execution_version, } } @@ -301,6 +305,30 @@ impl ExecutionState { // append generated events to the final event store exec_out.events.finalize(); + + let ts = get_block_slot_timestamp( + self.config.thread_count, + self.config.t0, + self.config.genesis_timestamp, + exec_out.slot, + ) + .expect("Time overflow"); + + let cur_version = self + .final_state + .read() + .get_mip_store() + .get_network_version_active_at(ts); + + if cur_version == 0 { + // Truncate the events before saving them to the event store + // Note: this is only needed during the MIP transition period + // When it becomes active, we will refuse such events so no need to truncate them + for event in exec_out.events.0.iter_mut() { + event.data.truncate(self.config.max_event_size_v1); + } + } + self.final_events_cache.save_events(exec_out.events.0); // update the prometheus metrics @@ -446,6 +474,7 @@ impl ExecutionState { // lock execution context let mut context = context_guard!(self); + let execution_component_version = context.execution_component_version; // ignore the operation if it was already executed if context.is_op_executed(&operation_id) { @@ -467,15 +496,21 @@ impl ExecutionState { // set the context origin operation ID // Note: set operation ID early as if context.transfer_coins fails, event_create will use // operation ID in the event message - context.origin_operation_id = Some(operation_id); + if execution_component_version >= 1 { 
+ context.origin_operation_id = Some(operation_id); + } // debit the fee from the operation sender if let Err(err) = context.transfer_coins(Some(sender_addr), None, operation.content.fee, false) { let mut error = format!("could not spend fees: {}", err); - if error.len() > self.config.max_event_size { - error.truncate(self.config.max_event_size); + let max_event_size = match execution_component_version { + 0 => self.config.max_event_size_v0, + _ => self.config.max_event_size_v1, + }; + if error.len() > max_event_size { + error.truncate(max_event_size); } let event = context.event_create(error.clone(), true); context.event_emit(event); @@ -491,6 +526,10 @@ impl ExecutionState { // set the creator address context.creator_address = Some(operation.content_creator_address); + if execution_component_version == 0 { + context.origin_operation_id = Some(operation_id); + } + Ok(context_snapshot) } @@ -953,10 +992,12 @@ impl ExecutionState { _ => panic!("unexpected operation type"), }; + let condom_limits; { // acquire write access to the context let mut context = context_guard!(self); + condom_limits = context.get_condom_limits(); // Set the call stack to a single element: // * the execution will happen in the context of the address of the operation's sender // * the context will give the operation's sender write access to its own ledger entry @@ -971,17 +1012,17 @@ impl ExecutionState { }; // load the tmp module - let module = self - .module_cache - .read() - .load_tmp_module(bytecode, *max_gas)?; + let module = + self.module_cache + .read() + .load_tmp_module(bytecode, *max_gas, condom_limits.clone())?; // run the VM let _res = massa_sc_runtime::run_main( &*self.execution_interface, module, *max_gas, self.config.gas_costs.clone(), - self.config.condom_limits.clone(), + condom_limits, ) .map_err(|error| ExecutionError::VMError { context: "ExecuteSC".to_string(), @@ -1026,10 +1067,13 @@ impl ExecutionState { // prepare the current slot context for executing the operation let 
bytecode; + let condom_limits; { // acquire write access to the context let mut context = context_guard!(self); + condom_limits = context.get_condom_limits(); + // Set the call stack // This needs to be defined before anything can fail, so that the emitted event contains the right stack context.stack = vec![ @@ -1074,7 +1118,11 @@ impl ExecutionState { // load and execute the compiled module // IMPORTANT: do not keep a lock here as `run_function` uses the `get_module` interface - let module = self.module_cache.write().load_module(&bytecode, max_gas)?; + + let module = + self.module_cache + .write() + .load_module(&bytecode, max_gas, condom_limits.clone())?; let response = massa_sc_runtime::run_function( &*self.execution_interface, module, @@ -1082,7 +1130,7 @@ impl ExecutionState { param, max_gas, self.config.gas_costs.clone(), - self.config.condom_limits.clone(), + condom_limits, ); match response { Ok(Response { init_gas_cost, .. }) @@ -1117,6 +1165,7 @@ impl ExecutionState { &self, message: AsyncMessage, bytecode: Option, + execution_version: u32, ) -> Result { let mut result = AsyncMessageExecutionResult::new(); #[cfg(feature = "execution-info")] @@ -1136,7 +1185,10 @@ impl ExecutionState { context.stack = vec![ ExecutionStackElement { address: message.sender, - coins: Default::default(), + coins: match execution_version { + 0 => message.coins, + _ => Default::default(), + }, owned_addresses: vec![message.sender], operation_datastore: None, }, @@ -1188,17 +1240,31 @@ impl ExecutionState { // load and execute the compiled module // IMPORTANT: do not keep a lock here as `run_function` uses the `get_module` interface - let Ok(module) = self - .module_cache - .write() - .load_module(&bytecode, message.max_gas) - else { - let err = - ExecutionError::RuntimeError("could not load module for async execution".into()); - let mut context = context_guard!(self); - context.reset_to_snapshot(context_snapshot, err.clone()); - context.cancel_async_message(&message); - return 
Err(err); + let module = match context_guard!(self).execution_component_version { + 0 => self.module_cache.write().load_module( + &bytecode, + message.max_gas, + CondomLimits::default(), + )?, + _ => { + match self.module_cache.write().load_module( + &bytecode, + message.max_gas, + self.config.condom_limits.clone(), + ) { + Ok(module) => module, + Err(err) => { + let err = ExecutionError::RuntimeError(format!( + "could not load module for async execution: {}", + err + )); + let mut context = context_guard!(self); + context.reset_to_snapshot(context_snapshot, err.clone()); + context.cancel_async_message(&message); + return Err(err); + } + } + } }; let response = massa_sc_runtime::run_function( @@ -1326,6 +1392,7 @@ impl ExecutionState { let module = self.module_cache.write().load_module( &bytecode, call.get_effective_gas(self.config.deferred_calls_config.call_cst_gas_cost), + self.config.condom_limits.clone(), )?; let response = massa_sc_runtime::run_function( &*self.execution_interface, @@ -1395,7 +1462,7 @@ impl ExecutionState { /// # Returns /// An `ExecutionOutput` structure summarizing the output of the executed slot pub fn execute_slot( - &self, + &mut self, slot: &Slot, exec_target: Option<&(BlockId, ExecutionBlockMetadata)>, selector: Box, @@ -1424,39 +1491,93 @@ impl ExecutionState { self.mip_store.clone(), ); - // Deferred calls - let calls = execution_context.deferred_calls_advance_slot(*slot); + let execution_version = execution_context.execution_component_version; + if self.cur_execution_version != execution_version { + // Reset the cache because a new execution version has become active + info!("A new execution version has become active! 
Resetting the module-cache."); + self.module_cache.write().reset(); + self.cur_execution_version = execution_version; + } + + let mut deferred_calls_slot_gas = 0; - // Apply the created execution context for slot execution - *context_guard!(self) = execution_context; + // deferred calls execution - for (id, call) in calls.slot_calls { - let cancelled = call.cancelled; - match self.execute_deferred_call(&id, call) { - Ok(_exec) => { - if cancelled { - continue; - } - info!("executed deferred call: {:?}", id); - cfg_if::cfg_if! { - if #[cfg(feature = "execution-trace")] { - // Safe to unwrap - slot_trace.deferred_call_stacks.push(_exec.traces.unwrap().0); - } else if #[cfg(feature = "execution-info")] { - slot_trace.deferred_call_stacks.push(_exec.traces.clone().unwrap().0); - exec_info.deferred_calls_messages.push(Ok(_exec)); + match execution_version { + 0 => { + // Get asynchronous messages to execute + let messages = execution_context.take_async_batch_v0( + self.config.max_async_gas, + self.config.async_msg_cst_gas_cost, + ); + + // Apply the created execution context for slot execution + *context_guard!(self) = execution_context; + + // Try executing asynchronous messages. + // Effects are cancelled on failure and the sender is reimbursed. + for (opt_bytecode, message) in messages { + match self.execute_async_message(message, opt_bytecode, execution_version) { + Ok(_message_return) => { + cfg_if::cfg_if! 
{ + if #[cfg(feature = "execution-trace")] { + // Safe to unwrap + slot_trace.asc_call_stacks.push(_message_return.traces.unwrap().0); + } else if #[cfg(feature = "execution-info")] { + slot_trace.asc_call_stacks.push(_message_return.traces.clone().unwrap().0); + exec_info.async_messages.push(Ok(_message_return)); + } + } + } + Err(err) => { + let msg = format!("failed executing async message: {}", err); + #[cfg(feature = "execution-info")] + exec_info.async_messages.push(Err(msg.clone())); + debug!(msg); } } } - Err(err) => { - let msg = format!("failed executing deferred call: {}", err); - #[cfg(feature = "execution-info")] - exec_info.deferred_calls_messages.push(Err(msg.clone())); - dbg!(msg); + } + _ => { + // Deferred calls + let calls = execution_context.deferred_calls_advance_slot(*slot); + + deferred_calls_slot_gas = calls.effective_slot_gas; + + // Apply the created execution context for slot execution + *context_guard!(self) = execution_context; + + for (id, call) in calls.slot_calls { + let cancelled = call.cancelled; + match self.execute_deferred_call(&id, call) { + Ok(_exec) => { + if cancelled { + continue; + } + info!("executed deferred call: {:?}", id); + cfg_if::cfg_if! 
{ + if #[cfg(feature = "execution-trace")] { + // Safe to unwrap + slot_trace.deferred_call_stacks.push(_exec.traces.unwrap().0); + } else if #[cfg(feature = "execution-info")] { + slot_trace.deferred_call_stacks.push(_exec.traces.clone().unwrap().0); + exec_info.deferred_calls_messages.push(Ok(_exec)); + } + } + } + Err(err) => { + let msg = format!("failed executing deferred call: {}", err); + #[cfg(feature = "execution-info")] + exec_info.deferred_calls_messages.push(Err(msg.clone())); + dbg!(msg); + } + } } } } + // Block execution + let mut block_info: Option = None; // Set block gas (max_gas_per_block - gas used by deferred calls) let mut remaining_block_gas = self.config.max_gas_per_block; @@ -1639,89 +1760,173 @@ impl ExecutionState { // Update speculative rolls state production stats context.update_production_stats(&block_creator_addr, *slot, Some(*block_id)); - // Divide the total block credits into parts + remainder - let block_credit_part_count = 3 * (1 + self.config.endorsement_count); - let block_credit_part = block_credits - .checked_div_u64(block_credit_part_count) - .expect("critical: block_credits checked_div factor is 0"); - let remainder = block_credits - .checked_rem_u64(block_credit_part_count) - .expect("critical: block_credits checked_rem factor is 0"); - - // Give 3 parts + remainder to the block producer to stimulate block production - // even in the absence of endorsements. - let mut block_producer_credit = block_credit_part - .saturating_mul_u64(3) - .saturating_add(remainder); - - for endorsement_creator in endorsement_creators { - // Credit the creator of the block with 1 part to stimulate endorsement inclusion of endorsements, - // and dissuade from emitting the block too early (before the endorsements have propageted). - block_producer_credit = block_producer_credit.saturating_add(block_credit_part); - - // Credit creator of the endorsement with 1 part to stimulate the production of endorsements. 
- // This also motivates endorsers to not publish their endorsements too early (will not endorse the right block), - // and to not publish too late (will not be included in the block). - match context.transfer_coins( - None, - Some(endorsement_creator), - block_credit_part, - false, - ) { - Ok(_) => { - #[cfg(feature = "execution-info")] - exec_info - .endorsement_creator_rewards - .insert(endorsement_creator, block_credit_part); + match execution_version { + 0 => { + // Credit endorsement producers and endorsed block producers + let mut remaining_credit = block_credits; + let block_credit_part = block_credits + .checked_div_u64(3 * (1 + (self.config.endorsement_count))) + .expect("critical: block_credits checked_div factor is 0"); + + for endorsement_creator in endorsement_creators { + // credit creator of the endorsement with coins + match context.transfer_coins( + None, + Some(endorsement_creator), + block_credit_part, + false, + ) { + Ok(_) => { + remaining_credit = + remaining_credit.saturating_sub(block_credit_part); + + #[cfg(feature = "execution-info")] + exec_info + .endorsement_creator_rewards + .insert(endorsement_creator, block_credit_part); + } + Err(err) => { + debug!( + "failed to credit {} coins to endorsement creator {} for an endorsed block execution: {}", + block_credit_part, endorsement_creator, err + ) + } + } + + // credit creator of the endorsed block with coins + match context.transfer_coins( + None, + Some(endorsement_target_creator), + block_credit_part, + false, + ) { + Ok(_) => { + remaining_credit = + remaining_credit.saturating_sub(block_credit_part); + #[cfg(feature = "execution-info")] + { + exec_info.endorsement_target_reward = + Some((endorsement_target_creator, block_credit_part)); + } + } + Err(err) => { + debug!( + "failed to credit {} coins to endorsement target creator {} on block execution: {}", + block_credit_part, endorsement_target_creator, err + ) + } + } } - Err(err) => { + + // Credit block creator with remaining_credit 
+ if let Err(err) = context.transfer_coins( + None, + Some(block_creator_addr), + remaining_credit, + false, + ) { debug!( - "failed to credit {} coins to endorsement creator {} for an endorsed block execution: {}", - block_credit_part, endorsement_creator, err + "failed to credit {} coins to block creator {} on block execution: {}", + remaining_credit, block_creator_addr, err ) + } else { + #[cfg(feature = "execution-info")] + { + exec_info.block_producer_reward = + Some((block_creator_addr, remaining_credit)); + } } } + _ => { + // Divide the total block credits into parts + remainder + let block_credit_part_count = 3 * (1 + self.config.endorsement_count); + let block_credit_part = block_credits + .checked_div_u64(block_credit_part_count) + .expect("critical: block_credits checked_div factor is 0"); + let remainder = block_credits + .checked_rem_u64(block_credit_part_count) + .expect("critical: block_credits checked_rem factor is 0"); + + // Give 3 parts + remainder to the block producer to stimulate block production + // even in the absence of endorsements. + let mut block_producer_credit = block_credit_part + .saturating_mul_u64(3) + .saturating_add(remainder); + + for endorsement_creator in endorsement_creators { + // Credit the creator of the block with 1 part to stimulate endorsement inclusion of endorsements, + // and dissuade from emitting the block too early (before the endorsements have propageted). + block_producer_credit = + block_producer_credit.saturating_add(block_credit_part); + + // Credit creator of the endorsement with 1 part to stimulate the production of endorsements. + // This also motivates endorsers to not publish their endorsements too early (will not endorse the right block), + // and to not publish too late (will not be included in the block). 
+ match context.transfer_coins( + None, + Some(endorsement_creator), + block_credit_part, + false, + ) { + Ok(_) => { + #[cfg(feature = "execution-info")] + exec_info + .endorsement_creator_rewards + .insert(endorsement_creator, block_credit_part); + } + Err(err) => { + debug!( + "failed to credit {} coins to endorsement creator {} for an endorsed block execution: {}", + block_credit_part, endorsement_creator, err + ) + } + } - // Credit the creator of the endorsed block with 1 part. - // This is done to incentivize block producers to be endorsed, - // typically by not publishing their blocks too late. - match context.transfer_coins( - None, - Some(endorsement_target_creator), - block_credit_part, - false, - ) { - Ok(_) => { - #[cfg(feature = "execution-info")] - { - exec_info.endorsement_target_reward = - Some((endorsement_target_creator, block_credit_part)); + // Credit the creator of the endorsed block with 1 part. + // This is done to incentivize block producers to be endorsed, + // typically by not publishing their blocks too late. 
+ match context.transfer_coins( + None, + Some(endorsement_target_creator), + block_credit_part, + false, + ) { + Ok(_) => { + #[cfg(feature = "execution-info")] + { + exec_info.endorsement_target_reward = + Some((endorsement_target_creator, block_credit_part)); + } + } + Err(err) => { + debug!( + "failed to credit {} coins to endorsement target creator {} on block execution: {}", + block_credit_part, endorsement_target_creator, err + ) + } } } - Err(err) => { + + // Credit block producer + if let Err(err) = context.transfer_coins( + None, + Some(block_creator_addr), + block_producer_credit, + false, + ) { debug!( - "failed to credit {} coins to endorsement target creator {} on block execution: {}", - block_credit_part, endorsement_target_creator, err + "failed to credit {} coins to block creator {} on block execution: {}", + block_producer_credit, block_creator_addr, err ) + } else { + #[cfg(feature = "execution-info")] + { + exec_info.block_producer_reward = + Some((block_creator_addr, block_producer_credit)); + } } } } - - // Credit block producer - if let Err(err) = - context.transfer_coins(None, Some(block_creator_addr), block_producer_credit, false) - { - debug!( - "failed to credit {} coins to block creator {} on block execution: {}", - block_producer_credit, block_creator_addr, err - ) - } else { - #[cfg(feature = "execution-info")] - { - exec_info.block_producer_reward = - Some((block_creator_addr, block_producer_credit)); - } - } } else { // the slot is a miss, check who was supposed to be the creator and update production stats let producer_addr = selector @@ -1730,37 +1935,44 @@ impl ExecutionState { context_guard!(self).update_production_stats(&producer_addr, *slot, None); } - // Get asynchronous messages to execute - // The gas available for async messages is the remaining block gas + async remaining gas (max_async - gas used by deferred calls) - let async_msg_gas_available = self - .config - .max_async_gas - .saturating_sub(calls.effective_slot_gas) 
- .saturating_add(remaining_block_gas); - let messages = context_guard!(self) - .take_async_batch(async_msg_gas_available, self.config.async_msg_cst_gas_cost); - - // Try executing asynchronous messages. - // Effects are cancelled on failure and the sender is reimbursed. - for (_message_id, message) in messages { - let opt_bytecode = context_guard!(self).get_bytecode(&message.destination); - match self.execute_async_message(message, opt_bytecode) { - Ok(_message_return) => { - cfg_if::cfg_if! { - if #[cfg(feature = "execution-trace")] { - // Safe to unwrap - slot_trace.asc_call_stacks.push(_message_return.traces.unwrap().0); - } else if #[cfg(feature = "execution-info")] { - slot_trace.asc_call_stacks.push(_message_return.traces.clone().unwrap().0); - exec_info.async_messages.push(Ok(_message_return)); + // Async msg execution + + if execution_version > 0 { + // Get asynchronous messages to execute + // The gas available for async messages is the remaining block gas + async remaining gas (max_async - gas used by deferred calls) + let async_msg_gas_available = self + .config + .max_async_gas + .saturating_sub(deferred_calls_slot_gas) + .saturating_add(remaining_block_gas); + + // Get asynchronous messages to execute + let messages = context_guard!(self) + .take_async_batch_v1(async_msg_gas_available, self.config.async_msg_cst_gas_cost); + + // Try executing asynchronous messages. + // Effects are cancelled on failure and the sender is reimbursed. + for (_message_id, message) in messages { + let opt_bytecode = context_guard!(self).get_bytecode(&message.destination); + + match self.execute_async_message(message, opt_bytecode, execution_version) { + Ok(_message_return) => { + cfg_if::cfg_if! 
{ + if #[cfg(feature = "execution-trace")] { + // Safe to unwrap + slot_trace.asc_call_stacks.push(_message_return.traces.unwrap().0); + } else if #[cfg(feature = "execution-info")] { + slot_trace.asc_call_stacks.push(_message_return.traces.clone().unwrap().0); + exec_info.async_messages.push(Ok(_message_return)); + } } } - } - Err(err) => { - let msg = format!("failed executing async message: {}", err); - #[cfg(feature = "execution-info")] - exec_info.async_messages.push(Err(msg.clone())); - debug!(msg); + Err(err) => { + let msg = format!("failed executing async message: {}", err); + #[cfg(feature = "execution-info")] + exec_info.async_messages.push(Err(msg.clone())); + debug!(msg); + } } } } @@ -1987,6 +2199,7 @@ impl ExecutionState { // run the interpreter according to the target type let exec_response = match req.target { ReadOnlyExecutionTarget::BytecodeExecution(bytecode) => { + let condom_limits = execution_context.get_condom_limits(); { let mut context = context_guard!(self); *context = execution_context; @@ -2000,10 +2213,11 @@ impl ExecutionState { } // load the tmp module - let module = self - .module_cache - .read() - .load_tmp_module(&bytecode, req.max_gas)?; + let module = self.module_cache.read().load_tmp_module( + &bytecode, + req.max_gas, + condom_limits.clone(), + )?; // run the VM massa_sc_runtime::run_main( @@ -2011,7 +2225,7 @@ impl ExecutionState { module, req.max_gas, self.config.gas_costs.clone(), - self.config.condom_limits.clone(), + condom_limits, ) .map_err(|error| ExecutionError::VMError { context: "ReadOnlyExecutionTarget::BytecodeExecution".to_string(), @@ -2030,6 +2244,7 @@ impl ExecutionState { .unwrap_or_default() .0; + let condom_limits = execution_context.get_condom_limits(); { let mut context = context_guard!(self); *context = execution_context; @@ -2054,10 +2269,11 @@ impl ExecutionState { // load and execute the compiled module // IMPORTANT: do not keep a lock here as `run_function` uses the `get_module` interface - let 
module = self - .module_cache - .write() - .load_module(&bytecode, req.max_gas)?; + let module = self.module_cache.write().load_module( + &bytecode, + req.max_gas, + condom_limits.clone(), + )?; let response = massa_sc_runtime::run_function( &*self.execution_interface, @@ -2066,7 +2282,7 @@ impl ExecutionState { ¶meter, req.max_gas, self.config.gas_costs.clone(), - self.config.condom_limits.clone(), + condom_limits, ); match response { diff --git a/massa-execution-worker/src/interface_impl.rs b/massa-execution-worker/src/interface_impl.rs index bb93ef98c8..db81d67345 100644 --- a/massa-execution-worker/src/interface_impl.rs +++ b/massa-execution-worker/src/interface_impl.rs @@ -101,7 +101,10 @@ impl InterfaceImpl { use massa_module_cache::{config::ModuleCacheConfig, controller::ModuleCache}; use massa_pos_exports::SelectorConfig; use massa_pos_worker::start_selector_worker; - use massa_versioning::versioning::{MipStatsConfig, MipStore}; + use massa_versioning::{ + mips::get_mip_list, + versioning::{MipStatsConfig, MipStore}, + }; use parking_lot::RwLock; use tempfile::TempDir; @@ -143,8 +146,8 @@ impl InterfaceImpl { block_count_considered: MIP_STORE_STATS_BLOCK_CONSIDERED, warn_announced_version_ratio: Ratio::new_raw(30, 100), }; - let mip_store = - MipStore::try_from(([], mip_stats_config)).expect("Cannot create an empty MIP store"); + let mip_store = MipStore::try_from((get_mip_list(), mip_stats_config)) + .expect("Cannot create an empty MIP store"); let mut execution_context = ExecutionContext::new( config.clone(), @@ -232,12 +235,21 @@ impl Interface for InterfaceImpl { Ok(()) } + fn get_interface_version(&self) -> Result { + let context = context_guard!(self); + Ok(context.execution_component_version) + } + fn increment_recursion_counter(&self) -> Result<()> { + let execution_component_version = self.get_interface_version()?; + let mut context = context_guard!(self); context.recursion_counter += 1; - if context.recursion_counter > 
self.config.max_recursive_calls_depth { + if execution_component_version > 0 + && context.recursion_counter > self.config.max_recursive_calls_depth + { bail!("recursion depth limit reached"); } @@ -332,10 +344,15 @@ impl Interface for InterfaceImpl { /// # Returns /// A `massa-sc-runtime` CL compiled module & the remaining gas after loading the module fn get_module(&self, bytecode: &[u8], gas_limit: u64) -> Result { - Ok((context_guard!(self)) + let context = context_guard!(self); + let condom_limits = context.get_condom_limits(); + + let ret = context .module_cache .write() - .load_module(bytecode, gas_limit)?) + .load_module(bytecode, gas_limit, condom_limits)?; + + Ok(ret) } /// Compile and return a temporary module @@ -343,10 +360,16 @@ impl Interface for InterfaceImpl { /// # Returns /// A `massa-sc-runtime` SP compiled module & the remaining gas after loading the module fn get_tmp_module(&self, bytecode: &[u8], gas_limit: u64) -> Result { - Ok((context_guard!(self)) - .module_cache - .write() - .load_tmp_module(bytecode, gas_limit)?) + let context = context_guard!(self); + let condom_limits = context.get_condom_limits(); + + let ret = + context + .module_cache + .write() + .load_tmp_module(bytecode, gas_limit, condom_limits)?; + + Ok(ret) } /// Gets the balance of the current address address (top of the stack). 
@@ -905,13 +928,21 @@ impl Interface for InterfaceImpl { signature_: &[u8], public_key_: &[u8], ) -> Result { + let execution_component_version = self.get_interface_version()?; + // check the signature length if signature_.len() != 65 { return Err(anyhow!("invalid signature length in evm_signature_verify")); } // parse the public key - let public_key = libsecp256k1::PublicKey::parse_slice(public_key_, None)?; + let public_key = match execution_component_version { + 0 => libsecp256k1::PublicKey::parse_slice( + public_key_, + Some(libsecp256k1::PublicKeyFormat::Raw), + )?, + _ => libsecp256k1::PublicKey::parse_slice(public_key_, None)?, + }; // build the message let prefix = format!("\x19Ethereum Signed Message:\n{}", message_.len()); @@ -925,34 +956,36 @@ impl Interface for InterfaceImpl { // s is the signature proof for R.x (32 bytes) // v is a recovery parameter used to ease the signature verification (1 byte) // see test_evm_verify for an example of its usage - let recovery_id: u8 = libsecp256k1::RecoveryId::parse_rpc(signature_[64])?.into(); - // Note: parse_rpc returns p - 27 and allow for 27, 28, 29, 30 - // restrict to only 27 & 28 (=> 0 & 1) - if recovery_id != 0 && recovery_id != 1 { - // Note: - // The v value in an EVM signature serves as a recovery ID, - // aiding in the recovery of the public key from the signature. - // Typically, v should be either 27 or 28 - // (or sometimes 0 or 1, depending on the implementation). - // Ensuring that v is within the expected range is crucial - // for correctly recovering the public key. - // the Ethereum yellow paper specifies only 27 and 28, requiring additional checks. - return Err(anyhow!( - "invalid recovery id value (v = {recovery_id}) in evm_signature_verify" - )); - } - let signature = libsecp256k1::Signature::parse_standard_slice(&signature_[..64])?; - // Note: - // The s value in an EVM signature should be in the lower half of the elliptic curve - // in order to prevent malleability attacks. 
- // If s is in the high-order range, it can be converted to its low-order equivalent, - // which should be enforced during signature verification. - if signature.s.is_high() { - return Err(anyhow!( - "High-Order s Value are prohibited in evm_get_pubkey_from_signature" - )); + if execution_component_version != 0 { + let recovery_id: u8 = libsecp256k1::RecoveryId::parse_rpc(signature_[64])?.into(); + // Note: parse_rpc returns p - 27 and allow for 27, 28, 29, 30 + // restrict to only 27 & 28 (=> 0 & 1) + if recovery_id != 0 && recovery_id != 1 { + // Note: + // The v value in an EVM signature serves as a recovery ID, + // aiding in the recovery of the public key from the signature. + // Typically, v should be either 27 or 28 + // (or sometimes 0 or 1, depending on the implementation). + // Ensuring that v is within the expected range is crucial + // for correctly recovering the public key. + // the Ethereum yellow paper specifies only 27 and 28, requiring additional checks. + return Err(anyhow!( + "invalid recovery id value (v = {recovery_id}) in evm_signature_verify" + )); + } + + // Note: + // The s value in an EVM signature should be in the lower half of the elliptic curve + // in order to prevent malleability attacks. + // If s is in the high-order range, it can be converted to its low-order equivalent, + // which should be enforced during signature verification. + if signature.s.is_high() { + return Err(anyhow!( + "High-Order s Value are prohibited in evm_get_pubkey_from_signature" + )); + } } // verify the signature @@ -967,11 +1000,25 @@ impl Interface for InterfaceImpl { /// Get an EVM address from a raw secp256k1 public key (64 bytes). /// Address is the last 20 bytes of the hash of the public key. 
fn evm_get_address_from_pubkey(&self, public_key_: &[u8]) -> Result> { - // parse the public key - let public_key = libsecp256k1::PublicKey::parse_slice(public_key_, None)?; - - // compute the hash of the public key - let hash = sha3::Keccak256::digest(&public_key.serialize()[1..]); + let execution_component_version = self.get_interface_version()?; + + let hash = match execution_component_version { + 0 => { + // parse the public key + let public_key = libsecp256k1::PublicKey::parse_slice( + public_key_, + Some(libsecp256k1::PublicKeyFormat::Raw), + )?; + // compute the hash of the public key + sha3::Keccak256::digest(public_key.serialize()) + } + _ => { + // parse the public key + let public_key = libsecp256k1::PublicKey::parse_slice(public_key_, None)?; + // compute the hash of the public key + sha3::Keccak256::digest(&public_key.serialize()[1..]) + } + }; // ignore the first 12 bytes of the hash let address = hash[12..].to_vec(); @@ -982,6 +1029,8 @@ impl Interface for InterfaceImpl { /// Get a raw secp256k1 public key from an EVM signature and the signed hash. 
fn evm_get_pubkey_from_signature(&self, hash_: &[u8], signature_: &[u8]) -> Result> { + let execution_component_version = self.get_interface_version()?; + // check the signature length if signature_.len() != 65 { return Err(anyhow!( @@ -989,37 +1038,58 @@ impl Interface for InterfaceImpl { )); } - // parse the message - let message = libsecp256k1::Message::parse_slice(hash_)?; + match execution_component_version { + 0 => { + // parse the message + let message = libsecp256k1::Message::parse_slice(hash_).unwrap(); - // parse the signature as being (r, s, v) use only r and s - let signature = libsecp256k1::Signature::parse_standard_slice(&signature_[..64])?; + // parse the signature as being (r, s, v) use only r and s + let signature = + libsecp256k1::Signature::parse_standard_slice(&signature_[..64]).unwrap(); - // Note: - // See evm_signature_verify explanation - if signature.s.is_high() { - return Err(anyhow!( - "High-Order s Value are prohibited in evm_get_pubkey_from_signature" - )); - } + // parse v as a recovery id + let recovery_id = libsecp256k1::RecoveryId::parse_rpc(signature_[64]).unwrap(); - // parse v as a recovery id - let recovery_id = libsecp256k1::RecoveryId::parse_rpc(signature_[64])?; + // recover the public key + let recovered = libsecp256k1::recover(&message, &signature, &recovery_id).unwrap(); - let recovery_id_: u8 = recovery_id.into(); - if recovery_id_ != 0 && recovery_id_ != 1 { - // Note: - // See evm_signature_verify explanation - return Err(anyhow!( - "invalid recovery id value (v = {recovery_id_}) in evm_get_pubkey_from_signature" - )); + // return its serialized value + Ok(recovered.serialize().to_vec()) + } + _ => { + // parse the message + let message = libsecp256k1::Message::parse_slice(hash_)?; + + // parse the signature as being (r, s, v) use only r and s + let signature = libsecp256k1::Signature::parse_standard_slice(&signature_[..64])?; + + // Note: + // See evm_signature_verify explanation + if signature.s.is_high() { + return 
Err(anyhow!( + "High-Order s Value are prohibited in evm_get_pubkey_from_signature" + )); + } + + // parse v as a recovery id + let recovery_id = libsecp256k1::RecoveryId::parse_rpc(signature_[64])?; + + let recovery_id_: u8 = recovery_id.into(); + if recovery_id_ != 0 && recovery_id_ != 1 { + // Note: + // See evm_signature_verify explanation + return Err(anyhow!( + "invalid recovery id value (v = {recovery_id_}) in evm_get_pubkey_from_signature" + )); + } + + // recover the public key + let recovered = libsecp256k1::recover(&message, &signature, &recovery_id)?; + + // return its serialized value + Ok(recovered.serialize().to_vec()) + } } - - // recover the public key - let recovered = libsecp256k1::recover(&message, &signature, &recovery_id)?; - - // return its serialized value - Ok(recovered.serialize().to_vec()) } // Return true if the address is a User address, false if it is an SC address. @@ -1146,7 +1216,13 @@ impl Interface for InterfaceImpl { /// /// [DeprecatedByNewRuntime] Replaced by `get_current_slot` fn generate_event(&self, data: String) -> Result<()> { - if data.len() > self.config.max_event_size { + let execution_component_version = self.get_interface_version()?; + let max_event_size = match execution_component_version { + 0 => self.config.max_event_size_v0, + _ => self.config.max_event_size_v1, + }; + + if data.len() > max_event_size { bail!("Event data size is too large"); }; @@ -1162,14 +1238,15 @@ impl Interface for InterfaceImpl { is_final: None, is_error: None, }; - let event_per_op = context - .events - .get_filtered_sc_output_events_iter(&event_filter) - .count(); - if event_per_op >= self.config.max_event_per_operation { - bail!("Too many event for this operation"); + if execution_component_version > 0 { + let event_per_op = context + .events + .get_filtered_sc_output_events_iter(&event_filter) + .count(); + if event_per_op >= self.config.max_event_per_operation { + bail!("Too many event for this operation"); + } } - 
context.event_emit(event); Ok(()) } @@ -1179,7 +1256,13 @@ impl Interface for InterfaceImpl { /// # Arguments: /// data: the bytes_array data that is the payload of the event fn generate_event_wasmv1(&self, data: Vec) -> Result<()> { - if data.len() > self.config.max_event_size { + let execution_component_version = self.get_interface_version()?; + let max_event_size = match execution_component_version { + 0 => self.config.max_event_size_v0, + _ => self.config.max_event_size_v1, + }; + + if data.len() > max_event_size { bail!("Event data size is too large"); }; @@ -1196,14 +1279,15 @@ impl Interface for InterfaceImpl { is_final: None, is_error: None, }; - let event_per_op = context - .events - .get_filtered_sc_output_events_iter(&event_filter) - .count(); - if event_per_op >= self.config.max_event_per_operation { - bail!("Too many event for this operation"); + if execution_component_version > 0 { + let event_per_op = context + .events + .get_filtered_sc_output_events_iter(&event_filter) + .count(); + if event_per_op >= self.config.max_event_per_operation { + bail!("Too many event for this operation"); + } } - context.event_emit(event); Ok(()) @@ -1287,10 +1371,6 @@ impl Interface for InterfaceImpl { bail!("validity end thread exceeds the configuration thread count") } - if max_gas < self.config.gas_costs.max_instance_cost { - bail!("max gas is lower than the minimum instance cost") - } - let target_addr = Address::from_str(target_address)?; // check that the target address is an SC address @@ -1309,12 +1389,19 @@ impl Interface for InterfaceImpl { let mut execution_context = context_guard!(self); let emission_slot = execution_context.slot; - if Slot::new(validity_end.0, validity_end.1) < Slot::new(validity_start.0, validity_start.1) - { - bail!("validity end is earlier than the validity start") - } - if Slot::new(validity_end.0, validity_end.1) < emission_slot { - bail!("validity end is earlier than the current slot") + let execution_component_version = 
execution_context.execution_component_version; + if execution_component_version > 0 { + if max_gas < self.config.gas_costs.max_instance_cost { + bail!("max gas is lower than the minimum instance cost") + } + if Slot::new(validity_end.0, validity_end.1) + < Slot::new(validity_start.0, validity_start.1) + { + bail!("validity end is earlier than the validity start") + } + if Slot::new(validity_end.0, validity_end.1) < emission_slot { + bail!("validity end is earlier than the current slot") + } } let emission_index = execution_context.created_message_index; @@ -1462,9 +1549,7 @@ impl Interface for InterfaceImpl { params_size: u64, ) -> Result<(bool, u64)> { // write-lock context - let context = context_guard!(self); - let current_slot = context.slot; let target_slot = Slot::new(target_slot.0, target_slot.1); @@ -1565,8 +1650,9 @@ impl Interface for InterfaceImpl { /// true if the call exists, false otherwise fn deferred_call_exists(&self, id: &str) -> Result { // write-lock context - let call_id = DeferredCallId::from_str(id)?; let context = context_guard!(self); + + let call_id = DeferredCallId::from_str(id)?; Ok(context.deferred_call_exists(&call_id)) } @@ -1761,11 +1847,18 @@ impl Interface for InterfaceImpl { fn get_address_category_wasmv1(&self, to_check: &str) -> Result { let addr = Address::from_str(to_check)?; - match addr { - Address::User(_) => Ok(AddressCategory::UserAddress), - Address::SC(_) => Ok(AddressCategory::ScAddress), + let execution_component_version = context_guard!(self).execution_component_version; + + // Fixed behavior for this ABI in https://github.com/massalabs/massa/pull/4728 + // We keep the previous (bugged) code if the execution component version is 0 + // to avoid a breaking change + match (addr, execution_component_version) { + (Address::User(_), 0) => Ok(AddressCategory::ScAddress), + (Address::SC(_), 0) => Ok(AddressCategory::UserAddress), + (Address::User(_), _) => Ok(AddressCategory::UserAddress), + (Address::SC(_), _) => 
Ok(AddressCategory::ScAddress), #[allow(unreachable_patterns)] - _ => Ok(AddressCategory::Unspecified), + (_, _) => Ok(AddressCategory::Unspecified), } } @@ -1945,11 +2038,6 @@ impl Interface for InterfaceImpl { } } } - - /// Interface version to sync with the runtime for its versioning - fn get_interface_version(&self) -> Result { - Ok(1) - } } #[cfg(test)] diff --git a/massa-execution-worker/src/speculative_async_pool.rs b/massa-execution-worker/src/speculative_async_pool.rs index 3c1cb5bd9a..39daf21bc1 100644 --- a/massa-execution-worker/src/speculative_async_pool.rs +++ b/massa-execution-worker/src/speculative_async_pool.rs @@ -10,8 +10,13 @@ use massa_async_pool::{ }; use massa_final_state::FinalStateController; use massa_ledger_exports::LedgerChanges; -use massa_models::slot::Slot; use massa_models::types::{Applicable, SetUpdateOrDelete}; +use massa_models::{ + config::{GENESIS_TIMESTAMP, T0, THREAD_COUNT}, + slot::Slot, + timeslots::get_block_slot_timestamp, +}; +use massa_versioning::versioning::MipComponent; use parking_lot::RwLock; use std::{ collections::{BTreeMap, HashMap}, @@ -131,6 +136,7 @@ impl SpeculativeAsyncPool { &slot, &message_info.validity_start, &message_info.validity_end, + self.get_execution_component_version(&slot), ) && message_info.can_be_executed { @@ -140,7 +146,11 @@ impl SpeculativeAsyncPool { } } - let taken = self.fetch_msgs(wanted_messages, true); + let taken = self.fetch_msgs( + wanted_messages, + true, + self.get_execution_component_version(&slot), + ); for (message_id, _) in taken.iter() { self.message_infos.remove(message_id); @@ -162,14 +172,17 @@ impl SpeculativeAsyncPool { &mut self, slot: &Slot, ledger_changes: &LedgerChanges, + fix_eliminated_msg: bool, ) -> Vec<(AsyncMessageId, AsyncMessage)> { + let execution_component_version = self.get_execution_component_version(slot); + // Update the messages_info: remove messages that should be removed // Filter out all messages for which the validity end is expired. 
// Note: that the validity_end bound is included in the validity interval of the message. let mut eliminated_infos = Vec::new(); self.message_infos.retain(|id, info| { - if Self::is_message_expired(slot, &info.validity_end) { + if Self::is_message_expired(slot, &info.validity_end, execution_component_version) { eliminated_infos.push((*id, info.clone())); false } else { @@ -180,7 +193,11 @@ impl SpeculativeAsyncPool { let mut eliminated_new_messages = Vec::new(); self.pool_changes.0.retain(|k, v| match v { SetUpdateOrDelete::Set(message) => { - if Self::is_message_expired(slot, &message.validity_end) { + if Self::is_message_expired( + slot, + &message.validity_end, + execution_component_version, + ) { eliminated_new_messages.push((*k, v.clone())); false } else { @@ -220,22 +237,29 @@ impl SpeculativeAsyncPool { } // Query triggered messages - let triggered_msg = - self.fetch_msgs(triggered_info.iter().map(|(id, _)| id).collect(), false); + let triggered_msg = self.fetch_msgs( + triggered_info.iter().map(|(id, _)| id).collect(), + false, + execution_component_version, + ); for (msg_id, _msg) in triggered_msg.iter() { self.pool_changes.push_activate(*msg_id); } // Query eliminated messages - let mut eliminated_msg = - self.fetch_msgs(eliminated_infos.iter().map(|(id, _)| id).collect(), true); - - eliminated_msg.extend(eliminated_new_messages.iter().filter_map(|(k, v)| match v { - SetUpdateOrDelete::Set(v) => Some((*k, v.clone())), - SetUpdateOrDelete::Update(_v) => None, - SetUpdateOrDelete::Delete => None, - })); + let mut eliminated_msg = self.fetch_msgs( + eliminated_infos.iter().map(|(id, _)| id).collect(), + true, + execution_component_version, + ); + if fix_eliminated_msg { + eliminated_msg.extend(eliminated_new_messages.iter().filter_map(|(k, v)| match v { + SetUpdateOrDelete::Set(v) => Some((*k, v.clone())), + SetUpdateOrDelete::Update(_v) => None, + SetUpdateOrDelete::Delete => None, + })); + } eliminated_msg } @@ -243,6 +267,7 @@ impl SpeculativeAsyncPool { 
&mut self, mut wanted_ids: Vec<&AsyncMessageId>, delete_existing: bool, + execution_component_version: u32, ) -> Vec<(AsyncMessageId, AsyncMessage)> { let mut msgs = Vec::new(); @@ -277,6 +302,7 @@ impl SpeculativeAsyncPool { match self.active_history.read().fetch_message( message_id, current_changes.get(message_id).cloned().unwrap_or_default(), + execution_component_version, ) { Present(SetUpdateOrDelete::Set(mut msg)) => { msg.apply(current_changes.get(message_id).cloned().unwrap_or_default()); @@ -288,7 +314,14 @@ impl SpeculativeAsyncPool { } Present(SetUpdateOrDelete::Update(msg_update)) => { current_changes.entry(message_id).and_modify(|e| { - *e = msg_update.clone(); + match execution_component_version { + 0 => { + e.apply(msg_update.clone()); + } + _ => { + *e = msg_update.clone(); + } + } }); return true; } @@ -318,12 +351,29 @@ impl SpeculativeAsyncPool { msgs } + fn get_execution_component_version(&self, slot: &Slot) -> u32 { + let ts = get_block_slot_timestamp(THREAD_COUNT, T0, *GENESIS_TIMESTAMP, *slot) + .expect("Time overflow when getting block slot timestamp for MIP"); + + self.final_state + .read() + .get_mip_store() + .get_latest_component_version_at(&MipComponent::Execution, ts) + } + /// Return true if a message (given its validity end) is expired /// Must be consistent with is_message_valid - fn is_message_expired(slot: &Slot, message_validity_end: &Slot) -> bool { + fn is_message_expired( + slot: &Slot, + message_validity_end: &Slot, + execution_component_version: u32, + ) -> bool { // Note: SecureShareOperation.get_validity_range(...) 
returns RangeInclusive // (for operation validity) so apply the same rule for message validity - *slot > *message_validity_end + match execution_component_version { + 0 => *slot >= *message_validity_end, + _ => *slot > *message_validity_end, + } } /// Return true if a message (given its validity_start & validity end) is ready to execute @@ -332,10 +382,14 @@ impl SpeculativeAsyncPool { slot: &Slot, message_validity_start: &Slot, message_validity_end: &Slot, + execution_component_version: u32, ) -> bool { // Note: SecureShareOperation.get_validity_range(...) returns RangeInclusive // (for operation validity) so apply the same rule for message validity - slot >= message_validity_start && slot <= message_validity_end + match execution_component_version { + 0 => slot >= message_validity_start && slot < message_validity_end, + _ => slot >= message_validity_start && slot <= message_validity_end, + } } } @@ -358,42 +412,50 @@ mod tests { assert!(!SpeculativeAsyncPool::is_message_expired( &slot1, - &slot_validity_end + &slot_validity_end, + 1 )); assert!(SpeculativeAsyncPool::is_message_ready_to_execute( &slot1, &slot_validity_start, - &slot_validity_end + &slot_validity_end, + 1 )); assert!(!SpeculativeAsyncPool::is_message_expired( &slot_validity_start, - &slot_validity_end + &slot_validity_end, + 1 )); assert!(SpeculativeAsyncPool::is_message_ready_to_execute( &slot_validity_start, &slot_validity_start, - &slot_validity_end + &slot_validity_end, + 1 )); assert!(!SpeculativeAsyncPool::is_message_expired( &slot_validity_end, - &slot_validity_end + &slot_validity_end, + 1 )); assert!(SpeculativeAsyncPool::is_message_ready_to_execute( &slot_validity_end, &slot_validity_start, - &slot_validity_end + &slot_validity_end, + 1 )); assert!(SpeculativeAsyncPool::is_message_expired( &slot2, - &slot_validity_end + &slot_validity_end, + 1 )); assert!(!SpeculativeAsyncPool::is_message_ready_to_execute( &slot2, &slot_validity_start, - &slot_validity_end + &slot_validity_end, + 1 )); } 
} diff --git a/massa-execution-worker/src/speculative_ledger.rs b/massa-execution-worker/src/speculative_ledger.rs index 15f22c2a34..8df53071e2 100644 --- a/massa-execution-worker/src/speculative_ledger.rs +++ b/massa-execution-worker/src/speculative_ledger.rs @@ -144,6 +144,7 @@ impl SpeculativeLedger { from_addr: Option
, to_addr: Option
, amount: Amount, + execution_component_version: u32, ) -> Result<(), ExecutionError> { // init empty ledger changes let mut changes = LedgerChanges::default(); @@ -177,7 +178,7 @@ impl SpeculativeLedger { )) })?; changes.set_balance(to_addr, new_balance); - } else if matches!(to_addr, Address::SC(..)) { + } else if execution_component_version > 0 && matches!(to_addr, Address::SC(..)) { return Err(ExecutionError::RuntimeError(format!( "cannot transfer coins to non-existing smart contract address {}", to_addr @@ -236,6 +237,7 @@ impl SpeculativeLedger { creator_address: Address, addr: Address, bytecode: Bytecode, + execution_component_version: u32, ) -> Result<(), ExecutionError> { // check for address existence if !self.entry_exists(&creator_address) { @@ -283,7 +285,12 @@ impl SpeculativeLedger { ExecutionError::RuntimeError("overflow in ledger cost for bytecode".to_string()) })?; - self.transfer_coins(Some(creator_address), None, address_storage_cost)?; + self.transfer_coins( + Some(creator_address), + None, + address_storage_cost, + execution_component_version, + )?; self.added_changes.create_address(&addr); self.added_changes.set_bytecode(addr, bytecode); Ok(()) @@ -301,6 +308,7 @@ impl SpeculativeLedger { caller_addr: &Address, addr: &Address, bytecode: Bytecode, + execution_component_version: u32, ) -> Result<(), ExecutionError> { // check for address existence if !self.entry_exists(addr) { @@ -330,8 +338,18 @@ impl SpeculativeLedger { })?; match diff_size_storage.signum() { - 1 => self.transfer_coins(Some(*caller_addr), None, storage_cost_bytecode)?, - -1 => self.transfer_coins(None, Some(*caller_addr), storage_cost_bytecode)?, + 1 => self.transfer_coins( + Some(*caller_addr), + None, + storage_cost_bytecode, + execution_component_version, + )?, + -1 => self.transfer_coins( + None, + Some(*caller_addr), + storage_cost_bytecode, + execution_component_version, + )?, _ => {} }; } else { @@ -344,7 +362,12 @@ impl SpeculativeLedger { "overflow when calculating 
storage cost of bytecode".to_string(), ) })?; - self.transfer_coins(Some(*caller_addr), None, bytecode_storage_cost)?; + self.transfer_coins( + Some(*caller_addr), + None, + bytecode_storage_cost, + execution_component_version, + )?; } // set the bytecode of that address self.added_changes.set_bytecode(*addr, bytecode); @@ -528,6 +551,7 @@ impl SpeculativeLedger { caller_addr: &Address, old_key_value: Option<(&[u8], &[u8])>, new_key_value: Option<(&[u8], &[u8])>, + execution_component_version: u32, ) -> Result<(), ExecutionError> { // compute the old storage cost of the entry let old_storage_cost = old_key_value.map_or_else( @@ -549,6 +573,7 @@ impl SpeculativeLedger { Some(*caller_addr), None, new_storage_cost.saturating_sub(old_storage_cost), + execution_component_version, ) } Ordering::Less => { @@ -557,6 +582,7 @@ impl SpeculativeLedger { None, Some(*caller_addr), old_storage_cost.saturating_sub(new_storage_cost), + execution_component_version, ) } Ordering::Equal => { @@ -587,6 +613,7 @@ impl SpeculativeLedger { addr: &Address, key: Vec, value: Vec, + execution_component_version: u32, ) -> Result<(), ExecutionError> { // check for address existence if !self.entry_exists(addr) { @@ -620,6 +647,7 @@ impl SpeculativeLedger { caller_addr, prev_value.as_ref().map(|v| (&key[..], &v[..])), Some((&key, &value)), + execution_component_version, )?; } @@ -641,11 +669,17 @@ impl SpeculativeLedger { caller_addr: &Address, addr: &Address, key: &[u8], + execution_component_version: u32, ) -> Result<(), ExecutionError> { // check if the entry exists if let Some(value) = self.get_data_entry(addr, key) { // reimburse the storage costs of the entry - self.charge_datastore_entry_change_storage(caller_addr, Some((key, &value)), None)?; + self.charge_datastore_entry_change_storage( + caller_addr, + Some((key, &value)), + None, + execution_component_version, + )?; } else { return Err(ExecutionError::RuntimeError(format!( "could not delete data entry {:?} for address {}: entry or 
address does not exist", diff --git a/massa-execution-worker/src/tests/interface.rs b/massa-execution-worker/src/tests/interface.rs index c2ce6d9060..c01cfa1812 100644 --- a/massa-execution-worker/src/tests/interface.rs +++ b/massa-execution-worker/src/tests/interface.rs @@ -161,7 +161,7 @@ fn test_emit_event_too_large() { // emit 2 events and check that the 2nd event is rejected (because the msg is too large) let config = ExecutionConfig { - max_event_size: 10, + max_event_size_v1: 10, ..Default::default() }; @@ -171,9 +171,9 @@ fn test_emit_event_too_large() { Some(config.clone()), ); - let res = interface.generate_event("a".repeat(config.max_event_size).to_string()); + let res = interface.generate_event("a".repeat(config.max_event_size_v1).to_string()); assert!(res.is_ok()); - let res_2 = interface.generate_event("b".repeat(config.max_event_size + 1).to_string()); + let res_2 = interface.generate_event("b".repeat(config.max_event_size_v1 + 1).to_string()); assert!(res_2.is_err()); println!("res_2: {:?}", res_2); if let Err(e) = res_2 { diff --git a/massa-execution-worker/src/tests/scenarios_mandatories.rs b/massa-execution-worker/src/tests/scenarios_mandatories.rs index e1708ae5bd..ad82b331c0 100644 --- a/massa-execution-worker/src/tests/scenarios_mandatories.rs +++ b/massa-execution-worker/src/tests/scenarios_mandatories.rs @@ -17,7 +17,8 @@ use massa_hash::Hash; use massa_ledger_exports::{LedgerEntryUpdate, MockLedgerControllerWrapper}; use massa_models::bytecode::Bytecode; use massa_models::config::{ - CHAINID, ENDORSEMENT_COUNT, GENESIS_KEY, LEDGER_ENTRY_DATASTORE_BASE_SIZE, THREAD_COUNT, + CHAINID, ENDORSEMENT_COUNT, GENESIS_KEY, LEDGER_ENTRY_DATASTORE_BASE_SIZE, + MIP_STORE_STATS_BLOCK_CONSIDERED, THREAD_COUNT, }; use massa_models::deferred_calls::DeferredCallId; use massa_models::prehash::PreHashMap; @@ -35,6 +36,8 @@ use massa_pos_exports::{ }; use massa_signature::KeyPair; use massa_test_framework::{TestUniverse, WaitPoint}; +use 
massa_versioning::mips::get_mip_list; +use massa_versioning::versioning::{MipStatsConfig, MipStore}; use mockall::predicate; use num::rational::Ratio; use parking_lot::RwLock; @@ -107,6 +110,19 @@ fn final_state_boilerplate( .expect_get_pos_state() .return_const(pos_final_state); + let mip_stats_config = MipStatsConfig { + block_count_considered: MIP_STORE_STATS_BLOCK_CONSIDERED, + warn_announced_version_ratio: Ratio::new_raw(30, 100), + }; + let mip_list = get_mip_list(); + let mip_store = + MipStore::try_from((mip_list, mip_stats_config)).expect("mip store creation failed"); + + mock_final_state + .write() + .expect_get_mip_store() + .return_const(mip_store); + let async_pool = custom_async_pool.unwrap_or(AsyncPool::new(AsyncPoolConfig::default(), db.clone())); mock_final_state diff --git a/massa-execution-worker/src/tests/universe.rs b/massa-execution-worker/src/tests/universe.rs index 49f7ea2873..08cf137130 100644 --- a/massa-execution-worker/src/tests/universe.rs +++ b/massa-execution-worker/src/tests/universe.rs @@ -41,7 +41,10 @@ use massa_pos_exports::MockSelectorControllerWrapper; use massa_signature::KeyPair; use massa_storage::Storage; use massa_test_framework::TestUniverse; -use massa_versioning::versioning::{MipStatsConfig, MipStore}; +use massa_versioning::{ + mips::get_mip_list, + versioning::{MipStatsConfig, MipStore}, +}; use massa_wallet::test_exports::create_test_wallet; use num::rational::Ratio; use parking_lot::RwLock; @@ -176,7 +179,12 @@ impl TestUniverse for ExecutionTestUniverse { block_count_considered: MIP_STORE_STATS_BLOCK_CONSIDERED, warn_announced_version_ratio: Ratio::new_raw(30, 100), }; - let mip_store = MipStore::try_from(([], mip_stats_config)).unwrap(); + let mip_list: [( + massa_versioning::versioning::MipInfo, + massa_versioning::versioning::MipState, + ); 1] = get_mip_list(); + let mip_store = + MipStore::try_from((mip_list, mip_stats_config)).expect("mip store creation failed"); let (tx, rx) = broadcast::channel(16); 
#[cfg(feature = "execution-trace")] let (tx_traces, rx_traces) = broadcast::channel(16); diff --git a/massa-final-state/src/final_state.rs b/massa-final-state/src/final_state.rs index faf31b3495..003ba9901c 100644 --- a/massa-final-state/src/final_state.rs +++ b/massa-final-state/src/final_state.rs @@ -26,7 +26,7 @@ use massa_models::slot::Slot; use massa_models::timeslots::get_block_slot_timestamp; use massa_models::types::SetOrKeep; use massa_pos_exports::{PoSFinalState, SelectorController}; -use massa_versioning::versioning::MipStore; +use massa_versioning::versioning::{MipComponent, MipStore}; use tracing::{debug, info, warn}; /// Represents a final state `(ledger, async pool, executed_ops, executed_de and the state of the PoS)` @@ -435,7 +435,17 @@ impl FinalState { // check slot consistency let next_slot = cur_slot.get_next_slot(self.config.thread_count)?; - // .expect("overflow in execution state slot"); + let ts = get_block_slot_timestamp( + self.config.thread_count, + self.config.t0, + self.config.genesis_timestamp, + slot, + ) + .expect("Time overflow when getting block slot timestamp for MIP"); + + let final_state_component_version = self + .get_mip_store() + .get_latest_component_version_at(&MipComponent::FinalState, ts); if slot != next_slot { return Err(anyhow!( @@ -456,8 +466,11 @@ impl FinalState { // do not panic above, it might just mean that the lookback cycle is not available // bootstrap again instead - self.ledger - .apply_changes_to_batch(changes.ledger_changes, &mut db_batch); + self.ledger.apply_changes_to_batch( + changes.ledger_changes, + &mut db_batch, + final_state_component_version, + ); self.executed_ops .apply_changes_to_batch(changes.executed_ops_changes, slot, &mut db_batch); diff --git a/massa-final-state/src/state_changes.rs b/massa-final-state/src/state_changes.rs index ce1b02746a..7d9f4a2a67 100644 --- a/massa-final-state/src/state_changes.rs +++ b/massa-final-state/src/state_changes.rs @@ -240,6 +240,7 @@ impl StateChanges { 
self.pos_changes.extend(changes.pos_changes); self.executed_ops_changes .extend(changes.executed_ops_changes); + // Note: no need to version the changes, as this function is not used in the codebase self.executed_denunciations_changes .extend(changes.executed_denunciations_changes); self.execution_trail_hash_change diff --git a/massa-grpc/src/private.rs b/massa-grpc/src/private.rs index dc362bf2a6..ab2fd0d540 100644 --- a/massa-grpc/src/private.rs +++ b/massa-grpc/src/private.rs @@ -305,6 +305,7 @@ pub(crate) fn get_node_status( execution_stats: Some(execution_stats.into()), config: Some(config.into()), chain_id: grpc.grpc_config.chain_id, + current_mip_version: grpc.mip_store.get_network_version_current(), }; Ok(grpc_api::GetNodeStatusResponse { diff --git a/massa-grpc/src/public.rs b/massa-grpc/src/public.rs index 9da185f5d7..7dd1ec2e0f 100644 --- a/massa-grpc/src/public.rs +++ b/massa-grpc/src/public.rs @@ -960,6 +960,7 @@ pub(crate) fn get_status( config: Some(config.into()), chain_id: grpc.grpc_config.chain_id, minimal_fees: Some(grpc.grpc_config.minimal_fees.into()), + current_mip_version: grpc.keypair_factory.mip_store.get_network_version_current(), }; Ok(grpc_api::GetStatusResponse { @@ -992,11 +993,13 @@ pub(crate) fn query_state( grpc: &MassaPublicGrpc, request: tonic::Request, ) -> Result { + let current_network_version = grpc.keypair_factory.mip_store.get_network_version_current(); + let queries = request .into_inner() .queries .into_iter() - .map(to_querystate_filter) + .map(|q| to_querystate_filter(q, current_network_version)) .collect::, _>>()?; if queries.is_empty() { diff --git a/massa-ledger-exports/src/controller.rs b/massa-ledger-exports/src/controller.rs index 8bf7771501..24a37a31f8 100644 --- a/massa-ledger-exports/src/controller.rs +++ b/massa-ledger-exports/src/controller.rs @@ -51,7 +51,12 @@ pub trait LedgerController: Send + Sync { /// USED FOR BOOTSTRAP ONLY fn reset(&mut self); - fn apply_changes_to_batch(&mut self, changes: 
LedgerChanges, ledger_batch: &mut DBBatch); + fn apply_changes_to_batch( + &mut self, + changes: LedgerChanges, + ledger_batch: &mut DBBatch, + final_state_component_version: u32, + ); /// Deserializes the key and value, useful after bootstrap fn is_key_value_valid(&self, serialized_key: &[u8], serialized_value: &[u8]) -> bool; diff --git a/massa-ledger-worker/src/ledger.rs b/massa-ledger-worker/src/ledger.rs index 94684a089a..e74c3d675b 100644 --- a/massa-ledger-worker/src/ledger.rs +++ b/massa-ledger-worker/src/ledger.rs @@ -150,9 +150,17 @@ impl LedgerController for FinalLedger { } /// Allows applying `LedgerChanges` to the final ledger - fn apply_changes_to_batch(&mut self, changes: LedgerChanges, ledger_batch: &mut DBBatch) { - self.sorted_ledger - .apply_changes_to_batch(changes, ledger_batch); + fn apply_changes_to_batch( + &mut self, + changes: LedgerChanges, + ledger_batch: &mut DBBatch, + final_state_component_version: u32, + ) { + self.sorted_ledger.apply_changes_to_batch( + changes, + ledger_batch, + final_state_component_version, + ); } /// Deserializes the key and value, useful after bootstrap diff --git a/massa-ledger-worker/src/ledger_db.rs b/massa-ledger-worker/src/ledger_db.rs index a2e517d6f0..1d93d415c2 100644 --- a/massa-ledger-worker/src/ledger_db.rs +++ b/massa-ledger-worker/src/ledger_db.rs @@ -112,7 +112,7 @@ impl LedgerDB { let mut batch = DBBatch::new(); for (address, entry) in initial_ledger { - self.put_entry(&address, entry, &mut batch); + self.put_entry(&address, entry, &mut batch, 0); } self.db.write().write_batch( @@ -127,14 +127,19 @@ impl LedgerDB { /// # Arguments /// * changes: ledger changes to be applied /// * batch: the batch to apply the changes to - pub fn apply_changes_to_batch(&self, changes: LedgerChanges, batch: &mut DBBatch) { + pub fn apply_changes_to_batch( + &self, + changes: LedgerChanges, + batch: &mut DBBatch, + final_state_component_version: u32, + ) { // for all incoming changes for (addr, change) in changes.0 
{ match change { // the incoming change sets a ledger entry to a new one SetUpdateOrDelete::Set(new_entry) => { // inserts/overwrites the entry with the incoming one - self.put_entry(&addr, new_entry, batch); + self.put_entry(&addr, new_entry, batch, final_state_component_version); } // the incoming change updates an existing ledger entry SetUpdateOrDelete::Update(entry_update) => { @@ -145,7 +150,7 @@ impl LedgerDB { // the incoming change deletes a ledger entry SetUpdateOrDelete::Delete => { // delete the entry, if it exists - self.delete_entry(&addr, batch); + self.delete_entry(&addr, batch, final_state_component_version); } } } @@ -285,11 +290,19 @@ impl LedgerDB { /// * `addr`: associated address /// * `ledger_entry`: complete entry to be added /// * `batch`: the given operation batch to update - fn put_entry(&self, addr: &Address, ledger_entry: LedgerEntry, batch: &mut DBBatch) { + fn put_entry( + &self, + addr: &Address, + ledger_entry: LedgerEntry, + batch: &mut DBBatch, + final_state_component_version: u32, + ) { let db = self.db.read(); - // Ensures any potential previous entry is fully deleted. - delete_datastore_entries(addr, &db, batch); + if final_state_component_version > 0 { + // Ensures any potential previous entry is fully deleted. 
+ delete_datastore_entries(addr, &db, batch, final_state_component_version); + } // Version //TODO: Get version number from parameters @@ -437,7 +450,12 @@ impl LedgerDB { /// /// # Arguments /// * batch: the given operation batch to update - fn delete_entry(&self, addr: &Address, batch: &mut DBBatch) { + fn delete_entry( + &self, + addr: &Address, + batch: &mut DBBatch, + final_state_component_version: u32, + ) { let db = self.db.read(); // version @@ -461,7 +479,7 @@ impl LedgerDB { .expect(KEY_SER_ERROR); db.delete_key(batch, serialized_key); - delete_datastore_entries(addr, &db, batch); + delete_datastore_entries(addr, &db, batch, final_state_component_version); } } @@ -472,6 +490,7 @@ fn delete_datastore_entries( addr: &Address, db: &RwLockReadGuard>, batch: &mut std::collections::BTreeMap, Option>>, + final_state_component_version: u32, ) { // datastore let key_prefix = datastore_prefix_from_address(addr, &[]); @@ -481,7 +500,10 @@ fn delete_datastore_entries( STATE_CF, MassaIteratorMode::From(&key_prefix, MassaDirection::Forward), ) - .take_while(|(key, _)| key < &end_prefix(&key_prefix).unwrap()) + .take_while(|(key, _)| match final_state_component_version { + 0 => key <= &end_prefix(&key_prefix).unwrap(), + _ => key < &end_prefix(&key_prefix).unwrap(), + }) { db.delete_key(batch, serialized_key.to_vec()); } @@ -628,7 +650,7 @@ mod tests { let ledger_db = LedgerDB::new(db.clone(), 32, 255, 1000); let mut batch = DBBatch::new(); - ledger_db.put_entry(&addr, entry, &mut batch); + ledger_db.put_entry(&addr, entry, &mut batch, 0); ledger_db.update_entry(&addr, entry_update, &mut batch); ledger_db .db @@ -675,7 +697,7 @@ mod tests { // delete entry let mut batch = DBBatch::new(); - ledger_db.delete_entry(&addr, &mut batch); + ledger_db.delete_entry(&addr, &mut batch, 1); ledger_db .db .write() diff --git a/massa-models/src/config/constants.rs b/massa-models/src/config/constants.rs index 7d284a5b0a..f129f8e197 100644 --- a/massa-models/src/config/constants.rs +++ 
b/massa-models/src/config/constants.rs @@ -309,7 +309,9 @@ pub const ASYNC_MSG_CST_GAS_COST: u64 = 750_000; /// Gas used by a base operation (transaction, roll buy, roll sell) pub const BASE_OPERATION_GAS_COST: u64 = 800_000; // approx MAX_GAS_PER_BLOCK / MAX_OPERATIONS_PER_BLOCK /// Maximum event size in bytes -pub const MAX_EVENT_DATA_SIZE: usize = 512; +pub const MAX_EVENT_DATA_SIZE_V0: usize = 50_000; +/// Maximum event size in bytes +pub const MAX_EVENT_DATA_SIZE_V1: usize = 512; /// Maximum event number that can be emitted for an operation pub const MAX_EVENT_PER_OPERATION: usize = 25; /// Maximum number of recursion for calls diff --git a/massa-module-cache/src/controller.rs b/massa-module-cache/src/controller.rs index 18d12ea393..c6605366ec 100644 --- a/massa-module-cache/src/controller.rs +++ b/massa-module-cache/src/controller.rs @@ -1,6 +1,6 @@ use massa_hash::Hash; use massa_models::prehash::BuildHashMapper; -use massa_sc_runtime::{Compiler, RuntimeModule}; +use massa_sc_runtime::{Compiler, CondomLimits, RuntimeModule}; use schnellru::{ByLength, LruMap}; use tracing::debug; @@ -39,13 +39,23 @@ impl ModuleCache { } } + pub fn reset(&mut self) { + self.lru_cache.reset(); + self.hd_cache.reset(); + } + /// Internal function to compile and build `ModuleInfo` - fn compile_cached(&mut self, bytecode: &[u8], hash: Hash) -> ModuleInfo { + fn compile_cached( + &mut self, + bytecode: &[u8], + hash: Hash, + condom_limits: CondomLimits, + ) -> ModuleInfo { match RuntimeModule::new( bytecode, self.cfg.gas_costs.clone(), Compiler::CL, - self.cfg.condom_limits.clone(), + condom_limits, ) { Ok(module) => { debug!("compilation of module {} succeeded", hash); @@ -60,13 +70,12 @@ impl ModuleCache { } /// Save a new or an already existing module in the cache - pub fn save_module(&mut self, bytecode: &[u8]) { + pub fn save_module(&mut self, bytecode: &[u8], condom_limits: CondomLimits) { let hash = Hash::compute_from(bytecode); - if let Some(hd_module_info) = 
self.hd_cache.get( - hash, - self.cfg.gas_costs.clone(), - self.cfg.condom_limits.clone(), - ) { + if let Some(hd_module_info) = + self.hd_cache + .get(hash, self.cfg.gas_costs.clone(), condom_limits.clone()) + { debug!("save_module: {} present in hd", hash); self.lru_cache.insert(hash, hd_module_info); } else if let Some(lru_module_info) = self.lru_cache.get(hash) { @@ -74,7 +83,7 @@ impl ModuleCache { self.hd_cache.insert(hash, lru_module_info); } else { debug!("save_module: {} missing", hash); - let module_info = self.compile_cached(bytecode, hash); + let module_info = self.compile_cached(bytecode, hash, condom_limits); self.hd_cache.insert(hash, module_info.clone()); self.lru_cache.insert(hash, module_info); } @@ -100,7 +109,7 @@ impl ModuleCache { /// * `ModuleInfo::Invalid` if the module is invalid /// * `ModuleInfo::Module` if the module is valid and has no delta /// * `ModuleInfo::ModuleAndDelta` if the module is valid and has a delta - fn load_module_info(&mut self, bytecode: &[u8]) -> ModuleInfo { + fn load_module_info(&mut self, bytecode: &[u8], condom_limits: CondomLimits) -> ModuleInfo { if bytecode.is_empty() { let error_msg = "load_module: bytecode is absent".to_string(); debug!(error_msg); @@ -119,17 +128,16 @@ impl ModuleCache { if let Some(lru_module_info) = self.lru_cache.get(hash) { debug!("load_module: {} present in lru", hash); lru_module_info - } else if let Some(hd_module_info) = self.hd_cache.get( - hash, - self.cfg.gas_costs.clone(), - self.cfg.condom_limits.clone(), - ) { + } else if let Some(hd_module_info) = + self.hd_cache + .get(hash, self.cfg.gas_costs.clone(), condom_limits.clone()) + { debug!("load_module: {} missing in lru but present in hd", hash); self.lru_cache.insert(hash, hd_module_info.clone()); hd_module_info } else { debug!("load_module: {} missing", hash); - let module_info = self.compile_cached(bytecode, hash); + let module_info = self.compile_cached(bytecode, hash, condom_limits); self.hd_cache.insert(hash, 
module_info.clone()); self.lru_cache.insert(hash, module_info.clone()); module_info @@ -144,6 +152,7 @@ impl ModuleCache { &mut self, bytecode: &[u8], execution_gas: u64, + condom_limits: CondomLimits, ) -> Result { // Do not actually debit the instance creation cost from the provided gas // This is only supposed to be a check @@ -155,7 +164,7 @@ impl ModuleCache { )))?; // TODO: interesting but unimportant optim // remove max_instance_cost hard check if module is cached and has a delta - let module_info = self.load_module_info(bytecode); + let module_info = self.load_module_info(bytecode, condom_limits); let module = match module_info { ModuleInfo::Invalid(err) => { let err_msg = format!("invalid module: {}", err); @@ -184,6 +193,7 @@ impl ModuleCache { &self, bytecode: &[u8], limit: u64, + condom_limits: CondomLimits, ) -> Result { debug!("load_tmp_module"); // Do not actually debit the instance creation cost from the provided gas @@ -198,7 +208,7 @@ impl ModuleCache { bytecode, self.cfg.gas_costs.clone(), Compiler::SP, - self.cfg.condom_limits.clone(), + condom_limits, )?; Ok(module) } diff --git a/massa-module-cache/src/hd_cache.rs b/massa-module-cache/src/hd_cache.rs index f088b0f2fc..eece9790c3 100644 --- a/massa-module-cache/src/hd_cache.rs +++ b/massa-module-cache/src/hd_cache.rs @@ -36,7 +36,7 @@ macro_rules! metadata_key { pub(crate) struct HDCache { /// RocksDB database - db: DB, + db: Option, /// How many entries are in the db. 
Count is initialized at creation time by iterating /// over all the entries in the db then it is maintained in memory entry_count: usize, @@ -69,7 +69,7 @@ impl HDCache { let entry_count = 0; Self { - db, + db: Some(db), entry_count, max_entry_count, snip_amount, @@ -78,6 +78,24 @@ impl HDCache { } } + pub fn reset(&mut self) { + let path = self.db.as_ref().unwrap().path().to_path_buf(); + + // Close the existing database by dropping it + let _ = self.db.take(); + + // Destroy the database files + if path.exists() { + if let Err(e) = DB::destroy(&Options::default(), path.clone()) { + warn!("Failed to destroy the db: {:?}", e); + } + } + // Reopen the database + let db = DB::open_default(&path).expect(OPEN_ERROR); + self.db = Some(db); + self.entry_count = 0; + } + /// Insert a new module in the cache pub fn insert(&mut self, hash: Hash, module_info: ModuleInfo) { if self.entry_count >= self.max_entry_count { @@ -109,7 +127,11 @@ impl HDCache { let mut batch = WriteBatch::default(); batch.put(module_key!(hash), ser_module); batch.put(metadata_key!(hash), ser_metadata); - self.db.write(batch).expect(CRUD_ERROR); + self.db + .as_ref() + .expect(CRUD_ERROR) + .write(batch) + .expect(CRUD_ERROR); self.entry_count = self.entry_count.saturating_add(1); @@ -127,6 +149,8 @@ impl HDCache { .serialize(&ModuleMetadata::Delta(init_cost), &mut ser_metadata) .expect(DATA_SER_ERROR); self.db + .as_ref() + .expect(CRUD_ERROR) .put(metadata_key!(hash), ser_metadata) .expect(CRUD_ERROR); } @@ -138,6 +162,8 @@ impl HDCache { .serialize(&ModuleMetadata::Invalid(err_msg), &mut ser_metadata) .expect(DATA_SER_ERROR); self.db + .as_ref() + .expect(CRUD_ERROR) .put(metadata_key!(hash), ser_metadata) .expect(CRUD_ERROR); } @@ -151,6 +177,8 @@ impl HDCache { ) -> Option { let mut iterator = self .db + .as_ref() + .expect(CRUD_ERROR) .iterator(IteratorMode::From(&module_key!(hash), Direction::Forward)); if let (Some(Ok((key_1, ser_module))), Some(Ok((key_2, ser_metadata)))) = @@ -187,7 +215,7 
@@ impl HDCache { /// Try to remove as much as `self.amount_to_snip` entries from the db fn snip(&mut self) { - let mut iter = self.db.raw_iterator(); + let mut iter = self.db.as_ref().expect(CRUD_ERROR).raw_iterator(); let mut batch = WriteBatch::default(); let mut snipped_count: usize = 0; @@ -224,7 +252,11 @@ impl HDCache { } // delete the key and reduce entry_count - self.db.write(batch).expect(CRUD_ERROR); + self.db + .as_ref() + .expect(CRUD_ERROR) + .write(batch) + .expect(CRUD_ERROR); self.entry_count -= snipped_count; } } diff --git a/massa-module-cache/src/lru_cache.rs b/massa-module-cache/src/lru_cache.rs index a6ea66eeea..af810a8f71 100644 --- a/massa-module-cache/src/lru_cache.rs +++ b/massa-module-cache/src/lru_cache.rs @@ -27,6 +27,10 @@ impl LRUCache { } } + pub fn reset(&mut self) { + self.cache.clear(); + } + /// If the module is contained in the cache: /// * retrieve a copy of it /// * move it up in the LRU cache diff --git a/massa-node/base_config/openrpc.json b/massa-node/base_config/openrpc.json index b6d2b090e6..6c04908089 100644 --- a/massa-node/base_config/openrpc.json +++ b/massa-node/base_config/openrpc.json @@ -1999,7 +1999,6 @@ "candidate_value": { "description": "The candidate datastore entry value bytes", "$ref": "#/components/schemas/BytesOption" - }, "final_value": { "description": "The final datastore entry value bytes", @@ -2781,7 +2780,8 @@ "pool_stats", "version", "execution_stats", - "chain_id" + "chain_id", + "current_mip_version" ], "type": "object", "properties": { @@ -2859,6 +2859,10 @@ "minimal_fees": { "description": "Minimal fee", "$ref": "#/components/schemas/Amount" + }, + "current_mip_version": { + "description": "Current MIP version", + "type": "number" } }, "additionalProperties": false diff --git a/massa-node/src/main.rs b/massa-node/src/main.rs index 5a4de19292..d1b2b878a4 100644 --- a/massa-node/src/main.rs +++ b/massa-node/src/main.rs @@ -92,10 +92,10 @@ use massa_models::config::{ DEFERRED_CALL_MAX_ASYNC_GAS, 
DEFERRED_CALL_MAX_POOL_CHANGES, DEFERRED_CALL_MIN_GAS_COST, DEFERRED_CALL_MIN_GAS_INCREMENT, DEFERRED_CALL_SLOT_OVERBOOKING_PENALTY, KEEP_EXECUTED_HISTORY_EXTRA_PERIODS, MAX_BOOTSTRAP_FINAL_STATE_PARTS_SIZE, - MAX_BOOTSTRAP_VERSIONING_ELEMENTS_SIZE, MAX_EVENT_DATA_SIZE, MAX_EVENT_PER_OPERATION, - MAX_MESSAGE_SIZE, MAX_RECURSIVE_CALLS_DEPTH, MAX_RUNTIME_MODULE_CUSTOM_SECTION_DATA_LEN, - MAX_RUNTIME_MODULE_CUSTOM_SECTION_LEN, MAX_RUNTIME_MODULE_EXPORTS, - MAX_RUNTIME_MODULE_FUNCTIONS, MAX_RUNTIME_MODULE_FUNCTION_NAME_LEN, + MAX_BOOTSTRAP_VERSIONING_ELEMENTS_SIZE, MAX_EVENT_DATA_SIZE_V0, MAX_EVENT_DATA_SIZE_V1, + MAX_EVENT_PER_OPERATION, MAX_MESSAGE_SIZE, MAX_RECURSIVE_CALLS_DEPTH, + MAX_RUNTIME_MODULE_CUSTOM_SECTION_DATA_LEN, MAX_RUNTIME_MODULE_CUSTOM_SECTION_LEN, + MAX_RUNTIME_MODULE_EXPORTS, MAX_RUNTIME_MODULE_FUNCTIONS, MAX_RUNTIME_MODULE_FUNCTION_NAME_LEN, MAX_RUNTIME_MODULE_GLOBAL_INITIALIZER, MAX_RUNTIME_MODULE_IMPORTS, MAX_RUNTIME_MODULE_MEMORIES, MAX_RUNTIME_MODULE_NAME_LEN, MAX_RUNTIME_MODULE_PASSIVE_DATA, MAX_RUNTIME_MODULE_PASSIVE_ELEMENT, MAX_RUNTIME_MODULE_SIGNATURE_LEN, MAX_RUNTIME_MODULE_TABLE, @@ -481,7 +481,7 @@ async fn launch( event_cache_path: SETTINGS.execution.event_cache_path.clone(), max_event_cache_length: SETTINGS.execution.event_cache_size, snip_amount: SETTINGS.execution.event_snip_amount, - max_event_data_length: MAX_EVENT_DATA_SIZE as u64, + max_event_data_length: MAX_EVENT_DATA_SIZE_V0 as u64, thread_count: THREAD_COUNT, // Note: SCOutputEvent call stack comes from the execution module, and we assume // this should return a limited call stack length @@ -571,7 +571,8 @@ async fn launch( broadcast_slot_execution_output_channel_capacity: SETTINGS .execution .broadcast_slot_execution_output_channel_capacity, - max_event_size: MAX_EVENT_DATA_SIZE, + max_event_size_v0: MAX_EVENT_DATA_SIZE_V0, + max_event_size_v1: MAX_EVENT_DATA_SIZE_V1, max_function_length: MAX_FUNCTION_NAME_LENGTH, max_parameter_length: MAX_PARAMETERS_SIZE, chain_id: 
*CHAINID, diff --git a/massa-versioning/src/lib.rs b/massa-versioning/src/lib.rs index 60af9a824d..78e97dd9e0 100644 --- a/massa-versioning/src/lib.rs +++ b/massa-versioning/src/lib.rs @@ -81,5 +81,5 @@ pub mod versioning_factory; pub mod versioning_ser_der; /// Test utils -#[cfg(test)] +#[cfg(any(test, feature = "test-exports"))] pub mod test_helpers; diff --git a/massa-versioning/src/mips.rs b/massa-versioning/src/mips.rs index 3c3e86592e..a840861647 100644 --- a/massa-versioning/src/mips.rs +++ b/massa-versioning/src/mips.rs @@ -7,26 +7,57 @@ use massa_time::MassaTime; #[allow(unused_imports)] use crate::versioning::{MipComponent, MipInfo, MipState}; -pub fn get_mip_list() -> [(MipInfo, MipState); 0] { - // placeholder +#[cfg(not(feature = "test-exports"))] +pub fn get_mip_list() -> [(MipInfo, MipState); 1] { let mip_list = [ - /* - (MipInfo { - name: "MIP-0000".to_string(), - version: 0, - components: BTreeMap::from([ - (MipComponent::Address, 0), - (MipComponent::KeyPair, 0), - ]), - start: MassaTime::from_millis(0), - timeout: MassaTime::from_millis(0), - activation_delay: MassaTime::from_millis(0), - }, - MipState::new(MassaTime::from_millis(0))) - */ + ( + MipInfo { + name: "MIP-0001-Execution-BugFix".to_string(), + version: 1, + components: BTreeMap::from([ + (MipComponent::Execution, 1), + (MipComponent::FinalState, 1), + ]), + start: MassaTime::from_utc_ymd_hms(2024, 11, 28, 2, 0, 0).unwrap(), // TODO: set when known, ex: MassaTime::from_utc_ymd_hms(2024, 7, 10, 15, 0, 0).unwrap(); + timeout: MassaTime::from_utc_ymd_hms(2025, 11, 28, 2, 0, 0).unwrap(), // TODO: set when known + activation_delay: MassaTime::from_millis(3 * 24 * 60 * 60 * 1000), // TODO: set when known, 3 days as an example + }, + MipState::new(MassaTime::from_utc_ymd_hms(2024, 11, 28, 0, 0, 0).unwrap()), + ), // TODO: set when known, (when the MIP becomes defined, e.g. 
when merged to main branch) ]; // debug!("MIP list: {:?}", mip_list); #[allow(clippy::let_and_return)] mip_list } + +#[cfg(feature = "test-exports")] +pub fn get_mip_list() -> [(MipInfo, MipState); 1] { + use crate::{ + test_helpers::versioning_helpers::advance_state_until, + versioning::{Active, ComponentState}, + }; + + println!("Running with test-exports feature"); + + let mip_info_1 = MipInfo { + name: "MIP-0001-Execution-BugFix".to_string(), + version: 1, + components: BTreeMap::from([(MipComponent::Execution, 1), (MipComponent::FinalState, 1)]), + start: MassaTime::from_millis(2), // TODO: set when known, ex: MassaTime::from_utc_ymd_hms(2024, 7, 10, 15, 0, 0).unwrap(); + timeout: MassaTime::from_millis(10), // TODO: set when known + activation_delay: MassaTime::from_millis(2), // TODO: set when known, 3 days as an example + }; + let mip_state_1 = advance_state_until( + ComponentState::Active(Active { + at: MassaTime::from_millis(3), + }), + &mip_info_1, + ); + + let mip_list = [(mip_info_1, mip_state_1)]; + + println!("MIP list: {:?}", mip_list); + #[allow(clippy::let_and_return)] + mip_list +} diff --git a/massa-versioning/src/versioning.rs b/massa-versioning/src/versioning.rs index 6eaaa6cf67..e26096e245 100644 --- a/massa-versioning/src/versioning.rs +++ b/massa-versioning/src/versioning.rs @@ -45,6 +45,8 @@ pub enum MipComponent { Block, VM, FinalStateHashKind, + Execution, + FinalState, #[doc(hidden)] #[num_enum(default)] __Nonexhaustive,