From 368a7e9a8483db928c58461d612044dc51f6be17 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Wed, 23 Apr 2025 00:15:06 +0000 Subject: [PATCH 01/13] Squashed commit of the following: commit 4472263a6e96104bc7d471caff262c6bb6fb4d12 Author: Yi Lin Date: Thu Apr 17 04:32:13 2025 +0000 Remove an outdated assertion commit abbde8bc091000f61d068de3ab733a505117da20 Author: Yi Lin Date: Thu Apr 17 03:39:41 2025 +0000 Remove ALLOC_TABLE from local specs. Fix build for 1.74.1 commit 1c64dcb7e844f14583d0809bce8f866295d24dd8 Author: Yi Lin Date: Thu Apr 17 01:21:45 2025 +0000 Make chunk map as global side metadata commit b7b5988a6e65aa977ee134084bc09aeeacd8246b Author: Yi Lin Date: Tue Apr 15 04:33:21 2025 +0000 Refactor ChunkState to encode space index --- src/policy/immix/immixspace.rs | 11 +- src/policy/marksweepspace/native_ms/global.rs | 16 +-- src/util/heap/chunk_map.rs | 133 +++++++++++++----- src/util/metadata/side_metadata/global.rs | 5 + src/util/metadata/side_metadata/spec_defs.rs | 4 +- src/util/object_enum.rs | 15 +- .../mock_test_allocate_nonmoving.rs | 37 +++++ src/vm/tests/mock_tests/mod.rs | 1 + 8 files changed, 163 insertions(+), 59 deletions(-) create mode 100644 src/vm/tests/mock_tests/mock_test_allocate_nonmoving.rs diff --git a/src/policy/immix/immixspace.rs b/src/policy/immix/immixspace.rs index b932808d8f..49b5691cd5 100644 --- a/src/policy/immix/immixspace.rs +++ b/src/policy/immix/immixspace.rs @@ -252,7 +252,6 @@ impl ImmixSpace { vec![ MetadataSpec::OnSide(Block::DEFRAG_STATE_TABLE), MetadataSpec::OnSide(Block::MARK_TABLE), - MetadataSpec::OnSide(ChunkMap::ALLOC_TABLE), *VM::VMObjectModel::LOCAL_MARK_BIT_SPEC, *VM::VMObjectModel::LOCAL_FORWARDING_BITS_SPEC, *VM::VMObjectModel::LOCAL_FORWARDING_POINTER_SPEC, @@ -264,7 +263,6 @@ impl ImmixSpace { MetadataSpec::OnSide(Line::MARK_TABLE), MetadataSpec::OnSide(Block::DEFRAG_STATE_TABLE), MetadataSpec::OnSide(Block::MARK_TABLE), - MetadataSpec::OnSide(ChunkMap::ALLOC_TABLE), *VM::VMObjectModel::LOCAL_MARK_BIT_SPEC, *VM::VMObjectModel::LOCAL_FORWARDING_BITS_SPEC, *VM::VMObjectModel::LOCAL_FORWARDING_POINTER_SPEC, @@ -299,6 +297,7 @@ impl ImmixSpace { let scheduler = args.scheduler.clone(); let common = CommonSpace::new(args.into_policy_args(true, false, Self::side_metadata_specs())); + let space_index = common.descriptor.get_index(); ImmixSpace { pr: if common.vmrequest.is_discontiguous() { BlockPageResource::new_discontiguous( @@ -316,7 +315,7 @@ impl ImmixSpace { ) }, common, - chunk_map: ChunkMap::new(), + chunk_map: ChunkMap::new(space_index), line_mark_state: AtomicU8::new(Line::RESET_MARK_STATE), line_unavail_state: AtomicU8::new(Line::RESET_MARK_STATE), lines_consumed: AtomicUsize::new(0), @@ -524,7 +523,7 @@ impl ImmixSpace { self.defrag.notify_new_clean_block(copy); let block = Block::from_aligned_address(block_address); block.init(copy); - self.chunk_map.set(block.chunk(), ChunkState::Allocated); + self.chunk_map.set_allocated(block.chunk(), true); self.lines_consumed .fetch_add(Block::LINES, Ordering::SeqCst); Some(block) @@ -899,7 +898,7 @@ struct SweepChunk { impl GCWork for SweepChunk { fn do_work(&mut self, _worker: &mut GCWorker, mmtk: &'static MMTK) { - assert_eq!(self.space.chunk_map.get(self.chunk), ChunkState::Allocated); + assert!(self.space.chunk_map.get(self.chunk).unwrap().is_allocated()); let mut histogram = self.space.defrag.new_histogram(); let line_mark_state = if super::BLOCK_ONLY { @@ -950,7 +949,7 @@ impl GCWork for SweepChunk { probe!(mmtk, sweep_chunk, allocated_blocks); // Set this chunk as free if 
there is not live blocks. if allocated_blocks == 0 { - self.space.chunk_map.set(self.chunk, ChunkState::Free) + self.space.chunk_map.set_allocated(self.chunk, false) } self.space.defrag.add_completed_mark_histogram(histogram); self.epilogue.finish_one_work_packet(); diff --git a/src/policy/marksweepspace/native_ms/global.rs b/src/policy/marksweepspace/native_ms/global.rs index 0b349c4508..26570e41ba 100644 --- a/src/policy/marksweepspace/native_ms/global.rs +++ b/src/policy/marksweepspace/native_ms/global.rs @@ -288,7 +288,7 @@ impl MarkSweepSpace { let vm_map = args.vm_map; let is_discontiguous = args.vmrequest.is_discontiguous(); let local_specs = { - metadata::extract_side_metadata(&vec![ + metadata::extract_side_metadata(&[ MetadataSpec::OnSide(Block::NEXT_BLOCK_TABLE), MetadataSpec::OnSide(Block::PREV_BLOCK_TABLE), MetadataSpec::OnSide(Block::FREE_LIST_TABLE), @@ -300,11 +300,11 @@ impl MarkSweepSpace { MetadataSpec::OnSide(Block::BLOCK_LIST_TABLE), MetadataSpec::OnSide(Block::TLS_TABLE), MetadataSpec::OnSide(Block::MARK_TABLE), - MetadataSpec::OnSide(ChunkMap::ALLOC_TABLE), *VM::VMObjectModel::LOCAL_MARK_BIT_SPEC, ]) }; let common = CommonSpace::new(args.into_policy_args(false, false, local_specs)); + let space_index = common.descriptor.get_index(); MarkSweepSpace { pr: if is_discontiguous { BlockPageResource::new_discontiguous( @@ -322,7 +322,7 @@ impl MarkSweepSpace { ) }, common, - chunk_map: ChunkMap::new(), + chunk_map: ChunkMap::new(space_index), scheduler, abandoned: Mutex::new(AbandonedBlockLists::new()), abandoned_in_gc: Mutex::new(AbandonedBlockLists::new()), @@ -402,7 +402,7 @@ impl MarkSweepSpace { pub fn record_new_block(&self, block: Block) { block.init(); - self.chunk_map.set(block.chunk(), ChunkState::Allocated); + self.chunk_map.set_allocated(block.chunk(), true); } pub fn prepare(&mut self, full_heap: bool) { @@ -567,7 +567,7 @@ struct PrepareChunkMap { impl GCWork for PrepareChunkMap { fn do_work(&mut self, _worker: &mut GCWorker, _mmtk: &'static MMTK) { - debug_assert!(self.space.chunk_map.get(self.chunk) == ChunkState::Allocated); + debug_assert!(self.space.chunk_map.get(self.chunk).unwrap().is_allocated()); // number of allocated blocks. let mut n_occupied_blocks = 0; self.chunk @@ -581,7 +581,7 @@ impl GCWork for PrepareChunkMap { }); if n_occupied_blocks == 0 { // Set this chunk as free if there is no live blocks. - self.space.chunk_map.set(self.chunk, ChunkState::Free) + self.space.chunk_map.set_allocated(self.chunk, false) } else { // Otherwise this chunk is occupied, and we reset the mark bit if it is on the side. if let MetadataSpec::OnSide(side) = *VM::VMObjectModel::LOCAL_MARK_BIT_SPEC { @@ -617,7 +617,7 @@ struct SweepChunk { impl GCWork for SweepChunk { fn do_work(&mut self, _worker: &mut GCWorker, _mmtk: &'static MMTK) { - assert_eq!(self.space.chunk_map.get(self.chunk), ChunkState::Allocated); + assert!(self.space.chunk_map.get(self.chunk).unwrap().is_allocated()); // number of allocated blocks. let mut allocated_blocks = 0; @@ -636,7 +636,7 @@ impl GCWork for SweepChunk { probe!(mmtk, sweep_chunk, allocated_blocks); // Set this chunk as free if there is not live blocks. 
if allocated_blocks == 0 { - self.space.chunk_map.set(self.chunk, ChunkState::Free) + self.space.chunk_map.set_allocated(self.chunk, false); } self.epilogue.finish_one_work_packet(); } diff --git a/src/util/heap/chunk_map.rs b/src/util/heap/chunk_map.rs index 26912b3f89..d07601c1d7 100644 --- a/src/util/heap/chunk_map.rs +++ b/src/util/heap/chunk_map.rs @@ -44,20 +44,68 @@ impl Chunk { } } -/// Chunk allocation state -#[repr(u8)] -#[derive(Debug, PartialEq, Clone, Copy)] -pub enum ChunkState { - /// The chunk is not allocated. - Free = 0, - /// The chunk is allocated. - Allocated = 1, +/// The allocation state for a chunk in the chunk map. It includes whether each chunk is allocated or free, and the space the chunk belongs to. +/// Highest bit: 0 = free, 1 = allocated +/// Lower 4 bits: Space index (0-15) +#[repr(transparent)] +#[derive(PartialEq, Clone, Copy)] +pub struct ChunkState(u8); + +impl ChunkState { + const ALLOC_BIT_MASK: u8 = 0x80; + const SPACE_INDEX_MASK: u8 = 0x0F; + + /// Create a new ChunkState that represents being allocated in the given space + pub fn allocated(space_index: usize) -> ChunkState { + debug_assert!(space_index < crate::util::heap::layout::heap_parameters::MAX_SPACES); + let mut encode = space_index as u8; + encode |= Self::ALLOC_BIT_MASK; + ChunkState(encode) + } + /// Create a new ChunkState that represents being free in the given space + pub fn free(space_index: usize) -> ChunkState { + debug_assert!(space_index < crate::util::heap::layout::heap_parameters::MAX_SPACES); + ChunkState(space_index as u8) + } + /// Is the chunk free? + pub fn is_free(&self) -> bool { + self.0 & Self::ALLOC_BIT_MASK == 0 + } + /// Is the chunk allocated? + pub fn is_allocated(&self) -> bool { + !self.is_free() + } + /// Get the space index of the chunk + pub fn get_space_index(&self) -> usize { + let index = (self.0 & Self::SPACE_INDEX_MASK) as usize; + debug_assert!(index < crate::util::heap::layout::heap_parameters::MAX_SPACES); + index + } +} + +impl std::fmt::Debug for ChunkState { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + if self.is_free() { + write!(f, "Free({})", self.get_space_index()) + } else { + write!(f, "Allocated({})", self.get_space_index()) + } + } } /// A byte-map to record all the allocated chunks. /// A plan can use this to maintain records for the chunks that they used, and the states of the chunks. -/// Any plan that uses the chunk map should include the `ALLOC_TABLE` spec in their local sidemetadata specs +/// Any plan that uses the chunk map should include the `ALLOC_TABLE` spec in their local sidemetadata specs. +/// +/// A chunk map is created for a space (identified by the space index), and will only update or list chunks for that space. pub struct ChunkMap { + /// The space that uses this chunk map. + space_index: usize, + /// The range of chunks that are used by the space. The range only records the lowest chunk and the highest chunk. + /// All the chunks that are used for the space are within the range, but not necessarily that all the chunks in the range + /// are used for the space. Spaces may be discontiguous, thus the range may include chunks that do not belong to the space. + /// We need to use the space index in the chunk map and the space index encoded with the chunk state to know if + /// the chunk belongs to the current space. 
chunk_range: Mutex>, } @@ -66,22 +114,40 @@ impl ChunkMap { pub const ALLOC_TABLE: SideMetadataSpec = crate::util::metadata::side_metadata::spec_defs::CHUNK_MARK; - pub fn new() -> Self { + pub fn new(space_index: usize) -> Self { Self { + space_index, chunk_range: Mutex::new(Chunk::ZERO..Chunk::ZERO), } } - /// Set chunk state - pub fn set(&self, chunk: Chunk, state: ChunkState) { + /// Set a chunk as allocated, or as free. + pub fn set_allocated(&self, chunk: Chunk, allocated: bool) { + let state = if allocated { + ChunkState::allocated(self.space_index) + } else { + ChunkState::free(self.space_index) + }; // Do nothing if the chunk is already in the expected state. - if self.get(chunk) == state { + if self.get_any(chunk) == state { return; } + #[cfg(debug_assertions)] + { + let old_state = self.get_any(chunk); + // If a chunk is free, any space may use it. If a chunk is not free, only the current space may update its state. + assert!( + old_state.is_free() || old_state.get_space_index() == state.get_space_index(), + "Chunk {:?}: old state {:?}, new state {:?}. Cannot set to new state.", + chunk, + old_state, + state + ); + } // Update alloc byte - unsafe { Self::ALLOC_TABLE.store::(chunk.start(), state as u8) }; + unsafe { Self::ALLOC_TABLE.store::(chunk.start(), state.0) }; // If this is a newly allcoated chunk, then expand the chunk range. - if state == ChunkState::Allocated { + if state.is_allocated() { debug_assert!(!chunk.start().is_zero()); let mut range = self.chunk_range.lock(); if range.start == Chunk::ZERO { @@ -96,20 +162,30 @@ impl ChunkMap { } } - /// Get chunk state - pub fn get(&self, chunk: Chunk) -> ChunkState { + /// Get chunk state. Return None if the chunk does not belong to the space. + pub fn get(&self, chunk: Chunk) -> Option { + let state = self.get_any(chunk); + (state.get_space_index() == self.space_index).then_some(state) + } + + /// Get chunk state, regardless of the space. This should always be private. + fn get_any(&self, chunk: Chunk) -> ChunkState { let byte = unsafe { Self::ALLOC_TABLE.load::(chunk.start()) }; - match byte { - 0 => ChunkState::Free, - 1 => ChunkState::Allocated, - _ => unreachable!(), - } + ChunkState(byte) + } + + /// A range of all chunks in the heap. + pub fn all_chunks(&self) -> impl Iterator + '_ { + let chunk_range = self.chunk_range.lock(); + RegionIterator::::new(chunk_range.start, chunk_range.end) + .filter(|c| self.get(*c).is_some()) } /// A range of all chunks in the heap. - pub fn all_chunks(&self) -> RegionIterator { + pub fn all_allocated_chunks(&self) -> impl Iterator + '_ { let chunk_range = self.chunk_range.lock(); RegionIterator::::new(chunk_range.start, chunk_range.end) + .filter(|c| self.get(*c).is_some_and(|state| state.is_allocated())) } /// Helper function to create per-chunk processing work packets for each allocated chunks. 
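The encoding above packs two facts into the single metadata byte per chunk: bit 7 is the allocation bit and the low four bits hold the owning space index, so bits 4 to 6 are unused and the 4-bit index is why the constructors assert against MAX_SPACES. A standalone round-trip sketch using the same masks; the fn main wrapper and local names are illustrative only:

    fn main() {
        const ALLOC_BIT_MASK: u8 = 0x80;
        const SPACE_INDEX_MASK: u8 = 0x0f;

        // Encode: the chunk is allocated and owned by the space with index 5.
        let state: u8 = 5u8 | ALLOC_BIT_MASK; // 0x85

        // Decode: both fields come back out of the same byte.
        let is_allocated = state & ALLOC_BIT_MASK != 0;
        let space_index = (state & SPACE_INDEX_MASK) as usize;
        assert!(is_allocated && space_index == 5);
    }

Through the ChunkMap API this one byte answers both per-space questions: get() returns None when the stored space index belongs to a different space, and all_allocated_chunks() is all_chunks() narrowed to states whose allocation bit is set.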
@@ -118,18 +194,9 @@ impl ChunkMap { func: impl Fn(Chunk) -> Box>, ) -> Vec>> { let mut work_packets: Vec>> = vec![]; - for chunk in self - .all_chunks() - .filter(|c| self.get(*c) == ChunkState::Allocated) - { + for chunk in self.all_allocated_chunks() { work_packets.push(func(chunk)); } work_packets } } - -impl Default for ChunkMap { - fn default() -> Self { - Self::new() - } -} diff --git a/src/util/metadata/side_metadata/global.rs b/src/util/metadata/side_metadata/global.rs index 8730602a4b..387d2f1d75 100644 --- a/src/util/metadata/side_metadata/global.rs +++ b/src/util/metadata/side_metadata/global.rs @@ -1345,6 +1345,11 @@ impl SideMetadataContext { } } + // Any plan that uses the chunk map needs to reserve the chunk map table. + // As we use either the mark sweep or (non moving) immix as the non moving space, + // and both policies use the chunk map, we just add the chunk map table globally. + ret.push(crate::util::heap::chunk_map::ChunkMap::ALLOC_TABLE); + ret.extend_from_slice(specs); ret } diff --git a/src/util/metadata/side_metadata/spec_defs.rs b/src/util/metadata/side_metadata/spec_defs.rs index be7a7730d3..621f11a3cf 100644 --- a/src/util/metadata/side_metadata/spec_defs.rs +++ b/src/util/metadata/side_metadata/spec_defs.rs @@ -60,6 +60,8 @@ define_side_metadata_specs!( MS_ACTIVE_CHUNK = (global: true, log_num_of_bits: 3, log_bytes_in_region: LOG_BYTES_IN_CHUNK), // Track the index in SFT map for a chunk (only used for SFT sparse chunk map) SFT_DENSE_CHUNK_MAP_INDEX = (global: true, log_num_of_bits: 3, log_bytes_in_region: LOG_BYTES_IN_CHUNK), + // Mark chunks (any plan that uses the chunk map should include this spec in their local sidemetadata specs) + CHUNK_MARK = (global: true, log_num_of_bits: 3, log_bytes_in_region: crate::util::heap::chunk_map::Chunk::LOG_BYTES), ); // This defines all LOCAL side metadata used by mmtk-core. 
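Promoting CHUNK_MARK to a global spec costs little: log_num_of_bits: 3 means 2^3 = 8 bits, i.e. one byte of metadata per chunk-sized region. A back-of-the-envelope check, assuming the usual 4 MiB chunks (Chunk::LOG_BYTES = 22); the numbers below are illustrative, not taken from this patch:

    fn main() {
        let chunk_bytes = 1u64 << 22; // assumed chunk size: 4 MiB
        let metadata_per_chunk = (1u64 << 3) / 8; // log_num_of_bits: 3 -> 1 byte
        let heap_bytes = 4u64 << 30; // a 4 GiB heap spans 1024 chunks
        // One byte per chunk: the whole chunk map costs 1 KiB here.
        println!("{} bytes", (heap_bytes / chunk_bytes) * metadata_per_chunk);
    }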
@@ -75,8 +77,6 @@ define_side_metadata_specs!( IX_BLOCK_DEFRAG = (global: false, log_num_of_bits: 3, log_bytes_in_region: crate::policy::immix::block::Block::LOG_BYTES), // Mark blocks by immix IX_BLOCK_MARK = (global: false, log_num_of_bits: 3, log_bytes_in_region: crate::policy::immix::block::Block::LOG_BYTES), - // Mark chunks (any plan that uses the chunk map should include this spec in their local sidemetadata specs) - CHUNK_MARK = (global: false, log_num_of_bits: 3, log_bytes_in_region: crate::util::heap::chunk_map::Chunk::LOG_BYTES), // Mark blocks by (native mimalloc) marksweep MS_BLOCK_MARK = (global: false, log_num_of_bits: 3, log_bytes_in_region: crate::policy::marksweepspace::native_ms::Block::LOG_BYTES), // Next block in list for native mimalloc diff --git a/src/util/object_enum.rs b/src/util/object_enum.rs index 18508f088b..776885e11b 100644 --- a/src/util/object_enum.rs +++ b/src/util/object_enum.rs @@ -5,10 +5,7 @@ use std::marker::PhantomData; use crate::vm::VMBinding; use super::{ - heap::{ - chunk_map::{ChunkMap, ChunkState}, - MonotonePageResource, - }, + heap::{chunk_map::ChunkMap, MonotonePageResource}, linear_scan::Region, metadata::{side_metadata::spec_defs::VO_BIT, vo_bit}, Address, ObjectReference, @@ -84,12 +81,10 @@ pub(crate) fn enumerate_blocks_from_chunk_map( ) where B: BlockMayHaveObjects, { - for chunk in chunk_map.all_chunks() { - if chunk_map.get(chunk) == ChunkState::Allocated { - for block in chunk.iter_region::() { - if block.may_have_objects() { - enumerator.visit_address_range(block.start(), block.end()); - } + for chunk in chunk_map.all_allocated_chunks() { + for block in chunk.iter_region::() { + if block.may_have_objects() { + enumerator.visit_address_range(block.start(), block.end()); } } } diff --git a/src/vm/tests/mock_tests/mock_test_allocate_nonmoving.rs b/src/vm/tests/mock_tests/mock_test_allocate_nonmoving.rs new file mode 100644 index 0000000000..82d1c5e958 --- /dev/null +++ b/src/vm/tests/mock_tests/mock_test_allocate_nonmoving.rs @@ -0,0 +1,37 @@ +// GITHUB-CI: MMTK_PLAN=all + +use super::mock_test_prelude::*; +use crate::plan::AllocationSemantics; + +#[test] +pub fn allocate_nonmoving() { + with_mockvm( + || -> MockVM { + MockVM { + is_collection_enabled: MockMethod::new_fixed(Box::new(|_| false)), + ..MockVM::default() + } + }, + || { + // 1MB heap + const MB: usize = 1024 * 1024; + let mut fixture = MutatorFixture::create_with_heapsize(MB); + + // Normal alloc + let addr = + memory_manager::alloc(&mut fixture.mutator, 16, 8, 0, AllocationSemantics::Default); + assert!(!addr.is_zero()); + + // Non moving alloc + let addr = memory_manager::alloc( + &mut fixture.mutator, + 16, + 8, + 0, + AllocationSemantics::NonMoving, + ); + assert!(!addr.is_zero()); + }, + no_cleanup, + ) +} diff --git a/src/vm/tests/mock_tests/mod.rs b/src/vm/tests/mock_tests/mod.rs index aab9aafd8b..55b08b3b12 100644 --- a/src/vm/tests/mock_tests/mod.rs +++ b/src/vm/tests/mock_tests/mod.rs @@ -24,6 +24,7 @@ pub(crate) mod mock_test_prelude { } mod mock_test_allocate_align_offset; +mod mock_test_allocate_nonmoving; mod mock_test_allocate_with_disable_collection; mod mock_test_allocate_with_initialize_collection; mod mock_test_allocate_with_re_enable_collection; From e885f63b4245a0eccbb3c63a625151e7c1935e81 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Wed, 23 Apr 2025 00:15:24 +0000 Subject: [PATCH 02/13] Squashed commit of the following: commit 530d80c6b03d1006750cf50a01c2933b4d3c6b5f Author: Yi Lin Date: Tue Apr 22 04:03:27 2025 +0000 Fix commit 
836cac59d5fd3030fb35054aa8f0df894d050ba0 Author: Yi Lin Date: Thu Apr 17 04:27:11 2025 +0000 Allow configuring each Immix space to be non moving --- src/plan/generational/immix/global.rs | 7 ++-- src/plan/immix/global.rs | 6 ++- src/plan/sticky/immix/global.rs | 8 ++-- src/policy/immix/defrag.rs | 9 ++--- src/policy/immix/immixspace.rs | 57 ++++++++++++++++++++------- src/policy/immix/mod.rs | 25 ------------ 6 files changed, 58 insertions(+), 54 deletions(-) diff --git a/src/plan/generational/immix/global.rs b/src/plan/generational/immix/global.rs index 80d2fbc2f5..09a53e26cf 100644 --- a/src/plan/generational/immix/global.rs +++ b/src/plan/generational/immix/global.rs @@ -87,9 +87,9 @@ impl Plan for GenImmix { fn last_collection_was_exhaustive(&self) -> bool { self.last_gc_was_full_heap.load(Ordering::Relaxed) - && ImmixSpace::::is_last_gc_exhaustive( - self.last_gc_was_defrag.load(Ordering::Relaxed), - ) + && self + .immix_space + .is_last_gc_exhaustive(self.last_gc_was_defrag.load(Ordering::Relaxed)) } fn collection_required(&self, space_full: bool, space: Option>) -> bool @@ -254,6 +254,7 @@ impl GenImmix { // In GenImmix, young objects are not allocated in ImmixSpace directly. #[cfg(feature = "vo_bit")] mixed_age: false, + never_move_objects: cfg!(feature = "immix_non_moving"), }, ); diff --git a/src/plan/immix/global.rs b/src/plan/immix/global.rs index 2e0e665f78..2be8b3d3a5 100644 --- a/src/plan/immix/global.rs +++ b/src/plan/immix/global.rs @@ -38,7 +38,7 @@ pub struct Immix { /// The plan constraints for the immix plan. pub const IMMIX_CONSTRAINTS: PlanConstraints = PlanConstraints { - moves_objects: crate::policy::immix::DEFRAG, + moves_objects: true, // Max immix object size is half of a block. max_non_los_default_alloc_bytes: crate::policy::immix::MAX_IMMIX_OBJECT_SIZE, needs_prepare_mutator: false, @@ -51,7 +51,8 @@ impl Plan for Immix { } fn last_collection_was_exhaustive(&self) -> bool { - ImmixSpace::::is_last_gc_exhaustive(self.last_gc_was_defrag.load(Ordering::Relaxed)) + self.immix_space + .is_last_gc_exhaustive(self.last_gc_was_defrag.load(Ordering::Relaxed)) } fn constraints(&self) -> &'static PlanConstraints { @@ -139,6 +140,7 @@ impl Immix { unlog_object_when_traced: false, #[cfg(feature = "vo_bit")] mixed_age: false, + never_move_objects: cfg!(feature = "immix_non_moving"), }, ) } diff --git a/src/plan/sticky/immix/global.rs b/src/plan/sticky/immix/global.rs index 7e78347477..3d2ee99f6b 100644 --- a/src/plan/sticky/immix/global.rs +++ b/src/plan/sticky/immix/global.rs @@ -7,7 +7,6 @@ use crate::plan::PlanConstraints; use crate::policy::gc_work::TraceKind; use crate::policy::gc_work::TRACE_KIND_TRANSITIVE_PIN; use crate::policy::immix::ImmixSpace; -use crate::policy::immix::PREFER_COPY_ON_NURSERY_GC; use crate::policy::immix::TRACE_KIND_FAST; use crate::policy::sft::SFT; use crate::policy::space::Space; @@ -41,7 +40,7 @@ pub struct StickyImmix { /// The plan constraints for the sticky immix plan. pub const STICKY_IMMIX_CONSTRAINTS: PlanConstraints = PlanConstraints { - moves_objects: crate::policy::immix::DEFRAG || crate::policy::immix::PREFER_COPY_ON_NURSERY_GC, + moves_objects: true, needs_log_bit: true, barrier: crate::plan::BarrierSelector::ObjectBarrier, // We may trace duplicate edges in sticky immix (or any plan that uses object remembering barrier). See https://github.com/mmtk/mmtk-core/issues/743. 
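With the compile-time constants gone, whether a sticky-immix collection may move objects becomes a per-space, run-time question. A sketch that folds together the predicates this patch introduces; the free function and its parameters are illustrative stand-ins for the methods on ImmixSpace and StickyImmix below:

    // Mirrors prefer_copy_on_nursery_gc()/in_defrag() as wired up below.
    fn current_gc_may_move_object(
        nursery_gc: bool,
        never_move_objects: bool, // ImmixSpaceArgs::never_move_objects
        in_defrag: bool,
    ) -> bool {
        if nursery_gc {
            // Copy nursery objects unless the space never moves objects or the
            // `sticky_immix_non_moving_nursery` feature pins the nursery.
            !never_move_objects && !cfg!(feature = "sticky_immix_non_moving_nursery")
        } else {
            // A full-heap sticky GC only moves objects while defragmenting.
            in_defrag
        }
    }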
@@ -164,7 +163,7 @@ impl Plan for StickyImmix { fn current_gc_may_move_object(&self) -> bool { if self.is_current_gc_nursery() { - PREFER_COPY_ON_NURSERY_GC + self.get_immix_space().prefer_copy_on_nursery_gc() } else { self.get_immix_space().in_defrag() } @@ -263,7 +262,7 @@ impl crate::plan::generational::global::GenerationalPlanExt f self.immix .immix_space .trace_object_without_moving(queue, object) - } else if crate::policy::immix::PREFER_COPY_ON_NURSERY_GC { + } else if self.immix.immix_space.prefer_copy_on_nursery_gc() { let ret = self.immix.immix_space.trace_object_with_opportunistic_copy( queue, object, @@ -330,6 +329,7 @@ impl StickyImmix { // In StickyImmix, both young and old objects are allocated in the ImmixSpace. #[cfg(feature = "vo_bit")] mixed_age: true, + never_move_objects: cfg!(feature = "immix_non_moving"), }, ); Self { diff --git a/src/policy/immix/defrag.rs b/src/policy/immix/defrag.rs index 8861e30bdc..fb1fe9ea0e 100644 --- a/src/policy/immix/defrag.rs +++ b/src/policy/immix/defrag.rs @@ -63,8 +63,10 @@ impl Defrag { } /// Determine whether the current GC should do defragmentation. + #[allow(clippy::too_many_arguments)] pub fn decide_whether_to_defrag( &self, + defrag_enabled: bool, emergency_collection: bool, collect_whole_heap: bool, collection_attempts: usize, @@ -72,7 +74,7 @@ impl Defrag { exhausted_reusable_space: bool, full_heap_system_gc: bool, ) { - let in_defrag = super::DEFRAG + let in_defrag = defrag_enabled && (emergency_collection || (collection_attempts > 1) || !exhausted_reusable_space @@ -116,9 +118,8 @@ impl Defrag { } /// Prepare work. Should be called in ImmixSpace::prepare. - #[allow(clippy::assertions_on_constants)] pub fn prepare(&self, space: &ImmixSpace, plan_stats: StatsForDefrag) { - debug_assert!(super::DEFRAG); + debug_assert!(space.is_defrag_enabled()); self.defrag_space_exhausted.store(false, Ordering::Release); // Calculate available free space for defragmentation. @@ -207,9 +208,7 @@ impl Defrag { } /// Reset the in-defrag state. - #[allow(clippy::assertions_on_constants)] pub fn reset_in_defrag(&self) { - debug_assert!(super::DEFRAG); self.in_defrag_collection.store(false, Ordering::Release); } } diff --git a/src/policy/immix/immixspace.rs b/src/policy/immix/immixspace.rs index 49b5691cd5..5e55fc194a 100644 --- a/src/policy/immix/immixspace.rs +++ b/src/policy/immix/immixspace.rs @@ -73,6 +73,8 @@ pub struct ImmixSpaceArgs { // Currently only used when "vo_bit" is enabled. Using #[cfg(...)] to eliminate dead code warning. #[cfg(feature = "vo_bit")] pub mixed_age: bool, + /// Disable copying for this Immix space. + pub never_move_objects: bool, } unsafe impl Sync for ImmixSpace {} @@ -84,7 +86,7 @@ impl SFT for ImmixSpace { fn get_forwarded_object(&self, object: ObjectReference) -> Option { // If we never move objects, look no further. - if super::NEVER_MOVE_OBJECTS { + if !self.is_movable() { return None; } @@ -102,7 +104,7 @@ impl SFT for ImmixSpace { } // If we never move objects, look no further. - if super::NEVER_MOVE_OBJECTS { + if !self.is_movable() { return false; } @@ -122,7 +124,7 @@ impl SFT for ImmixSpace { VM::VMObjectModel::LOCAL_PINNING_BIT_SPEC.is_object_pinned::(object) } fn is_movable(&self) -> bool { - !super::NEVER_MOVE_OBJECTS + !self.space_args.never_move_objects } #[cfg(feature = "sanity")] @@ -276,12 +278,13 @@ impl ImmixSpace { args: crate::policy::space::PlanCreateSpaceArgs, space_args: ImmixSpaceArgs, ) -> Self { - #[cfg(feature = "immix_non_moving")] - info!( - "Creating non-moving ImmixSpace: {}. 
Block size: 2^{}", - args.name, - Block::LOG_BYTES - ); + if space_args.never_move_objects { + info!( + "Creating non-moving ImmixSpace: {}. Block size: 2^{}", + args.name, + Block::LOG_BYTES + ); + } if space_args.unlog_object_when_traced { assert!( @@ -290,7 +293,18 @@ impl ImmixSpace { ); } - super::validate_features(); + // validate features + if super::BLOCK_ONLY { + assert!( + space_args.never_move_objects, + "Block-only immix must not move objects" + ); + } + assert!( + Block::LINES / 2 <= u8::MAX as usize - 2, + "Number of lines in a block should not exceed BlockState::MARK_MARKED" + ); + #[cfg(feature = "vo_bit")] vo_bit::helper::validate_config::(); let vm_map = args.vm_map; @@ -355,6 +369,7 @@ impl ImmixSpace { full_heap_system_gc: bool, ) -> bool { self.defrag.decide_whether_to_defrag( + self.is_defrag_enabled(), emergency_collection, collect_whole_heap, collection_attempts, @@ -389,7 +404,7 @@ impl ImmixSpace { } // Prepare defrag info - if super::DEFRAG { + if self.is_defrag_enabled() { self.defrag.prepare(self, plan_stats); } @@ -482,7 +497,7 @@ impl ImmixSpace { /// Return whether this GC was a defrag GC, as a plan may want to know this. pub fn end_of_gc(&mut self) -> bool { let did_defrag = self.defrag.in_defrag(); - if super::DEFRAG { + if self.is_defrag_enabled() { self.defrag.reset_in_defrag(); } did_defrag @@ -805,8 +820,8 @@ impl ImmixSpace { Some((start, end)) } - pub fn is_last_gc_exhaustive(did_defrag_for_last_gc: bool) -> bool { - if super::DEFRAG { + pub fn is_last_gc_exhaustive(&self, did_defrag_for_last_gc: bool) -> bool { + if self.is_defrag_enabled() { did_defrag_for_last_gc } else { // If defrag is disabled, every GC is exhaustive. @@ -832,6 +847,18 @@ impl ImmixSpace { self.mark_lines(object); } } + + pub(crate) fn prefer_copy_on_nursery_gc(&self) -> bool { + self.is_nursery_copy_enabled() + } + + pub(crate) fn is_nursery_copy_enabled(&self) -> bool { + !self.space_args.never_move_objects && !cfg!(feature = "sticky_immix_non_moving_nursery") + } + + pub(crate) fn is_defrag_enabled(&self) -> bool { + !self.space_args.never_move_objects + } } /// A work packet to prepare each block for a major GC. @@ -866,7 +893,7 @@ impl GCWork for PrepareBlockState { continue; } // Check if this block needs to be defragmented. - let is_defrag_source = if !super::DEFRAG { + let is_defrag_source = if !self.space.is_defrag_enabled() { // Do not set any block as defrag source if defrag is disabled. false } else if super::DEFRAG_EVERY_BLOCK { diff --git a/src/policy/immix/mod.rs b/src/policy/immix/mod.rs index 5e34caaf52..13e1637297 100644 --- a/src/policy/immix/mod.rs +++ b/src/policy/immix/mod.rs @@ -14,9 +14,6 @@ pub const MAX_IMMIX_OBJECT_SIZE: usize = Block::BYTES >> 1; /// Mark/sweep memory for block-level only pub const BLOCK_ONLY: bool = false; -/// Do we allow Immix to do defragmentation? -pub const DEFRAG: bool = !cfg!(feature = "immix_non_moving"); // defrag if we are allowed to move. - // STRESS COPYING: Set the feature 'immix_stress_copying' so that Immix will copy as many objects as possible. // Useful for debugging copying GC if you cannot use SemiSpace. // @@ -46,28 +43,6 @@ pub const DEFRAG_HEADROOM_PERCENT: usize = if cfg!(feature = "immix_stress_copyi 2 }; -/// If Immix is used as a nursery space, do we prefer copy? -pub const PREFER_COPY_ON_NURSERY_GC: bool = - !cfg!(feature = "immix_non_moving") && !cfg!(feature = "sticky_immix_non_moving_nursery"); // copy nursery objects if we are allowed to move. 
- -/// In some cases/settings, Immix may never move objects. -/// Currently we only have two cases where we move objects: 1. defrag, 2. nursery copy. -/// If we do neither, we will not move objects. -/// If we have other reasons to move objects, we need to add them here. -pub const NEVER_MOVE_OBJECTS: bool = !DEFRAG && !PREFER_COPY_ON_NURSERY_GC; - /// Mark lines when scanning objects. /// Otherwise, do it at mark time. pub const MARK_LINE_AT_SCAN_TIME: bool = true; - -macro_rules! validate { - ($x: expr) => { assert!($x, stringify!($x)) }; - ($x: expr => $y: expr) => { if $x { assert!($y, stringify!($x implies $y)) } }; -} - -fn validate_features() { - // Block-only immix cannot do defragmentation - validate!(DEFRAG => !BLOCK_ONLY); - // Number of lines in a block should not exceed BlockState::MARK_MARKED - assert!(Block::LINES / 2 <= u8::MAX as usize - 2); -} From de4d5f812ad881fdc88ae260ab25d53732bb1537 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Thu, 17 Apr 2025 00:19:12 +0000 Subject: [PATCH 03/13] Use Immix as non moving space --- src/plan/generational/immix/global.rs | 2 +- src/plan/global.rs | 19 +++++--- src/plan/immix/global.rs | 2 +- src/plan/mutator_context.rs | 46 ++++++++++++++++--- src/plan/sticky/immix/global.rs | 2 +- src/policy/immix/immixspace.rs | 4 +- src/util/alloc/allocators.rs | 2 +- src/util/heap/chunk_map.rs | 6 +++ .../mock_test_allocate_nonmoving.rs | 2 + 9 files changed, 66 insertions(+), 19 deletions(-) diff --git a/src/plan/generational/immix/global.rs b/src/plan/generational/immix/global.rs index 09a53e26cf..6bc73f6de5 100644 --- a/src/plan/generational/immix/global.rs +++ b/src/plan/generational/immix/global.rs @@ -131,7 +131,7 @@ impl Plan for GenImmix { if full_heap { self.immix_space.prepare( full_heap, - crate::policy::immix::defrag::StatsForDefrag::new(self), + Some(crate::policy::immix::defrag::StatsForDefrag::new(self)), ); } } diff --git a/src/plan/global.rs b/src/plan/global.rs index 062cdc969b..1dd4c7c383 100644 --- a/src/plan/global.rs +++ b/src/plan/global.rs @@ -7,6 +7,8 @@ use crate::plan::tracing::ObjectQueue; use crate::plan::Mutator; use crate::policy::immortalspace::ImmortalSpace; use crate::policy::largeobjectspace::LargeObjectSpace; +use crate::policy::marksweepspace::native_ms::MarkSweepSpace; +use crate::policy::immix::ImmixSpace; use crate::policy::space::{PlanCreateSpaceArgs, Space}; #[cfg(feature = "vm_space")] use crate::policy::vmspace::VMSpace; @@ -553,7 +555,7 @@ pub struct CommonPlan { pub los: LargeObjectSpace, // TODO: We should use a marksweep space for nonmoving. 
#[space] - pub nonmoving: ImmortalSpace, + pub nonmoving: ImmixSpace, #[parent] pub base: BasePlan, } @@ -571,12 +573,17 @@ impl CommonPlan { args.get_space_args("los", true, false, VMRequest::discontiguous()), false, ), - nonmoving: ImmortalSpace::new(args.get_space_args( + nonmoving: ImmixSpace::new(args.get_space_args( "nonmoving", true, false, VMRequest::discontiguous(), - )), + ), crate::policy::immix::ImmixSpaceArgs { + unlog_object_when_traced: false, + #[cfg(feature = "vo_bit")] + mixed_age: false, + never_move_objects: true, + }), base: BasePlan::new(args), } } @@ -591,14 +598,14 @@ impl CommonPlan { pub fn prepare(&mut self, tls: VMWorkerThread, full_heap: bool) { self.immortal.prepare(); self.los.prepare(full_heap); - self.nonmoving.prepare(); + self.nonmoving.prepare(full_heap, None); self.base.prepare(tls, full_heap) } pub fn release(&mut self, tls: VMWorkerThread, full_heap: bool) { self.immortal.release(); self.los.release(full_heap); - self.nonmoving.release(); + self.nonmoving.release(full_heap); self.base.release(tls, full_heap) } @@ -610,7 +617,7 @@ impl CommonPlan { &self.los } - pub fn get_nonmoving(&self) -> &ImmortalSpace { + pub fn get_nonmoving(&self) -> &ImmixSpace { &self.nonmoving } } diff --git a/src/plan/immix/global.rs b/src/plan/immix/global.rs index 2be8b3d3a5..e6a779b080 100644 --- a/src/plan/immix/global.rs +++ b/src/plan/immix/global.rs @@ -87,7 +87,7 @@ impl Plan for Immix { self.common.prepare(tls, true); self.immix_space.prepare( true, - crate::policy::immix::defrag::StatsForDefrag::new(self), + Some(crate::policy::immix::defrag::StatsForDefrag::new(self)), ); } diff --git a/src/plan/mutator_context.rs b/src/plan/mutator_context.rs index c72c44f140..382fa6de03 100644 --- a/src/plan/mutator_context.rs +++ b/src/plan/mutator_context.rs @@ -5,7 +5,7 @@ use crate::plan::global::Plan; use crate::plan::AllocationSemantics; use crate::policy::space::Space; use crate::util::alloc::allocators::{AllocatorSelector, Allocators}; -use crate::util::alloc::Allocator; +use crate::util::alloc::{Allocator, FreeListAllocator, ImmixAllocator}; use crate::util::{Address, ObjectReference}; use crate::util::{VMMutatorThread, VMWorkerThread}; use crate::vm::VMBinding; @@ -27,6 +27,19 @@ pub(crate) fn unreachable_prepare_func( unreachable!("`MutatorConfig::prepare_func` must not be called for the current plan.") } +/// An mutator prepare implementation for plans that use [`crate::plan::global::CommonPlan`]. +pub(crate) fn common_prepare_func(mutator: &mut Mutator, _tls: VMWorkerThread) { + // Prepare the free list allocator used for non moving + // unsafe { + // mutator + // .allocators + // .get_allocator_mut(mutator.config.allocator_mapping[AllocationSemantics::NonMoving]) + // } + // .downcast_mut::>() + // .unwrap() + // .prepare(); +} + /// A place-holder implementation for `MutatorConfig::release_func` that should not be called. /// Currently only used by `NoGC`. pub(crate) fn unreachable_release_func( @@ -36,6 +49,27 @@ pub(crate) fn unreachable_release_func( unreachable!("`MutatorConfig::release_func` must not be called for the current plan.") } +/// An mutator release implementation for plans that use [`crate::plan::global::CommonPlan`]. 
+pub(crate) fn common_release_func(mutator: &mut Mutator, _tls: VMWorkerThread) { + // // Release the free list allocator used for non moving + // unsafe { + // mutator + // .allocators + // .get_allocator_mut(mutator.config.allocator_mapping[AllocationSemantics::NonMoving]) + // } + // .downcast_mut::>() + // .unwrap() + // .release(); + let immix_allocator = unsafe { + mutator + .allocators + .get_allocator_mut(mutator.config.allocator_mapping[AllocationSemantics::NonMoving]) + } + .downcast_mut::>() + .unwrap(); + immix_allocator.reset(); +} + /// A place-holder implementation for `MutatorConfig::release_func` that does nothing. pub(crate) fn no_op_release_func(_mutator: &mut Mutator, _tls: VMWorkerThread) {} @@ -455,10 +489,8 @@ pub(crate) fn create_allocator_mapping( map[AllocationSemantics::Los] = AllocatorSelector::LargeObject(reserved.n_large_object); reserved.n_large_object += 1; - // TODO: This should be freelist allocator once we use marksweep for nonmoving space. - map[AllocationSemantics::NonMoving] = - AllocatorSelector::BumpPointer(reserved.n_bump_pointer); - reserved.n_bump_pointer += 1; + map[AllocationSemantics::NonMoving] = AllocatorSelector::Immix(reserved.n_immix); + reserved.n_immix += 1; } reserved.validate(); @@ -522,10 +554,10 @@ pub(crate) fn create_space_mapping( reserved.n_large_object += 1; // TODO: This should be freelist allocator once we use marksweep for nonmoving space. vec.push(( - AllocatorSelector::BumpPointer(reserved.n_bump_pointer), + AllocatorSelector::Immix(reserved.n_immix), plan.common().get_nonmoving(), )); - reserved.n_bump_pointer += 1; + reserved.n_immix += 1; } reserved.validate(); diff --git a/src/plan/sticky/immix/global.rs b/src/plan/sticky/immix/global.rs index 3d2ee99f6b..2998d96b04 100644 --- a/src/plan/sticky/immix/global.rs +++ b/src/plan/sticky/immix/global.rs @@ -116,7 +116,7 @@ impl Plan for StickyImmix { // Prepare both large object space and immix space self.immix.immix_space.prepare( false, - crate::policy::immix::defrag::StatsForDefrag::new(self), + Some(crate::policy::immix::defrag::StatsForDefrag::new(self)), ); self.immix.common.los.prepare(false); } else { diff --git a/src/policy/immix/immixspace.rs b/src/policy/immix/immixspace.rs index 5e55fc194a..6f5758d8c9 100644 --- a/src/policy/immix/immixspace.rs +++ b/src/policy/immix/immixspace.rs @@ -385,7 +385,7 @@ impl ImmixSpace { &self.scheduler } - pub fn prepare(&mut self, major_gc: bool, plan_stats: StatsForDefrag) { + pub fn prepare(&mut self, major_gc: bool, plan_stats: Option) { if major_gc { // Update mark_state if VM::VMObjectModel::LOCAL_MARK_BIT_SPEC.is_on_side() { @@ -405,7 +405,7 @@ impl ImmixSpace { // Prepare defrag info if self.is_defrag_enabled() { - self.defrag.prepare(self, plan_stats); + self.defrag.prepare(self, plan_stats.unwrap()); } // Prepare each block for GC diff --git a/src/util/alloc/allocators.rs b/src/util/alloc/allocators.rs index 58b591b1a8..58dd790ae2 100644 --- a/src/util/alloc/allocators.rs +++ b/src/util/alloc/allocators.rs @@ -22,7 +22,7 @@ use super::MarkCompactAllocator; pub(crate) const MAX_BUMP_ALLOCATORS: usize = 6; pub(crate) const MAX_LARGE_OBJECT_ALLOCATORS: usize = 2; pub(crate) const MAX_MALLOC_ALLOCATORS: usize = 1; -pub(crate) const MAX_IMMIX_ALLOCATORS: usize = 1; +pub(crate) const MAX_IMMIX_ALLOCATORS: usize = 2; pub(crate) const MAX_FREE_LIST_ALLOCATORS: usize = 2; pub(crate) const MAX_MARK_COMPACT_ALLOCATORS: usize = 1; diff --git a/src/util/heap/chunk_map.rs b/src/util/heap/chunk_map.rs index d07601c1d7..2f553e7b34 100644 
--- a/src/util/heap/chunk_map.rs +++ b/src/util/heap/chunk_map.rs @@ -174,6 +174,12 @@ impl ChunkMap { ChunkState(byte) } + fn is_my_chunk(&self, chunk: Chunk) -> bool { + let byte = unsafe { Self::ALLOC_TABLE.load::(chunk.start()) }; + let state = ChunkState(byte); + state.get_space_index() == self.space_index + } + /// A range of all chunks in the heap. pub fn all_chunks(&self) -> impl Iterator + '_ { let chunk_range = self.chunk_range.lock(); diff --git a/src/vm/tests/mock_tests/mock_test_allocate_nonmoving.rs b/src/vm/tests/mock_tests/mock_test_allocate_nonmoving.rs index 82d1c5e958..7c7df436fe 100644 --- a/src/vm/tests/mock_tests/mock_test_allocate_nonmoving.rs +++ b/src/vm/tests/mock_tests/mock_test_allocate_nonmoving.rs @@ -21,6 +21,7 @@ pub fn allocate_nonmoving() { let addr = memory_manager::alloc(&mut fixture.mutator, 16, 8, 0, AllocationSemantics::Default); assert!(!addr.is_zero()); + info!("Allocated default at: {:#x}", addr); // Non moving alloc let addr = memory_manager::alloc( @@ -31,6 +32,7 @@ pub fn allocate_nonmoving() { AllocationSemantics::NonMoving, ); assert!(!addr.is_zero()); + info!("Allocated nonmoving at: {:#x}", addr); }, no_cleanup, ) From f45d87ee7a319df1a44561dfe7121f374c4e6bd8 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Wed, 23 Apr 2025 06:27:53 +0000 Subject: [PATCH 04/13] Choose nonmoving policy based on the feature --- Cargo.toml | 12 +++-- src/plan/global.rs | 66 +++++++++++++++++++++------- src/plan/mutator_context.rs | 88 +++++++++++++++++++++++++------------ 3 files changed, 119 insertions(+), 47 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 340756ecc9..263f057f36 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -82,7 +82,7 @@ name = "main" harness = false [features] -default = ["builtin_env_logger"] +default = ["builtin_env_logger", "immortal_as_nonmoving"] # Built-in env_logger. This feature is enabled by default. # The user can disable this default feature to remove `env_logger` from the dependencies. @@ -224,9 +224,6 @@ malloc_jemalloc = ["dep:jemalloc-sys"] # is not compiled in default builds. malloc_native_mimalloc = [] -# If there are more groups, they should be inserted above this line -# Group:end - # Group:marksweepallocation # default is native allocator with lazy sweeping eager_sweeping = [] @@ -234,4 +231,11 @@ eager_sweeping = [] # normal heap range, we will have to use chunk-based SFT table. Turning on this feature will use a different SFT map implementation on 64bits, # and will affect all the plans in the build. Please be aware of the consequence, and this is only meant to be experimental use. 
malloc_mark_sweep = [] + +# Group:nonmovingspace +immortal_as_nonmoving = [] +immix_as_nonmoving = [] +marksweep_as_nonmoving = [] + +# If there are more groups, they should be inserted above this line # Group:end diff --git a/src/plan/global.rs b/src/plan/global.rs index 1dd4c7c383..436174165a 100644 --- a/src/plan/global.rs +++ b/src/plan/global.rs @@ -7,7 +7,6 @@ use crate::plan::tracing::ObjectQueue; use crate::plan::Mutator; use crate::policy::immortalspace::ImmortalSpace; use crate::policy::largeobjectspace::LargeObjectSpace; -use crate::policy::marksweepspace::native_ms::MarkSweepSpace; use crate::policy::immix::ImmixSpace; use crate::policy::space::{PlanCreateSpaceArgs, Space}; #[cfg(feature = "vm_space")] @@ -544,6 +543,15 @@ impl BasePlan { } } +#[cfg(feature = "immortal_as_nonmoving")] +pub type NonMovingSpace = crate::policy::immortalspace::ImmortalSpace; + +#[cfg(feature = "immix_as_nonmoving")] +pub type NonMovingSpace = crate::policy::immix::ImmixSpace; + +#[cfg(feature = "marksweep_as_nonmoving")] +pub type NonMovingSpace = crate::policy::marksweepspace::native_ms::MarkSweepSpace; + /** CommonPlan is for representing state and features used by _many_ plans, but that are not fundamental to _all_ plans. Examples include the Large Object Space and an Immortal space. Features that are fundamental to _all_ plans must be included in BasePlan. */ @@ -555,7 +563,7 @@ pub struct CommonPlan { pub los: LargeObjectSpace, // TODO: We should use a marksweep space for nonmoving. #[space] - pub nonmoving: ImmixSpace, + pub nonmoving: NonMovingSpace, #[parent] pub base: BasePlan, } @@ -573,17 +581,7 @@ impl CommonPlan { args.get_space_args("los", true, false, VMRequest::discontiguous()), false, ), - nonmoving: ImmixSpace::new(args.get_space_args( - "nonmoving", - true, - false, - VMRequest::discontiguous(), - ), crate::policy::immix::ImmixSpaceArgs { - unlog_object_when_traced: false, - #[cfg(feature = "vo_bit")] - mixed_age: false, - never_move_objects: true, - }), + nonmoving: Self::new_nonmoving_space(&mut args), base: BasePlan::new(args), } } @@ -598,14 +596,14 @@ impl CommonPlan { pub fn prepare(&mut self, tls: VMWorkerThread, full_heap: bool) { self.immortal.prepare(); self.los.prepare(full_heap); - self.nonmoving.prepare(full_heap, None); + self.prepare_nonmoving_space(full_heap); self.base.prepare(tls, full_heap) } pub fn release(&mut self, tls: VMWorkerThread, full_heap: bool) { self.immortal.release(); self.los.release(full_heap); - self.nonmoving.release(full_heap); + self.release_nonmoving_space(full_heap); self.base.release(tls, full_heap) } @@ -617,9 +615,45 @@ impl CommonPlan { &self.los } - pub fn get_nonmoving(&self) -> &ImmixSpace { + pub fn get_nonmoving(&self) -> &NonMovingSpace { &self.nonmoving } + + fn new_nonmoving_space(args: &mut CreateSpecificPlanArgs) -> NonMovingSpace { + let space_args = args.get_space_args( + "nonmoving", + true, + false, + VMRequest::discontiguous(), + ); + #[cfg(any(feature = "immortal_as_nonmoving", feature = "marksweep_as_nonmoving"))] + return NonMovingSpace::new(space_args); + #[cfg(feature = "immix_as_nonmoving")] + return NonMovingSpace::new(space_args, crate::policy::immix::ImmixSpaceArgs { + unlog_object_when_traced: false, + #[cfg(feature = "vo_bit")] + mixed_age: false, + never_move_objects: true, + }); + } + + fn prepare_nonmoving_space(&mut self, _full_heap: bool) { + #[cfg(feature = "immortal_as_nonmoving")] + self.nonmoving.prepare(); + #[cfg(feature = "immix_as_nonmoving")] + self.nonmoving.prepare(_full_heap, None); + 
#[cfg(feature = "marksweep_as_nonmoving")] + self.nonmoving.prepare(_full_heap); + } + + fn release_nonmoving_space(&mut self, _full_heap: bool) { + #[cfg(feature = "immortal_as_nonmoving")] + self.nonmoving.release(); + #[cfg(feature = "immix_as_nonmoving")] + self.nonmoving.release(_full_heap); + #[cfg(feature = "marksweep_as_nonmoving")] + self.nonmoving.release(); + } } use crate::policy::gc_work::TraceKind; diff --git a/src/plan/mutator_context.rs b/src/plan/mutator_context.rs index 382fa6de03..20603f3b02 100644 --- a/src/plan/mutator_context.rs +++ b/src/plan/mutator_context.rs @@ -30,14 +30,15 @@ pub(crate) fn unreachable_prepare_func( /// An mutator prepare implementation for plans that use [`crate::plan::global::CommonPlan`]. pub(crate) fn common_prepare_func(mutator: &mut Mutator, _tls: VMWorkerThread) { // Prepare the free list allocator used for non moving - // unsafe { - // mutator - // .allocators - // .get_allocator_mut(mutator.config.allocator_mapping[AllocationSemantics::NonMoving]) - // } - // .downcast_mut::>() - // .unwrap() - // .prepare(); + #[cfg(feature = "marksweep_as_nonmoving")] + unsafe { + mutator + .allocators + .get_allocator_mut(mutator.config.allocator_mapping[AllocationSemantics::NonMoving]) + } + .downcast_mut::>() + .unwrap() + .prepare(); } /// A place-holder implementation for `MutatorConfig::release_func` that should not be called. @@ -51,23 +52,25 @@ pub(crate) fn unreachable_release_func( /// An mutator release implementation for plans that use [`crate::plan::global::CommonPlan`]. pub(crate) fn common_release_func(mutator: &mut Mutator, _tls: VMWorkerThread) { - // // Release the free list allocator used for non moving - // unsafe { - // mutator - // .allocators - // .get_allocator_mut(mutator.config.allocator_mapping[AllocationSemantics::NonMoving]) - // } - // .downcast_mut::>() - // .unwrap() - // .release(); + // Release the free list allocator used for non moving + #[cfg(feature = "marksweep_as_nonmoving")] + unsafe { + mutator + .allocators + .get_allocator_mut(mutator.config.allocator_mapping[AllocationSemantics::NonMoving]) + } + .downcast_mut::>() + .unwrap() + .release(); + #[cfg(feature = "immix_as_nonmoving")] let immix_allocator = unsafe { mutator .allocators .get_allocator_mut(mutator.config.allocator_mapping[AllocationSemantics::NonMoving]) } .downcast_mut::>() - .unwrap(); - immix_allocator.reset(); + .unwrap() + .reset(); } /// A place-holder implementation for `MutatorConfig::release_func` that does nothing. @@ -489,8 +492,21 @@ pub(crate) fn create_allocator_mapping( map[AllocationSemantics::Los] = AllocatorSelector::LargeObject(reserved.n_large_object); reserved.n_large_object += 1; - map[AllocationSemantics::NonMoving] = AllocatorSelector::Immix(reserved.n_immix); - reserved.n_immix += 1; + #[cfg(feature = "immix_as_nonmoving")] + { + map[AllocationSemantics::NonMoving] = AllocatorSelector::Immix(reserved.n_immix); + reserved.n_immix += 1; + } + #[cfg(feature = "immortal_as_nonmoving")] + { + map[AllocationSemantics::NonMoving] = AllocatorSelector::BumpPointer(reserved.n_bump_pointer); + reserved.n_bump_pointer += 1; + } + #[cfg(feature = "marksweep_as_nonmoving")] + { + map[AllocationSemantics::NonMoving] = AllocatorSelector::FreeList(reserved.n_free_list); + reserved.n_free_list += 1; + } } reserved.validate(); @@ -552,12 +568,30 @@ pub(crate) fn create_space_mapping( plan.common().get_los(), )); reserved.n_large_object += 1; - // TODO: This should be freelist allocator once we use marksweep for nonmoving space. 
- vec.push(( - AllocatorSelector::Immix(reserved.n_immix), - plan.common().get_nonmoving(), - )); - reserved.n_immix += 1; + #[cfg(feature = "immix_as_nonmoving")] + { + vec.push(( + AllocatorSelector::Immix(reserved.n_immix), + plan.common().get_nonmoving(), + )); + reserved.n_immix += 1; + } + #[cfg(feature = "marksweep_as_nonmoving")] + { + vec.push(( + AllocatorSelector::FreeList(reserved.n_free_list), + plan.common().get_nonmoving(), + )); + reserved.n_free_list += 1; + } + #[cfg(feature = "immortal_as_nonmoving")] + { + vec.push(( + AllocatorSelector::BumpPointer(reserved.n_bump_pointer), + plan.common().get_nonmoving(), + )); + reserved.n_bump_pointer += 1; + } } reserved.validate(); From 89b934ed910bb7c947a2a72481f3d6443d1ddc54 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Mon, 5 May 2025 04:27:53 +0000 Subject: [PATCH 05/13] Make sure prepare, release, end_of_gc is properly called --- Cargo.toml | 3 +- src/plan/generational/copying/global.rs | 6 +-- src/plan/generational/copying/mutator.rs | 9 +++-- src/plan/generational/global.rs | 5 +++ src/plan/generational/immix/global.rs | 6 +-- src/plan/generational/immix/mutator.rs | 9 +++-- src/plan/generational/mod.rs | 1 - src/plan/global.rs | 49 ++++++++++++++++-------- src/plan/immix/global.rs | 4 +- src/plan/immix/mutator.rs | 9 +++-- src/plan/markcompact/global.rs | 5 ++- src/plan/markcompact/mutator.rs | 16 ++++---- src/plan/marksweep/global.rs | 5 +-- src/plan/marksweep/mutator.rs | 21 ++++++++-- src/plan/mutator_context.rs | 15 ++++++-- src/plan/nogc/global.rs | 4 ++ src/plan/pageprotect/global.rs | 5 ++- src/plan/pageprotect/mutator.rs | 8 ++-- src/plan/plan_constraints.rs | 4 +- src/plan/semispace/global.rs | 5 ++- src/plan/semispace/mutator.rs | 9 +++-- src/plan/sticky/immix/global.rs | 4 +- src/plan/sticky/immix/mutator.rs | 7 ++-- 23 files changed, 138 insertions(+), 71 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index cdee39baf2..32ef12b5bc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -82,7 +82,7 @@ name = "main" harness = false [features] -default = ["builtin_env_logger", "immortal_as_nonmoving"] +default = ["builtin_env_logger"] # Built-in env_logger. This feature is enabled by default. # The user can disable this default feature to remove `env_logger` from the dependencies. 
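The nonmovingspace feature group below is meant to be mutually exclusive: at most one feature selects the backing policy, and with none enabled the Immix-backed non-moving space is the default. A guard of this shape could enforce that at build time (a sketch; this patch does not add it, and enabling both features would already fail to compile because NonMovingSpace would be defined twice):

    #[cfg(all(feature = "immortal_as_nonmoving", feature = "marksweep_as_nonmoving"))]
    compile_error!(
        "`immortal_as_nonmoving` and `marksweep_as_nonmoving` are mutually exclusive"
    );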
@@ -234,7 +234,6 @@ malloc_mark_sweep = [] # Group:nonmovingspace immortal_as_nonmoving = [] -immix_as_nonmoving = [] marksweep_as_nonmoving = [] # If there are more groups, they should be inserted above this line diff --git a/src/plan/generational/copying/global.rs b/src/plan/generational/copying/global.rs index 2c920fda7d..b23a81c998 100644 --- a/src/plan/generational/copying/global.rs +++ b/src/plan/generational/copying/global.rs @@ -114,9 +114,9 @@ impl Plan for GenCopy { } } - fn end_of_gc(&mut self, _tls: VMWorkerThread) { - self.gen - .set_next_gc_full_heap(CommonGenPlan::should_next_gc_be_full_heap(self)); + fn end_of_gc(&mut self, tls: VMWorkerThread) { + let next_gc_full_heap = CommonGenPlan::should_next_gc_be_full_heap(self); + self.gen.end_of_gc(tls, next_gc_full_heap); } fn get_collection_reserved_pages(&self) -> usize { diff --git a/src/plan/generational/copying/mutator.rs b/src/plan/generational/copying/mutator.rs index fd33203b77..eb7e8c1511 100644 --- a/src/plan/generational/copying/mutator.rs +++ b/src/plan/generational/copying/mutator.rs @@ -3,7 +3,8 @@ use super::GenCopy; use crate::plan::barriers::ObjectBarrier; use crate::plan::generational::barrier::GenObjectBarrierSemantics; use crate::plan::generational::create_gen_space_mapping; -use crate::plan::mutator_context::unreachable_prepare_func; +use crate::plan::mutator_context::common_prepare_func; +use crate::plan::mutator_context::common_release_func; use crate::plan::mutator_context::Mutator; use crate::plan::mutator_context::MutatorBuilder; use crate::plan::mutator_context::MutatorConfig; @@ -13,7 +14,7 @@ use crate::util::{VMMutatorThread, VMWorkerThread}; use crate::vm::VMBinding; use crate::MMTK; -pub fn gencopy_mutator_release(mutator: &mut Mutator, _tls: VMWorkerThread) { +pub fn gencopy_mutator_release(mutator: &mut Mutator, tls: VMWorkerThread) { // reset nursery allocator let bump_allocator = unsafe { mutator @@ -23,6 +24,8 @@ pub fn gencopy_mutator_release(mutator: &mut Mutator, _tls: V .downcast_mut::>() .unwrap(); bump_allocator.reset(); + + common_release_func(mutator, tls); } pub fn create_gencopy_mutator( @@ -36,7 +39,7 @@ pub fn create_gencopy_mutator( mmtk.get_plan(), &gencopy.gen.nursery, )), - prepare_func: &unreachable_prepare_func, + prepare_func: &common_prepare_func, release_func: &gencopy_mutator_release, }; diff --git a/src/plan/generational/global.rs b/src/plan/generational/global.rs index 33d4651bcf..0404fc93bb 100644 --- a/src/plan/generational/global.rs +++ b/src/plan/generational/global.rs @@ -78,6 +78,11 @@ impl CommonGenPlan { self.nursery.release(); } + pub fn end_of_gc(&mut self, tls: VMWorkerThread, next_gc_full_heap: bool) { + self.set_next_gc_full_heap(next_gc_full_heap); + self.common.end_of_gc(tls); + } + /// Independent of how many pages remain in the page budget (a function of heap size), we must /// ensure we never exhaust virtual memory. Therefore we must never let the nursery grow to the /// extent that it can't be copied into the mature space. 
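CommonGenPlan::end_of_gc above keeps its two responsibilities in a fixed order: first record whether the next collection must be full-heap, then forward to CommonPlan so the shared spaces, now including the non-moving space, observe the end of the collection. The resulting chain, sketched for GenImmix (method names from the hunks in this patch; the CommonPlan and BasePlan steps appear in the plan/global.rs hunks further below):

    GenImmix::end_of_gc(tls)                   // also calls immix_space.end_of_gc()
      -> CommonGenPlan::end_of_gc(tls, full)   // stores the next-GC-full-heap decision
        -> CommonPlan::end_of_gc(tls)          // end_of_gc for the non-moving space
          -> BasePlan::end_of_gc(tls)          // currently a no-op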
diff --git a/src/plan/generational/immix/global.rs b/src/plan/generational/immix/global.rs index efb845ac3f..ff76d5ac55 100644 --- a/src/plan/generational/immix/global.rs +++ b/src/plan/generational/immix/global.rs @@ -146,9 +146,9 @@ impl Plan for GenImmix { .store(full_heap, Ordering::Relaxed); } - fn end_of_gc(&mut self, _tls: VMWorkerThread) { - self.gen - .set_next_gc_full_heap(CommonGenPlan::should_next_gc_be_full_heap(self)); + fn end_of_gc(&mut self, tls: VMWorkerThread) { + let next_gc_full_heap = CommonGenPlan::should_next_gc_be_full_heap(self); + self.gen.end_of_gc(tls, next_gc_full_heap); let did_defrag = self.immix_space.end_of_gc(); self.last_gc_was_defrag.store(did_defrag, Ordering::Relaxed); diff --git a/src/plan/generational/immix/mutator.rs b/src/plan/generational/immix/mutator.rs index 7eb691a166..e3d9346938 100644 --- a/src/plan/generational/immix/mutator.rs +++ b/src/plan/generational/immix/mutator.rs @@ -3,7 +3,8 @@ use crate::plan::barriers::ObjectBarrier; use crate::plan::generational::barrier::GenObjectBarrierSemantics; use crate::plan::generational::create_gen_space_mapping; use crate::plan::generational::immix::GenImmix; -use crate::plan::mutator_context::unreachable_prepare_func; +use crate::plan::mutator_context::common_prepare_func; +use crate::plan::mutator_context::common_release_func; use crate::plan::mutator_context::Mutator; use crate::plan::mutator_context::MutatorBuilder; use crate::plan::mutator_context::MutatorConfig; @@ -13,7 +14,7 @@ use crate::util::{VMMutatorThread, VMWorkerThread}; use crate::vm::VMBinding; use crate::MMTK; -pub fn genimmix_mutator_release(mutator: &mut Mutator, _tls: VMWorkerThread) { +pub fn genimmix_mutator_release(mutator: &mut Mutator, tls: VMWorkerThread) { // reset nursery allocator let bump_allocator = unsafe { mutator @@ -23,6 +24,8 @@ pub fn genimmix_mutator_release(mutator: &mut Mutator, _tls: .downcast_mut::>() .unwrap(); bump_allocator.reset(); + + common_release_func(mutator, tls); } pub fn create_genimmix_mutator( @@ -36,7 +39,7 @@ pub fn create_genimmix_mutator( mmtk.get_plan(), &genimmix.gen.nursery, )), - prepare_func: &unreachable_prepare_func, + prepare_func: &common_prepare_func, release_func: &genimmix_mutator_release, }; diff --git a/src/plan/generational/mod.rs b/src/plan/generational/mod.rs index 2bb61dc55f..15b63abb33 100644 --- a/src/plan/generational/mod.rs +++ b/src/plan/generational/mod.rs @@ -50,7 +50,6 @@ pub const GEN_CONSTRAINTS: PlanConstraints = PlanConstraints { may_trace_duplicate_edges: ACTIVE_BARRIER.equals(BarrierSelector::ObjectBarrier), max_non_los_default_alloc_bytes: crate::plan::plan_constraints::MAX_NON_LOS_ALLOC_BYTES_COPYING_PLAN, - needs_prepare_mutator: false, ..PlanConstraints::default() }; diff --git a/src/plan/global.rs b/src/plan/global.rs index b2f0b3b013..572e1a89af 100644 --- a/src/plan/global.rs +++ b/src/plan/global.rs @@ -197,7 +197,7 @@ pub trait Plan: 'static + HasSpaces + Sync + Downcast { /// Inform the plan about the end of a GC. It is guaranteed that there is no further work for this GC. /// This is invoked once per GC by one worker thread. `tls` is the worker thread that executes this method. - fn end_of_gc(&mut self, _tls: VMWorkerThread) {} + fn end_of_gc(&mut self, _tls: VMWorkerThread); /// Notify the plan that an emergency collection will happen. The plan should try to free as much memory as possible. /// The default implementation will force a full heap collection for generational plans. 
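With the default body removed from Plan::end_of_gc in the hunk above, every plan now has to provide the hook explicitly, which is what guarantees the non-moving space's own end-of-GC handling actually runs. For a plan built on CommonPlan the minimal implementation is a plain forward, the same shape the MarkCompact hunk below adds:

    fn end_of_gc(&mut self, tls: VMWorkerThread) {
        self.common.end_of_gc(tls);
    }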
@@ -511,6 +511,10 @@ impl BasePlan { self.vm_space.release(); } + pub fn end_of_gc(&mut self, _tls: VMWorkerThread) { + // Do nothing here. None of the spaces needs end_of_gc. + } + pub(crate) fn collection_required(&self, plan: &P, space_full: bool) -> bool { let stress_force_gc = crate::util::heap::gc_trigger::GCTrigger::::should_do_stress_gc_inner( @@ -545,7 +549,7 @@ impl BasePlan { #[cfg(feature = "immortal_as_nonmoving")] pub type NonMovingSpace = crate::policy::immortalspace::ImmortalSpace; -#[cfg(feature = "immix_as_nonmoving")] +#[cfg(not(any(feature = "immortal_as_nonmoving", feature = "marksweep_as_nonmoving")))] pub type NonMovingSpace = crate::policy::immix::ImmixSpace; #[cfg(feature = "marksweep_as_nonmoving")] @@ -606,6 +610,11 @@ impl CommonPlan { self.base.release(tls, full_heap) } + pub fn end_of_gc(&mut self, tls: VMWorkerThread) { + self.end_of_gc_nonmoving_space(); + self.base.end_of_gc(tls); + } + pub fn get_immortal(&self) -> &ImmortalSpace { &self.immortal } @@ -619,27 +628,25 @@ impl CommonPlan { } fn new_nonmoving_space(args: &mut CreateSpecificPlanArgs) -> NonMovingSpace { - let space_args = args.get_space_args( - "nonmoving", - true, - false, - VMRequest::discontiguous(), - ); + let space_args = args.get_space_args("nonmoving", true, false, VMRequest::discontiguous()); #[cfg(any(feature = "immortal_as_nonmoving", feature = "marksweep_as_nonmoving"))] return NonMovingSpace::new(space_args); - #[cfg(feature = "immix_as_nonmoving")] - return NonMovingSpace::new(space_args, crate::policy::immix::ImmixSpaceArgs { - unlog_object_when_traced: false, - #[cfg(feature = "vo_bit")] - mixed_age: false, - never_move_objects: true, - }); + #[cfg(not(any(feature = "immortal_as_nonmoving", feature = "marksweep_as_nonmoving")))] + return NonMovingSpace::new( + space_args, + crate::policy::immix::ImmixSpaceArgs { + unlog_object_when_traced: false, + #[cfg(feature = "vo_bit")] + mixed_age: false, + never_move_objects: true, + }, + ); } fn prepare_nonmoving_space(&mut self, _full_heap: bool) { #[cfg(feature = "immortal_as_nonmoving")] self.nonmoving.prepare(); - #[cfg(feature = "immix_as_nonmoving")] + #[cfg(not(any(feature = "immortal_as_nonmoving", feature = "marksweep_as_nonmoving")))] self.nonmoving.prepare(_full_heap, None); #[cfg(feature = "marksweep_as_nonmoving")] self.nonmoving.prepare(_full_heap); @@ -648,11 +655,19 @@ impl CommonPlan { fn release_nonmoving_space(&mut self, _full_heap: bool) { #[cfg(feature = "immortal_as_nonmoving")] self.nonmoving.release(); - #[cfg(feature = "immix_as_nonmoving")] + #[cfg(not(any(feature = "immortal_as_nonmoving", feature = "marksweep_as_nonmoving")))] self.nonmoving.release(_full_heap); #[cfg(feature = "marksweep_as_nonmoving")] self.nonmoving.release(); } + + fn end_of_gc_nonmoving_space(&mut self) { + // Only mark sweep and immix need end of GC. + #[cfg(feature = "marksweep_as_nonmoving")] + self.nonmoving.end_of_gc(); + #[cfg(not(any(feature = "immortal_as_nonmoving", feature = "marksweep_as_nonmoving")))] + self.nonmoving.end_of_gc(); + } } use crate::policy::gc_work::TraceKind; diff --git a/src/plan/immix/global.rs b/src/plan/immix/global.rs index 7b0bdce500..d8b5f40935 100644 --- a/src/plan/immix/global.rs +++ b/src/plan/immix/global.rs @@ -42,7 +42,6 @@ pub const IMMIX_CONSTRAINTS: PlanConstraints = PlanConstraints { moves_objects: !cfg!(feature = "immix_non_moving"), // Max immix object size is half of a block. 
max_non_los_default_alloc_bytes: crate::policy::immix::MAX_IMMIX_OBJECT_SIZE, - needs_prepare_mutator: false, ..PlanConstraints::default() }; @@ -98,9 +97,10 @@ impl Plan for Immix { self.immix_space.release(true); } - fn end_of_gc(&mut self, _tls: VMWorkerThread) { + fn end_of_gc(&mut self, tls: VMWorkerThread) { self.last_gc_was_defrag .store(self.immix_space.end_of_gc(), Ordering::Relaxed); + self.common.end_of_gc(tls); } fn current_gc_may_move_object(&self) -> bool { diff --git a/src/plan/immix/mutator.rs b/src/plan/immix/mutator.rs index 59931a5ecf..aa6354ebee 100644 --- a/src/plan/immix/mutator.rs +++ b/src/plan/immix/mutator.rs @@ -1,7 +1,8 @@ use super::Immix; +use crate::plan::mutator_context::common_prepare_func; +use crate::plan::mutator_context::common_release_func; use crate::plan::mutator_context::create_allocator_mapping; use crate::plan::mutator_context::create_space_mapping; -use crate::plan::mutator_context::unreachable_prepare_func; use crate::plan::mutator_context::Mutator; use crate::plan::mutator_context::MutatorBuilder; use crate::plan::mutator_context::MutatorConfig; @@ -14,7 +15,7 @@ use crate::vm::VMBinding; use crate::MMTK; use enum_map::EnumMap; -pub fn immix_mutator_release(mutator: &mut Mutator, _tls: VMWorkerThread) { +pub fn immix_mutator_release(mutator: &mut Mutator, tls: VMWorkerThread) { let immix_allocator = unsafe { mutator .allocators @@ -23,6 +24,8 @@ pub fn immix_mutator_release(mutator: &mut Mutator, _tls: VMW .downcast_mut::>() .unwrap(); immix_allocator.reset(); + + common_release_func(mutator, tls); } pub(in crate::plan) const RESERVED_ALLOCATORS: ReservedAllocators = ReservedAllocators { @@ -50,7 +53,7 @@ pub fn create_immix_mutator( vec.push((AllocatorSelector::Immix(0), &immix.immix_space)); vec }), - prepare_func: &unreachable_prepare_func, + prepare_func: &common_prepare_func, release_func: &immix_mutator_release, }; diff --git a/src/plan/markcompact/global.rs b/src/plan/markcompact/global.rs index fb68100ddf..f776840bfd 100644 --- a/src/plan/markcompact/global.rs +++ b/src/plan/markcompact/global.rs @@ -42,7 +42,6 @@ pub const MARKCOMPACT_CONSTRAINTS: PlanConstraints = PlanConstraints { needs_forward_after_liveness: true, max_non_los_default_alloc_bytes: crate::plan::plan_constraints::MAX_NON_LOS_ALLOC_BYTES_COPYING_PLAN, - needs_prepare_mutator: false, ..PlanConstraints::default() }; @@ -73,6 +72,10 @@ impl Plan for MarkCompact { self.mc_space.release(); } + fn end_of_gc(&mut self, tls: VMWorkerThread) { + self.common.end_of_gc(tls); + } + fn get_allocator_mapping(&self) -> &'static EnumMap { &ALLOCATOR_MAPPING } diff --git a/src/plan/markcompact/mutator.rs b/src/plan/markcompact/mutator.rs index aefcff7132..4e40743a62 100644 --- a/src/plan/markcompact/mutator.rs +++ b/src/plan/markcompact/mutator.rs @@ -1,7 +1,8 @@ use super::MarkCompact; +use crate::plan::mutator_context::common_prepare_func; +use crate::plan::mutator_context::common_release_func; use crate::plan::mutator_context::create_allocator_mapping; use crate::plan::mutator_context::create_space_mapping; -use crate::plan::mutator_context::unreachable_prepare_func; use crate::plan::mutator_context::Mutator; use crate::plan::mutator_context::MutatorBuilder; use crate::plan::mutator_context::MutatorConfig; @@ -39,24 +40,23 @@ pub fn create_markcompact_mutator( vec.push((AllocatorSelector::MarkCompact(0), markcompact.mc_space())); vec }), - prepare_func: &unreachable_prepare_func, + prepare_func: &common_prepare_func, release_func: &markcompact_mutator_release, }; let builder = 
MutatorBuilder::new(mutator_tls, mmtk, config); builder.build() } -pub fn markcompact_mutator_release( - _mutator: &mut Mutator, - _tls: VMWorkerThread, -) { +pub fn markcompact_mutator_release(mutator: &mut Mutator, tls: VMWorkerThread) { // reset the thread-local allocation bump pointer let markcompact_allocator = unsafe { - _mutator + mutator .allocators - .get_allocator_mut(_mutator.config.allocator_mapping[AllocationSemantics::Default]) + .get_allocator_mut(mutator.config.allocator_mapping[AllocationSemantics::Default]) } .downcast_mut::>() .unwrap(); markcompact_allocator.reset(); + + common_release_func(mutator, tls); } diff --git a/src/plan/marksweep/global.rs b/src/plan/marksweep/global.rs index d413d85632..61712e71ab 100644 --- a/src/plan/marksweep/global.rs +++ b/src/plan/marksweep/global.rs @@ -41,8 +41,6 @@ pub const MS_CONSTRAINTS: PlanConstraints = PlanConstraints { moves_objects: false, max_non_los_default_alloc_bytes: MAX_OBJECT_SIZE, may_trace_duplicate_edges: true, - needs_prepare_mutator: !cfg!(feature = "malloc_mark_sweep") - && !cfg!(feature = "eager_sweeping"), ..PlanConstraints::default() }; @@ -65,8 +63,9 @@ impl Plan for MarkSweep { self.common.release(tls, true); } - fn end_of_gc(&mut self, _tls: VMWorkerThread) { + fn end_of_gc(&mut self, tls: VMWorkerThread) { self.ms.end_of_gc(); + self.common.end_of_gc(tls); } fn collection_required(&self, space_full: bool, _space: Option>) -> bool { diff --git a/src/plan/marksweep/mutator.rs b/src/plan/marksweep/mutator.rs index 41a3495df9..8d5b045e07 100644 --- a/src/plan/marksweep/mutator.rs +++ b/src/plan/marksweep/mutator.rs @@ -16,12 +16,18 @@ use enum_map::EnumMap; #[cfg(feature = "malloc_mark_sweep")] mod malloc_mark_sweep { + use crate::plan::mutator_context::{common_prepare_func, common_release_func}; + use super::*; // Do nothing for malloc mark sweep (malloc allocator) - pub fn ms_mutator_prepare(_mutator: &mut Mutator, _tls: VMWorkerThread) {} - pub fn ms_mutator_release(_mutator: &mut Mutator, _tls: VMWorkerThread) {} + pub fn ms_mutator_prepare(mutator: &mut Mutator, tls: VMWorkerThread) { + common_prepare_func(mutator, tls); + } + pub fn ms_mutator_release(mutator: &mut Mutator, tls: VMWorkerThread) { + common_release_func(mutator, tls); + } // malloc mark sweep uses 1 malloc allocator @@ -69,13 +75,20 @@ mod native_mark_sweep { // We forward calls to the allocator prepare and release #[cfg(not(feature = "malloc_mark_sweep"))] - pub fn ms_mutator_prepare(mutator: &mut Mutator, _tls: VMWorkerThread) { + pub fn ms_mutator_prepare(mutator: &mut Mutator, tls: VMWorkerThread) { + use crate::plan::mutator_context::common_prepare_func; + get_freelist_allocator_mut::(mutator).prepare(); + common_prepare_func(mutator, tls); } #[cfg(not(feature = "malloc_mark_sweep"))] - pub fn ms_mutator_release(mutator: &mut Mutator, _tls: VMWorkerThread) { + pub fn ms_mutator_release(mutator: &mut Mutator, tls: VMWorkerThread) { + use crate::plan::mutator_context::common_release_func; + get_freelist_allocator_mut::(mutator).release(); + + common_release_func(mutator, tls); } // native mark sweep uses 1 free list allocator diff --git a/src/plan/mutator_context.rs b/src/plan/mutator_context.rs index 995dd3825b..34deca2556 100644 --- a/src/plan/mutator_context.rs +++ b/src/plan/mutator_context.rs @@ -70,10 +70,10 @@ pub(crate) fn common_release_func(mutator: &mut Mutator, _tls .unwrap() .release(); } - #[cfg(feature = "immix_as_nonmoving")] + #[cfg(not(any(feature = "immortal_as_nonmoving", feature = "marksweep_as_nonmoving")))] { use 
crate::util::alloc::ImmixAllocator; - let immix_allocator = unsafe { + unsafe { mutator .allocators .get_allocator_mut(mutator.config.allocator_mapping[AllocationSemantics::NonMoving]) @@ -85,6 +85,7 @@ pub(crate) fn common_release_func(mutator: &mut Mutator, _tls } /// A place-holder implementation for `MutatorConfig::release_func` that does nothing. +#[allow(dead_code)] pub(crate) fn no_op_release_func(_mutator: &mut Mutator, _tls: VMWorkerThread) {} // This struct is part of the Mutator struct. @@ -529,7 +530,10 @@ pub(crate) fn create_allocator_mapping( if include_common_plan { map[AllocationSemantics::Immortal] = reserved.add_bump_pointer_allocator(); map[AllocationSemantics::Los] = reserved.add_large_object_allocator(); - map[AllocationSemantics::NonMoving] = if cfg!(feature = "immix_as_nonmoving") { + map[AllocationSemantics::NonMoving] = if cfg!(not(any( + feature = "immortal_as_nonmoving", + feature = "marksweep_as_nonmoving" + ))) { reserved.add_immix_allocator() } else if cfg!(feature = "marksweep_as_nonmoving") { reserved.add_free_list_allocator() @@ -592,7 +596,10 @@ pub(crate) fn create_space_mapping( vec.push(( if cfg!(feature = "marksweep_as_nonmoving") { reserved.add_free_list_allocator() - } else if cfg!(feature = "immix_as_nonmoving") { + } else if cfg!(not(any( + feature = "immortal_as_nonmoving", + feature = "marksweep_as_nonmoving" + ))) { reserved.add_immix_allocator() } else if cfg!(feature = "immortal_as_nonmoving") { reserved.add_bump_pointer_allocator() diff --git a/src/plan/nogc/global.rs b/src/plan/nogc/global.rs index a05034c1c4..d6e6306fef 100644 --- a/src/plan/nogc/global.rs +++ b/src/plan/nogc/global.rs @@ -66,6 +66,10 @@ impl Plan for NoGC { unreachable!() } + fn end_of_gc(&mut self, _tls: VMWorkerThread) { + unreachable!() + } + fn get_allocator_mapping(&self) -> &'static EnumMap { &ALLOCATOR_MAPPING } diff --git a/src/plan/pageprotect/global.rs b/src/plan/pageprotect/global.rs index 65a6b2eab8..5d0d868f95 100644 --- a/src/plan/pageprotect/global.rs +++ b/src/plan/pageprotect/global.rs @@ -31,7 +31,6 @@ pub struct PageProtect { /// The plan constraints for the page protect plan. 
pub const CONSTRAINTS: PlanConstraints = PlanConstraints { moves_objects: false, - needs_prepare_mutator: false, ..PlanConstraints::default() }; @@ -58,6 +57,10 @@ impl Plan for PageProtect { self.space.release(true); } + fn end_of_gc(&mut self, tls: VMWorkerThread) { + self.common.end_of_gc(tls); + } + fn collection_required(&self, space_full: bool, _space: Option>) -> bool { self.base().collection_required(self, space_full) } diff --git a/src/plan/pageprotect/mutator.rs b/src/plan/pageprotect/mutator.rs index dd3e49a56e..ddea08f84d 100644 --- a/src/plan/pageprotect/mutator.rs +++ b/src/plan/pageprotect/mutator.rs @@ -1,6 +1,6 @@ use super::PageProtect; -use crate::plan::mutator_context::no_op_release_func; -use crate::plan::mutator_context::unreachable_prepare_func; +use crate::plan::mutator_context::common_prepare_func; +use crate::plan::mutator_context::common_release_func; use crate::plan::mutator_context::Mutator; use crate::plan::mutator_context::MutatorBuilder; use crate::plan::mutator_context::MutatorConfig; @@ -41,8 +41,8 @@ pub fn create_pp_mutator( vec.push((AllocatorSelector::LargeObject(0), &page.space)); vec }), - prepare_func: &unreachable_prepare_func, - release_func: &no_op_release_func, + prepare_func: &common_prepare_func, + release_func: &common_release_func, }; let builder = MutatorBuilder::new(mutator_tls, mmtk, config); diff --git a/src/plan/plan_constraints.rs b/src/plan/plan_constraints.rs index 1220792fb4..00cc131d57 100644 --- a/src/plan/plan_constraints.rs +++ b/src/plan/plan_constraints.rs @@ -60,10 +60,12 @@ impl PlanConstraints { needs_linear_scan: crate::util::constants::SUPPORT_CARD_SCANNING || crate::util::constants::LAZY_SWEEP, needs_concurrent_workers: false, - may_trace_duplicate_edges: false, + // We may trace duplicate edges in mark sweep. If we use mark sweep as the non moving policy, it will be included in every plan. + may_trace_duplicate_edges: cfg!(feature = "marksweep_as_nonmoving"), needs_forward_after_liveness: false, needs_log_bit: false, barrier: BarrierSelector::NoBarrier, + // FIXME: To make things easy, we just require prepare mutator. This should be fixed before reviewing.
needs_prepare_mutator: true, } } diff --git a/src/plan/semispace/global.rs b/src/plan/semispace/global.rs index 361003ef59..0205c7a77f 100644 --- a/src/plan/semispace/global.rs +++ b/src/plan/semispace/global.rs @@ -40,7 +40,6 @@ pub const SS_CONSTRAINTS: PlanConstraints = PlanConstraints { moves_objects: true, max_non_los_default_alloc_bytes: crate::plan::plan_constraints::MAX_NON_LOS_ALLOC_BYTES_COPYING_PLAN, - needs_prepare_mutator: false, ..PlanConstraints::default() }; @@ -96,6 +95,10 @@ impl Plan for SemiSpace { self.fromspace().release(); } + fn end_of_gc(&mut self, tls: VMWorkerThread) { + self.common.end_of_gc(tls) + } + fn collection_required(&self, space_full: bool, _space: Option>) -> bool { self.base().collection_required(self, space_full) } diff --git a/src/plan/semispace/mutator.rs b/src/plan/semispace/mutator.rs index 3526eef394..2a190a3134 100644 --- a/src/plan/semispace/mutator.rs +++ b/src/plan/semispace/mutator.rs @@ -1,5 +1,6 @@ use super::SemiSpace; -use crate::plan::mutator_context::unreachable_prepare_func; +use crate::plan::mutator_context::common_prepare_func; +use crate::plan::mutator_context::common_release_func; use crate::plan::mutator_context::Mutator; use crate::plan::mutator_context::MutatorBuilder; use crate::plan::mutator_context::MutatorConfig; @@ -14,7 +15,7 @@ use crate::vm::VMBinding; use crate::MMTK; use enum_map::EnumMap; -pub fn ss_mutator_release(mutator: &mut Mutator, _tls: VMWorkerThread) { +pub fn ss_mutator_release(mutator: &mut Mutator, tls: VMWorkerThread) { // rebind the allocation bump pointer to the appropriate semispace let bump_allocator = unsafe { mutator @@ -30,6 +31,8 @@ pub fn ss_mutator_release(mutator: &mut Mutator, _tls: VMWork .unwrap() .tospace(), ); + + common_release_func(mutator, tls); } const RESERVED_ALLOCATORS: ReservedAllocators = ReservedAllocators { @@ -57,7 +60,7 @@ pub fn create_ss_mutator( vec.push((AllocatorSelector::BumpPointer(0), ss.tospace())); vec }), - prepare_func: &unreachable_prepare_func, + prepare_func: &common_prepare_func, release_func: &ss_mutator_release, }; diff --git a/src/plan/sticky/immix/global.rs b/src/plan/sticky/immix/global.rs index 3ee5c9ece7..a06dee9816 100644 --- a/src/plan/sticky/immix/global.rs +++ b/src/plan/sticky/immix/global.rs @@ -135,7 +135,7 @@ impl Plan for StickyImmix { } } - fn end_of_gc(&mut self, _tls: crate::util::opaque_pointer::VMWorkerThread) { + fn end_of_gc(&mut self, tls: crate::util::opaque_pointer::VMWorkerThread) { let next_gc_full_heap = crate::plan::generational::global::CommonGenPlan::should_next_gc_be_full_heap(self); self.next_gc_full_heap @@ -144,6 +144,8 @@ impl Plan for StickyImmix { let was_defrag = self.immix.immix_space.end_of_gc(); self.immix .set_last_gc_was_defrag(was_defrag, Ordering::Relaxed); + + self.immix.common.end_of_gc(tls); } fn collection_required(&self, space_full: bool, space: Option>) -> bool { diff --git a/src/plan/sticky/immix/mutator.rs b/src/plan/sticky/immix/mutator.rs index 10030ee352..83debfb089 100644 --- a/src/plan/sticky/immix/mutator.rs +++ b/src/plan/sticky/immix/mutator.rs @@ -2,7 +2,7 @@ use crate::plan::barriers::ObjectBarrier; use crate::plan::generational::barrier::GenObjectBarrierSemantics; use crate::plan::immix; use crate::plan::mutator_context::{ - create_space_mapping, unreachable_prepare_func, MutatorBuilder, MutatorConfig, + common_prepare_func, common_release_func, create_space_mapping, MutatorBuilder, MutatorConfig, }; use crate::plan::sticky::immix::global::StickyImmix; use crate::util::alloc::AllocatorSelector; 
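Throughout this series, the mutator release functions follow one convention: do the plan-specific allocator reset first, then chain to `common_release_func` so the common plan's nonmoving allocator is also reset; the next hunk applies this to sticky immix. A schematic of the convention (with a hypothetical `myplan_mutator_release`):

```rust
use crate::plan::mutator_context::common_release_func;
use crate::util::VMWorkerThread;
use crate::vm::VMBinding;
use crate::Mutator;

// Sketch only: `myplan_mutator_release` is illustrative; the chaining to
// `common_release_func` at the end is the pattern the series establishes.
pub fn myplan_mutator_release<VM: VMBinding>(mutator: &mut Mutator<VM>, tls: VMWorkerThread) {
    // 1. Reset the plan's own default allocator (plan-specific work).
    // 2. Then let the common plan release/reset its allocators too.
    common_release_func(mutator, tls);
}
```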
@@ -12,7 +12,8 @@ use crate::vm::VMBinding; use crate::{Mutator, MMTK}; pub fn stickyimmix_mutator_release(mutator: &mut Mutator, tls: VMWorkerThread) { - immix::mutator::immix_mutator_release(mutator, tls) + immix::mutator::immix_mutator_release(mutator, tls); + common_release_func(mutator, tls); } pub use immix::mutator::ALLOCATOR_MAPPING; @@ -30,7 +31,7 @@ pub fn create_stickyimmix_mutator( vec.push((AllocatorSelector::Immix(0), stickyimmix.get_immix_space())); vec }), - prepare_func: &unreachable_prepare_func, + prepare_func: &common_prepare_func, release_func: &stickyimmix_mutator_release, }; From d98137f8686682817caae8e7cd3dbda542ca44aa Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Tue, 6 May 2025 06:09:32 +0000 Subject: [PATCH 06/13] Work around an issue about trace kind --- src/policy/immix/immixspace.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/src/policy/immix/immixspace.rs b/src/policy/immix/immixspace.rs index b6c1416574..6c50ed77b5 100644 --- a/src/policy/immix/immixspace.rs +++ b/src/policy/immix/immixspace.rs @@ -2,7 +2,7 @@ use super::defrag::StatsForDefrag; use super::line::*; use super::{block::*, defrag::Defrag}; use crate::plan::VectorObjectQueue; -use crate::policy::gc_work::{TraceKind, TRACE_KIND_TRANSITIVE_PIN}; +use crate::policy::gc_work::{TraceKind, DEFAULT_TRACE, TRACE_KIND_TRANSITIVE_PIN}; use crate::policy::sft::GCWorkerMutRef; use crate::policy::sft::SFT; use crate::policy::sft_map::SFTMap; @@ -237,6 +237,16 @@ impl crate::policy::gc_work::PolicyTraceObject for ImmixSpace true } else if KIND == TRACE_KIND_FAST || KIND == TRACE_KIND_TRANSITIVE_PIN { false + } else if KIND == DEFAULT_TRACE{ + // FIXME: This is quite hacky. + // 1. When we use immix as a non moving space, we will check this method to see + // if the non moving space may move objects or not. The answer should always be false. + // It doesn't matter what trace it is. + // 2. For StickyImmix, we use DEFAULT_TRACE for nursery GC. We may move objects. Luckily, + // this function is only used in PlanProcessEdges, and sticky immix nursery GC does not + // use PlanProcessEdges. + // I should fix this before merging. + false } else { unreachable!() } From 9ef1cb7243e57448c7c547f2062b7d5d7eb34ef1 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Tue, 6 May 2025 06:12:36 +0000 Subject: [PATCH 07/13] Fix doc --- docs/userguide/src/tutorial/code/mygc_semispace/global.rs | 6 ++++++ docs/userguide/src/tutorial/mygc/ss/collection.md | 8 ++++++++ 2 files changed, 14 insertions(+) diff --git a/docs/userguide/src/tutorial/code/mygc_semispace/global.rs b/docs/userguide/src/tutorial/code/mygc_semispace/global.rs index 98a9f3c934..51f9f9361b 100644 --- a/docs/userguide/src/tutorial/code/mygc_semispace/global.rs +++ b/docs/userguide/src/tutorial/code/mygc_semispace/global.rs @@ -128,6 +128,12 @@ impl Plan for MyGC { } // ANCHOR_END: release + // ANCHOR: end_of_gc + fn end_of_gc(&mut self, tls: VMWorkerThread) { + self.common.end_of_gc(tls); + } + // ANCHOR_END: end_of_gc + // Modify // ANCHOR: plan_get_collection_reserve fn get_collection_reserved_pages(&self) -> usize { diff --git a/docs/userguide/src/tutorial/mygc/ss/collection.md b/docs/userguide/src/tutorial/mygc/ss/collection.md index 5c89ac0685..fd287beb8f 100644 --- a/docs/userguide/src/tutorial/mygc/ss/collection.md +++ b/docs/userguide/src/tutorial/mygc/ss/collection.md @@ -157,6 +157,14 @@ with `&mygc_mutator_release`. This function will be called at the release stage allocation semantics to the new tospace. 
When the mutator threads resume, any new allocations for `Default` will then go to the new tospace. +### End of GC + +Add the method `end_of_gc()` to `mygc/global.rs`, and have it call `end_of_gc()` on the common plan: + +```rust +{{#include ../../code/mygc_semispace/global.rs:end_of_gc}} +``` + ## ProcessEdgesWork for MyGC [`ProcessEdgesWork`](https://docs.mmtk.io/api/mmtk/scheduler/gc_work/trait.ProcessEdgesWork.html) From 98e9ed5e4116042d675e3eef97d83f2fead6e064 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Tue, 6 May 2025 18:47:44 +1200 Subject: [PATCH 08/13] Update immixspace.rs --- src/policy/immix/immixspace.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/policy/immix/immixspace.rs b/src/policy/immix/immixspace.rs index 6c50ed77b5..c63eb82aca 100644 --- a/src/policy/immix/immixspace.rs +++ b/src/policy/immix/immixspace.rs @@ -237,7 +237,7 @@ impl crate::policy::gc_work::PolicyTraceObject for ImmixSpace true } else if KIND == TRACE_KIND_FAST || KIND == TRACE_KIND_TRANSITIVE_PIN { false - } else if KIND == DEFAULT_TRACE{ + } else if KIND == DEFAULT_TRACE { // FIXME: This is quite hacky. // 1. When we use immix as a non moving space, we will check this method to see // if the non moving space may move objects or not. The answer should always be false. // It doesn't matter what trace it is. // 2. For StickyImmix, we use DEFAULT_TRACE for nursery GC. We may move objects. Luckily, // this function is only used in PlanProcessEdges, and sticky immix nursery GC does not // use PlanProcessEdges. // I should fix this before merging. false From 0763fb7ec8909b0ebb4008caeff47b332a468879 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Wed, 7 May 2025 06:59:48 +0000 Subject: [PATCH 09/13] Immix needs post_scan. Fix style check --- src/plan/global.rs | 5 ++++- src/policy/immix/immixspace.rs | 1 + 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/src/plan/global.rs b/src/plan/global.rs index 572e1a89af..072117f646 100644 --- a/src/plan/global.rs +++ b/src/plan/global.rs @@ -564,8 +564,11 @@ pub struct CommonPlan { pub immortal: ImmortalSpace, #[space] pub los: LargeObjectSpace, - // TODO: We should use a marksweep space for nonmoving. #[space] + #[cfg_attr( + not(any(feature = "immortal_as_nonmoving", feature = "marksweep_as_nonmoving")), + post_scan + )] // Immix space needs post_scan pub nonmoving: NonMovingSpace, #[parent] pub base: BasePlan, diff --git a/src/policy/immix/immixspace.rs b/src/policy/immix/immixspace.rs index c63eb82aca..2c611a634a 100644 --- a/src/policy/immix/immixspace.rs +++ b/src/policy/immix/immixspace.rs @@ -232,6 +232,7 @@ impl crate::policy::gc_work::PolicyTraceObject for ImmixSpace } } + #[allow(clippy::if_same_then_else)] // DEFAULT_TRACE needs a workaround which is documented below.
fn may_move_objects() -> bool { if KIND == TRACE_KIND_DEFRAG { true From c541b2fee644c34eb472f0b0a3d82cb3743f722c Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Thu, 8 May 2025 01:46:59 +0000 Subject: [PATCH 10/13] Fix the leftover FIXMEs --- src/plan/generational/mod.rs | 1 + src/plan/immix/global.rs | 1 + src/plan/markcompact/global.rs | 1 + src/plan/marksweep/global.rs | 3 +++ src/plan/pageprotect/global.rs | 1 + src/plan/plan_constraints.rs | 4 ++-- src/plan/semispace/global.rs | 1 + src/policy/immix/immixspace.rs | 13 +++++-------- 8 files changed, 15 insertions(+), 10 deletions(-) diff --git a/src/plan/generational/mod.rs b/src/plan/generational/mod.rs index 15b63abb33..c4aa66566e 100644 --- a/src/plan/generational/mod.rs +++ b/src/plan/generational/mod.rs @@ -50,6 +50,7 @@ pub const GEN_CONSTRAINTS: PlanConstraints = PlanConstraints { may_trace_duplicate_edges: ACTIVE_BARRIER.equals(BarrierSelector::ObjectBarrier), max_non_los_default_alloc_bytes: crate::plan::plan_constraints::MAX_NON_LOS_ALLOC_BYTES_COPYING_PLAN, + needs_prepare_mutator: false || PlanConstraints::default().needs_prepare_mutator, ..PlanConstraints::default() }; diff --git a/src/plan/immix/global.rs b/src/plan/immix/global.rs index d8b5f40935..f12dc70b36 100644 --- a/src/plan/immix/global.rs +++ b/src/plan/immix/global.rs @@ -42,6 +42,7 @@ pub const IMMIX_CONSTRAINTS: PlanConstraints = PlanConstraints { moves_objects: !cfg!(feature = "immix_non_moving"), // Max immix object size is half of a block. max_non_los_default_alloc_bytes: crate::policy::immix::MAX_IMMIX_OBJECT_SIZE, + needs_prepare_mutator: false || PlanConstraints::default().needs_prepare_mutator, ..PlanConstraints::default() }; diff --git a/src/plan/markcompact/global.rs b/src/plan/markcompact/global.rs index f776840bfd..36eec608f9 100644 --- a/src/plan/markcompact/global.rs +++ b/src/plan/markcompact/global.rs @@ -42,6 +42,7 @@ pub const MARKCOMPACT_CONSTRAINTS: PlanConstraints = PlanConstraints { needs_forward_after_liveness: true, max_non_los_default_alloc_bytes: crate::plan::plan_constraints::MAX_NON_LOS_ALLOC_BYTES_COPYING_PLAN, + needs_prepare_mutator: false || PlanConstraints::default().needs_prepare_mutator, ..PlanConstraints::default() }; diff --git a/src/plan/marksweep/global.rs b/src/plan/marksweep/global.rs index 61712e71ab..f6d4b9449b 100644 --- a/src/plan/marksweep/global.rs +++ b/src/plan/marksweep/global.rs @@ -41,6 +41,9 @@ pub const MS_CONSTRAINTS: PlanConstraints = PlanConstraints { moves_objects: false, max_non_los_default_alloc_bytes: MAX_OBJECT_SIZE, may_trace_duplicate_edges: true, + needs_prepare_mutator: (!cfg!(feature = "malloc_mark_sweep") + && !cfg!(feature = "eager_sweeping")) + || PlanConstraints::default().needs_prepare_mutator, ..PlanConstraints::default() }; diff --git a/src/plan/pageprotect/global.rs b/src/plan/pageprotect/global.rs index 5d0d868f95..a0da00c1fc 100644 --- a/src/plan/pageprotect/global.rs +++ b/src/plan/pageprotect/global.rs @@ -31,6 +31,7 @@ pub struct PageProtect { /// The plan constraints for the page protect plan. 
pub const CONSTRAINTS: PlanConstraints = PlanConstraints { moves_objects: false, + needs_prepare_mutator: false || PlanConstraints::default().needs_prepare_mutator, ..PlanConstraints::default() }; diff --git a/src/plan/plan_constraints.rs b/src/plan/plan_constraints.rs index 00cc131d57..3110eb7538 100644 --- a/src/plan/plan_constraints.rs +++ b/src/plan/plan_constraints.rs @@ -65,8 +65,8 @@ impl PlanConstraints { needs_forward_after_liveness: false, needs_log_bit: false, barrier: BarrierSelector::NoBarrier, - // FIXME: To make things easy, we just require prepare mutator. This should be fixed before reviewing. - needs_prepare_mutator: true, + // If we use mark sweep as non moving space, we need to prepare mutator. See [`common_prepare_func`]. + needs_prepare_mutator: cfg!(feature = "marksweep_as_nonmoving"), } } } diff --git a/src/plan/semispace/global.rs b/src/plan/semispace/global.rs index 0205c7a77f..a06960ec54 100644 --- a/src/plan/semispace/global.rs +++ b/src/plan/semispace/global.rs @@ -40,6 +40,7 @@ pub const SS_CONSTRAINTS: PlanConstraints = PlanConstraints { moves_objects: true, max_non_los_default_alloc_bytes: crate::plan::plan_constraints::MAX_NON_LOS_ALLOC_BYTES_COPYING_PLAN, + needs_prepare_mutator: false || PlanConstraints::default().needs_prepare_mutator, ..PlanConstraints::default() }; diff --git a/src/policy/immix/immixspace.rs b/src/policy/immix/immixspace.rs index 2c611a634a..75b1db1414 100644 --- a/src/policy/immix/immixspace.rs +++ b/src/policy/immix/immixspace.rs @@ -239,14 +239,11 @@ impl crate::policy::gc_work::PolicyTraceObject for ImmixSpace } else if KIND == TRACE_KIND_FAST || KIND == TRACE_KIND_TRANSITIVE_PIN { false } else if KIND == DEFAULT_TRACE { - // FIXME: This is quite hacky. - // 1. When we use immix as a non moving space, we will check this method to see - // if the non moving space may move objects or not. The answer should always be false. - // It doesn't matter what trace it is. - // 2. For StickyImmix, we use DEFAULT_TRACE for nursery GC. We may move objects. Luckily, - // this function is only used in PlanProcessEdges, and sticky immix nursery GC does not - // use PlanProcessEdges. - // I should fix this before merging. + // FIXME: This is hacky. When we do a default trace, this should be a nonmoving space. + // The only exception is the nursery GC for sticky immix, for which, we use default trace. + // This function is only used for PlanProcessEdges, and for sticky immix nursery GC, we use + // GenNurseryProcessEdges. So it still works. But this is quite hacky anyway. + // See https://github.com/mmtk/mmtk-core/issues/1314 for details. 
false } else { unreachable!() From aa5b6eebad7b82cacaa3e43e092330bd3517bbd3 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Fri, 9 May 2025 00:09:32 +0000 Subject: [PATCH 11/13] Address reviews about cfg_if --- src/plan/global.rs | 46 +++++++++++++++++++++---------------- src/plan/mutator_context.rs | 16 +++---------- 2 files changed, 29 insertions(+), 33 deletions(-) diff --git a/src/plan/global.rs b/src/plan/global.rs index 072117f646..c5b51ffba0 100644 --- a/src/plan/global.rs +++ b/src/plan/global.rs @@ -546,14 +546,16 @@ impl BasePlan { } } -#[cfg(feature = "immortal_as_nonmoving")] -pub type NonMovingSpace = crate::policy::immortalspace::ImmortalSpace; - -#[cfg(not(any(feature = "immortal_as_nonmoving", feature = "marksweep_as_nonmoving")))] -pub type NonMovingSpace = crate::policy::immix::ImmixSpace; - -#[cfg(feature = "marksweep_as_nonmoving")] -pub type NonMovingSpace = crate::policy::marksweepspace::native_ms::MarkSweepSpace; +cfg_if::cfg_if! { + // Use immortal or mark sweep as the non moving space if the features are enabled. Otherwise use Immix. + if #[cfg(feature = "immortal_as_nonmoving")] { + pub type NonMovingSpace = crate::policy::immortalspace::ImmortalSpace; + } else if #[cfg(feature = "marksweep_as_nonmoving")] { + pub type NonMovingSpace = crate::policy::marksweepspace::native_ms::MarkSweepSpace; + } else { + pub type NonMovingSpace = crate::policy::immix::ImmixSpace; + } +} /** CommonPlan is for representing state and features used by _many_ plans, but that are not fundamental to _all_ plans. Examples include the Large Object Space and an Immortal space. Features that are fundamental to _all_ plans must be included in BasePlan. @@ -632,18 +634,22 @@ impl CommonPlan { fn new_nonmoving_space(args: &mut CreateSpecificPlanArgs) -> NonMovingSpace { let space_args = args.get_space_args("nonmoving", true, false, VMRequest::discontiguous()); - #[cfg(any(feature = "immortal_as_nonmoving", feature = "marksweep_as_nonmoving"))] - return NonMovingSpace::new(space_args); - #[cfg(not(any(feature = "immortal_as_nonmoving", feature = "marksweep_as_nonmoving")))] - return NonMovingSpace::new( - space_args, - crate::policy::immix::ImmixSpaceArgs { - unlog_object_when_traced: false, - #[cfg(feature = "vo_bit")] - mixed_age: false, - never_move_objects: true, - }, - ); + cfg_if::cfg_if! { + if #[cfg(any(feature = "immortal_as_nonmoving", feature = "marksweep_as_nonmoving"))] { + NonMovingSpace::new(space_args) + } else { + // Immix requires extra args. 
+ NonMovingSpace::new( + space_args, + crate::policy::immix::ImmixSpaceArgs { + unlog_object_when_traced: false, + #[cfg(feature = "vo_bit")] + mixed_age: false, + never_move_objects: true, + }, + ) + } + } } fn prepare_nonmoving_space(&mut self, _full_heap: bool) { diff --git a/src/plan/mutator_context.rs b/src/plan/mutator_context.rs index 34deca2556..e224ec1362 100644 --- a/src/plan/mutator_context.rs +++ b/src/plan/mutator_context.rs @@ -530,17 +530,12 @@ pub(crate) fn create_allocator_mapping( if include_common_plan { map[AllocationSemantics::Immortal] = reserved.add_bump_pointer_allocator(); map[AllocationSemantics::Los] = reserved.add_large_object_allocator(); - map[AllocationSemantics::NonMoving] = if cfg!(not(any( - feature = "immortal_as_nonmoving", - feature = "marksweep_as_nonmoving" - ))) { - reserved.add_immix_allocator() - } else if cfg!(feature = "marksweep_as_nonmoving") { + map[AllocationSemantics::NonMoving] = if cfg!(feature = "marksweep_as_nonmoving") { reserved.add_free_list_allocator() } else if cfg!(feature = "immortal_as_nonmoving") { reserved.add_bump_pointer_allocator() } else { - panic!("No policy selected for nonmoving space") + reserved.add_immix_allocator() }; } @@ -596,15 +591,10 @@ pub(crate) fn create_space_mapping( vec.push(( if cfg!(feature = "marksweep_as_nonmoving") { reserved.add_free_list_allocator() - } else if cfg!(not(any( - feature = "immortal_as_nonmoving", - feature = "marksweep_as_nonmoving" - ))) { - reserved.add_immix_allocator() } else if cfg!(feature = "immortal_as_nonmoving") { reserved.add_bump_pointer_allocator() } else { - panic!("No policy selected for nonmoving space") + reserved.add_immix_allocator() }, plan.common().get_nonmoving(), )); From 57624ad22517c2063077eb481a063399a15b77fc Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Fri, 9 May 2025 06:59:27 +0000 Subject: [PATCH 12/13] Use cfg_if where we can. Remove unnecessary needs_mutator_prepare. --- src/plan/generational/mod.rs | 1 - src/plan/global.rs | 44 +++++++++++++++++++------------ src/plan/immix/global.rs | 1 - src/plan/markcompact/global.rs | 1 - src/plan/mutator_context.rs | 48 ++++++++++++++++++---------------- src/plan/pageprotect/global.rs | 1 - src/plan/semispace/global.rs | 1 - 7 files changed, 52 insertions(+), 45 deletions(-) diff --git a/src/plan/generational/mod.rs b/src/plan/generational/mod.rs index c4aa66566e..15b63abb33 100644 --- a/src/plan/generational/mod.rs +++ b/src/plan/generational/mod.rs @@ -50,7 +50,6 @@ pub const GEN_CONSTRAINTS: PlanConstraints = PlanConstraints { may_trace_duplicate_edges: ACTIVE_BARRIER.equals(BarrierSelector::ObjectBarrier), max_non_los_default_alloc_bytes: crate::plan::plan_constraints::MAX_NON_LOS_ALLOC_BYTES_COPYING_PLAN, - needs_prepare_mutator: false || PlanConstraints::default().needs_prepare_mutator, ..PlanConstraints::default() }; diff --git a/src/plan/global.rs b/src/plan/global.rs index c5b51ffba0..5a5bb38ab5 100644 --- a/src/plan/global.rs +++ b/src/plan/global.rs @@ -653,29 +653,39 @@ impl CommonPlan { } fn prepare_nonmoving_space(&mut self, _full_heap: bool) { - #[cfg(feature = "immortal_as_nonmoving")] - self.nonmoving.prepare(); - #[cfg(not(any(feature = "immortal_as_nonmoving", feature = "marksweep_as_nonmoving")))] - self.nonmoving.prepare(_full_heap, None); - #[cfg(feature = "marksweep_as_nonmoving")] - self.nonmoving.prepare(_full_heap); + cfg_if::cfg_if! 
{ + if #[cfg(feature = "immortal_as_nonmoving")] { + self.nonmoving.prepare(); + } else if #[cfg(feature = "marksweep_as_nonmoving")] { + self.nonmoving.prepare(_full_heap); + } else { + self.nonmoving.prepare(_full_heap, None); + } + } } fn release_nonmoving_space(&mut self, _full_heap: bool) { - #[cfg(feature = "immortal_as_nonmoving")] - self.nonmoving.release(); - #[cfg(not(any(feature = "immortal_as_nonmoving", feature = "marksweep_as_nonmoving")))] - self.nonmoving.release(_full_heap); - #[cfg(feature = "marksweep_as_nonmoving")] - self.nonmoving.release(); + cfg_if::cfg_if! { + if #[cfg(feature = "immortal_as_nonmoving")] { + self.nonmoving.release(); + } else if #[cfg(feature = "marksweep_as_nonmoving")] { + self.nonmoving.release(); + } else { + self.nonmoving.release(_full_heap); + } + } } fn end_of_gc_nonmoving_space(&mut self) { - // Only mark sweep and immix need end of GC. - #[cfg(feature = "marksweep_as_nonmoving")] - self.nonmoving.end_of_gc(); - #[cfg(not(any(feature = "immortal_as_nonmoving", feature = "marksweep_as_nonmoving")))] - self.nonmoving.end_of_gc(); + cfg_if::cfg_if! { + if #[cfg(feature = "immortal_as_nonmoving")] { + // Nothing we need to do for immortal space. + } else if #[cfg(feature = "marksweep_as_nonmoving")] { + self.nonmoving.end_of_gc(); + } else { + self.nonmoving.end_of_gc(); + } + } } } diff --git a/src/plan/immix/global.rs b/src/plan/immix/global.rs index f12dc70b36..d8b5f40935 100644 --- a/src/plan/immix/global.rs +++ b/src/plan/immix/global.rs @@ -42,7 +42,6 @@ pub const IMMIX_CONSTRAINTS: PlanConstraints = PlanConstraints { moves_objects: !cfg!(feature = "immix_non_moving"), // Max immix object size is half of a block. max_non_los_default_alloc_bytes: crate::policy::immix::MAX_IMMIX_OBJECT_SIZE, - needs_prepare_mutator: false || PlanConstraints::default().needs_prepare_mutator, ..PlanConstraints::default() }; diff --git a/src/plan/markcompact/global.rs b/src/plan/markcompact/global.rs index 36eec608f9..f776840bfd 100644 --- a/src/plan/markcompact/global.rs +++ b/src/plan/markcompact/global.rs @@ -42,7 +42,6 @@ pub const MARKCOMPACT_CONSTRAINTS: PlanConstraints = PlanConstraints { needs_forward_after_liveness: true, max_non_los_default_alloc_bytes: crate::plan::plan_constraints::MAX_NON_LOS_ALLOC_BYTES_COPYING_PLAN, - needs_prepare_mutator: false || PlanConstraints::default().needs_prepare_mutator, ..PlanConstraints::default() }; diff --git a/src/plan/mutator_context.rs b/src/plan/mutator_context.rs index e224ec1362..6786bef568 100644 --- a/src/plan/mutator_context.rs +++ b/src/plan/mutator_context.rs @@ -57,30 +57,32 @@ pub(crate) fn unreachable_release_func( /// An mutator release implementation for plans that use [`crate::plan::global::CommonPlan`]. #[allow(unused_variables)] pub(crate) fn common_release_func(mutator: &mut Mutator, _tls: VMWorkerThread) { - // Release the free list allocator used for non moving - #[cfg(feature = "marksweep_as_nonmoving")] - { - use crate::util::alloc::FreeListAllocator; - unsafe { - mutator - .allocators - .get_allocator_mut(mutator.config.allocator_mapping[AllocationSemantics::NonMoving]) - } - .downcast_mut::>() - .unwrap() - .release(); - } - #[cfg(not(any(feature = "immortal_as_nonmoving", feature = "marksweep_as_nonmoving")))] - { - use crate::util::alloc::ImmixAllocator; - unsafe { - mutator - .allocators - .get_allocator_mut(mutator.config.allocator_mapping[AllocationSemantics::NonMoving]) + cfg_if::cfg_if!
{ + if #[cfg(feature = "marksweep_as_nonmoving")] { + // Release the free list allocator used for non moving + use crate::util::alloc::FreeListAllocator; + unsafe { + mutator + .allocators + .get_allocator_mut(mutator.config.allocator_mapping[AllocationSemantics::NonMoving]) + } + .downcast_mut::>() + .unwrap() + .release(); + } else if #[cfg(feature = "immortal_as_nonmoving")] { + // Do nothing for the bump pointer allocator + } else { + // Reset the Immix allocator + use crate::util::alloc::ImmixAllocator; + unsafe { + mutator + .allocators + .get_allocator_mut(mutator.config.allocator_mapping[AllocationSemantics::NonMoving]) + } + .downcast_mut::>() + .unwrap() + .reset(); } - .downcast_mut::>() - .unwrap() - .reset(); } } diff --git a/src/plan/pageprotect/global.rs b/src/plan/pageprotect/global.rs index a0da00c1fc..5d0d868f95 100644 --- a/src/plan/pageprotect/global.rs +++ b/src/plan/pageprotect/global.rs @@ -31,7 +31,6 @@ pub struct PageProtect { /// The plan constraints for the page protect plan. pub const CONSTRAINTS: PlanConstraints = PlanConstraints { moves_objects: false, - needs_prepare_mutator: false || PlanConstraints::default().needs_prepare_mutator, ..PlanConstraints::default() }; diff --git a/src/plan/semispace/global.rs b/src/plan/semispace/global.rs index a06960ec54..0205c7a77f 100644 --- a/src/plan/semispace/global.rs +++ b/src/plan/semispace/global.rs @@ -40,7 +40,6 @@ pub const SS_CONSTRAINTS: PlanConstraints = PlanConstraints { moves_objects: true, max_non_los_default_alloc_bytes: crate::plan::plan_constraints::MAX_NON_LOS_ALLOC_BYTES_COPYING_PLAN, - needs_prepare_mutator: false || PlanConstraints::default().needs_prepare_mutator, ..PlanConstraints::default() }; From bdf69fdb050eae636782c8762deb4b50e160545e Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Fri, 9 May 2025 07:09:09 +0000 Subject: [PATCH 13/13] Minor refactoring --- src/plan/mutator_context.rs | 61 ++++++++++++++++++++----------------- 1 file changed, 33 insertions(+), 28 deletions(-) diff --git a/src/plan/mutator_context.rs b/src/plan/mutator_context.rs index 6786bef568..ac2080c79e 100644 --- a/src/plan/mutator_context.rs +++ b/src/plan/mutator_context.rs @@ -32,17 +32,12 @@ pub(crate) fn unreachable_prepare_func( pub(crate) fn common_prepare_func(mutator: &mut Mutator, _tls: VMWorkerThread) { // Prepare the free list allocator used for non moving #[cfg(feature = "marksweep_as_nonmoving")] - { - use crate::util::alloc::FreeListAllocator; - unsafe { - mutator - .allocators - .get_allocator_mut(mutator.config.allocator_mapping[AllocationSemantics::NonMoving]) - } - .downcast_mut::>() - .unwrap() - .prepare(); + unsafe { + mutator.allocator_impl_mut_for_semantic::>( + AllocationSemantics::NonMoving, + ) } + .prepare(); } /// A place-holder implementation for `MutatorConfig::release_func` that should not be called. @@ -60,28 +55,16 @@ pub(crate) fn common_release_func(mutator: &mut Mutator, _tls cfg_if::cfg_if!
{ if #[cfg(feature = "marksweep_as_nonmoving")] { // Release the free list allocator used for non moving - use crate::util::alloc::FreeListAllocator; - unsafe { - mutator - .allocators - .get_allocator_mut(mutator.config.allocator_mapping[AllocationSemantics::NonMoving]) - } - .downcast_mut::>() - .unwrap() - .release(); + unsafe { mutator.allocator_impl_mut_for_semantic::>( + AllocationSemantics::NonMoving, + )}.release(); } else if #[cfg(feature = "immortal_as_nonmoving")] { // Do nothing for the bump pointer allocator } else { // Reset the Immix allocator - use crate::util::alloc::ImmixAllocator; - unsafe { - mutator - .allocators - .get_allocator_mut(mutator.config.allocator_mapping[AllocationSemantics::NonMoving]) - } - .downcast_mut::>() - .unwrap() - .reset(); + unsafe { mutator.allocator_impl_mut_for_semantic::>( + AllocationSemantics::NonMoving, + )}.reset(); } } } @@ -314,6 +297,28 @@ impl Mutator { self.allocators.get_typed_allocator_mut(selector) } + /// Get the allocator of a concrete type for the semantic. + /// + /// # Safety + /// The semantic needs to match the allocator type. + pub unsafe fn allocator_impl_for_semantic>( + &self, + semantic: AllocationSemantics, + ) -> &T { + self.allocator_impl::(self.config.allocator_mapping[semantic]) + } + + /// Get the mutable allocator of a concrete type for the semantic. + /// + /// # Safety + /// The semantic needs to match the allocator type. + pub unsafe fn allocator_impl_mut_for_semantic>( + &mut self, + semantic: AllocationSemantics, + ) -> &mut T { + self.allocator_impl_mut::(self.config.allocator_mapping[semantic]) + } + /// Return the base offset from a mutator pointer to the allocator specified by the selector. pub fn get_allocator_base_offset(selector: AllocatorSelector) -> usize { use crate::util::alloc::*;
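For reference, a call site of the new `allocator_impl_mut_for_semantic` helper could look like the following sketch. It assumes the `marksweep_as_nonmoving` feature, since the helper's safety contract requires that `AllocationSemantics::NonMoving` really maps to a `FreeListAllocator`; the function name is illustrative:

```rust
use crate::util::alloc::FreeListAllocator;
use crate::vm::VMBinding;
use crate::{AllocationSemantics, Mutator};

// Sketch only: release the nonmoving free-list allocator via the new helper,
// mirroring what `common_release_func` does in the hunk above.
fn release_nonmoving_allocator<VM: VMBinding>(mutator: &mut Mutator<VM>) {
    // Safety: with `marksweep_as_nonmoving`, the NonMoving semantic maps to
    // a FreeListAllocator, so the typed lookup inside the helper is valid.
    unsafe {
        mutator.allocator_impl_mut_for_semantic::<FreeListAllocator<VM>>(
            AllocationSemantics::NonMoving,
        )
    }
    .release();
}
```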