diff --git a/Cargo.toml b/Cargo.toml index 59f93599ca..32ef12b5bc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -224,9 +224,6 @@ malloc_jemalloc = ["dep:jemalloc-sys"] # is not compiled in default builds. malloc_native_mimalloc = [] -# If there are more groups, they should be inserted above this line -# Group:end - # Group:marksweepallocation # default is native allocator with lazy sweeping eager_sweeping = [] @@ -234,4 +231,10 @@ eager_sweeping = [] # normal heap range, we will have to use chunk-based SFT table. Turning on this feature will use a different SFT map implementation on 64bits, # and will affect all the plans in the build. Please be aware of the consequence, and this is only meant to be experimental use. malloc_mark_sweep = [] + +# Group:nonmovingspace +immortal_as_nonmoving = [] +marksweep_as_nonmoving = [] + +# If there are more groups, they should be inserted above this line # Group:end diff --git a/docs/userguide/src/tutorial/code/mygc_semispace/global.rs b/docs/userguide/src/tutorial/code/mygc_semispace/global.rs index 98a9f3c934..51f9f9361b 100644 --- a/docs/userguide/src/tutorial/code/mygc_semispace/global.rs +++ b/docs/userguide/src/tutorial/code/mygc_semispace/global.rs @@ -128,6 +128,12 @@ impl Plan for MyGC { } // ANCHOR_END: release + // ANCHOR: end_of_gc + fn end_of_gc(&mut self, tls: VMWorkerThread) { + self.common.end_of_gc(tls); + } + // ANCHOR_END: end_of_gc + // Modify // ANCHOR: plan_get_collection_reserve fn get_collection_reserved_pages(&self) -> usize { diff --git a/docs/userguide/src/tutorial/mygc/ss/collection.md b/docs/userguide/src/tutorial/mygc/ss/collection.md index 5c89ac0685..fd287beb8f 100644 --- a/docs/userguide/src/tutorial/mygc/ss/collection.md +++ b/docs/userguide/src/tutorial/mygc/ss/collection.md @@ -157,6 +157,14 @@ with `&mygc_mutator_release`. This function will be called at the release stage allocation semantics to the new tospace. When the mutator threads resume, any new allocations for `Default` will then go to the new tospace. +### End of GC + +Add an `end_of_gc()` method to `MyGC` in `mygc/global.rs` that calls `end_of_gc()` on the common plan:
+ +```rust +{{#include ../../code/mygc_semispace/global.rs:end_of_gc}} +``` + ## ProcessEdgesWork for MyGC [`ProcessEdgesWork`](https://docs.mmtk.io/api/mmtk/scheduler/gc_work/trait.ProcessEdgesWork.html) diff --git a/src/plan/generational/copying/global.rs b/src/plan/generational/copying/global.rs index 2c920fda7d..b23a81c998 100644 --- a/src/plan/generational/copying/global.rs +++ b/src/plan/generational/copying/global.rs @@ -114,9 +114,9 @@ impl Plan for GenCopy { } } - fn end_of_gc(&mut self, _tls: VMWorkerThread) { - self.gen - .set_next_gc_full_heap(CommonGenPlan::should_next_gc_be_full_heap(self)); + fn end_of_gc(&mut self, tls: VMWorkerThread) { + let next_gc_full_heap = CommonGenPlan::should_next_gc_be_full_heap(self); + self.gen.end_of_gc(tls, next_gc_full_heap); } fn get_collection_reserved_pages(&self) -> usize { diff --git a/src/plan/generational/copying/mutator.rs b/src/plan/generational/copying/mutator.rs index fd33203b77..eb7e8c1511 100644 --- a/src/plan/generational/copying/mutator.rs +++ b/src/plan/generational/copying/mutator.rs @@ -3,7 +3,8 @@ use super::GenCopy; use crate::plan::barriers::ObjectBarrier; use crate::plan::generational::barrier::GenObjectBarrierSemantics; use crate::plan::generational::create_gen_space_mapping; -use crate::plan::mutator_context::unreachable_prepare_func; +use crate::plan::mutator_context::common_prepare_func; +use crate::plan::mutator_context::common_release_func; use crate::plan::mutator_context::Mutator; use crate::plan::mutator_context::MutatorBuilder; use crate::plan::mutator_context::MutatorConfig; @@ -13,7 +14,7 @@ use crate::util::{VMMutatorThread, VMWorkerThread}; use crate::vm::VMBinding; use crate::MMTK; -pub fn gencopy_mutator_release(mutator: &mut Mutator, _tls: VMWorkerThread) { +pub fn gencopy_mutator_release(mutator: &mut Mutator, tls: VMWorkerThread) { // reset nursery allocator let bump_allocator = unsafe { mutator @@ -23,6 +24,8 @@ pub fn gencopy_mutator_release(mutator: &mut Mutator, _tls: V .downcast_mut::>() .unwrap(); bump_allocator.reset(); + + common_release_func(mutator, tls); } pub fn create_gencopy_mutator( @@ -36,7 +39,7 @@ pub fn create_gencopy_mutator( mmtk.get_plan(), &gencopy.gen.nursery, )), - prepare_func: &unreachable_prepare_func, + prepare_func: &common_prepare_func, release_func: &gencopy_mutator_release, }; diff --git a/src/plan/generational/global.rs b/src/plan/generational/global.rs index 33d4651bcf..0404fc93bb 100644 --- a/src/plan/generational/global.rs +++ b/src/plan/generational/global.rs @@ -78,6 +78,11 @@ impl CommonGenPlan { self.nursery.release(); } + pub fn end_of_gc(&mut self, tls: VMWorkerThread, next_gc_full_heap: bool) { + self.set_next_gc_full_heap(next_gc_full_heap); + self.common.end_of_gc(tls); + } + /// Independent of how many pages remain in the page budget (a function of heap size), we must /// ensure we never exhaust virtual memory. Therefore we must never let the nursery grow to the /// extent that it can't be copied into the mature space. 
diff --git a/src/plan/generational/immix/global.rs b/src/plan/generational/immix/global.rs index cc0ada516e..ff76d5ac55 100644 --- a/src/plan/generational/immix/global.rs +++ b/src/plan/generational/immix/global.rs @@ -131,7 +131,7 @@ impl Plan for GenImmix { if full_heap { self.immix_space.prepare( full_heap, - crate::policy::immix::defrag::StatsForDefrag::new(self), + Some(crate::policy::immix::defrag::StatsForDefrag::new(self)), ); } } @@ -146,9 +146,9 @@ impl Plan for GenImmix { .store(full_heap, Ordering::Relaxed); } - fn end_of_gc(&mut self, _tls: VMWorkerThread) { - self.gen - .set_next_gc_full_heap(CommonGenPlan::should_next_gc_be_full_heap(self)); + fn end_of_gc(&mut self, tls: VMWorkerThread) { + let next_gc_full_heap = CommonGenPlan::should_next_gc_be_full_heap(self); + self.gen.end_of_gc(tls, next_gc_full_heap); let did_defrag = self.immix_space.end_of_gc(); self.last_gc_was_defrag.store(did_defrag, Ordering::Relaxed); diff --git a/src/plan/generational/immix/mutator.rs b/src/plan/generational/immix/mutator.rs index 7eb691a166..e3d9346938 100644 --- a/src/plan/generational/immix/mutator.rs +++ b/src/plan/generational/immix/mutator.rs @@ -3,7 +3,8 @@ use crate::plan::barriers::ObjectBarrier; use crate::plan::generational::barrier::GenObjectBarrierSemantics; use crate::plan::generational::create_gen_space_mapping; use crate::plan::generational::immix::GenImmix; -use crate::plan::mutator_context::unreachable_prepare_func; +use crate::plan::mutator_context::common_prepare_func; +use crate::plan::mutator_context::common_release_func; use crate::plan::mutator_context::Mutator; use crate::plan::mutator_context::MutatorBuilder; use crate::plan::mutator_context::MutatorConfig; @@ -13,7 +14,7 @@ use crate::util::{VMMutatorThread, VMWorkerThread}; use crate::vm::VMBinding; use crate::MMTK; -pub fn genimmix_mutator_release(mutator: &mut Mutator, _tls: VMWorkerThread) { +pub fn genimmix_mutator_release(mutator: &mut Mutator, tls: VMWorkerThread) { // reset nursery allocator let bump_allocator = unsafe { mutator @@ -23,6 +24,8 @@ pub fn genimmix_mutator_release(mutator: &mut Mutator, _tls: .downcast_mut::>() .unwrap(); bump_allocator.reset(); + + common_release_func(mutator, tls); } pub fn create_genimmix_mutator( @@ -36,7 +39,7 @@ pub fn create_genimmix_mutator( mmtk.get_plan(), &genimmix.gen.nursery, )), - prepare_func: &unreachable_prepare_func, + prepare_func: &common_prepare_func, release_func: &genimmix_mutator_release, }; diff --git a/src/plan/generational/mod.rs b/src/plan/generational/mod.rs index 2bb61dc55f..15b63abb33 100644 --- a/src/plan/generational/mod.rs +++ b/src/plan/generational/mod.rs @@ -50,7 +50,6 @@ pub const GEN_CONSTRAINTS: PlanConstraints = PlanConstraints { may_trace_duplicate_edges: ACTIVE_BARRIER.equals(BarrierSelector::ObjectBarrier), max_non_los_default_alloc_bytes: crate::plan::plan_constraints::MAX_NON_LOS_ALLOC_BYTES_COPYING_PLAN, - needs_prepare_mutator: false, ..PlanConstraints::default() }; diff --git a/src/plan/global.rs b/src/plan/global.rs index 062cdc969b..5a5bb38ab5 100644 --- a/src/plan/global.rs +++ b/src/plan/global.rs @@ -197,7 +197,7 @@ pub trait Plan: 'static + HasSpaces + Sync + Downcast { /// Inform the plan about the end of a GC. It is guaranteed that there is no further work for this GC. /// This is invoked once per GC by one worker thread. `tls` is the worker thread that executes this method. 
- fn end_of_gc(&mut self, _tls: VMWorkerThread) {} + fn end_of_gc(&mut self, _tls: VMWorkerThread); /// Notify the plan that an emergency collection will happen. The plan should try to free as much memory as possible. /// The default implementation will force a full heap collection for generational plans. @@ -511,6 +511,10 @@ impl BasePlan { self.vm_space.release(); } + pub fn end_of_gc(&mut self, _tls: VMWorkerThread) { + // Do nothing here. None of the spaces needs end_of_gc. + } + pub(crate) fn collection_required(&self, plan: &P, space_full: bool) -> bool { let stress_force_gc = crate::util::heap::gc_trigger::GCTrigger::::should_do_stress_gc_inner( @@ -542,6 +546,17 @@ impl BasePlan { } } +cfg_if::cfg_if! { + // Use immortal or mark sweep as the non moving space if the features are enabled. Otherwise use Immix. + if #[cfg(feature = "immortal_as_nonmoving")] { + pub type NonMovingSpace = crate::policy::immortalspace::ImmortalSpace; + } else if #[cfg(feature = "marksweep_as_nonmoving")] { + pub type NonMovingSpace = crate::policy::marksweepspace::native_ms::MarkSweepSpace; + } else { + pub type NonMovingSpace = crate::policy::immix::ImmixSpace; + } +} + /** CommonPlan is for representing state and features used by _many_ plans, but that are not fundamental to _all_ plans. Examples include the Large Object Space and an Immortal space. Features that are fundamental to _all_ plans must be included in BasePlan. */ @@ -551,9 +566,12 @@ pub struct CommonPlan { pub immortal: ImmortalSpace, #[space] pub los: LargeObjectSpace, - // TODO: We should use a marksweep space for nonmoving. #[space] - pub nonmoving: ImmortalSpace, + #[cfg_attr( + not(any(feature = "immortal_as_nonmoving", feature = "marksweep_as_nonmoving")), + post_scan + )] // Immix space needs post_scan + pub nonmoving: NonMovingSpace, #[parent] pub base: BasePlan, } @@ -571,12 +589,7 @@ impl CommonPlan { args.get_space_args("los", true, false, VMRequest::discontiguous()), false, ), - nonmoving: ImmortalSpace::new(args.get_space_args( - "nonmoving", - true, - false, - VMRequest::discontiguous(), - )), + nonmoving: Self::new_nonmoving_space(&mut args), base: BasePlan::new(args), } } @@ -591,17 +604,22 @@ impl CommonPlan { pub fn prepare(&mut self, tls: VMWorkerThread, full_heap: bool) { self.immortal.prepare(); self.los.prepare(full_heap); - self.nonmoving.prepare(); + self.prepare_nonmoving_space(full_heap); self.base.prepare(tls, full_heap) } pub fn release(&mut self, tls: VMWorkerThread, full_heap: bool) { self.immortal.release(); self.los.release(full_heap); - self.nonmoving.release(); + self.release_nonmoving_space(full_heap); self.base.release(tls, full_heap) } + pub fn end_of_gc(&mut self, tls: VMWorkerThread) { + self.end_of_gc_nonmoving_space(); + self.base.end_of_gc(tls); + } + pub fn get_immortal(&self) -> &ImmortalSpace { &self.immortal } @@ -610,9 +628,65 @@ impl CommonPlan { &self.los } - pub fn get_nonmoving(&self) -> &ImmortalSpace { + pub fn get_nonmoving(&self) -> &NonMovingSpace { &self.nonmoving } + + fn new_nonmoving_space(args: &mut CreateSpecificPlanArgs) -> NonMovingSpace { + let space_args = args.get_space_args("nonmoving", true, false, VMRequest::discontiguous()); + cfg_if::cfg_if! { + if #[cfg(any(feature = "immortal_as_nonmoving", feature = "marksweep_as_nonmoving"))] { + NonMovingSpace::new(space_args) + } else { + // Immix requires extra args. 
+ NonMovingSpace::new( + space_args, + crate::policy::immix::ImmixSpaceArgs { + unlog_object_when_traced: false, + #[cfg(feature = "vo_bit")] + mixed_age: false, + never_move_objects: true, + }, + ) + } + } + } + + fn prepare_nonmoving_space(&mut self, _full_heap: bool) { + cfg_if::cfg_if! { + if #[cfg(feature = "immortal_as_nonmoving")] { + self.nonmoving.prepare(); + } else if #[cfg(feature = "marksweep_as_nonmoving")] { + self.nonmoving.prepare(_full_heap); + } else { + self.nonmoving.prepare(_full_heap, None); + } + } + } + + fn release_nonmoving_space(&mut self, _full_heap: bool) { + cfg_if::cfg_if! { + if #[cfg(feature = "immortal_as_nonmoving")] { + self.nonmoving.release(); + } else if #[cfg(feature = "marksweep_as_nonmoving")] { + self.nonmoving.release(); + } else { + self.nonmoving.release(_full_heap); + } + } + } + + fn end_of_gc_nonmoving_space(&mut self) { + cfg_if::cfg_if! { + if #[cfg(feature = "immortal_as_nonmoving")] { + // Nothing we need to do for immortal space. + } else if #[cfg(feature = "marksweep_as_nonmoving")] { + self.nonmoving.end_of_gc(); + } else { + self.nonmoving.end_of_gc(); + } + } + } } use crate::policy::gc_work::TraceKind; diff --git a/src/plan/immix/global.rs b/src/plan/immix/global.rs index 1fcced92ef..d8b5f40935 100644 --- a/src/plan/immix/global.rs +++ b/src/plan/immix/global.rs @@ -42,7 +42,6 @@ pub const IMMIX_CONSTRAINTS: PlanConstraints = PlanConstraints { moves_objects: !cfg!(feature = "immix_non_moving"), // Max immix object size is half of a block. max_non_los_default_alloc_bytes: crate::policy::immix::MAX_IMMIX_OBJECT_SIZE, - needs_prepare_mutator: false, ..PlanConstraints::default() }; @@ -88,7 +87,7 @@ impl Plan for Immix { self.common.prepare(tls, true); self.immix_space.prepare( true, - crate::policy::immix::defrag::StatsForDefrag::new(self), + Some(crate::policy::immix::defrag::StatsForDefrag::new(self)), ); } @@ -98,9 +97,10 @@ impl Plan for Immix { self.common.release(tls, true); self.immix_space.release(true); } - fn end_of_gc(&mut self, _tls: VMWorkerThread) { + fn end_of_gc(&mut self, tls: VMWorkerThread) { self.last_gc_was_defrag .store(self.immix_space.end_of_gc(), Ordering::Relaxed); + self.common.end_of_gc(tls); } fn current_gc_may_move_object(&self) -> bool { diff --git a/src/plan/immix/mutator.rs b/src/plan/immix/mutator.rs index 59931a5ecf..aa6354ebee 100644 --- a/src/plan/immix/mutator.rs +++ b/src/plan/immix/mutator.rs @@ -1,7 +1,8 @@ use super::Immix; +use crate::plan::mutator_context::common_prepare_func; +use crate::plan::mutator_context::common_release_func; use crate::plan::mutator_context::create_allocator_mapping; use crate::plan::mutator_context::create_space_mapping; -use crate::plan::mutator_context::unreachable_prepare_func; use crate::plan::mutator_context::Mutator; use crate::plan::mutator_context::MutatorBuilder; use crate::plan::mutator_context::MutatorConfig; @@ -14,7 +15,7 @@ use crate::util::{VMMutatorThread, VMWorkerThread}; use crate::vm::VMBinding; use crate::MMTK; use enum_map::EnumMap; -pub fn immix_mutator_release(mutator: &mut Mutator, _tls: VMWorkerThread) { +pub fn immix_mutator_release(mutator: &mut Mutator, tls: VMWorkerThread) { let immix_allocator = unsafe { mutator .allocators @@ -23,6 +24,8 @@ pub fn immix_mutator_release(mutator: &mut Mutator, _tls: VMW .downcast_mut::>() .unwrap(); immix_allocator.reset(); + + common_release_func(mutator, tls); } pub(in crate::plan) const RESERVED_ALLOCATORS: ReservedAllocators = ReservedAllocators { @@ -50,7 +53,7 @@ pub fn create_immix_mutator( vec.push((AllocatorSelector::Immix(0), &immix.immix_space)); vec }), -
prepare_func: &unreachable_prepare_func, + prepare_func: &common_prepare_func, release_func: &immix_mutator_release, }; diff --git a/src/plan/markcompact/global.rs b/src/plan/markcompact/global.rs index fb68100ddf..f776840bfd 100644 --- a/src/plan/markcompact/global.rs +++ b/src/plan/markcompact/global.rs @@ -42,7 +42,6 @@ pub const MARKCOMPACT_CONSTRAINTS: PlanConstraints = PlanConstraints { needs_forward_after_liveness: true, max_non_los_default_alloc_bytes: crate::plan::plan_constraints::MAX_NON_LOS_ALLOC_BYTES_COPYING_PLAN, - needs_prepare_mutator: false, ..PlanConstraints::default() }; @@ -73,6 +72,10 @@ impl Plan for MarkCompact { self.mc_space.release(); } + fn end_of_gc(&mut self, tls: VMWorkerThread) { + self.common.end_of_gc(tls); + } + fn get_allocator_mapping(&self) -> &'static EnumMap { &ALLOCATOR_MAPPING } diff --git a/src/plan/markcompact/mutator.rs b/src/plan/markcompact/mutator.rs index aefcff7132..4e40743a62 100644 --- a/src/plan/markcompact/mutator.rs +++ b/src/plan/markcompact/mutator.rs @@ -1,7 +1,8 @@ use super::MarkCompact; +use crate::plan::mutator_context::common_prepare_func; +use crate::plan::mutator_context::common_release_func; use crate::plan::mutator_context::create_allocator_mapping; use crate::plan::mutator_context::create_space_mapping; -use crate::plan::mutator_context::unreachable_prepare_func; use crate::plan::mutator_context::Mutator; use crate::plan::mutator_context::MutatorBuilder; use crate::plan::mutator_context::MutatorConfig; @@ -39,24 +40,23 @@ pub fn create_markcompact_mutator( vec.push((AllocatorSelector::MarkCompact(0), markcompact.mc_space())); vec }), - prepare_func: &unreachable_prepare_func, + prepare_func: &common_prepare_func, release_func: &markcompact_mutator_release, }; let builder = MutatorBuilder::new(mutator_tls, mmtk, config); builder.build() } -pub fn markcompact_mutator_release( - _mutator: &mut Mutator, - _tls: VMWorkerThread, -) { +pub fn markcompact_mutator_release(mutator: &mut Mutator, tls: VMWorkerThread) { // reset the thread-local allocation bump pointer let markcompact_allocator = unsafe { - _mutator + mutator .allocators - .get_allocator_mut(_mutator.config.allocator_mapping[AllocationSemantics::Default]) + .get_allocator_mut(mutator.config.allocator_mapping[AllocationSemantics::Default]) } .downcast_mut::>() .unwrap(); markcompact_allocator.reset(); + + common_release_func(mutator, tls); } diff --git a/src/plan/marksweep/global.rs b/src/plan/marksweep/global.rs index d413d85632..f6d4b9449b 100644 --- a/src/plan/marksweep/global.rs +++ b/src/plan/marksweep/global.rs @@ -41,8 +41,9 @@ pub const MS_CONSTRAINTS: PlanConstraints = PlanConstraints { moves_objects: false, max_non_los_default_alloc_bytes: MAX_OBJECT_SIZE, may_trace_duplicate_edges: true, - needs_prepare_mutator: !cfg!(feature = "malloc_mark_sweep") - && !cfg!(feature = "eager_sweeping"), + needs_prepare_mutator: (!cfg!(feature = "malloc_mark_sweep") + && !cfg!(feature = "eager_sweeping")) + || PlanConstraints::default().needs_prepare_mutator, ..PlanConstraints::default() }; @@ -65,8 +66,9 @@ impl Plan for MarkSweep { self.common.release(tls, true); } - fn end_of_gc(&mut self, _tls: VMWorkerThread) { + fn end_of_gc(&mut self, tls: VMWorkerThread) { self.ms.end_of_gc(); + self.common.end_of_gc(tls); } fn collection_required(&self, space_full: bool, _space: Option>) -> bool { diff --git a/src/plan/marksweep/mutator.rs b/src/plan/marksweep/mutator.rs index 41a3495df9..8d5b045e07 100644 --- a/src/plan/marksweep/mutator.rs +++ b/src/plan/marksweep/mutator.rs @@ 
-16,12 +16,18 @@ use enum_map::EnumMap; #[cfg(feature = "malloc_mark_sweep")] mod malloc_mark_sweep { + use crate::plan::mutator_context::{common_prepare_func, common_release_func}; + use super::*; // Do nothing for malloc mark sweep (malloc allocator) - pub fn ms_mutator_prepare(_mutator: &mut Mutator, _tls: VMWorkerThread) {} - pub fn ms_mutator_release(_mutator: &mut Mutator, _tls: VMWorkerThread) {} + pub fn ms_mutator_prepare(mutator: &mut Mutator, tls: VMWorkerThread) { + common_prepare_func(mutator, tls); + } + pub fn ms_mutator_release(mutator: &mut Mutator, tls: VMWorkerThread) { + common_release_func(mutator, tls); + } // malloc mark sweep uses 1 malloc allocator @@ -69,13 +75,20 @@ mod native_mark_sweep { // We forward calls to the allocator prepare and release #[cfg(not(feature = "malloc_mark_sweep"))] - pub fn ms_mutator_prepare(mutator: &mut Mutator, _tls: VMWorkerThread) { + pub fn ms_mutator_prepare(mutator: &mut Mutator, tls: VMWorkerThread) { + use crate::plan::mutator_context::common_prepare_func; + get_freelist_allocator_mut::(mutator).prepare(); + common_prepare_func(mutator, tls); } #[cfg(not(feature = "malloc_mark_sweep"))] - pub fn ms_mutator_release(mutator: &mut Mutator, _tls: VMWorkerThread) { + pub fn ms_mutator_release(mutator: &mut Mutator, tls: VMWorkerThread) { + use crate::plan::mutator_context::common_release_func; + get_freelist_allocator_mut::(mutator).release(); + + common_release_func(mutator, tls); } // native mark sweep uses 1 free list allocator diff --git a/src/plan/mutator_context.rs b/src/plan/mutator_context.rs index 9911a65847..ee60269d72 100644 --- a/src/plan/mutator_context.rs +++ b/src/plan/mutator_context.rs @@ -28,6 +28,19 @@ pub(crate) fn unreachable_prepare_func( unreachable!("`MutatorConfig::prepare_func` must not be called for the current plan.") } +/// A mutator prepare implementation for plans that use [`crate::plan::global::CommonPlan`]. +#[allow(unused_variables)] +pub(crate) fn common_prepare_func(mutator: &mut Mutator, _tls: VMWorkerThread) { + // Prepare the free list allocator used for non moving + #[cfg(feature = "marksweep_as_nonmoving")] + unsafe { + mutator.allocator_impl_mut_for_semantic::>( + AllocationSemantics::NonMoving, + ) + } + .prepare(); +} + /// A place-holder implementation for `MutatorConfig::release_func` that should not be called. /// Currently only used by `NoGC`. pub(crate) fn unreachable_release_func( @@ -37,7 +50,28 @@ unreachable!("`MutatorConfig::release_func` must not be called for the current plan.") } +/// A mutator release implementation for plans that use [`crate::plan::global::CommonPlan`]. +#[allow(unused_variables)] +pub(crate) fn common_release_func(mutator: &mut Mutator, _tls: VMWorkerThread) { + cfg_if::cfg_if! { + if #[cfg(feature = "marksweep_as_nonmoving")] { + // Release the free list allocator used for non moving + unsafe { mutator.allocator_impl_mut_for_semantic::>( + AllocationSemantics::NonMoving, + )}.release(); + } else if #[cfg(feature = "immortal_as_nonmoving")] { + // Do nothing for the bump pointer allocator + } else { + // Reset the Immix allocator + unsafe { mutator.allocator_impl_mut_for_semantic::>( + AllocationSemantics::NonMoving, + )}.reset(); + } + } +} + /// A place-holder implementation for `MutatorConfig::release_func` that does nothing. +#[allow(dead_code)] pub(crate) fn no_op_release_func(_mutator: &mut Mutator, _tls: VMWorkerThread) {} // This struct is part of the Mutator struct.
@@ -302,6 +336,28 @@ impl Mutator { self.allocators.get_typed_allocator_mut(selector) } + /// Get the allocator of a concrete type for the semantic. + /// + /// # Safety + /// The semantic needs to match the allocator type. + pub unsafe fn allocator_impl_for_semantic>( + &self, + semantic: AllocationSemantics, + ) -> &T { + self.allocator_impl::(self.config.allocator_mapping[semantic]) + } + + /// Get the mutable allocator of a concrete type for the semantic. + /// + /// # Safety + /// The semantic needs to match the allocator type. + pub unsafe fn allocator_impl_mut_for_semantic>( + &mut self, + semantic: AllocationSemantics, + ) -> &mut T { + self.allocator_impl_mut::(self.config.allocator_mapping[semantic]) + } + /// Return the base offset from a mutator pointer to the allocator specified by the selector. pub fn get_allocator_base_offset(selector: AllocatorSelector) -> usize { use crate::util::alloc::*; @@ -551,8 +607,13 @@ pub(crate) fn create_allocator_mapping( if include_common_plan { map[AllocationSemantics::Immortal] = reserved.add_bump_pointer_allocator(); map[AllocationSemantics::Los] = reserved.add_large_object_allocator(); - // TODO: This should be freelist allocator once we use marksweep for nonmoving space. - map[AllocationSemantics::NonMoving] = reserved.add_bump_pointer_allocator(); + map[AllocationSemantics::NonMoving] = if cfg!(feature = "marksweep_as_nonmoving") { + reserved.add_free_list_allocator() + } else if cfg!(feature = "immortal_as_nonmoving") { + reserved.add_bump_pointer_allocator() + } else { + reserved.add_immix_allocator() + }; } reserved.validate(); @@ -604,9 +665,14 @@ pub(crate) fn create_space_mapping( reserved.add_large_object_allocator(), plan.common().get_los(), )); - // TODO: This should be freelist allocator once we use marksweep for nonmoving space. vec.push(( - reserved.add_bump_pointer_allocator(), + if cfg!(feature = "marksweep_as_nonmoving") { + reserved.add_free_list_allocator() + } else if cfg!(feature = "immortal_as_nonmoving") { + reserved.add_bump_pointer_allocator() + } else { + reserved.add_immix_allocator() + }, plan.common().get_nonmoving(), )); } diff --git a/src/plan/nogc/global.rs b/src/plan/nogc/global.rs index a05034c1c4..d6e6306fef 100644 --- a/src/plan/nogc/global.rs +++ b/src/plan/nogc/global.rs @@ -66,6 +66,10 @@ impl Plan for NoGC { unreachable!() } + fn end_of_gc(&mut self, _tls: VMWorkerThread) { + unreachable!() + } + fn get_allocator_mapping(&self) -> &'static EnumMap { &ALLOCATOR_MAPPING } diff --git a/src/plan/pageprotect/global.rs b/src/plan/pageprotect/global.rs index 65a6b2eab8..5d0d868f95 100644 --- a/src/plan/pageprotect/global.rs +++ b/src/plan/pageprotect/global.rs @@ -31,7 +31,6 @@ pub struct PageProtect { /// The plan constraints for the page protect plan. 
pub const CONSTRAINTS: PlanConstraints = PlanConstraints { moves_objects: false, - needs_prepare_mutator: false, ..PlanConstraints::default() }; @@ -58,6 +57,10 @@ impl Plan for PageProtect { self.space.release(true); } + fn end_of_gc(&mut self, tls: VMWorkerThread) { + self.common.end_of_gc(tls); + } + fn collection_required(&self, space_full: bool, _space: Option>) -> bool { self.base().collection_required(self, space_full) } diff --git a/src/plan/pageprotect/mutator.rs b/src/plan/pageprotect/mutator.rs index dd3e49a56e..ddea08f84d 100644 --- a/src/plan/pageprotect/mutator.rs +++ b/src/plan/pageprotect/mutator.rs @@ -1,6 +1,6 @@ use super::PageProtect; -use crate::plan::mutator_context::no_op_release_func; -use crate::plan::mutator_context::unreachable_prepare_func; +use crate::plan::mutator_context::common_prepare_func; +use crate::plan::mutator_context::common_release_func; use crate::plan::mutator_context::Mutator; use crate::plan::mutator_context::MutatorBuilder; use crate::plan::mutator_context::MutatorConfig; @@ -41,8 +41,8 @@ pub fn create_pp_mutator( vec.push((AllocatorSelector::LargeObject(0), &page.space)); vec }), - prepare_func: &unreachable_prepare_func, - release_func: &no_op_release_func, + prepare_func: &common_prepare_func, + release_func: &common_release_func, }; let builder = MutatorBuilder::new(mutator_tls, mmtk, config); diff --git a/src/plan/plan_constraints.rs b/src/plan/plan_constraints.rs index 1220792fb4..3110eb7538 100644 --- a/src/plan/plan_constraints.rs +++ b/src/plan/plan_constraints.rs @@ -60,11 +60,13 @@ impl PlanConstraints { needs_linear_scan: crate::util::constants::SUPPORT_CARD_SCANNING || crate::util::constants::LAZY_SWEEP, needs_concurrent_workers: false, - may_trace_duplicate_edges: false, + // We may trace duplicate edges in mark sweep. If we use mark sweep as the non moving policy, it will be included in every plan. + may_trace_duplicate_edges: cfg!(feature = "marksweep_as_nonmoving"), needs_forward_after_liveness: false, needs_log_bit: false, barrier: BarrierSelector::NoBarrier, - needs_prepare_mutator: true, + // If we use mark sweep as non moving space, we need to prepare mutator. See [`common_prepare_func`].
+ needs_prepare_mutator: cfg!(feature = "marksweep_as_nonmoving"), } } } diff --git a/src/plan/semispace/global.rs b/src/plan/semispace/global.rs index 361003ef59..0205c7a77f 100644 --- a/src/plan/semispace/global.rs +++ b/src/plan/semispace/global.rs @@ -40,7 +40,6 @@ pub const SS_CONSTRAINTS: PlanConstraints = PlanConstraints { moves_objects: true, max_non_los_default_alloc_bytes: crate::plan::plan_constraints::MAX_NON_LOS_ALLOC_BYTES_COPYING_PLAN, - needs_prepare_mutator: false, ..PlanConstraints::default() }; @@ -96,6 +95,10 @@ impl Plan for SemiSpace { self.fromspace().release(); } + fn end_of_gc(&mut self, tls: VMWorkerThread) { + self.common.end_of_gc(tls) + } + fn collection_required(&self, space_full: bool, _space: Option>) -> bool { self.base().collection_required(self, space_full) } diff --git a/src/plan/semispace/mutator.rs b/src/plan/semispace/mutator.rs index 3526eef394..2a190a3134 100644 --- a/src/plan/semispace/mutator.rs +++ b/src/plan/semispace/mutator.rs @@ -1,5 +1,6 @@ use super::SemiSpace; -use crate::plan::mutator_context::unreachable_prepare_func; +use crate::plan::mutator_context::common_prepare_func; +use crate::plan::mutator_context::common_release_func; use crate::plan::mutator_context::Mutator; use crate::plan::mutator_context::MutatorBuilder; use crate::plan::mutator_context::MutatorConfig; @@ -14,7 +15,7 @@ use crate::vm::VMBinding; use crate::MMTK; use enum_map::EnumMap; -pub fn ss_mutator_release(mutator: &mut Mutator, _tls: VMWorkerThread) { +pub fn ss_mutator_release(mutator: &mut Mutator, tls: VMWorkerThread) { // rebind the allocation bump pointer to the appropriate semispace let bump_allocator = unsafe { mutator @@ -30,6 +31,8 @@ pub fn ss_mutator_release(mutator: &mut Mutator, _tls: VMWork .unwrap() .tospace(), ); + + common_release_func(mutator, tls); } const RESERVED_ALLOCATORS: ReservedAllocators = ReservedAllocators { @@ -57,7 +60,7 @@ pub fn create_ss_mutator( vec.push((AllocatorSelector::BumpPointer(0), ss.tospace())); vec }), - prepare_func: &unreachable_prepare_func, + prepare_func: &common_prepare_func, release_func: &ss_mutator_release, }; diff --git a/src/plan/sticky/immix/global.rs b/src/plan/sticky/immix/global.rs index 73a21d2c0e..a06dee9816 100644 --- a/src/plan/sticky/immix/global.rs +++ b/src/plan/sticky/immix/global.rs @@ -117,7 +117,7 @@ impl Plan for StickyImmix { // Prepare both large object space and immix space self.immix.immix_space.prepare( false, - crate::policy::immix::defrag::StatsForDefrag::new(self), + Some(crate::policy::immix::defrag::StatsForDefrag::new(self)), ); self.immix.common.los.prepare(false); } else { @@ -135,7 +135,7 @@ impl Plan for StickyImmix { } } - fn end_of_gc(&mut self, _tls: crate::util::opaque_pointer::VMWorkerThread) { + fn end_of_gc(&mut self, tls: crate::util::opaque_pointer::VMWorkerThread) { let next_gc_full_heap = crate::plan::generational::global::CommonGenPlan::should_next_gc_be_full_heap(self); self.next_gc_full_heap @@ -144,6 +144,8 @@ impl Plan for StickyImmix { let was_defrag = self.immix.immix_space.end_of_gc(); self.immix .set_last_gc_was_defrag(was_defrag, Ordering::Relaxed); + + self.immix.common.end_of_gc(tls); } fn collection_required(&self, space_full: bool, space: Option>) -> bool { diff --git a/src/plan/sticky/immix/mutator.rs b/src/plan/sticky/immix/mutator.rs index 10030ee352..83debfb089 100644 --- a/src/plan/sticky/immix/mutator.rs +++ b/src/plan/sticky/immix/mutator.rs @@ -2,7 +2,7 @@ use crate::plan::barriers::ObjectBarrier; use 
crate::plan::generational::barrier::GenObjectBarrierSemantics; use crate::plan::immix; use crate::plan::mutator_context::{ - create_space_mapping, unreachable_prepare_func, MutatorBuilder, MutatorConfig, + common_prepare_func, common_release_func, create_space_mapping, MutatorBuilder, MutatorConfig, }; use crate::plan::sticky::immix::global::StickyImmix; use crate::util::alloc::AllocatorSelector; @@ -12,7 +12,8 @@ use crate::vm::VMBinding; use crate::{Mutator, MMTK}; pub fn stickyimmix_mutator_release(mutator: &mut Mutator, tls: VMWorkerThread) { - immix::mutator::immix_mutator_release(mutator, tls) + immix::mutator::immix_mutator_release(mutator, tls); + common_release_func(mutator, tls); } pub use immix::mutator::ALLOCATOR_MAPPING; @@ -30,7 +31,7 @@ pub fn create_stickyimmix_mutator( vec.push((AllocatorSelector::Immix(0), stickyimmix.get_immix_space())); vec }), - prepare_func: &unreachable_prepare_func, + prepare_func: &common_prepare_func, release_func: &stickyimmix_mutator_release, }; diff --git a/src/policy/immix/immixspace.rs b/src/policy/immix/immixspace.rs index db3463e0aa..7b0429406b 100644 --- a/src/policy/immix/immixspace.rs +++ b/src/policy/immix/immixspace.rs @@ -2,7 +2,7 @@ use super::defrag::StatsForDefrag; use super::line::*; use super::{block::*, defrag::Defrag}; use crate::plan::VectorObjectQueue; -use crate::policy::gc_work::{TraceKind, TRACE_KIND_TRANSITIVE_PIN}; +use crate::policy::gc_work::{TraceKind, DEFAULT_TRACE, TRACE_KIND_TRANSITIVE_PIN}; use crate::policy::sft::GCWorkerMutRef; use crate::policy::sft::SFT; use crate::policy::sft_map::SFTMap; @@ -233,11 +233,19 @@ impl crate::policy::gc_work::PolicyTraceObject for ImmixSpace } } + #[allow(clippy::if_same_then_else)] // DEFAULT_TRACE needs a workaround which is documented below. fn may_move_objects() -> bool { if KIND == TRACE_KIND_DEFRAG { true } else if KIND == TRACE_KIND_FAST || KIND == TRACE_KIND_TRANSITIVE_PIN { false + } else if KIND == DEFAULT_TRACE { + // FIXME: This is hacky. When we do a default trace, this should be a nonmoving space. + // The only exception is the nursery GC for sticky immix, for which, we use default trace. + // This function is only used for PlanProcessEdges, and for sticky immix nursery GC, we use + // GenNurseryProcessEdges. So it still works. But this is quite hacky anyway. + // See https://github.com/mmtk/mmtk-core/issues/1314 for details. 
+ false } else { unreachable!() } @@ -388,7 +396,7 @@ impl ImmixSpace { &self.scheduler } - pub fn prepare(&mut self, major_gc: bool, plan_stats: StatsForDefrag) { + pub fn prepare(&mut self, major_gc: bool, plan_stats: Option) { if major_gc { // Update mark_state if VM::VMObjectModel::LOCAL_MARK_BIT_SPEC.is_on_side() { @@ -408,7 +416,7 @@ impl ImmixSpace { // Prepare defrag info if self.is_defrag_enabled() { - self.defrag.prepare(self, plan_stats); + self.defrag.prepare(self, plan_stats.unwrap()); } // Prepare each block for GC diff --git a/src/util/alloc/allocators.rs b/src/util/alloc/allocators.rs index 58b591b1a8..58dd790ae2 100644 --- a/src/util/alloc/allocators.rs +++ b/src/util/alloc/allocators.rs @@ -22,7 +22,7 @@ use super::MarkCompactAllocator; pub(crate) const MAX_BUMP_ALLOCATORS: usize = 6; pub(crate) const MAX_LARGE_OBJECT_ALLOCATORS: usize = 2; pub(crate) const MAX_MALLOC_ALLOCATORS: usize = 1; -pub(crate) const MAX_IMMIX_ALLOCATORS: usize = 1; +pub(crate) const MAX_IMMIX_ALLOCATORS: usize = 2; pub(crate) const MAX_FREE_LIST_ALLOCATORS: usize = 2; pub(crate) const MAX_MARK_COMPACT_ALLOCATORS: usize = 1; diff --git a/src/vm/tests/mock_tests/mock_test_allocate_nonmoving.rs b/src/vm/tests/mock_tests/mock_test_allocate_nonmoving.rs index 82d1c5e958..7c7df436fe 100644 --- a/src/vm/tests/mock_tests/mock_test_allocate_nonmoving.rs +++ b/src/vm/tests/mock_tests/mock_test_allocate_nonmoving.rs @@ -21,6 +21,7 @@ pub fn allocate_nonmoving() { let addr = memory_manager::alloc(&mut fixture.mutator, 16, 8, 0, AllocationSemantics::Default); assert!(!addr.is_zero()); + info!("Allocated default at: {:#x}", addr); // Non moving alloc let addr = memory_manager::alloc( @@ -31,6 +32,7 @@ pub fn allocate_nonmoving() { AllocationSemantics::NonMoving, ); assert!(!addr.is_zero()); + info!("Allocated nonmoving at: {:#x}", addr); }, no_cleanup, )
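From a binding's point of view, nothing changes at the allocation call site: objects are still requested with `AllocationSemantics::NonMoving`, and the compiled-in feature decides which space and allocator back that semantic (free-list allocator for `marksweep_as_nonmoving`, bump pointer for `immortal_as_nonmoving`, and the Immix allocator in the default build). A minimal sketch of that call, not part of this diff; it assumes an initialized MMTk instance and a bound mutator, as in the mock test above:

```rust
use mmtk::memory_manager;
use mmtk::util::Address;
use mmtk::vm::VMBinding;
use mmtk::{AllocationSemantics, Mutator};

/// Allocate `size` bytes that the GC will never move, whichever non-moving
/// space was selected by the `*_as_nonmoving` features at build time.
fn alloc_pinned<VM: VMBinding>(mutator: &mut Mutator<VM>, size: usize) -> Address {
    // Same arguments as the mock test above: 8-byte alignment, no offset.
    let addr = memory_manager::alloc(mutator, size, 8, 0, AllocationSemantics::NonMoving);
    assert!(!addr.is_zero());
    addr
}
```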