From 702710eba324e2117e7f3e10ab073e307ab3303e Mon Sep 17 00:00:00 2001
From: Kelvin Nilsen
Date: Fri, 12 Jan 2024 01:06:26 +0000
Subject: [PATCH 01/61] Improve documentation of how Evac-OOM Protocol works

---
 .../shenandoah/shenandoahEvacOOMHandler.cpp   |  59 ++++++-
 .../shenandoah/shenandoahEvacOOMHandler.hpp   | 161 ++++++++++++++----
 .../shenandoahEvacOOMHandler.inline.hpp       |  29 ++++
 3 files changed, 208 insertions(+), 41 deletions(-)

diff --git a/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.cpp b/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.cpp
index 8334126b6f829..34b9fc0bf65c0 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.cpp
@@ -46,6 +46,10 @@ void ShenandoahEvacOOMCounter::clear() {
   Atomic::release_store_fence(&_bits, (jint)0);
 }
 
+// This sets the OOM bit for a single counter. If decrement is true, it also decrements the count of evacuating threads
+// associated with this counter. After all _num_counters OOM bits have been set, all threads newly attempting to enter_evacuation
+// will be informed that they cannot allocate for evacuation. Threads that entered evacuation before the OOM bit was set may
+// continue to allocate for evacuation until they exit_evacuation.
 void ShenandoahEvacOOMCounter::set_oom_bit(bool decrement) {
   jint threads_in_evac = Atomic::load_acquire(&_bits);
   while (true) {
@@ -55,7 +59,7 @@ void ShenandoahEvacOOMCounter::set_oom_bit(bool decrement) {
 
     jint other = Atomic::cmpxchg(&_bits, threads_in_evac, newval);
     if (other == threads_in_evac) {
-      // Success: wait for other threads to get out of the protocol and return.
+      // Success: return so we can wait for other threads to stop allocating.
       break;
     } else {
       // Failure: try again with updated new value.
@@ -121,20 +125,23 @@ ShenandoahEvacOOMCounter* ShenandoahEvacOOMHandler::counter_for_thread(Thread* t
   return &_threads_in_evac[key & (_num_counters - 1)];
 }
 
-void ShenandoahEvacOOMHandler::wait_for_one_counter(ShenandoahEvacOOMCounter* ptr) {
+// Wait until this counter's OOM bit is set and there are no more evacuating threads associated with the counter.
+void ShenandoahEvacOOMHandler::wait_for_no_evac_threads_on_counter(ShenandoahEvacOOMCounter* counter) {
   // We might be racing against handle_out_of_memory_during_evacuation()
   // setting the OOM_MARKER_MASK bit so we must make sure it is set here
   // *and* the counter is zero.
-  while (ptr->load_acquire() != ShenandoahEvacOOMCounter::OOM_MARKER_MASK) {
+  while (counter->load_acquire() != ShenandoahEvacOOMCounter::OOM_MARKER_MASK) {
     os::naked_short_sleep(1);
   }
 }
 
+// Wait until every counter's OOM bit is set and the number of evacuating threads associated with every counter is zero.
+// Then disable further allocations by the current thread by setting its thread-local oom_during_evac flag to true.
 void ShenandoahEvacOOMHandler::wait_for_no_evac_threads() {
   // Once the OOM_MARKER_MASK bit is set the counter can only decrease
   // so it's safe to check each bucket in turn.
   for (int i = 0; i < _num_counters; i++) {
-    wait_for_one_counter(&_threads_in_evac[i]);
+    wait_for_no_evac_threads_on_counter(&_threads_in_evac[i]);
   }
   // At this point we are sure that no threads can evacuate anything. Raise
   // the thread-local oom_during_evac flag to indicate that any attempt
@@ -142,6 +149,16 @@ void ShenandoahEvacOOMHandler::wait_for_no_evac_threads() {
   ShenandoahThreadLocalData::set_oom_during_evac(Thread::current(), true);
 }
 
+// Increment the count of evacuating threads if this thread is authorized to allocate and no other allocating thread
+// has experienced out-of-memory when attempting an evacuation allocation.
+//
+// Upon return:
+//
+// 1. The thread is authorized to allocate for evacuation and the count of allocating threads has been incremented to
+//    include this thread, or
+// 2. The thread is not authorized to allocate for evacuation and the count of allocating threads does not include this thread.
+//
+// Thread-local flag is_oom_during_evac(thr) is false iff thread thr is authorized to allocate for evacuation.
 void ShenandoahEvacOOMHandler::register_thread(Thread* thr) {
   assert(!ShenandoahThreadLocalData::is_oom_during_evac(Thread::current()), "TL oom-during-evac must not be set");
 
@@ -152,6 +169,17 @@ void ShenandoahEvacOOMHandler::register_thread(Thread* thr) {
   }
 }
 
+// Decrement the count of evacuating threads if this thread is still authorized to allocate for evacuation.
+//
+// Upon return:
+//
+// 1. The thread is authorized to allocate for evacuation.
+// 2. The count of threads that are authorized to allocate for evacuation does not include this thread.
+//
+// Note: Authorizing the thread to allocate for evacuation has "no effect". This is simply the "presumed" default state
+//       of every thread. When/if this thread subsequently attempts to re-register, we will check whether further
+//       allocations are authorized by this thread and we will adjust the thread-local authorization flag (is_oom_during_evac)
+//       if necessary. The thread will not attempt to allocate for evacuation without first re-registering.
 void ShenandoahEvacOOMHandler::unregister_thread(Thread* thr) {
   if (!ShenandoahThreadLocalData::is_oom_during_evac(thr)) {
     counter_for_thread(thr)->decrement();
@@ -164,6 +192,27 @@ void ShenandoahEvacOOMHandler::unregister_thread(Thread* thr) {
   assert(!ShenandoahThreadLocalData::is_oom_during_evac(thr), "TL oom-during-evac must be turned off");
 }
 
+// The current thread failed to allocate memory required by evacuation. Perform the following:
+//
+// Upon entry:
+//
+// 1. The current thread is known to be authorized to allocate for evacuation.
+//
+// Upon return:
+//
+// 1. The OOM bit is set for every counter.
+// 2. This thread's thread-local is_oom_during_evac flag is true, denoting that this thread is no longer authorized
+//    to perform evacuation allocations.
+// 3. The count of threads authorized to allocate for evacuation has been decremented, because this thread is no
+//    longer authorized.
+// 4. We have waited for all evacuating threads to stop allocating, after which it is safe for this thread to resolve
+//    remaining objects as either forwarded or not forwarded. Hereafter, the status of these objects will not
+//    change until we STW to perform full GC.
+//
+// Note: Multiple threads may handle_out_of_memory_during_evacuation() at the same time. Setting the OOM bit on every
+//       counter is idempotent. Any particular thread will execute handle_out_of_memory_during_evacuation() only once
+//       per GC cycle.
+// void ShenandoahEvacOOMHandler::handle_out_of_memory_during_evacuation() { assert(ShenandoahThreadLocalData::is_evac_allowed(Thread::current()), "sanity"); assert(!ShenandoahThreadLocalData::is_oom_during_evac(Thread::current()), "TL oom-during-evac must not be set"); @@ -179,6 +228,8 @@ void ShenandoahEvacOOMHandler::handle_out_of_memory_during_evacuation() { wait_for_no_evac_threads(); } +// This method resets the count of evacuating threads to zero and clears the OOM bit for each counter. +// We call this at the start of each GC cycle. void ShenandoahEvacOOMHandler::clear() { assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint"); for (int i = 0; i < _num_counters; i++) { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.hpp b/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.hpp index dd77f6216e061..eb85318e55577 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.hpp @@ -59,52 +59,125 @@ class ShenandoahEvacOOMCounter { * Provides safe handling of out-of-memory situations during evacuation. * * When a Java thread encounters out-of-memory while evacuating an object in a - * load-reference-barrier (i.e. it cannot copy the object to to-space), it does not - * necessarily follow we can return immediately from the LRB (and store to from-space). - * - * In very basic case, on such failure we may wait until the evacuation is over, - * and then resolve the forwarded copy, and to the store there. This is possible - * because other threads might still have space in their GCLABs, and successfully - * evacuate the object. - * - * But, there is a race due to non-atomic evac_in_progress transition. Consider - * thread A is stuck waiting for the evacuation to be over -- it cannot leave with - * from-space copy yet. Control thread drops evacuation_in_progress preparing for - * next STW phase that has to recover from OOME. Thread B misses that update, and - * successfully evacuates the object, does the write to to-copy. But, before - * Thread B is able to install the fwdptr, thread A discovers evac_in_progress is - * down, exits from here, reads the fwdptr, discovers old from-copy, and stores there. - * Thread B then wakes up and installs to-copy. This breaks to-space invariant, and - * silently corrupts the heap: we accepted two writes to separate copies of the object. - * - * The way it is solved here is to maintain a counter of threads inside the - * 'evacuation path'. The 'evacuation path' is the part of evacuation that does the actual - * allocation, copying and CASing of the copy object, and is protected by this - * OOM-during-evac-handler. The handler allows multiple threads to enter and exit - * evacuation path, but on OOME it requires all threads that experienced OOME to wait - * for current threads to leave, and blocks other threads from entering. The counter state - * is striped across multiple cache lines to reduce contention when many threads attempt - * to enter or leave the protocol at the same time. + * load-reference-barrier (i.e. it cannot copy the object to to-space), a special + * protocol is required to assure that all threads see the same version of every + * object. + * + * This file and its accompanying .cpp and .inline.hpp files hold the implementation + * of this protocol. The general idea is as follows: + * + * 1. 
If we fail to evacuate the entirety of live memory from all cset regions,
+ *    we will transition to STW full gc at the end of the evacuation cycle. Full GC
+ *    marks and compacts the entire heap. If we fail to evacuate a single cset object,
+ *    we will pay the price of a full GC.
+ *
+ * 2. If any thread A fails to evacuate object X, it will wait to see if some
+ *    other mutator or GC worker thread can successfully evacuate object X. At the point
+ *    thread A fails to allocate, it launches the OOM-during-evacuation protocol. There
+ *    is no going back (even though some other thread may successfully evacuate object X).
+ *
+ * 3. The protocol consists of:
+ *
+ *    a) Thread A sets internal state to indicate that OOM-during-evac has been
+ *       encountered.
+ *    b) Thread A now waits for all other threads to finish any ongoing allocations
+ *       for evacuation that might be in process.
+ *    c) Other threads that announce intent to allocate for evacuation are informed
+ *       that the OOM-during-evac protocol has been initiated. As with thread A,
+ *       these threads also wait for all other threads to finish any ongoing allocations
+ *       for evacuation that might be in process.
+ *    d) After all threads have finished whatever allocations for evacuation they
+ *       were in the process of performing, the evacuation state of the heap is considered
+ *       to be "frozen". At this point, some cset objects may have been successfully
+ *       evacuated and some cset objects may have failed to evacuate. There will be
+ *       no more evacuations until we STW and perform Full GC.
+ *    e) Now, all of the threads that were waiting for evacuating threads to finish
+ *       allocations that were in progress are allowed to run, but they are not allowed
+ *       to allocate for evacuation. Additional threads that newly announce intent to
+ *       allocate for evacuation are immediately allowed to continue running, but without
+ *       authorization to allocate.
+ *    f) Threads that desire to allocate for evacuation but are not authorized to do so
+ *       simply consult the header of each cset object. If the header denotes that the
+ *       object has been evacuated by a different thread, then this thread will replace
+ *       its pointer to the object with a pointer to the new location. If the header
+ *       denotes that this object has not yet been copied, this thread will continue to
+ *       use the original cset location as the official version of the object. Since
+ *       no threads are allowed to allocate for evacuation in this phase, all threads
+ *       accessing this same object will agree to refer to this object at its original
+ *       location within the cset.
+ *    g) Evacuation is cancelled and all threads will eventually reach a Full GC
+ *       safepoint. Marking by Full GC will finish updating references that might
+ *       be inconsistent within the heap, and will then compact all live memory within
+ *       the heap.
 *
 * Detailed state change:
 *
+ * Maintain a count of how many threads are on an evac-path (i.e., are allocating for evacuation).
+ *
 * Upon entry of the evac-path, entering thread will attempt to increase the counter,
 * using a CAS. Depending on the result of the CAS:
 *   - success: carry on with evac
 *   - failure:
 *     - if offending value is a valid counter, then try again
 *     - if offending value is OOM-during-evac special value: loop until
- *       counter drops to 0, then exit with resolving the ptr
+ *       counter drops to 0, then continue without authorization to allocate.
+ *       As such, the thread will treat unforwarded cset objects as residing
+ *       permanently at their original location.
+ *
 *
 * Upon exit, exiting thread will decrease the counter using atomic dec.
 *
 * Upon OOM-during-evac, any thread will attempt to CAS OOM-during-evac
 * special value into the counter. Depending on result:
- *     - success: busy-loop until counter drops to zero, then exit with resolve
+ *     - success: busy-loop until counter drops to zero, then continue
+ *       to execute without authorization to allocate. Any unforwarded
+ *       cset objects will be treated as residing permanently at their original
+ *       location.
 *     - failure:
 *       - offender is valid counter update: try again
 *       - offender is OOM-during-evac: busy loop until counter drops to
- *         zero, then exit with resolve
+ *         zero, then continue to execute without authorization to allocate,
+ *         as above.
+ */
+
+/*
+ * For most service workloads, OOM-during-evac will be very rare. Most services are provisioned
+ * with enough memory and CPU cores to avoid experiencing OOM during evac. The typical cause for
+ * OOM during evac is a spike in client requests, possibly related to a DOS attack. When OOM during
+ * evac does occur, there are opportunities to make the protocol more efficient. In some cases,
+ * OOM during evac can also occur because the heap becomes fragmented. For example, it may not be
+ * possible to find contiguous memory to evacuate an object that is 50% of the heap region size, even
+ * though there is an abundance of "fragmented" memory available to support evacuation of thousands of
+ * smaller (more normal-sized) objects.
+ *
+ * TODO: make refinements to the OOM-during-evac protocol so that it is less disruptive and more efficient.
+ *
+ * 1. Allow a mutator or GC worker thread that fails to allocate for evacuation to mark a single
+ *    cset object as frozen-in-from-space and then continue to evacuate other objects while other
+ *    threads do the same. A draft solution is described here, along with discussion
+ *    of prerequisites required for full implementation: https://github.com/openjdk/jdk/pull/12881
+ *    This allows all threads, including the one that failed to evacuate a single object, to fully utilize
+ *    all of the memory available within their existing GCLABs. This allows more of evacuation to be
+ *    performed concurrently rather than requiring STW operation.
+ *
+ * 2. At the end of evacuation, if there were any failures to evacuate, fixup the cset before
+ *    we go to update-refs. This can be done concurrently. Fixup consists of:
+ *
+ *    a. Take region out of cset if it contains objects that failed to evacuate.
+ *
+ *    b. For each such region, set top to be address following last object that failed to evacuate.
+ *
+ *    c. For each such region, make the garbage found below and between uncopied objects parseable:
+ *       overwrite each run of garbage with array-of-integer object of appropriate size. Generational
+ *       Shenandoah calls this coalesce-and-fill.
+ *
+ * 3. Do not automatically upgrade to Full GC. Continue with concurrent GC as long as possible.
+ *    There is already a mechanism in place to escalate to Full GC if the mutator experiences out-of-memory
+ *    and/or if concurrent GC is not "productive". Transitions to Full GC are very costly because (i) this
+ *    results in a very long STW pause during which mutator threads are unresponsive, and (ii) Full GC
+ *    redundantly repeats work that was already successfully performed concurrently. When OOM-during-evac
+ *    transitions to Full GC, we throw away and repeat all of the previously completed work of marking and
+ *    evacuating.
 */
 class ShenandoahEvacOOMHandler {
 private:
@@ -116,7 +189,7 @@ class ShenandoahEvacOOMHandler {
   ShenandoahEvacOOMCounter* counter_for_thread(Thread* t);
 
   void wait_for_no_evac_threads();
-  void wait_for_one_counter(ShenandoahEvacOOMCounter* ptr);
+  void wait_for_no_evac_threads_on_counter(ShenandoahEvacOOMCounter* counter);
   static uint64_t hash_pointer(const void* p);
   static int calc_num_counters();
 
@@ -124,23 +197,37 @@ class ShenandoahEvacOOMHandler {
   ShenandoahEvacOOMHandler();
 
   /**
-   * Attempt to enter the protected evacuation path.
+   * Enter a protected evacuation path.
+   *
+   * Upon return:
+   *
+   * 1. Thread t has authorization to allocate for evacuation and the count of evacuating threads includes thread t, or
+   *
+   * 2. Thread t has no authorization to allocate for evacuation and the count of evacuating threads does not include
+   *    thread t.
+   *
+   * This function may pause while it waits for coordination with other allocating threads.
    *
-   * When this returns true, it is safe to continue with normal evacuation.
-   * When this method returns false, evacuation must not be entered, and caller
-   * may safely continue with a simple resolve (if Java thread).
+   * Authority to allocate for evacuation is represented by thread-local flag is_oom_during_evac(t) equal to false.
+   * If this thread is not authorized to allocate and it encounters an object residing within the cset, it uses
+   * the most current location of the object, as represented by the object's header. If the object was not previously
+   * evacuated, the evac-OOM protocol assures that the object will not be subsequently evacuated during the remainder
+   * of the concurrent evacuation phase.
    */
   inline void enter_evacuation(Thread* t);
 
   /**
-   * Leave evacuation path.
+   * Leave a protected evacuation path.
   */
   inline void leave_evacuation(Thread* t);
 
   /**
-   * Signal out-of-memory during evacuation. It will prevent any other threads
+   * Signal out-of-memory during evacuation. This will prevent any other threads
    * from entering the evacuation path, then wait until all threads have left the
-   * evacuation path, and then return. It is then safe to continue with a simple resolve.
+   * evacuation path, and then return. Following this, it is safe to assume that
+   * any object residing in the cset and not previously forwarded will remain in
+   * the cset throughout the remainder of the concurrent evacuation phase. It will
+   * not be subsequently evacuated.
    */
   void handle_out_of_memory_during_evacuation();
 
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.inline.hpp
index 5c49c2edbb2dc..ad71b4d67177d 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.inline.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.inline.hpp
@@ -39,6 +39,23 @@ jint ShenandoahEvacOOMCounter::unmasked_count() {
   return Atomic::load_acquire(&_bits) & ~OOM_MARKER_MASK;
 }
 
+// Announce the intent by thread thr to perform allocations for evacuation.
+//
+// Upon return:
+//
+// 1. The count of nested allocate-for-evacuation scopes for this thread has been incremented.
+// 2. Thread thr is authorized to allocate for evacuation and the count of allocating threads includes this thread, or
+// 3. Thread thr is not authorized to allocate for evacuation and the count of allocating threads does not include this thread.
+//
+// Thread-local flag is_oom_during_evac(thr) is false iff thread thr is authorized to allocate for evacuation.
+// +// Notes: If this thread subsequently encounters a "need" to allocate memory for evacuation but it is not authorized to +// allocate for evacuation, this thread will simply treat the relevant cset object as "frozen within from-space". +// If this thread is forbidden to allocate, then all threads are forbidden to allocate. As soon as a first thread +// begins to execute within an "evacuation region" without authorization to allocate, the evac-OOM protocol requires +// that no additional objects be evacuated. Normally, this phase of executing without authorization to evacuate is +// immediately followed by a Full GC which compacts all of heap memory in STW mode. + void ShenandoahEvacOOMHandler::enter_evacuation(Thread* thr) { uint8_t level = ShenandoahThreadLocalData::push_evac_oom_scope(thr); if (level == 0) { @@ -55,6 +72,18 @@ void ShenandoahEvacOOMHandler::enter_evacuation(Thread* thr) { } } +// Announce intent to leave a control scope that performs allocation for evacuation. +// +// Upon return: +// +// 1. The thread-local count of nested allocation-for-evacuation scopes for this thread has been decremented. +// 2. If we have left the outer-most allocation-for-evacuation scope for this thread: +// a. The count of threads that are allocating for evacuation does not represent this thread +// b. This thread is authorized to allocate for evacuation. +// +// Notes: A thread that has already entered evacuation and not left may make a nested re-entry into evacuation. Each nested +// invocation of enter_evacuation should be matched by invocation of leave_evacuation. + void ShenandoahEvacOOMHandler::leave_evacuation(Thread* thr) { uint8_t level = ShenandoahThreadLocalData::pop_evac_oom_scope(thr); // Not top level, just return From 51d056f8e6d2549ba8bc0435bc86b2c5243dce52 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Wed, 17 Jan 2024 22:23:08 +0000 Subject: [PATCH 02/61] Revert "Improve documentation of how Evac-OOM Protocol works" This reverts commit 702710eba324e2117e7f3e10ab073e307ab3303e. --- .../shenandoah/shenandoahEvacOOMHandler.cpp | 59 +------ .../shenandoah/shenandoahEvacOOMHandler.hpp | 161 ++++-------------- .../shenandoahEvacOOMHandler.inline.hpp | 29 ---- 3 files changed, 41 insertions(+), 208 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.cpp b/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.cpp index 34b9fc0bf65c0..8334126b6f829 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.cpp @@ -46,10 +46,6 @@ void ShenandoahEvacOOMCounter::clear() { Atomic::release_store_fence(&_bits, (jint)0); } -// This sets the OOM bit for a single counter. If decrement is true, it also decrements the count of evacuating threads -// associated with this counter. After all _num_counters OOM bits have been set, all threads newly attempting to enter_evacuation -// will be informed that they cannot allocate for evacation. Threads that entered evacuation before the OOM bit was set may -// continue to allocate for evacuation until they exit_evacuation. void ShenandoahEvacOOMCounter::set_oom_bit(bool decrement) { jint threads_in_evac = Atomic::load_acquire(&_bits); while (true) { @@ -59,7 +55,7 @@ void ShenandoahEvacOOMCounter::set_oom_bit(bool decrement) { jint other = Atomic::cmpxchg(&_bits, threads_in_evac, newval); if (other == threads_in_evac) { - // Success: return so we can wait for other threads to stop allocating. 
+ // Success: wait for other threads to get out of the protocol and return. break; } else { // Failure: try again with updated new value. @@ -125,23 +121,20 @@ ShenandoahEvacOOMCounter* ShenandoahEvacOOMHandler::counter_for_thread(Thread* t return &_threads_in_evac[key & (_num_counters - 1)]; } -// Wait until this counter's OOM bit is set and there are no more evacuating threads associated with the counter. -void ShenandoahEvacOOMHandler::wait_for_no_evac_threads_on_counter(ShenandoahEvacOOMCounter* counter) { +void ShenandoahEvacOOMHandler::wait_for_one_counter(ShenandoahEvacOOMCounter* ptr) { // We might be racing against handle_out_of_memory_during_evacuation() // setting the OOM_MARKER_MASK bit so we must make sure it is set here // *and* the counter is zero. - while (counter->load_acquire() != ShenandoahEvacOOMCounter::OOM_MARKER_MASK) { + while (ptr->load_acquire() != ShenandoahEvacOOMCounter::OOM_MARKER_MASK) { os::naked_short_sleep(1); } } -// Wait until every counter's OOM bit is set and the number of evacuating threads associated with every counter is zero. -// Then disable further allocations by the current thread by setting its thread-local oom_during_evag flag to true. void ShenandoahEvacOOMHandler::wait_for_no_evac_threads() { // Once the OOM_MARKER_MASK bit is set the counter can only decrease // so it's safe to check each bucket in turn. for (int i = 0; i < _num_counters; i++) { - wait_for_no_evac_threads_on_counter(&_threads_in_evac[i]); + wait_for_one_counter(&_threads_in_evac[i]); } // At this point we are sure that no threads can evacuate anything. Raise // the thread-local oom_during_evac flag to indicate that any attempt @@ -149,16 +142,6 @@ void ShenandoahEvacOOMHandler::wait_for_no_evac_threads() { ShenandoahThreadLocalData::set_oom_during_evac(Thread::current(), true); } -// Increment the count of evacuating threads if this thread is authorized to allocate and no other allocating thread -// has experienced out-of-memory when attempting an evacuation allocation. -// -// Upon return: -// -// 1. The thread is authorized to allocate for evacuation and the count of allocating threads has been incremented to -// include this thread, or -// 2. The thread is not authorized to allocate for evacuation and the count of allocating thread does not include this thread. -// -// Thread-local flag is_oom_during_evac(thr) is false iff thread thr is authorized to allocate for evacuation. void ShenandoahEvacOOMHandler::register_thread(Thread* thr) { assert(!ShenandoahThreadLocalData::is_oom_during_evac(Thread::current()), "TL oom-during-evac must not be set"); @@ -169,17 +152,6 @@ void ShenandoahEvacOOMHandler::register_thread(Thread* thr) { } } -// Decrement the count of evacuating threads if this thread is still authorized to allocate for evacuation. -// -// Upon return: -// -// 1. The thread is authorized to allocate for evacuation. -// 2. The count of threads that are authorized to allocate for evacuations does not include this thread. -// -// Note: Authorizing the thread to allocate for evacuation has "no effect". This is simply the "presumed" default state -// of every thread. When/if this thread subsequently attempts to re-register, we will check whether further -// allocations are authorized by this thread and we will adjust the thread-local authorization flag (is_oom_during_evac) -// if necessary. The thread will not attempt to allocate for evacuation without first re-registering. 
void ShenandoahEvacOOMHandler::unregister_thread(Thread* thr) { if (!ShenandoahThreadLocalData::is_oom_during_evac(thr)) { counter_for_thread(thr)->decrement(); @@ -192,27 +164,6 @@ void ShenandoahEvacOOMHandler::unregister_thread(Thread* thr) { assert(!ShenandoahThreadLocalData::is_oom_during_evac(thr), "TL oom-during-evac must be turned off"); } -// The current thread failed to allocate memory required by evacuation. Perform the following: -// -// Upon entry: -// -// 1. The current thread is known to be authorized to allocate for evacuation. -// -// Upon return: -// -// 1. The OOM bit is set for every counter. -// 2. This thread's thread-local is_oom_during_evac flag is true, denoting that this thread is no longer authorized -// to perform evacuation allocations. -// 3. The count of threads authorized to evacuate for allocation has been decremented, because this thread is no -// longer authorized. -// 4. We have waited for all evacuating threads to stop allocating, after which it is safe for this thread to resolve -// remaining objects as either forwarded or not forwarded. Hereafter, the status of these objects will not -// change until we STW to perform full GC. -// -// Note: Multiple threads may handle_out_of_memory_during_evacuation() at the same time. Setting the OOM bit on every -// counter is idempotent. Any particular thread will execute handle_out_of_memory_during_evacuation() only once -// per GC cycle. -// void ShenandoahEvacOOMHandler::handle_out_of_memory_during_evacuation() { assert(ShenandoahThreadLocalData::is_evac_allowed(Thread::current()), "sanity"); assert(!ShenandoahThreadLocalData::is_oom_during_evac(Thread::current()), "TL oom-during-evac must not be set"); @@ -228,8 +179,6 @@ void ShenandoahEvacOOMHandler::handle_out_of_memory_during_evacuation() { wait_for_no_evac_threads(); } -// This method resets the count of evacuating threads to zero and clears the OOM bit for each counter. -// We call this at the start of each GC cycle. void ShenandoahEvacOOMHandler::clear() { assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint"); for (int i = 0; i < _num_counters; i++) { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.hpp b/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.hpp index eb85318e55577..dd77f6216e061 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.hpp @@ -59,125 +59,52 @@ class ShenandoahEvacOOMCounter { * Provides safe handling of out-of-memory situations during evacuation. * * When a Java thread encounters out-of-memory while evacuating an object in a - * load-reference-barrier (i.e. it cannot copy the object to to-space), a special - * protocol is required to assure that all threads see the same version of every - * object. - * - * This file and its accompanying .cpp and .inline.hpp files hold the implementation - * of this protocol. The general idea is as follows: - * - * 1. If we fail to evacuate the entirety of live memory from all cset regions, - * we will transition to STW full gc at the end of the evacuation cycle. Full GC - * marks and compacts the entire heap. If we fail to evacuate a single cset object, - * we will pay the price of a full GC. - * - * 2. If any thread A fails to evacuate object X, it will wait to see if some - * other mutator or GC worker thread can successfully evacuate object X. At the - * thread A fails to allocate, it launches the OOM-during-evacuation protocol. 
There - * is no going back (even though some other thread may successfully evacuate object X). - * - * 3. The protocol consists of: - * - * a) Thread A sets internal state to indicate that OOM-during-evac has been - * encountered. - * b) Thread A now waits for all other threads to finish any ongoing allocations - * for evacuation that might be in process. - * c) Other threads that announce intent to allocate for evacuation are informed - * that the OOM-during-evac protocol has been initated. As with thread A, - * these threads also wait for all other threads to finish any ongoing allocations - * for evacuation that might be in process. - * d) After all threads have finished whatever allocations for evacuation they - * were in the process of performing, the evacution state of the heap is considered - * to be "frozen". At this point, some cset objects may have been successfully - * evacuated and some cset objects may have failed to evacuate. There will be - * no more evaucations until we STW and perform Full GC. - * e) Now, all of the threads that were waiting for evacuating threads to finish - * allocations that were in progress are allowed to run, but they are not allowed - * to allocate for evacuation. Additional threads that newly announce intent to - * allocate for evacuation are immediately allowed to continue running, but without - * authorization to allocate. - * f) Threads that desire to allocate for evacuation but are not authorized to do so - * simply consult the head of each cset object. If the header denotes that the - * object has been evacuated by a different thread, then this thread will replace - * its pointer to the object with a pointer to the new location. If the header - * denotes that this object has not yet been copied, this thread will continue to - * use the original cset location as the official version of the object. Since - * no threads are allowed to allocate for evacuation in this phase, all threads - * accessing this same object will agree to refer to this object at its original - * location within the cset. - * g) Evacuation is cancelled and all threads will eventually reach a Full GC - * safepoint. Marking by Full GC will finish updating references that might - * be inconsistent within the heap, and will then compact all live memory within - * the heap. + * load-reference-barrier (i.e. it cannot copy the object to to-space), it does not + * necessarily follow we can return immediately from the LRB (and store to from-space). + * + * In very basic case, on such failure we may wait until the evacuation is over, + * and then resolve the forwarded copy, and to the store there. This is possible + * because other threads might still have space in their GCLABs, and successfully + * evacuate the object. + * + * But, there is a race due to non-atomic evac_in_progress transition. Consider + * thread A is stuck waiting for the evacuation to be over -- it cannot leave with + * from-space copy yet. Control thread drops evacuation_in_progress preparing for + * next STW phase that has to recover from OOME. Thread B misses that update, and + * successfully evacuates the object, does the write to to-copy. But, before + * Thread B is able to install the fwdptr, thread A discovers evac_in_progress is + * down, exits from here, reads the fwdptr, discovers old from-copy, and stores there. + * Thread B then wakes up and installs to-copy. This breaks to-space invariant, and + * silently corrupts the heap: we accepted two writes to separate copies of the object. 
+ * + * The way it is solved here is to maintain a counter of threads inside the + * 'evacuation path'. The 'evacuation path' is the part of evacuation that does the actual + * allocation, copying and CASing of the copy object, and is protected by this + * OOM-during-evac-handler. The handler allows multiple threads to enter and exit + * evacuation path, but on OOME it requires all threads that experienced OOME to wait + * for current threads to leave, and blocks other threads from entering. The counter state + * is striped across multiple cache lines to reduce contention when many threads attempt + * to enter or leave the protocol at the same time. * * Detailed state change: * - * Maintain a count of how many threads are on an evac-path (which is allocating for evacuation) - * * Upon entry of the evac-path, entering thread will attempt to increase the counter, * using a CAS. Depending on the result of the CAS: * - success: carry on with evac * - failure: * - if offending value is a valid counter, then try again * - if offending value is OOM-during-evac special value: loop until - * counter drops to 0, then continue without authorization to allocate. - * As such, the thread will treat unforwarded cset objects as residing - * permanently at their original location. - * + * counter drops to 0, then exit with resolving the ptr * * Upon exit, exiting thread will decrease the counter using atomic dec. * * Upon OOM-during-evac, any thread will attempt to CAS OOM-during-evac * special value into the counter. Depending on result: - * - success: busy-loop until counter drops to zero, Then continue - * to execute without authnorization to allocate. Any unforwarded - * cset objects will be treated as residing permanently at their original - * location. + * - success: busy-loop until counter drops to zero, then exit with resolve * - failure: * - offender is valid counter update: try again * - offender is OOM-during-evac: busy loop until counter drops to - * zero, then continue to execute without autnorization to allocate, - * as above. - */ - -/* - * For most service workloads, OOM-during-evac will be very rare. Most services are provisioned - * with enough memory and CPU cores to avoid experiencing OOM during evac. The typical cause for - * OOM during evac is a spike in client requests, possibly related to a DOS attack. When OOM during - * evac does occur, there are opportunities to make the protocol more efficient. In some cases, - * OOM during evac can also occur because the heap becomes fragmented. For example, it may not be - * possible to find contiguous memory to evacuate an object that is 50% of the heap region size, even - * though there is an abundance of "fragmented" memory available to support evacuation of thousands of - * smaller (more normal-sized) objects. - * - * TODO: make refinements to the OOM-during-evac protocol so that it is less disruptive and more efficient. - * - * 1. Allow a mutator or GC worker thread that fails to allocate for evacuation to mark a single - * cset object as frozen-in-from-space and then continue to evacuate other objects while other - * threads continue to evacuate other objects. A draft solution is described here, along with discussion - * of prerequisites required for full implementation: https://github.com/openjdk/jdk/pull/12881 - * This allows all threads, including the one that failed to evacuate a single object, to fully utilize - * all of the memory available within their existing GCLABs. 
This allows more of evacuation to be - * performed concurrently rather than requiring STW operation. - * - * 2. At the end of evacuation, if there were any failures to evacuate, fixup the cset before - * we go to update-refs. This can be done concurrently. Fixup consists of: - * - * a. Take region out of cset if it contains objects that failed to evacuate. - * - * b. For each such region, set top to be address following last object that failed to evacuate. - * - * c. For each such region, make the garbage found below and between uncopied objects parseable: - * overwrite each run of garbage with array-of-integer object of appropriate size. Generational - * Shenandoah calls this coalesce-and-fill. - * - * 3. Do not automatically upgrade to Full GC. Continue with concurrent GC as long as possible. - * There is already a mechanism in place to escalate to Full GC if the mutator experiences out-of-memory - * and/or if concurrent GC is not "productive". Transitions to Full GC are very costly because (i) this - * results in a very long STW pause during which mutator threads are unresponsive, and (ii) Full GC - * redundantly repeats work that was already successfully performed concurrently. When OOM-during-evac - * transitions to Full GC, we throw away and repeat all of the previously completed work of marking and - * evacuating. + * zero, then exit with resolve */ class ShenandoahEvacOOMHandler { private: @@ -189,7 +116,7 @@ class ShenandoahEvacOOMHandler { ShenandoahEvacOOMCounter* counter_for_thread(Thread* t); void wait_for_no_evac_threads(); - void wait_for_no_evac_threads_on_counter(ShenandoahEvacOOMCounter* counter); + void wait_for_one_counter(ShenandoahEvacOOMCounter* ptr); static uint64_t hash_pointer(const void* p); static int calc_num_counters(); @@ -197,37 +124,23 @@ class ShenandoahEvacOOMHandler { ShenandoahEvacOOMHandler(); /** - * Enter a protected evacuation path. - * - * Upon return: - * - * 1. Thread t has authorization to allocate for evacuation and the count of evacuating threads includes thread t, or - * - * 2. Thread t has no authorization to allocate for evacuation and the count of evacuating threads does not include - * thread t. - * - * This function may pause while it waits for coordination with other allocating threads. + * Attempt to enter the protected evacuation path. * - * Authority to allocate for evacuation is represented by thread-local flag is_oom_during_evac(t) equal to false. - * If this thread is not authorized to allocate and it encounters an object residing within the cset, it uses - * the most current location of the object, as represented by the object's header. If the object was not previously - * allocated, the evac-OOM protocol assures that the object will not be subsequently evacuated during the remainder - * of the concurrent evacuation phase. + * When this returns true, it is safe to continue with normal evacuation. + * When this method returns false, evacuation must not be entered, and caller + * may safely continue with a simple resolve (if Java thread). */ inline void enter_evacuation(Thread* t); /** - * Leave a protected evacuation path. + * Leave evacuation path. */ inline void leave_evacuation(Thread* t); /** - * Signal out-of-memory during evacuation. This will prevent any other threads + * Signal out-of-memory during evacuation. It will prevent any other threads * from entering the evacuation path, then wait until all threads have left the - * evacuation path, and then return. 
Following this, it is safe to assume that - * any object residing in the cset and not previously forwarded will remain in - * the cset throughout the remainder of the concurrent evacuation phase. It will - * not be subsequently evacuated. + * evacuation path, and then return. It is then safe to continue with a simple resolve. */ void handle_out_of_memory_during_evacuation(); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.inline.hpp index ad71b4d67177d..5c49c2edbb2dc 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahEvacOOMHandler.inline.hpp @@ -39,23 +39,6 @@ jint ShenandoahEvacOOMCounter::unmasked_count() { return Atomic::load_acquire(&_bits) & ~OOM_MARKER_MASK; } -// Announce the intent by thread thr to perform allocations for evacuation. -// -// Upon return: -// -// 1. The count of nested allocate-for-evacuation scopes for this thread has been incremented. -// 2. Thread thr is authorized to allocate for evacuation and the count of allocating threads represents this thread, or -// 3. Thread thr is not authorized to allocate for evacuation and the count of allocating thread does not include this thread. -// -// Thread-local flag is_oom_during_evac(thr) is false iff thread thr is authorized to allocate for evacuation. -// -// Notes: If this thread subsequently encounters a "need" to allocate memory for evacuation but it is not authorized to -// allocate for evacuation, this thread will simply treat the relevant cset object as "frozen within from-space". -// If this thread is forbidden to allocate, then all threads are forbidden to allocate. As soon as a first thread -// begins to execute within an "evacuation region" without authorization to allocate, the evac-OOM protocol requires -// that no additional objects be evacuated. Normally, this phase of executing without authorization to evacuate is -// immediately followed by a Full GC which compacts all of heap memory in STW mode. - void ShenandoahEvacOOMHandler::enter_evacuation(Thread* thr) { uint8_t level = ShenandoahThreadLocalData::push_evac_oom_scope(thr); if (level == 0) { @@ -72,18 +55,6 @@ void ShenandoahEvacOOMHandler::enter_evacuation(Thread* thr) { } } -// Announce intent to leave a control scope that performs allocation for evacuation. -// -// Upon return: -// -// 1. The thread-local count of nested allocation-for-evacuation scopes for this thread has been decremented. -// 2. If we have left the outer-most allocation-for-evacuation scope for this thread: -// a. The count of threads that are allocating for evacuation does not represent this thread -// b. This thread is authorized to allocate for evacuation. -// -// Notes: A thread that has already entered evacuation and not left may make a nested re-entry into evacuation. Each nested -// invocation of enter_evacuation should be matched by invocation of leave_evacuation. 
- void ShenandoahEvacOOMHandler::leave_evacuation(Thread* thr) { uint8_t level = ShenandoahThreadLocalData::pop_evac_oom_scope(thr); // Not top level, just return From 3a67b1f5fb66abdfdef53cb6346c7a7f1c67d843 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Fri, 14 Jun 2024 13:11:53 -0600 Subject: [PATCH 03/61] Make GC logging less verbose --- .../shenandoahAdaptiveHeuristics.cpp | 32 +++++++++++-------- .../shenandoahAggressiveHeuristics.cpp | 2 +- .../shenandoahCompactHeuristics.cpp | 13 ++++---- .../heuristics/shenandoahHeuristics.cpp | 4 +-- .../heuristics/shenandoahStaticHeuristics.cpp | 6 ++-- .../share/gc/shenandoah/shenandoahFreeSet.cpp | 10 +++--- 6 files changed, 36 insertions(+), 31 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp index 6dc57139f45e5..7197d5aa2d68d 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp @@ -211,7 +211,7 @@ bool ShenandoahAdaptiveHeuristics::should_start_gc() { size_t min_threshold = capacity / 100 * ShenandoahMinFreeThreshold; if (available < min_threshold) { - log_info(gc)("Trigger: Free (" SIZE_FORMAT "%s) is below minimum threshold (" SIZE_FORMAT "%s)", + log_info(gc, ergo)("Trigger: Free (" SIZE_FORMAT "%s) is below minimum threshold (" SIZE_FORMAT "%s)", byte_size_in_proper_unit(available), proper_unit_for_byte_size(available), byte_size_in_proper_unit(min_threshold), proper_unit_for_byte_size(min_threshold)); return true; @@ -221,7 +221,8 @@ bool ShenandoahAdaptiveHeuristics::should_start_gc() { if (_gc_times_learned < max_learn) { size_t init_threshold = capacity / 100 * ShenandoahInitFreeThreshold; if (available < init_threshold) { - log_info(gc)("Trigger: Learning " SIZE_FORMAT " of " SIZE_FORMAT ". Free (" SIZE_FORMAT "%s) is below initial threshold (" SIZE_FORMAT "%s)", + log_info(gc, ergo)("Trigger: Learning " SIZE_FORMAT " of " SIZE_FORMAT + ". 
Free (" SIZE_FORMAT "%s) is below initial threshold (" SIZE_FORMAT "%s)", _gc_times_learned + 1, max_learn, byte_size_in_proper_unit(available), proper_unit_for_byte_size(available), byte_size_in_proper_unit(init_threshold), proper_unit_for_byte_size(init_threshold)); @@ -243,13 +244,15 @@ bool ShenandoahAdaptiveHeuristics::should_start_gc() { double avg_cycle_time = _gc_time_history->davg() + (_margin_of_error_sd * _gc_time_history->dsd()); double avg_alloc_rate = _allocation_rate.upper_bound(_margin_of_error_sd); if (avg_cycle_time > allocation_headroom / avg_alloc_rate) { - log_info(gc)("Trigger: Average GC time (%.2f ms) is above the time for average allocation rate (%.0f %sB/s) to deplete free headroom (" SIZE_FORMAT "%s) (margin of error = %.2f)", - avg_cycle_time * 1000, - byte_size_in_proper_unit(avg_alloc_rate), proper_unit_for_byte_size(avg_alloc_rate), - byte_size_in_proper_unit(allocation_headroom), proper_unit_for_byte_size(allocation_headroom), - _margin_of_error_sd); - - log_info(gc, ergo)("Free headroom: " SIZE_FORMAT "%s (free) - " SIZE_FORMAT "%s (spike) - " SIZE_FORMAT "%s (penalties) = " SIZE_FORMAT "%s", + log_info(gc, ergo)("Trigger: Average GC time (%.2f ms) is above the time for average allocation rate (%.0f %sB/s) " + "to deplete free headroom (" SIZE_FORMAT "%s) (margin of error = %.2f)", + avg_cycle_time * 1000, + byte_size_in_proper_unit(avg_alloc_rate), proper_unit_for_byte_size(avg_alloc_rate), + byte_size_in_proper_unit(allocation_headroom), proper_unit_for_byte_size(allocation_headroom), + _margin_of_error_sd); + + log_info(gc, ergo)("Free headroom: " SIZE_FORMAT "%s (free) - " SIZE_FORMAT "%s (spike) - " + SIZE_FORMAT "%s (penalties) = " SIZE_FORMAT "%s", byte_size_in_proper_unit(available), proper_unit_for_byte_size(available), byte_size_in_proper_unit(spike_headroom), proper_unit_for_byte_size(spike_headroom), byte_size_in_proper_unit(penalties), proper_unit_for_byte_size(penalties), @@ -261,11 +264,12 @@ bool ShenandoahAdaptiveHeuristics::should_start_gc() { bool is_spiking = _allocation_rate.is_spiking(rate, _spike_threshold_sd); if (is_spiking && avg_cycle_time > allocation_headroom / rate) { - log_info(gc)("Trigger: Average GC time (%.2f ms) is above the time for instantaneous allocation rate (%.0f %sB/s) to deplete free headroom (" SIZE_FORMAT "%s) (spike threshold = %.2f)", - avg_cycle_time * 1000, - byte_size_in_proper_unit(rate), proper_unit_for_byte_size(rate), - byte_size_in_proper_unit(allocation_headroom), proper_unit_for_byte_size(allocation_headroom), - _spike_threshold_sd); + log_info(gc, ergo)("Trigger: Average GC time (%.2f ms) is above the time for instantaneous allocation rate (%.0f %sB/s) " + "to deplete free headroom (" SIZE_FORMAT "%s) (spike threshold = %.2f)", + avg_cycle_time * 1000, + byte_size_in_proper_unit(rate), proper_unit_for_byte_size(rate), + byte_size_in_proper_unit(allocation_headroom), proper_unit_for_byte_size(allocation_headroom), + _spike_threshold_sd); _last_trigger = SPIKE; return true; } diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.cpp index fa6b3e67fee82..288a395ce4c11 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.cpp @@ -52,7 +52,7 @@ void ShenandoahAggressiveHeuristics::choose_collection_set_from_regiondata(Shena } bool 
ShenandoahAggressiveHeuristics::should_start_gc() { - log_info(gc)("Trigger: Start next cycle immediately"); + log_info(gc, ergo)("Trigger: Start next cycle immediately"); return true; } diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.cpp index c8e882a0f64e5..72c048dbc102c 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.cpp @@ -58,17 +58,18 @@ bool ShenandoahCompactHeuristics::should_start_gc() { size_t min_threshold = capacity / 100 * ShenandoahMinFreeThreshold; if (available < min_threshold) { - log_info(gc)("Trigger: Free (" SIZE_FORMAT "%s) is below minimum threshold (" SIZE_FORMAT "%s)", - byte_size_in_proper_unit(available), proper_unit_for_byte_size(available), - byte_size_in_proper_unit(min_threshold), proper_unit_for_byte_size(min_threshold)); + log_info(gc, ergo)("Trigger: Free (" SIZE_FORMAT "%s) is below minimum threshold (" SIZE_FORMAT "%s)", + byte_size_in_proper_unit(available), proper_unit_for_byte_size(available), + byte_size_in_proper_unit(min_threshold), proper_unit_for_byte_size(min_threshold)); return true; } size_t bytes_allocated = _space_info->bytes_allocated_since_gc_start(); if (bytes_allocated > threshold_bytes_allocated) { - log_info(gc)("Trigger: Allocated since last cycle (" SIZE_FORMAT "%s) is larger than allocation threshold (" SIZE_FORMAT "%s)", - byte_size_in_proper_unit(bytes_allocated), proper_unit_for_byte_size(bytes_allocated), - byte_size_in_proper_unit(threshold_bytes_allocated), proper_unit_for_byte_size(threshold_bytes_allocated)); + log_info(gc, ergo)("Trigger: Allocated since last cycle (" SIZE_FORMAT + "%s) is larger than allocation threshold (" SIZE_FORMAT "%s)", + byte_size_in_proper_unit(bytes_allocated), proper_unit_for_byte_size(bytes_allocated), + byte_size_in_proper_unit(threshold_bytes_allocated), proper_unit_for_byte_size(threshold_bytes_allocated)); return true; } diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.cpp index 2d5af892c809f..6b6f305369215 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.cpp @@ -180,14 +180,14 @@ bool ShenandoahHeuristics::should_start_gc() { // Perform GC to cleanup metaspace if (has_metaspace_oom()) { // Some of vmTestbase/metaspace tests depend on following line to count GC cycles - log_info(gc)("Trigger: %s", GCCause::to_string(GCCause::_metadata_GC_threshold)); + log_info(gc, ergo)("Trigger: %s", GCCause::to_string(GCCause::_metadata_GC_threshold)); return true; } if (ShenandoahGuaranteedGCInterval > 0) { double last_time_ms = (os::elapsedTime() - _last_cycle_end) * 1000; if (last_time_ms > ShenandoahGuaranteedGCInterval) { - log_info(gc)("Trigger: Time since last GC (%.0f ms) is larger than guaranteed interval (" UINTX_FORMAT " ms)", + log_info(gc, ergo)("Trigger: Time since last GC (%.0f ms) is larger than guaranteed interval (" UINTX_FORMAT " ms)", last_time_ms, ShenandoahGuaranteedGCInterval); return true; } diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.cpp index ee59194feb689..a010e7b367133 100644 --- 
a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.cpp @@ -52,9 +52,9 @@ bool ShenandoahStaticHeuristics::should_start_gc() { size_t threshold_available = capacity / 100 * ShenandoahMinFreeThreshold; if (available < threshold_available) { - log_info(gc)("Trigger: Free (" SIZE_FORMAT "%s) is below minimum threshold (" SIZE_FORMAT "%s)", - byte_size_in_proper_unit(available), proper_unit_for_byte_size(available), - byte_size_in_proper_unit(threshold_available), proper_unit_for_byte_size(threshold_available)); + log_info(gc, ergo)("Trigger: Free (" SIZE_FORMAT "%s) is below minimum threshold (" SIZE_FORMAT "%s)", + byte_size_in_proper_unit(available), proper_unit_for_byte_size(available), + byte_size_in_proper_unit(threshold_available), proper_unit_for_byte_size(threshold_available)); return true; } return ShenandoahHeuristics::should_start_gc(); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index c11d7e814e4e0..ec0492f017bbd 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -48,19 +48,19 @@ static const char* partition_name(ShenandoahFreeSetPartitionId t) { #ifndef PRODUCT void ShenandoahRegionPartitions::dump_bitmap() const { - log_info(gc)("Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT "], Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]", + log_debug(gc)("Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT "], Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]", _leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)], _rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)], _leftmosts[int(ShenandoahFreeSetPartitionId::Collector)], _rightmosts[int(ShenandoahFreeSetPartitionId::Collector)]); - log_info(gc)("Empty Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT + log_debug(gc)("Empty Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT "], Empty Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]", _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)], _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)], _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)], _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)]); - log_info(gc)("%6s: %18s %18s %18s", "index", "Mutator Bits", "Collector Bits", "NotFree Bits"); + log_debug(gc)("%6s: %18s %18s %18s", "index", "Mutator Bits", "Collector Bits", "NotFree Bits"); dump_bitmap_range(0, _max-1); } @@ -83,7 +83,7 @@ void ShenandoahRegionPartitions::dump_bitmap_row(idx_t region_idx) const { uintx collector_bits = _membership[int(ShenandoahFreeSetPartitionId::Collector)].bits_at(aligned_idx); uintx free_bits = mutator_bits | collector_bits; uintx notfree_bits = ~free_bits; - log_info(gc)(SSIZE_FORMAT_W(6) ": " SIZE_FORMAT_X_0 " 0x" SIZE_FORMAT_X_0 " 0x" SIZE_FORMAT_X_0, + log_debug(gc)(SSIZE_FORMAT_W(6) ": " SIZE_FORMAT_X_0 " 0x" SIZE_FORMAT_X_0 " 0x" SIZE_FORMAT_X_0, aligned_idx, mutator_bits, collector_bits, notfree_bits); } #endif @@ -1048,7 +1048,7 @@ void ShenandoahFreeSet::move_regions_from_collector_to_mutator(size_t max_xfer_r } size_t collector_xfer = collector_empty_xfer + collector_not_empty_xfer; - log_info(gc)("At start of update refs, moving " SIZE_FORMAT "%s to Mutator free partition from Collector Reserve", + log_info(gc, ergo)("At start of update refs, moving " SIZE_FORMAT "%s to Mutator free partition from Collector Reserve", 
byte_size_in_proper_unit(collector_xfer), proper_unit_for_byte_size(collector_xfer)); } From 3692312a928439fc7f6dae5f4a22b75186ef8525 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Wed, 19 Jun 2024 08:58:52 -0600 Subject: [PATCH 04/61] Revert "Make GC logging less verbose" This reverts commit 3a67b1f5fb66abdfdef53cb6346c7a7f1c67d843. --- .../shenandoahAdaptiveHeuristics.cpp | 32 ++++++++----------- .../shenandoahAggressiveHeuristics.cpp | 2 +- .../shenandoahCompactHeuristics.cpp | 13 ++++---- .../heuristics/shenandoahHeuristics.cpp | 4 +-- .../heuristics/shenandoahStaticHeuristics.cpp | 6 ++-- .../share/gc/shenandoah/shenandoahFreeSet.cpp | 10 +++--- 6 files changed, 31 insertions(+), 36 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp index 7197d5aa2d68d..6dc57139f45e5 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp @@ -211,7 +211,7 @@ bool ShenandoahAdaptiveHeuristics::should_start_gc() { size_t min_threshold = capacity / 100 * ShenandoahMinFreeThreshold; if (available < min_threshold) { - log_info(gc, ergo)("Trigger: Free (" SIZE_FORMAT "%s) is below minimum threshold (" SIZE_FORMAT "%s)", + log_info(gc)("Trigger: Free (" SIZE_FORMAT "%s) is below minimum threshold (" SIZE_FORMAT "%s)", byte_size_in_proper_unit(available), proper_unit_for_byte_size(available), byte_size_in_proper_unit(min_threshold), proper_unit_for_byte_size(min_threshold)); return true; @@ -221,8 +221,7 @@ bool ShenandoahAdaptiveHeuristics::should_start_gc() { if (_gc_times_learned < max_learn) { size_t init_threshold = capacity / 100 * ShenandoahInitFreeThreshold; if (available < init_threshold) { - log_info(gc, ergo)("Trigger: Learning " SIZE_FORMAT " of " SIZE_FORMAT - ". Free (" SIZE_FORMAT "%s) is below initial threshold (" SIZE_FORMAT "%s)", + log_info(gc)("Trigger: Learning " SIZE_FORMAT " of " SIZE_FORMAT ". 
Free (" SIZE_FORMAT "%s) is below initial threshold (" SIZE_FORMAT "%s)", _gc_times_learned + 1, max_learn, byte_size_in_proper_unit(available), proper_unit_for_byte_size(available), byte_size_in_proper_unit(init_threshold), proper_unit_for_byte_size(init_threshold)); @@ -244,15 +243,13 @@ bool ShenandoahAdaptiveHeuristics::should_start_gc() { double avg_cycle_time = _gc_time_history->davg() + (_margin_of_error_sd * _gc_time_history->dsd()); double avg_alloc_rate = _allocation_rate.upper_bound(_margin_of_error_sd); if (avg_cycle_time > allocation_headroom / avg_alloc_rate) { - log_info(gc, ergo)("Trigger: Average GC time (%.2f ms) is above the time for average allocation rate (%.0f %sB/s) " - "to deplete free headroom (" SIZE_FORMAT "%s) (margin of error = %.2f)", - avg_cycle_time * 1000, - byte_size_in_proper_unit(avg_alloc_rate), proper_unit_for_byte_size(avg_alloc_rate), - byte_size_in_proper_unit(allocation_headroom), proper_unit_for_byte_size(allocation_headroom), - _margin_of_error_sd); - - log_info(gc, ergo)("Free headroom: " SIZE_FORMAT "%s (free) - " SIZE_FORMAT "%s (spike) - " - SIZE_FORMAT "%s (penalties) = " SIZE_FORMAT "%s", + log_info(gc)("Trigger: Average GC time (%.2f ms) is above the time for average allocation rate (%.0f %sB/s) to deplete free headroom (" SIZE_FORMAT "%s) (margin of error = %.2f)", + avg_cycle_time * 1000, + byte_size_in_proper_unit(avg_alloc_rate), proper_unit_for_byte_size(avg_alloc_rate), + byte_size_in_proper_unit(allocation_headroom), proper_unit_for_byte_size(allocation_headroom), + _margin_of_error_sd); + + log_info(gc, ergo)("Free headroom: " SIZE_FORMAT "%s (free) - " SIZE_FORMAT "%s (spike) - " SIZE_FORMAT "%s (penalties) = " SIZE_FORMAT "%s", byte_size_in_proper_unit(available), proper_unit_for_byte_size(available), byte_size_in_proper_unit(spike_headroom), proper_unit_for_byte_size(spike_headroom), byte_size_in_proper_unit(penalties), proper_unit_for_byte_size(penalties), @@ -264,12 +261,11 @@ bool ShenandoahAdaptiveHeuristics::should_start_gc() { bool is_spiking = _allocation_rate.is_spiking(rate, _spike_threshold_sd); if (is_spiking && avg_cycle_time > allocation_headroom / rate) { - log_info(gc, ergo)("Trigger: Average GC time (%.2f ms) is above the time for instantaneous allocation rate (%.0f %sB/s) " - "to deplete free headroom (" SIZE_FORMAT "%s) (spike threshold = %.2f)", - avg_cycle_time * 1000, - byte_size_in_proper_unit(rate), proper_unit_for_byte_size(rate), - byte_size_in_proper_unit(allocation_headroom), proper_unit_for_byte_size(allocation_headroom), - _spike_threshold_sd); + log_info(gc)("Trigger: Average GC time (%.2f ms) is above the time for instantaneous allocation rate (%.0f %sB/s) to deplete free headroom (" SIZE_FORMAT "%s) (spike threshold = %.2f)", + avg_cycle_time * 1000, + byte_size_in_proper_unit(rate), proper_unit_for_byte_size(rate), + byte_size_in_proper_unit(allocation_headroom), proper_unit_for_byte_size(allocation_headroom), + _spike_threshold_sd); _last_trigger = SPIKE; return true; } diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.cpp index 288a395ce4c11..fa6b3e67fee82 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.cpp @@ -52,7 +52,7 @@ void ShenandoahAggressiveHeuristics::choose_collection_set_from_regiondata(Shena } bool 
ShenandoahAggressiveHeuristics::should_start_gc() { - log_info(gc, ergo)("Trigger: Start next cycle immediately"); + log_info(gc)("Trigger: Start next cycle immediately"); return true; } diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.cpp index 72c048dbc102c..c8e882a0f64e5 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.cpp @@ -58,18 +58,17 @@ bool ShenandoahCompactHeuristics::should_start_gc() { size_t min_threshold = capacity / 100 * ShenandoahMinFreeThreshold; if (available < min_threshold) { - log_info(gc, ergo)("Trigger: Free (" SIZE_FORMAT "%s) is below minimum threshold (" SIZE_FORMAT "%s)", - byte_size_in_proper_unit(available), proper_unit_for_byte_size(available), - byte_size_in_proper_unit(min_threshold), proper_unit_for_byte_size(min_threshold)); + log_info(gc)("Trigger: Free (" SIZE_FORMAT "%s) is below minimum threshold (" SIZE_FORMAT "%s)", + byte_size_in_proper_unit(available), proper_unit_for_byte_size(available), + byte_size_in_proper_unit(min_threshold), proper_unit_for_byte_size(min_threshold)); return true; } size_t bytes_allocated = _space_info->bytes_allocated_since_gc_start(); if (bytes_allocated > threshold_bytes_allocated) { - log_info(gc, ergo)("Trigger: Allocated since last cycle (" SIZE_FORMAT - "%s) is larger than allocation threshold (" SIZE_FORMAT "%s)", - byte_size_in_proper_unit(bytes_allocated), proper_unit_for_byte_size(bytes_allocated), - byte_size_in_proper_unit(threshold_bytes_allocated), proper_unit_for_byte_size(threshold_bytes_allocated)); + log_info(gc)("Trigger: Allocated since last cycle (" SIZE_FORMAT "%s) is larger than allocation threshold (" SIZE_FORMAT "%s)", + byte_size_in_proper_unit(bytes_allocated), proper_unit_for_byte_size(bytes_allocated), + byte_size_in_proper_unit(threshold_bytes_allocated), proper_unit_for_byte_size(threshold_bytes_allocated)); return true; } diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.cpp index 6b6f305369215..2d5af892c809f 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.cpp @@ -180,14 +180,14 @@ bool ShenandoahHeuristics::should_start_gc() { // Perform GC to cleanup metaspace if (has_metaspace_oom()) { // Some of vmTestbase/metaspace tests depend on following line to count GC cycles - log_info(gc, ergo)("Trigger: %s", GCCause::to_string(GCCause::_metadata_GC_threshold)); + log_info(gc)("Trigger: %s", GCCause::to_string(GCCause::_metadata_GC_threshold)); return true; } if (ShenandoahGuaranteedGCInterval > 0) { double last_time_ms = (os::elapsedTime() - _last_cycle_end) * 1000; if (last_time_ms > ShenandoahGuaranteedGCInterval) { - log_info(gc, ergo)("Trigger: Time since last GC (%.0f ms) is larger than guaranteed interval (" UINTX_FORMAT " ms)", + log_info(gc)("Trigger: Time since last GC (%.0f ms) is larger than guaranteed interval (" UINTX_FORMAT " ms)", last_time_ms, ShenandoahGuaranteedGCInterval); return true; } diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.cpp index a010e7b367133..ee59194feb689 100644 --- 
a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahStaticHeuristics.cpp @@ -52,9 +52,9 @@ bool ShenandoahStaticHeuristics::should_start_gc() { size_t threshold_available = capacity / 100 * ShenandoahMinFreeThreshold; if (available < threshold_available) { - log_info(gc, ergo)("Trigger: Free (" SIZE_FORMAT "%s) is below minimum threshold (" SIZE_FORMAT "%s)", - byte_size_in_proper_unit(available), proper_unit_for_byte_size(available), - byte_size_in_proper_unit(threshold_available), proper_unit_for_byte_size(threshold_available)); + log_info(gc)("Trigger: Free (" SIZE_FORMAT "%s) is below minimum threshold (" SIZE_FORMAT "%s)", + byte_size_in_proper_unit(available), proper_unit_for_byte_size(available), + byte_size_in_proper_unit(threshold_available), proper_unit_for_byte_size(threshold_available)); return true; } return ShenandoahHeuristics::should_start_gc(); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index ec0492f017bbd..c11d7e814e4e0 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -48,19 +48,19 @@ static const char* partition_name(ShenandoahFreeSetPartitionId t) { #ifndef PRODUCT void ShenandoahRegionPartitions::dump_bitmap() const { - log_debug(gc)("Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT "], Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]", + log_info(gc)("Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT "], Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]", _leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)], _rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)], _leftmosts[int(ShenandoahFreeSetPartitionId::Collector)], _rightmosts[int(ShenandoahFreeSetPartitionId::Collector)]); - log_debug(gc)("Empty Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT + log_info(gc)("Empty Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT "], Empty Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]", _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)], _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)], _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)], _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)]); - log_debug(gc)("%6s: %18s %18s %18s", "index", "Mutator Bits", "Collector Bits", "NotFree Bits"); + log_info(gc)("%6s: %18s %18s %18s", "index", "Mutator Bits", "Collector Bits", "NotFree Bits"); dump_bitmap_range(0, _max-1); } @@ -83,7 +83,7 @@ void ShenandoahRegionPartitions::dump_bitmap_row(idx_t region_idx) const { uintx collector_bits = _membership[int(ShenandoahFreeSetPartitionId::Collector)].bits_at(aligned_idx); uintx free_bits = mutator_bits | collector_bits; uintx notfree_bits = ~free_bits; - log_debug(gc)(SSIZE_FORMAT_W(6) ": " SIZE_FORMAT_X_0 " 0x" SIZE_FORMAT_X_0 " 0x" SIZE_FORMAT_X_0, + log_info(gc)(SSIZE_FORMAT_W(6) ": " SIZE_FORMAT_X_0 " 0x" SIZE_FORMAT_X_0 " 0x" SIZE_FORMAT_X_0, aligned_idx, mutator_bits, collector_bits, notfree_bits); } #endif @@ -1048,7 +1048,7 @@ void ShenandoahFreeSet::move_regions_from_collector_to_mutator(size_t max_xfer_r } size_t collector_xfer = collector_empty_xfer + collector_not_empty_xfer; - log_info(gc, ergo)("At start of update refs, moving " SIZE_FORMAT "%s to Mutator free partition from Collector Reserve", + log_info(gc)("At start of update refs, moving " SIZE_FORMAT "%s to Mutator free partition from Collector Reserve", 
byte_size_in_proper_unit(collector_xfer), proper_unit_for_byte_size(collector_xfer)); } From c5f212d128843d3645e2d4a7876a4577736bf88f Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Mon, 16 Jun 2025 23:27:45 +0000 Subject: [PATCH 05/61] Add support for more bookkeeping in ShenandoahFreeSet --- .../share/gc/shenandoah/shenandoahFreeSet.cpp | 125 ++++++++++++++---- .../share/gc/shenandoah/shenandoahFreeSet.hpp | 59 +++++++-- 2 files changed, 148 insertions(+), 36 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index 1acb6a23e7a4c..e03b19f8dcd72 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -231,11 +231,16 @@ void ShenandoahRegionPartitions::make_all_regions_unavailable() { _used[partition_id] = 0; _available[partition_id] = FreeSetUnderConstruction; } + _total_region_counts[int(ShenandoahFreeSetPartitionId::Mutator)] = + _total_region_counts[int(ShenandoahFreeSetPartitionId::Collector)] = 0; _region_counts[int(ShenandoahFreeSetPartitionId::Mutator)] = _region_counts[int(ShenandoahFreeSetPartitionId::Collector)] = 0; + _empty_region_counts[int(ShenandoahFreeSetPartitionId::Mutator)] = + _empty_region_counts[int(ShenandoahFreeSetPartitionId::Collector)] = 0; } void ShenandoahRegionPartitions::establish_mutator_intervals(idx_t mutator_leftmost, idx_t mutator_rightmost, idx_t mutator_leftmost_empty, idx_t mutator_rightmost_empty, + size_t total_mutator_regions, size_t empty_mutator_regions, size_t mutator_region_count, size_t mutator_used) { shenandoah_assert_heaplocked(); @@ -250,6 +255,9 @@ void ShenandoahRegionPartitions::establish_mutator_intervals(idx_t mutator_leftm _available[int(ShenandoahFreeSetPartitionId::Mutator)] = _capacity[int(ShenandoahFreeSetPartitionId::Mutator)] - _used[int(ShenandoahFreeSetPartitionId::Mutator)]; + _total_region_counts[int(ShenandoahFreeSetPartitionId::Mutator)] = total_mutator_regions; + _empty_region_counts[int(ShenandoahFreeSetPartitionId::Mutator)] = empty_mutator_regions; + _leftmosts[int(ShenandoahFreeSetPartitionId::Collector)] = _max; _rightmosts[int(ShenandoahFreeSetPartitionId::Collector)] = -1; _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)] = _max; @@ -259,12 +267,17 @@ void ShenandoahRegionPartitions::establish_mutator_intervals(idx_t mutator_leftm _used[int(ShenandoahFreeSetPartitionId::Collector)] = 0; _capacity[int(ShenandoahFreeSetPartitionId::Collector)] = 0; _available[int(ShenandoahFreeSetPartitionId::Collector)] = 0; + + _total_region_counts[int(ShenandoahFreeSetPartitionId::Collector)] = 0; + _empty_region_counts[int(ShenandoahFreeSetPartitionId::Collector)] = 0; } void ShenandoahRegionPartitions::establish_old_collector_intervals(idx_t old_collector_leftmost, idx_t old_collector_rightmost, idx_t old_collector_leftmost_empty, idx_t old_collector_rightmost_empty, - size_t old_collector_region_count, size_t old_collector_used) { + size_t total_old_collector_region_count, + size_t old_collector_empty, size_t old_collector_regions, + size_t old_collector_used) { shenandoah_assert_heaplocked(); _leftmosts[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_leftmost; @@ -272,11 +285,16 @@ void ShenandoahRegionPartitions::establish_old_collector_intervals(idx_t old_col _leftmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_leftmost_empty; _rightmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)] = 
old_collector_rightmost_empty; - _region_counts[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_region_count; + _region_counts[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_regions; _used[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_used; - _capacity[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_region_count * _region_size_bytes; + _capacity[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_regions * _region_size_bytes; _available[int(ShenandoahFreeSetPartitionId::OldCollector)] = _capacity[int(ShenandoahFreeSetPartitionId::OldCollector)] - _used[int(ShenandoahFreeSetPartitionId::OldCollector)]; + + _total_region_counts[int(ShenandoahFreeSetPartitionId::OldCollector)] = total_old_collector_region_count; + _empty_region_counts[int(ShenandoahFreeSetPartitionId::Mutator)] = old_collector_empty; + + } void ShenandoahRegionPartitions::increase_used(ShenandoahFreeSetPartitionId which_partition, size_t bytes) { @@ -359,7 +377,9 @@ void ShenandoahRegionPartitions::retire_range_from_partition( assert (in_free_set(partition, idx), "Must be in partition to remove from partition"); _membership[int(partition)].clear_bit(idx); } - _region_counts[int(partition)] -= high_idx + 1 - low_idx; + size_t num_regions = high_idx + 1 - low_idx; + _region_counts[int(partition)] -= num_regions; + _empty_region_counts[int(partition)] -= num_regions; shrink_interval_if_range_modifies_either_boundary(partition, low_idx, high_idx); } @@ -392,6 +412,10 @@ void ShenandoahRegionPartitions::make_free(idx_t idx, ShenandoahFreeSetPartition _available[int(which_partition)] += available; expand_interval_if_boundary_modified(which_partition, idx, available); _region_counts[int(which_partition)]++; + _total_region_counts[int(which_partition)]++; + if (available == _region_size_bytes) { + _empty_region_counts[int(which_partition)]++; + } } bool ShenandoahRegionPartitions::is_mutator_partition(ShenandoahFreeSetPartitionId p) { @@ -461,8 +485,16 @@ void ShenandoahRegionPartitions::move_from_partition_to_partition(idx_t idx, She _available[int(new_partition)] += available; expand_interval_if_boundary_modified(new_partition, idx, available); + _total_region_counts[int(orig_partition)]--; _region_counts[int(orig_partition)]--; + + _total_region_counts[int(new_partition)]++; _region_counts[int(new_partition)]++; + + if (available == _region_size_bytes) { + _empty_region_counts[int(orig_partition)]--; + _empty_region_counts[int(new_partition)]++; + } } const char* ShenandoahRegionPartitions::partition_membership_name(idx_t idx) const { @@ -1034,7 +1066,6 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah log_debug(gc, free)("Using new region (%zu) for %s (" PTR_FORMAT ").", r->index(), ShenandoahAllocRequest::alloc_type_to_string(req.type()), p2i(&req)); assert(!r->is_affiliated(), "New region %zu should be unaffiliated", r->index()); - r->set_affiliation(req.affiliation()); if (r->is_old()) { // Any OLD region allocated during concurrent coalesce-and-fill does not need to be coalesced and filled because @@ -1140,6 +1171,21 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah static const size_t min_capacity = (size_t) (ShenandoahHeapRegion::region_size_bytes() * (1.0 - 1.0 / ShenandoahEvacWaste)); size_t ac = alloc_capacity(r); + ShenandoahFreeSetPartitionId orig_partition; + if (req.is_mutator_alloc()) { + orig_partition = ShenandoahFreeSetPartitionId::Mutator; + } else if (req.type() 
== ShenandoahAllocRequest::_alloc_gclab) { + orig_partition = ShenandoahFreeSetPartitionId::Collector; + } else if (req.type() == ShenandoahAllocRequest::_alloc_plab) { + orig_partition = ShenandoahFreeSetPartitionId::OldCollector; + } else { + assert(req.type() == ShenandoahAllocRequest::_alloc_shared_gc, "Unexpected allocation type"); + if (req.is_old()) { + orig_partition = ShenandoahFreeSetPartitionId::OldCollector; + } else { + orig_partition = ShenandoahFreeSetPartitionId::Collector; + } + } if (((result == nullptr) && (ac < min_capacity)) || (alloc_capacity(r) < PLAB::min_size() * HeapWordSize)) { // Regardless of whether this allocation succeeded, if the remaining memory is less than PLAB:min_size(), retire this region. // Note that retire_from_partition() increases used to account for waste. @@ -1148,23 +1194,10 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah // then retire the region so that subsequent searches can find available memory more quickly. size_t idx = r->index(); - ShenandoahFreeSetPartitionId orig_partition; - if (req.is_mutator_alloc()) { - orig_partition = ShenandoahFreeSetPartitionId::Mutator; - } else if (req.type() == ShenandoahAllocRequest::_alloc_gclab) { - orig_partition = ShenandoahFreeSetPartitionId::Collector; - } else if (req.type() == ShenandoahAllocRequest::_alloc_plab) { - orig_partition = ShenandoahFreeSetPartitionId::OldCollector; - } else { - assert(req.type() == ShenandoahAllocRequest::_alloc_shared_gc, "Unexpected allocation type"); - if (req.is_old()) { - orig_partition = ShenandoahFreeSetPartitionId::OldCollector; - } else { - orig_partition = ShenandoahFreeSetPartitionId::Collector; - } - } _partitions.retire_from_partition(orig_partition, idx, r->used()); _partitions.assert_bounds(); + } else if ((result != nullptr) && in_new_region) { + _partitions.one_region_is_no_longer_empty(orig_partition); } return result; } @@ -1423,6 +1456,12 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_cset_regi size_t old_collector_regions = 0; size_t old_collector_used = 0; + size_t mutator_empty = 0; + size_t old_collector_empty = 0; + + size_t total_mutator_regions = 0; + size_t total_old_collector_regions = 0; + size_t num_regions = _heap->num_regions(); for (size_t idx = 0; idx < num_regions; idx++) { ShenandoahHeapRegion* region = _heap->get_region(idx); @@ -1459,6 +1498,7 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_cset_regi mutator_rightmost = idx; } if (ac == region_size_bytes) { + mutator_empty++; if (idx < mutator_leftmost_empty) { mutator_leftmost_empty = idx; } @@ -1478,6 +1518,7 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_cset_regi old_collector_rightmost = idx; } if (ac == region_size_bytes) { + old_collector_empty++; if (idx < old_collector_leftmost_empty) { old_collector_leftmost_empty = idx; } @@ -1489,8 +1530,39 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_cset_regi old_collector_used += (region_size_bytes - ac); } } + if (region->is_trash() || !region->is_old()) { + total_mutator_regions++; + } else { + total_old_collector_regions++; + } } } +#ifdef KELVIN_ENHANCEMENTS + log_info(gc, free)(" At end of prep_to_rebuild, mutator_leftmost: %zu" + ", mutator_rightmost: %zu" + ", mutator_leftmost_empty: %zu" + ", mutator_rightmost_empty: %zu" + ", mutator_regions: %zu" + ", mutator_used: %zu", + mutator_leftmost, mutator_rightmost, mutator_leftmost_empty, mutator_rightmost_empty, + mutator_regions, 
mutator_used); + log_info(gc, free)(" old_collector_leftmost: %zu" + ", old_collector_rightmost: %zu" + ", old_collector_leftmost_empty: %zu" + ", old_collector_rightmost_empty: %zu" + ", old_collector_regions: %zu" + ", old_collector_used: %zu", + old_collector_leftmost, old_collector_rightmost, old_collector_leftmost_empty, old_collector_rightmost_empty, + old_collector_regions, old_collector_used); + log_info(gc, free)(" total_mutator_regions: %zu" + ", total_collector_regions: %zu" + ", total_old_collector_regions: %zu" + ", mutator_empty: %zu" + ", collector_empty: %zu" + ", old_collector_empty: %zu", + total_mutator_regions, (size_t) 0, total_old_collector_regions, + mutator_empty, collector_empty, old_collector_empty); +#endif log_debug(gc, free)(" At end of prep_to_rebuild, mutator_leftmost: %zu" ", mutator_rightmost: %zu" ", mutator_leftmost_empty: %zu" @@ -1499,7 +1571,6 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_cset_regi ", mutator_used: %zu", mutator_leftmost, mutator_rightmost, mutator_leftmost_empty, mutator_rightmost_empty, mutator_regions, mutator_used); - log_debug(gc, free)(" old_collector_leftmost: %zu" ", old_collector_rightmost: %zu" ", old_collector_leftmost_empty: %zu" @@ -1508,16 +1579,20 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_cset_regi ", old_collector_used: %zu", old_collector_leftmost, old_collector_rightmost, old_collector_leftmost_empty, old_collector_rightmost_empty, old_collector_regions, old_collector_used); - + log_debug(gc, free)(" total_mutator_regions: %zu, total_old_collector_regions: %zu" + ", mutator_empty: %zu, old_collector_empty: %zu", + total_mutator_regions, total_old_collector_regions, mutator_empty, old_collector_empty); idx_t rightmost_idx = (mutator_leftmost == max_regions)? -1: (idx_t) mutator_rightmost; idx_t rightmost_empty_idx = (mutator_leftmost_empty == max_regions)? -1: (idx_t) mutator_rightmost_empty; + _partitions.establish_mutator_intervals(mutator_leftmost, rightmost_idx, mutator_leftmost_empty, rightmost_empty_idx, - mutator_regions, mutator_used); + total_mutator_regions, mutator_empty, mutator_regions, mutator_used); rightmost_idx = (old_collector_leftmost == max_regions)? -1: (idx_t) old_collector_rightmost; rightmost_empty_idx = (old_collector_leftmost_empty == max_regions)? 
-1: (idx_t) old_collector_rightmost_empty; - _partitions.establish_old_collector_intervals(old_collector_leftmost, rightmost_idx, old_collector_leftmost_empty, - rightmost_empty_idx, old_collector_regions, old_collector_used); + _partitions.establish_old_collector_intervals(old_collector_leftmost, rightmost_idx, + old_collector_leftmost_empty, rightmost_empty_idx, total_old_collector_regions, + old_collector_empty, old_collector_regions, old_collector_used); log_debug(gc, free)(" After find_regions_with_alloc_capacity(), Mutator range [%zd, %zd]," " Old Collector range [%zd, %zd]", _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator), diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp index 55f23480618b9..844e43a1f179c 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp @@ -72,16 +72,47 @@ class ShenandoahRegionPartitions { ssize_t _leftmosts_empty[UIntNumPartitions]; ssize_t _rightmosts_empty[UIntNumPartitions]; - // For each partition p, _capacity[p] represents the total amount of memory within the partition at the time - // of the most recent rebuild, _used[p] represents the total amount of memory that has been allocated within this - // partition (either already allocated as of the rebuild, or allocated since the rebuild). _capacity[p] and _used[p] - // are denoted in bytes. Note that some regions that had been assigned to a particular partition at rebuild time - // may have been retired following the rebuild. The tallies for these regions are still reflected in _capacity[p] - // and _used[p], even though the region may have been removed from the free set. + // For each partition p: + // _capacity[p] represents the total amount of memory within the partition at the time of the most recent rebuild + // _retired_capacity[p] represents the amount of memory that would be associated with p if it had not already been + // retired at the time of the most recent rebuild + // _total_capacity[p] is the sum of _capacity[p] and _retired_capacity[p] + // (The values are added under heap lock to assure coherency) + // _used[p] represents the total amount of memory that has been allocated within this partition (either already + // allocated as of the rebuild, or allocated since the rebuild). + // _available[p] represents the total amount of memory that can be allocated within partition p, calculated from + // _capacity[p] minus _used[p], where the difference is computed and assigned under heap lock + // + // Unlike capacity, which represents the total amount of memory representing each partition as of the moment + // the freeset was most recently constructed: + // + // _region_counts[p] represents the number of regions associated with the partition which currently have available memory. + // When a region is retired from partition p, _region_counts[p] is decremented. + // _total_region_counts[p] is _total_capacity[p] / RegionSizeBytes. probably do not need to keep separate field for this. + // _empty_region_counts[p] is number of regions associated with p which are entirely empty + // + // capacity and used values are expressed in bytes. + // + // When a region is retired, the used[p] is increased to account for alignment waste. capacity is unaffected. + // + // When a region is "flipped", we adjust capacities and region counts for original and destination partitions. We also + // adjust used values when flipping from mutator to collector. 
Flip to old collector does not need to adjust used because + // only empty regions can be flipped to old collector. + + size_t _retired_capacity[UIntNumPartitions]; size_t _capacity[UIntNumPartitions]; + size_t _used[UIntNumPartitions]; size_t _available[UIntNumPartitions]; + + // Some notes: + // _retired_regions[p] is _total_region_counts[p] - _region_counts[p] + // _empty_region_counts[p] <= _region_counts[p] <= _total_region_counts[p] + // _total_capacity[p] is _total_region_counts[p] * _region_size_bytes size_t _region_counts[UIntNumPartitions]; + size_t _total_region_counts[UIntNumPartitions]; + size_t _empty_region_counts[UIntNumPartitions]; + // For each partition p, _left_to_right_bias is true iff allocations are normally made from lower indexed regions // before higher indexed regions. @@ -119,19 +150,25 @@ class ShenandoahRegionPartitions { _membership[int(p)].set_bit(idx); } + inline void one_region_is_no_longer_empty(ShenandoahFreeSetPartitionId partition) { + _empty_region_counts[int(partition)] -= 1; + } + // Set the Mutator intervals, usage, and capacity according to arguments. Reset the Collector intervals, used, capacity // to represent empty Collector free set. We use this at the end of rebuild_free_set() to avoid the overhead of making // many redundant incremental adjustments to the mutator intervals as the free set is being rebuilt. - void establish_mutator_intervals(ssize_t mutator_leftmost, ssize_t mutator_rightmost, - ssize_t mutator_leftmost_empty, ssize_t mutator_rightmost_empty, + void establish_mutator_intervals(idx_t mutator_leftmost, idx_t mutator_rightmost, + idx_t mutator_leftmost_empty, idx_t mutator_rightmost_empty, + size_t total_mutator_regions, size_t empty_mutator_regions, size_t mutator_region_count, size_t mutator_used); // Set the OldCollector intervals, usage, and capacity according to arguments. We use this at the end of rebuild_free_set() // to avoid the overhead of making many redundant incremental adjustments to the mutator intervals as the free set is being // rebuilt. - void establish_old_collector_intervals(ssize_t old_collector_leftmost, ssize_t old_collector_rightmost, - ssize_t old_collector_leftmost_empty, ssize_t old_collector_rightmost_empty, - size_t old_collector_region_count, size_t old_collector_used); + void establish_old_collector_intervals(idx_t old_collector_leftmost, idx_t old_collector_rightmost, + idx_t old_collector_leftmost_empty, idx_t old_collector_rightmost_empty, + size_t total_old_collector_region_count, size_t old_collector_empty, + size_t old_collector_regions, size_t old_collector_used); // Retire region idx from within partition, , leaving its capacity and used as part of the original free partition's totals. // Requires that region idx is in in the Mutator or Collector partitions. Hereafter, identifies this region as NotFree. 
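The comments in the header above spell out the relationships that the new counters are expected to maintain: every empty region is still counted among the regions with available memory, every such region is counted in the total, and the byte quantities are derived under the heap lock. A stand-alone sketch of those invariants follows; the helper name verify_partition_counts, its parameters, and the use of the standard assert macro are illustrative assumptions rather than part of the patch:

    #include <cassert>
    #include <cstddef>

    // Sketch only: checks the documented relationships among the per-partition
    // counters for one partition p. Parameter names mirror the fields described above.
    void verify_partition_counts(size_t total_regions,      // _total_region_counts[p]
                                 size_t free_regions,       // _region_counts[p]
                                 size_t empty_regions,      // _empty_region_counts[p]
                                 size_t capacity_bytes,     // _capacity[p]
                                 size_t used_bytes,         // _used[p]
                                 size_t available_bytes,    // _available[p]
                                 size_t region_size_bytes) {
      // _empty_region_counts[p] <= _region_counts[p] <= _total_region_counts[p]
      assert(empty_regions <= free_regions);
      assert(free_regions <= total_regions);

      // Retired regions are not stored separately; they are the difference between
      // the total and the regions that still have available memory.
      size_t retired_regions = total_regions - free_regions;
      (void) retired_regions;

      // Available memory is capacity minus used, computed under the heap lock
      // (except while the free set is under construction).
      assert(available_bytes == capacity_bytes - used_bytes);

      // Total capacity of the partition is its total region count times the region
      // size, so it can never be smaller than the capacity of the un-retired regions.
      assert(total_regions * region_size_bytes >= capacity_bytes);
    }

    int main() {
      // Example: 10 regions total, 6 still free, 4 of those completely empty, 2 MB regions.
      verify_partition_counts(10, 6, 4,
                              6 * 2097152,               // capacity of the free regions, bytes
                              1048576,                   // 1 MB already used
                              6 * 2097152 - 1048576,     // available = capacity - used
                              2097152);
      return 0;
    }
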
From 428145b2e670923bdf73c910dd41d24e469d4bdb Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Tue, 17 Jun 2025 21:11:57 +0000 Subject: [PATCH 06/61] add freeset accounting of humongous waste --- .../share/gc/shenandoah/shenandoahFreeSet.cpp | 51 +++++++++++++++++-- .../share/gc/shenandoah/shenandoahFreeSet.hpp | 29 +++++++++-- .../gc/shenandoah/shenandoahGeneration.hpp | 1 + .../shenandoahGenerationalEvacuationTask.cpp | 3 ++ 4 files changed, 76 insertions(+), 8 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index e03b19f8dcd72..2ec6a0088eb82 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -229,6 +229,7 @@ void ShenandoahRegionPartitions::make_all_regions_unavailable() { _rightmosts_empty[partition_id] = -1;; _capacity[partition_id] = 0; _used[partition_id] = 0; + _humongous_waste[partition_id] = 0; _available[partition_id] = FreeSetUnderConstruction; } _total_region_counts[int(ShenandoahFreeSetPartitionId::Mutator)] = @@ -241,7 +242,8 @@ void ShenandoahRegionPartitions::make_all_regions_unavailable() { void ShenandoahRegionPartitions::establish_mutator_intervals(idx_t mutator_leftmost, idx_t mutator_rightmost, idx_t mutator_leftmost_empty, idx_t mutator_rightmost_empty, size_t total_mutator_regions, size_t empty_mutator_regions, - size_t mutator_region_count, size_t mutator_used) { + size_t mutator_region_count, size_t mutator_used, + size_t mutator_humongous_waste_words) { shenandoah_assert_heaplocked(); _leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_leftmost; @@ -252,6 +254,7 @@ void ShenandoahRegionPartitions::establish_mutator_intervals(idx_t mutator_leftm _region_counts[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_region_count; _used[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_used; _capacity[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_region_count * _region_size_bytes; + _humongous_waste[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_humongous_waste_words; _available[int(ShenandoahFreeSetPartitionId::Mutator)] = _capacity[int(ShenandoahFreeSetPartitionId::Mutator)] - _used[int(ShenandoahFreeSetPartitionId::Mutator)]; @@ -277,7 +280,8 @@ void ShenandoahRegionPartitions::establish_old_collector_intervals(idx_t old_col idx_t old_collector_rightmost_empty, size_t total_old_collector_region_count, size_t old_collector_empty, size_t old_collector_regions, - size_t old_collector_used) { + size_t old_collector_used, + size_t old_collector_humongous_waste_words) { shenandoah_assert_heaplocked(); _leftmosts[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_leftmost; @@ -288,6 +292,7 @@ void ShenandoahRegionPartitions::establish_old_collector_intervals(idx_t old_col _region_counts[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_regions; _used[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_used; _capacity[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_regions * _region_size_bytes; + _humongous_waste[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_humongous_waste_words; _available[int(ShenandoahFreeSetPartitionId::OldCollector)] = _capacity[int(ShenandoahFreeSetPartitionId::OldCollector)] - _used[int(ShenandoahFreeSetPartitionId::OldCollector)]; @@ -1309,7 +1314,9 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) { _partitions.assert_bounds(); 
req.set_actual_size(words_size); if (remainder != 0) { - req.set_waste(ShenandoahHeapRegion::region_size_words() - remainder); + size_t waste = ShenandoahHeapRegion::region_size_words() - remainder; + req.set_waste(waste); + _partitions.increase_humongous_waste(ShenandoahFreeSetPartitionId::Mutator, waste); } return _heap->get_region(beg)->bottom(); } @@ -1448,6 +1455,7 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_cset_regi size_t mutator_rightmost_empty = 0; size_t mutator_regions = 0; size_t mutator_used = 0; + size_t mutator_humongous_waste = 0; size_t old_collector_leftmost = max_regions; size_t old_collector_rightmost = 0; @@ -1455,6 +1463,7 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_cset_regi size_t old_collector_rightmost_empty = 0; size_t old_collector_regions = 0; size_t old_collector_used = 0; + size_t old_collector_humongous_waste = 0; size_t mutator_empty = 0; size_t old_collector_empty = 0; @@ -1462,6 +1471,7 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_cset_regi size_t total_mutator_regions = 0; size_t total_old_collector_regions = 0; + bool is_generational = _heap->mode()->is_generational(); size_t num_regions = _heap->num_regions(); for (size_t idx = 0; idx < num_regions; idx++) { ShenandoahHeapRegion* region = _heap->get_region(idx); @@ -1535,6 +1545,22 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_cset_regi } else { total_old_collector_regions++; } + } else { + if (region->is_humongous_start()) { + oop obj = cast_to_oop(region->bottom()); + size_t word_size = obj->size(); + size_t region_span = ShenandoahHeapRegion::required_regions(word_size * HeapWordSize); + size_t humongous_waste_words = region_span * ShenandoahHeapRegion::region_size_words() - word_size; + if (is_generational) { + if (region->is_young()) { + mutator_humongous_waste += word_size; + } else { + old_collector_humongous_waste += word_size; + } + } else { + mutator_humongous_waste += word_size; + } + } } } #ifdef KELVIN_ENHANCEMENTS @@ -1587,12 +1613,14 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_cset_regi idx_t rightmost_empty_idx = (mutator_leftmost_empty == max_regions)? -1: (idx_t) mutator_rightmost_empty; _partitions.establish_mutator_intervals(mutator_leftmost, rightmost_idx, mutator_leftmost_empty, rightmost_empty_idx, - total_mutator_regions, mutator_empty, mutator_regions, mutator_used); + total_mutator_regions, mutator_empty, mutator_regions, mutator_used, + mutator_humongous_waste); rightmost_idx = (old_collector_leftmost == max_regions)? -1: (idx_t) old_collector_rightmost; rightmost_empty_idx = (old_collector_leftmost_empty == max_regions)? 
-1: (idx_t) old_collector_rightmost_empty; _partitions.establish_old_collector_intervals(old_collector_leftmost, rightmost_idx, old_collector_leftmost_empty, rightmost_empty_idx, total_old_collector_regions, - old_collector_empty, old_collector_regions, old_collector_used); + old_collector_empty, old_collector_regions, old_collector_used, + old_collector_humongous_waste); log_debug(gc, free)(" After find_regions_with_alloc_capacity(), Mutator range [%zd, %zd]," " Old Collector range [%zd, %zd]", _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator), @@ -1601,6 +1629,19 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_cset_regi _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector)); } +void ShenandoahFreeSet::transfer_humongous_regions_from_mutator_to_old_collector(size_t xfer_regions, + size_t humongous_waste_words) { + shenandoah_assert_heaplocked(); + + _partitions.decrease_total_region_counts(ShenandoahFreeSetPartitionId::Mutator, xfer_regions); + _partitions.decrease_humongous_waste(ShenandoahFreeSetPartitionId::Mutator, humongous_waste_words); + + _partitions.increase_total_region_counts(ShenandoahFreeSetPartitionId::OldCollector, xfer_regions); + _partitions.increase_humongous_waste(ShenandoahFreeSetPartitionId::OldCollector, humongous_waste_words); + + // No need to adjust ranges because humongous regions are not allocatable +} + // Returns number of regions transferred, adds transferred bytes to var argument bytes_transferred size_t ShenandoahFreeSet::transfer_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPartitionId which_collector, size_t max_xfer_regions, diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp index 844e43a1f179c..b6280f971b31d 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp @@ -113,6 +113,9 @@ class ShenandoahRegionPartitions { size_t _total_region_counts[UIntNumPartitions]; size_t _empty_region_counts[UIntNumPartitions]; + // Humongous waste, in words, can exist in Mutator partition for recently allocated humongous objects + // and in OldCollector partition for humongous objects that have been promoted in place. + size_t _humongous_waste[UIntNumPartitions]; // For each partition p, _left_to_right_bias is true iff allocations are normally made from lower indexed regions // before higher indexed regions. @@ -160,7 +163,7 @@ class ShenandoahRegionPartitions { void establish_mutator_intervals(idx_t mutator_leftmost, idx_t mutator_rightmost, idx_t mutator_leftmost_empty, idx_t mutator_rightmost_empty, size_t total_mutator_regions, size_t empty_mutator_regions, - size_t mutator_region_count, size_t mutator_used); + size_t mutator_region_count, size_t mutator_used, size_t mutator_humongous_words_waste); // Set the OldCollector intervals, usage, and capacity according to arguments. 
We use this at the end of rebuild_free_set() // to avoid the overhead of making many redundant incremental adjustments to the mutator intervals as the free set is being @@ -168,7 +171,8 @@ class ShenandoahRegionPartitions { void establish_old_collector_intervals(idx_t old_collector_leftmost, idx_t old_collector_rightmost, idx_t old_collector_leftmost_empty, idx_t old_collector_rightmost_empty, size_t total_old_collector_region_count, size_t old_collector_empty, - size_t old_collector_regions, size_t old_collector_used); + size_t old_collector_regions, size_t old_collector_used, + size_t old_collector_humongous_words_waste); // Retire region idx from within partition, , leaving its capacity and used as part of the original free partition's totals. // Requires that region idx is in in the Mutator or Collector partitions. Hereafter, identifies this region as NotFree. @@ -236,8 +240,26 @@ class ShenandoahRegionPartitions { inline bool is_empty(ShenandoahFreeSetPartitionId which_partition) const; + inline void increase_total_region_counts(ShenandoahFreeSetPartitionId which_partition, size_t regions) { + _total_region_counts[int(which_partition)] += regions; + } + + inline void decrease_total_region_counts(ShenandoahFreeSetPartitionId which_partition, size_t regions) { + assert(_total_region_counts[int(which_partition)] >= regions, "Cannot remove more regions than are present"); + _total_region_counts[int(which_partition)] -= regions; + } + inline void increase_used(ShenandoahFreeSetPartitionId which_partition, size_t bytes); + inline void increase_humongous_waste(ShenandoahFreeSetPartitionId which_partition, size_t words) { + _humongous_waste[int(which_partition)] += words; + } + + inline void decrease_humongous_waste(ShenandoahFreeSetPartitionId which_partition, size_t words) { + assert(_humongous_waste[int(which_partition)] >= words, "Cannot decrease waste beyond what is there"); + _humongous_waste[int(which_partition)] -= words; + } + inline void set_bias_from_left_to_right(ShenandoahFreeSetPartitionId which_partition, bool value) { assert (which_partition < NumPartitions, "selected free set must be valid"); _left_to_right_bias[int(which_partition)] = value; @@ -436,7 +458,6 @@ class ShenandoahFreeSet : public CHeapObj { size_t max_xfer_regions, size_t& bytes_transferred); - // Determine whether we prefer to allocate from left to right or from right to left within the OldCollector free-set. void establish_old_collector_alloc_bias(); @@ -501,6 +522,8 @@ class ShenandoahFreeSet : public CHeapObj { // for evacuation, invoke this to make regions available for mutator allocations. void move_regions_from_collector_to_mutator(size_t cset_regions); + void transfer_humongous_regions_from_mutator_to_old_collector(size_t xfer_regions, size_t humongous_waste_words); + void recycle_trash(); // Acquire heap lock and log status, assuming heap lock is not acquired by the caller. diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp index 242acbdea8cea..1f17b5bdcb3d0 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp @@ -100,6 +100,7 @@ class ShenandoahGeneration : public CHeapObj, public ShenandoahSpaceInfo { // to false. size_t select_aged_regions(size_t old_available); + // Return available assuming that we can allocate no more than capacity bytes within this generation. 
size_t available(size_t capacity) const; public: diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp index ba9ef5979a89d..5ccb779ff41ca 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp @@ -300,6 +300,9 @@ void ShenandoahGenerationalEvacuationTask::promote_humongous(ShenandoahHeapRegio r->set_affiliation(OLD_GENERATION); } + ShenandoahFreeSet* freeset = _heap->free_set(); + freeset->transfer_humongous_regions_from_mutator_to_old_collector(spanned_regions, humongous_waste / HeapWordSize); + old_gen->increase_affiliated_region_count(spanned_regions); old_gen->increase_used(used_bytes); old_gen->increase_humongous_waste(humongous_waste); From 27d2e1cd251dac8202959af71161bcd6589ef527 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Fri, 20 Jun 2025 21:21:50 +0000 Subject: [PATCH 07/61] Use free-set implementation of humongous waste --- .../share/gc/shenandoah/shenandoahFreeSet.cpp | 95 +++++++++++++++++-- .../share/gc/shenandoah/shenandoahFreeSet.hpp | 33 +++---- .../gc/shenandoah/shenandoahGeneration.cpp | 18 +++- .../gc/shenandoah/shenandoahGeneration.hpp | 33 ++++++- .../shenandoahGenerationalEvacuationTask.cpp | 4 + .../shenandoah/shenandoahGenerationalHeap.cpp | 6 ++ .../shenandoah/shenandoahGenerationalHeap.hpp | 1 + .../share/gc/shenandoah/shenandoahHeap.cpp | 8 +- .../share/gc/shenandoah/shenandoahHeap.hpp | 1 + .../gc/shenandoah/shenandoahHeapRegion.cpp | 4 + 10 files changed, 175 insertions(+), 28 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index 2ec6a0088eb82..601ec488feca3 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -232,6 +232,11 @@ void ShenandoahRegionPartitions::make_all_regions_unavailable() { _humongous_waste[partition_id] = 0; _available[partition_id] = FreeSetUnderConstruction; } +#define KELVIN_REGION_COUNTS +#ifdef KELVIN_REGION_COUNTS + log_info(gc)("Setting Mutator and Collector total_region_counts to zero, OldCollector is %zu", + _total_region_counts[int(ShenandoahFreeSetPartitionId::OldCollector)]); +#endif _total_region_counts[int(ShenandoahFreeSetPartitionId::Mutator)] = _total_region_counts[int(ShenandoahFreeSetPartitionId::Collector)] = 0; _region_counts[int(ShenandoahFreeSetPartitionId::Mutator)] = _region_counts[int(ShenandoahFreeSetPartitionId::Collector)] = 0; @@ -246,6 +251,10 @@ void ShenandoahRegionPartitions::establish_mutator_intervals(idx_t mutator_leftm size_t mutator_humongous_waste_words) { shenandoah_assert_heaplocked(); +#ifdef KELVIN_HUMONGOUS_WASTE + log_info(gc)("FreeSet mutator humongous waste set to %zu, collector to zero", mutator_humongous_waste_words); +#endif + _leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_leftmost; _rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_rightmost; _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_leftmost_empty; @@ -269,10 +278,14 @@ void ShenandoahRegionPartitions::establish_mutator_intervals(idx_t mutator_leftm _region_counts[int(ShenandoahFreeSetPartitionId::Collector)] = 0; _used[int(ShenandoahFreeSetPartitionId::Collector)] = 0; _capacity[int(ShenandoahFreeSetPartitionId::Collector)] = 0; + _humongous_waste[int(ShenandoahFreeSetPartitionId::Collector)] = 
0; _available[int(ShenandoahFreeSetPartitionId::Collector)] = 0; _total_region_counts[int(ShenandoahFreeSetPartitionId::Collector)] = 0; _empty_region_counts[int(ShenandoahFreeSetPartitionId::Collector)] = 0; +#ifdef KELVIN_REGION_COUNTS + log_info(gc)("Setting Mutator total_region_counts to %zu Collector to 0", total_mutator_regions); +#endif } void ShenandoahRegionPartitions::establish_old_collector_intervals(idx_t old_collector_leftmost, idx_t old_collector_rightmost, @@ -284,6 +297,9 @@ void ShenandoahRegionPartitions::establish_old_collector_intervals(idx_t old_col size_t old_collector_humongous_waste_words) { shenandoah_assert_heaplocked(); +#ifdef KELVIN_HUMONGOUS_WASTE + log_info(gc)("FreeSet old_collector humongous waste set to %zu", old_collector_humongous_waste_words); +#endif _leftmosts[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_leftmost; _rightmosts[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_rightmost; _leftmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_leftmost_empty; @@ -297,9 +313,10 @@ void ShenandoahRegionPartitions::establish_old_collector_intervals(idx_t old_col _capacity[int(ShenandoahFreeSetPartitionId::OldCollector)] - _used[int(ShenandoahFreeSetPartitionId::OldCollector)]; _total_region_counts[int(ShenandoahFreeSetPartitionId::OldCollector)] = total_old_collector_region_count; - _empty_region_counts[int(ShenandoahFreeSetPartitionId::Mutator)] = old_collector_empty; - - + _empty_region_counts[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_empty; +#ifdef KELVIN_REGION_COUNTS + log_info(gc)("Setting OldCollector total_region_counts to %zu", total_old_collector_region_count); +#endif } void ShenandoahRegionPartitions::increase_used(ShenandoahFreeSetPartitionId which_partition, size_t bytes) { @@ -313,6 +330,40 @@ void ShenandoahRegionPartitions::increase_used(ShenandoahFreeSetPartitionId whic _used[int(which_partition)], _capacity[int(which_partition)], bytes); } +void ShenandoahRegionPartitions::increase_humongous_waste(ShenandoahFreeSetPartitionId which_partition, size_t words) { + _humongous_waste[int(which_partition)] += words; +#ifdef KELVIN_HUMONGOUS_WASTE + log_info(gc)("FreeSet<%s>::increase_humongous_waste(%zu) yields: %zu", partition_name(which_partition), + words, _humongous_waste[int(which_partition)]); +#endif +} + +void ShenandoahRegionPartitions::decrease_humongous_waste(ShenandoahFreeSetPartitionId which_partition, size_t words) { + assert(_humongous_waste[int(which_partition)] >= words, "Cannot decrease waste beyond what is there"); + _humongous_waste[int(which_partition)] -= words; +#ifdef KELVIN_HUMONGOUS_WASTE + log_info(gc)("FreeSet<%s>::decrease_humongous_waste(%zu) yields: %zu", partition_name(which_partition), + words, _humongous_waste[int(which_partition)]); +#endif +} + +void ShenandoahRegionPartitions::increase_total_region_counts(ShenandoahFreeSetPartitionId which_partition, size_t regions) { + _total_region_counts[int(which_partition)] += regions; +#ifdef KELVIN_REGION_COUNTS + log_info(gc)("Increase total region counts[%s] by %zu to %zu", partition_name(which_partition), regions, + _total_region_counts[int(which_partition)]); +#endif +} + +void ShenandoahRegionPartitions::decrease_total_region_counts(ShenandoahFreeSetPartitionId which_partition, size_t regions) { +#ifdef KELVIN_REGION_COUNTS + log_info(gc)("Decreasing total region counts[%s] by %zu from %zu", partition_name(which_partition), regions, + _total_region_counts[int(which_partition)]); 
+#endif + assert(_total_region_counts[int(which_partition)] >= regions, "Cannot remove more regions than are present"); + _total_region_counts[int(which_partition)] -= regions; +} + inline void ShenandoahRegionPartitions::shrink_interval_if_range_modifies_either_boundary( ShenandoahFreeSetPartitionId partition, idx_t low_idx, idx_t high_idx) { assert((low_idx <= high_idx) && (low_idx >= 0) && (high_idx < _max), "Range must span legal index values"); @@ -418,6 +469,10 @@ void ShenandoahRegionPartitions::make_free(idx_t idx, ShenandoahFreeSetPartition expand_interval_if_boundary_modified(which_partition, idx, available); _region_counts[int(which_partition)]++; _total_region_counts[int(which_partition)]++; +#ifdef KELVIN_REGION_COUNTS + log_info(gc)("Incrementing _total_region_counts[%s] to %zu in make_free", + partition_name(which_partition), _total_region_counts[int(which_partition)]); +#endif if (available == _region_size_bytes) { _empty_region_counts[int(which_partition)]++; } @@ -496,6 +551,12 @@ void ShenandoahRegionPartitions::move_from_partition_to_partition(idx_t idx, She _total_region_counts[int(new_partition)]++; _region_counts[int(new_partition)]++; +#ifdef KELVIN_REGION_COUNTS + log_info(gc)("Moving one region from %s to %s, adjusting region counts to %zu and %zu", + partition_name(orig_partition), partition_name(new_partition), + _region_counts[int(orig_partition)], _region_counts[int(new_partition)]); +#endif + if (available == _region_size_bytes) { _empty_region_counts[int(orig_partition)]--; _empty_region_counts[int(new_partition)]++; @@ -1316,8 +1377,18 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) { if (remainder != 0) { size_t waste = ShenandoahHeapRegion::region_size_words() - remainder; req.set_waste(waste); + +#ifdef KELVIN_HUMONGOUS_WASTE + log_info(gc)("FreeSet alloc_contiguous increasing mutator humongous waste by %zu", waste); +#endif + _partitions.increase_humongous_waste(ShenandoahFreeSetPartitionId::Mutator, waste); } + +#ifdef KELVIN_REGION_COUNTS + log_info(gc)("Allocating humongous to span %zu regions", num); +#endif + return _heap->get_region(beg)->bottom(); } @@ -1553,12 +1624,15 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_cset_regi size_t humongous_waste_words = region_span * ShenandoahHeapRegion::region_size_words() - word_size; if (is_generational) { if (region->is_young()) { - mutator_humongous_waste += word_size; + total_mutator_regions += region_span; + mutator_humongous_waste += humongous_waste_words; } else { - old_collector_humongous_waste += word_size; + total_old_collector_regions += region_span; + old_collector_humongous_waste += humongous_waste_words; } } else { - mutator_humongous_waste += word_size; + total_mutator_regions += region_span; + mutator_humongous_waste += humongous_waste_words;; } } } @@ -1633,6 +1707,14 @@ void ShenandoahFreeSet::transfer_humongous_regions_from_mutator_to_old_collector size_t humongous_waste_words) { shenandoah_assert_heaplocked(); +#ifdef KELVIN_HUMONGOUS_WASTE + log_info(gc)("FreeSet humongous promotion, with waste %zu", humongous_waste_words); +#endif + +#ifdef KELVIN_REGION_COUNTS + log_info(gc)("Transferring %zu humongous regions from mutator to old (promoting)", xfer_regions); +#endif + _partitions.decrease_total_region_counts(ShenandoahFreeSetPartitionId::Mutator, xfer_regions); _partitions.decrease_humongous_waste(ShenandoahFreeSetPartitionId::Mutator, humongous_waste_words); @@ -1793,6 +1875,7 @@ void 
ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_cset_regions bool have_evacuation_reserves, size_t& young_reserve_result, size_t& old_reserve_result) const { shenandoah_assert_generational(); + shenandoah_assert_heaplocked(); const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); ShenandoahOldGeneration* const old_generation = _heap->old_generation(); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp index b6280f971b31d..bd2b2f8cad37d 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp @@ -30,6 +30,9 @@ #include "gc/shenandoah/shenandoahHeapRegionSet.hpp" #include "gc/shenandoah/shenandoahSimpleBitMap.hpp" + +#define KELVIN_HUMONGOUS_WASTE + // Each ShenandoahHeapRegion is associated with a ShenandoahFreeSetPartitionId. enum class ShenandoahFreeSetPartitionId : uint8_t { Mutator, // Region is in the Mutator free set: available memory is available to mutators. @@ -240,25 +243,13 @@ class ShenandoahRegionPartitions { inline bool is_empty(ShenandoahFreeSetPartitionId which_partition) const; - inline void increase_total_region_counts(ShenandoahFreeSetPartitionId which_partition, size_t regions) { - _total_region_counts[int(which_partition)] += regions; - } - - inline void decrease_total_region_counts(ShenandoahFreeSetPartitionId which_partition, size_t regions) { - assert(_total_region_counts[int(which_partition)] >= regions, "Cannot remove more regions than are present"); - _total_region_counts[int(which_partition)] -= regions; - } + inline void increase_total_region_counts(ShenandoahFreeSetPartitionId which_partition, size_t regions); + inline void decrease_total_region_counts(ShenandoahFreeSetPartitionId which_partition, size_t regions); inline void increase_used(ShenandoahFreeSetPartitionId which_partition, size_t bytes); - inline void increase_humongous_waste(ShenandoahFreeSetPartitionId which_partition, size_t words) { - _humongous_waste[int(which_partition)] += words; - } - - inline void decrease_humongous_waste(ShenandoahFreeSetPartitionId which_partition, size_t words) { - assert(_humongous_waste[int(which_partition)] >= words, "Cannot decrease waste beyond what is there"); - _humongous_waste[int(which_partition)] -= words; - } + inline void increase_humongous_waste(ShenandoahFreeSetPartitionId which_partition, size_t words); + inline void decrease_humongous_waste(ShenandoahFreeSetPartitionId which_partition, size_t words); inline void set_bias_from_left_to_right(ShenandoahFreeSetPartitionId which_partition, bool value) { assert (which_partition < NumPartitions, "selected free set must be valid"); @@ -290,6 +281,13 @@ class ShenandoahRegionPartitions { return _available[int(which_partition)]; } + // Returns words of humongous waste + inline size_t humongous_waste(ShenandoahFreeSetPartitionId which_partition) const { + assert (which_partition < NumPartitions, "selected free set must be valid"); + // This may be called with or without the global heap lock. Changes to _humongous_waste[] are always made with heap lock. + return _humongous_waste[int(which_partition)]; + } + // Return available_in assuming caller does not hold the heap lock. In production builds, available is // returned without acquiring the lock. In debug builds, the global heap lock is acquired in order to // enforce a consistency assert. 
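The waste arithmetic used in find_regions_with_alloc_capacity() above can be illustrated in isolation. The sketch below assumes 8-byte heap words and a 2 MB region size purely to get concrete numbers; the real values come from ShenandoahHeapRegion, and required_regions() is modeled here as a plain ceiling division:

    #include <cstdio>
    #include <cstddef>

    int main() {
      // Assumptions for illustration only; not HotSpot constants.
      const size_t heap_word_size    = 8;                  // bytes per heap word
      const size_t region_size_bytes = 2 * 1024 * 1024;    // 2 MB regions
      const size_t region_size_words = region_size_bytes / heap_word_size;

      // A humongous object of 300000 words (about 2.3 MB) spans two regions.
      size_t word_size   = 300000;
      size_t byte_size   = word_size * heap_word_size;
      // Stand-in for ShenandoahHeapRegion::required_regions(byte_size).
      size_t region_span = (byte_size + region_size_bytes - 1) / region_size_bytes;
      // Same formula as the humongous_waste_words computation above.
      size_t waste_words = region_span * region_size_words - word_size;

      printf("spans %zu regions, wastes %zu words (%zu bytes)\n",
             region_span, waste_words, waste_words * heap_word_size);
      return 0;
    }
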
@@ -542,6 +540,9 @@ class ShenandoahFreeSet : public CHeapObj { inline size_t used() const { return _partitions.used_by(ShenandoahFreeSetPartitionId::Mutator); } inline size_t available() const { return _partitions.available_in_not_locked(ShenandoahFreeSetPartitionId::Mutator); } + inline size_t humongous_waste_in_mutator() const { return _partitions.humongous_waste(ShenandoahFreeSetPartitionId::Mutator); } + inline size_t humongous_waste_in_old() const { return _partitions.humongous_waste(ShenandoahFreeSetPartitionId::OldCollector); } + HeapWord* allocate(ShenandoahAllocRequest& req, bool& in_new_region); /* diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp index 9a511de939ccb..7cc2b9cf58f8f 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp @@ -752,9 +752,9 @@ void ShenandoahGeneration::prepare_regions_and_collection_set(bool concurrent) { // We are preparing for evacuation. At this time, we ignore cset region tallies. size_t first_old, last_old, num_old; - heap->free_set()->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old); + _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old); // Free set construction uses reserve quantities, because they are known to be valid here - heap->free_set()->finish_rebuild(young_cset_regions, old_cset_regions, num_old, true); + _free_set->finish_rebuild(young_cset_regions, old_cset_regions, num_old, true); } } @@ -807,6 +807,7 @@ ShenandoahGeneration::ShenandoahGeneration(ShenandoahGenerationType type, _affiliated_region_count(0), _humongous_waste(0), _evacuation_reserve(0), _used(0), _bytes_allocated_since_gc_start(0), _max_capacity(max_capacity), _soft_max_capacity(soft_max_capacity), + _free_set(nullptr), _heuristics(nullptr) { _is_marking_complete.set(); @@ -825,6 +826,11 @@ ShenandoahGeneration::~ShenandoahGeneration() { delete _task_queues; } +void ShenandoahGeneration::post_initialize(ShenandoahHeap* heap) { + _free_set = heap->free_set(); + assert(_free_set != nullptr, "bad initialization order"); +} + void ShenandoahGeneration::reserve_task_queues(uint workers) { _task_queues->reserve(workers); } @@ -906,6 +912,10 @@ void ShenandoahGeneration::increase_used(size_t bytes) { void ShenandoahGeneration::increase_humongous_waste(size_t bytes) { if (bytes > 0) { Atomic::add(&_humongous_waste, bytes); +#ifdef KELVIN_MONITOR_HUMONGOUS + log_info(gc)("Generation %s humongous waste increased by %zu to %zu", + shenandoah_generation_name(_type), bytes, _humongous_waste); +#endif } } @@ -914,6 +924,10 @@ void ShenandoahGeneration::decrease_humongous_waste(size_t bytes) { assert(ShenandoahHeap::heap()->is_full_gc_in_progress() || (_humongous_waste >= bytes), "Waste (%zu) cannot be negative (after subtracting %zu)", _humongous_waste, bytes); Atomic::sub(&_humongous_waste, bytes); +#ifdef KELVIN_MONITOR_HUMONGOUS + log_info(gc)("Generation %s humongous waste decreased by %zu to %zu", + shenandoah_generation_name(_type), bytes, _humongous_waste); +#endif } } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp index 1f17b5bdcb3d0..d2e0c8fd6d040 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp @@ -27,6 +27,7 @@ #include "gc/shenandoah/heuristics/shenandoahSpaceInfo.hpp" 
#include "gc/shenandoah/shenandoahAffiliation.hpp" +#include "gc/shenandoah/shenandoahFreeSet.hpp" #include "gc/shenandoah/shenandoahGenerationType.hpp" #include "gc/shenandoah/shenandoahLock.hpp" #include "gc/shenandoah/shenandoahMarkingContext.hpp" @@ -72,7 +73,7 @@ class ShenandoahGeneration : public CHeapObj, public ShenandoahSpaceInfo { volatile size_t _bytes_allocated_since_gc_start; size_t _max_capacity; size_t _soft_max_capacity; - + ShenandoahFreeSet* _free_set; ShenandoahHeuristics* _heuristics; private: @@ -127,6 +128,8 @@ class ShenandoahGeneration : public CHeapObj, public ShenandoahSpaceInfo { virtual ShenandoahHeuristics* initialize_heuristics(ShenandoahMode* gc_mode); + virtual void post_initialize(ShenandoahHeap* heap); + size_t soft_max_capacity() const override { return _soft_max_capacity; } size_t max_capacity() const override { return _max_capacity; } virtual size_t used_regions() const; @@ -237,7 +240,33 @@ class ShenandoahGeneration : public CHeapObj, public ShenandoahSpaceInfo { void increase_humongous_waste(size_t bytes); void decrease_humongous_waste(size_t bytes); - size_t get_humongous_waste() const { return _humongous_waste; } + size_t get_humongous_waste() const { + size_t result; + switch (_type) { + case ShenandoahGenerationType::OLD: + result = _free_set->humongous_waste_in_old(); + break; + case ShenandoahGenerationType::YOUNG: + result = _free_set->humongous_waste_in_mutator(); + break; + case ShenandoahGenerationType::GLOBAL: + case ShenandoahGenerationType::NON_GEN: + default: + result = _free_set->humongous_waste_in_mutator() + _free_set->humongous_waste_in_old(); + break; + } + result *= HeapWordSize; + +#define KELVIN_MONITOR_HUMONGOUS +#ifdef KELVIN_MONITOR_HUMONGOUS + if (result != _humongous_waste) { + log_info(gc)("Generation %s expects consistency between humongous waste in free set (%zu) and in generation (%zu)", + shenandoah_generation_name(_type), result, _humongous_waste); + } +#endif + + return result; + } virtual bool is_concurrent_mark_in_progress() = 0; void confirm_heuristics_mode(); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp index 5ccb779ff41ca..524a3b98a9aa9 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp @@ -282,6 +282,10 @@ void ShenandoahGenerationalEvacuationTask::promote_humongous(ShenandoahHeapRegio // usage totals, including humongous waste, after evacuation is done. 
log_debug(gc)("promoting humongous region %zu, spanning %zu", region->index(), spanned_regions); +#ifdef KELVIN_HUMONGOUS_WASTE + log_info(gc)("Promoting humongous object, transferring %zu bytes of humongous waste", humongous_waste); +#endif + young_gen->decrease_used(used_bytes); young_gen->decrease_humongous_waste(humongous_waste); young_gen->decrease_affiliated_region_count(spanned_regions); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp index 1f84feb20e8f4..442ffed329644 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp @@ -147,6 +147,12 @@ void ShenandoahGenerationalHeap::initialize_heuristics() { _old_generation->initialize_heuristics(mode()); } +void ShenandoahGenerationalHeap::post_initialize_heuristics() { + ShenandoahHeap::post_initialize_heuristics(); + _young_generation->post_initialize(this); + _old_generation->post_initialize(this); +} + void ShenandoahGenerationalHeap::initialize_serviceability() { assert(mode()->is_generational(), "Only for the generational mode"); _young_gen_memory_pool = new ShenandoahYoungGenMemoryPool(this); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.hpp index cef5dfd7070ba..b7e1c61a00497 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.hpp @@ -40,6 +40,7 @@ class ShenandoahGenerationalHeap : public ShenandoahHeap { explicit ShenandoahGenerationalHeap(ShenandoahCollectorPolicy* policy); void post_initialize() override; void initialize_heuristics() override; + void post_initialize_heuristics() override; static ShenandoahGenerationalHeap* heap() { shenandoah_assert_generational(); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp index 55dfb2e8de400..547f74236f3fe 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -421,10 +421,10 @@ jint ShenandoahHeap::initialize() { _affiliations[i] = ShenandoahAffiliation::FREE; } - size_t young_cset_regions, old_cset_regions; + post_initialize_heuristics(); // We are initializing free set. We ignore cset region tallies. 
- size_t first_old, last_old, num_old; + size_t young_cset_regions, old_cset_regions, first_old, last_old, num_old; _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old); _free_set->finish_rebuild(young_cset_regions, old_cset_regions, num_old); } @@ -528,6 +528,10 @@ void ShenandoahHeap::initialize_heuristics() { _global_generation->initialize_heuristics(mode()); } +void ShenandoahHeap::post_initialize_heuristics() { + _global_generation->post_initialize(this); +} + #ifdef _MSC_VER #pragma warning( push ) #pragma warning( disable:4355 ) // 'this' : used in base member initializer list diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp index 4124bf8be7f5a..3370a70b060ba 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp @@ -199,6 +199,7 @@ class ShenandoahHeap : public CollectedHeap { void post_initialize() override; void initialize_mode(); virtual void initialize_heuristics(); + virtual void post_initialize_heuristics(); virtual void print_init_logger() const; void initialize_serviceability() override; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp index d00a99ee7289c..b33803d7e0976 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp @@ -907,6 +907,10 @@ void ShenandoahHeapRegion::decrement_humongous_waste() const { if (waste_bytes > 0) { ShenandoahHeap* heap = ShenandoahHeap::heap(); ShenandoahGeneration* generation = heap->generation_for(affiliation()); +#define KELVIN_HUMONGOUS_HEAP_REGION +#ifdef KELVIN_HUMONGOUS_HEAP_REGION + log_info(gc)("Decrementing humongous waste by %zu in ShenHeapRegion", waste_bytes); +#endif heap->decrease_humongous_waste(generation, waste_bytes); } } From 34bd23a9122666e1b172d47c8635a57c39e6e179 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Wed, 2 Jul 2025 03:23:15 +0000 Subject: [PATCH 08/61] Incremental progress with redundant calculations and consistency checks --- .../share/gc/shenandoah/shenandoahFreeSet.cpp | 1126 ++++++++++++++--- .../share/gc/shenandoah/shenandoahFreeSet.hpp | 231 +++- .../gc/shenandoah/shenandoahGeneration.cpp | 19 +- .../gc/shenandoah/shenandoahGeneration.hpp | 49 +- .../shenandoahGenerationalEvacuationTask.cpp | 8 + .../share/gc/shenandoah/shenandoahHeap.cpp | 2 +- .../gc/shenandoah/shenandoahHeapRegion.cpp | 19 +- .../gc/shenandoah/shenandoahVerifier.cpp | 27 +- 8 files changed, 1251 insertions(+), 230 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index 601ec488feca3..525f66449b1f0 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -50,6 +50,8 @@ static const char* partition_name(ShenandoahFreeSetPartitionId t) { } } +#define KELVIN_USED + class ShenandoahLeftRightIterator { private: idx_t _idx; @@ -167,6 +169,7 @@ ShenandoahRegionPartitions::ShenandoahRegionPartitions(size_t max_regions, Shena _free_set(free_set), _membership{ ShenandoahSimpleBitMap(max_regions), ShenandoahSimpleBitMap(max_regions) , ShenandoahSimpleBitMap(max_regions) } { + initialize_old_collector(); make_all_regions_unavailable(); } @@ -219,6 +222,12 @@ inline idx_t ShenandoahRegionPartitions::rightmost(ShenandoahFreeSetPartitionId return idx; } +void 
ShenandoahRegionPartitions::initialize_old_collector() { + _total_region_counts[int(ShenandoahFreeSetPartitionId::OldCollector)] = 0; + _region_counts[int(ShenandoahFreeSetPartitionId::OldCollector)] = 0; + _empty_region_counts[int(ShenandoahFreeSetPartitionId::OldCollector)] = 0; +} + void ShenandoahRegionPartitions::make_all_regions_unavailable() { shenandoah_assert_heaplocked(); for (size_t partition_id = 0; partition_id < IntNumPartitions; partition_id++) { @@ -232,6 +241,9 @@ void ShenandoahRegionPartitions::make_all_regions_unavailable() { _humongous_waste[partition_id] = 0; _available[partition_id] = FreeSetUnderConstruction; } +#ifdef KELVIN_USED + log_info(gc)("make_all_regions_unavailable() setting _used[] to 0 for all partitions"); +#endif #define KELVIN_REGION_COUNTS #ifdef KELVIN_REGION_COUNTS log_info(gc)("Setting Mutator and Collector total_region_counts to zero, OldCollector is %zu", @@ -248,11 +260,11 @@ void ShenandoahRegionPartitions::establish_mutator_intervals(idx_t mutator_leftm idx_t mutator_leftmost_empty, idx_t mutator_rightmost_empty, size_t total_mutator_regions, size_t empty_mutator_regions, size_t mutator_region_count, size_t mutator_used, - size_t mutator_humongous_waste_words) { + size_t mutator_humongous_waste_bytes) { shenandoah_assert_heaplocked(); #ifdef KELVIN_HUMONGOUS_WASTE - log_info(gc)("FreeSet mutator humongous waste set to %zu, collector to zero", mutator_humongous_waste_words); + log_info(gc)("FreeSet mutator humongous waste set to %zu, collector to zero", mutator_humongous_waste_bytes); #endif _leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_leftmost; @@ -262,8 +274,8 @@ void ShenandoahRegionPartitions::establish_mutator_intervals(idx_t mutator_leftm _region_counts[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_region_count; _used[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_used; - _capacity[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_region_count * _region_size_bytes; - _humongous_waste[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_humongous_waste_words; + _capacity[int(ShenandoahFreeSetPartitionId::Mutator)] = total_mutator_regions * _region_size_bytes; + _humongous_waste[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_humongous_waste_bytes; _available[int(ShenandoahFreeSetPartitionId::Mutator)] = _capacity[int(ShenandoahFreeSetPartitionId::Mutator)] - _used[int(ShenandoahFreeSetPartitionId::Mutator)]; @@ -283,6 +295,14 @@ void ShenandoahRegionPartitions::establish_mutator_intervals(idx_t mutator_leftm _total_region_counts[int(ShenandoahFreeSetPartitionId::Collector)] = 0; _empty_region_counts[int(ShenandoahFreeSetPartitionId::Collector)] = 0; +#define KELVIN_CAPACITY +#ifdef KELVIN_CAPACITY + log_info(gc)("establish_mutator_intervals() sets Mutator capacity: %zu", _capacity[int(ShenandoahFreeSetPartitionId::Mutator)]); + log_info(gc)("establish_mutator_intervals() sets Collector capacity: %zu", _capacity[int(ShenandoahFreeSetPartitionId::Collector)]); +#endif +#ifdef KELVIN_USED + log_info(gc)("Setting Mutator used to %zu Collector to 0", mutator_used); +#endif #ifdef KELVIN_REGION_COUNTS log_info(gc)("Setting Mutator total_region_counts to %zu Collector to 0", total_mutator_regions); #endif @@ -294,11 +314,11 @@ void ShenandoahRegionPartitions::establish_old_collector_intervals(idx_t old_col size_t total_old_collector_region_count, size_t old_collector_empty, size_t old_collector_regions, size_t old_collector_used, - size_t old_collector_humongous_waste_words) { + size_t 
old_collector_humongous_waste_bytes) { shenandoah_assert_heaplocked(); #ifdef KELVIN_HUMONGOUS_WASTE - log_info(gc)("FreeSet old_collector humongous waste set to %zu", old_collector_humongous_waste_words); + log_info(gc)("FreeSet old_collector humongous waste set to %zu", old_collector_humongous_waste_bytes); #endif _leftmosts[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_leftmost; _rightmosts[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_rightmost; @@ -307,13 +327,20 @@ void ShenandoahRegionPartitions::establish_old_collector_intervals(idx_t old_col _region_counts[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_regions; _used[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_used; - _capacity[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_regions * _region_size_bytes; - _humongous_waste[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_humongous_waste_words; + _capacity[int(ShenandoahFreeSetPartitionId::OldCollector)] = total_old_collector_region_count * _region_size_bytes; + _humongous_waste[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_humongous_waste_bytes; _available[int(ShenandoahFreeSetPartitionId::OldCollector)] = _capacity[int(ShenandoahFreeSetPartitionId::OldCollector)] - _used[int(ShenandoahFreeSetPartitionId::OldCollector)]; _total_region_counts[int(ShenandoahFreeSetPartitionId::OldCollector)] = total_old_collector_region_count; _empty_region_counts[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_empty; +#ifdef KELVIN_CAPACITY + log_info(gc)("establish_old_collector_intervals() sets OldCollector capacity: %zu", + _capacity[int(ShenandoahFreeSetPartitionId::OldCollector)]); +#endif +#ifdef KELVIN_USED + log_info(gc)("Setting OldCollector used to %zu", old_collector_used); +#endif #ifdef KELVIN_REGION_COUNTS log_info(gc)("Setting OldCollector total_region_counts to %zu", total_old_collector_region_count); #endif @@ -325,28 +352,128 @@ void ShenandoahRegionPartitions::increase_used(ShenandoahFreeSetPartitionId whic _used[int(which_partition)] += bytes; _available[int(which_partition)] -= bytes; +#ifdef KELVIN_USED + log_info(gc)("ShenRegionPartitions %s increase_used(%zu) to %zu, available shrinks to %zu", + partition_name(which_partition), bytes, _used[int(which_partition)], _available[int(which_partition)]); +#endif assert (_used[int(which_partition)] <= _capacity[int(which_partition)], "Must not use (%zu) more than capacity (%zu) after increase by %zu", _used[int(which_partition)], _capacity[int(which_partition)], bytes); } -void ShenandoahRegionPartitions::increase_humongous_waste(ShenandoahFreeSetPartitionId which_partition, size_t words) { - _humongous_waste[int(which_partition)] += words; +void ShenandoahRegionPartitions::decrease_used(ShenandoahFreeSetPartitionId which_partition, size_t bytes) { + shenandoah_assert_heaplocked(); + assert (which_partition < NumPartitions, "Partition must be valid"); + assert (_used[int(which_partition)] >= bytes, + "Must not use (%zu) less than zero after decrease by %zu", + _used[int(which_partition)], bytes); + + _used[int(which_partition)] -= bytes; + _available[int(which_partition)] += bytes; +#ifdef KELVIN_USED + log_info(gc)("ShenRegionPartitions %s decrease_used(%zu) to %zu, available grows to %zu", + partition_name(which_partition), bytes, _used[int(which_partition)], _available[int(which_partition)]); +#endif +} + +size_t ShenandoahRegionPartitions::get_used(ShenandoahFreeSetPartitionId 
which_partition) { + assert (which_partition < NumPartitions, "Partition must be valid"); + return _used[int(which_partition)];; +} + +void ShenandoahRegionPartitions::increase_humongous_waste(ShenandoahFreeSetPartitionId which_partition, size_t bytes) { + shenandoah_assert_heaplocked(); + assert (which_partition < NumPartitions, "Partition must be valid"); + + _humongous_waste[int(which_partition)] += bytes; #ifdef KELVIN_HUMONGOUS_WASTE log_info(gc)("FreeSet<%s>::increase_humongous_waste(%zu) yields: %zu", partition_name(which_partition), - words, _humongous_waste[int(which_partition)]); + bytes, _humongous_waste[int(which_partition)]); #endif } -void ShenandoahRegionPartitions::decrease_humongous_waste(ShenandoahFreeSetPartitionId which_partition, size_t words) { - assert(_humongous_waste[int(which_partition)] >= words, "Cannot decrease waste beyond what is there"); - _humongous_waste[int(which_partition)] -= words; +void ShenandoahRegionPartitions::decrease_humongous_waste(ShenandoahFreeSetPartitionId which_partition, size_t bytes) { + shenandoah_assert_heaplocked(); + assert (which_partition < NumPartitions, "Partition must be valid"); + assert(_humongous_waste[int(which_partition)] >= bytes, "Cannot decrease waste beyond what is there"); + + _humongous_waste[int(which_partition)] -= bytes; #ifdef KELVIN_HUMONGOUS_WASTE log_info(gc)("FreeSet<%s>::decrease_humongous_waste(%zu) yields: %zu", partition_name(which_partition), - words, _humongous_waste[int(which_partition)]); + bytes, _humongous_waste[int(which_partition)]); #endif } +size_t ShenandoahRegionPartitions::get_humongous_waste(ShenandoahFreeSetPartitionId which_partition) { + assert (which_partition < NumPartitions, "Partition must be valid"); + return _humongous_waste[int(which_partition)];; +} + +void ShenandoahRegionPartitions::set_capacity_of(ShenandoahFreeSetPartitionId which_partition, size_t value) { + shenandoah_assert_heaplocked(); + assert (which_partition < NumPartitions, "selected free set must be valid"); + _capacity[int(which_partition)] = value; + _available[int(which_partition)] = value - _used[int(which_partition)]; +#define KELVIN_CAPACITY +#ifdef KELVIN_CAPACITY + log_info(gc)("set_capacity of %s to %zu", partition_name(which_partition), _capacity[int(which_partition)]); +#endif +} + + +void ShenandoahRegionPartitions::increase_capacity(ShenandoahFreeSetPartitionId which_partition, size_t bytes) { + shenandoah_assert_heaplocked(); + assert (which_partition < NumPartitions, "Partition must be valid"); + _capacity[int(which_partition)] += bytes; +#ifdef KELVIN_CAPACITY + log_info(gc)("FreeSet<%s>::increase_capacity(%zu) yields: %zu", partition_name(which_partition), + bytes, _capacity[int(which_partition)]); +#endif +} + +void ShenandoahRegionPartitions::decrease_capacity(ShenandoahFreeSetPartitionId which_partition, size_t bytes) { + shenandoah_assert_heaplocked(); + assert (which_partition < NumPartitions, "Partition must be valid"); + assert(_capacity[int(which_partition)] >= bytes, "Cannot remove more capacity bytes than are present"); + _capacity[int(which_partition)] -= bytes; +#ifdef KELVIN_CAPACITY + log_info(gc)("FreeSet<%s>::decrease_capacity(%zu) yields: %zu", partition_name(which_partition), + bytes, _capacity[int(which_partition)]); +#endif +} + +size_t ShenandoahRegionPartitions::get_capacity(ShenandoahFreeSetPartitionId which_partition) { + assert (which_partition < NumPartitions, "Partition must be valid"); + return _capacity[int(which_partition)];; +} + +void 
ShenandoahRegionPartitions::increase_available(ShenandoahFreeSetPartitionId which_partition, size_t bytes) { + shenandoah_assert_heaplocked(); + assert (which_partition < NumPartitions, "Partition must be valid"); + _available[int(which_partition)] += bytes; +#define KELVIN_AVAILABLE +#ifdef KELVIN_AVAILABLE + log_info(gc)("FreeSet<%s>::increase_available(%zu) yields: %zu", partition_name(which_partition), + bytes, _available[int(which_partition)]); +#endif +} + +void ShenandoahRegionPartitions::decrease_available(ShenandoahFreeSetPartitionId which_partition, size_t bytes) { + shenandoah_assert_heaplocked(); + assert (which_partition < NumPartitions, "Partition must be valid"); + assert(_available[int(which_partition)] >= bytes, "Cannot remove more available bytes than are present"); + _available[int(which_partition)] -= bytes; +#ifdef KELVIN_AVAILABLE + log_info(gc)("FreeSet<%s>::decrease_available(%zu) yields: %zu", partition_name(which_partition), + bytes, _available[int(which_partition)]); +#endif +} + +size_t ShenandoahRegionPartitions::get_available(ShenandoahFreeSetPartitionId which_partition) { + assert (which_partition < NumPartitions, "Partition must be valid"); + return _available[int(which_partition)];; +} + void ShenandoahRegionPartitions::increase_total_region_counts(ShenandoahFreeSetPartitionId which_partition, size_t regions) { _total_region_counts[int(which_partition)] += regions; #ifdef KELVIN_REGION_COUNTS @@ -364,9 +491,48 @@ void ShenandoahRegionPartitions::decrease_total_region_counts(ShenandoahFreeSetP _total_region_counts[int(which_partition)] -= regions; } -inline void ShenandoahRegionPartitions::shrink_interval_if_range_modifies_either_boundary( +void ShenandoahRegionPartitions::increase_region_counts(ShenandoahFreeSetPartitionId which_partition, size_t regions) { + _region_counts[int(which_partition)] += regions; +#ifdef KELVIN_REGION_COUNTS + log_info(gc)("Increase region counts[%s] by %zu to %zu", partition_name(which_partition), regions, + _region_counts[int(which_partition)]); +#endif +} + +void ShenandoahRegionPartitions::decrease_region_counts(ShenandoahFreeSetPartitionId which_partition, size_t regions) { +#ifdef KELVIN_REGION_COUNTS + log_info(gc)("Decreasing region counts[%s] by %zu from %zu", partition_name(which_partition), regions, + _region_counts[int(which_partition)]); +#endif + assert(_region_counts[int(which_partition)] >= regions, "Cannot remove more regions than are present"); + _region_counts[int(which_partition)] -= regions; +} + +void ShenandoahRegionPartitions::increase_empty_region_counts(ShenandoahFreeSetPartitionId which_partition, size_t regions) { + _empty_region_counts[int(which_partition)] += regions; +#ifdef KELVIN_REGION_COUNTS + log_info(gc)("Increase empty region counts[%s] by %zu to %zu", partition_name(which_partition), regions, + _empty_region_counts[int(which_partition)]); +#endif +} + +void ShenandoahRegionPartitions::decrease_empty_region_counts(ShenandoahFreeSetPartitionId which_partition, size_t regions) { +#ifdef KELVIN_REGION_COUNTS + log_info(gc)("Decreasing empty_region counts[%s] by %zu from %zu", partition_name(which_partition), regions, + _empty_region_counts[int(which_partition)]); +#endif + assert(_empty_region_counts[int(which_partition)] >= regions, "Cannot remove more regions than are present"); + _empty_region_counts[int(which_partition)] -= regions; +} + +// All members of partition between low_idx and high_idx inclusive have been removed. 
+void ShenandoahRegionPartitions::shrink_interval_if_range_modifies_either_boundary( ShenandoahFreeSetPartitionId partition, idx_t low_idx, idx_t high_idx) { assert((low_idx <= high_idx) && (low_idx >= 0) && (high_idx < _max), "Range must span legal index values"); +#define KELVIN_INTERVALS +#ifdef KELVIN_INTERVALS + log_info(gc)("shrink_interval_if_range_modifies_either_boundary(%s, %zd, %zd)", partition_name(partition), low_idx, high_idx); +#endif if (low_idx == leftmost(partition)) { assert (!_membership[int(partition)].is_set(low_idx), "Do not shrink interval if region not removed"); if (high_idx + 1 == _max) { @@ -397,14 +563,78 @@ inline void ShenandoahRegionPartitions::shrink_interval_if_range_modifies_either _leftmosts_empty[int(partition)] = _max; _rightmosts_empty[int(partition)] = -1; } +#ifdef KELVIN_INTERVALS + log_info(gc)("shrink_interval_if_range_modifies_either_boundary ends with range [%zd, %zd], empty range [%zd, %zd]", + _leftmosts[int(partition)], _rightmosts[int(partition)], + _leftmosts_empty[int(partition)], _rightmosts_empty[int(partition)]); +#endif +} + +void ShenandoahRegionPartitions::establish_interval(ShenandoahFreeSetPartitionId partition, idx_t low_idx, idx_t high_idx, + idx_t low_empty_idx, idx_t high_empty_idx) { +#ifdef ASSERT + assert (partition < NumPartitions, "invalid partition"); + if (low_idx != max()) { + assert((low_idx <= high_idx) && (low_idx >= 0) && (high_idx < _max), "Range must span legal index values"); + assert (in_free_set(partition, low_idx), "Must be in partition of established interval"); + assert (in_free_set(partition, high_idx), "Must be in partition of established interval"); + } + if (low_empty_idx != max()) { + ShenandoahHeapRegion* r = ShenandoahHeap::heap()->get_region(low_empty_idx); + assert (in_free_set(partition, low_empty_idx) && r->is_empty(), "Must be empty and in partition of established interval"); + r = ShenandoahHeap::heap()->get_region(high_empty_idx); + assert (in_free_set(partition, high_empty_idx), "Must be in partition of established interval"); + } +#endif + + _leftmosts[int(partition)] = low_idx; + _rightmosts[int(partition)] = high_idx; + _leftmosts_empty[int(partition)] = low_empty_idx; + _rightmosts_empty[int(partition)] = high_empty_idx; + +#ifdef KELVIN_INTERVALS + log_info(gc)("%s: establish_interval ends with [%zd, %zd], empty range [%zd, %zd]", + partition_name(partition), _leftmosts[int(partition)], _rightmosts[int(partition)], + _leftmosts_empty[int(partition)], _rightmosts_empty[int(partition)]); +#endif } inline void ShenandoahRegionPartitions::shrink_interval_if_boundary_modified(ShenandoahFreeSetPartitionId partition, idx_t idx) { shrink_interval_if_range_modifies_either_boundary(partition, idx, idx); } -inline void ShenandoahRegionPartitions::expand_interval_if_boundary_modified(ShenandoahFreeSetPartitionId partition, - idx_t idx, size_t region_available) { +// Some members of partition between low_idx and high_idx inclusive have been added. 
+void ShenandoahRegionPartitions:: +expand_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId partition, idx_t low_idx, idx_t high_idx, + idx_t low_empty_idx, idx_t high_empty_idx) { +#ifdef KELVIN_INTERVALS + log_info(gc)("expand_interval_if_range_modifies_either_boundary(%s, %zd, %zd, %zd, %zd)", + partition_name(partition), low_idx, high_idx, low_empty_idx, high_empty_idx); +#endif + if (_leftmosts[int(partition)] > low_idx) { + _leftmosts[int(partition)] = low_idx; + } + if (_rightmosts[int(partition)] < high_idx) { + _rightmosts[int(partition)] = high_idx; + } + if (_leftmosts_empty[int(partition)] > low_empty_idx) { + _leftmosts_empty[int(partition)] = low_empty_idx; + } + if (_rightmosts_empty[int(partition)] < high_empty_idx) { + _rightmosts_empty[int(partition)] = high_empty_idx; + } +#ifdef KELVIN_INTERVALS + log_info(gc)("expand_interval_if_range_modifies_either_boundary ends with range [%zd, %zd], empty range [%zd, %zd]", + _leftmosts[int(partition)], _rightmosts[int(partition)], + _leftmosts_empty[int(partition)], _rightmosts_empty[int(partition)]); +#endif +} + +void ShenandoahRegionPartitions::expand_interval_if_boundary_modified(ShenandoahFreeSetPartitionId partition, + idx_t idx, size_t region_available) { +#ifdef KELVIN_INTERVALS + log_info(gc)("expand_interval_if_boundary_modified(%s, %zd, %zu)", partition_name(partition), idx, region_available); +#endif if (_leftmosts[int(partition)] > idx) { _leftmosts[int(partition)] = idx; } @@ -419,6 +649,11 @@ inline void ShenandoahRegionPartitions::expand_interval_if_boundary_modified(She _rightmosts_empty[int(partition)] = idx; } } +#ifdef KELVIN_INTERVALS + log_info(gc)("expand_interval_if_boundary_modified ends with range [%zd, %zd], empty range [%zd, %zd]", + _leftmosts[int(partition)], _rightmosts[int(partition)], + _leftmosts_empty[int(partition)], _rightmosts_empty[int(partition)]); +#endif } void ShenandoahRegionPartitions::retire_range_from_partition( @@ -455,6 +690,8 @@ void ShenandoahRegionPartitions::retire_from_partition(ShenandoahFreeSetPartitio _region_counts[int(partition)]--; } +// The caller is responsible for increasing capacity and available and used in which_partition, and decreasing the +// same quantities for the original partition void ShenandoahRegionPartitions::make_free(idx_t idx, ShenandoahFreeSetPartitionId which_partition, size_t available) { shenandoah_assert_heaplocked(); assert (idx < _max, "index is sane: %zu < %zu", idx, _max); @@ -463,19 +700,7 @@ void ShenandoahRegionPartitions::make_free(idx_t idx, ShenandoahFreeSetPartition assert (available <= _region_size_bytes, "Available cannot exceed region size"); _membership[int(which_partition)].set_bit(idx); - _capacity[int(which_partition)] += _region_size_bytes; - _used[int(which_partition)] += _region_size_bytes - available; - _available[int(which_partition)] += available; expand_interval_if_boundary_modified(which_partition, idx, available); - _region_counts[int(which_partition)]++; - _total_region_counts[int(which_partition)]++; -#ifdef KELVIN_REGION_COUNTS - log_info(gc)("Incrementing _total_region_counts[%s] to %zu in make_free", - partition_name(which_partition), _total_region_counts[int(which_partition)]); -#endif - if (available == _region_size_bytes) { - _empty_region_counts[int(which_partition)]++; - } } bool ShenandoahRegionPartitions::is_mutator_partition(ShenandoahFreeSetPartitionId p) { @@ -494,9 +719,10 @@ bool ShenandoahRegionPartitions::available_implies_empty(size_t available_in_reg return (available_in_region 
== _region_size_bytes); } - -void ShenandoahRegionPartitions::move_from_partition_to_partition(idx_t idx, ShenandoahFreeSetPartitionId orig_partition, - ShenandoahFreeSetPartitionId new_partition, size_t available) { +// Do not adjust capacities, available, or used. Return used delta. +size_t ShenandoahRegionPartitions:: +move_from_partition_to_partition_with_deferred_accounting(idx_t idx, ShenandoahFreeSetPartitionId orig_partition, + ShenandoahFreeSetPartitionId new_partition, size_t available) { ShenandoahHeapRegion* r = ShenandoahHeap::heap()->get_region(idx); shenandoah_assert_heaplocked(); assert (idx < _max, "index is sane: %zu < %zu", idx, _max); @@ -534,22 +760,32 @@ void ShenandoahRegionPartitions::move_from_partition_to_partition(idx_t idx, She _membership[int(orig_partition)].clear_bit(idx); _membership[int(new_partition)].set_bit(idx); + return used; +} - _capacity[int(orig_partition)] -= _region_size_bytes; - _used[int(orig_partition)] -= used; - _available[int(orig_partition)] -= available; - shrink_interval_if_boundary_modified(orig_partition, idx); - - _capacity[int(new_partition)] += _region_size_bytes;; - _used[int(new_partition)] += used; - _available[int(new_partition)] += available; - expand_interval_if_boundary_modified(new_partition, idx, available); +void ShenandoahRegionPartitions::move_from_partition_to_partition(idx_t idx, ShenandoahFreeSetPartitionId orig_partition, + ShenandoahFreeSetPartitionId new_partition, size_t available) { + size_t used = move_from_partition_to_partition_with_deferred_accounting(idx, orig_partition, new_partition, available); + decrease_used(orig_partition, used); _total_region_counts[int(orig_partition)]--; _region_counts[int(orig_partition)]--; + _capacity[int(orig_partition)] -= _region_size_bytes; + _available[int(orig_partition)] -= _region_size_bytes; + shrink_interval_if_boundary_modified(orig_partition, idx); + _capacity[int(new_partition)] += _region_size_bytes; + _available[int(new_partition)] += _region_size_bytes; _total_region_counts[int(new_partition)]++; _region_counts[int(new_partition)]++; + increase_used(new_partition, used); + expand_interval_if_boundary_modified(new_partition, idx, available); + +#ifdef KELVIN_CAPACITY + log_info(gc)("Moving one region from %s to %s, adjusting capacities to %zu and %zu", + partition_name(orig_partition), partition_name(new_partition), + _capacity[int(orig_partition)], _capacity[int(new_partition)]); +#endif #ifdef KELVIN_REGION_COUNTS log_info(gc)("Moving one region from %s to %s, adjusting region counts to %zu and %zu", @@ -758,19 +994,19 @@ void ShenandoahRegionPartitions::assert_bounds() { idx_t beg_off = leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)]; idx_t end_off = rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)]; assert (beg_off >= leftmost(ShenandoahFreeSetPartitionId::Mutator), - "free regions before the leftmost: %zd, bound %zd", + "Mutator free regions before the leftmost: %zd, bound %zd", beg_off, leftmost(ShenandoahFreeSetPartitionId::Mutator)); assert (end_off <= rightmost(ShenandoahFreeSetPartitionId::Mutator), - "free regions past the rightmost: %zd, bound %zd", + "Mutator free regions past the rightmost: %zd, bound %zd", end_off, rightmost(ShenandoahFreeSetPartitionId::Mutator)); beg_off = empty_leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)]; end_off = empty_rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)]; assert (beg_off >= leftmost_empty(ShenandoahFreeSetPartitionId::Mutator), - "free empty regions before the leftmost: %zd, bound %zd", + 
"Mutator free empty regions before the leftmost: %zd, bound %zd", beg_off, leftmost_empty(ShenandoahFreeSetPartitionId::Mutator)); assert (end_off <= rightmost_empty(ShenandoahFreeSetPartitionId::Mutator), - "free empty regions past the rightmost: %zd, bound %zd", + "Mutator free empty regions past the rightmost: %zd, bound %zd", end_off, rightmost_empty(ShenandoahFreeSetPartitionId::Mutator)); // Performance invariants. Failing these would not break the free partition, but performance would suffer. @@ -781,64 +1017,64 @@ void ShenandoahRegionPartitions::assert_bounds() { assert (leftmost(ShenandoahFreeSetPartitionId::Collector) == _max || partition_id_matches(leftmost(ShenandoahFreeSetPartitionId::Collector), ShenandoahFreeSetPartitionId::Collector), - "leftmost region should be free: %zd", leftmost(ShenandoahFreeSetPartitionId::Collector)); + "Collector leftmost region should be free: %zd", leftmost(ShenandoahFreeSetPartitionId::Collector)); assert (leftmost(ShenandoahFreeSetPartitionId::Collector) == _max || partition_id_matches(rightmost(ShenandoahFreeSetPartitionId::Collector), ShenandoahFreeSetPartitionId::Collector), - "rightmost region should be free: %zd", rightmost(ShenandoahFreeSetPartitionId::Collector)); + "Collector rightmost region should be free: %zd", rightmost(ShenandoahFreeSetPartitionId::Collector)); // If Collector partition is empty, leftmosts will both equal max, rightmosts will both equal zero. // Likewise for empty region partitions. beg_off = leftmosts[int(ShenandoahFreeSetPartitionId::Collector)]; end_off = rightmosts[int(ShenandoahFreeSetPartitionId::Collector)]; assert (beg_off >= leftmost(ShenandoahFreeSetPartitionId::Collector), - "free regions before the leftmost: %zd, bound %zd", + "Collector free regions before the leftmost: %zd, bound %zd", beg_off, leftmost(ShenandoahFreeSetPartitionId::Collector)); assert (end_off <= rightmost(ShenandoahFreeSetPartitionId::Collector), - "free regions past the rightmost: %zd, bound %zd", + "Collector free regions past the rightmost: %zd, bound %zd", end_off, rightmost(ShenandoahFreeSetPartitionId::Collector)); beg_off = empty_leftmosts[int(ShenandoahFreeSetPartitionId::Collector)]; end_off = empty_rightmosts[int(ShenandoahFreeSetPartitionId::Collector)]; assert (beg_off >= _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)], - "free empty regions before the leftmost: %zd, bound %zd", + "Collector free empty regions before the leftmost: %zd, bound %zd", beg_off, leftmost_empty(ShenandoahFreeSetPartitionId::Collector)); assert (end_off <= _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)], - "free empty regions past the rightmost: %zd, bound %zd", + "Collector free empty regions past the rightmost: %zd, bound %zd", end_off, rightmost_empty(ShenandoahFreeSetPartitionId::Collector)); // Performance invariants. Failing these would not break the free partition, but performance would suffer. 
- assert (leftmost(ShenandoahFreeSetPartitionId::OldCollector) <= _max, "leftmost in bounds: %zd < %zd", + assert (leftmost(ShenandoahFreeSetPartitionId::OldCollector) <= _max, "OldCollector leftmost in bounds: %zd < %zd", leftmost(ShenandoahFreeSetPartitionId::OldCollector), _max); - assert (rightmost(ShenandoahFreeSetPartitionId::OldCollector) < _max, "rightmost in bounds: %zd < %zd", + assert (rightmost(ShenandoahFreeSetPartitionId::OldCollector) < _max, "OldCollector rightmost in bounds: %zd < %zd", rightmost(ShenandoahFreeSetPartitionId::OldCollector), _max); assert (leftmost(ShenandoahFreeSetPartitionId::OldCollector) == _max || partition_id_matches(leftmost(ShenandoahFreeSetPartitionId::OldCollector), ShenandoahFreeSetPartitionId::OldCollector), - "leftmost region should be free: %zd", leftmost(ShenandoahFreeSetPartitionId::OldCollector)); + "OldCollector leftmost region should be free: %zd", leftmost(ShenandoahFreeSetPartitionId::OldCollector)); assert (leftmost(ShenandoahFreeSetPartitionId::OldCollector) == _max || partition_id_matches(rightmost(ShenandoahFreeSetPartitionId::OldCollector), ShenandoahFreeSetPartitionId::OldCollector), - "rightmost region should be free: %zd", rightmost(ShenandoahFreeSetPartitionId::OldCollector)); + "OldCollector rightmost region should be free: %zd", rightmost(ShenandoahFreeSetPartitionId::OldCollector)); // If OldCollector partition is empty, leftmosts will both equal max, rightmosts will both equal zero. // Likewise for empty region partitions. beg_off = leftmosts[int(ShenandoahFreeSetPartitionId::OldCollector)]; end_off = rightmosts[int(ShenandoahFreeSetPartitionId::OldCollector)]; assert (beg_off >= leftmost(ShenandoahFreeSetPartitionId::OldCollector), - "free regions before the leftmost: %zd, bound %zd", + "OldCollector free regions before the leftmost: %zd, bound %zd", beg_off, leftmost(ShenandoahFreeSetPartitionId::OldCollector)); assert (end_off <= rightmost(ShenandoahFreeSetPartitionId::OldCollector), - "free regions past the rightmost: %zd, bound %zd", + "OldCollector free regions past the rightmost: %zd, bound %zd", end_off, rightmost(ShenandoahFreeSetPartitionId::OldCollector)); beg_off = empty_leftmosts[int(ShenandoahFreeSetPartitionId::OldCollector)]; end_off = empty_rightmosts[int(ShenandoahFreeSetPartitionId::OldCollector)]; assert (beg_off >= _leftmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)], - "free empty regions before the leftmost: %zd, bound %zd", + "OldCollector free empty regions before the leftmost: %zd, bound %zd", beg_off, leftmost_empty(ShenandoahFreeSetPartitionId::OldCollector)); assert (end_off <= _rightmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)], - "free empty regions past the rightmost: %zd, bound %zd", + "OldCollector free empty regions past the rightmost: %zd, bound %zd", end_off, rightmost_empty(ShenandoahFreeSetPartitionId::OldCollector)); } #endif @@ -846,7 +1082,10 @@ void ShenandoahRegionPartitions::assert_bounds() { ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) : _heap(heap), _partitions(max_regions, this), - _alloc_bias_weight(0) + _alloc_bias_weight(0), + _total_young_used(0), + _total_old_used(0), + _total_global_used(0) { clear_internal(); } @@ -854,14 +1093,53 @@ ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) : void ShenandoahFreeSet::add_promoted_in_place_region_to_old_collector(ShenandoahHeapRegion* region) { shenandoah_assert_heaplocked(); size_t plab_min_size_in_bytes = 
ShenandoahGenerationalHeap::heap()->plab_min_size() * HeapWordSize; + size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); + size_t available_in_region = alloc_capacity(region); + size_t used_in_region = region->used(); +#ifdef ASSERT size_t idx = region->index(); - size_t capacity = alloc_capacity(region); assert(_partitions.membership(idx) == ShenandoahFreeSetPartitionId::NotFree, "Regions promoted in place should have been excluded from Mutator partition"); - if (capacity >= plab_min_size_in_bytes) { - _partitions.make_free(idx, ShenandoahFreeSetPartitionId::OldCollector, capacity); - _heap->old_generation()->augment_promoted_reserve(capacity); - } +#endif + if (available_in_region < plab_min_size_in_bytes) { + // If region had been retired, its end-of-region alignment pad is counted as used + used_in_region += available_in_region; + } + + _partitions.decrease_used(ShenandoahFreeSetPartitionId::Mutator, used_in_region); + _partitions.decrease_capacity(ShenandoahFreeSetPartitionId::Mutator, region_size_bytes); + _partitions.decrease_available(ShenandoahFreeSetPartitionId::Mutator, region_size_bytes); + _partitions.decrease_total_region_counts(ShenandoahFreeSetPartitionId::Mutator, 1); + + _partitions.increase_available(ShenandoahFreeSetPartitionId::OldCollector, region_size_bytes); + _partitions.increase_capacity(ShenandoahFreeSetPartitionId::OldCollector, region_size_bytes); + _partitions.increase_used(ShenandoahFreeSetPartitionId::OldCollector, used_in_region); + _partitions.increase_total_region_counts(ShenandoahFreeSetPartitionId::OldCollector, 1); + + if (available_in_region >= plab_min_size_in_bytes) { + // region counts represents regions from which we are currently allocating. + _partitions.decrease_region_counts(ShenandoahFreeSetPartitionId::Mutator, 1); + _partitions.increase_region_counts(ShenandoahFreeSetPartitionId::OldCollector, 1); + _partitions.make_free(idx, ShenandoahFreeSetPartitionId::OldCollector, available_in_region); + _heap->old_generation()->augment_promoted_reserve(available_in_region); + assert(available_in_region != region_size_bytes, "Nothing to promote in place"); + } + recompute_total_used(); +#ifdef KELVIN_CAPACITY + log_info(gc)("Ater add_pip_to_old(%zu), Mutate used: %zu, capacity: %zu, available: %zu, region_counts: %zu, total_regions: %zu", + region->index(), + _partitions.get_used(ShenandoahFreeSetPartitionId::Mutator), + _partitions.get_capacity(ShenandoahFreeSetPartitionId::Mutator), + _partitions.get_available(ShenandoahFreeSetPartitionId::Mutator), + _partitions.get_region_counts(ShenandoahFreeSetPartitionId::Mutator), + _partitions.get_total_region_counts(ShenandoahFreeSetPartitionId::Mutator)); + log_info(gc)(" OldCollect used: %zu, capacity: %zu, available: %zu, region_counts: %zu, total_regions: %zu", + _partitions.get_used(ShenandoahFreeSetPartitionId::OldCollector), + _partitions.get_capacity(ShenandoahFreeSetPartitionId::OldCollector), + _partitions.get_available(ShenandoahFreeSetPartitionId::OldCollector), + _partitions.get_region_counts(ShenandoahFreeSetPartitionId::OldCollector), + _partitions.get_total_region_counts(ShenandoahFreeSetPartitionId::OldCollector)); +#endif } HeapWord* ShenandoahFreeSet::allocate_from_partition_with_affiliation(ShenandoahAffiliation affiliation, @@ -1265,6 +1543,7 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah } else if ((result != nullptr) && in_new_region) { _partitions.one_region_is_no_longer_empty(orig_partition); } + recompute_total_used(); return 
result; } @@ -1369,7 +1648,6 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) { // retire_range_from_partition() will adjust bounds on Mutator free set if appropriate _partitions.retire_range_from_partition(ShenandoahFreeSetPartitionId::Mutator, beg, end); - size_t total_humongous_size = ShenandoahHeapRegion::region_size_bytes() * num; _partitions.increase_used(ShenandoahFreeSetPartitionId::Mutator, total_humongous_size); _partitions.assert_bounds(); @@ -1379,22 +1657,180 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) { req.set_waste(waste); #ifdef KELVIN_HUMONGOUS_WASTE - log_info(gc)("FreeSet alloc_contiguous increasing mutator humongous waste by %zu", waste); + log_info(gc)("FreeSet alloc_contiguous increasing mutator humongous waste by %zu bytes", waste * HeapWordSize); #endif - _partitions.increase_humongous_waste(ShenandoahFreeSetPartitionId::Mutator, waste); + _partitions.increase_humongous_waste(ShenandoahFreeSetPartitionId::Mutator, waste * HeapWordSize); } #ifdef KELVIN_REGION_COUNTS log_info(gc)("Allocating humongous to span %zu regions", num); #endif + recompute_total_young_used(); + recompute_total_global_used(); return _heap->get_region(beg)->bottom(); } class ShenandoahRecycleTrashedRegionClosure final : public ShenandoahHeapRegionClosure { +private: + static const ssize_t SentinelUsed = -1; + static const ssize_t SentinelIndex = -1; + static const size_t MaxSavedRegions = 128; + + ShenandoahRegionPartitions* _partitions; + volatile size_t _recycled_region_count; + ssize_t _region_indices[MaxSavedRegions]; + ssize_t _region_used[MaxSavedRegions]; + + void get_lock_and_flush_buffer(size_t region_count, size_t overflow_region_used, size_t overflow_region_index) { + ShenandoahHeap* heap = ShenandoahHeap::heap(); + ShenandoahHeapLocker locker(heap->lock()); +#define KELVIN_RECYCLE +#ifdef KELVIN_RECYCLE + Thread* t = Thread::current(); + size_t p2i = (size_t) t; + size_t recycled_regions = Atomic::load(&_recycled_region_count); + log_info(gc)("%zx: got lock, will flush buffer with %zu entries plus (used: %zu, index: %zu)", + p2i, recycled_regions, overflow_region_used, overflow_region_index); +#endif + size_t region_tallies[int(ShenandoahRegionPartitions::NumPartitions)]; + size_t used_byte_tallies[int(ShenandoahRegionPartitions::NumPartitions)]; + for (int p = 0; p < int(ShenandoahRegionPartitions::NumPartitions); p++) { + region_tallies[p] = 0; + used_byte_tallies[p] = 0; + } + ShenandoahFreeSetPartitionId p = _partitions->membership(overflow_region_index); + used_byte_tallies[int(p)] += overflow_region_used; + if (region_count <= recycled_regions) { + // _recycled_region_count has not been decremented after I incremented it to obtain region_count, so I will + // try to flush the buffer. + + // Multiple worker threads may attempt to flush this buffer. The first thread to acquire the lock does the work. + // _recycled_region_count is only decreased while holding the heap lock. 
+ if (region_count > recycled_regions) { + region_count = recycled_regions; + } + for (size_t i = 0; i < region_count; i++) { + ssize_t used; + // wait for other threads to finish updating their entries within the region buffer before processing entry +#ifdef KELVIN_RECYCLE + size_t retry_count = 0; +#endif + do { + used = _region_used[i]; +#ifdef KELVIN_RECYCLE + if (retry_count++ == 1024 * 1024) { + log_info(gc)("Too many used retries at index %zu", i); + retry_count = 0; + } +#endif + } while (used == SentinelUsed); + ssize_t index; +#ifdef KELVIN_RECYCLE + retry_count = 0; +#endif + do { + index = _region_indices[i]; +#ifdef KELVIN_RECYCLE + if (retry_count++ == 1024 * 1024) { + log_info(gc)("Too many index retries at index %zu", i); + retry_count = 0; + } +#endif + } while (index == SentinelIndex); + ShenandoahFreeSetPartitionId p = _partitions->membership(index); + + + + assert(p != ShenandoahFreeSetPartitionId::NotFree, "Trashed regions should be in a free partition"); + used_byte_tallies[int(p)] += used; + region_tallies[int(p)]++; +#ifdef KELVIN_RECYCLE + log_info(gc)("flushing bufffer[%zu], index: %zu, used: %zu, partition: %s, used_tally: %zu, region_tally: %zu", + i, index, used, partition_name(p), used_byte_tallies[int(p)], region_tallies[int(p)]); +#endif + } + if (region_count > 0) { + for (size_t i = 0; i < MaxSavedRegions; i++) { + _region_indices[i] = SentinelIndex; + _region_used[i] = SentinelUsed; + } +#ifdef KELVIN_RECYCLE + log_info(gc)("overwrote buffer to all sentinel values"); +#endif + } + + // The almost last thing we do before releasing the lock is to set the _recycled_region_count to 0. What happens next? + // + // 1. Any worker thread that attempted to buffer a new region while we were flushing the buffer will have seen + // that _recycled_region_count > MaxSavedRegions. All such worker threads will first wait for the lock, then + // discover that the _recycled_region_count is zero, then, while holding the lock, they will process the + // region so it doesn't have to be placed into the buffer. This handles the large majority of cases. + // + // 2. However, there's a race that can happen, which will result in someewhat different behavior. Suppose + // this thread resets _recycled_region_count to 0. Then some other worker thread increments _recycled_region_count + // in order to stores its region into the buffer and suppose this happens before all of the other worker threads + // which are waiting to acquire the heap lock have finished their efforts to flush the buffer. If this happens, + // then the workers who are waiting to acquire the heap lock and flush the buffer will find that _recycled_region_count + // has decreased from the value it held when they last tried to increment its value. In this case, these worker + // threads will process their overflow region while holding the lock, but they will not attempt to process regions + // newly placed into the buffer. Otherwise, confusion could result. + // + // Assumption: all worker threads who are attempting to acquire lock and flush buffer will finish their efforts before + // the buffer once again overflows. + // How could we avoid depending on this assumption? + // 1. Let MaxSavedRegions be as large as number of regions, or at least as large as the collection set. + // 2. Keep a count of how many times the buffer has been flushed per instantation of the + // ShenandoahRecycleTrashedRegionClosure object, and only consult/update this value while holding the heap lock. 
+ // Need to think about how this helps resolve the race. + _recycled_region_count = 0; + } else { + // Some other thread has already processed the buffer, resetting _recycled_region_count to zero. Its current value + // may be greater than zero because other workers may have accumulated entries into the buffer. But it is "extremely" + // unlikely that it will overflow again before all waiting workers have had a chance to clear their state. While I've + // got the heap lock, I'll go ahead and update the global state for my overflow region. I'll let other heap regions + // accumulate in the buffer to be processed when the buffer is once again full. +#ifdef KELVIN_RECYCLE + log_info(gc)("Not flushing buffer because other thread did it"); +#endif + region_count = 0; + } +#ifdef KELVIN_RECYCLE + log_info(gc)("Recycling %zu regions [M, C, O]: used: [%zu, %zu, %zu], regions [%zu, %zu, %zu]", + region_count + 1, + used_byte_tallies[int(ShenandoahFreeSetPartitionId::Mutator)], + used_byte_tallies[int(ShenandoahFreeSetPartitionId::Collector)], + used_byte_tallies[int(ShenandoahFreeSetPartitionId::OldCollector)], + region_tallies[int(ShenandoahFreeSetPartitionId::Mutator)], + region_tallies[int(ShenandoahFreeSetPartitionId::Collector)], + region_tallies[int(ShenandoahFreeSetPartitionId::OldCollector)]); +#endif + for (size_t p = 0; p < int(ShenandoahRegionPartitions::NumPartitions); p++) { + _partitions->decrease_used(ShenandoahFreeSetPartitionId(p), used_byte_tallies[p]); + } + ShenandoahYoungGeneration* young_gen = heap->young_generation(); + ShenandoahOldGeneration* old_gen = heap->old_generation(); + young_gen->decrease_used(used_byte_tallies[int(ShenandoahFreeSetPartitionId::Mutator)] + + used_byte_tallies[int(ShenandoahFreeSetPartitionId::Collector)]); + young_gen->decrease_affiliated_region_count(region_tallies[int(ShenandoahFreeSetPartitionId::Mutator)] + + region_tallies[int(ShenandoahFreeSetPartitionId::Collector)]); + old_gen->decrease_used(used_byte_tallies[int(ShenandoahFreeSetPartitionId::OldCollector)]); + old_gen->decrease_affiliated_region_count(region_tallies[int(ShenandoahFreeSetPartitionId::OldCollector)]); +#ifdef KELVIN_RECYCLE + log_info(gc)("Done with flush buffer, releasing global heap lock"); +#endif + } + public: - ShenandoahRecycleTrashedRegionClosure(): ShenandoahHeapRegionClosure() {} + ShenandoahRecycleTrashedRegionClosure(ShenandoahRegionPartitions* p): ShenandoahHeapRegionClosure() { + _partitions = p; + _recycled_region_count = 0; + for (size_t i = 0; i < MaxSavedRegions; i++) { + _region_indices[i] = SentinelIndex; + _region_used[i] = SentinelUsed; + } + } void heap_region_do(ShenandoahHeapRegion* r) { r->try_recycle(); @@ -1412,7 +1848,7 @@ void ShenandoahFreeSet::recycle_trash() { ShenandoahHeap* heap = ShenandoahHeap::heap(); heap->assert_gc_workers(heap->workers()->active_workers()); - ShenandoahRecycleTrashedRegionClosure closure; + ShenandoahRecycleTrashedRegionClosure closure(&_partitions); heap->parallel_heap_region_iterate(&closure); } @@ -1431,6 +1867,7 @@ bool ShenandoahFreeSet::flip_to_old_gc(ShenandoahHeapRegion* r) { ShenandoahFreeSetPartitionId::OldCollector, region_capacity); _partitions.assert_bounds(); _heap->old_generation()->augment_evacuation_reserve(region_capacity); + recompute_total_used(); return true; } @@ -1461,8 +1898,8 @@ bool ShenandoahFreeSet::flip_to_old_gc(ShenandoahHeapRegion* r) { _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator, ShenandoahFreeSetPartitionId::OldCollector, region_capacity); - 
_partitions.assert_bounds(); + recompute_total_used(); // 4. Do not adjust capacities for generations, we just swapped the regions that have already // been accounted for. However, we should adjust the evacuation reserves as those may have changed. @@ -1487,6 +1924,7 @@ void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) { _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator, ShenandoahFreeSetPartitionId::Collector, ac); _partitions.assert_bounds(); + recompute_total_used(); // We do not ensure that the region is no longer trash, relying on try_allocate_in(), which always comes next, // to recycle trash before attempting to allocate anything in the region. @@ -1499,6 +1937,7 @@ void ShenandoahFreeSet::clear() { void ShenandoahFreeSet::clear_internal() { shenandoah_assert_heaplocked(); _partitions.make_all_regions_unavailable(); + recompute_total_used(); _alloc_bias_weight = 0; _partitions.set_bias_from_left_to_right(ShenandoahFreeSetPartitionId::Mutator, true); @@ -1548,16 +1987,28 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_cset_regi ShenandoahHeapRegion* region = _heap->get_region(idx); if (region->is_trash()) { // Trashed regions represent regions that had been in the collection partition but have not yet been "cleaned up". - // The cset regions are not "trashed" until we have finished update refs. + // The cset regions are not recycled until we have finished update refs. if (region->is_old()) { +#define KELVIN_PUZZLE +#ifdef KELVIN_PUZZLE + log_info(gc)("find_alloc_capacity sees old trashed region %zu, with %zu used bytes", region->index(), region->used()); +#endif + assert(region->used() == region_size_bytes, "Expect cset regions to be fully used"); old_cset_regions++; } else { assert(region->is_young(), "Trashed region should be old or young"); +#ifdef KELVIN_PUZZLE + log_info(gc)("find_alloc_capacity sees young trashed region %zu, with %zu used bytes", region->index(), region->used()); +#endif + assert(region->used() == region_size_bytes, "Expect cset regions to be fully used"); young_cset_regions++; } } else if (region->is_old()) { // count both humongous and regular regions, but don't count trash (cset) regions. 
old_region_count++; +#ifdef KELVIN_PUZZLE + log_info(gc)("find_alloc_capacity sees old non-trashed region %zu, with %zu used bytes", region->index(), region->used()); +#endif if (first_old_region > idx) { first_old_region = idx; } @@ -1608,7 +2059,13 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_cset_regi } } old_collector_regions++; - old_collector_used += (region_size_bytes - ac); + old_collector_used += region_size_bytes - ac; + } + } else { + if (region->is_old()) { + old_collector_used += region_size_bytes - ac; + } else { + mutator_used += region_size_bytes - ac; } } if (region->is_trash() || !region->is_old()) { @@ -1619,24 +2076,39 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_cset_regi } else { if (region->is_humongous_start()) { oop obj = cast_to_oop(region->bottom()); - size_t word_size = obj->size(); - size_t region_span = ShenandoahHeapRegion::required_regions(word_size * HeapWordSize); - size_t humongous_waste_words = region_span * ShenandoahHeapRegion::region_size_words() - word_size; + size_t byte_size = obj->size() * HeapWordSize; + size_t region_span = ShenandoahHeapRegion::required_regions(byte_size); + size_t humongous_waste_bytes = region_span * ShenandoahHeapRegion::region_size_words() - byte_size; if (is_generational) { if (region->is_young()) { total_mutator_regions += region_span; - mutator_humongous_waste += humongous_waste_words; + mutator_humongous_waste += humongous_waste_bytes; } else { total_old_collector_regions += region_span; - old_collector_humongous_waste += humongous_waste_words; + old_collector_humongous_waste += humongous_waste_bytes; } } else { total_mutator_regions += region_span; - mutator_humongous_waste += humongous_waste_words;; + mutator_humongous_waste += humongous_waste_bytes; } } } } + // At freeset rebuild time, which precedes recycling of collection set, we treat all cset regions as + // part of capacity, as fully used, as affiliated. Computations performed after prepare_to_rebuild() + // assume the cset regions are part of used regions within each generation. These same computations make use + // of the number of cset regions, with the understanding that cset regions will become unaffiliated and available + // after finish_rebuild. + // + // Partition ranges and empty ranges do not include trash regions as of the end of finish_rebuild. Region ranges + // will be adjusted after we finish recycling the trash. + total_mutator_regions += young_cset_regions; + mutator_used += young_cset_regions * region_size_bytes; + total_old_collector_regions += old_cset_regions; + old_collector_used += old_cset_regions * region_size_bytes; + // No need to update generation sizes here. These are the sizes already recognized by the generations. Theses + // adjustments allow the freeset tallies to match the generation tallies. 
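For illustration only, here is a minimal standalone sketch of the tally adjustment described in the comment above: not-yet-recycled cset (trash) regions are folded into the rebuild tallies as affiliated and fully used so the freeset totals agree with the generation totals. RebuildTally and the numbers below are hypothetical, not the HotSpot types or real values.

#include <cstddef>
#include <cstdio>

// Hypothetical stand-in for the rebuild tallies; not a HotSpot type.
struct RebuildTally {
  size_t regions;      // regions attributed to the partition at rebuild time
  size_t used_bytes;   // bytes treated as used within those regions
};

// Fold not-yet-recycled cset (trash) regions into the tally as affiliated and
// fully used, so freeset totals line up with what the generations already report.
RebuildTally add_trash_as_fully_used(RebuildTally base, size_t cset_regions, size_t region_size_bytes) {
  base.regions    += cset_regions;
  base.used_bytes += cset_regions * region_size_bytes;
  return base;
}

int main() {
  const size_t region_size_bytes = 4u * 1024 * 1024;   // illustrative 4 MiB regions
  RebuildTally mutator = {100, 150u * 1024 * 1024};    // made-up baseline
  mutator = add_trash_as_fully_used(mutator, 8, region_size_bytes);
  std::printf("mutator: regions=%zu used=%zu\n", mutator.regions, mutator.used_bytes);
  return 0;
}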
+ #ifdef KELVIN_ENHANCEMENTS log_info(gc, free)(" At end of prep_to_rebuild, mutator_leftmost: %zu" ", mutator_rightmost: %zu" @@ -1695,6 +2167,7 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_cset_regi old_collector_leftmost_empty, rightmost_empty_idx, total_old_collector_regions, old_collector_empty, old_collector_regions, old_collector_used, old_collector_humongous_waste); + recompute_total_used(); log_debug(gc, free)(" After find_regions_with_alloc_capacity(), Mutator range [%zd, %zd]," " Old Collector range [%zd, %zd]", _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator), @@ -1704,11 +2177,11 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_cset_regi } void ShenandoahFreeSet::transfer_humongous_regions_from_mutator_to_old_collector(size_t xfer_regions, - size_t humongous_waste_words) { + size_t humongous_waste_bytes) { shenandoah_assert_heaplocked(); #ifdef KELVIN_HUMONGOUS_WASTE - log_info(gc)("FreeSet humongous promotion, with waste %zu", humongous_waste_words); + log_info(gc)("FreeSet humongous promotion, with waste %zu", humongous_waste_bytes); #endif #ifdef KELVIN_REGION_COUNTS @@ -1716,10 +2189,14 @@ void ShenandoahFreeSet::transfer_humongous_regions_from_mutator_to_old_collector #endif _partitions.decrease_total_region_counts(ShenandoahFreeSetPartitionId::Mutator, xfer_regions); - _partitions.decrease_humongous_waste(ShenandoahFreeSetPartitionId::Mutator, humongous_waste_words); + _partitions.decrease_humongous_waste(ShenandoahFreeSetPartitionId::Mutator, humongous_waste_bytes); _partitions.increase_total_region_counts(ShenandoahFreeSetPartitionId::OldCollector, xfer_regions); - _partitions.increase_humongous_waste(ShenandoahFreeSetPartitionId::OldCollector, humongous_waste_words); + _partitions.increase_humongous_waste(ShenandoahFreeSetPartitionId::OldCollector, humongous_waste_bytes); + + recompute_total_young_used(); + recompute_total_old_used(); + // global_used is unaffected by this transfer // No need to adjust ranges because humongous regions are not allocatable } @@ -1731,33 +2208,104 @@ size_t ShenandoahFreeSet::transfer_empty_regions_from_collector_set_to_mutator_s shenandoah_assert_heaplocked(); const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); size_t transferred_regions = 0; + size_t used_transfer = 0; + idx_t collector_low_idx = _partitions.max(); + idx_t collector_high_idx = -1; + idx_t mutator_low_idx = _partitions.max(); + idx_t mutator_high_idx = -1; ShenandoahLeftRightIterator iterator(&_partitions, which_collector, true); for (idx_t idx = iterator.current(); transferred_regions < max_xfer_regions && iterator.has_next(); idx = iterator.next()) { // Note: can_allocate_from() denotes that region is entirely empty if (can_allocate_from(idx)) { - _partitions.move_from_partition_to_partition(idx, which_collector, ShenandoahFreeSetPartitionId::Mutator, region_size_bytes); + if (idx < collector_low_idx) { + collector_low_idx = idx; + } + if (idx > collector_high_idx) { + collector_high_idx = idx; + } + if (idx < mutator_low_idx) { + mutator_low_idx = idx; + } + if (idx > mutator_high_idx) { + mutator_high_idx = idx; + } + used_transfer = _partitions.move_from_partition_to_partition_with_deferred_accounting(idx, which_collector, + ShenandoahFreeSetPartitionId::Mutator, + region_size_bytes); transferred_regions++; bytes_transferred += region_size_bytes; } } + // All transferred regions are empty. 
+ assert(used_transfer == 0, "empty regions should have no used"); + _partitions.expand_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId::Mutator, mutator_low_idx, + mutator_high_idx, mutator_low_idx, mutator_high_idx); + _partitions.shrink_interval_if_range_modifies_either_boundary(which_collector, collector_low_idx, collector_high_idx); + + _partitions.decrease_total_region_counts(which_collector, transferred_regions); + _partitions.decrease_region_counts(which_collector, transferred_regions); + _partitions.decrease_empty_region_counts(which_collector, transferred_regions); + + _partitions.increase_total_region_counts(ShenandoahFreeSetPartitionId::Mutator, transferred_regions); + _partitions.increase_region_counts(ShenandoahFreeSetPartitionId::Mutator, transferred_regions); + _partitions.increase_empty_region_counts(ShenandoahFreeSetPartitionId::Mutator, transferred_regions); + + recompute_total_used(); return transferred_regions; } // Returns number of regions transferred, adds transferred bytes to var argument bytes_transferred -size_t ShenandoahFreeSet::transfer_non_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPartitionId which_collector, - size_t max_xfer_regions, - size_t& bytes_transferred) { +size_t ShenandoahFreeSet:: +transfer_non_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPartitionId which_collector, + size_t max_xfer_regions, size_t& bytes_transferred) { shenandoah_assert_heaplocked(); + size_t region_size_bytes = _partitions.region_size_bytes(); size_t transferred_regions = 0; + size_t used_transfer = 0; + idx_t collector_low_idx = _partitions.max(); + idx_t collector_high_idx = -1; + idx_t mutator_low_idx = _partitions.max(); + idx_t mutator_high_idx = -1; + ShenandoahLeftRightIterator iterator(&_partitions, which_collector, false); for (idx_t idx = iterator.current(); transferred_regions < max_xfer_regions && iterator.has_next(); idx = iterator.next()) { size_t ac = alloc_capacity(idx); if (ac > 0) { - _partitions.move_from_partition_to_partition(idx, which_collector, ShenandoahFreeSetPartitionId::Mutator, ac); + if (idx < collector_low_idx) { + collector_low_idx = idx; + } + if (idx > collector_high_idx) { + collector_high_idx = idx; + } + if (idx < mutator_low_idx) { + mutator_low_idx = idx; + } + if (idx > mutator_high_idx) { + mutator_high_idx = idx; + } + assert (ac < region_size_bytes, "Move empty regions with different function"); + used_transfer += _partitions.move_from_partition_to_partition_with_deferred_accounting(idx, which_collector, + ShenandoahFreeSetPartitionId::Mutator, + ac); transferred_regions++; bytes_transferred += ac; } } + // _empty_region_counts is unaffected, because we transfer only non-empty regions here. 
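Both transfer functions above follow the same deferred-accounting shape: membership moves region by region, while used bytes, region counts, and interval bounds are accumulated during the scan and applied in one batch at the end. A rough standalone model of that shape, using hypothetical PartitionTotals/RegionRec stand-ins rather than the HotSpot classes:

#include <climits>
#include <cstddef>
#include <cstdio>
#include <vector>

// Hypothetical stand-ins, not the HotSpot classes.
struct PartitionTotals {
  size_t region_count = 0;
  size_t used_bytes   = 0;
  long   leftmost     = LONG_MAX;   // canonical empty interval is [max, -1]
  long   rightmost    = -1;
};

struct RegionRec { long index; size_t used_bytes; };

// One pass records which regions moved and accumulates the deltas; counters and
// interval bounds are then applied in a single batch, as in the functions above.
void transfer_regions(const std::vector<RegionRec>& moved,
                      PartitionTotals& source, PartitionTotals& dest) {
  size_t used_transfer = 0;
  long low = LONG_MAX, high = -1;
  for (const RegionRec& r : moved) {
    used_transfer += r.used_bytes;          // what the deferred-accounting move would report
    if (r.index < low)  { low = r.index; }
    if (r.index > high) { high = r.index; }
  }
  source.region_count -= moved.size();
  source.used_bytes   -= used_transfer;
  dest.region_count   += moved.size();
  dest.used_bytes     += used_transfer;
  if (low  < dest.leftmost)  { dest.leftmost  = low;  }
  if (high > dest.rightmost) { dest.rightmost = high; }
}

int main() {
  PartitionTotals collector, mutator;
  collector.region_count = 3;
  collector.used_bytes   = 1024;
  std::vector<RegionRec> moved = {{12, 512}, {15, 256}};
  transfer_regions(moved, collector, mutator);
  std::printf("mutator: regions=%zu used=%zu [%ld, %ld]\n",
              mutator.region_count, mutator.used_bytes, mutator.leftmost, mutator.rightmost);
  return 0;
}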
+ + _partitions.decrease_used(which_collector, used_transfer); + _partitions.expand_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId::Mutator, + mutator_low_idx, mutator_high_idx, _partitions.max(), -1); + _partitions.shrink_interval_if_range_modifies_either_boundary(which_collector, collector_low_idx, collector_high_idx); + + _partitions.decrease_total_region_counts(which_collector, transferred_regions); + _partitions.decrease_region_counts(which_collector, transferred_regions); + + _partitions.increase_total_region_counts(ShenandoahFreeSetPartitionId::Mutator, transferred_regions); + _partitions.increase_region_counts(ShenandoahFreeSetPartitionId::Mutator, transferred_regions); + _partitions.increase_used(ShenandoahFreeSetPartitionId::Mutator, used_transfer); + + recompute_total_used(); return transferred_regions; } @@ -1806,7 +2354,6 @@ void ShenandoahFreeSet::move_regions_from_collector_to_mutator(size_t max_xfer_r byte_size_in_proper_unit(old_collector_xfer), proper_unit_for_byte_size(old_collector_xfer)); } - // Overwrite arguments to represent the amount of memory in each generation that is about to be recycled void ShenandoahFreeSet::prepare_to_rebuild(size_t &young_cset_regions, size_t &old_cset_regions, size_t &first_old_region, size_t &last_old_region, size_t &old_region_count) { @@ -1820,19 +2367,36 @@ void ShenandoahFreeSet::prepare_to_rebuild(size_t &young_cset_regions, size_t &o find_regions_with_alloc_capacity(young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count); } -void ShenandoahFreeSet::establish_generation_sizes(size_t young_region_count, size_t old_region_count) { +// The totals reported here anticipate the recycling of trash regions. Their memory is counted as unused and fully +// available at this moment in time, even though the memory cannot be re-allocated until after it is recycled. 
+void ShenandoahFreeSet::establish_generation_sizes(size_t young_region_count, size_t old_region_count, + size_t young_used_regions, size_t old_used_regions, + size_t young_used_bytes, size_t old_used_bytes) { assert(young_region_count + old_region_count == ShenandoahHeap::heap()->num_regions(), "Sanity"); +#define KELVIN_RESERVE +#ifdef KELVIN_RESERVE + log_info(gc)("establish_generation_sizes(young_region_count: %zu, old_region_count: %zu, young_used_regions: %zu,", + young_region_count, old_region_count, young_used_regions); + log_info(gc)(" old_used_regions: %zu, young_used_bytes: %zu, old_used_bytes: %zu)", + old_used_regions, young_used_bytes, old_used_bytes); +#endif if (ShenandoahHeap::heap()->mode()->is_generational()) { ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap(); ShenandoahOldGeneration* old_gen = heap->old_generation(); ShenandoahYoungGeneration* young_gen = heap->young_generation(); + ShenandoahGeneration* global_gen = heap->global_generation(); + size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); size_t original_old_capacity = old_gen->max_capacity(); size_t new_old_capacity = old_region_count * region_size_bytes; size_t new_young_capacity = young_region_count * region_size_bytes; old_gen->set_capacity(new_old_capacity); + old_gen->set_used(old_used_regions, old_used_bytes); young_gen->set_capacity(new_young_capacity); + young_gen->set_used(young_used_regions, young_used_bytes); + global_gen->set_capacity(new_young_capacity + new_old_capacity); + global_gen->set_used(young_used_regions + old_used_regions, young_used_bytes + old_used_bytes); if (new_old_capacity > original_old_capacity) { size_t region_count = (new_old_capacity - original_old_capacity) / region_size_bytes; @@ -1861,11 +2425,14 @@ void ShenandoahFreeSet::finish_rebuild(size_t young_cset_regions, size_t old_cse old_reserve = 0; } - // Move some of the mutator regions in the Collector and OldCollector partitions in order to satisfy + // Move some of the mutator regions into the Collector and OldCollector partitions in order to satisfy // young_reserve and old_reserve. - reserve_regions(young_reserve, old_reserve, old_region_count); + size_t young_used_regions, old_used_regions, young_used_bytes, old_used_bytes; + reserve_regions(young_reserve, old_reserve, old_region_count, + young_used_regions, old_used_regions, young_used_bytes, old_used_bytes); size_t young_region_count = _heap->num_regions() - old_region_count; - establish_generation_sizes(young_region_count, old_region_count); + establish_generation_sizes(young_region_count, old_region_count, + young_used_regions, old_used_regions, young_used_bytes, old_used_bytes); establish_old_collector_alloc_bias(); _partitions.assert_bounds(); log_status(); @@ -1950,69 +2517,303 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_cset_regions // into the collector set or old collector set in order to assure that the memory available for allocations within // the collector set is at least to_reserve and the memory available for allocations within the old collector set // is at least to_reserve_old. 
-void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old, size_t &old_region_count) { +void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old, size_t &old_region_count, + size_t &young_used_regions, size_t &old_used_regions, + size_t &young_used_bytes, size_t &old_used_bytes) { + const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); + + young_used_regions = 0; + old_used_regions = 0; + young_used_bytes = 0; + old_used_bytes = 0; + +#ifdef KELVIN_RESERVE + log_info(gc)("reserve_regions(to_reserve: %zu, to_reserve_old: %zu", to_reserve, to_reserve_old); +#endif +#undef KELVIN_RESERVE + + idx_t mutator_low_idx = _partitions.max(); + idx_t mutator_high_idx = -1; + idx_t mutator_empty_low_idx = _partitions.max(); + idx_t mutator_empty_high_idx = -1; + + idx_t collector_low_idx = _partitions.max(); + idx_t collector_high_idx = -1; + idx_t collector_empty_low_idx = _partitions.max(); + idx_t collector_empty_high_idx = -1; + + idx_t old_collector_low_idx = _partitions.max(); + idx_t old_collector_high_idx = -1; + idx_t old_collector_empty_low_idx = _partitions.max(); + idx_t old_collector_empty_high_idx = -1; + + size_t used_to_collector = 0; + size_t used_to_old_collector = 0; + size_t regions_to_collector = 0; + size_t regions_to_old_collector = 0; + size_t empty_regions_to_collector = 0; + size_t empty_regions_to_old_collector = 0; + + size_t old_collector_available = _partitions.available_in(ShenandoahFreeSetPartitionId::OldCollector);; + size_t collector_available = _partitions.available_in(ShenandoahFreeSetPartitionId::Collector); + + size_t trashed_mutator_used = 0; + size_t trashed_collector_used = 0; + size_t trashed_old_collector_used = 0; + +#define KELVIN_RESERVE +#ifdef KELVIN_RESERVE + log_info(gc)("reserve_regions() seeks to_reserve: %zu and to_reserve_old: %zu", to_reserve, to_reserve_old); +#endif +#undef KELVIN_RESERVE + for (size_t i = _heap->num_regions(); i > 0; i--) { - size_t idx = i - 1; + idx_t idx = i - 1; ShenandoahHeapRegion* r = _heap->get_region(idx); - if (!_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx)) { - continue; - } + if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx)) { + // Note: trashed regions have region_size_bytes alloc capacity. + size_t ac = alloc_capacity(r); + assert (ac > 0, "Membership in free set implies has capacity"); + assert (!r->is_old() || r->is_trash(), "Except for trash, mutator_is_free regions should not be affiliated OLD"); + + bool move_to_old_collector = old_collector_available < to_reserve_old; + bool move_to_collector = collector_available < to_reserve; + +#ifdef KELVIN_RESERVE + log_info(gc)("reserve_regions() region %zu, has ac: %zu, old_avail: %zu, collector_avail: %zu, move old: %s, move young: %s", + idx, ac, old_collector_available, collector_available, move_to_old_collector? "yes": "no", + move_to_collector? 
"yes": "no"); +#endif - size_t ac = alloc_capacity(r); - assert (ac > 0, "Membership in free set implies has capacity"); - assert (!r->is_old() || r->is_trash(), "Except for trash, mutator_is_free regions should not be affiliated OLD"); +#ifdef KELVIN_DEPRECATE + // We have to iterate through all regions in order to update generation accounting + if (!move_to_collector && !move_to_old_collector) { + // We've satisfied both to_reserve and to_reserved_old + break; + } +#endif - bool move_to_old_collector = _partitions.available_in(ShenandoahFreeSetPartitionId::OldCollector) < to_reserve_old; - bool move_to_collector = _partitions.available_in(ShenandoahFreeSetPartitionId::Collector) < to_reserve; + if (move_to_old_collector) { + // We give priority to OldCollector partition because we desire to pack OldCollector regions into higher + // addresses than Collector regions. Presumably, OldCollector regions are more "stable" and less likely to + // be collected in the near future. + if (r->is_trash() || !r->is_affiliated()) { + // OLD regions that have available memory are already in the old_collector free set. + assert(r->is_empty(), "Not affiliated implies region %zu is empty", r->index()); + if (idx < old_collector_low_idx) { + old_collector_low_idx = idx; + } + if (idx > old_collector_high_idx) { + old_collector_high_idx = idx; + } + if (idx < old_collector_empty_low_idx) { + old_collector_empty_low_idx = idx; + } + if (idx > old_collector_empty_high_idx) { + old_collector_empty_high_idx = idx; + } + used_to_old_collector += + _partitions.move_from_partition_to_partition_with_deferred_accounting(idx, ShenandoahFreeSetPartitionId::Mutator, + ShenandoahFreeSetPartitionId::OldCollector, ac); + old_collector_available += ac; + regions_to_old_collector++; + empty_regions_to_old_collector++; + if (r->is_trash()) { + trashed_old_collector_used += ac; + } - if (!move_to_collector && !move_to_old_collector) { - // We've satisfied both to_reserve and to_reserved_old - break; - } + log_trace(gc, free)(" Shifting region %zu from mutator_free to old_collector_free", idx); + log_trace(gc, free)(" Shifted Mutator range [%zd, %zd]," + " Old Collector range [%zd, %zd]", + _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator), + _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator), + _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector), + _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector)); + old_region_count++; + continue; + } + } - if (move_to_old_collector) { - // We give priority to OldCollector partition because we desire to pack OldCollector regions into higher - // addresses than Collector regions. Presumably, OldCollector regions are more "stable" and less likely to - // be collected in the near future. - if (r->is_trash() || !r->is_affiliated()) { - // OLD regions that have available memory are already in the old_collector free set. - _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator, - ShenandoahFreeSetPartitionId::OldCollector, ac); - log_trace(gc, free)(" Shifting region %zu from mutator_free to old_collector_free", idx); + if (move_to_collector) { + // Note: In a previous implementation, regions were only placed into the survivor space (collector_is_free) if + // they were entirely empty. This has the effect of causing new Mutator allocation to reside next to objects + // that have already survived at least one GC, mixing ephemeral with longer-lived objects in the same region. 
+ // Any objects that have survived a GC are less likely to immediately become garbage, so a region that contains + // survivor objects is less likely to be selected for the collection set. This alternative implementation allows + // survivor regions to continue accumulating other survivor objects, and makes it more likely that ephemeral objects + // occupy regions comprised entirely of ephemeral objects. These regions are highly likely to be included in the next + // collection set, and they are easily evacuated because they have low density of live objects. + if (idx < collector_low_idx) { + collector_low_idx = idx; + } + if (idx > collector_high_idx) { + collector_high_idx = idx; + } + if (ac == region_size_bytes) { + if (idx < collector_empty_low_idx) { + collector_empty_low_idx = idx; + } + if (idx > collector_empty_high_idx) { + collector_empty_high_idx = idx; + } + empty_regions_to_collector++; + } + used_to_collector += + _partitions.move_from_partition_to_partition_with_deferred_accounting(idx, ShenandoahFreeSetPartitionId::Mutator, + ShenandoahFreeSetPartitionId::Collector, ac); + collector_available += ac; + regions_to_collector++; + if (r->is_trash()) { + trashed_collector_used += ac; + } else if (ac != region_size_bytes) { + young_used_regions++; + young_used_bytes = region_size_bytes - ac; + } + + old_used_bytes += region_size_bytes - ac; + log_trace(gc, free)(" Shifting region %zu from mutator_free to collector_free", idx); log_trace(gc, free)(" Shifted Mutator range [%zd, %zd]," - " Old Collector range [%zd, %zd]", + " Collector range [%zd, %zd]", _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator), _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator), - _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector), - _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector)); - - old_region_count++; + _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector), + _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector)); continue; } - } - if (move_to_collector) { - // Note: In a previous implementation, regions were only placed into the survivor space (collector_is_free) if - // they were entirely empty. This has the effect of causing new Mutator allocation to reside next to objects - // that have already survived at least one GC, mixing ephemeral with longer-lived objects in the same region. - // Any objects that have survived a GC are less likely to immediately become garbage, so a region that contains - // survivor objects is less likely to be selected for the collection set. This alternative implementation allows - // survivor regions to continue accumulating other survivor objects, and makes it more likely that ephemeral objects - // occupy regions comprised entirely of ephemeral objects. These regions are highly likely to be included in the next - // collection set, and they are easily evacuated because they have low density of live objects. 
- _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator, - ShenandoahFreeSetPartitionId::Collector, ac); - log_trace(gc, free)(" Shifting region %zu from mutator_free to collector_free", idx); - log_trace(gc, free)(" Shifted Mutator range [%zd, %zd]," - " Collector range [%zd, %zd]", - _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator), - _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator), - _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector), - _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector)); + // Mutator region is not moved to Collector or OldCollector. Still, do the accounting. + if (idx < mutator_low_idx) { + mutator_low_idx = idx; + } + if (idx > mutator_high_idx) { + mutator_high_idx = idx; + } + if ((ac == region_size_bytes) && (idx < mutator_empty_low_idx)) { + mutator_empty_low_idx = idx; + } + if ((ac == region_size_bytes) && (idx > mutator_empty_high_idx)) { + mutator_empty_high_idx = idx; + } + if (r->is_trash()) { + trashed_mutator_used += ac; + } else if (ac != region_size_bytes) { + young_used_regions++; + young_used_bytes += region_size_bytes - ac; + +#ifdef KELVIN_RESERVE + log_info(gc)(" region is not moved:, young_used_regions increments to: %zu, young_used_bytes becomes: %zu", + young_used_regions, young_used_bytes); +#endif + } + } else { + // Region is not in Mutator partition. Do the accounting. + ShenandoahFreeSetPartitionId p = _partitions.membership(idx); + size_t ac = alloc_capacity(r); + assert(ac != region_size_bytes, "Empty regions should be in Mutator partion at entry to reserve_regions"); + if (p == ShenandoahFreeSetPartitionId::Collector) { + if (ac != region_size_bytes) { + young_used_regions++; + young_used_bytes = region_size_bytes - ac; +#ifdef KELVIN_RESERVE + log_info(gc)(" Collector Region %zu not in Mutator, ac: %zu, incr young regions to: %zu, young_used_bytes: %zu", + idx, ac, young_used_regions, young_used_bytes); +#endif + } + // else, unaffiliated region has no used + } else if (p == ShenandoahFreeSetPartitionId::OldCollector) { + if (ac != region_size_bytes) { + old_used_regions++; + old_used_bytes = region_size_bytes - ac; +#ifdef KELVIN_RESERVE + log_info(gc)(" OldCollector Region %zu not in Mutator, ac: %zu, incr old regions to: %zu, old_used_bytes: %zu", + idx, ac, old_used_regions, old_used_bytes); +#endif + } + // else, unaffiliated region has no used + } else if (p == ShenandoahFreeSetPartitionId::NotFree) { + // This region has been retired + if (r->is_old()) { + old_used_regions++; + old_used_bytes += region_size_bytes - ac; +#ifdef KELVIN_RESERVE + log_info(gc)(" Retired old region %zu not in Mutator, ac: %zu, incr old regions to: %zu, old_used_bytes: %zu", + idx, ac, old_used_regions, old_used_bytes); +#endif + // KELVIN TODO: do we add alignment padding into old_used_bytes? 
+ } else { + assert(r->is_young(), "Retired region should be old or young"); + young_used_regions++; + young_used_bytes += region_size_bytes - ac; +#ifdef KELVIN_RESERVE + log_info(gc)(" Retired young region %zu not in Mutator, ac: %zu, incr young regions to: %zu, young_used_bytes: %zu", + idx, ac, young_used_regions, young_used_bytes); +#endif + } + } else { + assert(p == ShenandoahFreeSetPartitionId::OldCollector, "Not mutator and not NotFree, so must be OldCollector"); + assert(!r->is_empty(), "Empty regions should be in Mutator partition at entry to reserve_regions"); + if (idx < old_collector_low_idx) { + old_collector_low_idx = idx; + } + if (idx > old_collector_high_idx) { + old_collector_high_idx = idx; + } + if (idx < old_collector_empty_low_idx) { + old_collector_empty_low_idx = idx; + } + if (idx > old_collector_empty_high_idx) { + old_collector_empty_high_idx = idx; + } + } } } + _partitions.decrease_used(ShenandoahFreeSetPartitionId::Mutator, used_to_old_collector + used_to_collector + trashed_mutator_used); + _partitions.decrease_total_region_counts(ShenandoahFreeSetPartitionId::Mutator, + regions_to_old_collector + regions_to_collector); + _partitions.decrease_region_counts(ShenandoahFreeSetPartitionId::Mutator, regions_to_old_collector + regions_to_collector); + _partitions.decrease_empty_region_counts(ShenandoahFreeSetPartitionId::Mutator, + empty_regions_to_old_collector + empty_regions_to_collector); + _partitions.decrease_capacity(ShenandoahFreeSetPartitionId::Mutator, + (regions_to_old_collector + regions_to_collector) * region_size_bytes); + _partitions.decrease_available(ShenandoahFreeSetPartitionId::Mutator, + (regions_to_old_collector + regions_to_collector) * region_size_bytes); + + _partitions.increase_total_region_counts(ShenandoahFreeSetPartitionId::Collector, regions_to_collector); + _partitions.increase_region_counts(ShenandoahFreeSetPartitionId::Collector, regions_to_collector); + _partitions.increase_empty_region_counts(ShenandoahFreeSetPartitionId::Collector, empty_regions_to_collector); + _partitions.increase_capacity(ShenandoahFreeSetPartitionId::Collector, regions_to_collector * region_size_bytes); + _partitions.increase_available(ShenandoahFreeSetPartitionId::Collector, regions_to_collector * region_size_bytes); + + _partitions.increase_total_region_counts(ShenandoahFreeSetPartitionId::OldCollector, regions_to_old_collector); + _partitions.increase_region_counts(ShenandoahFreeSetPartitionId::OldCollector, regions_to_old_collector); + _partitions.increase_empty_region_counts(ShenandoahFreeSetPartitionId::OldCollector, empty_regions_to_old_collector); + _partitions.increase_capacity(ShenandoahFreeSetPartitionId::OldCollector, regions_to_old_collector * region_size_bytes); + _partitions.increase_available(ShenandoahFreeSetPartitionId::OldCollector, regions_to_old_collector * region_size_bytes); + + if (used_to_collector > trashed_collector_used) { + _partitions.increase_used(ShenandoahFreeSetPartitionId::Collector, used_to_collector - trashed_collector_used); + } else { + _partitions.decrease_used(ShenandoahFreeSetPartitionId::Collector, trashed_collector_used - used_to_collector); + } + + if (used_to_old_collector > trashed_old_collector_used) { + _partitions.increase_used(ShenandoahFreeSetPartitionId::OldCollector, used_to_old_collector - trashed_old_collector_used); + } else { + _partitions.decrease_used(ShenandoahFreeSetPartitionId::OldCollector, trashed_old_collector_used - used_to_old_collector); + } + + 
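The used reconciliation just above nets two unsigned tallies (the used bytes carried in by moved regions versus the portion attributed to trash regions that will be recycled), and the net can be positive or negative. A minimal sketch of applying that kind of signed adjustment to an unsigned counter; UsedCounter and the parameter names are hypothetical:

#include <cstddef>
#include <cstdio>

// Hypothetical unsigned used-bytes counter, standing in for the partition tallies.
struct UsedCounter {
  size_t used_bytes = 0;
  void increase(size_t b) { used_bytes += b; }
  void decrease(size_t b) { used_bytes -= b; }   // the real code asserts b <= used_bytes
};

// gross_increase and gross_decrease are both unsigned; their difference may be
// positive or negative, so the net is applied as either an increase or a decrease.
void reconcile_used(UsedCounter& counter, size_t gross_increase, size_t gross_decrease) {
  if (gross_increase > gross_decrease) {
    counter.increase(gross_increase - gross_decrease);
  } else {
    counter.decrease(gross_decrease - gross_increase);
  }
}

int main() {
  UsedCounter collector;
  collector.used_bytes = 32u * 1024 * 1024;                        // made-up starting value
  reconcile_used(collector, 6u * 1024 * 1024, 10u * 1024 * 1024);  // net decrease of 4 MiB
  std::printf("collector used=%zu\n", collector.used_bytes);
  return 0;
}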
_partitions.expand_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId::Collector, + collector_low_idx, collector_high_idx, + collector_empty_low_idx, collector_empty_high_idx); + _partitions.expand_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId::OldCollector, + old_collector_low_idx, old_collector_high_idx, + old_collector_empty_low_idx, old_collector_empty_high_idx); + _partitions.establish_interval(ShenandoahFreeSetPartitionId::Mutator, + mutator_low_idx, mutator_high_idx, mutator_empty_low_idx, mutator_empty_high_idx); + + recompute_total_used(); if (LogTarget(Info, gc, free)::is_enabled()) { size_t old_reserve = _partitions.available_in(ShenandoahFreeSetPartitionId::OldCollector); if (old_reserve < to_reserve_old) { @@ -2025,6 +2826,11 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old PROPERFMTARGS(to_reserve), PROPERFMTARGS(reserve)); } } +#ifdef KELVIN_RESERVE + log_info(gc)("reserve_regions end: old_region_count: %zu, young_used_regions: %zu," + " old_used_regions: %zu, young_used_bytes: %zu, old_used_bytes: %zu", + old_region_count, young_used_regions, old_used_regions, young_used_bytes, old_used_bytes); +#endif } void ShenandoahFreeSet::establish_old_collector_alloc_bias() { @@ -2153,6 +2959,7 @@ void ShenandoahFreeSet::log_status() { size_t total_used = 0; size_t total_free = 0; size_t total_free_ext = 0; + size_t total_trashed_free = 0; for (idx_t idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator); idx <= _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator); idx++) { @@ -2160,7 +2967,9 @@ void ShenandoahFreeSet::log_status() { ShenandoahHeapRegion *r = _heap->get_region(idx); size_t free = alloc_capacity(r); max = MAX2(max, free); - if (r->is_empty()) { + size_t used_in_region = r->used(); + if (r->is_empty() || r->is_trash()) { + used_in_region = 0; total_free_ext += free; if (last_idx + 1 == idx) { empty_contig++; @@ -2170,20 +2979,49 @@ void ShenandoahFreeSet::log_status() { } else { empty_contig = 0; } - total_used += r->used(); + total_used += used_in_region; total_free += free; +#define KELVIN_DEBUG +#ifdef KELVIN_DEBUG + log_info(gc)("%sMutator region %zu has free: %zu, used: %zu, total_free: %zu, total_used: %zu", + r->is_trash()? "Trashed ": "", r->index(), free, used_in_region, total_free, total_used); +#endif max_contig = MAX2(max_contig, empty_contig); last_idx = idx; } } size_t max_humongous = max_contig * ShenandoahHeapRegion::region_size_bytes(); + // capacity() is capacity of mutator + // used() is used of mutator size_t free = capacity() - used(); +#ifdef KELVIN_DEBUG + // kelvin problem: trashed regions should not count as used, + // because this gets subtracted from capacity to wrongly compute free. 
+ log_info(gc)("capacity(): %zu, used: %zu", capacity(), used()); + log_info(gc)("capacity_of(Mutator): %zu, used_by(Mutator): %zu", + _partitions.capacity_of(ShenandoahFreeSetPartitionId::Mutator), + _partitions.used_by(ShenandoahFreeSetPartitionId::Mutator)); + log_info(gc)("capacity_of(Collector): %zu, used_by(Collector): %zu", + _partitions.capacity_of(ShenandoahFreeSetPartitionId::Collector), + _partitions.used_by(ShenandoahFreeSetPartitionId::Collector)); + log_info(gc)("capacity_of(OldCollector): %zu, used_by(OldCollector): %zu", + _partitions.capacity_of(ShenandoahFreeSetPartitionId::OldCollector), + _partitions.used_by(ShenandoahFreeSetPartitionId::OldCollector)); + + ShenandoahYoungGeneration* young_gen = ShenandoahHeap::heap()->young_generation(); + size_t total_capacity = (_partitions.get_total_region_counts(ShenandoahFreeSetPartitionId::Mutator) + * ShenandoahHeapRegion::region_size_bytes()); + assert(total_capacity = capacity(), "Total capacity (%zu) does not match capacity (%zu)", + total_capacity, capacity()); + assert(used() == young_gen->used() - _partitions.used_by(ShenandoahFreeSetPartitionId::Collector), + "Used by Mutator is young used minus Collector used"); +#endif // Since certain regions that belonged to the Mutator free partition at the time of most recent rebuild may have been // retired, the sum of used and capacities within regions that are still in the Mutator free partition may not match // my internally tracked values of used() and free(). - assert(free == total_free, "Free memory should match"); + assert(free == total_free, "Free memory (%zu) should match calculated memory (%zu)", free, total_free); ls.print("Free: %zu%s, Max: %zu%s regular, %zu%s humongous, ", byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free), byte_size_in_proper_unit(max), proper_unit_for_byte_size(max), diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp index bd2b2f8cad37d..65b7cb2261c9d 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp @@ -38,8 +38,12 @@ enum class ShenandoahFreeSetPartitionId : uint8_t { Mutator, // Region is in the Mutator free set: available memory is available to mutators. Collector, // Region is in the Collector free set: available memory is reserved for evacuations. OldCollector, // Region is in the Old Collector free set: - // available memory is reserved for old evacuations and for promotions.. - NotFree // Region is in no free set: it has no available memory + // available memory is reserved for old evacuations and for promotions. + NotFree // Region is in no free set: it has no available memory. Consult region affiliation + // to determine whether this retired region is young or old. If young, the region + // is considered to be part of the Mutator partition. (When we retire from the + // Collector partition, we decrease total_region_count for Collector and increaese + // for Mutator, making similar adjustments to used (net impact on available is neutral). }; // ShenandoahRegionPartitions provides an abstraction to help organize the implementation of ShenandoahFreeSet. This @@ -48,14 +52,15 @@ enum class ShenandoahFreeSetPartitionId : uint8_t { // for which the ShenandoahFreeSetPartitionId is not equal to NotFree. class ShenandoahRegionPartitions { -private: +public: // We do not maintain counts, capacity, or used for regions that are not free. 
Informally, if a region is NotFree, it is // in no partition. NumPartitions represents the size of an array that may be indexed by Mutator or Collector. static constexpr ShenandoahFreeSetPartitionId NumPartitions = ShenandoahFreeSetPartitionId::NotFree; static constexpr int IntNumPartitions = int(ShenandoahFreeSetPartitionId::NotFree); static constexpr uint UIntNumPartitions = uint(ShenandoahFreeSetPartitionId::NotFree); - const ssize_t _max; // The maximum number of heap regions +private: + const idx_t _max; // The maximum number of heap regions const size_t _region_size_bytes; const ShenandoahFreeSet* _free_set; // For each partition, we maintain a bitmap of which regions are affiliated with his partition. @@ -65,22 +70,19 @@ class ShenandoahRegionPartitions { // not to be found. This makes searches for free space more efficient. For each partition p, _leftmosts[p] // represents its least index, and its _rightmosts[p] its greatest index. Empty intervals are indicated by the // canonical [_max, -1]. - ssize_t _leftmosts[UIntNumPartitions]; - ssize_t _rightmosts[UIntNumPartitions]; + idx_t _leftmosts[UIntNumPartitions]; + idx_t _rightmosts[UIntNumPartitions]; // Allocation for humongous objects needs to find regions that are entirely empty. For each partion p, _leftmosts_empty[p] // represents the first region belonging to this partition that is completely empty and _rightmosts_empty[p] represents the // last region that is completely empty. If there is no completely empty region in this partition, this is represented // by the canonical [_max, -1]. - ssize_t _leftmosts_empty[UIntNumPartitions]; - ssize_t _rightmosts_empty[UIntNumPartitions]; + idx_t _leftmosts_empty[UIntNumPartitions]; + idx_t _rightmosts_empty[UIntNumPartitions]; // For each partition p: - // _capacity[p] represents the total amount of memory within the partition at the time of the most recent rebuild - // _retired_capacity[p] represents the amount of memory that would be associated with p if it had not already been - // retired at the time of the most recent rebuild - // _total_capacity[p] is the sum of _capacity[p] and _retired_capacity[p] - // (The values are added under heap lock to assure coherency) + // _capacity[p] represents the total amount of memory within the partition, including retired regions, as adjusted + // by transfers of memory between partitions // _used[p] represents the total amount of memory that has been allocated within this partition (either already // allocated as of the rebuild, or allocated since the rebuild). // _available[p] represents the total amount of memory that can be allocated within partition p, calculated from @@ -91,7 +93,7 @@ class ShenandoahRegionPartitions { // // _region_counts[p] represents the number of regions associated with the partition which currently have available memory. // When a region is retired from partition p, _region_counts[p] is decremented. - // _total_region_counts[p] is _total_capacity[p] / RegionSizeBytes. probably do not need to keep separate field for this. + // _total_region_counts[p] is _total_capacity[p] / RegionSizeBytes. // _empty_region_counts[p] is number of regions associated with p which are entirely empty // // capacity and used values are expressed in bytes. @@ -101,8 +103,10 @@ class ShenandoahRegionPartitions { // When a region is "flipped", we adjust capacities and region counts for original and destination partitions. We also // adjust used values when flipping from mutator to collector. 
Flip to old collector does not need to adjust used because // only empty regions can be flipped to old collector. + // + // All memory quantities (capacty, available, used) are represented in bytes. + - size_t _retired_capacity[UIntNumPartitions]; size_t _capacity[UIntNumPartitions]; size_t _used[UIntNumPartitions]; @@ -111,12 +115,12 @@ class ShenandoahRegionPartitions { // Some notes: // _retired_regions[p] is _total_region_counts[p] - _region_counts[p] // _empty_region_counts[p] <= _region_counts[p] <= _total_region_counts[p] - // _total_capacity[p] is _total_region_counts[p] * _region_size_bytes + // generation_used is (_total_region_counts[p] - _region_counts[p]) * region_size_bytes + _used[p] size_t _region_counts[UIntNumPartitions]; size_t _total_region_counts[UIntNumPartitions]; size_t _empty_region_counts[UIntNumPartitions]; - // Humongous waste, in words, can exist in Mutator partition for recently allocated humongous objects + // Humongous waste, in bytes, can exist in Mutator partition for recently allocated humongous objects // and in OldCollector partition for humongous objects that have been promoted in place. size_t _humongous_waste[UIntNumPartitions]; @@ -124,22 +128,14 @@ class ShenandoahRegionPartitions { // before higher indexed regions. bool _left_to_right_bias[UIntNumPartitions]; - // Shrink the intervals associated with partition when region idx is removed from this free set - inline void shrink_interval_if_boundary_modified(ShenandoahFreeSetPartitionId partition, ssize_t idx); - - // Shrink the intervals associated with partition when regions low_idx through high_idx inclusive are removed from this free set - inline void shrink_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId partition, - ssize_t low_idx, ssize_t high_idx); - inline void expand_interval_if_boundary_modified(ShenandoahFreeSetPartitionId partition, ssize_t idx, size_t capacity); - inline bool is_mutator_partition(ShenandoahFreeSetPartitionId p); inline bool is_young_collector_partition(ShenandoahFreeSetPartitionId p); inline bool is_old_collector_partition(ShenandoahFreeSetPartitionId p); inline bool available_implies_empty(size_t available); #ifndef PRODUCT - void dump_bitmap_row(ssize_t region_idx) const; - void dump_bitmap_range(ssize_t start_region_idx, ssize_t end_region_idx) const; + void dump_bitmap_row(idx_t region_idx) const; + void dump_bitmap_range(idx_t start_region_idx, idx_t end_region_idx) const; void dump_bitmap() const; #endif public: @@ -148,6 +144,11 @@ class ShenandoahRegionPartitions { static const size_t FreeSetUnderConstruction = SIZE_MAX; + inline idx_t max() const { return _max; } + + // At initialization, reset OldCollector tallies + void initialize_old_collector(); + // Remove all regions from all partitions and reset all bounds void make_all_regions_unavailable(); @@ -177,52 +178,72 @@ class ShenandoahRegionPartitions { size_t old_collector_regions, size_t old_collector_used, size_t old_collector_humongous_words_waste); + void establish_interval(ShenandoahFreeSetPartitionId partition, idx_t low_idx, idx_t high_idx, + idx_t low_empty_idx, idx_t high_empty_idx); + + // Shrink the intervals associated with partition when region idx is removed from this free set + inline void shrink_interval_if_boundary_modified(ShenandoahFreeSetPartitionId partition, idx_t idx); + + // Shrink the intervals associated with partition when regions low_idx through high_idx inclusive are removed from this free set + void 
shrink_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId partition, + idx_t low_idx, idx_t high_idx); + + void expand_interval_if_boundary_modified(ShenandoahFreeSetPartitionId partition, idx_t idx, size_t capacity); + void expand_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId partition, + idx_t low_idx, idx_t high_idx, + idx_t low_empty_idx, idx_t high_empty_idx); + // Retire region idx from within partition, , leaving its capacity and used as part of the original free partition's totals. // Requires that region idx is in in the Mutator or Collector partitions. Hereafter, identifies this region as NotFree. // Any remnant of available memory at the time of retirement is added to the original partition's total of used bytes. - void retire_from_partition(ShenandoahFreeSetPartitionId p, ssize_t idx, size_t used_bytes); + void retire_from_partition(ShenandoahFreeSetPartitionId p, idx_t idx, size_t used_bytes); // Retire all regions between low_idx and high_idx inclusive from within partition. Requires that each region idx is // in the same Mutator or Collector partition. Hereafter, identifies each region as NotFree. Assumes that each region // is now considered fully used, since the region is presumably used to represent a humongous object. - void retire_range_from_partition(ShenandoahFreeSetPartitionId partition, ssize_t low_idx, ssize_t high_idx); + void retire_range_from_partition(ShenandoahFreeSetPartitionId partition, idx_t low_idx, idx_t high_idx); // Place region idx into free set which_partition. Requires that idx is currently NotFree. - void make_free(ssize_t idx, ShenandoahFreeSetPartitionId which_partition, size_t region_capacity); + void make_free(idx_t idx, ShenandoahFreeSetPartitionId which_partition, size_t region_capacity); + + // Place region idx into free partition new_partition, not adjusting used and capacity totals for the original and new partition. + // available represents bytes that can still be allocated within this region. Requires that idx is currently not NotFree. + size_t move_from_partition_to_partition_with_deferred_accounting(idx_t idx, ShenandoahFreeSetPartitionId orig_partition, + ShenandoahFreeSetPartitionId new_partition, size_t available); - // Place region idx into free partition new_partition, adjusting used and capacity totals for the original and new partition - // given that available bytes can still be allocated within this region. Requires that idx is currently not NotFree. - void move_from_partition_to_partition(ssize_t idx, ShenandoahFreeSetPartitionId orig_partition, + // Place region idx into free partition new_partition, adjusting used and capacity totals for the original and new partition. + // available represents bytes that can still be allocated within this region. Requires that idx is currently not NotFree. + void move_from_partition_to_partition(idx_t idx, ShenandoahFreeSetPartitionId orig_partition, ShenandoahFreeSetPartitionId new_partition, size_t available); - const char* partition_membership_name(ssize_t idx) const; + const char* partition_membership_name(idx_t idx) const; // Return the index of the next available region >= start_index, or maximum_regions if not found. 
- inline ssize_t find_index_of_next_available_region(ShenandoahFreeSetPartitionId which_partition, ssize_t start_index) const; + inline idx_t find_index_of_next_available_region(ShenandoahFreeSetPartitionId which_partition, idx_t start_index) const; // Return the index of the previous available region <= last_index, or -1 if not found. - inline ssize_t find_index_of_previous_available_region(ShenandoahFreeSetPartitionId which_partition, ssize_t last_index) const; + inline idx_t find_index_of_previous_available_region(ShenandoahFreeSetPartitionId which_partition, idx_t last_index) const; // Return the index of the next available cluster of cluster_size regions >= start_index, or maximum_regions if not found. - inline ssize_t find_index_of_next_available_cluster_of_regions(ShenandoahFreeSetPartitionId which_partition, - ssize_t start_index, size_t cluster_size) const; + inline idx_t find_index_of_next_available_cluster_of_regions(ShenandoahFreeSetPartitionId which_partition, + idx_t start_index, size_t cluster_size) const; // Return the index of the previous available cluster of cluster_size regions <= last_index, or -1 if not found. - inline ssize_t find_index_of_previous_available_cluster_of_regions(ShenandoahFreeSetPartitionId which_partition, - ssize_t last_index, size_t cluster_size) const; + inline idx_t find_index_of_previous_available_cluster_of_regions(ShenandoahFreeSetPartitionId which_partition, + idx_t last_index, size_t cluster_size) const; - inline bool in_free_set(ShenandoahFreeSetPartitionId which_partition, ssize_t idx) const { + inline bool in_free_set(ShenandoahFreeSetPartitionId which_partition, idx_t idx) const { return _membership[int(which_partition)].is_set(idx); } // Returns the ShenandoahFreeSetPartitionId affiliation of region idx, NotFree if this region is not currently in any partition. // This does not enforce that free_set membership implies allocation capacity. - inline ShenandoahFreeSetPartitionId membership(ssize_t idx) const; + inline ShenandoahFreeSetPartitionId membership(idx_t idx) const; #ifdef ASSERT // Returns true iff region idx's membership is which_partition. If which_partition represents a free set, asserts // that the region has allocation capacity. 
- inline bool partition_id_matches(ssize_t idx, ShenandoahFreeSetPartitionId which_partition) const; + inline bool partition_id_matches(idx_t idx, ShenandoahFreeSetPartitionId which_partition) const; #endif inline size_t max_regions() const { return _max; } @@ -236,20 +257,49 @@ class ShenandoahRegionPartitions { // leftmost() and leftmost_empty() return _max, rightmost() and rightmost_empty() return 0 // otherwise, expect the following: // 0 <= leftmost <= leftmost_empty <= rightmost_empty <= rightmost < _max - inline ssize_t leftmost(ShenandoahFreeSetPartitionId which_partition) const; - inline ssize_t rightmost(ShenandoahFreeSetPartitionId which_partition) const; - ssize_t leftmost_empty(ShenandoahFreeSetPartitionId which_partition); - ssize_t rightmost_empty(ShenandoahFreeSetPartitionId which_partition); + inline idx_t leftmost(ShenandoahFreeSetPartitionId which_partition) const; + inline idx_t rightmost(ShenandoahFreeSetPartitionId which_partition) const; + idx_t leftmost_empty(ShenandoahFreeSetPartitionId which_partition); + idx_t rightmost_empty(ShenandoahFreeSetPartitionId which_partition); inline bool is_empty(ShenandoahFreeSetPartitionId which_partition) const; inline void increase_total_region_counts(ShenandoahFreeSetPartitionId which_partition, size_t regions); inline void decrease_total_region_counts(ShenandoahFreeSetPartitionId which_partition, size_t regions); + inline size_t get_total_region_counts(ShenandoahFreeSetPartitionId which_partition) { + assert (which_partition < NumPartitions, "selected free set must be valid"); + return _total_region_counts[int(which_partition)]; + } + + inline void increase_region_counts(ShenandoahFreeSetPartitionId which_partition, size_t regions); + inline void decrease_region_counts(ShenandoahFreeSetPartitionId which_partition, size_t regions); + inline size_t get_region_counts(ShenandoahFreeSetPartitionId which_partition) { + assert (which_partition < NumPartitions, "selected free set must be valid"); + return _region_counts[int(which_partition)]; + } + + inline void increase_empty_region_counts(ShenandoahFreeSetPartitionId which_partition, size_t regions); + inline void decrease_empty_region_counts(ShenandoahFreeSetPartitionId which_partition, size_t regions); + inline size_t get_empty_region_counts(ShenandoahFreeSetPartitionId which_partition) { + assert (which_partition < NumPartitions, "selected free set must be valid"); + return _empty_region_counts[int(which_partition)]; + } + + inline void increase_capacity(ShenandoahFreeSetPartitionId which_partition, size_t bytes); + inline void decrease_capacity(ShenandoahFreeSetPartitionId which_partition, size_t bytes); + inline size_t get_capacity(ShenandoahFreeSetPartitionId which_partition); + + inline void increase_available(ShenandoahFreeSetPartitionId which_partition, size_t bytes); + inline void decrease_available(ShenandoahFreeSetPartitionId which_partition, size_t bytes); + inline size_t get_available(ShenandoahFreeSetPartitionId which_partition); inline void increase_used(ShenandoahFreeSetPartitionId which_partition, size_t bytes); + inline void decrease_used(ShenandoahFreeSetPartitionId which_partition, size_t bytes); + inline size_t get_used(ShenandoahFreeSetPartitionId which_partition); - inline void increase_humongous_waste(ShenandoahFreeSetPartitionId which_partition, size_t words); - inline void decrease_humongous_waste(ShenandoahFreeSetPartitionId which_partition, size_t words); + inline void increase_humongous_waste(ShenandoahFreeSetPartitionId which_partition, size_t bytes); + 
inline void decrease_humongous_waste(ShenandoahFreeSetPartitionId which_partition, size_t bytes); + inline size_t get_humongous_waste(ShenandoahFreeSetPartitionId which_partition); inline void set_bias_from_left_to_right(ShenandoahFreeSetPartitionId which_partition, bool value) { assert (which_partition < NumPartitions, "selected free set must be valid"); @@ -277,11 +327,11 @@ class ShenandoahRegionPartitions { assert(_available[int(which_partition)] == _capacity[int(which_partition)] - _used[int(which_partition)], "Expect available (%zu) equals capacity (%zu) - used (%zu) for partition %s", _available[int(which_partition)], _capacity[int(which_partition)], _used[int(which_partition)], - partition_membership_name(ssize_t(which_partition))); + partition_membership_name(idx_t(which_partition))); return _available[int(which_partition)]; } - // Returns words of humongous waste + // Returns bytes of humongous waste inline size_t humongous_waste(ShenandoahFreeSetPartitionId which_partition) const { assert (which_partition < NumPartitions, "selected free set must be valid"); // This may be called with or without the global heap lock. Changes to _humongous_waste[] are always made with heap lock. @@ -300,17 +350,12 @@ class ShenandoahRegionPartitions { (_available[int(which_partition)] == _capacity[int(which_partition)] - _used[int(which_partition)]), "Expect available (%zu) equals capacity (%zu) - used (%zu) for partition %s", _available[int(which_partition)], _capacity[int(which_partition)], _used[int(which_partition)], - partition_membership_name(ssize_t(which_partition))); + partition_membership_name(idx_t(which_partition))); #endif return _available[int(which_partition)]; } - inline void set_capacity_of(ShenandoahFreeSetPartitionId which_partition, size_t value) { - shenandoah_assert_heaplocked(); - assert (which_partition < NumPartitions, "selected free set must be valid"); - _capacity[int(which_partition)] = value; - _available[int(which_partition)] = value - _used[int(which_partition)]; - } + inline void set_capacity_of(ShenandoahFreeSetPartitionId which_partition, size_t value); inline void set_used_by(ShenandoahFreeSetPartitionId which_partition, size_t value) { shenandoah_assert_heaplocked(); @@ -387,6 +432,54 @@ class ShenandoahFreeSet : public CHeapObj { const ssize_t INITIAL_ALLOC_BIAS_WEIGHT = 256; + // bytes used by young + size_t _total_young_used; + inline void recompute_total_young_used() { + shenandoah_assert_heaplocked(); + size_t region_size_bytes = _partitions.region_size_bytes(); + _total_young_used = (_partitions.used_by(ShenandoahFreeSetPartitionId::Mutator) + + _partitions.used_by(ShenandoahFreeSetPartitionId::Collector)); +#define KELVIN_USED +#ifdef KELVIN_USED + log_info(gc)(" recompute_total_young_used(): %zu from total regions M: %zu, C: %zu, allocatable regions M: %zu, C: %zu, " + "M used: %zu, C used: %zu", _total_young_used, + _partitions.get_total_region_counts(ShenandoahFreeSetPartitionId::Mutator), + _partitions.get_total_region_counts(ShenandoahFreeSetPartitionId::Collector), + _partitions.get_region_counts(ShenandoahFreeSetPartitionId::Mutator), + _partitions.get_region_counts(ShenandoahFreeSetPartitionId::Collector), + _partitions.used_by(ShenandoahFreeSetPartitionId::Mutator), + _partitions.used_by(ShenandoahFreeSetPartitionId::Collector)); +#endif + } + + // bytes used by old + size_t _total_old_used; + inline void recompute_total_old_used() { + shenandoah_assert_heaplocked(); + size_t region_size_bytes = _partitions.region_size_bytes(); + _total_old_used 
=_partitions.used_by(ShenandoahFreeSetPartitionId::OldCollector); +#ifdef KELVIN_USED + log_info(gc)(" recompute_total_old_used(): %zu", _total_old_used); +#endif + } + + // bytes used by global + size_t _total_global_used; + // Prerequisite: _total_young_used and _total_old_used are valid + inline void recompute_total_global_used() { + shenandoah_assert_heaplocked(); + _total_global_used = _total_young_used + _total_old_used; +#ifdef KELVIN_USED + log_info(gc)("recompute_total_global_used(): %zu", _total_global_used); +#endif + } + + inline void recompute_total_used() { + recompute_total_young_used(); + recompute_total_old_used(); + recompute_total_global_used(); + } + // Increases used memory for the partition if the allocation is successful. `in_new_region` will be set // if this is the first allocation in the region. HeapWord* try_allocate_in(ShenandoahHeapRegion* region, ShenandoahAllocRequest& req, bool& in_new_region); @@ -460,7 +553,9 @@ class ShenandoahFreeSet : public CHeapObj { void establish_old_collector_alloc_bias(); // Set max_capacity for young and old generations - void establish_generation_sizes(size_t young_region_count, size_t old_region_count); + void establish_generation_sizes(size_t young_region_count, size_t old_region_count, + size_t young_used_regions, size_t old_used_regions, + size_t young_used_bytes, size_t old_used_bytes); size_t get_usable_free_words(size_t free_bytes) const; // log status, assuming lock has already been acquired by the caller. @@ -475,6 +570,21 @@ class ShenandoahFreeSet : public CHeapObj { inline size_t alloc_capacity(ShenandoahHeapRegion *r) const; inline size_t alloc_capacity(size_t idx) const; + // Return bytes used by old + inline size_t old_used() { + return _total_old_used; + } + + // Return bytes used by young + inline size_t young_used() { + return _total_young_used; + } + + // Return bytes used by global + inline size_t global_used() { + return _total_global_used; + } + void clear(); // Examine the existing free set representation, capturing the current state into var arguments: @@ -600,7 +710,8 @@ class ShenandoahFreeSet : public CHeapObj { // Ensure that Collector has at least to_reserve bytes of available memory, and OldCollector has at least old_reserve // bytes of available memory. On input, old_region_count holds the number of regions already present in the // OldCollector partition. Upon return, old_region_count holds the updated number of regions in the OldCollector partition. - void reserve_regions(size_t to_reserve, size_t old_reserve, size_t &old_region_count); + void reserve_regions(size_t to_reserve, size_t old_reserve, size_t &old_region_count, + size_t &young_used_regions, size_t &old_used_regions, size_t &young_used_bytes, size_t &old_used_bytes); // Reserve space for evacuations, with regions reserved for old evacuations placed to the right // of regions reserved of young evacuations. 
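As a rough standalone model of the used-tracking path introduced above (FreeSetModel and GenerationModel are illustrative stand-ins, not the HotSpot classes): the free set derives young, old, and global used totals from its per-partition tallies whenever they change, and a generation answers used() by reading the matching total.

#include <cstddef>
#include <cstdio>

// Hypothetical, simplified model: the free set owns per-partition used tallies and
// publishes young/old/global totals; generations read used() from the free set.
struct FreeSetModel {
  size_t mutator_used = 0, collector_used = 0, old_collector_used = 0;
  size_t total_young_used = 0, total_old_used = 0, total_global_used = 0;

  // Young is Mutator + Collector, old is OldCollector, global is young + old.
  // Recomputed whenever a partition's used value changes (under the heap lock in the real code).
  void recompute_total_used() {
    total_young_used  = mutator_used + collector_used;
    total_old_used    = old_collector_used;
    total_global_used = total_young_used + total_old_used;
  }
};

struct GenerationModel {
  enum Kind { Young, Old, Global } kind;
  const FreeSetModel* free_set;
  size_t used() const {
    switch (kind) {
      case Young: return free_set->total_young_used;
      case Old:   return free_set->total_old_used;
      default:    return free_set->total_global_used;
    }
  }
};

int main() {
  FreeSetModel fs;
  fs.mutator_used = 10; fs.collector_used = 4; fs.old_collector_used = 7;
  fs.recompute_total_used();
  GenerationModel young{GenerationModel::Young, &fs};
  std::printf("young used=%zu global=%zu\n", young.used(), fs.total_global_used);
  return 0;
}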
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp index 7cc2b9cf58f8f..ee114e8099579 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp @@ -907,6 +907,19 @@ void ShenandoahGeneration::establish_usage(size_t num_regions, size_t num_bytes, void ShenandoahGeneration::increase_used(size_t bytes) { Atomic::add(&_used, bytes); +#define KELVIN_MONITOR_USED +#ifdef KELVIN_MONITOR_USED + log_info(gc)("Generation %s increase_used(%zu) to %zu", shenandoah_generation_name(_type), bytes, _used); +#endif +} + +void ShenandoahGeneration::decrease_used(size_t bytes) { + assert(ShenandoahHeap::heap()->is_full_gc_in_progress() || + (_used >= bytes), "cannot reduce bytes used by generation below zero"); + Atomic::sub(&_used, bytes); +#ifdef KELVIN_MONITOR_USED + log_info(gc)("Generation %s decrease_used(%zu) to %zu", shenandoah_generation_name(_type), bytes, _used); +#endif } void ShenandoahGeneration::increase_humongous_waste(size_t bytes) { @@ -931,12 +944,6 @@ void ShenandoahGeneration::decrease_humongous_waste(size_t bytes) { } } -void ShenandoahGeneration::decrease_used(size_t bytes) { - assert(ShenandoahHeap::heap()->is_full_gc_in_progress() || - (_used >= bytes), "cannot reduce bytes used by generation below zero"); - Atomic::sub(&_used, bytes); -} - size_t ShenandoahGeneration::used_regions() const { return Atomic::load(&_affiliated_region_count); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp index d2e0c8fd6d040..4b207358cdaa2 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp @@ -135,7 +135,44 @@ class ShenandoahGeneration : public CHeapObj, public ShenandoahSpaceInfo { virtual size_t used_regions() const; virtual size_t used_regions_size() const; virtual size_t free_unaffiliated_regions() const; - size_t used() const override { return Atomic::load(&_used); } + size_t used() const override { + size_t result; + switch (_type) { + case ShenandoahGenerationType::OLD: + result = _free_set->old_used(); + break; + case ShenandoahGenerationType::YOUNG: + result = _free_set->young_used(); + break; + case ShenandoahGenerationType::GLOBAL: + case ShenandoahGenerationType::NON_GEN: + default: + result = _free_set->global_used(); + break; + } + + size_t original_result = Atomic::load(&_used); +#define KELVIN_SCAFFOLDING +#ifdef KELVIN_SCAFFOLDING + static int problem_count = 0; + if (result != original_result) { + if (problem_count++ > 6) { + assert(result == original_result, "Problem with used for generation %s, freeset thinks %zu, generation thinks: %zu", + shenandoah_generation_name(_type), result, original_result); + } else { + log_info(gc)("Problem with used for generation %s, freeset thinks %zu, generation thinks: %zu", + shenandoah_generation_name(_type), result, original_result); + } + } else { + problem_count = 0; + } +#endif + + + return result; + } + + size_t available() const override; size_t available_with_reserve() const; size_t used_including_humongous_waste() const { @@ -160,6 +197,14 @@ class ShenandoahGeneration : public CHeapObj, public ShenandoahSpaceInfo { // Set the capacity of the generation, returning the value set size_t set_capacity(size_t byte_size); + void set_used(size_t region_count, size_t byte_count) { + Atomic::store(&_used, byte_count); + 
Atomic::store(&_affiliated_region_count, region_count); +#ifdef KELVIN_SCAFFOLDING + log_info(gc)("%s:set_used(regions: %zu, bytes: %zu)", shenandoah_generation_name(_type), region_count, byte_count); +#endif + } + void log_status(const char* msg) const; // Used directly by FullGC @@ -255,8 +300,6 @@ class ShenandoahGeneration : public CHeapObj, public ShenandoahSpaceInfo { result = _free_set->humongous_waste_in_mutator() + _free_set->humongous_waste_in_old(); break; } - result *= HeapWordSize; - #define KELVIN_MONITOR_HUMONGOUS #ifdef KELVIN_MONITOR_HUMONGOUS if (result != _humongous_waste) { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp index 524a3b98a9aa9..be6dc20f4cbcb 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp @@ -241,6 +241,14 @@ void ShenandoahGenerationalEvacuationTask::promote_in_place(ShenandoahHeapRegion // However, if we do not transfer the capacities, we end up reducing the amount of memory that would have // otherwise been available to hold old evacuations, because old available is max_capacity - used and now // we would be trading a fully empty region for a partially used region. + + size_t available_in_region = region->free(); + size_t plab_min_size_in_bytes = _heap->plab_min_size() * HeapWordSize; + if (available_in_region < plab_min_size_in_bytes) { + // The available memory in young had been retired. Retire it in old also. + region_used += available_in_region; + } + young_gen->decrease_used(region_used); young_gen->decrement_affiliated_region_count(); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp index 547f74236f3fe..f55af02085b7c 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -746,7 +746,7 @@ void ShenandoahHeap::increase_used(const ShenandoahAllocRequest& req) { notify_mutator_alloc_words(req.actual_size(), req.waste()); if (wasted_bytes > 0 && ShenandoahHeapRegion::requires_humongous(req.actual_size())) { - increase_humongous_waste(generation,wasted_bytes); + increase_humongous_waste(generation, wasted_bytes); } } } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp index b33803d7e0976..0af7beddcd70b 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp @@ -592,12 +592,10 @@ void ShenandoahHeapRegion::try_recycle_under_lock() { shenandoah_assert_heaplocked(); if (is_trash() && _recycling.try_set()) { if (is_trash()) { - ShenandoahHeap* heap = ShenandoahHeap::heap(); - ShenandoahGeneration* generation = heap->generation_for(affiliation()); - - heap->decrease_used(generation, used()); - generation->decrement_affiliated_region_count(); - + // At freeset rebuild time, which precedes recycling of collection set, we treat all cset regions as + // part of capacity, as empty, as fully available, and as unaffiliated. This provides short-lived optimism + // for triggering and pacing heuristics. It greatly simplifies and reduces the locking overhead required + // by more time-precise accounting of these details. 
recycle_internal(); } _recycling.unset(); @@ -618,11 +616,10 @@ void ShenandoahHeapRegion::try_recycle() { if (is_trash() && _recycling.try_set()) { // Double check region state after win the race to set recycling flag if (is_trash()) { - ShenandoahHeap* heap = ShenandoahHeap::heap(); - ShenandoahGeneration* generation = heap->generation_for(affiliation()); - heap->decrease_used(generation, used()); - generation->decrement_affiliated_region_count_without_lock(); - + // At freeset rebuild time, which precedes recycling of collection set, we treat all cset regions as + // part of capacity, as empty, as fully available, and as unaffiliated. This provides short-lived optimism + // for triggering and pacing heuristics. It greatly simplifies and reduces the locking overhead required + // by more time-precise accounting of these details. recycle_internal(); } _recycling.unset(); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp index cdf7848520765..5ecf669a07cbe 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp @@ -367,10 +367,10 @@ class ShenandoahVerifyOopClosure : public BasicOopIterateClosure { // a subset (e.g. the young generation or old generation) of the total heap. class ShenandoahCalculateRegionStatsClosure : public ShenandoahHeapRegionClosure { private: - size_t _used, _committed, _garbage, _regions, _humongous_waste, _trashed_regions; + size_t _used, _committed, _garbage, _regions, _humongous_waste, _trashed_regions, _trashed_used; public: ShenandoahCalculateRegionStatsClosure() : - _used(0), _committed(0), _garbage(0), _regions(0), _humongous_waste(0), _trashed_regions(0) {}; + _used(0), _committed(0), _garbage(0), _regions(0), _humongous_waste(0), _trashed_regions(0), _trashed_used(0) {}; void heap_region_do(ShenandoahHeapRegion* r) override { _used += r->used(); @@ -381,6 +381,7 @@ class ShenandoahCalculateRegionStatsClosure : public ShenandoahHeapRegionClosure } if (r->is_trash()) { _trashed_regions++; + _trashed_used += r->used(); } _regions++; log_debug(gc)("ShenandoahCalculateRegionStatsClosure: adding %zu for %s Region %zu, yielding: %zu", @@ -388,9 +389,11 @@ class ShenandoahCalculateRegionStatsClosure : public ShenandoahHeapRegionClosure } size_t used() const { return _used; } + size_t used_after_recycle() const { return _used - _trashed_used; } size_t committed() const { return _committed; } size_t garbage() const { return _garbage; } size_t regions() const { return _regions; } + size_t trashed_regions() const { return _trashed_regions; } size_t waste() const { return _humongous_waste; } // span is the total memory affiliated with these stats (some of which is in use and other is available) @@ -405,6 +408,11 @@ class ShenandoahGenerationStatsClosure : public ShenandoahHeapRegionClosure { ShenandoahCalculateRegionStatsClosure global; void heap_region_do(ShenandoahHeapRegion* r) override { +#define KELVIN_STATS +#ifdef KELVIN_STATS + log_info(gc)("StatsClosure::heap_region_do(), %s region %zu has used: %zu, is_trash: %s", + r->affiliation_name(), r->index(), r->used(), r->is_trash()? 
"yes": "no"); +#endif switch (r->affiliation()) { case FREE: return; @@ -438,13 +446,22 @@ class ShenandoahGenerationStatsClosure : public ShenandoahHeapRegionClosure { generation_used += pad; } +#define KELVIN_EXTRA_NOISE +#ifdef KELVIN_EXTRA_NOISE + log_info(gc)("%s: generation (%s) used size must be consistent: generation-used: %zu, regions-used from stats: %zu, stats.used_after_recycle: %zu", + label, generation->name(), generation_used, stats.used(), stats.used_after_recycle()); + // kelvin once thought he needed to use stats.used_after_recycle() + // in the following assertion, but maybe not... +#endif + guarantee(stats.used() == generation_used, "%s: generation (%s) used size must be consistent: generation-used: " PROPERFMT ", regions-used: " PROPERFMT, label, generation->name(), PROPERFMTARGS(generation_used), PROPERFMTARGS(stats.used())); - guarantee(stats.regions() == generation_used_regions, - "%s: generation (%s) used regions (%zu) must equal regions that are in use (%zu)", - label, generation->name(), generation->used_regions(), stats.regions()); + size_t stats_regions = stats.regions() - stats.trashed_regions(); + guarantee(stats_regions == generation_used_regions, + "%s: generation (%s) used regions (%zu) must equal regions that are in use (%zu) - trashed regions (%zu)", + label, generation->name(), generation->used_regions(), stats.regions(), stats.trashed_regions()); size_t generation_capacity = generation->max_capacity(); guarantee(stats.non_trashed_span() <= generation_capacity, From 5ee691fac5660de6c11de5759843bb0a96cc9604 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Tue, 8 Jul 2025 08:14:55 -0600 Subject: [PATCH 09/61] Fix several bugs in freeset accounting --- .../share/gc/shenandoah/shenandoahFreeSet.cpp | 154 +++++++++--------- .../gc/shenandoah/shenandoahHeapRegion.cpp | 7 + .../gc/shenandoah/shenandoahHeapRegion.hpp | 3 + .../shenandoahHeapRegion.inline.hpp | 17 ++ .../gc/shenandoah/shenandoahVerifier.cpp | 20 ++- 5 files changed, 120 insertions(+), 81 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index 525f66449b1f0..e182c5b365f7c 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -581,7 +581,8 @@ void ShenandoahRegionPartitions::establish_interval(ShenandoahFreeSetPartitionId } if (low_empty_idx != max()) { ShenandoahHeapRegion* r = ShenandoahHeap::heap()->get_region(low_empty_idx); - assert (in_free_set(partition, low_empty_idx) && r->is_empty(), "Must be empty and in partition of established interval"); + assert (in_free_set(partition, low_empty_idx) && (r->is_trash() || r->free() == _region_size_bytes), + "Must be empty and in partition of established interval"); r = ShenandoahHeap::heap()->get_region(high_empty_idx); assert (in_free_set(partition, high_empty_idx), "Must be in partition of established interval"); } @@ -683,7 +684,19 @@ void ShenandoahRegionPartitions::retire_from_partition(ShenandoahFreeSetPartitio if (used_bytes < _region_size_bytes) { // Count the alignment pad remnant of memory as used when we retire this region - increase_used(partition, _region_size_bytes - used_bytes); + size_t fill_padding = _region_size_bytes - used_bytes; + increase_used(partition, fill_padding); +#ifdef ASSERT + // Fill the unused memory so that verification will not be confused by inconsistent tallies of used + size_t fill_words = fill_padding / HeapWordSize; + ShenandoahHeapRegion*r = 
ShenandoahHeap::heap()->get_region(idx); + if (fill_words >= ShenandoahHeap::min_fill_size()) { + r->allocate_fill(fill_words); + } +#ifdef KELVIN_USED + log_info(gc)("Retiring generation %zu with padding: %zu", idx, fill_padding); +#endif +#endif } _membership[int(partition)].clear_bit(idx); shrink_interval_if_boundary_modified(partition, idx); @@ -1516,17 +1529,23 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah size_t ac = alloc_capacity(r); ShenandoahFreeSetPartitionId orig_partition; + ShenandoahGeneration* request_generation = nullptr; if (req.is_mutator_alloc()) { + request_generation = _heap->young_generation(); orig_partition = ShenandoahFreeSetPartitionId::Mutator; } else if (req.type() == ShenandoahAllocRequest::_alloc_gclab) { + request_generation = _heap->young_generation(); orig_partition = ShenandoahFreeSetPartitionId::Collector; } else if (req.type() == ShenandoahAllocRequest::_alloc_plab) { + request_generation = _heap->old_generation(); orig_partition = ShenandoahFreeSetPartitionId::OldCollector; } else { assert(req.type() == ShenandoahAllocRequest::_alloc_shared_gc, "Unexpected allocation type"); if (req.is_old()) { + request_generation = _heap->old_generation(); orig_partition = ShenandoahFreeSetPartitionId::OldCollector; } else { + request_generation = _heap->young_generation(); orig_partition = ShenandoahFreeSetPartitionId::Collector; } } @@ -1538,6 +1557,10 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah // then retire the region so that subsequent searches can find available memory more quickly. size_t idx = r->index(); + request_generation->increase_used(r->free()); + if (_heap->mode()->is_generational()) { + _heap->global_generation()->increase_used(r->free()); + } _partitions.retire_from_partition(orig_partition, idx, r->used()); _partitions.assert_bounds(); } else if ((result != nullptr) && in_new_region) { @@ -1945,7 +1968,7 @@ void ShenandoahFreeSet::clear_internal() { _partitions.set_bias_from_left_to_right(ShenandoahFreeSetPartitionId::OldCollector, false); } -void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_cset_regions, size_t &old_cset_regions, +void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_regions, size_t &old_trashed_regions, size_t &first_old_region, size_t &last_old_region, size_t &old_region_count) { clear_internal(); @@ -1953,8 +1976,8 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_cset_regi first_old_region = _heap->num_regions(); last_old_region = 0; old_region_count = 0; - old_cset_regions = 0; - young_cset_regions = 0; + old_trashed_regions = 0; + young_trashed_regions = 0; size_t region_size_bytes = _partitions.region_size_bytes(); size_t max_regions = _partitions.max_regions(); @@ -1986,22 +2009,20 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_cset_regi for (size_t idx = 0; idx < num_regions; idx++) { ShenandoahHeapRegion* region = _heap->get_region(idx); if (region->is_trash()) { - // Trashed regions represent regions that had been in the collection partition but have not yet been "cleaned up". - // The cset regions are not recycled until we have finished update refs. + // Trashed regions represent immediate garbage identified by final mark and regions that had been in the collection + // partition but have not yet been "cleaned up" following update refs. 
if (region->is_old()) { -#define KELVIN_PUZZLE +#undef KELVIN_PUZZLE #ifdef KELVIN_PUZZLE log_info(gc)("find_alloc_capacity sees old trashed region %zu, with %zu used bytes", region->index(), region->used()); #endif - assert(region->used() == region_size_bytes, "Expect cset regions to be fully used"); - old_cset_regions++; + old_trashed_regions++; } else { assert(region->is_young(), "Trashed region should be old or young"); #ifdef KELVIN_PUZZLE log_info(gc)("find_alloc_capacity sees young trashed region %zu, with %zu used bytes", region->index(), region->used()); #endif - assert(region->used() == region_size_bytes, "Expect cset regions to be fully used"); - young_cset_regions++; + young_trashed_regions++; } } else if (region->is_old()) { // count both humongous and regular regions, but don't count trash (cset) regions. @@ -2039,6 +2060,7 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_cset_regi } } mutator_regions++; + total_mutator_regions++; mutator_used += (region_size_bytes - ac); } else { // !region->is_trash() && region is_old() @@ -2059,54 +2081,43 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_cset_regi } } old_collector_regions++; + total_old_collector_regions++; old_collector_used += region_size_bytes - ac; } } else { + // This region does not have enough free to be part of the free set. Count all of its memory as used. if (region->is_old()) { - old_collector_used += region_size_bytes - ac; + old_collector_used += region_size_bytes; + total_old_collector_regions++; } else { - mutator_used += region_size_bytes - ac; + mutator_used += region_size_bytes; + total_mutator_regions++; } } - if (region->is_trash() || !region->is_old()) { - total_mutator_regions++; - } else { - total_old_collector_regions++; - } } else { + // This region does not allow allocation (it is retired or is humongous). Count all of its memory as used. + size_t humongous_waste_bytes = 0; if (region->is_humongous_start()) { oop obj = cast_to_oop(region->bottom()); size_t byte_size = obj->size() * HeapWordSize; size_t region_span = ShenandoahHeapRegion::required_regions(byte_size); - size_t humongous_waste_bytes = region_span * ShenandoahHeapRegion::region_size_words() - byte_size; - if (is_generational) { - if (region->is_young()) { - total_mutator_regions += region_span; - mutator_humongous_waste += humongous_waste_bytes; - } else { - total_old_collector_regions += region_span; - old_collector_humongous_waste += humongous_waste_bytes; - } - } else { - total_mutator_regions += region_span; - mutator_humongous_waste += humongous_waste_bytes; - } + humongous_waste_bytes = region_span * ShenandoahHeapRegion::region_size_words() - byte_size; + } + if (region->is_old()) { + old_collector_used += region_size_bytes; + total_old_collector_regions++; + old_collector_humongous_waste += humongous_waste_bytes; + } else { + mutator_used += region_size_bytes; + total_mutator_regions++; + mutator_humongous_waste += humongous_waste_bytes; } } } // At freeset rebuild time, which precedes recycling of collection set, we treat all cset regions as - // part of capacity, as fully used, as affiliated. Computations performed after prepare_to_rebuild() - // assume the cset regions are part of used regions within each generation. These same computations make use - // of the number of cset regions, with the understanding that cset regions will become unaffiliated and available - // after finish_rebuild. 
- // - // Partition ranges and empty ranges do not include trash regions as of the end of finish_rebuild. Region ranges - // will be adjusted after we finish recycling the trash. - total_mutator_regions += young_cset_regions; - mutator_used += young_cset_regions * region_size_bytes; - total_old_collector_regions += old_cset_regions; - old_collector_used += old_cset_regions * region_size_bytes; - // No need to update generation sizes here. These are the sizes already recognized by the generations. Theses + // part of capacity, as fully available, as unaffiliated. We place trashed regions into the Mutator partition. + + // No need to update generation sizes here. These are the sizes already recognized by the generations. These // adjustments allow the freeset tallies to match the generation tallies. #ifdef KELVIN_ENHANCEMENTS @@ -2168,6 +2179,12 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_cset_regi old_collector_empty, old_collector_regions, old_collector_used, old_collector_humongous_waste); recompute_total_used(); + // Update generations to assure consistency while we still hold the lock. This handles case that someone consults + // generation sizes between now and start of finish_rebuild. + establish_generation_sizes(total_mutator_regions, total_old_collector_regions, + mutator_regions, old_collector_regions, + mutator_used, old_collector_used); + log_debug(gc, free)(" After find_regions_with_alloc_capacity(), Mutator range [%zd, %zd]," " Old Collector range [%zd, %zd]", _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator), @@ -2355,7 +2372,7 @@ void ShenandoahFreeSet::move_regions_from_collector_to_mutator(size_t max_xfer_r } // Overwrite arguments to represent the amount of memory in each generation that is about to be recycled -void ShenandoahFreeSet::prepare_to_rebuild(size_t &young_cset_regions, size_t &old_cset_regions, +void ShenandoahFreeSet::prepare_to_rebuild(size_t &young_trashed_regions, size_t &old_trashed_regions, size_t &first_old_region, size_t &last_old_region, size_t &old_region_count) { shenandoah_assert_heaplocked(); // This resets all state information, removing all regions from all sets. @@ -2364,7 +2381,7 @@ void ShenandoahFreeSet::prepare_to_rebuild(size_t &young_cset_regions, size_t &o // This places regions that have alloc_capacity into the old_collector set if they identify as is_old() or the // mutator set otherwise. All trashed (cset) regions are affiliated young and placed in mutator set. - find_regions_with_alloc_capacity(young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count); + find_regions_with_alloc_capacity(young_trashed_regions, old_trashed_regions, first_old_region, last_old_region, old_region_count); } // The totals reported here anticipate the recycling of trash regions. 
Their memory is counted as unused and fully @@ -2412,13 +2429,13 @@ void ShenandoahFreeSet::establish_generation_sizes(size_t young_region_count, si } } -void ShenandoahFreeSet::finish_rebuild(size_t young_cset_regions, size_t old_cset_regions, size_t old_region_count, +void ShenandoahFreeSet::finish_rebuild(size_t young_trashed_regions, size_t old_trashed_regions, size_t old_region_count, bool have_evacuation_reserves) { shenandoah_assert_heaplocked(); size_t young_reserve(0), old_reserve(0); if (_heap->mode()->is_generational()) { - compute_young_and_old_reserves(young_cset_regions, old_cset_regions, have_evacuation_reserves, + compute_young_and_old_reserves(young_trashed_regions, old_trashed_regions, have_evacuation_reserves, young_reserve, old_reserve); } else { young_reserve = (_heap->max_capacity() / 100) * ShenandoahEvacReserve; @@ -2438,7 +2455,7 @@ void ShenandoahFreeSet::finish_rebuild(size_t young_cset_regions, size_t old_cse log_status(); } -void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_cset_regions, size_t old_cset_regions, +void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_trashed_regions, size_t old_trashed_regions, bool have_evacuation_reserves, size_t& young_reserve_result, size_t& old_reserve_result) const { shenandoah_assert_generational(); @@ -2453,8 +2470,8 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_cset_regions size_t young_unaffiliated_regions = young_generation->free_unaffiliated_regions(); // Add in the regions we anticipate to be freed by evacuation of the collection set - old_unaffiliated_regions += old_cset_regions; - young_unaffiliated_regions += young_cset_regions; + old_unaffiliated_regions += old_trashed_regions; + young_unaffiliated_regions += young_trashed_regions; // Consult old-region balance to make adjustments to current generation capacities and availability. // The generation region transfers take place after we rebuild. 
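As a worked illustration of the rebuild-time convention described in the comments above (trashed regions are treated as empty, fully available, and unaffiliated, while retired and humongous regions count as fully used), here is a small self-contained sketch of how a rebuild pass might tally each region. The types and names (RegionKind, RegionModel, RebuildTally, tally_region) are invented for this example and only approximate what find_regions_with_alloc_capacity() does.

#include <cstddef>

enum class RegionKind { Trash, Regular, HumongousStart };

struct RegionModel {
  RegionKind kind;
  size_t used_bytes;   // bytes currently allocated in the region
  size_t waste_bytes;  // alignment waste at the end of a humongous object, if any
};

struct RebuildTally {
  size_t trashed_regions = 0;
  size_t used = 0;
  size_t humongous_waste = 0;
};

// region_size: fixed region size in bytes; min_free: smallest free chunk worth
// keeping a region in the free set for (below this the region is retired).
void tally_region(const RegionModel& r, size_t region_size, size_t min_free, RebuildTally& t) {
  switch (r.kind) {
    case RegionKind::Trash:
      // Counted as empty and fully available; contributes nothing to used.
      t.trashed_regions++;
      break;
    case RegionKind::HumongousStart:
      // Not allocatable: the whole region counts as used; waste is tracked separately.
      t.used += region_size;
      t.humongous_waste += r.waste_bytes;
      break;
    case RegionKind::Regular:
      if (region_size - r.used_bytes < min_free) {
        t.used += region_size;   // too little free space: retire as fully used
      } else {
        t.used += r.used_bytes;  // stays in the free set with its current usage
      }
      break;
  }
}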
@@ -2557,15 +2574,10 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old size_t old_collector_available = _partitions.available_in(ShenandoahFreeSetPartitionId::OldCollector);; size_t collector_available = _partitions.available_in(ShenandoahFreeSetPartitionId::Collector); - size_t trashed_mutator_used = 0; - size_t trashed_collector_used = 0; - size_t trashed_old_collector_used = 0; - #define KELVIN_RESERVE #ifdef KELVIN_RESERVE log_info(gc)("reserve_regions() seeks to_reserve: %zu and to_reserve_old: %zu", to_reserve, to_reserve_old); #endif -#undef KELVIN_RESERVE for (size_t i = _heap->num_regions(); i > 0; i--) { idx_t idx = i - 1; @@ -2618,9 +2630,6 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old old_collector_available += ac; regions_to_old_collector++; empty_regions_to_old_collector++; - if (r->is_trash()) { - trashed_old_collector_used += ac; - } log_trace(gc, free)(" Shifting region %zu from mutator_free to old_collector_free", idx); log_trace(gc, free)(" Shifted Mutator range [%zd, %zd]," @@ -2663,14 +2672,11 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old ShenandoahFreeSetPartitionId::Collector, ac); collector_available += ac; regions_to_collector++; - if (r->is_trash()) { - trashed_collector_used += ac; - } else if (ac != region_size_bytes) { + if (ac != region_size_bytes) { young_used_regions++; young_used_bytes = region_size_bytes - ac; } - old_used_bytes += region_size_bytes - ac; log_trace(gc, free)(" Shifting region %zu from mutator_free to collector_free", idx); log_trace(gc, free)(" Shifted Mutator range [%zd, %zd]," " Collector range [%zd, %zd]", @@ -2694,9 +2700,7 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old if ((ac == region_size_bytes) && (idx > mutator_empty_high_idx)) { mutator_empty_high_idx = idx; } - if (r->is_trash()) { - trashed_mutator_used += ac; - } else if (ac != region_size_bytes) { + if (ac != region_size_bytes) { young_used_regions++; young_used_bytes += region_size_bytes - ac; @@ -2769,7 +2773,7 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old } } - _partitions.decrease_used(ShenandoahFreeSetPartitionId::Mutator, used_to_old_collector + used_to_collector + trashed_mutator_used); + _partitions.decrease_used(ShenandoahFreeSetPartitionId::Mutator, used_to_old_collector + used_to_collector); _partitions.decrease_total_region_counts(ShenandoahFreeSetPartitionId::Mutator, regions_to_old_collector + regions_to_collector); _partitions.decrease_region_counts(ShenandoahFreeSetPartitionId::Mutator, regions_to_old_collector + regions_to_collector); @@ -2792,16 +2796,12 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old _partitions.increase_capacity(ShenandoahFreeSetPartitionId::OldCollector, regions_to_old_collector * region_size_bytes); _partitions.increase_available(ShenandoahFreeSetPartitionId::OldCollector, regions_to_old_collector * region_size_bytes); - if (used_to_collector > trashed_collector_used) { - _partitions.increase_used(ShenandoahFreeSetPartitionId::Collector, used_to_collector - trashed_collector_used); - } else { - _partitions.decrease_used(ShenandoahFreeSetPartitionId::Collector, trashed_collector_used - used_to_collector); + if (used_to_collector > 0) { + _partitions.increase_used(ShenandoahFreeSetPartitionId::Collector, used_to_collector); } - if (used_to_old_collector > trashed_old_collector_used) { - 
_partitions.increase_used(ShenandoahFreeSetPartitionId::OldCollector, used_to_old_collector - trashed_old_collector_used); - } else { - _partitions.decrease_used(ShenandoahFreeSetPartitionId::OldCollector, trashed_old_collector_used - used_to_old_collector); + if (used_to_old_collector > 0) { + _partitions.increase_used(ShenandoahFreeSetPartitionId::OldCollector, used_to_old_collector); } _partitions.expand_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId::Collector, @@ -2810,6 +2810,10 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old _partitions.expand_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId::OldCollector, old_collector_low_idx, old_collector_high_idx, old_collector_empty_low_idx, old_collector_empty_high_idx); +#ifdef KELVIN_RESERVE + log_info(gc)("reserve_regions() establishes Mutator interval(low: %zu, high: %zu, low_empty: %zu, high_empty: %zu)", + mutator_low_idx, mutator_high_idx, mutator_empty_low_idx, mutator_empty_high_idx); +#endif _partitions.establish_interval(ShenandoahFreeSetPartitionId::Mutator, mutator_low_idx, mutator_high_idx, mutator_empty_low_idx, mutator_empty_high_idx); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp index 0af7beddcd70b..6684dab08c538 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp @@ -299,6 +299,10 @@ void ShenandoahHeapRegion::make_cset() { } void ShenandoahHeapRegion::make_trash() { +#define KELVIN_TRASH +#ifdef KELVIN_TRASH + log_info(gc)("make_trash() on region %zu", index()); +#endif shenandoah_assert_heaplocked(); reset_age(); switch (state()) { @@ -321,6 +325,9 @@ void ShenandoahHeapRegion::make_trash() { } void ShenandoahHeapRegion::make_trash_immediate() { +#ifdef KELVIN_TRASH + log_info(gc)("make_trash_immediate() on region %zu", index()); +#endif make_trash(); // On this path, we know there are no marked objects in the region, diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp index 4c99364bc6ed4..742b8cddeccdc 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp @@ -366,6 +366,9 @@ class ShenandoahHeapRegion { // Allocation (return nullptr if full) inline HeapWord* allocate(size_t word_size, const ShenandoahAllocRequest& req); + // Allocate fill after top + inline HeapWord* allocate_fill(size_t word_size); + inline void clear_live_data(); void set_live_data(size_t s); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp index 0df482c1e2dab..c866bb0977f0e 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp @@ -88,6 +88,23 @@ HeapWord* ShenandoahHeapRegion::allocate_aligned(size_t size, ShenandoahAllocReq } } +HeapWord* ShenandoahHeapRegion::allocate_fill(size_t size) { + shenandoah_assert_heaplocked_or_safepoint(); + assert(is_object_aligned(size), "alloc size breaks alignment: %zu", size); + assert(size >= ShenandoahHeap::min_fill_size(), "Cannot fill unless min fill size"); + + HeapWord* obj = top(); + HeapWord* new_top = obj + size; + ShenandoahHeap::fill_with_object(obj, size); + set_top(new_top); + + assert(is_object_aligned(new_top), "new top 
breaks alignment: " PTR_FORMAT, p2i(new_top)); + assert(is_object_aligned(obj), "obj is not aligned: " PTR_FORMAT, p2i(obj)); + + return obj; +} + + HeapWord* ShenandoahHeapRegion::allocate(size_t size, const ShenandoahAllocRequest& req) { shenandoah_assert_heaplocked_or_safepoint(); assert(is_object_aligned(size), "alloc size breaks alignment: %zu", size); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp index 5ecf669a07cbe..8b9d77055e1da 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp @@ -373,6 +373,11 @@ class ShenandoahCalculateRegionStatsClosure : public ShenandoahHeapRegionClosure _used(0), _committed(0), _garbage(0), _regions(0), _humongous_waste(0), _trashed_regions(0), _trashed_used(0) {}; void heap_region_do(ShenandoahHeapRegion* r) override { +#define KELVIN_STATS +#ifdef KELVIN_STATS + log_info(gc)("ShenandoahCalculateRegiontatsClosure::heap_region_do(), %s region %zu has used: %zu, is_trash: %s", + r->affiliation_name(), r->index(), r->used(), r->is_trash()? "yes": "no"); +#endif _used += r->used(); _garbage += r->garbage(); _committed += r->is_committed() ? ShenandoahHeapRegion::region_size_bytes() : 0; @@ -384,6 +389,10 @@ class ShenandoahCalculateRegionStatsClosure : public ShenandoahHeapRegionClosure _trashed_used += r->used(); } _regions++; +#ifdef KELVIN_STATS + log_info(gc)(" _used: %zu, _garbage: %zu, _committed: %zu, _humongous_waste: %zu, _trashed_regions: %zu, _trashed_used: %zu", + _used, _garbage, _committed, _humongous_waste, _trashed_regions, _trashed_used); +#endif log_debug(gc)("ShenandoahCalculateRegionStatsClosure: adding %zu for %s Region %zu, yielding: %zu", r->used(), (r->is_humongous() ? "humongous" : "regular"), r->index(), _used); } @@ -408,9 +417,8 @@ class ShenandoahGenerationStatsClosure : public ShenandoahHeapRegionClosure { ShenandoahCalculateRegionStatsClosure global; void heap_region_do(ShenandoahHeapRegion* r) override { -#define KELVIN_STATS #ifdef KELVIN_STATS - log_info(gc)("StatsClosure::heap_region_do(), %s region %zu has used: %zu, is_trash: %s", + log_info(gc)("ShenandoahGenerationaStatsClosure::heap_region_do(), %s region %zu has used: %zu, is_trash: %s", r->affiliation_name(), r->index(), r->used(), r->is_trash()? "yes": "no"); #endif switch (r->affiliation()) { @@ -454,9 +462,9 @@ class ShenandoahGenerationStatsClosure : public ShenandoahHeapRegionClosure { // in the following assertion, but maybe not... 
#endif - guarantee(stats.used() == generation_used, + guarantee(stats.used_after_recycle() == generation_used, "%s: generation (%s) used size must be consistent: generation-used: " PROPERFMT ", regions-used: " PROPERFMT, - label, generation->name(), PROPERFMTARGS(generation_used), PROPERFMTARGS(stats.used())); + label, generation->name(), PROPERFMTARGS(generation_used), PROPERFMTARGS(stats.used_after_recycle())); size_t stats_regions = stats.regions() - stats.trashed_regions(); guarantee(stats_regions == generation_used_regions, @@ -889,11 +897,11 @@ void ShenandoahVerifier::verify_at_safepoint(const char* label, heap_used = _heap->used(); } if (sizeness != _verify_size_disable) { - guarantee(cl.used() == heap_used, + guarantee(cl.used_after_recycle() == heap_used, "%s: heap used size must be consistent: heap-used = %zu%s, regions-used = %zu%s", label, byte_size_in_proper_unit(heap_used), proper_unit_for_byte_size(heap_used), - byte_size_in_proper_unit(cl.used()), proper_unit_for_byte_size(cl.used())); + byte_size_in_proper_unit(cl.used_after_recycle()), proper_unit_for_byte_size(cl.used_after_recycle())); } size_t heap_committed = _heap->committed(); guarantee(cl.committed() == heap_committed, From 39ba8058488d8b061955d76552e9ed265ce5513d Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Tue, 8 Jul 2025 18:10:02 -0600 Subject: [PATCH 10/61] Some progress --- .../share/gc/shenandoah/shenandoahFreeSet.cpp | 24 +++++++-- .../share/gc/shenandoah/shenandoahFreeSet.hpp | 39 ++++++++++++++ .../gc/shenandoah/shenandoahGeneration.cpp | 52 +++++++++++++++++-- .../shenandoahGenerationalEvacuationTask.cpp | 6 +-- .../gc/shenandoah/shenandoahVerifier.cpp | 36 +++++++------ .../gc/shenandoah/shenandoahVerifier.hpp | 5 +- 6 files changed, 132 insertions(+), 30 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index e182c5b365f7c..1f40efc53df6e 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -1098,7 +1098,10 @@ ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) : _alloc_bias_weight(0), _total_young_used(0), _total_old_used(0), - _total_global_used(0) + _total_global_used(0), + _young_affiliated_regions(0), + _old_affiliated_regions(0), + _global_affiliated_regions(0) { clear_internal(); } @@ -1138,6 +1141,7 @@ void ShenandoahFreeSet::add_promoted_in_place_region_to_old_collector(Shenandoah assert(available_in_region != region_size_bytes, "Nothing to promote in place"); } recompute_total_used(); + recompute_total_affiliated(); #ifdef KELVIN_CAPACITY log_info(gc)("Ater add_pip_to_old(%zu), Mutate used: %zu, capacity: %zu, available: %zu, region_counts: %zu, total_regions: %zu", region->index(), @@ -1435,7 +1439,6 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah _heap->old_generation()->clear_cards_for(r); } _heap->generation_for(r->affiliation())->increment_affiliated_region_count(); - #ifdef ASSERT ShenandoahMarkingContext* const ctx = _heap->marking_context(); assert(ctx->top_at_mark_start(r) == r->bottom(), "Newly established allocation region starts with TAMS equal to bottom"); @@ -1558,6 +1561,9 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah size_t idx = r->index(); request_generation->increase_used(r->free()); + if (in_new_region) { + _partitions.decrease_empty_region_counts(orig_partition, 1); + } if (_heap->mode()->is_generational()) 
{ _heap->global_generation()->increase_used(r->free()); } @@ -1567,6 +1573,7 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah _partitions.one_region_is_no_longer_empty(orig_partition); } recompute_total_used(); + recompute_total_affiliated(); // could optimize: only recompute affiliated for orig_partition and global return result; } @@ -1669,7 +1676,7 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) { _heap->notify_mutator_alloc_words(ShenandoahHeapRegion::region_size_words() - remainder, true); } - // retire_range_from_partition() will adjust bounds on Mutator free set if appropriate + // retire_range_from_partition() will adjust bounds on Mutator free set if appropriate and will recompute affiliated _partitions.retire_range_from_partition(ShenandoahFreeSetPartitionId::Mutator, beg, end); size_t total_humongous_size = ShenandoahHeapRegion::region_size_bytes() * num; _partitions.increase_used(ShenandoahFreeSetPartitionId::Mutator, total_humongous_size); @@ -1891,6 +1898,7 @@ bool ShenandoahFreeSet::flip_to_old_gc(ShenandoahHeapRegion* r) { _partitions.assert_bounds(); _heap->old_generation()->augment_evacuation_reserve(region_capacity); recompute_total_used(); + recompute_total_affiliated(); return true; } @@ -1923,7 +1931,7 @@ bool ShenandoahFreeSet::flip_to_old_gc(ShenandoahHeapRegion* r) { ShenandoahFreeSetPartitionId::OldCollector, region_capacity); _partitions.assert_bounds(); recompute_total_used(); - + recompute_total_affiliated(); // 4. Do not adjust capacities for generations, we just swapped the regions that have already // been accounted for. However, we should adjust the evacuation reserves as those may have changed. shenandoah_assert_heaplocked(); @@ -1948,7 +1956,7 @@ void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) { ShenandoahFreeSetPartitionId::Collector, ac); _partitions.assert_bounds(); recompute_total_used(); - + recompute_total_affiliated(); // We do not ensure that the region is no longer trash, relying on try_allocate_in(), which always comes next, // to recycle trash before attempting to allocate anything in the region. } @@ -1961,6 +1969,7 @@ void ShenandoahFreeSet::clear_internal() { shenandoah_assert_heaplocked(); _partitions.make_all_regions_unavailable(); recompute_total_used(); + recompute_total_affiliated(); _alloc_bias_weight = 0; _partitions.set_bias_from_left_to_right(ShenandoahFreeSetPartitionId::Mutator, true); @@ -2179,6 +2188,7 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r old_collector_empty, old_collector_regions, old_collector_used, old_collector_humongous_waste); recompute_total_used(); + recompute_total_affiliated(); // Update generations to assure consistency while we still hold the lock. This handles case that someone consults // generation sizes between now and start of finish_rebuild. 
establish_generation_sizes(total_mutator_regions, total_old_collector_regions, @@ -2213,6 +2223,7 @@ void ShenandoahFreeSet::transfer_humongous_regions_from_mutator_to_old_collector recompute_total_young_used(); recompute_total_old_used(); + recompute_total_affiliated(); // global_used is unaffected by this transfer // No need to adjust ranges because humongous regions are not allocatable @@ -2268,6 +2279,7 @@ size_t ShenandoahFreeSet::transfer_empty_regions_from_collector_set_to_mutator_s _partitions.increase_empty_region_counts(ShenandoahFreeSetPartitionId::Mutator, transferred_regions); recompute_total_used(); + // Should not need to recompute_total_affiliated() because all transferred regions are empty. return transferred_regions; } @@ -2323,6 +2335,7 @@ transfer_non_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPa _partitions.increase_used(ShenandoahFreeSetPartitionId::Mutator, used_transfer); recompute_total_used(); + recompute_total_affiliated(); return transferred_regions; } @@ -2818,6 +2831,7 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old mutator_low_idx, mutator_high_idx, mutator_empty_low_idx, mutator_empty_high_idx); recompute_total_used(); + recompute_total_affiliated(); if (LogTarget(Info, gc, free)::is_enabled()) { size_t old_reserve = _partitions.available_in(ShenandoahFreeSetPartitionId::OldCollector); if (old_reserve < to_reserve_old) { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp index 65b7cb2261c9d..f739fd63053fc 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp @@ -116,6 +116,7 @@ class ShenandoahRegionPartitions { // _retired_regions[p] is _total_region_counts[p] - _region_counts[p] // _empty_region_counts[p] <= _region_counts[p] <= _total_region_counts[p] // generation_used is (_total_region_counts[p] - _region_counts[p]) * region_size_bytes + _used[p] + // (affiliated regions is total_region_counts - empty_region_counts) size_t _region_counts[UIntNumPartitions]; size_t _total_region_counts[UIntNumPartitions]; size_t _empty_region_counts[UIntNumPartitions]; @@ -480,6 +481,32 @@ class ShenandoahFreeSet : public CHeapObj { recompute_total_global_used(); } + size_t _young_affiliated_regions; + size_t _old_affiliated_regions; + size_t _global_affiliated_regions; + + inline void recompute_total_affiliated() { + _young_affiliated_regions = (_partitions.get_total_region_counts(ShenandoahFreeSetPartitionId::Mutator) + + _partitions.get_total_region_counts(ShenandoahFreeSetPartitionId::Collector) - + (_partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::Mutator) + + _partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::Collector))); + _old_affiliated_regions = (_partitions.get_total_region_counts(ShenandoahFreeSetPartitionId::OldCollector) - + _partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::OldCollector)); + _global_affiliated_regions = _young_affiliated_regions + _old_affiliated_regions; +#define KELVIN_AFFILIATED +#ifdef KELVIN_AFFILIATED + log_info(gc)("recompute_affiliated(young: %zu, old: %zu, global: %zu)", + _young_affiliated_regions, _old_affiliated_regions, _global_affiliated_regions); +#endif +#ifdef ASSERT + if (ShenandoahHeap::heap()->mode()->is_generational()) { + assert(_young_affiliated_regions * ShenandoahHeapRegion::region_size_bytes() >= _total_young_used, "sanity"); + assert(_old_affiliated_regions * 
ShenandoahHeapRegion::region_size_bytes() >= _total_old_used, "sanity"); + } + assert(_global_affiliated_regions * ShenandoahHeapRegion::region_size_bytes() >= _total_global_used, "sanity"); +#endif + } + // Increases used memory for the partition if the allocation is successful. `in_new_region` will be set // if this is the first allocation in the region. HeapWord* try_allocate_in(ShenandoahHeapRegion* region, ShenandoahAllocRequest& req, bool& in_new_region); @@ -585,6 +612,18 @@ class ShenandoahFreeSet : public CHeapObj { return _total_global_used; } + size_t young_affiliated_regions() { + return _young_affiliated_regions; + } + + size_t old_affiliated_regions() { + return _old_affiliated_regions; + } + + size_t global_affiliated_regions() { + return _global_affiliated_regions; + } + void clear(); // Examine the existing free set representation, capturing the current state into var arguments: diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp index ee114e8099579..c268e977aa550 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp @@ -863,7 +863,12 @@ size_t ShenandoahGeneration::increment_affiliated_region_count() { // During full gc, multiple GC worker threads may change region affiliations without a lock. No lock is enforced // on read and write of _affiliated_region_count. At the end of full gc, a single thread overwrites the count with // a coherent value. - return Atomic::add(&_affiliated_region_count, (size_t) 1); + size_t result = Atomic::add(&_affiliated_region_count, (size_t) 1); +#define KELVIN_AFFILIATED +#ifdef KELVIN_AFFILIATED + log_info(gc)("%s: increment_affiliated_region_count() by 1: %zu", name(), result); +#endif + return result; } size_t ShenandoahGeneration::decrement_affiliated_region_count() { @@ -875,16 +880,27 @@ size_t ShenandoahGeneration::decrement_affiliated_region_count() { assert(ShenandoahHeap::heap()->is_full_gc_in_progress() || (used() + _humongous_waste <= affiliated_region_count * ShenandoahHeapRegion::region_size_bytes()), "used + humongous cannot exceed regions"); +#ifdef KELVIN_AFFILIATED + log_info(gc)("%s: decrement_affiliated_region_count() by 1: %zu", name(), affiliated_region_count); +#endif return affiliated_region_count; } size_t ShenandoahGeneration::decrement_affiliated_region_count_without_lock() { - return Atomic::sub(&_affiliated_region_count, (size_t) 1); + size_t result = Atomic::sub(&_affiliated_region_count, (size_t) 1); +#ifdef KELVIN_AFFILIATED + log_info(gc)("%s: decrement_affiliated_region_count_without_lock() by 1: %zu", name(), result); +#endif + return result; } size_t ShenandoahGeneration::increase_affiliated_region_count(size_t delta) { shenandoah_assert_heaplocked_or_safepoint(); - return Atomic::add(&_affiliated_region_count, delta); + size_t result = Atomic::add(&_affiliated_region_count, delta); +#ifdef KELVIN_AFFILIATED + log_info(gc)("%s: increase_affiliated_region_count() by %zu: %zu", name(), delta, result); +#endif + return result; } size_t ShenandoahGeneration::decrease_affiliated_region_count(size_t delta) { @@ -895,6 +911,9 @@ size_t ShenandoahGeneration::decrease_affiliated_region_count(size_t delta) { assert(ShenandoahHeap::heap()->is_full_gc_in_progress() || (_used + _humongous_waste <= affiliated_region_count * ShenandoahHeapRegion::region_size_bytes()), "used + humongous cannot exceed regions"); +#ifdef KELVIN_AFFILIATED + log_info(gc)("%s: 
decrease_affiliated_region_count() by %zu: %zu", name(), delta, affiliated_region_count); +#endif return affiliated_region_count; } @@ -903,6 +922,10 @@ void ShenandoahGeneration::establish_usage(size_t num_regions, size_t num_bytes, Atomic::store(&_affiliated_region_count, num_regions); Atomic::store(&_used, num_bytes); _humongous_waste = humongous_waste; +#ifdef KELVIN_AFFILIATED + log_info(gc)("%s: establish_usage(affiliated regions: %zu bytes: %zu, humongous_waste: %zu)", + name(), num_regions, num_bytes, humongous_waste); +#endif } void ShenandoahGeneration::increase_used(size_t bytes) { @@ -945,7 +968,28 @@ void ShenandoahGeneration::decrease_humongous_waste(size_t bytes) { } size_t ShenandoahGeneration::used_regions() const { - return Atomic::load(&_affiliated_region_count); + size_t result; + switch (_type) { + case ShenandoahGenerationType::OLD: + result = _free_set->old_affiliated_regions(); + break; + case ShenandoahGenerationType::YOUNG: + result = _free_set->young_affiliated_regions(); + break; + case ShenandoahGenerationType::GLOBAL: + case ShenandoahGenerationType::NON_GEN: + default: + result = _free_set->global_affiliated_regions(); + break; + } + size_t original_result = Atomic::load(&_affiliated_region_count); +#ifdef KELVIN_SCAFFOLDING + if (result != original_result) { + log_info(gc)("Problem with used for generation %s, freeset thinks %zu, generation thinks: %zu", + shenandoah_generation_name(_type), result, original_result); + } +#endif + return result; } size_t ShenandoahGeneration::free_unaffiliated_regions() const { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp index be6dc20f4cbcb..185a619568a6a 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp @@ -249,6 +249,9 @@ void ShenandoahGenerationalEvacuationTask::promote_in_place(ShenandoahHeapRegion region_used += available_in_region; } + // add_old_collector_free_region() increases promoted_reserve() if available space exceeds plab_min_size() + _heap->free_set()->add_promoted_in_place_region_to_old_collector(region); + young_gen->decrease_used(region_used); young_gen->decrement_affiliated_region_count(); @@ -258,9 +261,6 @@ void ShenandoahGenerationalEvacuationTask::promote_in_place(ShenandoahHeapRegion old_gen->increment_affiliated_region_count(); old_gen->increase_used(region_used); - - // add_old_collector_free_region() increases promoted_reserve() if available space exceeds plab_min_size() - _heap->free_set()->add_promoted_in_place_region_to_old_collector(region); } } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp index 8b9d77055e1da..f18bc4db38341 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp @@ -444,7 +444,7 @@ class ShenandoahGenerationStatsClosure : public ShenandoahHeapRegionClosure { byte_size_in_proper_unit(stats.used()), proper_unit_for_byte_size(stats.used())); } - static void validate_usage(const bool adjust_for_padding, + static void validate_usage(const bool adjust_for_padding, const bool adjust_for_trash, const char* label, ShenandoahGeneration* generation, ShenandoahCalculateRegionStatsClosure& stats) { ShenandoahHeap* heap = ShenandoahHeap::heap(); size_t generation_used = generation->used(); @@ -461,10 +461,10 @@ 
class ShenandoahGenerationStatsClosure : public ShenandoahHeapRegionClosure { // kelvin once thought he needed to use stats.used_after_recycle() // in the following assertion, but maybe not... #endif - - guarantee(stats.used_after_recycle() == generation_used, + size_t stats_used = adjust_for_trash? stats.used_after_recycle(): stats.used(); + guarantee(stats_used == generation_used, "%s: generation (%s) used size must be consistent: generation-used: " PROPERFMT ", regions-used: " PROPERFMT, - label, generation->name(), PROPERFMTARGS(generation_used), PROPERFMTARGS(stats.used_after_recycle())); + label, generation->name(), PROPERFMTARGS(generation_used), PROPERFMTARGS(stats_used)); size_t stats_regions = stats.regions() - stats.trashed_regions(); guarantee(stats_regions == generation_used_regions, @@ -897,11 +897,12 @@ void ShenandoahVerifier::verify_at_safepoint(const char* label, heap_used = _heap->used(); } if (sizeness != _verify_size_disable) { - guarantee(cl.used_after_recycle() == heap_used, + size_t cl_size = (sizeness == _verify_size_including_trash)? cl.used(): cl.used_after_recycle(); + guarantee(cl_size == heap_used, "%s: heap used size must be consistent: heap-used = %zu%s, regions-used = %zu%s", label, byte_size_in_proper_unit(heap_used), proper_unit_for_byte_size(heap_used), - byte_size_in_proper_unit(cl.used_after_recycle()), proper_unit_for_byte_size(cl.used_after_recycle())); + byte_size_in_proper_unit(cl_size), proper_unit_for_byte_size(cl_size)); } size_t heap_committed = _heap->committed(); guarantee(cl.committed() == heap_committed, @@ -953,13 +954,14 @@ void ShenandoahVerifier::verify_at_safepoint(const char* label, ShenandoahGenerationStatsClosure::log_usage(_heap->global_generation(), cl.global); } if (sizeness == _verify_size_adjusted_for_padding) { - ShenandoahGenerationStatsClosure::validate_usage(false, label, _heap->old_generation(), cl.old); - ShenandoahGenerationStatsClosure::validate_usage(true, label, _heap->young_generation(), cl.young); - ShenandoahGenerationStatsClosure::validate_usage(true, label, _heap->global_generation(), cl.global); - } else if (sizeness == _verify_size_exact) { - ShenandoahGenerationStatsClosure::validate_usage(false, label, _heap->old_generation(), cl.old); - ShenandoahGenerationStatsClosure::validate_usage(false, label, _heap->young_generation(), cl.young); - ShenandoahGenerationStatsClosure::validate_usage(false, label, _heap->global_generation(), cl.global); + ShenandoahGenerationStatsClosure::validate_usage(false, false, label, _heap->old_generation(), cl.old); + ShenandoahGenerationStatsClosure::validate_usage(true, false, label, _heap->young_generation(), cl.young); + ShenandoahGenerationStatsClosure::validate_usage(true, false, label, _heap->global_generation(), cl.global); + } else if (sizeness == _verify_size_exact || sizeness == _verify_size_including_trash) { + bool adjust_trash = (sizeness == _verify_size_exact); + ShenandoahGenerationStatsClosure::validate_usage(false, adjust_trash, label, _heap->old_generation(), cl.old); + ShenandoahGenerationStatsClosure::validate_usage(false, adjust_trash, label, _heap->young_generation(), cl.young); + ShenandoahGenerationStatsClosure::validate_usage(false, adjust_trash, label, _heap->global_generation(), cl.global); } // else: sizeness must equal _verify_size_disable } @@ -1175,7 +1177,7 @@ void ShenandoahVerifier::verify_after_update_refs() { _verify_cset_none, // no cset references, all updated _verify_liveness_disable, // no reliable liveness data anymore 
_verify_regions_nocset, // no cset regions, trash regions have appeared - _verify_size_exact, // expect generation and heap sizes to match exactly + _verify_size_including_trash,// expect generation and heap sizes to match exactly, including trash _verify_gcstate_stable // update refs had cleaned up forwarded objects ); } @@ -1439,7 +1441,7 @@ void ShenandoahVerifier::verify_before_rebuilding_free_set() { ShenandoahGenerationStatsClosure cl; _heap->heap_region_iterate(&cl); - ShenandoahGenerationStatsClosure::validate_usage(false, "Before free set rebuild", _heap->old_generation(), cl.old); - ShenandoahGenerationStatsClosure::validate_usage(false, "Before free set rebuild", _heap->young_generation(), cl.young); - ShenandoahGenerationStatsClosure::validate_usage(false, "Before free set rebuild", _heap->global_generation(), cl.global); + ShenandoahGenerationStatsClosure::validate_usage(false, false, "Before free set rebuild", _heap->old_generation(), cl.old); + ShenandoahGenerationStatsClosure::validate_usage(false, false, "Before free set rebuild", _heap->young_generation(), cl.young); + ShenandoahGenerationStatsClosure::validate_usage(false, false, "Before free set rebuild", _heap->global_generation(), cl.global); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp index aba6379e0223c..94bf0409cad99 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp @@ -155,7 +155,10 @@ class ShenandoahVerifier : public CHeapObj { _verify_size_exact, // Expect promote-in-place adjustments: padding inserted to temporarily prevent further allocation in regular regions - _verify_size_adjusted_for_padding + _verify_size_adjusted_for_padding, + + // Expected heap size should not include + _verify_size_including_trash } VerifySize; typedef enum { From 1eb09e5a338855b82ffa910db0e59c969f878ccf Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Wed, 9 Jul 2025 11:20:17 -0600 Subject: [PATCH 11/61] more progress --- .../shenandoahGenerationalControlThread.cpp | 6 ++-- .../gc/shenandoah/shenandoahVerifier.cpp | 34 ++++++++++--------- .../gc/shenandoah/shenandoahVerifier.hpp | 2 +- 3 files changed, 22 insertions(+), 20 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalControlThread.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalControlThread.cpp index 6b33d5207d040..b64a479d469b0 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalControlThread.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalControlThread.cpp @@ -277,11 +277,11 @@ void ShenandoahGenerationalControlThread::run_gc_cycle(const ShenandoahGCRequest if (!_heap->cancelled_gc()) { notify_gc_waiters(); notify_alloc_failure_waiters(); + // Report current free set state at the end of cycle if normal completion. + // Do not report if cancelled, since we may not have rebuilt free set and content is unreliable. + _heap->free_set()->log_status_under_lock(); } - // Report current free set state at the end of cycle, whether - // it is a normal completion, or the abort. - _heap->free_set()->log_status_under_lock(); // Notify Universe about new heap usage. 
This has implications for // global soft refs policy, and we better report it every time heap diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp index f18bc4db38341..4a51b70b8c5f3 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp @@ -375,7 +375,7 @@ class ShenandoahCalculateRegionStatsClosure : public ShenandoahHeapRegionClosure void heap_region_do(ShenandoahHeapRegion* r) override { #define KELVIN_STATS #ifdef KELVIN_STATS - log_info(gc)("ShenandoahCalculateRegiontatsClosure::heap_region_do(), %s region %zu has used: %zu, is_trash: %s", + log_info(gc)("ShenandoahCalculateRegionStatsClosure::heap_region_do(), %s region %zu has used: %zu, is_trash: %s", r->affiliation_name(), r->index(), r->used(), r->is_trash()? "yes": "no"); #endif _used += r->used(); @@ -418,7 +418,7 @@ class ShenandoahGenerationStatsClosure : public ShenandoahHeapRegionClosure { void heap_region_do(ShenandoahHeapRegion* r) override { #ifdef KELVIN_STATS - log_info(gc)("ShenandoahGenerationaStatsClosure::heap_region_do(), %s region %zu has used: %zu, is_trash: %s", + log_info(gc)("ShenandoahGenerationalStatsClosure::heap_region_do(), %s region %zu has used: %zu, is_trash: %s", r->affiliation_name(), r->index(), r->used(), r->is_trash()? "yes": "no"); #endif switch (r->affiliation()) { @@ -456,8 +456,8 @@ class ShenandoahGenerationStatsClosure : public ShenandoahHeapRegionClosure { #define KELVIN_EXTRA_NOISE #ifdef KELVIN_EXTRA_NOISE - log_info(gc)("%s: generation (%s) used size must be consistent: generation-used: %zu, regions-used from stats: %zu, stats.used_after_recycle: %zu", - label, generation->name(), generation_used, stats.used(), stats.used_after_recycle()); + log_info(gc)("%s: generation (%s) used size must be consistent: generation-used: %zu, regions-used from stats: %zu, stats.used_after_recycle: %zu, adjust_for_trash: %s", + label, generation->name(), generation_used, stats.used(), stats.used_after_recycle(), adjust_for_trash? "yes": "no"); // kelvin once thought he needed to use stats.used_after_recycle() // in the following assertion, but maybe not... #endif @@ -466,10 +466,11 @@ class ShenandoahGenerationStatsClosure : public ShenandoahHeapRegionClosure { "%s: generation (%s) used size must be consistent: generation-used: " PROPERFMT ", regions-used: " PROPERFMT, label, generation->name(), PROPERFMTARGS(generation_used), PROPERFMTARGS(stats_used)); - size_t stats_regions = stats.regions() - stats.trashed_regions(); + size_t stats_regions = adjust_for_trash? stats.regions() - stats.trashed_regions(): stats.regions(); guarantee(stats_regions == generation_used_regions, - "%s: generation (%s) used regions (%zu) must equal regions that are in use (%zu) - trashed regions (%zu)", - label, generation->name(), generation->used_regions(), stats.regions(), stats.trashed_regions()); + "%s: generation (%s) used regions (%zu) must equal regions that are in use (%zu)%s", + label, generation->name(), generation->used_regions(), stats_regions, + adjust_for_trash? " (after adjusting for trash)": ""); size_t generation_capacity = generation->max_capacity(); guarantee(stats.non_trashed_span() <= generation_capacity, @@ -897,7 +898,7 @@ void ShenandoahVerifier::verify_at_safepoint(const char* label, heap_used = _heap->used(); } if (sizeness != _verify_size_disable) { - size_t cl_size = (sizeness == _verify_size_including_trash)? 
cl.used(): cl.used_after_recycle(); + size_t cl_size = (sizeness == _verify_size_exact_including_trash)? cl.used(): cl.used_after_recycle(); guarantee(cl_size == heap_used, "%s: heap used size must be consistent: heap-used = %zu%s, regions-used = %zu%s", label, @@ -954,10 +955,10 @@ void ShenandoahVerifier::verify_at_safepoint(const char* label, ShenandoahGenerationStatsClosure::log_usage(_heap->global_generation(), cl.global); } if (sizeness == _verify_size_adjusted_for_padding) { - ShenandoahGenerationStatsClosure::validate_usage(false, false, label, _heap->old_generation(), cl.old); - ShenandoahGenerationStatsClosure::validate_usage(true, false, label, _heap->young_generation(), cl.young); - ShenandoahGenerationStatsClosure::validate_usage(true, false, label, _heap->global_generation(), cl.global); - } else if (sizeness == _verify_size_exact || sizeness == _verify_size_including_trash) { + ShenandoahGenerationStatsClosure::validate_usage(false, true, label, _heap->old_generation(), cl.old); + ShenandoahGenerationStatsClosure::validate_usage(true, true, label, _heap->young_generation(), cl.young); + ShenandoahGenerationStatsClosure::validate_usage(true, true, label, _heap->global_generation(), cl.global); + } else if (sizeness == _verify_size_exact || sizeness == _verify_size_exact_including_trash) { bool adjust_trash = (sizeness == _verify_size_exact); ShenandoahGenerationStatsClosure::validate_usage(false, adjust_trash, label, _heap->old_generation(), cl.old); ShenandoahGenerationStatsClosure::validate_usage(false, adjust_trash, label, _heap->young_generation(), cl.young); @@ -1177,7 +1178,8 @@ void ShenandoahVerifier::verify_after_update_refs() { _verify_cset_none, // no cset references, all updated _verify_liveness_disable, // no reliable liveness data anymore _verify_regions_nocset, // no cset regions, trash regions have appeared - _verify_size_including_trash,// expect generation and heap sizes to match exactly, including trash + // expect generation and heap sizes to match exactly, including trash + _verify_size_exact_including_trash, _verify_gcstate_stable // update refs had cleaned up forwarded objects ); } @@ -1441,7 +1443,7 @@ void ShenandoahVerifier::verify_before_rebuilding_free_set() { ShenandoahGenerationStatsClosure cl; _heap->heap_region_iterate(&cl); - ShenandoahGenerationStatsClosure::validate_usage(false, false, "Before free set rebuild", _heap->old_generation(), cl.old); - ShenandoahGenerationStatsClosure::validate_usage(false, false, "Before free set rebuild", _heap->young_generation(), cl.young); - ShenandoahGenerationStatsClosure::validate_usage(false, false, "Before free set rebuild", _heap->global_generation(), cl.global); + ShenandoahGenerationStatsClosure::validate_usage(false, true, "Before free set rebuild", _heap->old_generation(), cl.old); + ShenandoahGenerationStatsClosure::validate_usage(false, true, "Before free set rebuild", _heap->young_generation(), cl.young); + ShenandoahGenerationStatsClosure::validate_usage(false, true, "Before free set rebuild", _heap->global_generation(), cl.global); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp index 94bf0409cad99..c4b1d6f313cee 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp @@ -158,7 +158,7 @@ class ShenandoahVerifier : public CHeapObj { _verify_size_adjusted_for_padding, // Expected heap size should not include - _verify_size_including_trash + 
_verify_size_exact_including_trash } VerifySize; typedef enum { From 49cc11f61d6c4ce9719cb96b727c3f91c27e8eb8 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Mon, 14 Jul 2025 18:33:51 -0600 Subject: [PATCH 12/61] try_allocate_in decrements empty region count --- src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index 1f40efc53df6e..8cf3d2e74b2ae 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -1439,6 +1439,8 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah _heap->old_generation()->clear_cards_for(r); } _heap->generation_for(r->affiliation())->increment_affiliated_region_count(); + _partitions.decrease_empty_region_counts(_partitions.membership(r->index()), (size_t) 1); + recompute_total_affiliated(); #ifdef ASSERT ShenandoahMarkingContext* const ctx = _heap->marking_context(); assert(ctx->top_at_mark_start(r) == r->bottom(), "Newly established allocation region starts with TAMS equal to bottom"); From 14524cdbac913b0d86495538b0e022965cfa2167 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Tue, 15 Jul 2025 07:21:17 -0600 Subject: [PATCH 13/61] fix to global affiliated counts --- .../share/gc/shenandoah/shenandoahFreeSet.cpp | 25 +++++++++++++------ .../share/gc/shenandoah/shenandoahFreeSet.hpp | 4 +-- 2 files changed, 19 insertions(+), 10 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index 8cf3d2e74b2ae..98a0934bc5c41 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -525,6 +525,10 @@ void ShenandoahRegionPartitions::decrease_empty_region_counts(ShenandoahFreeSetP _empty_region_counts[int(which_partition)] -= regions; } +void ShenandoahRegionPartitions::one_region_is_no_longer_empty(ShenandoahFreeSetPartitionId partition) { + decrease_empty_region_counts(partition, (size_t) 1); +} + // All members of partition between low_idx and high_idx inclusive have been removed. 
void ShenandoahRegionPartitions::shrink_interval_if_range_modifies_either_boundary( ShenandoahFreeSetPartitionId partition, idx_t low_idx, idx_t high_idx) { @@ -670,8 +674,8 @@ void ShenandoahRegionPartitions::retire_range_from_partition( _membership[int(partition)].clear_bit(idx); } size_t num_regions = high_idx + 1 - low_idx; - _region_counts[int(partition)] -= num_regions; - _empty_region_counts[int(partition)] -= num_regions; + decrease_region_counts(partition, num_regions); + decrease_empty_region_counts(partition, num_regions); shrink_interval_if_range_modifies_either_boundary(partition, low_idx, high_idx); } @@ -1439,8 +1443,10 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah _heap->old_generation()->clear_cards_for(r); } _heap->generation_for(r->affiliation())->increment_affiliated_region_count(); - _partitions.decrease_empty_region_counts(_partitions.membership(r->index()), (size_t) 1); - recompute_total_affiliated(); + if (_heap->mode()->is_generational()) { + _heap->global_generation()->increment_affiliated_region_count(); + } + #ifdef ASSERT ShenandoahMarkingContext* const ctx = _heap->marking_context(); assert(ctx->top_at_mark_start(r) == r->bottom(), "Newly established allocation region starts with TAMS equal to bottom"); @@ -1563,12 +1569,12 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah size_t idx = r->index(); request_generation->increase_used(r->free()); - if (in_new_region) { - _partitions.decrease_empty_region_counts(orig_partition, 1); - } if (_heap->mode()->is_generational()) { _heap->global_generation()->increase_used(r->free()); } + if ((result != nullptr) && in_new_region) { + _partitions.one_region_is_no_longer_empty(orig_partition); + } _partitions.retire_from_partition(orig_partition, idx, r->used()); _partitions.assert_bounds(); } else if ((result != nullptr) && in_new_region) { @@ -1673,6 +1679,10 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) { r->set_top(r->bottom() + used_words); } generation->increase_affiliated_region_count(num); + if (_heap->mode()->is_generational()) { + _heap->global_generation()->increase_affiliated_region_count(num); + } + if (remainder != 0) { // Record this remainder as allocation waste _heap->notify_mutator_alloc_words(ShenandoahHeapRegion::region_size_words() - remainder, true); @@ -1701,6 +1711,7 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) { recompute_total_young_used(); recompute_total_global_used(); + recompute_total_affiliated(); return _heap->get_region(beg)->bottom(); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp index f739fd63053fc..d2afca8718aed 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp @@ -158,9 +158,7 @@ class ShenandoahRegionPartitions { _membership[int(p)].set_bit(idx); } - inline void one_region_is_no_longer_empty(ShenandoahFreeSetPartitionId partition) { - _empty_region_counts[int(partition)] -= 1; - } + inline void one_region_is_no_longer_empty(ShenandoahFreeSetPartitionId partition); // Set the Mutator intervals, usage, and capacity according to arguments. Reset the Collector intervals, used, capacity // to represent empty Collector free set. 
We use this at the end of rebuild_free_set() to avoid the overhead of making From 245982939a0a524de5fd594b2b09ea59db097cae Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Tue, 15 Jul 2025 07:41:23 -0600 Subject: [PATCH 14/61] fix assert --- src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index 98a0934bc5c41..0dddb1dca78cf 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -2637,7 +2637,7 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old // be collected in the near future. if (r->is_trash() || !r->is_affiliated()) { // OLD regions that have available memory are already in the old_collector free set. - assert(r->is_empty(), "Not affiliated implies region %zu is empty", r->index()); + assert(r->is_empty() || r->is_trash(), "Not affiliated implies region %zu is empty", r->index()); if (idx < old_collector_low_idx) { old_collector_low_idx = idx; } From 428484822d0d3e7a25ac09c445a1c809e0e8577a Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Thu, 17 Jul 2025 13:50:13 -0600 Subject: [PATCH 15/61] fix multiple accounting errors --- .../share/gc/shenandoah/shenandoahFreeSet.cpp | 133 ++++++++++++++---- .../share/gc/shenandoah/shenandoahFreeSet.hpp | 7 +- .../gc/shenandoah/shenandoahGeneration.cpp | 15 +- .../gc/shenandoah/shenandoahGeneration.hpp | 9 +- .../shenandoah/shenandoahGenerationalHeap.cpp | 10 ++ .../gc/shenandoah/shenandoahOldGeneration.hpp | 8 +- 6 files changed, 143 insertions(+), 39 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index 0dddb1dca78cf..1900d0478995d 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -1527,11 +1527,11 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah // PLABs be made parsable at the end of evacuation. This is enabled by retiring all plabs at end of evacuation. r->set_update_watermark(r->top()); if (r->is_old()) { - _partitions.increase_used(ShenandoahFreeSetPartitionId::OldCollector, req.actual_size() * HeapWordSize); + _partitions.increase_used(ShenandoahFreeSetPartitionId::OldCollector, (req.actual_size() + req.waste()) * HeapWordSize); assert(req.type() != ShenandoahAllocRequest::_alloc_gclab, "old-gen allocations use PLAB or shared allocation"); // for plabs, we'll sort the difference between evac and promotion usage when we retire the plab } else { - _partitions.increase_used(ShenandoahFreeSetPartitionId::Collector, req.actual_size() * HeapWordSize); + _partitions.increase_used(ShenandoahFreeSetPartitionId::Collector, (req.actual_size() + req.waste()) * HeapWordSize); } } } @@ -1568,9 +1568,12 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah // then retire the region so that subsequent searches can find available memory more quickly. 
size_t idx = r->index(); - request_generation->increase_used(r->free()); - if (_heap->mode()->is_generational()) { - _heap->global_generation()->increase_used(r->free()); + size_t free_bytes = r->free(); + if (free_bytes > 0) { + request_generation->increase_used(free_bytes); + if (_heap->mode()->is_generational()) { + _heap->global_generation()->increase_used(free_bytes); + } } if ((result != nullptr) && in_new_region) { _partitions.one_region_is_no_longer_empty(orig_partition); @@ -2008,21 +2011,32 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r size_t mutator_rightmost = 0; size_t mutator_leftmost_empty = max_regions; size_t mutator_rightmost_empty = 0; - size_t mutator_regions = 0; - size_t mutator_used = 0; - size_t mutator_humongous_waste = 0; size_t old_collector_leftmost = max_regions; size_t old_collector_rightmost = 0; size_t old_collector_leftmost_empty = max_regions; size_t old_collector_rightmost_empty = 0; - size_t old_collector_regions = 0; - size_t old_collector_used = 0; - size_t old_collector_humongous_waste = 0; size_t mutator_empty = 0; size_t old_collector_empty = 0; + // These two variables represent the total used within each partition, including humongous waste and retired regions + size_t mutator_used = 0; + size_t old_collector_used = 0; + + // These two variables represent memory that is wasted within humongous regions due to alignment padding + size_t mutator_humongous_waste = 0; + size_t old_collector_humongous_waste = 0; + + // These two variables track regions that have allocatable memory + size_t mutator_regions = 0; + size_t old_collector_regions = 0; + + // These two variables track regions that are not empty within each partition + size_t affiliated_mutator_regions = 0; + size_t affiliated_old_collector_regions = 0; + + // These two variables represent the total capacity of each partition, including retired regions size_t total_mutator_regions = 0; size_t total_old_collector_regions = 0; @@ -2102,6 +2116,7 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r old_collector_rightmost_empty = idx; } } + affiliated_old_collector_regions++; old_collector_regions++; total_old_collector_regions++; old_collector_used += region_size_bytes - ac; @@ -2111,9 +2126,11 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r if (region->is_old()) { old_collector_used += region_size_bytes; total_old_collector_regions++; + affiliated_old_collector_regions++; } else { mutator_used += region_size_bytes; total_mutator_regions++; + affiliated_mutator_regions++; } } } else { @@ -2129,10 +2146,12 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r old_collector_used += region_size_bytes; total_old_collector_regions++; old_collector_humongous_waste += humongous_waste_bytes; + affiliated_old_collector_regions++; } else { mutator_used += region_size_bytes; total_mutator_regions++; mutator_humongous_waste += humongous_waste_bytes; + affiliated_mutator_regions++; } } } @@ -2203,9 +2222,10 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r recompute_total_used(); recompute_total_affiliated(); // Update generations to assure consistency while we still hold the lock. This handles case that someone consults - // generation sizes between now and start of finish_rebuild. + // generation sizes between now and start of finish_rebuild. This may release from old memory that we intend to reserve + // for the old collector. 
establish_generation_sizes(total_mutator_regions, total_old_collector_regions, - mutator_regions, old_collector_regions, + affiliated_mutator_regions, affiliated_old_collector_regions, mutator_used, old_collector_used); log_debug(gc, free)(" After find_regions_with_alloc_capacity(), Mutator range [%zd, %zd]," @@ -2410,18 +2430,63 @@ void ShenandoahFreeSet::prepare_to_rebuild(size_t &young_trashed_regions, size_t find_regions_with_alloc_capacity(young_trashed_regions, old_trashed_regions, first_old_region, last_old_region, old_region_count); } + + // The totals reported here anticipate the recycling of trash regions. Their memory is counted as unused and fully // available at this moment in time, even though the memory cannot be re-allocated until after it is recycled. void ShenandoahFreeSet::establish_generation_sizes(size_t young_region_count, size_t old_region_count, - size_t young_used_regions, size_t old_used_regions, + size_t affiliated_young_regions, size_t affiliated_old_regions, size_t young_used_bytes, size_t old_used_bytes) { assert(young_region_count + old_region_count == ShenandoahHeap::heap()->num_regions(), "Sanity"); #define KELVIN_RESERVE #ifdef KELVIN_RESERVE - log_info(gc)("establish_generation_sizes(young_region_count: %zu, old_region_count: %zu, young_used_regions: %zu,", - young_region_count, old_region_count, young_used_regions); - log_info(gc)(" old_used_regions: %zu, young_used_bytes: %zu, old_used_bytes: %zu)", - old_used_regions, young_used_bytes, old_used_bytes); + log_info(gc)("establish_generation_sizes(young_region_count: %zu, old_region_count: %zu, ", + young_region_count, old_region_count); + log_info(gc)(" young_used_bytes: %zu, old_used_bytes: %zu)", + young_used_bytes, old_used_bytes); +#endif + if (ShenandoahHeap::heap()->mode()->is_generational()) { + ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap(); + ShenandoahOldGeneration* old_gen = heap->old_generation(); + ShenandoahYoungGeneration* young_gen = heap->young_generation(); + ShenandoahGeneration* global_gen = heap->global_generation(); + + size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); + + size_t original_old_capacity = old_gen->max_capacity(); + size_t new_old_capacity = old_region_count * region_size_bytes; + size_t new_young_capacity = young_region_count * region_size_bytes; + old_gen->set_capacity(new_old_capacity); + old_gen->set_used(affiliated_old_regions, old_used_bytes); + young_gen->set_capacity(new_young_capacity); + young_gen->set_used(affiliated_young_regions, young_used_bytes); + global_gen->set_capacity(new_young_capacity + new_old_capacity); + global_gen->set_used(affiliated_young_regions + affiliated_old_regions, young_used_bytes + old_used_bytes); + + if (new_old_capacity > original_old_capacity) { + size_t region_count = (new_old_capacity - original_old_capacity) / region_size_bytes; + log_info(gc, ergo)("Transfer %zu region(s) from %s to %s, yielding increased size: " PROPERFMT, + region_count, young_gen->name(), old_gen->name(), PROPERFMTARGS(new_old_capacity)); + } else if (new_old_capacity < original_old_capacity) { + size_t region_count = (original_old_capacity - new_old_capacity) / region_size_bytes; + log_info(gc, ergo)("Transfer %zu region(s) from %s to %s, yielding increased size: " PROPERFMT, + region_count, old_gen->name(), young_gen->name(), PROPERFMTARGS(new_young_capacity)); + } + } +} + +// As part of finish_rebuild(), we reestablish generation sizes, which were originally established during prepare_to_rebuild(). 
+// During finish_rebuild(), we will reserve regions for the collector and old collector by transferring some regions from +// the Mutator partition. Regions transferred from Mutator to Collector have no effect on generation sizes. Regions transferred +// from Mutator to OldCollector may increase capacity of old and decrease capacity of young, but will have no effect on used +// within either generation because only empty regions are transferred. +void ShenandoahFreeSet::reestablish_generation_sizes(size_t young_region_count, size_t old_region_count) { + + assert(young_region_count + old_region_count == ShenandoahHeap::heap()->num_regions(), "Sanity"); +#define KELVIN_RESERVE +#ifdef KELVIN_RESERVE + log_info(gc)("reestablish_generation_sizes(young_region_count: %zu, old_region_count: %zu, ", + young_region_count, old_region_count); #endif if (ShenandoahHeap::heap()->mode()->is_generational()) { ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap(); @@ -2435,11 +2500,8 @@ void ShenandoahFreeSet::establish_generation_sizes(size_t young_region_count, si size_t new_old_capacity = old_region_count * region_size_bytes; size_t new_young_capacity = young_region_count * region_size_bytes; old_gen->set_capacity(new_old_capacity); - old_gen->set_used(old_used_regions, old_used_bytes); young_gen->set_capacity(new_young_capacity); - young_gen->set_used(young_used_regions, young_used_bytes); global_gen->set_capacity(new_young_capacity + new_old_capacity); - global_gen->set_used(young_used_regions + old_used_regions, young_used_bytes + old_used_bytes); if (new_old_capacity > original_old_capacity) { size_t region_count = (new_old_capacity - original_old_capacity) / region_size_bytes; @@ -2470,12 +2532,11 @@ void ShenandoahFreeSet::finish_rebuild(size_t young_trashed_regions, size_t old_ // Move some of the mutator regions into the Collector and OldCollector partitions in order to satisfy // young_reserve and old_reserve. - size_t young_used_regions, old_used_regions, young_used_bytes, old_used_bytes; - reserve_regions(young_reserve, old_reserve, old_region_count, - young_used_regions, old_used_regions, young_used_bytes, old_used_bytes); + size_t young_used_regions, old_used_regions, young_used_bytes, old_used_bytes, affiliated_young_regions, affiliated_old_regions; + reserve_regions(young_reserve, old_reserve, old_region_count, young_used_regions, old_used_regions, + young_used_bytes, old_used_bytes); size_t young_region_count = _heap->num_regions() - old_region_count; - establish_generation_sizes(young_region_count, old_region_count, - young_used_regions, old_used_regions, young_used_bytes, old_used_bytes); + reestablish_generation_sizes(young_region_count, old_region_count); establish_old_collector_alloc_bias(); _partitions.assert_bounds(); log_status(); @@ -2500,8 +2561,9 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_trashed_regi young_unaffiliated_regions += young_trashed_regions; // Consult old-region balance to make adjustments to current generation capacities and availability. - // The generation region transfers take place after we rebuild. - const ssize_t old_region_balance = old_generation->get_region_balance(); + // The generation region transfers take place after we rebuild. old_region_balance represents number of regions + // to transfer from old to young. 
+ ssize_t old_region_balance = old_generation->get_region_balance(); if (old_region_balance != 0) { #ifdef ASSERT if (old_region_balance > 0) { @@ -2528,9 +2590,18 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_trashed_regi const size_t old_evac_reserve = old_generation->get_evacuation_reserve(); young_reserve_result = young_generation->get_evacuation_reserve(); old_reserve_result = promoted_reserve + old_evac_reserve; - assert(old_reserve_result <= old_available, - "Cannot reserve (%zu + %zu) more OLD than is available: %zu", - promoted_reserve, old_evac_reserve, old_available); + if (old_reserve_result > old_available) { + // Try to transfer memory from young to old. + size_t old_deficit = old_reserve_result - old_available; + size_t old_region_deficit = (old_deficit + region_size_bytes - 1) / region_size_bytes; + if (young_unaffiliated_regions < old_region_deficit) { + old_region_deficit = young_unaffiliated_regions; + } + young_unaffiliated_regions -= old_region_deficit; + old_unaffiliated_regions += old_region_deficit; + old_region_balance -= old_region_deficit; + old_generation->set_region_balance(old_region_balance); + } } else { // We are rebuilding at end of GC, so we set aside budgets specified on command line (or defaults) young_reserve_result = (young_capacity * ShenandoahEvacReserve) / 100; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp index d2afca8718aed..71b42bcce6327 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp @@ -579,8 +579,11 @@ class ShenandoahFreeSet : public CHeapObj { // Set max_capacity for young and old generations void establish_generation_sizes(size_t young_region_count, size_t old_region_count, - size_t young_used_regions, size_t old_used_regions, - size_t young_used_bytes, size_t old_used_bytes); + size_t affiliated_young_regions, size_t affiliated_old_regions, + size_t young_used_bytes, size_t old_used_bytes); + + void reestablish_generation_sizes(size_t young_region_count, size_t old_region_count); + size_t get_usable_free_words(size_t free_bytes) const; // log status, assuming lock has already been acquired by the caller. 
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp index c268e977aa550..882adfd77a777 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp @@ -984,9 +984,20 @@ size_t ShenandoahGeneration::used_regions() const { } size_t original_result = Atomic::load(&_affiliated_region_count); #ifdef KELVIN_SCAFFOLDING + static int problem_count = 0; if (result != original_result) { - log_info(gc)("Problem with used for generation %s, freeset thinks %zu, generation thinks: %zu", - shenandoah_generation_name(_type), result, original_result); + log_info(gc)("Problem with used regions for generation %s, freeset thinks %zu, generation thinks: %zu", + shenandoah_generation_name(_type), result, original_result); + if (problem_count++ > 8) { + assert(result == original_result, "Out of sync in used_regions for generation %s, freeset: %zu, generation: %zu", + shenandoah_generation_name(_type), result, original_result); + } + } else { + if (problem_count > 0) { + problem_count = 0; + log_info(gc)("used regions for generation %s is back in sync: %zu", + shenandoah_generation_name(_type), result); + } } #endif return result; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp index 4b207358cdaa2..22c933cb9f6f1 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp @@ -164,6 +164,9 @@ class ShenandoahGeneration : public CHeapObj, public ShenandoahSpaceInfo { shenandoah_generation_name(_type), result, original_result); } } else { + if (problem_count > 0) { + log_info(gc)("Used for generation %s is back in sync: %zu", shenandoah_generation_name(_type), result); + } problem_count = 0; } #endif @@ -197,11 +200,11 @@ class ShenandoahGeneration : public CHeapObj, public ShenandoahSpaceInfo { // Set the capacity of the generation, returning the value set size_t set_capacity(size_t byte_size); - void set_used(size_t region_count, size_t byte_count) { + void set_used(size_t affiliated_region_count, size_t byte_count) { Atomic::store(&_used, byte_count); - Atomic::store(&_affiliated_region_count, region_count); + Atomic::store(&_affiliated_region_count, affiliated_region_count); #ifdef KELVIN_SCAFFOLDING - log_info(gc)("%s:set_used(regions: %zu, bytes: %zu)", shenandoah_generation_name(_type), region_count, byte_count); + log_info(gc)("%s:set_used(regions: %zu, bytes: %zu)", shenandoah_generation_name(_type), affiliated_region_count, byte_count); #endif } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp index 442ffed329644..a216d96e44f86 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp @@ -650,6 +650,10 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t old_xfer_ const double max_old_reserve = (ShenandoahOldEvacRatioPercent == 100)? 
bound_on_old_reserve: MIN2(double(young_reserve * ShenandoahOldEvacRatioPercent) / double(100 - ShenandoahOldEvacRatioPercent), bound_on_old_reserve); +#define KELVIN_REBALANCE +#ifdef KELVIN_REBALANCE + log_info(gc)("compute_old_gen_balance(%zu, %zu)", old_xfer_limit, old_cset_regions); +#endif const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); @@ -690,6 +694,9 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t old_xfer_ const size_t old_surplus = (max_old_available - old_reserve) / region_size_bytes; const size_t unaffiliated_old_regions = old_generation()->free_unaffiliated_regions() + old_cset_regions; const size_t old_region_surplus = MIN2(old_surplus, unaffiliated_old_regions); +#ifdef KELVIN_REBALANCE + log_info(gc)("surplus of old, transferring %zd regions to young", old_region_surplus); +#endif old_generation()->set_region_balance(checked_cast(old_region_surplus)); } else { // We are running a deficit which we'd like to fill from young. @@ -703,6 +710,9 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t old_xfer_ // on young-gen memory, we restrict the xfer. Old-gen collection activities will be // curtailed if the budget is restricted. const size_t old_region_deficit = MIN2(old_need, max_old_region_xfer); +#ifdef KELVIN_REBALANCE + log_info(gc)("deficit of old, transferring %zd regions to old", old_region_deficit); +#endif old_generation()->set_region_balance(0 - checked_cast(old_region_deficit)); } } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp index b70a8d33b9560..35b109aea3add 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp @@ -135,7 +135,13 @@ class ShenandoahOldGeneration : public ShenandoahGeneration { void configure_plab_for_current_thread(const ShenandoahAllocRequest &req); // See description in field declaration - void set_region_balance(ssize_t balance) { _region_balance = balance; } + void set_region_balance(ssize_t balance) { +#define KELVIN_BALANCIO +#ifdef KELVIN_BALANCIO + log_info(gc)("OldGen::set_region_balance(%zd)", balance); +#endif + _region_balance = balance; + } ssize_t get_region_balance() const { return _region_balance; } // See description in field declaration void set_promotion_potential(size_t val) { _promotion_potential = val; }; From 620dcfc789c713da2b48172dae770b49451891be Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Sun, 20 Jul 2025 09:37:02 -0600 Subject: [PATCH 16/61] more bug fixes and more asserts --- .../share/gc/shenandoah/shenandoahFreeSet.cpp | 119 +++++++++++++-- .../share/gc/shenandoah/shenandoahFreeSet.hpp | 2 +- .../shenandoahGenerationalEvacuationTask.cpp | 3 +- .../share/gc/shenandoah/shenandoahHeap.cpp | 2 +- .../gc/shenandoah/shenandoahVerifier.cpp | 136 ++++++++++-------- 5 files changed, 192 insertions(+), 70 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index 1900d0478995d..aff6671220759 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -946,7 +946,17 @@ idx_t ShenandoahRegionPartitions::rightmost_empty(ShenandoahFreeSetPartitionId w #ifdef ASSERT -void ShenandoahRegionPartitions::assert_bounds() { +void ShenandoahRegionPartitions::assert_bounds(bool validate_totals) { + + size_t capacities[UIntNumPartitions]; 
+ size_t used[UIntNumPartitions]; + size_t regions[UIntNumPartitions]; + + // We don't know whether young retired regions belonged to Mutator or Collector before they were retired. + // We just tally the total, and divide it to make matches work if possible. + size_t young_retired_regions = 0; + size_t young_retired_used = 0; + size_t young_retired_capacity = 0; idx_t leftmosts[UIntNumPartitions]; idx_t rightmosts[UIntNumPartitions]; @@ -958,13 +968,31 @@ void ShenandoahRegionPartitions::assert_bounds() { empty_leftmosts[i] = _max; rightmosts[i] = -1; empty_rightmosts[i] = -1; + capacities[i] = 0; + used[i] = 0; + regions[i] = 0; } for (idx_t i = 0; i < _max; i++) { ShenandoahFreeSetPartitionId partition = membership(i); switch (partition) { case ShenandoahFreeSetPartitionId::NotFree: - break; + { + size_t capacity = _free_set->alloc_capacity(i); + assert(!validate_totals || (capacity != _region_size_bytes), "Should not be retired if empty"); + ShenandoahHeapRegion* r = ShenandoahHeap::heap()->get_region(i); + if (r->is_old()) { + regions[int(ShenandoahFreeSetPartitionId::OldCollector)]++; + used[int(ShenandoahFreeSetPartitionId::OldCollector)] += _region_size_bytes - capacity; + capacities[int(ShenandoahFreeSetPartitionId::OldCollector)] += _region_size_bytes; + } else { + assert(r->is_young(), "Must be young if not old"); + young_retired_regions++; + young_retired_used += _region_size_bytes - capacity; + young_retired_capacity += _region_size_bytes; + } + } + break; case ShenandoahFreeSetPartitionId::Mutator: case ShenandoahFreeSetPartitionId::Collector: @@ -973,6 +1001,10 @@ void ShenandoahRegionPartitions::assert_bounds() { size_t capacity = _free_set->alloc_capacity(i); bool is_empty = (capacity == _region_size_bytes); assert(capacity > 0, "free regions must have allocation capacity"); + regions[int(partition)]++; + used[int(partition)] += _region_size_bytes - capacity; + capacities[int(partition)] += _region_size_bytes; + if (i < leftmosts[int(partition)]) { leftmosts[int(partition)] = i; } @@ -1093,6 +1125,71 @@ void ShenandoahRegionPartitions::assert_bounds() { assert (end_off <= _rightmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)], "OldCollector free empty regions past the rightmost: %zd, bound %zd", end_off, rightmost_empty(ShenandoahFreeSetPartitionId::OldCollector)); + + if (validate_totals) { + assert(capacities[int(ShenandoahFreeSetPartitionId::OldCollector)] == _capacity[int(ShenandoahFreeSetPartitionId::OldCollector)], + "Old collector capacities must match"); + assert(used[int(ShenandoahFreeSetPartitionId::OldCollector)] == _used[int(ShenandoahFreeSetPartitionId::OldCollector)], + "Old collector used must match"); + assert(regions[int(ShenandoahFreeSetPartitionId::OldCollector)] == _total_region_counts[int(ShenandoahFreeSetPartitionId::OldCollector)], + "Old collector regions must match"); + assert(_capacity[int(ShenandoahFreeSetPartitionId::OldCollector)] >= _used[int(ShenandoahFreeSetPartitionId::OldCollector)], + "Old Collector capacity must be >= used"); + assert(_available[int(ShenandoahFreeSetPartitionId::OldCollector)] == + (_capacity[int(ShenandoahFreeSetPartitionId::OldCollector)] - _used[int(ShenandoahFreeSetPartitionId::OldCollector)]), + "Old Collector available must equal capacity minus used"); + + assert(_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] >= capacities[int(ShenandoahFreeSetPartitionId::Mutator)], + "Capacity tally must be >= counted tally"); + size_t mutator_capacity_delta = + 
_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] - capacities[int(ShenandoahFreeSetPartitionId::Mutator)]; + assert(mutator_capacity_delta <= young_retired_capacity, "sanity"); + capacities[int(ShenandoahFreeSetPartitionId::Mutator)] += mutator_capacity_delta; + young_retired_capacity -= mutator_capacity_delta; + capacities[int(ShenandoahFreeSetPartitionId::Collector)] += young_retired_capacity; + + assert(_used[int(ShenandoahFreeSetPartitionId::Mutator)] >= used[int(ShenandoahFreeSetPartitionId::Mutator)], + "Used tally must be >= counted tally"); + size_t mutator_used_delta = + _used[int(ShenandoahFreeSetPartitionId::Mutator)] - used[int(ShenandoahFreeSetPartitionId::Mutator)]; + assert(mutator_used_delta <= young_retired_used, "sanity"); + used[int(ShenandoahFreeSetPartitionId::Mutator)] += mutator_used_delta; + young_retired_used -= mutator_used_delta; + used[int(ShenandoahFreeSetPartitionId::Collector)] += young_retired_used; + + assert(_total_region_counts[int(ShenandoahFreeSetPartitionId::Mutator)] >= regions[int(ShenandoahFreeSetPartitionId::Mutator)], + "Region tally must be >= counted tally"); + size_t mutator_regions_delta = + _total_region_counts[int(ShenandoahFreeSetPartitionId::Mutator)] - regions[int(ShenandoahFreeSetPartitionId::Mutator)]; + assert(mutator_regions_delta <= young_retired_regions, "sanity"); + regions[int(ShenandoahFreeSetPartitionId::Mutator)] += mutator_regions_delta; + young_retired_regions -= mutator_regions_delta; + regions[int(ShenandoahFreeSetPartitionId::Collector)] += young_retired_regions; + + assert(capacities[int(ShenandoahFreeSetPartitionId::Collector)] == _capacity[int(ShenandoahFreeSetPartitionId::Collector)], + "Collector capacities must match"); + assert(used[int(ShenandoahFreeSetPartitionId::Collector)] == _used[int(ShenandoahFreeSetPartitionId::Collector)], + "Collector used must match"); + assert(regions[int(ShenandoahFreeSetPartitionId::Collector)] == _total_region_counts[int(ShenandoahFreeSetPartitionId::Collector)], + "Collector regions must match"); + assert(_capacity[int(ShenandoahFreeSetPartitionId::Collector)] >= _used[int(ShenandoahFreeSetPartitionId::Collector)], + "Collector capacity must be >= used"); + assert(_available[int(ShenandoahFreeSetPartitionId::Collector)] == + (_capacity[int(ShenandoahFreeSetPartitionId::Collector)] - _used[int(ShenandoahFreeSetPartitionId::Collector)]), + "Collector available must equal capacity minus used"); + + assert(capacities[int(ShenandoahFreeSetPartitionId::Mutator)] == _capacity[int(ShenandoahFreeSetPartitionId::Mutator)], + "Mutator capacities must match"); + assert(used[int(ShenandoahFreeSetPartitionId::Mutator)] == _used[int(ShenandoahFreeSetPartitionId::Mutator)], + "Mutator used must match"); + assert(regions[int(ShenandoahFreeSetPartitionId::Mutator)] == _total_region_counts[int(ShenandoahFreeSetPartitionId::Mutator)], + "Mutator regions must match"); + assert(_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] >= _used[int(ShenandoahFreeSetPartitionId::Mutator)], + "Mutator capacity must be >= used"); + assert(_available[int(ShenandoahFreeSetPartitionId::Mutator)] == + (_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] - _used[int(ShenandoahFreeSetPartitionId::Mutator)]), + "Mutator available must equal capacity minus used"); + } } #endif @@ -1146,6 +1243,7 @@ void ShenandoahFreeSet::add_promoted_in_place_region_to_old_collector(Shenandoah } recompute_total_used(); recompute_total_affiliated(); + _partitions.assert_bounds(true); #ifdef KELVIN_CAPACITY log_info(gc)("After 
add_pip_to_old(%zu), Mutate used: %zu, capacity: %zu, available: %zu, region_counts: %zu, total_regions: %zu", region->index(), @@ -1579,12 +1677,12 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah _partitions.one_region_is_no_longer_empty(orig_partition); } _partitions.retire_from_partition(orig_partition, idx, r->used()); - _partitions.assert_bounds(); } else if ((result != nullptr) && in_new_region) { _partitions.one_region_is_no_longer_empty(orig_partition); } recompute_total_used(); recompute_total_affiliated(); // could optimize: only recompute affiliated for orig_partition and global + _partitions.assert_bounds(true); return result; } @@ -1695,7 +1793,6 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) { _partitions.retire_range_from_partition(ShenandoahFreeSetPartitionId::Mutator, beg, end); size_t total_humongous_size = ShenandoahHeapRegion::region_size_bytes() * num; _partitions.increase_used(ShenandoahFreeSetPartitionId::Mutator, total_humongous_size); - _partitions.assert_bounds(); req.set_actual_size(words_size); if (remainder != 0) { size_t waste = ShenandoahHeapRegion::region_size_words() - remainder; @@ -1715,6 +1812,7 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) { recompute_total_young_used(); recompute_total_global_used(); recompute_total_affiliated(); + _partitions.assert_bounds(true); return _heap->get_region(beg)->bottom(); } @@ -1911,10 +2009,10 @@ bool ShenandoahFreeSet::flip_to_old_gc(ShenandoahHeapRegion* r) { if (transferred) { _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator, ShenandoahFreeSetPartitionId::OldCollector, region_capacity); - _partitions.assert_bounds(); _heap->old_generation()->augment_evacuation_reserve(region_capacity); recompute_total_used(); recompute_total_affiliated(); + _partitions.assert_bounds(true); return true; } @@ -1945,9 +2043,9 @@ bool ShenandoahFreeSet::flip_to_old_gc(ShenandoahHeapRegion* r) { _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator, ShenandoahFreeSetPartitionId::OldCollector, region_capacity); - _partitions.assert_bounds(); recompute_total_used(); recompute_total_affiliated(); + _partitions.assert_bounds(true); // 4. Do not adjust capacities for generations, we just swapped the regions that have already // been accounted for. However, we should adjust the evacuation reserves as those may have changed. shenandoah_assert_heaplocked(); @@ -1970,9 +2068,9 @@ void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) { size_t ac = alloc_capacity(r); _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator, ShenandoahFreeSetPartitionId::Collector, ac); - _partitions.assert_bounds(); recompute_total_used(); recompute_total_affiliated(); + _partitions.assert_bounds(true); // We do not ensure that the region is no longer trash, relying on try_allocate_in(), which always comes next, // to recycle trash before attempting to allocate anything in the region. } @@ -2221,6 +2319,7 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r old_collector_humongous_waste); recompute_total_used(); recompute_total_affiliated(); + _partitions.assert_bounds(true); // Update generations to assure consistency while we still hold the lock. This handles case that someone consults // generation sizes between now and start of finish_rebuild. 
This may release from old memory that we intend to reserve // for the old collector. @@ -2257,6 +2356,7 @@ void ShenandoahFreeSet::transfer_humongous_regions_from_mutator_to_old_collector recompute_total_young_used(); recompute_total_old_used(); recompute_total_affiliated(); + _partitions.assert_bounds(true); // global_used is unaffected by this transfer // No need to adjust ranges because humongous regions are not allocatable @@ -2312,6 +2412,7 @@ size_t ShenandoahFreeSet::transfer_empty_regions_from_collector_set_to_mutator_s _partitions.increase_empty_region_counts(ShenandoahFreeSetPartitionId::Mutator, transferred_regions); recompute_total_used(); + _partitions.assert_bounds(true); // Should not need to recompute_total_affiliated() because all transferred regions are empty. return transferred_regions; } @@ -2369,6 +2470,7 @@ transfer_non_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPa recompute_total_used(); recompute_total_affiliated(); + _partitions.assert_bounds(true); return transferred_regions; } @@ -2538,7 +2640,7 @@ void ShenandoahFreeSet::finish_rebuild(size_t young_trashed_regions, size_t old_ size_t young_region_count = _heap->num_regions() - old_region_count; reestablish_generation_sizes(young_region_count, old_region_count); establish_old_collector_alloc_bias(); - _partitions.assert_bounds(); + _partitions.assert_bounds(true); log_status(); } @@ -2916,6 +3018,7 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old recompute_total_used(); recompute_total_affiliated(); + _partitions.assert_bounds(true); if (LogTarget(Info, gc, free)::is_enabled()) { size_t old_reserve = _partitions.available_in(ShenandoahFreeSetPartitionId::OldCollector); if (old_reserve < to_reserve_old) { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp index 71b42bcce6327..d2b573fb6b31f 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp @@ -385,7 +385,7 @@ class ShenandoahRegionPartitions { // idx >= leftmost && // idx <= rightmost // } - void assert_bounds() NOT_DEBUG_RETURN; + void assert_bounds(bool validate_totals) NOT_DEBUG_RETURN; }; // Publicly, ShenandoahFreeSet represents memory that is available to mutator threads. 
The public capacity(), used(), diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp index 185a619568a6a..7d0d29b798dc4 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp @@ -249,6 +249,8 @@ void ShenandoahGenerationalEvacuationTask::promote_in_place(ShenandoahHeapRegion region_used += available_in_region; } + region->set_affiliation(OLD_GENERATION); + // add_old_collector_free_region() increases promoted_reserve() if available space exceeds plab_min_size() _heap->free_set()->add_promoted_in_place_region_to_old_collector(region); @@ -257,7 +259,6 @@ void ShenandoahGenerationalEvacuationTask::promote_in_place(ShenandoahHeapRegion // transfer_to_old() increases capacity of old and decreases capacity of young _heap->generation_sizer()->force_transfer_to_old(1); - region->set_affiliation(OLD_GENERATION); old_gen->increment_affiliated_region_count(); old_gen->increase_used(region_used); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp index f55af02085b7c..61c0265938238 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -405,7 +405,6 @@ jint ShenandoahHeap::initialize() { { ShenandoahHeapLocker locker(lock()); - _free_set = new ShenandoahFreeSet(this, _num_regions); for (size_t i = 0; i < _num_regions; i++) { HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i; bool is_committed = i < num_committed_regions; @@ -420,6 +419,7 @@ jint ShenandoahHeap::initialize() { _affiliations[i] = ShenandoahAffiliation::FREE; } + _free_set = new ShenandoahFreeSet(this, _num_regions); post_initialize_heuristics(); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp index 4a51b70b8c5f3..47b7910ee80f5 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp @@ -368,15 +368,23 @@ class ShenandoahVerifyOopClosure : public BasicOopIterateClosure { class ShenandoahCalculateRegionStatsClosure : public ShenandoahHeapRegionClosure { private: size_t _used, _committed, _garbage, _regions, _humongous_waste, _trashed_regions, _trashed_used; +#define KELVIN_VERBOSE +#ifdef KELVIN_VERBOSE + const char* _nm; +#endif public: +#ifdef KELVIN_VERBOSE + ShenandoahCalculateRegionStatsClosure(const char *name) : + _used(0), _committed(0), _garbage(0), _regions(0), _humongous_waste(0), _trashed_regions(0), _trashed_used(0), _nm(name) {}; +#else ShenandoahCalculateRegionStatsClosure() : _used(0), _committed(0), _garbage(0), _regions(0), _humongous_waste(0), _trashed_regions(0), _trashed_used(0) {}; - +#endif void heap_region_do(ShenandoahHeapRegion* r) override { #define KELVIN_STATS #ifdef KELVIN_STATS - log_info(gc)("ShenandoahCalculateRegionStatsClosure::heap_region_do(), %s region %zu has used: %zu, is_trash: %s", - r->affiliation_name(), r->index(), r->used(), r->is_trash()? "yes": "no"); + log_info(gc)("%s:ShenandoahCalculateRegionStatsClosure::heap_region_do(), %s r: %zu used: %zu, garbage: %zu, is_trash: %s", + _nm, r->affiliation_name(), r->index(), r->used(), r->garbage(), r->is_trash()? 
"yes": "no"); #endif _used += r->used(); _garbage += r->garbage(); @@ -412,9 +420,15 @@ class ShenandoahCalculateRegionStatsClosure : public ShenandoahHeapRegionClosure class ShenandoahGenerationStatsClosure : public ShenandoahHeapRegionClosure { public: - ShenandoahCalculateRegionStatsClosure old; - ShenandoahCalculateRegionStatsClosure young; - ShenandoahCalculateRegionStatsClosure global; +#ifdef KELVIN_VERBOSE + ShenandoahCalculateRegionStatsClosure _old = ShenandoahCalculateRegionStatsClosure("Old"); + ShenandoahCalculateRegionStatsClosure _young = ShenandoahCalculateRegionStatsClosure("Young"); + ShenandoahCalculateRegionStatsClosure _global = ShenandoahCalculateRegionStatsClosure("Global"); +#else + ShenandoahCalculateRegionStatsClosure _old; + ShenandoahCalculateRegionStatsClosure _young; + ShenandoahCalculateRegionStatsClosure _global; +#endif void heap_region_do(ShenandoahHeapRegion* r) override { #ifdef KELVIN_STATS @@ -425,12 +439,12 @@ class ShenandoahGenerationStatsClosure : public ShenandoahHeapRegionClosure { case FREE: return; case YOUNG_GENERATION: - young.heap_region_do(r); - global.heap_region_do(r); + _young.heap_region_do(r); + _global.heap_region_do(r); break; case OLD_GENERATION: - old.heap_region_do(r); - global.heap_region_do(r); + _old.heap_region_do(r); + _global.heap_region_do(r); break; default: ShouldNotReachHere(); @@ -487,15 +501,15 @@ class ShenandoahGenerationStatsClosure : public ShenandoahHeapRegionClosure { }; class ShenandoahVerifyHeapRegionClosure : public ShenandoahHeapRegionClosure { -private: + private: ShenandoahHeap* _heap; const char* _phase; ShenandoahVerifier::VerifyRegions _regions; -public: + public: ShenandoahVerifyHeapRegionClosure(const char* phase, ShenandoahVerifier::VerifyRegions regions) : - _heap(ShenandoahHeap::heap()), - _phase(phase), - _regions(regions) {}; + _heap(ShenandoahHeap::heap()), + _phase(phase), + _regions(regions) {}; void print_failure(ShenandoahHeapRegion* r, const char* label) { ResourceMark rm; @@ -582,7 +596,7 @@ class ShenandoahVerifyHeapRegionClosure : public ShenandoahHeapRegionClosure { }; class ShenandoahVerifierReachableTask : public WorkerTask { -private: + private: const char* _label; ShenandoahVerifier::VerifyOptions _options; ShenandoahHeap* _heap; @@ -590,18 +604,18 @@ class ShenandoahVerifierReachableTask : public WorkerTask { MarkBitMap* _bitmap; volatile size_t _processed; -public: + public: ShenandoahVerifierReachableTask(MarkBitMap* bitmap, ShenandoahLivenessData* ld, const char* label, ShenandoahVerifier::VerifyOptions options) : - WorkerTask("Shenandoah Verifier Reachable Objects"), - _label(label), - _options(options), - _heap(ShenandoahHeap::heap()), - _ld(ld), - _bitmap(bitmap), - _processed(0) {}; + WorkerTask("Shenandoah Verifier Reachable Objects"), + _label(label), + _options(options), + _heap(ShenandoahHeap::heap()), + _ld(ld), + _bitmap(bitmap), + _processed(0) {}; size_t processed() const { return _processed; @@ -617,14 +631,14 @@ class ShenandoahVerifierReachableTask : public WorkerTask { // extended parallelism would buy us out. 
if (((ShenandoahVerifyLevel == 2) && (worker_id == 0)) || (ShenandoahVerifyLevel >= 3)) { - ShenandoahVerifyOopClosure cl(&stack, _bitmap, _ld, - ShenandoahMessageBuffer("%s, Roots", _label), - _options); - if (_heap->unload_classes()) { - ShenandoahRootVerifier::strong_roots_do(&cl); - } else { - ShenandoahRootVerifier::roots_do(&cl); - } + ShenandoahVerifyOopClosure cl(&stack, _bitmap, _ld, + ShenandoahMessageBuffer("%s, Roots", _label), + _options); + if (_heap->unload_classes()) { + ShenandoahRootVerifier::strong_roots_do(&cl); + } else { + ShenandoahRootVerifier::roots_do(&cl); + } } size_t processed = 0; @@ -645,7 +659,7 @@ class ShenandoahVerifierReachableTask : public WorkerTask { }; class ShenandoahVerifyNoIncompleteSatbBuffers : public ThreadClosure { -public: + public: void do_thread(Thread* thread) override { SATBMarkQueue& queue = ShenandoahThreadLocalData::satb_mark_queue(thread); if (!queue.is_empty()) { @@ -655,7 +669,7 @@ class ShenandoahVerifyNoIncompleteSatbBuffers : public ThreadClosure { }; class ShenandoahVerifierMarkedRegionTask : public WorkerTask { -private: + private: const char* _label; ShenandoahVerifier::VerifyOptions _options; ShenandoahHeap *_heap; @@ -665,20 +679,20 @@ class ShenandoahVerifierMarkedRegionTask : public WorkerTask { volatile size_t _processed; ShenandoahGeneration* _generation; -public: + public: ShenandoahVerifierMarkedRegionTask(MarkBitMap* bitmap, ShenandoahLivenessData* ld, const char* label, ShenandoahVerifier::VerifyOptions options) : - WorkerTask("Shenandoah Verifier Marked Objects"), - _label(label), - _options(options), - _heap(ShenandoahHeap::heap()), - _bitmap(bitmap), - _ld(ld), - _claimed(0), - _processed(0), - _generation(nullptr) { + WorkerTask("Shenandoah Verifier Marked Objects"), + _label(label), + _options(options), + _heap(ShenandoahHeap::heap()), + _bitmap(bitmap), + _ld(ld), + _claimed(0), + _processed(0), + _generation(nullptr) { if (_heap->mode()->is_generational()) { _generation = _heap->gc_generation(); assert(_generation != nullptr, "Expected active generation in this mode."); @@ -788,11 +802,11 @@ class ShenandoahVerifierMarkedRegionTask : public WorkerTask { }; class VerifyThreadGCState : public ThreadClosure { -private: + private: const char* const _label; - char const _expected; + char const _expected; -public: + public: VerifyThreadGCState(const char* label, char expected) : _label(label), _expected(expected) {} void do_thread(Thread* t) override { char actual = ShenandoahThreadLocalData::gc_state(t); @@ -888,7 +902,11 @@ void ShenandoahVerifier::verify_at_safepoint(const char* label, { ShenandoahHeapLocker lock(_heap->lock()); +#ifdef KELVIN_VERBOSE + ShenandoahCalculateRegionStatsClosure cl = ShenandoahCalculateRegionStatsClosure("Global"); +#else ShenandoahCalculateRegionStatsClosure cl; +#endif _heap->heap_region_iterate(&cl); size_t heap_used; if (_heap->mode()->is_generational() && (sizeness == _verify_size_adjusted_for_padding)) { @@ -950,19 +968,19 @@ void ShenandoahVerifier::verify_at_safepoint(const char* label, _heap->heap_region_iterate(&cl); if (LogTarget(Debug, gc)::is_enabled()) { - ShenandoahGenerationStatsClosure::log_usage(_heap->old_generation(), cl.old); - ShenandoahGenerationStatsClosure::log_usage(_heap->young_generation(), cl.young); - ShenandoahGenerationStatsClosure::log_usage(_heap->global_generation(), cl.global); + ShenandoahGenerationStatsClosure::log_usage(_heap->old_generation(), cl._old); + ShenandoahGenerationStatsClosure::log_usage(_heap->young_generation(), cl._young); + 
ShenandoahGenerationStatsClosure::log_usage(_heap->global_generation(), cl._global); } if (sizeness == _verify_size_adjusted_for_padding) { - ShenandoahGenerationStatsClosure::validate_usage(false, true, label, _heap->old_generation(), cl.old); - ShenandoahGenerationStatsClosure::validate_usage(true, true, label, _heap->young_generation(), cl.young); - ShenandoahGenerationStatsClosure::validate_usage(true, true, label, _heap->global_generation(), cl.global); + ShenandoahGenerationStatsClosure::validate_usage(false, true, label, _heap->old_generation(), cl._old); + ShenandoahGenerationStatsClosure::validate_usage(true, true, label, _heap->young_generation(), cl._young); + ShenandoahGenerationStatsClosure::validate_usage(true, true, label, _heap->global_generation(), cl._global); } else if (sizeness == _verify_size_exact || sizeness == _verify_size_exact_including_trash) { bool adjust_trash = (sizeness == _verify_size_exact); - ShenandoahGenerationStatsClosure::validate_usage(false, adjust_trash, label, _heap->old_generation(), cl.old); - ShenandoahGenerationStatsClosure::validate_usage(false, adjust_trash, label, _heap->young_generation(), cl.young); - ShenandoahGenerationStatsClosure::validate_usage(false, adjust_trash, label, _heap->global_generation(), cl.global); + ShenandoahGenerationStatsClosure::validate_usage(false, adjust_trash, label, _heap->old_generation(), cl._old); + ShenandoahGenerationStatsClosure::validate_usage(false, adjust_trash, label, _heap->young_generation(), cl._young); + ShenandoahGenerationStatsClosure::validate_usage(false, adjust_trash, label, _heap->global_generation(), cl._global); } // else: sizeness must equal _verify_size_disable } @@ -1443,7 +1461,7 @@ void ShenandoahVerifier::verify_before_rebuilding_free_set() { ShenandoahGenerationStatsClosure cl; _heap->heap_region_iterate(&cl); - ShenandoahGenerationStatsClosure::validate_usage(false, true, "Before free set rebuild", _heap->old_generation(), cl.old); - ShenandoahGenerationStatsClosure::validate_usage(false, true, "Before free set rebuild", _heap->young_generation(), cl.young); - ShenandoahGenerationStatsClosure::validate_usage(false, true, "Before free set rebuild", _heap->global_generation(), cl.global); + ShenandoahGenerationStatsClosure::validate_usage(false, true, "Before free set rebuild", _heap->old_generation(), cl._old); + ShenandoahGenerationStatsClosure::validate_usage(false, true, "Before free set rebuild", _heap->young_generation(), cl._young); + ShenandoahGenerationStatsClosure::validate_usage(false, true, "Before free set rebuild", _heap->global_generation(), cl._global); } From faf2006ddd272a8c56ad918f6d9791915ce09572 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Sun, 20 Jul 2025 15:03:50 -0600 Subject: [PATCH 17/61] capacity accounting adjusts available --- src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index aff6671220759..1424651ee04b4 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -425,6 +425,7 @@ void ShenandoahRegionPartitions::increase_capacity(ShenandoahFreeSetPartitionId shenandoah_assert_heaplocked(); assert (which_partition < NumPartitions, "Partition must be valid"); _capacity[int(which_partition)] += bytes; + _available[int(which_partition)] += bytes; #ifdef KELVIN_CAPACITY 
log_info(gc)("FreeSet<%s>::increase_capacity(%zu) yields: %zu", partition_name(which_partition), bytes, _capacity[int(which_partition)]); @@ -436,6 +437,7 @@ void ShenandoahRegionPartitions::decrease_capacity(ShenandoahFreeSetPartitionId assert (which_partition < NumPartitions, "Partition must be valid"); assert(_capacity[int(which_partition)] >= bytes, "Cannot remove more capacity bytes than are present"); _capacity[int(which_partition)] -= bytes; + _available[int(which_partition)] -= bytes; #ifdef KELVIN_CAPACITY log_info(gc)("FreeSet<%s>::decrease_capacity(%zu) yields: %zu", partition_name(which_partition), bytes, _capacity[int(which_partition)]); @@ -2406,10 +2408,12 @@ size_t ShenandoahFreeSet::transfer_empty_regions_from_collector_set_to_mutator_s _partitions.decrease_total_region_counts(which_collector, transferred_regions); _partitions.decrease_region_counts(which_collector, transferred_regions); _partitions.decrease_empty_region_counts(which_collector, transferred_regions); + _partitions.decrease_capacity(which_collector, transferred_regions * region_size_bytes); _partitions.increase_total_region_counts(ShenandoahFreeSetPartitionId::Mutator, transferred_regions); _partitions.increase_region_counts(ShenandoahFreeSetPartitionId::Mutator, transferred_regions); _partitions.increase_empty_region_counts(ShenandoahFreeSetPartitionId::Mutator, transferred_regions); + _partitions.increase_capacity(ShenandoahFreeSetPartitionId::Mutator, transferred_regions * region_size_bytes); recompute_total_used(); _partitions.assert_bounds(true); @@ -2978,22 +2982,21 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old _partitions.decrease_region_counts(ShenandoahFreeSetPartitionId::Mutator, regions_to_old_collector + regions_to_collector); _partitions.decrease_empty_region_counts(ShenandoahFreeSetPartitionId::Mutator, empty_regions_to_old_collector + empty_regions_to_collector); + // decrease_capacity() also decreases available _partitions.decrease_capacity(ShenandoahFreeSetPartitionId::Mutator, (regions_to_old_collector + regions_to_collector) * region_size_bytes); - _partitions.decrease_available(ShenandoahFreeSetPartitionId::Mutator, - (regions_to_old_collector + regions_to_collector) * region_size_bytes); _partitions.increase_total_region_counts(ShenandoahFreeSetPartitionId::Collector, regions_to_collector); _partitions.increase_region_counts(ShenandoahFreeSetPartitionId::Collector, regions_to_collector); _partitions.increase_empty_region_counts(ShenandoahFreeSetPartitionId::Collector, empty_regions_to_collector); + // increase_capacity() also increases available _partitions.increase_capacity(ShenandoahFreeSetPartitionId::Collector, regions_to_collector * region_size_bytes); - _partitions.increase_available(ShenandoahFreeSetPartitionId::Collector, regions_to_collector * region_size_bytes); _partitions.increase_total_region_counts(ShenandoahFreeSetPartitionId::OldCollector, regions_to_old_collector); _partitions.increase_region_counts(ShenandoahFreeSetPartitionId::OldCollector, regions_to_old_collector); _partitions.increase_empty_region_counts(ShenandoahFreeSetPartitionId::OldCollector, empty_regions_to_old_collector); + // increase_capacity() also increases available _partitions.increase_capacity(ShenandoahFreeSetPartitionId::OldCollector, regions_to_old_collector * region_size_bytes); - _partitions.increase_available(ShenandoahFreeSetPartitionId::OldCollector, regions_to_old_collector * region_size_bytes); if (used_to_collector > 0) { 
_partitions.increase_used(ShenandoahFreeSetPartitionId::Collector, used_to_collector); From 4121ce997eae6e6663e8290dcdb4c48ab94ebcbd Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Sun, 20 Jul 2025 15:34:06 -0600 Subject: [PATCH 18/61] remove redundant calls to adjust available --- src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index 1424651ee04b4..a2852d2b41119 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -1226,11 +1226,10 @@ void ShenandoahFreeSet::add_promoted_in_place_region_to_old_collector(Shenandoah } _partitions.decrease_used(ShenandoahFreeSetPartitionId::Mutator, used_in_region); + // decrease capacity adjusts available _partitions.decrease_capacity(ShenandoahFreeSetPartitionId::Mutator, region_size_bytes); - _partitions.decrease_available(ShenandoahFreeSetPartitionId::Mutator, region_size_bytes); _partitions.decrease_total_region_counts(ShenandoahFreeSetPartitionId::Mutator, 1); - _partitions.increase_available(ShenandoahFreeSetPartitionId::OldCollector, region_size_bytes); _partitions.increase_capacity(ShenandoahFreeSetPartitionId::OldCollector, region_size_bytes); _partitions.increase_used(ShenandoahFreeSetPartitionId::OldCollector, used_in_region); _partitions.increase_total_region_counts(ShenandoahFreeSetPartitionId::OldCollector, 1); From 3f7042fd40e9003c99bbbcf27ead1d272a2e4365 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Mon, 21 Jul 2025 10:38:11 -0600 Subject: [PATCH 19/61] Account for PiP padding in usage --- .../share/gc/shenandoah/shenandoahFreeSet.cpp | 12 ++++++++++-- .../share/gc/shenandoah/shenandoahFreeSet.hpp | 5 ++++- .../share/gc/shenandoah/shenandoahGeneration.cpp | 5 +++++ .../shenandoahGenerationalEvacuationTask.cpp | 6 ++++-- 4 files changed, 23 insertions(+), 5 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index a2852d2b41119..8377ee9d40f72 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -173,6 +173,14 @@ ShenandoahRegionPartitions::ShenandoahRegionPartitions(size_t max_regions, Shena make_all_regions_unavailable(); } +void ShenandoahFreeSet::increase_young_used(size_t bytes) { + _partitions.increase_used(ShenandoahFreeSetPartitionId::Mutator, bytes); +} + +void ShenandoahFreeSet::decrease_young_used(size_t bytes) { + _partitions.decrease_used(ShenandoahFreeSetPartitionId::Mutator, bytes); +} + inline bool ShenandoahFreeSet::can_allocate_from(ShenandoahHeapRegion *r) const { return r->is_empty() || (r->is_trash() && !_heap->is_concurrent_weak_root_in_progress()); } @@ -1209,7 +1217,7 @@ ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) : clear_internal(); } -void ShenandoahFreeSet::add_promoted_in_place_region_to_old_collector(ShenandoahHeapRegion* region) { +void ShenandoahFreeSet::add_promoted_in_place_region_to_old_collector(ShenandoahHeapRegion* region, size_t pip_pad_bytes) { shenandoah_assert_heaplocked(); size_t plab_min_size_in_bytes = ShenandoahGenerationalHeap::heap()->plab_min_size() * HeapWordSize; size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); @@ -1225,7 +1233,7 @@ void ShenandoahFreeSet::add_promoted_in_place_region_to_old_collector(Shenandoah 
used_in_region += available_in_region; } - _partitions.decrease_used(ShenandoahFreeSetPartitionId::Mutator, used_in_region); + _partitions.decrease_used(ShenandoahFreeSetPartitionId::Mutator, used_in_region + pip_pad_bytes); // decrease capacity adjusts available _partitions.decrease_capacity(ShenandoahFreeSetPartitionId::Mutator, region_size_bytes); _partitions.decrease_total_region_counts(ShenandoahFreeSetPartitionId::Mutator, 1); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp index d2b573fb6b31f..4ff37431e3b3f 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp @@ -603,6 +603,9 @@ class ShenandoahFreeSet : public CHeapObj { return _total_old_used; } + void increase_young_used(size_t bytes); + void decrease_young_used(size_t bytes); + // Return bytes used by young inline size_t young_used() { return _total_young_used; @@ -662,7 +665,7 @@ class ShenandoahFreeSet : public CHeapObj { // When a region is promoted in place, we add the region's available memory if it is greater than plab_min_size() // into the old collector partition by invoking this method. - void add_promoted_in_place_region_to_old_collector(ShenandoahHeapRegion* region); + void add_promoted_in_place_region_to_old_collector(ShenandoahHeapRegion* region, size_t pip_pad_bytes); // Move up to cset_regions number of regions from being available to the collector to being available to the mutator. // diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp index 882adfd77a777..793006971c33e 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp @@ -531,6 +531,8 @@ size_t ShenandoahGeneration::select_aged_regions(size_t old_available) { assert_no_in_place_promotions(); auto const heap = ShenandoahGenerationalHeap::heap(); + ShenandoahYoungGeneration* young_gen = heap->young_generation(); + ShenandoahFreeSet* free_set = heap->free_set(); bool* const candidate_regions_for_promotion_by_copy = heap->collection_set()->preselected_regions(); ShenandoahMarkingContext* const ctx = heap->marking_context(); @@ -580,6 +582,9 @@ size_t ShenandoahGeneration::select_aged_regions(size_t old_available) { // new allocations would not necessarily be eligible for promotion. This addresses both issues. r->set_top(r->end()); promote_in_place_pad += remnant_size * HeapWordSize; + + free_set->increase_young_used(remnant_size); + young_gen->increase_used(remnant_size); } else { // Since the remnant is so small that it cannot be filled, we don't have to worry about any accidental // allocations occurring within this region before the region is promoted in place. 
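The hunk above pads the unallocated remnant at the top of a region selected for promotion in place, and the new increase_young_used()/increase_used() calls charge that remnant to the young usage tallies. A minimal standalone sketch of the arithmetic (illustrative only, not Shenandoah code; the word size, region size, and fill point below are assumed, not taken from the patch):

// Standalone sketch: how a promote-in-place pad is derived from the remnant
// between top() and end(), and why it must be remembered once top is pushed
// to end. All sizes here are assumptions for illustration.
#include <cassert>
#include <cstddef>

int main() {
  const size_t heap_word_size = 8;            // assumed 64-bit heap words
  const size_t region_words   = 512 * 1024;   // assumed 4 MB region
  size_t       top_words      = 500 * 1024;   // assumed allocation point

  size_t remnant_words = region_words - top_words;          // 12 K words
  size_t pad_bytes     = remnant_words * heap_word_size;    // 96 KB

  // Padding: top moves to end, so the region no longer reports any free
  // space and the pad is only visible through the usage tallies.
  top_words = region_words;
  assert(region_words - top_words == 0);
  assert(pad_bytes == 96 * 1024);
  return 0;
}

Because set_top(r->end()) hides the remnant from the region's own free-space accounting, the pad has to be carried explicitly in the used counters, which is what the added calls above do.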
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp index 7d0d29b798dc4..70433e97e4cb8 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp @@ -225,6 +225,8 @@ void ShenandoahGenerationalEvacuationTask::promote_in_place(ShenandoahHeapRegion HeapWord* update_watermark = region->get_update_watermark(); + size_t pip_pad_bytes = (region->top() - region->get_top_before_promote()) * HeapWordSize; + // Now that this region is affiliated with old, we can allow it to receive allocations, though it may not be in the // is_collector_free range. region->restore_top_before_promote(); @@ -252,9 +254,9 @@ void ShenandoahGenerationalEvacuationTask::promote_in_place(ShenandoahHeapRegion region->set_affiliation(OLD_GENERATION); // add_old_collector_free_region() increases promoted_reserve() if available space exceeds plab_min_size() - _heap->free_set()->add_promoted_in_place_region_to_old_collector(region); + _heap->free_set()->add_promoted_in_place_region_to_old_collector(region, pip_pad_bytes); - young_gen->decrease_used(region_used); + young_gen->decrease_used(region_used + pip_pad_bytes); young_gen->decrement_affiliated_region_count(); // transfer_to_old() increases capacity of old and decreases capacity of young From 0e7b91f034f3d06e9d414d89e99eb74702d5c0d2 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Tue, 22 Jul 2025 09:33:10 -0600 Subject: [PATCH 20/61] fix remaining known accounting bug --- .../share/gc/shenandoah/shenandoahFreeSet.cpp | 52 ++++++++++++++----- .../share/gc/shenandoah/shenandoahFreeSet.hpp | 3 +- .../gc/shenandoah/shenandoahVerifier.cpp | 29 ++++++++--- 3 files changed, 64 insertions(+), 20 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index 166a473c9c1cf..56a4ec5c9a1b7 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -689,8 +689,9 @@ void ShenandoahRegionPartitions::retire_range_from_partition( shrink_interval_if_range_modifies_either_boundary(partition, low_idx, high_idx); } -void ShenandoahRegionPartitions::retire_from_partition(ShenandoahFreeSetPartitionId partition, idx_t idx, size_t used_bytes) { +size_t ShenandoahRegionPartitions::retire_from_partition(ShenandoahFreeSetPartitionId partition, idx_t idx, size_t used_bytes) { + size_t waste_bytes = 0; // Note: we may remove from free partition even if region is not entirely full, such as when available < PLAB::min_size() assert (idx < _max, "index is sane: %zu < %zu", idx, _max); assert (partition < NumPartitions, "Cannot remove from free partitions if not already free"); @@ -699,6 +700,7 @@ void ShenandoahRegionPartitions::retire_from_partition(ShenandoahFreeSetPartitio if (used_bytes < _region_size_bytes) { // Count the alignment pad remnant of memory as used when we retire this region size_t fill_padding = _region_size_bytes - used_bytes; + waste_bytes = fill_padding; increase_used(partition, fill_padding); #ifdef ASSERT // Fill the unused memory so that verification will not be confused by inconsistent tallies of used @@ -708,13 +710,21 @@ void ShenandoahRegionPartitions::retire_from_partition(ShenandoahFreeSetPartitio r->allocate_fill(fill_words); } #ifdef KELVIN_USED - log_info(gc)("Retiring generation %zu with padding: %zu", idx, 
fill_padding); + else { + log_info(gc)("KELVIN!!!! Did not fill because padding: %zu is too small", waste_bytes); + } + log_info(gc)("Retiring region %zu with padding: %zu", idx, fill_padding); #endif #endif } _membership[int(partition)].clear_bit(idx); shrink_interval_if_boundary_modified(partition, idx); _region_counts[int(partition)]--; + + // This region is fully used, whether or not top() equals end(). It + // is retired and no more memory will be allocated from within it. + + return waste_bytes; } // The caller is responsible for increasing capacity and available and used in which_partition, and decreasing the @@ -983,12 +993,18 @@ void ShenandoahRegionPartitions::assert_bounds(bool validate_totals) { regions[i] = 0; } + size_t min_free_size = ShenandoahHeap::min_fill_size() * HeapWordSize; for (idx_t i = 0; i < _max; i++) { ShenandoahFreeSetPartitionId partition = membership(i); + size_t capacity = _free_set->alloc_capacity(i); + if (capacity < min_free_size) { + assert(capacity / HeapWordSize < ShenandoahHeap::min_fill_size(), "pad should be filled"); + // this region has been retired already, count it as entirely consumed + capacity = 0; + } switch (partition) { case ShenandoahFreeSetPartitionId::NotFree: { - size_t capacity = _free_set->alloc_capacity(i); assert(!validate_totals || (capacity != _region_size_bytes), "Should not be retired if empty"); ShenandoahHeapRegion* r = ShenandoahHeap::heap()->get_region(i); if (r->is_old()) { @@ -1008,9 +1024,8 @@ void ShenandoahRegionPartitions::assert_bounds(bool validate_totals) { case ShenandoahFreeSetPartitionId::Collector: case ShenandoahFreeSetPartitionId::OldCollector: { - size_t capacity = _free_set->alloc_capacity(i); - bool is_empty = (capacity == _region_size_bytes); assert(capacity > 0, "free regions must have allocation capacity"); + bool is_empty = (capacity == _region_size_bytes); regions[int(partition)]++; used[int(partition)] += _region_size_bytes - capacity; capacities[int(partition)] += _region_size_bytes; @@ -1675,17 +1690,16 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah // then retire the region so that subsequent searches can find available memory more quickly. 
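      // Example (illustrative, sizes assumed, not part of the change itself): if a
      // region is retired here with a few kilobytes still unallocated, the
      // retire_from_partition() call below reports that remnant as waste_bytes, and
      // the same number of bytes is then added to the requesting generation's used
      // total (and to the global generation in generational mode), keeping the
      // generation tallies in step with the partition tallies that
      // retire_from_partition() itself updates.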
size_t idx = r->index(); - size_t free_bytes = r->free(); - if (free_bytes > 0) { - request_generation->increase_used(free_bytes); - if (_heap->mode()->is_generational()) { - _heap->global_generation()->increase_used(free_bytes); - } - } if ((result != nullptr) && in_new_region) { _partitions.one_region_is_no_longer_empty(orig_partition); } - _partitions.retire_from_partition(orig_partition, idx, r->used()); + size_t waste_bytes = _partitions.retire_from_partition(orig_partition, idx, r->used()); + if (waste_bytes > 0) { + request_generation->increase_used(waste_bytes); + if (_heap->mode()->is_generational()) { + _heap->global_generation()->increase_used(waste_bytes); + } + } } else if ((result != nullptr) && in_new_region) { _partitions.one_region_is_no_longer_empty(orig_partition); } @@ -2147,10 +2161,15 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r size_t total_mutator_regions = 0; size_t total_old_collector_regions = 0; +#define KELVIN_MYSTERY bool is_generational = _heap->mode()->is_generational(); size_t num_regions = _heap->num_regions(); for (size_t idx = 0; idx < num_regions; idx++) { ShenandoahHeapRegion* region = _heap->get_region(idx); +#ifdef KELVIN_MYSTERY + log_info(gc)("find_alloc_capacity() on region %zu, used is %zu", region->index(), region->used()); +#endif + if (region->is_trash()) { // Trashed regions represent immediate garbage identified by final mark and regions that had been in the collection // partition but have not yet been "cleaned up" following update refs. @@ -2230,6 +2249,13 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r } } else { // This region does not have enough free to be part of the free set. Count all of its memory as used. +#ifdef KELVIN_MYSTERY + if (region->used() != region_size_bytes) { + log_info(gc)("KELVIN!!! find_alloc_capacity() for region %zu is consuming %zu bytes of waste", + idx, region_size_bytes - region->used()); + } +#endif + if (region->is_old()) { old_collector_used += region_size_bytes; total_old_collector_regions++; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp index 4ff37431e3b3f..d5bb597fd94cd 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp @@ -195,7 +195,8 @@ class ShenandoahRegionPartitions { // Retire region idx from within partition, , leaving its capacity and used as part of the original free partition's totals. // Requires that region idx is in in the Mutator or Collector partitions. Hereafter, identifies this region as NotFree. // Any remnant of available memory at the time of retirement is added to the original partition's total of used bytes. - void retire_from_partition(ShenandoahFreeSetPartitionId p, idx_t idx, size_t used_bytes); + // Return the number of filled bytes (if any). + size_t retire_from_partition(ShenandoahFreeSetPartitionId p, idx_t idx, size_t used_bytes); // Retire all regions between low_idx and high_idx inclusive from within partition. Requires that each region idx is // in the same Mutator or Collector partition. Hereafter, identifies each region as NotFree. 
Assumes that each region diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp index b7d5969e68d3f..4416296b299a8 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp @@ -370,6 +370,7 @@ class ShenandoahVerifyOopClosure : public BasicOopIterateClosure { class ShenandoahCalculateRegionStatsClosure : public ShenandoahHeapRegionClosure { private: size_t _used, _committed, _garbage, _regions, _humongous_waste, _trashed_regions, _trashed_used; + size_t _region_size_bytes, _min_free_size; #define KELVIN_VERBOSE #ifdef KELVIN_VERBOSE const char* _nm; @@ -377,26 +378,42 @@ class ShenandoahCalculateRegionStatsClosure : public ShenandoahHeapRegionClosure public: #ifdef KELVIN_VERBOSE ShenandoahCalculateRegionStatsClosure(const char *name) : - _used(0), _committed(0), _garbage(0), _regions(0), _humongous_waste(0), _trashed_regions(0), _trashed_used(0), _nm(name) {}; + _used(0), _committed(0), _garbage(0), _regions(0), _humongous_waste(0), _trashed_regions(0), _trashed_used(0), _nm(name) #else ShenandoahCalculateRegionStatsClosure() : - _used(0), _committed(0), _garbage(0), _regions(0), _humongous_waste(0), _trashed_regions(0), _trashed_used(0) {}; + _used(0), _committed(0), _garbage(0), _regions(0), _humongous_waste(0), _trashed_regions(0), _trashed_used(0) #endif + { + _region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); + _min_free_size = ShenandoahHeap::min_fill_size() * HeapWordSize; + }; + void heap_region_do(ShenandoahHeapRegion* r) override { #define KELVIN_STATS #ifdef KELVIN_STATS log_info(gc)("%s:ShenandoahCalculateRegionStatsClosure::heap_region_do(), %s r: %zu used: %zu, garbage: %zu, is_trash: %s", _nm, r->affiliation_name(), r->index(), r->used(), r->garbage(), r->is_trash()? "yes": "no"); #endif - _used += r->used(); - _garbage += r->garbage(); - _committed += r->is_committed() ? ShenandoahHeapRegion::region_size_bytes() : 0; + size_t alloc_capacity = r->free(); + if ((alloc_capacity > 0) && (alloc_capacity < _min_free_size)) { +#ifdef KELVIN_STATS + log_info(gc)("KELVIN!!!! overwriting alloc_capacity %zu with 0 because too small", alloc_capacity); +#endif + // this region has been retired already, count it as entirely consumed + alloc_capacity = 0; + } + size_t bytes_used_in_region = _region_size_bytes - alloc_capacity; + size_t bytes_garbage_in_region = bytes_used_in_region - r->get_live_data_bytes(); + + _used += bytes_used_in_region; + _garbage += bytes_garbage_in_region; + _committed += r->is_committed() ? 
_region_size_bytes : 0; if (r->is_humongous()) { _humongous_waste += r->free(); } if (r->is_trash()) { _trashed_regions++; - _trashed_used += r->used(); + _trashed_used += bytes_used_in_region; } _regions++; #ifdef KELVIN_STATS From d299ec28926f65ae6e542abba3bafc36adf5a021 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Tue, 22 Jul 2025 10:08:59 -0600 Subject: [PATCH 21/61] disable debug instrumentation --- .../share/gc/shenandoah/shenandoahFreeSet.cpp | 26 ++++++++++--------- .../share/gc/shenandoah/shenandoahFreeSet.hpp | 6 ++--- .../gc/shenandoah/shenandoahGeneration.cpp | 4 +-- .../gc/shenandoah/shenandoahGeneration.hpp | 5 ++-- .../shenandoah/shenandoahGenerationalHeap.cpp | 2 +- .../gc/shenandoah/shenandoahHeapRegion.cpp | 4 +-- .../gc/shenandoah/shenandoahOldGeneration.hpp | 2 +- .../gc/shenandoah/shenandoahVerifier.cpp | 6 ++--- 8 files changed, 28 insertions(+), 27 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index 56a4ec5c9a1b7..a263556490883 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -50,7 +50,7 @@ static const char* partition_name(ShenandoahFreeSetPartitionId t) { } } -#define KELVIN_USED +#undef KELVIN_USED class ShenandoahLeftRightIterator { private: @@ -252,7 +252,7 @@ void ShenandoahRegionPartitions::make_all_regions_unavailable() { #ifdef KELVIN_USED log_info(gc)("make_all_regions_unavailable() setting _used[] to 0 for all partitions"); #endif -#define KELVIN_REGION_COUNTS +#undef KELVIN_REGION_COUNTS #ifdef KELVIN_REGION_COUNTS log_info(gc)("Setting Mutator and Collector total_region_counts to zero, OldCollector is %zu", _total_region_counts[int(ShenandoahFreeSetPartitionId::OldCollector)]); @@ -303,7 +303,7 @@ void ShenandoahRegionPartitions::establish_mutator_intervals(idx_t mutator_leftm _total_region_counts[int(ShenandoahFreeSetPartitionId::Collector)] = 0; _empty_region_counts[int(ShenandoahFreeSetPartitionId::Collector)] = 0; -#define KELVIN_CAPACITY +#undef KELVIN_CAPACITY #ifdef KELVIN_CAPACITY log_info(gc)("establish_mutator_intervals() sets Mutator capacity: %zu", _capacity[int(ShenandoahFreeSetPartitionId::Mutator)]); log_info(gc)("establish_mutator_intervals() sets Collector capacity: %zu", _capacity[int(ShenandoahFreeSetPartitionId::Collector)]); @@ -422,7 +422,7 @@ void ShenandoahRegionPartitions::set_capacity_of(ShenandoahFreeSetPartitionId wh assert (which_partition < NumPartitions, "selected free set must be valid"); _capacity[int(which_partition)] = value; _available[int(which_partition)] = value - _used[int(which_partition)]; -#define KELVIN_CAPACITY +#undef KELVIN_CAPACITY #ifdef KELVIN_CAPACITY log_info(gc)("set_capacity of %s to %zu", partition_name(which_partition), _capacity[int(which_partition)]); #endif @@ -461,7 +461,7 @@ void ShenandoahRegionPartitions::increase_available(ShenandoahFreeSetPartitionId shenandoah_assert_heaplocked(); assert (which_partition < NumPartitions, "Partition must be valid"); _available[int(which_partition)] += bytes; -#define KELVIN_AVAILABLE +#undef KELVIN_AVAILABLE #ifdef KELVIN_AVAILABLE log_info(gc)("FreeSet<%s>::increase_available(%zu) yields: %zu", partition_name(which_partition), bytes, _available[int(which_partition)]); @@ -543,7 +543,7 @@ void ShenandoahRegionPartitions::one_region_is_no_longer_empty(ShenandoahFreeSet void ShenandoahRegionPartitions::shrink_interval_if_range_modifies_either_boundary( ShenandoahFreeSetPartitionId 
partition, idx_t low_idx, idx_t high_idx) { assert((low_idx <= high_idx) && (low_idx >= 0) && (high_idx < _max), "Range must span legal index values"); -#define KELVIN_INTERVALS +#undef KELVIN_INTERVALS #ifdef KELVIN_INTERVALS log_info(gc)("shrink_interval_if_range_modifies_either_boundary(%s, %zd, %zd)", partition_name(partition), low_idx, high_idx); #endif @@ -1853,11 +1853,13 @@ class ShenandoahRecycleTrashedRegionClosure final : public ShenandoahHeapRegionC void get_lock_and_flush_buffer(size_t region_count, size_t overflow_region_used, size_t overflow_region_index) { ShenandoahHeap* heap = ShenandoahHeap::heap(); ShenandoahHeapLocker locker(heap->lock()); -#define KELVIN_RECYCLE +#undef KELVIN_RECYCLE #ifdef KELVIN_RECYCLE Thread* t = Thread::current(); size_t p2i = (size_t) t; +#endif size_t recycled_regions = Atomic::load(&_recycled_region_count); +#ifdef KELVIN_RECYCLE log_info(gc)("%zx: got lock, will flush buffer with %zu entries plus (used: %zu, index: %zu)", p2i, recycled_regions, overflow_region_used, overflow_region_index); #endif @@ -2161,7 +2163,7 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r size_t total_mutator_regions = 0; size_t total_old_collector_regions = 0; -#define KELVIN_MYSTERY +#undef KELVIN_MYSTERY bool is_generational = _heap->mode()->is_generational(); size_t num_regions = _heap->num_regions(); for (size_t idx = 0; idx < num_regions; idx++) { @@ -2577,7 +2579,7 @@ void ShenandoahFreeSet::establish_generation_sizes(size_t young_region_count, si size_t affiliated_young_regions, size_t affiliated_old_regions, size_t young_used_bytes, size_t old_used_bytes) { assert(young_region_count + old_region_count == ShenandoahHeap::heap()->num_regions(), "Sanity"); -#define KELVIN_RESERVE +#undef KELVIN_RESERVE #ifdef KELVIN_RESERVE log_info(gc)("establish_generation_sizes(young_region_count: %zu, old_region_count: %zu, ", young_region_count, old_region_count); @@ -2622,7 +2624,7 @@ void ShenandoahFreeSet::establish_generation_sizes(size_t young_region_count, si void ShenandoahFreeSet::reestablish_generation_sizes(size_t young_region_count, size_t old_region_count) { assert(young_region_count + old_region_count == ShenandoahHeap::heap()->num_regions(), "Sanity"); -#define KELVIN_RESERVE +#undef KELVIN_RESERVE #ifdef KELVIN_RESERVE log_info(gc)("reestablish_generation_sizes(young_region_count: %zu, old_region_count: %zu, ", young_region_count, old_region_count); @@ -2810,7 +2812,7 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old size_t old_collector_available = _partitions.available_in(ShenandoahFreeSetPartitionId::OldCollector);; size_t collector_available = _partitions.available_in(ShenandoahFreeSetPartitionId::Collector); -#define KELVIN_RESERVE +#undef KELVIN_RESERVE #ifdef KELVIN_RESERVE log_info(gc)("reserve_regions() seeks to_reserve: %zu and to_reserve_old: %zu", to_reserve, to_reserve_old); #endif @@ -3222,7 +3224,7 @@ void ShenandoahFreeSet::log_status() { } total_used += used_in_region; total_free += free; -#define KELVIN_DEBUG +#undef KELVIN_DEBUG #ifdef KELVIN_DEBUG log_info(gc)("%sMutator region %zu has free: %zu, used: %zu, total_free: %zu, total_used: %zu", r->is_trash()? 
"Trashed ": "", r->index(), free, used_in_region, total_free, total_used); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp index d5bb597fd94cd..cb555ddd038a9 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp @@ -31,7 +31,7 @@ #include "gc/shenandoah/shenandoahSimpleBitMap.hpp" -#define KELVIN_HUMONGOUS_WASTE +#undef KELVIN_HUMONGOUS_WASTE // Each ShenandoahHeapRegion is associated with a ShenandoahFreeSetPartitionId. enum class ShenandoahFreeSetPartitionId : uint8_t { @@ -439,7 +439,7 @@ class ShenandoahFreeSet : public CHeapObj { size_t region_size_bytes = _partitions.region_size_bytes(); _total_young_used = (_partitions.used_by(ShenandoahFreeSetPartitionId::Mutator) + _partitions.used_by(ShenandoahFreeSetPartitionId::Collector)); -#define KELVIN_USED +#undef KELVIN_USED #ifdef KELVIN_USED log_info(gc)(" recompute_total_young_used(): %zu from total regions M: %zu, C: %zu, allocatable regions M: %zu, C: %zu, " "M used: %zu, C used: %zu", _total_young_used, @@ -492,7 +492,7 @@ class ShenandoahFreeSet : public CHeapObj { _old_affiliated_regions = (_partitions.get_total_region_counts(ShenandoahFreeSetPartitionId::OldCollector) - _partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::OldCollector)); _global_affiliated_regions = _young_affiliated_regions + _old_affiliated_regions; -#define KELVIN_AFFILIATED +#undef KELVIN_AFFILIATED #ifdef KELVIN_AFFILIATED log_info(gc)("recompute_affiliated(young: %zu, old: %zu, global: %zu)", _young_affiliated_regions, _old_affiliated_regions, _global_affiliated_regions); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp index b4e035b153645..35c7301a62311 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp @@ -868,7 +868,7 @@ size_t ShenandoahGeneration::increment_affiliated_region_count() { // on read and write of _affiliated_region_count. At the end of full gc, a single thread overwrites the count with // a coherent value. 
size_t result = Atomic::add(&_affiliated_region_count, (size_t) 1); -#define KELVIN_AFFILIATED +#undef KELVIN_AFFILIATED #ifdef KELVIN_AFFILIATED log_info(gc)("%s: increment_affiliated_region_count() by 1: %zu", name(), result); #endif @@ -934,7 +934,7 @@ void ShenandoahGeneration::establish_usage(size_t num_regions, size_t num_bytes, void ShenandoahGeneration::increase_used(size_t bytes) { Atomic::add(&_used, bytes); -#define KELVIN_MONITOR_USED +#undef KELVIN_MONITOR_USED #ifdef KELVIN_MONITOR_USED log_info(gc)("Generation %s increase_used(%zu) to %zu", shenandoah_generation_name(_type), bytes, _used); #endif diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp index 4be22f3eb7ecb..b378bb405d9b7 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp @@ -200,7 +200,7 @@ class ShenandoahGeneration : public CHeapObj, public ShenandoahSpaceInfo { void set_used(size_t affiliated_region_count, size_t byte_count) { Atomic::store(&_used, byte_count); Atomic::store(&_affiliated_region_count, affiliated_region_count); -#ifdef KELVIN_SCAFFOLDING +#ifdef KELVIN_DEBUG log_info(gc)("%s:set_used(regions: %zu, bytes: %zu)", shenandoah_generation_name(_type), affiliated_region_count, byte_count); #endif } @@ -300,8 +300,7 @@ class ShenandoahGeneration : public CHeapObj, public ShenandoahSpaceInfo { result = _free_set->humongous_waste_in_mutator() + _free_set->humongous_waste_in_old(); break; } -#define KELVIN_MONITOR_HUMONGOUS -#ifdef KELVIN_MONITOR_HUMONGOUS +#ifdef KELVIN_SCAFFOLDING if (result != _humongous_waste) { log_info(gc)("Generation %s expects consistency between humongous waste in free set (%zu) and in generation (%zu)", shenandoah_generation_name(_type), result, _humongous_waste); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp index 2604b07352a8c..1ec2dfb8a8420 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp @@ -658,7 +658,7 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t old_xfer_ const double max_old_reserve = (ShenandoahOldEvacRatioPercent == 100)? 
bound_on_old_reserve: MIN2(double(young_reserve * ShenandoahOldEvacRatioPercent) / double(100 - ShenandoahOldEvacRatioPercent), bound_on_old_reserve); -#define KELVIN_REBALANCE +#undef KELVIN_REBALANCE #ifdef KELVIN_REBALANCE log_info(gc)("compute_old_gen_balance(%zu, %zu)", old_xfer_limit, old_cset_regions); #endif diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp index 4ce65b8b1768c..f7adf37de9dbc 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp @@ -299,7 +299,7 @@ void ShenandoahHeapRegion::make_cset() { } void ShenandoahHeapRegion::make_trash() { -#define KELVIN_TRASH +#undef KELVIN_TRASH #ifdef KELVIN_TRASH log_info(gc)("make_trash() on region %zu", index()); #endif @@ -913,7 +913,7 @@ void ShenandoahHeapRegion::decrement_humongous_waste() const { if (waste_bytes > 0) { ShenandoahHeap* heap = ShenandoahHeap::heap(); ShenandoahGeneration* generation = heap->generation_for(affiliation()); -#define KELVIN_HUMONGOUS_HEAP_REGION +#undef KELVIN_HUMONGOUS_HEAP_REGION #ifdef KELVIN_HUMONGOUS_HEAP_REGION log_info(gc)("Decrementing humongous waste by %zu in ShenHeapRegion", waste_bytes); #endif diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp index de300e4134304..0e6bf946fcd05 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp @@ -136,7 +136,7 @@ class ShenandoahOldGeneration : public ShenandoahGeneration { // See description in field declaration void set_region_balance(ssize_t balance) { -#define KELVIN_BALANCIO +#undef KELVIN_BALANCIO #ifdef KELVIN_BALANCIO log_info(gc)("OldGen::set_region_balance(%zd)", balance); #endif diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp index 4416296b299a8..b2b5df9931ba3 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp @@ -371,7 +371,7 @@ class ShenandoahCalculateRegionStatsClosure : public ShenandoahHeapRegionClosure private: size_t _used, _committed, _garbage, _regions, _humongous_waste, _trashed_regions, _trashed_used; size_t _region_size_bytes, _min_free_size; -#define KELVIN_VERBOSE +#undef KELVIN_VERBOSE #ifdef KELVIN_VERBOSE const char* _nm; #endif @@ -389,7 +389,7 @@ class ShenandoahCalculateRegionStatsClosure : public ShenandoahHeapRegionClosure }; void heap_region_do(ShenandoahHeapRegion* r) override { -#define KELVIN_STATS +#undef KELVIN_STATS #ifdef KELVIN_STATS log_info(gc)("%s:ShenandoahCalculateRegionStatsClosure::heap_region_do(), %s r: %zu used: %zu, garbage: %zu, is_trash: %s", _nm, r->affiliation_name(), r->index(), r->used(), r->garbage(), r->is_trash()? "yes": "no"); @@ -487,7 +487,7 @@ class ShenandoahGenerationStatsClosure : public ShenandoahHeapRegionClosure { generation_used += pad; } -#define KELVIN_EXTRA_NOISE +#undef KELVIN_EXTRA_NOISE #ifdef KELVIN_EXTRA_NOISE log_info(gc)("%s: generation (%s) used size must be consistent: generation-used: %zu, regions-used from stats: %zu, stats.used_after_recycle: %zu, adjust_for_trash: %s", label, generation->name(), generation_used, stats.used(), stats.used_after_recycle(), adjust_for_trash? 
"yes": "no"); From 4afb041ecc30ffbc2666162f0296bcf7557fa614 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Tue, 22 Jul 2025 19:36:23 +0000 Subject: [PATCH 22/61] special handling for non-generational try_allocate_in() --- src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index a263556490883..48fafa59ce92c 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -1664,7 +1664,7 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah ShenandoahFreeSetPartitionId orig_partition; ShenandoahGeneration* request_generation = nullptr; if (req.is_mutator_alloc()) { - request_generation = _heap->young_generation(); + request_generation = _heap->mode()->is_generational()? _heap->young_generation(): _heap->global_generation(); orig_partition = ShenandoahFreeSetPartitionId::Mutator; } else if (req.type() == ShenandoahAllocRequest::_alloc_gclab) { request_generation = _heap->young_generation(); From 42abc63c50aae64c69ec7385a298fabde9bb9657 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Wed, 23 Jul 2025 15:10:44 -0600 Subject: [PATCH 23/61] Count waste as part of generation used --- .../share/gc/shenandoah/shenandoahFreeSet.cpp | 135 ++++++++++-------- .../share/gc/shenandoah/shenandoahFreeSet.hpp | 39 ++++- .../gc/shenandoah/shenandoahGeneration.cpp | 2 +- .../share/gc/shenandoah/shenandoahHeap.cpp | 6 +- .../gc/shenandoah/shenandoahHeapRegion.cpp | 4 +- .../gc/shenandoah/shenandoahHeapRegion.hpp | 2 +- 6 files changed, 123 insertions(+), 65 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index 48fafa59ce92c..f036ce3cd9fc0 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -38,7 +38,8 @@ #include "memory/resourceArea.hpp" #include "runtime/orderAccess.hpp" -static const char* partition_name(ShenandoahFreeSetPartitionId t) { +#ifdef KELVIN_USED_PARTITION +const char* partition_name(ShenandoahFreeSetPartitionId t) { switch (t) { case ShenandoahFreeSetPartitionId::NotFree: return "NotFree"; case ShenandoahFreeSetPartitionId::Mutator: return "Mutator"; @@ -49,8 +50,7 @@ static const char* partition_name(ShenandoahFreeSetPartitionId t) { return "Unrecognized"; } } - -#undef KELVIN_USED +#endif class ShenandoahLeftRightIterator { private: @@ -249,7 +249,7 @@ void ShenandoahRegionPartitions::make_all_regions_unavailable() { _humongous_waste[partition_id] = 0; _available[partition_id] = FreeSetUnderConstruction; } -#ifdef KELVIN_USED +#ifdef KELVIN_USED_PARTITION log_info(gc)("make_all_regions_unavailable() setting _used[] to 0 for all partitions"); #endif #undef KELVIN_REGION_COUNTS @@ -308,7 +308,7 @@ void ShenandoahRegionPartitions::establish_mutator_intervals(idx_t mutator_leftm log_info(gc)("establish_mutator_intervals() sets Mutator capacity: %zu", _capacity[int(ShenandoahFreeSetPartitionId::Mutator)]); log_info(gc)("establish_mutator_intervals() sets Collector capacity: %zu", _capacity[int(ShenandoahFreeSetPartitionId::Collector)]); #endif -#ifdef KELVIN_USED +#ifdef KELVIN_USED_PARTITION log_info(gc)("Setting Mutator used to %zu Collector to 0", mutator_used); #endif #ifdef KELVIN_REGION_COUNTS @@ -346,7 +346,7 @@ void 
ShenandoahRegionPartitions::establish_old_collector_intervals(idx_t old_col log_info(gc)("establish_old_collector_intervals() sets OldCollector capacity: %zu", _capacity[int(ShenandoahFreeSetPartitionId::OldCollector)]); #endif -#ifdef KELVIN_USED +#ifdef KELVIN_USED_PARTITION log_info(gc)("Setting OldCollector used to %zu", old_collector_used); #endif #ifdef KELVIN_REGION_COUNTS @@ -360,7 +360,7 @@ void ShenandoahRegionPartitions::increase_used(ShenandoahFreeSetPartitionId whic _used[int(which_partition)] += bytes; _available[int(which_partition)] -= bytes; -#ifdef KELVIN_USED +#ifdef KELVIN_USED_PARTITION log_info(gc)("ShenRegionPartitions %s increase_used(%zu) to %zu, available shrinks to %zu", partition_name(which_partition), bytes, _used[int(which_partition)], _available[int(which_partition)]); #endif @@ -369,21 +369,6 @@ void ShenandoahRegionPartitions::increase_used(ShenandoahFreeSetPartitionId whic _used[int(which_partition)], _capacity[int(which_partition)], bytes); } -void ShenandoahRegionPartitions::decrease_used(ShenandoahFreeSetPartitionId which_partition, size_t bytes) { - shenandoah_assert_heaplocked(); - assert (which_partition < NumPartitions, "Partition must be valid"); - assert (_used[int(which_partition)] >= bytes, - "Must not use (%zu) less than zero after decrease by %zu", - _used[int(which_partition)], bytes); - - _used[int(which_partition)] -= bytes; - _available[int(which_partition)] += bytes; -#ifdef KELVIN_USED - log_info(gc)("ShenRegionPartitions %s decrease_used(%zu) to %zu, available grows to %zu", - partition_name(which_partition), bytes, _used[int(which_partition)], _available[int(which_partition)]); -#endif -} - size_t ShenandoahRegionPartitions::get_used(ShenandoahFreeSetPartitionId which_partition) { assert (which_partition < NumPartitions, "Partition must be valid"); return _used[int(which_partition)];; @@ -400,18 +385,6 @@ void ShenandoahRegionPartitions::increase_humongous_waste(ShenandoahFreeSetParti #endif } -void ShenandoahRegionPartitions::decrease_humongous_waste(ShenandoahFreeSetPartitionId which_partition, size_t bytes) { - shenandoah_assert_heaplocked(); - assert (which_partition < NumPartitions, "Partition must be valid"); - assert(_humongous_waste[int(which_partition)] >= bytes, "Cannot decrease waste beyond what is there"); - - _humongous_waste[int(which_partition)] -= bytes; -#ifdef KELVIN_HUMONGOUS_WASTE - log_info(gc)("FreeSet<%s>::decrease_humongous_waste(%zu) yields: %zu", partition_name(which_partition), - bytes, _humongous_waste[int(which_partition)]); -#endif -} - size_t ShenandoahRegionPartitions::get_humongous_waste(ShenandoahFreeSetPartitionId which_partition) { assert (which_partition < NumPartitions, "Partition must be valid"); return _humongous_waste[int(which_partition)];; @@ -709,7 +682,7 @@ size_t ShenandoahRegionPartitions::retire_from_partition(ShenandoahFreeSetPartit if (fill_words >= ShenandoahHeap::min_fill_size()) { r->allocate_fill(fill_words); } -#ifdef KELVIN_USED +#ifdef KELVIN_USED_PARTITION else { log_info(gc)("KELVIN!!!! 
Did not fill because padding: %zu is too small", waste_bytes); } @@ -727,6 +700,12 @@ size_t ShenandoahRegionPartitions::retire_from_partition(ShenandoahFreeSetPartit return waste_bytes; } +void ShenandoahRegionPartitions::unretire_to_partition(ShenandoahHeapRegion* r, ShenandoahFreeSetPartitionId which_partition) { + shenandoah_assert_heaplocked(); + make_free(r->index(), which_partition, r->free()); +} + + // The caller is responsible for increasing capacity and available and used in which_partition, and decreasing the // same quantities for the original partition void ShenandoahRegionPartitions::make_free(idx_t idx, ShenandoahFreeSetPartitionId which_partition, size_t available) { @@ -971,12 +950,14 @@ void ShenandoahRegionPartitions::assert_bounds(bool validate_totals) { size_t capacities[UIntNumPartitions]; size_t used[UIntNumPartitions]; size_t regions[UIntNumPartitions]; + size_t humongous_waste[UIntNumPartitions]; // We don't know whether young retired regions belonged to Mutator or Collector before they were retired. // We just tally the total, and divide it to make matches work if possible. size_t young_retired_regions = 0; size_t young_retired_used = 0; size_t young_retired_capacity = 0; + size_t young_humongous_waste = 0; idx_t leftmosts[UIntNumPartitions]; idx_t rightmosts[UIntNumPartitions]; @@ -991,31 +972,47 @@ void ShenandoahRegionPartitions::assert_bounds(bool validate_totals) { capacities[i] = 0; used[i] = 0; regions[i] = 0; + humongous_waste[i] = 0; } size_t min_free_size = ShenandoahHeap::min_fill_size() * HeapWordSize; for (idx_t i = 0; i < _max; i++) { ShenandoahFreeSetPartitionId partition = membership(i); size_t capacity = _free_set->alloc_capacity(i); - if (capacity < min_free_size) { - assert(capacity / HeapWordSize < ShenandoahHeap::min_fill_size(), "pad should be filled"); - // this region has been retired already, count it as entirely consumed - capacity = 0; - } switch (partition) { case ShenandoahFreeSetPartitionId::NotFree: { assert(!validate_totals || (capacity != _region_size_bytes), "Should not be retired if empty"); ShenandoahHeapRegion* r = ShenandoahHeap::heap()->get_region(i); - if (r->is_old()) { - regions[int(ShenandoahFreeSetPartitionId::OldCollector)]++; - used[int(ShenandoahFreeSetPartitionId::OldCollector)] += _region_size_bytes - capacity; - capacities[int(ShenandoahFreeSetPartitionId::OldCollector)] += _region_size_bytes; + if (r->is_humongous()) { + if (r->is_old()) { + regions[int(ShenandoahFreeSetPartitionId::OldCollector)]++; + used[int(ShenandoahFreeSetPartitionId::OldCollector)] += _region_size_bytes; + capacities[int(ShenandoahFreeSetPartitionId::OldCollector)] += _region_size_bytes; + humongous_waste[int(ShenandoahFreeSetPartitionId::OldCollector)] += capacity; + } else { + assert(r->is_young(), "Must be young if not old"); + young_retired_regions++; + young_retired_used += _region_size_bytes; + young_retired_capacity += _region_size_bytes; + young_humongous_waste += capacity; + } } else { - assert(r->is_young(), "Must be young if not old"); - young_retired_regions++; - young_retired_used += _region_size_bytes - capacity; - young_retired_capacity += _region_size_bytes; + if (capacity < min_free_size) { + assert(capacity / HeapWordSize < ShenandoahHeap::min_fill_size(), "pad should be filled"); + // this region has been retired already, count it as entirely consumed + capacity = 0; + } + if (r->is_old()) { + regions[int(ShenandoahFreeSetPartitionId::OldCollector)]++; + used[int(ShenandoahFreeSetPartitionId::OldCollector)] += 
_region_size_bytes - capacity; + capacities[int(ShenandoahFreeSetPartitionId::OldCollector)] += _region_size_bytes; + } else { + assert(r->is_young(), "Must be young if not old"); + young_retired_regions++; + young_retired_used += _region_size_bytes - capacity; + young_retired_capacity += _region_size_bytes; + } } } break; @@ -1152,17 +1149,19 @@ void ShenandoahRegionPartitions::assert_bounds(bool validate_totals) { end_off, rightmost_empty(ShenandoahFreeSetPartitionId::OldCollector)); if (validate_totals) { - assert(capacities[int(ShenandoahFreeSetPartitionId::OldCollector)] == _capacity[int(ShenandoahFreeSetPartitionId::OldCollector)], - "Old collector capacities must match"); - assert(used[int(ShenandoahFreeSetPartitionId::OldCollector)] == _used[int(ShenandoahFreeSetPartitionId::OldCollector)], - "Old collector used must match"); - assert(regions[int(ShenandoahFreeSetPartitionId::OldCollector)] == _total_region_counts[int(ShenandoahFreeSetPartitionId::OldCollector)], - "Old collector regions must match"); - assert(_capacity[int(ShenandoahFreeSetPartitionId::OldCollector)] >= _used[int(ShenandoahFreeSetPartitionId::OldCollector)], - "Old Collector capacity must be >= used"); + assert(capacities[int(ShenandoahFreeSetPartitionId::OldCollector)] + == _capacity[int(ShenandoahFreeSetPartitionId::OldCollector)], "Old collector capacities must match"); + assert(used[int(ShenandoahFreeSetPartitionId::OldCollector)] + == _used[int(ShenandoahFreeSetPartitionId::OldCollector)], "Old collector used must match"); + assert(regions[int(ShenandoahFreeSetPartitionId::OldCollector)] + == _total_region_counts[int(ShenandoahFreeSetPartitionId::OldCollector)], "Old collector regions must match"); + assert(_capacity[int(ShenandoahFreeSetPartitionId::OldCollector)] + >= _used[int(ShenandoahFreeSetPartitionId::OldCollector)], "Old Collector capacity must be >= used"); assert(_available[int(ShenandoahFreeSetPartitionId::OldCollector)] == (_capacity[int(ShenandoahFreeSetPartitionId::OldCollector)] - _used[int(ShenandoahFreeSetPartitionId::OldCollector)]), "Old Collector available must equal capacity minus used"); + assert(_humongous_waste[int(ShenandoahFreeSetPartitionId::OldCollector)] == + humongous_waste[int(ShenandoahFreeSetPartitionId::OldCollector)], "Old Collector humongous waste must match"); assert(_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] >= capacities[int(ShenandoahFreeSetPartitionId::Mutator)], "Capacity tally must be >= counted tally"); @@ -1210,10 +1209,12 @@ void ShenandoahRegionPartitions::assert_bounds(bool validate_totals) { assert(regions[int(ShenandoahFreeSetPartitionId::Mutator)] == _total_region_counts[int(ShenandoahFreeSetPartitionId::Mutator)], "Mutator regions must match"); assert(_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] >= _used[int(ShenandoahFreeSetPartitionId::Mutator)], - "Mutator apacity must be >= used"); + "Mutator capacity must be >= used"); assert(_available[int(ShenandoahFreeSetPartitionId::Mutator)] == (_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] - _used[int(ShenandoahFreeSetPartitionId::Mutator)]), "Mutator available must equal capacity minus used"); + assert(_humongous_waste[int(ShenandoahFreeSetPartitionId::Mutator)] == young_humongous_waste, + "Mutator humongous waste must match"); } } #endif @@ -3337,6 +3338,26 @@ void ShenandoahFreeSet::log_status() { } } +void ShenandoahFreeSet::decrease_humongous_waste_for_regular_bypass(ShenandoahHeapRegion*r, size_t waste) { + shenandoah_assert_heaplocked(); + ShenandoahFreeSetPartitionId p = + 
r->is_old()? ShenandoahFreeSetPartitionId::OldCollector: ShenandoahFreeSetPartitionId::Mutator; + _partitions.decrease_humongous_waste(p, waste); + if ((_partitions.membership(r->index()) == ShenandoahFreeSetPartitionId::NotFree) && + (waste >= PLAB::min_size() * HeapWordSize)) { + _partitions.decrease_used(p, waste); + _partitions.unretire_to_partition(r, p); + } +#ifdef ASSERT + else if (waste >= ShenandoahHeap::min_fill_size() * HeapWordSize) { + // Fill the unused memory so that verification will not be confused by inconsistent tallies of used + size_t fill_words = waste / HeapWordSize; + r->allocate_fill(fill_words); + } +#endif +} + + HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_region) { shenandoah_assert_heaplocked(); if (ShenandoahHeapRegion::requires_humongous(req.size())) { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp index cb555ddd038a9..4bb64fa5dafee 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp @@ -203,6 +203,8 @@ class ShenandoahRegionPartitions { // is now considered fully used, since the region is presumably used to represent a humongous object. void retire_range_from_partition(ShenandoahFreeSetPartitionId partition, idx_t low_idx, idx_t high_idx); + void unretire_to_partition(ShenandoahHeapRegion* region, ShenandoahFreeSetPartitionId which_partition); + // Place region idx into free set which_partition. Requires that idx is currently NotFree. void make_free(idx_t idx, ShenandoahFreeSetPartitionId which_partition, size_t region_capacity); @@ -294,11 +296,40 @@ class ShenandoahRegionPartitions { inline size_t get_available(ShenandoahFreeSetPartitionId which_partition); inline void increase_used(ShenandoahFreeSetPartitionId which_partition, size_t bytes); - inline void decrease_used(ShenandoahFreeSetPartitionId which_partition, size_t bytes); + inline void decrease_used(ShenandoahFreeSetPartitionId which_partition, size_t bytes) { + shenandoah_assert_heaplocked(); + assert (which_partition < NumPartitions, "Partition must be valid"); + assert (_used[int(which_partition)] >= bytes, + "Must not use (%zu) less than zero after decrease by %zu", + _used[int(which_partition)], bytes); + + _used[int(which_partition)] -= bytes; + _available[int(which_partition)] += bytes; +#define KELVIN_USED_PARTITION +#ifdef KELVIN_USED_PARTITION + extern const char* partition_name(ShenandoahFreeSetPartitionId t); + log_info(gc)("ShenRegionPartitions %s decrease_used(%zu) to %zu, available grows to %zu", + partition_name(which_partition), bytes, _used[int(which_partition)], _available[int(which_partition)]); +#endif + } + inline size_t get_used(ShenandoahFreeSetPartitionId which_partition); inline void increase_humongous_waste(ShenandoahFreeSetPartitionId which_partition, size_t bytes); - inline void decrease_humongous_waste(ShenandoahFreeSetPartitionId which_partition, size_t bytes); + inline void decrease_humongous_waste(ShenandoahFreeSetPartitionId which_partition, size_t bytes) { + shenandoah_assert_heaplocked(); + assert (which_partition < NumPartitions, "Partition must be valid"); + assert(_humongous_waste[int(which_partition)] >= bytes, "Cannot decrease waste beyond what is there"); + + _humongous_waste[int(which_partition)] -= bytes; +#undef KELVIN_HUMONGOUS_WASTE +#ifdef KELVIN_HUMONGOUS_WASTE + extern const char* partition_name(ShenandoahFreeSetPartitionId t); + 
log_info(gc)("FreeSet<%s>::decrease_humongous_waste(%zu) yields: %zu", partition_name(which_partition), + bytes, _humongous_waste[int(which_partition)]); +#endif + } + inline size_t get_humongous_waste(ShenandoahFreeSetPartitionId which_partition); inline void set_bias_from_left_to_right(ShenandoahFreeSetPartitionId which_partition, bool value) { @@ -439,7 +470,7 @@ class ShenandoahFreeSet : public CHeapObj { size_t region_size_bytes = _partitions.region_size_bytes(); _total_young_used = (_partitions.used_by(ShenandoahFreeSetPartitionId::Mutator) + _partitions.used_by(ShenandoahFreeSetPartitionId::Collector)); -#undef KELVIN_USED +#define KELVIN_USED #ifdef KELVIN_USED log_info(gc)(" recompute_total_young_used(): %zu from total regions M: %zu, C: %zu, allocatable regions M: %zu, C: %zu, " "M used: %zu, C used: %zu", _total_young_used, @@ -697,6 +728,8 @@ class ShenandoahFreeSet : public CHeapObj { inline size_t humongous_waste_in_mutator() const { return _partitions.humongous_waste(ShenandoahFreeSetPartitionId::Mutator); } inline size_t humongous_waste_in_old() const { return _partitions.humongous_waste(ShenandoahFreeSetPartitionId::OldCollector); } + void decrease_humongous_waste_for_regular_bypass(ShenandoahHeapRegion* r, size_t waste); + HeapWord* allocate(ShenandoahAllocRequest& req, bool& in_new_region); /* diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp index 35c7301a62311..7504c786be0fa 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp @@ -934,7 +934,7 @@ void ShenandoahGeneration::establish_usage(size_t num_regions, size_t num_bytes, void ShenandoahGeneration::increase_used(size_t bytes) { Atomic::add(&_used, bytes); -#undef KELVIN_MONITOR_USED +#define KELVIN_MONITOR_USED #ifdef KELVIN_MONITOR_USED log_info(gc)("Generation %s increase_used(%zu) to %zu", shenandoah_generation_name(_type), bytes, _used); #endif diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp index 9620f8ab2809c..32b01c4d13267 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -739,8 +739,8 @@ void ShenandoahHeap::increase_used(const ShenandoahAllocRequest& req) { // padding and actual size both count towards allocation counter generation->increase_allocated(actual_bytes + wasted_bytes); - // only actual size counts toward usage for mutator allocations - increase_used(generation, actual_bytes); + // Used within generation is actual bytes + alignment padding (wasted bytes) + increase_used(generation, actual_bytes + wasted_bytes); // notify pacer of both actual size and waste notify_mutator_alloc_words(req.actual_size(), req.waste()); @@ -760,8 +760,10 @@ void ShenandoahHeap::increase_humongous_waste(ShenandoahGeneration* generation, void ShenandoahHeap::decrease_humongous_waste(ShenandoahGeneration* generation, size_t bytes) { generation->decrease_humongous_waste(bytes); + generation->decrease_used(bytes); if (!generation->is_global()) { global_generation()->decrease_humongous_waste(bytes); + global_generation()->decrease_used(bytes); } } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp index f7adf37de9dbc..36c9373d0cc34 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp +++ 
b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp @@ -162,6 +162,7 @@ void ShenandoahHeapRegion::make_regular_bypass() { // regular regions. The 'waste' in the last region is no longer wasted at this point, // so we must stop treating it as such. decrement_humongous_waste(); + } set_state(_regular); return; @@ -907,7 +908,7 @@ void ShenandoahHeapRegion::set_affiliation(ShenandoahAffiliation new_affiliation heap->set_affiliation(this, new_affiliation); } -void ShenandoahHeapRegion::decrement_humongous_waste() const { +void ShenandoahHeapRegion::decrement_humongous_waste() { assert(is_humongous(), "Should only use this for humongous regions"); size_t waste_bytes = free(); if (waste_bytes > 0) { @@ -918,5 +919,6 @@ void ShenandoahHeapRegion::decrement_humongous_waste() const { log_info(gc)("Decrementing humongous waste by %zu in ShenHeapRegion", waste_bytes); #endif heap->decrease_humongous_waste(generation, waste_bytes); + heap->free_set()->decrease_humongous_waste_for_regular_bypass(this, waste_bytes); } } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp index 742b8cddeccdc..a56efcc4ebae9 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp @@ -495,7 +495,7 @@ class ShenandoahHeapRegion { } private: - void decrement_humongous_waste() const; + void decrement_humongous_waste(); void do_commit(); void do_uncommit(); From fec9d3fed24a54756058b44dea03d3351830abf6 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Sat, 26 Jul 2025 17:38:21 -0600 Subject: [PATCH 24/61] more bug fixes --- .../share/gc/shenandoah/shenandoahFreeSet.cpp | 204 +++++++++++------- .../share/gc/shenandoah/shenandoahFreeSet.hpp | 13 +- .../gc/shenandoah/shenandoahGeneration.cpp | 8 +- .../gc/shenandoah/shenandoahGeneration.hpp | 9 +- .../shenandoahGenerationalEvacuationTask.cpp | 7 +- .../share/gc/shenandoah/shenandoahHeap.cpp | 2 - 6 files changed, 153 insertions(+), 90 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index f036ce3cd9fc0..5cb380b8d62dc 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -38,8 +38,7 @@ #include "memory/resourceArea.hpp" #include "runtime/orderAccess.hpp" -#ifdef KELVIN_USED_PARTITION -const char* partition_name(ShenandoahFreeSetPartitionId t) { +static const char* partition_name(ShenandoahFreeSetPartitionId t) { switch (t) { case ShenandoahFreeSetPartitionId::NotFree: return "NotFree"; case ShenandoahFreeSetPartitionId::Mutator: return "Mutator"; @@ -50,8 +49,20 @@ const char* partition_name(ShenandoahFreeSetPartitionId t) { return "Unrecognized"; } } + +#ifdef KELVIN_USED_PARTITION +const char* _shenandoah_partition_name(ShenandoahFreeSetPartitionId t) { + return partition_name(t); +} +#endif + +#ifdef KELVIN_HUMONGOUS_WASTE +const char* _shenandoah_humongous_partition_name(ShenandoahFreeSetPartitionId t) { + return partition_name(t); +} #endif + class ShenandoahLeftRightIterator { private: idx_t _idx; @@ -993,16 +1004,16 @@ void ShenandoahRegionPartitions::assert_bounds(bool validate_totals) { } else { assert(r->is_young(), "Must be young if not old"); young_retired_regions++; + // Count entire region as used even if there is some waste. 
young_retired_used += _region_size_bytes; young_retired_capacity += _region_size_bytes; young_humongous_waste += capacity; } } else { - if (capacity < min_free_size) { - assert(capacity / HeapWordSize < ShenandoahHeap::min_fill_size(), "pad should be filled"); - // this region has been retired already, count it as entirely consumed - capacity = 0; - } + assert(r->is_cset() || (capacity < min_free_size), "Retired regions should be filled already"); + // This region has been retired already or it is in the cset. In either case, we set capacity to zero + // so that the entire region will be counted as used. We count young cset regions as "retired". + capacity = 0; if (r->is_old()) { regions[int(ShenandoahFreeSetPartitionId::OldCollector)]++; used[int(ShenandoahFreeSetPartitionId::OldCollector)] += _region_size_bytes - capacity; @@ -1149,6 +1160,17 @@ void ShenandoahRegionPartitions::assert_bounds(bool validate_totals) { end_off, rightmost_empty(ShenandoahFreeSetPartitionId::OldCollector)); if (validate_totals) { + // young_retired_regions need to be added to either Mutator or Collector partitions, 100% used. + // Give enough of young_retired_regions, young_retired_capacity, young_retired_user + // to the Mutator partition to top it off so that it matches the running totals. + // + // Give any remnants to the Collector partition. After topping off the Collector partition, its values + // should also match running totals. + + assert(young_retired_regions * _region_size_bytes == young_retired_capacity, "sanity"); + assert(young_retired_capacity == young_retired_used, "sanity"); + + assert(capacities[int(ShenandoahFreeSetPartitionId::OldCollector)] == _capacity[int(ShenandoahFreeSetPartitionId::OldCollector)], "Old collector capacities must match"); assert(used[int(ShenandoahFreeSetPartitionId::OldCollector)] @@ -1164,38 +1186,39 @@ void ShenandoahRegionPartitions::assert_bounds(bool validate_totals) { humongous_waste[int(ShenandoahFreeSetPartitionId::OldCollector)], "Old Collector humongous waste must match"); assert(_capacity[int(ShenandoahFreeSetPartitionId::Mutator)] >= capacities[int(ShenandoahFreeSetPartitionId::Mutator)], - "Capacity tally must be >= counted tally"); - size_t mutator_capacity_delta = + "Capacity total must be >= counted tally"); + size_t mutator_capacity_shortfall = _capacity[int(ShenandoahFreeSetPartitionId::Mutator)] - capacities[int(ShenandoahFreeSetPartitionId::Mutator)]; - assert(mutator_capacity_delta <= young_retired_capacity, "sanity"); - capacities[int(ShenandoahFreeSetPartitionId::Mutator)] += mutator_capacity_delta; - young_retired_capacity -= mutator_capacity_delta; + assert(mutator_capacity_shortfall <= young_retired_capacity, "sanity"); + capacities[int(ShenandoahFreeSetPartitionId::Mutator)] += mutator_capacity_shortfall; + young_retired_capacity -= mutator_capacity_shortfall; capacities[int(ShenandoahFreeSetPartitionId::Collector)] += young_retired_capacity; + assert(_used[int(ShenandoahFreeSetPartitionId::Mutator)] >= used[int(ShenandoahFreeSetPartitionId::Mutator)], - "Used tally must be >= counted tally"); - size_t mutator_used_delta = + "Used total must be >= counted tally"); + size_t mutator_used_shortfall = _used[int(ShenandoahFreeSetPartitionId::Mutator)] - used[int(ShenandoahFreeSetPartitionId::Mutator)]; - assert(mutator_used_delta <= young_retired_used, "sanity"); - used[int(ShenandoahFreeSetPartitionId::Mutator)] += mutator_used_delta; - young_retired_used -= mutator_used_delta; + assert(mutator_used_shortfall <= young_retired_used, "sanity"); + 
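// --- Editor's illustrative note (not part of this patch) --------------------------------
// A minimal worked example of the reconciliation performed here, with hypothetical numbers.
// Suppose the running totals say the Mutator partition has 10 regions / 20 MiB used, but the
// walk over allocatable regions counted only 8 regions / 16 MiB, while it also found 5 retired
// young regions (fully used, 2 MiB each):
//
//   mutator_used_shortfall = 20 MiB - 16 MiB = 4 MiB   (two retired regions' worth)
//   -> credit 4 MiB / 2 regions of the retired pool to the Mutator tallies
//   -> the remaining 6 MiB / 3 regions go to the Collector tallies, after which both
//      partitions' counted tallies should equal the running totals.
static void sketch_top_off(size_t& counted, size_t running_total, size_t& retired_pool) {
  // Move just enough of the retired pool into 'counted' to match the running total;
  // whatever remains belongs to the other (Collector) partition.
  size_t shortfall = running_total - counted;   // assumes running_total >= counted
  counted += shortfall;
  retired_pool -= shortfall;                    // assumes shortfall <= retired_pool
}
// -----------------------------------------------------------------------------------------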
used[int(ShenandoahFreeSetPartitionId::Mutator)] += mutator_used_shortfall; + young_retired_used -= mutator_used_shortfall; used[int(ShenandoahFreeSetPartitionId::Collector)] += young_retired_used; assert(_total_region_counts[int(ShenandoahFreeSetPartitionId::Mutator)] >= regions[int(ShenandoahFreeSetPartitionId::Mutator)], - "Region tally must be >= counted tally"); - size_t mutator_regions_delta = + "Region total must be >= counted tally"); + size_t mutator_regions_shortfall = _total_region_counts[int(ShenandoahFreeSetPartitionId::Mutator)] - regions[int(ShenandoahFreeSetPartitionId::Mutator)]; - assert(mutator_regions_delta <= young_retired_regions, "sanity"); - regions[int(ShenandoahFreeSetPartitionId::Mutator)] += mutator_regions_delta; - young_retired_regions -= mutator_regions_delta; + assert(mutator_regions_shortfall <= young_retired_regions, "sanity"); + regions[int(ShenandoahFreeSetPartitionId::Mutator)] += mutator_regions_shortfall; + young_retired_regions -= mutator_regions_shortfall; regions[int(ShenandoahFreeSetPartitionId::Collector)] += young_retired_regions; assert(capacities[int(ShenandoahFreeSetPartitionId::Collector)] == _capacity[int(ShenandoahFreeSetPartitionId::Collector)], "Collector capacities must match"); assert(used[int(ShenandoahFreeSetPartitionId::Collector)] == _used[int(ShenandoahFreeSetPartitionId::Collector)], "Collector used must match"); - assert(regions[int(ShenandoahFreeSetPartitionId::Collector)] == _total_region_counts[int(ShenandoahFreeSetPartitionId::Collector)], - "Collector regions must match"); + assert(regions[int(ShenandoahFreeSetPartitionId::Collector)] == + _total_region_counts[int(ShenandoahFreeSetPartitionId::Collector)], "Collector regions must match"); assert(_capacity[int(ShenandoahFreeSetPartitionId::Collector)] >= _used[int(ShenandoahFreeSetPartitionId::Collector)], "Collector Capacity must be >= used"); assert(_available[int(ShenandoahFreeSetPartitionId::Collector)] == @@ -1668,7 +1691,7 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah request_generation = _heap->mode()->is_generational()? _heap->young_generation(): _heap->global_generation(); orig_partition = ShenandoahFreeSetPartitionId::Mutator; } else if (req.type() == ShenandoahAllocRequest::_alloc_gclab) { - request_generation = _heap->young_generation(); + request_generation = _heap->mode()->is_generational()? _heap->young_generation(): _heap->global_generation(); orig_partition = ShenandoahFreeSetPartitionId::Collector; } else if (req.type() == ShenandoahAllocRequest::_alloc_plab) { request_generation = _heap->old_generation(); @@ -1679,7 +1702,7 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah request_generation = _heap->old_generation(); orig_partition = ShenandoahFreeSetPartitionId::OldCollector; } else { - request_generation = _heap->young_generation(); + request_generation = _heap->mode()->is_generational()? _heap->young_generation(): _heap->global_generation(); orig_partition = ShenandoahFreeSetPartitionId::Collector; } } @@ -1813,7 +1836,7 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) { _heap->notify_mutator_alloc_words(ShenandoahHeapRegion::region_size_words() - remainder, true); } - // retire_range_from_partition() will adjust bounds on Mutator free set if appropriate and will recompute affiliated + // retire_range_from_partition() will adjust bounds on Mutator free set if appropriate and will recompute affiliated. 
_partitions.retire_range_from_partition(ShenandoahFreeSetPartitionId::Mutator, beg, end); size_t total_humongous_size = ShenandoahHeapRegion::region_size_bytes() * num; _partitions.increase_used(ShenandoahFreeSetPartitionId::Mutator, total_humongous_size); @@ -1821,11 +1844,9 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) { if (remainder != 0) { size_t waste = ShenandoahHeapRegion::region_size_words() - remainder; req.set_waste(waste); - #ifdef KELVIN_HUMONGOUS_WASTE log_info(gc)("FreeSet alloc_contiguous increasing mutator humongous waste by %zu bytes", waste * HeapWordSize); #endif - _partitions.increase_humongous_waste(ShenandoahFreeSetPartitionId::Mutator, waste * HeapWordSize); } @@ -2128,6 +2149,9 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r old_trashed_regions = 0; young_trashed_regions = 0; + size_t old_cset_regions = 0; + size_t young_cset_regions = 0; + size_t region_size_bytes = _partitions.region_size_bytes(); size_t max_regions = _partitions.max_regions(); @@ -2217,6 +2241,7 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r } if (ac == region_size_bytes) { mutator_empty++; + affiliated_mutator_regions--; if (idx < mutator_leftmost_empty) { mutator_leftmost_empty = idx; } @@ -2226,6 +2251,7 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r } mutator_regions++; total_mutator_regions++; + affiliated_mutator_regions++; mutator_used += (region_size_bytes - ac); } else { // !region->is_trash() && region is_old() @@ -2252,13 +2278,11 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r } } else { // This region does not have enough free to be part of the free set. Count all of its memory as used. -#ifdef KELVIN_MYSTERY - if (region->used() != region_size_bytes) { - log_info(gc)("KELVIN!!! find_alloc_capacity() for region %zu is consuming %zu bytes of waste", - idx, region_size_bytes - region->used()); + assert(_partitions.membership(idx) == ShenandoahFreeSetPartitionId::NotFree, "Region should have been retired"); + if (ac >= ShenandoahHeap::min_fill_size() * HeapWordSize) { + size_t fill_words = ac / HeapWordSize; + region->allocate_fill(fill_words); } -#endif - if (region->is_old()) { old_collector_used += region_size_bytes; total_old_collector_regions++; @@ -2270,29 +2294,47 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r } } } else { - // This region does not allow allocation (it is retired or is humongous). Count all of its memory as used. - size_t humongous_waste_bytes = 0; - if (region->is_humongous_start()) { - oop obj = cast_to_oop(region->bottom()); - size_t byte_size = obj->size() * HeapWordSize; - size_t region_span = ShenandoahHeapRegion::required_regions(byte_size); - humongous_waste_bytes = region_span * ShenandoahHeapRegion::region_size_words() - byte_size; - } - if (region->is_old()) { - old_collector_used += region_size_bytes; - total_old_collector_regions++; - old_collector_humongous_waste += humongous_waste_bytes; - affiliated_old_collector_regions++; + // This region does not allow allocation (it is retired or is humongous or is in cset). + // Retired and humongous regions generally have no alloc capacity, but cset regions may have large alloc capacity. 
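// --- Editor's illustrative note (not part of this patch) --------------------------------
// Sketch of the humongous-waste arithmetic used in the hunks above, with assumed numbers
// (the region size and object size below are hypothetical):
//
//   region_size_bytes = 2 MiB, humongous object byte_size = 5 MiB
//   region_span       = required_regions(5 MiB) = 3
//   humongous_waste   = 3 * 2 MiB - 5 MiB = 1 MiB
//
// That 1 MiB is charged as humongous waste to the partition owning the regions (Mutator at
// allocation time; it is transferred to OldCollector if the object is later promoted).
static size_t sketch_humongous_waste(size_t region_size_bytes, size_t object_bytes) {
  // Round the object up to a whole number of regions; the waste is the slack in the
  // last spanned region.
  size_t spanned = (object_bytes + region_size_bytes - 1) / region_size_bytes;
  return spanned * region_size_bytes - object_bytes;
}
// -----------------------------------------------------------------------------------------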
+ if (region->is_cset()) { + if (region->is_old()) { + old_cset_regions++; + } else { + young_cset_regions++; + } + } else { - mutator_used += region_size_bytes; - total_mutator_regions++; - mutator_humongous_waste += humongous_waste_bytes; - affiliated_mutator_regions++; + assert(_partitions.membership(idx) == ShenandoahFreeSetPartitionId::NotFree, "Region should have been retired"); + size_t ac = alloc_capacity(region); + size_t humongous_waste_bytes = 0; + if (region->is_humongous_start()) { + oop obj = cast_to_oop(region->bottom()); + size_t byte_size = obj->size() * HeapWordSize; + size_t region_span = ShenandoahHeapRegion::required_regions(byte_size); + humongous_waste_bytes = region_span * ShenandoahHeapRegion::region_size_words() - byte_size; + } else if (ac >= ShenandoahHeap::min_fill_size() * HeapWordSize) { + size_t fill_words = ac / HeapWordSize; + region->allocate_fill(fill_words); + } + if (region->is_old()) { + old_collector_used += region_size_bytes; + total_old_collector_regions++; + old_collector_humongous_waste += humongous_waste_bytes; + affiliated_old_collector_regions++; + } else { + mutator_used += region_size_bytes; + total_mutator_regions++; + mutator_humongous_waste += humongous_waste_bytes; + affiliated_mutator_regions++; + } } } } - // At freeset rebuild time, which precedes recycling of collection set, we treat all cset regions as - // part of capacity, as fully available, as unaffiliated. We place trashed regions into the Mutator partition. + // At the start of evacuation, the cset regions are not counted as part of Mutator or OldCollector partitions. + + // At the end of GC, when we rebuild rebuild freeset (which happens before we have recycled the collection set), we treat + // all cset regions as part of capacity, as fully available, as unaffiliated. We place trashed regions into the Mutator + // partition. // No need to update generation sizes here. These are the sizes already recognized by the generations. These // adjustments allow the freeset tallies to match the generation tallies. @@ -2347,13 +2389,15 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r idx_t rightmost_empty_idx = (mutator_leftmost_empty == max_regions)? -1: (idx_t) mutator_rightmost_empty; _partitions.establish_mutator_intervals(mutator_leftmost, rightmost_idx, mutator_leftmost_empty, rightmost_empty_idx, - total_mutator_regions, mutator_empty, mutator_regions, mutator_used, - mutator_humongous_waste); + total_mutator_regions + young_cset_regions, mutator_empty, mutator_regions, + mutator_used + young_cset_regions * region_size_bytes, mutator_humongous_waste); rightmost_idx = (old_collector_leftmost == max_regions)? -1: (idx_t) old_collector_rightmost; rightmost_empty_idx = (old_collector_leftmost_empty == max_regions)? 
-1: (idx_t) old_collector_rightmost_empty; _partitions.establish_old_collector_intervals(old_collector_leftmost, rightmost_idx, - old_collector_leftmost_empty, rightmost_empty_idx, total_old_collector_regions, - old_collector_empty, old_collector_regions, old_collector_used, + old_collector_leftmost_empty, rightmost_empty_idx, + total_old_collector_regions + old_cset_regions, + old_collector_empty, old_collector_regions, + old_collector_used + old_cset_regions * region_size_bytes, old_collector_humongous_waste); recompute_total_used(); recompute_total_affiliated(); @@ -2361,10 +2405,19 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r // Update generations to assure consistency while we still hold the lock. This handles case that someone consults // generation sizes between now and start of finish_rebuild. This may release from old memory that we intend to reserve // for the old collector. - establish_generation_sizes(total_mutator_regions, total_old_collector_regions, - affiliated_mutator_regions, affiliated_old_collector_regions, - mutator_used, old_collector_used); + establish_generation_sizes(total_mutator_regions + young_cset_regions, total_old_collector_regions + old_cset_regions, + affiliated_mutator_regions + young_cset_regions, + affiliated_old_collector_regions + old_cset_regions, + mutator_used + young_cset_regions * region_size_bytes, + old_collector_used + old_cset_regions * region_size_bytes); +#ifdef ASSERT + if (_heap->mode()->is_generational()) { + assert(young_affiliated_regions() == _heap->young_generation()->get_affiliated_region_count(), "sanity"); + } else { + assert(young_affiliated_regions() == _heap->global_generation()->get_affiliated_region_count(), "sanity"); + } +#endif log_debug(gc, free)(" After find_regions_with_alloc_capacity(), Mutator range [%zd, %zd]," " Old Collector range [%zd, %zd]", _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator), @@ -2374,8 +2427,9 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r } void ShenandoahFreeSet::transfer_humongous_regions_from_mutator_to_old_collector(size_t xfer_regions, - size_t humongous_waste_bytes) { + size_t humongous_waste_bytes) { shenandoah_assert_heaplocked(); + size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); #ifdef KELVIN_HUMONGOUS_WASTE log_info(gc)("FreeSet humongous promotion, with waste %zu", humongous_waste_bytes); @@ -2387,9 +2441,11 @@ void ShenandoahFreeSet::transfer_humongous_regions_from_mutator_to_old_collector _partitions.decrease_total_region_counts(ShenandoahFreeSetPartitionId::Mutator, xfer_regions); _partitions.decrease_humongous_waste(ShenandoahFreeSetPartitionId::Mutator, humongous_waste_bytes); + _partitions.decrease_used(ShenandoahFreeSetPartitionId::Mutator, xfer_regions * region_size_bytes); _partitions.increase_total_region_counts(ShenandoahFreeSetPartitionId::OldCollector, xfer_regions); _partitions.increase_humongous_waste(ShenandoahFreeSetPartitionId::OldCollector, humongous_waste_bytes); + _partitions.increase_used(ShenandoahFreeSetPartitionId::OldCollector, xfer_regions * region_size_bytes); recompute_total_young_used(); recompute_total_old_used(); @@ -2587,13 +2643,13 @@ void ShenandoahFreeSet::establish_generation_sizes(size_t young_region_count, si log_info(gc)(" young_used_bytes: %zu, old_used_bytes: %zu)", young_used_bytes, old_used_bytes); #endif - if (ShenandoahHeap::heap()->mode()->is_generational()) { - ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap(); - 
ShenandoahOldGeneration* old_gen = heap->old_generation(); - ShenandoahYoungGeneration* young_gen = heap->young_generation(); - ShenandoahGeneration* global_gen = heap->global_generation(); - - size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); + ShenandoahHeap* heap = ShenandoahHeap::heap(); + size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); + if (heap->mode()->is_generational()) { + ShenandoahGenerationalHeap* gen_heap = (ShenandoahGenerationalHeap*) heap; + ShenandoahOldGeneration* old_gen = gen_heap->old_generation(); + ShenandoahYoungGeneration* young_gen = gen_heap->young_generation(); + ShenandoahGeneration* global_gen = gen_heap->global_generation(); size_t original_old_capacity = old_gen->max_capacity(); size_t new_old_capacity = old_region_count * region_size_bytes; @@ -2614,6 +2670,10 @@ void ShenandoahFreeSet::establish_generation_sizes(size_t young_region_count, si log_info(gc, ergo)("Transfer %zu region(s) from %s to %s, yielding increased size: " PROPERFMT, region_count, old_gen->name(), young_gen->name(), PROPERFMTARGS(new_young_capacity)); } + } else { + ShenandoahGeneration* global_gen = heap->global_generation(); + assert(global_gen->max_capacity() == young_region_count * region_size_bytes, "sanity"); + global_gen->set_used(affiliated_young_regions, young_used_bytes); } } @@ -2625,7 +2685,6 @@ void ShenandoahFreeSet::establish_generation_sizes(size_t young_region_count, si void ShenandoahFreeSet::reestablish_generation_sizes(size_t young_region_count, size_t old_region_count) { assert(young_region_count + old_region_count == ShenandoahHeap::heap()->num_regions(), "Sanity"); -#undef KELVIN_RESERVE #ifdef KELVIN_RESERVE log_info(gc)("reestablish_generation_sizes(young_region_count: %zu, old_region_count: %zu, ", young_region_count, old_region_count); @@ -3340,11 +3399,11 @@ void ShenandoahFreeSet::log_status() { void ShenandoahFreeSet::decrease_humongous_waste_for_regular_bypass(ShenandoahHeapRegion*r, size_t waste) { shenandoah_assert_heaplocked(); + assert(_partitions.membership(r->index()) == ShenandoahFreeSetPartitionId::NotFree, "Humongous regions should be NotFree"); ShenandoahFreeSetPartitionId p = r->is_old()? 
ShenandoahFreeSetPartitionId::OldCollector: ShenandoahFreeSetPartitionId::Mutator; _partitions.decrease_humongous_waste(p, waste); - if ((_partitions.membership(r->index()) == ShenandoahFreeSetPartitionId::NotFree) && - (waste >= PLAB::min_size() * HeapWordSize)) { + if (waste >= PLAB::min_size() * HeapWordSize) { _partitions.decrease_used(p, waste); _partitions.unretire_to_partition(r, p); } @@ -3355,6 +3414,7 @@ void ShenandoahFreeSet::decrease_humongous_waste_for_regular_bypass(ShenandoahHe r->allocate_fill(fill_words); } #endif + recompute_total_used(); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp index 4bb64fa5dafee..6473fe39c3aff 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp @@ -305,11 +305,11 @@ class ShenandoahRegionPartitions { _used[int(which_partition)] -= bytes; _available[int(which_partition)] += bytes; -#define KELVIN_USED_PARTITION +#undef KELVIN_USED_PARTITION #ifdef KELVIN_USED_PARTITION - extern const char* partition_name(ShenandoahFreeSetPartitionId t); + extern const char* _shenandoah_partition_name(ShenandoahFreeSetPartitionId t); log_info(gc)("ShenRegionPartitions %s decrease_used(%zu) to %zu, available grows to %zu", - partition_name(which_partition), bytes, _used[int(which_partition)], _available[int(which_partition)]); + _shenandoah_partition_name(which_partition), bytes, _used[int(which_partition)], _available[int(which_partition)]); #endif } @@ -320,12 +320,11 @@ class ShenandoahRegionPartitions { shenandoah_assert_heaplocked(); assert (which_partition < NumPartitions, "Partition must be valid"); assert(_humongous_waste[int(which_partition)] >= bytes, "Cannot decrease waste beyond what is there"); - _humongous_waste[int(which_partition)] -= bytes; #undef KELVIN_HUMONGOUS_WASTE #ifdef KELVIN_HUMONGOUS_WASTE - extern const char* partition_name(ShenandoahFreeSetPartitionId t); - log_info(gc)("FreeSet<%s>::decrease_humongous_waste(%zu) yields: %zu", partition_name(which_partition), + extern const char* _shenandoah_humongous_partition_name(ShenandoahFreeSetPartitionId t); + log_info(gc)("FreeSet<%s>::decrease_humongous_waste(%zu) yields: %zu", _shenandoah_humongous_partition_name(which_partition), bytes, _humongous_waste[int(which_partition)]); #endif } @@ -470,7 +469,7 @@ class ShenandoahFreeSet : public CHeapObj { size_t region_size_bytes = _partitions.region_size_bytes(); _total_young_used = (_partitions.used_by(ShenandoahFreeSetPartitionId::Mutator) + _partitions.used_by(ShenandoahFreeSetPartitionId::Collector)); -#define KELVIN_USED +#undef KELVIN_USED #ifdef KELVIN_USED log_info(gc)(" recompute_total_young_used(): %zu from total regions M: %zu, C: %zu, allocatable regions M: %zu, C: %zu, " "M used: %zu, C used: %zu", _total_young_used, diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp index 7504c786be0fa..60a54fc19c92a 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp @@ -882,7 +882,7 @@ size_t ShenandoahGeneration::decrement_affiliated_region_count() { // a coherent value. 
auto affiliated_region_count = Atomic::sub(&_affiliated_region_count, (size_t) 1); assert(ShenandoahHeap::heap()->is_full_gc_in_progress() || - (used() + _humongous_waste <= affiliated_region_count * ShenandoahHeapRegion::region_size_bytes()), + (used() <= affiliated_region_count * ShenandoahHeapRegion::region_size_bytes()), "used + humongous cannot exceed regions"); #ifdef KELVIN_AFFILIATED log_info(gc)("%s: decrement_affiliated_region_count() by 1: %zu", name(), affiliated_region_count); @@ -913,7 +913,7 @@ size_t ShenandoahGeneration::decrease_affiliated_region_count(size_t delta) { auto const affiliated_region_count = Atomic::sub(&_affiliated_region_count, delta); assert(ShenandoahHeap::heap()->is_full_gc_in_progress() || - (_used + _humongous_waste <= affiliated_region_count * ShenandoahHeapRegion::region_size_bytes()), + (_used <= affiliated_region_count * ShenandoahHeapRegion::region_size_bytes()), "used + humongous cannot exceed regions"); #ifdef KELVIN_AFFILIATED log_info(gc)("%s: decrease_affiliated_region_count() by %zu: %zu", name(), delta, affiliated_region_count); @@ -934,7 +934,7 @@ void ShenandoahGeneration::establish_usage(size_t num_regions, size_t num_bytes, void ShenandoahGeneration::increase_used(size_t bytes) { Atomic::add(&_used, bytes); -#define KELVIN_MONITOR_USED +#undef KELVIN_MONITOR_USED #ifdef KELVIN_MONITOR_USED log_info(gc)("Generation %s increase_used(%zu) to %zu", shenandoah_generation_name(_type), bytes, _used); #endif @@ -1036,7 +1036,7 @@ size_t ShenandoahGeneration::soft_available() const { } size_t ShenandoahGeneration::available(size_t capacity) const { - size_t in_use = used() + get_humongous_waste(); + size_t in_use = used(); return in_use > capacity ? 0 : capacity - in_use; } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp index b378bb405d9b7..160e73a0e85f7 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp @@ -149,7 +149,7 @@ class ShenandoahGeneration : public CHeapObj, public ShenandoahSpaceInfo { } size_t original_result = Atomic::load(&_used); -#define KELVIN_SCAFFOLDING +#undef KELVIN_SCAFFOLDING #ifdef KELVIN_SCAFFOLDING static int problem_count = 0; if (result != original_result) { @@ -176,7 +176,8 @@ class ShenandoahGeneration : public CHeapObj, public ShenandoahSpaceInfo { size_t available() const override; size_t available_with_reserve() const; size_t used_including_humongous_waste() const { - return used() + get_humongous_waste(); + // In the current implementation, used() includes humongous waste + return used(); } // Returns the memory available based on the _soft_ max heap capacity (soft_max_heap - used). 
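// --- Editor's illustrative note (not part of this patch) --------------------------------
// The hunks above rely on the convention that a generation's used() now already includes
// humongous waste, so "available" is a plain subtraction from capacity. A minimal sketch of
// that identity, with hypothetical numbers:
//
//   capacity = 100 MiB, used = 60 MiB (of which 3 MiB is humongous waste)
//   available = 100 MiB - 60 MiB = 40 MiB   (no separate waste adjustment)
static size_t sketch_available(size_t capacity_bytes, size_t used_bytes) {
  // used_bytes is assumed to already fold in humongous waste; clamp at zero to mirror the
  // guard in ShenandoahGeneration::available(capacity).
  return used_bytes > capacity_bytes ? 0 : capacity_bytes - used_bytes;
}
// -----------------------------------------------------------------------------------------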
@@ -278,6 +279,10 @@ class ShenandoahGeneration : public CHeapObj, public ShenandoahSpaceInfo { // Return the updated value of affiliated_region_count size_t decrease_affiliated_region_count(size_t delta); + size_t get_affiliated_region_count() const { + return Atomic::load(&_affiliated_region_count); + } + void establish_usage(size_t num_regions, size_t num_bytes, size_t humongous_waste); void increase_used(size_t bytes); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp index 70433e97e4cb8..7b9d395b11479 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp @@ -279,7 +279,8 @@ void ShenandoahGenerationalEvacuationTask::promote_humongous(ShenandoahHeapRegio const size_t used_bytes = obj->size() * HeapWordSize; const size_t spanned_regions = ShenandoahHeapRegion::required_regions(used_bytes); - const size_t humongous_waste = spanned_regions * ShenandoahHeapRegion::region_size_bytes() - obj->size() * HeapWordSize; + const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); + const size_t humongous_waste = spanned_regions * region_size_bytes - obj->size() * HeapWordSize; const size_t index_limit = region->index() + spanned_regions; ShenandoahOldGeneration* const old_gen = _heap->old_generation(); @@ -297,7 +298,7 @@ void ShenandoahGenerationalEvacuationTask::promote_humongous(ShenandoahHeapRegio log_info(gc)("Promoting humongous object, transferring %zu bytes of humongous waste", humongous_waste); #endif - young_gen->decrease_used(used_bytes); + young_gen->decrease_used(spanned_regions * region_size_bytes); young_gen->decrease_humongous_waste(humongous_waste); young_gen->decrease_affiliated_region_count(spanned_regions); @@ -319,7 +320,7 @@ void ShenandoahGenerationalEvacuationTask::promote_humongous(ShenandoahHeapRegio freeset->transfer_humongous_regions_from_mutator_to_old_collector(spanned_regions, humongous_waste / HeapWordSize); old_gen->increase_affiliated_region_count(spanned_regions); - old_gen->increase_used(used_bytes); + old_gen->increase_used(spanned_regions * region_size_bytes); old_gen->increase_humongous_waste(humongous_waste); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp index 32b01c4d13267..175f322f6b659 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -760,10 +760,8 @@ void ShenandoahHeap::increase_humongous_waste(ShenandoahGeneration* generation, void ShenandoahHeap::decrease_humongous_waste(ShenandoahGeneration* generation, size_t bytes) { generation->decrease_humongous_waste(bytes); - generation->decrease_used(bytes); if (!generation->is_global()) { global_generation()->decrease_humongous_waste(bytes); - global_generation()->decrease_used(bytes); } } From e2fb4d2da0791342f66168f302d462029a006703 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Sun, 27 Jul 2025 20:33:12 -0600 Subject: [PATCH 25/61] more fixes to humongous waste accounting --- .../share/gc/shenandoah/shenandoahFreeSet.cpp | 29 +++++++++++-------- .../shenandoahGenerationalEvacuationTask.cpp | 2 +- 2 files changed, 18 insertions(+), 13 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index 5cb380b8d62dc..419661cf6b217 100644 --- 
a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -2311,8 +2311,9 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r oop obj = cast_to_oop(region->bottom()); size_t byte_size = obj->size() * HeapWordSize; size_t region_span = ShenandoahHeapRegion::required_regions(byte_size); - humongous_waste_bytes = region_span * ShenandoahHeapRegion::region_size_words() - byte_size; - } else if (ac >= ShenandoahHeap::min_fill_size() * HeapWordSize) { + humongous_waste_bytes = region_span * ShenandoahHeapRegion::region_size_bytes() - byte_size; + } else if (!region->is_humongous() && (ac >= ShenandoahHeap::min_fill_size() * HeapWordSize)) { + // Don't fill humongous continuations size_t fill_words = ac / HeapWordSize; region->allocate_fill(fill_words); } @@ -2439,11 +2440,13 @@ void ShenandoahFreeSet::transfer_humongous_regions_from_mutator_to_old_collector log_info(gc)("Transferring %zu humongous regions from mutator to old (promoting)", xfer_regions); #endif - _partitions.decrease_total_region_counts(ShenandoahFreeSetPartitionId::Mutator, xfer_regions); _partitions.decrease_humongous_waste(ShenandoahFreeSetPartitionId::Mutator, humongous_waste_bytes); _partitions.decrease_used(ShenandoahFreeSetPartitionId::Mutator, xfer_regions * region_size_bytes); + _partitions.decrease_total_region_counts(ShenandoahFreeSetPartitionId::Mutator, xfer_regions); + _partitions.decrease_capacity(ShenandoahFreeSetPartitionId::Mutator, xfer_regions * region_size_bytes); _partitions.increase_total_region_counts(ShenandoahFreeSetPartitionId::OldCollector, xfer_regions); + _partitions.increase_capacity(ShenandoahFreeSetPartitionId::OldCollector, xfer_regions * region_size_bytes); _partitions.increase_humongous_waste(ShenandoahFreeSetPartitionId::OldCollector, humongous_waste_bytes); _partitions.increase_used(ShenandoahFreeSetPartitionId::OldCollector, xfer_regions * region_size_bytes); @@ -2497,15 +2500,15 @@ size_t ShenandoahFreeSet::transfer_empty_regions_from_collector_set_to_mutator_s mutator_high_idx, mutator_low_idx, mutator_high_idx); _partitions.shrink_interval_if_range_modifies_either_boundary(which_collector, collector_low_idx, collector_high_idx); - _partitions.decrease_total_region_counts(which_collector, transferred_regions); _partitions.decrease_region_counts(which_collector, transferred_regions); _partitions.decrease_empty_region_counts(which_collector, transferred_regions); + _partitions.decrease_total_region_counts(which_collector, transferred_regions); _partitions.decrease_capacity(which_collector, transferred_regions * region_size_bytes); _partitions.increase_total_region_counts(ShenandoahFreeSetPartitionId::Mutator, transferred_regions); + _partitions.increase_capacity(ShenandoahFreeSetPartitionId::Mutator, transferred_regions * region_size_bytes); _partitions.increase_region_counts(ShenandoahFreeSetPartitionId::Mutator, transferred_regions); _partitions.increase_empty_region_counts(ShenandoahFreeSetPartitionId::Mutator, transferred_regions); - _partitions.increase_capacity(ShenandoahFreeSetPartitionId::Mutator, transferred_regions * region_size_bytes); recompute_total_used(); _partitions.assert_bounds(true); @@ -2557,10 +2560,12 @@ transfer_non_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPa mutator_low_idx, mutator_high_idx, _partitions.max(), -1); _partitions.shrink_interval_if_range_modifies_either_boundary(which_collector, collector_low_idx, collector_high_idx); - 
_partitions.decrease_total_region_counts(which_collector, transferred_regions); _partitions.decrease_region_counts(which_collector, transferred_regions); + _partitions.decrease_capacity(which_collector, transferred_regions * region_size_bytes); + _partitions.decrease_total_region_counts(which_collector, transferred_regions); _partitions.increase_total_region_counts(ShenandoahFreeSetPartitionId::Mutator, transferred_regions); + _partitions.increase_capacity(ShenandoahFreeSetPartitionId::Mutator, transferred_regions * region_size_bytes); _partitions.increase_region_counts(ShenandoahFreeSetPartitionId::Mutator, transferred_regions); _partitions.increase_used(ShenandoahFreeSetPartitionId::Mutator, used_transfer); @@ -3072,26 +3077,26 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old } _partitions.decrease_used(ShenandoahFreeSetPartitionId::Mutator, used_to_old_collector + used_to_collector); - _partitions.decrease_total_region_counts(ShenandoahFreeSetPartitionId::Mutator, - regions_to_old_collector + regions_to_collector); _partitions.decrease_region_counts(ShenandoahFreeSetPartitionId::Mutator, regions_to_old_collector + regions_to_collector); _partitions.decrease_empty_region_counts(ShenandoahFreeSetPartitionId::Mutator, empty_regions_to_old_collector + empty_regions_to_collector); + _partitions.decrease_total_region_counts(ShenandoahFreeSetPartitionId::Mutator, + regions_to_old_collector + regions_to_collector); // decrease_capacity() also decreases available _partitions.decrease_capacity(ShenandoahFreeSetPartitionId::Mutator, (regions_to_old_collector + regions_to_collector) * region_size_bytes); _partitions.increase_total_region_counts(ShenandoahFreeSetPartitionId::Collector, regions_to_collector); - _partitions.increase_region_counts(ShenandoahFreeSetPartitionId::Collector, regions_to_collector); - _partitions.increase_empty_region_counts(ShenandoahFreeSetPartitionId::Collector, empty_regions_to_collector); // increase_capacity() also increases available _partitions.increase_capacity(ShenandoahFreeSetPartitionId::Collector, regions_to_collector * region_size_bytes); + _partitions.increase_region_counts(ShenandoahFreeSetPartitionId::Collector, regions_to_collector); + _partitions.increase_empty_region_counts(ShenandoahFreeSetPartitionId::Collector, empty_regions_to_collector); _partitions.increase_total_region_counts(ShenandoahFreeSetPartitionId::OldCollector, regions_to_old_collector); - _partitions.increase_region_counts(ShenandoahFreeSetPartitionId::OldCollector, regions_to_old_collector); - _partitions.increase_empty_region_counts(ShenandoahFreeSetPartitionId::OldCollector, empty_regions_to_old_collector); // increase_capacity() also increases available _partitions.increase_capacity(ShenandoahFreeSetPartitionId::OldCollector, regions_to_old_collector * region_size_bytes); + _partitions.increase_region_counts(ShenandoahFreeSetPartitionId::OldCollector, regions_to_old_collector); + _partitions.increase_empty_region_counts(ShenandoahFreeSetPartitionId::OldCollector, empty_regions_to_old_collector); if (used_to_collector > 0) { _partitions.increase_used(ShenandoahFreeSetPartitionId::Collector, used_to_collector); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp index 7b9d395b11479..eced3054a14ba 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp +++ 
b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp @@ -317,7 +317,7 @@ void ShenandoahGenerationalEvacuationTask::promote_humongous(ShenandoahHeapRegio } ShenandoahFreeSet* freeset = _heap->free_set(); - freeset->transfer_humongous_regions_from_mutator_to_old_collector(spanned_regions, humongous_waste / HeapWordSize); + freeset->transfer_humongous_regions_from_mutator_to_old_collector(spanned_regions, humongous_waste); old_gen->increase_affiliated_region_count(spanned_regions); old_gen->increase_used(spanned_regions * region_size_bytes); From ff7a6116feba7baf67da0b1ef2fbe58a91b3a7d1 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Sun, 27 Jul 2025 20:38:40 -0600 Subject: [PATCH 26/61] Fix old triggering with respect to humongous waste --- .../gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp index 2d0bbfd5e4a3c..d049e7edf0b57 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp @@ -602,12 +602,12 @@ void ShenandoahOldHeuristics::set_trigger_if_old_is_fragmented(size_t first_old_ } void ShenandoahOldHeuristics::set_trigger_if_old_is_overgrown() { - size_t old_used = _old_generation->used() + _old_generation->get_humongous_waste(); + // used() includes humongous waste + size_t old_used = _old_generation->used(); size_t trigger_threshold = _old_generation->usage_trigger_threshold(); // Detects unsigned arithmetic underflow assert(old_used <= _heap->capacity(), - "Old used (%zu, %zu) must not be more than heap capacity (%zu)", - _old_generation->used(), _old_generation->get_humongous_waste(), _heap->capacity()); + "Old used (%zu) must not be more than heap capacity (%zu)", _old_generation->used(), _heap->capacity()); if (old_used > trigger_threshold) { _growth_trigger = true; } @@ -679,7 +679,8 @@ bool ShenandoahOldHeuristics::should_start_gc() { if (_growth_trigger) { // Growth may be falsely triggered during mixed evacuations, before the mixed-evacuation candidates have been // evacuated. Before acting on a false trigger, we check to confirm the trigger condition is still satisfied. - const size_t current_usage = _old_generation->used() + _old_generation->get_humongous_waste(); + // _old_generation->used() includes humongous waste. 
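// --- Editor's illustrative note (not part of this patch) --------------------------------
// Sketch of the "old is overgrown" trigger check above, with hypothetical numbers. Because
// used() already includes humongous waste, the comparison needs no extra waste term:
//
//   old used = 300 MiB, usage_trigger_threshold = 256 MiB  -> 300 > 256, so trigger
static bool sketch_old_overgrown(size_t old_used_bytes, size_t trigger_threshold_bytes) {
  // The real heuristic re-checks this condition later in should_start_gc() and also applies
  // an ignore threshold derived from heap capacity; this shows only the core test.
  return old_used_bytes > trigger_threshold_bytes;
}
// -----------------------------------------------------------------------------------------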
+ const size_t current_usage = _old_generation->used(); const size_t trigger_threshold = _old_generation->usage_trigger_threshold(); const size_t heap_size = heap->capacity(); const size_t ignore_threshold = (ShenandoahIgnoreOldGrowthBelowPercentage * heap_size) / 100; From 90a515f38da6700625f55d4e6ebefe2a246276ac Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Mon, 28 Jul 2025 11:21:33 -0600 Subject: [PATCH 27/61] verifier counts humongous waste as used --- src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp index b2b5df9931ba3..addb2c712a801 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp @@ -409,7 +409,9 @@ class ShenandoahCalculateRegionStatsClosure : public ShenandoahHeapRegionClosure _garbage += bytes_garbage_in_region; _committed += r->is_committed() ? _region_size_bytes : 0; if (r->is_humongous()) { - _humongous_waste += r->free(); + size_t waste_bytes = r->free(); + _humongous_waste += waste_bytes; + _used += waste_bytes; // humongous_waste is counted as part of _used } if (r->is_trash()) { _trashed_regions++; From 702958a1fffdc9278e371f02945983b350a4b033 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Mon, 28 Jul 2025 17:00:12 -0600 Subject: [PATCH 28/61] fix verification account of cset and trashed regions --- .../gc/shenandoah/shenandoahVerifier.cpp | 29 ++++++++++++------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp index addb2c712a801..bf4593be3266e 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp @@ -405,18 +405,25 @@ class ShenandoahCalculateRegionStatsClosure : public ShenandoahHeapRegionClosure size_t bytes_used_in_region = _region_size_bytes - alloc_capacity; size_t bytes_garbage_in_region = bytes_used_in_region - r->get_live_data_bytes(); - _used += bytes_used_in_region; - _garbage += bytes_garbage_in_region; - _committed += r->is_committed() ? _region_size_bytes : 0; - if (r->is_humongous()) { - size_t waste_bytes = r->free(); - _humongous_waste += waste_bytes; - _used += waste_bytes; // humongous_waste is counted as part of _used - } - if (r->is_trash()) { - _trashed_regions++; - _trashed_used += bytes_used_in_region; + if (r->is_cset() || r->is_trash()) { + // Count the entire cset or trashed (formerly cset) region as used + // Note: Immediate garbage trash regions were never in the cset. + _used += _region_size_bytes; + _garbage += bytes_garbage_in_region + r->free(); + if (r->is_trash()) { + _trashed_regions++; + _trashed_used += _region_size_bytes; + } + } else { + _used += bytes_used_in_region; + _garbage += bytes_garbage_in_region; + if (r->is_humongous()) { + size_t waste_bytes = r->free(); + _humongous_waste += waste_bytes; + _used += waste_bytes; // humongous_waste is counted as part of _used + } } + _committed += r->is_committed() ? 
_region_size_bytes : 0; _regions++; #ifdef KELVIN_STATS log_info(gc)(" _used: %zu, _garbage: %zu, _committed: %zu, _humongous_waste: %zu, _trashed_regions: %zu, _trashed_used: %zu", From 2c7e880e41d53818e641c6f939cc5fdee6b76125 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Tue, 29 Jul 2025 09:44:57 -0600 Subject: [PATCH 29/61] register the fill object when padding a retired old region --- src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index 419661cf6b217..86967812f0841 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -691,6 +691,10 @@ size_t ShenandoahRegionPartitions::retire_from_partition(ShenandoahFreeSetPartit size_t fill_words = fill_padding / HeapWordSize; ShenandoahHeapRegion*r = ShenandoahHeap::heap()->get_region(idx); if (fill_words >= ShenandoahHeap::min_fill_size()) { + if (r->is_old()) { + // We hold the heap lock already + old_generation()->card_scan()->register_object(r->top()); + } r->allocate_fill(fill_words); } #ifdef KELVIN_USED_PARTITION From ff914120979bdfba05f7610c225e510dda528576 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Wed, 30 Jul 2025 09:15:59 -0600 Subject: [PATCH 30/61] register retired region fill object and disable pad-for-pip adjustments during size verifications --- .../share/gc/shenandoah/shenandoahFreeSet.cpp | 21 +++++-------------- .../gc/shenandoah/shenandoahVerifier.cpp | 7 ++++++- 2 files changed, 11 insertions(+), 17 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index 86967812f0841..56812f30bb28e 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -693,7 +693,7 @@ size_t ShenandoahRegionPartitions::retire_from_partition(ShenandoahFreeSetPartit if (fill_words >= ShenandoahHeap::min_fill_size()) { if (r->is_old()) { // We hold the heap lock already - old_generation()->card_scan()->register_object(r->top()); + ShenandoahHeap::heap()->old_generation()->card_scan()->register_object(r->top()); } r->allocate_fill(fill_words); } @@ -1686,9 +1686,7 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah } } - static const size_t min_capacity = (size_t) (ShenandoahHeapRegion::region_size_bytes() * (1.0 - 1.0 / ShenandoahEvacWaste)); size_t ac = alloc_capacity(r); - ShenandoahFreeSetPartitionId orig_partition; ShenandoahGeneration* request_generation = nullptr; if (req.is_mutator_alloc()) { @@ -1710,7 +1708,7 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah orig_partition = ShenandoahFreeSetPartitionId::Collector; } } - if (((result == nullptr) && (ac < min_capacity)) || (alloc_capacity(r) < PLAB::min_size() * HeapWordSize)) { + if (alloc_capacity(r) < PLAB::min_size() * HeapWordSize) { // Regardless of whether this allocation succeeded, if the remaining memory is less than PLAB:min_size(), retire this region. // Note that retire_from_partition() increases used to account for waste. 
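// --- Editor's illustrative note (not part of this patch) --------------------------------
// Worked example of the retire-on-small-remainder policy above, with assumed sizes. Suppose
// PLAB::min_size() is 64 words and HeapWordSize is 8 bytes, so the threshold is 512 bytes.
// A region with 300 bytes of remaining alloc capacity is retired (and the remainder filled
// when it is at least min_fill_size), while a region with 4 KiB left stays in its free-set
// partition.
static bool sketch_should_retire(size_t remaining_bytes,
                                 size_t plab_min_words, size_t heap_word_size) {
  // Mirrors the shape of the check: retire when the remainder cannot hold even a
  // minimum-sized PLAB.
  return remaining_bytes < plab_min_words * heap_word_size;
}
// -----------------------------------------------------------------------------------------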
@@ -2245,17 +2243,17 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r } if (ac == region_size_bytes) { mutator_empty++; - affiliated_mutator_regions--; if (idx < mutator_leftmost_empty) { mutator_leftmost_empty = idx; } if (idx > mutator_rightmost_empty) { mutator_rightmost_empty = idx; } + } else { + affiliated_mutator_regions++; } mutator_regions++; total_mutator_regions++; - affiliated_mutator_regions++; mutator_used += (region_size_bytes - ac); } else { // !region->is_trash() && region is_old() @@ -2266,15 +2264,7 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r if (idx > old_collector_rightmost) { old_collector_rightmost = idx; } - if (ac == region_size_bytes) { - old_collector_empty++; - if (idx < old_collector_leftmost_empty) { - old_collector_leftmost_empty = idx; - } - if (idx > old_collector_rightmost_empty) { - old_collector_rightmost_empty = idx; - } - } + assert(ac != region_size_bytes, "Empty regions should be in mutator partition"); affiliated_old_collector_regions++; old_collector_regions++; total_old_collector_regions++; @@ -2306,7 +2296,6 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r } else { young_cset_regions++; } - } else { assert(_partitions.membership(idx) == ShenandoahFreeSetPartitionId::NotFree, "Region should have been retired"); size_t ac = alloc_capacity(region); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp index bf4593be3266e..b4d3c663f5e75 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp @@ -491,10 +491,14 @@ class ShenandoahGenerationStatsClosure : public ShenandoahHeapRegionClosure { ShenandoahHeap* heap = ShenandoahHeap::heap(); size_t generation_used = generation->used(); size_t generation_used_regions = generation->used_regions(); +#ifdef KELVIN_DEPRECATE + // We no longer need to adjust for padding. Probably want to + // remove this argument altogether. 
if (adjust_for_padding && (generation->is_young() || generation->is_global())) { size_t pad = heap->old_generation()->get_pad_for_promote_in_place(); generation_used += pad; } +#endif #undef KELVIN_EXTRA_NOISE #ifdef KELVIN_EXTRA_NOISE @@ -939,7 +943,8 @@ void ShenandoahVerifier::verify_at_safepoint(const char* label, size_t heap_used; if (_heap->mode()->is_generational() && (sizeness == _verify_size_adjusted_for_padding)) { // Prior to evacuation, regular regions that are to be evacuated in place are padded to prevent further allocations - heap_used = _heap->used() + _heap->old_generation()->get_pad_for_promote_in_place(); + // but this padding is already represented in _heap->used() + heap_used = _heap->used(); } else if (sizeness != _verify_size_disable) { heap_used = _heap->used(); } From ebe7b322b37522c4d9faa706c91d2f70a02f7334 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Thu, 31 Jul 2025 15:45:54 -0600 Subject: [PATCH 31/61] Do not double-count free as used for humongous --- .../share/gc/shenandoah/shenandoahFreeSet.cpp | 13 +++++--- .../gc/shenandoah/shenandoahVerifier.cpp | 32 ++++++++++--------- 2 files changed, 26 insertions(+), 19 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index 56812f30bb28e..c8d5cde604d1c 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -664,7 +664,11 @@ void ShenandoahRegionPartitions::retire_range_from_partition( assert (partition < NumPartitions, "Cannot remove from free partitions if not already free"); for (idx_t idx = low_idx; idx <= high_idx; idx++) { +#ifdef ASSERT + ShenandoahHeapRegion* r = ShenandoahHeap::heap()->get_region(idx); assert (in_free_set(partition, idx), "Must be in partition to remove from partition"); + assert(r->is_empty() || r->is_trash(), "Region must be empty or trash"); +#endif _membership[int(partition)].clear_bit(idx); } size_t num_regions = high_idx + 1 - low_idx; @@ -1801,6 +1805,11 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) { end++; } + // retire_range_from_partition() will adjust bounds on Mutator free set if appropriate and will recompute affiliated. + _partitions.retire_range_from_partition(ShenandoahFreeSetPartitionId::Mutator, beg, end); + size_t total_humongous_size = ShenandoahHeapRegion::region_size_bytes() * num; + _partitions.increase_used(ShenandoahFreeSetPartitionId::Mutator, total_humongous_size); + size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask(); // Initialize regions: for (idx_t i = beg; i <= end; i++) { @@ -1838,10 +1847,6 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) { _heap->notify_mutator_alloc_words(ShenandoahHeapRegion::region_size_words() - remainder, true); } - // retire_range_from_partition() will adjust bounds on Mutator free set if appropriate and will recompute affiliated. 
- _partitions.retire_range_from_partition(ShenandoahFreeSetPartitionId::Mutator, beg, end); - size_t total_humongous_size = ShenandoahHeapRegion::region_size_bytes() * num; - _partitions.increase_used(ShenandoahFreeSetPartitionId::Mutator, total_humongous_size); req.set_actual_size(words_size); if (remainder != 0) { size_t waste = ShenandoahHeapRegion::region_size_words() - remainder; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp index b4d3c663f5e75..590980381d6dc 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp @@ -394,33 +394,35 @@ class ShenandoahCalculateRegionStatsClosure : public ShenandoahHeapRegionClosure log_info(gc)("%s:ShenandoahCalculateRegionStatsClosure::heap_region_do(), %s r: %zu used: %zu, garbage: %zu, is_trash: %s", _nm, r->affiliation_name(), r->index(), r->used(), r->garbage(), r->is_trash()? "yes": "no"); #endif - size_t alloc_capacity = r->free(); - if ((alloc_capacity > 0) && (alloc_capacity < _min_free_size)) { -#ifdef KELVIN_STATS - log_info(gc)("KELVIN!!!! overwriting alloc_capacity %zu with 0 because too small", alloc_capacity); -#endif - // this region has been retired already, count it as entirely consumed - alloc_capacity = 0; - } - size_t bytes_used_in_region = _region_size_bytes - alloc_capacity; - size_t bytes_garbage_in_region = bytes_used_in_region - r->get_live_data_bytes(); if (r->is_cset() || r->is_trash()) { // Count the entire cset or trashed (formerly cset) region as used // Note: Immediate garbage trash regions were never in the cset. _used += _region_size_bytes; - _garbage += bytes_garbage_in_region + r->free(); + _garbage += _region_size_bytes - r->get_live_data_bytes(); if (r->is_trash()) { _trashed_regions++; _trashed_used += _region_size_bytes; } } else { - _used += bytes_used_in_region; - _garbage += bytes_garbage_in_region; if (r->is_humongous()) { + _used += _region_size_bytes; + _garbage += _region_size_bytes - r->get_live_data_bytes(); + _humongous_waste += r->free(); + } else { + size_t alloc_capacity = r->free(); + if (alloc_capacity < _min_free_size) { +#ifdef KELVIN_STATS + log_info(gc)("KELVIN!!!! overwriting alloc_capacity %zu with 0 because too small", alloc_capacity); +#endif + // this region has been retired already, count it as entirely consumed + alloc_capacity = 0; + } + size_t bytes_used_in_region = _region_size_bytes - alloc_capacity; + size_t bytes_garbage_in_region = bytes_used_in_region - r->get_live_data_bytes(); size_t waste_bytes = r->free(); - _humongous_waste += waste_bytes; - _used += waste_bytes; // humongous_waste is counted as part of _used + _used += bytes_used_in_region; + _garbage += bytes_garbage_in_region; } } _committed += r->is_committed() ? 
_region_size_bytes : 0; From 09d5a9597d37d2f417be3ef800218f2e2c9ac947 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Fri, 1 Aug 2025 09:21:44 -0600 Subject: [PATCH 32/61] shrink interval for range cannot assume contiguous regions --- .../share/gc/shenandoah/shenandoahFreeSet.cpp | 44 ++++++++++++++----- .../share/gc/shenandoah/shenandoahFreeSet.hpp | 2 +- 2 files changed, 33 insertions(+), 13 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index c8d5cde604d1c..692bf52ef9919 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -525,18 +525,28 @@ void ShenandoahRegionPartitions::one_region_is_no_longer_empty(ShenandoahFreeSet // All members of partition between low_idx and high_idx inclusive have been removed. void ShenandoahRegionPartitions::shrink_interval_if_range_modifies_either_boundary( - ShenandoahFreeSetPartitionId partition, idx_t low_idx, idx_t high_idx) { + ShenandoahFreeSetPartitionId partition, idx_t low_idx, idx_t high_idx, size_t num_regions) { assert((low_idx <= high_idx) && (low_idx >= 0) && (high_idx < _max), "Range must span legal index values"); #undef KELVIN_INTERVALS #ifdef KELVIN_INTERVALS log_info(gc)("shrink_interval_if_range_modifies_either_boundary(%s, %zd, %zd)", partition_name(partition), low_idx, high_idx); #endif + size_t span = high_idx + 1 - low_idx; + bool regions_are_contiguous = (span == num_regions); if (low_idx == leftmost(partition)) { assert (!_membership[int(partition)].is_set(low_idx), "Do not shrink interval if region not removed"); if (high_idx + 1 == _max) { - _leftmosts[int(partition)] = _max; + if (regions_are_contiguous) { + _leftmosts[int(partition)] = _max; + } else { + _leftmosts[int(partition)] = find_index_of_next_available_region(partition, low_idx + 1); + } } else { - _leftmosts[int(partition)] = find_index_of_next_available_region(partition, high_idx + 1); + if (regions_are_contiguous) { + _leftmosts[int(partition)] = find_index_of_next_available_region(partition, high_idx + 1); + } else { + _leftmosts[int(partition)] = find_index_of_next_available_region(partition, low_idx + 1); + } } if (_leftmosts_empty[int(partition)] < _leftmosts[int(partition)]) { // This gets us closer to where we need to be; we'll scan further when leftmosts_empty is requested. @@ -546,9 +556,17 @@ void ShenandoahRegionPartitions::shrink_interval_if_range_modifies_either_bounda if (high_idx == _rightmosts[int(partition)]) { assert (!_membership[int(partition)].is_set(high_idx), "Do not shrink interval if region not removed"); if (low_idx == 0) { - _rightmosts[int(partition)] = -1; + if (regions_are_contiguous) { + _rightmosts[int(partition)] = -1; + } else { + _rightmosts[int(partition)] = find_index_of_previous_available_region(partition, high_idx - 1); + } } else { - _rightmosts[int(partition)] = find_index_of_previous_available_region(partition, low_idx - 1); + if (regions_are_contiguous) { + _rightmosts[int(partition)] = find_index_of_previous_available_region(partition, low_idx - 1); + } else { + _rightmosts[int(partition)] = find_index_of_previous_available_region(partition, high_idx - 1); + } } if (_rightmosts_empty[int(partition)] > _rightmosts[int(partition)]) { // This gets us closer to where we need to be; we'll scan further when rightmosts_empty is requested. 
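
The reason for the new num_regions parameter in the hunk above: when only some of the regions in [low_idx, high_idx] were removed, surviving members may still lie inside that span, so the replacement boundary must be searched from just inside the removed endpoint instead of from beyond the whole span. The following stand-alone sketch of the leftmost-boundary case is illustrative only and is not part of the patch; std::bitset stands in for ShenandoahSimpleBitMap and all names are hypothetical.

  #include <bitset>
  #include <cstddef>

  // members.test(i) == true means region i is still in the partition.
  // Regions in [low, high] were just removed; 'contiguous' means every region
  // in that span was a member before removal (span == num_regions).
  // Assume max_regions <= 64 for this toy model.
  static size_t new_leftmost(const std::bitset<64>& members,
                             size_t low, size_t high, bool contiguous,
                             size_t max_regions) {
    // Contiguous removal: nothing inside (low, high] can survive, so resume the
    // scan past the span. Sparse removal: a survivor may sit anywhere after
    // low, so resume at low + 1.
    size_t start = contiguous ? high + 1 : low + 1;
    for (size_t i = start; i < max_regions; i++) {
      if (members.test(i)) {
        return i;
      }
    }
    return max_regions;  // empty-partition sentinel, like _leftmosts[p] = _max
  }

The rightmost-boundary case in the patch is the mirror image: a sparse removal scans backward from high_idx - 1, a contiguous one from low_idx - 1.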
@@ -599,7 +617,7 @@ void ShenandoahRegionPartitions::establish_interval(ShenandoahFreeSetPartitionId } inline void ShenandoahRegionPartitions::shrink_interval_if_boundary_modified(ShenandoahFreeSetPartitionId partition, idx_t idx) { - shrink_interval_if_range_modifies_either_boundary(partition, idx, idx); + shrink_interval_if_range_modifies_either_boundary(partition, idx, idx, 1); } // Some members of partition between low_idx and high_idx inclusive have been added. @@ -674,7 +692,7 @@ void ShenandoahRegionPartitions::retire_range_from_partition( size_t num_regions = high_idx + 1 - low_idx; decrease_region_counts(partition, num_regions); decrease_empty_region_counts(partition, num_regions); - shrink_interval_if_range_modifies_either_boundary(partition, low_idx, high_idx); + shrink_interval_if_range_modifies_either_boundary(partition, low_idx, high_idx, num_regions); } size_t ShenandoahRegionPartitions::retire_from_partition(ShenandoahFreeSetPartitionId partition, idx_t idx, size_t used_bytes) { @@ -2485,9 +2503,9 @@ size_t ShenandoahFreeSet::transfer_empty_regions_from_collector_set_to_mutator_s if (idx > mutator_high_idx) { mutator_high_idx = idx; } - used_transfer = _partitions.move_from_partition_to_partition_with_deferred_accounting(idx, which_collector, - ShenandoahFreeSetPartitionId::Mutator, - region_size_bytes); + used_transfer += _partitions.move_from_partition_to_partition_with_deferred_accounting(idx, which_collector, + ShenandoahFreeSetPartitionId::Mutator, + region_size_bytes); transferred_regions++; bytes_transferred += region_size_bytes; } @@ -2496,7 +2514,8 @@ size_t ShenandoahFreeSet::transfer_empty_regions_from_collector_set_to_mutator_s assert(used_transfer == 0, "empty regions should have no used"); _partitions.expand_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId::Mutator, mutator_low_idx, mutator_high_idx, mutator_low_idx, mutator_high_idx); - _partitions.shrink_interval_if_range_modifies_either_boundary(which_collector, collector_low_idx, collector_high_idx); + _partitions.shrink_interval_if_range_modifies_either_boundary(which_collector, collector_low_idx, collector_high_idx, + transferred_regions); _partitions.decrease_region_counts(which_collector, transferred_regions); _partitions.decrease_empty_region_counts(which_collector, transferred_regions); @@ -2556,7 +2575,8 @@ transfer_non_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPa _partitions.decrease_used(which_collector, used_transfer); _partitions.expand_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId::Mutator, mutator_low_idx, mutator_high_idx, _partitions.max(), -1); - _partitions.shrink_interval_if_range_modifies_either_boundary(which_collector, collector_low_idx, collector_high_idx); + _partitions.shrink_interval_if_range_modifies_either_boundary(which_collector, collector_low_idx, collector_high_idx, + transferred_regions); _partitions.decrease_region_counts(which_collector, transferred_regions); _partitions.decrease_capacity(which_collector, transferred_regions * region_size_bytes); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp index 6473fe39c3aff..b164bb9d678da 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp @@ -185,7 +185,7 @@ class ShenandoahRegionPartitions { // Shrink the intervals associated with partition when regions low_idx through high_idx inclusive are removed from this free 
set void shrink_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId partition, - idx_t low_idx, idx_t high_idx); + idx_t low_idx, idx_t high_idx, size_t num_regions); void expand_interval_if_boundary_modified(ShenandoahFreeSetPartitionId partition, idx_t idx, size_t capacity); void expand_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId partition, From 3422f25a87693d1f5fcaee42a86921c3c21cc314 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Fri, 1 Aug 2025 18:26:58 -0600 Subject: [PATCH 33/61] adjustments to promotion in place bookkeeping --- .../share/gc/shenandoah/shenandoahFreeSet.cpp | 80 ++++++++++++------- .../share/gc/shenandoah/shenandoahFreeSet.hpp | 5 +- .../gc/shenandoah/shenandoahGeneration.cpp | 10 +-- .../shenandoahGenerationalEvacuationTask.cpp | 17 ++-- 4 files changed, 67 insertions(+), 45 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index 692bf52ef9919..8c84e83ee6a7a 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -184,12 +184,14 @@ ShenandoahRegionPartitions::ShenandoahRegionPartitions(size_t max_regions, Shena make_all_regions_unavailable(); } -void ShenandoahFreeSet::increase_young_used(size_t bytes) { - _partitions.increase_used(ShenandoahFreeSetPartitionId::Mutator, bytes); -} - -void ShenandoahFreeSet::decrease_young_used(size_t bytes) { - _partitions.decrease_used(ShenandoahFreeSetPartitionId::Mutator, bytes); +void ShenandoahFreeSet::prepare_to_promote_in_place(size_t idx, size_t bytes) { + shenandoah_assert_heaplocked(); + ShenandoahFreeSetPartitionId p = _partitions.membership(idx); + assert((p == ShenandoahFreeSetPartitionId::Mutator) || (p == ShenandoahFreeSetPartitionId::Collector), + "PIP region must be associated with young"); + _partitions.increase_used(p, bytes); + recompute_total_young_used(); + recompute_total_global_used(); } inline bool ShenandoahFreeSet::can_allocate_from(ShenandoahHeapRegion *r) const { @@ -1282,51 +1284,67 @@ ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) : clear_internal(); } -void ShenandoahFreeSet::add_promoted_in_place_region_to_old_collector(ShenandoahHeapRegion* region, size_t pip_pad_bytes) { +// was pip_pad_bytes +void ShenandoahFreeSet::add_promoted_in_place_region_to_old_collector(ShenandoahHeapRegion* region) { shenandoah_assert_heaplocked(); size_t plab_min_size_in_bytes = ShenandoahGenerationalHeap::heap()->plab_min_size() * HeapWordSize; size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); size_t available_in_region = alloc_capacity(region); - size_t used_in_region = region->used(); -#ifdef ASSERT - size_t idx = region->index(); - assert(_partitions.membership(idx) == ShenandoahFreeSetPartitionId::NotFree, + size_t region_index = region->index(); + ShenandoahFreeSetPartitionId p = _partitions.membership(region_index); + assert(_partitions.membership(region_index) == ShenandoahFreeSetPartitionId::NotFree, "Regions promoted in place should have been excluded from Mutator partition"); -#endif - if (available_in_region < plab_min_size_in_bytes) { - // If region had been retired, its end-of-region alignment pad is counted as used - used_in_region += available_in_region; + + // If region had been retired, its end-of-region alignment pad had been counted as used within the Mutator partition + size_t used_while_awaiting_pip = region_size_bytes; + size_t 
used_after_pip = region_size_bytes; + if (available_in_region >= plab_min_size_in_bytes) { + used_after_pip -= available_in_region; + } else { + if (available_in_region >= ShenandoahHeap::min_fill_size() * HeapWordSize) { + size_t fill_words = available_in_region / HeapWordSize; + ShenandoahHeap::heap()->old_generation()->card_scan()->register_object(region->top()); + region->allocate_fill(fill_words); + } + available_in_region = 0; } - - _partitions.decrease_used(ShenandoahFreeSetPartitionId::Mutator, used_in_region + pip_pad_bytes); + + assert(p == ShenandoahFreeSetPartitionId::NotFree, "pip region must be NotFree"); + assert(region->is_young(), "pip region must be young"); + _partitions.decrease_used(ShenandoahFreeSetPartitionId::Mutator, used_while_awaiting_pip); + // decrease capacity adjusts available _partitions.decrease_capacity(ShenandoahFreeSetPartitionId::Mutator, region_size_bytes); _partitions.decrease_total_region_counts(ShenandoahFreeSetPartitionId::Mutator, 1); - _partitions.increase_capacity(ShenandoahFreeSetPartitionId::OldCollector, region_size_bytes); - _partitions.increase_used(ShenandoahFreeSetPartitionId::OldCollector, used_in_region); _partitions.increase_total_region_counts(ShenandoahFreeSetPartitionId::OldCollector, 1); - - if (available_in_region >= plab_min_size_in_bytes) { - // region counts represents regions from which we are currently allocating. - _partitions.decrease_region_counts(ShenandoahFreeSetPartitionId::Mutator, 1); + _partitions.increase_capacity(ShenandoahFreeSetPartitionId::OldCollector, region_size_bytes); + _partitions.increase_used(ShenandoahFreeSetPartitionId::OldCollector, used_after_pip); + region->set_affiliation(ShenandoahAffiliation::OLD_GENERATION); + if (available_in_region > 0) { + assert(available_in_region >= plab_min_size_in_bytes, "enforced above"); _partitions.increase_region_counts(ShenandoahFreeSetPartitionId::OldCollector, 1); - _partitions.make_free(idx, ShenandoahFreeSetPartitionId::OldCollector, available_in_region); + // make_free() adjusts bounds for OldCollector partition + _partitions.make_free(region_index, ShenandoahFreeSetPartitionId::OldCollector, available_in_region); _heap->old_generation()->augment_promoted_reserve(available_in_region); assert(available_in_region != region_size_bytes, "Nothing to promote in place"); } + // else, leave this region as NotFree + recompute_total_used(); recompute_total_affiliated(); _partitions.assert_bounds(true); #ifdef KELVIN_CAPACITY - log_info(gc)("Ater add_pip_to_old(%zu), Mutate used: %zu, capacity: %zu, available: %zu, region_counts: %zu, total_regions: %zu", + log_info(gc)("Ater add_pip_to_old(%zu) from partition %s, " + " used: %zu, capacity: %zu, available: %zu, region_counts: %zu, total_regions: %zu", region->index(), - _partitions.get_used(ShenandoahFreeSetPartitionId::Mutator), - _partitions.get_capacity(ShenandoahFreeSetPartitionId::Mutator), - _partitions.get_available(ShenandoahFreeSetPartitionId::Mutator), - _partitions.get_region_counts(ShenandoahFreeSetPartitionId::Mutator), - _partitions.get_total_region_counts(ShenandoahFreeSetPartitionId::Mutator)); - log_info(gc)(" OldCollect used: %zu, capacity: %zu, available: %zu, region_counts: %zu, total_regions: %zu", + partition_name(p), + _partitions.get_used(p), + _partitions.get_capacity(p), + _partitions.get_available(p), + _partitions.get_region_counts(p), + _partitions.get_total_region_counts(p)); + log_info(gc)(" OldCollect used: %zu, capacity: %zu, available: %zu, region_counts: %zu, total_regions: %zu", 
_partitions.get_used(ShenandoahFreeSetPartitionId::OldCollector), _partitions.get_capacity(ShenandoahFreeSetPartitionId::OldCollector), _partitions.get_available(ShenandoahFreeSetPartitionId::OldCollector), diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp index b164bb9d678da..00e896540ab89 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp @@ -634,8 +634,7 @@ class ShenandoahFreeSet : public CHeapObj { return _total_old_used; } - void increase_young_used(size_t bytes); - void decrease_young_used(size_t bytes); + void prepare_to_promote_in_place(size_t idx, size_t bytes); // Return bytes used by young inline size_t young_used() { @@ -696,7 +695,7 @@ class ShenandoahFreeSet : public CHeapObj { // When a region is promoted in place, we add the region's available memory if it is greater than plab_min_size() // into the old collector partition by invoking this method. - void add_promoted_in_place_region_to_old_collector(ShenandoahHeapRegion* region, size_t pip_pad_bytes); + void add_promoted_in_place_region_to_old_collector(ShenandoahHeapRegion* region); // Move up to cset_regions number of regions from being available to the collector to being available to the mutator. // diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp index 60a54fc19c92a..7638491371cce 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp @@ -574,17 +574,17 @@ size_t ShenandoahGeneration::select_aged_regions(size_t old_available) { // phase. r->save_top_before_promote(); - size_t remnant_size = r->free() / HeapWordSize; + size_t remnant_bytes = r->free(); + size_t remnant_size = remnant_bytes / HeapWordSize; if (remnant_size > ShenandoahHeap::min_fill_size()) { ShenandoahHeap::fill_with_object(original_top, remnant_size); // Fill the remnant memory within this region to assure no allocations prior to promote in place. Otherwise, // newly allocated objects will not be parsable when promote in place tries to register them. Furthermore, any // new allocations would not necessarily be eligible for promotion. This addresses both issues. r->set_top(r->end()); - promote_in_place_pad += remnant_size * HeapWordSize; - - free_set->increase_young_used(remnant_size); - young_gen->increase_used(remnant_size); + promote_in_place_pad += remnant_bytes; + free_set->prepare_to_promote_in_place(i, remnant_bytes); + young_gen->increase_used(remnant_bytes); } else { // Since the remnant is so small that it cannot be filled, we don't have to worry about any accidental // allocations occurring within this region before the region is promoted in place. 
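
The net effect of the select_aged_regions() hunk above, together with the follow-up accounting fixes later in this series, is: the remnant above a promote-in-place candidate's top() is always charged to young as used, but a filler object and a promote_in_place_pad contribution are only recorded when the remnant is large enough to hold a filler. The sketch below is a stand-alone illustration under assumed values (heap_word_size and min_fill_words are hypothetical stand-ins for HeapWordSize and ShenandoahHeap::min_fill_size()); it is not part of the patch.

  #include <cstddef>

  // Toy model of the remnant decision for a promote-in-place candidate region.
  struct RemnantPlan {
    size_t pad_bytes;   // contribution to promote_in_place_pad
    bool   needs_fill;  // whether a filler object is written above top()
  };

  static RemnantPlan plan_remnant(size_t remnant_bytes,
                                  size_t heap_word_size = 8,   // assumed
                                  size_t min_fill_words = 2) { // assumed
    size_t remnant_words = remnant_bytes / heap_word_size;
    if (remnant_words >= min_fill_words) {
      // Large enough to hold a filler: pad it so nothing else allocates here
      // before the region is promoted in place.
      return { remnant_bytes, true };
    }
    // Too small to fill; no pad is recorded, but the caller still counts the
    // remnant as used by young.
    return { 0, false };
  }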
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp index eced3054a14ba..5e548e7c9681f 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp @@ -172,9 +172,10 @@ void ShenandoahGenerationalEvacuationTask::promote_in_place(ShenandoahHeapRegion assert(!_heap->gc_generation()->is_old(), "Sanity check"); ShenandoahMarkingContext* const marking_context = _heap->young_generation()->complete_marking_context(); HeapWord* const tams = marking_context->top_at_mark_start(region); + size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); { - const size_t old_garbage_threshold = (ShenandoahHeapRegion::region_size_bytes() * ShenandoahOldGarbageThreshold) / 100; + const size_t old_garbage_threshold = (region_size_bytes * ShenandoahOldGarbageThreshold) / 100; shenandoah_assert_generations_reconciled(); assert(!_heap->is_concurrent_old_mark_in_progress(), "Cannot promote in place during old marking"); assert(region->garbage_before_padded_for_promote() < old_garbage_threshold, "Region %zu has too much garbage for promotion", region->index()); @@ -224,14 +225,19 @@ void ShenandoahGenerationalEvacuationTask::promote_in_place(ShenandoahHeapRegion ShenandoahHeapLocker locker(_heap->lock()); HeapWord* update_watermark = region->get_update_watermark(); - + // pip_unpadded is memory too small to be filled above original top + size_t pip_unpadded = (region->end() - region->top()) * HeapWordSize; + assert((region->top() == region->end()) + || (pip_unpadded == (size_t) ((region->end() - region->top()) * HeapWordSize)), "Invariant"); + assert(pip_unpadded < ShenandoahHeap::min_fill_size(), "Sanity"); size_t pip_pad_bytes = (region->top() - region->get_top_before_promote()) * HeapWordSize; + assert((pip_unpadded == 0) || (pip_pad_bytes == 0), "Only one of pip_unpadded and pip_pad_bytes is non-zero"); // Now that this region is affiliated with old, we can allow it to receive allocations, though it may not be in the // is_collector_free range. region->restore_top_before_promote(); - size_t region_used = region->used(); + assert(region_used + pip_pad_bytes + pip_unpadded == region_size_bytes, "invariant"); // The update_watermark was likely established while we had the artificially high value of top. Make it sane now. 
assert(update_watermark >= region->top(), "original top cannot exceed preserved update_watermark"); @@ -251,10 +257,9 @@ void ShenandoahGenerationalEvacuationTask::promote_in_place(ShenandoahHeapRegion region_used += available_in_region; } - region->set_affiliation(OLD_GENERATION); - // add_old_collector_free_region() increases promoted_reserve() if available space exceeds plab_min_size() - _heap->free_set()->add_promoted_in_place_region_to_old_collector(region, pip_pad_bytes); + _heap->free_set()->add_promoted_in_place_region_to_old_collector(region); + region->set_affiliation(OLD_GENERATION); young_gen->decrease_used(region_used + pip_pad_bytes); young_gen->decrement_affiliated_region_count(); From 9e4224f56eaa32b9895c5fefc2eb957362ae74c6 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Sun, 3 Aug 2025 09:13:22 -0600 Subject: [PATCH 34/61] recompute young_evac_reserve after selecting pip regions --- src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp index 7638491371cce..ff6beb4aaea8c 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp @@ -272,7 +272,7 @@ void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap // maximum_young_evacuation_reserve is upper bound on memory to be evacuated out of young const size_t maximum_young_evacuation_reserve = (young_generation->max_capacity() * ShenandoahEvacReserve) / 100; - const size_t young_evacuation_reserve = MIN2(maximum_young_evacuation_reserve, young_generation->available_with_reserve()); + size_t young_evacuation_reserve = MIN2(maximum_young_evacuation_reserve, young_generation->available_with_reserve()); // maximum_old_evacuation_reserve is an upper bound on memory evacuated from old and evacuated to old (promoted), // clamped by the old generation space available. @@ -352,6 +352,11 @@ void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap const size_t consumed_by_advance_promotion = select_aged_regions(old_promo_reserve); assert(consumed_by_advance_promotion <= maximum_old_evacuation_reserve, "Cannot promote more than available old-gen memory"); + // If any regions have been selected for promotion in place, this has the effect of decreasing available within mutator + // and collector partitions, due to padding of remnant memory within each promoted in place region. This will affect + // young_evacuation_reserve but not old_evacuation_reserve or consumed_by_advance_promotion. So recompute. + young_evacuation_reserve = MIN2(young_evacuation_reserve, young_generation->available_with_reserve()); + // Note that unused old_promo_reserve might not be entirely consumed_by_advance_promotion. Do not transfer this // to old_evacuation_reserve because this memory is likely very fragmented, and we do not want to increase the likelihood // of old evacuation failure. 
From d67cd995d64d5e339a7894b01b7bec9c8c0111f9 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Sun, 3 Aug 2025 10:12:24 -0600 Subject: [PATCH 35/61] fix up accounting for pip padding --- .../share/gc/shenandoah/shenandoahGeneration.cpp | 3 ++- .../shenandoahGenerationalEvacuationTask.cpp | 10 +++++----- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp index ff6beb4aaea8c..9790dc63f1a24 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp @@ -589,11 +589,12 @@ size_t ShenandoahGeneration::select_aged_regions(size_t old_available) { r->set_top(r->end()); promote_in_place_pad += remnant_bytes; free_set->prepare_to_promote_in_place(i, remnant_bytes); - young_gen->increase_used(remnant_bytes); } else { // Since the remnant is so small that it cannot be filled, we don't have to worry about any accidental // allocations occurring within this region before the region is promoted in place. } + // Even when we do not fill the remnant, we count the remnant as used + young_gen->increase_used(remnant_bytes); } // Else, we do not promote this region (either in place or by copy) because it has received new allocations. diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp index 5e548e7c9681f..f2b8b1e6cdb67 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp @@ -236,8 +236,8 @@ void ShenandoahGenerationalEvacuationTask::promote_in_place(ShenandoahHeapRegion // Now that this region is affiliated with old, we can allow it to receive allocations, though it may not be in the // is_collector_free range. region->restore_top_before_promote(); - size_t region_used = region->used(); - assert(region_used + pip_pad_bytes + pip_unpadded == region_size_bytes, "invariant"); + size_t region_to_be_used_in_old = region->used(); + assert(region_to_be_used_in_old + pip_pad_bytes + pip_unpadded == region_size_bytes, "invariant"); // The update_watermark was likely established while we had the artificially high value of top. Make it sane now. assert(update_watermark >= region->top(), "original top cannot exceed preserved update_watermark"); @@ -254,21 +254,21 @@ void ShenandoahGenerationalEvacuationTask::promote_in_place(ShenandoahHeapRegion size_t plab_min_size_in_bytes = _heap->plab_min_size() * HeapWordSize; if (available_in_region < plab_min_size_in_bytes) { // The available memory in young had been retired. Retire it in old also. 
- region_used += available_in_region; + region_to_be_used_in_old += available_in_region; } // add_old_collector_free_region() increases promoted_reserve() if available space exceeds plab_min_size() _heap->free_set()->add_promoted_in_place_region_to_old_collector(region); region->set_affiliation(OLD_GENERATION); - young_gen->decrease_used(region_used + pip_pad_bytes); + young_gen->decrease_used(region_size_bytes); young_gen->decrement_affiliated_region_count(); // transfer_to_old() increases capacity of old and decreases capacity of young _heap->generation_sizer()->force_transfer_to_old(1); old_gen->increment_affiliated_region_count(); - old_gen->increase_used(region_used); + old_gen->increase_used(region_to_be_used_in_old); } } From b573d5294b3ecce94a231b8f3b0eced57669082f Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Sun, 3 Aug 2025 22:56:26 +0000 Subject: [PATCH 36/61] fix qualification test for pip fill object --- src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp index 9790dc63f1a24..42aec94ead91a 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp @@ -581,7 +581,7 @@ size_t ShenandoahGeneration::select_aged_regions(size_t old_available) { size_t remnant_bytes = r->free(); size_t remnant_size = remnant_bytes / HeapWordSize; - if (remnant_size > ShenandoahHeap::min_fill_size()) { + if (remnant_size >= ShenandoahHeap::min_fill_size()) { ShenandoahHeap::fill_with_object(original_top, remnant_size); // Fill the remnant memory within this region to assure no allocations prior to promote in place. Otherwise, // newly allocated objects will not be parsable when promote in place tries to register them. 
Furthermore, any From 94cf2496770696b249b15ff9ed3fc35d32376942 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Tue, 5 Aug 2025 23:52:17 +0000 Subject: [PATCH 37/61] Retire regions that are prepared for promote in place --- .../share/gc/shenandoah/shenandoahFreeSet.cpp | 31 ++++----- .../share/gc/shenandoah/shenandoahFreeSet.hpp | 30 +++++++-- .../gc/shenandoah/shenandoahGeneration.cpp | 64 ++++++++++++++++--- .../gc/shenandoah/shenandoahSimpleBitMap.cpp | 2 +- .../gc/shenandoah/shenandoahSimpleBitMap.hpp | 4 +- 5 files changed, 95 insertions(+), 36 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index 8c84e83ee6a7a..7000b7034eb91 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -186,12 +186,19 @@ ShenandoahRegionPartitions::ShenandoahRegionPartitions(size_t max_regions, Shena void ShenandoahFreeSet::prepare_to_promote_in_place(size_t idx, size_t bytes) { shenandoah_assert_heaplocked(); + size_t min_fill_size = ShenandoahHeap::min_fill_size() * HeapWordSize; ShenandoahFreeSetPartitionId p = _partitions.membership(idx); - assert((p == ShenandoahFreeSetPartitionId::Mutator) || (p == ShenandoahFreeSetPartitionId::Collector), - "PIP region must be associated with young"); - _partitions.increase_used(p, bytes); - recompute_total_young_used(); - recompute_total_global_used(); + if (bytes >= min_fill_size) { + assert((p == ShenandoahFreeSetPartitionId::Mutator) || (p == ShenandoahFreeSetPartitionId::Collector), + "PIP region must be associated with young"); + _partitions.increase_used(p, bytes); + _partitions.decrease_region_counts(p, 1); + _partitions.raw_clear_membership(idx, p); + recompute_total_young_used(); + recompute_total_global_used(); + } else { + assert(p == ShenandoahFreeSetPartitionId::NotFree, "We did not fill this region and do not need to adjust used"); + } } inline bool ShenandoahFreeSet::can_allocate_from(ShenandoahHeapRegion *r) const { @@ -858,18 +865,6 @@ const char* ShenandoahRegionPartitions::partition_membership_name(idx_t idx) con return partition_name(membership(idx)); } -inline ShenandoahFreeSetPartitionId ShenandoahRegionPartitions::membership(idx_t idx) const { - assert (idx < _max, "index is sane: %zu < %zu", idx, _max); - ShenandoahFreeSetPartitionId result = ShenandoahFreeSetPartitionId::NotFree; - for (uint partition_id = 0; partition_id < UIntNumPartitions; partition_id++) { - if (_membership[partition_id].is_set(idx)) { - assert(result == ShenandoahFreeSetPartitionId::NotFree, "Region should reside in only one partition"); - result = (ShenandoahFreeSetPartitionId) partition_id; - } - } - return result; -} - #ifdef ASSERT inline bool ShenandoahRegionPartitions::partition_id_matches(idx_t idx, ShenandoahFreeSetPartitionId test_partition) const { assert (idx < _max, "index is sane: %zu < %zu", idx, _max); @@ -2196,7 +2191,7 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r size_t young_cset_regions = 0; size_t region_size_bytes = _partitions.region_size_bytes(); - size_t max_regions = _partitions.max_regions(); + size_t max_regions = _partitions.max(); size_t mutator_leftmost = max_regions; size_t mutator_rightmost = 0; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp index 00e896540ab89..ca0f111af4515 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp +++ 
b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp @@ -65,7 +65,6 @@ class ShenandoahRegionPartitions { const ShenandoahFreeSet* _free_set; // For each partition, we maintain a bitmap of which regions are affiliated with his partition. ShenandoahSimpleBitMap _membership[UIntNumPartitions]; - // For each partition, we track an interval outside of which a region affiliated with that partition is guaranteed // not to be found. This makes searches for free space more efficient. For each partition p, _leftmosts[p] // represents its least index, and its _rightmosts[p] its greatest index. Empty intervals are indicated by the @@ -104,8 +103,7 @@ class ShenandoahRegionPartitions { // adjust used values when flipping from mutator to collector. Flip to old collector does not need to adjust used because // only empty regions can be flipped to old collector. // - // All memory quantities (capacty, available, used) are represented in bytes. - + // All memory quantities (capacity, available, used) are represented in bytes. size_t _capacity[UIntNumPartitions]; @@ -158,6 +156,11 @@ class ShenandoahRegionPartitions { _membership[int(p)].set_bit(idx); } + // Clear the partition id for a particular region without adjusting interval bounds or usage/capacity tallies + inline void raw_clear_membership(size_t idx, ShenandoahFreeSetPartitionId p) { + _membership[int(p)].clear_bit(idx); + } + inline void one_region_is_no_longer_empty(ShenandoahFreeSetPartitionId partition); // Set the Mutator intervals, usage, and capacity according to arguments. Reset the Collector intervals, used, capacity @@ -240,7 +243,17 @@ class ShenandoahRegionPartitions { // Returns the ShenandoahFreeSetPartitionId affiliation of region idx, NotFree if this region is not currently in any partition. // This does not enforce that free_set membership implies allocation capacity. - inline ShenandoahFreeSetPartitionId membership(idx_t idx) const; + inline ShenandoahFreeSetPartitionId membership(idx_t idx) const { + assert (idx < _max, "index is sane: %zu < %zu", idx, _max); + ShenandoahFreeSetPartitionId result = ShenandoahFreeSetPartitionId::NotFree; + for (uint partition_id = 0; partition_id < UIntNumPartitions; partition_id++) { + if (_membership[partition_id].is_set(idx)) { + assert(result == ShenandoahFreeSetPartitionId::NotFree, "Region should reside in only one partition"); + result = (ShenandoahFreeSetPartitionId) partition_id; + } + } + return result; + } #ifdef ASSERT // Returns true iff region idx's membership is which_partition. 
If which_partition represents a free set, asserts @@ -248,8 +261,6 @@ class ShenandoahRegionPartitions { inline bool partition_id_matches(idx_t idx, ShenandoahFreeSetPartitionId which_partition) const; #endif - inline size_t max_regions() const { return _max; } - inline size_t region_size_bytes() const { return _region_size_bytes; }; // The following four methods return the left-most and right-most bounds on ranges of regions representing @@ -625,6 +636,13 @@ class ShenandoahFreeSet : public CHeapObj { ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions); + inline size_t max_regions() const { return _partitions.max(); } + ShenandoahFreeSetPartitionId membership(size_t index) const { return _partitions.membership(index); } + inline void shrink_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId partition, + idx_t low_idx, idx_t high_idx, size_t num_regions) { + return _partitions.shrink_interval_if_range_modifies_either_boundary(partition, low_idx, high_idx, num_regions); + } + // Public because ShenandoahRegionPartitions assertions require access. inline size_t alloc_capacity(ShenandoahHeapRegion *r) const; inline size_t alloc_capacity(size_t idx) const; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp index 42aec94ead91a..038bf6d045354 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp @@ -554,12 +554,22 @@ size_t ShenandoahGeneration::select_aged_regions(size_t old_available) { // Sort the promotion-eligible regions in order of increasing live-data-bytes so that we can first reclaim regions that require // less evacuation effort. This prioritizes garbage first, expanding the allocation pool early before we reclaim regions that // have more live data. - const size_t num_regions = heap->num_regions(); + const idx_t num_regions = heap->num_regions(); ResourceMark rm; AgedRegionData* sorted_regions = NEW_RESOURCE_ARRAY(AgedRegionData, num_regions); - for (size_t i = 0; i < num_regions; i++) { + ShenandoahFreeSet* freeset = heap->free_set(); + + // Any region that is to be promoted in place needs to be retired from its Collector or Mutator partition. + idx_t pip_low_collector_idx = freeset->max_regions(); + idx_t pip_high_collector_idx = -1; + idx_t pip_low_mutator_idx = freeset->max_regions(); + idx_t pip_high_mutator_idx = -1; + size_t collector_regions_to_pip = 0; + size_t mutator_regions_to_pip = 0; + + for (idx_t i = 0; i < num_regions; i++) { ShenandoahHeapRegion* const r = heap->get_region(i); if (r->is_empty() || !r->has_live() || !r->is_young() || !r->is_regular()) { // skip over regions that aren't regular young with some live data @@ -568,8 +578,7 @@ size_t ShenandoahGeneration::select_aged_regions(size_t old_available) { if (r->age() >= tenuring_threshold) { if ((r->garbage() < old_garbage_threshold)) { // This tenure-worthy region has too little garbage, so we do not want to expend the copying effort to - // reclaim the garbage; instead this region may be eligible for promotion-in-place to the - // old generation. + // reclaim the garbage; instead this region may be eligible for promotion-in-place to old generation. 
HeapWord* tams = ctx->top_at_mark_start(r); HeapWord* original_top = r->top(); if (!heap->is_concurrent_old_mark_in_progress() && tams == original_top) { @@ -578,20 +587,46 @@ size_t ShenandoahGeneration::select_aged_regions(size_t old_available) { // we use this field to indicate that this region should be promoted in place during the evacuation // phase. r->save_top_before_promote(); - size_t remnant_bytes = r->free(); - size_t remnant_size = remnant_bytes / HeapWordSize; - if (remnant_size >= ShenandoahHeap::min_fill_size()) { - ShenandoahHeap::fill_with_object(original_top, remnant_size); + size_t remnant_words = remnant_bytes / HeapWordSize; + if (remnant_words >= ShenandoahHeap::min_fill_size()) { + ShenandoahHeap::fill_with_object(original_top, remnant_words); // Fill the remnant memory within this region to assure no allocations prior to promote in place. Otherwise, // newly allocated objects will not be parsable when promote in place tries to register them. Furthermore, any // new allocations would not necessarily be eligible for promotion. This addresses both issues. r->set_top(r->end()); + // The region r is either in the Mutator or Collector partition if remnant_words > heap()->plab_min_size. + // Otherwise, the region is in the NotFree partition. + ShenandoahFreeSetPartitionId p = free_set->membership(i); + if (p == ShenandoahFreeSetPartitionId::Mutator) { + mutator_regions_to_pip++; + if (i < pip_low_mutator_idx) { + pip_low_mutator_idx = i; + } + if (i > pip_high_mutator_idx) { + pip_high_mutator_idx = i; + } + } else if (p == ShenandoahFreeSetPartitionId::Collector) { + collector_regions_to_pip++; + if (i < pip_low_collector_idx) { + pip_low_collector_idx = i; + } + if (i > pip_high_collector_idx) { + pip_high_collector_idx = i; + } + } else { + assert((p == ShenandoahFreeSetPartitionId::NotFree) && (remnant_words < heap->plab_min_size()), + "Should be NotFree if not in Collector or Mutator partitions"); + // In this case, we'll count the remnant_bytes as used even though we will not create a fill object. + } promote_in_place_pad += remnant_bytes; free_set->prepare_to_promote_in_place(i, remnant_bytes); } else { // Since the remnant is so small that it cannot be filled, we don't have to worry about any accidental // allocations occurring within this region before the region is promoted in place. + + // This region was already not in the Collector or Mutator set, so no need to remove it. + assert(free_set->membership(i) == ShenandoahFreeSetPartitionId::NotFree, "sanity"); } // Even when we do not fill the remnant, we count the remnant as used young_gen->increase_used(remnant_bytes); @@ -633,6 +668,19 @@ size_t ShenandoahGeneration::select_aged_regions(size_t old_available) { // Note that we keep going even if one region is excluded from selection. // Subsequent regions may be selected if they have smaller live data. } + + // Retire any regions that have been selected for promote in place + if (collector_regions_to_pip > 0) { + freeset->shrink_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId::Collector, + pip_low_collector_idx, pip_high_collector_idx, + collector_regions_to_pip); + } + if (mutator_regions_to_pip > 0) { + freeset->shrink_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId::Mutator, + pip_low_mutator_idx, pip_high_mutator_idx, + mutator_regions_to_pip); + } + // Sort in increasing order according to live data bytes. Note that candidates represents the number of regions // that qualify to be promoted by evacuation. 
if (candidates > 0) { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahSimpleBitMap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahSimpleBitMap.cpp index 3f4bbafb755e3..82a759e34dbdf 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahSimpleBitMap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahSimpleBitMap.cpp @@ -25,7 +25,7 @@ #include "gc/shenandoah/shenandoahSimpleBitMap.inline.hpp" -ShenandoahSimpleBitMap::ShenandoahSimpleBitMap(size_t num_bits) : +ShenandoahSimpleBitMap::ShenandoahSimpleBitMap(idx_t num_bits) : _num_bits(num_bits), _num_words(align_up(num_bits, BitsPerWord) / BitsPerWord), _bitmap(NEW_C_HEAP_ARRAY(uintx, _num_words, mtGC)) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahSimpleBitMap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahSimpleBitMap.hpp index 3a4cb8cf742fc..5127e8b221a53 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahSimpleBitMap.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahSimpleBitMap.hpp @@ -57,7 +57,7 @@ class ShenandoahSimpleBitMap { uintx* const _bitmap; public: - ShenandoahSimpleBitMap(size_t num_bits); + ShenandoahSimpleBitMap(idx_t num_bits); ~ShenandoahSimpleBitMap(); @@ -116,7 +116,6 @@ class ShenandoahSimpleBitMap { inline void clear_bit(idx_t idx) { assert((idx >= 0) && (idx < _num_bits), "precondition"); - assert(idx >= 0, "precondition"); size_t array_idx = idx >> LogBitsPerWord; uintx bit_number = idx & (BitsPerWord - 1); uintx the_bit = nth_bit(bit_number); @@ -125,7 +124,6 @@ class ShenandoahSimpleBitMap { inline bool is_set(idx_t idx) const { assert((idx >= 0) && (idx < _num_bits), "precondition"); - assert(idx >= 0, "precondition"); size_t array_idx = idx >> LogBitsPerWord; uintx bit_number = idx & (BitsPerWord - 1); uintx the_bit = nth_bit(bit_number); From 782571585c852d034cef65d4f38e063bde73e1d7 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Fri, 8 Aug 2025 19:18:30 +0000 Subject: [PATCH 38/61] Disable generation accounting --- .../heuristics/shenandoahGlobalHeuristics.cpp | 3 +- .../gc/shenandoah/shenandoahConcurrentGC.cpp | 7 +- .../share/gc/shenandoah/shenandoahFreeSet.cpp | 170 ++++++++++++++++-- .../share/gc/shenandoah/shenandoahFreeSet.hpp | 67 ++++++- .../share/gc/shenandoah/shenandoahFullGC.cpp | 19 +- .../share/gc/shenandoah/shenandoahFullGC.hpp | 5 +- .../gc/shenandoah/shenandoahGeneration.cpp | 108 +++++++++-- .../gc/shenandoah/shenandoahGeneration.hpp | 68 ++++++- .../shenandoah/shenandoahGenerationSizer.cpp | 7 +- .../shenandoah/shenandoahGenerationSizer.hpp | 10 +- .../shenandoahGenerationalEvacuationTask.cpp | 8 +- .../shenandoahGenerationalFullGC.cpp | 4 +- .../shenandoahGenerationalFullGC.hpp | 4 + .../shenandoah/shenandoahGenerationalHeap.cpp | 46 +++-- .../share/gc/shenandoah/shenandoahHeap.cpp | 23 ++- .../gc/shenandoah/shenandoahHeapRegion.cpp | 2 + .../share/gc/shenandoah/shenandoahOldGC.cpp | 8 +- .../gc/shenandoah/shenandoahOldGeneration.cpp | 21 ++- 18 files changed, 501 insertions(+), 79 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp index 4e12b1d41e8cc..95ffcaf7a528e 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp @@ -136,10 +136,11 @@ void ShenandoahGlobalHeuristics::choose_global_collection_set(ShenandoahCollecti cset->add_region(r); } } - +#ifdef KELVIN_OUT_WITH_THE_OLD if (regions_transferred_to_old > 0) { 
heap->generation_sizer()->force_transfer_to_old(regions_transferred_to_old); heap->young_generation()->set_evacuation_reserve(young_evac_reserve - regions_transferred_to_old * region_size_bytes); heap->old_generation()->set_evacuation_reserve(old_evac_reserve + regions_transferred_to_old * region_size_bytes); } +#endif } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp index 2f264cae70f19..3bc3e1cb1928c 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp @@ -1175,6 +1175,9 @@ void ShenandoahConcurrentGC::op_update_thread_roots() { void ShenandoahConcurrentGC::op_final_update_refs() { ShenandoahHeap* const heap = ShenandoahHeap::heap(); + bool is_generational = heap->mode()->is_generational(); + ShenandoahGenerationalHeap* const gen_heap = ShenandoahGenerationalHeap::heap(); + assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint"); assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references"); @@ -1198,7 +1201,7 @@ void ShenandoahConcurrentGC::op_final_update_refs() { heap->set_update_refs_in_progress(false); heap->set_has_forwarded_objects(false); - if (heap->mode()->is_generational() && heap->is_concurrent_old_mark_in_progress()) { + if (is_generational && heap->is_concurrent_old_mark_in_progress()) { // When the SATB barrier is left on to support concurrent old gen mark, it may pick up writes to // objects in the collection set. After those objects are evacuated, the pointers in the // SATB are no longer safe. Once we have finished update references, we are guaranteed that @@ -1217,7 +1220,7 @@ void ShenandoahConcurrentGC::op_final_update_refs() { // Aging_cycle is only relevant during evacuation cycle for individual objects and during final mark for // entire regions. Both of these relevant operations occur before final update refs. 
- ShenandoahGenerationalHeap::heap()->set_aging_cycle(false); + gen_heap->set_aging_cycle(false); } if (ShenandoahVerify) { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index 7000b7034eb91..0ddff90321102 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -228,6 +228,16 @@ inline bool ShenandoahFreeSet::has_alloc_capacity(ShenandoahHeapRegion *r) const return alloc_capacity(r) > 0; } +void ShenandoahFreeSet::reset_bytes_allocated_since_gc_start() { + shenandoah_assert_heaplocked(); + _mutator_bytes_allocated_since_gc_start = 0; +} + +void ShenandoahFreeSet::increase_bytes_allocated(size_t bytes) { + shenandoah_assert_heaplocked(); + _mutator_bytes_allocated_since_gc_start += bytes; +} + inline idx_t ShenandoahRegionPartitions::leftmost(ShenandoahFreeSetPartitionId which_partition) const { assert (which_partition < NumPartitions, "selected free partition must be valid"); idx_t idx = _leftmosts[int(which_partition)]; @@ -397,7 +407,6 @@ size_t ShenandoahRegionPartitions::get_used(ShenandoahFreeSetPartitionId which_p void ShenandoahRegionPartitions::increase_humongous_waste(ShenandoahFreeSetPartitionId which_partition, size_t bytes) { shenandoah_assert_heaplocked(); assert (which_partition < NumPartitions, "Partition must be valid"); - _humongous_waste[int(which_partition)] += bytes; #ifdef KELVIN_HUMONGOUS_WASTE log_info(gc)("FreeSet<%s>::increase_humongous_waste(%zu) yields: %zu", partition_name(which_partition), @@ -1268,13 +1277,18 @@ void ShenandoahRegionPartitions::assert_bounds(bool validate_totals) { ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) : _heap(heap), _partitions(max_regions, this), + _total_humongous_waste(0), _alloc_bias_weight(0), _total_young_used(0), _total_old_used(0), _total_global_used(0), _young_affiliated_regions(0), _old_affiliated_regions(0), - _global_affiliated_regions(0) + _global_affiliated_regions(0), + _young_unaffiliated_regions(0), + _global_unaffiliated_regions(0), + _total_young_regions(0), + _total_global_regions(0) { clear_internal(); } @@ -1627,11 +1641,12 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah r->end_preemptible_coalesce_and_fill(); _heap->old_generation()->clear_cards_for(r); } +#ifdef KELVIN_OUT_WITH_THE_OLD _heap->generation_for(r->affiliation())->increment_affiliated_region_count(); if (_heap->mode()->is_generational()) { _heap->global_generation()->increment_affiliated_region_count(); } - +#endif #ifdef ASSERT ShenandoahMarkingContext* const ctx = _heap->marking_context(); assert(ctx->top_at_mark_start(r) == r->bottom(), "Newly established allocation region starts with TAMS equal to bottom"); @@ -1704,6 +1719,7 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah if (req.is_mutator_alloc()) { assert(req.is_young(), "Mutator allocations always come from young generation."); _partitions.increase_used(ShenandoahFreeSetPartitionId::Mutator, req.actual_size() * HeapWordSize); + increase_bytes_allocated(req.actual_size() * HeapWordSize); } else { assert(req.is_gc_alloc(), "Should be gc_alloc since req wasn't mutator alloc"); @@ -1755,12 +1771,18 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah _partitions.one_region_is_no_longer_empty(orig_partition); } size_t waste_bytes = _partitions.retire_from_partition(orig_partition, idx, r->used()); +#ifdef 
KELVIN_OUT_WITH_THE_OLD if (waste_bytes > 0) { request_generation->increase_used(waste_bytes); if (_heap->mode()->is_generational()) { _heap->global_generation()->increase_used(waste_bytes); } } +#else + if (req.is_mutator_alloc() && (waste_bytes > 0)) { + increase_bytes_allocated(waste_bytes); + } +#endif } else if ((result != nullptr) && in_new_region) { _partitions.one_region_is_no_longer_empty(orig_partition); } @@ -1840,6 +1862,7 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) { _partitions.retire_range_from_partition(ShenandoahFreeSetPartitionId::Mutator, beg, end); size_t total_humongous_size = ShenandoahHeapRegion::region_size_bytes() * num; _partitions.increase_used(ShenandoahFreeSetPartitionId::Mutator, total_humongous_size); + increase_bytes_allocated(total_humongous_size); size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask(); // Initialize regions: @@ -1868,11 +1891,12 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) { r->set_update_watermark(r->bottom()); r->set_top(r->bottom() + used_words); } +#ifdef KELVIN_OUT_WITH_THE_OLD generation->increase_affiliated_region_count(num); if (_heap->mode()->is_generational()) { _heap->global_generation()->increase_affiliated_region_count(num); } - +#endif if (remainder != 0) { // Record this remainder as allocation waste _heap->notify_mutator_alloc_words(ShenandoahHeapRegion::region_size_words() - remainder, true); @@ -1885,7 +1909,9 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) { #ifdef KELVIN_HUMONGOUS_WASTE log_info(gc)("FreeSet alloc_contiguous increasing mutator humongous waste by %zu bytes", waste * HeapWordSize); #endif - _partitions.increase_humongous_waste(ShenandoahFreeSetPartitionId::Mutator, waste * HeapWordSize); + size_t waste_bytes = waste * HeapWordSize; + _partitions.increase_humongous_waste(ShenandoahFreeSetPartitionId::Mutator, waste_bytes); + _total_humongous_waste += waste_bytes; } #ifdef KELVIN_REGION_COUNTS @@ -2038,6 +2064,7 @@ class ShenandoahRecycleTrashedRegionClosure final : public ShenandoahHeapRegionC for (size_t p = 0; p < int(ShenandoahRegionPartitions::NumPartitions); p++) { _partitions->decrease_used(ShenandoahFreeSetPartitionId(p), used_byte_tallies[p]); } +#ifdef KELVIN_OUT_WITH_THE_OLD ShenandoahYoungGeneration* young_gen = heap->young_generation(); ShenandoahOldGeneration* old_gen = heap->old_generation(); young_gen->decrease_used(used_byte_tallies[int(ShenandoahFreeSetPartitionId::Mutator)] @@ -2046,6 +2073,7 @@ class ShenandoahRecycleTrashedRegionClosure final : public ShenandoahHeapRegionC + region_tallies[int(ShenandoahFreeSetPartitionId::Collector)]); old_gen->decrease_used(used_byte_tallies[int(ShenandoahFreeSetPartitionId::OldCollector)]); old_gen->decrease_affiliated_region_count(region_tallies[int(ShenandoahFreeSetPartitionId::OldCollector)]); +#endif #ifdef KELVIN_RECYCLE log_info(gc)("Done with flush buffer, releasing global heap lock"); #endif @@ -2081,6 +2109,27 @@ void ShenandoahFreeSet::recycle_trash() { heap->parallel_heap_region_iterate(&closure); } +bool ShenandoahFreeSet::transfer_one_region_from_mutator_to_old_collector(size_t idx, size_t alloc_capacity) { + ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap(); + ShenandoahYoungGeneration* young_gen = gen_heap->young_generation(); + ShenandoahOldGeneration* old_gen = gen_heap->old_generation(); + size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); + if 
((young_unaffiliated_regions() > 0) && + (((_partitions.get_total_region_counts(ShenandoahFreeSetPartitionId::OldCollector) + 1) * region_size_bytes) + <= gen_heap->generation_sizer()->max_size_for(old_gen)) && + ((total_young_regions() - 1) * region_size_bytes >= gen_heap->generation_sizer()->min_size_for(young_gen))) { + _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator, + ShenandoahFreeSetPartitionId::OldCollector, alloc_capacity); + gen_heap->old_generation()->augment_evacuation_reserve(alloc_capacity); + recompute_total_used(); + recompute_total_affiliated(); + _partitions.assert_bounds(true); + return true; + } else { + return false; + } +} + bool ShenandoahFreeSet::flip_to_old_gc(ShenandoahHeapRegion* r) { const size_t idx = r->index(); @@ -2088,18 +2137,24 @@ bool ShenandoahFreeSet::flip_to_old_gc(ShenandoahHeapRegion* r) { assert(can_allocate_from(r), "Should not be allocated"); ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap(); - const size_t region_capacity = alloc_capacity(r); + const size_t region_alloc_capacity = alloc_capacity(r); +#ifdef KELVIN_OUT_WITH_THE_OLD bool transferred = gen_heap->generation_sizer()->transfer_to_old(1); if (transferred) { _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator, - ShenandoahFreeSetPartitionId::OldCollector, region_capacity); - _heap->old_generation()->augment_evacuation_reserve(region_capacity); + ShenandoahFreeSetPartitionId::OldCollector, region_alloc_capacity); + _heap->old_generation()->augment_evacuation_reserve(region_alloc_capacity); recompute_total_used(); recompute_total_affiliated(); _partitions.assert_bounds(true); return true; } + // replaced by the following +#endif + if (transfer_one_region_from_mutator_to_old_collector(idx, region_alloc_capacity)) { + return true; + } if (_heap->young_generation()->free_unaffiliated_regions() == 0 && _heap->old_generation()->free_unaffiliated_regions() > 0) { // Old has free unaffiliated regions, but it couldn't use them for allocation (likely because they @@ -2127,7 +2182,7 @@ bool ShenandoahFreeSet::flip_to_old_gc(ShenandoahHeapRegion* r) { // 3. Move this usable region from the mutator partition to the old collector partition _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator, - ShenandoahFreeSetPartitionId::OldCollector, region_capacity); + ShenandoahFreeSetPartitionId::OldCollector, region_alloc_capacity); recompute_total_used(); recompute_total_affiliated(); _partitions.assert_bounds(true); @@ -2135,7 +2190,7 @@ bool ShenandoahFreeSet::flip_to_old_gc(ShenandoahHeapRegion* r) { // been accounted for. However, we should adjust the evacuation reserves as those may have changed. 
shenandoah_assert_heaplocked(); const size_t reserve = _heap->old_generation()->get_evacuation_reserve(); - _heap->old_generation()->set_evacuation_reserve(reserve - unusable_capacity + region_capacity); + _heap->old_generation()->set_evacuation_reserve(reserve - unusable_capacity + region_alloc_capacity); return true; } } @@ -2167,6 +2222,7 @@ void ShenandoahFreeSet::clear() { void ShenandoahFreeSet::clear_internal() { shenandoah_assert_heaplocked(); _partitions.make_all_regions_unavailable(); + reset_bytes_allocated_since_gc_start(); recompute_total_used(); recompute_total_affiliated(); @@ -2429,9 +2485,13 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r old_collector_empty, old_collector_regions, old_collector_used + old_cset_regions * region_size_bytes, old_collector_humongous_waste); + _total_humongous_waste = mutator_humongous_waste + old_collector_humongous_waste; + _total_young_regions = total_mutator_regions + young_cset_regions; + _total_global_regions = _total_young_regions + total_old_collector_regions + old_cset_regions; recompute_total_used(); recompute_total_affiliated(); _partitions.assert_bounds(true); +#ifdef KELVIN_OUT_WITH_THE_OLD // Update generations to assure consistency while we still hold the lock. This handles case that someone consults // generation sizes between now and start of finish_rebuild. This may release from old memory that we intend to reserve // for the old collector. @@ -2440,7 +2500,7 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r affiliated_old_collector_regions + old_cset_regions, mutator_used + young_cset_regions * region_size_bytes, old_collector_used + old_cset_regions * region_size_bytes); - +#endif #ifdef ASSERT if (_heap->mode()->is_generational()) { assert(young_affiliated_regions() == _heap->young_generation()->get_affiliated_region_count(), "sanity"); @@ -2479,6 +2539,8 @@ void ShenandoahFreeSet::transfer_humongous_regions_from_mutator_to_old_collector _partitions.increase_humongous_waste(ShenandoahFreeSetPartitionId::OldCollector, humongous_waste_bytes); _partitions.increase_used(ShenandoahFreeSetPartitionId::OldCollector, xfer_regions * region_size_bytes); + // _total_humongous_waste, _total_global_regions are unaffected by transfer + _total_young_regions -= xfer_regions; recompute_total_young_used(); recompute_total_old_used(); recompute_total_affiliated(); @@ -2540,6 +2602,10 @@ size_t ShenandoahFreeSet::transfer_empty_regions_from_collector_set_to_mutator_s _partitions.increase_region_counts(ShenandoahFreeSetPartitionId::Mutator, transferred_regions); _partitions.increase_empty_region_counts(ShenandoahFreeSetPartitionId::Mutator, transferred_regions); + if (which_collector == ShenandoahFreeSetPartitionId::OldCollector) { + _total_young_regions += transferred_regions; + } + // _total_global_regions unaffected by transfer recompute_total_used(); _partitions.assert_bounds(true); // Should not need to recompute_total_affiliated() because all transferred regions are empty. 
@@ -2600,6 +2666,10 @@ transfer_non_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPa _partitions.increase_region_counts(ShenandoahFreeSetPartitionId::Mutator, transferred_regions); _partitions.increase_used(ShenandoahFreeSetPartitionId::Mutator, used_transfer); + if (which_collector == ShenandoahFreeSetPartitionId::OldCollector) { + _total_young_regions += transferred_regions; + } + // _total_global_regions unaffected by transfer recompute_total_used(); recompute_total_affiliated(); _partitions.assert_bounds(true); @@ -2629,9 +2699,11 @@ void ShenandoahFreeSet::move_regions_from_collector_to_mutator(size_t max_xfer_r transfer_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPartitionId::OldCollector, max_xfer_regions, old_collector_xfer); max_xfer_regions -= old_collector_regions; +#ifdef KELVIN_OUT_WITH_THE_OLD if (old_collector_regions > 0) { ShenandoahGenerationalHeap::cast(_heap)->generation_sizer()->transfer_to_young(old_collector_regions); } +#endif } // If there are any non-empty regions within Collector partition, we can also move them to the Mutator free partition @@ -2664,15 +2736,14 @@ void ShenandoahFreeSet::prepare_to_rebuild(size_t &young_trashed_regions, size_t find_regions_with_alloc_capacity(young_trashed_regions, old_trashed_regions, first_old_region, last_old_region, old_region_count); } - - +#ifdef KELVIN_OUT_WITH_THE_OLD // The totals reported here anticipate the recycling of trash regions. Their memory is counted as unused and fully // available at this moment in time, even though the memory cannot be re-allocated until after it is recycled. void ShenandoahFreeSet::establish_generation_sizes(size_t young_region_count, size_t old_region_count, size_t affiliated_young_regions, size_t affiliated_old_regions, size_t young_used_bytes, size_t old_used_bytes) { assert(young_region_count + old_region_count == ShenandoahHeap::heap()->num_regions(), "Sanity"); -#undef KELVIN_RESERVE +#define KELVIN_RESERVE #ifdef KELVIN_RESERVE log_info(gc)("establish_generation_sizes(young_region_count: %zu, old_region_count: %zu, ", young_region_count, old_region_count); @@ -2753,6 +2824,7 @@ void ShenandoahFreeSet::reestablish_generation_sizes(size_t young_region_count, old_gen->set_region_balance(0); } } +#endif void ShenandoahFreeSet::finish_rebuild(size_t young_trashed_regions, size_t old_trashed_regions, size_t old_region_count, bool have_evacuation_reserves) { @@ -2772,20 +2844,40 @@ void ShenandoahFreeSet::finish_rebuild(size_t young_trashed_regions, size_t old_ size_t young_used_regions, old_used_regions, young_used_bytes, old_used_bytes, affiliated_young_regions, affiliated_old_regions; reserve_regions(young_reserve, old_reserve, old_region_count, young_used_regions, old_used_regions, young_used_bytes, old_used_bytes); - size_t young_region_count = _heap->num_regions() - old_region_count; - reestablish_generation_sizes(young_region_count, old_region_count); + _total_young_regions = _heap->num_regions() - old_region_count; + _total_global_regions = _heap->num_regions(); +#ifdef KELVIN_OUT_WITH_THE_OLD + reestablish_generation_sizes(_total_young_regions, old_region_count); +#endif establish_old_collector_alloc_bias(); _partitions.assert_bounds(true); log_status(); } +/** + * Set young_reserve_result and old_reserve_result to the number of bytes that we desire to set aside to hold the + * results of evacuation to young and old collector spaces respectively during the next evacuation phase. 
Overwrite + * old_generation region balance in case the original value is incompatible with the current reality. + * + * These values are determined by how much memory is currently available within each generation, which is + * represented by: + * 1. Memory currently available within old and young + * 2. Trashed regions currently residing in young and old, which will become available momentarily + * 3. The value of old_generation->get_region_balance() which represents the number of regions that we plan + * to transfer from old generation to young generation. Prior to each invocation of compute_young_and_old_reserves(), + * this value should computed by ShenandoahGenerationalHeap::compute_old_generation_balance(). + */ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_trashed_regions, size_t old_trashed_regions, bool have_evacuation_reserves, size_t& young_reserve_result, size_t& old_reserve_result) const { shenandoah_assert_generational(); shenandoah_assert_heaplocked(); const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); - +#define KELVIN_RESERVE +#ifdef KELVIN_RESERVE + log_info(gc)("compute_young_and_old_reserve(young_trashed: %zu, old_trashed: %zu, have_reserves: %s)", + young_trashed_regions, old_trashed_regions, have_evacuation_reserves? "yes": "no"); +#endif ShenandoahOldGeneration* const old_generation = _heap->old_generation(); size_t old_available = old_generation->available(); size_t old_unaffiliated_regions = old_generation->free_unaffiliated_regions(); @@ -2793,20 +2885,32 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_trashed_regi size_t young_capacity = young_generation->max_capacity(); size_t young_unaffiliated_regions = young_generation->free_unaffiliated_regions(); +#ifdef KELVIN_RESERVE + log_info(gc)(" young_unaffiliated: %zu, capacity: %zu, old_unaffiliated: %zu, old_avail: %zu", + young_unaffiliated_regions, young_capacity, old_unaffiliated_regions, old_available); +#endif + // Add in the regions we anticipate to be freed by evacuation of the collection set old_unaffiliated_regions += old_trashed_regions; + old_available += old_trashed_regions * region_size_bytes; young_unaffiliated_regions += young_trashed_regions; // Consult old-region balance to make adjustments to current generation capacities and availability. // The generation region transfers take place after we rebuild. old_region_balance represents number of regions // to transfer from old to young. 
ssize_t old_region_balance = old_generation->get_region_balance(); +#ifdef KELVIN_RESERVE + log_info(gc)(" old_region_balance: %zd", old_region_balance); +#endif if (old_region_balance != 0) { #ifdef ASSERT if (old_region_balance > 0) { - assert(old_region_balance <= checked_cast(old_unaffiliated_regions), "Cannot transfer regions that are affiliated"); + assert(old_region_balance <= checked_cast(old_unaffiliated_regions), + "Cannot transfer %zd regions that are affiliated (old_trashed: %zu, old_unaffiliated: %zu)", + old_region_balance, old_trashed_regions, old_unaffiliated_regions); } else { - assert(0 - old_region_balance <= checked_cast(young_unaffiliated_regions), "Cannot transfer regions that are affiliated"); + assert(0 - old_region_balance <= checked_cast(young_unaffiliated_regions), + "Cannot transfer regions that are affiliated"); } #endif @@ -2815,6 +2919,11 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_trashed_regi old_unaffiliated_regions -= old_region_balance; young_capacity += xfer_bytes; young_unaffiliated_regions += old_region_balance; + +#ifdef KELVIN_RESERVE + log_info(gc)(" xfer_bytes: %zd, adjusted old_available: %zu, old_unaffiliated: %zu, young_capacity: %zu, young_unaffiliated: %zu", + xfer_bytes, old_available, old_unaffiliated_regions, young_capacity, young_unaffiliated_regions); +#endif } // All allocations taken from the old collector set are performed by GC, generally using PLABs for both @@ -2827,6 +2936,10 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_trashed_regi const size_t old_evac_reserve = old_generation->get_evacuation_reserve(); young_reserve_result = young_generation->get_evacuation_reserve(); old_reserve_result = promoted_reserve + old_evac_reserve; +#ifdef KELVIN_RESERVE + log_info(gc)("have_evac_reserve, old_reserve_result (%zu), young_reserve_result (%zu)", + old_reserve_result, young_reserve_result); +#endif if (old_reserve_result > old_available) { // Try to transfer memory from young to old. size_t old_deficit = old_reserve_result - old_available; @@ -2838,6 +2951,13 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_trashed_regi old_unaffiliated_regions += old_region_deficit; old_region_balance -= old_region_deficit; old_generation->set_region_balance(old_region_balance); +#ifdef KELVIN_RESERVE + log_info(gc)(" overwrite old_gen->region_balance() to %zu", old_region_balance); + log_info(gc)(" old_reserve_result (%zu) > old_available (%zu), old_deficit: %zu, region_deficit: %zu", + old_reserve_result, old_available, old_deficit, old_region_deficit); + log_info(gc)(" young_unaffiliated: %zu, old_unaffiliated: %zu, old_region_balance: %zd", + young_unaffiliated_regions, old_unaffiliated_regions, old_region_balance); +#endif } } else { // We are rebuilding at end of GC, so we set aside budgets specified on command line (or defaults) @@ -2846,6 +2966,10 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_trashed_regi // Affiliated old-gen regions are already in the OldCollector free set. Add in the relevant number of // unaffiliated regions. 
old_reserve_result = old_available; +#ifdef KELVIN_RESERVE + log_info(gc)("have_evac_reserve, old_reserve_result (%zu) > young_reserve_result (%zu)", + old_reserve_result, young_reserve_result); +#endif } // Old available regions that have less than PLAB::min_size() of available memory are not placed into the OldCollector @@ -2856,10 +2980,16 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_trashed_regi _partitions.capacity_of(ShenandoahFreeSetPartitionId::OldCollector) + old_unaffiliated_regions * region_size_bytes) { old_reserve_result = _partitions.capacity_of(ShenandoahFreeSetPartitionId::OldCollector) + old_unaffiliated_regions * region_size_bytes; +#ifdef KELVIN_RESERVE + log_info(gc)(" downsizing old_reserve_result due to capacity limit: %zu", old_reserve_result); +#endif } if (young_reserve_result > young_unaffiliated_regions * region_size_bytes) { young_reserve_result = young_unaffiliated_regions * region_size_bytes; +#ifdef KELVIN_RESERVE + log_info(gc)(" downsizing young_reserve_result due to unaffiliated limit: %zu", young_reserve_result); +#endif } } @@ -2878,6 +3008,7 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old young_used_bytes = 0; old_used_bytes = 0; +#define KELVIN_RESERVE #ifdef KELVIN_RESERVE log_info(gc)("reserve_regions(to_reserve: %zu, to_reserve_old: %zu", to_reserve, to_reserve_old); #endif @@ -3450,6 +3581,7 @@ void ShenandoahFreeSet::decrease_humongous_waste_for_regular_bypass(ShenandoahHe r->allocate_fill(fill_words); } #endif + _total_humongous_waste -= waste; recompute_total_used(); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp index ca0f111af4515..acbbe69e84650 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp @@ -110,6 +110,9 @@ class ShenandoahRegionPartitions { size_t _used[UIntNumPartitions]; size_t _available[UIntNumPartitions]; + // Measured in bytes.
+ size_t _allocated_since_gc_start[UIntNumPartitions]; + // Some notes: // _retired_regions[p] is _total_region_counts[p] - _region_counts[p] // _empty_region_counts[p] <= _region_counts[p] <= _total_region_counts[p] @@ -459,6 +462,8 @@ class ShenandoahFreeSet : public CHeapObj { ShenandoahHeap* const _heap; ShenandoahRegionPartitions _partitions; + size_t _total_humongous_waste; + HeapWord* allocate_aligned_plab(size_t size, ShenandoahAllocRequest& req, ShenandoahHeapRegion* r); // Return the address of memory allocated, setting in_new_region to true iff the allocation is taken @@ -525,13 +530,25 @@ class ShenandoahFreeSet : public CHeapObj { size_t _old_affiliated_regions; size_t _global_affiliated_regions; + size_t _young_unaffiliated_regions; + size_t _global_unaffiliated_regions; + + size_t _total_young_regions; + size_t _total_global_regions; + + size_t _mutator_bytes_allocated_since_gc_start; + inline void recompute_total_affiliated() { - _young_affiliated_regions = (_partitions.get_total_region_counts(ShenandoahFreeSetPartitionId::Mutator) + - _partitions.get_total_region_counts(ShenandoahFreeSetPartitionId::Collector) - - (_partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::Mutator) + - _partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::Collector))); + shenandoah_assert_heaplocked(); + _young_unaffiliated_regions = (_partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::Mutator) + + _partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::Collector)); + _young_affiliated_regions = ((_partitions.get_total_region_counts(ShenandoahFreeSetPartitionId::Mutator) + + _partitions.get_total_region_counts(ShenandoahFreeSetPartitionId::Collector)) - + _young_unaffiliated_regions); _old_affiliated_regions = (_partitions.get_total_region_counts(ShenandoahFreeSetPartitionId::OldCollector) - - _partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::OldCollector)); + _partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::OldCollector)); + _global_unaffiliated_regions = + _young_unaffiliated_regions + _partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::OldCollector); _global_affiliated_regions = _young_affiliated_regions + _old_affiliated_regions; #undef KELVIN_AFFILIATED #ifdef KELVIN_AFFILIATED @@ -564,6 +581,8 @@ class ShenandoahFreeSet : public CHeapObj { // Precondition: ShenandoahHeapRegion::requires_humongous(req.size()) HeapWord* allocate_contiguous(ShenandoahAllocRequest& req); + bool transfer_one_region_from_mutator_to_old_collector(size_t idx, size_t alloc_capacity); + // Change region r from the Mutator partition to the GC's Collector or OldCollector partition. This requires that the // region is entirely empty. // @@ -619,13 +638,14 @@ class ShenandoahFreeSet : public CHeapObj { // Determine whether we prefer to allocate from left to right or from right to left within the OldCollector free-set. void establish_old_collector_alloc_bias(); +#ifdef KELVIN_OUT_WITH_THE_OLD // Set max_capacity for young and old generations void establish_generation_sizes(size_t young_region_count, size_t old_region_count, size_t affiliated_young_regions, size_t affiliated_old_regions, size_t young_used_bytes, size_t old_used_bytes); void reestablish_generation_sizes(size_t young_region_count, size_t old_region_count); - +#endif size_t get_usable_free_words(size_t free_bytes) const; // log status, assuming lock has already been acquired by the caller. 
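// Editor's note: illustrative sketch, not part of the patch. It spells out the identities that
// recompute_total_affiliated() (earlier in this hunk) maintains: unaffiliated regions are the
// empty ones, and affiliated counts are totals minus empties. The struct and names are
// hypothetical stand-ins for the per-partition counters.
#include <cstddef>
struct PartitionCountsSketch { size_t total_regions; size_t empty_regions; };
static void recompute_affiliation_sketch(const PartitionCountsSketch& mutator,
                                         const PartitionCountsSketch& collector,
                                         const PartitionCountsSketch& old_collector,
                                         size_t& young_affiliated, size_t& young_unaffiliated,
                                         size_t& old_affiliated,
                                         size_t& global_affiliated, size_t& global_unaffiliated) {
  young_unaffiliated  = mutator.empty_regions + collector.empty_regions;
  young_affiliated    = (mutator.total_regions + collector.total_regions) - young_unaffiliated;
  old_affiliated      = old_collector.total_regions - old_collector.empty_regions;
  global_unaffiliated = young_unaffiliated + old_collector.empty_regions;
  global_affiliated   = young_affiliated + old_affiliated;
}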
@@ -643,6 +663,14 @@ class ShenandoahFreeSet : public CHeapObj { return _partitions.shrink_interval_if_range_modifies_either_boundary(partition, low_idx, high_idx, num_regions); } + void reset_bytes_allocated_since_gc_start(); + + void increase_bytes_allocated(size_t bytes); + + inline size_t get_bytes_allocated_since_gc_start() const { + return _mutator_bytes_allocated_since_gc_start; + } + // Public because ShenandoahRegionPartitions assertions require access. inline size_t alloc_capacity(ShenandoahHeapRegion *r) const; inline size_t alloc_capacity(size_t idx) const; @@ -664,6 +692,18 @@ class ShenandoahFreeSet : public CHeapObj { return _total_global_used; } + size_t global_unaffiliated_regions() { + return _global_unaffiliated_regions; + } + + size_t young_unaffiliated_regions() { + return _young_unaffiliated_regions; + } + + size_t old_unaffiliated_regions() { + return _partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::OldCollector); + } + size_t young_affiliated_regions() { return _young_affiliated_regions; } @@ -676,6 +716,18 @@ class ShenandoahFreeSet : public CHeapObj { return _global_affiliated_regions; } + size_t total_young_regions() { + return _total_young_regions; + } + + size_t total_old_regions() { + return _partitions.get_total_region_counts(ShenandoahFreeSetPartitionId::OldCollector); + } + + size_t total_global_regions() { + return _total_global_regions; + } + void clear(); // Examine the existing free set representation, capturing the current state into var arguments: @@ -741,7 +793,8 @@ class ShenandoahFreeSet : public CHeapObj { inline size_t used() const { return _partitions.used_by(ShenandoahFreeSetPartitionId::Mutator); } inline size_t available() const { return _partitions.available_in_not_locked(ShenandoahFreeSetPartitionId::Mutator); } - inline size_t humongous_waste_in_mutator() const { return _partitions.humongous_waste(ShenandoahFreeSetPartitionId::Mutator); } + inline size_t total_humongous_waste() const { return _total_humongous_waste; } + inline size_t humongous_waste_in_mutator() const { return _partitions.humongous_waste(ShenandoahFreeSetPartitionId::Mutator); } inline size_t humongous_waste_in_old() const { return _partitions.humongous_waste(ShenandoahFreeSetPartitionId::OldCollector); } void decrease_humongous_waste_for_regular_bypass(ShenandoahHeapRegion* r, size_t waste); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp index 27ff45e67de19..d6e511d268a70 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp @@ -253,8 +253,13 @@ void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) { phase4_compact_objects(worker_slices); +#ifdef KELVIN_OUT_WITH_THE_OLD result = phase5_epilog(); +#else + phase5_epilog(); +#endif } +#ifdef KELVIN_OUT_WITH_THE_OLD if (heap->mode()->is_generational()) { LogTarget(Info, gc, ergo) lt; if (lt.is_enabled()) { @@ -262,6 +267,7 @@ void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) { result.print_on("Full GC", &ls); } } +#endif // Resize metaspace MetaspaceGC::compute_new_size(); @@ -994,6 +1000,7 @@ class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure { r->reset_alloc_metadata(); } +#ifdef KELVIN_OUT_WITH_THE_OLD void update_generation_usage() { if (_is_generational) { _heap->old_generation()->establish_usage(_old_regions, _old_usage, _old_humongous_waste); @@ -1010,6 +1017,7 @@ class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure 
{ _old_usage + _young_usage, _old_humongous_waste + _young_humongous_waste); } +#endif }; void ShenandoahFullGC::compact_humongous_objects() { @@ -1128,7 +1136,11 @@ void ShenandoahFullGC::phase4_compact_objects(ShenandoahHeapRegionSet** worker_s } } +#ifdef KELVIN_OUT_WITH_THE_OLD ShenandoahGenerationalHeap::TransferResult ShenandoahFullGC::phase5_epilog() { +#else +void ShenandoahFullGC::phase5_epilog() { +#endif GCTraceTime(Info, gc, phases) time("Phase 5: Full GC epilog", _gc_timer); ShenandoahHeap* heap = ShenandoahHeap::heap(); ShenandoahGenerationalHeap::TransferResult result; @@ -1146,11 +1158,12 @@ ShenandoahGenerationalHeap::TransferResult ShenandoahFullGC::phase5_epilog() { ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_rebuild); ShenandoahPostCompactClosure post_compact; heap->heap_region_iterate(&post_compact); +#ifdef KELVIN_OUT_WITH_THE_OLD post_compact.update_generation_usage(); - if (heap->mode()->is_generational()) { ShenandoahGenerationalFullGC::balance_generations_after_gc(heap); } +#endif heap->collection_set()->clear(); size_t young_cset_regions, old_cset_regions; @@ -1177,8 +1190,12 @@ ShenandoahGenerationalHeap::TransferResult ShenandoahFullGC::phase5_epilog() { // We defer generation resizing actions until after cset regions have been recycled. We do this even following an // abbreviated cycle. if (heap->mode()->is_generational()) { +#ifdef KELVIN_OUT_WITH_THE_OLD result = ShenandoahGenerationalFullGC::balance_generations_after_rebuilding_free_set(); +#endif ShenandoahGenerationalFullGC::rebuild_remembered_set(heap); } +#ifdef KELVIN_OUT_WITH_THE_OLD return result; +#endif } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.hpp b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.hpp index b0b8c7bf0c599..7ec55240ae835 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.hpp @@ -82,8 +82,11 @@ class ShenandoahFullGC : public ShenandoahGC { void phase2_calculate_target_addresses(ShenandoahHeapRegionSet** worker_slices); void phase3_update_references(); void phase4_compact_objects(ShenandoahHeapRegionSet** worker_slices); +#ifdef KELVIN_OUT_WITH_THE_OLD ShenandoahGenerationalHeap::TransferResult phase5_epilog(); - +#else + void phase5_epilog(); +#endif void distribute_slices(ShenandoahHeapRegionSet** worker_slices); void calculate_target_humongous_objects(); void compact_humongous_objects(); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp index 038bf6d045354..2ef49da7064f3 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp @@ -148,6 +148,7 @@ ShenandoahHeuristics* ShenandoahGeneration::initialize_heuristics(ShenandoahMode return _heuristics; } +#ifdef KELVIN_OUT_WITH_THE_OLD size_t ShenandoahGeneration::bytes_allocated_since_gc_start() const { return Atomic::load(&_bytes_allocated_since_gc_start); } @@ -159,8 +160,10 @@ void ShenandoahGeneration::reset_bytes_allocated_since_gc_start() { void ShenandoahGeneration::increase_allocated(size_t bytes) { Atomic::add(&_bytes_allocated_since_gc_start, bytes, memory_order_relaxed); } +#endif void ShenandoahGeneration::set_evacuation_reserve(size_t new_val) { + shenandoah_assert_heaplocked(); _evacuation_reserve = new_val; } @@ -440,7 +443,9 @@ void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap, size_t excess_old = old_available - 
old_consumed; size_t unaffiliated_old_regions = old_generation->free_unaffiliated_regions(); size_t unaffiliated_old = unaffiliated_old_regions * region_size_bytes; - assert(old_available >= unaffiliated_old, "Unaffiliated old is a subset of old available"); + assert(old_available >= unaffiliated_old, + "Unaffiliated old (%zu is %zu * %zu) is a subset of old available (%zu)", + unaffiliated_old, unaffiliated_old_regions, region_size_bytes, old_available); // Make sure old_evac_committed is unaffiliated if (old_evacuated_committed > 0) { @@ -471,7 +476,13 @@ void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap, regions_to_xfer = MIN2(excess_regions, unaffiliated_old_regions); } +#ifdef KELVIN_OUT_WITH_THE_OLD if (regions_to_xfer > 0) { + // kelvin is here: how does this affect freeset when transfer_to_young is deprecated? + // we are adjusting evacuation budget after choosing the collection set. + + // momentarily, we will rebuild the free se. + bool result = ShenandoahGenerationalHeap::cast(heap)->generation_sizer()->transfer_to_young(regions_to_xfer); assert(excess_old >= regions_to_xfer * region_size_bytes, "Cannot transfer (%zu, %zu) more than excess old (%zu)", @@ -480,6 +491,15 @@ void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap, log_debug(gc, ergo)("%s transferred %zu excess regions to young before start of evacuation", result? "Successfully": "Unsuccessfully", regions_to_xfer); } +#else + // kelvin conjecture: we do not need to transfer regions. we just need to adjust excess_old. that will + // cause regions to transfer after we rebuild the freeset. + if (regions_to_xfer > 0) { + excess_old -= regions_to_xfer * region_size_bytes; + log_debug(gc, ergo)("Before start of evacuation, total_promotion reserve is young_advance_promoted_reserve: %zu " + "plus excess: old: %zu", young_advance_promoted_reserve_used, excess_old); + } +#endif // Add in the excess_old memory to hold unanticipated promotions, if any. If there are more unanticipated // promotions than fit in reserved memory, they will be deferred until a future GC pass. @@ -628,8 +648,10 @@ size_t ShenandoahGeneration::select_aged_regions(size_t old_available) { // This region was already not in the Collector or Mutator set, so no need to remove it. assert(free_set->membership(i) == ShenandoahFreeSetPartitionId::NotFree, "sanity"); } +#ifdef KELVIN_OUT_WITH_THE_OLD // Even when we do not fill the remnant, we count the remnant as used young_gen->increase_used(remnant_bytes); +#endif } // Else, we do not promote this region (either in place or by copy) because it has received new allocations. @@ -812,6 +834,12 @@ void ShenandoahGeneration::prepare_regions_and_collection_set(bool concurrent) { // We are preparing for evacuation. At this time, we ignore cset region tallies. 
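// Editor's note: illustrative arithmetic only, not part of the patch. Under the replacement code
// above, adjust_evacuation_budgets() no longer transfers regions directly; it simply shrinks
// excess_old and lets the upcoming free-set rebuild move the regions. For example, with 4 MiB
// regions, excess_old of 20 MiB and regions_to_xfer of 3 leaves 20 MiB - 3 * 4 MiB = 8 MiB of
// excess for unanticipated promotions. The helper name below is hypothetical.
#include <cstddef>
static size_t remaining_excess_old_sketch(size_t excess_old_bytes, size_t regions_to_xfer,
                                          size_t region_size_bytes) {
  // regions_to_xfer is capped at excess_old_bytes / region_size_bytes, so this cannot underflow.
  return excess_old_bytes - regions_to_xfer * region_size_bytes;
}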
size_t first_old, last_old, num_old; _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old); + + if (heap->mode()->is_generational()) { + ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap(); + gen_heap->compute_old_generation_balance(young_cset_regions, old_cset_regions); + } + // Free set construction uses reserve quantities, because they are known to be valid here _free_set->finish_rebuild(young_cset_regions, old_cset_regions, num_old, true); } @@ -863,7 +891,10 @@ ShenandoahGeneration::ShenandoahGeneration(ShenandoahGenerationType type, _task_queues(new ShenandoahObjToScanQueueSet(max_workers)), _ref_processor(new ShenandoahReferenceProcessor(MAX2(max_workers, 1U))), _affiliated_region_count(0), _humongous_waste(0), _evacuation_reserve(0), - _used(0), _bytes_allocated_since_gc_start(0), + _used(0), +#ifdef KELVIN_OUT_WITH_THE_OLD + _bytes_allocated_since_gc_start(0), +#endif _max_capacity(max_capacity), _free_set(nullptr), _heuristics(nullptr) @@ -916,6 +947,7 @@ void ShenandoahGeneration::scan_remembered_set(bool is_concurrent) { } } +#ifdef KELVIN_OUT_WITH_THE_OLD size_t ShenandoahGeneration::increment_affiliated_region_count() { shenandoah_assert_heaplocked_or_safepoint(); // During full gc, multiple GC worker threads may change region affiliations without a lock. No lock is enforced @@ -1024,6 +1056,7 @@ void ShenandoahGeneration::decrease_humongous_waste(size_t bytes) { #endif } } +#endif size_t ShenandoahGeneration::used_regions() const { size_t result; @@ -1040,6 +1073,7 @@ size_t ShenandoahGeneration::used_regions() const { result = _free_set->global_affiliated_regions(); break; } +#ifdef KELVIN_OUT_WITH_THE_OLD size_t original_result = Atomic::load(&_affiliated_region_count); #ifdef KELVIN_SCAFFOLDING static int problem_count = 0; @@ -1057,23 +1091,72 @@ size_t ShenandoahGeneration::used_regions() const { shenandoah_generation_name(_type), result); } } +#endif #endif return result; } +size_t ShenandoahGeneration::max_capacity() const { + size_t total_regions; + switch (_type) { + case ShenandoahGenerationType::OLD: + total_regions = _free_set->total_old_regions(); + break; + case ShenandoahGenerationType::YOUNG: + total_regions = _free_set->total_young_regions(); + break; + case ShenandoahGenerationType::GLOBAL: + case ShenandoahGenerationType::NON_GEN: + default: + total_regions = _free_set->total_global_regions(); + break; + } +#define KELVIN_AVAILABLE +#ifdef KELVIN_AVAILABLE + log_info(gc)("max_capacity(_type: %d) returns %zu (%zu * %zu)", _type, total_regions * ShenandoahHeapRegion::region_size_bytes(), + total_regions, ShenandoahHeapRegion::region_size_bytes()); +#endif + return total_regions * ShenandoahHeapRegion::region_size_bytes(); +} + size_t ShenandoahGeneration::free_unaffiliated_regions() const { - size_t result = max_capacity() / ShenandoahHeapRegion::region_size_bytes(); - auto const used_regions = this->used_regions(); - if (used_regions > result) { - result = 0; - } else { - result -= used_regions; + size_t free_regions; + switch (_type) { + case ShenandoahGenerationType::OLD: + free_regions = _free_set->old_unaffiliated_regions(); + break; + case ShenandoahGenerationType::YOUNG: + free_regions = _free_set->young_unaffiliated_regions(); + break; + case ShenandoahGenerationType::GLOBAL: + case ShenandoahGenerationType::NON_GEN: + default: + free_regions = _free_set->global_unaffiliated_regions(); + break; } - return result; +#define KELVIN_UNAFFILIATED +#ifdef KELVIN_UNAFFILIATED + 
log_info(gc)("free_unaffiliated_regions(_type == %d) returns %zu", _type, free_regions); +#endif + return free_regions; } size_t ShenandoahGeneration::used_regions_size() const { - return used_regions() * ShenandoahHeapRegion::region_size_bytes(); + size_t used_regions; + switch (_type) { + case ShenandoahGenerationType::OLD: + used_regions = _free_set->old_affiliated_regions(); + break; + case ShenandoahGenerationType::YOUNG: + used_regions = _free_set->young_affiliated_regions(); + break; + case ShenandoahGenerationType::GLOBAL: + case ShenandoahGenerationType::NON_GEN: + default: + used_regions = _free_set->global_affiliated_regions(); + break; + } + return used_regions * ShenandoahHeapRegion::region_size_bytes(); } size_t ShenandoahGeneration::available() const { @@ -1091,9 +1174,13 @@ size_t ShenandoahGeneration::soft_available() const { size_t ShenandoahGeneration::available(size_t capacity) const { size_t in_use = used(); +#ifdef KELVIN_AVAILABLE + log_info(gc)("ShenGen::available(%zu), with in_use: %zu", capacity, in_use); +#endif return in_use > capacity ? 0 : capacity - in_use; } +#ifdef KELVIN_OUT_WITH_THE_OLD size_t ShenandoahGeneration::increase_capacity(size_t increment) { shenandoah_assert_heaplocked_or_safepoint(); @@ -1140,6 +1227,7 @@ size_t ShenandoahGeneration::decrease_capacity(size_t decrement) { "Cannot use more than capacity"); return _max_capacity; } +#endif void ShenandoahGeneration::record_success_concurrent(bool abbreviated) { heuristics()->record_success_concurrent(); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp index 160e73a0e85f7..d685ab53b1e4a 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp @@ -70,7 +70,9 @@ class ShenandoahGeneration : public CHeapObj, public ShenandoahSpaceInfo { // Usage volatile size_t _used; +#ifdef KELVIN_OUT_WITH_THE_OLD volatile size_t _bytes_allocated_since_gc_start; +#endif size_t _max_capacity; ShenandoahFreeSet* _free_set; ShenandoahHeuristics* _heuristics; @@ -128,7 +130,8 @@ class ShenandoahGeneration : public CHeapObj, public ShenandoahSpaceInfo { virtual void post_initialize(ShenandoahHeap* heap); - size_t max_capacity() const override { return _max_capacity; } + size_t max_capacity() const override; + virtual size_t used_regions() const; virtual size_t used_regions_size() const; virtual size_t free_unaffiliated_regions() const; @@ -148,6 +151,7 @@ class ShenandoahGeneration : public CHeapObj, public ShenandoahSpaceInfo { break; } +#ifdef KELVIN_OUT_WITH_THE_OLD size_t original_result = Atomic::load(&_used); #undef KELVIN_SCAFFOLDING #ifdef KELVIN_SCAFFOLDING @@ -167,8 +171,7 @@ class ShenandoahGeneration : public CHeapObj, public ShenandoahSpaceInfo { problem_count = 0; } #endif - - +#endif return result; } @@ -186,7 +189,17 @@ class ShenandoahGeneration : public CHeapObj, public ShenandoahSpaceInfo { // max heap size will cause the adaptive heuristic to run more frequent cycles. 
size_t soft_available() const override; - size_t bytes_allocated_since_gc_start() const override; + size_t bytes_allocated_since_gc_start() const { + size_t result; + if (_type == ShenandoahGenerationType::YOUNG) { + return _free_set->get_bytes_allocated_since_gc_start(); + } else if (ShenandoahHeap::heap()->mode()->is_generational() && (_type == ShenandoahGenerationType::NON_GEN)) { + return _free_set->get_bytes_allocated_since_gc_start(); + } else { + return 0; + } + } +#ifdef KELVIN_OUT_WITH_THE_OLD void reset_bytes_allocated_since_gc_start(); void increase_allocated(size_t bytes); @@ -205,7 +218,8 @@ class ShenandoahGeneration : public CHeapObj, public ShenandoahSpaceInfo { log_info(gc)("%s:set_used(regions: %zu, bytes: %zu)", shenandoah_generation_name(_type), affiliated_region_count, byte_count); #endif } - +#endif + void log_status(const char* msg) const; // Used directly by FullGC @@ -265,6 +279,7 @@ class ShenandoahGeneration : public CHeapObj, public ShenandoahSpaceInfo { // Scan remembered set at start of concurrent young-gen marking. void scan_remembered_set(bool is_concurrent); +#ifdef KELVIN_OUT_WITH_THE_OLD // Return the updated value of affiliated_region_count size_t increment_affiliated_region_count(); @@ -290,6 +305,44 @@ class ShenandoahGeneration : public CHeapObj, public ShenandoahSpaceInfo { void increase_humongous_waste(size_t bytes); void decrease_humongous_waste(size_t bytes); +#else + size_t get_affiliated_region_count() const { + size_t result; + switch (_type) { + case ShenandoahGenerationType::OLD: + result = _free_set->old_affiliated_regions(); + break; + case ShenandoahGenerationType::YOUNG: + result = _free_set->young_affiliated_regions(); + break; + case ShenandoahGenerationType::GLOBAL: + case ShenandoahGenerationType::NON_GEN: + default: + result = _free_set->global_affiliated_regions(); + break; + } + return result; + } + + size_t get_total_region_count() const { + size_t result; + switch (_type) { + case ShenandoahGenerationType::OLD: + result = _free_set->total_old_regions(); + break; + case ShenandoahGenerationType::YOUNG: + result = _free_set->total_young_regions(); + break; + case ShenandoahGenerationType::GLOBAL: + case ShenandoahGenerationType::NON_GEN: + default: + result = _free_set->total_global_regions(); + break; + } + return result; + } +#endif + size_t get_humongous_waste() const { size_t result; switch (_type) { @@ -302,16 +355,17 @@ class ShenandoahGeneration : public CHeapObj, public ShenandoahSpaceInfo { case ShenandoahGenerationType::GLOBAL: case ShenandoahGenerationType::NON_GEN: default: - result = _free_set->humongous_waste_in_mutator() + _free_set->humongous_waste_in_old(); + result = _free_set->total_humongous_waste(); break; } +#ifdef KELVIN_OUT_WITH_THE_OLD #ifdef KELVIN_SCAFFOLDING if (result != _humongous_waste) { log_info(gc)("Generation %s expects consistency between humongous waste in free set (%zu) and in generation (%zu)", shenandoah_generation_name(_type), result, _humongous_waste); } #endif - +#endif return result; } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationSizer.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationSizer.cpp index 17f3d2f199f1a..ed5fe573b130a 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationSizer.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationSizer.cpp @@ -115,6 +115,7 @@ void ShenandoahGenerationSizer::heap_size_changed(size_t heap_size) { recalculate_min_max_young_length(heap_size / ShenandoahHeapRegion::region_size_bytes()); } +#ifdef 
KELVIN_OUT_WITH_THE_OLD bool ShenandoahGenerationSizer::transfer_regions(ShenandoahGeneration* src, ShenandoahGeneration* dst, size_t regions) const { const size_t bytes_to_transfer = regions * ShenandoahHeapRegion::region_size_bytes(); @@ -141,7 +142,7 @@ bool ShenandoahGenerationSizer::transfer_regions(ShenandoahGeneration* src, Shen regions, src->name(), dst->name(), PROPERFMTARGS(new_size)); return true; } - +#endif size_t ShenandoahGenerationSizer::max_size_for(ShenandoahGeneration* generation) const { switch (generation->type()) { @@ -171,7 +172,7 @@ size_t ShenandoahGenerationSizer::min_size_for(ShenandoahGeneration* generation) } } - +#ifdef KELVIN_OUT_WITH_THE_OLD // Returns true iff transfer is successful bool ShenandoahGenerationSizer::transfer_to_old(size_t regions) const { ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap(); @@ -193,11 +194,11 @@ void ShenandoahGenerationSizer::force_transfer_to_old(size_t regions) const { regions, young_gen->name(), old_gen->name(), PROPERFMTARGS(new_size)); } - bool ShenandoahGenerationSizer::transfer_to_young(size_t regions) const { ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap(); return transfer_regions(heap->old_generation(), heap->young_generation(), regions); } +#endif size_t ShenandoahGenerationSizer::min_young_size() const { return min_young_regions() * ShenandoahHeapRegion::region_size_bytes(); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationSizer.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationSizer.hpp index 5752422bb7717..aa385af146c97 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationSizer.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationSizer.hpp @@ -51,11 +51,16 @@ class ShenandoahGenerationSizer { // given the number of heap regions depending on the kind of sizing algorithm. void recalculate_min_max_young_length(size_t heap_region_count); +#ifdef KELVIN_OUT_WITH_THE_OLD // This will attempt to transfer regions from the `src` generation to `dst` generation. // If the transfer would violate the configured minimum size for the source or the configured // maximum size of the destination, it will not perform the transfer and will return false. // Returns true if the transfer is performed. bool transfer_regions(ShenandoahGeneration* src, ShenandoahGeneration* dst, size_t regions) const; +#endif + +public: + ShenandoahGenerationSizer(); // Return the configured maximum size in bytes for the given generation. size_t max_size_for(ShenandoahGeneration* generation) const; @@ -63,9 +68,6 @@ class ShenandoahGenerationSizer { // Return the configured minimum size in bytes for the given generation. size_t min_size_for(ShenandoahGeneration* generation) const; -public: - ShenandoahGenerationSizer(); - // Calculate the maximum length of the young gen given the number of regions // depending on the sizing algorithm. void heap_size_changed(size_t heap_size); @@ -82,12 +84,14 @@ class ShenandoahGenerationSizer { return _max_desired_young_regions; } +#ifdef KELVIN_OUT_WITH_THE_OLD // True if transfer succeeds, else false. See transfer_regions. bool transfer_to_young(size_t regions) const; bool transfer_to_old(size_t regions) const; // force transfer is used when we promote humongous objects. 
May violate min/max limits on generation sizes void force_transfer_to_old(size_t regions) const; +#endif }; #endif //SHARE_GC_SHENANDOAH_SHENANDOAHGENERATIONSIZER_HPP diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp index f2b8b1e6cdb67..ec96713da42b0 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp @@ -261,6 +261,7 @@ void ShenandoahGenerationalEvacuationTask::promote_in_place(ShenandoahHeapRegion _heap->free_set()->add_promoted_in_place_region_to_old_collector(region); region->set_affiliation(OLD_GENERATION); +#ifdef KELVIN_OUT_WITH_THE_OLD young_gen->decrease_used(region_size_bytes); young_gen->decrement_affiliated_region_count(); @@ -269,6 +270,7 @@ void ShenandoahGenerationalEvacuationTask::promote_in_place(ShenandoahHeapRegion old_gen->increment_affiliated_region_count(); old_gen->increase_used(region_to_be_used_in_old); +#endif } } @@ -303,13 +305,14 @@ void ShenandoahGenerationalEvacuationTask::promote_humongous(ShenandoahHeapRegio log_info(gc)("Promoting humongous object, transferring %zu bytes of humongous waste", humongous_waste); #endif +#ifdef KELVIN_OUT_WITH_THE_OLD young_gen->decrease_used(spanned_regions * region_size_bytes); young_gen->decrease_humongous_waste(humongous_waste); young_gen->decrease_affiliated_region_count(spanned_regions); // transfer_to_old() increases capacity of old and decreases capacity of young _heap->generation_sizer()->force_transfer_to_old(spanned_regions); - +#endif // For this region and each humongous continuation region spanned by this humongous object, change // affiliation to OLD_GENERATION and adjust the generation-use tallies. The remnant of memory // in the last humongous region that is not spanned by obj is currently not used. @@ -323,10 +326,11 @@ void ShenandoahGenerationalEvacuationTask::promote_humongous(ShenandoahHeapRegio ShenandoahFreeSet* freeset = _heap->free_set(); freeset->transfer_humongous_regions_from_mutator_to_old_collector(spanned_regions, humongous_waste); - +#ifdef KELVIN_OUT_WITH_THE_OLD old_gen->increase_affiliated_region_count(spanned_regions); old_gen->increase_used(spanned_regions * region_size_bytes); old_gen->increase_humongous_waste(humongous_waste); +#endif } // Since this region may have served previously as OLD, it may hold obsolete object range info. 
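// Editor's note: illustrative sketch, not part of the patch. With the generation-sizer calls
// compiled out above, transfer_humongous_regions_from_mutator_to_old_collector() carries the whole
// accounting change for a promoted humongous object: the spanned regions, their used bytes, and
// their waste leave the young tallies and join the old tallies, while the global totals stay the
// same. The struct and names below are hypothetical stand-ins.
#include <cstddef>
struct GenerationTallySketch { size_t regions; size_t used_bytes; size_t humongous_waste_bytes; };
static void promote_humongous_tallies_sketch(GenerationTallySketch& young, GenerationTallySketch& old_gen,
                                             size_t spanned_regions, size_t waste_bytes,
                                             size_t region_size_bytes) {
  const size_t used_bytes = spanned_regions * region_size_bytes;
  young.regions   -= spanned_regions;  young.used_bytes   -= used_bytes;  young.humongous_waste_bytes   -= waste_bytes;
  old_gen.regions += spanned_regions;  old_gen.used_bytes += used_bytes;  old_gen.humongous_waste_bytes += waste_bytes;
}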
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.cpp index e2e3f0a467744..61dbaef8371a8 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.cpp @@ -105,6 +105,8 @@ void ShenandoahGenerationalFullGC::rebuild_remembered_set(ShenandoahHeap* heap) heap->old_generation()->set_parsable(true); } + +#ifdef KELVIN_OUT_WITH_THE_OLD void ShenandoahGenerationalFullGC::balance_generations_after_gc(ShenandoahHeap* heap) { ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::cast(heap); ShenandoahOldGeneration* const old_gen = gen_heap->old_generation(); @@ -122,7 +124,6 @@ void ShenandoahGenerationalFullGC::balance_generations_after_gc(ShenandoahHeap* size_t old_regions_deficit = (old_usage - old_capacity) / ShenandoahHeapRegion::region_size_bytes(); gen_heap->generation_sizer()->force_transfer_to_old(old_regions_deficit); } - log_info(gc, ergo)("FullGC done: young usage: " PROPERFMT ", old usage: " PROPERFMT, PROPERFMTARGS(gen_heap->young_generation()->used()), PROPERFMTARGS(old_gen->used())); @@ -131,6 +132,7 @@ void ShenandoahGenerationalFullGC::balance_generations_after_gc(ShenandoahHeap* ShenandoahGenerationalHeap::TransferResult ShenandoahGenerationalFullGC::balance_generations_after_rebuilding_free_set() { return ShenandoahGenerationalHeap::heap()->balance_generations(); } +#endif void ShenandoahGenerationalFullGC::log_live_in_old(ShenandoahHeap* heap) { LogTarget(Debug, gc) lt; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.hpp index 9240a056105fc..62dc3087caef0 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.hpp @@ -45,17 +45,20 @@ class ShenandoahGenerationalFullGC { // Records end of cycle for young and old and establishes size of live bytes in old static void handle_completion(ShenandoahHeap* heap); +#ifdef KELVIN_OUT_WITH_THE_OLD // Full GC may have promoted regions and may have temporarily violated constraints on the usage and // capacity of the old generation. This method will balance the accounting of regions between the // young and old generations. This is somewhat vestigial, but the outcome of this method is used // when rebuilding the free sets. static void balance_generations_after_gc(ShenandoahHeap* heap); +#endif // This will compute the target size for the old generation. It will be expressed in terms of // a region surplus and deficit, which will be redistributed accordingly after rebuilding the // free set. static void compute_balances(); +#ifdef KELVIN_OUT_WITH_THE_OLD // Rebuilding the free set may have resulted in regions being pulled in to the old generation // evacuation reserve. For this reason, we must update the usage and capacity of the generations // again. In the distant past, the free set did not know anything about generations, so we had @@ -63,6 +66,7 @@ class ShenandoahGenerationalFullGC { // redundant and adds complexity. We would like to one day remove it. Until then, we must keep it // synchronized with the free set's view of things. static ShenandoahGenerationalHeap::TransferResult balance_generations_after_rebuilding_free_set(); +#endif // Logs the number of live bytes marked in the old generation. 
This is _not_ the same // value used as the baseline for the old generation _after_ the full gc is complete. diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp index 1ec2dfb8a8420..0d7a7d7a29fed 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp @@ -27,6 +27,7 @@ #include "gc/shenandoah/shenandoahClosures.inline.hpp" #include "gc/shenandoah/shenandoahCollectorPolicy.hpp" #include "gc/shenandoah/shenandoahFreeSet.hpp" +#include "gc/shenandoah/shenandoahGeneration.hpp" #include "gc/shenandoah/shenandoahGenerationalControlThread.hpp" #include "gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp" #include "gc/shenandoah/shenandoahGenerationalHeap.hpp" @@ -595,6 +596,7 @@ void ShenandoahGenerationalHeap::retire_plab(PLAB* plab) { retire_plab(plab, thread); } +#ifdef KELVIN_OUT_WITH_THE_OLD ShenandoahGenerationalHeap::TransferResult ShenandoahGenerationalHeap::balance_generations() { shenandoah_assert_heaplocked_or_safepoint(); @@ -623,11 +625,15 @@ ShenandoahGenerationalHeap::TransferResult ShenandoahGenerationalHeap::balance_g return TransferResult {true, 0, "none"}; } +#endif // Make sure old-generation is large enough, but no larger than is necessary, to hold mixed evacuations // and promotions, if we anticipate either. Any deficit is provided by the young generation, subject to // xfer_limit, and any surplus is transferred to the young generation. -// xfer_limit is the maximum we're able to transfer from young to old. +// +// xfer_limit is the maximum we're able to transfer from young to old based on either: +// 1. an assumption that we will be able to replenish memory "borrowed" from young at the end of collection, or +// 2. there is sufficient excess in the allocation runway during GC idle cycles void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t old_xfer_limit, size_t old_cset_regions) { // We can limit the old reserve to the size of anticipated promotions: @@ -655,12 +661,13 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t old_xfer_ // In the case that ShenandoahOldEvacRatioPercent equals 100, max_old_reserve is limited only by xfer_limit. const double bound_on_old_reserve = old_available + old_xfer_limit + young_reserve; - const double max_old_reserve = (ShenandoahOldEvacRatioPercent == 100)? - bound_on_old_reserve: MIN2(double(young_reserve * ShenandoahOldEvacRatioPercent) / double(100 - ShenandoahOldEvacRatioPercent), - bound_on_old_reserve); -#undef KELVIN_REBALANCE + const double max_old_reserve = ((ShenandoahOldEvacRatioPercent == 100)? 
bound_on_old_reserve: + MIN2(double(young_reserve * ShenandoahOldEvacRatioPercent) + / double(100 - ShenandoahOldEvacRatioPercent), bound_on_old_reserve)); +#define KELVIN_REBALANCE #ifdef KELVIN_REBALANCE - log_info(gc)("compute_old_gen_balance(%zu, %zu)", old_xfer_limit, old_cset_regions); + log_info(gc)("compute_old_gen_balance(%zu, %zu), bound_on_old_reserve: %.3f, max_old_reserve: %.3f", + old_xfer_limit, old_cset_regions, bound_on_old_reserve, max_old_reserve); #endif const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); @@ -675,8 +682,15 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t old_xfer_ "Unaffiliated available must be less than total available"); const double old_fragmented_available = double(old_available - old_generation()->free_unaffiliated_regions() * region_size_bytes); reserve_for_mixed = max_evac_need + old_fragmented_available; +#ifdef KELVIN_REBALANCE + log_info(gc)(" max_evac_need: %.3f, old_fragmented_available: %.3f, reserve_for_mixed: %.3f", + max_evac_need, old_fragmented_available, reserve_for_mixed); +#endif if (reserve_for_mixed > max_old_reserve) { reserve_for_mixed = max_old_reserve; +#ifdef KELVIN_REBALANCE + log_info(gc)(" downsize reserve_for_mixed: %.3f", reserve_for_mixed); +#endif } } @@ -693,17 +707,23 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t old_xfer_ // This is the total old we want to ideally reserve const size_t old_reserve = reserve_for_mixed + reserve_for_promo; +#ifdef KELVIN_REBALANCE + log_info(gc)(" reserve_for_promo: %zu, old_reserve: %zu", reserve_for_promo, old_reserve); +#endif assert(old_reserve <= max_old_reserve, "cannot reserve more than max for old evacuations"); // We now check if the old generation is running a surplus or a deficit. const size_t max_old_available = old_generation()->available() + old_cset_regions * region_size_bytes; +#ifdef KELVIN_REBALANCE + log_info(gc)(" max_old_available: %zu", max_old_available); +#endif if (max_old_available >= old_reserve) { // We are running a surplus, so the old region surplus can go to young const size_t old_surplus = (max_old_available - old_reserve) / region_size_bytes; const size_t unaffiliated_old_regions = old_generation()->free_unaffiliated_regions() + old_cset_regions; const size_t old_region_surplus = MIN2(old_surplus, unaffiliated_old_regions); #ifdef KELVIN_REBALANCE - log_info(gc)("surplus of old, transferring %zd regions to young", old_region_surplus); + log_info(gc)("surplus of old, set old_gen->region_balance for transfer %zu regions to young", old_region_surplus); #endif old_generation()->set_region_balance(checked_cast(old_region_surplus)); } else { @@ -719,13 +739,14 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t old_xfer_ // curtailed if the budget is restricted. 
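// Editor's note: illustrative sketch with made-up numbers, not part of the patch. The
// max_old_reserve computed earlier in this function caps old's reserve at P/(100-P) times the
// young reserve (P = ShenandoahOldEvacRatioPercent), and never more than everything old could
// possibly hold (old_available + old_xfer_limit + young_reserve). For example, with a young
// reserve of 300 MB and P = 25, the ratio term is 300 * 25 / 75 = 100 MB.
#include <algorithm>
static double max_old_reserve_sketch(double young_reserve, double old_available,
                                     double old_xfer_limit, unsigned ratio_percent) {
  const double bound_on_old_reserve = old_available + old_xfer_limit + young_reserve;
  return (ratio_percent == 100) ? bound_on_old_reserve
                                : std::min(young_reserve * ratio_percent / (100.0 - ratio_percent),
                                           bound_on_old_reserve);
}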
const size_t old_region_deficit = MIN2(old_need, max_old_region_xfer); #ifdef KELVIN_REBALANCE - log_info(gc)("deficit of old, transferring %zd regions to old", old_region_deficit); + log_info(gc)("deficit of old, set old_gen->region_balance for transfer %zu regions to old", old_region_deficit); #endif old_generation()->set_region_balance(0 - checked_cast(old_region_deficit)); } } void ShenandoahGenerationalHeap::reset_generation_reserves() { + ShenandoahHeapLocker locker(lock()); young_generation()->set_evacuation_reserve(0); old_generation()->set_evacuation_reserve(0); old_generation()->set_promoted_reserve(0); @@ -1098,7 +1119,7 @@ void ShenandoahGenerationalHeap::complete_degenerated_cycle() { // a more detailed explanation. old_generation()->transfer_pointers_from_satb(); } - +#ifdef KELVIN_OUT_WITH_THE_OLD // We defer generation resizing actions until after cset regions have been recycled. TransferResult result = balance_generations(); LogTarget(Info, gc, ergo) lt; @@ -1106,7 +1127,7 @@ void ShenandoahGenerationalHeap::complete_degenerated_cycle() { LogStream ls(lt); result.print_on("Degenerated GC", &ls); } - +#endif // In case degeneration interrupted concurrent evacuation or update references, we need to clean up // transient state. Otherwise, these actions have no effect. reset_generation_reserves(); @@ -1128,7 +1149,7 @@ void ShenandoahGenerationalHeap::complete_concurrent_cycle() { // throw off the heuristics. entry_global_coalesce_and_fill(); } - +#ifdef KELVIN_OUT_WITH_THE_OLD TransferResult result; { ShenandoahHeapLocker locker(lock()); @@ -1142,6 +1163,9 @@ void ShenandoahGenerationalHeap::complete_concurrent_cycle() { LogStream ls(lt); result.print_on("Concurrent GC", &ls); } +#else + reset_generation_reserves(); +#endif } void ShenandoahGenerationalHeap::entry_global_coalesce_and_fill() { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp index 175f322f6b659..95558ded5faed 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -426,6 +426,12 @@ jint ShenandoahHeap::initialize() { // We are initializing free set. We ignore cset region tallies. 
size_t young_cset_regions, old_cset_regions, first_old, last_old, num_old; _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old); + if (mode()->is_generational()) { + ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap(); + size_t allocation_runway = + gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_cset_regions); + gen_heap->compute_old_generation_balance(allocation_runway, old_cset_regions); + } _free_set->finish_rebuild(young_cset_regions, old_cset_regions, num_old); } @@ -733,24 +739,29 @@ void ShenandoahHeap::increase_used(const ShenandoahAllocRequest& req) { if (req.is_gc_alloc()) { assert(wasted_bytes == 0 || req.type() == ShenandoahAllocRequest::_alloc_plab, "Only PLABs have waste"); +#ifdef KELVIN_OUT_WITH_THE_OLD increase_used(generation, actual_bytes + wasted_bytes); +#endif } else { assert(req.is_mutator_alloc(), "Expected mutator alloc here"); +#ifdef KELVIN_OUT_WITH_THE_OLD // padding and actual size both count towards allocation counter generation->increase_allocated(actual_bytes + wasted_bytes); // Used within generation is actual bytes + alignment padding (wasted bytes) increase_used(generation, actual_bytes + wasted_bytes); - +#endif // notify pacer of both actual size and waste notify_mutator_alloc_words(req.actual_size(), req.waste()); - +#ifdef KELVIN_OUT_WITH_THE_OLD if (wasted_bytes > 0 && ShenandoahHeapRegion::requires_humongous(req.actual_size())) { increase_humongous_waste(generation, wasted_bytes); } +#endif } } +#ifdef KELVIN_OUT_WITH_THE_OLD void ShenandoahHeap::increase_humongous_waste(ShenandoahGeneration* generation, size_t bytes) { generation->increase_humongous_waste(bytes); if (!generation->is_global()) { @@ -778,6 +789,7 @@ void ShenandoahHeap::decrease_used(ShenandoahGeneration* generation, size_t byte global_generation()->decrease_used(bytes); } } +#endif void ShenandoahHeap::notify_mutator_alloc_words(size_t words, size_t waste) { if (ShenandoahPacing) { @@ -2321,12 +2333,17 @@ address ShenandoahHeap::in_cset_fast_test_addr() { } void ShenandoahHeap::reset_bytes_allocated_since_gc_start() { +#ifdef KELVIN_OUT_WITH_THE_OLD if (mode()->is_generational()) { young_generation()->reset_bytes_allocated_since_gc_start(); old_generation()->reset_bytes_allocated_since_gc_start(); } - global_generation()->reset_bytes_allocated_since_gc_start(); +#else + ShenandoahHeap* heap = ShenandoahHeap::heap(); + ShenandoahHeapLocker locker(heap->lock()); + free_set()->reset_bytes_allocated_since_gc_start(); +#endif } void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp index 36c9373d0cc34..c2e92a8091f3e 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp @@ -918,7 +918,9 @@ void ShenandoahHeapRegion::decrement_humongous_waste() { #ifdef KELVIN_HUMONGOUS_HEAP_REGION log_info(gc)("Decrementing humongous waste by %zu in ShenHeapRegion", waste_bytes); #endif +#ifdef KELVIN_OUT_WITH_THE_OLD heap->decrease_humongous_waste(generation, waste_bytes); +#endif heap->free_set()->decrease_humongous_waste_for_regular_bypass(this, waste_bytes); } } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp index 1724fc2849f76..361ea9d5213d1 100644 --- 
a/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp @@ -144,11 +144,14 @@ bool ShenandoahOldGC::collect(GCCause::Cause cause) { // collection. heap->concurrent_final_roots(); - // We do not rebuild_free following increments of old marking because memory has not been reclaimed. However, we may - // need to transfer memory to OLD in order to efficiently support the mixed evacuations that might immediately follow. size_t allocation_runway = heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(0); heap->compute_old_generation_balance(allocation_runway, 0); +#ifdef KELVIN_OUT_WITH_THE_OLD + // Kelvin says the following comment is not correct. We do rebuild_free following the end of old marking. + + // We do not rebuild_free following increments of old marking because memory has not been reclaimed. However, we may + // need to transfer memory to OLD in order to efficiently support the mixed evacuations that might immediately follow. ShenandoahGenerationalHeap::TransferResult result; { ShenandoahHeapLocker locker(heap->lock()); @@ -160,5 +163,6 @@ bool ShenandoahOldGC::collect(GCCause::Cause cause) { LogStream ls(lt); result.print_on("Old Mark", &ls); } +#endif return true; } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp index 5cccd395d3819..b825994256e59 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp @@ -511,12 +511,20 @@ void ShenandoahOldGeneration::prepare_regions_and_collection_set(bool concurrent ShenandoahPhaseTimings::final_rebuild_freeset : ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset); ShenandoahHeapLocker locker(heap->lock()); - size_t cset_young_regions, cset_old_regions; + size_t young_trash_regions, old_trash_regions; size_t first_old, last_old, num_old; - heap->free_set()->prepare_to_rebuild(cset_young_regions, cset_old_regions, first_old, last_old, num_old); - // This is just old-gen completion. No future budgeting required here. The only reason to rebuild the freeset here - // is in case there was any immediate old garbage identified. - heap->free_set()->finish_rebuild(cset_young_regions, cset_old_regions, num_old); + heap->free_set()->prepare_to_rebuild(young_trash_regions, old_trash_regions, first_old, last_old, num_old); + // At the end of old-gen, we may find that we have reclaimed immediate garbage, allowing a longer allocation runway. + // We may also find that we have accumulated candidate regions for mixed evacuation. If so, we will want to expand + // the OldCollector reserve in order to make room for these mixed evacuations. + assert(ShenandoahHeap::heap()->mode()->is_generational(), "sanity"); + assert(young_trash_regions == 0, "sanity"); + ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap(); + size_t allocation_runway = + gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_trash_regions); + gen_heap->compute_old_generation_balance(allocation_runway, old_trash_regions); + + heap->free_set()->finish_rebuild(young_trash_regions, old_trash_regions, num_old); } } @@ -717,11 +725,12 @@ void ShenandoahOldGeneration::handle_evacuation(HeapWord* obj, size_t words, boo // do this in batch, in a background GC thread than to try to carefully dirty only cards // that hold interesting pointers right now.
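// Editor's note: illustrative, compilable sketch only, not part of the patch; the function-pointer
// struct is a hypothetical stand-in for the heap and free-set calls. It summarizes the rebuild
// ordering this patch converges on at the end of old marking: the old-generation balance must be
// computed between prepare_to_rebuild() and finish_rebuild(), so the rebuild can size the
// OldCollector reserve for upcoming mixed evacuations.
#include <cstddef>
struct RebuildStepsSketch {
  void   (*prepare_to_rebuild)(size_t& young_trash, size_t& old_trash);
  size_t (*allocation_runway_for)(size_t young_trash);
  void   (*compute_old_generation_balance)(size_t runway, size_t old_trash);
  void   (*finish_rebuild)(size_t young_trash, size_t old_trash);
};
static void rebuild_free_set_sketch(const RebuildStepsSketch& steps) {
  size_t young_trash = 0, old_trash = 0;
  steps.prepare_to_rebuild(young_trash, old_trash);
  const size_t runway = steps.allocation_runway_for(young_trash);
  steps.compute_old_generation_balance(runway, old_trash);  // must precede finish_rebuild()
  steps.finish_rebuild(young_trash, old_trash);
}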
_card_scan->mark_range_as_dirty(obj, words); - +#ifdef KELVIN_OUT_WITH_THE_OLD if (promotion) { // This evacuation was a promotion, track this as allocation against old gen increase_allocated(words * HeapWordSize); } +#endif } bool ShenandoahOldGeneration::has_unprocessed_collection_candidates() { From ef47c14347b20522c55e1d59387b16c70c62bd9d Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Fri, 8 Aug 2025 22:06:45 +0000 Subject: [PATCH 39/61] Fix compile-time error --- src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp index 3bc3e1cb1928c..500f167de019a 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp @@ -1176,7 +1176,7 @@ void ShenandoahConcurrentGC::op_update_thread_roots() { void ShenandoahConcurrentGC::op_final_update_refs() { ShenandoahHeap* const heap = ShenandoahHeap::heap(); bool is_generational = heap->mode()->is_generational(); - ShenandoahGenerationalHeap* const gen_heap = ShenandoahGenerationalHeap::heap(); + ShenandoahGenerationalHeap* const gen_heap = is_generational? ShenandoahGenerationalHeap::heap(): nullptr; assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint"); assert(!heap->_update_refs_iterator.has_next(), "Should have finished update references"); From e0751ca21c2e7f823e8ed837b607659b4938ac9c Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Fri, 8 Aug 2025 22:20:06 +0000 Subject: [PATCH 40/61] fix byte vs word comparison error --- .../gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp index ec96713da42b0..99e1b5f38101d 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp @@ -229,7 +229,7 @@ void ShenandoahGenerationalEvacuationTask::promote_in_place(ShenandoahHeapRegion size_t pip_unpadded = (region->end() - region->top()) * HeapWordSize; assert((region->top() == region->end()) || (pip_unpadded == (size_t) ((region->end() - region->top()) * HeapWordSize)), "Invariant"); - assert(pip_unpadded < ShenandoahHeap::min_fill_size(), "Sanity"); + assert(pip_unpadded < ShenandoahHeap::min_fill_size() * HeapWordSize, "Sanity"); size_t pip_pad_bytes = (region->top() - region->get_top_before_promote()) * HeapWordSize; assert((pip_unpadded == 0) || (pip_pad_bytes == 0), "Only one of pip_unpadded and pip_pad_bytes is non-zero"); From 4e68123e298ad1354663b248ee426033cba0db74 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Sat, 9 Aug 2025 01:02:31 +0000 Subject: [PATCH 41/61] fix floating reserves for GLOBAL GC --- .../shenandoah/heuristics/shenandoahGlobalHeuristics.cpp | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp index 95ffcaf7a528e..cc2c5a254d904 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp @@ -91,9 +91,10 @@ void 
ShenandoahGlobalHeuristics::choose_global_collection_set(ShenandoahCollecti size_t min_garbage = (free_target > actual_free) ? (free_target - actual_free) : 0; log_info(gc, ergo)("Adaptive CSet Selection for GLOBAL. Max Young Evacuation: %zu" - "%s, Max Old Evacuation: %zu%s, Actual Free: %zu%s.", + "%s, Max Old Evacuation: %zu%s, Max Either Evacuation: %zu%s, Actual Free: %zu%s.", byte_size_in_proper_unit(max_young_cset), proper_unit_for_byte_size(max_young_cset), byte_size_in_proper_unit(max_old_cset), proper_unit_for_byte_size(max_old_cset), + byte_size_in_proper_unit(unaffiliated_young_memory), proper_unit_for_byte_size(unaffiliated_young_memory), byte_size_in_proper_unit(actual_free), proper_unit_for_byte_size(actual_free)); for (size_t idx = 0; idx < size; idx++) { @@ -136,11 +137,12 @@ void ShenandoahGlobalHeuristics::choose_global_collection_set(ShenandoahCollecti cset->add_region(r); } } -#ifdef KELVIN_OUT_WITH_THE_OLD if (regions_transferred_to_old > 0) { +#ifdef KELVIN_OUT_WITH_THE_OLD heap->generation_sizer()->force_transfer_to_old(regions_transferred_to_old); +#endif + assert(young_evac_reserve > regions_transferred_to_old * region_size_bytes, "young reserve cannot be negative"); heap->young_generation()->set_evacuation_reserve(young_evac_reserve - regions_transferred_to_old * region_size_bytes); heap->old_generation()->set_evacuation_reserve(old_evac_reserve + regions_transferred_to_old * region_size_bytes); } -#endif } From e13ec3d9b6d3e98d29803346cf423cc7e9ed2d8e Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Sun, 10 Aug 2025 02:25:18 +0000 Subject: [PATCH 42/61] add instrumentation to debug TestChurnNotifications failure --- src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp | 6 +++--- src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp | 2 +- .../share/gc/shenandoah/shenandoahGeneration.cpp | 4 ++-- .../share/gc/shenandoah/shenandoahGenerationalHeap.cpp | 2 +- .../share/gc/shenandoah/shenandoahMemoryPool.cpp | 10 +++++++++- .../gc/shenandoah/mxbeans/TestChurnNotifications.java | 9 +++++++++ 6 files changed, 25 insertions(+), 8 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index 0ddff90321102..93cd66fa28ea3 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -2743,7 +2743,7 @@ void ShenandoahFreeSet::establish_generation_sizes(size_t young_region_count, si size_t affiliated_young_regions, size_t affiliated_old_regions, size_t young_used_bytes, size_t old_used_bytes) { assert(young_region_count + old_region_count == ShenandoahHeap::heap()->num_regions(), "Sanity"); -#define KELVIN_RESERVE +#undef KELVIN_RESERVE #ifdef KELVIN_RESERVE log_info(gc)("establish_generation_sizes(young_region_count: %zu, old_region_count: %zu, ", young_region_count, old_region_count); @@ -2873,7 +2873,7 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_trashed_regi shenandoah_assert_generational(); shenandoah_assert_heaplocked(); const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); -#define KELVIN_RESERVE +#undef KELVIN_RESERVE #ifdef KELVIN_RESERVE log_info(gc)("compute_young_and_old_reserve(young_trashed: %zu, old_trashed: %zu, have_reserves: %s)", young_trashed_regions, old_trashed_regions, have_evacuation_reserves? 
"yes": "no"); @@ -3008,7 +3008,7 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old young_used_bytes = 0; old_used_bytes = 0; -#define KELVIN_RESERVE +#undef KELVIN_RESERVE #ifdef KELVIN_RESERVE log_info(gc)("reserve_regions(to_reserve: %zu, to_reserve_old: %zu", to_reserve, to_reserve_old); #endif diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp index acbbe69e84650..f519255a1df52 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp @@ -319,7 +319,7 @@ class ShenandoahRegionPartitions { _used[int(which_partition)] -= bytes; _available[int(which_partition)] += bytes; -#undef KELVIN_USED_PARTITION +#define KELVIN_USED_PARTITION #ifdef KELVIN_USED_PARTITION extern const char* _shenandoah_partition_name(ShenandoahFreeSetPartitionId t); log_info(gc)("ShenRegionPartitions %s decrease_used(%zu) to %zu, available grows to %zu", diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp index 2ef49da7064f3..8533d2a1d7f81 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp @@ -1111,7 +1111,7 @@ size_t ShenandoahGeneration::max_capacity() const { total_regions = _free_set->total_global_regions(); break; } -#define KELVIN_AVAILABLE +#undef KELVIN_AVAILABLE #ifdef KELVIN_AVAILABLE log_info(gc)("max_capacity(_type: %d) returns %zu (%zu * %zu)", _type, total_regions * ShenandoahHeapRegion::region_size_bytes(), total_regions, ShenandoahHeapRegion::region_size_bytes()); @@ -1134,7 +1134,7 @@ size_t ShenandoahGeneration::free_unaffiliated_regions() const { free_regions = _free_set->global_unaffiliated_regions(); break; } -#define KELVIN_UNAFFILIATED +#undef KELVIN_UNAFFILIATED #ifdef KELVIN_UNAFFILIATED log_info(gc)("free_unaffiliated_regions(_type == %d) returns %zu", _type, free_regions); #endif diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp index 0d7a7d7a29fed..33d32fe2531c6 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp @@ -664,7 +664,7 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t old_xfer_ const double max_old_reserve = ((ShenandoahOldEvacRatioPercent == 100)? bound_on_old_reserve: MIN2(double(young_reserve * ShenandoahOldEvacRatioPercent) / double(100 - ShenandoahOldEvacRatioPercent), bound_on_old_reserve)); -#define KELVIN_REBALANCE +#undef KELVIN_REBALANCE #ifdef KELVIN_REBALANCE log_info(gc)("compute_old_gen_balance(%zu, %zu), bound_on_old_reserve: %.3f, max_old_reserve: %.3f", old_xfer_limit, old_cset_regions, bound_on_old_reserve, max_old_reserve); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMemoryPool.cpp b/src/hotspot/share/gc/shenandoah/shenandoahMemoryPool.cpp index ebfe5267160fb..639be5fdcf3e6 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahMemoryPool.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahMemoryPool.cpp @@ -63,7 +63,11 @@ MemoryUsage ShenandoahMemoryPool::get_memory_usage() { // to make sense under the race. See JDK-8207200. 
committed = MAX2(used, committed); assert(used <= committed, "used: %zu, committed: %zu", used, committed); - +#define KELVIN_MEMORY_POOL +#ifdef KELVIN_MEMORY_POOL + log_info(gc)("ShenMemPool::get_memory_usage(%zu, %zu, %zu, %zu)", + initial, used, committed, max); +#endif return MemoryUsage(initial, used, committed, max); } @@ -86,6 +90,10 @@ MemoryUsage ShenandoahGenerationalMemoryPool::get_memory_usage() { size_t used = used_in_bytes(); size_t committed = _generation->used_regions_size(); +#ifdef KELVIN_MEMORY_POOL + log_info(gc)("ShenGenMemPool::get_memory_usage(%zu, %zu, %zu, %zu) for generation %s", + initial, used, committed, max, _generation->is_young()? "young": _generation->is_old()? "old": "global"); +#endif return MemoryUsage(initial, used, committed, max); } diff --git a/test/hotspot/jtreg/gc/shenandoah/mxbeans/TestChurnNotifications.java b/test/hotspot/jtreg/gc/shenandoah/mxbeans/TestChurnNotifications.java index bd3c4508c7b84..fae74566668b6 100644 --- a/test/hotspot/jtreg/gc/shenandoah/mxbeans/TestChurnNotifications.java +++ b/test/hotspot/jtreg/gc/shenandoah/mxbeans/TestChurnNotifications.java @@ -144,6 +144,9 @@ public void handleNotification(Notification n, Object o) { if ((before != null) && (after != null)) { long diff = before.getUsed() - after.getUsed(); + System.out.println("Got GC notification " + info.getGcAction() + " for cause: " + info.getGcCause() + " from GC: " + info.getGcName() + + ", before: " + before.getUsed() + ", after: " + after.getUsed() + + ", diff: " + diff); if (diff > 0) { churnBytes.addAndGet(diff); } @@ -161,6 +164,9 @@ public void handleNotification(Notification n, Object o) { long mem = count * (16 + 4 * size); + System.out.println("Preparing to allocate " + count + " arrays, each of length " + size); + System.out.println("POOL_NAME: " + POOL_NAME); + for (int c = 0; c < count; c++) { sink = new int[size]; } @@ -178,13 +184,16 @@ public void handleNotification(Notification n, Object o) { long maxTries = (Utils.adjustTimeout(Utils.DEFAULT_TEST_TIMEOUT) - (spentTimeNanos / 1_000_000L)) / STEP_MS / 4; // Wait until enough notifications are accrued to match minimum boundary. + System.out.println("maxTries is " + maxTries + ", minExpected is: " + minExpected + ", spentTimeNanos: " + spentTimeNanos); long tries = 0; while (tries++ < maxTries) { actual = churnBytes.get(); + System.out.println("churnBytes() got " + actual + " in outer loop"); if (minExpected <= actual) { // Wait some more to test if we are breaking the maximum boundary. 
Thread.sleep(5000); actual = churnBytes.get(); + System.out.println("churnBytes() got " + actual + " in spin wait"); break; } Thread.sleep(STEP_MS); From b788d2b8bfdfb5891a5937735ad0c58bb55430d7 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Mon, 11 Aug 2025 23:22:32 +0000 Subject: [PATCH 43/61] Add anticipated humongous waste into TestChurnNotifications expected churn --- .../shenandoah/mxbeans/TestChurnNotifications.java | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/test/hotspot/jtreg/gc/shenandoah/mxbeans/TestChurnNotifications.java b/test/hotspot/jtreg/gc/shenandoah/mxbeans/TestChurnNotifications.java index fae74566668b6..7c4fdf28e8f52 100644 --- a/test/hotspot/jtreg/gc/shenandoah/mxbeans/TestChurnNotifications.java +++ b/test/hotspot/jtreg/gc/shenandoah/mxbeans/TestChurnNotifications.java @@ -144,9 +144,6 @@ public void handleNotification(Notification n, Object o) { if ((before != null) && (after != null)) { long diff = before.getUsed() - after.getUsed(); - System.out.println("Got GC notification " + info.getGcAction() + " for cause: " + info.getGcCause() + " from GC: " + info.getGcName() + - ", before: " + before.getUsed() + ", after: " + after.getUsed() + - ", diff: " + diff); if (diff > 0) { churnBytes.addAndGet(diff); } @@ -162,10 +159,8 @@ public void handleNotification(Notification n, Object o) { final int size = 100_000; long count = TARGET_MB * 1024 * 1024 / (16 + 4 * size); - long mem = count * (16 + 4 * size); - - System.out.println("Preparing to allocate " + count + " arrays, each of length " + size); - System.out.println("POOL_NAME: " + POOL_NAME); + long anticipated_humongous_waste_per_array = 124_272; + long mem = count * (16 + 4 * size + anticipated_humongous_waste_per_array); for (int c = 0; c < count; c++) { sink = new int[size]; @@ -184,16 +179,13 @@ public void handleNotification(Notification n, Object o) { long maxTries = (Utils.adjustTimeout(Utils.DEFAULT_TEST_TIMEOUT) - (spentTimeNanos / 1_000_000L)) / STEP_MS / 4; // Wait until enough notifications are accrued to match minimum boundary. - System.out.println("maxTries is " + maxTries + ", minExpected is: " + minExpected + ", spentTimeNanos: " + spentTimeNanos); long tries = 0; while (tries++ < maxTries) { actual = churnBytes.get(); - System.out.println("churnBytes() got " + actual + " in outer loop"); if (minExpected <= actual) { // Wait some more to test if we are breaking the maximum boundary. 
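A note on the constant added above: anticipated_humongous_waste_per_array = 124_272 looks like the per-array trailing-region remainder. Each int[100_000] needs 16 + 4 * 100_000 = 400_016 bytes, and if the humongous allocation consumes 524_288 bytes of region space (for example, two 256 KiB regions), the leftover 524_288 - 400_016 = 124_272 bytes are reported as humongous waste, which is why the expected churn (mem) now folds that amount in per array.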
Thread.sleep(5000); actual = churnBytes.get(); - System.out.println("churnBytes() got " + actual + " in spin wait"); break; } Thread.sleep(STEP_MS); From c53474003a41a1275b737005c64baa2cdd4dd21c Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Mon, 11 Aug 2025 23:26:37 +0000 Subject: [PATCH 44/61] Disable debug instrumentation --- src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp | 2 +- src/hotspot/share/gc/shenandoah/shenandoahMemoryPool.cpp | 9 --------- 2 files changed, 1 insertion(+), 10 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp index f519255a1df52..acbbe69e84650 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp @@ -319,7 +319,7 @@ class ShenandoahRegionPartitions { _used[int(which_partition)] -= bytes; _available[int(which_partition)] += bytes; -#define KELVIN_USED_PARTITION +#undef KELVIN_USED_PARTITION #ifdef KELVIN_USED_PARTITION extern const char* _shenandoah_partition_name(ShenandoahFreeSetPartitionId t); log_info(gc)("ShenRegionPartitions %s decrease_used(%zu) to %zu, available grows to %zu", diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMemoryPool.cpp b/src/hotspot/share/gc/shenandoah/shenandoahMemoryPool.cpp index 639be5fdcf3e6..93d40b839e93d 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahMemoryPool.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahMemoryPool.cpp @@ -63,11 +63,6 @@ MemoryUsage ShenandoahMemoryPool::get_memory_usage() { // to make sense under the race. See JDK-8207200. committed = MAX2(used, committed); assert(used <= committed, "used: %zu, committed: %zu", used, committed); -#define KELVIN_MEMORY_POOL -#ifdef KELVIN_MEMORY_POOL - log_info(gc)("ShenMemPool::get_memory_usage(%zu, %zu, %zu, %zu)", - initial, used, committed, max); -#endif return MemoryUsage(initial, used, committed, max); } @@ -90,10 +85,6 @@ MemoryUsage ShenandoahGenerationalMemoryPool::get_memory_usage() { size_t used = used_in_bytes(); size_t committed = _generation->used_regions_size(); -#ifdef KELVIN_MEMORY_POOL - log_info(gc)("ShenGenMemPool::get_memory_usage(%zu, %zu, %zu, %zu) for generation %s", - initial, used, committed, max, _generation->is_young()? "young": _generation->is_old()? "old": "global"); -#endif return MemoryUsage(initial, used, committed, max); } From aefb7a0cf919fb21b12e0bdc3a6f76e3e3b6b33a Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Wed, 13 Aug 2025 07:33:31 -0600 Subject: [PATCH 45/61] Fix accounting error in allocate_contiguous --- .../share/gc/shenandoah/shenandoahFreeSet.cpp | 91 +++++++++---------- .../gc/shenandoah/shenandoahHeapRegion.cpp | 4 +- 2 files changed, 47 insertions(+), 48 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index 931a6a5856897..ffdafb4905652 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -746,8 +746,8 @@ size_t ShenandoahRegionPartitions::retire_from_partition(ShenandoahFreeSetPartit #endif } _membership[int(partition)].clear_bit(idx); + decrease_region_counts(partition, 1); shrink_interval_if_boundary_modified(partition, idx); - _region_counts[int(partition)]--; // This region is fully used, whether or not top() equals end(). It // is retired and no more memory will be allocated from within it. 
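The allocate_contiguous rework in the next hunk computes the trailing-region occupancy once, with a mask, and uses it both to set top() on the last region and to account waste. A minimal sketch of that arithmetic, using only names that appear in the hunk (all other bookkeeping omitted):

    // Sketch only (humongous branch of the reworked allocate_contiguous): words used in the
    // trailing region of a contiguous allocation of words_size words, and the implied waste.
    const size_t used_words_in_last_region = words_size & ShenandoahHeapRegion::region_size_words_mask();
    size_t waste_bytes;
    if (used_words_in_last_region == 0) {
      // The request is a whole number of regions; the last region is fully consumed.
      waste_bytes = 0;
    } else {
      waste_bytes = ShenandoahHeapRegion::region_size_bytes() - used_words_in_last_region * HeapWordSize;
    }
    // The non-humongous branch instead starts waste_bytes at zero and accumulates whatever
    // retire_from_partition() reports for the regions it retires.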
@@ -1860,63 +1860,63 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req, bo } size_t total_used = 0; - size_t used_words_in_last_region = 0; - size_t waste_bytes = 0; + const size_t used_words_in_last_region = words_size & ShenandoahHeapRegion::region_size_words_mask(); + size_t waste_bytes; + // Retire regions from free partition and initialize them. if (is_humongous) { // Humongous allocation retires all regions at once: no allocation is possible anymore. // retire_range_from_partition() will adjust bounds on Mutator free set if appropriate and will recompute affiliated. _partitions.retire_range_from_partition(ShenandoahFreeSetPartitionId::Mutator, beg, end); + for (idx_t i = beg; i <= end; i++) { + ShenandoahHeapRegion* r = _heap->get_region(i); + assert(i == beg || _heap->get_region(i - 1)->index() + 1 == r->index(), "Should be contiguous"); + assert(r->is_empty(), "Should be empty"); + r->try_recycle_under_lock(); + r->set_affiliation(req.affiliation()); + if (i == beg) { + r->make_humongous_start(); + } else { + r->make_humongous_cont(); + } + r->set_top(r->end()); + r->set_update_watermark(r->bottom()); + } total_used = ShenandoahHeapRegion::region_size_bytes() * num; - size_t used_words_in_last_region = words_size & ShenandoahHeapRegion::region_size_words_mask(); waste_bytes = ShenandoahHeapRegion::region_size_bytes() - used_words_in_last_region * HeapWordSize; } else { // Non-humongous allocation retires only the regions that cannot be used for allocation anymore. + waste_bytes = 0; for (idx_t i = beg; i <= end; i++) { ShenandoahHeapRegion* r = _heap->get_region(i); - if (r->free() < PLAB::min_size() * HeapWordSize) { + assert(i == beg || _heap->get_region(i - 1)->index() + 1 == r->index(), "Should be contiguous"); + assert(r->is_empty(), "Should be empty"); + r->try_recycle_under_lock(); + r->set_affiliation(req.affiliation()); + r->make_regular_allocation(req.affiliation()); + if (i < end) { + r->set_top(r->end()); + } else { + r->set_top(r->bottom() + used_words_in_last_region); + } + r->set_update_watermark(r->bottom()); + total_used += r->used(); + if (r->free() < PLAB::min_size() * HeapWordSize) { // retire_from_partition() will adjust bounds on Mutator free set if appropriate and will recompute affiliated. - // It also increases used for Muttor partition. + // It also increases used for the waste bytes, which includes bytes filled at retirement and bytes too small + // to be filled. Only the last iteration may have non-zero waste_bytes. waste_bytes += _partitions.retire_from_partition(ShenandoahFreeSetPartitionId::Mutator, i, r->used()); } - total_used += r->used(); } + _partitions.decrease_empty_region_counts(ShenandoahFreeSetPartitionId::Mutator, num); if (waste_bytes > 0) { + // For humongous allocations, waste_bytes are included in total_used. Since this is not humongous, + // we need to account separately for the waste_bytes. 
increase_bytes_allocated(waste_bytes); } } _partitions.increase_used(ShenandoahFreeSetPartitionId::Mutator, total_used); increase_bytes_allocated(total_used); - // Initialize regions: - for (idx_t i = beg; i <= end; i++) { - ShenandoahHeapRegion* r = _heap->get_region(i); - r->try_recycle_under_lock(); - - assert(i == beg || _heap->get_region(i - 1)->index() + 1 == r->index(), "Should be contiguous"); - assert(r->is_empty(), "Should be empty"); - - r->set_affiliation(req.affiliation()); - if (is_humongous) { - if (i == beg) { - r->make_humongous_start(); - } else { - r->make_humongous_cont(); - } - } else { - r->make_regular_allocation(req.affiliation()); - } - - // Trailing region may be non-full, record the humongous_waste there - size_t used_words; - if ((i == end) && (used_words_in_last_region != 0)) { - used_words = used_words_in_last_region; - } else { - used_words = ShenandoahHeapRegion::region_size_words(); - } - r->set_update_watermark(r->bottom()); - r->set_top(r->bottom() + used_words); - } - #ifdef KELVIN_OUT_WITH_THE_OLD // deprecated with ShenandoahPacing if (used_words_in_last_region != 0) { @@ -1927,18 +1927,15 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req, bo req.set_actual_size(words_size); // If !is_humongous, the "waste" is made availabe for new allocation - if (is_humongous && used_words_in_last_region != 0) { - size_t waste = ShenandoahHeapRegion::region_size_words() - used_words_in_last_region; - req.set_waste(waste); + if (waste_bytes > 0) { + req.set_waste(waste_bytes / HeapWordSize); + if (is_humongous) { #ifdef KELVIN_HUMONGOUS_WASTE - log_info(gc)("FreeSet alloc_contiguous increasing mutator humongous waste by %zu bytes", waste * HeapWordSize); + log_info(gc)("FreeSet alloc_contiguous increasing mutator humongous waste by %zu bytes", waste * HeapWordSize); #endif - size_t waste_bytes = waste * HeapWordSize; - _partitions.increase_humongous_waste(ShenandoahFreeSetPartitionId::Mutator, waste_bytes); - _total_humongous_waste += waste_bytes; - } else if (!is_humongous && (waste_bytes > 0)) { - // Waste is padding in last region if padding is too small to serve as TLAB. - req.set_waste(waste_bytes / HeapWordSize); + _partitions.increase_humongous_waste(ShenandoahFreeSetPartitionId::Mutator, waste_bytes); + _total_humongous_waste += waste_bytes; + } } #ifdef KELVIN_REGION_COUNTS diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp index 79f9e3d9c92e9..feec2a06ccf54 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp @@ -585,13 +585,15 @@ void ShenandoahHeapRegion::recycle_internal() { set_affiliation(FREE); } +// Upon return, this region has been recycled. We try to recycle it. +// We may fail if some other thread recycled it before we do. void ShenandoahHeapRegion::try_recycle_under_lock() { shenandoah_assert_heaplocked(); if (is_trash() && _recycling.try_set()) { if (is_trash()) { // At freeset rebuild time, which precedes recycling of collection set, we treat all cset regions as // part of capacity, as empty, as fully available, and as unaffiliated. This provides short-lived optimism - // for triggering and pacing heuristics. It greatly simplifies and reduces the locking overhead required + // for triggering heuristics. It greatly simplifies and reduces the locking overhead required // by more time-precise accounting of these details. 
recycle_internal(); } From 8c72e9d7cdc662f316f8ec58419a9d6b37f47b01 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Wed, 13 Aug 2025 08:41:14 -0600 Subject: [PATCH 46/61] fix top for last humongous region in alloc_contiguous --- .../share/gc/shenandoah/shenandoahFreeSet.cpp | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index ffdafb4905652..31121907cbb18 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -1878,7 +1878,11 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req, bo } else { r->make_humongous_cont(); } - r->set_top(r->end()); + if (i == end) { + r->set_top(r->bottom() + used_words_in_last_region); + } else { + r->set_top(r->end()); + } r->set_update_watermark(r->bottom()); } total_used = ShenandoahHeapRegion::region_size_bytes() * num; @@ -1893,10 +1897,10 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req, bo r->try_recycle_under_lock(); r->set_affiliation(req.affiliation()); r->make_regular_allocation(req.affiliation()); - if (i < end) { - r->set_top(r->end()); - } else { + if (i == end) { r->set_top(r->bottom() + used_words_in_last_region); + } else { + r->set_top(r->end()); } r->set_update_watermark(r->bottom()); total_used += r->used(); From f2ff7224e9958ddb4bf3da096b5f64206947e016 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Wed, 13 Aug 2025 17:42:30 -0600 Subject: [PATCH 47/61] a few more bookkeeping fixes in allocate_contiguous --- src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index 31121907cbb18..aa51c1d7db069 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -1870,23 +1870,25 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req, bo for (idx_t i = beg; i <= end; i++) { ShenandoahHeapRegion* r = _heap->get_region(i); assert(i == beg || _heap->get_region(i - 1)->index() + 1 == r->index(), "Should be contiguous"); - assert(r->is_empty(), "Should be empty"); r->try_recycle_under_lock(); + assert(r->is_empty(), "Should be empty"); r->set_affiliation(req.affiliation()); if (i == beg) { r->make_humongous_start(); } else { r->make_humongous_cont(); } - if (i == end) { + if ((i == end) && (used_words_in_last_region > 0)) { r->set_top(r->bottom() + used_words_in_last_region); } else { + // if used_words_in_last_region is zero, then the end region is fully consumed. r->set_top(r->end()); } r->set_update_watermark(r->bottom()); } total_used = ShenandoahHeapRegion::region_size_bytes() * num; - waste_bytes = ShenandoahHeapRegion::region_size_bytes() - used_words_in_last_region * HeapWordSize; + waste_bytes = + (used_words_in_last_region == 0)? 0: ShenandoahHeapRegion::region_size_bytes() - used_words_in_last_region * HeapWordSize; } else { // Non-humongous allocation retires only the regions that cannot be used for allocation anymore. 
waste_bytes = 0; @@ -1897,9 +1899,10 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req, bo r->try_recycle_under_lock(); r->set_affiliation(req.affiliation()); r->make_regular_allocation(req.affiliation()); - if (i == end) { + if ((i == end) && (used_words_in_last_region > 0)) { r->set_top(r->bottom() + used_words_in_last_region); } else { + // if used_words_in_last_region is zero, then the end region is fully consumed. r->set_top(r->end()); } r->set_update_watermark(r->bottom()); From a8ac8ccf72fd4a3c4fa6e02929786d30c68b1dc6 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Thu, 14 Aug 2025 23:08:31 +0000 Subject: [PATCH 48/61] Fix googletest --- .../share/gc/shenandoah/shenandoahFreeSet.cpp | 103 ++++++++++++++++++ .../share/gc/shenandoah/shenandoahFreeSet.hpp | 7 ++ .../gc/shenandoah/shenandoahGeneration.hpp | 5 + 3 files changed, 115 insertions(+) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index aa51c1d7db069..297b2868b323d 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -228,6 +228,45 @@ inline bool ShenandoahFreeSet::has_alloc_capacity(ShenandoahHeapRegion *r) const return alloc_capacity(r) > 0; } +// This is used for unit testing. Do not use in production code. +void ShenandoahFreeSet::resize_old_collector_capacity(size_t regions) { + shenandoah_assert_not_heaplocked(); + ShenandoahHeap* heap = ShenandoahHeap::heap(); + ShenandoahHeapLocker locker(heap->lock()); + size_t original_old_regions = _partitions.get_total_region_counts(ShenandoahFreeSetPartitionId::OldCollector); + size_t unaffiliated_mutator_regions = _partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::Mutator); + size_t unaffiliated_collector_regions = _partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::Collector); + size_t unaffiliated_old_collector_regions = _partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::OldCollector); + if (regions > original_old_regions) { + size_t regions_to_transfer = regions - original_old_regions; + if (regions_to_transfer <= unaffiliated_mutator_regions + unaffiliated_collector_regions) { + size_t regions_from_mutator = + (regions_to_transfer > unaffiliated_mutator_regions)? 
unaffiliated_mutator_regions: regions_to_transfer; + regions_to_transfer -= regions_from_mutator; + size_t regions_from_collector = regions_to_transfer; + if (regions_from_mutator > 0) { + transfer_empty_regions_from_to(ShenandoahFreeSetPartitionId::Mutator, ShenandoahFreeSetPartitionId::OldCollector, + regions_from_mutator); + } + if (regions_from_collector > 0) { + transfer_empty_regions_from_to(ShenandoahFreeSetPartitionId::Collector, ShenandoahFreeSetPartitionId::OldCollector, + regions_from_mutator); + } + } else { + fatal("Could not resize old for unit test"); + } + } else if (regions < original_old_regions) { + size_t regions_to_transfer = original_old_regions - regions; + if (regions_to_transfer <= unaffiliated_old_collector_regions) { + transfer_empty_regions_from_to(ShenandoahFreeSetPartitionId::OldCollector, ShenandoahFreeSetPartitionId::Mutator, + regions_to_transfer); + } else { + fatal("Could not resize old for unit test"); + } + } + // else, old generation is already appropriately sized +} + void ShenandoahFreeSet::reset_bytes_allocated_since_gc_start() { shenandoah_assert_heaplocked(); _mutator_bytes_allocated_since_gc_start = 0; @@ -2581,6 +2620,70 @@ void ShenandoahFreeSet::transfer_humongous_regions_from_mutator_to_old_collector // No need to adjust ranges because humongous regions are not allocatable } +void ShenandoahFreeSet::transfer_empty_regions_from_to(ShenandoahFreeSetPartitionId source, + ShenandoahFreeSetPartitionId dest, + size_t num_regions) { + shenandoah_assert_heaplocked(); + const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); + size_t transferred_regions = 0; + size_t used_transfer = 0; + idx_t source_low_idx = _partitions.max(); + idx_t source_high_idx = -1; + idx_t dest_low_idx = _partitions.max(); + idx_t dest_high_idx = -1; + ShenandoahLeftRightIterator iterator(&_partitions, source, true); + for (idx_t idx = iterator.current(); transferred_regions < num_regions && iterator.has_next(); idx = iterator.next()) { + // Note: can_allocate_from() denotes that region is entirely empty + if (can_allocate_from(idx)) { + if (idx < source_low_idx) { + source_low_idx = idx; + } + if (idx > source_high_idx) { + source_high_idx = idx; + } + if (idx < dest_low_idx) { + dest_low_idx = idx; + } + if (idx > dest_high_idx) { + dest_high_idx = idx; + } + used_transfer += _partitions.move_from_partition_to_partition_with_deferred_accounting(idx, source, dest, region_size_bytes); + transferred_regions++; + } + } + // All transferred regions are empty. 
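A worked example of the bookkeeping that follows (illustrative numbers only): moving 3 empty regions from Mutator to OldCollector leaves used untouched on both sides (the assert below relies on the regions being empty), shrinks Mutator's capacity by 3 * region_size_bytes and its total, free, and empty region counts by 3, grows the same quantities for OldCollector, and decreases _total_young_regions by 3. A transfer between Mutator and Collector adjusts the partition totals the same way but leaves _total_young_regions alone, since both partitions are young.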
+ assert(used_transfer == 0, "empty regions should have no used"); + _partitions.expand_interval_if_range_modifies_either_boundary(dest, dest_low_idx, + dest_high_idx, dest_low_idx, dest_high_idx); + _partitions.shrink_interval_if_range_modifies_either_boundary(source, source_low_idx, source_high_idx, + transferred_regions); + + _partitions.decrease_region_counts(source, transferred_regions); + _partitions.decrease_empty_region_counts(source, transferred_regions); + _partitions.decrease_total_region_counts(source, transferred_regions); + _partitions.decrease_capacity(source, transferred_regions * region_size_bytes); + + _partitions.increase_total_region_counts(dest, transferred_regions); + _partitions.increase_capacity(dest, transferred_regions * region_size_bytes); + _partitions.increase_region_counts(dest, transferred_regions); + _partitions.increase_empty_region_counts(dest, transferred_regions); + + if ((source == ShenandoahFreeSetPartitionId::OldCollector) && (dest == ShenandoahFreeSetPartitionId::Mutator)) { + _total_young_regions += transferred_regions; + } else if ((source == ShenandoahFreeSetPartitionId::Mutator) && (dest == ShenandoahFreeSetPartitionId::OldCollector)) { + _total_young_regions -= transferred_regions; + } else if ((source == ShenandoahFreeSetPartitionId::OldCollector) && (dest == ShenandoahFreeSetPartitionId::Collector)) { + _total_young_regions += transferred_regions; + } else if ((source == ShenandoahFreeSetPartitionId::Collector) && (dest == ShenandoahFreeSetPartitionId::OldCollector)) { + _total_young_regions -= transferred_regions; + } + + // _total_global_regions unaffected by transfer + recompute_total_used(); + _partitions.assert_bounds(true); + // Should not need to recompute_total_affiliated() because all transferred regions are empty. +} + // Returns number of regions transferred, adds transferred bytes to var argument bytes_transferred size_t ShenandoahFreeSet::transfer_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPartitionId which_collector, size_t max_xfer_regions, diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp index ab933ebb2b357..393bb3ce806dd 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp @@ -628,6 +628,10 @@ class ShenandoahFreeSet : public CHeapObj { inline bool has_alloc_capacity(ShenandoahHeapRegion *r) const; + void transfer_empty_regions_from_to(ShenandoahFreeSetPartitionId source_partition, + ShenandoahFreeSetPartitionId dest_partition, + size_t num_regions); + size_t transfer_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPartitionId which_collector, size_t max_xfer_regions, size_t& bytes_transferred); @@ -682,6 +686,9 @@ class ShenandoahFreeSet : public CHeapObj { void prepare_to_promote_in_place(size_t idx, size_t bytes); + // This is used for unit testing. Not for preoduction. Invokes exit() if old cannot be resized. 
+ void resize_old_collector_capacity(size_t desired_regions); + // Return bytes used by young inline size_t young_used() { return _total_young_used; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp index d685ab53b1e4a..8b324877c7fe5 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp @@ -130,6 +130,11 @@ class ShenandoahGeneration : public CHeapObj, public ShenandoahSpaceInfo { virtual void post_initialize(ShenandoahHeap* heap); + // Use this only for unit testing. Do not use for production. + inline void set_capacity(size_t bytes) { + ShenandoahHeap::heap()->free_set()->resize_old_collector_capacity(bytes / ShenandoahHeapRegion::region_size_bytes()); + } + size_t max_capacity() const override; virtual size_t used_regions() const; From d02a37af7a0b65c90e964cf76ac5ad18553151fa Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Thu, 14 Aug 2025 23:44:44 +0000 Subject: [PATCH 49/61] do not query allocation runway during heap initialization --- src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp index ec1d81f4f9f10..8ff2098377c91 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -427,8 +427,10 @@ jint ShenandoahHeap::initialize() { _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old); if (mode()->is_generational()) { ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap(); - size_t allocation_runway = - gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_cset_regions); + // We cannot call + // gen_heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_cset_regions) + // until after the heap is fully initialized. So we make up a safe value here. + size_t allocation_runway = InitialHeapSize / 2; gen_heap->compute_old_generation_balance(allocation_runway, old_cset_regions); } _free_set->finish_rebuild(young_cset_regions, old_cset_regions, num_old); From a5ee645eaa473fa3a16576483a23ec0ebc7b3c2c Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Fri, 15 Aug 2025 15:38:45 +0000 Subject: [PATCH 50/61] move set_capacity() into ShenandoahOldGeneration --- src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp | 5 ----- src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp | 5 +++++ 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp index 8b324877c7fe5..d685ab53b1e4a 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp @@ -130,11 +130,6 @@ class ShenandoahGeneration : public CHeapObj, public ShenandoahSpaceInfo { virtual void post_initialize(ShenandoahHeap* heap); - // Use this only for unit testing. Do not use for production. 
- inline void set_capacity(size_t bytes) { - ShenandoahHeap::heap()->free_set()->resize_old_collector_capacity(bytes / ShenandoahHeapRegion::region_size_bytes()); - } - size_t max_capacity() const override; virtual size_t used_regions() const; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp index 0e6bf946fcd05..56e4c21956287 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp @@ -100,6 +100,11 @@ class ShenandoahOldGeneration : public ShenandoahGeneration { return _old_heuristics; } + // Use this only for unit testing. Do not use for production. + inline void set_capacity(size_t bytes) { + ShenandoahHeap::heap()->free_set()->resize_old_collector_capacity(bytes / ShenandoahHeapRegion::region_size_bytes()); + } + // See description in field declaration void set_promoted_reserve(size_t new_val); size_t get_promoted_reserve() const; From 7953f9aff623fa72f88623a58fbe76a9b8503c9d Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Fri, 15 Aug 2025 19:23:26 +0000 Subject: [PATCH 51/61] fix support for googletest ShenandoahOldHeuristics after more testing --- .../share/gc/shenandoah/shenandoahFreeSet.cpp | 23 +++++++++++-------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index 297b2868b323d..19c067706d2b9 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -230,9 +230,7 @@ inline bool ShenandoahFreeSet::has_alloc_capacity(ShenandoahHeapRegion *r) const // This is used for unit testing. Do not use in production code. void ShenandoahFreeSet::resize_old_collector_capacity(size_t regions) { - shenandoah_assert_not_heaplocked(); - ShenandoahHeap* heap = ShenandoahHeap::heap(); - ShenandoahHeapLocker locker(heap->lock()); + shenandoah_assert_heaplocked(); size_t original_old_regions = _partitions.get_total_region_counts(ShenandoahFreeSetPartitionId::OldCollector); size_t unaffiliated_mutator_regions = _partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::Mutator); size_t unaffiliated_collector_regions = _partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::Collector); @@ -2623,6 +2621,7 @@ void ShenandoahFreeSet::transfer_humongous_regions_from_mutator_to_old_collector void ShenandoahFreeSet::transfer_empty_regions_from_to(ShenandoahFreeSetPartitionId source, ShenandoahFreeSetPartitionId dest, size_t num_regions) { + assert(dest != source, "precondition"); shenandoah_assert_heaplocked(); const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); size_t transferred_regions = 0; @@ -2651,6 +2650,7 @@ void ShenandoahFreeSet::transfer_empty_regions_from_to(ShenandoahFreeSetPartitio transferred_regions++; } } + // All transferred regions are empty. 
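With resize_old_collector_capacity() now asserting that the heap lock is already held rather than acquiring it (earlier hunk in this patch), a unit test that drives it through the relocated set_capacity() wrapper would need to hold the lock itself. A hypothetical sketch of such a caller; the surrounding test body is an assumption, not part of the patch:

    // Hypothetical gtest-style usage (assumes generational mode); only set_capacity(),
    // ShenandoahHeapLocker and region_size_bytes() come from the patch / existing code.
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    {
      ShenandoahHeapLocker locker(heap->lock());
      // Resize the OldCollector partition to eight regions; set_capacity() forwards to
      // ShenandoahFreeSet::resize_old_collector_capacity(), which fatals if it cannot comply.
      heap->old_generation()->set_capacity(8 * ShenandoahHeapRegion::region_size_bytes());
    }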
assert(used_transfer == 0, "empty regions should have no used"); _partitions.expand_interval_if_range_modifies_either_boundary(dest, dest_low_idx, @@ -2668,14 +2668,17 @@ void ShenandoahFreeSet::transfer_empty_regions_from_to(ShenandoahFreeSetPartitio _partitions.increase_region_counts(dest, transferred_regions); _partitions.increase_empty_region_counts(dest, transferred_regions); - if ((source == ShenandoahFreeSetPartitionId::OldCollector) && (dest == ShenandoahFreeSetPartitionId::Mutator)) { + if (source == ShenandoahFreeSetPartitionId::OldCollector) { + assert((dest == ShenandoahFreeSetPartitionId::Collector) || (dest == ShenandoahFreeSetPartitionId::Mutator), "sanity"); _total_young_regions += transferred_regions; - } else if ((source == ShenandoahFreeSetPartitionId::Mutator) && (dest == ShenandoahFreeSetPartitionId::OldCollector)) { - _total_young_regions -= transferred_regions; - } else if ((source == ShenandoahFreeSetPartitionId::OldCollector) && (dest == ShenandoahFreeSetPartitionId::Collector)) { - _total_young_regions += transferred_regions; - } else if ((source == ShenandoahFreeSetPartitionId::Collector) && (dest == ShenandoahFreeSetPartitionId::OldCollector)) { - _total_young_regions -= transferred_regions; + } else { + assert((source == ShenandoahFreeSetPartitionId::Collector) || (source == ShenandoahFreeSetPartitionId::Mutator), "sanity"); + if (dest == ShenandoahFreeSetPartitionId::OldCollector) { + _total_young_regions -= transferred_regions; + } else { + assert((dest == ShenandoahFreeSetPartitionId::Collector) || (dest == ShenandoahFreeSetPartitionId::Mutator), "sanity"); + // No adjustments to total_young_regions if transferring within young + } } // _total_global_regions unaffected by transfer From 3ef98a8f91375bde27197984fdbf6a3876004583 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Fri, 15 Aug 2025 20:12:15 +0000 Subject: [PATCH 52/61] Validate SoftMaxHeapSize before Memory Init --- src/hotspot/share/gc/shared/gc_globals.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/hotspot/share/gc/shared/gc_globals.hpp b/src/hotspot/share/gc/shared/gc_globals.hpp index 7f46f44908556..9df1f19a05f5f 100644 --- a/src/hotspot/share/gc/shared/gc_globals.hpp +++ b/src/hotspot/share/gc/shared/gc_globals.hpp @@ -469,7 +469,7 @@ \ product(size_t, SoftMaxHeapSize, 0, MANAGEABLE, \ "Soft limit for maximum heap size (in bytes)") \ - constraint(SoftMaxHeapSizeConstraintFunc,AfterMemoryInit) \ + constraint(SoftMaxHeapSizeConstraintFunc,AfterErgo) \ \ product(size_t, NewSize, ScaleForWordSize(1*M), \ "Initial new generation size (in bytes)") \ From e33cf90f6dcfb9f5067f97bfa68d9c940a31296d Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Mon, 18 Aug 2025 01:37:30 +0000 Subject: [PATCH 53/61] Add GenShen no-tlab test and increase test timeouts --- .../jtreg/gc/shenandoah/TestSieveObjects.java | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/test/hotspot/jtreg/gc/shenandoah/TestSieveObjects.java b/test/hotspot/jtreg/gc/shenandoah/TestSieveObjects.java index 37359b038b358..dd2c4adf2e5d0 100644 --- a/test/hotspot/jtreg/gc/shenandoah/TestSieveObjects.java +++ b/test/hotspot/jtreg/gc/shenandoah/TestSieveObjects.java @@ -144,12 +144,25 @@ * @requires vm.gc.Shenandoah * @library /test/lib * - * @run main/othervm/timeout=240 -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * @run main/othervm/timeout=480 -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions * -XX:+UseShenandoahGC * 
-XX:-UseTLAB -XX:+ShenandoahVerify * TestSieveObjects */ +/* + * @test id=no-tlab-genshen + * @summary Acceptance tests: collector can deal with retained objects + * @key randomness + * @requires vm.gc.Shenandoah + * @library /test/lib + * + * @run main/othervm/timeout=480 -Xmx1g -Xms1g -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions + * -XX:+UseShenandoahGC -XX:ShenandoahGCMode=generational + * -XX:-UseTLAB -XX:+ShenandoahVerify + * TestSieveObjects + */ + import java.util.Random; import jdk.test.lib.Utils; From d3c75fc9287817ff62866cbaee2890ae4a031508 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Wed, 20 Aug 2025 02:54:53 +0000 Subject: [PATCH 54/61] do not fill remnant memory when retiring regions --- src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp | 11 ++++++++--- .../share/gc/shenandoah/shenandoahVerifier.cpp | 3 ++- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index 19c067706d2b9..9520578b8b4d0 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -763,7 +763,12 @@ size_t ShenandoahRegionPartitions::retire_from_partition(ShenandoahFreeSetPartit size_t fill_padding = _region_size_bytes - used_bytes; waste_bytes = fill_padding; increase_used(partition, fill_padding); -#ifdef ASSERT +#ifdef KELVIN_DEPRECATE + // Kelvin note: I'm counting the fill as used, but I'm no longer + // allocating the fill object. This means ShenandoahVerify and + // FreeSet::assert_bounds() have to count retired region unused + // memory as if it is used. + // Fill the unused memory so that verification will not be confused by inconsistent tallies of used size_t fill_words = fill_padding / HeapWordSize; ShenandoahHeapRegion*r = ShenandoahHeap::heap()->get_region(idx); @@ -1055,7 +1060,6 @@ void ShenandoahRegionPartitions::assert_bounds(bool validate_totals) { humongous_waste[i] = 0; } - size_t min_free_size = ShenandoahHeap::min_fill_size() * HeapWordSize; for (idx_t i = 0; i < _max; i++) { ShenandoahFreeSetPartitionId partition = membership(i); size_t capacity = _free_set->alloc_capacity(i); @@ -1079,7 +1083,8 @@ void ShenandoahRegionPartitions::assert_bounds(bool validate_totals) { young_humongous_waste += capacity; } } else { - assert(r->is_cset() || (capacity < min_free_size), "Retired regions should be filled already"); + assert(r->is_cset() || (capacity < PLAB::min_size() * HeapWordSize), + "Expect retired remnant to be smaller than min plab size"); // This region has been retired already or it is in the cset. In either case, we set capacity to zero // so that the entire region will be counted as used. We count young cset regions as "retired". capacity = 0; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp index 4a53e19758414..1c33091e795aa 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp @@ -391,7 +391,8 @@ class ShenandoahCalculateRegionStatsClosure : public ShenandoahHeapRegionClosure #endif { _region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); - _min_free_size = ShenandoahHeap::min_fill_size() * HeapWordSize; + // Retired regions are not necessarily filled, thouugh their remnant memory is considered used. 
+ _min_free_size = PLAB::min_size() * HeapWordSize; }; void heap_region_do(ShenandoahHeapRegion* r) override { From 938efac9868e014bf7a7d3fdc252048dcda30f2e Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Wed, 20 Aug 2025 16:25:12 +0000 Subject: [PATCH 55/61] do not fill remnant memory of retired heap regions --- .../share/gc/shenandoah/shenandoahFreeSet.cpp | 26 ++++++++++++++++--- .../share/gc/shenandoah/shenandoahFreeSet.hpp | 2 +- .../shenandoahGenerationalEvacuationTask.cpp | 4 +++ .../shenandoah/shenandoahGenerationalHeap.cpp | 5 ++++ 4 files changed, 32 insertions(+), 5 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index 9520578b8b4d0..a170303bf64fc 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -186,9 +186,9 @@ ShenandoahRegionPartitions::ShenandoahRegionPartitions(size_t max_regions, Shena void ShenandoahFreeSet::prepare_to_promote_in_place(size_t idx, size_t bytes) { shenandoah_assert_heaplocked(); - size_t min_fill_size = ShenandoahHeap::min_fill_size() * HeapWordSize; + size_t min_remnant_size = PLAB::min_size() * HeapWordSize; ShenandoahFreeSetPartitionId p = _partitions.membership(idx); - if (bytes >= min_fill_size) { + if (bytes >= min_remnant_size) { assert((p == ShenandoahFreeSetPartitionId::Mutator) || (p == ShenandoahFreeSetPartitionId::Collector), "PIP region must be associated with young"); _partitions.increase_used(p, bytes); @@ -1084,7 +1084,7 @@ void ShenandoahRegionPartitions::assert_bounds(bool validate_totals) { } } else { assert(r->is_cset() || (capacity < PLAB::min_size() * HeapWordSize), - "Expect retired remnant to be smaller than min plab size"); + "Expect retired remnant size to be smaller than min plab size"); // This region has been retired already or it is in the cset. In either case, we set capacity to zero // so that the entire region will be counted as used. We count young cset regions as "retired". capacity = 0; @@ -1354,6 +1354,11 @@ void ShenandoahFreeSet::add_promoted_in_place_region_to_old_collector(Shenandoah } else { if (available_in_region >= ShenandoahHeap::min_fill_size() * HeapWordSize) { size_t fill_words = available_in_region / HeapWordSize; +#undef KELVIN_FILL +#ifdef KELVIN_FILL + log_info(gc)("add_pip_region_to_old_collector(region %zu) filling at " PTR_FORMAT ", size: %zu", + region->index(), p2i(region->top()), available_in_region); +#endif ShenandoahHeap::heap()->old_generation()->card_scan()->register_object(region->top()); region->allocate_fill(fill_words); } @@ -2396,7 +2401,7 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r // Do not add regions that would almost surely fail allocation size_t ac = alloc_capacity(region); - if (ac > PLAB::min_size() * HeapWordSize) { + if (ac >= PLAB::min_size() * HeapWordSize) { if (region->is_trash() || !region->is_old()) { // Both young and old collected regions (trashed) are placed into the Mutator set _partitions.raw_assign_membership(idx, ShenandoahFreeSetPartitionId::Mutator); @@ -2438,10 +2443,15 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r } else { // This region does not have enough free to be part of the free set. Count all of its memory as used. 
assert(_partitions.membership(idx) == ShenandoahFreeSetPartitionId::NotFree, "Region should have been retired"); +#ifdef KELVIN_DEPRECATE + // We no longer fill remnants in NOTFREE regions + // Furthermore, this code has a bug. If region->is_old(), we + // should have registered old object when we created the fill word if (ac >= ShenandoahHeap::min_fill_size() * HeapWordSize) { size_t fill_words = ac / HeapWordSize; region->allocate_fill(fill_words); } +#endif if (region->is_old()) { old_collector_used += region_size_bytes; total_old_collector_regions++; @@ -2470,10 +2480,15 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r size_t byte_size = obj->size() * HeapWordSize; size_t region_span = ShenandoahHeapRegion::required_regions(byte_size); humongous_waste_bytes = region_span * ShenandoahHeapRegion::region_size_bytes() - byte_size; +#ifdef KELVIN_DEPRECATE + // We no longer fill remnant memory in NotFree regions. The memory + // is still counted as used. Furthermore, there's a bug + // here. If the region is old, we need to register the fill object. } else if (!region->is_humongous() && (ac >= ShenandoahHeap::min_fill_size() * HeapWordSize)) { // Don't fill humongous continuations size_t fill_words = ac / HeapWordSize; region->allocate_fill(fill_words); +#endif } if (region->is_old()) { old_collector_used += region_size_bytes; @@ -3716,12 +3731,15 @@ void ShenandoahFreeSet::decrease_humongous_waste_for_regular_bypass(ShenandoahHe _partitions.decrease_used(p, waste); _partitions.unretire_to_partition(r, p); } +#ifdef KELVIN_DEPRECATE + // We no longer fill remnant memory in NotFree regions. We do still count the remnant memory as used. #ifdef ASSERT else if (waste >= ShenandoahHeap::min_fill_size() * HeapWordSize) { // Fill the unused memory so that verification will not be confused by inconsistent tallies of used size_t fill_words = waste / HeapWordSize; r->allocate_fill(fill_words); } +#endif #endif _total_humongous_waste -= waste; recompute_total_used(); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp index 393bb3ce806dd..7baeb99d6fbd7 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp @@ -201,7 +201,7 @@ class ShenandoahRegionPartitions { // Retire region idx from within partition, , leaving its capacity and used as part of the original free partition's totals. // Requires that region idx is in in the Mutator or Collector partitions. Hereafter, identifies this region as NotFree. // Any remnant of available memory at the time of retirement is added to the original partition's total of used bytes. - // Return the number of filled bytes (if any). + // Return the number of waste bytes (if any). size_t retire_from_partition(ShenandoahFreeSetPartitionId p, idx_t idx, size_t used_bytes); // Retire all regions between low_idx and high_idx inclusive from within partition. 
Requires that each region idx is diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp index 27c5f12538a27..04ccf12a2bc34 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp @@ -210,6 +210,10 @@ void ShenandoahGenerationalEvacuationTask::promote_in_place(ShenandoahHeapRegion assert(fill_size >= ShenandoahHeap::min_fill_size(), "previously allocated objects known to be larger than min_size"); ShenandoahHeap::fill_with_object(obj_addr, fill_size); scanner->register_object_without_lock(obj_addr); +#undef KELVIN_FILL +#ifdef KELVIN_FILL + log_info(gc)("pip fills region %zu at " PTR_FORMAT ", %zu bytes", region->index(), p2i(obj_addr), fill_size * HeapWordSize); +#endif obj_addr = next_marked_obj; } } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp index 18ff68a639b1b..5949f0161229c 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp @@ -405,6 +405,11 @@ oop ShenandoahGenerationalHeap::try_evacuate_object(oop p, Thread* thread, Shena fill_with_object(copy, size); shenandoah_assert_correct(nullptr, copy_val); // For non-LAB allocations, the object has already been registered +#undef KELVIN_FILL +#ifdef KELVIN_FILL + log_info(gc)("try_evacuate_object() is filling abandoned copy at " PTR_FORMAT ", of size %zu", + p2i(copy), size * HeapWordSize); +#endif } shenandoah_assert_correct(nullptr, result); return result; From ee1b7da67d87386414f2e7c60a480aecb96c6277 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Wed, 20 Aug 2025 19:13:13 +0000 Subject: [PATCH 56/61] Remove debug instrumentation --- .../heuristics/shenandoahGlobalHeuristics.cpp | 3 - .../share/gc/shenandoah/shenandoahFreeSet.cpp | 612 +----------------- .../share/gc/shenandoah/shenandoahFreeSet.hpp | 46 -- .../share/gc/shenandoah/shenandoahFullGC.cpp | 49 -- .../share/gc/shenandoah/shenandoahFullGC.hpp | 4 - .../gc/shenandoah/shenandoahGeneration.cpp | 233 ------- .../gc/shenandoah/shenandoahGeneration.hpp | 82 --- .../shenandoah/shenandoahGenerationSizer.cpp | 57 -- .../shenandoah/shenandoahGenerationSizer.hpp | 17 - .../shenandoahGenerationalEvacuationTask.cpp | 32 - .../shenandoahGenerationalFullGC.cpp | 29 - .../shenandoahGenerationalFullGC.hpp | 18 - .../shenandoah/shenandoahGenerationalHeap.cpp | 91 +-- .../share/gc/shenandoah/shenandoahHeap.cpp | 53 -- .../gc/shenandoah/shenandoahHeapRegion.cpp | 15 - .../share/gc/shenandoah/shenandoahOldGC.cpp | 18 - .../gc/shenandoah/shenandoahOldGeneration.cpp | 6 - .../gc/shenandoah/shenandoahOldGeneration.hpp | 4 - .../gc/shenandoah/shenandoahVerifier.cpp | 51 -- 19 files changed, 7 insertions(+), 1413 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp index cc2c5a254d904..af431b7e4ac33 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGlobalHeuristics.cpp @@ -138,9 +138,6 @@ void ShenandoahGlobalHeuristics::choose_global_collection_set(ShenandoahCollecti } } if (regions_transferred_to_old > 0) { -#ifdef KELVIN_OUT_WITH_THE_OLD - 
heap->generation_sizer()->force_transfer_to_old(regions_transferred_to_old); -#endif assert(young_evac_reserve > regions_transferred_to_old * region_size_bytes, "young reserve cannot be negative"); heap->young_generation()->set_evacuation_reserve(young_evac_reserve - regions_transferred_to_old * region_size_bytes); heap->old_generation()->set_evacuation_reserve(old_evac_reserve + regions_transferred_to_old * region_size_bytes); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index a170303bf64fc..0cf0b6d15a368 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -50,19 +50,6 @@ static const char* partition_name(ShenandoahFreeSetPartitionId t) { } } -#ifdef KELVIN_USED_PARTITION -const char* _shenandoah_partition_name(ShenandoahFreeSetPartitionId t) { - return partition_name(t); -} -#endif - -#ifdef KELVIN_HUMONGOUS_WASTE -const char* _shenandoah_humongous_partition_name(ShenandoahFreeSetPartitionId t) { - return partition_name(t); -} -#endif - - class ShenandoahLeftRightIterator { private: idx_t _idx; @@ -316,14 +303,6 @@ void ShenandoahRegionPartitions::make_all_regions_unavailable() { _humongous_waste[partition_id] = 0; _available[partition_id] = FreeSetUnderConstruction; } -#ifdef KELVIN_USED_PARTITION - log_info(gc)("make_all_regions_unavailable() setting _used[] to 0 for all partitions"); -#endif -#undef KELVIN_REGION_COUNTS -#ifdef KELVIN_REGION_COUNTS - log_info(gc)("Setting Mutator and Collector total_region_counts to zero, OldCollector is %zu", - _total_region_counts[int(ShenandoahFreeSetPartitionId::OldCollector)]); -#endif _total_region_counts[int(ShenandoahFreeSetPartitionId::Mutator)] = _total_region_counts[int(ShenandoahFreeSetPartitionId::Collector)] = 0; _region_counts[int(ShenandoahFreeSetPartitionId::Mutator)] = _region_counts[int(ShenandoahFreeSetPartitionId::Collector)] = 0; @@ -338,10 +317,6 @@ void ShenandoahRegionPartitions::establish_mutator_intervals(idx_t mutator_leftm size_t mutator_humongous_waste_bytes) { shenandoah_assert_heaplocked(); -#ifdef KELVIN_HUMONGOUS_WASTE - log_info(gc)("FreeSet mutator humongous waste set to %zu, collector to zero", mutator_humongous_waste_bytes); -#endif - _leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_leftmost; _rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_rightmost; _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_leftmost_empty; @@ -370,17 +345,6 @@ void ShenandoahRegionPartitions::establish_mutator_intervals(idx_t mutator_leftm _total_region_counts[int(ShenandoahFreeSetPartitionId::Collector)] = 0; _empty_region_counts[int(ShenandoahFreeSetPartitionId::Collector)] = 0; -#undef KELVIN_CAPACITY -#ifdef KELVIN_CAPACITY - log_info(gc)("establish_mutator_intervals() sets Mutator capacity: %zu", _capacity[int(ShenandoahFreeSetPartitionId::Mutator)]); - log_info(gc)("establish_mutator_intervals() sets Collector capacity: %zu", _capacity[int(ShenandoahFreeSetPartitionId::Collector)]); -#endif -#ifdef KELVIN_USED_PARTITION - log_info(gc)("Setting Mutator used to %zu Collector to 0", mutator_used); -#endif -#ifdef KELVIN_REGION_COUNTS - log_info(gc)("Setting Mutator total_region_counts to %zu Collector to 0", total_mutator_regions); -#endif } void ShenandoahRegionPartitions::establish_old_collector_intervals(idx_t old_collector_leftmost, idx_t old_collector_rightmost, @@ -392,9 +356,6 @@ void 
ShenandoahRegionPartitions::establish_old_collector_intervals(idx_t old_col size_t old_collector_humongous_waste_bytes) { shenandoah_assert_heaplocked(); -#ifdef KELVIN_HUMONGOUS_WASTE - log_info(gc)("FreeSet old_collector humongous waste set to %zu", old_collector_humongous_waste_bytes); -#endif _leftmosts[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_leftmost; _rightmosts[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_rightmost; _leftmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_leftmost_empty; @@ -409,16 +370,6 @@ void ShenandoahRegionPartitions::establish_old_collector_intervals(idx_t old_col _total_region_counts[int(ShenandoahFreeSetPartitionId::OldCollector)] = total_old_collector_region_count; _empty_region_counts[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_empty; -#ifdef KELVIN_CAPACITY - log_info(gc)("establish_old_collector_intervals() sets OldCollector capacity: %zu", - _capacity[int(ShenandoahFreeSetPartitionId::OldCollector)]); -#endif -#ifdef KELVIN_USED_PARTITION - log_info(gc)("Setting OldCollector used to %zu", old_collector_used); -#endif -#ifdef KELVIN_REGION_COUNTS - log_info(gc)("Setting OldCollector total_region_counts to %zu", total_old_collector_region_count); -#endif } void ShenandoahRegionPartitions::increase_used(ShenandoahFreeSetPartitionId which_partition, size_t bytes) { @@ -427,10 +378,6 @@ void ShenandoahRegionPartitions::increase_used(ShenandoahFreeSetPartitionId whic _used[int(which_partition)] += bytes; _available[int(which_partition)] -= bytes; -#ifdef KELVIN_USED_PARTITION - log_info(gc)("ShenRegionPartitions %s increase_used(%zu) to %zu, available shrinks to %zu", - partition_name(which_partition), bytes, _used[int(which_partition)], _available[int(which_partition)]); -#endif assert (_used[int(which_partition)] <= _capacity[int(which_partition)], "Must not use (%zu) more than capacity (%zu) after increase by %zu", _used[int(which_partition)], _capacity[int(which_partition)], bytes); @@ -445,10 +392,6 @@ void ShenandoahRegionPartitions::increase_humongous_waste(ShenandoahFreeSetParti shenandoah_assert_heaplocked(); assert (which_partition < NumPartitions, "Partition must be valid"); _humongous_waste[int(which_partition)] += bytes; -#ifdef KELVIN_HUMONGOUS_WASTE - log_info(gc)("FreeSet<%s>::increase_humongous_waste(%zu) yields: %zu", partition_name(which_partition), - bytes, _humongous_waste[int(which_partition)]); -#endif } size_t ShenandoahRegionPartitions::get_humongous_waste(ShenandoahFreeSetPartitionId which_partition) { @@ -461,10 +404,6 @@ void ShenandoahRegionPartitions::set_capacity_of(ShenandoahFreeSetPartitionId wh assert (which_partition < NumPartitions, "selected free set must be valid"); _capacity[int(which_partition)] = value; _available[int(which_partition)] = value - _used[int(which_partition)]; -#undef KELVIN_CAPACITY -#ifdef KELVIN_CAPACITY - log_info(gc)("set_capacity of %s to %zu", partition_name(which_partition), _capacity[int(which_partition)]); -#endif } @@ -473,10 +412,6 @@ void ShenandoahRegionPartitions::increase_capacity(ShenandoahFreeSetPartitionId assert (which_partition < NumPartitions, "Partition must be valid"); _capacity[int(which_partition)] += bytes; _available[int(which_partition)] += bytes; -#ifdef KELVIN_CAPACITY - log_info(gc)("FreeSet<%s>::increase_capacity(%zu) yields: %zu", partition_name(which_partition), - bytes, _capacity[int(which_partition)]); -#endif } void 
ShenandoahRegionPartitions::decrease_capacity(ShenandoahFreeSetPartitionId which_partition, size_t bytes) { @@ -485,10 +420,6 @@ void ShenandoahRegionPartitions::decrease_capacity(ShenandoahFreeSetPartitionId assert(_capacity[int(which_partition)] >= bytes, "Cannot remove more capacity bytes than are present"); _capacity[int(which_partition)] -= bytes; _available[int(which_partition)] -= bytes; -#ifdef KELVIN_CAPACITY - log_info(gc)("FreeSet<%s>::decrease_capacity(%zu) yields: %zu", partition_name(which_partition), - bytes, _capacity[int(which_partition)]); -#endif } size_t ShenandoahRegionPartitions::get_capacity(ShenandoahFreeSetPartitionId which_partition) { @@ -500,11 +431,6 @@ void ShenandoahRegionPartitions::increase_available(ShenandoahFreeSetPartitionId shenandoah_assert_heaplocked(); assert (which_partition < NumPartitions, "Partition must be valid"); _available[int(which_partition)] += bytes; -#undef KELVIN_AVAILABLE -#ifdef KELVIN_AVAILABLE - log_info(gc)("FreeSet<%s>::increase_available(%zu) yields: %zu", partition_name(which_partition), - bytes, _available[int(which_partition)]); -#endif } void ShenandoahRegionPartitions::decrease_available(ShenandoahFreeSetPartitionId which_partition, size_t bytes) { @@ -512,10 +438,6 @@ void ShenandoahRegionPartitions::decrease_available(ShenandoahFreeSetPartitionId assert (which_partition < NumPartitions, "Partition must be valid"); assert(_available[int(which_partition)] >= bytes, "Cannot remove more available bytes than are present"); _available[int(which_partition)] -= bytes; -#ifdef KELVIN_AVAILABLE - log_info(gc)("FreeSet<%s>::decrease_available(%zu) yields: %zu", partition_name(which_partition), - bytes, _available[int(which_partition)]); -#endif } size_t ShenandoahRegionPartitions::get_available(ShenandoahFreeSetPartitionId which_partition) { @@ -525,51 +447,27 @@ size_t ShenandoahRegionPartitions::get_available(ShenandoahFreeSetPartitionId wh void ShenandoahRegionPartitions::increase_total_region_counts(ShenandoahFreeSetPartitionId which_partition, size_t regions) { _total_region_counts[int(which_partition)] += regions; -#ifdef KELVIN_REGION_COUNTS - log_info(gc)("Increase total region counts[%s] by %zu to %zu", partition_name(which_partition), regions, - _total_region_counts[int(which_partition)]); -#endif } void ShenandoahRegionPartitions::decrease_total_region_counts(ShenandoahFreeSetPartitionId which_partition, size_t regions) { -#ifdef KELVIN_REGION_COUNTS - log_info(gc)("Decreasing total region counts[%s] by %zu from %zu", partition_name(which_partition), regions, - _total_region_counts[int(which_partition)]); -#endif assert(_total_region_counts[int(which_partition)] >= regions, "Cannot remove more regions than are present"); _total_region_counts[int(which_partition)] -= regions; } void ShenandoahRegionPartitions::increase_region_counts(ShenandoahFreeSetPartitionId which_partition, size_t regions) { _region_counts[int(which_partition)] += regions; -#ifdef KELVIN_REGION_COUNTS - log_info(gc)("Increase region counts[%s] by %zu to %zu", partition_name(which_partition), regions, - _region_counts[int(which_partition)]); -#endif } void ShenandoahRegionPartitions::decrease_region_counts(ShenandoahFreeSetPartitionId which_partition, size_t regions) { -#ifdef KELVIN_REGION_COUNTS - log_info(gc)("Decreasing region counts[%s] by %zu from %zu", partition_name(which_partition), regions, - _region_counts[int(which_partition)]); -#endif assert(_region_counts[int(which_partition)] >= regions, "Cannot remove more regions than are present"); 
_region_counts[int(which_partition)] -= regions; } void ShenandoahRegionPartitions::increase_empty_region_counts(ShenandoahFreeSetPartitionId which_partition, size_t regions) { _empty_region_counts[int(which_partition)] += regions; -#ifdef KELVIN_REGION_COUNTS - log_info(gc)("Increase empty region counts[%s] by %zu to %zu", partition_name(which_partition), regions, - _empty_region_counts[int(which_partition)]); -#endif } void ShenandoahRegionPartitions::decrease_empty_region_counts(ShenandoahFreeSetPartitionId which_partition, size_t regions) { -#ifdef KELVIN_REGION_COUNTS - log_info(gc)("Decreasing empty_region counts[%s] by %zu from %zu", partition_name(which_partition), regions, - _empty_region_counts[int(which_partition)]); -#endif assert(_empty_region_counts[int(which_partition)] >= regions, "Cannot remove more regions than are present"); _empty_region_counts[int(which_partition)] -= regions; } @@ -582,10 +480,6 @@ void ShenandoahRegionPartitions::one_region_is_no_longer_empty(ShenandoahFreeSet void ShenandoahRegionPartitions::shrink_interval_if_range_modifies_either_boundary( ShenandoahFreeSetPartitionId partition, idx_t low_idx, idx_t high_idx, size_t num_regions) { assert((low_idx <= high_idx) && (low_idx >= 0) && (high_idx < _max), "Range must span legal index values"); -#undef KELVIN_INTERVALS -#ifdef KELVIN_INTERVALS - log_info(gc)("shrink_interval_if_range_modifies_either_boundary(%s, %zd, %zd)", partition_name(partition), low_idx, high_idx); -#endif size_t span = high_idx + 1 - low_idx; bool regions_are_contiguous = (span == num_regions); if (low_idx == leftmost(partition)) { @@ -634,11 +528,6 @@ void ShenandoahRegionPartitions::shrink_interval_if_range_modifies_either_bounda _leftmosts_empty[int(partition)] = _max; _rightmosts_empty[int(partition)] = -1; } -#ifdef KELVIN_INTERVALS - log_info(gc)("shrink_interval_if_range_modifies_either_boundary ends with range [%zd, %zd], empty range [%zd, %zd]", - _leftmosts[int(partition)], _rightmosts[int(partition)], - _leftmosts_empty[int(partition)], _rightmosts_empty[int(partition)]); -#endif } void ShenandoahRegionPartitions::establish_interval(ShenandoahFreeSetPartitionId partition, idx_t low_idx, idx_t high_idx, @@ -663,12 +552,6 @@ void ShenandoahRegionPartitions::establish_interval(ShenandoahFreeSetPartitionId _rightmosts[int(partition)] = high_idx; _leftmosts_empty[int(partition)] = low_empty_idx; _rightmosts_empty[int(partition)] = high_empty_idx; - -#ifdef KELVIN_INTERVALS - log_info(gc)("%s: establish_interval ends with [%zd, %zd], empty range [%zd, %zd]", - partition_name(partition), _leftmosts[int(partition)], _rightmosts[int(partition)], - _leftmosts_empty[int(partition)], _rightmosts_empty[int(partition)]); -#endif } inline void ShenandoahRegionPartitions::shrink_interval_if_boundary_modified(ShenandoahFreeSetPartitionId partition, idx_t idx) { @@ -679,10 +562,6 @@ inline void ShenandoahRegionPartitions::shrink_interval_if_boundary_modified(She void ShenandoahRegionPartitions:: expand_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId partition, idx_t low_idx, idx_t high_idx, idx_t low_empty_idx, idx_t high_empty_idx) { -#ifdef KELVIN_INTERVALS - log_info(gc)("expand_interval_if_range_modifies_either_boundary(%s, %zd, %zd, %zd, %zd)", - partition_name(partition), low_idx, high_idx, low_empty_idx, high_empty_idx); -#endif if (_leftmosts[int(partition)] > low_idx) { _leftmosts[int(partition)] = low_idx; } @@ -695,18 +574,10 @@ 
expand_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId p if (_rightmosts_empty[int(partition)] < high_empty_idx) { _rightmosts_empty[int(partition)] = high_empty_idx; } -#ifdef KELVIN_INTERVALS - log_info(gc)("expand_interval_if_range_modifies_either_boundary ends with range [%zd, %zd], empty range [%zd, %zd]", - _leftmosts[int(partition)], _rightmosts[int(partition)], - _leftmosts_empty[int(partition)], _rightmosts_empty[int(partition)]); -#endif } void ShenandoahRegionPartitions::expand_interval_if_boundary_modified(ShenandoahFreeSetPartitionId partition, idx_t idx, size_t region_available) { -#ifdef KELVIN_INTERVALS - log_info(gc)("expand_interval_if_boundary_modified(%s, %zd, %zu)", partition_name(partition), idx, region_available); -#endif if (_leftmosts[int(partition)] > idx) { _leftmosts[int(partition)] = idx; } @@ -721,11 +592,6 @@ void ShenandoahRegionPartitions::expand_interval_if_boundary_modified(Shenandoah _rightmosts_empty[int(partition)] = idx; } } -#ifdef KELVIN_INTERVALS - log_info(gc)("expand_interval_if_boundary_modified ends with range [%zd, %zd], empty range [%zd, %zd]", - _leftmosts[int(partition)], _rightmosts[int(partition)], - _leftmosts_empty[int(partition)], _rightmosts_empty[int(partition)]); -#endif } void ShenandoahRegionPartitions::retire_range_from_partition( @@ -763,29 +629,6 @@ size_t ShenandoahRegionPartitions::retire_from_partition(ShenandoahFreeSetPartit size_t fill_padding = _region_size_bytes - used_bytes; waste_bytes = fill_padding; increase_used(partition, fill_padding); -#ifdef KELVIN_DEPRECATE - // Kelvin note: I'm counting the fill as used, but I'm no longer - // allocating the fill object. This means ShenandoahVerify and - // FreeSet::assert_bounds() have to count retired region unused - // memory as if it is used. - - // Fill the unused memory so that verification will not be confused by inconsistent tallies of used - size_t fill_words = fill_padding / HeapWordSize; - ShenandoahHeapRegion*r = ShenandoahHeap::heap()->get_region(idx); - if (fill_words >= ShenandoahHeap::min_fill_size()) { - if (r->is_old()) { - // We hold the heap lock already - ShenandoahHeap::heap()->old_generation()->card_scan()->register_object(r->top()); - } - r->allocate_fill(fill_words); - } -#ifdef KELVIN_USED_PARTITION - else { - log_info(gc)("KELVIN!!!! 
Did not fill because padding: %zu is too small", waste_bytes); - } - log_info(gc)("Retiring region %zu with padding: %zu", idx, fill_padding); -#endif -#endif } _membership[int(partition)].clear_bit(idx); decrease_region_counts(partition, 1); @@ -894,18 +737,6 @@ void ShenandoahRegionPartitions::move_from_partition_to_partition(idx_t idx, She increase_used(new_partition, used); expand_interval_if_boundary_modified(new_partition, idx, available); -#ifdef KELVIN_CAPACITY - log_info(gc)("Moving one region from %s to %s, adjusting capacities to %zu and %zu", - partition_name(orig_partition), partition_name(new_partition), - _capacity[int(orig_partition)], _capacity[int(new_partition)]); -#endif - -#ifdef KELVIN_REGION_COUNTS - log_info(gc)("Moving one region from %s to %s, adjusting region counts to %zu and %zu", - partition_name(orig_partition), partition_name(new_partition), - _region_counts[int(orig_partition)], _region_counts[int(new_partition)]); -#endif - if (available == _region_size_bytes) { _empty_region_counts[int(orig_partition)]--; _empty_region_counts[int(new_partition)]++; @@ -1354,11 +1185,6 @@ void ShenandoahFreeSet::add_promoted_in_place_region_to_old_collector(Shenandoah } else { if (available_in_region >= ShenandoahHeap::min_fill_size() * HeapWordSize) { size_t fill_words = available_in_region / HeapWordSize; -#undef KELVIN_FILL -#ifdef KELVIN_FILL - log_info(gc)("add_pip_region_to_old_collector(region %zu) filling at " PTR_FORMAT ", size: %zu", - region->index(), p2i(region->top()), available_in_region); -#endif ShenandoahHeap::heap()->old_generation()->card_scan()->register_object(region->top()); region->allocate_fill(fill_words); } @@ -1390,23 +1216,6 @@ void ShenandoahFreeSet::add_promoted_in_place_region_to_old_collector(Shenandoah recompute_total_used(); recompute_total_affiliated(); _partitions.assert_bounds(true); -#ifdef KELVIN_CAPACITY - log_info(gc)("Ater add_pip_to_old(%zu) from partition %s, " - " used: %zu, capacity: %zu, available: %zu, region_counts: %zu, total_regions: %zu", - region->index(), - partition_name(p), - _partitions.get_used(p), - _partitions.get_capacity(p), - _partitions.get_available(p), - _partitions.get_region_counts(p), - _partitions.get_total_region_counts(p)); - log_info(gc)(" OldCollect used: %zu, capacity: %zu, available: %zu, region_counts: %zu, total_regions: %zu", - _partitions.get_used(ShenandoahFreeSetPartitionId::OldCollector), - _partitions.get_capacity(ShenandoahFreeSetPartitionId::OldCollector), - _partitions.get_available(ShenandoahFreeSetPartitionId::OldCollector), - _partitions.get_region_counts(ShenandoahFreeSetPartitionId::OldCollector), - _partitions.get_total_region_counts(ShenandoahFreeSetPartitionId::OldCollector)); -#endif } HeapWord* ShenandoahFreeSet::allocate_from_partition_with_affiliation(ShenandoahAffiliation affiliation, @@ -1689,12 +1498,6 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah r->end_preemptible_coalesce_and_fill(); _heap->old_generation()->clear_cards_for(r); } -#ifdef KELVIN_OUT_WITH_THE_OLD - _heap->generation_for(r->affiliation())->increment_affiliated_region_count(); - if (_heap->mode()->is_generational()) { - _heap->global_generation()->increment_affiliated_region_count(); - } -#endif #ifdef ASSERT ShenandoahMarkingContext* const ctx = _heap->marking_context(); assert(ctx->top_at_mark_start(r) == r->bottom(), "Newly established allocation region starts with TAMS equal to bottom"); @@ -1819,18 +1622,9 @@ HeapWord* 
ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah _partitions.one_region_is_no_longer_empty(orig_partition); } size_t waste_bytes = _partitions.retire_from_partition(orig_partition, idx, r->used()); -#ifdef KELVIN_OUT_WITH_THE_OLD - if (waste_bytes > 0) { - request_generation->increase_used(waste_bytes); - if (_heap->mode()->is_generational()) { - _heap->global_generation()->increase_used(waste_bytes); - } - } -#else if (req.is_mutator_alloc() && (waste_bytes > 0)) { increase_bytes_allocated(waste_bytes); } -#endif } else if ((result != nullptr) && in_new_region) { _partitions.one_region_is_no_longer_empty(orig_partition); } @@ -1970,32 +1764,15 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req, bo } _partitions.increase_used(ShenandoahFreeSetPartitionId::Mutator, total_used); increase_bytes_allocated(total_used); - -#ifdef KELVIN_OUT_WITH_THE_OLD - // deprecated with ShenandoahPacing - if (used_words_in_last_region != 0) { - // Record this used_words_in_last_region as allocation waste - _heap->notify_mutator_alloc_words(ShenandoahHeapRegion::region_size_words() - used_words_in_last_region, true); - } -#endif - req.set_actual_size(words_size); // If !is_humongous, the "waste" is made availabe for new allocation if (waste_bytes > 0) { req.set_waste(waste_bytes / HeapWordSize); if (is_humongous) { -#ifdef KELVIN_HUMONGOUS_WASTE - log_info(gc)("FreeSet alloc_contiguous increasing mutator humongous waste by %zu bytes", waste * HeapWordSize); -#endif _partitions.increase_humongous_waste(ShenandoahFreeSetPartitionId::Mutator, waste_bytes); _total_humongous_waste += waste_bytes; } } - -#ifdef KELVIN_REGION_COUNTS - log_info(gc)("Allocating humongous to span %zu regions", num); -#endif - recompute_total_young_used(); recompute_total_global_used(); recompute_total_affiliated(); @@ -2017,16 +1794,7 @@ class ShenandoahRecycleTrashedRegionClosure final : public ShenandoahHeapRegionC void get_lock_and_flush_buffer(size_t region_count, size_t overflow_region_used, size_t overflow_region_index) { ShenandoahHeap* heap = ShenandoahHeap::heap(); ShenandoahHeapLocker locker(heap->lock()); -#undef KELVIN_RECYCLE -#ifdef KELVIN_RECYCLE - Thread* t = Thread::current(); - size_t p2i = (size_t) t; -#endif size_t recycled_regions = Atomic::load(&_recycled_region_count); -#ifdef KELVIN_RECYCLE - log_info(gc)("%zx: got lock, will flush buffer with %zu entries plus (used: %zu, index: %zu)", - p2i, recycled_regions, overflow_region_used, overflow_region_index); -#endif size_t region_tallies[int(ShenandoahRegionPartitions::NumPartitions)]; size_t used_byte_tallies[int(ShenandoahRegionPartitions::NumPartitions)]; for (int p = 0; p < int(ShenandoahRegionPartitions::NumPartitions); p++) { @@ -2047,51 +1815,24 @@ class ShenandoahRecycleTrashedRegionClosure final : public ShenandoahHeapRegionC for (size_t i = 0; i < region_count; i++) { ssize_t used; // wait for other threads to finish updating their entries within the region buffer before processing entry -#ifdef KELVIN_RECYCLE - size_t retry_count = 0; -#endif do { used = _region_used[i]; -#ifdef KELVIN_RECYCLE - if (retry_count++ == 1024 * 1024) { - log_info(gc)("Too many used retries at index %zu", i); - retry_count = 0; - } -#endif } while (used == SentinelUsed); ssize_t index; -#ifdef KELVIN_RECYCLE - retry_count = 0; -#endif do { index = _region_indices[i]; -#ifdef KELVIN_RECYCLE - if (retry_count++ == 1024 * 1024) { - log_info(gc)("Too many index retries at index %zu", i); - retry_count = 0; - } -#endif } while 
(index == SentinelIndex); - ShenandoahFreeSetPartitionId p = _partitions->membership(index); - - + ShenandoahFreeSetPartitionId p = _partitions->membership(index); assert(p != ShenandoahFreeSetPartitionId::NotFree, "Trashed regions should be in a free partition"); used_byte_tallies[int(p)] += used; region_tallies[int(p)]++; -#ifdef KELVIN_RECYCLE - log_info(gc)("flushing bufffer[%zu], index: %zu, used: %zu, partition: %s, used_tally: %zu, region_tally: %zu", - i, index, used, partition_name(p), used_byte_tallies[int(p)], region_tallies[int(p)]); -#endif } if (region_count > 0) { for (size_t i = 0; i < MaxSavedRegions; i++) { _region_indices[i] = SentinelIndex; _region_used[i] = SentinelUsed; } -#ifdef KELVIN_RECYCLE - log_info(gc)("overwrote buffer to all sentinel values"); -#endif } // The almost last thing we do before releasing the lock is to set the _recycled_region_count to 0. What happens next? @@ -2124,37 +1865,11 @@ class ShenandoahRecycleTrashedRegionClosure final : public ShenandoahHeapRegionC // unlikely that it will overflow again before all waiting workers have had a chance to clear their state. While I've // got the heap lock, I'll go ahead and update the global state for my overflow region. I'll let other heap regions // accumulate in the buffer to be processed when the buffer is once again full. -#ifdef KELVIN_RECYCLE - log_info(gc)("Not flushing buffer because other thread did it"); -#endif region_count = 0; } -#ifdef KELVIN_RECYCLE - log_info(gc)("Recycling %zu regions [M, C, O]: used: [%zu, %zu, %zu], regions [%zu, %zu, %zu]", - region_count + 1, - used_byte_tallies[int(ShenandoahFreeSetPartitionId::Mutator)], - used_byte_tallies[int(ShenandoahFreeSetPartitionId::Collector)], - used_byte_tallies[int(ShenandoahFreeSetPartitionId::OldCollector)], - region_tallies[int(ShenandoahFreeSetPartitionId::Mutator)], - region_tallies[int(ShenandoahFreeSetPartitionId::Collector)], - region_tallies[int(ShenandoahFreeSetPartitionId::OldCollector)]); -#endif for (size_t p = 0; p < int(ShenandoahRegionPartitions::NumPartitions); p++) { _partitions->decrease_used(ShenandoahFreeSetPartitionId(p), used_byte_tallies[p]); } -#ifdef KELVIN_OUT_WITH_THE_OLD - ShenandoahYoungGeneration* young_gen = heap->young_generation(); - ShenandoahOldGeneration* old_gen = heap->old_generation(); - young_gen->decrease_used(used_byte_tallies[int(ShenandoahFreeSetPartitionId::Mutator)] - + used_byte_tallies[int(ShenandoahFreeSetPartitionId::Collector)]); - young_gen->decrease_affiliated_region_count(region_tallies[int(ShenandoahFreeSetPartitionId::Mutator)] - + region_tallies[int(ShenandoahFreeSetPartitionId::Collector)]); - old_gen->decrease_used(used_byte_tallies[int(ShenandoahFreeSetPartitionId::OldCollector)]); - old_gen->decrease_affiliated_region_count(region_tallies[int(ShenandoahFreeSetPartitionId::OldCollector)]); -#endif -#ifdef KELVIN_RECYCLE - log_info(gc)("Done with flush buffer, releasing global heap lock"); -#endif } public: @@ -2217,19 +1932,6 @@ bool ShenandoahFreeSet::flip_to_old_gc(ShenandoahHeapRegion* r) { ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap(); const size_t region_alloc_capacity = alloc_capacity(r); -#ifdef KELVIN_OUT_WITH_THE_OLD - bool transferred = gen_heap->generation_sizer()->transfer_to_old(1); - if (transferred) { - _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator, - ShenandoahFreeSetPartitionId::OldCollector, region_alloc_capacity); - 
_heap->old_generation()->augment_evacuation_reserve(region_alloc_capacity); - recompute_total_used(); - recompute_total_affiliated(); - _partitions.assert_bounds(true); - return true; - } - // replaced by the following -#endif if (transfer_one_region_from_mutator_to_old_collector(idx, region_alloc_capacity)) { return true; } @@ -2360,37 +2062,22 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r size_t total_mutator_regions = 0; size_t total_old_collector_regions = 0; -#undef KELVIN_MYSTERY bool is_generational = _heap->mode()->is_generational(); size_t num_regions = _heap->num_regions(); for (size_t idx = 0; idx < num_regions; idx++) { ShenandoahHeapRegion* region = _heap->get_region(idx); -#ifdef KELVIN_MYSTERY - log_info(gc)("find_alloc_capacity() on region %zu, used is %zu", region->index(), region->used()); -#endif - if (region->is_trash()) { // Trashed regions represent immediate garbage identified by final mark and regions that had been in the collection // partition but have not yet been "cleaned up" following update refs. if (region->is_old()) { -#undef KELVIN_PUZZLE -#ifdef KELVIN_PUZZLE - log_info(gc)("find_alloc_capacity sees old trashed region %zu, with %zu used bytes", region->index(), region->used()); -#endif old_trashed_regions++; } else { assert(region->is_young(), "Trashed region should be old or young"); -#ifdef KELVIN_PUZZLE - log_info(gc)("find_alloc_capacity sees young trashed region %zu, with %zu used bytes", region->index(), region->used()); -#endif young_trashed_regions++; } } else if (region->is_old()) { // count both humongous and regular regions, but don't count trash (cset) regions. old_region_count++; -#ifdef KELVIN_PUZZLE - log_info(gc)("find_alloc_capacity sees old non-trashed region %zu, with %zu used bytes", region->index(), region->used()); -#endif if (first_old_region > idx) { first_old_region = idx; } @@ -2443,15 +2130,6 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r } else { // This region does not have enough free to be part of the free set. Count all of its memory as used. assert(_partitions.membership(idx) == ShenandoahFreeSetPartitionId::NotFree, "Region should have been retired"); -#ifdef KELVIN_DEPRECATE - // We no longer fill remnants in NOTFREE regions - // Furthermore, this code has a bug. If region->is_old(), we - // should have registered old object when we created the fill word - if (ac >= ShenandoahHeap::min_fill_size() * HeapWordSize) { - size_t fill_words = ac / HeapWordSize; - region->allocate_fill(fill_words); - } -#endif if (region->is_old()) { old_collector_used += region_size_bytes; total_old_collector_regions++; @@ -2480,15 +2158,6 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r size_t byte_size = obj->size() * HeapWordSize; size_t region_span = ShenandoahHeapRegion::required_regions(byte_size); humongous_waste_bytes = region_span * ShenandoahHeapRegion::region_size_bytes() - byte_size; -#ifdef KELVIN_DEPRECATE - // We no longer fill remnant memory in NotFree regions. The memory - // is still counted as used. Furthermore, there's a bug - // here. If the region is old, we need to register the fill object. 
- } else if (!region->is_humongous() && (ac >= ShenandoahHeap::min_fill_size() * HeapWordSize)) { - // Don't fill humongous continuations - size_t fill_words = ac / HeapWordSize; - region->allocate_fill(fill_words); -#endif } if (region->is_old()) { old_collector_used += region_size_bytes; @@ -2513,32 +2182,6 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r // No need to update generation sizes here. These are the sizes already recognized by the generations. These // adjustments allow the freeset tallies to match the generation tallies. -#ifdef KELVIN_ENHANCEMENTS - log_info(gc, free)(" At end of prep_to_rebuild, mutator_leftmost: %zu" - ", mutator_rightmost: %zu" - ", mutator_leftmost_empty: %zu" - ", mutator_rightmost_empty: %zu" - ", mutator_regions: %zu" - ", mutator_used: %zu", - mutator_leftmost, mutator_rightmost, mutator_leftmost_empty, mutator_rightmost_empty, - mutator_regions, mutator_used); - log_info(gc, free)(" old_collector_leftmost: %zu" - ", old_collector_rightmost: %zu" - ", old_collector_leftmost_empty: %zu" - ", old_collector_rightmost_empty: %zu" - ", old_collector_regions: %zu" - ", old_collector_used: %zu", - old_collector_leftmost, old_collector_rightmost, old_collector_leftmost_empty, old_collector_rightmost_empty, - old_collector_regions, old_collector_used); - log_info(gc, free)(" total_mutator_regions: %zu" - ", total_collector_regions: %zu" - ", total_old_collector_regions: %zu" - ", mutator_empty: %zu" - ", collector_empty: %zu" - ", old_collector_empty: %zu", - total_mutator_regions, (size_t) 0, total_old_collector_regions, - mutator_empty, collector_empty, old_collector_empty); -#endif log_debug(gc, free)(" At end of prep_to_rebuild, mutator_leftmost: %zu" ", mutator_rightmost: %zu" ", mutator_leftmost_empty: %zu" @@ -2579,16 +2222,6 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r recompute_total_used(); recompute_total_affiliated(); _partitions.assert_bounds(true); -#ifdef KELVIN_OUT_WITH_THE_OLD - // Update generations to assure consistency while we still hold the lock. This handles case that someone consults - // generation sizes between now and start of finish_rebuild. This may release from old memory that we intend to reserve - // for the old collector. 
- establish_generation_sizes(total_mutator_regions + young_cset_regions, total_old_collector_regions + old_cset_regions, - affiliated_mutator_regions + young_cset_regions, - affiliated_old_collector_regions + old_cset_regions, - mutator_used + young_cset_regions * region_size_bytes, - old_collector_used + old_cset_regions * region_size_bytes); -#endif #ifdef ASSERT if (_heap->mode()->is_generational()) { assert(young_affiliated_regions() == _heap->young_generation()->get_affiliated_region_count(), "sanity"); @@ -2609,14 +2242,6 @@ void ShenandoahFreeSet::transfer_humongous_regions_from_mutator_to_old_collector shenandoah_assert_heaplocked(); size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); -#ifdef KELVIN_HUMONGOUS_WASTE - log_info(gc)("FreeSet humongous promotion, with waste %zu", humongous_waste_bytes); -#endif - -#ifdef KELVIN_REGION_COUNTS - log_info(gc)("Transferring %zu humongous regions from mutator to old (promoting)", xfer_regions); -#endif - _partitions.decrease_humongous_waste(ShenandoahFreeSetPartitionId::Mutator, humongous_waste_bytes); _partitions.decrease_used(ShenandoahFreeSetPartitionId::Mutator, xfer_regions * region_size_bytes); _partitions.decrease_total_region_counts(ShenandoahFreeSetPartitionId::Mutator, xfer_regions); @@ -2856,11 +2481,6 @@ void ShenandoahFreeSet::move_regions_from_collector_to_mutator(size_t max_xfer_r transfer_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPartitionId::OldCollector, max_xfer_regions, old_collector_xfer); max_xfer_regions -= old_collector_regions; -#ifdef KELVIN_OUT_WITH_THE_OLD - if (old_collector_regions > 0) { - ShenandoahGenerationalHeap::cast(_heap)->generation_sizer()->transfer_to_young(old_collector_regions); - } -#endif } // If there are any non-empty regions within Collector partition, we can also move them to the Mutator free partition @@ -2890,99 +2510,10 @@ void ShenandoahFreeSet::prepare_to_rebuild(size_t &young_trashed_regions, size_t // This places regions that have alloc_capacity into the old_collector set if they identify as is_old() or the // mutator set otherwise. All trashed (cset) regions are affiliated young and placed in mutator set. - find_regions_with_alloc_capacity(young_trashed_regions, old_trashed_regions, first_old_region, last_old_region, old_region_count); -} - -#ifdef KELVIN_OUT_WITH_THE_OLD -// The totals reported here anticipate the recycling of trash regions. Their memory is counted as unused and fully -// available at this moment in time, even though the memory cannot be re-allocated until after it is recycled. 
-void ShenandoahFreeSet::establish_generation_sizes(size_t young_region_count, size_t old_region_count, - size_t affiliated_young_regions, size_t affiliated_old_regions, - size_t young_used_bytes, size_t old_used_bytes) { - assert(young_region_count + old_region_count == ShenandoahHeap::heap()->num_regions(), "Sanity"); -#undef KELVIN_RESERVE -#ifdef KELVIN_RESERVE - log_info(gc)("establish_generation_sizes(young_region_count: %zu, old_region_count: %zu, ", - young_region_count, old_region_count); - log_info(gc)(" young_used_bytes: %zu, old_used_bytes: %zu)", - young_used_bytes, old_used_bytes); -#endif - ShenandoahHeap* heap = ShenandoahHeap::heap(); - size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); - if (heap->mode()->is_generational()) { - ShenandoahGenerationalHeap* gen_heap = (ShenandoahGenerationalHeap*) heap; - ShenandoahOldGeneration* old_gen = gen_heap->old_generation(); - ShenandoahYoungGeneration* young_gen = gen_heap->young_generation(); - ShenandoahGeneration* global_gen = gen_heap->global_generation(); - - size_t original_old_capacity = old_gen->max_capacity(); - size_t new_old_capacity = old_region_count * region_size_bytes; - size_t new_young_capacity = young_region_count * region_size_bytes; - old_gen->set_capacity(new_old_capacity); - old_gen->set_used(affiliated_old_regions, old_used_bytes); - young_gen->set_capacity(new_young_capacity); - young_gen->set_used(affiliated_young_regions, young_used_bytes); - global_gen->set_capacity(new_young_capacity + new_old_capacity); - global_gen->set_used(affiliated_young_regions + affiliated_old_regions, young_used_bytes + old_used_bytes); - - if (new_old_capacity > original_old_capacity) { - size_t region_count = (new_old_capacity - original_old_capacity) / region_size_bytes; - log_info(gc, ergo)("Transfer %zu region(s) from %s to %s, yielding increased size: " PROPERFMT, - region_count, young_gen->name(), old_gen->name(), PROPERFMTARGS(new_old_capacity)); - } else if (new_old_capacity < original_old_capacity) { - size_t region_count = (original_old_capacity - new_old_capacity) / region_size_bytes; - log_info(gc, ergo)("Transfer %zu region(s) from %s to %s, yielding increased size: " PROPERFMT, - region_count, old_gen->name(), young_gen->name(), PROPERFMTARGS(new_young_capacity)); - } - } else { - ShenandoahGeneration* global_gen = heap->global_generation(); - assert(global_gen->max_capacity() == young_region_count * region_size_bytes, "sanity"); - global_gen->set_used(affiliated_young_regions, young_used_bytes); - } + find_regions_with_alloc_capacity(young_trashed_regions, old_trashed_regions, + first_old_region, last_old_region, old_region_count); } -// As part of finish_rebuild(), we reestablish generation sizes, which were originally established during prepare_to_rebuild(). -// During finish_rebuild(), we will reserve regions for the collector and old collector by transferring some regions from -// the Mutator partition. Regions transferred from Mutator to Collector have no effect on generation sizes. Regions transferred -// from Mutator to OldCollector may increase capacity of old and decrease capacity of young, but will have no effect on used -// within either generation because only empty regions are transferred. 
-void ShenandoahFreeSet::reestablish_generation_sizes(size_t young_region_count, size_t old_region_count) { - - assert(young_region_count + old_region_count == ShenandoahHeap::heap()->num_regions(), "Sanity"); -#ifdef KELVIN_RESERVE - log_info(gc)("reestablish_generation_sizes(young_region_count: %zu, old_region_count: %zu, ", - young_region_count, old_region_count); -#endif - if (ShenandoahHeap::heap()->mode()->is_generational()) { - ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap(); - ShenandoahOldGeneration* old_gen = heap->old_generation(); - ShenandoahYoungGeneration* young_gen = heap->young_generation(); - ShenandoahGeneration* global_gen = heap->global_generation(); - - size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); - - size_t original_old_capacity = old_gen->max_capacity(); - size_t new_old_capacity = old_region_count * region_size_bytes; - size_t new_young_capacity = young_region_count * region_size_bytes; - old_gen->set_capacity(new_old_capacity); - young_gen->set_capacity(new_young_capacity); - global_gen->set_capacity(new_young_capacity + new_old_capacity); - - if (new_old_capacity > original_old_capacity) { - size_t region_count = (new_old_capacity - original_old_capacity) / region_size_bytes; - log_info(gc, ergo)("Transfer %zu region(s) from %s to %s, yielding increased size: " PROPERFMT, - region_count, young_gen->name(), old_gen->name(), PROPERFMTARGS(new_old_capacity)); - } else if (new_old_capacity < original_old_capacity) { - size_t region_count = (original_old_capacity - new_old_capacity) / region_size_bytes; - log_info(gc, ergo)("Transfer %zu region(s) from %s to %s, yielding increased size: " PROPERFMT, - region_count, old_gen->name(), young_gen->name(), PROPERFMTARGS(new_young_capacity)); - } - // This balances generations, so clear any pending request to balance. - old_gen->set_region_balance(0); - } -} -#endif - void ShenandoahFreeSet::finish_rebuild(size_t young_trashed_regions, size_t old_trashed_regions, size_t old_region_count, bool have_evacuation_reserves) { shenandoah_assert_heaplocked(); @@ -3003,9 +2534,6 @@ void ShenandoahFreeSet::finish_rebuild(size_t young_trashed_regions, size_t old_ young_used_bytes, old_used_bytes); _total_young_regions = _heap->num_regions() - old_region_count; _total_global_regions = _heap->num_regions(); -#ifdef KELVIN_OUT_WITH_THE_OLD - reestablish_generation_sizes(_total_young_regions, old_region_count); -#endif establish_old_collector_alloc_bias(); _partitions.assert_bounds(true); log_status(); @@ -3030,11 +2558,6 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_trashed_regi shenandoah_assert_generational(); shenandoah_assert_heaplocked(); const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); -#undef KELVIN_RESERVE -#ifdef KELVIN_RESERVE - log_info(gc)("compute_young_and_old_reserve(young_trashed: %zu, old_trashed: %zu, have_reserves: %s)", - young_trashed_regions, old_trashed_regions, have_evacuation_reserves? 
"yes": "no"); -#endif ShenandoahOldGeneration* const old_generation = _heap->old_generation(); size_t old_available = old_generation->available(); size_t old_unaffiliated_regions = old_generation->free_unaffiliated_regions(); @@ -3042,11 +2565,6 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_trashed_regi size_t young_capacity = young_generation->max_capacity(); size_t young_unaffiliated_regions = young_generation->free_unaffiliated_regions(); -#ifdef KELVIN_RESERVE - log_info(gc)(" young_unaffiliated: %zu, capacity: %zu, old_unaffiliated: %zu, old_avail: %zu", - young_unaffiliated_regions, young_capacity, old_unaffiliated_regions, old_available); -#endif - // Add in the regions we anticipate to be freed by evacuation of the collection set old_unaffiliated_regions += old_trashed_regions; old_available += old_trashed_regions * region_size_bytes; @@ -3056,9 +2574,6 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_trashed_regi // The generation region transfers take place after we rebuild. old_region_balance represents number of regions // to transfer from old to young. ssize_t old_region_balance = old_generation->get_region_balance(); -#ifdef KELVIN_RESERVE - log_info(gc)(" old_region_balance: %zd", old_region_balance); -#endif if (old_region_balance != 0) { #ifdef ASSERT if (old_region_balance > 0) { @@ -3076,11 +2591,6 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_trashed_regi old_unaffiliated_regions -= old_region_balance; young_capacity += xfer_bytes; young_unaffiliated_regions += old_region_balance; - -#ifdef KELVIN_RESERVE - log_info(gc)(" xfer_bytes: %zd, adjusted old_available: %zu, old_unaffiliated: %zu, young_capacity: %zu, young_unaffiliated: %zu", - xfer_bytes, old_available, old_unaffiliated_regions, young_capacity, young_unaffiliated_regions); -#endif } // All allocations taken from the old collector set are performed by GC, generally using PLABs for both @@ -3093,10 +2603,6 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_trashed_regi const size_t old_evac_reserve = old_generation->get_evacuation_reserve(); young_reserve_result = young_generation->get_evacuation_reserve(); old_reserve_result = promoted_reserve + old_evac_reserve; -#ifdef KELVIN_RESERVE - log_info(gc)("have_evac_reserve, old_reserve_result (%zu), young_reserve_result (%zu)", - old_reserve_result, young_reserve_result); -#endif if (old_reserve_result > old_available) { // Try to transfer memory from young to old. 
size_t old_deficit = old_reserve_result - old_available; @@ -3108,13 +2614,6 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_trashed_regi old_unaffiliated_regions += old_region_deficit; old_region_balance -= old_region_deficit; old_generation->set_region_balance(old_region_balance); -#ifdef KELVIN_RESERVE - log_info(gc)(" overwrite old_gen->region_balance() to %zu", old_region_balance); - log_info(gc)(" old_reserve_result (%zu) > old_available (%zu), old_deficit: %zu, region_deficit: %zu", - old_reserve_result, old_available, old_deficit, old_region_deficit); - log_info(gc)(" young_unaffiliated: %zu, old_unaffiliated: %zu, old_region_balance: %zd", - young_unaffiliated_regions, old_unaffiliated_regions, old_region_balance); -#endif } } else { // We are rebuilding at end of GC, so we set aside budgets specified on command line (or defaults) @@ -3123,10 +2622,6 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_trashed_regi // Affiliated old-gen regions are already in the OldCollector free set. Add in the relevant number of // unaffiliated regions. old_reserve_result = old_available; -#ifdef KELVIN_RESERVE - log_info(gc)("have_evac_reserve, old_reserve_result (%zu) > young_reserve_result (%zu)", - old_reserve_result, young_reserve_result); -#endif } // Old available regions that have less than PLAB::min_size() of available memory are not placed into the OldCollector @@ -3137,16 +2632,10 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_trashed_regi _partitions.capacity_of(ShenandoahFreeSetPartitionId::OldCollector) + old_unaffiliated_regions * region_size_bytes) { old_reserve_result = _partitions.capacity_of(ShenandoahFreeSetPartitionId::OldCollector) + old_unaffiliated_regions * region_size_bytes; -#ifdef KELVIN_RESERVE - log_info(gc)(" downsizing old_reserve_result due to capacity limit: %zu", old_reserve_result); -#endif } if (young_reserve_result > young_unaffiliated_regions * region_size_bytes) { young_reserve_result = young_unaffiliated_regions * region_size_bytes; -#ifdef KELVIN_RESERVE - log_info(gc)(" downsizing young_reserve_result due to unaffilaited limit: %zu", young_reserve_result); -#endif } } @@ -3165,12 +2654,6 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old young_used_bytes = 0; old_used_bytes = 0; -#undef KELVIN_RESERVE -#ifdef KELVIN_RESERVE - log_info(gc)("reserve_regions(to_reserve: %zu, to_reserve_old: %zu", to_reserve, to_reserve_old); -#endif -#undef KELVIN_RESERVE - idx_t mutator_low_idx = _partitions.max(); idx_t mutator_high_idx = -1; idx_t mutator_empty_low_idx = _partitions.max(); @@ -3196,11 +2679,6 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old size_t old_collector_available = _partitions.available_in(ShenandoahFreeSetPartitionId::OldCollector);; size_t collector_available = _partitions.available_in(ShenandoahFreeSetPartitionId::Collector); -#undef KELVIN_RESERVE -#ifdef KELVIN_RESERVE - log_info(gc)("reserve_regions() seeks to_reserve: %zu and to_reserve_old: %zu", to_reserve, to_reserve_old); -#endif - for (size_t i = _heap->num_regions(); i > 0; i--) { idx_t idx = i - 1; ShenandoahHeapRegion* r = _heap->get_region(idx); @@ -3213,20 +2691,6 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old bool move_to_old_collector = old_collector_available < to_reserve_old; bool move_to_collector = collector_available < to_reserve; -#ifdef KELVIN_RESERVE - log_info(gc)("reserve_regions() region %zu, has 
ac: %zu, old_avail: %zu, collector_avail: %zu, move old: %s, move young: %s", - idx, ac, old_collector_available, collector_available, move_to_old_collector? "yes": "no", - move_to_collector? "yes": "no"); -#endif - -#ifdef KELVIN_DEPRECATE - // We have to iterate through all regions in order to update generation accounting - if (!move_to_collector && !move_to_old_collector) { - // We've satisfied both to_reserve and to_reserved_old - break; - } -#endif - if (move_to_old_collector) { // We give priority to OldCollector partition because we desire to pack OldCollector regions into higher // addresses than Collector regions. Presumably, OldCollector regions are more "stable" and less likely to @@ -3325,11 +2789,6 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old if (ac != region_size_bytes) { young_used_regions++; young_used_bytes += region_size_bytes - ac; - -#ifdef KELVIN_RESERVE - log_info(gc)(" region is not moved:, young_used_regions increments to: %zu, young_used_bytes becomes: %zu", - young_used_regions, young_used_bytes); -#endif } } else { // Region is not in Mutator partition. Do the accounting. @@ -3340,20 +2799,12 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old if (ac != region_size_bytes) { young_used_regions++; young_used_bytes = region_size_bytes - ac; -#ifdef KELVIN_RESERVE - log_info(gc)(" Collector Region %zu not in Mutator, ac: %zu, incr young regions to: %zu, young_used_bytes: %zu", - idx, ac, young_used_regions, young_used_bytes); -#endif } // else, unaffiliated region has no used } else if (p == ShenandoahFreeSetPartitionId::OldCollector) { if (ac != region_size_bytes) { old_used_regions++; old_used_bytes = region_size_bytes - ac; -#ifdef KELVIN_RESERVE - log_info(gc)(" OldCollector Region %zu not in Mutator, ac: %zu, incr old regions to: %zu, old_used_bytes: %zu", - idx, ac, old_used_regions, old_used_bytes); -#endif } // else, unaffiliated region has no used } else if (p == ShenandoahFreeSetPartitionId::NotFree) { @@ -3361,20 +2812,10 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old if (r->is_old()) { old_used_regions++; old_used_bytes += region_size_bytes - ac; -#ifdef KELVIN_RESERVE - log_info(gc)(" Retired old region %zu not in Mutator, ac: %zu, incr old regions to: %zu, old_used_bytes: %zu", - idx, ac, old_used_regions, old_used_bytes); -#endif - - // KELVIN TODO: do we add alignment padding into old_used_bytes? 
} else { assert(r->is_young(), "Retired region should be old or young"); young_used_regions++; young_used_bytes += region_size_bytes - ac; -#ifdef KELVIN_RESERVE - log_info(gc)(" Retired young region %zu not in Mutator, ac: %zu, incr young regions to: %zu, young_used_bytes: %zu", - idx, ac, young_used_regions, young_used_bytes); -#endif } } else { assert(p == ShenandoahFreeSetPartitionId::OldCollector, "Not mutator and not NotFree, so must be OldCollector"); @@ -3431,10 +2872,6 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old _partitions.expand_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId::OldCollector, old_collector_low_idx, old_collector_high_idx, old_collector_empty_low_idx, old_collector_empty_high_idx); -#ifdef KELVIN_RESERVE - log_info(gc)("reserve_regions() establishes Mutator interval(low: %zu, high: %zu, low_empty: %zu, high_empty: %zu)", - mutator_low_idx, mutator_high_idx, mutator_empty_low_idx, mutator_empty_high_idx); -#endif _partitions.establish_interval(ShenandoahFreeSetPartitionId::Mutator, mutator_low_idx, mutator_high_idx, mutator_empty_low_idx, mutator_empty_high_idx); @@ -3453,11 +2890,6 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old PROPERFMTARGS(to_reserve), PROPERFMTARGS(reserve)); } } -#ifdef KELVIN_RESERVE - log_info(gc)("reserve_regions end: old_region_count: %zu, young_used_regions: %zu," - " old_used_regions: %zu, young_used_bytes: %zu, old_used_bytes: %zu", - old_region_count, young_used_regions, old_used_regions, young_used_bytes, old_used_bytes); -#endif } void ShenandoahFreeSet::establish_old_collector_alloc_bias() { @@ -3608,11 +3040,6 @@ void ShenandoahFreeSet::log_status() { } total_used += used_in_region; total_free += free; -#undef KELVIN_DEBUG -#ifdef KELVIN_DEBUG - log_info(gc)("%sMutator region %zu has free: %zu, used: %zu, total_free: %zu, total_used: %zu", - r->is_trash()? "Trashed ": "", r->index(), free, used_in_region, total_free, total_used); -#endif max_contig = MAX2(max_contig, empty_contig); last_idx = idx; } @@ -3622,29 +3049,6 @@ void ShenandoahFreeSet::log_status() { // capacity() is capacity of mutator // used() is used of mutator size_t free = capacity() - used(); -#ifdef KELVIN_DEBUG - // kelvin problem: trashed regions should not count as used, - // because this gets subtracted from capacity to wrongly compute free. 
- log_info(gc)("capacity(): %zu, used: %zu", capacity(), used()); - log_info(gc)("capacity_of(Mutator): %zu, used_by(Mutator): %zu", - _partitions.capacity_of(ShenandoahFreeSetPartitionId::Mutator), - _partitions.used_by(ShenandoahFreeSetPartitionId::Mutator)); - log_info(gc)("capacity_of(Collector): %zu, used_by(Collector): %zu", - _partitions.capacity_of(ShenandoahFreeSetPartitionId::Collector), - _partitions.used_by(ShenandoahFreeSetPartitionId::Collector)); - log_info(gc)("capacity_of(OldCollector): %zu, used_by(OldCollector): %zu", - _partitions.capacity_of(ShenandoahFreeSetPartitionId::OldCollector), - _partitions.used_by(ShenandoahFreeSetPartitionId::OldCollector)); - - ShenandoahYoungGeneration* young_gen = ShenandoahHeap::heap()->young_generation(); - size_t total_capacity = (_partitions.get_total_region_counts(ShenandoahFreeSetPartitionId::Mutator) - * ShenandoahHeapRegion::region_size_bytes()); - assert(total_capacity = capacity(), "Total capacity (%zu) does not match capacity (%zu)", - total_capacity, capacity()); - assert(used() == young_gen->used() - _partitions.used_by(ShenandoahFreeSetPartitionId::Collector), - "Used by Mutator is young used minus Collector used"); -#endif - // Since certain regions that belonged to the Mutator free partition at the time of most recent rebuild may have been // retired, the sum of used and capacities within regions that are still in the Mutator free partition may not match // my internally tracked values of used() and free(). @@ -3731,16 +3135,6 @@ void ShenandoahFreeSet::decrease_humongous_waste_for_regular_bypass(ShenandoahHe _partitions.decrease_used(p, waste); _partitions.unretire_to_partition(r, p); } -#ifdef KELVIN_DEPRECATE - // We no longer fill remnant memory in NotFree regions. We do still count the remnant memory as used. -#ifdef ASSERT - else if (waste >= ShenandoahHeap::min_fill_size() * HeapWordSize) { - // Fill the unused memory so that verification will not be confused by inconsistent tallies of used - size_t fill_words = waste / HeapWordSize; - r->allocate_fill(fill_words); - } -#endif -#endif _total_humongous_waste -= waste; recompute_total_used(); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp index 7baeb99d6fbd7..665917156f846 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp @@ -30,9 +30,6 @@ #include "gc/shenandoah/shenandoahHeapRegionSet.hpp" #include "gc/shenandoah/shenandoahSimpleBitMap.hpp" - -#undef KELVIN_HUMONGOUS_WASTE - // Each ShenandoahHeapRegion is associated with a ShenandoahFreeSetPartitionId. enum class ShenandoahFreeSetPartitionId : uint8_t { Mutator, // Region is in the Mutator free set: available memory is available to mutators. 
@@ -319,12 +316,6 @@ class ShenandoahRegionPartitions { _used[int(which_partition)] -= bytes; _available[int(which_partition)] += bytes; -#undef KELVIN_USED_PARTITION -#ifdef KELVIN_USED_PARTITION - extern const char* _shenandoah_partition_name(ShenandoahFreeSetPartitionId t); - log_info(gc)("ShenRegionPartitions %s decrease_used(%zu) to %zu, available grows to %zu", - _shenandoah_partition_name(which_partition), bytes, _used[int(which_partition)], _available[int(which_partition)]); -#endif } inline size_t get_used(ShenandoahFreeSetPartitionId which_partition); @@ -335,12 +326,6 @@ class ShenandoahRegionPartitions { assert (which_partition < NumPartitions, "Partition must be valid"); assert(_humongous_waste[int(which_partition)] >= bytes, "Cannot decrease waste beyond what is there"); _humongous_waste[int(which_partition)] -= bytes; -#undef KELVIN_HUMONGOUS_WASTE -#ifdef KELVIN_HUMONGOUS_WASTE - extern const char* _shenandoah_humongous_partition_name(ShenandoahFreeSetPartitionId t); - log_info(gc)("FreeSet<%s>::decrease_humongous_waste(%zu) yields: %zu", _shenandoah_humongous_partition_name(which_partition), - bytes, _humongous_waste[int(which_partition)]); -#endif } inline size_t get_humongous_waste(ShenandoahFreeSetPartitionId which_partition); @@ -485,17 +470,6 @@ class ShenandoahFreeSet : public CHeapObj { size_t region_size_bytes = _partitions.region_size_bytes(); _total_young_used = (_partitions.used_by(ShenandoahFreeSetPartitionId::Mutator) + _partitions.used_by(ShenandoahFreeSetPartitionId::Collector)); -#undef KELVIN_USED -#ifdef KELVIN_USED - log_info(gc)(" recompute_total_young_used(): %zu from total regions M: %zu, C: %zu, allocatable regions M: %zu, C: %zu, " - "M used: %zu, C used: %zu", _total_young_used, - _partitions.get_total_region_counts(ShenandoahFreeSetPartitionId::Mutator), - _partitions.get_total_region_counts(ShenandoahFreeSetPartitionId::Collector), - _partitions.get_region_counts(ShenandoahFreeSetPartitionId::Mutator), - _partitions.get_region_counts(ShenandoahFreeSetPartitionId::Collector), - _partitions.used_by(ShenandoahFreeSetPartitionId::Mutator), - _partitions.used_by(ShenandoahFreeSetPartitionId::Collector)); -#endif } // bytes used by old @@ -504,9 +478,6 @@ class ShenandoahFreeSet : public CHeapObj { shenandoah_assert_heaplocked(); size_t region_size_bytes = _partitions.region_size_bytes(); _total_old_used =_partitions.used_by(ShenandoahFreeSetPartitionId::OldCollector); -#ifdef KELVIN_USED - log_info(gc)(" recompute_total_old_used(): %zu", _total_old_used); -#endif } // bytes used by global @@ -515,9 +486,6 @@ class ShenandoahFreeSet : public CHeapObj { inline void recompute_total_global_used() { shenandoah_assert_heaplocked(); _total_global_used = _total_young_used + _total_old_used; -#ifdef KELVIN_USED - log_info(gc)("recompute_total_global_used(): %zu", _total_global_used); -#endif } inline void recompute_total_used() { @@ -550,11 +518,6 @@ class ShenandoahFreeSet : public CHeapObj { _global_unaffiliated_regions = _young_unaffiliated_regions + _partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::OldCollector); _global_affiliated_regions = _young_affiliated_regions + _old_affiliated_regions; -#undef KELVIN_AFFILIATED -#ifdef KELVIN_AFFILIATED - log_info(gc)("recompute_affiliated(young: %zu, old: %zu, global: %zu)", - _young_affiliated_regions, _old_affiliated_regions, _global_affiliated_regions); -#endif #ifdef ASSERT if (ShenandoahHeap::heap()->mode()->is_generational()) { assert(_young_affiliated_regions * 
ShenandoahHeapRegion::region_size_bytes() >= _total_young_used, "sanity"); @@ -641,15 +604,6 @@ class ShenandoahFreeSet : public CHeapObj { // Determine whether we prefer to allocate from left to right or from right to left within the OldCollector free-set. void establish_old_collector_alloc_bias(); - -#ifdef KELVIN_OUT_WITH_THE_OLD - // Set max_capacity for young and old generations - void establish_generation_sizes(size_t young_region_count, size_t old_region_count, - size_t affiliated_young_regions, size_t affiliated_old_regions, - size_t young_used_bytes, size_t old_used_bytes); - - void reestablish_generation_sizes(size_t young_region_count, size_t old_region_count); -#endif size_t get_usable_free_words(size_t free_bytes) const; // log status, assuming lock has already been acquired by the caller. diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp index d6e511d268a70..cbc9f0a4e1d37 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp @@ -253,21 +253,8 @@ void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) { phase4_compact_objects(worker_slices); -#ifdef KELVIN_OUT_WITH_THE_OLD - result = phase5_epilog(); -#else phase5_epilog(); -#endif } -#ifdef KELVIN_OUT_WITH_THE_OLD - if (heap->mode()->is_generational()) { - LogTarget(Info, gc, ergo) lt; - if (lt.is_enabled()) { - LogStream ls(lt); - result.print_on("Full GC", &ls); - } - } -#endif // Resize metaspace MetaspaceGC::compute_new_size(); @@ -999,25 +986,6 @@ class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure { r->set_live_data(live); r->reset_alloc_metadata(); } - -#ifdef KELVIN_OUT_WITH_THE_OLD - void update_generation_usage() { - if (_is_generational) { - _heap->old_generation()->establish_usage(_old_regions, _old_usage, _old_humongous_waste); - _heap->young_generation()->establish_usage(_young_regions, _young_usage, _young_humongous_waste); - } else { - assert(_old_regions == 0, "Old regions only expected in generational mode"); - assert(_old_usage == 0, "Old usage only expected in generational mode"); - assert(_old_humongous_waste == 0, "Old humongous waste only expected in generational mode"); - } - - // In generational mode, global usage should be the sum of young and old. This is also true - // for non-generational modes except that there are no old regions. 
- _heap->global_generation()->establish_usage(_old_regions + _young_regions, - _old_usage + _young_usage, - _old_humongous_waste + _young_humongous_waste); - } -#endif }; void ShenandoahFullGC::compact_humongous_objects() { @@ -1136,11 +1104,7 @@ void ShenandoahFullGC::phase4_compact_objects(ShenandoahHeapRegionSet** worker_s } } -#ifdef KELVIN_OUT_WITH_THE_OLD -ShenandoahGenerationalHeap::TransferResult ShenandoahFullGC::phase5_epilog() { -#else void ShenandoahFullGC::phase5_epilog() { -#endif GCTraceTime(Info, gc, phases) time("Phase 5: Full GC epilog", _gc_timer); ShenandoahHeap* heap = ShenandoahHeap::heap(); ShenandoahGenerationalHeap::TransferResult result; @@ -1158,13 +1122,6 @@ void ShenandoahFullGC::phase5_epilog() { ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_rebuild); ShenandoahPostCompactClosure post_compact; heap->heap_region_iterate(&post_compact); -#ifdef KELVIN_OUT_WITH_THE_OLD - post_compact.update_generation_usage(); - if (heap->mode()->is_generational()) { - ShenandoahGenerationalFullGC::balance_generations_after_gc(heap); - } -#endif - heap->collection_set()->clear(); size_t young_cset_regions, old_cset_regions; size_t first_old, last_old, num_old; @@ -1190,12 +1147,6 @@ void ShenandoahFullGC::phase5_epilog() { // We defer generation resizing actions until after cset regions have been recycled. We do this even following an // abbreviated cycle. if (heap->mode()->is_generational()) { -#ifdef KELVIN_OUT_WITH_THE_OLD - result = ShenandoahGenerationalFullGC::balance_generations_after_rebuilding_free_set(); -#endif ShenandoahGenerationalFullGC::rebuild_remembered_set(heap); } -#ifdef KELVIN_OUT_WITH_THE_OLD - return result; -#endif } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.hpp b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.hpp index 7ec55240ae835..45ed341de1799 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.hpp @@ -82,11 +82,7 @@ class ShenandoahFullGC : public ShenandoahGC { void phase2_calculate_target_addresses(ShenandoahHeapRegionSet** worker_slices); void phase3_update_references(); void phase4_compact_objects(ShenandoahHeapRegionSet** worker_slices); -#ifdef KELVIN_OUT_WITH_THE_OLD - ShenandoahGenerationalHeap::TransferResult phase5_epilog(); -#else void phase5_epilog(); -#endif void distribute_slices(ShenandoahHeapRegionSet** worker_slices); void calculate_target_humongous_objects(); void compact_humongous_objects(); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp index 8533d2a1d7f81..2aa027c03713c 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp @@ -148,20 +148,6 @@ ShenandoahHeuristics* ShenandoahGeneration::initialize_heuristics(ShenandoahMode return _heuristics; } -#ifdef KELVIN_OUT_WITH_THE_OLD -size_t ShenandoahGeneration::bytes_allocated_since_gc_start() const { - return Atomic::load(&_bytes_allocated_since_gc_start); -} - -void ShenandoahGeneration::reset_bytes_allocated_since_gc_start() { - Atomic::store(&_bytes_allocated_since_gc_start, (size_t)0); -} - -void ShenandoahGeneration::increase_allocated(size_t bytes) { - Atomic::add(&_bytes_allocated_since_gc_start, bytes, memory_order_relaxed); -} -#endif - void ShenandoahGeneration::set_evacuation_reserve(size_t new_val) { shenandoah_assert_heaplocked(); _evacuation_reserve = new_val; @@ -475,31 +461,11 @@ void 
ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* const heap, size_t excess_regions = excess_old / region_size_bytes; regions_to_xfer = MIN2(excess_regions, unaffiliated_old_regions); } - -#ifdef KELVIN_OUT_WITH_THE_OLD - if (regions_to_xfer > 0) { - // kelvin is here: how does this affect freeset when transfer_to_young is deprecated? - // we are adjusting evacuation budget after choosing the collection set. - - // momentarily, we will rebuild the free se. - - bool result = ShenandoahGenerationalHeap::cast(heap)->generation_sizer()->transfer_to_young(regions_to_xfer); - assert(excess_old >= regions_to_xfer * region_size_bytes, - "Cannot transfer (%zu, %zu) more than excess old (%zu)", - regions_to_xfer, region_size_bytes, excess_old); - excess_old -= regions_to_xfer * region_size_bytes; - log_debug(gc, ergo)("%s transferred %zu excess regions to young before start of evacuation", - result? "Successfully": "Unsuccessfully", regions_to_xfer); - } -#else - // kelvin conjecture: we do not need to transfer regions. we just need to adjust excess_old. that will - // cause regions to transfer after we rebuild the freeset. if (regions_to_xfer > 0) { excess_old -= regions_to_xfer * region_size_bytes; log_debug(gc, ergo)("Before start of evacuation, total_promotion reserve is young_advance_promoted_reserve: %zu " "plus excess: old: %zu", young_advance_promoted_reserve_used, excess_old); } -#endif // Add in the excess_old memory to hold unanticipated promotions, if any. If there are more unanticipated // promotions than fit in reserved memory, they will be deferred until a future GC pass. @@ -648,10 +614,6 @@ size_t ShenandoahGeneration::select_aged_regions(size_t old_available) { // This region was already not in the Collector or Mutator set, so no need to remove it. assert(free_set->membership(i) == ShenandoahFreeSetPartitionId::NotFree, "sanity"); } -#ifdef KELVIN_OUT_WITH_THE_OLD - // Even when we do not fill the remnant, we count the remnant as used - young_gen->increase_used(remnant_bytes); -#endif } // Else, we do not promote this region (either in place or by copy) because it has received new allocations. @@ -892,9 +854,6 @@ ShenandoahGeneration::ShenandoahGeneration(ShenandoahGenerationType type, _ref_processor(new ShenandoahReferenceProcessor(MAX2(max_workers, 1U))), _affiliated_region_count(0), _humongous_waste(0), _evacuation_reserve(0), _used(0), -#ifdef KELVIN_OUT_WITH_THE_OLD - _bytes_allocated_since_gc_start(0), -#endif _max_capacity(max_capacity), _free_set(nullptr), _heuristics(nullptr) @@ -947,117 +906,6 @@ void ShenandoahGeneration::scan_remembered_set(bool is_concurrent) { } } -#ifdef KELVIN_OUT_WITH_THE_OLD -size_t ShenandoahGeneration::increment_affiliated_region_count() { - shenandoah_assert_heaplocked_or_safepoint(); - // During full gc, multiple GC worker threads may change region affiliations without a lock. No lock is enforced - // on read and write of _affiliated_region_count. At the end of full gc, a single thread overwrites the count with - // a coherent value. - size_t result = Atomic::add(&_affiliated_region_count, (size_t) 1); -#undef KELVIN_AFFILIATED -#ifdef KELVIN_AFFILIATED - log_info(gc)("%s: increment_affiliated_region_count() by 1: %zu", name(), result); -#endif - return result; -} - -size_t ShenandoahGeneration::decrement_affiliated_region_count() { - shenandoah_assert_heaplocked_or_safepoint(); - // During full gc, multiple GC worker threads may change region affiliations without a lock. 
No lock is enforced - // on read and write of _affiliated_region_count. At the end of full gc, a single thread overwrites the count with - // a coherent value. - auto affiliated_region_count = Atomic::sub(&_affiliated_region_count, (size_t) 1); - assert(ShenandoahHeap::heap()->is_full_gc_in_progress() || - (used() <= affiliated_region_count * ShenandoahHeapRegion::region_size_bytes()), - "used + humongous cannot exceed regions"); -#ifdef KELVIN_AFFILIATED - log_info(gc)("%s: decrement_affiliated_region_count() by 1: %zu", name(), affiliated_region_count); -#endif - return affiliated_region_count; -} - -size_t ShenandoahGeneration::decrement_affiliated_region_count_without_lock() { - size_t result = Atomic::sub(&_affiliated_region_count, (size_t) 1); -#ifdef KELVIN_AFFILIATED - log_info(gc)("%s: decrement_affiliated_region_count_without_lock() by 1: %zu", name(), result); -#endif - return result; -} - -size_t ShenandoahGeneration::increase_affiliated_region_count(size_t delta) { - shenandoah_assert_heaplocked_or_safepoint(); - size_t result = Atomic::add(&_affiliated_region_count, delta); -#ifdef KELVIN_AFFILIATED - log_info(gc)("%s: increase_affiliated_region_count() by %zu: %zu", name(), delta, result); -#endif - return result; -} - -size_t ShenandoahGeneration::decrease_affiliated_region_count(size_t delta) { - shenandoah_assert_heaplocked_or_safepoint(); - assert(Atomic::load(&_affiliated_region_count) >= delta, "Affiliated region count cannot be negative"); - - auto const affiliated_region_count = Atomic::sub(&_affiliated_region_count, delta); - assert(ShenandoahHeap::heap()->is_full_gc_in_progress() || - (_used <= affiliated_region_count * ShenandoahHeapRegion::region_size_bytes()), - "used + humongous cannot exceed regions"); -#ifdef KELVIN_AFFILIATED - log_info(gc)("%s: decrease_affiliated_region_count() by %zu: %zu", name(), delta, affiliated_region_count); -#endif - return affiliated_region_count; -} - -void ShenandoahGeneration::establish_usage(size_t num_regions, size_t num_bytes, size_t humongous_waste) { - assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint"); - Atomic::store(&_affiliated_region_count, num_regions); - Atomic::store(&_used, num_bytes); - _humongous_waste = humongous_waste; -#ifdef KELVIN_AFFILIATED - log_info(gc)("%s: establish_usage(affiliated regions: %zu bytes: %zu, humongous_waste: %zu)", - name(), num_regions, num_bytes, humongous_waste); -#endif -} - -void ShenandoahGeneration::increase_used(size_t bytes) { - Atomic::add(&_used, bytes); -#undef KELVIN_MONITOR_USED -#ifdef KELVIN_MONITOR_USED - log_info(gc)("Generation %s increase_used(%zu) to %zu", shenandoah_generation_name(_type), bytes, _used); -#endif -} - -void ShenandoahGeneration::decrease_used(size_t bytes) { - assert(ShenandoahHeap::heap()->is_full_gc_in_progress() || - (_used >= bytes), "cannot reduce bytes used by generation below zero"); - Atomic::sub(&_used, bytes); -#ifdef KELVIN_MONITOR_USED - log_info(gc)("Generation %s decrease_used(%zu) to %zu", shenandoah_generation_name(_type), bytes, _used); -#endif -} - -void ShenandoahGeneration::increase_humongous_waste(size_t bytes) { - if (bytes > 0) { - Atomic::add(&_humongous_waste, bytes); -#ifdef KELVIN_MONITOR_HUMONGOUS - log_info(gc)("Generation %s humongous waste increased by %zu to %zu", - shenandoah_generation_name(_type), bytes, _humongous_waste); -#endif - } -} - -void ShenandoahGeneration::decrease_humongous_waste(size_t bytes) { - if (bytes > 0) { - assert(ShenandoahHeap::heap()->is_full_gc_in_progress() 
|| (_humongous_waste >= bytes), - "Waste (%zu) cannot be negative (after subtracting %zu)", _humongous_waste, bytes); - Atomic::sub(&_humongous_waste, bytes); -#ifdef KELVIN_MONITOR_HUMONGOUS - log_info(gc)("Generation %s humongous waste decreased by %zu to %zu", - shenandoah_generation_name(_type), bytes, _humongous_waste); -#endif - } -} -#endif - size_t ShenandoahGeneration::used_regions() const { size_t result; switch (_type) { @@ -1073,26 +921,6 @@ size_t ShenandoahGeneration::used_regions() const { result = _free_set->global_affiliated_regions(); break; } -#ifdef KELVIN_OUT_WITH_THE_OLD - size_t original_result = Atomic::load(&_affiliated_region_count); -#ifdef KELVIN_SCAFFOLDING - static int problem_count = 0; - if (result != original_result) { - log_info(gc)("Problem with used regions for generation %s, freeset thinks %zu, generation thinks: %zu", - shenandoah_generation_name(_type), result, original_result); - if (problem_count++ > 8) { - assert(result == original_result, "Out of sync in used_regions for generation %s, freeset: %zu, generation: %zu", - shenandoah_generation_name(_type), result, original_result); - } - } else { - if (problem_count > 0) { - problem_count = 0; - log_info(gc)("used regions for generation %s is back in sync: %zu", - shenandoah_generation_name(_type), result); - } - } -#endif -#endif return result; } @@ -1111,11 +939,6 @@ size_t ShenandoahGeneration::max_capacity() const { total_regions = _free_set->total_global_regions(); break; } -#undef KELVIN_AVAILABLE -#ifdef KELVIN_AVAILABLE - log_info(gc)("max_capacity(_type: %d) returns %zu (%zu * %zu)", _type, total_regions * ShenandoahHeapRegion::region_size_bytes(), - total_regions, ShenandoahHeapRegion::region_size_bytes()); -#endif return total_regions * ShenandoahHeapRegion::region_size_bytes(); } @@ -1134,10 +957,6 @@ size_t ShenandoahGeneration::free_unaffiliated_regions() const { free_regions = _free_set->global_unaffiliated_regions(); break; } -#undef KELVIN_UNAFFILIATED -#ifdef KELVIN_UNAFFILIATED - log_info(gc)("free_unaffiliated_regions(_type == %d) returns %zu", _type, free_regions); -#endif return free_regions; } @@ -1174,61 +993,9 @@ size_t ShenandoahGeneration::soft_available() const { size_t ShenandoahGeneration::available(size_t capacity) const { size_t in_use = used(); -#ifdef KELVIN_AVAILABLE - log_info(gc)("ShenGen::available(%zu), with in_use: %zu", capacity, in_use); -#endif return in_use > capacity ? 0 : capacity - in_use; } -#ifdef KELVIN_OUT_WITH_THE_OLD -size_t ShenandoahGeneration::increase_capacity(size_t increment) { - shenandoah_assert_heaplocked_or_safepoint(); - - // We do not enforce that new capacity >= heap->max_size_for(this). The maximum generation size is treated as a rule of thumb - // which may be violated during certain transitions, such as when we are forcing transfers for the purpose of promoting regions - // in place. 
- assert(ShenandoahHeap::heap()->is_full_gc_in_progress() || - (_max_capacity + increment <= ShenandoahHeap::heap()->max_capacity()), "Generation cannot be larger than heap size"); - assert(increment % ShenandoahHeapRegion::region_size_bytes() == 0, "Generation capacity must be multiple of region size"); - _max_capacity += increment; - - // This detects arithmetic wraparound on _used - assert(ShenandoahHeap::heap()->is_full_gc_in_progress() || - (used_regions_size() >= used()), - "Affiliated regions must hold more than what is currently used"); - return _max_capacity; -} - -size_t ShenandoahGeneration::set_capacity(size_t byte_size) { - shenandoah_assert_heaplocked_or_safepoint(); - _max_capacity = byte_size; - return _max_capacity; -} - -size_t ShenandoahGeneration::decrease_capacity(size_t decrement) { - shenandoah_assert_heaplocked_or_safepoint(); - - // We do not enforce that new capacity >= heap->min_size_for(this). The minimum generation size is treated as a rule of thumb - // which may be violated during certain transitions, such as when we are forcing transfers for the purpose of promoting regions - // in place. - assert(decrement % ShenandoahHeapRegion::region_size_bytes() == 0, "Generation capacity must be multiple of region size"); - assert(_max_capacity >= decrement, "Generation capacity cannot be negative"); - - _max_capacity -= decrement; - - // This detects arithmetic wraparound on _used - assert(ShenandoahHeap::heap()->is_full_gc_in_progress() || - (used_regions_size() >= used()), - "Affiliated regions must hold more than what is currently used"); - assert(ShenandoahHeap::heap()->is_full_gc_in_progress() || - (_used <= _max_capacity), "Cannot use more than capacity"); - assert(ShenandoahHeap::heap()->is_full_gc_in_progress() || - (used_regions_size() <= _max_capacity), - "Cannot use more than capacity"); - return _max_capacity; -} -#endif - void ShenandoahGeneration::record_success_concurrent(bool abbreviated) { heuristics()->record_success_concurrent(); ShenandoahHeap::heap()->shenandoah_policy()->record_success_concurrent(is_young(), abbreviated); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp index d685ab53b1e4a..abbc3b5dbe9e8 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp @@ -70,9 +70,6 @@ class ShenandoahGeneration : public CHeapObj, public ShenandoahSpaceInfo { // Usage volatile size_t _used; -#ifdef KELVIN_OUT_WITH_THE_OLD - volatile size_t _bytes_allocated_since_gc_start; -#endif size_t _max_capacity; ShenandoahFreeSet* _free_set; ShenandoahHeuristics* _heuristics; @@ -150,32 +147,9 @@ class ShenandoahGeneration : public CHeapObj, public ShenandoahSpaceInfo { result = _free_set->global_used(); break; } - -#ifdef KELVIN_OUT_WITH_THE_OLD - size_t original_result = Atomic::load(&_used); -#undef KELVIN_SCAFFOLDING -#ifdef KELVIN_SCAFFOLDING - static int problem_count = 0; - if (result != original_result) { - if (problem_count++ > 6) { - assert(result == original_result, "Problem with used for generation %s, freeset thinks %zu, generation thinks: %zu", - shenandoah_generation_name(_type), result, original_result); - } else { - log_info(gc)("Problem with used for generation %s, freeset thinks %zu, generation thinks: %zu", - shenandoah_generation_name(_type), result, original_result); - } - } else { - if (problem_count > 0) { - log_info(gc)("Used for generation %s is back in sync: %zu", 
shenandoah_generation_name(_type), result); - } - problem_count = 0; - } -#endif -#endif return result; } - size_t available() const override; size_t available_with_reserve() const; size_t used_including_humongous_waste() const { @@ -199,26 +173,6 @@ class ShenandoahGeneration : public CHeapObj, public ShenandoahSpaceInfo { return 0; } } -#ifdef KELVIN_OUT_WITH_THE_OLD - void reset_bytes_allocated_since_gc_start(); - void increase_allocated(size_t bytes); - - // These methods change the capacity of the generation by adding or subtracting the given number of bytes from the current - // capacity, returning the capacity of the generation following the change. - size_t increase_capacity(size_t increment); - size_t decrease_capacity(size_t decrement); - - // Set the capacity of the generation, returning the value set - size_t set_capacity(size_t byte_size); - - void set_used(size_t affiliated_region_count, size_t byte_count) { - Atomic::store(&_used, byte_count); - Atomic::store(&_affiliated_region_count, affiliated_region_count); -#ifdef KELVIN_DEBUG - log_info(gc)("%s:set_used(regions: %zu, bytes: %zu)", shenandoah_generation_name(_type), affiliated_region_count, byte_count); -#endif - } -#endif void log_status(const char* msg) const; @@ -279,33 +233,6 @@ class ShenandoahGeneration : public CHeapObj, public ShenandoahSpaceInfo { // Scan remembered set at start of concurrent young-gen marking. void scan_remembered_set(bool is_concurrent); -#ifdef KELVIN_OUT_WITH_THE_OLD - // Return the updated value of affiliated_region_count - size_t increment_affiliated_region_count(); - - // Return the updated value of affiliated_region_count - size_t decrement_affiliated_region_count(); - // Same as decrement_affiliated_region_count, but w/o the need to hold heap lock before being called. 
- size_t decrement_affiliated_region_count_without_lock(); - - // Return the updated value of affiliated_region_count - size_t increase_affiliated_region_count(size_t delta); - - // Return the updated value of affiliated_region_count - size_t decrease_affiliated_region_count(size_t delta); - - size_t get_affiliated_region_count() const { - return Atomic::load(&_affiliated_region_count); - } - - void establish_usage(size_t num_regions, size_t num_bytes, size_t humongous_waste); - - void increase_used(size_t bytes); - void decrease_used(size_t bytes); - - void increase_humongous_waste(size_t bytes); - void decrease_humongous_waste(size_t bytes); -#else size_t get_affiliated_region_count() const { size_t result; switch (_type) { @@ -341,7 +268,6 @@ class ShenandoahGeneration : public CHeapObj, public ShenandoahSpaceInfo { } return result; } -#endif size_t get_humongous_waste() const { size_t result; @@ -358,14 +284,6 @@ class ShenandoahGeneration : public CHeapObj, public ShenandoahSpaceInfo { result = _free_set->total_humongous_waste(); break; } -#ifdef KELVIN_OUT_WITH_THE_OLD -#ifdef KELVIN_SCAFFOLDING - if (result != _humongous_waste) { - log_info(gc)("Generation %s expects consistency between humongous waste in free set (%zu) and in generation (%zu)", - shenandoah_generation_name(_type), result, _humongous_waste); - } -#endif -#endif return result; } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationSizer.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationSizer.cpp index ed5fe573b130a..640cf75ae41fe 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationSizer.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationSizer.cpp @@ -115,35 +115,6 @@ void ShenandoahGenerationSizer::heap_size_changed(size_t heap_size) { recalculate_min_max_young_length(heap_size / ShenandoahHeapRegion::region_size_bytes()); } -#ifdef KELVIN_OUT_WITH_THE_OLD -bool ShenandoahGenerationSizer::transfer_regions(ShenandoahGeneration* src, ShenandoahGeneration* dst, size_t regions) const { - const size_t bytes_to_transfer = regions * ShenandoahHeapRegion::region_size_bytes(); - - if (src->free_unaffiliated_regions() < regions) { - // Source does not have enough free regions for this transfer. The caller should have - // already capped the transfer based on available unaffiliated regions. - return false; - } - - if (dst->max_capacity() + bytes_to_transfer > max_size_for(dst)) { - // This transfer would cause the destination generation to grow above its configured maximum size. - return false; - } - - if (src->max_capacity() - bytes_to_transfer < min_size_for(src)) { - // This transfer would cause the source generation to shrink below its configured minimum size. 
- return false; - } - - src->decrease_capacity(bytes_to_transfer); - dst->increase_capacity(bytes_to_transfer); - const size_t new_size = dst->max_capacity(); - log_info(gc, ergo)("Transfer %zu region(s) from %s to %s, yielding increased size: " PROPERFMT, - regions, src->name(), dst->name(), PROPERFMTARGS(new_size)); - return true; -} -#endif - size_t ShenandoahGenerationSizer::max_size_for(ShenandoahGeneration* generation) const { switch (generation->type()) { case YOUNG: @@ -172,34 +143,6 @@ size_t ShenandoahGenerationSizer::min_size_for(ShenandoahGeneration* generation) } } -#ifdef KELVIN_OUT_WITH_THE_OLD -// Returns true iff transfer is successful -bool ShenandoahGenerationSizer::transfer_to_old(size_t regions) const { - ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap(); - return transfer_regions(heap->young_generation(), heap->old_generation(), regions); -} - -// This is used when promoting humongous or highly utilized regular regions in place. It is not required in this situation -// that the transferred regions be unaffiliated. -void ShenandoahGenerationSizer::force_transfer_to_old(size_t regions) const { - ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap(); - ShenandoahGeneration* old_gen = heap->old_generation(); - ShenandoahGeneration* young_gen = heap->young_generation(); - const size_t bytes_to_transfer = regions * ShenandoahHeapRegion::region_size_bytes(); - - young_gen->decrease_capacity(bytes_to_transfer); - old_gen->increase_capacity(bytes_to_transfer); - const size_t new_size = old_gen->max_capacity(); - log_info(gc, ergo)("Forcing transfer of %zu region(s) from %s to %s, yielding increased size: " PROPERFMT, - regions, young_gen->name(), old_gen->name(), PROPERFMTARGS(new_size)); -} - -bool ShenandoahGenerationSizer::transfer_to_young(size_t regions) const { - ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap(); - return transfer_regions(heap->old_generation(), heap->young_generation(), regions); -} -#endif - size_t ShenandoahGenerationSizer::min_young_size() const { return min_young_regions() * ShenandoahHeapRegion::region_size_bytes(); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationSizer.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationSizer.hpp index aa385af146c97..d1a4dff81f8c4 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationSizer.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationSizer.hpp @@ -51,14 +51,6 @@ class ShenandoahGenerationSizer { // given the number of heap regions depending on the kind of sizing algorithm. void recalculate_min_max_young_length(size_t heap_region_count); -#ifdef KELVIN_OUT_WITH_THE_OLD - // This will attempt to transfer regions from the `src` generation to `dst` generation. - // If the transfer would violate the configured minimum size for the source or the configured - // maximum size of the destination, it will not perform the transfer and will return false. - // Returns true if the transfer is performed. - bool transfer_regions(ShenandoahGeneration* src, ShenandoahGeneration* dst, size_t regions) const; -#endif - public: ShenandoahGenerationSizer(); @@ -83,15 +75,6 @@ class ShenandoahGenerationSizer { size_t max_young_regions() const { return _max_desired_young_regions; } - -#ifdef KELVIN_OUT_WITH_THE_OLD - // True if transfer succeeds, else false. See transfer_regions. 
- bool transfer_to_young(size_t regions) const; - bool transfer_to_old(size_t regions) const; - - // force transfer is used when we promote humongous objects. May violate min/max limits on generation sizes - void force_transfer_to_old(size_t regions) const; -#endif }; #endif //SHARE_GC_SHENANDOAH_SHENANDOAHGENERATIONSIZER_HPP diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp index 04ccf12a2bc34..0456bc552e731 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp @@ -210,10 +210,6 @@ void ShenandoahGenerationalEvacuationTask::promote_in_place(ShenandoahHeapRegion assert(fill_size >= ShenandoahHeap::min_fill_size(), "previously allocated objects known to be larger than min_size"); ShenandoahHeap::fill_with_object(obj_addr, fill_size); scanner->register_object_without_lock(obj_addr); -#undef KELVIN_FILL -#ifdef KELVIN_FILL - log_info(gc)("pip fills region %zu at " PTR_FORMAT ", %zu bytes", region->index(), p2i(obj_addr), fill_size * HeapWordSize); -#endif obj_addr = next_marked_obj; } } @@ -260,17 +256,6 @@ void ShenandoahGenerationalEvacuationTask::promote_in_place(ShenandoahHeapRegion // add_old_collector_free_region() increases promoted_reserve() if available space exceeds plab_min_size() _heap->free_set()->add_promoted_in_place_region_to_old_collector(region); region->set_affiliation(OLD_GENERATION); - -#ifdef KELVIN_OUT_WITH_THE_OLD - young_gen->decrease_used(region_size_bytes); - young_gen->decrement_affiliated_region_count(); - - // transfer_to_old() increases capacity of old and decreases capacity of young - _heap->generation_sizer()->force_transfer_to_old(1); - - old_gen->increment_affiliated_region_count(); - old_gen->increase_used(region_to_be_used_in_old); -#endif } } @@ -301,18 +286,6 @@ void ShenandoahGenerationalEvacuationTask::promote_humongous(ShenandoahHeapRegio // usage totals, including humongous waste, after evacuation is done. log_debug(gc)("promoting humongous region %zu, spanning %zu", region->index(), spanned_regions); -#ifdef KELVIN_HUMONGOUS_WASTE - log_info(gc)("Promoting humongous object, transferring %zu bytes of humongous waste", humongous_waste); -#endif - -#ifdef KELVIN_OUT_WITH_THE_OLD - young_gen->decrease_used(spanned_regions * region_size_bytes); - young_gen->decrease_humongous_waste(humongous_waste); - young_gen->decrease_affiliated_region_count(spanned_regions); - - // transfer_to_old() increases capacity of old and decreases capacity of young - _heap->generation_sizer()->force_transfer_to_old(spanned_regions); -#endif // For this region and each humongous continuation region spanned by this humongous object, change // affiliation to OLD_GENERATION and adjust the generation-use tallies. The remnant of memory // in the last humongous region that is not spanned by obj is currently not used. 
@@ -326,11 +299,6 @@ void ShenandoahGenerationalEvacuationTask::promote_humongous(ShenandoahHeapRegio ShenandoahFreeSet* freeset = _heap->free_set(); freeset->transfer_humongous_regions_from_mutator_to_old_collector(spanned_regions, humongous_waste); -#ifdef KELVIN_OUT_WITH_THE_OLD - old_gen->increase_affiliated_region_count(spanned_regions); - old_gen->increase_used(spanned_regions * region_size_bytes); - old_gen->increase_humongous_waste(humongous_waste); -#endif } // Since this region may have served previously as OLD, it may hold obsolete object range info. diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.cpp index 61dbaef8371a8..3b43433e22d54 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.cpp @@ -105,35 +105,6 @@ void ShenandoahGenerationalFullGC::rebuild_remembered_set(ShenandoahHeap* heap) heap->old_generation()->set_parsable(true); } - -#ifdef KELVIN_OUT_WITH_THE_OLD -void ShenandoahGenerationalFullGC::balance_generations_after_gc(ShenandoahHeap* heap) { - ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::cast(heap); - ShenandoahOldGeneration* const old_gen = gen_heap->old_generation(); - - size_t old_usage = old_gen->used_regions_size(); - size_t old_capacity = old_gen->max_capacity(); - - assert(old_usage % ShenandoahHeapRegion::region_size_bytes() == 0, "Old usage must align with region size"); - assert(old_capacity % ShenandoahHeapRegion::region_size_bytes() == 0, "Old capacity must align with region size"); - - if (old_capacity > old_usage) { - size_t excess_old_regions = (old_capacity - old_usage) / ShenandoahHeapRegion::region_size_bytes(); - gen_heap->generation_sizer()->transfer_to_young(excess_old_regions); - } else if (old_capacity < old_usage) { - size_t old_regions_deficit = (old_usage - old_capacity) / ShenandoahHeapRegion::region_size_bytes(); - gen_heap->generation_sizer()->force_transfer_to_old(old_regions_deficit); - } - log_info(gc, ergo)("FullGC done: young usage: " PROPERFMT ", old usage: " PROPERFMT, - PROPERFMTARGS(gen_heap->young_generation()->used()), - PROPERFMTARGS(old_gen->used())); -} - -ShenandoahGenerationalHeap::TransferResult ShenandoahGenerationalFullGC::balance_generations_after_rebuilding_free_set() { - return ShenandoahGenerationalHeap::heap()->balance_generations(); -} -#endif - void ShenandoahGenerationalFullGC::log_live_in_old(ShenandoahHeap* heap) { LogTarget(Debug, gc) lt; if (lt.is_enabled()) { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.hpp index 62dc3087caef0..da1f6db01cf06 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalFullGC.hpp @@ -45,29 +45,11 @@ class ShenandoahGenerationalFullGC { // Records end of cycle for young and old and establishes size of live bytes in old static void handle_completion(ShenandoahHeap* heap); -#ifdef KELVIN_OUT_WITH_THE_OLD - // Full GC may have promoted regions and may have temporarily violated constraints on the usage and - // capacity of the old generation. This method will balance the accounting of regions between the - // young and old generations. This is somewhat vestigial, but the outcome of this method is used - // when rebuilding the free sets. 
- static void balance_generations_after_gc(ShenandoahHeap* heap); -#endif - // This will compute the target size for the old generation. It will be expressed in terms of // a region surplus and deficit, which will be redistributed accordingly after rebuilding the // free set. static void compute_balances(); -#ifdef KELVIN_OUT_WITH_THE_OLD - // Rebuilding the free set may have resulted in regions being pulled in to the old generation - // evacuation reserve. For this reason, we must update the usage and capacity of the generations - // again. In the distant past, the free set did not know anything about generations, so we had - // a layer built above it to represent how much young/old memory was available. This layer is - // redundant and adds complexity. We would like to one day remove it. Until then, we must keep it - // synchronized with the free set's view of things. - static ShenandoahGenerationalHeap::TransferResult balance_generations_after_rebuilding_free_set(); -#endif - // Logs the number of live bytes marked in the old generation. This is _not_ the same // value used as the baseline for the old generation _after_ the full gc is complete. // The value reported in the logs does not include objects and regions that may be diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp index 5949f0161229c..d61e89a2d2d31 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp @@ -405,11 +405,6 @@ oop ShenandoahGenerationalHeap::try_evacuate_object(oop p, Thread* thread, Shena fill_with_object(copy, size); shenandoah_assert_correct(nullptr, copy_val); // For non-LAB allocations, the object has already been registered -#undef KELVIN_FILL -#ifdef KELVIN_FILL - log_info(gc)("try_evacuate_object() is filling abandoned copy at " PTR_FORMAT ", of size %zu", - p2i(copy), size * HeapWordSize); -#endif } shenandoah_assert_correct(nullptr, result); return result; @@ -588,37 +583,6 @@ void ShenandoahGenerationalHeap::retire_plab(PLAB* plab) { retire_plab(plab, thread); } -#ifdef KELVIN_OUT_WITH_THE_OLD -ShenandoahGenerationalHeap::TransferResult ShenandoahGenerationalHeap::balance_generations() { - shenandoah_assert_heaplocked_or_safepoint(); - - ShenandoahOldGeneration* old_gen = old_generation(); - const ssize_t old_region_balance = old_gen->get_region_balance(); - old_gen->set_region_balance(0); - - if (old_region_balance > 0) { - const auto old_region_surplus = checked_cast(old_region_balance); - const bool success = generation_sizer()->transfer_to_young(old_region_surplus); - return TransferResult { - success, old_region_surplus, "young" - }; - } - - if (old_region_balance < 0) { - const auto old_region_deficit = checked_cast(-old_region_balance); - const bool success = generation_sizer()->transfer_to_old(old_region_deficit); - if (!success) { - old_gen->handle_failed_transfer(); - } - return TransferResult { - success, old_region_deficit, "old" - }; - } - - return TransferResult {true, 0, "none"}; -} -#endif - // Make sure old-generation is large enough, but no larger than is necessary, to hold mixed evacuations // and promotions, if we anticipate either. Any deficit is provided by the young generation, subject to // xfer_limit, and any surplus is transferred to the young generation. 
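As a rough, standalone illustration of the surplus/deficit decision described in the comment above (how much old-generation memory can be handed to young, or how many regions old would like to receive subject to the transfer limit), the following sketch restates the arithmetic with simplified stand-in names. It is not code from this patch; all identifiers here are hypothetical.

// Minimal sketch of the old/young region-balance decision, assuming the quantities
// named in the comment above. Not part of compute_old_generation_balance().
#include <algorithm>
#include <cstddef>

// Positive result: old regions that can be given to young.
// Negative result: regions old would like to take from young, capped by the transfer limit.
inline long region_balance_sketch(size_t old_available_bytes,   // memory currently available to old
                                  size_t old_reserve_bytes,     // memory old wants for mixed evac + promotion
                                  size_t unaffiliated_old,      // empty old regions eligible to move
                                  size_t max_xfer_regions,      // cap on young -> old transfer
                                  size_t region_size_bytes) {
  if (old_available_bytes >= old_reserve_bytes) {
    const size_t surplus = (old_available_bytes - old_reserve_bytes) / region_size_bytes;
    return static_cast<long>(std::min(surplus, unaffiliated_old));
  }
  // Round the deficit up to whole regions, then respect the transfer limit.
  const size_t deficit = (old_reserve_bytes - old_available_bytes + region_size_bytes - 1) / region_size_bytes;
  return -static_cast<long>(std::min(deficit, max_xfer_regions));
}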
@@ -656,11 +620,6 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t old_xfer_ const double max_old_reserve = ((ShenandoahOldEvacRatioPercent == 100)? bound_on_old_reserve: MIN2(double(young_reserve * ShenandoahOldEvacRatioPercent) / double(100 - ShenandoahOldEvacRatioPercent), bound_on_old_reserve)); -#undef KELVIN_REBALANCE -#ifdef KELVIN_REBALANCE - log_info(gc)("compute_old_gen_balance(%zu, %zu), bound_on_old_reserve: %.3f, max_old_reserve: %.3f", - old_xfer_limit, old_cset_regions, bound_on_old_reserve, max_old_reserve); -#endif const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); @@ -669,20 +628,15 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t old_xfer_ if (old_generation()->has_unprocessed_collection_candidates()) { // We want this much memory to be unfragmented in order to reliably evacuate old. This is conservative because we // may not evacuate the entirety of unprocessed candidates in a single mixed evacuation. - const double max_evac_need = (double(old_generation()->unprocessed_collection_candidates_live_memory()) * ShenandoahOldEvacWaste); + const double max_evac_need = + (double(old_generation()->unprocessed_collection_candidates_live_memory()) * ShenandoahOldEvacWaste); assert(old_available >= old_generation()->free_unaffiliated_regions() * region_size_bytes, "Unaffiliated available must be less than total available"); - const double old_fragmented_available = double(old_available - old_generation()->free_unaffiliated_regions() * region_size_bytes); + const double old_fragmented_available = + double(old_available - old_generation()->free_unaffiliated_regions() * region_size_bytes); reserve_for_mixed = max_evac_need + old_fragmented_available; -#ifdef KELVIN_REBALANCE - log_info(gc)(" max_evac_need: %.3f, old_fragmented_available: %.3f, reserve_for_mixed: %.3f", - max_evac_need, old_fragmented_available, reserve_for_mixed); -#endif if (reserve_for_mixed > max_old_reserve) { reserve_for_mixed = max_old_reserve; -#ifdef KELVIN_REBALANCE - log_info(gc)(" downsize reserve_for_mixed: %.3f", reserve_for_mixed); -#endif } } @@ -699,24 +653,15 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t old_xfer_ // This is the total old we want to ideally reserve const size_t old_reserve = reserve_for_mixed + reserve_for_promo; -#ifdef KELVIN_REBALANCE - log_info(gc)(" reserve_for_promo: %zu, old_reserve: %zu", reserve_for_promo, old_reserve); -#endif assert(old_reserve <= max_old_reserve, "cannot reserve more than max for old evacuations"); // We now check if the old generation is running a surplus or a deficit. const size_t max_old_available = old_generation()->available() + old_cset_regions * region_size_bytes; -#ifdef KELVIN_REBALANCE - log_info(gc)(" max_old_available: %zu", max_old_available); -#endif if (max_old_available >= old_reserve) { // We are running a surplus, so the old region surplus can go to young const size_t old_surplus = (max_old_available - old_reserve) / region_size_bytes; const size_t unaffiliated_old_regions = old_generation()->free_unaffiliated_regions() + old_cset_regions; const size_t old_region_surplus = MIN2(old_surplus, unaffiliated_old_regions); -#ifdef KELVIN_REBALANCE - log_info(gc)("surplus of old, set old_gen->region_balance for transfer %zu regions to young", old_region_surplus); -#endif old_generation()->set_region_balance(checked_cast(old_region_surplus)); } else { // We are running a deficit which we'd like to fill from young. 
@@ -730,9 +675,6 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t old_xfer_ // on young-gen memory, we restrict the xfer. Old-gen collection activities will be // curtailed if the budget is restricted. const size_t old_region_deficit = MIN2(old_need, max_old_region_xfer); -#ifdef KELVIN_REBALANCE - log_info(gc)("deficit of old, set old_gen->region_balance for transfer %zu regions to old", old_region_deficit); -#endif old_generation()->set_region_balance(0 - checked_cast(old_region_deficit)); } } @@ -1099,15 +1041,6 @@ void ShenandoahGenerationalHeap::complete_degenerated_cycle() { // a more detailed explanation. old_generation()->transfer_pointers_from_satb(); } -#ifdef KELVIN_OUT_WITH_THE_OLD - // We defer generation resizing actions until after cset regions have been recycled. - TransferResult result = balance_generations(); - LogTarget(Info, gc, ergo) lt; - if (lt.is_enabled()) { - LogStream ls(lt); - result.print_on("Degenerated GC", &ls); - } -#endif // In case degeneration interrupted concurrent evacuation or update references, we need to clean up // transient state. Otherwise, these actions have no effect. reset_generation_reserves(); @@ -1129,23 +1062,7 @@ void ShenandoahGenerationalHeap::complete_concurrent_cycle() { // throw off the heuristics. entry_global_coalesce_and_fill(); } -#ifdef KELVIN_OUT_WITH_THE_OLD - TransferResult result; - { - ShenandoahHeapLocker locker(lock()); - - result = balance_generations(); - reset_generation_reserves(); - } - - LogTarget(Info, gc, ergo) lt; - if (lt.is_enabled()) { - LogStream ls(lt); - result.print_on("Concurrent GC", &ls); - } -#else reset_generation_reserves(); -#endif } void ShenandoahGenerationalHeap::entry_global_coalesce_and_fill() { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp index 8ff2098377c91..71f85faed08ba 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp @@ -749,56 +749,11 @@ void ShenandoahHeap::increase_used(const ShenandoahAllocRequest& req) { if (req.is_gc_alloc()) { assert(wasted_bytes == 0 || req.type() == ShenandoahAllocRequest::_alloc_plab, "Only PLABs have waste"); -#ifdef KELVIN_OUT_WITH_THE_OLD - increase_used(generation, actual_bytes + wasted_bytes); -#endif } else { assert(req.is_mutator_alloc(), "Expected mutator alloc here"); -#ifdef KELVIN_OUT_WITH_THE_OLD - // padding and actual size both count towards allocation counter - generation->increase_allocated(actual_bytes + wasted_bytes); - - // Used within generation is actual bytes + alignment padding (wasted bytes) - increase_used(generation, actual_bytes + wasted_bytes); -#endif -#ifdef KELVIN_OUT_WITH_THE_OLD - if (wasted_bytes > 0 && ShenandoahHeapRegion::requires_humongous(req.actual_size())) { - increase_humongous_waste(generation, wasted_bytes); - } -#endif - } -} - -#ifdef KELVIN_OUT_WITH_THE_OLD -void ShenandoahHeap::increase_humongous_waste(ShenandoahGeneration* generation, size_t bytes) { - generation->increase_humongous_waste(bytes); - if (!generation->is_global()) { - global_generation()->increase_humongous_waste(bytes); - } -} - -void ShenandoahHeap::decrease_humongous_waste(ShenandoahGeneration* generation, size_t bytes) { - generation->decrease_humongous_waste(bytes); - if (!generation->is_global()) { - global_generation()->decrease_humongous_waste(bytes); } } -void ShenandoahHeap::increase_used(ShenandoahGeneration* generation, size_t bytes) { - generation->increase_used(bytes); - 
if (!generation->is_global()) { - global_generation()->increase_used(bytes); - } -} - -void ShenandoahHeap::decrease_used(ShenandoahGeneration* generation, size_t bytes) { - generation->decrease_used(bytes); - if (!generation->is_global()) { - global_generation()->decrease_used(bytes); - } -} -#endif - size_t ShenandoahHeap::capacity() const { return committed(); } @@ -2330,17 +2285,9 @@ address ShenandoahHeap::in_cset_fast_test_addr() { } void ShenandoahHeap::reset_bytes_allocated_since_gc_start() { -#ifdef KELVIN_OUT_WITH_THE_OLD - if (mode()->is_generational()) { - young_generation()->reset_bytes_allocated_since_gc_start(); - old_generation()->reset_bytes_allocated_since_gc_start(); - } - global_generation()->reset_bytes_allocated_since_gc_start(); -#else ShenandoahHeap* heap = ShenandoahHeap::heap(); ShenandoahHeapLocker locker(heap->lock()); free_set()->reset_bytes_allocated_since_gc_start(); -#endif } void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp index feec2a06ccf54..e22baf5095340 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp @@ -289,10 +289,6 @@ void ShenandoahHeapRegion::make_cset() { } void ShenandoahHeapRegion::make_trash() { -#undef KELVIN_TRASH -#ifdef KELVIN_TRASH - log_info(gc)("make_trash() on region %zu", index()); -#endif shenandoah_assert_heaplocked(); reset_age(); switch (state()) { @@ -315,9 +311,6 @@ void ShenandoahHeapRegion::make_trash() { } void ShenandoahHeapRegion::make_trash_immediate() { -#ifdef KELVIN_TRASH - log_info(gc)("make_trash_immediate() on region %zu", index()); -#endif make_trash(); // On this path, we know there are no marked objects in the region, @@ -911,14 +904,6 @@ void ShenandoahHeapRegion::decrement_humongous_waste() { size_t waste_bytes = free(); if (waste_bytes > 0) { ShenandoahHeap* heap = ShenandoahHeap::heap(); - ShenandoahGeneration* generation = heap->generation_for(affiliation()); -#undef KELVIN_HUMONGOUS_HEAP_REGION -#ifdef KELVIN_HUMONGOUS_HEAP_REGION - log_info(gc)("Decrementing humongous waste by %zu in ShenHeapRegion", waste_bytes); -#endif -#ifdef KELVIN_OUT_WITH_THE_OLD - heap->decrease_humongous_waste(generation, waste_bytes); -#endif heap->free_set()->decrease_humongous_waste_for_regular_bypass(this, waste_bytes); } } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp index 361ea9d5213d1..ed24a6e671992 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp @@ -146,23 +146,5 @@ bool ShenandoahOldGC::collect(GCCause::Cause cause) { size_t allocation_runway = heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(0); heap->compute_old_generation_balance(allocation_runway, 0); - -#ifdef KELVIN_OUT_WITH_THE_OLD - // Kelvin says the following comment is not correct. We do rebuild_free following the end of old marking. - - // We do not rebuild_free following increments of old marking because memory has not been reclaimed. However, we may - // need to transfer memory to OLD in order to efficiently support the mixed evacuations that might immediately follow. 
- ShenandoahGenerationalHeap::TransferResult result; - { - ShenandoahHeapLocker locker(heap->lock()); - result = heap->balance_generations(); - } - - LogTarget(Info, gc, ergo) lt; - if (lt.is_enabled()) { - LogStream ls(lt); - result.print_on("Old Mark", &ls); - } -#endif return true; } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp index b825994256e59..8e868b7c9b29f 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp @@ -725,12 +725,6 @@ void ShenandoahOldGeneration::handle_evacuation(HeapWord* obj, size_t words, boo // do this in batch, in a background GC thread than to try to carefully dirty only cards // that hold interesting pointers right now. _card_scan->mark_range_as_dirty(obj, words); -#ifdef KELVIN_OUT_WITH_THE_OLD - if (promotion) { - // This evacuation was a promotion, track this as allocation against old gen - increase_allocated(words * HeapWordSize); - } -#endif } bool ShenandoahOldGeneration::has_unprocessed_collection_candidates() { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp index 56e4c21956287..edbe7b3538911 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp @@ -141,10 +141,6 @@ class ShenandoahOldGeneration : public ShenandoahGeneration { // See description in field declaration void set_region_balance(ssize_t balance) { -#undef KELVIN_BALANCIO -#ifdef KELVIN_BALANCIO - log_info(gc)("OldGen::set_region_balance(%zd)", balance); -#endif _region_balance = balance; } ssize_t get_region_balance() const { return _region_balance; } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp index 1c33091e795aa..9e9129ee112cc 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp @@ -377,18 +377,9 @@ class ShenandoahCalculateRegionStatsClosure : public ShenandoahHeapRegionClosure private: size_t _used, _committed, _garbage, _regions, _humongous_waste, _trashed_regions, _trashed_used; size_t _region_size_bytes, _min_free_size; -#undef KELVIN_VERBOSE -#ifdef KELVIN_VERBOSE - const char* _nm; -#endif public: -#ifdef KELVIN_VERBOSE - ShenandoahCalculateRegionStatsClosure(const char *name) : - _used(0), _committed(0), _garbage(0), _regions(0), _humongous_waste(0), _trashed_regions(0), _trashed_used(0), _nm(name) -#else ShenandoahCalculateRegionStatsClosure() : _used(0), _committed(0), _garbage(0), _regions(0), _humongous_waste(0), _trashed_regions(0), _trashed_used(0) -#endif { _region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); // Retired regions are not necessarily filled, thouugh their remnant memory is considered used. @@ -396,12 +387,6 @@ class ShenandoahCalculateRegionStatsClosure : public ShenandoahHeapRegionClosure }; void heap_region_do(ShenandoahHeapRegion* r) override { -#undef KELVIN_STATS -#ifdef KELVIN_STATS - log_info(gc)("%s:ShenandoahCalculateRegionStatsClosure::heap_region_do(), %s r: %zu used: %zu, garbage: %zu, is_trash: %s", - _nm, r->affiliation_name(), r->index(), r->used(), r->garbage(), r->is_trash()? 
"yes": "no"); -#endif - if (r->is_cset() || r->is_trash()) { // Count the entire cset or trashed (formerly cset) region as used // Note: Immediate garbage trash regions were never in the cset. @@ -419,9 +404,6 @@ class ShenandoahCalculateRegionStatsClosure : public ShenandoahHeapRegionClosure } else { size_t alloc_capacity = r->free(); if (alloc_capacity < _min_free_size) { -#ifdef KELVIN_STATS - log_info(gc)("KELVIN!!!! overwriting alloc_capacity %zu with 0 because too small", alloc_capacity); -#endif // this region has been retired already, count it as entirely consumed alloc_capacity = 0; } @@ -434,10 +416,6 @@ class ShenandoahCalculateRegionStatsClosure : public ShenandoahHeapRegionClosure } _committed += r->is_committed() ? _region_size_bytes : 0; _regions++; -#ifdef KELVIN_STATS - log_info(gc)(" _used: %zu, _garbage: %zu, _committed: %zu, _humongous_waste: %zu, _trashed_regions: %zu, _trashed_used: %zu", - _used, _garbage, _committed, _humongous_waste, _trashed_regions, _trashed_used); -#endif log_debug(gc)("ShenandoahCalculateRegionStatsClosure: adding %zu for %s Region %zu, yielding: %zu", r->used(), (r->is_humongous() ? "humongous" : "regular"), r->index(), _used); } @@ -457,21 +435,11 @@ class ShenandoahCalculateRegionStatsClosure : public ShenandoahHeapRegionClosure class ShenandoahGenerationStatsClosure : public ShenandoahHeapRegionClosure { public: -#ifdef KELVIN_VERBOSE - ShenandoahCalculateRegionStatsClosure _old = ShenandoahCalculateRegionStatsClosure("Old"); - ShenandoahCalculateRegionStatsClosure _young = ShenandoahCalculateRegionStatsClosure("Young"); - ShenandoahCalculateRegionStatsClosure _global = ShenandoahCalculateRegionStatsClosure("Global"); -#else ShenandoahCalculateRegionStatsClosure _old; ShenandoahCalculateRegionStatsClosure _young; ShenandoahCalculateRegionStatsClosure _global; -#endif void heap_region_do(ShenandoahHeapRegion* r) override { -#ifdef KELVIN_STATS - log_info(gc)("ShenandoahGenerationalStatsClosure::heap_region_do(), %s region %zu has used: %zu, is_trash: %s", - r->affiliation_name(), r->index(), r->used(), r->is_trash()? "yes": "no"); -#endif switch (r->affiliation()) { case FREE: return; @@ -500,22 +468,7 @@ class ShenandoahGenerationStatsClosure : public ShenandoahHeapRegionClosure { ShenandoahHeap* heap = ShenandoahHeap::heap(); size_t generation_used = generation->used(); size_t generation_used_regions = generation->used_regions(); -#ifdef KELVIN_DEPRECATE - // We no longer need to adjust for padding. Probably want to - // remove this argument altogether. - if (adjust_for_padding && (generation->is_young() || generation->is_global())) { - size_t pad = heap->old_generation()->get_pad_for_promote_in_place(); - generation_used += pad; - } -#endif -#undef KELVIN_EXTRA_NOISE -#ifdef KELVIN_EXTRA_NOISE - log_info(gc)("%s: generation (%s) used size must be consistent: generation-used: %zu, regions-used from stats: %zu, stats.used_after_recycle: %zu, adjust_for_trash: %s", - label, generation->name(), generation_used, stats.used(), stats.used_after_recycle(), adjust_for_trash? "yes": "no"); - // kelvin once thought he needed to use stats.used_after_recycle() - // in the following assertion, but maybe not... -#endif size_t stats_used = adjust_for_trash? 
stats.used_after_recycle(): stats.used(); guarantee(stats_used == generation_used, "%s: generation (%s) used size must be consistent: generation-used: " PROPERFMT ", regions-used: " PROPERFMT, @@ -943,11 +896,7 @@ void ShenandoahVerifier::verify_at_safepoint(const char* label, { ShenandoahHeapLocker lock(_heap->lock()); -#ifdef KELVIN_VERBOSE - ShenandoahCalculateRegionStatsClosure cl = ShenandoahCalculateRegionStatsClosure("Global"); -#else ShenandoahCalculateRegionStatsClosure cl; -#endif _heap->heap_region_iterate(&cl); size_t heap_used; if (_heap->mode()->is_generational() && (sizeness == _verify_size_adjusted_for_padding)) { From b5b5c401e174d232b5a26d09ca99fbfdcd2e080f Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Wed, 20 Aug 2025 19:49:15 +0000 Subject: [PATCH 57/61] fix white space --- .../share/gc/shenandoah/shenandoahFreeSet.cpp | 10 +++++----- .../share/gc/shenandoah/shenandoahFreeSet.hpp | 13 +++++-------- .../share/gc/shenandoah/shenandoahGeneration.hpp | 4 ++-- .../gc/shenandoah/shenandoahGenerationalHeap.cpp | 2 +- 4 files changed, 13 insertions(+), 16 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index 0cf0b6d15a368..61d021adefe28 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -1836,7 +1836,7 @@ class ShenandoahRecycleTrashedRegionClosure final : public ShenandoahHeapRegionC } // The almost last thing we do before releasing the lock is to set the _recycled_region_count to 0. What happens next? - // + // // 1. Any worker thread that attempted to buffer a new region while we were flushing the buffer will have seen // that _recycled_region_count > MaxSavedRegions. All such worker threads will first wait for the lock, then // discover that the _recycled_region_count is zero, then, while holding the lock, they will process the @@ -2068,7 +2068,7 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r ShenandoahHeapRegion* region = _heap->get_region(idx); if (region->is_trash()) { // Trashed regions represent immediate garbage identified by final mark and regions that had been in the collection - // partition but have not yet been "cleaned up" following update refs. + // partition but have not yet been "cleaned up" following update refs. if (region->is_old()) { old_trashed_regions++; } else { @@ -2432,7 +2432,7 @@ transfer_non_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPa } } // _empty_region_counts is unaffected, because we transfer only non-empty regions here. 
- + _partitions.decrease_used(which_collector, used_transfer); _partitions.expand_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId::Mutator, mutator_low_idx, mutator_high_idx, _partitions.max(), -1); @@ -2668,7 +2668,7 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old idx_t old_collector_high_idx = -1; idx_t old_collector_empty_low_idx = _partitions.max(); idx_t old_collector_empty_high_idx = -1; - + size_t used_to_collector = 0; size_t used_to_old_collector = 0; size_t regions_to_collector = 0; @@ -2753,7 +2753,7 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old } empty_regions_to_collector++; } - used_to_collector += + used_to_collector += _partitions.move_from_partition_to_partition_with_deferred_accounting(idx, ShenandoahFreeSetPartitionId::Mutator, ShenandoahFreeSetPartitionId::Collector, ac); collector_available += ac; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp index 665917156f846..bf20b2def29f0 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp @@ -86,10 +86,10 @@ class ShenandoahRegionPartitions { // // Unlike capacity, which represents the total amount of memory representing each partition as of the moment // the freeset was most recently constructed: - // + // // _region_counts[p] represents the number of regions associated with the partition which currently have available memory. // When a region is retired from partition p, _region_counts[p] is decremented. - // _total_region_counts[p] is _total_capacity[p] / RegionSizeBytes. + // _total_region_counts[p] is _total_capacity[p] / RegionSizeBytes. // _empty_region_counts[p] is number of regions associated with p which are entirely empty // // capacity and used values are expressed in bytes. 
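
The block comment above pins down the relationships between the per-partition tallies: total region counts are derived from total capacity, region counts shrink as regions are retired, and capacity, used, and available are all tracked in bytes. One compact way to read those rules is as invariants on a small accounting struct. The sketch below is only a model with assumed names and an assumed 4 MB region size, not the ShenandoahRegionPartitions code.

#include <cassert>
#include <cstddef>

const size_t RegionSizeBytes = 4 * 1024 * 1024;     // assumed region size for the example

struct PartitionTally {                              // hypothetical stand-in for one partition's counters
  size_t total_capacity = 0;                         // bytes spanned by the partition, retired regions included
  size_t used = 0;                                   // bytes allocated (or wasted) within that capacity
  size_t available = 0;                              // invariant: available == total_capacity - used
  size_t total_region_counts = 0;                    // invariant: total_capacity / RegionSizeBytes
  size_t region_counts = 0;                          // regions that still have allocatable memory
  size_t empty_region_counts = 0;                    // regions that are entirely empty

  void verify() const {
    assert(available == total_capacity - used);
    assert(total_region_counts == total_capacity / RegionSizeBytes);
    assert(empty_region_counts <= region_counts && region_counts <= total_region_counts);
  }

  void add_empty_region() {                          // region joins the partition with its full capacity free
    total_capacity += RegionSizeBytes;
    available += RegionSizeBytes;
    total_region_counts++;
    region_counts++;
    empty_region_counts++;
    verify();
  }

  void decrease_used(size_t bytes) {                 // same shape as decrease_used() in the hunk below
    assert(used >= bytes);                           // must not drive used below zero
    used -= bytes;
    available += bytes;
    verify();
  }

  void retire_region(size_t leftover_bytes) {        // remaining free space counts as used once retired
    assert(available >= leftover_bytes);
    used += leftover_bytes;
    available -= leftover_bytes;
    region_counts--;
    verify();
  }
};
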
@@ -310,10 +310,7 @@ class ShenandoahRegionPartitions { inline void decrease_used(ShenandoahFreeSetPartitionId which_partition, size_t bytes) { shenandoah_assert_heaplocked(); assert (which_partition < NumPartitions, "Partition must be valid"); - assert (_used[int(which_partition)] >= bytes, - "Must not use (%zu) less than zero after decrease by %zu", - _used[int(which_partition)], bytes); - + assert (_used[int(which_partition)] >= bytes, "Must not use less than zero after decrease"); _used[int(which_partition)] -= bytes; _available[int(which_partition)] += bytes; } @@ -479,7 +476,7 @@ class ShenandoahFreeSet : public CHeapObj { size_t region_size_bytes = _partitions.region_size_bytes(); _total_old_used =_partitions.used_by(ShenandoahFreeSetPartitionId::OldCollector); } - + // bytes used by global size_t _total_global_used; // Prerequisite: _total_young_used and _total_old_used are valid @@ -513,7 +510,7 @@ class ShenandoahFreeSet : public CHeapObj { _young_affiliated_regions = ((_partitions.get_total_region_counts(ShenandoahFreeSetPartitionId::Mutator) + _partitions.get_total_region_counts(ShenandoahFreeSetPartitionId::Collector)) - _young_unaffiliated_regions); - _old_affiliated_regions = (_partitions.get_total_region_counts(ShenandoahFreeSetPartitionId::OldCollector) - + _old_affiliated_regions = (_partitions.get_total_region_counts(ShenandoahFreeSetPartitionId::OldCollector) - _partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::OldCollector)); _global_unaffiliated_regions = _young_unaffiliated_regions + _partitions.get_empty_region_counts(ShenandoahFreeSetPartitionId::OldCollector); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp index abbc3b5dbe9e8..4a6e415936d90 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp @@ -173,7 +173,7 @@ class ShenandoahGeneration : public CHeapObj, public ShenandoahSpaceInfo { return 0; } } - + void log_status(const char* msg) const; // Used directly by FullGC @@ -268,7 +268,7 @@ class ShenandoahGeneration : public CHeapObj, public ShenandoahSpaceInfo { } return result; } - + size_t get_humongous_waste() const { size_t result; switch (_type) { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp index d61e89a2d2d31..6124c8c54ea73 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp @@ -617,7 +617,7 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t old_xfer_ // In the case that ShenandoahOldEvacRatioPercent equals 100, max_old_reserve is limited only by xfer_limit. const double bound_on_old_reserve = old_available + old_xfer_limit + young_reserve; - const double max_old_reserve = ((ShenandoahOldEvacRatioPercent == 100)? bound_on_old_reserve: + const double max_old_reserve = ((ShenandoahOldEvacRatioPercent == 100)? 
bound_on_old_reserve: MIN2(double(young_reserve * ShenandoahOldEvacRatioPercent) / double(100 - ShenandoahOldEvacRatioPercent), bound_on_old_reserve)); From 51d9e05be4f8cda561fed51a80b413f2848b51b5 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Wed, 20 Aug 2025 19:54:39 +0000 Subject: [PATCH 58/61] remove unused variable --- src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp index 4a6e415936d90..8c453c1a074a5 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp @@ -164,7 +164,6 @@ class ShenandoahGeneration : public CHeapObj, public ShenandoahSpaceInfo { size_t soft_available() const override; size_t bytes_allocated_since_gc_start() const { - size_t result; if (_type == ShenandoahGenerationType::YOUNG) { return _free_set->get_bytes_allocated_since_gc_start(); } else if (ShenandoahHeap::heap()->mode()->is_generational() && (_type == ShenandoahGenerationType::NON_GEN)) { From 45555d012d672e8a61fc31c28f1e344b2a285a80 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Wed, 20 Aug 2025 21:03:12 +0000 Subject: [PATCH 59/61] Refinements to appease MS Windows compiler --- .../share/gc/shenandoah/shenandoahFreeSet.cpp | 284 +++++++++--------- .../share/gc/shenandoah/shenandoahFreeSet.hpp | 87 +++--- .../share/gc/shenandoah/shenandoahFullGC.cpp | 2 - .../gc/shenandoah/shenandoahGeneration.cpp | 12 +- .../gc/shenandoah/shenandoahSimpleBitMap.cpp | 20 +- .../gc/shenandoah/shenandoahSimpleBitMap.hpp | 54 ++-- .../shenandoahSimpleBitMap.inline.hpp | 20 +- 7 files changed, 243 insertions(+), 236 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp index 61d021adefe28..2f5ab4fc1aaec 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp @@ -52,12 +52,13 @@ static const char* partition_name(ShenandoahFreeSetPartitionId t) { class ShenandoahLeftRightIterator { private: - idx_t _idx; - idx_t _end; + index_type _idx; + index_type _end; ShenandoahRegionPartitions* _partitions; ShenandoahFreeSetPartitionId _partition; public: - explicit ShenandoahLeftRightIterator(ShenandoahRegionPartitions* partitions, ShenandoahFreeSetPartitionId partition, bool use_empty = false) + explicit ShenandoahLeftRightIterator(ShenandoahRegionPartitions* partitions, + ShenandoahFreeSetPartitionId partition, bool use_empty = false) : _idx(0), _end(0), _partitions(partitions), _partition(partition) { _idx = use_empty ? _partitions->leftmost_empty(_partition) : _partitions->leftmost(_partition); _end = use_empty ? 
_partitions->rightmost_empty(_partition) : _partitions->rightmost(_partition); @@ -71,11 +72,11 @@ class ShenandoahLeftRightIterator { return false; } - idx_t current() const { + index_type current() const { return _idx; } - idx_t next() { + index_type next() { _idx = _partitions->find_index_of_next_available_region(_partition, _idx + 1); return current(); } @@ -83,12 +84,13 @@ class ShenandoahLeftRightIterator { class ShenandoahRightLeftIterator { private: - idx_t _idx; - idx_t _end; + index_type _idx; + index_type _end; ShenandoahRegionPartitions* _partitions; ShenandoahFreeSetPartitionId _partition; public: - explicit ShenandoahRightLeftIterator(ShenandoahRegionPartitions* partitions, ShenandoahFreeSetPartitionId partition, bool use_empty = false) + explicit ShenandoahRightLeftIterator(ShenandoahRegionPartitions* partitions, + ShenandoahFreeSetPartitionId partition, bool use_empty = false) : _idx(0), _end(0), _partitions(partitions), _partition(partition) { _idx = use_empty ? _partitions->rightmost_empty(_partition) : _partitions->rightmost(_partition); _end = use_empty ? _partitions->leftmost_empty(_partition) : _partitions->leftmost(_partition); @@ -102,11 +104,11 @@ class ShenandoahRightLeftIterator { return false; } - idx_t current() const { + index_type current() const { return _idx; } - idx_t next() { + index_type next() { _idx = _partitions->find_index_of_previous_available_region(_partition, _idx - 1); return current(); } @@ -136,21 +138,21 @@ void ShenandoahRegionPartitions::dump_bitmap() const { dump_bitmap_range(0, _max-1); } -void ShenandoahRegionPartitions::dump_bitmap_range(idx_t start_region_idx, idx_t end_region_idx) const { - assert((start_region_idx >= 0) && (start_region_idx < (idx_t) _max), "precondition"); - assert((end_region_idx >= 0) && (end_region_idx < (idx_t) _max), "precondition"); - idx_t aligned_start = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].aligned_index(start_region_idx); - idx_t aligned_end = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].aligned_index(end_region_idx); - idx_t alignment = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].alignment(); +void ShenandoahRegionPartitions::dump_bitmap_range(index_type start_region_idx, index_type end_region_idx) const { + assert((start_region_idx >= 0) && (start_region_idx < (index_type) _max), "precondition"); + assert((end_region_idx >= 0) && (end_region_idx < (index_type) _max), "precondition"); + index_type aligned_start = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].aligned_index(start_region_idx); + index_type aligned_end = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].aligned_index(end_region_idx); + index_type alignment = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].alignment(); while (aligned_start <= aligned_end) { dump_bitmap_row(aligned_start); aligned_start += alignment; } } -void ShenandoahRegionPartitions::dump_bitmap_row(idx_t region_idx) const { - assert((region_idx >= 0) && (region_idx < (idx_t) _max), "precondition"); - idx_t aligned_idx = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].aligned_index(region_idx); +void ShenandoahRegionPartitions::dump_bitmap_row(index_type region_idx) const { + assert((region_idx >= 0) && (region_idx < (index_type) _max), "precondition"); + index_type aligned_idx = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].aligned_index(region_idx); uintx mutator_bits = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].bits_at(aligned_idx); uintx collector_bits = 
_membership[int(ShenandoahFreeSetPartitionId::Collector)].bits_at(aligned_idx); uintx old_collector_bits = _membership[int(ShenandoahFreeSetPartitionId::OldCollector)].bits_at(aligned_idx); @@ -262,9 +264,9 @@ void ShenandoahFreeSet::increase_bytes_allocated(size_t bytes) { _mutator_bytes_allocated_since_gc_start += bytes; } -inline idx_t ShenandoahRegionPartitions::leftmost(ShenandoahFreeSetPartitionId which_partition) const { +inline index_type ShenandoahRegionPartitions::leftmost(ShenandoahFreeSetPartitionId which_partition) const { assert (which_partition < NumPartitions, "selected free partition must be valid"); - idx_t idx = _leftmosts[int(which_partition)]; + index_type idx = _leftmosts[int(which_partition)]; if (idx >= _max) { return _max; } else { @@ -275,9 +277,9 @@ inline idx_t ShenandoahRegionPartitions::leftmost(ShenandoahFreeSetPartitionId w } } -inline idx_t ShenandoahRegionPartitions::rightmost(ShenandoahFreeSetPartitionId which_partition) const { +inline index_type ShenandoahRegionPartitions::rightmost(ShenandoahFreeSetPartitionId which_partition) const { assert (which_partition < NumPartitions, "selected free partition must be valid"); - idx_t idx = _rightmosts[int(which_partition)]; + index_type idx = _rightmosts[int(which_partition)]; // Cannot assert that membership[which_partition.is_set(idx) because this helper method may be used // to query the original value of leftmost when leftmost must be adjusted because the interval representing // which_partition is shrinking after the region that used to be leftmost is retired. @@ -310,8 +312,8 @@ void ShenandoahRegionPartitions::make_all_regions_unavailable() { _empty_region_counts[int(ShenandoahFreeSetPartitionId::Collector)] = 0; } -void ShenandoahRegionPartitions::establish_mutator_intervals(idx_t mutator_leftmost, idx_t mutator_rightmost, - idx_t mutator_leftmost_empty, idx_t mutator_rightmost_empty, +void ShenandoahRegionPartitions::establish_mutator_intervals(index_type mutator_leftmost, index_type mutator_rightmost, + index_type mutator_leftmost_empty, index_type mutator_rightmost_empty, size_t total_mutator_regions, size_t empty_mutator_regions, size_t mutator_region_count, size_t mutator_used, size_t mutator_humongous_waste_bytes) { @@ -347,9 +349,10 @@ void ShenandoahRegionPartitions::establish_mutator_intervals(idx_t mutator_leftm _empty_region_counts[int(ShenandoahFreeSetPartitionId::Collector)] = 0; } -void ShenandoahRegionPartitions::establish_old_collector_intervals(idx_t old_collector_leftmost, idx_t old_collector_rightmost, - idx_t old_collector_leftmost_empty, - idx_t old_collector_rightmost_empty, +void ShenandoahRegionPartitions::establish_old_collector_intervals(index_type old_collector_leftmost, + index_type old_collector_rightmost, + index_type old_collector_leftmost_empty, + index_type old_collector_rightmost_empty, size_t total_old_collector_region_count, size_t old_collector_empty, size_t old_collector_regions, size_t old_collector_used, @@ -478,7 +481,7 @@ void ShenandoahRegionPartitions::one_region_is_no_longer_empty(ShenandoahFreeSet // All members of partition between low_idx and high_idx inclusive have been removed. 
void ShenandoahRegionPartitions::shrink_interval_if_range_modifies_either_boundary( - ShenandoahFreeSetPartitionId partition, idx_t low_idx, idx_t high_idx, size_t num_regions) { + ShenandoahFreeSetPartitionId partition, index_type low_idx, index_type high_idx, size_t num_regions) { assert((low_idx <= high_idx) && (low_idx >= 0) && (high_idx < _max), "Range must span legal index values"); size_t span = high_idx + 1 - low_idx; bool regions_are_contiguous = (span == num_regions); @@ -530,8 +533,8 @@ void ShenandoahRegionPartitions::shrink_interval_if_range_modifies_either_bounda } } -void ShenandoahRegionPartitions::establish_interval(ShenandoahFreeSetPartitionId partition, idx_t low_idx, idx_t high_idx, - idx_t low_empty_idx, idx_t high_empty_idx) { +void ShenandoahRegionPartitions::establish_interval(ShenandoahFreeSetPartitionId partition, index_type low_idx, + index_type high_idx, index_type low_empty_idx, index_type high_empty_idx) { #ifdef ASSERT assert (partition < NumPartitions, "invalid partition"); if (low_idx != max()) { @@ -554,14 +557,15 @@ void ShenandoahRegionPartitions::establish_interval(ShenandoahFreeSetPartitionId _rightmosts_empty[int(partition)] = high_empty_idx; } -inline void ShenandoahRegionPartitions::shrink_interval_if_boundary_modified(ShenandoahFreeSetPartitionId partition, idx_t idx) { +inline void ShenandoahRegionPartitions::shrink_interval_if_boundary_modified(ShenandoahFreeSetPartitionId partition, + index_type idx) { shrink_interval_if_range_modifies_either_boundary(partition, idx, idx, 1); } // Some members of partition between low_idx and high_idx inclusive have been added. void ShenandoahRegionPartitions:: -expand_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId partition, idx_t low_idx, idx_t high_idx, - idx_t low_empty_idx, idx_t high_empty_idx) { +expand_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId partition, index_type low_idx, index_type high_idx, + index_type low_empty_idx, index_type high_empty_idx) { if (_leftmosts[int(partition)] > low_idx) { _leftmosts[int(partition)] = low_idx; } @@ -577,7 +581,7 @@ expand_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId p } void ShenandoahRegionPartitions::expand_interval_if_boundary_modified(ShenandoahFreeSetPartitionId partition, - idx_t idx, size_t region_available) { + index_type idx, size_t region_available) { if (_leftmosts[int(partition)] > idx) { _leftmosts[int(partition)] = idx; } @@ -595,14 +599,14 @@ void ShenandoahRegionPartitions::expand_interval_if_boundary_modified(Shenandoah } void ShenandoahRegionPartitions::retire_range_from_partition( - ShenandoahFreeSetPartitionId partition, idx_t low_idx, idx_t high_idx) { + ShenandoahFreeSetPartitionId partition, index_type low_idx, index_type high_idx) { // Note: we may remove from free partition even if region is not entirely full, such as when available < PLAB::min_size() assert ((low_idx < _max) && (high_idx < _max), "Both indices are sane: %zu and %zu < %zu", low_idx, high_idx, _max); assert (partition < NumPartitions, "Cannot remove from free partitions if not already free"); - for (idx_t idx = low_idx; idx <= high_idx; idx++) { + for (index_type idx = low_idx; idx <= high_idx; idx++) { #ifdef ASSERT ShenandoahHeapRegion* r = ShenandoahHeap::heap()->get_region(idx); assert (in_free_set(partition, idx), "Must be in partition to remove from partition"); @@ -616,7 +620,8 @@ void ShenandoahRegionPartitions::retire_range_from_partition( 
shrink_interval_if_range_modifies_either_boundary(partition, low_idx, high_idx, num_regions); } -size_t ShenandoahRegionPartitions::retire_from_partition(ShenandoahFreeSetPartitionId partition, idx_t idx, size_t used_bytes) { +size_t ShenandoahRegionPartitions::retire_from_partition(ShenandoahFreeSetPartitionId partition, + index_type idx, size_t used_bytes) { size_t waste_bytes = 0; // Note: we may remove from free partition even if region is not entirely full, such as when available < PLAB::min_size() @@ -648,7 +653,7 @@ void ShenandoahRegionPartitions::unretire_to_partition(ShenandoahHeapRegion* r, // The caller is responsible for increasing capacity and available and used in which_partition, and decreasing the // same quantities for the original partition -void ShenandoahRegionPartitions::make_free(idx_t idx, ShenandoahFreeSetPartitionId which_partition, size_t available) { +void ShenandoahRegionPartitions::make_free(index_type idx, ShenandoahFreeSetPartitionId which_partition, size_t available) { shenandoah_assert_heaplocked(); assert (idx < _max, "index is sane: %zu < %zu", idx, _max); assert (membership(idx) == ShenandoahFreeSetPartitionId::NotFree, "Cannot make free if already free"); @@ -677,7 +682,7 @@ bool ShenandoahRegionPartitions::available_implies_empty(size_t available_in_reg // Do not adjust capacities, available, or used. Return used delta. size_t ShenandoahRegionPartitions:: -move_from_partition_to_partition_with_deferred_accounting(idx_t idx, ShenandoahFreeSetPartitionId orig_partition, +move_from_partition_to_partition_with_deferred_accounting(index_type idx, ShenandoahFreeSetPartitionId orig_partition, ShenandoahFreeSetPartitionId new_partition, size_t available) { ShenandoahHeapRegion* r = ShenandoahHeap::heap()->get_region(idx); shenandoah_assert_heaplocked(); @@ -719,7 +724,7 @@ move_from_partition_to_partition_with_deferred_accounting(idx_t idx, ShenandoahF return used; } -void ShenandoahRegionPartitions::move_from_partition_to_partition(idx_t idx, ShenandoahFreeSetPartitionId orig_partition, +void ShenandoahRegionPartitions::move_from_partition_to_partition(index_type idx, ShenandoahFreeSetPartitionId orig_partition, ShenandoahFreeSetPartitionId new_partition, size_t available) { size_t used = move_from_partition_to_partition_with_deferred_accounting(idx, orig_partition, new_partition, available); @@ -743,12 +748,12 @@ void ShenandoahRegionPartitions::move_from_partition_to_partition(idx_t idx, She } } -const char* ShenandoahRegionPartitions::partition_membership_name(idx_t idx) const { +const char* ShenandoahRegionPartitions::partition_membership_name(index_type idx) const { return partition_name(membership(idx)); } #ifdef ASSERT -inline bool ShenandoahRegionPartitions::partition_id_matches(idx_t idx, ShenandoahFreeSetPartitionId test_partition) const { +inline bool ShenandoahRegionPartitions::partition_id_matches(index_type idx, ShenandoahFreeSetPartitionId test_partition) const { assert (idx < _max, "index is sane: %zu < %zu", idx, _max); assert (test_partition < ShenandoahFreeSetPartitionId::NotFree, "must be a valid partition"); @@ -761,15 +766,15 @@ inline bool ShenandoahRegionPartitions::is_empty(ShenandoahFreeSetPartitionId wh return (leftmost(which_partition) > rightmost(which_partition)); } -inline idx_t ShenandoahRegionPartitions::find_index_of_next_available_region( - ShenandoahFreeSetPartitionId which_partition, idx_t start_index) const { - idx_t rightmost_idx = rightmost(which_partition); - idx_t leftmost_idx = leftmost(which_partition); +inline 
index_type ShenandoahRegionPartitions::find_index_of_next_available_region( + ShenandoahFreeSetPartitionId which_partition, index_type start_index) const { + index_type rightmost_idx = rightmost(which_partition); + index_type leftmost_idx = leftmost(which_partition); if ((rightmost_idx < leftmost_idx) || (start_index > rightmost_idx)) return _max; if (start_index < leftmost_idx) { start_index = leftmost_idx; } - idx_t result = _membership[int(which_partition)].find_first_set_bit(start_index, rightmost_idx + 1); + index_type result = _membership[int(which_partition)].find_first_set_bit(start_index, rightmost_idx + 1); if (result > rightmost_idx) { result = _max; } @@ -777,16 +782,16 @@ inline idx_t ShenandoahRegionPartitions::find_index_of_next_available_region( return result; } -inline idx_t ShenandoahRegionPartitions::find_index_of_previous_available_region( - ShenandoahFreeSetPartitionId which_partition, idx_t last_index) const { - idx_t rightmost_idx = rightmost(which_partition); - idx_t leftmost_idx = leftmost(which_partition); +inline index_type ShenandoahRegionPartitions::find_index_of_previous_available_region( + ShenandoahFreeSetPartitionId which_partition, index_type last_index) const { + index_type rightmost_idx = rightmost(which_partition); + index_type leftmost_idx = leftmost(which_partition); // if (leftmost_idx == max) then (last_index < leftmost_idx) if (last_index < leftmost_idx) return -1; if (last_index > rightmost_idx) { last_index = rightmost_idx; } - idx_t result = _membership[int(which_partition)].find_last_set_bit(-1, last_index); + index_type result = _membership[int(which_partition)].find_last_set_bit(-1, last_index); if (result < leftmost_idx) { result = -1; } @@ -794,12 +799,13 @@ inline idx_t ShenandoahRegionPartitions::find_index_of_previous_available_region return result; } -inline idx_t ShenandoahRegionPartitions::find_index_of_next_available_cluster_of_regions( - ShenandoahFreeSetPartitionId which_partition, idx_t start_index, size_t cluster_size) const { - idx_t rightmost_idx = rightmost(which_partition); - idx_t leftmost_idx = leftmost(which_partition); +inline index_type ShenandoahRegionPartitions::find_index_of_next_available_cluster_of_regions( + ShenandoahFreeSetPartitionId which_partition, index_type start_index, size_t cluster_size) const { + index_type rightmost_idx = rightmost(which_partition); + index_type leftmost_idx = leftmost(which_partition); if ((rightmost_idx < leftmost_idx) || (start_index > rightmost_idx)) return _max; - idx_t result = _membership[int(which_partition)].find_first_consecutive_set_bits(start_index, rightmost_idx + 1, cluster_size); + index_type result = + _membership[int(which_partition)].find_first_consecutive_set_bits(start_index, rightmost_idx + 1, cluster_size); if (result > rightmost_idx) { result = _max; } @@ -807,12 +813,12 @@ inline idx_t ShenandoahRegionPartitions::find_index_of_next_available_cluster_of return result; } -inline idx_t ShenandoahRegionPartitions::find_index_of_previous_available_cluster_of_regions( - ShenandoahFreeSetPartitionId which_partition, idx_t last_index, size_t cluster_size) const { - idx_t leftmost_idx = leftmost(which_partition); +inline index_type ShenandoahRegionPartitions::find_index_of_previous_available_cluster_of_regions( + ShenandoahFreeSetPartitionId which_partition, index_type last_index, size_t cluster_size) const { + index_type leftmost_idx = leftmost(which_partition); // if (leftmost_idx == max) then (last_index < leftmost_idx) if (last_index < leftmost_idx) return -1; - idx_t 
result = _membership[int(which_partition)].find_last_consecutive_set_bits(leftmost_idx - 1, last_index, cluster_size); + index_type result = _membership[int(which_partition)].find_last_consecutive_set_bits(leftmost_idx - 1, last_index, cluster_size); if (result <= leftmost_idx) { result = -1; } @@ -820,13 +826,13 @@ inline idx_t ShenandoahRegionPartitions::find_index_of_previous_available_cluste return result; } -idx_t ShenandoahRegionPartitions::leftmost_empty(ShenandoahFreeSetPartitionId which_partition) { +index_type ShenandoahRegionPartitions::leftmost_empty(ShenandoahFreeSetPartitionId which_partition) { assert (which_partition < NumPartitions, "selected free partition must be valid"); - idx_t max_regions = _max; + index_type max_regions = _max; if (_leftmosts_empty[int(which_partition)] == _max) { return _max; } - for (idx_t idx = find_index_of_next_available_region(which_partition, _leftmosts_empty[int(which_partition)]); + for (index_type idx = find_index_of_next_available_region(which_partition, _leftmosts_empty[int(which_partition)]); idx < max_regions; ) { assert(in_free_set(which_partition, idx), "Boundaries or find_last_set_bit failed: %zd", idx); if (_free_set->alloc_capacity(idx) == _region_size_bytes) { @@ -840,12 +846,12 @@ idx_t ShenandoahRegionPartitions::leftmost_empty(ShenandoahFreeSetPartitionId wh return _max; } -idx_t ShenandoahRegionPartitions::rightmost_empty(ShenandoahFreeSetPartitionId which_partition) { +index_type ShenandoahRegionPartitions::rightmost_empty(ShenandoahFreeSetPartitionId which_partition) { assert (which_partition < NumPartitions, "selected free partition must be valid"); if (_rightmosts_empty[int(which_partition)] < 0) { return -1; } - for (idx_t idx = find_index_of_previous_available_region(which_partition, _rightmosts_empty[int(which_partition)]); + for (index_type idx = find_index_of_previous_available_region(which_partition, _rightmosts_empty[int(which_partition)]); idx >= 0; ) { assert(in_free_set(which_partition, idx), "Boundaries or find_last_set_bit failed: %zd", idx); if (_free_set->alloc_capacity(idx) == _region_size_bytes) { @@ -875,10 +881,10 @@ void ShenandoahRegionPartitions::assert_bounds(bool validate_totals) { size_t young_retired_capacity = 0; size_t young_humongous_waste = 0; - idx_t leftmosts[UIntNumPartitions]; - idx_t rightmosts[UIntNumPartitions]; - idx_t empty_leftmosts[UIntNumPartitions]; - idx_t empty_rightmosts[UIntNumPartitions]; + index_type leftmosts[UIntNumPartitions]; + index_type rightmosts[UIntNumPartitions]; + index_type empty_leftmosts[UIntNumPartitions]; + index_type empty_rightmosts[UIntNumPartitions]; for (uint i = 0; i < UIntNumPartitions; i++) { leftmosts[i] = _max; @@ -891,7 +897,7 @@ void ShenandoahRegionPartitions::assert_bounds(bool validate_totals) { humongous_waste[i] = 0; } - for (idx_t i = 0; i < _max; i++) { + for (index_type i = 0; i < _max; i++) { ShenandoahFreeSetPartitionId partition = membership(i); size_t capacity = _free_set->alloc_capacity(i); switch (partition) { @@ -978,8 +984,8 @@ void ShenandoahRegionPartitions::assert_bounds(bool validate_totals) { // If Mutator partition is empty, leftmosts will both equal max, rightmosts will both equal zero. // Likewise for empty region partitions. 
- idx_t beg_off = leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)]; - idx_t end_off = rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)]; + index_type beg_off = leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)]; + index_type end_off = rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)]; assert (beg_off >= leftmost(ShenandoahFreeSetPartitionId::Mutator), "Mutator free regions before the leftmost: %zd, bound %zd", beg_off, leftmost(ShenandoahFreeSetPartitionId::Mutator)); @@ -1234,7 +1240,7 @@ HeapWord* ShenandoahFreeSet::allocate_from_partition_with_affiliation(Shenandoah template HeapWord* ShenandoahFreeSet::allocate_with_affiliation(Iter& iterator, ShenandoahAffiliation affiliation, ShenandoahAllocRequest& req, bool& in_new_region) { - for (idx_t idx = iterator.current(); iterator.has_next(); idx = iterator.next()) { + for (index_type idx = iterator.current(); iterator.has_next(); idx = iterator.next()) { ShenandoahHeapRegion* r = _heap->get_region(idx); if (r->affiliation() == affiliation) { HeapWord* result = try_allocate_in(r, req, in_new_region); @@ -1319,9 +1325,9 @@ void ShenandoahFreeSet::update_allocation_bias() { // 1. Eventual collection set has fewer regions because we have packed newly allocated objects into fewer regions // 2. We preserve the "empty" regions longer into the GC cycle, reducing likelihood of allocation failures // late in the GC cycle. - idx_t non_empty_on_left = (_partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Mutator) + index_type non_empty_on_left = (_partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Mutator) - _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator)); - idx_t non_empty_on_right = (_partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator) + index_type non_empty_on_right = (_partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator) - _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Mutator)); _partitions.set_bias_from_left_to_right(ShenandoahFreeSetPartitionId::Mutator, (non_empty_on_right < non_empty_on_left)); _alloc_bias_weight = INITIAL_ALLOC_BIAS_WEIGHT; @@ -1330,7 +1336,7 @@ void ShenandoahFreeSet::update_allocation_bias() { template HeapWord* ShenandoahFreeSet::allocate_from_regions(Iter& iterator, ShenandoahAllocRequest &req, bool &in_new_region) { - for (idx_t idx = iterator.current(); iterator.has_next(); idx = iterator.next()) { + for (index_type idx = iterator.current(); iterator.has_next(); idx = iterator.next()) { ShenandoahHeapRegion* r = _heap->get_region(idx); size_t min_size = (req.type() == ShenandoahAllocRequest::_alloc_tlab) ? req.min_size() : req.size(); if (alloc_capacity(r) >= min_size * HeapWordSize) { @@ -1399,7 +1405,7 @@ HeapWord* ShenandoahFreeSet::try_allocate_from_mutator(ShenandoahAllocRequest& r // The collector prefers to keep longer lived regions toward the right side of the heap, so it always // searches for regions from right to left here. 
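
The hunk that continues below renames idx_t to index_type in a right-to-left search, and the comment just above explains why the collector walks from the high end of the heap. For readers unfamiliar with the iterator pair introduced earlier in this patch (ShenandoahLeftRightIterator and ShenandoahRightLeftIterator), here is a simplified, self-contained model of the same traversal pattern; the ToyPartition type and its linear scans are assumptions for illustration only, since the real code scans a ShenandoahSimpleBitMap between cached leftmost and rightmost bounds.

#include <bitset>
#include <cstdio>

// A toy partition: bit i set means region i belongs to this free-set partition.
struct ToyPartition {
  static const int Max = 16;
  std::bitset<Max> free;

  int rightmost() const {
    for (int i = Max - 1; i >= 0; i--) if (free[i]) return i;
    return -1;
  }
  int previous_free(int from) const {
    for (int i = from; i >= 0; i--) if (free[i]) return i;
    return -1;
  }
};

// Mirrors the shape of ShenandoahRightLeftIterator: start at the rightmost member and walk
// toward lower indices, so longer-lived (collector) data stays packed toward the right.
int main() {
  ToyPartition p;
  p.free[3] = p.free[7] = p.free[12] = true;
  for (int idx = p.rightmost(); idx >= 0; idx = p.previous_free(idx - 1)) {
    std::printf("visiting region %d\n", idx);        // prints 12, 7, 3
  }
  return 0;
}
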
ShenandoahRightLeftIterator iterator(&_partitions, ShenandoahFreeSetPartitionId::Mutator, true); - for (idx_t idx = iterator.current(); iterator.has_next(); idx = iterator.next()) { + for (index_type idx = iterator.current(); iterator.has_next(); idx = iterator.next()) { ShenandoahHeapRegion* r = _heap->get_region(idx); if (can_allocate_from(r)) { if (req.is_old()) { @@ -1639,29 +1645,29 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req, bo shenandoah_assert_heaplocked(); size_t words_size = req.size(); - idx_t num = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize); + index_type num = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize); assert(req.is_young(), "Humongous regions always allocated in YOUNG"); ShenandoahGeneration* generation = _heap->generation_for(req.affiliation()); // Check if there are enough regions left to satisfy allocation. - if (num > (idx_t) _partitions.count(ShenandoahFreeSetPartitionId::Mutator)) { + if (num > (index_type) _partitions.count(ShenandoahFreeSetPartitionId::Mutator)) { return nullptr; } - idx_t start_range = _partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Mutator); - idx_t end_range = _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Mutator) + 1; - idx_t last_possible_start = end_range - num; + index_type start_range = _partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Mutator); + index_type end_range = _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Mutator) + 1; + index_type last_possible_start = end_range - num; // Find the continuous interval of $num regions, starting from $beg and ending in $end, // inclusive. Contiguous allocations are biased to the beginning. - idx_t beg = _partitions.find_index_of_next_available_cluster_of_regions(ShenandoahFreeSetPartitionId::Mutator, + index_type beg = _partitions.find_index_of_next_available_cluster_of_regions(ShenandoahFreeSetPartitionId::Mutator, start_range, num); if (beg > last_possible_start) { // Hit the end, goodbye return nullptr; } - idx_t end = beg; + index_type end = beg; while (true) { // We've confirmed num contiguous regions belonging to Mutator partition, so no need to confirm membership. @@ -1669,12 +1675,12 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req, bo // the existing range, we can exploit that certain regions are already known to be in the Mutator free set. while (!can_allocate_from(_heap->get_region(end))) { // region[end] is not empty, so we restart our search after region[end] - idx_t slide_delta = end + 1 - beg; + index_type slide_delta = end + 1 - beg; if (beg + slide_delta > last_possible_start) { // no room to slide return nullptr; } - for (idx_t span_end = beg + num; slide_delta > 0; slide_delta--) { + for (index_type span_end = beg + num; slide_delta > 0; slide_delta--) { if (!_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, span_end)) { beg = _partitions.find_index_of_next_available_cluster_of_regions(ShenandoahFreeSetPartitionId::Mutator, span_end + 1, num); @@ -1708,7 +1714,7 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req, bo // Humongous allocation retires all regions at once: no allocation is possible anymore. // retire_range_from_partition() will adjust bounds on Mutator free set if appropriate and will recompute affiliated. 
_partitions.retire_range_from_partition(ShenandoahFreeSetPartitionId::Mutator, beg, end); - for (idx_t i = beg; i <= end; i++) { + for (index_type i = beg; i <= end; i++) { ShenandoahHeapRegion* r = _heap->get_region(i); assert(i == beg || _heap->get_region(i - 1)->index() + 1 == r->index(), "Should be contiguous"); r->try_recycle_under_lock(); @@ -1733,7 +1739,7 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req, bo } else { // Non-humongous allocation retires only the regions that cannot be used for allocation anymore. waste_bytes = 0; - for (idx_t i = beg; i <= end; i++) { + for (index_type i = beg; i <= end; i++) { ShenandoahHeapRegion* r = _heap->get_region(i); assert(i == beg || _heap->get_region(i - 1)->index() + 1 == r->index(), "Should be contiguous"); assert(r->is_empty(), "Should be empty"); @@ -1944,7 +1950,7 @@ bool ShenandoahFreeSet::flip_to_old_gc(ShenandoahHeapRegion* r) { // partition. // 1. Find a temporarily unusable trash region in the old collector partition ShenandoahRightLeftIterator iterator(&_partitions, ShenandoahFreeSetPartitionId::OldCollector, true); - idx_t unusable_trash = -1; + index_type unusable_trash = -1; for (unusable_trash = iterator.current(); iterator.has_next(); unusable_trash = iterator.next()) { const ShenandoahHeapRegion* region = _heap->get_region(unusable_trash); if (region->is_trash() && _heap->is_concurrent_weak_root_in_progress()) { @@ -2202,14 +2208,14 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_trashed_r ", mutator_empty: %zu, old_collector_empty: %zu", total_mutator_regions, total_old_collector_regions, mutator_empty, old_collector_empty); - idx_t rightmost_idx = (mutator_leftmost == max_regions)? -1: (idx_t) mutator_rightmost; - idx_t rightmost_empty_idx = (mutator_leftmost_empty == max_regions)? -1: (idx_t) mutator_rightmost_empty; + index_type rightmost_idx = (mutator_leftmost == max_regions)? -1: (index_type) mutator_rightmost; + index_type rightmost_empty_idx = (mutator_leftmost_empty == max_regions)? -1: (index_type) mutator_rightmost_empty; _partitions.establish_mutator_intervals(mutator_leftmost, rightmost_idx, mutator_leftmost_empty, rightmost_empty_idx, total_mutator_regions + young_cset_regions, mutator_empty, mutator_regions, mutator_used + young_cset_regions * region_size_bytes, mutator_humongous_waste); - rightmost_idx = (old_collector_leftmost == max_regions)? -1: (idx_t) old_collector_rightmost; - rightmost_empty_idx = (old_collector_leftmost_empty == max_regions)? -1: (idx_t) old_collector_rightmost_empty; + rightmost_idx = (old_collector_leftmost == max_regions)? -1: (index_type) old_collector_rightmost; + rightmost_empty_idx = (old_collector_leftmost_empty == max_regions)? 
-1: (index_type) old_collector_rightmost_empty; _partitions.establish_old_collector_intervals(old_collector_leftmost, rightmost_idx, old_collector_leftmost_empty, rightmost_empty_idx, total_old_collector_regions + old_cset_regions, @@ -2271,12 +2277,12 @@ void ShenandoahFreeSet::transfer_empty_regions_from_to(ShenandoahFreeSetPartitio const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); size_t transferred_regions = 0; size_t used_transfer = 0; - idx_t source_low_idx = _partitions.max(); - idx_t source_high_idx = -1; - idx_t dest_low_idx = _partitions.max(); - idx_t dest_high_idx = -1; + index_type source_low_idx = _partitions.max(); + index_type source_high_idx = -1; + index_type dest_low_idx = _partitions.max(); + index_type dest_high_idx = -1; ShenandoahLeftRightIterator iterator(&_partitions, source, true); - for (idx_t idx = iterator.current(); transferred_regions < num_regions && iterator.has_next(); idx = iterator.next()) { + for (index_type idx = iterator.current(); transferred_regions < num_regions && iterator.has_next(); idx = iterator.next()) { // Note: can_allocate_from() denotes that region is entirely empty if (can_allocate_from(idx)) { if (idx < source_low_idx) { @@ -2340,12 +2346,12 @@ size_t ShenandoahFreeSet::transfer_empty_regions_from_collector_set_to_mutator_s const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes(); size_t transferred_regions = 0; size_t used_transfer = 0; - idx_t collector_low_idx = _partitions.max(); - idx_t collector_high_idx = -1; - idx_t mutator_low_idx = _partitions.max(); - idx_t mutator_high_idx = -1; + index_type collector_low_idx = _partitions.max(); + index_type collector_high_idx = -1; + index_type mutator_low_idx = _partitions.max(); + index_type mutator_high_idx = -1; ShenandoahLeftRightIterator iterator(&_partitions, which_collector, true); - for (idx_t idx = iterator.current(); transferred_regions < max_xfer_regions && iterator.has_next(); idx = iterator.next()) { + for (index_type idx = iterator.current(); transferred_regions < max_xfer_regions && iterator.has_next(); idx = iterator.next()) { // Note: can_allocate_from() denotes that region is entirely empty if (can_allocate_from(idx)) { if (idx < collector_low_idx) { @@ -2402,13 +2408,13 @@ transfer_non_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPa size_t region_size_bytes = _partitions.region_size_bytes(); size_t transferred_regions = 0; size_t used_transfer = 0; - idx_t collector_low_idx = _partitions.max(); - idx_t collector_high_idx = -1; - idx_t mutator_low_idx = _partitions.max(); - idx_t mutator_high_idx = -1; + index_type collector_low_idx = _partitions.max(); + index_type collector_high_idx = -1; + index_type mutator_low_idx = _partitions.max(); + index_type mutator_high_idx = -1; ShenandoahLeftRightIterator iterator(&_partitions, which_collector, false); - for (idx_t idx = iterator.current(); transferred_regions < max_xfer_regions && iterator.has_next(); idx = iterator.next()) { + for (index_type idx = iterator.current(); transferred_regions < max_xfer_regions && iterator.has_next(); idx = iterator.next()) { size_t ac = alloc_capacity(idx); if (ac > 0) { if (idx < collector_low_idx) { @@ -2529,7 +2535,7 @@ void ShenandoahFreeSet::finish_rebuild(size_t young_trashed_regions, size_t old_ // Move some of the mutator regions into the Collector and OldCollector partitions in order to satisfy // young_reserve and old_reserve. 
- size_t young_used_regions, old_used_regions, young_used_bytes, old_used_bytes, affiliated_young_regions, affiliated_old_regions; + size_t young_used_regions, old_used_regions, young_used_bytes, old_used_bytes; reserve_regions(young_reserve, old_reserve, old_region_count, young_used_regions, old_used_regions, young_used_bytes, old_used_bytes); _total_young_regions = _heap->num_regions() - old_region_count; @@ -2654,20 +2660,20 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old young_used_bytes = 0; old_used_bytes = 0; - idx_t mutator_low_idx = _partitions.max(); - idx_t mutator_high_idx = -1; - idx_t mutator_empty_low_idx = _partitions.max(); - idx_t mutator_empty_high_idx = -1; + index_type mutator_low_idx = _partitions.max(); + index_type mutator_high_idx = -1; + index_type mutator_empty_low_idx = _partitions.max(); + index_type mutator_empty_high_idx = -1; - idx_t collector_low_idx = _partitions.max(); - idx_t collector_high_idx = -1; - idx_t collector_empty_low_idx = _partitions.max(); - idx_t collector_empty_high_idx = -1; + index_type collector_low_idx = _partitions.max(); + index_type collector_high_idx = -1; + index_type collector_empty_low_idx = _partitions.max(); + index_type collector_empty_high_idx = -1; - idx_t old_collector_low_idx = _partitions.max(); - idx_t old_collector_high_idx = -1; - idx_t old_collector_empty_low_idx = _partitions.max(); - idx_t old_collector_empty_high_idx = -1; + index_type old_collector_low_idx = _partitions.max(); + index_type old_collector_high_idx = -1; + index_type old_collector_empty_low_idx = _partitions.max(); + index_type old_collector_empty_high_idx = -1; size_t used_to_collector = 0; size_t used_to_old_collector = 0; @@ -2680,7 +2686,7 @@ void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old size_t collector_available = _partitions.available_in(ShenandoahFreeSetPartitionId::Collector); for (size_t i = _heap->num_regions(); i > 0; i--) { - idx_t idx = i - 1; + index_type idx = i - 1; ShenandoahHeapRegion* r = _heap->get_region(idx); if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx)) { // Note: trashed regions have region_size_bytes alloc capacity. 
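
The loop that begins in the hunk above walks the heap from the highest index down, inspecting regions still in the Mutator partition and deciding whether to move them into the Collector or OldCollector reserves. The sketch below is a deliberately stripped-down model of that pass: the fixed region size, the 64-region heap, and the order in which the two reserve targets are satisfied are all assumptions, and the real code also handles partially used regions and defers the used and capacity accounting until the loop finishes.

#include <cstddef>
#include <vector>

enum class Owner { Mutator, Collector, OldCollector };

int main() {
  const size_t region_bytes = 4 * 1024 * 1024;       // assumed region size
  std::vector<Owner> owner(64, Owner::Mutator);      // pretend all 64 regions start out free to mutators
  size_t to_reserve_old = 4 * region_bytes;          // old collector reserve target (illustrative)
  size_t to_reserve_young = 8 * region_bytes;        // young collector reserve target (illustrative)
  size_t old_reserved = 0;
  size_t young_reserved = 0;

  // Walk from the highest region index toward zero, as the reservation pass does.
  for (size_t i = owner.size(); i > 0; i--) {
    size_t idx = i - 1;
    if (owner[idx] != Owner::Mutator) {
      continue;                                      // only regions still free to mutators can be taken
    }
    if (old_reserved < to_reserve_old) {
      owner[idx] = Owner::OldCollector;              // which reserve is filled first is an assumption here
      old_reserved += region_bytes;
    } else if (young_reserved < to_reserve_young) {
      owner[idx] = Owner::Collector;
      young_reserved += region_bytes;
    } else {
      break;                                         // both reserves satisfied; leave the rest to mutators
    }
  }
  return 0;
}
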
@@ -2896,19 +2902,19 @@ void ShenandoahFreeSet::establish_old_collector_alloc_bias() { ShenandoahHeap* heap = ShenandoahHeap::heap(); shenandoah_assert_heaplocked(); - idx_t left_idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector); - idx_t right_idx = _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector); - idx_t middle = (left_idx + right_idx) / 2; + index_type left_idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector); + index_type right_idx = _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector); + index_type middle = (left_idx + right_idx) / 2; size_t available_in_first_half = 0; size_t available_in_second_half = 0; - for (idx_t index = left_idx; index < middle; index++) { + for (index_type index = left_idx; index < middle; index++) { if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::OldCollector, index)) { ShenandoahHeapRegion* r = heap->get_region((size_t) index); available_in_first_half += r->free(); } } - for (idx_t index = middle; index <= right_idx; index++) { + for (index_type index = middle; index <= right_idx; index++) { if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::OldCollector, index)) { ShenandoahHeapRegion* r = heap->get_region(index); available_in_second_half += r->free(); @@ -3010,7 +3016,7 @@ void ShenandoahFreeSet::log_status() { LogStream ls(lt); { - idx_t last_idx = 0; + index_type last_idx = 0; size_t max = 0; size_t max_contig = 0; size_t empty_contig = 0; @@ -3020,7 +3026,7 @@ void ShenandoahFreeSet::log_status() { size_t total_free_ext = 0; size_t total_trashed_free = 0; - for (idx_t idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator); + for (index_type idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator); idx <= _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator); idx++) { if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx)) { ShenandoahHeapRegion *r = _heap->get_region(idx); @@ -3086,7 +3092,7 @@ void ShenandoahFreeSet::log_status() { size_t total_free = 0; size_t total_used = 0; - for (idx_t idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector); + for (index_type idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector); idx <= _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector); idx++) { if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, idx)) { ShenandoahHeapRegion *r = _heap->get_region(idx); @@ -3107,7 +3113,7 @@ void ShenandoahFreeSet::log_status() { size_t total_free = 0; size_t total_used = 0; - for (idx_t idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector); + for (index_type idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector); idx <= _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector); idx++) { if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::OldCollector, idx)) { ShenandoahHeapRegion *r = _heap->get_region(idx); @@ -3169,19 +3175,19 @@ HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_ void ShenandoahFreeSet::print_on(outputStream* out) const { out->print_cr("Mutator Free Set: %zu", _partitions.count(ShenandoahFreeSetPartitionId::Mutator)); ShenandoahLeftRightIterator mutator(const_cast(&_partitions), ShenandoahFreeSetPartitionId::Mutator); - for (idx_t index = mutator.current(); mutator.has_next(); index = mutator.next()) { + for (index_type index = mutator.current(); mutator.has_next(); index = mutator.next()) { _heap->get_region(index)->print_on(out); } 
out->print_cr("Collector Free Set: %zu", _partitions.count(ShenandoahFreeSetPartitionId::Collector)); ShenandoahLeftRightIterator collector(const_cast(&_partitions), ShenandoahFreeSetPartitionId::Collector); - for (idx_t index = collector.current(); collector.has_next(); index = collector.next()) { + for (index_type index = collector.current(); collector.has_next(); index = collector.next()) { _heap->get_region(index)->print_on(out); } if (_heap->mode()->is_generational()) { out->print_cr("Old Collector Free Set: %zu", _partitions.count(ShenandoahFreeSetPartitionId::OldCollector)); - for (idx_t index = _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector); + for (index_type index = _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector); index <= _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector); index++) { if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::OldCollector, index)) { _heap->get_region(index)->print_on(out); @@ -3195,7 +3201,7 @@ double ShenandoahFreeSet::internal_fragmentation() { double linear = 0; ShenandoahLeftRightIterator iterator(&_partitions, ShenandoahFreeSetPartitionId::Mutator); - for (idx_t index = iterator.current(); iterator.has_next(); index = iterator.next()) { + for (index_type index = iterator.current(); iterator.has_next(); index = iterator.next()) { ShenandoahHeapRegion* r = _heap->get_region(index); size_t used = r->used(); squared += used * used; @@ -3211,13 +3217,13 @@ double ShenandoahFreeSet::internal_fragmentation() { } double ShenandoahFreeSet::external_fragmentation() { - idx_t last_idx = 0; + index_type last_idx = 0; size_t max_contig = 0; size_t empty_contig = 0; size_t free = 0; ShenandoahLeftRightIterator iterator(&_partitions, ShenandoahFreeSetPartitionId::Mutator); - for (idx_t index = iterator.current(); iterator.has_next(); index = iterator.next()) { + for (index_type index = iterator.current(); iterator.has_next(); index = iterator.next()) { ShenandoahHeapRegion* r = _heap->get_region(index); if (r->is_empty()) { free += ShenandoahHeapRegion::region_size_bytes(); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp index bf20b2def29f0..e7b52b02f840d 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp @@ -57,7 +57,7 @@ class ShenandoahRegionPartitions { static constexpr uint UIntNumPartitions = uint(ShenandoahFreeSetPartitionId::NotFree); private: - const idx_t _max; // The maximum number of heap regions + const index_type _max; // The maximum number of heap regions const size_t _region_size_bytes; const ShenandoahFreeSet* _free_set; // For each partition, we maintain a bitmap of which regions are affiliated with his partition. @@ -66,15 +66,15 @@ class ShenandoahRegionPartitions { // not to be found. This makes searches for free space more efficient. For each partition p, _leftmosts[p] // represents its least index, and its _rightmosts[p] its greatest index. Empty intervals are indicated by the // canonical [_max, -1]. - idx_t _leftmosts[UIntNumPartitions]; - idx_t _rightmosts[UIntNumPartitions]; + index_type _leftmosts[UIntNumPartitions]; + index_type _rightmosts[UIntNumPartitions]; // Allocation for humongous objects needs to find regions that are entirely empty. 
For each partion p, _leftmosts_empty[p] // represents the first region belonging to this partition that is completely empty and _rightmosts_empty[p] represents the // last region that is completely empty. If there is no completely empty region in this partition, this is represented // by the canonical [_max, -1]. - idx_t _leftmosts_empty[UIntNumPartitions]; - idx_t _rightmosts_empty[UIntNumPartitions]; + index_type _leftmosts_empty[UIntNumPartitions]; + index_type _rightmosts_empty[UIntNumPartitions]; // For each partition p: // _capacity[p] represents the total amount of memory within the partition, including retired regions, as adjusted @@ -133,8 +133,8 @@ class ShenandoahRegionPartitions { inline bool available_implies_empty(size_t available); #ifndef PRODUCT - void dump_bitmap_row(idx_t region_idx) const; - void dump_bitmap_range(idx_t start_region_idx, idx_t end_region_idx) const; + void dump_bitmap_row(index_type region_idx) const; + void dump_bitmap_range(index_type start_region_idx, index_type end_region_idx) const; void dump_bitmap() const; #endif public: @@ -143,7 +143,7 @@ class ShenandoahRegionPartitions { static const size_t FreeSetUnderConstruction = SIZE_MAX; - inline idx_t max() const { return _max; } + inline index_type max() const { return _max; } // At initialization, reset OldCollector tallies void initialize_old_collector(); @@ -166,84 +166,86 @@ class ShenandoahRegionPartitions { // Set the Mutator intervals, usage, and capacity according to arguments. Reset the Collector intervals, used, capacity // to represent empty Collector free set. We use this at the end of rebuild_free_set() to avoid the overhead of making // many redundant incremental adjustments to the mutator intervals as the free set is being rebuilt. - void establish_mutator_intervals(idx_t mutator_leftmost, idx_t mutator_rightmost, - idx_t mutator_leftmost_empty, idx_t mutator_rightmost_empty, + void establish_mutator_intervals(index_type mutator_leftmost, index_type mutator_rightmost, + index_type mutator_leftmost_empty, index_type mutator_rightmost_empty, size_t total_mutator_regions, size_t empty_mutator_regions, size_t mutator_region_count, size_t mutator_used, size_t mutator_humongous_words_waste); // Set the OldCollector intervals, usage, and capacity according to arguments. We use this at the end of rebuild_free_set() // to avoid the overhead of making many redundant incremental adjustments to the mutator intervals as the free set is being // rebuilt. 
- void establish_old_collector_intervals(idx_t old_collector_leftmost, idx_t old_collector_rightmost, - idx_t old_collector_leftmost_empty, idx_t old_collector_rightmost_empty, + void establish_old_collector_intervals(index_type old_collector_leftmost, index_type old_collector_rightmost, + index_type old_collector_leftmost_empty, index_type old_collector_rightmost_empty, size_t total_old_collector_region_count, size_t old_collector_empty, size_t old_collector_regions, size_t old_collector_used, size_t old_collector_humongous_words_waste); - void establish_interval(ShenandoahFreeSetPartitionId partition, idx_t low_idx, idx_t high_idx, - idx_t low_empty_idx, idx_t high_empty_idx); + void establish_interval(ShenandoahFreeSetPartitionId partition, index_type low_idx, index_type high_idx, + index_type low_empty_idx, index_type high_empty_idx); // Shrink the intervals associated with partition when region idx is removed from this free set - inline void shrink_interval_if_boundary_modified(ShenandoahFreeSetPartitionId partition, idx_t idx); + inline void shrink_interval_if_boundary_modified(ShenandoahFreeSetPartitionId partition, index_type idx); // Shrink the intervals associated with partition when regions low_idx through high_idx inclusive are removed from this free set void shrink_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId partition, - idx_t low_idx, idx_t high_idx, size_t num_regions); + index_type low_idx, index_type high_idx, size_t num_regions); - void expand_interval_if_boundary_modified(ShenandoahFreeSetPartitionId partition, idx_t idx, size_t capacity); + void expand_interval_if_boundary_modified(ShenandoahFreeSetPartitionId partition, index_type idx, size_t capacity); void expand_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId partition, - idx_t low_idx, idx_t high_idx, - idx_t low_empty_idx, idx_t high_empty_idx); + index_type low_idx, index_type high_idx, + index_type low_empty_idx, index_type high_empty_idx); // Retire region idx from within partition, , leaving its capacity and used as part of the original free partition's totals. // Requires that region idx is in in the Mutator or Collector partitions. Hereafter, identifies this region as NotFree. // Any remnant of available memory at the time of retirement is added to the original partition's total of used bytes. // Return the number of waste bytes (if any). - size_t retire_from_partition(ShenandoahFreeSetPartitionId p, idx_t idx, size_t used_bytes); + size_t retire_from_partition(ShenandoahFreeSetPartitionId p, index_type idx, size_t used_bytes); // Retire all regions between low_idx and high_idx inclusive from within partition. Requires that each region idx is // in the same Mutator or Collector partition. Hereafter, identifies each region as NotFree. Assumes that each region // is now considered fully used, since the region is presumably used to represent a humongous object. - void retire_range_from_partition(ShenandoahFreeSetPartitionId partition, idx_t low_idx, idx_t high_idx); + void retire_range_from_partition(ShenandoahFreeSetPartitionId partition, index_type low_idx, index_type high_idx); void unretire_to_partition(ShenandoahHeapRegion* region, ShenandoahFreeSetPartitionId which_partition); // Place region idx into free set which_partition. Requires that idx is currently NotFree. 
- void make_free(idx_t idx, ShenandoahFreeSetPartitionId which_partition, size_t region_capacity); + void make_free(index_type idx, ShenandoahFreeSetPartitionId which_partition, size_t region_capacity); // Place region idx into free partition new_partition, not adjusting used and capacity totals for the original and new partition. // available represents bytes that can still be allocated within this region. Requires that idx is currently not NotFree. - size_t move_from_partition_to_partition_with_deferred_accounting(idx_t idx, ShenandoahFreeSetPartitionId orig_partition, + size_t move_from_partition_to_partition_with_deferred_accounting(index_type idx, ShenandoahFreeSetPartitionId orig_partition, ShenandoahFreeSetPartitionId new_partition, size_t available); // Place region idx into free partition new_partition, adjusting used and capacity totals for the original and new partition. // available represents bytes that can still be allocated within this region. Requires that idx is currently not NotFree. - void move_from_partition_to_partition(idx_t idx, ShenandoahFreeSetPartitionId orig_partition, + void move_from_partition_to_partition(index_type idx, ShenandoahFreeSetPartitionId orig_partition, ShenandoahFreeSetPartitionId new_partition, size_t available); - const char* partition_membership_name(idx_t idx) const; + const char* partition_membership_name(index_type idx) const; // Return the index of the next available region >= start_index, or maximum_regions if not found. - inline idx_t find_index_of_next_available_region(ShenandoahFreeSetPartitionId which_partition, idx_t start_index) const; + inline index_type find_index_of_next_available_region(ShenandoahFreeSetPartitionId which_partition, + index_type start_index) const; // Return the index of the previous available region <= last_index, or -1 if not found. - inline idx_t find_index_of_previous_available_region(ShenandoahFreeSetPartitionId which_partition, idx_t last_index) const; + inline index_type find_index_of_previous_available_region(ShenandoahFreeSetPartitionId which_partition, + index_type last_index) const; // Return the index of the next available cluster of cluster_size regions >= start_index, or maximum_regions if not found. - inline idx_t find_index_of_next_available_cluster_of_regions(ShenandoahFreeSetPartitionId which_partition, - idx_t start_index, size_t cluster_size) const; + inline index_type find_index_of_next_available_cluster_of_regions(ShenandoahFreeSetPartitionId which_partition, + index_type start_index, size_t cluster_size) const; // Return the index of the previous available cluster of cluster_size regions <= last_index, or -1 if not found. - inline idx_t find_index_of_previous_available_cluster_of_regions(ShenandoahFreeSetPartitionId which_partition, - idx_t last_index, size_t cluster_size) const; + inline index_type find_index_of_previous_available_cluster_of_regions(ShenandoahFreeSetPartitionId which_partition, + index_type last_index, size_t cluster_size) const; - inline bool in_free_set(ShenandoahFreeSetPartitionId which_partition, idx_t idx) const { + inline bool in_free_set(ShenandoahFreeSetPartitionId which_partition, index_type idx) const { return _membership[int(which_partition)].is_set(idx); } // Returns the ShenandoahFreeSetPartitionId affiliation of region idx, NotFree if this region is not currently in any partition. // This does not enforce that free_set membership implies allocation capacity. 
- inline ShenandoahFreeSetPartitionId membership(idx_t idx) const { + inline ShenandoahFreeSetPartitionId membership(index_type idx) const { assert (idx < _max, "index is sane: %zu < %zu", idx, _max); ShenandoahFreeSetPartitionId result = ShenandoahFreeSetPartitionId::NotFree; for (uint partition_id = 0; partition_id < UIntNumPartitions; partition_id++) { @@ -258,7 +260,7 @@ class ShenandoahRegionPartitions { #ifdef ASSERT // Returns true iff region idx's membership is which_partition. If which_partition represents a free set, asserts // that the region has allocation capacity. - inline bool partition_id_matches(idx_t idx, ShenandoahFreeSetPartitionId which_partition) const; + inline bool partition_id_matches(index_type idx, ShenandoahFreeSetPartitionId which_partition) const; #endif inline size_t region_size_bytes() const { return _region_size_bytes; }; @@ -270,10 +272,10 @@ class ShenandoahRegionPartitions { // leftmost() and leftmost_empty() return _max, rightmost() and rightmost_empty() return 0 // otherwise, expect the following: // 0 <= leftmost <= leftmost_empty <= rightmost_empty <= rightmost < _max - inline idx_t leftmost(ShenandoahFreeSetPartitionId which_partition) const; - inline idx_t rightmost(ShenandoahFreeSetPartitionId which_partition) const; - idx_t leftmost_empty(ShenandoahFreeSetPartitionId which_partition); - idx_t rightmost_empty(ShenandoahFreeSetPartitionId which_partition); + inline index_type leftmost(ShenandoahFreeSetPartitionId which_partition) const; + inline index_type rightmost(ShenandoahFreeSetPartitionId which_partition) const; + index_type leftmost_empty(ShenandoahFreeSetPartitionId which_partition); + index_type rightmost_empty(ShenandoahFreeSetPartitionId which_partition); inline bool is_empty(ShenandoahFreeSetPartitionId which_partition) const; @@ -353,7 +355,7 @@ class ShenandoahRegionPartitions { assert(_available[int(which_partition)] == _capacity[int(which_partition)] - _used[int(which_partition)], "Expect available (%zu) equals capacity (%zu) - used (%zu) for partition %s", _available[int(which_partition)], _capacity[int(which_partition)], _used[int(which_partition)], - partition_membership_name(idx_t(which_partition))); + partition_membership_name(index_type(which_partition))); return _available[int(which_partition)]; } @@ -376,7 +378,7 @@ class ShenandoahRegionPartitions { (_available[int(which_partition)] == _capacity[int(which_partition)] - _used[int(which_partition)]), "Expect available (%zu) equals capacity (%zu) - used (%zu) for partition %s", _available[int(which_partition)], _capacity[int(which_partition)], _used[int(which_partition)], - partition_membership_name(idx_t(which_partition))); + partition_membership_name(index_type(which_partition))); #endif return _available[int(which_partition)]; } @@ -570,7 +572,8 @@ class ShenandoahFreeSet : public CHeapObj { // Search for allocation in region with same affiliation as request, using given iterator. template - HeapWord* allocate_with_affiliation(Iter& iterator, ShenandoahAffiliation affiliation, ShenandoahAllocRequest& req, bool& in_new_region); + HeapWord* allocate_with_affiliation(Iter& iterator, ShenandoahAffiliation affiliation, + ShenandoahAllocRequest& req, bool& in_new_region); // Return true if the respective generation for this request has free regions. 
bool can_allocate_in_new_region(const ShenandoahAllocRequest& req); @@ -614,7 +617,7 @@ class ShenandoahFreeSet : public CHeapObj { inline size_t max_regions() const { return _partitions.max(); } ShenandoahFreeSetPartitionId membership(size_t index) const { return _partitions.membership(index); } inline void shrink_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId partition, - idx_t low_idx, idx_t high_idx, size_t num_regions) { + index_type low_idx, index_type high_idx, size_t num_regions) { return _partitions.shrink_interval_if_range_modifies_either_boundary(partition, low_idx, high_idx, num_regions); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp index cbc9f0a4e1d37..dfe3022af49ec 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp @@ -239,7 +239,6 @@ void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) { worker_slices[i] = new ShenandoahHeapRegionSet(); } - ShenandoahGenerationalHeap::TransferResult result; { // The rest of code performs region moves, where region status is undefined // until all phases run together. @@ -1107,7 +1106,6 @@ void ShenandoahFullGC::phase4_compact_objects(ShenandoahHeapRegionSet** worker_s void ShenandoahFullGC::phase5_epilog() { GCTraceTime(Info, gc, phases) time("Phase 5: Full GC epilog", _gc_timer); ShenandoahHeap* heap = ShenandoahHeap::heap(); - ShenandoahGenerationalHeap::TransferResult result; // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer // and must ensure the bitmap is in sync. diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp index 2aa027c03713c..49f23ad01e6a5 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp @@ -540,7 +540,7 @@ size_t ShenandoahGeneration::select_aged_regions(size_t old_available) { // Sort the promotion-eligible regions in order of increasing live-data-bytes so that we can first reclaim regions that require // less evacuation effort. This prioritizes garbage first, expanding the allocation pool early before we reclaim regions that // have more live data. - const idx_t num_regions = heap->num_regions(); + const index_type num_regions = heap->num_regions(); ResourceMark rm; AgedRegionData* sorted_regions = NEW_RESOURCE_ARRAY(AgedRegionData, num_regions); @@ -548,14 +548,14 @@ size_t ShenandoahGeneration::select_aged_regions(size_t old_available) { ShenandoahFreeSet* freeset = heap->free_set(); // Any region that is to be promoted in place needs to be retired from its Collector or Mutator partition. 
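  // Illustrative note (editorial, not part of this patch): the pip_* variables below track, for the Collector
  // and Mutator partitions respectively, the inclusive index range of regions selected for promotion in place.
  // Each range starts out empty -- low initialized to freeset->max_regions() and high to -1 -- which is one of
  // the places the signed index_type sentinel value of -1 is relied upon. The accumulated ranges are presumably
  // what is later handed to shrink_interval_if_range_modifies_either_boundary() when those regions are retired
  // from their free-set partitions.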
- idx_t pip_low_collector_idx = freeset->max_regions(); - idx_t pip_high_collector_idx = -1; - idx_t pip_low_mutator_idx = freeset->max_regions(); - idx_t pip_high_mutator_idx = -1; + index_type pip_low_collector_idx = freeset->max_regions(); + index_type pip_high_collector_idx = -1; + index_type pip_low_mutator_idx = freeset->max_regions(); + index_type pip_high_mutator_idx = -1; size_t collector_regions_to_pip = 0; size_t mutator_regions_to_pip = 0; - for (idx_t i = 0; i < num_regions; i++) { + for (index_type i = 0; i < num_regions; i++) { ShenandoahHeapRegion* const r = heap->get_region(i); if (r->is_empty() || !r->has_live() || !r->is_young() || !r->is_regular()) { // skip over regions that aren't regular young with some live data diff --git a/src/hotspot/share/gc/shenandoah/shenandoahSimpleBitMap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahSimpleBitMap.cpp index 82a759e34dbdf..eb55755b1711f 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahSimpleBitMap.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahSimpleBitMap.cpp @@ -25,7 +25,7 @@ #include "gc/shenandoah/shenandoahSimpleBitMap.inline.hpp" -ShenandoahSimpleBitMap::ShenandoahSimpleBitMap(idx_t num_bits) : +ShenandoahSimpleBitMap::ShenandoahSimpleBitMap(index_type num_bits) : _num_bits(num_bits), _num_words(align_up(num_bits, BitsPerWord) / BitsPerWord), _bitmap(NEW_C_HEAP_ARRAY(uintx, _num_words, mtGC)) @@ -39,7 +39,7 @@ ShenandoahSimpleBitMap::~ShenandoahSimpleBitMap() { } } -size_t ShenandoahSimpleBitMap::count_leading_ones(idx_t start_idx) const { +size_t ShenandoahSimpleBitMap::count_leading_ones(index_type start_idx) const { assert((start_idx >= 0) && (start_idx < _num_bits), "precondition"); size_t array_idx = start_idx >> LogBitsPerWord; uintx element_bits = _bitmap[array_idx]; @@ -66,7 +66,7 @@ size_t ShenandoahSimpleBitMap::count_leading_ones(idx_t start_idx) const { return counted_ones + count_trailing_zeros(complement); } -size_t ShenandoahSimpleBitMap::count_trailing_ones(idx_t last_idx) const { +size_t ShenandoahSimpleBitMap::count_trailing_ones(index_type last_idx) const { assert((last_idx >= 0) && (last_idx < _num_bits), "precondition"); size_t array_idx = last_idx >> LogBitsPerWord; uintx element_bits = _bitmap[array_idx]; @@ -93,11 +93,11 @@ size_t ShenandoahSimpleBitMap::count_trailing_ones(idx_t last_idx) const { return counted_ones + count_leading_zeros(complement); } -bool ShenandoahSimpleBitMap::is_forward_consecutive_ones(idx_t start_idx, idx_t count) const { +bool ShenandoahSimpleBitMap::is_forward_consecutive_ones(index_type start_idx, index_type count) const { while (count > 0) { assert((start_idx >= 0) && (start_idx < _num_bits), "precondition: start_idx: %zd, count: %zd", start_idx, count); - assert(start_idx + count <= (idx_t) _num_bits, "precondition"); + assert(start_idx + count <= (index_type) _num_bits, "precondition"); size_t array_idx = start_idx >> LogBitsPerWord; uintx bit_number = start_idx & (BitsPerWord - 1); uintx element_bits = _bitmap[array_idx]; @@ -123,7 +123,7 @@ bool ShenandoahSimpleBitMap::is_forward_consecutive_ones(idx_t start_idx, idx_t return true; } -bool ShenandoahSimpleBitMap::is_backward_consecutive_ones(idx_t last_idx, idx_t count) const { +bool ShenandoahSimpleBitMap::is_backward_consecutive_ones(index_type last_idx, index_type count) const { while (count > 0) { assert((last_idx >= 0) && (last_idx < _num_bits), "precondition"); assert(last_idx - count >= -1, "precondition"); @@ -152,11 +152,11 @@ bool ShenandoahSimpleBitMap::is_backward_consecutive_ones(idx_t 
last_idx, idx_t
   return true;
 }

-idx_t ShenandoahSimpleBitMap::find_first_consecutive_set_bits(idx_t beg, idx_t end, size_t num_bits) const {
+index_type ShenandoahSimpleBitMap::find_first_consecutive_set_bits(index_type beg, index_type end, size_t num_bits) const {
   assert((beg >= 0) && (beg < _num_bits), "precondition");
   // Stop looking if there are not num_bits remaining in probe space.
-  idx_t start_boundary = end - num_bits;
+  index_type start_boundary = end - num_bits;
   if (beg > start_boundary) {
     return end;
   }
@@ -231,12 +231,12 @@ idx_t ShenandoahSimpleBitMap::find_first_consecutive_set_bits(idx_t beg, idx_t e
   }
 }

-idx_t ShenandoahSimpleBitMap::find_last_consecutive_set_bits(const idx_t beg, idx_t end, const size_t num_bits) const {
+index_type ShenandoahSimpleBitMap::find_last_consecutive_set_bits(const index_type beg, index_type end, const size_t num_bits) const {
   assert((end >= 0) && (end < _num_bits), "precondition");
   // Stop looking if there are not num_bits remaining in probe space.
-  idx_t last_boundary = beg + num_bits;
+  index_type last_boundary = beg + num_bits;
   if (end < last_boundary) {
     return beg;
   }

diff --git a/src/hotspot/share/gc/shenandoah/shenandoahSimpleBitMap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahSimpleBitMap.hpp
index 5127e8b221a53..7f86220d38e1f 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahSimpleBitMap.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahSimpleBitMap.hpp
@@ -35,29 +35,29 @@
 // 1. Allow searches from high to low memory (when biasing allocations towards the top of the heap)
 // 2. Allow searches for clusters of contiguous set bits (to expedite allocation for humongous objects)
 //
-// idx_t is defined here as ssize_t. In src/hotspot/share/utilities/bitMap.hpp, idx is defined as size_t.
+// index_type is defined here as ssize_t. In src/hotspot/share/utilities/bitMap.hpp, idx is defined as size_t.
 // This is a significant incompatibility.
 //
-// The API and internal implementation of ShenandoahSimpleBitMap and ShenandoahRegionPartitions use idx_t to
+// The API and internal implementation of ShenandoahSimpleBitMap and ShenandoahRegionPartitions use index_type to
 // represent index, even though index is "inherently" unsigned. There are two reasons for this choice:
 //  1. We use -1 as a sentinel value to represent empty partitions. This same value may be used to represent
 //     failure to find a previous set bit or previous range of set bits.
-//  2. Certain loops are written most naturally if the iterator, which may hold the sentinel -1 value, can be
+//  2. Certain loops are written most naturally if the induction variable, which may hold the sentinel -1 value, can be
 //     declared as signed and the terminating condition can be < 0.
-typedef ssize_t idx_t;
+typedef ssize_t index_type;

 // ShenandoahSimpleBitMap resembles CHeapBitMap but adds missing support for find_first_consecutive_set_bits() and
 // find_last_consecutive_set_bits(). An alternative refactoring of code would subclass CHeapBitMap, but this might
 // break abstraction rules, because efficient implementation requires assumptions about superclass internals that
 // might be violated through future software maintenance.
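// Illustrative usage sketch (editorial, not part of this patch; values assume a freshly constructed, cleared map):
//
//   ShenandoahSimpleBitMap map(512);                               // 512 bits, none set
//   map.set_bit(100);
//   map.set_bit(101);
//   index_type hi   = map.find_last_set_bit(511);                  // 101
//   index_type lo   = map.find_first_set_bit(0);                   // 100
//   index_type none = map.find_last_set_bit(99);                   // -1: no set bit in (-1, 99]
//   index_type run  = map.find_first_consecutive_set_bits(0, 2);   // 100: first run of two consecutive set bits
//
// The -1 result in the third query is the sentinel discussed above; it is representable only because
// index_type is signed.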
class ShenandoahSimpleBitMap { - const idx_t _num_bits; + const index_type _num_bits; const size_t _num_words; uintx* const _bitmap; public: - ShenandoahSimpleBitMap(idx_t num_bits); + ShenandoahSimpleBitMap(index_type num_bits); ~ShenandoahSimpleBitMap(); @@ -71,42 +71,42 @@ class ShenandoahSimpleBitMap { // Count consecutive ones in forward order, starting from start_idx. Requires that there is at least one zero // between start_idx and index value (_num_bits - 1), inclusive. - size_t count_leading_ones(idx_t start_idx) const; + size_t count_leading_ones(index_type start_idx) const; // Count consecutive ones in reverse order, starting from last_idx. Requires that there is at least one zero // between last_idx and index value zero, inclusive. - size_t count_trailing_ones(idx_t last_idx) const; + size_t count_trailing_ones(index_type last_idx) const; - bool is_forward_consecutive_ones(idx_t start_idx, idx_t count) const; - bool is_backward_consecutive_ones(idx_t last_idx, idx_t count) const; + bool is_forward_consecutive_ones(index_type start_idx, index_type count) const; + bool is_backward_consecutive_ones(index_type last_idx, index_type count) const; static inline uintx tail_mask(uintx bit_number); public: - inline idx_t aligned_index(idx_t idx) const { + inline index_type aligned_index(index_type idx) const { assert((idx >= 0) && (idx < _num_bits), "precondition"); - idx_t array_idx = idx & ~(BitsPerWord - 1); + index_type array_idx = idx & ~(BitsPerWord - 1); return array_idx; } - inline constexpr idx_t alignment() const { + inline constexpr index_type alignment() const { return BitsPerWord; } // For testing - inline idx_t size() const { + inline index_type size() const { return _num_bits; } // Return the word that holds idx bit and its neighboring bits. - inline uintx bits_at(idx_t idx) const { + inline uintx bits_at(index_type idx) const { assert((idx >= 0) && (idx < _num_bits), "precondition"); - idx_t array_idx = idx >> LogBitsPerWord; + index_type array_idx = idx >> LogBitsPerWord; return _bitmap[array_idx]; } - inline void set_bit(idx_t idx) { + inline void set_bit(index_type idx) { assert((idx >= 0) && (idx < _num_bits), "precondition"); size_t array_idx = idx >> LogBitsPerWord; uintx bit_number = idx & (BitsPerWord - 1); @@ -114,7 +114,7 @@ class ShenandoahSimpleBitMap { _bitmap[array_idx] |= the_bit; } - inline void clear_bit(idx_t idx) { + inline void clear_bit(index_type idx) { assert((idx >= 0) && (idx < _num_bits), "precondition"); size_t array_idx = idx >> LogBitsPerWord; uintx bit_number = idx & (BitsPerWord - 1); @@ -122,7 +122,7 @@ class ShenandoahSimpleBitMap { _bitmap[array_idx] &= ~the_bit; } - inline bool is_set(idx_t idx) const { + inline bool is_set(index_type idx) const { assert((idx >= 0) && (idx < _num_bits), "precondition"); size_t array_idx = idx >> LogBitsPerWord; uintx bit_number = idx & (BitsPerWord - 1); @@ -132,39 +132,39 @@ class ShenandoahSimpleBitMap { // Return the index of the first set bit in the range [beg, size()), or size() if none found. // precondition: beg and end form a valid range for the bitmap. - inline idx_t find_first_set_bit(idx_t beg) const; + inline index_type find_first_set_bit(index_type beg) const; // Return the index of the first set bit in the range [beg, end), or end if none found. // precondition: beg and end form a valid range for the bitmap. 
-  inline idx_t find_first_set_bit(idx_t beg, idx_t end) const;
+  inline index_type find_first_set_bit(index_type beg, index_type end) const;

   // Return the index of the last set bit in the range (-1, end], or -1 if none found.
   // precondition: beg and end form a valid range for the bitmap.
-  inline idx_t find_last_set_bit(idx_t end) const;
+  inline index_type find_last_set_bit(index_type end) const;

   // Return the index of the last set bit in the range (beg, end], or beg if none found.
   // precondition: beg and end form a valid range for the bitmap.
-  inline idx_t find_last_set_bit(idx_t beg, idx_t end) const;
+  inline index_type find_last_set_bit(index_type beg, index_type end) const;

   // Return the start index of the first run of consecutive set bits for which the first set bit is within
   // the range [beg, size()), or size() if the run is not found within this range.
   // precondition: beg is within the valid range for the bitmap.
-  inline idx_t find_first_consecutive_set_bits(idx_t beg, size_t num_bits) const;
+  inline index_type find_first_consecutive_set_bits(index_type beg, size_t num_bits) const;

   // Return the start index of the first run of consecutive set bits for which the first set bit is within
   // the range [beg, end), or end if the run is not found within this range.
   // precondition: beg and end form a valid range for the bitmap.
-  idx_t find_first_consecutive_set_bits(idx_t beg, idx_t end, size_t num_bits) const;
+  index_type find_first_consecutive_set_bits(index_type beg, index_type end, size_t num_bits) const;

   // Return the start index of the last run of consecutive set bits for which the entire run of set bits is within
   // the range (-1, end], or -1 if the run is not found within this range.
   // precondition: end is within the valid range for the bitmap.
-  inline idx_t find_last_consecutive_set_bits(idx_t end, size_t num_bits) const;
+  inline index_type find_last_consecutive_set_bits(index_type end, size_t num_bits) const;

   // Return the start index of the last run of consecutive set bits for which the entire run of set bits is within
   // the range (beg, end], or beg if the run is not found within this range.
   // precondition: beg and end form a valid range for the bitmap.
-  idx_t find_last_consecutive_set_bits(idx_t beg, idx_t end, size_t num_bits) const;
+  index_type find_last_consecutive_set_bits(index_type beg, index_type end, size_t num_bits) const;
 };

 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHSIMPLEBITMAP_HPP

diff --git a/src/hotspot/share/gc/shenandoah/shenandoahSimpleBitMap.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahSimpleBitMap.inline.hpp
index 4582ab9a781dd..423c876a880d0 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahSimpleBitMap.inline.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahSimpleBitMap.inline.hpp
@@ -34,7 +34,7 @@ inline uintx ShenandoahSimpleBitMap::tail_mask(uintx bit_number) {
   return (uintx(1) << bit_number) - 1;
 }

-inline idx_t ShenandoahSimpleBitMap::find_first_set_bit(idx_t beg, idx_t end) const {
+inline index_type ShenandoahSimpleBitMap::find_first_set_bit(index_type beg, index_type end) const {
   assert((beg >= 0) && (beg < _num_bits), "precondition");
   assert((end > beg) && (end <= _num_bits), "precondition");
   do {
@@ -49,7 +49,7 @@ inline idx_t ShenandoahSimpleBitMap::find_first_set_bit(idx_t beg, idx_t end) co
     // The next set bit is here.
Find first set bit >= bit_number; uintx aligned = element_bits >> bit_number; uintx first_set_bit = count_trailing_zeros(aligned); - idx_t candidate_result = (array_idx * BitsPerWord) + bit_number + first_set_bit; + index_type candidate_result = (array_idx * BitsPerWord) + bit_number + first_set_bit; return (candidate_result < end)? candidate_result: end; } else { // Next bit is not here. Try the next array element @@ -59,16 +59,16 @@ inline idx_t ShenandoahSimpleBitMap::find_first_set_bit(idx_t beg, idx_t end) co return end; } -inline idx_t ShenandoahSimpleBitMap::find_first_set_bit(idx_t beg) const { +inline index_type ShenandoahSimpleBitMap::find_first_set_bit(index_type beg) const { assert((beg >= 0) && (beg < size()), "precondition"); return find_first_set_bit(beg, size()); } -inline idx_t ShenandoahSimpleBitMap::find_last_set_bit(idx_t beg, idx_t end) const { +inline index_type ShenandoahSimpleBitMap::find_last_set_bit(index_type beg, index_type end) const { assert((end >= 0) && (end < _num_bits), "precondition"); assert((beg >= -1) && (beg < end), "precondition"); do { - idx_t array_idx = end >> LogBitsPerWord; + index_type array_idx = end >> LogBitsPerWord; uint8_t bit_number = end & (BitsPerWord - 1); uintx element_bits = _bitmap[array_idx]; if (bit_number < BitsPerWord - 1){ @@ -79,7 +79,7 @@ inline idx_t ShenandoahSimpleBitMap::find_last_set_bit(idx_t beg, idx_t end) con // The prev set bit is here. Find the first set bit <= bit_number uintx aligned = element_bits << (BitsPerWord - (bit_number + 1)); uintx first_set_bit = count_leading_zeros(aligned); - idx_t candidate_result = array_idx * BitsPerWord + (bit_number - first_set_bit); + index_type candidate_result = array_idx * BitsPerWord + (bit_number - first_set_bit); return (candidate_result > beg)? candidate_result: beg; } else { // Next bit is not here. 
Try the previous array element @@ -89,19 +89,19 @@ inline idx_t ShenandoahSimpleBitMap::find_last_set_bit(idx_t beg, idx_t end) con return beg; } -inline idx_t ShenandoahSimpleBitMap::find_last_set_bit(idx_t end) const { +inline index_type ShenandoahSimpleBitMap::find_last_set_bit(index_type end) const { assert((end >= 0) && (end < _num_bits), "precondition"); return find_last_set_bit(-1, end); } -inline idx_t ShenandoahSimpleBitMap::find_first_consecutive_set_bits(idx_t beg, size_t num_bits) const { +inline index_type ShenandoahSimpleBitMap::find_first_consecutive_set_bits(index_type beg, size_t num_bits) const { assert((beg >= 0) && (beg < _num_bits), "precondition"); return find_first_consecutive_set_bits(beg, size(), num_bits); } -inline idx_t ShenandoahSimpleBitMap::find_last_consecutive_set_bits(idx_t end, size_t num_bits) const { +inline index_type ShenandoahSimpleBitMap::find_last_consecutive_set_bits(index_type end, size_t num_bits) const { assert((end >= 0) && (end < _num_bits), "precondition"); - return find_last_consecutive_set_bits((idx_t) -1, end, num_bits); + return find_last_consecutive_set_bits((index_type) -1, end, num_bits); } #endif // SHARE_GC_SHENANDOAH_SHENANDOAHSIMPLEBITMAP_INLINE_HPP From c85a4544adda1884edc590818adfc4dfe689c602 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Wed, 20 Aug 2025 22:34:56 +0000 Subject: [PATCH 60/61] add override qualifier --- src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp index 8c453c1a074a5..e50ecb5cf4d67 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp @@ -163,7 +163,7 @@ class ShenandoahGeneration : public CHeapObj, public ShenandoahSpaceInfo { // max heap size will cause the adaptive heuristic to run more frequent cycles. 
size_t soft_available() const override; - size_t bytes_allocated_since_gc_start() const { + size_t bytes_allocated_since_gc_start() const override { if (_type == ShenandoahGenerationType::YOUNG) { return _free_set->get_bytes_allocated_since_gc_start(); } else if (ShenandoahHeap::heap()->mode()->is_generational() && (_type == ShenandoahGenerationType::NON_GEN)) { From 666b566a56e69c28e0976cb4426e47386b7a4c04 Mon Sep 17 00:00:00 2001 From: Kelvin Nilsen Date: Fri, 22 Aug 2025 19:08:15 +0000 Subject: [PATCH 61/61] fix problem with traditional shenandoah bytes_allocated_since_gc --- .../gc/shenandoah/shenandoahGeneration.cpp | 27 +++++++++++++- .../gc/shenandoah/shenandoahGeneration.hpp | 37 +++++++++++++++++-- 2 files changed, 59 insertions(+), 5 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp index 49f23ad01e6a5..4325aba8eadcf 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp @@ -921,6 +921,9 @@ size_t ShenandoahGeneration::used_regions() const { result = _free_set->global_affiliated_regions(); break; } +#ifdef KELVIN_DEBUG + log_info(gc)("used_regions(_type: %d) returning %zu", _type, result); +#endif return result; } @@ -939,6 +942,9 @@ size_t ShenandoahGeneration::max_capacity() const { total_regions = _free_set->total_global_regions(); break; } +#ifdef KELVIN_DEBUG + log_info(gc)("max_capacity(_type: %d) returning %zu", _type, total_regions * ShenandoahHeapRegion::region_size_bytes()); +#endif return total_regions * ShenandoahHeapRegion::region_size_bytes(); } @@ -957,6 +963,9 @@ size_t ShenandoahGeneration::free_unaffiliated_regions() const { free_regions = _free_set->global_unaffiliated_regions(); break; } +#ifdef KELVIN_DEBUG + log_info(gc)("free_unaffiliated_regions(_type: %d) returning %zu", _type, free_regions); +#endif return free_regions; } @@ -975,25 +984,41 @@ size_t ShenandoahGeneration::used_regions_size() const { used_regions = _free_set->global_affiliated_regions(); break; } +#ifdef KELVIN_DEBUG + log_info(gc)("used_regions_size(_type: %d) returning %zu", _type, used_regions * ShenandoahHeapRegion::region_size_bytes()); +#endif return used_regions * ShenandoahHeapRegion::region_size_bytes(); } size_t ShenandoahGeneration::available() const { +#ifdef KELVIN_DEBUG + log_info(gc)("available(_type: %d) returning %zu", _type, available(max_capacity())); +#endif return available(max_capacity()); } // For ShenandoahYoungGeneration, Include the young available that may have been reserved for the Collector. size_t ShenandoahGeneration::available_with_reserve() const { +#ifdef KELVIN_DEBUG + log_info(gc)("available_with_reserve(_type: %d) returning %zu", _type, available(max_capacity())); +#endif return available(max_capacity()); } size_t ShenandoahGeneration::soft_available() const { +#ifdef KELVIN_DEBUG + log_info(gc)("soft_available(_type: %d) returning %zu", _type, available(ShenandoahHeap::heap()->soft_max_capacity())); +#endif return available(ShenandoahHeap::heap()->soft_max_capacity()); } size_t ShenandoahGeneration::available(size_t capacity) const { size_t in_use = used(); - return in_use > capacity ? 0 : capacity - in_use; + size_t result = in_use > capacity ? 
0 : capacity - in_use; +#ifdef KELVIN_DEBUG + log_info(gc)("available(_type: %d, capacity: %zu) returning %zu", _type, capacity, result); +#endif + return result; } void ShenandoahGeneration::record_success_concurrent(bool abbreviated) { diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp index e50ecb5cf4d67..5bdb3c1d66aef 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp @@ -147,6 +147,10 @@ class ShenandoahGeneration : public CHeapObj, public ShenandoahSpaceInfo { result = _free_set->global_used(); break; } +#undef KELVIN_DEBUG +#ifdef KELVIN_DEBUG + log_info(gc)("used(_type: %d) returning %zu", _type, result); +#endif return result; } @@ -154,6 +158,9 @@ class ShenandoahGeneration : public CHeapObj, public ShenandoahSpaceInfo { size_t available_with_reserve() const; size_t used_including_humongous_waste() const { // In the current implementation, used() includes humongous waste +#ifdef KELVIN_DEBUG + log_info(gc)("used_including_humongous_waste(_type: %d) returning %zu", _type, used()); +#endif return used(); } @@ -165,11 +172,24 @@ class ShenandoahGeneration : public CHeapObj, public ShenandoahSpaceInfo { size_t bytes_allocated_since_gc_start() const override { if (_type == ShenandoahGenerationType::YOUNG) { - return _free_set->get_bytes_allocated_since_gc_start(); - } else if (ShenandoahHeap::heap()->mode()->is_generational() && (_type == ShenandoahGenerationType::NON_GEN)) { - return _free_set->get_bytes_allocated_since_gc_start(); + size_t result = _free_set->get_bytes_allocated_since_gc_start(); +#ifdef KELVIN_DEBUG + log_info(gc)("bytes_allocated_since_gc_start(_type: %d) returning %zu", _type, result); +#endif + return result; + } else if (_type == ShenandoahGenerationType::NON_GEN) { + assert(!ShenandoahHeap::heap()->mode()->is_generational(), "NON_GEN implies not generational"); + size_t result = _free_set->get_bytes_allocated_since_gc_start(); +#ifdef KELVIN_DEBUG + log_info(gc)("bytes_allocated_since_gc_start(_type: %d) returning %zu", _type, result); +#endif + return result; } else { - return 0; + size_t result = 0; +#ifdef KELVIN_DEBUG + log_info(gc)("bytes_allocated_since_gc_start(_type: %d) returning %zu", _type, result); +#endif + return result; } } @@ -247,6 +267,9 @@ class ShenandoahGeneration : public CHeapObj, public ShenandoahSpaceInfo { result = _free_set->global_affiliated_regions(); break; } +#ifdef KELVIN_DEBUG + log_info(gc)("get_affiliated_region_count(_type: %d) returning %zu", _type, result); +#endif return result; } @@ -265,6 +288,9 @@ class ShenandoahGeneration : public CHeapObj, public ShenandoahSpaceInfo { result = _free_set->total_global_regions(); break; } +#ifdef KELVIN_DEBUG + log_info(gc)("get_total_region_count(_type: %d) returning %zu", _type, result); +#endif return result; } @@ -283,6 +309,9 @@ class ShenandoahGeneration : public CHeapObj, public ShenandoahSpaceInfo { result = _free_set->total_humongous_waste(); break; } +#ifdef KELVIN_DEBUG + log_info(gc)("get_humongous_waste()(_type: %d) returning %zu", _type, result); +#endif return result; }
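For orientation, the final hunk above can be summarized without the KELVIN_DEBUG tracing, which, as the patch
stands, appears to compile out (the header #undefs the symbol and nothing defines it, so the blocks are inert
unless a developer defines KELVIN_DEBUG locally). The sketch below is editorial, uses stand-in types rather than
the real ShenandoahGenerationType and ShenandoahFreeSet, and only mirrors the control flow of the patched
bytes_allocated_since_gc_start().

#include <cassert>
#include <cstddef>

// Stand-in for ShenandoahGenerationType; enumerator names are illustrative only.
enum class GenType { Young, Old, Global, NonGen };

// Simplified sketch of the dispatch: YOUNG and NON_GEN read the free set's allocation counter,
// and NON_GEN is asserted to occur only in the traditional (non-generational) mode.
size_t bytes_allocated_since_gc_start_sketch(GenType type, bool heap_is_generational, size_t free_set_counter) {
  if (type == GenType::Young) {
    return free_set_counter;        // young generation: counter maintained by the free set
  }
  if (type == GenType::NonGen) {
    assert(!heap_is_generational && "NON_GEN implies not generational");
    return free_set_counter;        // traditional Shenandoah: same counter, no generations
  }
  return 0;                         // remaining generation types report zero from this accessor
}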