Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 10 additions & 2 deletions src/hotspot/share/gc/shared/markBitMap.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -79,11 +79,19 @@ class MarkBitMap {
}

// Return the address corresponding to the next marked bit at or after
// "addr", and before "limit", if "limit" is non-null. If there is no
// such bit, returns "limit" if that is non-null, or else "endWord()".
// "addr", and before "limit", which are required to be non-null. In other
// words, we look for a marked address in the range [addr, limit). If there is no
// such bit, returns "limit".
inline HeapWord* get_next_marked_addr(const HeapWord* addr,
HeapWord* limit) const;

// Return the address corresponding to the last marked bit before
// "addr" but at or after "limit", which are required to be non-null.
// In other words, we look for a marked address in the range [limit, addr).
// If there is no such bit, returns "addr".
inline HeapWord* get_last_marked_addr(HeapWord* limit,
const HeapWord* addr) const;

void print_on(outputStream* st, const char* prefix) const;

// Write marks.
Expand Down
15 changes: 14 additions & 1 deletion src/hotspot/share/gc/shared/markBitMap.inline.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -36,13 +36,26 @@
inline HeapWord* MarkBitMap::get_next_marked_addr(const HeapWord* const addr,
                                                  HeapWord* const limit) const {
  // Forward search: find the first set bit for an address in [addr, limit).
  // The underlying bitmap search yields limit when no bit is set in range.
  assert(limit != nullptr, "limit must not be null");
  assert(addr != nullptr, "addr must not be null");
  // Begin the search at the bitmap's alignment boundary at or above addr.
  size_t const search_begin = addr_to_offset(align_up(addr, HeapWordSize << _shifter));
  size_t const search_end = addr_to_offset(limit);
  return offset_to_addr(_bm.find_first_set_bit(search_begin, search_end));
}

inline HeapWord* MarkBitMap::get_last_marked_addr(HeapWord* const limit,
                                                  const HeapWord* const addr) const {
  // Backward search: find the last set bit for an address in [limit, addr).
  // The underlying bitmap search yields addr's offset when none is set.
  assert(limit != nullptr, "limit must not be null");
  assert(addr != nullptr, "addr must not be null");
  size_t const search_begin = addr_to_offset(limit);
  // Search back from the bitmap's alignment boundary at or below addr.
  size_t const search_end = addr_to_offset(align_down(addr, HeapWordSize << _shifter));
  return offset_to_addr(_bm.find_last_set_bit(search_begin, search_end));
}


inline void MarkBitMap::mark(HeapWord* addr) {
check_mark(addr);
_bm.set_bit(addr_to_offset(addr));
Expand Down
19 changes: 19 additions & 0 deletions src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -58,6 +58,25 @@ bool ShenandoahMarkBitMap::is_bitmap_clear_range(const HeapWord* start, const He
}


// Return the last marked address in the range (limit, addr], or limit if no
// marked address is found in that range (see contract in shenandoahMarkBitMap.hpp).
// Both limit and addr must be non-null.
HeapWord* ShenandoahMarkBitMap::get_last_marked_addr(const HeapWord* limit,
                                                     const HeapWord* addr) const {
#ifdef ASSERT
  // Check null before dereferencing addr via heap_region_containing below.
  assert(limit != nullptr, "limit must not be null");
  assert(addr != nullptr, "addr must not be null");
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahHeapRegion* r = heap->heap_region_containing(addr);
  ShenandoahMarkingContext* ctx = heap->marking_context();
  HeapWord* tams = ctx->top_at_mark_start(r);
  assert(limit >= r->bottom(), "limit must be at or above bottom");
  assert(addr <= tams, "addr must be at or below TAMS");
#endif

  // Round addr down to the bitmap's alignment boundary to start looking back.
  size_t const addr_offset = address_to_index(align_down(addr, HeapWordSize << LogMinObjAlignment));
  size_t const limit_offset = address_to_index(limit);
  size_t const lastOffset = get_last_one_offset(limit_offset, addr_offset);
  return index_to_address(lastOffset);
}

HeapWord* ShenandoahMarkBitMap::get_next_marked_addr(const HeapWord* addr,
const HeapWord* limit) const {
#ifdef ASSERT
Expand Down
19 changes: 16 additions & 3 deletions src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -119,7 +119,16 @@
template<bm_word_t flip, bool aligned_right>
inline idx_t get_next_bit_impl(idx_t l_index, idx_t r_index) const;

// Helper for get_last_{zero,one}_bit variants.
// - flip designates whether searching for 1s or 0s. Must be one of
// find_{zeros,ones}_flip.
// - aligned_left is true if l_index is a priori on a bm_word_t boundary.
template<bm_word_t flip, bool aligned_left>
inline idx_t get_last_bit_impl(idx_t l_index, idx_t r_index) const;


inline idx_t get_next_one_offset (idx_t l_index, idx_t r_index) const;
inline idx_t get_last_one_offset (idx_t l_index, idx_t r_index) const;

void clear_large_range (idx_t beg, idx_t end);

Expand Down Expand Up @@ -162,12 +171,16 @@

bool is_bitmap_clear_range(const HeapWord* start, const HeapWord* end) const;

// Return the address corresponding to the next marked bit at or after
// "addr", and before "limit", if "limit" is non-null. If there is no
// such bit, returns "limit" if that is non-null, or else "endWord()".
// Return the first marked address in the range [addr, limit), or limit
// if none found.
HeapWord* get_next_marked_addr(const HeapWord* addr,
const HeapWord* limit) const;

// Return the last marked address in the range (limit, addr], or limit
// if none found.

Check failure on line 180 in src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.hpp

View check run for this annotation

openjdk / jcheck-openjdk/jdk-26865

Whitespace error

Column 19: trailing whitespace
HeapWord* get_last_marked_addr(const HeapWord* limit,
const HeapWord* addr) const;

bm_word_t inverted_bit_mask_for_range(idx_t beg, idx_t end) const;
void clear_range_within_word (idx_t beg, idx_t end);
void clear_range (idx_t beg, idx_t end);
Expand Down
67 changes: 67 additions & 0 deletions src/hotspot/share/gc/shenandoah/shenandoahMarkBitMap.inline.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -171,10 +171,77 @@ inline ShenandoahMarkBitMap::idx_t ShenandoahMarkBitMap::get_next_bit_impl(idx_t
return r_index;
}

// Backward search for the highest "interesting" (flipped-set) bit at or below
// r_index but above l_index. Returns l_index when there is no such bit;
// callers use l_index as the "none found" sentinel.
template<ShenandoahMarkBitMap::bm_word_t flip, bool aligned_left>
inline ShenandoahMarkBitMap::idx_t ShenandoahMarkBitMap::get_last_bit_impl(idx_t l_index, idx_t r_index) const {
  STATIC_ASSERT(flip == find_ones_flip || flip == find_zeros_flip);
  verify_range(l_index, r_index);
  assert(!aligned_left || is_aligned(l_index, BitsPerWord), "l_index not aligned");

  // The word containing r_index often contains an interesting bit, either
  // due to density or because of features of the calling algorithm. So it's
  // important to examine that word with a minimum of fuss, minimizing setup
  // time for later words that will be wasted if the first word is
  // interesting.
  //
  // NOTE(review): count_leading_zeros requires utilities/count_leading_zeros.hpp;
  // confirm this file includes it (directly or transitively).

  if (l_index < r_index) {
    // Get the word containing r_index, and shift out the bits above r_index
    // by moving the bit for r_index into the most significant position.
    idx_t index = to_words_align_down(r_index);
    bm_word_t cword = (map(index) ^ flip) << (BitsPerWord - 1 - bit_in_word(r_index));
    if ((cword & (bm_word_t(1) << (BitsPerWord - 1))) != 0) {
      // The bit at r_index itself is interesting. When it matters (density
      // or features of the calling algorithm make it likely), checking it
      // directly is cheaper than going through count_leading_zeros; when it
      // isn't set, the cost of this test is small relative to the rest of
      // the search.
      return r_index;
    } else if (cword != 0) {
      // Some bit below r_index in the same word is interesting. Its
      // distance below r_index equals the number of leading zeros after
      // the shift above.
      idx_t result = r_index - count_leading_zeros(cword);
      if (aligned_left || (result > l_index)) return result;
      // Result is at or below the range bound; return l_index.
    } else {
      // Word containing r_index has no interesting bit below r_index.
      // Word search downward, through the word containing l_index. Bits of
      // that word lying at or below l_index are rejected by the range
      // check on result.
      idx_t limit = to_words_align_down(l_index);
      while (index-- > limit) {
        cword = map(index) ^ flip;
        if (cword != 0) {
          // Highest interesting bit within this word.
          idx_t result = bit_index(index) + (BitsPerWord - 1) - count_leading_zeros(cword);
          if (aligned_left || (result > l_index)) return result;
          // Words above the one containing l_index start above l_index, so
          // an out-of-range result can only occur in the lowest word.
          assert(index == limit, "invariant");
          break;
        }
      }
      // No interesting bits in range; return l_index.
    }
  }
  return l_index;
}

// Find the first set (one) bit at or after l_offset and before r_offset;
// returns r_offset if no set bit is found in the range.
inline ShenandoahMarkBitMap::idx_t ShenandoahMarkBitMap::get_next_one_offset(idx_t l_offset, idx_t r_offset) const {
  return get_next_bit_impl<find_ones_flip, false>(l_offset, r_offset);
}

// Search for a set (one) bit in the range bounded by l_offset (low) and
// r_offset (high), scanning from the high end; returns l_offset if no set
// bit is found.
inline ShenandoahMarkBitMap::idx_t ShenandoahMarkBitMap::get_last_one_offset(idx_t l_offset, idx_t r_offset) const {
  return get_last_bit_impl<find_ones_flip, false>(l_offset, r_offset);
}

// Returns a bit mask for a range of bits [beg, end) within a single word. Each
// bit in the mask is 0 if the bit is in the range, 1 if not in the range. The
// returned mask can be used directly to clear the range, or inverted to set the
Expand Down
3 changes: 3 additions & 0 deletions src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -67,7 +67,10 @@ class ShenandoahMarkingContext : public CHeapObj<mtGC> {
inline bool is_marked_or_old(oop obj) const;
inline bool is_marked_strong_or_old(oop obj) const;

// Get the first marked address in the range [addr, limit), or limit if none found.
inline HeapWord* get_next_marked_addr(const HeapWord* addr, const HeapWord* limit) const;
// Get the last marked address in the range (limit, addr], or limit if none found.
inline HeapWord* get_last_marked_addr(const HeapWord* limit, const HeapWord* addr) const;

inline bool allocated_after_mark_start(const oop obj) const;
inline bool allocated_after_mark_start(const HeapWord* addr) const;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -72,6 +72,10 @@ inline HeapWord* ShenandoahMarkingContext::get_next_marked_addr(const HeapWord*
return _mark_bit_map.get_next_marked_addr(start, limit);
}

// Delegate to the mark bit map: last marked address in the range bounded by
// limit (low) and start (high); per the bitmap's contract, returns limit if
// no marked address is found.
inline HeapWord* ShenandoahMarkingContext::get_last_marked_addr(const HeapWord* limit, const HeapWord* start) const {
  return _mark_bit_map.get_last_marked_addr(limit, start);
}

inline bool ShenandoahMarkingContext::allocated_after_mark_start(oop obj) const {
const HeapWord* addr = cast_from_oop<HeapWord*>(obj);
return allocated_after_mark_start(addr);
Expand Down
96 changes: 76 additions & 20 deletions src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -125,9 +125,8 @@ void ShenandoahDirectCardMarkRememberedSet::mark_read_table_as_clean() {

// No lock required because arguments align with card boundaries.
void ShenandoahCardCluster::reset_object_range(HeapWord* from, HeapWord* to) {
assert(((((unsigned long long) from) & (CardTable::card_size() - 1)) == 0) &&
((((unsigned long long) to) & (CardTable::card_size() - 1)) == 0),
"reset_object_range bounds must align with card boundaries");
assert(CardTable::is_card_aligned(from) && CardTable::is_card_aligned(to),
"Must align with card boundaries");
size_t card_at_start = _rs->card_index_for_addr(from);
size_t num_cards = (to - from) / CardTable::card_size_in_words();

Expand Down Expand Up @@ -222,31 +221,74 @@ size_t ShenandoahCardCluster::get_last_start(size_t card_index) const {
return _object_starts[card_index].offsets.last;
}

// Given a card_index, return the starting address of the first block in the heap
// that straddles into this card. If this card is co-initial with an object, then
// this would return the first address of the range that this card covers, which is
// where the card's first object also begins.
HeapWord* ShenandoahCardCluster::block_start(const size_t card_index) const {
// Given a card_index, return the starting address of the first object in the heap
// that intersects with this card. This must be a valid, parsable object, and must
// be the first such object that intersects with this card. The object may start before,
// at, or after the start of the card, and may end in or after the card. If no
// such object exists, a null value is returned.
// Expects to be called for a card in a region affiliated with the old generation in
// generational heap, otherwise behavior is undefined.
// If not null, ctx holds the complete marking context of the old generation. If null,
// we expect that the marking context isn't available and the crossing maps are valid.
// Note that crossing maps may be invalid following class unloading and before dead
// or unloaded objects have been coalesced and filled (updating the crossing maps).
HeapWord* ShenandoahCardCluster::first_object_start(const size_t card_index, const ShenandoahMarkingContext* const ctx) const {

HeapWord* left = _rs->addr_for_card_index(card_index);
ShenandoahHeapRegion* region = ShenandoahHeap::heap()->heap_region_containing(left);

#ifdef ASSERT
assert(ShenandoahHeap::heap()->mode()->is_generational(), "Do not use in non-generational mode");
ShenandoahHeapRegion* region = ShenandoahHeap::heap()->heap_region_containing(left);
assert(region->is_old(), "Do not use for young regions");
// For HumongousRegion:s it's more efficient to jump directly to the
// start region.
assert(!region->is_humongous(), "Use region->humongous_start_region() instead");
#endif

// if marking context is valid, we use the marking bit map to find the
// first marked object that intersects with this card, and if no such
// object exists, we return null
if (ctx != nullptr) {
if (ctx->is_marked(left)) {
oop obj = cast_to_oop(left);
assert(oopDesc::is_oop(obj), "Should be an object");
return left;
}
// get the previous marked object, if any
if (region->bottom() < left) {
HeapWord* prev = ctx->get_last_marked_addr(region->bottom(), left);
if (prev != nullptr) {
oop obj = cast_to_oop(prev);
assert(oopDesc::is_oop(obj), "Should be an object");
HeapWord* obj_end = prev + obj->size();
if (obj_end > left) {
return prev;
}
}
}
// the prev marked object, if any, ends before left;
// find the next marked object if any on this card
assert(!ctx->is_marked(left), "Was dealt with above");
HeapWord* right = MIN2(region->top(), ctx->top_at_mark_start(region));
HeapWord* next = ctx->get_next_marked_addr(left, right);
if (next < right) {
oop obj = cast_to_oop(next);
assert(oopDesc::is_oop(obj), "Should be an object");
}
return next;
}
assert(ctx == nullptr, "Should have returned above");

// The following code assumes that the region has only parsable objects
assert(ShenandoahGenerationalHeap::heap()->old_generation()->is_parsable(),
"The code that follows expects a parsable heap");
if (starts_object(card_index) && get_first_start(card_index) == 0) {
// This card contains a co-initial object; a fortiori, it covers
// also the case of a card being the first in a region.
assert(oopDesc::is_oop(cast_to_oop(left)), "Should be an object");
return left;
}

HeapWord* p = nullptr;
oop obj = cast_to_oop(p);
ssize_t cur_index = (ssize_t)card_index;
assert(cur_index >= 0, "Overflow");
assert(cur_index > 0, "Should have returned above");
Expand All @@ -262,31 +304,45 @@ HeapWord* ShenandoahCardCluster::block_start(const size_t card_index) const {
assert(starts_object(cur_index), "Error");
size_t offset = get_last_start(cur_index);
// can avoid call via card size arithmetic below instead
p = _rs->addr_for_card_index(cur_index) + offset;
HeapWord* p = _rs->addr_for_card_index(cur_index) + offset;
// Recall that we already dealt with the co-initial object case above
assert(p < left, "obj should start before left");
// While it is safe to ask an object its size in the loop that
// follows, the (ifdef'd out) loop should never be needed.
// 1. we ask this question only for regions in the old generation
// 1. we ask this question only for regions in the old generation, and those
// that are not humongous regions
// 2. there is no direct allocation ever by mutators in old generation
// regions. Only GC will ever allocate in old regions, and then
// too only during promotion/evacuation phases. Thus there is no danger
// regions walked by this code. Only GC will ever allocate in old regions,
// and then too only during promotion/evacuation phases. Thus there is no danger
// of races between reading from and writing to the object start array,
// or of asking partially initialized objects their size (in the loop below).
// Furthermore, humongous regions (and their dirty cards) are never processed
// by this code.
// 3. only GC asks this question during phases when it is not concurrently
// evacuating/promoting, viz. during concurrent root scanning (before
// the evacuation phase) and during concurrent update refs (after the
// evacuation phase) of young collections. This is never called
// during old or global collections.
// during global collections during marking or update refs..
// 4. Every allocation under TAMS updates the object start array.
NOT_PRODUCT(obj = cast_to_oop(p);)
oop obj = cast_to_oop(p);
assert(oopDesc::is_oop(obj), "Should be an object");
#define WALK_FORWARD_IN_BLOCK_START false
assert(Klass::is_valid(obj->klass()), "Not a valid klass ptr");
#ifdef ASSERT
#define WALK_FORWARD_IN_BLOCK_START true
#else
#define WALK_FORWARD_IN_BLOCK_START true
#endif // ASSERT
while (WALK_FORWARD_IN_BLOCK_START && p + obj->size() < left) {
p += obj->size();
obj = cast_to_oop(p);
assert(oopDesc::is_oop(obj), "Should be an object");
assert(Klass::is_valid(obj->klass()), "Not a valid klass ptr");
// Check assumptions in previous block comment if this assert fires
guarantee(false, "Should never need forward walk in block start");
}
#undef WALK_FORWARD_IN_BLOCK_START // false
assert(p + obj->size() > left, "obj should end after left");
#undef WALK_FORWARD_IN_BLOCK_START
guarantee(p <= left, "p should start at or before left end of card");
guarantee(p + obj->size() > left, "obj should end after left end of card");
return p;
}

Expand Down
18 changes: 12 additions & 6 deletions src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -650,12 +650,18 @@ class ShenandoahCardCluster: public CHeapObj<mtGC> {
size_t get_last_start(size_t card_index) const;


// Given a card_index, return the starting address of the first block in the heap
// that straddles into the card. If the card is co-initial with an object, then
// this would return the starting address of the heap that this card covers.
// Expects to be called for a card affiliated with the old generation in
// generational mode.
HeapWord* block_start(size_t card_index) const;
// Given a card_index, return the starting address of the first object in the heap
// that intersects with this card. This must be a valid, parsable object, and must
// be the first such object that intersects with this card. The object may start before,
// at, or after the start of the card, and may end in or after the card. If no
// such object exists, a null value is returned.
// Expects to be called for a card in a region affiliated with the old generation in
// generational heap, otherwise behavior is undefined.
// If not null, ctx holds the complete marking context of the old generation. If null,
// we expect that the marking context isn't available and the crossing maps are valid.
// Note that crossing maps may be invalid following class unloading and before dead
// or unloaded objects have been coalesced and filled (updating the crossing maps).
HeapWord* first_object_start(size_t card_index, const ShenandoahMarkingContext* const ctx) const;
};

// ShenandoahScanRemembered is a concrete class representing the
Expand Down
Loading