src/share/vm/gc_implementation/parallelScavenge/objectStartArray.hpp
@@ -118,11 +118,13 @@
  void reset();

  MemRegion covered_region() { return _covered_region; }

  void allocate_block(HeapWord* p) {
-   assert(_covered_region.contains(p), "Must be in covered region");
+   assert(_covered_region.contains(p),
+          err_msg("p (" PTR_FORMAT ") is not in covered region [" PTR_FORMAT ", " PTR_FORMAT ")",
+                  p2i(p), p2i(_covered_region.start()), p2i(_covered_region.end())));
    jbyte* block = block_for_addr(p);
    HeapWord* block_base = addr_for_block(block);
    size_t offset = pointer_delta(p, block_base, sizeof(HeapWord*));
    assert(offset < 128, "Sanity");
    // When doing MT offsets, we can't assert this.
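For context on the new assert form: HotSpot's err_msg formats a diagnostic string so that a failing assert reports the values involved, with PTR_FORMAT and p2i handling platform-correct pointer printing. A minimal standalone sketch of the same idiom (ASSERT_MSG and the other names below are illustrative, not HotSpot's):

    #include <cstdio>
    #include <cstdlib>

    // Illustrative stand-in for HotSpot's assert(cond, err_msg(...)):
    // on failure, print a formatted message carrying the offending
    // values, then abort.
    #define ASSERT_MSG(cond, ...)                                 \
      do {                                                        \
        if (!(cond)) {                                            \
          std::fprintf(stderr, "assert(%s) failed: ", #cond);     \
          std::fprintf(stderr, __VA_ARGS__);                      \
          std::fprintf(stderr, "\n");                             \
          std::abort();                                           \
        }                                                         \
      } while (0)

    // Usage mirroring the patch (names illustrative):
    //   ASSERT_MSG(covered.contains(p),
    //              "p (%p) is not in covered region [%p, %p)",
    //              (void*)p, (void*)covered.start(), (void*)covered.end());

The unchanged "offset < 128" check in this hunk is the other half of the design: each block's entry is a single jbyte, so a non-negative offset must fit in a signed byte, leaving negative values free for sentinels such as clean_block.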
@@ -133,11 +135,13 @@
  // Optimized for finding the first object that crosses into
  // a given block. The blocks contain the offset of the last
  // object in that block. Scroll backwards by one, and the first
  // object hit should be at the beginning of the block.
  HeapWord* object_start(HeapWord* addr) const {
-   assert(_covered_region.contains(addr), "Must be in covered region");
+   assert(_covered_region.contains(addr),
+          err_msg("addr (" PTR_FORMAT ") is not in covered region [" PTR_FORMAT ", " PTR_FORMAT ")",
+                  p2i(addr), p2i(_covered_region.start()), p2i(_covered_region.end())));
    jbyte* block = block_for_addr(addr);
    HeapWord* scroll_forward = offset_addr_for_block(block--);
    while (scroll_forward > addr) {
      scroll_forward = offset_addr_for_block(block--);
    }
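To make the scroll-backwards search concrete, a standalone sketch in plain C++ (the block size and names are illustrative; it assumes, as the real array guarantees for allocated ranges, that every visited block has a recorded offset):

    #include <cstdint>

    const long words_per_block = 64;  // illustrative; not HotSpot's constant

    // offsets[b] holds the word offset, within block b, of the last object
    // that starts in block b. Find the last recorded object start at or
    // before word index addr_idx by scrolling back one block at a time.
    long last_start_at_or_before(const int8_t* offsets, long addr_idx) {
      long b = addr_idx / words_per_block;
      long start = b * words_per_block + offsets[b];
      while (start > addr_idx) {  // last start in block b lies past addr_idx
        --b;                      // scroll back one block
        start = b * words_per_block + offsets[b];
      }
      return start;  // object_start then walks forward object-by-object
    }

Worked numbers: if the last object in block 3 starts at word 3*64 + 10 = 202 and addr_idx is 200, then 202 > 200 forces a scroll to block 2, whose recorded start lies within block 2 and is therefore necessarily <= 200.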
@@ -151,11 +155,13 @@
    assert(addr <= next, "wrong order for arg and next");
    return scroll_forward;
  }

  bool is_block_allocated(HeapWord* addr) {
-   assert(_covered_region.contains(addr), "Must be in covered region");
+   assert(_covered_region.contains(addr),
+          err_msg("addr (" PTR_FORMAT ") is not in covered region [" PTR_FORMAT ", " PTR_FORMAT ")",
+                  p2i(addr), p2i(_covered_region.start()), p2i(_covered_region.end())));
    jbyte* block = block_for_addr(addr);
    if (*block == clean_block)
      return false;

    return true;
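The clean_block comparison is the whole test: entries are initialized to the clean_block sentinel (-1 in this header) and only gain a real offset once allocate_block records an object start. A minimal sketch of that pairing (names illustrative):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    const int8_t clean_block = -1;  // sentinel, as in this header

    // Clearing the byte array marks every block unallocated; the
    // allocated test is then a single sentinel comparison.
    void reset_sketch(int8_t* offsets, size_t nblocks) {
      std::memset(offsets, clean_block, nblocks);
    }

    bool is_block_allocated_sketch(const int8_t* offsets, size_t b) {
      return offsets[b] != clean_block;
    }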