src/share/vm/gc/g1/g1CollectedHeap.cpp
*** 36,45 ****
--- 36,46 ----
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1EvacStats.inline.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
+ #include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1MarkSweep.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1ParScanThreadState.inline.hpp"
#include "gc/g1/g1RegionToSpaceMapper.hpp"
#include "gc/g1/g1RemSet.inline.hpp"
*** 296,687 ****
}
}
return res;
}
- HeapWord*
- G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
- uint num_regions,
- size_t word_size,
- AllocationContext_t context) {
- assert(first != G1_NO_HRM_INDEX, "pre-condition");
- assert(is_humongous(word_size), "word_size should be humongous");
- assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
-
- // Index of last region in the series.
- uint last = first + num_regions - 1;
-
- // We need to initialize the region(s) we just discovered. This is
- // a bit tricky given that it can happen concurrently with
- // refinement threads refining cards on these regions and
- // potentially wanting to refine the BOT as they are scanning
- // those cards (this can happen shortly after a cleanup; see CR
- // 6991377). So we have to set up the region(s) carefully and in
- // a specific order.
-
- // The word size sum of all the regions we will allocate.
- size_t word_size_sum = (size_t) num_regions * HeapRegion::GrainWords;
- assert(word_size <= word_size_sum, "sanity");
-
- // This will be the "starts humongous" region.
- HeapRegion* first_hr = region_at(first);
- // The header of the new object will be placed at the bottom of
- // the first region.
- HeapWord* new_obj = first_hr->bottom();
- // This will be the new top of the new object.
- HeapWord* obj_top = new_obj + word_size;
-
- // First, we need to zero the header of the space that we will be
- // allocating. When we update top further down, some refinement
- // threads might try to scan the region. By zeroing the header we
- // ensure that any thread that will try to scan the region will
- // come across the zero klass word and bail out.
- //
- // NOTE: It would not have been correct to have used
- // CollectedHeap::fill_with_object() and make the space look like
- // an int array. The thread that is doing the allocation will
- // later update the object header to a potentially different array
- // type and, for a very short period of time, the klass and length
- // fields will be inconsistent. This could cause a refinement
- // thread to calculate the object size incorrectly.
- Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
-
- // How many words we use for filler objects.
- size_t word_fill_size = word_size_sum - word_size;
-
- // How many words of memory we "waste" because they cannot hold a filler object.
- size_t words_not_fillable = 0;
-
- if (word_fill_size >= min_fill_size()) {
- fill_with_objects(obj_top, word_fill_size);
- } else if (word_fill_size > 0) {
- // We have space to fill, but we cannot fit an object there.
- words_not_fillable = word_fill_size;
- word_fill_size = 0;
- }
-
- // We will set up the first region as "starts humongous". This
- // will also update the BOT covering all the regions to reflect
- // that there is a single object that starts at the bottom of the
- // first region.
- first_hr->set_starts_humongous(obj_top, word_fill_size);
- first_hr->set_allocation_context(context);
- // Then, if there are any, we will set up the "continues
- // humongous" regions.
- HeapRegion* hr = NULL;
- for (uint i = first + 1; i <= last; ++i) {
- hr = region_at(i);
- hr->set_continues_humongous(first_hr);
- hr->set_allocation_context(context);
- }
-
- // Up to this point no concurrent thread would have been able to
- // do any scanning on any region in this series. All the top
- // fields still point to bottom, so the intersection between
- // [bottom,top] and [card_start,card_end] will be empty. Before we
- // update the top fields, we'll do a storestore to make sure that
- // no thread sees the update to top before the zeroing of the
- // object header and the BOT initialization.
- OrderAccess::storestore();
-
- // Now, we will update the top fields of the "continues humongous"
- // regions except the last one.
- for (uint i = first; i < last; ++i) {
- hr = region_at(i);
- hr->set_top(hr->end());
- }
-
- hr = region_at(last);
- // If we cannot fit a filler object, we must set top to the end
- // of the humongous object, otherwise we cannot iterate the heap
- // and the BOT will not be complete.
- hr->set_top(hr->end() - words_not_fillable);
-
- assert(hr->bottom() < obj_top && obj_top <= hr->end(),
- "obj_top should be in last region");
-
- check_bitmaps("Humongous Region Allocation", first_hr);
-
- assert(words_not_fillable == 0 ||
- first_hr->bottom() + word_size_sum - words_not_fillable == hr->top(),
- "Miscalculation in humongous allocation");
-
- increase_used((word_size_sum - words_not_fillable) * HeapWordSize);
-
- for (uint i = first; i <= last; ++i) {
- hr = region_at(i);
- _humongous_set.add(hr);
- _hr_printer.alloc(hr);
- }
-
- return new_obj;
- }
-
- size_t G1CollectedHeap::humongous_obj_size_in_regions(size_t word_size) {
- assert(is_humongous(word_size), "Object of size " SIZE_FORMAT " must be humongous here", word_size);
- return align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
- }
-
- // If the object could fit into existing free regions without expansion, try that.
- // Otherwise, if the heap can expand, do so.
- // Otherwise, if using expanded ("ex") regions might help, try with those given back.
- HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size, AllocationContext_t context) {
- assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
-
- verify_region_sets_optional();
-
- uint first = G1_NO_HRM_INDEX;
- uint obj_regions = (uint) humongous_obj_size_in_regions(word_size);
-
- if (obj_regions == 1) {
- // Only one region to allocate, try to use a fast path by directly allocating
- // from the free lists. Do not try to expand here, we will potentially do that
- // later.
- HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */);
- if (hr != NULL) {
- first = hr->hrm_index();
- }
- } else {
- // We can't allocate a humongous object spanning more than one region while
- // cleanupComplete() is running, since some of the regions we find to be
- // empty might not yet be added to the free list. It is not straightforward
- // to know which list they are on, so that we can remove them. We only
- // need to do this if we need to allocate more than one region to satisfy the
- // current humongous allocation request. If we are only allocating one region
- // we use the one-region region allocation code (see above), that already
- // potentially waits for regions from the secondary free list.
- wait_while_free_regions_coming();
- append_secondary_free_list_if_not_empty_with_lock();
-
- // Policy: First try only empty (i.e. already committed) regions. Maybe we
- // are lucky enough to find some.
- first = _hrm.find_contiguous_only_empty(obj_regions);
- if (first != G1_NO_HRM_INDEX) {
- _hrm.allocate_free_regions_starting_at(first, obj_regions);
- }
- }
-
- if (first == G1_NO_HRM_INDEX) {
- // Policy: We could not find enough regions for the humongous object in the
- // free list. Look through the heap to find a mix of free and uncommitted regions.
- // If so, try expansion.
- first = _hrm.find_contiguous_empty_or_unavailable(obj_regions);
- if (first != G1_NO_HRM_INDEX) {
- // We found something. Make sure these regions are committed, i.e. expand
- // the heap. Alternatively we could do a defragmentation GC.
- log_debug(gc, ergo, heap)("Attempt heap expansion (humongous allocation request failed). Allocation request: " SIZE_FORMAT "B",
- word_size * HeapWordSize);
-
-
- _hrm.expand_at(first, obj_regions);
- g1_policy()->record_new_heap_size(num_regions());
-
- #ifdef ASSERT
- for (uint i = first; i < first + obj_regions; ++i) {
- HeapRegion* hr = region_at(i);
- assert(hr->is_free(), "sanity");
- assert(hr->is_empty(), "sanity");
- assert(is_on_master_free_list(hr), "sanity");
- }
- #endif
- _hrm.allocate_free_regions_starting_at(first, obj_regions);
- } else {
- // Policy: Potentially trigger a defragmentation GC.
- }
- }
-
- HeapWord* result = NULL;
- if (first != G1_NO_HRM_INDEX) {
- result = humongous_obj_allocate_initialize_regions(first, obj_regions,
- word_size, context);
- assert(result != NULL, "it should always return a valid result");
-
- // A successful humongous object allocation changes the used space
- // information of the old generation so we need to recalculate the
- // sizes and update the jstat counters here.
- g1mm()->update_sizes();
- }
-
- verify_region_sets_optional();
-
- return result;
- }
-
HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
! assert_heap_not_locked_and_not_at_safepoint();
! assert(!is_humongous(word_size), "we do not allow humongous TLABs");
!
! uint dummy_gc_count_before;
! uint dummy_gclocker_retry_count = 0;
! return attempt_allocation(word_size, &dummy_gc_count_before, &dummy_gclocker_retry_count);
}
! HeapWord*
! G1CollectedHeap::mem_allocate(size_t word_size,
bool* gc_overhead_limit_was_exceeded) {
! assert_heap_not_locked_and_not_at_safepoint();
!
! // Loop until the allocation is satisfied, or unsatisfied after GC.
! for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
! uint gc_count_before;
!
! HeapWord* result = NULL;
! if (!is_humongous(word_size)) {
! result = attempt_allocation(word_size, &gc_count_before, &gclocker_retry_count);
! } else {
! result = attempt_allocation_humongous(word_size, &gc_count_before, &gclocker_retry_count);
! }
! if (result != NULL) {
! return result;
! }
!
! // Create the garbage collection operation...
! VM_G1CollectForAllocation op(gc_count_before, word_size);
! op.set_allocation_context(AllocationContext::current());
!
! // ...and get the VM thread to execute it.
! VMThread::execute(&op);
!
! if (op.prologue_succeeded() && op.pause_succeeded()) {
! // If the operation was successful we'll return the result even
! // if it is NULL. If the allocation attempt failed immediately
! // after a Full GC, it's unlikely we'll be able to allocate now.
! HeapWord* result = op.result();
! if (result != NULL && !is_humongous(word_size)) {
! // Allocations that take place on VM operations do not do any
! // card dirtying and we have to do it here. We only have to do
! // this for non-humongous allocations, though.
! dirty_young_block(result, word_size);
! }
! return result;
! } else {
! if (gclocker_retry_count > GCLockerRetryAllocationCount) {
! return NULL;
! }
! assert(op.result() == NULL,
! "the result should be NULL if the VM op did not succeed");
! }
!
! // Give a warning if we seem to be looping forever.
! if ((QueuedAllocationWarningCount > 0) &&
! (try_count % QueuedAllocationWarningCount == 0)) {
! warning("G1CollectedHeap::mem_allocate retries %d times", try_count);
! }
! }
!
! ShouldNotReachHere();
! return NULL;
! }
!
! HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
! AllocationContext_t context,
! uint* gc_count_before_ret,
! uint* gclocker_retry_count_ret) {
! // Make sure you read the note in attempt_allocation_humongous().
!
! assert_heap_not_locked_and_not_at_safepoint();
! assert(!is_humongous(word_size), "attempt_allocation_slow() should not "
! "be called for humongous allocation requests");
!
! // We should only get here after the first-level allocation attempt
! // (attempt_allocation()) failed to allocate.
!
! // We will loop until a) we manage to successfully perform the
! // allocation or b) we successfully schedule a collection which
! // fails to perform the allocation. b) is the only case when we'll
! // return NULL.
! HeapWord* result = NULL;
! for (int try_count = 1; /* we'll return */; try_count += 1) {
! bool should_try_gc;
! uint gc_count_before;
!
! {
! MutexLockerEx x(Heap_lock);
! result = _allocator->attempt_allocation_locked(word_size, context);
! if (result != NULL) {
! return result;
! }
!
! if (GCLocker::is_active_and_needs_gc()) {
! if (g1_policy()->can_expand_young_list()) {
! // No need for an ergo verbose message here,
! // can_expand_young_list() does this when it returns true.
! result = _allocator->attempt_allocation_force(word_size, context);
! if (result != NULL) {
! return result;
! }
! }
! should_try_gc = false;
! } else {
! // The GCLocker may not be active but the GCLocker initiated
! // GC may not yet have been performed (GCLocker::needs_gc()
! // returns true). In this case we do not try this GC and
! // wait until the GCLocker initiated GC is performed, and
! // then retry the allocation.
! if (GCLocker::needs_gc()) {
! should_try_gc = false;
! } else {
! // Read the GC count while still holding the Heap_lock.
! gc_count_before = total_collections();
! should_try_gc = true;
! }
! }
! }
!
! if (should_try_gc) {
! bool succeeded;
! result = do_collection_pause(word_size, gc_count_before, &succeeded,
! GCCause::_g1_inc_collection_pause);
! if (result != NULL) {
! assert(succeeded, "only way to get back a non-NULL result");
! return result;
! }
!
! if (succeeded) {
! // If we get here we successfully scheduled a collection which
! // failed to allocate. No point in trying to allocate
! // further. We'll just return NULL.
! MutexLockerEx x(Heap_lock);
! *gc_count_before_ret = total_collections();
! return NULL;
! }
! } else {
! if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
! MutexLockerEx x(Heap_lock);
! *gc_count_before_ret = total_collections();
! return NULL;
! }
! // The GCLocker is either active or the GCLocker initiated
! // GC has not yet been performed. Stall until it is and
! // then retry the allocation.
! GCLocker::stall_until_clear();
! (*gclocker_retry_count_ret) += 1;
! }
!
! // We can reach here if we were unsuccessful in scheduling a
! // collection (because another thread beat us to it) or if we were
! // stalled due to the GC locker. In either case we should retry the
! // allocation attempt in case another thread successfully
! // performed a collection and reclaimed enough space. We do the
! // first attempt (without holding the Heap_lock) here and the
! // follow-on attempt will be at the start of the next loop
! // iteration (after taking the Heap_lock).
! result = _allocator->attempt_allocation(word_size, context);
! if (result != NULL) {
! return result;
! }
!
! // Give a warning if we seem to be looping forever.
! if ((QueuedAllocationWarningCount > 0) &&
! (try_count % QueuedAllocationWarningCount == 0)) {
! warning("G1CollectedHeap::attempt_allocation_slow() "
! "retries %d times", try_count);
! }
! }
!
! ShouldNotReachHere();
! return NULL;
}
void G1CollectedHeap::begin_archive_alloc_range() {
assert_at_safepoint(true /* should_be_vm_thread */);
if (_archive_allocator == NULL) {
--- 297,313 ----
}
}
return res;
}
HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
! return _allocator->allocate_new_tlab(word_size);
}
! HeapWord* G1CollectedHeap::mem_allocate(size_t word_size,
bool* gc_overhead_limit_was_exceeded) {
! return _allocator->mem_allocate(word_size, gc_overhead_limit_was_exceeded);
}
void G1CollectedHeap::begin_archive_alloc_range() {
assert_at_safepoint(true /* should_be_vm_thread */);
if (_archive_allocator == NULL) {
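
The comments in the deleted humongous_obj_allocate_initialize_regions() above describe a publication protocol: zero the klass word of the new object, issue a storestore barrier, and only then publish the regions' top fields, so a concurrent refinement thread can never observe the new top without also seeing the zeroed header. Below is a minimal standalone C++ model of that ordering, using a release fence as a stand-in for OrderAccess::storestore(); all names here (Region, publish_humongous, scan_limit) are illustrative, not HotSpot's.

    #include <atomic>
    #include <cstddef>

    // Toy model of the publication protocol: a scanner must never see the
    // new 'top' before the zeroed object header.
    struct Region {
      void* klass_word;               // zeroed before publication
      std::atomic<size_t> top;        // scanners read this to bound a scan
    };

    void publish_humongous(Region& r, size_t new_top) {
      r.klass_word = nullptr;                               // 1. zero the header
      std::atomic_thread_fence(std::memory_order_release);  // 2. ~ OrderAccess::storestore()
      r.top.store(new_top, std::memory_order_relaxed);      // 3. publish top
    }

    size_t scan_limit(const Region& r) {
      // Pairs with the release fence: a scanner that observes the new top
      // is guaranteed to also observe the zeroed klass word and bail out.
      return r.top.load(std::memory_order_acquire);
    }
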
*** 881,913 ****
increase_used(fill_size * HeapWordSize);
}
}
}
- inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
- uint* gc_count_before_ret,
- uint* gclocker_retry_count_ret) {
- assert_heap_not_locked_and_not_at_safepoint();
- assert(!is_humongous(word_size), "attempt_allocation() should not "
- "be called for humongous allocation requests");
-
- AllocationContext_t context = AllocationContext::current();
- HeapWord* result = _allocator->attempt_allocation(word_size, context);
-
- if (result == NULL) {
- result = attempt_allocation_slow(word_size,
- context,
- gc_count_before_ret,
- gclocker_retry_count_ret);
- }
- assert_heap_not_locked();
- if (result != NULL) {
- dirty_young_block(result, word_size);
- }
- return result;
- }
-
void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count) {
assert(!is_init_completed(), "Expect to be called at JVM init time");
assert(ranges != NULL, "MemRegion array NULL");
assert(count != 0, "No MemRegions provided");
MemRegion reserved = _hrm.reserved();
--- 507,516 ----
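
The hunk above deletes attempt_allocation(): one lock-free attempt on the mutator alloc region, falling back to attempt_allocation_slow(), which retries under Heap_lock and may schedule a pause. A simplified, self-contained model of that fast-path/slow-path split, with a CAS bump pointer and std::mutex standing in for the real allocator and Heap_lock (all names are illustrative):

    #include <atomic>
    #include <cstddef>
    #include <mutex>

    // Toy arena standing in for the mutator alloc region.
    struct Arena {
      std::atomic<size_t> cursor{0};
      size_t limit = 1024;
      std::mutex heap_lock;              // stands in for Heap_lock

      // Fast path: lock-free CAS bump allocation.
      void* attempt_allocation(size_t words) {
        size_t cur = cursor.load(std::memory_order_relaxed);
        while (cur + words <= limit) {
          if (cursor.compare_exchange_weak(cur, cur + words)) {
            return reinterpret_cast<void*>(cur + 1);   // fake, non-null address
          }
        }
        return nullptr;
      }

      // Slow path: retry under the lock, as attempt_allocation_slow() does.
      void* attempt_allocation_slow(size_t words) {
        for (int try_count = 1; try_count <= 10; ++try_count) {
          {
            std::lock_guard<std::mutex> x(heap_lock);
            if (void* p = attempt_allocation(words)) return p;
            // Here the real code decides between forcing expansion,
            // scheduling a collection pause, or stalling on the GC locker.
          }
          // Unlocked retry in case another thread reclaimed space meanwhile.
          if (void* p = attempt_allocation(words)) return p;
        }
        return nullptr;                  // allocation failed after a GC
      }
    };
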
*** 976,1125 ****
HeapRegion::GrainWords * HeapWordSize * uncommitted_regions);
}
decrease_used(size_used);
}
- HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
- uint* gc_count_before_ret,
- uint* gclocker_retry_count_ret) {
- // The structure of this method has a lot of similarities to
- // attempt_allocation_slow(). The reason these two were not merged
- // into a single one is that such a method would require several "if
- // allocation is not humongous do this, otherwise do that"
- // conditional paths which would obscure its flow. In fact, an early
- // version of this code did use a unified method which was harder to
- // follow and, as a result, it had subtle bugs that were hard to
- // track down. So keeping these two methods separate allows each to
- // be more readable. It will be good to keep these two in sync as
- // much as possible.
-
- assert_heap_not_locked_and_not_at_safepoint();
- assert(is_humongous(word_size), "attempt_allocation_humongous() "
- "should only be called for humongous allocations");
-
- // Humongous objects can exhaust the heap quickly, so we should check if we
- // need to start a marking cycle at each humongous object allocation. We do
- // the check before we do the actual allocation. The reason for doing it
- // before the allocation is that we avoid having to keep track of the newly
- // allocated memory while we do a GC.
- if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation",
- word_size)) {
- collect(GCCause::_g1_humongous_allocation);
- }
-
- // We will loop until a) we manage to successfully perform the
- // allocation or b) we successfully schedule a collection which
- // fails to perform the allocation. b) is the only case when we'll
- // return NULL.
- HeapWord* result = NULL;
- for (int try_count = 1; /* we'll return */; try_count += 1) {
- bool should_try_gc;
- uint gc_count_before;
-
- {
- MutexLockerEx x(Heap_lock);
-
- // Given that humongous objects are not allocated in young
- // regions, we'll first try to do the allocation without doing a
- // collection hoping that there's enough space in the heap.
- result = humongous_obj_allocate(word_size, AllocationContext::current());
- if (result != NULL) {
- size_t size_in_regions = humongous_obj_size_in_regions(word_size);
- g1_policy()->add_bytes_allocated_in_old_since_last_gc(size_in_regions * HeapRegion::GrainBytes);
- return result;
- }
-
- if (GCLocker::is_active_and_needs_gc()) {
- should_try_gc = false;
- } else {
- // The GCLocker may not be active but the GCLocker initiated
- // GC may not yet have been performed (GCLocker::needs_gc()
- // returns true). In this case we do not try this GC and
- // wait until the GCLocker initiated GC is performed, and
- // then retry the allocation.
- if (GCLocker::needs_gc()) {
- should_try_gc = false;
- } else {
- // Read the GC count while still holding the Heap_lock.
- gc_count_before = total_collections();
- should_try_gc = true;
- }
- }
- }
-
- if (should_try_gc) {
- // If we failed to allocate the humongous object, we should try to
- // do a collection pause (if we're allowed) in case it reclaims
- // enough space for the allocation to succeed after the pause.
-
- bool succeeded;
- result = do_collection_pause(word_size, gc_count_before, &succeeded,
- GCCause::_g1_humongous_allocation);
- if (result != NULL) {
- assert(succeeded, "only way to get back a non-NULL result");
- return result;
- }
-
- if (succeeded) {
- // If we get here we successfully scheduled a collection which
- // failed to allocate. No point in trying to allocate
- // further. We'll just return NULL.
- MutexLockerEx x(Heap_lock);
- *gc_count_before_ret = total_collections();
- return NULL;
- }
- } else {
- if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
- MutexLockerEx x(Heap_lock);
- *gc_count_before_ret = total_collections();
- return NULL;
- }
- // The GCLocker is either active or the GCLocker initiated
- // GC has not yet been performed. Stall until it is and
- // then retry the allocation.
- GCLocker::stall_until_clear();
- (*gclocker_retry_count_ret) += 1;
- }
-
- // We can reach here if we were unsuccessful in scheduling a
- // collection (because another thread beat us to it) or if we were
- // stalled due to the GC locker. In either case we should retry the
- // allocation attempt in case another thread successfully
- // performed a collection and reclaimed enough space. Give a
- // warning if we seem to be looping forever.
-
- if ((QueuedAllocationWarningCount > 0) &&
- (try_count % QueuedAllocationWarningCount == 0)) {
- warning("G1CollectedHeap::attempt_allocation_humongous() "
- "retries %d times", try_count);
- }
- }
-
- ShouldNotReachHere();
- return NULL;
- }
-
- HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
- AllocationContext_t context,
- bool expect_null_mutator_alloc_region) {
- assert_at_safepoint(true /* should_be_vm_thread */);
- assert(!_allocator->has_mutator_alloc_region(context) || !expect_null_mutator_alloc_region,
- "the current alloc region was unexpectedly found to be non-NULL");
-
- if (!is_humongous(word_size)) {
- return _allocator->attempt_allocation_locked(word_size, context);
- } else {
- HeapWord* result = humongous_obj_allocate(word_size, context);
- if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
- collector_state()->set_initiate_conc_mark_if_possible(true);
- }
- return result;
- }
-
- ShouldNotReachHere();
- }
-
class PostMCRemSetClearClosure: public HeapRegionClosure {
G1CollectedHeap* _g1h;
ModRefBarrierSet* _mr_bs;
public:
PostMCRemSetClearClosure(G1CollectedHeap* g1h, ModRefBarrierSet* mr_bs) :
--- 579,588 ----
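
The deleted attempt_allocation_humongous() repeats the slow path's GCLocker decision: if a GCLocker-initiated GC is pending, stall and retry (up to GCLockerRetryAllocationCount times) rather than racing it with our own pause. A boiled-down sketch of that decision, with faked state standing in for GCLocker::is_active_and_needs_gc() and GCLocker::needs_gc():

    #include <cstdio>

    // All state here is faked for illustration; in HotSpot it comes from
    // GCLocker and the GCLockerRetryAllocationCount flag.
    struct GCLockerModel {
      bool active_and_needs_gc;
      bool needs_gc;
    };

    enum class Action { TryGC, StallAndRetry, GiveUp };

    Action decide(const GCLockerModel& gcl, unsigned retry_count,
                  unsigned max_retries /* ~ GCLockerRetryAllocationCount */) {
      if (gcl.active_and_needs_gc || gcl.needs_gc) {
        // A GCLocker-initiated GC is pending; don't race it with our own
        // pause -- stall until it has happened, then retry the allocation.
        return retry_count > max_retries ? Action::GiveUp : Action::StallAndRetry;
      }
      // Safe to schedule a collection pause ourselves.
      return Action::TryGC;
    }

    int main() {
      GCLockerModel gcl{false, true};
      if (decide(gcl, 0, 2) == Action::StallAndRetry) {
        printf("stall until GCLocker-initiated GC, then retry\n");
      }
      return 0;
    }
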
*** 1228,1238 ****
print_heap_before_gc();
trace_heap_before_gc(gc_tracer);
size_t metadata_prev_used = MetaspaceAux::used_bytes();
! verify_region_sets_optional();
const bool do_clear_all_soft_refs = clear_all_soft_refs ||
collector_policy()->should_clear_all_soft_refs();
ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
--- 691,701 ----
print_heap_before_gc();
trace_heap_before_gc(gc_tracer);
size_t metadata_prev_used = MetaspaceAux::used_bytes();
! _verifier->verify_region_sets_optional();
const bool do_clear_all_soft_refs = clear_all_soft_refs ||
collector_policy()->should_clear_all_soft_refs();
ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());
*** 1269,1281 ****
increment_total_collections(true /* full gc */);
increment_old_marking_cycles_started();
assert(used() == recalculate_used(), "Should be equal");
! verify_before_gc();
! check_bitmaps("Full GC Start");
pre_full_gc_dump(gc_timer);
#if defined(COMPILER2) || INCLUDE_JVMCI
DerivedPointerTable::clear();
#endif
--- 732,744 ----
increment_total_collections(true /* full gc */);
increment_old_marking_cycles_started();
assert(used() == recalculate_used(), "Should be equal");
! _verifier->verify_before_gc();
! _verifier->check_bitmaps("Full GC Start");
pre_full_gc_dump(gc_timer);
#if defined(COMPILER2) || INCLUDE_JVMCI
DerivedPointerTable::clear();
#endif
*** 1406,1418 ****
// Update the number of full collections that have been completed.
increment_old_marking_cycles_completed(false /* concurrent */);
_hrm.verify_optional();
! verify_region_sets_optional();
! verify_after_gc();
// Clear the previous marking bitmap, if needed for bitmap verification.
// Note we cannot do this when we clear the next marking bitmap in
// ConcurrentMark::abort() above since VerifyDuringGC verifies the
// objects marked during a full GC against the previous bitmap.
--- 869,881 ----
// Update the number of full collections that have been completed.
increment_old_marking_cycles_completed(false /* concurrent */);
_hrm.verify_optional();
! _verifier->verify_region_sets_optional();
! _verifier->verify_after_gc();
// Clear the previous marking bitmap, if needed for bitmap verification.
// Note we cannot do this when we clear the next marking bitmap in
// ConcurrentMark::abort() above since VerifyDuringGC verifies the
// objects marked during a full GC against the previous bitmap.
*** 1420,1430 ****
// the full GC has compacted objects and updated TAMS but not updated
// the prev bitmap.
if (G1VerifyBitmaps) {
((CMBitMap*) concurrent_mark()->prevMarkBitMap())->clearAll();
}
! check_bitmaps("Full GC End");
// Start a new incremental collection set for the next pause
assert(g1_policy()->collection_set() == NULL, "must be");
g1_policy()->start_incremental_cset_building();
--- 883,893 ----
// the full GC has compacted objects and updated TAMS but not updated
// the prev bitmap.
if (G1VerifyBitmaps) {
((CMBitMap*) concurrent_mark()->prevMarkBitMap())->clearAll();
}
! _verifier->check_bitmaps("Full GC End");
// Start a new incremental collection set for the next pause
assert(g1_policy()->collection_set() == NULL, "must be");
g1_policy()->start_incremental_cset_building();
*** 1548,1558 ****
bool expect_null_mutator_alloc_region,
bool* gc_succeeded) {
*gc_succeeded = true;
// Let's attempt the allocation first.
HeapWord* result =
! attempt_allocation_at_safepoint(word_size,
context,
expect_null_mutator_alloc_region);
if (result != NULL) {
assert(*gc_succeeded, "sanity");
return result;
--- 1011,1021 ----
bool expect_null_mutator_alloc_region,
bool* gc_succeeded) {
*gc_succeeded = true;
// Let's attempt the allocation first.
HeapWord* result =
! _allocator->attempt_allocation_at_safepoint(word_size,
context,
expect_null_mutator_alloc_region);
if (result != NULL) {
assert(*gc_succeeded, "sanity");
return result;
*** 1637,1657 ****
// allocated block, or else "NULL".
HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size, AllocationContext_t context) {
assert_at_safepoint(true /* should_be_vm_thread */);
! verify_region_sets_optional();
size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
log_debug(gc, ergo, heap)("Attempt heap expansion (allocation request failed). Allocation request: " SIZE_FORMAT "B",
word_size * HeapWordSize);
if (expand(expand_bytes)) {
_hrm.verify_optional();
! verify_region_sets_optional();
! return attempt_allocation_at_safepoint(word_size,
context,
false /* expect_null_mutator_alloc_region */);
}
return NULL;
}
--- 1100,1120 ----
// allocated block, or else "NULL".
HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size, AllocationContext_t context) {
assert_at_safepoint(true /* should_be_vm_thread */);
! _verifier->verify_region_sets_optional();
size_t expand_bytes = MAX2(word_size * HeapWordSize, MinHeapDeltaBytes);
log_debug(gc, ergo, heap)("Attempt heap expansion (allocation request failed). Allocation request: " SIZE_FORMAT "B",
word_size * HeapWordSize);
if (expand(expand_bytes)) {
_hrm.verify_optional();
! _verifier->verify_region_sets_optional();
! return _allocator->attempt_allocation_at_safepoint(word_size,
context,
false /* expect_null_mutator_alloc_region */);
}
return NULL;
}
*** 1715,1725 ****
log_debug(gc, ergo, heap)("Did not expand the heap (heap shrinking operation failed)");
}
}
void G1CollectedHeap::shrink(size_t shrink_bytes) {
! verify_region_sets_optional();
// We should only reach here at the end of a Full GC which means we
// should not be holding on to any GC alloc regions. The method
// below will make sure of that and do any remaining clean up.
_allocator->abandon_gc_alloc_regions();
--- 1178,1188 ----
log_debug(gc, ergo, heap)("Did not expand the heap (heap shrinking operation failed)");
}
}
void G1CollectedHeap::shrink(size_t shrink_bytes) {
! _verifier->verify_region_sets_optional();
// We should only reach here at the end of a Full GC which means we
// should not be holding on to any GC alloc regions. The method
// below will make sure of that and do any remaining clean up.
_allocator->abandon_gc_alloc_regions();
*** 1730,1740 ****
tear_down_region_sets(true /* free_list_only */);
shrink_helper(shrink_bytes);
rebuild_region_sets(true /* free_list_only */);
_hrm.verify_optional();
! verify_region_sets_optional();
}
// Public methods.
G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
--- 1193,1203 ----
tear_down_region_sets(true /* free_list_only */);
shrink_helper(shrink_bytes);
rebuild_region_sets(true /* free_list_only */);
_hrm.verify_optional();
! _verifier->verify_region_sets_optional();
}
// Public methods.
G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
*** 1777,1786 ****
--- 1240,1250 ----
_workers = new WorkGang("GC Thread", ParallelGCThreads,
/* are_GC_task_threads */true,
/* are_ConcurrentGC_threads */false);
_workers->initialize_workers();
+ _verifier = new G1HeapVerifier(this);
_allocator = G1Allocator::create_allocator(this);
_humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords);
// Override the default _filler_array_max_size so that no humongous filler
// objects are created.
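
The added line wires up the new G1HeapVerifier in the heap constructor; throughout this change, verification methods move off G1CollectedHeap onto that collaborator, which keeps a back-pointer to the heap. A minimal sketch of the pattern (types are illustrative, not the real classes):

    class Heap;  // forward declaration

    // Verification logic extracted into its own class, mirroring the
    // G1CollectedHeap -> G1HeapVerifier split in this change.
    class HeapVerifier {
      Heap* _heap;            // back-pointer, like G1HeapVerifier's heap field
    public:
      explicit HeapVerifier(Heap* h) : _heap(h) {}
      void verify_before_gc();
      void verify_after_gc();
    };

    class Heap {
      HeapVerifier* _verifier;
    public:
      Heap() : _verifier(new HeapVerifier(this)) {}  // same wiring as the patch
      void collect() {
        _verifier->verify_before_gc();
        // ... do the collection ...
        _verifier->verify_after_gc();
      }
    };

    void HeapVerifier::verify_before_gc() { /* walk regions via _heap */ }
    void HeapVerifier::verify_after_gc()  { /* walk regions via _heap */ }
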
*** 2226,2262 ****
case GCCause::_g1_humongous_allocation: return true;
default: return is_user_requested_concurrent_full_gc(cause);
}
}
- #ifndef PRODUCT
- void G1CollectedHeap::allocate_dummy_regions() {
- // Let's fill up most of the region
- size_t word_size = HeapRegion::GrainWords - 1024;
- // And as a result the region we'll allocate will be humongous.
- guarantee(is_humongous(word_size), "sanity");
-
- // _filler_array_max_size is set to humongous object threshold
- // but temporarily change it to use CollectedHeap::fill_with_object().
- SizeTFlagSetting fs(_filler_array_max_size, word_size);
-
- for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) {
- // Let's use the existing mechanism for the allocation
- HeapWord* dummy_obj = humongous_obj_allocate(word_size,
- AllocationContext::system());
- if (dummy_obj != NULL) {
- MemRegion mr(dummy_obj, word_size);
- CollectedHeap::fill_with_object(mr);
- } else {
- // If we can't allocate once, we probably cannot allocate
- // again. Let's get out of the loop.
- break;
- }
- }
- }
- #endif // !PRODUCT
-
void G1CollectedHeap::increment_old_marking_cycles_started() {
assert(_old_marking_cycles_started == _old_marking_cycles_completed ||
_old_marking_cycles_started == _old_marking_cycles_completed + 1,
"Wrong marking cycle count (started: %d, completed: %d)",
_old_marking_cycles_started, _old_marking_cycles_completed);
--- 1690,1699 ----
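
The deleted allocate_dummy_regions() (its replacement appears later in this diff inside an #ifdef ASSERT block) relies on SizeTFlagSetting to raise _filler_array_max_size only for the duration of a scope. A generic save/restore guard with the same behavior might look like this; SizeTSetting is a hypothetical stand-in, not the HotSpot utility class:

    #include <cstddef>
    #include <cassert>

    // Scope-bound save/restore of a size_t variable, modeled on HotSpot's
    // SizeTFlagSetting: the old value comes back even on early return.
    class SizeTSetting {
      size_t* _flag;
      size_t  _saved;
    public:
      SizeTSetting(size_t& flag, size_t new_value)
          : _flag(&flag), _saved(flag) { flag = new_value; }
      ~SizeTSetting() { *_flag = _saved; }
    };

    int main() {
      size_t filler_array_max_size = 128;   // stand-in for _filler_array_max_size
      {
        SizeTSetting fs(filler_array_max_size, 1u << 20);
        assert(filler_array_max_size == (1u << 20)); // raised inside the scope
        // ... fill_with_object() could now create a humongous filler ...
      }
      assert(filler_array_max_size == 128);          // restored on scope exit
      return 0;
    }
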
*** 2665,3120 ****
// assert(false, "NYI");
return 0;
}
void G1CollectedHeap::prepare_for_verify() {
! if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
! ensure_parsability(false);
! }
! g1_rem_set()->prepare_for_verify();
! }
!
! bool G1CollectedHeap::allocated_since_marking(oop obj, HeapRegion* hr,
! VerifyOption vo) {
! switch (vo) {
! case VerifyOption_G1UsePrevMarking:
! return hr->obj_allocated_since_prev_marking(obj);
! case VerifyOption_G1UseNextMarking:
! return hr->obj_allocated_since_next_marking(obj);
! case VerifyOption_G1UseMarkWord:
! return false;
! default:
! ShouldNotReachHere();
! }
! return false; // keep some compilers happy
! }
!
! HeapWord* G1CollectedHeap::top_at_mark_start(HeapRegion* hr, VerifyOption vo) {
! switch (vo) {
! case VerifyOption_G1UsePrevMarking: return hr->prev_top_at_mark_start();
! case VerifyOption_G1UseNextMarking: return hr->next_top_at_mark_start();
! case VerifyOption_G1UseMarkWord: return NULL;
! default: ShouldNotReachHere();
! }
! return NULL; // keep some compilers happy
! }
!
! bool G1CollectedHeap::is_marked(oop obj, VerifyOption vo) {
! switch (vo) {
! case VerifyOption_G1UsePrevMarking: return isMarkedPrev(obj);
! case VerifyOption_G1UseNextMarking: return isMarkedNext(obj);
! case VerifyOption_G1UseMarkWord: return obj->is_gc_marked();
! default: ShouldNotReachHere();
! }
! return false; // keep some compilers happy
! }
!
! const char* G1CollectedHeap::top_at_mark_start_str(VerifyOption vo) {
! switch (vo) {
! case VerifyOption_G1UsePrevMarking: return "PTAMS";
! case VerifyOption_G1UseNextMarking: return "NTAMS";
! case VerifyOption_G1UseMarkWord: return "NONE";
! default: ShouldNotReachHere();
! }
! return NULL; // keep some compilers happy
! }
!
! class VerifyRootsClosure: public OopClosure {
! private:
! G1CollectedHeap* _g1h;
! VerifyOption _vo;
! bool _failures;
! public:
! // _vo == UsePrevMarking -> use "prev" marking information,
! // _vo == UseNextMarking -> use "next" marking information,
! // _vo == UseMarkWord -> use mark word from object header.
! VerifyRootsClosure(VerifyOption vo) :
! _g1h(G1CollectedHeap::heap()),
! _vo(vo),
! _failures(false) { }
!
! bool failures() { return _failures; }
!
! template <class T> void do_oop_nv(T* p) {
! T heap_oop = oopDesc::load_heap_oop(p);
! if (!oopDesc::is_null(heap_oop)) {
! oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
! if (_g1h->is_obj_dead_cond(obj, _vo)) {
! LogHandle(gc, verify) log;
! log.info("Root location " PTR_FORMAT " points to dead obj " PTR_FORMAT, p2i(p), p2i(obj));
! if (_vo == VerifyOption_G1UseMarkWord) {
! log.info(" Mark word: " PTR_FORMAT, p2i(obj->mark()));
! }
! ResourceMark rm;
! obj->print_on(log.info_stream());
! _failures = true;
! }
! }
! }
!
! void do_oop(oop* p) { do_oop_nv(p); }
! void do_oop(narrowOop* p) { do_oop_nv(p); }
! };
!
! class G1VerifyCodeRootOopClosure: public OopClosure {
! G1CollectedHeap* _g1h;
! OopClosure* _root_cl;
! nmethod* _nm;
! VerifyOption _vo;
! bool _failures;
!
! template <class T> void do_oop_work(T* p) {
! // First verify that this root is live
! _root_cl->do_oop(p);
!
! if (!G1VerifyHeapRegionCodeRoots) {
! // We're not verifying the code roots attached to heap region.
! return;
! }
!
! // Don't check the code roots during marking verification in a full GC
! if (_vo == VerifyOption_G1UseMarkWord) {
! return;
! }
!
! // Now verify that the current nmethod (which contains p) is
! // in the code root list of the heap region containing the
! // object referenced by p.
!
! T heap_oop = oopDesc::load_heap_oop(p);
! if (!oopDesc::is_null(heap_oop)) {
! oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
!
! // Now fetch the region containing the object
! HeapRegion* hr = _g1h->heap_region_containing(obj);
! HeapRegionRemSet* hrrs = hr->rem_set();
! // Verify that the strong code root list for this region
! // contains the nmethod
! if (!hrrs->strong_code_roots_list_contains(_nm)) {
! log_info(gc, verify)("Code root location " PTR_FORMAT " "
! "from nmethod " PTR_FORMAT " not in strong "
! "code roots for region [" PTR_FORMAT "," PTR_FORMAT ")",
! p2i(p), p2i(_nm), p2i(hr->bottom()), p2i(hr->end()));
! _failures = true;
! }
! }
! }
!
! public:
! G1VerifyCodeRootOopClosure(G1CollectedHeap* g1h, OopClosure* root_cl, VerifyOption vo):
! _g1h(g1h), _root_cl(root_cl), _vo(vo), _nm(NULL), _failures(false) {}
!
! void do_oop(oop* p) { do_oop_work(p); }
! void do_oop(narrowOop* p) { do_oop_work(p); }
!
! void set_nmethod(nmethod* nm) { _nm = nm; }
! bool failures() { return _failures; }
! };
!
! class G1VerifyCodeRootBlobClosure: public CodeBlobClosure {
! G1VerifyCodeRootOopClosure* _oop_cl;
!
! public:
! G1VerifyCodeRootBlobClosure(G1VerifyCodeRootOopClosure* oop_cl):
! _oop_cl(oop_cl) {}
!
! void do_code_blob(CodeBlob* cb) {
! nmethod* nm = cb->as_nmethod_or_null();
! if (nm != NULL) {
! _oop_cl->set_nmethod(nm);
! nm->oops_do(_oop_cl);
! }
! }
! };
!
! class YoungRefCounterClosure : public OopClosure {
! G1CollectedHeap* _g1h;
! int _count;
! public:
! YoungRefCounterClosure(G1CollectedHeap* g1h) : _g1h(g1h), _count(0) {}
! void do_oop(oop* p) { if (_g1h->is_in_young(*p)) { _count++; } }
! void do_oop(narrowOop* p) { ShouldNotReachHere(); }
!
! int count() { return _count; }
! void reset_count() { _count = 0; };
! };
!
! class VerifyKlassClosure: public KlassClosure {
! YoungRefCounterClosure _young_ref_counter_closure;
! OopClosure *_oop_closure;
! public:
! VerifyKlassClosure(G1CollectedHeap* g1h, OopClosure* cl) : _young_ref_counter_closure(g1h), _oop_closure(cl) {}
! void do_klass(Klass* k) {
! k->oops_do(_oop_closure);
!
! _young_ref_counter_closure.reset_count();
! k->oops_do(&_young_ref_counter_closure);
! if (_young_ref_counter_closure.count() > 0) {
! guarantee(k->has_modified_oops(), "Klass " PTR_FORMAT ", has young refs but is not dirty.", p2i(k));
! }
! }
! };
!
! class VerifyLivenessOopClosure: public OopClosure {
! G1CollectedHeap* _g1h;
! VerifyOption _vo;
! public:
! VerifyLivenessOopClosure(G1CollectedHeap* g1h, VerifyOption vo):
! _g1h(g1h), _vo(vo)
! { }
! void do_oop(narrowOop *p) { do_oop_work(p); }
! void do_oop( oop *p) { do_oop_work(p); }
!
! template <class T> void do_oop_work(T *p) {
! oop obj = oopDesc::load_decode_heap_oop(p);
! guarantee(obj == NULL || !_g1h->is_obj_dead_cond(obj, _vo),
! "Dead object referenced by a not dead object");
! }
! };
!
! class VerifyObjsInRegionClosure: public ObjectClosure {
! private:
! G1CollectedHeap* _g1h;
! size_t _live_bytes;
! HeapRegion *_hr;
! VerifyOption _vo;
! public:
! // _vo == UsePrevMarking -> use "prev" marking information,
! // _vo == UseNextMarking -> use "next" marking information,
! // _vo == UseMarkWord -> use mark word from object header.
! VerifyObjsInRegionClosure(HeapRegion *hr, VerifyOption vo)
! : _live_bytes(0), _hr(hr), _vo(vo) {
! _g1h = G1CollectedHeap::heap();
! }
! void do_object(oop o) {
! VerifyLivenessOopClosure isLive(_g1h, _vo);
! assert(o != NULL, "Huh?");
! if (!_g1h->is_obj_dead_cond(o, _vo)) {
! // If the object is alive according to the mark word,
! // then verify that the marking information agrees.
! // Note we can't verify the contrapositive of the
! // above: if the object is dead (according to the mark
! // word), it may not be marked, or may have been marked
! // but has since become dead, or may have been allocated
! // since the last marking.
! if (_vo == VerifyOption_G1UseMarkWord) {
! guarantee(!_g1h->is_obj_dead(o), "mark word and concurrent mark mismatch");
! }
!
! o->oop_iterate_no_header(&isLive);
! if (!_hr->obj_allocated_since_prev_marking(o)) {
! size_t obj_size = o->size(); // Make sure we don't overflow
! _live_bytes += (obj_size * HeapWordSize);
! }
! }
! }
! size_t live_bytes() { return _live_bytes; }
! };
!
! class VerifyArchiveOopClosure: public OopClosure {
! public:
! VerifyArchiveOopClosure(HeapRegion *hr) { }
! void do_oop(narrowOop *p) { do_oop_work(p); }
! void do_oop( oop *p) { do_oop_work(p); }
!
! template <class T> void do_oop_work(T *p) {
! oop obj = oopDesc::load_decode_heap_oop(p);
! guarantee(obj == NULL || G1MarkSweep::in_archive_range(obj),
! "Archive object at " PTR_FORMAT " references a non-archive object at " PTR_FORMAT,
! p2i(p), p2i(obj));
! }
! };
!
! class VerifyArchiveRegionClosure: public ObjectClosure {
! public:
! VerifyArchiveRegionClosure(HeapRegion *hr) { }
! // Verify that all object pointers are to archive regions.
! void do_object(oop o) {
! VerifyArchiveOopClosure checkOop(NULL);
! assert(o != NULL, "Should not be here for NULL oops");
! o->oop_iterate_no_header(&checkOop);
! }
! };
!
! class VerifyRegionClosure: public HeapRegionClosure {
! private:
! bool _par;
! VerifyOption _vo;
! bool _failures;
! public:
! // _vo == UsePrevMarking -> use "prev" marking information,
! // _vo == UseNextMarking -> use "next" marking information,
! // _vo == UseMarkWord -> use mark word from object header.
! VerifyRegionClosure(bool par, VerifyOption vo)
! : _par(par),
! _vo(vo),
! _failures(false) {}
!
! bool failures() {
! return _failures;
! }
!
! bool doHeapRegion(HeapRegion* r) {
! // For archive regions, verify there are no heap pointers to
! // non-pinned regions. For all others, verify liveness info.
! if (r->is_archive()) {
! VerifyArchiveRegionClosure verify_oop_pointers(r);
! r->object_iterate(&verify_oop_pointers);
! return true;
! }
! if (!r->is_continues_humongous()) {
! bool failures = false;
! r->verify(_vo, &failures);
! if (failures) {
! _failures = true;
! } else if (!r->is_starts_humongous()) {
! VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo);
! r->object_iterate(&not_dead_yet_cl);
! if (_vo != VerifyOption_G1UseNextMarking) {
! if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
! log_info(gc, verify)("[" PTR_FORMAT "," PTR_FORMAT "] max_live_bytes " SIZE_FORMAT " < calculated " SIZE_FORMAT,
! p2i(r->bottom()), p2i(r->end()), r->max_live_bytes(), not_dead_yet_cl.live_bytes());
! _failures = true;
! }
! } else {
! // When vo == UseNextMarking we cannot currently do a sanity
! // check on the live bytes as the calculation has not been
! // finalized yet.
! }
! }
! }
! return false; // false == keep iterating; failures are recorded in _failures
! }
! };
!
! // This is the task used for parallel verification of the heap regions
!
! class G1ParVerifyTask: public AbstractGangTask {
! private:
! G1CollectedHeap* _g1h;
! VerifyOption _vo;
! bool _failures;
! HeapRegionClaimer _hrclaimer;
!
! public:
! // _vo == UsePrevMarking -> use "prev" marking information,
! // _vo == UseNextMarking -> use "next" marking information,
! // _vo == UseMarkWord -> use mark word from object header.
! G1ParVerifyTask(G1CollectedHeap* g1h, VerifyOption vo) :
! AbstractGangTask("Parallel verify task"),
! _g1h(g1h),
! _vo(vo),
! _failures(false),
! _hrclaimer(g1h->workers()->active_workers()) {}
!
! bool failures() {
! return _failures;
! }
!
! void work(uint worker_id) {
! HandleMark hm;
! VerifyRegionClosure blk(true, _vo);
! _g1h->heap_region_par_iterate(&blk, worker_id, &_hrclaimer);
! if (blk.failures()) {
! _failures = true;
! }
! }
! };
!
! void G1CollectedHeap::verify(VerifyOption vo) {
! if (!SafepointSynchronize::is_at_safepoint()) {
! log_info(gc, verify)("Skipping verification. Not at safepoint.");
! }
!
! assert(Thread::current()->is_VM_thread(),
! "Expected to be executed serially by the VM thread at this point");
!
! log_debug(gc, verify)("Roots");
! VerifyRootsClosure rootsCl(vo);
! VerifyKlassClosure klassCl(this, &rootsCl);
! CLDToKlassAndOopClosure cldCl(&klassCl, &rootsCl, false);
!
! // We apply the relevant closures to all the oops in the
! // system dictionary, class loader data graph, the string table
! // and the nmethods in the code cache.
! G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
! G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
!
! {
! G1RootProcessor root_processor(this, 1);
! root_processor.process_all_roots(&rootsCl,
! &cldCl,
! &blobsCl);
! }
!
! bool failures = rootsCl.failures() || codeRootsCl.failures();
!
! if (vo != VerifyOption_G1UseMarkWord) {
! // If we're verifying during a full GC then the region sets
! // will have been torn down at the start of the GC. Therefore
! // verifying the region sets will fail. So we only verify
! // the region sets when not in a full GC.
! log_debug(gc, verify)("HeapRegionSets");
! verify_region_sets();
! }
!
! log_debug(gc, verify)("HeapRegions");
! if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
!
! G1ParVerifyTask task(this, vo);
! workers()->run_task(&task);
! if (task.failures()) {
! failures = true;
! }
!
! } else {
! VerifyRegionClosure blk(false, vo);
! heap_region_iterate(&blk);
! if (blk.failures()) {
! failures = true;
! }
! }
!
! if (G1StringDedup::is_enabled()) {
! log_debug(gc, verify)("StrDedup");
! G1StringDedup::verify();
! }
!
! if (failures) {
! log_info(gc, verify)("Heap after failed verification:");
! // It helps to have the per-region information in the output to
! // help us track down what went wrong. This is why we call
! // print_extended_on() instead of print_on().
! LogHandle(gc, verify) log;
! ResourceMark rm;
! print_extended_on(log.info_stream());
! }
! guarantee(!failures, "there should not have been any failures");
! }
!
! double G1CollectedHeap::verify(bool guard, const char* msg) {
! double verify_time_ms = 0.0;
!
! if (guard && total_collections() >= VerifyGCStartAt) {
! double verify_start = os::elapsedTime();
! HandleMark hm; // Discard invalid handles created during verification
! prepare_for_verify();
! Universe::verify(VerifyOption_G1UsePrevMarking, msg);
! verify_time_ms = (os::elapsedTime() - verify_start) * 1000;
! }
!
! return verify_time_ms;
! }
!
! void G1CollectedHeap::verify_before_gc() {
! double verify_time_ms = verify(VerifyBeforeGC, "Before GC");
! g1_policy()->phase_times()->record_verify_before_time_ms(verify_time_ms);
! }
!
! void G1CollectedHeap::verify_after_gc() {
! double verify_time_ms = verify(VerifyAfterGC, "After GC");
! g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms);
}
class PrintRegionClosure: public HeapRegionClosure {
outputStream* _st;
public:
--- 2102,2112 ----
// assert(false, "NYI");
return 0;
}
void G1CollectedHeap::prepare_for_verify() {
! _verifier->prepare_for_verify();
}
class PrintRegionClosure: public HeapRegionClosure {
outputStream* _st;
public:
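
The verification code deleted above is built almost entirely from HeapRegionClosure subclasses: doHeapRegion() returns true to abort the iteration early, and failures are accumulated in a flag the caller checks afterwards. A compact standalone model of that protocol (illustrative names throughout):

    #include <vector>
    #include <cstdio>

    struct Region { int id; bool ok; };

    // Mirrors HeapRegionClosure: return true to abort the iteration early.
    struct RegionClosure {
      virtual ~RegionClosure() {}
      virtual bool do_region(Region& r) = 0;
    };

    struct VerifyClosure : RegionClosure {
      bool failures = false;
      bool do_region(Region& r) override {
        if (!r.ok) {
          printf("## verification failed for region %d\n", r.id);
          failures = true;   // record, but keep going to report all regions
        }
        return false;        // false == continue iterating
      }
    };

    void region_iterate(std::vector<Region>& heap, RegionClosure& cl) {
      for (Region& r : heap) {
        if (cl.do_region(r)) break;   // closure asked to stop
      }
    }

    int main() {
      std::vector<Region> heap{{0, true}, {1, false}, {2, true}};
      VerifyClosure cl;
      region_iterate(heap, cl);
      // The caller then guarantees, like G1's verify(), that nothing failed.
      return cl.failures ? 1 : 0;
    }
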
*** 3655,3666 ****
wait_for_root_region_scanning();
print_heap_before_gc();
trace_heap_before_gc(_gc_tracer_stw);
! verify_region_sets_optional();
! verify_dirty_young_regions();
// This call will decide whether this pause is an initial-mark
// pause. If it is, during_initial_mark_pause() will return true
// for the duration of this pause.
g1_policy()->decide_on_conc_mark_initiation();
--- 2647,2658 ----
wait_for_root_region_scanning();
print_heap_before_gc();
trace_heap_before_gc(_gc_tracer_stw);
! _verifier->verify_region_sets_optional();
! _verifier->verify_dirty_young_regions();
// This call will decide whether this pause is an initial-mark
// pause. If it is, during_initial_mark_pause() will return true
// for the duration of this pause.
g1_policy()->decide_on_conc_mark_initiation();
*** 3739,3751 ****
log_info(gc, verify)("[Verifying RemSets before GC]");
VerifyRegionRemSetClosure v_cl;
heap_region_iterate(&v_cl);
}
! verify_before_gc();
! check_bitmaps("GC Start");
#if defined(COMPILER2) || INCLUDE_JVMCI
DerivedPointerTable::clear();
#endif
--- 2731,2743 ----
log_info(gc, verify)("[Verifying RemSets before GC]");
VerifyRegionRemSetClosure v_cl;
heap_region_iterate(&v_cl);
}
! _verifier->verify_before_gc();
! _verifier->check_bitmaps("GC Start");
#if defined(COMPILER2) || INCLUDE_JVMCI
DerivedPointerTable::clear();
#endif
*** 3799,3809 ****
// entries that need to be handled.
g1_rem_set()->cleanupHRRS();
register_humongous_regions_with_cset();
! assert(check_cset_fast_test(), "Inconsistency in the InCSetState table.");
_cm->note_start_of_gc();
// We call this after finalize_cset() to
// ensure that the CSet has been finalized.
_cm->verify_no_cset_oops();
--- 2791,2801 ----
// entries that need to be handled.
g1_rem_set()->cleanupHRRS();
register_humongous_regions_with_cset();
! assert(_verifier->check_cset_fast_test(), "Inconsistency in the InCSetState table.");
_cm->note_start_of_gc();
// We call this after finalize_cset() to
// ensure that the CSet has been finalized.
_cm->verify_no_cset_oops();
*** 3884,3894 ****
// Note that we don't actually trigger the CM thread at
// this point. We do that later when we're sure that
// the current thread has completed its logging output.
}
! allocate_dummy_regions();
_allocator->init_mutator_alloc_region();
{
size_t expand_bytes = g1_policy()->expansion_amount();
--- 2876,2896 ----
// Note that we don't actually trigger the CM thread at
// this point. We do that later when we're sure that
// the current thread has completed its logging output.
}
! #ifdef ASSERT
! {
! // Let's fill up most of the region
! size_t word_size = HeapRegion::GrainWords - 1024;
!
! // _filler_array_max_size is set to humongous object threshold
! // but temporarily change it to use CollectedHeap::fill_with_object().
! SizeTFlagSetting fs(_filler_array_max_size, word_size);
! _allocator->allocate_dummy_regions(word_size);
! }
! #endif
_allocator->init_mutator_alloc_region();
{
size_t expand_bytes = g1_policy()->expansion_amount();
*** 3949,3960 ****
log_info(gc, verify)("[Verifying RemSets after GC]");
VerifyRegionRemSetClosure v_cl;
heap_region_iterate(&v_cl);
}
! verify_after_gc();
! check_bitmaps("GC End");
assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
ref_processor_stw()->verify_no_references_recorded();
// CM reference discovery will be re-enabled if necessary.
--- 2951,2962 ----
log_info(gc, verify)("[Verifying RemSets after GC]");
VerifyRegionRemSetClosure v_cl;
heap_region_iterate(&v_cl);
}
! _verifier->verify_after_gc();
! _verifier->check_bitmaps("GC End");
assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
ref_processor_stw()->verify_no_references_recorded();
// CM reference discovery will be re-enabled if necessary.
*** 3974,3984 ****
// start as we have some optional output below. We don't want the
// output from the concurrent mark thread interfering with this
// logging output either.
_hrm.verify_optional();
! verify_region_sets_optional();
TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
print_heap_after_gc();
--- 2976,2986 ----
// start as we have some optional output below. We don't want the
// output from the concurrent mark thread interfering with this
// logging output either.
_hrm.verify_optional();
! _verifier->verify_region_sets_optional();
TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
print_heap_after_gc();
*** 5243,5443 ****
_ct_bs->clear(MemRegion(r->bottom(), r->end()));
}
}
};
- #ifndef PRODUCT
- class G1VerifyCardTableCleanup: public HeapRegionClosure {
- G1CollectedHeap* _g1h;
- G1SATBCardTableModRefBS* _ct_bs;
- public:
- G1VerifyCardTableCleanup(G1CollectedHeap* g1h, G1SATBCardTableModRefBS* ct_bs)
- : _g1h(g1h), _ct_bs(ct_bs) { }
- virtual bool doHeapRegion(HeapRegion* r) {
- if (r->is_survivor()) {
- _g1h->verify_dirty_region(r);
- } else {
- _g1h->verify_not_dirty_region(r);
- }
- return false;
- }
- };
-
- void G1CollectedHeap::verify_not_dirty_region(HeapRegion* hr) {
- // All of the region should be clean.
- G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
- MemRegion mr(hr->bottom(), hr->end());
- ct_bs->verify_not_dirty_region(mr);
- }
-
- void G1CollectedHeap::verify_dirty_region(HeapRegion* hr) {
- // We cannot guarantee that [bottom(),end()] is dirty. Threads
- // dirty allocated blocks as they allocate them. The thread that
- // retires each region and replaces it with a new one will do a
- // maximal allocation to fill in [pre_dummy_top(),end()] but will
- // not dirty that area (one less thing to have to do while holding
- // a lock). So we can only verify that [bottom(),pre_dummy_top()]
- // is dirty.
- G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
- MemRegion mr(hr->bottom(), hr->pre_dummy_top());
- if (hr->is_young()) {
- ct_bs->verify_g1_young_region(mr);
- } else {
- ct_bs->verify_dirty_region(mr);
- }
- }
-
- void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
- G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
- for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
- verify_dirty_region(hr);
- }
- }
-
- void G1CollectedHeap::verify_dirty_young_regions() {
- verify_dirty_young_list(_young_list->first_region());
- }
-
- bool G1CollectedHeap::verify_no_bits_over_tams(const char* bitmap_name, CMBitMapRO* bitmap,
- HeapWord* tams, HeapWord* end) {
- guarantee(tams <= end,
- "tams: " PTR_FORMAT " end: " PTR_FORMAT, p2i(tams), p2i(end));
- HeapWord* result = bitmap->getNextMarkedWordAddress(tams, end);
- if (result < end) {
- log_info(gc, verify)("## wrong marked address on %s bitmap: " PTR_FORMAT, bitmap_name, p2i(result));
- log_info(gc, verify)("## %s tams: " PTR_FORMAT " end: " PTR_FORMAT, bitmap_name, p2i(tams), p2i(end));
- return false;
- }
- return true;
- }
-
- bool G1CollectedHeap::verify_bitmaps(const char* caller, HeapRegion* hr) {
- CMBitMapRO* prev_bitmap = concurrent_mark()->prevMarkBitMap();
- CMBitMapRO* next_bitmap = (CMBitMapRO*) concurrent_mark()->nextMarkBitMap();
-
- HeapWord* bottom = hr->bottom();
- HeapWord* ptams = hr->prev_top_at_mark_start();
- HeapWord* ntams = hr->next_top_at_mark_start();
- HeapWord* end = hr->end();
-
- bool res_p = verify_no_bits_over_tams("prev", prev_bitmap, ptams, end);
-
- bool res_n = true;
- // We reset mark_in_progress() before we reset _cmThread->in_progress() and in this window
- // we do the clearing of the next bitmap concurrently. Thus, we cannot verify the bitmap
- // if we happen to be in that state.
- if (collector_state()->mark_in_progress() || !_cmThread->in_progress()) {
- res_n = verify_no_bits_over_tams("next", next_bitmap, ntams, end);
- }
- if (!res_p || !res_n) {
- log_info(gc, verify)("#### Bitmap verification failed for " HR_FORMAT, HR_FORMAT_PARAMS(hr));
- log_info(gc, verify)("#### Caller: %s", caller);
- return false;
- }
- return true;
- }
-
- void G1CollectedHeap::check_bitmaps(const char* caller, HeapRegion* hr) {
- if (!G1VerifyBitmaps) return;
-
- guarantee(verify_bitmaps(caller, hr), "bitmap verification");
- }
-
- class G1VerifyBitmapClosure : public HeapRegionClosure {
- private:
- const char* _caller;
- G1CollectedHeap* _g1h;
- bool _failures;
-
- public:
- G1VerifyBitmapClosure(const char* caller, G1CollectedHeap* g1h) :
- _caller(caller), _g1h(g1h), _failures(false) { }
-
- bool failures() { return _failures; }
-
- virtual bool doHeapRegion(HeapRegion* hr) {
- bool result = _g1h->verify_bitmaps(_caller, hr);
- if (!result) {
- _failures = true;
- }
- return false;
- }
- };
-
- void G1CollectedHeap::check_bitmaps(const char* caller) {
- if (!G1VerifyBitmaps) return;
-
- G1VerifyBitmapClosure cl(caller, this);
- heap_region_iterate(&cl);
- guarantee(!cl.failures(), "bitmap verification");
- }
-
- class G1CheckCSetFastTableClosure : public HeapRegionClosure {
- private:
- bool _failures;
- public:
- G1CheckCSetFastTableClosure() : HeapRegionClosure(), _failures(false) { }
-
- virtual bool doHeapRegion(HeapRegion* hr) {
- uint i = hr->hrm_index();
- InCSetState cset_state = (InCSetState) G1CollectedHeap::heap()->_in_cset_fast_test.get_by_index(i);
- if (hr->is_humongous()) {
- if (hr->in_collection_set()) {
- log_info(gc, verify)("## humongous region %u in CSet", i);
- _failures = true;
- return true;
- }
- if (cset_state.is_in_cset()) {
- log_info(gc, verify)("## inconsistent cset state " CSETSTATE_FORMAT " for humongous region %u", cset_state.value(), i);
- _failures = true;
- return true;
- }
- if (hr->is_continues_humongous() && cset_state.is_humongous()) {
- log_info(gc, verify)("## inconsistent cset state " CSETSTATE_FORMAT " for continues humongous region %u", cset_state.value(), i);
- _failures = true;
- return true;
- }
- } else {
- if (cset_state.is_humongous()) {
- log_info(gc, verify)("## inconsistent cset state " CSETSTATE_FORMAT " for non-humongous region %u", cset_state.value(), i);
- _failures = true;
- return true;
- }
- if (hr->in_collection_set() != cset_state.is_in_cset()) {
- log_info(gc, verify)("## in CSet %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
- hr->in_collection_set(), cset_state.value(), i);
- _failures = true;
- return true;
- }
- if (cset_state.is_in_cset()) {
- if (hr->is_young() != (cset_state.is_young())) {
- log_info(gc, verify)("## is_young %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
- hr->is_young(), cset_state.value(), i);
- _failures = true;
- return true;
- }
- if (hr->is_old() != (cset_state.is_old())) {
- log_info(gc, verify)("## is_old %d / cset state " CSETSTATE_FORMAT " inconsistency for region %u",
- hr->is_old(), cset_state.value(), i);
- _failures = true;
- return true;
- }
- }
- }
- return false;
- }
-
- bool failures() const { return _failures; }
- };
-
- bool G1CollectedHeap::check_cset_fast_test() {
- G1CheckCSetFastTableClosure cl;
- _hrm.iterate(&cl);
- return !cl.failures();
- }
- #endif // PRODUCT
-
class G1ParScrubRemSetTask: public AbstractGangTask {
protected:
G1RemSet* _g1rs;
BitMap* _region_bm;
BitMap* _card_bm;
--- 4245,4254 ----
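
The deleted verify_no_bits_over_tams() enforces the invariant that a marking bitmap has no bits set at or above the region's TAMS. The same check over a toy bitmap, with a std::vector<bool> indexed by word and hypothetical tams/end indices:

    #include <vector>
    #include <cstdio>
    #include <cstddef>

    // Returns true iff no bit is set in [tams, end) -- the invariant the
    // deleted verify_no_bits_over_tams() enforces per region and bitmap.
    static bool verify_no_bits_over_tams(const char* name,
                                         const std::vector<bool>& bitmap,
                                         size_t tams, size_t end) {
      for (size_t i = tams; i < end && i < bitmap.size(); ++i) {
        if (bitmap[i]) {
          printf("## wrong marked address on %s bitmap at index %zu "
                 "(tams %zu, end %zu)\n", name, i, tams, end);
          return false;
        }
      }
      return true;
    }

    int main() {
      std::vector<bool> prev(16, false);
      prev[3] = true;                    // mark below TAMS: allowed
      bool ok = verify_no_bits_over_tams("prev", prev, /*tams=*/8, /*end=*/16);
      printf("prev bitmap %s\n", ok ? "clean above TAMS" : "corrupt");
      return ok ? 0 : 1;
    }
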
*** 5471,5484 ****
// Iterate over the dirty cards region list.
G1ParCleanupCTTask cleanup_task(ct_bs, this);
workers()->run_task(&cleanup_task);
#ifndef PRODUCT
! if (G1VerifyCTCleanup || VerifyAfterGC) {
! G1VerifyCardTableCleanup cleanup_verifier(this, ct_bs);
! heap_region_iterate(&cleanup_verifier);
! }
#endif
}
double elapsed = os::elapsedTime() - start;
g1_policy()->phase_times()->record_clear_ct_time(elapsed * 1000.0);
--- 4282,4292 ----
// Iterate over the dirty cards region list.
G1ParCleanupCTTask cleanup_task(ct_bs, this);
workers()->run_task(&cleanup_task);
#ifndef PRODUCT
! _verifier->verify_card_table_cleanup();
#endif
}
double elapsed = os::elapsedTime() - start;
g1_policy()->phase_times()->record_clear_ct_time(elapsed * 1000.0);
*** 5996,6006 ****
false /* is_old */,
false /* do_expand */);
if (new_alloc_region != NULL) {
set_region_short_lived_locked(new_alloc_region);
_hr_printer.alloc(new_alloc_region, young_list_full);
! check_bitmaps("Mutator Region Allocation", new_alloc_region);
return new_alloc_region;
}
}
return NULL;
}
--- 4804,4814 ----
false /* is_old */,
false /* do_expand */);
if (new_alloc_region != NULL) {
set_region_short_lived_locked(new_alloc_region);
_hr_printer.alloc(new_alloc_region, young_list_full);
! _verifier->check_bitmaps("Mutator Region Allocation", new_alloc_region);
return new_alloc_region;
}
}
return NULL;
}
*** 6036,6049 ****
// should never scan survivors. But it doesn't hurt to do it
// for survivors too.
new_alloc_region->record_timestamp();
if (is_survivor) {
new_alloc_region->set_survivor();
! check_bitmaps("Survivor Region Allocation", new_alloc_region);
} else {
new_alloc_region->set_old();
! check_bitmaps("Old Region Allocation", new_alloc_region);
}
_hr_printer.alloc(new_alloc_region);
bool during_im = collector_state()->during_initial_mark_pause();
new_alloc_region->note_start_of_copying(during_im);
return new_alloc_region;
--- 4844,4857 ----
// should never scan survivors. But it doesn't hurt to do it
// for survivors too.
new_alloc_region->record_timestamp();
if (is_survivor) {
new_alloc_region->set_survivor();
! _verifier->check_bitmaps("Survivor Region Allocation", new_alloc_region);
} else {
new_alloc_region->set_old();
! _verifier->check_bitmaps("Old Region Allocation", new_alloc_region);
}
_hr_printer.alloc(new_alloc_region);
bool during_im = collector_state()->during_initial_mark_pause();
new_alloc_region->note_start_of_copying(during_im);
return new_alloc_region;
*** 6079,6175 ****
return region_at(index);
}
return NULL;
}
- // Heap region set verification
-
- class VerifyRegionListsClosure : public HeapRegionClosure {
- private:
- HeapRegionSet* _old_set;
- HeapRegionSet* _humongous_set;
- HeapRegionManager* _hrm;
-
- public:
- uint _old_count;
- uint _humongous_count;
- uint _free_count;
-
- VerifyRegionListsClosure(HeapRegionSet* old_set,
- HeapRegionSet* humongous_set,
- HeapRegionManager* hrm) :
- _old_set(old_set), _humongous_set(humongous_set), _hrm(hrm),
- _old_count(), _humongous_count(), _free_count(){ }
-
- bool doHeapRegion(HeapRegion* hr) {
- if (hr->is_young()) {
- // TODO
- } else if (hr->is_humongous()) {
- assert(hr->containing_set() == _humongous_set, "Heap region %u is humongous but not in humongous set.", hr->hrm_index());
- _humongous_count++;
- } else if (hr->is_empty()) {
- assert(_hrm->is_free(hr), "Heap region %u is empty but not on the free list.", hr->hrm_index());
- _free_count++;
- } else if (hr->is_old()) {
- assert(hr->containing_set() == _old_set, "Heap region %u is old but not in the old set.", hr->hrm_index());
- _old_count++;
- } else {
- // There are no other valid region types. Check for one invalid
- // one we can identify: pinned without old or humongous set.
- assert(!hr->is_pinned(), "Heap region %u is pinned but not old (archive) or humongous.", hr->hrm_index());
- ShouldNotReachHere();
- }
- return false;
- }
-
- void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, HeapRegionManager* free_list) {
- guarantee(old_set->length() == _old_count, "Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count);
- guarantee(humongous_set->length() == _humongous_count, "Hum set count mismatch. Expected %u, actual %u.", humongous_set->length(), _humongous_count);
- guarantee(free_list->num_free_regions() == _free_count, "Free list count mismatch. Expected %u, actual %u.", free_list->num_free_regions(), _free_count);
- }
- };
-
- void G1CollectedHeap::verify_region_sets() {
- assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
-
- // First, check the explicit lists.
- _hrm.verify();
- {
- // Given that a concurrent operation might be adding regions to
- // the secondary free list we have to take the lock before
- // verifying it.
- MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
- _secondary_free_list.verify_list();
- }
-
- // If a concurrent region freeing operation is in progress it will
- // be difficult to correctly attribute any free regions we come
- // across to the correct free list given that they might belong to
- // one of several (free_list, secondary_free_list, any local lists,
- // etc.). So, if that's the case we will skip the rest of the
- // verification operation. Alternatively, waiting for the concurrent
- // operation to complete will have a non-trivial effect on the GC's
- // operation (no concurrent operation will last longer than the
- // interval between two calls to verification) and it might hide
- // any issues that we would like to catch during testing.
- if (free_regions_coming()) {
- return;
- }
-
- // Make sure we append the secondary_free_list on the free_list so
- // that all free regions we will come across can be safely
- // attributed to the free_list.
- append_secondary_free_list_if_not_empty_with_lock();
-
- // Finally, make sure that the region accounting in the lists is
- // consistent with what we see in the heap.
-
- VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_hrm);
- heap_region_iterate(&cl);
- cl.verify_counts(&_old_set, &_humongous_set, &_hrm);
- }
-
// Optimized nmethod scanning
class RegisterNMethodOopClosure: public OopClosure {
G1CollectedHeap* _g1h;
nmethod* _nm;
--- 4887,4896 ----
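
The deleted VerifyRegionListsClosure cross-checks region counts gathered by iterating the heap against the lengths reported by the old set, humongous set, and free list. A stripped-down version of that accounting check (toy types, assumed set lengths):

    #include <vector>
    #include <cassert>
    #include <cstddef>

    enum class Kind { Young, Humongous, Free, Old };

    struct Region { Kind kind; };

    // Counts regions per kind while iterating, like VerifyRegionListsClosure.
    struct Counts { size_t humongous = 0, free = 0, old_gen = 0; };

    Counts count_regions(const std::vector<Region>& heap) {
      Counts c;
      for (const Region& r : heap) {
        switch (r.kind) {
          case Kind::Young:     break;   // young list is checked elsewhere
          case Kind::Humongous: c.humongous++; break;
          case Kind::Free:      c.free++;      break;
          case Kind::Old:       c.old_gen++;   break;
        }
      }
      return c;
    }

    int main() {
      std::vector<Region> heap{{Kind::Old}, {Kind::Free}, {Kind::Humongous},
                               {Kind::Old}, {Kind::Young}};
      // Lengths the sets claim to have (would come from _old_set etc.).
      size_t old_set_len = 2, humongous_set_len = 1, free_list_len = 1;
      Counts c = count_regions(heap);
      // verify_counts(): iteration and set bookkeeping must agree.
      assert(c.old_gen == old_set_len);
      assert(c.humongous == humongous_set_len);
      assert(c.free == free_list_len);
      return 0;
    }
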