--- old/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	2014-12-03 13:26:02.182088007 +0100
+++ new/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	2014-12-03 13:26:02.113085986 +0100
@@ -3819,6 +3819,8 @@
 
   register_humongous_regions_with_in_cset_fast_test();
 
+  assert(check_cset_fast_test(), "Inconsistency in the InCSetState table.");
+
   _cm->note_start_of_gc();
   // We should not verify the per-thread SATB buffers given that
   // we have not filtered them yet (we'll do so during the
@@ -4048,29 +4050,6 @@
   return true;
 }
 
-size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose)
-{
-  size_t gclab_word_size;
-  switch (purpose) {
-    case GCAllocForSurvived:
-      gclab_word_size = _survivor_plab_stats.desired_plab_sz();
-      break;
-    case GCAllocForTenured:
-      gclab_word_size = _old_plab_stats.desired_plab_sz();
-      break;
-    default:
-      assert(false, "unknown GCAllocPurpose");
-      gclab_word_size = _old_plab_stats.desired_plab_sz();
-      break;
-  }
-
-  // Prevent humongous PLAB sizes for two reasons:
-  // * PLABs are allocated using a similar paths as oops, but should
-  //   never be in a humongous region
-  // * Allowing humongous PLABs needlessly churns the region free lists
-  return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
-}
-
 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
   _drain_in_progress = false;
   set_evac_failure_closure(cl);
@@ -4196,35 +4175,6 @@
   }
 }
 
-HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
-                                                  size_t word_size,
-                                                  AllocationContext_t context) {
-  if (purpose == GCAllocForSurvived) {
-    HeapWord* result = survivor_attempt_allocation(word_size, context);
-    if (result != NULL) {
-      return result;
-    } else {
-      // Let's try to allocate in the old gen in case we can fit the
-      // object there.
-      return old_attempt_allocation(word_size, context);
-    }
-  } else {
-    assert(purpose == GCAllocForTenured, "sanity");
-    HeapWord* result = old_attempt_allocation(word_size, context);
-    if (result != NULL) {
-      return result;
-    } else {
-      // Let's try to allocate in the survivors in case we can fit the
-      // object there.
-      return survivor_attempt_allocation(word_size, context);
-    }
-  }
-
-  ShouldNotReachHere();
-  // Trying to keep some compilers happy.
-  return NULL;
-}
-
 void G1ParCopyHelper::mark_object(oop obj) {
   assert(!_g1->heap_region_containing(obj)->in_collection_set(),
          "should not mark objects in the CSet");
@@ -4267,15 +4217,14 @@
   assert(_worker_id == _par_scan_state->queue_num(), "sanity");
 
-  G1CollectedHeap::in_cset_state_t state = _g1->in_cset_state(obj);
-
-  if (state == G1CollectedHeap::InCSet) {
+  const in_cset_state_t state = _g1->in_cset_state(obj);
+  if (InCSetState::is_in_cset(state)) {
     oop forwardee;
     markOop m = obj->mark();
     if (m->is_marked()) {
       forwardee = (oop) m->decode_pointer();
     } else {
-      forwardee = _par_scan_state->copy_to_survivor_space(obj, m);
+      forwardee = _par_scan_state->copy_to_survivor_space(state, obj, m);
     }
     assert(forwardee != NULL, "forwardee should not be NULL");
     oopDesc::encode_store_heap_oop(p, forwardee);
@@ -4289,7 +4238,7 @@
       do_klass_barrier(p, forwardee);
     }
   } else {
-    if (state == G1CollectedHeap::IsHumongous) {
+    if (InCSetState::is_humongous(state)) {
       _g1->set_humongous_is_live(obj);
     }
     // The object is not in collection set. If we're a root scanning
@@ -5145,16 +5094,16 @@
   oop obj = *p;
   assert(obj != NULL, "the caller should have filtered out NULL values");
 
-  G1CollectedHeap::in_cset_state_t cset_state = _g1->in_cset_state(obj);
-  if (cset_state == G1CollectedHeap::InNeither) {
+  const in_cset_state_t cset_state = _g1->in_cset_state(obj);
+  if (InCSetState::is_not_in_cset(cset_state)) {
     return;
   }
-  if (cset_state == G1CollectedHeap::InCSet) {
+  if (InCSetState::is_in_cset(cset_state)) {
     assert( obj->is_forwarded(), "invariant" );
     *p = obj->forwardee();
   } else {
     assert(!obj->is_forwarded(), "invariant" );
-    assert(cset_state == G1CollectedHeap::IsHumongous,
+    assert(InCSetState::is_humongous(cset_state),
            err_msg("Only allowed InCSet state is IsHumongous, but is %d", cset_state));
     _g1->set_humongous_is_live(obj);
   }
@@ -5820,7 +5769,9 @@
   }
 };
 
-#ifndef PRODUCT
+#ifdef PRODUCT
+bool G1CollectedHeap::check_cset_fast_test() { return true; }
+#else
 class G1VerifyCardTableCleanup: public HeapRegionClosure {
   G1CollectedHeap* _g1h;
   G1SATBCardTableModRefBS* _ct_bs;
@@ -5951,6 +5902,58 @@
   heap_region_iterate(&cl);
   guarantee(!cl.failures(), "bitmap verification");
 }
+
+bool G1CollectedHeap::check_cset_fast_test() {
+  bool failures = false;
+  for (uint i = 0; i < _hrm.length(); i += 1) {
+    HeapRegion* hr = _hrm.at(i);
+    in_cset_state_t cset_state = (in_cset_state_t) _in_cset_fast_test.get_by_index((uint) i);
+    if (hr->is_humongous()) {
+      if (hr->in_collection_set()) {
+        gclog_or_tty->print_cr("\n## humongous region %u in CSet", i);
+        failures = true;
+        break;
+      }
+      if (InCSetState::is_in_cset(cset_state)) {
+        gclog_or_tty->print_cr("\n## inconsistent cset state %d for humongous region %u", cset_state, i);
+        failures = true;
+        break;
+      }
+      if (hr->is_continues_humongous() && InCSetState::is_humongous(cset_state)) {
+        gclog_or_tty->print_cr("\n## inconsistent cset state %d for continues humongous region %u", cset_state, i);
+        failures = true;
+        break;
+      }
+    } else {
+      if (InCSetState::is_humongous(cset_state)) {
+        gclog_or_tty->print_cr("\n## inconsistent cset state %d for non-humongous region %u", cset_state, i);
+        failures = true;
+        break;
+      }
+      if (hr->in_collection_set() != InCSetState::is_in_cset(cset_state)) {
+        gclog_or_tty->print_cr("\n## in CSet %d / cset state %d inconsistency for region %u",
+                               hr->in_collection_set(), cset_state, i);
+        failures = true;
+        break;
+      }
+      if (InCSetState::is_in_cset(cset_state)) {
+        if (hr->is_young() != (cset_state == InCSetState::Young)) {
+          gclog_or_tty->print_cr("\n## is_young %d / cset state %d inconsistency for region %u",
+                                 hr->is_young(), cset_state, i);
+          failures = true;
+          break;
+        }
+        if (hr->is_old() != (cset_state == InCSetState::Old)) {
+          gclog_or_tty->print_cr("\n## is_old %d / cset state %d inconsistency for region %u",
+                                 hr->is_old(), cset_state, i);
+          failures = true;
+          break;
+        }
+      }
+    }
+  }
+  return !failures;
+}
 #endif // PRODUCT
 
 void G1CollectedHeap::cleanUpCardTable() {
@@ -6519,20 +6522,20 @@
 
 HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
                                                  uint count,
-                                                 GCAllocPurpose ap) {
+                                                 in_cset_state_t dest) {
   assert(FreeList_lock->owned_by_self(), "pre-condition");
 
-  if (count < g1_policy()->max_regions(ap)) {
-    bool survivor = (ap == GCAllocForSurvived);
+  if (count < g1_policy()->max_regions(dest)) {
+    const bool is_survivor = (dest == InCSetState::Young);
     HeapRegion* new_alloc_region = new_region(word_size,
-                                              !survivor,
+                                              !is_survivor,
                                               true /* do_expand */);
     if (new_alloc_region != NULL) {
       // We really only need to do this for old regions given that we
       // should never scan survivors. But it doesn't hurt to do it
       // for survivors too.
       new_alloc_region->record_timestamp();
-      if (survivor) {
+      if (is_survivor) {
        new_alloc_region->set_survivor();
        _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
        check_bitmaps("Survivor Region Allocation", new_alloc_region);
@@ -6544,8 +6547,6 @@
      bool during_im = g1_policy()->during_initial_mark_pause();
      new_alloc_region->note_start_of_copying(during_im);
      return new_alloc_region;
-    } else {
-      g1_policy()->note_alloc_region_limit_reached(ap);
    }
  }
  return NULL;
@@ -6553,11 +6554,11 @@
 
 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
                                              size_t allocated_bytes,
-                                             GCAllocPurpose ap) {
+                                             in_cset_state_t dest) {
  bool during_im = g1_policy()->during_initial_mark_pause();
  alloc_region->note_end_of_copying(during_im);
  g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
-  if (ap == GCAllocForSurvived) {
+  if (dest == InCSetState::Young) {
    young_list()->add_survivor_region(alloc_region);
  } else {
    _old_set.add(alloc_region);
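
Note: the hunks above lean on an InCSetState helper whose definition lives outside this file (elsewhere in this patch, presumably in a g1InCSetState.hpp-style header not shown here). As a rough, standalone sketch of the shape those call sites imply: the type name in_cset_state_t and the helper names are taken from the hunks, while the enumerator values below are illustrative assumptions, not the patch's actual definitions.

    #include <stdint.h>

    // Hypothetical sketch only -- not part of this patch.
    // One plausible encoding consistent with the call sites above:
    // humongous regions get a marker below NotInCSet so that
    // is_in_cset() reduces to a single comparison, while Young and Old
    // identify the two evacuation destinations.
    typedef int8_t in_cset_state_t;

    struct InCSetState {
      enum {
        Humongous = -1,  // assumption: any value below NotInCSet would do
        NotInCSet =  0,
        Young     =  1,  // also usable as the survivor PLAB destination index
        Old       =  2   // also usable as the old-gen PLAB destination index
      };

      static bool is_humongous(in_cset_state_t state)   { return state == Humongous; }
      static bool is_in_cset(in_cset_state_t state)     { return state > NotInCSet; }
      static bool is_not_in_cset(in_cset_state_t state) { return state == NotInCSet; }
    };

Under an encoding like this, the per-region byte table (_in_cset_fast_test) answers "in CSet?", "humongous?", and "young or old?" with one load and one compare, which is presumably why copy_to_survivor_space() now takes the source region's state and why new_gc_alloc_region()/retire_gc_alloc_region() can use InCSetState::Young in place of GCAllocForSurvived.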