
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

rev 7557 : 8060025: Object copy time regressions after JDK-8031323 and JDK-8057536
Summary: Evaluate and improve object copy time by micro-optimizations and splitting out slow and fast paths aggressively.
Reviewed-by:
Contributed-by: Tony Printezis <tprintezis@twitter.com>, Thomas Schatzl <thomas.schatzl@oracle.com>
rev 7558 : imported patch 8060025-mikael-review1
rev 7559 : imported patch mikael-refactor-cset-state
rev 7560 : imported patch kim-review
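
Reviewer note: the hunks below replace the old GCAllocPurpose-based paths with the new InCSetState value type. For orientation only, here is a rough sketch of the interface this file relies on (is_in_cset(), is_humongous(), is_in_cset_or_humongous(), is_young(), is_old(), value()). It is not the g1InCSetState.hpp from the patch; the constant names, their numeric values and the int8_t representation are assumptions made for illustration.

  #include <stdint.h>

  // Sketch only -- constants, values and storage type are assumptions,
  // not the actual g1InCSetState.hpp introduced by this change.
  struct InCSetState {
   public:
    typedef int8_t in_cset_state_t;          // assumed storage type
   private:
    in_cset_state_t _value;
   public:
    enum {
      Humongous = -1,   // not in the collection set, but humongous
      NotInCSet =  0,
      Young     =  1,   // in the collection set, destined for survivor space
      Old       =  2    // in the collection set, destined for old space
    };
    InCSetState(in_cset_state_t value = NotInCSet) : _value(value) { }
    in_cset_state_t value() const        { return _value; }
    bool is_in_cset() const              { return _value > NotInCSet; }
    bool is_humongous() const            { return _value < NotInCSet; }
    bool is_in_cset_or_humongous() const { return _value != NotInCSet; }
    bool is_young() const                { return _value == Young; }
    bool is_old() const                  { return _value == Old; }
  };

With a single signed byte per region, the copy and allocation paths below can test is_in_cset()/is_humongous() with one comparison instead of matching against the old G1CollectedHeap::InCSet/IsHumongous/InNeither constants.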

*** 3817,3826 ****
--- 3817,3828 ----
          g1_policy()->finalize_cset(target_pause_time_ms, evacuation_info);
  
          register_humongous_regions_with_in_cset_fast_test();
  
+         assert(check_cset_fast_test(), "Inconsistency in the InCSetState table.");
+ 
          _cm->note_start_of_gc();
          // We should not verify the per-thread SATB buffers given that
          // we have not filtered them yet (we'll do so during the
          // GC). We also call this after finalize_cset() to
          // ensure that the CSet has been finalized.
*** 4046,4078 ****
    }
    return true;
  }
  
- size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose)
- {
-   size_t gclab_word_size;
-   switch (purpose) {
-     case GCAllocForSurvived:
-       gclab_word_size = _survivor_plab_stats.desired_plab_sz();
-       break;
-     case GCAllocForTenured:
-       gclab_word_size = _old_plab_stats.desired_plab_sz();
-       break;
-     default:
-       assert(false, "unknown GCAllocPurpose");
-       gclab_word_size = _old_plab_stats.desired_plab_sz();
-       break;
-   }
- 
-   // Prevent humongous PLAB sizes for two reasons:
-   // * PLABs are allocated using a similar paths as oops, but should
-   //   never be in a humongous region
-   // * Allowing humongous PLABs needlessly churns the region free lists
-   return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
- }
- 
  void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
    _drain_in_progress = false;
    set_evac_failure_closure(cl);
    _evac_failure_scan_stack = new (ResourceObj::C_HEAP, mtGC) GrowableArray<oop>(40, true);
  }
--- 4048,4057 ----
*** 4194,4232 ****
      _objs_with_preserved_marks.push(obj);
      _preserved_marks_of_objs.push(m);
    }
  }
  
- HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
-                                                   size_t word_size,
-                                                   AllocationContext_t context) {
-   if (purpose == GCAllocForSurvived) {
-     HeapWord* result = survivor_attempt_allocation(word_size, context);
-     if (result != NULL) {
-       return result;
-     } else {
-       // Let's try to allocate in the old gen in case we can fit the
-       // object there.
-       return old_attempt_allocation(word_size, context);
-     }
-   } else {
-     assert(purpose == GCAllocForTenured, "sanity");
-     HeapWord* result = old_attempt_allocation(word_size, context);
-     if (result != NULL) {
-       return result;
-     } else {
-       // Let's try to allocate in the survivors in case we can fit the
-       // object there.
-       return survivor_attempt_allocation(word_size, context);
-     }
-   }
- 
-   ShouldNotReachHere();
-   // Trying to keep some compilers happy.
-   return NULL;
- }
- 
  void G1ParCopyHelper::mark_object(oop obj) {
    assert(!_g1->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");
  
    // We know that the object is not moving so it's safe to read its size.
    _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
--- 4173,4182 ----
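
Reviewer note: par_allocate_during_gc() is removed above and its purpose-based branching moves out of this file. The replacement dispatch on the destination state is not visible in this webrev, so the following is only a hypothetical sketch of an equivalent InCSetState-keyed retry; the wrapper name allocate_for_dest is invented, while survivor_attempt_allocation() and old_attempt_allocation() are the existing helpers used by the removed code.

  // Hypothetical sketch -- illustrates the fast path (allocate in the requested
  // destination) with a single fallback into the other space, keyed off the
  // InCSetState destination instead of GCAllocPurpose.
  HeapWord* allocate_for_dest(InCSetState dest, size_t word_size,
                              AllocationContext_t context) {
    if (dest.is_young()) {
      HeapWord* result = survivor_attempt_allocation(word_size, context);
      if (result != NULL) {
        return result;
      }
      // Survivor space is full; try to fit the object in the old gen.
      return old_attempt_allocation(word_size, context);
    }
    assert(dest.is_old(), "sanity");
    HeapWord* result = old_attempt_allocation(word_size, context);
    if (result != NULL) {
      return result;
    }
    // Old gen allocation failed; try the survivor space instead.
    return survivor_attempt_allocation(word_size, context);
  }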
*** 4265,4283 ****
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
  
    assert(_worker_id == _par_scan_state->queue_num(), "sanity");
  
!   G1CollectedHeap::in_cset_state_t state = _g1->in_cset_state(obj);
! 
!   if (state == G1CollectedHeap::InCSet) {
      oop forwardee;
      markOop m = obj->mark();
      if (m->is_marked()) {
        forwardee = (oop) m->decode_pointer();
      } else {
!       forwardee = _par_scan_state->copy_to_survivor_space(obj, m);
      }
      assert(forwardee != NULL, "forwardee should not be NULL");
      oopDesc::encode_store_heap_oop(p, forwardee);
      if (do_mark_object != G1MarkNone && forwardee != obj) {
        // If the object is self-forwarded we don't need to explicitly
--- 4215,4232 ----
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
  
    assert(_worker_id == _par_scan_state->queue_num(), "sanity");
  
!   const InCSetState state = _g1->in_cset_state(obj);
!   if (state.is_in_cset()) {
      oop forwardee;
      markOop m = obj->mark();
      if (m->is_marked()) {
        forwardee = (oop) m->decode_pointer();
      } else {
!       forwardee = _par_scan_state->copy_to_survivor_space(state, obj, m);
      }
      assert(forwardee != NULL, "forwardee should not be NULL");
      oopDesc::encode_store_heap_oop(p, forwardee);
      if (do_mark_object != G1MarkNone && forwardee != obj) {
        // If the object is self-forwarded we don't need to explicitly
*** 4287,4297 ****
  
      if (barrier == G1BarrierKlass) {
        do_klass_barrier(p, forwardee);
      }
    } else {
!     if (state == G1CollectedHeap::IsHumongous) {
        _g1->set_humongous_is_live(obj);
      }
      // The object is not in collection set. If we're a root scanning
      // closure during an initial mark pause then attempt to mark the object.
      if (do_mark_object == G1MarkFromRoot) {
--- 4236,4246 ----
  
      if (barrier == G1BarrierKlass) {
        do_klass_barrier(p, forwardee);
      }
    } else {
!     if (state.is_humongous()) {
        _g1->set_humongous_is_live(obj);
      }
      // The object is not in collection set. If we're a root scanning
      // closure during an initial mark pause then attempt to mark the object.
      if (do_mark_object == G1MarkFromRoot) {
*** 5143,5163 ****
    void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
    void do_oop(oop* p) {
      oop obj = *p;
      assert(obj != NULL, "the caller should have filtered out NULL values");
  
!     G1CollectedHeap::in_cset_state_t cset_state = _g1->in_cset_state(obj);
!     if (cset_state == G1CollectedHeap::InNeither) {
        return;
      }
!     if (cset_state == G1CollectedHeap::InCSet) {
        assert( obj->is_forwarded(), "invariant" );
        *p = obj->forwardee();
      } else {
        assert(!obj->is_forwarded(), "invariant" );
!       assert(cset_state == G1CollectedHeap::IsHumongous,
!              err_msg("Only allowed InCSet state is IsHumongous, but is %d", cset_state));
        _g1->set_humongous_is_live(obj);
      }
    }
  };
--- 5092,5112 ----
    void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
    void do_oop(oop* p) {
      oop obj = *p;
      assert(obj != NULL, "the caller should have filtered out NULL values");
  
!     const InCSetState cset_state = _g1->in_cset_state(obj);
!     if (!cset_state.is_in_cset_or_humongous()) {
        return;
      }
!     if (cset_state.is_in_cset()) {
        assert( obj->is_forwarded(), "invariant" );
        *p = obj->forwardee();
      } else {
        assert(!obj->is_forwarded(), "invariant" );
!       assert(cset_state.is_humongous(),
!              err_msg("Only allowed InCSet state is IsHumongous, but is %d", cset_state.value()));
        _g1->set_humongous_is_live(obj);
      }
    }
  };
*** 5949,5958 ****
--- 5898,5959 ----
    G1VerifyBitmapClosure cl(caller, this);
    heap_region_iterate(&cl);
    guarantee(!cl.failures(), "bitmap verification");
  }
+ 
+ bool G1CollectedHeap::check_cset_fast_test() {
+   bool failures = false;
+   for (uint i = 0; i < _hrm.length(); i += 1) {
+     HeapRegion* hr = _hrm.at(i);
+     InCSetState cset_state = (InCSetState) _in_cset_fast_test.get_by_index((uint) i);
+     if (hr->is_humongous()) {
+       if (hr->in_collection_set()) {
+         gclog_or_tty->print_cr("\n## humongous region %u in CSet", i);
+         failures = true;
+         break;
+       }
+       if (cset_state.is_in_cset()) {
+         gclog_or_tty->print_cr("\n## inconsistent cset state %d for humongous region %u", cset_state.value(), i);
+         failures = true;
+         break;
+       }
+       if (hr->is_continues_humongous() && cset_state.is_humongous()) {
+         gclog_or_tty->print_cr("\n## inconsistent cset state %d for continues humongous region %u", cset_state.value(), i);
+         failures = true;
+         break;
+       }
+     } else {
+       if (cset_state.is_humongous()) {
+         gclog_or_tty->print_cr("\n## inconsistent cset state %d for non-humongous region %u", cset_state.value(), i);
+         failures = true;
+         break;
+       }
+       if (hr->in_collection_set() != cset_state.is_in_cset()) {
+         gclog_or_tty->print_cr("\n## in CSet %d / cset state %d inconsistency for region %u",
+                                hr->in_collection_set(), cset_state.value(), i);
+         failures = true;
+         break;
+       }
+       if (cset_state.is_in_cset()) {
+         if (hr->is_young() != (cset_state.is_young())) {
+           gclog_or_tty->print_cr("\n## is_young %d / cset state %d inconsistency for region %u",
+                                  hr->is_young(), cset_state.value(), i);
+           failures = true;
+           break;
+         }
+         if (hr->is_old() != (cset_state.is_old())) {
+           gclog_or_tty->print_cr("\n## is_old %d / cset state %d inconsistency for region %u",
+                                  hr->is_old(), cset_state.value(), i);
+           failures = true;
+           break;
+         }
+       }
+     }
+   }
+   return !failures;
+ }
  #endif // PRODUCT
  
  void G1CollectedHeap::cleanUpCardTable() {
    G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
    double start = os::elapsedTime();
*** 6517,6540 ****
  // Methods for the GC alloc regions
  
  HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
                                                   uint count,
!                                                  GCAllocPurpose ap) {
    assert(FreeList_lock->owned_by_self(), "pre-condition");
  
!   if (count < g1_policy()->max_regions(ap)) {
!     bool survivor = (ap == GCAllocForSurvived);
      HeapRegion* new_alloc_region = new_region(word_size,
!                                               !survivor,
                                                true /* do_expand */);
      if (new_alloc_region != NULL) {
        // We really only need to do this for old regions given that we
        // should never scan survivors. But it doesn't hurt to do it
        // for survivors too.
        new_alloc_region->record_timestamp();
!       if (survivor) {
          new_alloc_region->set_survivor();
          _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
          check_bitmaps("Survivor Region Allocation", new_alloc_region);
        } else {
          new_alloc_region->set_old();
--- 6518,6541 ----
  // Methods for the GC alloc regions
  
  HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
                                                   uint count,
!                                                  InCSetState dest) {
    assert(FreeList_lock->owned_by_self(), "pre-condition");
  
!   if (count < g1_policy()->max_regions(dest)) {
!     const bool is_survivor = (dest.is_young());
      HeapRegion* new_alloc_region = new_region(word_size,
!                                               !is_survivor,
                                                true /* do_expand */);
      if (new_alloc_region != NULL) {
        // We really only need to do this for old regions given that we
        // should never scan survivors. But it doesn't hurt to do it
        // for survivors too.
        new_alloc_region->record_timestamp();
!       if (is_survivor) {
          new_alloc_region->set_survivor();
          _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
          check_bitmaps("Survivor Region Allocation", new_alloc_region);
        } else {
          new_alloc_region->set_old();
*** 6542,6565 ****
        check_bitmaps("Old Region Allocation", new_alloc_region);
      }
      bool during_im = g1_policy()->during_initial_mark_pause();
      new_alloc_region->note_start_of_copying(during_im);
      return new_alloc_region;
-     } else {
-       g1_policy()->note_alloc_region_limit_reached(ap);
      }
    }
    return NULL;
  }
  
  void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
                                               size_t allocated_bytes,
!                                              GCAllocPurpose ap) {
    bool during_im = g1_policy()->during_initial_mark_pause();
    alloc_region->note_end_of_copying(during_im);
    g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
!   if (ap == GCAllocForSurvived) {
      young_list()->add_survivor_region(alloc_region);
    } else {
      _old_set.add(alloc_region);
    }
    _hr_printer.retire(alloc_region);
--- 6543,6564 ----
        check_bitmaps("Old Region Allocation", new_alloc_region);
      }
      bool during_im = g1_policy()->during_initial_mark_pause();
      new_alloc_region->note_start_of_copying(during_im);
      return new_alloc_region;
      }
    }
    return NULL;
  }
  
  void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
                                               size_t allocated_bytes,
!                                              InCSetState dest) {
    bool during_im = g1_policy()->during_initial_mark_pause();
    alloc_region->note_end_of_copying(during_im);
    g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
!   if (dest.is_young()) {
      young_list()->add_survivor_region(alloc_region);
    } else {
      _old_set.add(alloc_region);
    }
    _hr_printer.retire(alloc_region);
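
Reviewer note: callers of the retained new_gc_alloc_region()/retire_gc_alloc_region() now pass a destination state instead of a GCAllocPurpose. The call sites are outside this hunk; the shape below is assumed for illustration only.

  // Assumed call shape; the real call sites live outside this file's hunks.
  HeapRegion* r = new_gc_alloc_region(word_size, count, InCSetState::Young);  // survivor destination
  // ... objects are copied into r during the pause ...
  retire_gc_alloc_region(r, allocated_bytes, InCSetState::Young);             // adds r to the survivor list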