--- old/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp 2018-03-26 12:40:02.791434807 +0200
+++ new/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp 2018-03-26 12:40:02.504426039 +0200
@@ -247,7 +247,7 @@
       forwardee = _par_scan_state->copy_to_survivor_space(state, obj, m);
     }
     assert(forwardee != NULL, "forwardee should not be NULL");
-    RawAccess<>::oop_store(p, forwardee);
+    RawAccess<OOP_NOT_NULL>::oop_store(p, forwardee);
     if (do_mark_object != G1MarkNone && forwardee != obj) {
       // If the object is self-forwarded we don't need to explicitly
       // mark it, the evacuation failure protocol will do so.
--- old/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp 2018-03-26 12:40:03.887468289 +0200
+++ new/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp 2018-03-26 12:40:03.630460438 +0200
@@ -161,9 +161,10 @@
   inline void do_oop_partial_array(oop* p);
 
   // This method is applied to the fields of the objects that have just been copied.
-  template <class T> inline void do_oop_evac(T* p, HeapRegion* from);
+  template <class T> inline void do_oop_evac(T* p);
 
-  template <class T> inline void deal_with_reference(T* ref_to_scan);
+  inline void deal_with_reference(oop* ref_to_scan);
+  inline void deal_with_reference(narrowOop* ref_to_scan);
 
   inline void dispatch_reference(StarTask ref);
 
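
As context for the header change above, here is a minimal self-contained sketch (not HotSpot code; the tag bit value and the stand-in types are assumptions) of why the new narrowOop* overload can drop the partial-array-mask test: the mask is a low-order pointer tag that the task queue only ever applies to full-width oop* entries, so a narrowOop* entry can never carry it.

#include <cassert>
#include <cstdint>
#include <cstdio>

using oop = void*;          // stand-in for HotSpot's oop
using narrowOop = uint32_t; // stand-in for HotSpot's narrowOop

const uintptr_t PARTIAL_ARRAY_MASK = 0x2; // hypothetical tag bit

static bool has_partial_array_mask(const void* p) {
  return (reinterpret_cast<uintptr_t>(p) & PARTIAL_ARRAY_MASK) != 0;
}

// Full-width entries may carry the "partially scanned object array" tag...
static void deal_with_reference(oop* ref_to_scan) {
  if (has_partial_array_mask(ref_to_scan)) {
    std::puts("oop*: resume scanning a partially scanned object array");
  } else {
    std::puts("oop*: evacuate the object this slot points to");
  }
}

// ...narrow entries never do, so the branch reduces to an assert.
static void deal_with_reference(narrowOop* ref_to_scan) {
  assert(!has_partial_array_mask(ref_to_scan) && "never tagged");
  std::puts("narrowOop*: evacuate the object this slot points to");
}

int main() {
  oop slot = nullptr;
  narrowOop narrow_slot = 0;
  deal_with_reference(&slot);        // untagged: plain evacuation
  deal_with_reference(&narrow_slot); // narrow: no tag check needed
  // A tagged entry, as the queue would produce for a big object array;
  // the tagged pointer is only inspected, never dereferenced here.
  oop* tagged = reinterpret_cast<oop*>(
      reinterpret_cast<uintptr_t>(&slot) | PARTIAL_ARRAY_MASK);
  deal_with_reference(tagged);       // takes the partial-array path
  return 0;
}
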
--- old/src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp 2018-03-26 12:40:04.975501526 +0200
+++ new/src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp 2018-03-26 12:40:04.711493461 +0200
@@ -30,14 +30,15 @@
 #include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"
 
-template <class T> void G1ParScanThreadState::do_oop_evac(T* p, HeapRegion* from) {
+template <class T> void G1ParScanThreadState::do_oop_evac(T* p) {
   // Reference should not be NULL here as such are never pushed to the task queue.
   oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
 
   // Although we never intentionally push references outside of the collection
   // set, due to (benign) races in the claim mechanism during RSet scanning more
   // than one thread might claim the same card. So the same card may be
-  // processed multiple times. So redo this check.
+  // processed multiple times, and so we might get references into old gen here.
+  // So we need to redo this check.
   const InCSetState in_cset_state = _g1h->in_cset_state(obj);
   if (in_cset_state.is_in_cset()) {
     markOop m = obj->mark();
@@ -46,16 +47,17 @@
     } else {
       obj = copy_to_survivor_space(in_cset_state, obj, m);
     }
-    RawAccess<>::oop_store(p, obj);
+    RawAccess<OOP_NOT_NULL>::oop_store(p, obj);
   } else if (in_cset_state.is_humongous()) {
     _g1h->set_humongous_is_live(obj);
   } else {
     assert(in_cset_state.is_default(),
-            "In_cset_state must be NotInCSet here, but is " CSETSTATE_FORMAT, in_cset_state.value());
+           "In_cset_state must be NotInCSet here, but is " CSETSTATE_FORMAT, in_cset_state.value());
   }
 
   assert(obj != NULL, "Must be");
   if (!HeapRegion::is_in_same_region(p, obj)) {
+    HeapRegion* from = _g1h->heap_region_containing(p);
     update_rs(from, p, obj);
   }
 }
@@ -114,15 +116,19 @@
   to_obj_array->oop_iterate_range(&_scanner, start, end);
 }
 
-template <class T> inline void G1ParScanThreadState::deal_with_reference(T* ref_to_scan) {
+inline void G1ParScanThreadState::deal_with_reference(oop* ref_to_scan) {
   if (!has_partial_array_mask(ref_to_scan)) {
-    HeapRegion* r = _g1h->heap_region_containing(ref_to_scan);
-    do_oop_evac(ref_to_scan, r);
+    do_oop_evac(ref_to_scan);
   } else {
-    do_oop_partial_array((oop*)ref_to_scan);
+    do_oop_partial_array(ref_to_scan);
   }
 }
 
+inline void G1ParScanThreadState::deal_with_reference(narrowOop* ref_to_scan) {
+  assert(!has_partial_array_mask(ref_to_scan), "NarrowOop* elements should never be partial arrays.");
+  do_oop_evac(ref_to_scan);
+}
+
 inline void G1ParScanThreadState::dispatch_reference(StarTask ref) {
   assert(verify_task(ref), "sanity");
   if (ref.is_narrow()) {
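
To illustrate the do_oop_evac() change above, a simplified self-contained model (not the G1 implementation; the grain size and the stub bodies are assumptions) of moving the HeapRegion lookup off the common path. Before the patch every caller resolved the source region up front; after it, the lookup happens only when the reference actually crosses a region boundary.

#include <cstdint>
#include <cstdio>

struct HeapRegion { int index; };

// Stand-in for _g1h->heap_region_containing(): in G1 this is a lookup keyed
// by address; here it is a stub that just reports being called.
static HeapRegion* heap_region_containing(const void* addr) {
  static HeapRegion region = { 0 };
  std::puts("region lookup (after the patch: only on the cross-region path)");
  return &region;
}

// Stand-in for HeapRegion::is_in_same_region(): G1 compares the two
// addresses shifted by the region-size log; a fixed 1 MB grain is assumed.
static bool is_in_same_region(const void* p, const void* obj) {
  const unsigned grain_shift = 20;
  return (reinterpret_cast<uintptr_t>(p) >> grain_shift) ==
         (reinterpret_cast<uintptr_t>(obj) >> grain_shift);
}

// Stand-in for update_rs(): records a cross-region reference.
static void update_rs(HeapRegion* from, void** p, void* obj) {
  std::printf("remembered-set update, source region %d\n", from->index);
}

// Shape of do_oop_evac() after the patch: the source region is derived from
// p lazily, inside the branch, instead of being passed in by every caller
// and computed for every scanned reference.
static void do_oop_evac_model(void** p, void* obj) {
  if (!is_in_same_region(p, obj)) {
    HeapRegion* from = heap_region_containing(p); // moved inside the branch
    update_rs(from, p, obj);
  }
}

int main() {
  // Two addresses that land in different 1 MB "regions" of this model.
  void* obj = reinterpret_cast<void*>(uintptr_t(0x300000));
  void* slot = nullptr;
  do_oop_evac_model(&slot, obj);
  return 0;
}

The narrowOop* caller benefits most: the old deal_with_reference() paid the heap_region_containing() lookup for every non-partial-array entry even though do_oop_evac() might never use the result.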