# HG changeset patch
# User pliden
# Date 1385653861 -3600
#      Thu Nov 28 16:51:01 2013 +0100
# Node ID 5faa61145e369e3721d051409e0ebea6b870d5dd
# Parent 55a0da3d420b4d64d337d9a7dbf2964033d063b5
8029255: G1: Reference processing should not enqueue references on the shared SATB queue
Reviewed-by:

diff --git a/src/share/vm/memory/referenceProcessor.cpp b/src/share/vm/memory/referenceProcessor.cpp
--- a/src/share/vm/memory/referenceProcessor.cpp
+++ b/src/share/vm/memory/referenceProcessor.cpp
@@ -100,7 +100,6 @@
   _enqueuing_is_done(false),
   _is_alive_non_header(is_alive_non_header),
   _discovered_list_needs_barrier(discovered_list_needs_barrier),
-  _bs(NULL),
   _processing_is_mt(mt_processing),
   _next_id(0)
 {
@@ -126,10 +125,6 @@
     _discovered_refs[i].set_length(0);
   }
 
-  // If we do barriers, cache a copy of the barrier set.
-  if (discovered_list_needs_barrier) {
-    _bs = Universe::heap()->barrier_set();
-  }
   setup_policy(false /* default soft ref policy */);
 }
 
@@ -317,13 +312,9 @@
   // Enqueue references that are not made active again, and
   // clear the decks for the next collection (cycle).
   ref->enqueue_discovered_reflists((HeapWord*)pending_list_addr, task_executor);
-  // Do the oop-check on pending_list_addr missed in
-  // enqueue_discovered_reflist. We should probably
-  // do a raw oop_check so that future such idempotent
-  // oop_stores relying on the oop-check side-effect
-  // may be elided automatically and safely without
-  // affecting correctness.
-  oop_store(pending_list_addr, oopDesc::load_decode_heap_oop(pending_list_addr));
+  // Do the post-barrier on pending_list_addr missed in
+  // enqueue_discovered_reflist.
+  oopDesc::bs()->write_ref_field(pending_list_addr, oopDesc::load_decode_heap_oop(pending_list_addr));
 
   // Stop treating discovered references specially.
   ref->disable_discovery();
@@ -372,15 +363,16 @@
       assert(java_lang_ref_Reference::next(obj) == NULL,
              "Reference not active; should not be discovered");
       // Self-loop next, so as to make Ref not active.
-      java_lang_ref_Reference::set_next(obj, obj);
+      java_lang_ref_Reference::set_next_raw(obj, obj);
       if (next_d == obj) {  // obj is last
         // Swap refs_list into pendling_list_addr and
         // set obj's discovered to what we read from pending_list_addr.
         oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
-        // Need oop_check on pending_list_addr above;
-        // see special oop-check code at the end of
+        // Need post-barrier on pending_list_addr above;
+        // see special post-barrier code at the end of
         // enqueue_discovered_reflists() further below.
-        java_lang_ref_Reference::set_discovered(obj, old); // old may be NULL
+        java_lang_ref_Reference::set_discovered_raw(obj, old); // old may be NULL
+        oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), old);
       }
     }
   } else { // Old behaviour
@@ -516,13 +508,11 @@
   // the reference object and will fail
   // CT verification.
   if (UseG1GC) {
-    BarrierSet* bs = oopDesc::bs();
     HeapWord* next_addr = java_lang_ref_Reference::next_addr(_ref);
-
     if (UseCompressedOops) {
-      bs->write_ref_field_pre((narrowOop*)next_addr, NULL);
+      oopDesc::bs()->write_ref_field_pre((narrowOop*)next_addr, NULL);
     } else {
-      bs->write_ref_field_pre((oop*)next_addr, NULL);
+      oopDesc::bs()->write_ref_field_pre((oop*)next_addr, NULL);
     }
     java_lang_ref_Reference::set_next_raw(_ref, NULL);
   } else {
@@ -790,10 +780,9 @@
 };
 
 void ReferenceProcessor::set_discovered(oop ref, oop value) {
+  java_lang_ref_Reference::set_discovered_raw(ref, value);
   if (_discovered_list_needs_barrier) {
-    java_lang_ref_Reference::set_discovered(ref, value);
-  } else {
-    java_lang_ref_Reference::set_discovered_raw(ref, value);
+    oopDesc::bs()->write_ref_field(ref, value);
   }
 }
 
@@ -1085,7 +1074,7 @@
   // so this will expand to nothing. As a result, we have manually
   // elided this out for G1, but left in the test for some future
   // collector that might have need for a pre-barrier here, e.g.:-
-  // _bs->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered);
+  // oopDesc::bs()->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered);
   assert(!_discovered_list_needs_barrier || UseG1GC,
          "Need to check non-G1 collector: "
          "may need a pre-write-barrier for CAS from NULL below");
@@ -1098,7 +1087,7 @@
     refs_list.set_head(obj);
     refs_list.inc_length(1);
     if (_discovered_list_needs_barrier) {
-      _bs->write_ref_field((void*)discovered_addr, next_discovered);
+      oopDesc::bs()->write_ref_field((void*)discovered_addr, next_discovered);
     }
 
     if (TraceReferenceGC) {
@@ -1260,13 +1249,13 @@
 
     // As in the case further above, since we are over-writing a NULL
     // pre-value, we can safely elide the pre-barrier here for the case of G1.
-    // e.g.:- _bs->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered);
+    // e.g.:- oopDesc::bs()->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered);
     assert(discovered == NULL, "control point invariant");
     assert(!_discovered_list_needs_barrier || UseG1GC,
            "For non-G1 collector, may need a pre-write-barrier for CAS from NULL below");
     oop_store_raw(discovered_addr, next_discovered);
     if (_discovered_list_needs_barrier) {
-      _bs->write_ref_field((void*)discovered_addr, next_discovered);
+      oopDesc::bs()->write_ref_field((void*)discovered_addr, next_discovered);
     }
     list->set_head(obj);
     list->inc_length(1);
diff --git a/src/share/vm/memory/referenceProcessor.hpp b/src/share/vm/memory/referenceProcessor.hpp
--- a/src/share/vm/memory/referenceProcessor.hpp
+++ b/src/share/vm/memory/referenceProcessor.hpp
@@ -235,7 +235,6 @@
   // discovery.)
   bool        _discovered_list_needs_barrier;
-  BarrierSet* _bs;                       // Cached copy of BarrierSet.
   bool        _enqueuing_is_done;        // true if all weak references enqueued
   bool        _processing_is_mt;         // true during phases when
                                          // reference processing is MT.
@@ -420,25 +419,6 @@
   void update_soft_ref_master_clock();
 
  public:
-  // constructor
-  ReferenceProcessor():
-    _span((HeapWord*)NULL, (HeapWord*)NULL),
-    _discovered_refs(NULL),
-    _discoveredSoftRefs(NULL), _discoveredWeakRefs(NULL),
-    _discoveredFinalRefs(NULL), _discoveredPhantomRefs(NULL),
-    _discovering_refs(false),
-    _discovery_is_atomic(true),
-    _enqueuing_is_done(false),
-    _discovery_is_mt(false),
-    _discovered_list_needs_barrier(false),
-    _bs(NULL),
-    _is_alive_non_header(NULL),
-    _num_q(0),
-    _max_num_q(0),
-    _processing_is_mt(false),
-    _next_id(0)
-  { }
-
   // Default parameters give you a vanilla reference processor.
   ReferenceProcessor(MemRegion span, bool mt_processing = false, uint mt_processing_degree = 1,