src/share/vm/memory/referenceProcessor.cpp

Print this page
rev 5694 : 8029255: G1: Reference processing should not enqueue references on the shared SATB queue
Reviewed-by:

*** 98,108 **** bool discovered_list_needs_barrier) : _discovering_refs(false), _enqueuing_is_done(false), _is_alive_non_header(is_alive_non_header), _discovered_list_needs_barrier(discovered_list_needs_barrier), - _bs(NULL), _processing_is_mt(mt_processing), _next_id(0) { _span = span; _discovery_is_atomic = atomic_discovery; --- 98,107 ----
*** 124,137 **** for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) { _discovered_refs[i].set_head(NULL); _discovered_refs[i].set_length(0); } - // If we do barriers, cache a copy of the barrier set. - if (discovered_list_needs_barrier) { - _bs = Universe::heap()->barrier_set(); - } setup_policy(false /* default soft ref policy */); } #ifndef PRODUCT void ReferenceProcessor::verify_no_references_recorded() { --- 123,132 ----
*** 315,331 **** T old_pending_list_value = *pending_list_addr; // Enqueue references that are not made active again, and // clear the decks for the next collection (cycle). ref->enqueue_discovered_reflists((HeapWord*)pending_list_addr, task_executor); ! // Do the oop-check on pending_list_addr missed in ! // enqueue_discovered_reflist. We should probably ! // do a raw oop_check so that future such idempotent ! // oop_stores relying on the oop-check side-effect ! // may be elided automatically and safely without ! // affecting correctness. ! oop_store(pending_list_addr, oopDesc::load_decode_heap_oop(pending_list_addr)); // Stop treating discovered references specially. ref->disable_discovery(); // Return true if new pending references were added --- 310,322 ---- T old_pending_list_value = *pending_list_addr; // Enqueue references that are not made active again, and // clear the decks for the next collection (cycle). ref->enqueue_discovered_reflists((HeapWord*)pending_list_addr, task_executor); ! // Do the post-barrier on pending_list_addr missed in ! // enqueue_discovered_reflist. ! oopDesc::bs()->write_ref_field(pending_list_addr, oopDesc::load_decode_heap_oop(pending_list_addr)); // Stop treating discovered references specially. ref->disable_discovery(); // Return true if new pending references were added
*** 370,388 **** (void *)obj, (void *)next_d); } assert(java_lang_ref_Reference::next(obj) == NULL, "Reference not active; should not be discovered"); // Self-loop next, so as to make Ref not active. ! java_lang_ref_Reference::set_next(obj, obj); if (next_d == obj) { // obj is last // Swap refs_list into pending_list_addr and // set obj's discovered to what we read from pending_list_addr. oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr); ! // Need oop_check on pending_list_addr above; ! // see special oop-check code at the end of // enqueue_discovered_reflists() further below. ! java_lang_ref_Reference::set_discovered(obj, old); // old may be NULL } } } else { // Old behaviour // Walk down the list, copying the discovered field into // the next field and clearing the discovered field. --- 361,380 ---- (void *)obj, (void *)next_d); } assert(java_lang_ref_Reference::next(obj) == NULL, "Reference not active; should not be discovered"); // Self-loop next, so as to make Ref not active. ! java_lang_ref_Reference::set_next_raw(obj, obj); if (next_d == obj) { // obj is last // Swap refs_list into pending_list_addr and // set obj's discovered to what we read from pending_list_addr. oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr); ! // Need post-barrier on pending_list_addr above; ! // see special post-barrier code at the end of // enqueue_discovered_reflists() further below. ! java_lang_ref_Reference::set_discovered_raw(obj, old); // old may be NULL ! oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), old); } } } else { // Old behaviour // Walk down the list, copying the discovered field into // the next field and clearing the discovered field.
*** 514,530 **** // For G1 we don't want to use set_next - it // will dirty the card for the next field of // the reference object and will fail // CT verification. if (UseG1GC) { - BarrierSet* bs = oopDesc::bs(); HeapWord* next_addr = java_lang_ref_Reference::next_addr(_ref); - if (UseCompressedOops) { ! bs->write_ref_field_pre((narrowOop*)next_addr, NULL); } else { ! bs->write_ref_field_pre((oop*)next_addr, NULL); } java_lang_ref_Reference::set_next_raw(_ref, NULL); } else { java_lang_ref_Reference::set_next(_ref, NULL); } --- 506,520 ---- // For G1 we don't want to use set_next - it // will dirty the card for the next field of // the reference object and will fail // CT verification. if (UseG1GC) { HeapWord* next_addr = java_lang_ref_Reference::next_addr(_ref); if (UseCompressedOops) { ! oopDesc::bs()->write_ref_field_pre((narrowOop*)next_addr, NULL); } else { ! oopDesc::bs()->write_ref_field_pre((oop*)next_addr, NULL); } java_lang_ref_Reference::set_next_raw(_ref, NULL); } else { java_lang_ref_Reference::set_next(_ref, NULL); }
*** 788,801 **** private: bool _clear_referent; }; void ReferenceProcessor::set_discovered(oop ref, oop value) { - if (_discovered_list_needs_barrier) { - java_lang_ref_Reference::set_discovered(ref, value); - } else { java_lang_ref_Reference::set_discovered_raw(ref, value); } } // Balances reference queues. // Move entries from all queues[0, 1, ..., _max_num_q-1] to --- 778,790 ---- private: bool _clear_referent; }; void ReferenceProcessor::set_discovered(oop ref, oop value) { java_lang_ref_Reference::set_discovered_raw(ref, value); + if (_discovered_list_needs_barrier) { + oopDesc::bs()->write_ref_field(ref, value); } } // Balances reference queues. // Move entries from all queues[0, 1, ..., _max_num_q-1] to
*** 1083,1093 **** // not necessary because the only case we are interested in // here is when *discovered_addr is NULL (see the CAS further below), // so this will expand to nothing. As a result, we have manually // elided this out for G1, but left in the test for some future // collector that might have need for a pre-barrier here, e.g.:- ! // _bs->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered); assert(!_discovered_list_needs_barrier || UseG1GC, "Need to check non-G1 collector: " "may need a pre-write-barrier for CAS from NULL below"); oop retest = oopDesc::atomic_compare_exchange_oop(next_discovered, discovered_addr, NULL); --- 1072,1082 ---- // not necessary because the only case we are interested in // here is when *discovered_addr is NULL (see the CAS further below), // so this will expand to nothing. As a result, we have manually // elided this out for G1, but left in the test for some future // collector that might have need for a pre-barrier here, e.g.:- ! // oopDesc::bs()->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered); assert(!_discovered_list_needs_barrier || UseG1GC, "Need to check non-G1 collector: " "may need a pre-write-barrier for CAS from NULL below"); oop retest = oopDesc::atomic_compare_exchange_oop(next_discovered, discovered_addr, NULL);
*** 1096,1106 **** // We have separate lists for enqueueing, so no synchronization // is necessary. refs_list.set_head(obj); refs_list.inc_length(1); if (_discovered_list_needs_barrier) { ! _bs->write_ref_field((void*)discovered_addr, next_discovered); } if (TraceReferenceGC) { gclog_or_tty->print_cr("Discovered reference (mt) (" INTPTR_FORMAT ": %s)", (void *)obj, obj->klass()->internal_name()); --- 1085,1095 ---- // We have separate lists for enqueueing, so no synchronization // is necessary. refs_list.set_head(obj); refs_list.inc_length(1); if (_discovered_list_needs_barrier) { ! oopDesc::bs()->write_ref_field((void*)discovered_addr, next_discovered); } if (TraceReferenceGC) { gclog_or_tty->print_cr("Discovered reference (mt) (" INTPTR_FORMAT ": %s)", (void *)obj, obj->klass()->internal_name());
*** 1258,1274 **** // The last ref must have its discovered field pointing to itself. oop next_discovered = (current_head != NULL) ? current_head : obj; // As in the case further above, since we are over-writing a NULL // pre-value, we can safely elide the pre-barrier here for the case of G1. ! // e.g.:- _bs->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered); assert(discovered == NULL, "control point invariant"); assert(!_discovered_list_needs_barrier || UseG1GC, "For non-G1 collector, may need a pre-write-barrier for CAS from NULL below"); oop_store_raw(discovered_addr, next_discovered); if (_discovered_list_needs_barrier) { ! _bs->write_ref_field((void*)discovered_addr, next_discovered); } list->set_head(obj); list->inc_length(1); if (TraceReferenceGC) { --- 1247,1263 ---- // The last ref must have its discovered field pointing to itself. oop next_discovered = (current_head != NULL) ? current_head : obj; // As in the case further above, since we are over-writing a NULL // pre-value, we can safely elide the pre-barrier here for the case of G1. ! // e.g.:- oopDesc::bs()->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered); assert(discovered == NULL, "control point invariant"); assert(!_discovered_list_needs_barrier || UseG1GC, "For non-G1 collector, may need a pre-write-barrier for CAS from NULL below"); oop_store_raw(discovered_addr, next_discovered); if (_discovered_list_needs_barrier) { ! oopDesc::bs()->write_ref_field((void*)discovered_addr, next_discovered); } list->set_head(obj); list->inc_length(1); if (TraceReferenceGC) {