--- old/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp	2018-04-23 17:00:30.423818818 +0200
+++ new/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp	2018-04-23 17:00:30.134809810 +0200
@@ -5178,7 +5178,7 @@
   rp->setup_policy(false);
   verify_work_stacks_empty();

-  ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->num_q());
+  ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->num_queues());
   {
     GCTraceTime(Debug, gc, phases) t("Reference Processing", _gc_timer_cm);

--- old/src/hotspot/share/gc/cms/parNewGeneration.cpp	2018-04-23 17:00:31.669857658 +0200
+++ new/src/hotspot/share/gc/cms/parNewGeneration.cpp	2018-04-23 17:00:31.386848837 +0200
@@ -983,7 +983,7 @@
   // Can the mt_degree be set later (at run_task() time would be best)?
   rp->set_active_mt_degree(active_workers);
   ReferenceProcessorStats stats;
-  ReferenceProcessorPhaseTimes pt(_gc_timer, rp->num_q());
+  ReferenceProcessorPhaseTimes pt(_gc_timer, rp->num_queues());
   if (rp->processing_is_mt()) {
     ParNewRefProcTaskExecutor task_executor(*this, *_old_gen, thread_state_set);
     stats = rp->process_discovered_references(&is_alive, &keep_alive,
--- old/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	2018-04-23 17:00:32.832893911 +0200
+++ new/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	2018-04-23 17:00:32.547885027 +0200
@@ -3904,9 +3904,9 @@
     uint no_of_gc_workers = workers()->active_workers();

     // Parallel reference processing
-    assert(no_of_gc_workers <= rp->max_num_q(),
+    assert(no_of_gc_workers <= rp->max_num_queues(),
            "Mismatch between the number of GC workers %u and the maximum number of Reference process queues %u",
-           no_of_gc_workers,  rp->max_num_q());
+           no_of_gc_workers,  rp->max_num_queues());

     G1STWRefProcTaskExecutor par_task_executor(this, per_thread_states, workers(), _task_queues, no_of_gc_workers);
     stats = rp->process_discovered_references(&is_alive,
@@ -3944,9 +3944,9 @@

     uint n_workers = workers()->active_workers();

-    assert(n_workers <= rp->max_num_q(),
+    assert(n_workers <= rp->max_num_queues(),
            "Mismatch between the number of GC workers %u and the maximum number of Reference process queues %u",
-           n_workers,  rp->max_num_q());
+           n_workers,  rp->max_num_queues());

     G1STWRefProcTaskExecutor par_task_executor(this, per_thread_states, workers(), _task_queues, n_workers);
     rp->enqueue_discovered_references(&par_task_executor, pt);
--- old/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp	2018-04-23 17:00:34.075932657 +0200
+++ new/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp	2018-04-23 17:00:33.784923587 +0200
@@ -1656,7 +1656,7 @@
     // Reference lists are balanced (see balance_all_queues() and balance_queues()).
     rp->set_active_mt_degree(active_workers);

-    ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->num_q());
+    ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->num_queues());

     // Process the weak references.
     const ReferenceProcessorStats& stats =
@@ -1675,7 +1675,7 @@
     assert(has_overflown() || _global_mark_stack.is_empty(),
            "Mark stack should be empty (unless it has overflown)");

-    assert(rp->num_q() == active_workers, "why not");
+    assert(rp->num_queues() == active_workers, "why not");

     rp->enqueue_discovered_references(executor, &pt);

--- old/src/hotspot/share/gc/g1/g1FullGCReferenceProcessorExecutor.cpp	2018-04-23 17:00:35.272969970 +0200
+++ new/src/hotspot/share/gc/g1/g1FullGCReferenceProcessorExecutor.cpp	2018-04-23 17:00:34.988961117 +0200
@@ -34,7 +34,7 @@
 G1FullGCReferenceProcessingExecutor::G1FullGCReferenceProcessingExecutor(G1FullCollector* collector) :
     _collector(collector),
     _reference_processor(collector->reference_processor()),
-    _old_mt_degree(_reference_processor->num_q()) {
+    _old_mt_degree(_reference_processor->num_queues()) {
   if (_reference_processor->processing_is_mt()) {
     _reference_processor->set_active_mt_degree(_collector->workers());
   }
@@ -92,7 +92,7 @@
   G1FullGCMarker* marker = _collector->marker(0);
   G1IsAliveClosure is_alive(_collector->mark_bitmap());
   G1FullKeepAliveClosure keep_alive(marker);
-  ReferenceProcessorPhaseTimes pt(timer, _reference_processor->num_q());
+  ReferenceProcessorPhaseTimes pt(timer, _reference_processor->num_queues());
   AbstractRefProcTaskExecutor* executor = _reference_processor->processing_is_mt() ? this : NULL;

   // Process discovered references, use this executor if multi-threaded
--- old/src/hotspot/share/gc/parallel/psMarkSweep.cpp	2018-04-23 17:00:36.435006191 +0200
+++ new/src/hotspot/share/gc/parallel/psMarkSweep.cpp	2018-04-23 17:00:36.149997308 +0200
@@ -257,7 +257,7 @@
     DerivedPointerTable::update_pointers();
 #endif

-    ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->num_q());
+    ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->num_queues());

     ref_processor()->enqueue_discovered_references(NULL, &pt);

@@ -536,7 +536,7 @@
     GCTraceTime(Debug, gc, phases) t("Reference Processing", _gc_timer);

     ref_processor()->setup_policy(clear_all_softrefs);
-    ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->num_q());
+    ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->num_queues());
     const ReferenceProcessorStats& stats =
       ref_processor()->process_discovered_references(
         is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, &pt);
--- old/src/hotspot/share/gc/parallel/psParallelCompact.cpp	2018-04-23 17:00:37.603042600 +0200
+++ new/src/hotspot/share/gc/parallel/psParallelCompact.cpp	2018-04-23 17:00:37.316033654 +0200
@@ -1038,7 +1038,7 @@
     DerivedPointerTable::update_pointers();
 #endif

-    ReferenceProcessorPhaseTimes pt(&_gc_timer, ref_processor()->num_q());
+    ReferenceProcessorPhaseTimes pt(&_gc_timer, ref_processor()->num_queues());

     ref_processor()->enqueue_discovered_references(NULL, &pt);

@@ -2105,7 +2105,7 @@
     GCTraceTime(Debug, gc, phases) tm("Reference Processing", &_gc_timer);

     ReferenceProcessorStats stats;
-    ReferenceProcessorPhaseTimes pt(&_gc_timer, ref_processor()->num_q());
+    ReferenceProcessorPhaseTimes pt(&_gc_timer, ref_processor()->num_queues());
     if (ref_processor()->processing_is_mt()) {
       RefProcTaskExecutor task_executor;
       stats = ref_processor()->process_discovered_references(
--- old/src/hotspot/share/gc/parallel/psScavenge.cpp	2018-04-23 17:00:38.796079788 +0200
+++ new/src/hotspot/share/gc/parallel/psScavenge.cpp	2018-04-23 17:00:38.512070935 +0200
@@ -416,7 +416,7 @@
       PSKeepAliveClosure keep_alive(promotion_manager);
       PSEvacuateFollowersClosure evac_followers(promotion_manager);
       ReferenceProcessorStats stats;
-      ReferenceProcessorPhaseTimes pt(&_gc_timer, reference_processor()->num_q());
+      ReferenceProcessorPhaseTimes pt(&_gc_timer, reference_processor()->num_queues());
       if (reference_processor()->processing_is_mt()) {
         PSRefProcTaskExecutor task_executor;
         stats = reference_processor()->process_discovered_references(
--- old/src/hotspot/share/gc/serial/defNewGeneration.cpp	2018-04-23 17:00:39.962116134 +0200
+++ new/src/hotspot/share/gc/serial/defNewGeneration.cpp	2018-04-23 17:00:39.674107157 +0200
@@ -646,7 +646,7 @@
   FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
   ReferenceProcessor* rp = ref_processor();
   rp->setup_policy(clear_all_soft_refs);
-  ReferenceProcessorPhaseTimes pt(_gc_timer, rp->num_q());
+  ReferenceProcessorPhaseTimes pt(_gc_timer, rp->num_queues());
   const ReferenceProcessorStats& stats =
     rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
                                       NULL, &pt);
--- old/src/hotspot/share/gc/serial/genMarkSweep.cpp	2018-04-23 17:00:41.120152231 +0200
+++ new/src/hotspot/share/gc/serial/genMarkSweep.cpp	2018-04-23 17:00:40.837143409 +0200
@@ -208,7 +208,7 @@
     GCTraceTime(Debug, gc, phases) tm_m("Reference Processing", gc_timer());

     ref_processor()->setup_policy(clear_all_softrefs);
-    ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->num_q());
+    ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->num_queues());
     const ReferenceProcessorStats& stats =
       ref_processor()->process_discovered_references(
         &is_alive, &keep_alive, &follow_stack_closure, NULL, &pt);
--- old/src/hotspot/share/gc/shared/genCollectedHeap.cpp	2018-04-23 17:00:42.281188421 +0200
+++ new/src/hotspot/share/gc/shared/genCollectedHeap.cpp	2018-04-23 17:00:41.996179537 +0200
@@ -515,7 +515,7 @@
     }
     gen->collect(full, clear_soft_refs, size, is_tlab);
     if (!rp->enqueuing_is_done()) {
-      ReferenceProcessorPhaseTimes pt(NULL, rp->num_q());
+      ReferenceProcessorPhaseTimes pt(NULL, rp->num_queues());
       rp->enqueue_discovered_references(NULL, &pt);
       pt.print_enqueue_phase();
     } else {
--- old/src/hotspot/share/gc/shared/referenceProcessor.cpp	2018-04-23 17:00:43.467225391 +0200
+++ new/src/hotspot/share/gc/shared/referenceProcessor.cpp	2018-04-23 17:00:43.177216351 +0200
@@ -110,21 +110,21 @@
   _discovery_is_atomic = atomic_discovery;
   _discovery_is_mt     = mt_discovery;
-  _num_q               = MAX2(1U, mt_processing_degree);
-  _max_num_q           = MAX2(_num_q, mt_discovery_degree);
+  _num_queues          = MAX2(1U, mt_processing_degree);
+  _max_num_queues      = MAX2(_num_queues, mt_discovery_degree);
   _discovered_refs     = NEW_C_HEAP_ARRAY(DiscoveredList,
-                                          _max_num_q * number_of_subclasses_of_ref(), mtGC);
+                                          _max_num_queues * number_of_subclasses_of_ref(), mtGC);

   if (_discovered_refs == NULL) {
     vm_exit_during_initialization("Could not allocated RefProc Array");
   }
   _discoveredSoftRefs    = &_discovered_refs[0];
-  _discoveredWeakRefs    = &_discoveredSoftRefs[_max_num_q];
-  _discoveredFinalRefs   = &_discoveredWeakRefs[_max_num_q];
-  _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_q];
+  _discoveredWeakRefs    = &_discoveredSoftRefs[_max_num_queues];
+  _discoveredFinalRefs   = &_discoveredWeakRefs[_max_num_queues];
+  _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_queues];

   // Initialize all entries to NULL
-  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
+  for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) {
     _discovered_refs[i].set_head(NULL);
     _discovered_refs[i].set_length(0);
   }
@@ -153,7 +153,7 @@
 #ifndef PRODUCT
 void ReferenceProcessor::verify_no_references_recorded() {
   guarantee(!_discovering_refs, "Discovering refs?");
-  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
+  for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) {
     guarantee(_discovered_refs[i].is_empty(),
               "Found non-empty discovered list at %u", i);
   }
@@ -161,7 +161,7 @@
 #endif

 void ReferenceProcessor::weak_oops_do(OopClosure* f) {
-  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
+  for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) {
     if (UseCompressedOops) {
       f->do_oop((narrowOop*)_discovered_refs[i].adr_head());
     } else {
@@ -201,7 +201,7 @@

 size_t ReferenceProcessor::total_count(DiscoveredList lists[]) const {
   size_t total = 0;
-  for (uint i = 0; i < _max_num_q; ++i) {
+  for (uint i = 0; i < _max_num_queues; ++i) {
     total += lists[i].length();
   }
   return total;
@@ -301,21 +301,21 @@
   log_develop_trace(gc, ref)("ReferenceProcessor::enqueue_discovered_reflist list " INTPTR_FORMAT, p2i(&refs_list));

   oop obj = NULL;
-  oop next_d = refs_list.head();
+  oop next_discovered = refs_list.head();
   // Walk down the list, self-looping the next field
   // so that the References are not considered active.
-  while (obj != next_d) {
-    obj = next_d;
+  while (obj != next_discovered) {
+    obj = next_discovered;
     assert(obj->is_instance(), "should be an instance object");
     assert(InstanceKlass::cast(obj->klass())->is_reference_instance_klass(), "should be reference object");
-    next_d = java_lang_ref_Reference::discovered(obj);
-    log_develop_trace(gc, ref)("        obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT, p2i(obj), p2i(next_d));
+    next_discovered = java_lang_ref_Reference::discovered(obj);
+    log_develop_trace(gc, ref)("        obj " INTPTR_FORMAT "/next_discovered " INTPTR_FORMAT, p2i(obj), p2i(next_discovered));
     assert(java_lang_ref_Reference::next(obj) == NULL,
            "Reference not active; should not be discovered");
     // Self-loop next, so as to make Ref not active.
     java_lang_ref_Reference::set_next_raw(obj, obj);
-    if (next_d != obj) {
-      HeapAccess<AS_NO_KEEPALIVE>::oop_store_at(obj, java_lang_ref_Reference::discovered_offset, next_d);
+    if (next_discovered != obj) {
+      HeapAccess<AS_NO_KEEPALIVE>::oop_store_at(obj, java_lang_ref_Reference::discovered_offset, next_discovered);
     } else {
       // This is the last object.
       // Swap refs_list into pending list and set obj's
@@ -339,14 +339,14 @@
   virtual void work(unsigned int work_id) {
     RefProcWorkerTimeTracker tt(ReferenceProcessorPhaseTimes::RefEnqueue, _phase_times, work_id);

-    assert(work_id < (unsigned int)_ref_processor.max_num_q(), "Index out-of-bounds");
+    assert(work_id < (unsigned int)_ref_processor.max_num_queues(), "Index out-of-bounds");
     // Simplest first cut: static partitioning.
     int index = work_id;
     // The increment on "index" must correspond to the maximum number of queues
     // (n_queues) with which that ReferenceProcessor was created.  That
     // is because of the "clever" way the discovered references lists were
     // allocated and are indexed into.
-    assert(_n_queues == (int) _ref_processor.max_num_q(), "Different number not expected");
+    assert(_n_queues == (int) _ref_processor.max_num_queues(), "Different number not expected");
     for (int j = 0;
          j < ReferenceProcessor::number_of_subclasses_of_ref();
          j++, index += _n_queues) {
@@ -370,11 +370,11 @@

   if (_processing_is_mt && task_executor != NULL) {
     // Parallel code
-    RefProcEnqueueTask tsk(*this, _discovered_refs, _max_num_q, phase_times);
+    RefProcEnqueueTask tsk(*this, _discovered_refs, _max_num_queues, phase_times);
     task_executor->execute(tsk);
   } else {
     // Serial code: call the parent class's implementation
-    for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
+    for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) {
       enqueue_discovered_reflist(_discovered_refs[i]);
       _discovered_refs[i].set_head(NULL);
       _discovered_refs[i].set_length(0);
@@ -383,13 +383,14 @@
 }

 void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
-  _discovered_addr = java_lang_ref_Reference::discovered_addr_raw(_ref);
-  oop discovered = java_lang_ref_Reference::discovered(_ref);
-  assert(_discovered_addr && oopDesc::is_oop_or_null(discovered),
+  _current_discovered_addr = java_lang_ref_Reference::discovered_addr_raw(_current_discovered);
+  oop discovered = java_lang_ref_Reference::discovered(_current_discovered);
+  assert(_current_discovered_addr && oopDesc::is_oop_or_null(discovered),
          "Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered));
-  _next = discovered;
-  _referent_addr = java_lang_ref_Reference::referent_addr_raw(_ref);
-  _referent = java_lang_ref_Reference::referent(_ref);
+  _next_discovered = discovered;
+
+  _referent_addr = java_lang_ref_Reference::referent_addr_raw(_current_discovered);
+  _referent = java_lang_ref_Reference::referent(_current_discovered);
   assert(Universe::heap()->is_in_reserved_or_null(_referent),
          "Wrong oop found in java.lang.Reference object");
   assert(allow_null_referent ?
@@ -401,23 +402,23 @@
 }

 void DiscoveredListIterator::remove() {
-  assert(oopDesc::is_oop(_ref), "Dropping a bad reference");
-  RawAccess<>::oop_store(_discovered_addr, oop(NULL));
+  assert(oopDesc::is_oop(_current_discovered), "Dropping a bad reference");
+  RawAccess<>::oop_store(_current_discovered_addr, oop(NULL));

   // First _prev_next ref actually points into DiscoveredList (gross).
   oop new_next;
-  if (_next == _ref) {
+  if (_next_discovered == _current_discovered) {
     // At the end of the list, we should make _prev point to itself.
     // If _ref is the first ref, then _prev_next will be in the DiscoveredList,
     // and _prev will be NULL.
-    new_next = _prev;
+    new_next = _prev_discovered;
   } else {
-    new_next = _next;
+    new_next = _next_discovered;
   }
   // Remove Reference object from discovered list. Note that G1 does not need a
   // pre-barrier here because we know the Reference has already been found/marked,
   // that's how it ended up in the discovered list in the first place.
-  RawAccess<>::oop_store(_prev_next, new_next);
+  RawAccess<>::oop_store(_prev_discovered_addr, new_next);
   NOT_PRODUCT(_removed++);
   _refs_list.dec_length(1);
 }
@@ -557,15 +558,11 @@
   )
 }

-// Traverse the list and process the referents, by either
-// clearing them or keeping them (and their reachable
-// closure) alive.
-void
-ReferenceProcessor::process_phase3(DiscoveredList&    refs_list,
-                                   bool               clear_referent,
-                                   BoolObjectClosure* is_alive,
-                                   OopClosure*        keep_alive,
-                                   VoidClosure*       complete_gc) {
+void ReferenceProcessor::process_phase3(DiscoveredList&    refs_list,
+                                        bool               clear_referent,
+                                        BoolObjectClosure* is_alive,
+                                        OopClosure*        keep_alive,
+                                        VoidClosure*       complete_gc) {
   ResourceMark rm;
   DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
   while (iter.has_next()) {
@@ -601,8 +598,8 @@

 void ReferenceProcessor::abandon_partial_discovery() {
   // loop over the lists
-  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
-    if ((i % _max_num_q) == 0) {
+  for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) {
+    if ((i % _max_num_queues) == 0) {
       log_develop_trace(gc, ref)("Abandoning %s discovered list", list_name(i));
     }
     clear_discovered_references(_discovered_refs[i]);
@@ -710,7 +707,7 @@
   }
   log_develop_trace(gc, ref)("%s= " SIZE_FORMAT, st.as_string(), total_refs);
 #ifdef ASSERT
-  for (uint i = active_length; i < _max_num_q; i++) {
+  for (uint i = active_length; i < _max_num_queues; i++) {
     assert(ref_lists[i].length() == 0, SIZE_FORMAT " unexpected References in %u",
            ref_lists[i].length(), i);
   }
@@ -719,7 +716,7 @@
 #endif

 void ReferenceProcessor::set_active_mt_degree(uint v) {
-  _num_q = v;
+  _num_queues = v;
   _next_id = 0;
 }

@@ -733,20 +730,20 @@
   size_t total_refs = 0;
   log_develop_trace(gc, ref)("Balance ref_lists ");

-  for (uint i = 0; i < _max_num_q; ++i) {
+  for (uint i = 0; i < _max_num_queues; ++i) {
     total_refs += ref_lists[i].length();
   }
-  log_reflist_counts(ref_lists, _max_num_q, total_refs);
-  size_t avg_refs = total_refs / _num_q + 1;
+  log_reflist_counts(ref_lists, _max_num_queues, total_refs);
+  size_t avg_refs = total_refs / _num_queues + 1;
   uint to_idx = 0;
-  for (uint from_idx = 0; from_idx < _max_num_q; from_idx++) {
+  for (uint from_idx = 0; from_idx < _max_num_queues; from_idx++) {
     bool move_all = false;
-    if (from_idx >= _num_q) {
+    if (from_idx >= _num_queues) {
       move_all = ref_lists[from_idx].length() > 0;
     }
     while ((ref_lists[from_idx].length() > avg_refs) || move_all) {
-      assert(to_idx < _num_q, "Sanity Check!");
+      assert(to_idx < _num_queues, "Sanity Check!");
       if (ref_lists[to_idx].length() < avg_refs) {
         // move superfluous refs
         size_t refs_to_move;
@@ -792,16 +789,16 @@
           break;
         }
       } else {
-        to_idx = (to_idx + 1) % _num_q;
+        to_idx = (to_idx + 1) % _num_queues;
       }
     }
   }
 #ifdef ASSERT
   size_t balanced_total_refs = 0;
-  for (uint i = 0; i < _num_q; ++i) {
+  for (uint i = 0; i < _num_queues; ++i) {
     balanced_total_refs += ref_lists[i].length();
   }
-  log_reflist_counts(ref_lists, _num_q, balanced_total_refs);
+  log_reflist_counts(ref_lists, _num_queues, balanced_total_refs);
   assert(total_refs == balanced_total_refs, "Balancing was incomplete");
 #endif
 }
@@ -844,7 +841,7 @@
     RefProcPhase1Task phase1(*this, refs_lists, policy, true /*marks_oops_alive*/, phase_times);
     task_executor->execute(phase1);
   } else {
-    for (uint i = 0; i < _max_num_q; i++) {
+    for (uint i = 0; i < _max_num_queues; i++) {
       process_phase1(refs_lists[i], policy,
                      is_alive, keep_alive, complete_gc);
     }
@@ -863,7 +860,7 @@
       RefProcPhase2Task phase2(*this, refs_lists, !discovery_is_atomic() /*marks_oops_alive*/, phase_times);
       task_executor->execute(phase2);
     } else {
-      for (uint i = 0; i < _max_num_q; i++) {
+      for (uint i = 0; i < _max_num_queues; i++) {
         process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc);
       }
     }
@@ -878,7 +875,7 @@
     RefProcPhase3Task phase3(*this, refs_lists,
                              clear_referent, true /*marks_oops_alive*/, phase_times);
     task_executor->execute(phase3);
   } else {
-    for (uint i = 0; i < _max_num_q; i++) {
+    for (uint i = 0; i < _max_num_queues; i++) {
       process_phase3(refs_lists[i], clear_referent, is_alive, keep_alive, complete_gc);
     }
@@ -901,7 +898,7 @@
       id = next_id();
     }
   }
-  assert(id < _max_num_q, "Id is out-of-bounds id %u and max id %u)", id, _max_num_q);
+  assert(id < _max_num_queues, "Id is out-of-bounds id %u and max id %u)", id, _max_num_queues);

   // Get the discovered queue to which we will add
   DiscoveredList* list = NULL;
@@ -1115,7 +1112,7 @@
 }

 bool ReferenceProcessor::has_discovered_references() {
-  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
+  for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) {
     if (!_discovered_refs[i].is_empty()) {
       return true;
     }
@@ -1137,7 +1134,7 @@
   // Soft references
   {
     GCTraceTime(Debug, gc, ref) tm("Preclean SoftReferences", gc_timer);
-    for (uint i = 0; i < _max_num_q; i++) {
+    for (uint i = 0; i < _max_num_queues; i++) {
       if (yield->should_return()) {
         return;
       }
@@ -1149,7 +1146,7 @@
   // Weak references
   {
     GCTraceTime(Debug, gc, ref) tm("Preclean WeakReferences", gc_timer);
-    for (uint i = 0; i < _max_num_q; i++) {
+    for (uint i = 0; i < _max_num_queues; i++) {
       if (yield->should_return()) {
         return;
       }
@@ -1161,7 +1158,7 @@
   // Final references
   {
     GCTraceTime(Debug, gc, ref) tm("Preclean FinalReferences", gc_timer);
-    for (uint i = 0; i < _max_num_q; i++) {
+    for (uint i = 0; i < _max_num_queues; i++) {
       if (yield->should_return()) {
         return;
       }
@@ -1173,7 +1170,7 @@
   // Phantom references
   {
     GCTraceTime(Debug, gc, ref) tm("Preclean PhantomReferences", gc_timer);
-    for (uint i = 0; i < _max_num_q; i++) {
+    for (uint i = 0; i < _max_num_queues; i++) {
      if (yield->should_return()) {
        return;
      }
@@ -1236,10 +1233,10 @@
 }

 const char* ReferenceProcessor::list_name(uint i) {
-  assert(i <= _max_num_q * number_of_subclasses_of_ref(),
+  assert(i <= _max_num_queues * number_of_subclasses_of_ref(),
          "Out of bounds index");

-  int j = i / _max_num_q;
+  int j = i / _max_num_queues;
   switch (j) {
     case 0: return "SoftRef";
     case 1: return "WeakRef";
--- old/src/hotspot/share/gc/shared/referenceProcessor.hpp	2018-04-23 17:00:44.650262267 +0200
+++ new/src/hotspot/share/gc/shared/referenceProcessor.hpp	2018-04-23 17:00:44.366253414 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -76,13 +76,15 @@
 class DiscoveredListIterator {
 private:
   DiscoveredList&    _refs_list;
-  HeapWord*          _prev_next;
-  oop                _prev;
-  oop                _ref;
-  HeapWord*          _discovered_addr;
-  oop                _next;
+  HeapWord*          _prev_discovered_addr;
+  oop                _prev_discovered;
+  oop                _current_discovered;
+  HeapWord*          _current_discovered_addr;
+  oop                _next_discovered;
+
   HeapWord*          _referent_addr;
   oop                _referent;
+
   OopClosure*        _keep_alive;
   BoolObjectClosure* _is_alive;

@@ -101,10 +103,10 @@
                          BoolObjectClosure* is_alive);

   // End Of List.
-  inline bool has_next() const { return _ref != NULL; }
+  inline bool has_next() const { return _current_discovered != NULL; }

   // Get oop to the Reference object.
-  inline oop obj() const { return _ref; }
+  inline oop obj() const { return _current_discovered; }

   // Get oop to the referent object.
   inline oop referent() const { return _referent; }
@@ -123,8 +125,8 @@

   // Move to the next discovered reference.
   inline void next() {
-    _prev_next = _discovered_addr;
-    _prev = _ref;
+    _prev_discovered_addr = _current_discovered_addr;
+    _prev_discovered = _current_discovered;
     move_to_next();
   }

@@ -150,13 +152,13 @@
   )

   inline void move_to_next() {
-    if (_ref == _next) {
-      // End of the list.
-      _ref = NULL;
+    if (_current_discovered == _next_discovered) {
+      // End of the list.
+      _current_discovered = NULL;
     } else {
-      _ref = _next;
+      _current_discovered = _next_discovered;
     }
-    assert(_ref != _first_seen, "cyclic ref_list found");
+    assert(_current_discovered != _first_seen, "cyclic ref_list found");
     NOT_PRODUCT(_processed++);
   }
 };
@@ -179,7 +181,7 @@
   bool        _enqueuing_is_done;       // true if all weak references enqueued
   bool        _processing_is_mt;        // true during phases when
                                         // reference processing is MT.
-  uint        _next_id;                 // round-robin mod _num_q counter in
+  uint        _next_id;                 // round-robin mod _num_queues counter in
                                         // support of work distribution

   // For collectors that do not keep GC liveness information
@@ -200,9 +202,9 @@
   // The discovered ref lists themselves

   // The active MT'ness degree of the queues below
-  uint            _num_q;
+  uint            _num_queues;
   // The maximum MT'ness degree of the queues below
-  uint            _max_num_q;
+  uint            _max_num_queues;

   // Master array of discovered oops
   DiscoveredList* _discovered_refs;
@@ -216,8 +218,8 @@
  public:
   static int number_of_subclasses_of_ref() { return (REF_PHANTOM - REF_OTHER); }

-  uint num_q()                             { return _num_q; }
-  uint max_num_q()                         { return _max_num_q; }
+  uint num_queues() const                  { return _num_queues; }
+  uint max_num_queues() const              { return _max_num_queues; }
   void set_active_mt_degree(uint v);

   DiscoveredList* discovered_refs()        { return _discovered_refs; }
@@ -263,7 +265,7 @@
                       OopClosure*        keep_alive,
                       VoidClosure*       complete_gc);
   // Phase3: process the referents by either clearing them
-  // or keeping them alive (and their closure)
+  // or keeping them alive (and their closure), and enqueuing them.
   void process_phase3(DiscoveredList&    refs_list,
                       bool               clear_referent,
                       BoolObjectClosure* is_alive,
@@ -289,7 +291,7 @@
                                      GCTimer*            gc_timer);

   // Returns the name of the discovered reference list
-  // occupying the i / _num_q slot.
+  // occupying the i / _num_queues slot.
   const char* list_name(uint i);

   void enqueue_discovered_reflists(AbstractRefProcTaskExecutor* task_executor,
@@ -304,14 +306,14 @@
                                 VoidClosure*      complete_gc,
                                 YieldClosure*     yield);
 private:
-  // round-robin mod _num_q (not: _not_ mode _max_num_q)
+  // round-robin mod _num_queues (not: _not_ mod _max_num_queues)
   uint next_id() {
     uint id = _next_id;
     assert(!_discovery_is_mt, "Round robin should only be used in serial discovery");
-    if (++_next_id == _num_q) {
+    if (++_next_id == _num_queues) {
       _next_id = 0;
     }
-    assert(_next_id < _num_q, "_next_id %u _num_q %u _max_num_q %u", _next_id, _num_q, _max_num_q);
+    assert(_next_id < _num_queues, "_next_id %u _num_queues %u _max_num_queues %u", _next_id, _num_queues, _max_num_queues);
     return id;
   }
   DiscoveredList* get_discovered_list(ReferenceType rt);
--- old/src/hotspot/share/gc/shared/referenceProcessor.inline.hpp	2018-04-23 17:00:45.821298769 +0200
+++ new/src/hotspot/share/gc/shared/referenceProcessor.inline.hpp	2018-04-23 17:00:45.536289885 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -51,9 +51,9 @@
                                                       OopClosure*        keep_alive,
                                                       BoolObjectClosure* is_alive):
   _refs_list(refs_list),
-  _prev_next(refs_list.adr_head()),
-  _prev(NULL),
-  _ref(refs_list.head()),
+  _prev_discovered_addr(refs_list.adr_head()),
+  _prev_discovered(NULL),
+  _current_discovered(refs_list.head()),
 #ifdef ASSERT
   _first_seen(refs_list.head()),
 #endif
@@ -61,7 +61,7 @@
   _processed(0),
   _removed(0),
 #endif
-  _next(NULL),
+  _next_discovered(NULL),
   _keep_alive(keep_alive),
   _is_alive(is_alive) {
 }