/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.inline.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"

ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy      = NULL;
jlong            ReferenceProcessor::_soft_ref_timestamp_clock = 0;

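// Called during VM bootstrap (from init_globals() in runtime/init.cpp) to set
// up the reference policy objects and the soft reference timestamp clock
// before any ReferenceProcessor is constructed.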
void referenceProcessor_init() {
  ReferenceProcessor::init_statics();
}

void ReferenceProcessor::init_statics() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;

  // Initialize the soft ref timestamp clock.
  _soft_ref_timestamp_clock = now;
  // Also update the soft ref clock in j.l.r.SoftReference
  java_lang_ref_SoftReference::set_clock(_soft_ref_timestamp_clock);

  _always_clear_soft_ref_policy = new AlwaysClearPolicy();
  if (is_server_compilation_mode_vm()) {
    _default_soft_ref_policy = new LRUMaxHeapPolicy();
  } else {
    _default_soft_ref_policy = new LRUCurrentHeapPolicy();
  }
  if (_always_clear_soft_ref_policy == NULL || _default_soft_ref_policy == NULL) {
    vm_exit_during_initialization("Could not allocate reference policy object");
  }
  guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery ||
            RefDiscoveryPolicy == ReferentBasedDiscovery,
            "Unrecognized RefDiscoveryPolicy");
}

void ReferenceProcessor::enable_discovery(bool check_no_refs) {
#ifdef ASSERT
  // Verify that we're not currently discovering refs
  assert(!_discovering_refs, "nested call?");

  if (check_no_refs) {
    // Verify that the discovered lists are empty
    verify_no_references_recorded();
  }
#endif // ASSERT

  // Someone could have modified the value of the static
  // field in the j.l.r.SoftReference class that holds the
  // soft reference timestamp clock using reflection or
  // Unsafe between GCs. Unconditionally update the static
  // field in ReferenceProcessor here so that we use the new
  // value during reference discovery.

  _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock();
  _discovering_refs = true;
}

ReferenceProcessor::ReferenceProcessor(BoolObjectClosure* is_subject_to_discovery,
                                       bool      mt_processing,
                                       uint      mt_processing_degree,
                                       bool      mt_discovery,
                                       uint      mt_discovery_degree,
                                       bool      atomic_discovery,
                                       BoolObjectClosure* is_alive_non_header,
                                       bool      adjust_no_of_processing_threads)  :
  _is_subject_to_discovery(is_subject_to_discovery),
  _discovering_refs(false),
  _enqueuing_is_done(false),
  _processing_is_mt(mt_processing),
  _next_id(0),
  _adjust_no_of_processing_threads(adjust_no_of_processing_threads),
  _is_alive_non_header(is_alive_non_header)
{
  assert(is_subject_to_discovery != NULL, "must be set");

  _discovery_is_atomic = atomic_discovery;
  _discovery_is_mt     = mt_discovery;
  _num_queues          = MAX2(1U, mt_processing_degree);
  _max_num_queues      = MAX2(_num_queues, mt_discovery_degree);
  _discovered_refs     = NEW_C_HEAP_ARRAY(DiscoveredList,
            _max_num_queues * number_of_subclasses_of_ref(), mtGC);

  if (_discovered_refs == NULL) {
    vm_exit_during_initialization("Could not allocate RefProc Array");
  }
  _discoveredSoftRefs    = &_discovered_refs[0];
  _discoveredWeakRefs    = &_discoveredSoftRefs[_max_num_queues];
  _discoveredFinalRefs   = &_discoveredWeakRefs[_max_num_queues];
  _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_queues];
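  // Resulting layout of the single C-heap array (q == _max_num_queues):
  //
  //   _discovered_refs: [ Soft_0 .. Soft_{q-1} | Weak_0 .. Weak_{q-1} |
  //                       Final_0 .. Final_{q-1} | Phantom_0 .. Phantom_{q-1} ]
  //
  // Each _discovered*Refs pointer above simply aliases a q-element slice of it.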

  // Initialize all entries to NULL
  for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) {
    _discovered_refs[i].clear();
  }

  setup_policy(false /* default soft ref policy */);
}

#ifndef PRODUCT
void ReferenceProcessor::verify_no_references_recorded() {
  guarantee(!_discovering_refs, "Discovering refs?");
  for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) {
    guarantee(_discovered_refs[i].is_empty(),
              "Found non-empty discovered list at %u", i);
  }
}
#endif

void ReferenceProcessor::weak_oops_do(OopClosure* f) {
  for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) {
    if (UseCompressedOops) {
      f->do_oop((narrowOop*)_discovered_refs[i].adr_head());
    } else {
      f->do_oop((oop*)_discovered_refs[i].adr_head());
    }
  }
}

void ReferenceProcessor::update_soft_ref_master_clock() {
  // Update (advance) the soft ref master clock field. This must be done
  // after processing the soft ref list.

  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  jlong soft_ref_clock = java_lang_ref_SoftReference::clock();
  assert(soft_ref_clock == _soft_ref_timestamp_clock, "soft ref clocks out of sync");

  NOT_PRODUCT(
  if (now < _soft_ref_timestamp_clock) {
    log_warning(gc)("time warp: " JLONG_FORMAT " to " JLONG_FORMAT,
                    _soft_ref_timestamp_clock, now);
  }
  )
  // The values of now and _soft_ref_timestamp_clock are set using
  // javaTimeNanos(), which is guaranteed to be monotonically
  // non-decreasing provided the underlying platform provides such
  // a time source (and it is bug free).
  // In product mode, however, protect ourselves from non-monotonicity.
  if (now > _soft_ref_timestamp_clock) {
    _soft_ref_timestamp_clock = now;
    java_lang_ref_SoftReference::set_clock(now);
  }
  // Else leave clock stalled at its old value until time progresses
  // past clock value.
}
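
// Illustrative note (not in the original source): the policies created in
// init_statics() compare this clock against each SoftReference's timestamp.
// Roughly, with LRUCurrentHeapPolicy a softly reachable object is kept while
//   (_soft_ref_timestamp_clock - timestamp) <= free_heap_MB * SoftRefLRUPolicyMSPerMB
// so advancing the clock here is what makes untouched SoftReferences "age".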

size_t ReferenceProcessor::total_count(DiscoveredList lists[]) const {
  size_t total = 0;
  for (uint i = 0; i < _max_num_queues; ++i) {
    total += lists[i].length();
  }
  return total;
}

#ifdef ASSERT
void ReferenceProcessor::verify_total_count_zero(DiscoveredList lists[], const char* type) {
  size_t count = total_count(lists);
  assert(count == 0, "%ss must be empty but has " SIZE_FORMAT " elements", type, count);
}
#endif

ReferenceProcessorStats ReferenceProcessor::process_discovered_references(
  BoolObjectClosure*            is_alive,
  OopClosure*                   keep_alive,
  VoidClosure*                  complete_gc,
  AbstractRefProcTaskExecutor*  task_executor,
  ReferenceProcessorPhaseTimes* phase_times) {

  double start_time = os::elapsedTime();

  assert(!enqueuing_is_done(), "If here enqueuing should not be complete");
  // Stop treating discovered references specially.
  disable_discovery();

  // If discovery was concurrent, someone could have modified
  // the value of the static field in the j.l.r.SoftReference
  // class that holds the soft reference timestamp clock using
  // reflection or Unsafe between when discovery was enabled and
  // now. Unconditionally update the static field in ReferenceProcessor
  // here so that we use the new value during processing of the
  // discovered soft refs.

  _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock();

  ReferenceProcessorStats stats(total_count(_discoveredSoftRefs),
                                total_count(_discoveredWeakRefs),
                                total_count(_discoveredFinalRefs),
                                total_count(_discoveredPhantomRefs));

  {
    RefProcTotalPhaseTimesTracker tt(RefPhase1, phase_times, this);
    process_soft_ref_reconsider(is_alive, keep_alive, complete_gc,
                                task_executor, phase_times);
  }

  update_soft_ref_master_clock();

  {
    RefProcTotalPhaseTimesTracker tt(RefPhase2, phase_times, this);
    process_soft_weak_final_refs(is_alive, keep_alive, complete_gc, task_executor, phase_times);
  }

  {
    RefProcTotalPhaseTimesTracker tt(RefPhase3, phase_times, this);
    process_final_keep_alive(keep_alive, complete_gc, task_executor, phase_times);
  }

  {
    RefProcTotalPhaseTimesTracker tt(RefPhase4, phase_times, this);
    process_phantom_refs(is_alive, keep_alive, complete_gc, task_executor, phase_times);
  }

  if (task_executor != NULL) {
    // Record the work done by the parallel workers.
    task_executor->set_single_threaded_mode();
  }

  phase_times->set_total_time_ms((os::elapsedTime() - start_time) * 1000);

  return stats;
}
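
// Summary of the four phases above (descriptive note, not in the original):
//   Phase 1: reconsider SoftReferences against the current policy; refs the
//            policy wants to keep are dropped from the lists and their
//            referents kept alive.
//   Phase 2: for Soft/Weak/Final refs, drop those whose referents are still
//            alive; dead Soft/Weak refs are cleared and enqueued, while dead
//            Final refs stay on their lists for phase 3.
//   Phase 3: keep FinalReference referents (and everything reachable from
//            them) alive, then enqueue the references for finalization.
//   Phase 4: clear and enqueue dead PhantomReferences.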

void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
  _current_discovered_addr = java_lang_ref_Reference::discovered_addr_raw(_current_discovered);
  oop discovered = java_lang_ref_Reference::discovered(_current_discovered);
  assert(_current_discovered_addr && oopDesc::is_oop_or_null(discovered),
         "Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered));
  _next_discovered = discovered;

  _referent_addr = java_lang_ref_Reference::referent_addr_raw(_current_discovered);
  _referent = java_lang_ref_Reference::referent(_current_discovered);
  assert(Universe::heap()->is_in_reserved_or_null(_referent),
         "Wrong oop found in java.lang.Reference object");
  assert(allow_null_referent ?
             oopDesc::is_oop_or_null(_referent)
           : oopDesc::is_oop(_referent),
         "Expected an oop%s for referent field at " PTR_FORMAT,
         (allow_null_referent ? " or NULL" : ""),
         p2i(_referent));
}

void DiscoveredListIterator::remove() {
  assert(oopDesc::is_oop(_current_discovered), "Dropping a bad reference");
  RawAccess<>::oop_store(_current_discovered_addr, oop(NULL));

  // For the first reference in a list, _prev_discovered_addr actually points
  // into the DiscoveredList itself (gross).
  oop new_next;
  if (oopDesc::equals_raw(_next_discovered, _current_discovered)) {
    // At the end of the list, we should make _prev_discovered point to itself.
    // If _current_discovered is the first ref, then _prev_discovered_addr will
    // be in the DiscoveredList, and _prev_discovered will be NULL.
    new_next = _prev_discovered;
  } else {
    new_next = _next_discovered;
  }
  // Remove Reference object from discovered list. Note that G1 does not need a
  // pre-barrier here because we know the Reference has already been found/marked,
  // that's how it ended up in the discovered list in the first place.
  RawAccess<>::oop_store(_prev_discovered_addr, new_next);
  _removed++;
  _refs_list.dec_length(1);
}
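
// Sketch of remove() on a list A -> B -> C (the terminating self-loop is at
// C), removing B: B's discovered field is NULLed and A's discovered field is
// redirected to C. Removing C instead redirects B's discovered field to B
// itself, re-establishing the terminating self-loop.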

void DiscoveredListIterator::clear_referent() {
  RawAccess<>::oop_store(_referent_addr, oop(NULL));
}

void DiscoveredListIterator::enqueue() {
  HeapAccess<AS_NO_KEEPALIVE>::oop_store_at(_current_discovered,
                                            java_lang_ref_Reference::discovered_offset,
                                            _next_discovered);
}

void DiscoveredListIterator::complete_enqueue() {
  if (_prev_discovered != NULL) {
    // This is the last object.
    // Swap refs_list into pending list and set obj's
    // discovered to what we read from the pending list.
    oop old = Universe::swap_reference_pending_list(_refs_list.head());
    HeapAccess<AS_NO_KEEPALIVE>::oop_store_at(_prev_discovered, java_lang_ref_Reference::discovered_offset, old);
  }
}
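
// Descriptive note: enqueue() rewrites each Reference's discovered link with
// proper barriers so the discovered list can double as the pending-list
// chain, and complete_enqueue() then splices the whole chain in front of
// Universe::reference_pending_list(), hanging the old pending head off the
// last Reference; the java.lang.ref Reference handler thread picks the list
// up from there.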

inline void log_dropped_ref(const DiscoveredListIterator& iter, const char* reason) {
  if (log_develop_is_enabled(Trace, gc, ref)) {
    ResourceMark rm;
    log_develop_trace(gc, ref)("Dropping %s reference " PTR_FORMAT ": %s",
                               reason, p2i(iter.obj()),
                               iter.obj()->klass()->internal_name());
  }
}

inline void log_enqueued_ref(const DiscoveredListIterator& iter, const char* reason) {
  if (log_develop_is_enabled(Trace, gc, ref)) {
    ResourceMark rm;
    log_develop_trace(gc, ref)("Enqueue %s reference (" INTPTR_FORMAT ": %s)",
                               reason, p2i(iter.obj()), iter.obj()->klass()->internal_name());
  }
  assert(oopDesc::is_oop(iter.obj(), UseConcMarkSweepGC), "Adding a bad reference");
}

size_t ReferenceProcessor::process_soft_ref_reconsider_work(DiscoveredList&    refs_list,
                                                            ReferencePolicy*   policy,
                                                            BoolObjectClosure* is_alive,
                                                            OopClosure*        keep_alive,
                                                            VoidClosure*       complete_gc) {
  assert(policy != NULL, "Must have a non-NULL policy");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  // Decide which softly reachable refs should be kept alive.
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
    bool referent_is_dead = (iter.referent() != NULL) && !iter.is_referent_alive();
    if (referent_is_dead &&
        !policy->should_clear_reference(iter.obj(), _soft_ref_timestamp_clock)) {
      log_dropped_ref(iter, "by policy");
      // Remove Reference object from list
      iter.remove();
      // keep the referent around
      iter.make_referent_alive();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Close the reachable set
  complete_gc->do_void();

  log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " dead Refs out of " SIZE_FORMAT " discovered Refs by policy, from list " INTPTR_FORMAT,
                             iter.removed(), iter.processed(), p2i(&refs_list));
  return iter.removed();
}

size_t ReferenceProcessor::process_soft_weak_final_refs_work(DiscoveredList&    refs_list,
                                                             BoolObjectClosure* is_alive,
                                                             OopClosure*        keep_alive,
                                                             bool               do_enqueue_and_clear) {
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
    if (iter.referent() == NULL) {
      // Reference has been cleared since discovery; only possible if
      // discovery is not atomic (checked by load_ptrs).  Remove
      // reference from list.
      log_dropped_ref(iter, "cleared");
      iter.remove();
      iter.move_to_next();
    } else if (iter.is_referent_alive()) {
      // The referent is reachable after all.
      // Remove reference from list.
      log_dropped_ref(iter, "reachable");
      iter.remove();
      // Update the referent pointer as necessary.  Note that this
      // should not entail any recursive marking because the
      // referent must already have been traversed.
      iter.make_referent_alive();
      iter.move_to_next();
    } else {
      if (do_enqueue_and_clear) {
        iter.clear_referent();
        iter.enqueue();
        log_enqueued_ref(iter, "cleared");
      }
      // Keep in discovered list
      iter.next();
    }
  }
  if (do_enqueue_and_clear) {
    iter.complete_enqueue();
    refs_list.clear();
  }

  log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " active Refs out of " SIZE_FORMAT
                             " Refs in discovered list " INTPTR_FORMAT,
                             iter.removed(), iter.processed(), p2i(&refs_list));
  return iter.removed();
}
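// Descriptive note (not in the original source): this phase 3 helper keeps
// each FinalReference's referent and its transitive closure alive, marks the
// reference inactive by self-looping its next field, and enqueues it so the
// finalizer will eventually run.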
size_t ReferenceProcessor::process_final_keep_alive_work(DiscoveredList& refs_list,
                                                         OopClosure*     keep_alive,
                                                         VoidClosure*    complete_gc) {
  DiscoveredListIterator iter(refs_list, keep_alive, NULL);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
    // keep the referent and followers around
    iter.make_referent_alive();

    // Self-loop next, to mark the FinalReference not active.
    assert(java_lang_ref_Reference::next(iter.obj()) == NULL, "enqueued FinalReference");
    java_lang_ref_Reference::set_next_raw(iter.obj(), iter.obj());

    iter.enqueue();
    log_enqueued_ref(iter, "Final");
    iter.next();
  }
  iter.complete_enqueue();
  // Close the reachable set
  complete_gc->do_void();
  refs_list.clear();

  assert(iter.removed() == 0, "This phase does not remove anything.");
  return iter.removed();
}

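// Descriptive note (not in the original source): phase 4 drops
// PhantomReferences whose referents are NULL or still alive, and clears and
// enqueues the rest.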
size_t ReferenceProcessor::process_phantom_refs_work(DiscoveredList&    refs_list,
                                                     BoolObjectClosure* is_alive,
                                                     OopClosure*        keep_alive,
                                                     VoidClosure*       complete_gc) {
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));

    oop const referent = iter.referent();

    if (referent == NULL || iter.is_referent_alive()) {
      iter.make_referent_alive();
      iter.remove();
      iter.move_to_next();
    } else {
      iter.clear_referent();
      iter.enqueue();
      log_enqueued_ref(iter, "cleared Phantom");
      iter.next();
    }
  }
  iter.complete_enqueue();
  // Close the reachable set; needed for collectors whose keep_alive closures
  // do not immediately complete their work.
  complete_gc->do_void();
  refs_list.clear();

  return iter.removed();
}

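// Descriptive note: discovered lists are terminated by a self-loop in the
// last Reference's discovered field, so the walk below stops once following
// the discovered link no longer advances (next == obj).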
void
ReferenceProcessor::clear_discovered_references(DiscoveredList& refs_list) {
  oop obj = NULL;
  oop next = refs_list.head();
  while (!oopDesc::equals_raw(next, obj)) {
    obj = next;
    next = java_lang_ref_Reference::discovered(obj);
    java_lang_ref_Reference::set_discovered_raw(obj, NULL);
  }
  refs_list.clear();
}

void ReferenceProcessor::abandon_partial_discovery() {
  // loop over the lists
  for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) {
    if ((i % _max_num_queues) == 0) {
      log_develop_trace(gc, ref)("Abandoning %s discovered list", list_name(i));
    }
    clear_discovered_references(_discovered_refs[i]);
  }
}

size_t ReferenceProcessor::total_reference_count(ReferenceType type) const {
  DiscoveredList* list = NULL;

  switch (type) {
    case REF_SOFT:
      list = _discoveredSoftRefs;
      break;
    case REF_WEAK:
      list = _discoveredWeakRefs;
      break;
    case REF_FINAL:
      list = _discoveredFinalRefs;
      break;
    case REF_PHANTOM:
      list = _discoveredPhantomRefs;
      break;
    case REF_OTHER:
    case REF_NONE:
    default:
      ShouldNotReachHere();
  }
  return total_count(list);
}

class RefProcPhase1Task : public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase1Task(ReferenceProcessor&           ref_processor,
                    ReferenceProcessorPhaseTimes* phase_times,
                    ReferencePolicy*              policy)
    : ProcessTask(ref_processor, true /* marks_oops_alive */, phase_times),
      _policy(policy) { }

  virtual void work(uint worker_id,
                    BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    RefProcSubPhasesWorkerTimeTracker tt(ReferenceProcessor::SoftRefSubPhase1, _phase_times, worker_id);
    size_t const removed = _ref_processor.process_soft_ref_reconsider_work(_ref_processor._discoveredSoftRefs[worker_id],
                                                                           _policy,
                                                                           &is_alive,
                                                                           &keep_alive,
                                                                           &complete_gc);
    _phase_times->add_ref_cleared(REF_SOFT, removed);
  }
private:
  ReferencePolicy* _policy;
};

class RefProcPhase2Task: public AbstractRefProcTaskExecutor::ProcessTask {
  void run_phase2(uint worker_id,
                  DiscoveredList list[],
                  BoolObjectClosure& is_alive,
                  OopClosure& keep_alive,
                  bool do_enqueue_and_clear,
                  ReferenceType ref_type) {
    size_t const removed = _ref_processor.process_soft_weak_final_refs_work(list[worker_id],
                                                                            &is_alive,
                                                                            &keep_alive,
                                                                            do_enqueue_and_clear);
    _phase_times->add_ref_cleared(ref_type, removed);
  }

public:
  RefProcPhase2Task(ReferenceProcessor& ref_processor,
                    ReferenceProcessorPhaseTimes* phase_times)
    : ProcessTask(ref_processor, false /* marks_oops_alive */, phase_times) { }

  virtual void work(uint worker_id,
                    BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc) {
    RefProcWorkerTimeTracker t(_phase_times->phase2_worker_time_sec(), worker_id);
    {
      RefProcSubPhasesWorkerTimeTracker tt(ReferenceProcessor::SoftRefSubPhase2, _phase_times, worker_id);
      run_phase2(worker_id, _ref_processor._discoveredSoftRefs, is_alive, keep_alive, true /* do_enqueue_and_clear */, REF_SOFT);
    }
    {
      RefProcSubPhasesWorkerTimeTracker tt(ReferenceProcessor::WeakRefSubPhase2, _phase_times, worker_id);
      run_phase2(worker_id, _ref_processor._discoveredWeakRefs, is_alive, keep_alive, true /* do_enqueue_and_clear */, REF_WEAK);
    }
    {
      RefProcSubPhasesWorkerTimeTracker tt(ReferenceProcessor::FinalRefSubPhase2, _phase_times, worker_id);
      run_phase2(worker_id, _ref_processor._discoveredFinalRefs, is_alive, keep_alive, false /* do_enqueue_and_clear */, REF_FINAL);
    }
    // Close the reachable set; needed for collectors whose keep_alive closures
    // do not immediately complete their work.
    complete_gc.do_void();
  }
};

class RefProcPhase3Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase3Task(ReferenceProcessor&           ref_processor,
                    ReferenceProcessorPhaseTimes* phase_times)
    : ProcessTask(ref_processor, true /* marks_oops_alive */, phase_times) { }

  virtual void work(uint worker_id,
                    BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    RefProcSubPhasesWorkerTimeTracker tt(ReferenceProcessor::FinalRefSubPhase3, _phase_times, worker_id);
    _ref_processor.process_final_keep_alive_work(_ref_processor._discoveredFinalRefs[worker_id], &keep_alive, &complete_gc);
  }
};

class RefProcPhase4Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase4Task(ReferenceProcessor&           ref_processor,
                    ReferenceProcessorPhaseTimes* phase_times)
    : ProcessTask(ref_processor, false /* marks_oops_alive */, phase_times) { }

  virtual void work(uint worker_id,
                    BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    RefProcSubPhasesWorkerTimeTracker tt(ReferenceProcessor::PhantomRefSubPhase4, _phase_times, worker_id);
    size_t const removed = _ref_processor.process_phantom_refs_work(_ref_processor._discoveredPhantomRefs[worker_id],
                                                                    &is_alive,
                                                                    &keep_alive,
                                                                    &complete_gc);
    _phase_times->add_ref_cleared(REF_PHANTOM, removed);
  }
};

void ReferenceProcessor::log_reflist(const char* prefix, DiscoveredList list[], uint num_active_queues) {
  LogTarget(Trace, gc, ref) lt;

  if (!lt.is_enabled()) {
    return;
  }

  size_t total = 0;

  LogStream ls(lt);
  ls.print("%s", prefix);
  for (uint i = 0; i < num_active_queues; i++) {
    ls.print(SIZE_FORMAT " ", list[i].length());
    total += list[i].length();
  }
  ls.print_cr("(" SIZE_FORMAT ")", total);
}

#ifndef PRODUCT
void ReferenceProcessor::log_reflist_counts(DiscoveredList ref_lists[], uint num_active_queues) {
  if (!log_is_enabled(Trace, gc, ref)) {
    return;
  }

  log_reflist("", ref_lists, num_active_queues);
#ifdef ASSERT
  for (uint i = num_active_queues; i < _max_num_queues; i++) {
    assert(ref_lists[i].length() == 0, SIZE_FORMAT " unexpected References in %u",
           ref_lists[i].length(), i);
  }
#endif
}
#endif

void ReferenceProcessor::set_active_mt_degree(uint v) {
  _num_queues = v;
  _next_id = 0;
}

bool ReferenceProcessor::need_balance_queues(DiscoveredList refs_lists[]) {
  assert(_processing_is_mt, "why balance non-mt processing?");
  // _num_queues is the processing degree.  Only list entries up to
  // _num_queues will be processed, so any non-empty lists beyond
  // that must be redistributed to lists in that range.  Even if not
  // needed for that, balancing may be desirable to eliminate poor
  // distribution of references among the lists.
  if (ParallelRefProcBalancingEnabled) {
    return true;                // Configuration says do it.
  } else {
    // Configuration says don't balance, but if there are non-empty
    // lists beyond the processing degree, then must ignore the
    // configuration and balance anyway.
    for (uint i = _num_queues; i < _max_num_queues; ++i) {
      if (!refs_lists[i].is_empty()) {
        return true;            // Must balance despite configuration.
      }
    }
    return false;               // Safe to obey configuration and not balance.
  }
}

void ReferenceProcessor::maybe_balance_queues(DiscoveredList refs_lists[]) {
  assert(_processing_is_mt, "Should not call this otherwise");
  if (need_balance_queues(refs_lists)) {
    balance_queues(refs_lists);
  }
}

// Balances reference queues.
// Move entries from all queues[0, 1, ..., _max_num_queues-1] to
// queues[0, 1, ..., _num_queues-1], because only the first _num_queues
// queues, corresponding to the active workers, will be processed.
void ReferenceProcessor::balance_queues(DiscoveredList ref_lists[])
{
  // calculate total length
  size_t total_refs = 0;
  log_develop_trace(gc, ref)("Balance ref_lists ");

  log_reflist_counts(ref_lists, _max_num_queues);

  for (uint i = 0; i < _max_num_queues; ++i) {
    total_refs += ref_lists[i].length();
  }
  size_t avg_refs = total_refs / _num_queues + 1;
  uint to_idx = 0;
  for (uint from_idx = 0; from_idx < _max_num_queues; from_idx++) {
    bool move_all = false;
    if (from_idx >= _num_queues) {
      move_all = ref_lists[from_idx].length() > 0;
    }
    while ((ref_lists[from_idx].length() > avg_refs) ||
           move_all) {
      assert(to_idx < _num_queues, "Sanity Check!");
      if (ref_lists[to_idx].length() < avg_refs) {
        // move superfluous refs
        size_t refs_to_move;
        // Move all the Ref's if the from queue will not be processed.
        if (move_all) {
          refs_to_move = MIN2(ref_lists[from_idx].length(),
                              avg_refs - ref_lists[to_idx].length());
        } else {
          refs_to_move = MIN2(ref_lists[from_idx].length() - avg_refs,
                              avg_refs - ref_lists[to_idx].length());
        }

        assert(refs_to_move > 0, "otherwise the code below will fail");

        oop move_head = ref_lists[from_idx].head();
        oop move_tail = move_head;
        oop new_head  = move_head;
        // find an element to split the list on
        for (size_t j = 0; j < refs_to_move; ++j) {
          move_tail = new_head;
          new_head = java_lang_ref_Reference::discovered(new_head);
        }

        // Add the chain to the to list.
        if (ref_lists[to_idx].head() == NULL) {
          // to list is empty. Make a loop at the end.
          java_lang_ref_Reference::set_discovered_raw(move_tail, move_tail);
        } else {
          java_lang_ref_Reference::set_discovered_raw(move_tail, ref_lists[to_idx].head());
        }
        ref_lists[to_idx].set_head(move_head);
        ref_lists[to_idx].inc_length(refs_to_move);

        // Remove the chain from the from list.
        if (oopDesc::equals_raw(move_tail, new_head)) {
          // We found the end of the from list.
          ref_lists[from_idx].set_head(NULL);
        } else {
          ref_lists[from_idx].set_head(new_head);
        }
        ref_lists[from_idx].dec_length(refs_to_move);
        if (ref_lists[from_idx].length() == 0) {
          break;
        }
      } else {
        to_idx = (to_idx + 1) % _num_queues;
      }
    }
  }
#ifdef ASSERT
  log_reflist_counts(ref_lists, _num_queues);
  size_t balanced_total_refs = 0;
  for (uint i = 0; i < _num_queues; ++i) {
    balanced_total_refs += ref_lists[i].length();
  }
  assert(total_refs == balanced_total_refs, "Balancing was incomplete");
#endif
}
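
// Worked example (illustrative only): with _max_num_queues == 4,
// _num_queues == 2 and list lengths {8, 0, 6, 2}, total_refs == 16 and
// avg_refs == 16/2 + 1 == 9. Queues 2 and 3 lie beyond the processing degree
// and are drained completely into queues 0 and 1, never pushing a target past
// avg_refs, ending at {9, 7, 0, 0}.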

bool ReferenceProcessor::is_mt_processing_set_up(AbstractRefProcTaskExecutor* task_executor) const {
  return task_executor != NULL && _processing_is_mt;
}

void ReferenceProcessor::process_soft_ref_reconsider(BoolObjectClosure* is_alive,
                                                     OopClosure* keep_alive,
                                                     VoidClosure* complete_gc,
                                                     AbstractRefProcTaskExecutor* task_executor,
                                                     ReferenceProcessorPhaseTimes* phase_times) {
  assert(!_processing_is_mt || task_executor != NULL, "Task executor must not be NULL when mt processing is set.");

  size_t const num_soft_refs = total_count(_discoveredSoftRefs);
  phase_times->set_ref_discovered(REF_SOFT, num_soft_refs);

  phase_times->set_processing_is_mt(_processing_is_mt);

  if (num_soft_refs == 0 || _current_soft_ref_policy == NULL) {
    log_debug(gc, ref)("Skipped phase1 of Reference Processing due to unavailable references");
    return;
  }

  RefProcMTDegreeAdjuster a(this, RefPhase1, num_soft_refs);

  if (_processing_is_mt) {
    RefProcBalanceQueuesTimeTracker tt(RefPhase1, phase_times);
    maybe_balance_queues(_discoveredSoftRefs);
  }

  RefProcPhaseTimeTracker tt(RefPhase1, phase_times);

  log_reflist("Phase1 Soft before", _discoveredSoftRefs, _max_num_queues);
  if (_processing_is_mt) {
    RefProcPhase1Task phase1(*this, phase_times, _current_soft_ref_policy);
    task_executor->execute(phase1, num_queues());
  } else {
    size_t removed = 0;

    RefProcSubPhasesWorkerTimeTracker tt2(SoftRefSubPhase1, phase_times, 0);
    for (uint i = 0; i < _max_num_queues; i++) {
      removed += process_soft_ref_reconsider_work(_discoveredSoftRefs[i], _current_soft_ref_policy,
                                                  is_alive, keep_alive, complete_gc);
    }

    phase_times->add_ref_cleared(REF_SOFT, removed);
  }
  log_reflist("Phase1 Soft after", _discoveredSoftRefs, _max_num_queues);
}

void ReferenceProcessor::process_soft_weak_final_refs(BoolObjectClosure* is_alive,
                                                      OopClosure* keep_alive,
                                                      VoidClosure* complete_gc,
                                                      AbstractRefProcTaskExecutor*  task_executor,
                                                      ReferenceProcessorPhaseTimes* phase_times) {
  assert(!_processing_is_mt || task_executor != NULL, "Task executor must not be NULL when mt processing is set.");

  size_t const num_soft_refs = total_count(_discoveredSoftRefs);
  size_t const num_weak_refs = total_count(_discoveredWeakRefs);
  size_t const num_final_refs = total_count(_discoveredFinalRefs);
  size_t const num_total_refs = num_soft_refs + num_weak_refs + num_final_refs;
  phase_times->set_ref_discovered(REF_WEAK, num_weak_refs);
  phase_times->set_ref_discovered(REF_FINAL, num_final_refs);

  phase_times->set_processing_is_mt(_processing_is_mt);

  if (num_total_refs == 0) {
    log_debug(gc, ref)("Skipped phase2 of Reference Processing due to unavailable references");
    return;
  }

  RefProcMTDegreeAdjuster a(this, RefPhase2, num_total_refs);

  if (_processing_is_mt) {
    RefProcBalanceQueuesTimeTracker tt(RefPhase2, phase_times);
    maybe_balance_queues(_discoveredSoftRefs);
    maybe_balance_queues(_discoveredWeakRefs);
    maybe_balance_queues(_discoveredFinalRefs);
  }

  RefProcPhaseTimeTracker tt(RefPhase2, phase_times);

  log_reflist("Phase2 Soft before", _discoveredSoftRefs, _max_num_queues);
  log_reflist("Phase2 Weak before", _discoveredWeakRefs, _max_num_queues);
  log_reflist("Phase2 Final before", _discoveredFinalRefs, _max_num_queues);
  if (_processing_is_mt) {
    RefProcPhase2Task phase2(*this, phase_times);
    task_executor->execute(phase2, num_queues());
  } else {
    RefProcWorkerTimeTracker t(phase_times->phase2_worker_time_sec(), 0);
    {
      size_t removed = 0;

      RefProcSubPhasesWorkerTimeTracker tt2(SoftRefSubPhase2, phase_times, 0);
      for (uint i = 0; i < _max_num_queues; i++) {
        removed += process_soft_weak_final_refs_work(_discoveredSoftRefs[i], is_alive, keep_alive, true /* do_enqueue_and_clear */);
      }

      phase_times->add_ref_cleared(REF_SOFT, removed);
    }
    {
      size_t removed = 0;

      RefProcSubPhasesWorkerTimeTracker tt2(WeakRefSubPhase2, phase_times, 0);
      for (uint i = 0; i < _max_num_queues; i++) {
        removed += process_soft_weak_final_refs_work(_discoveredWeakRefs[i], is_alive, keep_alive, true /* do_enqueue_and_clear */);
      }

      phase_times->add_ref_cleared(REF_WEAK, removed);
    }
    {
      size_t removed = 0;

      RefProcSubPhasesWorkerTimeTracker tt2(FinalRefSubPhase2, phase_times, 0);
      for (uint i = 0; i < _max_num_queues; i++) {
        removed += process_soft_weak_final_refs_work(_discoveredFinalRefs[i], is_alive, keep_alive, false /* do_enqueue_and_clear */);
      }

      phase_times->add_ref_cleared(REF_FINAL, removed);
    }
    complete_gc->do_void();
  }
  verify_total_count_zero(_discoveredSoftRefs, "SoftReference");
  verify_total_count_zero(_discoveredWeakRefs, "WeakReference");
  log_reflist("Phase2 Final after", _discoveredFinalRefs, _max_num_queues);
}

void ReferenceProcessor::process_final_keep_alive(OopClosure* keep_alive,
                                                  VoidClosure* complete_gc,
                                                  AbstractRefProcTaskExecutor*  task_executor,
                                                  ReferenceProcessorPhaseTimes* phase_times) {
  assert(!_processing_is_mt || task_executor != NULL, "Task executor must not be NULL when mt processing is set.");

  size_t const num_final_refs = total_count(_discoveredFinalRefs);

  phase_times->set_processing_is_mt(_processing_is_mt);

  if (num_final_refs == 0) {
    log_debug(gc, ref)("Skipped phase3 of Reference Processing due to unavailable references");
    return;
  }

  RefProcMTDegreeAdjuster a(this, RefPhase3, num_final_refs);

  if (_processing_is_mt) {
    RefProcBalanceQueuesTimeTracker tt(RefPhase3, phase_times);
    maybe_balance_queues(_discoveredFinalRefs);
  }

  // Phase 3:
  // . Traverse referents of final references and keep them and followers alive.
  RefProcPhaseTimeTracker tt(RefPhase3, phase_times);

  if (_processing_is_mt) {
    RefProcPhase3Task phase3(*this, phase_times);
    task_executor->execute(phase3, num_queues());
  } else {
    RefProcSubPhasesWorkerTimeTracker tt2(FinalRefSubPhase3, phase_times, 0);
    for (uint i = 0; i < _max_num_queues; i++) {
      process_final_keep_alive_work(_discoveredFinalRefs[i], keep_alive, complete_gc);
    }
  }
  verify_total_count_zero(_discoveredFinalRefs, "FinalReference");
}

void ReferenceProcessor::process_phantom_refs(BoolObjectClosure* is_alive,
                                              OopClosure* keep_alive,
                                              VoidClosure* complete_gc,
                                              AbstractRefProcTaskExecutor* task_executor,
                                              ReferenceProcessorPhaseTimes* phase_times) {
  assert(!_processing_is_mt || task_executor != NULL, "Task executor must not be NULL when mt processing is set.");

  size_t const num_phantom_refs = total_count(_discoveredPhantomRefs);
  phase_times->set_ref_discovered(REF_PHANTOM, num_phantom_refs);

  phase_times->set_processing_is_mt(_processing_is_mt);

  if (num_phantom_refs == 0) {
    log_debug(gc, ref)("Skipped phase4 of Reference Processing due to unavailable references");
    return;
  }

  RefProcMTDegreeAdjuster a(this, RefPhase4, num_phantom_refs);

  if (_processing_is_mt) {
    RefProcBalanceQueuesTimeTracker tt(RefPhase4, phase_times);
    maybe_balance_queues(_discoveredPhantomRefs);
  }

  // Phase 4: Walk phantom references; clear and enqueue the dead ones.
  RefProcPhaseTimeTracker tt(RefPhase4, phase_times);

  log_reflist("Phase4 Phantom before", _discoveredPhantomRefs, _max_num_queues);
  if (_processing_is_mt) {
    RefProcPhase4Task phase4(*this, phase_times);
    task_executor->execute(phase4, num_queues());
  } else {
    size_t removed = 0;

    RefProcSubPhasesWorkerTimeTracker tt2(PhantomRefSubPhase4, phase_times, 0);
    for (uint i = 0; i < _max_num_queues; i++) {
      removed += process_phantom_refs_work(_discoveredPhantomRefs[i], is_alive, keep_alive, complete_gc);
    }

    phase_times->add_ref_cleared(REF_PHANTOM, removed);
  }
  verify_total_count_zero(_discoveredPhantomRefs, "PhantomReference");
}

inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt) {
  uint id = 0;
  // Determine the queue index to use for this object.
  if (_discovery_is_mt) {
    // During a multi-threaded discovery phase,
    // each thread saves to its "own" list.
    Thread* thr = Thread::current();
    id = thr->as_Worker_thread()->id();
  } else {
    // single-threaded discovery, we save in round-robin
    // fashion to each of the lists.
    if (_processing_is_mt) {
      id = next_id();
    }
  }
  assert(id < _max_num_queues, "Id is out of bounds (id %u, max id %u)", id, _max_num_queues);

  // Get the discovered queue to which we will add
  DiscoveredList* list = NULL;
  switch (rt) {
    case REF_OTHER:
      // Unknown reference type, no special treatment
      break;
    case REF_SOFT:
      list = &_discoveredSoftRefs[id];
      break;
    case REF_WEAK:
      list = &_discoveredWeakRefs[id];
      break;
    case REF_FINAL:
      list = &_discoveredFinalRefs[id];
      break;
    case REF_PHANTOM:
      list = &_discoveredPhantomRefs[id];
      break;
    case REF_NONE:
      // we should not reach here if we are an InstanceRefKlass
    default:
      ShouldNotReachHere();
  }
  log_develop_trace(gc, ref)("Thread %u gets list " INTPTR_FORMAT, id, p2i(list));
  return list;
}

inline void
ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
                                              oop             obj,
                                              HeapWord*       discovered_addr) {
  assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller");
  // First we must make sure this object is only enqueued once. CAS a non-NULL
  // value into the discovered field to claim the object.
  oop current_head = refs_list.head();
  // The last ref must have its discovered field pointing to itself.
  oop next_discovered = (current_head != NULL) ? current_head : obj;

  oop retest = HeapAccess<AS_NO_KEEPALIVE>::oop_atomic_cmpxchg(next_discovered, discovered_addr, oop(NULL));

  if (retest == NULL) {
    // This thread just won the right to enqueue the object.
    // We have separate lists for enqueueing, so no synchronization
    // is necessary.
    refs_list.set_head(obj);
    refs_list.inc_length(1);

    log_develop_trace(gc, ref)("Discovered reference (mt) (" INTPTR_FORMAT ": %s)",
                               p2i(obj), obj->klass()->internal_name());
  } else {
    // If retest was non NULL, another thread beat us to it:
    // The reference has already been discovered...
    log_develop_trace(gc, ref)("Already discovered reference (" INTPTR_FORMAT ": %s)",
                               p2i(obj), obj->klass()->internal_name());
  }
}

#ifndef PRODUCT
// Non-atomic (i.e. concurrent) discovery might allow us
// to observe j.l.References with NULL referents, being those
// cleared concurrently by mutators during (or after) discovery.
void ReferenceProcessor::verify_referent(oop obj) {
  bool da = discovery_is_atomic();
  oop referent = java_lang_ref_Reference::referent(obj);
  assert(da ? oopDesc::is_oop(referent) : oopDesc::is_oop_or_null(referent),
         "Bad referent " INTPTR_FORMAT " found in Reference "
         INTPTR_FORMAT " during %satomic discovery ",
         p2i(referent), p2i(obj), da ? "" : "non-");
}
#endif

bool ReferenceProcessor::is_subject_to_discovery(oop const obj) const {
  return _is_subject_to_discovery->do_object_b(obj);
}

// We mention two of several possible choices here:
// #0: if the reference object is not in the "originating generation"
//     (or part of the heap being collected, indicated by our "span")
//     we don't treat it specially (i.e. we scan it as we would
//     a normal oop, treating its references as strong references).
//     This means that references can't be discovered unless their
//     referent is also in the same span. This is the simplest,
//     most "local" and most conservative approach, albeit one
//     that may cause weak references to be enqueued least promptly.
//     We call this choice the "ReferenceBasedDiscovery" policy.
// #1: the reference object may be in any generation (span), but if
//     the referent is in the generation (span) being currently collected
//     then we can discover the reference object, provided
//     the object has not already been discovered by
//     a different concurrently running collector (as may be the
//     case, for instance, if the reference object is in CMS and
//     the referent in DefNewGeneration), and provided the processing
//     of this reference object by the current collector will
//     appear atomic to every other collector in the system.
//     (Thus, for instance, a concurrent collector may not
//     discover references in other generations even if the
//     referent is in its own generation). This policy may,
//     in certain cases, enqueue references somewhat sooner than
//     might Policy #0 above, but at marginally increased cost
//     and complexity in processing these references.
//     We call this choice the "ReferentBasedDiscovery" policy.
bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
  // Make sure we are discovering refs (rather than processing discovered refs).
  if (!_discovering_refs || !RegisterReferences) {
    return false;
  }

  if ((rt == REF_FINAL) && (java_lang_ref_Reference::next(obj) != NULL)) {
    // Don't rediscover non-active FinalReferences.
    return false;
  }

  if (RefDiscoveryPolicy == ReferenceBasedDiscovery &&
      !is_subject_to_discovery(obj)) {
    // Reference is not in the originating generation;
    // don't treat it specially (i.e. we want to scan it as a normal
    // object with strong references).
    return false;
  }

  // We only discover references whose referents are not (yet)
  // known to be strongly reachable.
  if (is_alive_non_header() != NULL) {
    verify_referent(obj);
    if (is_alive_non_header()->do_object_b(java_lang_ref_Reference::referent(obj))) {
      return false;  // referent is reachable
    }
  }
  if (rt == REF_SOFT) {
    // For soft refs we can decide now if these are not
    // current candidates for clearing, in which case we
    // can mark through them now, rather than delaying that
    // to the reference-processing phase. Since all current
    // time-stamp policies advance the soft-ref clock only
    // at a full collection cycle, this is always currently
    // accurate.
    if (!_current_soft_ref_policy->should_clear_reference(obj, _soft_ref_timestamp_clock)) {
      return false;
    }
  }

  ResourceMark rm;      // Needed for tracing.

  HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr_raw(obj);
  const oop  discovered = java_lang_ref_Reference::discovered(obj);
  assert(oopDesc::is_oop_or_null(discovered), "Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered));
  if (discovered != NULL) {
    // The reference has already been discovered...
    log_develop_trace(gc, ref)("Already discovered reference (" INTPTR_FORMAT ": %s)",
                               p2i(obj), obj->klass()->internal_name());
    if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
      // assumes that an object is not processed twice;
      // if it's been already discovered it must be on another
      // generation's discovered list; so we won't discover it.
      return false;
    } else {
      assert(RefDiscoveryPolicy == ReferenceBasedDiscovery,
             "Unrecognized policy");
      // Check assumption that an object is not potentially
      // discovered twice except by concurrent collectors that potentially
      // trace the same Reference object twice.
      assert(UseConcMarkSweepGC || UseG1GC || UseShenandoahGC,
             "Only possible with a concurrent marking collector");
      return true;
    }
  }

  if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
    verify_referent(obj);
    // Discover if and only if EITHER:
    // .. reference is in our span, OR
    // .. we are an atomic collector and referent is in our span
    if (!(is_subject_to_discovery(obj) ||
          (discovery_is_atomic() &&
           is_subject_to_discovery(java_lang_ref_Reference::referent(obj))))) {
      return false;
    }
  } else {
    assert(RefDiscoveryPolicy == ReferenceBasedDiscovery &&
           is_subject_to_discovery(obj), "code inconsistency");
  }

  // Get the right type of discovered queue head.
  DiscoveredList* list = get_discovered_list(rt);
  if (list == NULL) {
    return false;   // nothing special needs to be done
  }

  if (_discovery_is_mt) {
    add_to_discovered_list_mt(*list, obj, discovered_addr);
  } else {
    // We do a raw store here: the field will be visited later when processing
    // the discovered references.
    oop current_head = list->head();
    // The last ref must have its discovered field pointing to itself.
    oop next_discovered = (current_head != NULL) ? current_head : obj;

    assert(discovered == NULL, "control point invariant");
    RawAccess<>::oop_store(discovered_addr, next_discovered);
    list->set_head(obj);
    list->inc_length(1);

    log_develop_trace(gc, ref)("Discovered reference (" INTPTR_FORMAT ": %s)", p2i(obj), obj->klass()->internal_name());
  }
  assert(oopDesc::is_oop(obj), "Discovered a bad reference");
  verify_referent(obj);
  return true;
}

bool ReferenceProcessor::has_discovered_references() {
  for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) {
    if (!_discovered_refs[i].is_empty()) {
      return true;
    }
  }
  return false;
}

void ReferenceProcessor::preclean_discovered_references(BoolObjectClosure* is_alive,
                                                        OopClosure* keep_alive,
                                                        VoidClosure* complete_gc,
                                                        YieldClosure* yield,
                                                        GCTimer* gc_timer) {
  // These lists can be handled here in any order and, indeed, concurrently.

  // Soft references
  {
    GCTraceTime(Debug, gc, ref) tm("Preclean SoftReferences", gc_timer);
    log_reflist("SoftRef before: ", _discoveredSoftRefs, _max_num_queues);
    for (uint i = 0; i < _max_num_queues; i++) {
      if (yield->should_return()) {
        return;
      }
      if (preclean_discovered_reflist(_discoveredSoftRefs[i], is_alive,
                                      keep_alive, complete_gc, yield)) {
        log_reflist("SoftRef abort: ", _discoveredSoftRefs, _max_num_queues);
        return;
      }
    }
    log_reflist("SoftRef after: ", _discoveredSoftRefs, _max_num_queues);
  }

  // Weak references
  {
    GCTraceTime(Debug, gc, ref) tm("Preclean WeakReferences", gc_timer);
    log_reflist("WeakRef before: ", _discoveredWeakRefs, _max_num_queues);
    for (uint i = 0; i < _max_num_queues; i++) {
      if (yield->should_return()) {
        return;
      }
      if (preclean_discovered_reflist(_discoveredWeakRefs[i], is_alive,
                                      keep_alive, complete_gc, yield)) {
        log_reflist("WeakRef abort: ", _discoveredWeakRefs, _max_num_queues);
        return;
      }
    }
    log_reflist("WeakRef after: ", _discoveredWeakRefs, _max_num_queues);
  }

  // Final references
  {
    GCTraceTime(Debug, gc, ref) tm("Preclean FinalReferences", gc_timer);
    log_reflist("FinalRef before: ", _discoveredFinalRefs, _max_num_queues);
    for (uint i = 0; i < _max_num_queues; i++) {
      if (yield->should_return()) {
        return;
      }
      if (preclean_discovered_reflist(_discoveredFinalRefs[i], is_alive,
                                      keep_alive, complete_gc, yield)) {
        log_reflist("FinalRef abort: ", _discoveredFinalRefs, _max_num_queues);
        return;
      }
    }
    log_reflist("FinalRef after: ", _discoveredFinalRefs, _max_num_queues);
  }

  // Phantom references
  {
    GCTraceTime(Debug, gc, ref) tm("Preclean PhantomReferences", gc_timer);
    log_reflist("PhantomRef before: ", _discoveredPhantomRefs, _max_num_queues);
    for (uint i = 0; i < _max_num_queues; i++) {
      if (yield->should_return()) {
        return;
      }
      if (preclean_discovered_reflist(_discoveredPhantomRefs[i], is_alive,
                                      keep_alive, complete_gc, yield)) {
        log_reflist("PhantomRef abort: ", _discoveredPhantomRefs, _max_num_queues);
        return;
      }
    }
    log_reflist("PhantomRef after: ", _discoveredPhantomRefs, _max_num_queues);
  }
}

// Walk the given discovered ref list, and remove all reference objects
// whose referents are still alive or whose referents are NULL.
// NOTE: When we are thus precleaning the ref lists (which happens
// single-threaded today), we do not disable refs discovery to honor the
// correct semantics of java.lang.Reference. As a result, we need to be
// careful below that ref removal steps interleave safely with ref
// discovery steps (in this thread).
bool ReferenceProcessor::preclean_discovered_reflist(DiscoveredList&    refs_list,
                                                     BoolObjectClosure* is_alive,
                                                     OopClosure*        keep_alive,
                                                     VoidClosure*       complete_gc,
                                                     YieldClosure*      yield) {
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    if (yield->should_return_fine_grain()) {
      return true;
    }
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    if (iter.referent() == NULL || iter.is_referent_alive()) {
      // The referent has been cleared, or is alive; we need to trace
      // and mark its cohort.
      log_develop_trace(gc, ref)("Precleaning Reference (" INTPTR_FORMAT ": %s)",
                                 p2i(iter.obj()), iter.obj()->klass()->internal_name());
      // Remove Reference object from list
      iter.remove();
      // Keep alive its cohort.
      iter.make_referent_alive();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Close the reachable set
  complete_gc->do_void();

  if (iter.processed() > 0) {
    log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " Refs out of " SIZE_FORMAT " Refs in discovered list " INTPTR_FORMAT,
                               iter.removed(), iter.processed(), p2i(&refs_list));
  }
  return false;
}

const char* ReferenceProcessor::list_name(uint i) {
   assert(i < _max_num_queues * number_of_subclasses_of_ref(),
          "Out of bounds index");

   int j = i / _max_num_queues;
   switch (j) {
     case 0: return "SoftRef";
     case 1: return "WeakRef";
     case 2: return "FinalRef";
     case 3: return "PhantomRef";
   }
   ShouldNotReachHere();
   return NULL;
}

uint RefProcMTDegreeAdjuster::ergo_proc_thread_count(size_t ref_count,
                                                     uint max_threads,
                                                     RefProcPhases phase) const {
  assert(0 < max_threads, "must allow at least one thread");

  if (use_max_threads(phase) || (ReferencesPerThread == 0)) {
    return max_threads;
  }

  size_t thread_count = 1 + (ref_count / ReferencesPerThread);
  return (uint)MIN3(thread_count,
                    static_cast<size_t>(max_threads),
                    (size_t)os::active_processor_count());
}
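
// Worked example (illustrative only): with ReferencesPerThread == 1000,
// 4500 discovered references yield 1 + 4500/1000 == 5 threads, further
// capped by max_threads and the number of active processors.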

bool RefProcMTDegreeAdjuster::use_max_threads(RefProcPhases phase) const {
  // Even a small number of references in either of those cases could produce large amounts of work.
  return (phase == ReferenceProcessor::RefPhase1 || phase == ReferenceProcessor::RefPhase3);
}

RefProcMTDegreeAdjuster::RefProcMTDegreeAdjuster(ReferenceProcessor* rp,
                                                 RefProcPhases phase,
                                                 size_t ref_count):
    _rp(rp),
    _saved_mt_processing(_rp->processing_is_mt()),
    _saved_num_queues(_rp->num_queues()) {
  if (!_rp->processing_is_mt() || !_rp->adjust_no_of_processing_threads() || (ReferencesPerThread == 0)) {
    return;
  }

  uint workers = ergo_proc_thread_count(ref_count, _rp->num_queues(), phase);

  _rp->set_mt_processing(workers > 1);
  _rp->set_active_mt_degree(workers);
}

RefProcMTDegreeAdjuster::~RefProcMTDegreeAdjuster() {
  // Revert to previous status.
  _rp->set_mt_processing(_saved_mt_processing);
  _rp->set_active_mt_degree(_saved_num_queues);
}