src/share/vm/memory/referenceProcessor.cpp

rev 2691 : [mq]: g1-reference-processing


  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/javaClasses.hpp"
  27 #include "classfile/systemDictionary.hpp"
  28 #include "gc_interface/collectedHeap.hpp"
  29 #include "gc_interface/collectedHeap.inline.hpp"
  30 #include "memory/referencePolicy.hpp"
  31 #include "memory/referenceProcessor.hpp"
  32 #include "oops/oop.inline.hpp"
  33 #include "runtime/java.hpp"
  34 #include "runtime/jniHandles.hpp"
  35 
  36 ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
  37 ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy      = NULL;
  38 const int        subclasses_of_ref                = REF_PHANTOM - REF_OTHER;
  39 bool             ReferenceProcessor::_pending_list_uses_discovered_field = false;
  40 
  41 // List of discovered references.
  42 class DiscoveredList {
  43 public:
  44   DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
  45   oop head() const     {
  46      return UseCompressedOops ?  oopDesc::decode_heap_oop(_compressed_head) :
  47                                 _oop_head;
  48   }
  49   HeapWord* adr_head() {
  50     return UseCompressedOops ? (HeapWord*)&_compressed_head :
  51                                (HeapWord*)&_oop_head;
  52   }
  53   void   set_head(oop o) {
  54     if (UseCompressedOops) {
  55       // Must compress the head ptr.
  56       _compressed_head = oopDesc::encode_heap_oop(o);
  57     } else {
  58       _oop_head = o;
  59     }
  60   }
  61   bool   empty() const          { return head() == NULL; }
  62   size_t length()               { return _len; }
  63   void   set_length(size_t len) { _len = len;  }
  64   void   inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
  65   void   dec_length(size_t dec) { _len -= dec; }
  66 private:
  67   // Set value depending on UseCompressedOops. This could be a template class
  68   // but then we have to fix all the instantiations and declarations that use this class.
  69   oop       _oop_head;
  70   narrowOop _compressed_head;
  71   size_t _len;
  72 };
  73 
  74 void referenceProcessor_init() {
  75   ReferenceProcessor::init_statics();
  76 }
  77 
  78 void ReferenceProcessor::init_statics() {
  79   // Initialize the master soft ref clock.
  80   java_lang_ref_SoftReference::set_clock(os::javaTimeMillis());
  81 
  82   _always_clear_soft_ref_policy = new AlwaysClearPolicy();
  83   _default_soft_ref_policy      = new COMPILER2_PRESENT(LRUMaxHeapPolicy())
  84                                       NOT_COMPILER2(LRUCurrentHeapPolicy());
  85   if (_always_clear_soft_ref_policy == NULL || _default_soft_ref_policy == NULL) {
  86     vm_exit_during_initialization("Could not allocate reference policy object");
  87   }
  88   guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery ||
  89             RefDiscoveryPolicy == ReferentBasedDiscovery,
  90             "Unrecognized RefDiscoveryPolicy");
  91   _pending_list_uses_discovered_field = JDK_Version::current().pending_list_uses_discovered_field();
  92 }
  93 


  95                                        bool      mt_processing,
  96                                        int       mt_processing_degree,
  97                                        bool      mt_discovery,
  98                                        int       mt_discovery_degree,
  99                                        bool      atomic_discovery,
 100                                        BoolObjectClosure* is_alive_non_header,
 101                                        bool      discovered_list_needs_barrier)  :
 102   _discovering_refs(false),
 103   _enqueuing_is_done(false),
 104   _is_alive_non_header(is_alive_non_header),
 105   _discovered_list_needs_barrier(discovered_list_needs_barrier),
 106   _bs(NULL),
 107   _processing_is_mt(mt_processing),
 108   _next_id(0)
 109 {
 110   _span = span;
 111   _discovery_is_atomic = atomic_discovery;
 112   _discovery_is_mt     = mt_discovery;
 113   _num_q               = MAX2(1, mt_processing_degree);
 114   _max_num_q           = MAX2(_num_q, mt_discovery_degree);
 115   _discoveredSoftRefs  = NEW_C_HEAP_ARRAY(DiscoveredList, _max_num_q * subclasses_of_ref);
 116   if (_discoveredSoftRefs == NULL) {
 117     vm_exit_during_initialization("Could not allocate RefProc Array");
 118   }
 119   _discoveredWeakRefs    = &_discoveredSoftRefs[_max_num_q];
 120   _discoveredFinalRefs   = &_discoveredWeakRefs[_max_num_q];
 121   _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_q];
 122   // Initialize all entries to NULL
 123   for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
 124     _discoveredSoftRefs[i].set_head(NULL);
 125     _discoveredSoftRefs[i].set_length(0);
 126   }
 127   // If we do barriers, cache a copy of the barrier set.
 128   if (discovered_list_needs_barrier) {
 129     _bs = Universe::heap()->barrier_set();
 130   }
 131   setup_policy(false /* default soft ref policy */);
 132 }
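A purely illustrative sketch of the layout the constructor builds: one flat C-heap array of DiscoveredList, carved into four per-type regions of _max_num_q lists each (the value 4 below is an assumed example, not taken from the code):

    // index:   0..3       4..7       8..11      12..15        (_max_num_q == 4)
    // region:  SoftRef    WeakRef    FinalRef   PhantomRef
    //
    // The list for subclass s, queue q is _discoveredSoftRefs[s * _max_num_q + q],
    // which is the indexing the flat "i < _max_num_q * subclasses_of_ref" loops
    // elsewhere in this file rely on.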
 133 
 134 #ifndef PRODUCT
 135 void ReferenceProcessor::verify_no_references_recorded() {
 136   guarantee(!_discovering_refs, "Discovering refs?");
 137   for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
 138     guarantee(_discoveredSoftRefs[i].empty(),
 139               "Found non-empty discovered list");
 140   }
 141 }
 142 #endif
 143 
 144 void ReferenceProcessor::weak_oops_do(OopClosure* f) {
 145   // Should this instead be
 146   // for (int i = 0; i < subclasses_of_ref; i++) {
 147   //   for (int j = 0; j < _num_q; j++) {
 148   //     int index = i * _max_num_q + j;
 149   for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
 150     if (UseCompressedOops) {
 151       f->do_oop((narrowOop*)_discoveredSoftRefs[i].adr_head());
 152     } else {
 153       f->do_oop((oop*)_discoveredSoftRefs[i].adr_head());
 154     }
 155   }
 156 }
 157 
 158 void ReferenceProcessor::update_soft_ref_master_clock() {
 159   // Update (advance) the soft ref master clock field. This must be done
 160   // after processing the soft ref list.
 161   jlong now = os::javaTimeMillis();
 162   jlong clock = java_lang_ref_SoftReference::clock();
 163   NOT_PRODUCT(
 164   if (now < clock) {
 165     warning("time warp: %d to %d", clock, now);
 166   }
 167   )
 168   // In product mode, protect ourselves from system time being adjusted
 169   // externally and going backward; see note in the implementation of


 387 class RefProcEnqueueTask: public AbstractRefProcTaskExecutor::EnqueueTask {
 388 public:
 389   RefProcEnqueueTask(ReferenceProcessor& ref_processor,
 390                      DiscoveredList      discovered_refs[],
 391                      HeapWord*           pending_list_addr,
 392                      int                 n_queues)
 393     : EnqueueTask(ref_processor, discovered_refs,
 394                   pending_list_addr, n_queues)
 395   { }
 396 
 397   virtual void work(unsigned int work_id) {
 398     assert(work_id < (unsigned int)_ref_processor.max_num_q(), "Index out-of-bounds");
 399     // Simplest first cut: static partitioning.
 400     int index = work_id;
 401     // The increment on "index" must correspond to the maximum number of queues
 402     // (n_queues) with which that ReferenceProcessor was created.  That
 403     // is because of the "clever" way the discovered references lists were
 404     // allocated and are indexed into.
 405     assert(_n_queues == (int) _ref_processor.max_num_q(), "Different number not expected");
 406     for (int j = 0;
 407          j < subclasses_of_ref;
 408          j++, index += _n_queues) {
 409       _ref_processor.enqueue_discovered_reflist(
 410         _refs_lists[index], _pending_list_addr);
 411       _refs_lists[index].set_head(NULL);
 412       _refs_lists[index].set_length(0);
 413     }
 414   }
 415 };
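A small worked example of the static partitioning in work() above; the concrete numbers are assumptions for illustration only:

    // _n_queues == 4, work_id == 1:
    //   j = 0: index = 1    (SoftRef    queue 1)
    //   j = 1: index = 5    (WeakRef    queue 1)
    //   j = 2: index = 9    (FinalRef   queue 1)
    //   j = 3: index = 13   (PhantomRef queue 1)
    //
    // Each worker therefore enqueues one queue of every Reference subclass.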
 416 
 417 // Enqueue references that are not made active again
 418 void ReferenceProcessor::enqueue_discovered_reflists(HeapWord* pending_list_addr,
 419   AbstractRefProcTaskExecutor* task_executor) {
 420   if (_processing_is_mt && task_executor != NULL) {
 421     // Parallel code
 422     RefProcEnqueueTask tsk(*this, _discoveredSoftRefs,
 423                            pending_list_addr, _max_num_q);
 424     task_executor->execute(tsk);
 425   } else {
 426     // Serial code: call the parent class's implementation
 427     for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
 428       enqueue_discovered_reflist(_discoveredSoftRefs[i], pending_list_addr);
 429       _discoveredSoftRefs[i].set_head(NULL);
 430       _discoveredSoftRefs[i].set_length(0);
 431     }
 432   }
 433 }
 434 
 435 // Iterator for the list of discovered references.
 436 class DiscoveredListIterator {
 437 public:
 438   inline DiscoveredListIterator(DiscoveredList&    refs_list,
 439                                 OopClosure*        keep_alive,
 440                                 BoolObjectClosure* is_alive);
 441 
 442   // End Of List.
 443   inline bool has_next() const { return _ref != NULL; }
 444 
 445   // Get oop to the Reference object.
 446   inline oop obj() const { return _ref; }
 447 
 448   // Get oop to the referent object.
 449   inline oop referent() const { return _referent; }
 450 
 451   // Returns true if referent is alive.
 452   inline bool is_referent_alive() const;
 453 
 454   // Loads data for the current reference.
 455   // The "allow_null_referent" argument tells us to allow for the possibility
 456   // of a NULL referent in the discovered Reference object. This typically
 457   // happens in the case of concurrent collectors that may have done the
 458   // discovery concurrently, or interleaved, with mutator execution.
 459   inline void load_ptrs(DEBUG_ONLY(bool allow_null_referent));
 460 
 461   // Move to the next discovered reference.
 462   inline void next();
 463 
 464   // Remove the current reference from the list
 465   inline void remove();
 466 
 467   // Make the Reference object active again.
 468   inline void make_active() { java_lang_ref_Reference::set_next(_ref, NULL); }
 469 
 470   // Make the referent alive.
 471   inline void make_referent_alive() {
 472     if (UseCompressedOops) {
 473       _keep_alive->do_oop((narrowOop*)_referent_addr);
 474     } else {
 475       _keep_alive->do_oop((oop*)_referent_addr);
 476     }
 477   }
 478 
 479   // Update the discovered field.
 480   inline void update_discovered() {
 481     // First _prev_next ref actually points into DiscoveredList (gross).
 482     if (UseCompressedOops) {
 483       if (!oopDesc::is_null(*(narrowOop*)_prev_next)) {
 484         _keep_alive->do_oop((narrowOop*)_prev_next);
 485       }
 486     } else {
 487       if (!oopDesc::is_null(*(oop*)_prev_next)) {
 488         _keep_alive->do_oop((oop*)_prev_next);
 489       }
 490     }
 491   }
 492 
 493   // NULL out referent pointer.
 494   inline void clear_referent() { oop_store_raw(_referent_addr, NULL); }
 495 
 496   // Statistics
 497   NOT_PRODUCT(
 498   inline size_t processed() const { return _processed; }
 499   inline size_t removed() const   { return _removed; }
 500   )
 501 
 502   inline void move_to_next();
 503 
 504 private:
 505   DiscoveredList&    _refs_list;
 506   HeapWord*          _prev_next;
 507   oop                _prev;
 508   oop                _ref;
 509   HeapWord*          _discovered_addr;
 510   oop                _next;
 511   HeapWord*          _referent_addr;
 512   oop                _referent;
 513   OopClosure*        _keep_alive;
 514   BoolObjectClosure* _is_alive;
 515   DEBUG_ONLY(
 516   oop                _first_seen; // cyclic linked list check
 517   )
 518   NOT_PRODUCT(
 519   size_t             _processed;
 520   size_t             _removed;
 521   )
 522 };
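A minimal sketch of the iteration idiom the process_phase*() methods build on; the drop decision and the closure arguments (refs_list, keep_alive, is_alive, complete_gc) are placeholders, not taken from any particular phase:

    DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
    while (iter.has_next()) {
      iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
      bool drop = /* phase-specific decision, e.g. policy or liveness check */ false;
      if (drop) {
        iter.remove();              // unlink it from the discovered list
        iter.make_referent_alive(); // and keep its referent reachable
        iter.move_to_next();
      } else {
        iter.next();
      }
    }
    complete_gc->do_void();         // close the transitive closure kept alive above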
 523 
 524 inline DiscoveredListIterator::DiscoveredListIterator(DiscoveredList&    refs_list,
 525                                                       OopClosure*        keep_alive,
 526                                                       BoolObjectClosure* is_alive)
 527   : _refs_list(refs_list),
 528     _prev_next(refs_list.adr_head()),
 529     _prev(NULL),
 530     _ref(refs_list.head()),
 531 #ifdef ASSERT
 532     _first_seen(refs_list.head()),
 533 #endif
 534 #ifndef PRODUCT
 535     _processed(0),
 536     _removed(0),
 537 #endif
 538     _next(NULL),
 539     _keep_alive(keep_alive),
 540     _is_alive(is_alive)
 541 { }
 542 
 543 inline bool DiscoveredListIterator::is_referent_alive() const {
 544   return _is_alive->do_object_b(_referent);
 545 }
 546 
 547 inline void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
 548   _discovered_addr = java_lang_ref_Reference::discovered_addr(_ref);
 549   oop discovered = java_lang_ref_Reference::discovered(_ref);
 550   assert(_discovered_addr && discovered->is_oop_or_null(),
 551          "discovered field is bad");
 552   _next = discovered;
 553   _referent_addr = java_lang_ref_Reference::referent_addr(_ref);
 554   _referent = java_lang_ref_Reference::referent(_ref);
 555   assert(Universe::heap()->is_in_reserved_or_null(_referent),
 556          "Wrong oop found in java.lang.Reference object");
 557   assert(allow_null_referent ?
 558              _referent->is_oop_or_null()
 559            : _referent->is_oop(),
 560          "bad referent");
 561 }
 562 
 563 inline void DiscoveredListIterator::next() {
 564   _prev_next = _discovered_addr;
 565   _prev = _ref;
 566   move_to_next();
 567 }
 568 
 569 inline void DiscoveredListIterator::remove() {
 570   assert(_ref->is_oop(), "Dropping a bad reference");
 571   oop_store_raw(_discovered_addr, NULL);
 572 
 573   // First _prev_next ref actually points into DiscoveredList (gross).
 574   oop new_next;
 575   if (_next == _ref) {
 576     // At the end of the list, we should make _prev point to itself.
 577     // If _ref is the first ref, then _prev_next will be in the DiscoveredList,
 578     // and _prev will be NULL.
 579     new_next = _prev;
 580   } else {
 581     new_next = _next;
 582   }
 583 
 584   if (UseCompressedOops) {
 585     // Remove Reference object from list.
 586     oopDesc::encode_store_heap_oop((narrowOop*)_prev_next, new_next);
 587   } else {
 588     // Remove Reference object from list.
 589     oopDesc::store_heap_oop((oop*)_prev_next, new_next);
 590   }
 591   NOT_PRODUCT(_removed++);
 592   _refs_list.dec_length(1);
 593 }
 594 
 595 inline void DiscoveredListIterator::move_to_next() {
 596   if (_ref == _next) {
 597     // End of the list.
 598     _ref = NULL;
 599   } else {
 600     _ref = _next;
 601   }
 602   assert(_ref != _first_seen, "cyclic ref_list found");
 603   NOT_PRODUCT(_processed++);
 604 }
 605 
 606 // NOTE: process_phase*() are largely similar, and at a high level
 607 // merely iterate over the extant list applying a predicate to
 608 // each of its elements and possibly removing that element from the
 609 // list and applying some further closures to that element.
 610 // We should consider the possibility of replacing these
 611 // process_phase*() methods by abstracting them into
 612 // a single general iterator invocation that receives appropriate
 613 // closures that accomplish this work.
 614 
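A rough shape for the single generalized iteration this note contemplates, reusing the idiom sketched after the DiscoveredListIterator declaration above; the helper and its should_drop predicate are hypothetical, not part of this change:

    // void process_discovered_list(DiscoveredList&    refs_list,
    //                              BoolObjectClosure* should_drop,  // hypothetical phase-specific predicate
    //                              BoolObjectClosure* is_alive,
    //                              OopClosure*        keep_alive,
    //                              VoidClosure*       complete_gc);
    //
    // Each process_phase*() would then become a single call with the appropriate
    // should_drop predicate and closures.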
 615 // (SoftReferences only) Traverse the list and remove any SoftReferences whose
 616 // referents are not alive, but that should be kept alive for policy reasons.
 617 // Keep alive the transitive closure of all such referents.
 618 void
 619 ReferenceProcessor::process_phase1(DiscoveredList&    refs_list,
 620                                    ReferencePolicy*   policy,
 621                                    BoolObjectClosure* is_alive,
 622                                    OopClosure*        keep_alive,
 623                                    VoidClosure*       complete_gc) {


 769 void
 770 ReferenceProcessor::clear_discovered_references(DiscoveredList& refs_list) {
 771   oop obj = NULL;
 772   oop next = refs_list.head();
 773   while (next != obj) {
 774     obj = next;
 775     next = java_lang_ref_Reference::discovered(obj);
 776     java_lang_ref_Reference::set_discovered_raw(obj, NULL);
 777   }
 778   refs_list.set_head(NULL);
 779   refs_list.set_length(0);
 780 }
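The loop above relies on the convention that the last Reference on a discovered list points to itself through its discovered field, so the "next == obj" test marks the end of the list. A short illustrative walk over a three-element list A -> B -> C (names made up for the example):

    // obj = NULL, next = A                     enter loop (next != obj)
    //   obj = A,  next = A.discovered = B,     A.discovered := NULL
    //   obj = B,  next = B.discovered = C,     B.discovered := NULL
    //   obj = C,  next = C.discovered = C,     C.discovered := NULL   (self-loop tail)
    // next == obj == C, so the loop exits.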
 781 
 782 void
 783 ReferenceProcessor::abandon_partial_discovered_list(DiscoveredList& refs_list) {
 784   clear_discovered_references(refs_list);
 785 }
 786 
 787 void ReferenceProcessor::abandon_partial_discovery() {
 788   // loop over the lists
 789   for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
 790     if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
 791       gclog_or_tty->print_cr("\nAbandoning %s discovered list",
 792                              list_name(i));
 793     }
 794     abandon_partial_discovered_list(_discoveredSoftRefs[i]);
 795   }
 796 }
 797 
 798 class RefProcPhase1Task: public AbstractRefProcTaskExecutor::ProcessTask {
 799 public:
 800   RefProcPhase1Task(ReferenceProcessor& ref_processor,
 801                     DiscoveredList      refs_lists[],
 802                     ReferencePolicy*    policy,
 803                     bool                marks_oops_alive)
 804     : ProcessTask(ref_processor, refs_lists, marks_oops_alive),
 805       _policy(policy)
 806   { }
 807   virtual void work(unsigned int i, BoolObjectClosure& is_alive,
 808                     OopClosure& keep_alive,
 809                     VoidClosure& complete_gc)
 810   {
 811     Thread* thr = Thread::current();
 812     int refs_list_index = ((WorkerThread*)thr)->id();


 841                     bool                marks_oops_alive)
 842     : ProcessTask(ref_processor, refs_lists, marks_oops_alive),
 843       _clear_referent(clear_referent)
 844   { }
 845   virtual void work(unsigned int i, BoolObjectClosure& is_alive,
 846                     OopClosure& keep_alive,
 847                     VoidClosure& complete_gc)
 848   {
 849     // Don't use "refs_list_index" calculated in this way because
 850     // balance_queues() has moved the Refs into the first n queues.
 851     // Thread* thr = Thread::current();
 852     // int refs_list_index = ((WorkerThread*)thr)->id();
 853     // _ref_processor.process_phase3(_refs_lists[refs_list_index], _clear_referent,
 854     _ref_processor.process_phase3(_refs_lists[i], _clear_referent,
 855                                   &is_alive, &keep_alive, &complete_gc);
 856   }
 857 private:
 858   bool _clear_referent;
 859 };
 860
 861 // Balances reference queues.
 862 // Move entries from all queues[0, 1, ..., _max_num_q-1] to
 863 // queues[0, 1, ..., _num_q-1], because only the first _num_q queues,
 864 // which correspond to the active workers, will be processed.
 865 void ReferenceProcessor::balance_queues(DiscoveredList ref_lists[])
 866 {
 867   // calculate total length
 868   size_t total_refs = 0;
 869   if (TraceReferenceGC && PrintGCDetails) {
 870     gclog_or_tty->print_cr("\nBalance ref_lists ");
 871   }
 872 
 873   for (int i = 0; i < _max_num_q; ++i) {
 874     total_refs += ref_lists[i].length();
 875     if (TraceReferenceGC && PrintGCDetails) {
 876       gclog_or_tty->print("%d ", ref_lists[i].length());
 877     }
 878   }
 879   if (TraceReferenceGC && PrintGCDetails) {
 880     gclog_or_tty->print_cr(" = %d", total_refs);


 898                               avg_refs - ref_lists[to_idx].length());
 899         } else {
 900           refs_to_move = MIN2(ref_lists[from_idx].length() - avg_refs,
 901                               avg_refs - ref_lists[to_idx].length());
 902         }
 903 
 904         assert(refs_to_move > 0, "otherwise the code below will fail");
 905 
 906         oop move_head = ref_lists[from_idx].head();
 907         oop move_tail = move_head;
 908         oop new_head  = move_head;
 909         // find an element to split the list on
 910         for (size_t j = 0; j < refs_to_move; ++j) {
 911           move_tail = new_head;
 912           new_head = java_lang_ref_Reference::discovered(new_head);
 913         }
 914 
 915         // Add the chain to the to list.
 916         if (ref_lists[to_idx].head() == NULL) {
 917           // to list is empty. Make a loop at the end.
 918           java_lang_ref_Reference::set_discovered(move_tail, move_tail);
 919         } else {
 920           java_lang_ref_Reference::set_discovered(move_tail, ref_lists[to_idx].head());
 921         }
 922         ref_lists[to_idx].set_head(move_head);
 923         ref_lists[to_idx].inc_length(refs_to_move);
 924 
 925         // Remove the chain from the from list.
 926         if (move_tail == new_head) {
 927           // We found the end of the from list.
 928           ref_lists[from_idx].set_head(NULL);
 929         } else {
 930           ref_lists[from_idx].set_head(new_head);
 931         }
 932         ref_lists[from_idx].dec_length(refs_to_move);
 933         if (ref_lists[from_idx].length() == 0) {
 934           break;
 935         }
 936       } else {
 937         to_idx = (to_idx + 1) % _num_q;
 938       }
 939     }
 940   }


1021     for (int i = 0; i < _max_num_q; i++) {
1022       process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc);
1023     }
1024   }
1025 
1026   // Phase 3:
1027   // . Traverse the list and process referents as appropriate.
1028   if (mt_processing) {
1029     RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/);
1030     task_executor->execute(phase3);
1031   } else {
1032     for (int i = 0; i < _max_num_q; i++) {
1033       process_phase3(refs_lists[i], clear_referent,
1034                      is_alive, keep_alive, complete_gc);
1035     }
1036   }
1037 }
1038 
1039 void ReferenceProcessor::clean_up_discovered_references() {
1040   // loop over the lists
1041   // Should this instead be
 1042   // for (int i = 0; i < subclasses_of_ref; i++) {
1043   //   for (int j = 0; j < _num_q; j++) {
1044   //     int index = i * _max_num_q + j;
 1045   for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
1046     if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
1047       gclog_or_tty->print_cr(
1048         "\nScrubbing %s discovered list of Null referents",
1049         list_name(i));
1050     }
1051     clean_up_discovered_reflist(_discoveredSoftRefs[i]);
1052   }
1053 }
1054 
1055 void ReferenceProcessor::clean_up_discovered_reflist(DiscoveredList& refs_list) {
1056   assert(!discovery_is_atomic(), "Else why call this method?");
1057   DiscoveredListIterator iter(refs_list, NULL, NULL);
1058   while (iter.has_next()) {
1059     iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
1060     oop next = java_lang_ref_Reference::next(iter.obj());
1061     assert(next->is_oop_or_null(), "bad next field");
1062     // If referent has been cleared or Reference is not active,
1063     // drop it.
1064     if (iter.referent() == NULL || next != NULL) {
1065       debug_only(


1243   // known to be strongly reachable.
1244   if (is_alive_non_header() != NULL) {
1245     verify_referent(obj);
1246     if (is_alive_non_header()->do_object_b(java_lang_ref_Reference::referent(obj))) {
1247       return false;  // referent is reachable
1248     }
1249   }
1250   if (rt == REF_SOFT) {
1251     // For soft refs we can decide now if these are not
1252     // current candidates for clearing, in which case we
1253     // can mark through them now, rather than delaying that
1254     // to the reference-processing phase. Since all current
1255     // time-stamp policies advance the soft-ref clock only
1256     // at a major collection cycle, this is always currently
1257     // accurate.
1258     if (!_current_soft_ref_policy->should_clear_reference(obj)) {
1259       return false;
1260     }
1261   }
 1262
1263   HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
1264   const oop  discovered = java_lang_ref_Reference::discovered(obj);
1265   assert(discovered->is_oop_or_null(), "bad discovered field");
1266   if (discovered != NULL) {
1267     // The reference has already been discovered...
1268     if (TraceReferenceGC) {
1269       gclog_or_tty->print_cr("Already discovered reference (" INTPTR_FORMAT ": %s)",
1270                              obj, obj->blueprint()->internal_name());
1271     }
1272     if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
1273       // assumes that an object is not processed twice;
1274       // if it's been already discovered it must be on another
1275       // generation's discovered list; so we won't discover it.
1276       return false;
1277     } else {
1278       assert(RefDiscoveryPolicy == ReferenceBasedDiscovery,
1279              "Unrecognized policy");
1280       // Check assumption that an object is not potentially
1281       // discovered twice except by concurrent collectors that potentially
1282       // trace the same Reference object twice.


1455         keep_alive->do_oop(next_addr);
1456       }
1457       iter.move_to_next();
1458     } else {
1459       iter.next();
1460     }
1461   }
1462   // Close the reachable set
1463   complete_gc->do_void();
1464 
1465   NOT_PRODUCT(
1466     if (PrintGCDetails && PrintReferenceGC && (iter.processed() > 0)) {
1467       gclog_or_tty->print_cr(" Dropped %d Refs out of %d "
1468         "Refs in discovered list " INTPTR_FORMAT,
1469         iter.removed(), iter.processed(), (address)refs_list.head());
1470     }
1471   )
1472 }
1473 
1474 const char* ReferenceProcessor::list_name(int i) {
 1475    assert(i >= 0 && i <= _max_num_q * subclasses_of_ref, "Out of bounds index");
1476    int j = i / _max_num_q;
1477    switch (j) {
1478      case 0: return "SoftRef";
1479      case 1: return "WeakRef";
1480      case 2: return "FinalRef";
1481      case 3: return "PhantomRef";
1482    }
1483    ShouldNotReachHere();
1484    return NULL;
1485 }
1486 
1487 #ifndef PRODUCT
1488 void ReferenceProcessor::verify_ok_to_handle_reflists() {
1489   // empty for now
1490 }
1491 #endif
1492 
1493 #ifndef PRODUCT
1494 void ReferenceProcessor::clear_discovered_references() {
1495   guarantee(!_discovering_refs, "Discovering refs?");
1496   for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
1497     clear_discovered_references(_discoveredSoftRefs[i]);
1498   }
1499 }
1500 
1501 #endif // PRODUCT


  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/javaClasses.hpp"
  27 #include "classfile/systemDictionary.hpp"
  28 #include "gc_interface/collectedHeap.hpp"
  29 #include "gc_interface/collectedHeap.inline.hpp"
  30 #include "memory/referencePolicy.hpp"
  31 #include "memory/referenceProcessor.hpp"
  32 #include "oops/oop.inline.hpp"
  33 #include "runtime/java.hpp"
  34 #include "runtime/jniHandles.hpp"
  35 
  36 ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
  37 ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy      = NULL;
  38 bool             ReferenceProcessor::_pending_list_uses_discovered_field = false;
  39
  40 void referenceProcessor_init() {
  41   ReferenceProcessor::init_statics();
  42 }
  43 
  44 void ReferenceProcessor::init_statics() {
  45   // Initialize the master soft ref clock.
  46   java_lang_ref_SoftReference::set_clock(os::javaTimeMillis());
  47 
  48   _always_clear_soft_ref_policy = new AlwaysClearPolicy();
  49   _default_soft_ref_policy      = new COMPILER2_PRESENT(LRUMaxHeapPolicy())
  50                                       NOT_COMPILER2(LRUCurrentHeapPolicy());
  51   if (_always_clear_soft_ref_policy == NULL || _default_soft_ref_policy == NULL) {
  52     vm_exit_during_initialization("Could not allocate reference policy object");
  53   }
  54   guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery ||
  55             RefDiscoveryPolicy == ReferentBasedDiscovery,
  56             "Unrecognized RefDiscoveryPolicy");
  57   _pending_list_uses_discovered_field = JDK_Version::current().pending_list_uses_discovered_field();
  58 }
  59 


  61                                        bool      mt_processing,
  62                                        int       mt_processing_degree,
  63                                        bool      mt_discovery,
  64                                        int       mt_discovery_degree,
  65                                        bool      atomic_discovery,
  66                                        BoolObjectClosure* is_alive_non_header,
  67                                        bool      discovered_list_needs_barrier)  :
  68   _discovering_refs(false),
  69   _enqueuing_is_done(false),
  70   _is_alive_non_header(is_alive_non_header),
  71   _discovered_list_needs_barrier(discovered_list_needs_barrier),
  72   _bs(NULL),
  73   _processing_is_mt(mt_processing),
  74   _next_id(0)
  75 {
  76   _span = span;
  77   _discovery_is_atomic = atomic_discovery;
  78   _discovery_is_mt     = mt_discovery;
  79   _num_q               = MAX2(1, mt_processing_degree);
  80   _max_num_q           = MAX2(_num_q, mt_discovery_degree);
  81   _discoveredSoftRefs  = NEW_C_HEAP_ARRAY(DiscoveredList,
  82                                           _max_num_q * number_of_subclasses_of_ref());
  83   if (_discoveredSoftRefs == NULL) {
  84     vm_exit_during_initialization("Could not allocate RefProc Array");
  85   }
  86   _discoveredWeakRefs    = &_discoveredSoftRefs[_max_num_q];
  87   _discoveredFinalRefs   = &_discoveredWeakRefs[_max_num_q];
  88   _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_q];
  89   // Initialize all entries to NULL
  90   for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
  91     _discoveredSoftRefs[i].set_head(NULL);
  92     _discoveredSoftRefs[i].set_length(0);
  93   }
  94   // If we do barriers, cache a copy of the barrier set.
  95   if (discovered_list_needs_barrier) {
  96     _bs = Universe::heap()->barrier_set();
  97   }
  98   setup_policy(false /* default soft ref policy */);
  99 }
 100 
 101 #ifndef PRODUCT
 102 void ReferenceProcessor::verify_no_references_recorded() {
 103   guarantee(!_discovering_refs, "Discovering refs?");
 104   for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
 105     guarantee(_discoveredSoftRefs[i].is_empty(),
 106               "Found non-empty discovered list");
 107   }
 108 }
 109 #endif
 110 
 111 void ReferenceProcessor::weak_oops_do(OopClosure* f) {
 112   // An alternative implementation of this routine
 113   // could use the following nested loop:
 114   //
 115   // for (int i = 0; i < number_of_subclasses_of_ref(); i++) {
 116   //   for (int j = 0; j < _num_q; j++) {
 117   //     int index = i * _max_num_q + j;
 118 
 119   for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
 120     if (UseCompressedOops) {
 121       f->do_oop((narrowOop*)_discoveredSoftRefs[i].adr_head());
 122     } else {
 123       f->do_oop((oop*)_discoveredSoftRefs[i].adr_head());
 124     }
 125   }
 126 }
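Written out, the nested loop the comment above describes would look roughly like this (a sketch only; note that bounding the inner loop by _num_q visits only the first _num_q queues of each subclass, whereas the flat loop in weak_oops_do() above visits all _max_num_q of them):

    for (int i = 0; i < number_of_subclasses_of_ref(); i++) {
      for (int j = 0; j < _num_q; j++) {
        int index = i * _max_num_q + j;
        if (UseCompressedOops) {
          f->do_oop((narrowOop*)_discoveredSoftRefs[index].adr_head());
        } else {
          f->do_oop((oop*)_discoveredSoftRefs[index].adr_head());
        }
      }
    }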
 127 
 128 void ReferenceProcessor::update_soft_ref_master_clock() {
 129   // Update (advance) the soft ref master clock field. This must be done
 130   // after processing the soft ref list.
 131   jlong now = os::javaTimeMillis();
 132   jlong clock = java_lang_ref_SoftReference::clock();
 133   NOT_PRODUCT(
 134   if (now < clock) {
 135     warning("time warp: %d to %d", clock, now);
 136   }
 137   )
 138   // In product mode, protect ourselves from system time being adjusted
 139   // externally and going backward; see note in the implementation of


 357 class RefProcEnqueueTask: public AbstractRefProcTaskExecutor::EnqueueTask {
 358 public:
 359   RefProcEnqueueTask(ReferenceProcessor& ref_processor,
 360                      DiscoveredList      discovered_refs[],
 361                      HeapWord*           pending_list_addr,
 362                      int                 n_queues)
 363     : EnqueueTask(ref_processor, discovered_refs,
 364                   pending_list_addr, n_queues)
 365   { }
 366 
 367   virtual void work(unsigned int work_id) {
 368     assert(work_id < (unsigned int)_ref_processor.max_num_q(), "Index out-of-bounds");
 369     // Simplest first cut: static partitioning.
 370     int index = work_id;
 371     // The increment on "index" must correspond to the maximum number of queues
 372     // (n_queues) with which that ReferenceProcessor was created.  That
 373     // is because of the "clever" way the discovered references lists were
 374     // allocated and are indexed into.
 375     assert(_n_queues == (int) _ref_processor.max_num_q(), "Different number not expected");
 376     for (int j = 0;
 377          j < ReferenceProcessor::number_of_subclasses_of_ref();
 378          j++, index += _n_queues) {
 379       _ref_processor.enqueue_discovered_reflist(
 380         _refs_lists[index], _pending_list_addr);
 381       _refs_lists[index].set_head(NULL);
 382       _refs_lists[index].set_length(0);
 383     }
 384   }
 385 };
 386 
 387 // Enqueue references that are not made active again
 388 void ReferenceProcessor::enqueue_discovered_reflists(HeapWord* pending_list_addr,
 389   AbstractRefProcTaskExecutor* task_executor) {
 390   if (_processing_is_mt && task_executor != NULL) {
 391     // Parallel code
 392     RefProcEnqueueTask tsk(*this, _discoveredSoftRefs,
 393                            pending_list_addr, _max_num_q);
 394     task_executor->execute(tsk);
 395   } else {
 396     // Serial code: call the parent class's implementation
 397     for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
 398       enqueue_discovered_reflist(_discoveredSoftRefs[i], pending_list_addr);
 399       _discoveredSoftRefs[i].set_head(NULL);
 400       _discoveredSoftRefs[i].set_length(0);
 401     }
 402   }
 403 }
 404 
 405 void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
 406   _discovered_addr = java_lang_ref_Reference::discovered_addr(_ref);
 407   oop discovered = java_lang_ref_Reference::discovered(_ref);
 408   assert(_discovered_addr && discovered->is_oop_or_null(),
 409          "discovered field is bad");
 410   _next = discovered;
 411   _referent_addr = java_lang_ref_Reference::referent_addr(_ref);
 412   _referent = java_lang_ref_Reference::referent(_ref);
 413   assert(Universe::heap()->is_in_reserved_or_null(_referent),
 414          "Wrong oop found in java.lang.Reference object");
 415   assert(allow_null_referent ?
 416              _referent->is_oop_or_null()
 417            : _referent->is_oop(),
 418          "bad referent");
 419 }
 420 
 421 void DiscoveredListIterator::remove() {
 422   assert(_ref->is_oop(), "Dropping a bad reference");
 423   oop_store_raw(_discovered_addr, NULL);
 424 
 425   // First _prev_next ref actually points into DiscoveredList (gross).
 426   oop new_next;
 427   if (_next == _ref) {
 428     // At the end of the list, we should make _prev point to itself.
 429     // If _ref is the first ref, then _prev_next will be in the DiscoveredList,
 430     // and _prev will be NULL.
 431     new_next = _prev;
 432   } else {
 433     new_next = _next;
 434   }
 435 
 436   if (UseCompressedOops) {
 437     // Remove Reference object from list.
 438     oopDesc::encode_store_heap_oop((narrowOop*)_prev_next, new_next);
 439   } else {
 440     // Remove Reference object from list.
 441     oopDesc::store_heap_oop((oop*)_prev_next, new_next);
 442   }
 443   NOT_PRODUCT(_removed++);
 444   _refs_list.dec_length(1);
 445 }
 446 
 447 // Make the Reference object active again.
 448 void DiscoveredListIterator::make_active() {
 449   // For G1 we don't want to use set_next - it
 450   // will dirty the card for the next field of
 451   // the reference object and will fail
 452   // CT verification.
 453   if (UseG1GC) {
 454     BarrierSet* bs = oopDesc::bs();
 455     HeapWord* next_addr = java_lang_ref_Reference::next_addr(_ref);
 456 
 457     if (UseCompressedOops) {
 458       bs->write_ref_field_pre((narrowOop*)next_addr, NULL);
 459     } else {
 460       bs->write_ref_field_pre((oop*)next_addr, NULL);
 461     }
 462     java_lang_ref_Reference::set_next_raw(_ref, NULL);
 463   } else {
 464     java_lang_ref_Reference::set_next(_ref, NULL);
 465   }
 466 }
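A minimal sketch of the same "SATB pre-barrier, then raw store" pattern applied to an arbitrary reference field; the helper is hypothetical and reuses only calls that appear in make_active() above (oopDesc::bs(), write_ref_field_pre(), oop_store_raw()):

    // Clear *field_addr without dirtying its card: notify the pre-barrier of the
    // old value first, then store NULL with no post-barrier.
    static void clear_ref_field_with_pre_barrier(HeapWord* field_addr) {
      BarrierSet* bs = oopDesc::bs();
      if (UseCompressedOops) {
        bs->write_ref_field_pre((narrowOop*)field_addr, NULL);
      } else {
        bs->write_ref_field_pre((oop*)field_addr, NULL);
      }
      oop_store_raw(field_addr, NULL);
    }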
 467 
 468 void DiscoveredListIterator::clear_referent() {
 469   oop_store_raw(_referent_addr, NULL);
 470 }
 471 
 472 // NOTE: process_phase*() are largely similar, and at a high level
 473 // merely iterate over the extant list applying a predicate to
 474 // each of its elements and possibly removing that element from the
 475 // list and applying some further closures to that element.
 476 // We should consider the possibility of replacing these
 477 // process_phase*() methods by abstracting them into
 478 // a single general iterator invocation that receives appropriate
 479 // closures that accomplish this work.
 480 
 481 // (SoftReferences only) Traverse the list and remove any SoftReferences whose
 482 // referents are not alive, but that should be kept alive for policy reasons.
 483 // Keep alive the transitive closure of all such referents.
 484 void
 485 ReferenceProcessor::process_phase1(DiscoveredList&    refs_list,
 486                                    ReferencePolicy*   policy,
 487                                    BoolObjectClosure* is_alive,
 488                                    OopClosure*        keep_alive,
 489                                    VoidClosure*       complete_gc) {


 635 void
 636 ReferenceProcessor::clear_discovered_references(DiscoveredList& refs_list) {
 637   oop obj = NULL;
 638   oop next = refs_list.head();
 639   while (next != obj) {
 640     obj = next;
 641     next = java_lang_ref_Reference::discovered(obj);
 642     java_lang_ref_Reference::set_discovered_raw(obj, NULL);
 643   }
 644   refs_list.set_head(NULL);
 645   refs_list.set_length(0);
 646 }
 647 
 648 void
 649 ReferenceProcessor::abandon_partial_discovered_list(DiscoveredList& refs_list) {
 650   clear_discovered_references(refs_list);
 651 }
 652 
 653 void ReferenceProcessor::abandon_partial_discovery() {
 654   // loop over the lists
 655   for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
 656     if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
 657       gclog_or_tty->print_cr("\nAbandoning %s discovered list", list_name(i));
 658     }
 659     abandon_partial_discovered_list(_discoveredSoftRefs[i]);
 660   }
 661 }
 662 
 663 class RefProcPhase1Task: public AbstractRefProcTaskExecutor::ProcessTask {
 664 public:
 665   RefProcPhase1Task(ReferenceProcessor& ref_processor,
 666                     DiscoveredList      refs_lists[],
 667                     ReferencePolicy*    policy,
 668                     bool                marks_oops_alive)
 669     : ProcessTask(ref_processor, refs_lists, marks_oops_alive),
 670       _policy(policy)
 671   { }
 672   virtual void work(unsigned int i, BoolObjectClosure& is_alive,
 673                     OopClosure& keep_alive,
 674                     VoidClosure& complete_gc)
 675   {
 676     Thread* thr = Thread::current();
 677     int refs_list_index = ((WorkerThread*)thr)->id();


 706                     bool                marks_oops_alive)
 707     : ProcessTask(ref_processor, refs_lists, marks_oops_alive),
 708       _clear_referent(clear_referent)
 709   { }
 710   virtual void work(unsigned int i, BoolObjectClosure& is_alive,
 711                     OopClosure& keep_alive,
 712                     VoidClosure& complete_gc)
 713   {
 714     // Don't use "refs_list_index" calculated in this way because
 715     // balance_queues() has moved the Refs into the first n queues.
 716     // Thread* thr = Thread::current();
 717     // int refs_list_index = ((WorkerThread*)thr)->id();
 718     // _ref_processor.process_phase3(_refs_lists[refs_list_index], _clear_referent,
 719     _ref_processor.process_phase3(_refs_lists[i], _clear_referent,
 720                                   &is_alive, &keep_alive, &complete_gc);
 721   }
 722 private:
 723   bool _clear_referent;
 724 };
 725 
 726 void ReferenceProcessor::set_discovered(oop ref, oop value) {
 727   if (_discovered_list_needs_barrier) {
 728     java_lang_ref_Reference::set_discovered(ref, value);
 729   } else {
 730     java_lang_ref_Reference::set_discovered_raw(ref, value);
 731   }
 732 }
 733 
 734 // Balances reference queues.
 735 // Move entries from all queues[0, 1, ..., _max_num_q-1] to
 736 // queues[0, 1, ..., _num_q-1], because only the first _num_q queues,
 737 // which correspond to the active workers, will be processed.
 738 void ReferenceProcessor::balance_queues(DiscoveredList ref_lists[])
 739 {
 740   // calculate total length
 741   size_t total_refs = 0;
 742   if (TraceReferenceGC && PrintGCDetails) {
 743     gclog_or_tty->print_cr("\nBalance ref_lists ");
 744   }
 745 
 746   for (int i = 0; i < _max_num_q; ++i) {
 747     total_refs += ref_lists[i].length();
 748     if (TraceReferenceGC && PrintGCDetails) {
 749       gclog_or_tty->print("%d ", ref_lists[i].length());
 750     }
 751   }
 752   if (TraceReferenceGC && PrintGCDetails) {
 753     gclog_or_tty->print_cr(" = %d", total_refs);


 771                               avg_refs - ref_lists[to_idx].length());
 772         } else {
 773           refs_to_move = MIN2(ref_lists[from_idx].length() - avg_refs,
 774                               avg_refs - ref_lists[to_idx].length());
 775         }
 776 
 777         assert(refs_to_move > 0, "otherwise the code below will fail");
 778 
 779         oop move_head = ref_lists[from_idx].head();
 780         oop move_tail = move_head;
 781         oop new_head  = move_head;
 782         // find an element to split the list on
 783         for (size_t j = 0; j < refs_to_move; ++j) {
 784           move_tail = new_head;
 785           new_head = java_lang_ref_Reference::discovered(new_head);
 786         }
 787 
 788         // Add the chain to the to list.
 789         if (ref_lists[to_idx].head() == NULL) {
 790           // to list is empty. Make a loop at the end.
 791           set_discovered(move_tail, move_tail);
 792         } else {
 793           set_discovered(move_tail, ref_lists[to_idx].head());
 794         }
 795         ref_lists[to_idx].set_head(move_head);
 796         ref_lists[to_idx].inc_length(refs_to_move);
 797 
 798         // Remove the chain from the from list.
 799         if (move_tail == new_head) {
 800           // We found the end of the from list.
 801           ref_lists[from_idx].set_head(NULL);
 802         } else {
 803           ref_lists[from_idx].set_head(new_head);
 804         }
 805         ref_lists[from_idx].dec_length(refs_to_move);
 806         if (ref_lists[from_idx].length() == 0) {
 807           break;
 808         }
 809       } else {
 810         to_idx = (to_idx + 1) % _num_q;
 811       }
 812     }
 813   }


 894     for (int i = 0; i < _max_num_q; i++) {
 895       process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc);
 896     }
 897   }
 898 
 899   // Phase 3:
 900   // . Traverse the list and process referents as appropriate.
 901   if (mt_processing) {
 902     RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/);
 903     task_executor->execute(phase3);
 904   } else {
 905     for (int i = 0; i < _max_num_q; i++) {
 906       process_phase3(refs_lists[i], clear_referent,
 907                      is_alive, keep_alive, complete_gc);
 908     }
 909   }
 910 }
 911 
 912 void ReferenceProcessor::clean_up_discovered_references() {
 913   // loop over the lists
 914 
 915   // An alternative implementation of this routine could
 916   // use the following nested loop:
 917   //
 918   // for (int i = 0; i < number_of_subclasses_of_ref(); i++) {
 919   //   for (int j = 0; j < _num_q; j++) {
 920   //     int index = i * _max_num_q + j;
 921 
 922   for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
 923     if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
 924       gclog_or_tty->print_cr(
 925         "\nScrubbing %s discovered list of Null referents",
 926         list_name(i));
 927     }
 928     clean_up_discovered_reflist(_discoveredSoftRefs[i]);
 929   }
 930 }
 931 
 932 void ReferenceProcessor::clean_up_discovered_reflist(DiscoveredList& refs_list) {
 933   assert(!discovery_is_atomic(), "Else why call this method?");
 934   DiscoveredListIterator iter(refs_list, NULL, NULL);
 935   while (iter.has_next()) {
 936     iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
 937     oop next = java_lang_ref_Reference::next(iter.obj());
 938     assert(next->is_oop_or_null(), "bad next field");
 939     // If referent has been cleared or Reference is not active,
 940     // drop it.
 941     if (iter.referent() == NULL || next != NULL) {
 942       debug_only(


1120   // known to be strongly reachable.
1121   if (is_alive_non_header() != NULL) {
1122     verify_referent(obj);
1123     if (is_alive_non_header()->do_object_b(java_lang_ref_Reference::referent(obj))) {
1124       return false;  // referent is reachable
1125     }
1126   }
1127   if (rt == REF_SOFT) {
1128     // For soft refs we can decide now if these are not
1129     // current candidates for clearing, in which case we
1130     // can mark through them now, rather than delaying that
1131     // to the reference-processing phase. Since all current
1132     // time-stamp policies advance the soft-ref clock only
1133     // at a major collection cycle, this is always currently
1134     // accurate.
1135     if (!_current_soft_ref_policy->should_clear_reference(obj)) {
1136       return false;
1137     }
1138   }
1139 
1140   ResourceMark rm;      // Needed for tracing.
1141 
1142   HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
1143   const oop  discovered = java_lang_ref_Reference::discovered(obj);
1144   assert(discovered->is_oop_or_null(), "bad discovered field");
1145   if (discovered != NULL) {
1146     // The reference has already been discovered...
1147     if (TraceReferenceGC) {
1148       gclog_or_tty->print_cr("Already discovered reference (" INTPTR_FORMAT ": %s)",
1149                              obj, obj->blueprint()->internal_name());
1150     }
1151     if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
1152       // assumes that an object is not processed twice;
1153       // if it's been already discovered it must be on another
1154       // generation's discovered list; so we won't discover it.
1155       return false;
1156     } else {
1157       assert(RefDiscoveryPolicy == ReferenceBasedDiscovery,
1158              "Unrecognized policy");
1159       // Check assumption that an object is not potentially
1160       // discovered twice except by concurrent collectors that potentially
1161       // trace the same Reference object twice.


1334         keep_alive->do_oop(next_addr);
1335       }
1336       iter.move_to_next();
1337     } else {
1338       iter.next();
1339     }
1340   }
1341   // Close the reachable set
1342   complete_gc->do_void();
1343 
1344   NOT_PRODUCT(
1345     if (PrintGCDetails && PrintReferenceGC && (iter.processed() > 0)) {
1346       gclog_or_tty->print_cr(" Dropped %d Refs out of %d "
1347         "Refs in discovered list " INTPTR_FORMAT,
1348         iter.removed(), iter.processed(), (address)refs_list.head());
1349     }
1350   )
1351 }
1352 
1353 const char* ReferenceProcessor::list_name(int i) {
1354    assert(i >= 0 && i <= _max_num_q * number_of_subclasses_of_ref(),
1355           "Out of bounds index");
1356 
1357    int j = i / _max_num_q;
1358    switch (j) {
1359      case 0: return "SoftRef";
1360      case 1: return "WeakRef";
1361      case 2: return "FinalRef";
1362      case 3: return "PhantomRef";
1363    }
1364    ShouldNotReachHere();
1365    return NULL;
1366 }
1367 
1368 #ifndef PRODUCT
1369 void ReferenceProcessor::verify_ok_to_handle_reflists() {
1370   // empty for now
1371 }
1372 #endif
1373 
1374 #ifndef PRODUCT
1375 void ReferenceProcessor::clear_discovered_references() {
1376   guarantee(!_discovering_refs, "Discovering refs?");
1377   for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
1378     clear_discovered_references(_discoveredSoftRefs[i]);
1379   }
1380 }
1381 
1382 #endif // PRODUCT