src/share/vm/memory/referenceProcessor.cpp

  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/javaClasses.hpp"
  27 #include "classfile/systemDictionary.hpp"
  28 #include "gc_interface/collectedHeap.hpp"
  29 #include "gc_interface/collectedHeap.inline.hpp"
  30 #include "memory/referencePolicy.hpp"
  31 #include "memory/referenceProcessor.hpp"
  32 #include "oops/oop.inline.hpp"
  33 #include "runtime/java.hpp"
  34 #include "runtime/jniHandles.hpp"
  35 
  36 ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
  37 ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy      = NULL;
  38 const int        subclasses_of_ref                = REF_PHANTOM - REF_OTHER;

  39 
  40 // List of discovered references.
  41 class DiscoveredList {
  42 public:
  43   DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
  44   oop head() const     {
  45      return UseCompressedOops ?  oopDesc::decode_heap_oop(_compressed_head) :
  46                                 _oop_head;
  47   }
  48   HeapWord* adr_head() {
  49     return UseCompressedOops ? (HeapWord*)&_compressed_head :
  50                                (HeapWord*)&_oop_head;
  51   }
  52   void   set_head(oop o) {
  53     if (UseCompressedOops) {
  54       // Must compress the head ptr.
  55       _compressed_head = oopDesc::encode_heap_oop(o);
  56     } else {
  57       _oop_head = o;
  58     }


  70   size_t _len;
  71 };
  72 
  73 void referenceProcessor_init() {
  74   ReferenceProcessor::init_statics();
  75 }
  76 
  77 void ReferenceProcessor::init_statics() {
  78   // Initialize the master soft ref clock.
  79   java_lang_ref_SoftReference::set_clock(os::javaTimeMillis());
  80 
  81   _always_clear_soft_ref_policy = new AlwaysClearPolicy();
  82   _default_soft_ref_policy      = new COMPILER2_PRESENT(LRUMaxHeapPolicy())
  83                                       NOT_COMPILER2(LRUCurrentHeapPolicy());
  84   if (_always_clear_soft_ref_policy == NULL || _default_soft_ref_policy == NULL) {
  85     vm_exit_during_initialization("Could not allocate reference policy object");
  86   }
  87   guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery ||
  88             RefDiscoveryPolicy == ReferentBasedDiscovery,
  89             "Unrecognized RefDiscoveryPolicy");

  90 }
  91 
  92 ReferenceProcessor::ReferenceProcessor(MemRegion span,
  93                                        bool      mt_processing,
  94                                        int       mt_processing_degree,
  95                                        bool      mt_discovery,
  96                                        int       mt_discovery_degree,
  97                                        bool      atomic_discovery,
  98                                        BoolObjectClosure* is_alive_non_header,
  99                                        bool      discovered_list_needs_barrier)  :
 100   _discovering_refs(false),
 101   _enqueuing_is_done(false),
 102   _is_alive_non_header(is_alive_non_header),
 103   _discovered_list_needs_barrier(discovered_list_needs_barrier),
 104   _bs(NULL),
 105   _processing_is_mt(mt_processing),
 106   _next_id(0)
 107 {
 108   _span = span;
 109   _discovery_is_atomic = atomic_discovery;


 290 
 291   // Stop treating discovered references specially.
 292   ref->disable_discovery();
 293 
 294   // Return true if new pending references were added
 295   return old_pending_list_value != *pending_list_addr;
 296 }
 297 
 298 bool ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor) {
 299   NOT_PRODUCT(verify_ok_to_handle_reflists());
 300   if (UseCompressedOops) {
 301     return enqueue_discovered_ref_helper<narrowOop>(this, task_executor);
 302   } else {
 303     return enqueue_discovered_ref_helper<oop>(this, task_executor);
 304   }
 305 }
 306 
 307 void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list,
 308                                                     HeapWord* pending_list_addr) {
 309   // Given a list of refs linked through the "discovered" field
 310   // (java.lang.ref.Reference.discovered) chain them through the
 311   // "next" field (java.lang.ref.Reference.next) and prepend
  312   // to the pending list.
 313   if (TraceReferenceGC && PrintGCDetails) {
 314     gclog_or_tty->print_cr("ReferenceProcessor::enqueue_discovered_reflist list "
 315                            INTPTR_FORMAT, (address)refs_list.head());
 316   }
 317 
 318   oop obj = NULL;
  319   oop next = refs_list.head();
 320   // Walk down the list, copying the discovered field into
 321   // the next field and clearing it.
 322   while (obj != next) {
 323     obj = next;
 324     assert(obj->is_instanceRef(), "should be reference object");
 325     next = java_lang_ref_Reference::discovered(obj);
 326     if (TraceReferenceGC && PrintGCDetails) {
 327       gclog_or_tty->print_cr("        obj " INTPTR_FORMAT "/next " INTPTR_FORMAT,
 328                              obj, next);
 329     }
 330     assert(java_lang_ref_Reference::next(obj) == NULL,
 331            "The reference should not be enqueued");
 332     if (next == obj) {  // obj is last
  333       // Swap refs_list into pending_list_addr and
 334       // set obj's next to what we read from pending_list_addr.
 335       oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
 336       // Need oop_check on pending_list_addr above;
 337       // see special oop-check code at the end of
 338       // enqueue_discovered_reflists() further below.
 339       if (old == NULL) {
 340         // obj should be made to point to itself, since
 341         // pending list was empty.
 342         java_lang_ref_Reference::set_next(obj, obj);
 343       } else {
 344         java_lang_ref_Reference::set_next(obj, old);
 345       }
 346     } else {
 347       java_lang_ref_Reference::set_next(obj, next);
 348     }
 349     java_lang_ref_Reference::set_discovered(obj, (oop) NULL);
 350   }

 351 }
 352 
 353 // Parallel enqueue task
 354 class RefProcEnqueueTask: public AbstractRefProcTaskExecutor::EnqueueTask {
 355 public:
 356   RefProcEnqueueTask(ReferenceProcessor& ref_processor,
 357                      DiscoveredList      discovered_refs[],
 358                      HeapWord*           pending_list_addr,
 359                      int                 n_queues)
 360     : EnqueueTask(ref_processor, discovered_refs,
 361                   pending_list_addr, n_queues)
 362   { }
 363 
 364   virtual void work(unsigned int work_id) {
 365     assert(work_id < (unsigned int)_ref_processor.max_num_q(), "Index out-of-bounds");
 366     // Simplest first cut: static partitioning.
 367     int index = work_id;
 368     // The increment on "index" must correspond to the maximum number of queues
 369     // (n_queues) with which that ReferenceProcessor was created.  That
 370     // is because of the "clever" way the discovered references lists were


 598       if (TraceReferenceGC) {
 599         gclog_or_tty->print_cr("Dropping reference (" INTPTR_FORMAT ": %s"  ") by policy",
 600                                iter.obj(), iter.obj()->blueprint()->internal_name());
 601       }
 602       // Remove Reference object from list
 603       iter.remove();
 604       // Make the Reference object active again
 605       iter.make_active();
 606       // keep the referent around
 607       iter.make_referent_alive();
 608       iter.move_to_next();
 609     } else {
 610       iter.next();
 611     }
 612   }
 613   // Close the reachable set
 614   complete_gc->do_void();
 615   NOT_PRODUCT(
 616     if (PrintGCDetails && TraceReferenceGC) {
 617       gclog_or_tty->print_cr(" Dropped %d dead Refs out of %d "
 618         "discovered Refs by policy  list " INTPTR_FORMAT,
 619         iter.removed(), iter.processed(), (address)refs_list.head());
 620     }
 621   )
 622 }
 623 
 624 // Traverse the list and remove any Refs that are not active, or
 625 // whose referents are either alive or NULL.
 626 void
 627 ReferenceProcessor::pp2_work(DiscoveredList&    refs_list,
 628                              BoolObjectClosure* is_alive,
 629                              OopClosure*        keep_alive) {
 630   assert(discovery_is_atomic(), "Error");
 631   DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
 632   while (iter.has_next()) {
 633     iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
 634     DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());)
 635     assert(next == NULL, "Should not discover inactive Reference");
 636     if (iter.is_referent_alive()) {
 637       if (TraceReferenceGC) {
 638         gclog_or_tty->print_cr("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)",


1098   }
1099   return list;
1100 }
1101 
1102 inline void
1103 ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
1104                                               oop             obj,
1105                                               HeapWord*       discovered_addr) {
1106   assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller");
1107   // First we must make sure this object is only enqueued once. CAS in a non null
1108   // discovered_addr.
1109   oop current_head = refs_list.head();
1110   // The last ref must have its discovered field pointing to itself.
1111   oop next_discovered = (current_head != NULL) ? current_head : obj;
1112 
1113   // Note: In the case of G1, this specific pre-barrier is strictly
1114   // not necessary because the only case we are interested in
1115   // here is when *discovered_addr is NULL (see the CAS further below),
1116   // so this will expand to nothing. As a result, we have manually
1117   // elided this out for G1, but left in the test for some future
1118   // collector that might have need for a pre-barrier here.
1119   if (_discovered_list_needs_barrier && !UseG1GC) {
1120     if (UseCompressedOops) {
1121       _bs->write_ref_field_pre((narrowOop*)discovered_addr, next_discovered);
1122     } else {
1123       _bs->write_ref_field_pre((oop*)discovered_addr, next_discovered);
1124     }
1125     guarantee(false, "Need to check non-G1 collector");
1126   }
1127   oop retest = oopDesc::atomic_compare_exchange_oop(next_discovered, discovered_addr,
1128                                                     NULL);
1129   if (retest == NULL) {
1130     // This thread just won the right to enqueue the object.
1131     // We have separate lists for enqueueing so no synchronization
1132     // is necessary.
1133     refs_list.set_head(obj);
1134     refs_list.inc_length(1);
1135     if (_discovered_list_needs_barrier) {
1136       _bs->write_ref_field((void*)discovered_addr, next_discovered);
1137     }
1138 
1139     if (TraceReferenceGC) {
1140       gclog_or_tty->print_cr("Enqueued reference (mt) (" INTPTR_FORMAT ": %s)",
1141                              obj, obj->blueprint()->internal_name());
1142     }
1143   } else {
1144     // If retest was non NULL, another thread beat us to it:
1145     // The reference has already been discovered...
1146     if (TraceReferenceGC) {
1147       gclog_or_tty->print_cr("Already enqueued reference (" INTPTR_FORMAT ": %s)",
1148                              obj, obj->blueprint()->internal_name());
1149     }
1150   }
1151 }
1152 
1153 #ifndef PRODUCT
1154 // Non-atomic (i.e. concurrent) discovery might allow us
1155 // to observe j.l.References with NULL referents, being those
1156 // cleared concurrently by mutators during (or after) discovery.
1157 void ReferenceProcessor::verify_referent(oop obj) {
1158   bool da = discovery_is_atomic();
1159   oop referent = java_lang_ref_Reference::referent(obj);
1160   assert(da ? referent->is_oop() : referent->is_oop_or_null(),
1161          err_msg("Bad referent " INTPTR_FORMAT " found in Reference "
1162                  INTPTR_FORMAT " during %satomic discovery ",
1163                  (intptr_t)referent, (intptr_t)obj, da ? "" : "non-"));
1164 }
1165 #endif
1166 
1167 // We mention two of several possible choices here:
1168 // #0: if the reference object is not in the "originating generation"
 1169 //     (or part of the heap being collected, indicated by our "span")
1170 //     we don't treat it specially (i.e. we scan it as we would
1171 //     a normal oop, treating its references as strong references).
1172 //     This means that references can't be enqueued unless their
1173 //     referent is also in the same span. This is the simplest,
1174 //     most "local" and most conservative approach, albeit one
1175 //     that may cause weak references to be enqueued least promptly.
1176 //     We call this choice the "ReferenceBasedDiscovery" policy.
1177 // #1: the reference object may be in any generation (span), but if
1178 //     the referent is in the generation (span) being currently collected
1179 //     then we can discover the reference object, provided
1180 //     the object has not already been discovered by
1181 //     a different concurrently running collector (as may be the
1182 //     case, for instance, if the reference object is in CMS and
1183 //     the referent in DefNewGeneration), and provided the processing
1184 //     of this reference object by the current collector will
1185 //     appear atomic to every other collector in the system.
1186 //     (Thus, for instance, a concurrent collector may not
1187 //     discover references in other generations even if the
1188 //     referent is in its own generation). This policy may,
1189 //     in certain cases, enqueue references somewhat sooner than
1190 //     might Policy #0 above, but at marginally increased cost
1191 //     and complexity in processing these references.
 1192 //     We call this choice the "ReferentBasedDiscovery" policy.
1193 bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
1194   // We enqueue references only if we are discovering refs
1195   // (rather than processing discovered refs).
1196   if (!_discovering_refs || !RegisterReferences) {
1197     return false;
1198   }
1199   // We only enqueue active references.
1200   oop next = java_lang_ref_Reference::next(obj);
1201   if (next != NULL) {
1202     return false;
1203   }
1204 
1205   HeapWord* obj_addr = (HeapWord*)obj;
1206   if (RefDiscoveryPolicy == ReferenceBasedDiscovery &&
1207       !_span.contains(obj_addr)) {
1208     // Reference is not in the originating generation;
1209     // don't treat it specially (i.e. we want to scan it as a normal
1210     // object with strong references).
1211     return false;
1212   }
1213 
1214   // We only enqueue references whose referents are not (yet) strongly
1215   // reachable.
1216   if (is_alive_non_header() != NULL) {
1217     verify_referent(obj);
1218     if (is_alive_non_header()->do_object_b(java_lang_ref_Reference::referent(obj))) {
1219       return false;  // referent is reachable
1220     }
1221   }
1222   if (rt == REF_SOFT) {
1223     // For soft refs we can decide now if these are not
1224     // current candidates for clearing, in which case we
1225     // can mark through them now, rather than delaying that
1226     // to the reference-processing phase. Since all current
1227     // time-stamp policies advance the soft-ref clock only
1228     // at a major collection cycle, this is always currently
1229     // accurate.
1230     if (!_current_soft_ref_policy->should_clear_reference(obj)) {
1231       return false;
1232     }
1233   }
1234 
1235   HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
1236   const oop  discovered = java_lang_ref_Reference::discovered(obj);
1237   assert(discovered->is_oop_or_null(), "bad discovered field");
1238   if (discovered != NULL) {
1239     // The reference has already been discovered...
1240     if (TraceReferenceGC) {
1241       gclog_or_tty->print_cr("Already enqueued reference (" INTPTR_FORMAT ": %s)",
1242                              obj, obj->blueprint()->internal_name());
1243     }
1244     if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
1245       // assumes that an object is not processed twice;
1246       // if it's been already discovered it must be on another
1247       // generation's discovered list; so we won't discover it.
1248       return false;
1249     } else {
1250       assert(RefDiscoveryPolicy == ReferenceBasedDiscovery,
1251              "Unrecognized policy");
1252       // Check assumption that an object is not potentially
1253       // discovered twice except by concurrent collectors that potentially
1254       // trace the same Reference object twice.
1255       assert(UseConcMarkSweepGC || UseG1GC,
1256              "Only possible with a concurrent marking collector");
1257       return true;
1258     }
1259   }
1260 
1261   if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
1262     verify_referent(obj);
1263     // enqueue if and only if either:
1264     // reference is in our span or
1265     // we are an atomic collector and referent is in our span
1266     if (_span.contains(obj_addr) ||
1267         (discovery_is_atomic() &&
1268          _span.contains(java_lang_ref_Reference::referent(obj)))) {
1269       // should_enqueue = true;
1270     } else {
1271       return false;
1272     }
1273   } else {
1274     assert(RefDiscoveryPolicy == ReferenceBasedDiscovery &&
1275            _span.contains(obj_addr), "code inconsistency");
1276   }
1277 
1278   // Get the right type of discovered queue head.
1279   DiscoveredList* list = get_discovered_list(rt);
1280   if (list == NULL) {
1281     return false;   // nothing special needs to be done
1282   }
1283 
1284   if (_discovery_is_mt) {
1285     add_to_discovered_list_mt(*list, obj, discovered_addr);
1286   } else {
1287     // If "_discovered_list_needs_barrier", we do write barriers when
1288     // updating the discovered reference list.  Otherwise, we do a raw store
1289     // here: the field will be visited later when processing the discovered
1290     // references.
1291     oop current_head = list->head();
1292     // The last ref must have its discovered field pointing to itself.
1293     oop next_discovered = (current_head != NULL) ? current_head : obj;
1294 
1295     // As in the case further above, since we are over-writing a NULL
1296     // pre-value, we can safely elide the pre-barrier here for the case of G1.

1297     assert(discovered == NULL, "control point invariant");
1298     if (_discovered_list_needs_barrier && !UseG1GC) { // safe to elide for G1
1299       if (UseCompressedOops) {
1300         _bs->write_ref_field_pre((narrowOop*)discovered_addr, next_discovered);
1301       } else {
1302         _bs->write_ref_field_pre((oop*)discovered_addr, next_discovered);
1303       }
1304       guarantee(false, "Need to check non-G1 collector");
1305     }
1306     oop_store_raw(discovered_addr, next_discovered);
1307     if (_discovered_list_needs_barrier) {
1308       _bs->write_ref_field((void*)discovered_addr, next_discovered);
1309     }
1310     list->set_head(obj);
1311     list->inc_length(1);
1312 
1313     if (TraceReferenceGC) {
1314       gclog_or_tty->print_cr("Enqueued reference (" INTPTR_FORMAT ": %s)",
1315                                 obj, obj->blueprint()->internal_name());
1316     }
1317   }
1318   assert(obj->is_oop(), "Enqueued a bad reference");
1319   verify_referent(obj);
1320   return true;
1321 }
1322 
1323 // Preclean the discovered references by removing those
1324 // whose referents are alive, and by marking from those that
1325 // are not active. These lists can be handled here
1326 // in any order and, indeed, concurrently.
1327 void ReferenceProcessor::preclean_discovered_references(
1328   BoolObjectClosure* is_alive,
1329   OopClosure* keep_alive,
1330   VoidClosure* complete_gc,
1331   YieldClosure* yield,
1332   bool should_unload_classes) {
1333 
1334   NOT_PRODUCT(verify_ok_to_handle_reflists());
1335 
1336 #ifdef ASSERT
1337   bool must_remember_klasses = ClassUnloading && !UseConcMarkSweepGC ||
1338                                CMSClassUnloadingEnabled && UseConcMarkSweepGC ||




  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/javaClasses.hpp"
  27 #include "classfile/systemDictionary.hpp"
  28 #include "gc_interface/collectedHeap.hpp"
  29 #include "gc_interface/collectedHeap.inline.hpp"
  30 #include "memory/referencePolicy.hpp"
  31 #include "memory/referenceProcessor.hpp"
  32 #include "oops/oop.inline.hpp"
  33 #include "runtime/java.hpp"
  34 #include "runtime/jniHandles.hpp"
  35 
  36 ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
  37 ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy      = NULL;
  38 const int        subclasses_of_ref                = REF_PHANTOM - REF_OTHER;
  39 bool             ReferenceProcessor::_pending_list_uses_discovered_field = false;
  40 
  41 // List of discovered references.
  42 class DiscoveredList {
  43 public:
  44   DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
  45   oop head() const     {
  46      return UseCompressedOops ?  oopDesc::decode_heap_oop(_compressed_head) :
  47                                 _oop_head;
  48   }
  49   HeapWord* adr_head() {
  50     return UseCompressedOops ? (HeapWord*)&_compressed_head :
  51                                (HeapWord*)&_oop_head;
  52   }
  53   void   set_head(oop o) {
  54     if (UseCompressedOops) {
  55       // Must compress the head ptr.
  56       _compressed_head = oopDesc::encode_heap_oop(o);
  57     } else {
  58       _oop_head = o;
  59     }


  71   size_t _len;
  72 };
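// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of this webrev): a DiscoveredList is
// a singly linked list of java.lang.ref.Reference objects threaded through
// their "discovered" field, with the tail pointing to itself (see the
// discovery code further below). Assuming only accessors already used in this
// file, a hypothetical walk over such a list would look like:

static size_t count_discovered(DiscoveredList& refs_list) {
  size_t n = 0;
  oop obj  = refs_list.head();            // NULL when the list is empty
  oop prev = NULL;
  while (obj != NULL && obj != prev) {    // tail's discovered field self-loops
    n++;
    prev = obj;
    obj  = java_lang_ref_Reference::discovered(obj);
  }
  return n;
}
// ---------------------------------------------------------------------------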
  73 
  74 void referenceProcessor_init() {
  75   ReferenceProcessor::init_statics();
  76 }
  77 
  78 void ReferenceProcessor::init_statics() {
  79   // Initialize the master soft ref clock.
  80   java_lang_ref_SoftReference::set_clock(os::javaTimeMillis());
  81 
  82   _always_clear_soft_ref_policy = new AlwaysClearPolicy();
  83   _default_soft_ref_policy      = new COMPILER2_PRESENT(LRUMaxHeapPolicy())
  84                                       NOT_COMPILER2(LRUCurrentHeapPolicy());
  85   if (_always_clear_soft_ref_policy == NULL || _default_soft_ref_policy == NULL) {
  86     vm_exit_during_initialization("Could not allocate reference policy object");
  87   }
  88   guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery ||
  89             RefDiscoveryPolicy == ReferentBasedDiscovery,
  90             "Unrecognized RefDiscoveryPolicy");
  91   _pending_list_uses_discovered_field = JDK_Version::current().pending_list_uses_discovered_field();
  92 }
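// Editor's note (illustrative assumption about the COMPILER2_PRESENT /
// NOT_COMPILER2 macros; not part of this change): the initializer of
// _default_soft_ref_policy above effectively selects one of
//
//   _default_soft_ref_policy = new LRUMaxHeapPolicy();      // C2 (server) builds
//   _default_soft_ref_policy = new LRUCurrentHeapPolicy();  // builds without C2
//
// while _always_clear_soft_ref_policy unconditionally clears soft references.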
  93 
  94 ReferenceProcessor::ReferenceProcessor(MemRegion span,
  95                                        bool      mt_processing,
  96                                        int       mt_processing_degree,
  97                                        bool      mt_discovery,
  98                                        int       mt_discovery_degree,
  99                                        bool      atomic_discovery,
 100                                        BoolObjectClosure* is_alive_non_header,
 101                                        bool      discovered_list_needs_barrier)  :
 102   _discovering_refs(false),
 103   _enqueuing_is_done(false),
 104   _is_alive_non_header(is_alive_non_header),
 105   _discovered_list_needs_barrier(discovered_list_needs_barrier),
 106   _bs(NULL),
 107   _processing_is_mt(mt_processing),
 108   _next_id(0)
 109 {
 110   _span = span;
 111   _discovery_is_atomic = atomic_discovery;


 292 
 293   // Stop treating discovered references specially.
 294   ref->disable_discovery();
 295 
 296   // Return true if new pending references were added
 297   return old_pending_list_value != *pending_list_addr;
 298 }
 299 
 300 bool ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor) {
 301   NOT_PRODUCT(verify_ok_to_handle_reflists());
 302   if (UseCompressedOops) {
 303     return enqueue_discovered_ref_helper<narrowOop>(this, task_executor);
 304   } else {
 305     return enqueue_discovered_ref_helper<oop>(this, task_executor);
 306   }
 307 }
 308 
 309 void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list,
 310                                                     HeapWord* pending_list_addr) {
 311   // Given a list of refs linked through the "discovered" field
 312   // (java.lang.ref.Reference.discovered), self-loop their "next" field 
 313   // thus distinguishing them from active References, then 
 314   // prepend them to the pending list. 
 315   // BKWRD COMPATIBILITY NOTE: For older JDKs (prior to the fix for 4956777),
 316   // the "next" field is used to chain the pending list, not the discovered
 317   // field.
 318 
 319   if (TraceReferenceGC && PrintGCDetails) {
 320     gclog_or_tty->print_cr("ReferenceProcessor::enqueue_discovered_reflist list "
 321                            INTPTR_FORMAT, (address)refs_list.head());
 322   }
 323 
 324   oop obj = NULL;
 325   oop next_d = refs_list.head();
 326   if (pending_list_uses_discovered_field()) { // New behaviour
 327     // Walk down the list, self-looping the next field 
 328     // so that the References are not considered active. 
 329     while (obj != next_d) {
 330       obj = next_d;
 331       assert(obj->is_instanceRef(), "should be reference object");
 332       next_d = java_lang_ref_Reference::discovered(obj);
 333       if (TraceReferenceGC && PrintGCDetails) {
 334         gclog_or_tty->print_cr("        obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT,
 335                                obj, next_d);
 336       }
 337       assert(java_lang_ref_Reference::next(obj) == NULL,
 338              "Reference not active; should not be discovered"); 
 339       // Self-loop next, so as to make Ref not active. 
 340       java_lang_ref_Reference::set_next(obj, obj);
 341       if (next_d == obj) {  // obj is last
  342         // Swap refs_list into pending_list_addr and
 343         // set obj's discovered to what we read from pending_list_addr.
 344         oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
 345         // Need oop_check on pending_list_addr above;
 346         // see special oop-check code at the end of
 347         // enqueue_discovered_reflists() further below.
 348         java_lang_ref_Reference::set_discovered(obj, old); // old may be NULL 
 349       }
 350     }
 351   } else { // Old behaviour
 352     // Walk down the list, copying the discovered field into
 353     // the next field and clearing the discovered field.
 354     while (obj != next_d) {
 355       obj = next_d;
 356       assert(obj->is_instanceRef(), "should be reference object");
 357       next_d = java_lang_ref_Reference::discovered(obj);
 358       if (TraceReferenceGC && PrintGCDetails) {
 359         gclog_or_tty->print_cr("        obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT,
 360                                obj, next_d);
 361       }
 362       assert(java_lang_ref_Reference::next(obj) == NULL,
 363              "The reference should not be enqueued");
 364       if (next_d == obj) {  // obj is last
  365         // Swap refs_list into pending_list_addr and
 366         // set obj's next to what we read from pending_list_addr.
 367         oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
 368         // Need oop_check on pending_list_addr above;
 369         // see special oop-check code at the end of
 370         // enqueue_discovered_reflists() further below.
 371         if (old == NULL) {
 372           // obj should be made to point to itself, since
 373           // pending list was empty.
 374           java_lang_ref_Reference::set_next(obj, obj);
 375         } else {
 376           java_lang_ref_Reference::set_next(obj, old);
 377         }
 378       } else {
 379         java_lang_ref_Reference::set_next(obj, next_d);
 380       }
 381       java_lang_ref_Reference::set_discovered(obj, (oop) NULL);
 382     }
 383   }
 384 }
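// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of this webrev): a hypothetical check
// of the post-condition established above for a Reference that has just been
// moved onto the pending list, under the two chaining schemes described at the
// top of enqueue_discovered_reflist():

static void verify_enqueued_reference(oop obj, bool uses_discovered_field) {
  if (uses_discovered_field) {
    // New behaviour: "next" self-loops, so the Reference is no longer active;
    // the pending-list link lives in the "discovered" field (NULL only at the
    // tail, when the pending list was previously empty).
    assert(java_lang_ref_Reference::next(obj) == obj, "next should self-loop");
  } else {
    // Old behaviour: the pending-list link lives in "next" (self-loop at the
    // tail when the pending list was empty) and "discovered" has been cleared.
    assert(java_lang_ref_Reference::next(obj) != NULL, "should be chained via next");
    assert(java_lang_ref_Reference::discovered(obj) == NULL, "discovered should be cleared");
  }
}
// ---------------------------------------------------------------------------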
 385 
 386 // Parallel enqueue task
 387 class RefProcEnqueueTask: public AbstractRefProcTaskExecutor::EnqueueTask {
 388 public:
 389   RefProcEnqueueTask(ReferenceProcessor& ref_processor,
 390                      DiscoveredList      discovered_refs[],
 391                      HeapWord*           pending_list_addr,
 392                      int                 n_queues)
 393     : EnqueueTask(ref_processor, discovered_refs,
 394                   pending_list_addr, n_queues)
 395   { }
 396 
 397   virtual void work(unsigned int work_id) {
 398     assert(work_id < (unsigned int)_ref_processor.max_num_q(), "Index out-of-bounds");
 399     // Simplest first cut: static partitioning.
 400     int index = work_id;
 401     // The increment on "index" must correspond to the maximum number of queues
 402     // (n_queues) with which that ReferenceProcessor was created.  That
 403     // is because of the "clever" way the discovered references lists were


 631       if (TraceReferenceGC) {
 632         gclog_or_tty->print_cr("Dropping reference (" INTPTR_FORMAT ": %s"  ") by policy",
 633                                iter.obj(), iter.obj()->blueprint()->internal_name());
 634       }
 635       // Remove Reference object from list
 636       iter.remove();
 637       // Make the Reference object active again
 638       iter.make_active();
 639       // keep the referent around
 640       iter.make_referent_alive();
 641       iter.move_to_next();
 642     } else {
 643       iter.next();
 644     }
 645   }
 646   // Close the reachable set
 647   complete_gc->do_void();
 648   NOT_PRODUCT(
 649     if (PrintGCDetails && TraceReferenceGC) {
 650       gclog_or_tty->print_cr(" Dropped %d dead Refs out of %d "
 651         "discovered Refs by policy, from list " INTPTR_FORMAT,
 652         iter.removed(), iter.processed(), (address)refs_list.head());
 653     }
 654   )
 655 }
 656 
 657 // Traverse the list and remove any Refs that are not active, or
 658 // whose referents are either alive or NULL.
 659 void
 660 ReferenceProcessor::pp2_work(DiscoveredList&    refs_list,
 661                              BoolObjectClosure* is_alive,
 662                              OopClosure*        keep_alive) {
 663   assert(discovery_is_atomic(), "Error");
 664   DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
 665   while (iter.has_next()) {
 666     iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
 667     DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());)
 668     assert(next == NULL, "Should not discover inactive Reference");
 669     if (iter.is_referent_alive()) {
 670       if (TraceReferenceGC) {
 671         gclog_or_tty->print_cr("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)",


1131   }
1132   return list;
1133 }
1134 
1135 inline void
1136 ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
1137                                               oop             obj,
1138                                               HeapWord*       discovered_addr) {
1139   assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller");
1140   // First we must make sure this object is only enqueued once. CAS in a non null
1141   // discovered_addr.
1142   oop current_head = refs_list.head();
1143   // The last ref must have its discovered field pointing to itself.
1144   oop next_discovered = (current_head != NULL) ? current_head : obj;
1145 
1146   // Note: In the case of G1, this specific pre-barrier is strictly
1147   // not necessary because the only case we are interested in
1148   // here is when *discovered_addr is NULL (see the CAS further below),
1149   // so this will expand to nothing. As a result, we have manually
1150   // elided this out for G1, but left in the test for some future
1151   // collector that might have need for a pre-barrier here, e.g.:-
1152   // _bs->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered);
1153   assert(!_discovered_list_needs_barrier || UseG1GC,
1154          "Need to check non-G1 collector: "
 1155          "may need a pre-write-barrier for CAS from NULL below");
1156   oop retest = oopDesc::atomic_compare_exchange_oop(next_discovered, discovered_addr,
1157                                                     NULL);
1158   if (retest == NULL) {
1159     // This thread just won the right to enqueue the object.
1160     // We have separate lists for enqueueing, so no synchronization
1161     // is necessary.
1162     refs_list.set_head(obj);
1163     refs_list.inc_length(1);
1164     if (_discovered_list_needs_barrier) {
1165       _bs->write_ref_field((void*)discovered_addr, next_discovered);
1166     }
1167 
1168     if (TraceReferenceGC) {
1169       gclog_or_tty->print_cr("Discovered reference (mt) (" INTPTR_FORMAT ": %s)",
1170                              obj, obj->blueprint()->internal_name());
1171     }
1172   } else {
1173     // If retest was non NULL, another thread beat us to it:
1174     // The reference has already been discovered...
1175     if (TraceReferenceGC) {
1176       gclog_or_tty->print_cr("Already discovered reference (" INTPTR_FORMAT ": %s)",
1177                              obj, obj->blueprint()->internal_name());
1178     }
1179   }
1180 }
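// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of this webrev): the heart of the
// MT-discovery protocol above is a single compare-and-swap from NULL -- only
// the thread whose CAS succeeds links the Reference onto its own (private)
// DiscoveredList, so no further synchronization is needed. A hypothetical
// stand-alone helper expressing just the claim step:

static bool try_claim_reference(oop obj, oop next_discovered) {
  HeapWord* discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
  // Install next_discovered only if the discovered field is still NULL;
  // the return value is the field's previous contents.
  oop witness = oopDesc::atomic_compare_exchange_oop(next_discovered,
                                                     discovered_addr,
                                                     NULL);
  return witness == NULL;  // true: we won the race and own this Reference
}
// ---------------------------------------------------------------------------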
1181 
1182 #ifndef PRODUCT
1183 // Non-atomic (i.e. concurrent) discovery might allow us
1184 // to observe j.l.References with NULL referents, being those
1185 // cleared concurrently by mutators during (or after) discovery.
1186 void ReferenceProcessor::verify_referent(oop obj) {
1187   bool da = discovery_is_atomic();
1188   oop referent = java_lang_ref_Reference::referent(obj);
1189   assert(da ? referent->is_oop() : referent->is_oop_or_null(),
1190          err_msg("Bad referent " INTPTR_FORMAT " found in Reference "
1191                  INTPTR_FORMAT " during %satomic discovery ",
1192                  (intptr_t)referent, (intptr_t)obj, da ? "" : "non-"));
1193 }
1194 #endif
1195 
1196 // We mention two of several possible choices here:
1197 // #0: if the reference object is not in the "originating generation"
 1198 //     (or part of the heap being collected, indicated by our "span")
1199 //     we don't treat it specially (i.e. we scan it as we would
1200 //     a normal oop, treating its references as strong references).
1201 //     This means that references can't be discovered unless their
1202 //     referent is also in the same span. This is the simplest,
1203 //     most "local" and most conservative approach, albeit one
1204 //     that may cause weak references to be enqueued least promptly.
1205 //     We call this choice the "ReferenceBasedDiscovery" policy.
1206 // #1: the reference object may be in any generation (span), but if
1207 //     the referent is in the generation (span) being currently collected
1208 //     then we can discover the reference object, provided
1209 //     the object has not already been discovered by
1210 //     a different concurrently running collector (as may be the
1211 //     case, for instance, if the reference object is in CMS and
1212 //     the referent in DefNewGeneration), and provided the processing
1213 //     of this reference object by the current collector will
1214 //     appear atomic to every other collector in the system.
1215 //     (Thus, for instance, a concurrent collector may not
1216 //     discover references in other generations even if the
1217 //     referent is in its own generation). This policy may,
1218 //     in certain cases, enqueue references somewhat sooner than
1219 //     might Policy #0 above, but at marginally increased cost
1220 //     and complexity in processing these references.
 1221 //     We call this choice the "ReferentBasedDiscovery" policy.
1222 bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
1223   // Make sure we are discovering refs (rather than processing discovered refs).

1224   if (!_discovering_refs || !RegisterReferences) {
1225     return false;
1226   }
1227   // We only discover active references.
1228   oop next = java_lang_ref_Reference::next(obj);
1229   if (next != NULL) {   // Ref is no longer active
1230     return false;
1231   }
1232 
1233   HeapWord* obj_addr = (HeapWord*)obj;
1234   if (RefDiscoveryPolicy == ReferenceBasedDiscovery &&
1235       !_span.contains(obj_addr)) {
1236     // Reference is not in the originating generation;
1237     // don't treat it specially (i.e. we want to scan it as a normal
1238     // object with strong references).
1239     return false;
1240   }
1241 
1242   // We only discover references whose referents are not (yet)
1243   // known to be strongly reachable.
1244   if (is_alive_non_header() != NULL) {
1245     verify_referent(obj);
1246     if (is_alive_non_header()->do_object_b(java_lang_ref_Reference::referent(obj))) {
1247       return false;  // referent is reachable
1248     }
1249   }
1250   if (rt == REF_SOFT) {
1251     // For soft refs we can decide now if these are not
1252     // current candidates for clearing, in which case we
1253     // can mark through them now, rather than delaying that
1254     // to the reference-processing phase. Since all current
1255     // time-stamp policies advance the soft-ref clock only
1256     // at a major collection cycle, this is always currently
1257     // accurate.
1258     if (!_current_soft_ref_policy->should_clear_reference(obj)) {
1259       return false;
1260     }
1261   }
1262 
1263   HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
1264   const oop  discovered = java_lang_ref_Reference::discovered(obj);
1265   assert(discovered->is_oop_or_null(), "bad discovered field");
1266   if (discovered != NULL) {
1267     // The reference has already been discovered...
1268     if (TraceReferenceGC) {
1269       gclog_or_tty->print_cr("Already discovered reference (" INTPTR_FORMAT ": %s)",
1270                              obj, obj->blueprint()->internal_name());
1271     }
1272     if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
1273       // assumes that an object is not processed twice;
1274       // if it's been already discovered it must be on another
1275       // generation's discovered list; so we won't discover it.
1276       return false;
1277     } else {
1278       assert(RefDiscoveryPolicy == ReferenceBasedDiscovery,
1279              "Unrecognized policy");
1280       // Check assumption that an object is not potentially
1281       // discovered twice except by concurrent collectors that potentially
1282       // trace the same Reference object twice.
1283       assert(UseConcMarkSweepGC || UseG1GC,
1284              "Only possible with a concurrent marking collector");
1285       return true;
1286     }
1287   }
1288 
1289   if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
1290     verify_referent(obj);
1291     // Discover if and only if EITHER:
1292     // .. reference is in our span, OR
1293     // .. we are an atomic collector and referent is in our span
1294     if (_span.contains(obj_addr) ||
1295         (discovery_is_atomic() &&
1296          _span.contains(java_lang_ref_Reference::referent(obj)))) {
1297       // should_enqueue = true;
1298     } else {
1299       return false;
1300     }
1301   } else {
1302     assert(RefDiscoveryPolicy == ReferenceBasedDiscovery &&
1303            _span.contains(obj_addr), "code inconsistency");
1304   }
1305 
1306   // Get the right type of discovered queue head.
1307   DiscoveredList* list = get_discovered_list(rt);
1308   if (list == NULL) {
1309     return false;   // nothing special needs to be done
1310   }
1311 
1312   if (_discovery_is_mt) {
1313     add_to_discovered_list_mt(*list, obj, discovered_addr);
1314   } else {
1315     // If "_discovered_list_needs_barrier", we do write barriers when
1316     // updating the discovered reference list.  Otherwise, we do a raw store
1317     // here: the field will be visited later when processing the discovered
1318     // references.
1319     oop current_head = list->head();
1320     // The last ref must have its discovered field pointing to itself.
1321     oop next_discovered = (current_head != NULL) ? current_head : obj;
1322 
1323     // As in the case further above, since we are over-writing a NULL
1324     // pre-value, we can safely elide the pre-barrier here for the case of G1.
1325     // e.g.:- _bs->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered);
1326     assert(discovered == NULL, "control point invariant");
1327     assert(!_discovered_list_needs_barrier || UseG1GC,
 1328            "For non-G1 collector, may need a pre-write-barrier for CAS from NULL below");
1329     oop_store_raw(discovered_addr, next_discovered);
1330     if (_discovered_list_needs_barrier) {
1331       _bs->write_ref_field((void*)discovered_addr, next_discovered);
1332     }
1333     list->set_head(obj);
1334     list->inc_length(1);
1335 
1336     if (TraceReferenceGC) {
1337       gclog_or_tty->print_cr("Discovered reference (" INTPTR_FORMAT ": %s)",
1338                                 obj, obj->blueprint()->internal_name());
1339     }
1340   }
1341   assert(obj->is_oop(), "Discovered a bad reference");
1342   verify_referent(obj);
1343   return true;
1344 }
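// ---------------------------------------------------------------------------
// Editor's illustrative sketch (not part of this webrev): stripped of the
// already-discovered, soft-ref-policy and is_alive_non_header cases handled
// above, the two discovery policies reduce to the following span predicate
// (names are the editor's, not HotSpot's):

static bool span_allows_discovery(MemRegion span, oop obj, oop referent,
                                  bool referent_based, bool atomic_discovery) {
  if (!referent_based) {
    // ReferenceBasedDiscovery: the Reference object itself must lie in the
    // part of the heap being collected.
    return span.contains((HeapWord*)obj);
  }
  // ReferentBasedDiscovery: discover if the Reference is in the span, or if
  // discovery is atomic and the referent is in the span.
  return span.contains((HeapWord*)obj) ||
         (atomic_discovery && span.contains((HeapWord*)referent));
}
// ---------------------------------------------------------------------------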
1345 
1346 // Preclean the discovered references by removing those
1347 // whose referents are alive, and by marking from those that
1348 // are not active. These lists can be handled here
1349 // in any order and, indeed, concurrently.
1350 void ReferenceProcessor::preclean_discovered_references(
1351   BoolObjectClosure* is_alive,
1352   OopClosure* keep_alive,
1353   VoidClosure* complete_gc,
1354   YieldClosure* yield,
1355   bool should_unload_classes) {
1356 
1357   NOT_PRODUCT(verify_ok_to_handle_reflists());
1358 
1359 #ifdef ASSERT
1360   bool must_remember_klasses = ClassUnloading && !UseConcMarkSweepGC ||
1361                                CMSClassUnloadingEnabled && UseConcMarkSweepGC ||