1 /* 2 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/referenceProcessor.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/jniHandles.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

// Statics shared by every ReferenceProcessor instance; set up once in
// init_statics() during VM initialization.
ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy = NULL;
bool ReferenceProcessor::_pending_list_uses_discovered_field = false;
jlong ReferenceProcessor::_soft_ref_timestamp_clock = 0;

void referenceProcessor_init() {
  ReferenceProcessor::init_statics();
}

// One-time setup: seed the soft-reference timestamp clock, allocate the
// two shared soft-reference clearing policies, and latch whether this JDK
// links the pending list through Reference.discovered (JDK >= fix for
// 4956777) or through Reference.next.
void ReferenceProcessor::init_statics() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;

  // Initialize the soft ref timestamp clock.
  _soft_ref_timestamp_clock = now;
  // Also update the soft ref clock in j.l.r.SoftReference
  java_lang_ref_SoftReference::set_clock(_soft_ref_timestamp_clock);

  _always_clear_soft_ref_policy = new AlwaysClearPolicy();
  // With C2 present, soft-ref lifetime scales with the maximum heap size;
  // otherwise with the current heap size.
  _default_soft_ref_policy = new COMPILER2_PRESENT(LRUMaxHeapPolicy())
                                 NOT_COMPILER2(LRUCurrentHeapPolicy());
  if (_always_clear_soft_ref_policy == NULL || _default_soft_ref_policy == NULL) {
    vm_exit_during_initialization("Could not allocate reference policy object");
  }
  guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery ||
            RefDiscoveryPolicy == ReferentBasedDiscovery,
            "Unrecongnized RefDiscoveryPolicy");
  _pending_list_uses_discovered_field = JDK_Version::current().pending_list_uses_discovered_field();
}

// Turn on reference discovery for the next collection cycle.
// verify_disabled: assert that discovery is not already on (catches nesting).
// check_no_refs:   assert that all discovered lists are empty first.
void ReferenceProcessor::enable_discovery(bool verify_disabled, bool check_no_refs) {
#ifdef ASSERT
  // Verify that we're not currently discovering refs
  assert(!verify_disabled || !_discovering_refs, "nested call?");

  if (check_no_refs) {
    // Verify that the discovered lists are empty
    verify_no_references_recorded();
  }
#endif // ASSERT

  // Someone could have modified the value of the static
  // field in the j.l.r.SoftReference class that holds the
  // soft reference timestamp clock using reflection or
  // Unsafe between GCs. Unconditionally update the static
  // field in ReferenceProcessor here so that we use the new
  // value during reference discovery.

  _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock();
  _discovering_refs = true;
}

// span:                 heap region covered by this processor's discovery.
// mt_processing/degree: whether (and how wide) processing is multi-threaded.
// mt_discovery/degree:  whether (and how wide) discovery is multi-threaded.
// atomic_discovery:     true when discovery happens at a safepoint (STW),
//                       false for concurrent discovery.
// is_alive_non_header:  optional closure used during discovery to filter
//                       already-live referents.
ReferenceProcessor::ReferenceProcessor(MemRegion span,
                                       bool      mt_processing,
                                       uint      mt_processing_degree,
                                       bool      mt_discovery,
                                       uint      mt_discovery_degree,
                                       bool      atomic_discovery,
                                       BoolObjectClosure* is_alive_non_header) :
  _discovering_refs(false),
  _enqueuing_is_done(false),
  _is_alive_non_header(is_alive_non_header),
  _processing_is_mt(mt_processing),
  _next_id(0)
{
  _span = span;
  _discovery_is_atomic = atomic_discovery;
  _discovery_is_mt = mt_discovery;
  _num_q = MAX2(1U, mt_processing_degree);
  _max_num_q = MAX2(_num_q, mt_discovery_degree);
  // One flat array holding _max_num_q queues for each Reference subclass
  // (Soft, Weak, Final, Phantom, Cleaner); the per-kind pointers below are
  // cursors into this array.
  _discovered_refs = NEW_C_HEAP_ARRAY(DiscoveredList,
            _max_num_q * number_of_subclasses_of_ref(), mtGC);

  if (_discovered_refs == NULL) {
    vm_exit_during_initialization("Could not allocated RefProc Array");
  }
  _discoveredSoftRefs    = &_discovered_refs[0];
  _discoveredWeakRefs    = &_discoveredSoftRefs[_max_num_q];
  _discoveredFinalRefs   = &_discoveredWeakRefs[_max_num_q];
  _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_q];
  _discoveredCleanerRefs = &_discoveredPhantomRefs[_max_num_q];

  // Initialize all entries to NULL
  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    _discovered_refs[i].set_head(NULL);
    _discovered_refs[i].set_length(0);
  }

  setup_policy(false /* default soft ref policy */);
}

#ifndef PRODUCT
// Debug-only: assert that discovery is off and every discovered list is empty.
void ReferenceProcessor::verify_no_references_recorded() {
  guarantee(!_discovering_refs, "Discovering refs?");
  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    guarantee(_discovered_refs[i].is_empty(),
              "Found non-empty discovered list");
  }
}
#endif

// Apply f to the head slot of every discovered list (all kinds, all queues),
// using the narrow or wide oop flavor as dictated by UseCompressedOops.
void ReferenceProcessor::weak_oops_do(OopClosure* f) {
  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    if (UseCompressedOops) {
      f->do_oop((narrowOop*)_discovered_refs[i].adr_head());
    } else {
      f->do_oop((oop*)_discovered_refs[i].adr_head());
    }
  }
}

void ReferenceProcessor::update_soft_ref_master_clock() {
  // Update (advance) the soft ref master clock field. This must be done
  // after processing the soft ref list.

  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  jlong soft_ref_clock = java_lang_ref_SoftReference::clock();
  assert(soft_ref_clock == _soft_ref_timestamp_clock, "soft ref clocks out of sync");

  NOT_PRODUCT(
  if (now < _soft_ref_timestamp_clock) {
    warning("time warp: "INT64_FORMAT" to "INT64_FORMAT,
            _soft_ref_timestamp_clock, now);
  }
  )
  // The values of now and _soft_ref_timestamp_clock are set using
  // javaTimeNanos(), which is guaranteed to be monotonically
  // non-decreasing provided the underlying platform provides such
  // a time source (and it is bug free).
  // In product mode, however, protect ourselves from non-monotonicity.
  if (now > _soft_ref_timestamp_clock) {
    _soft_ref_timestamp_clock = now;
    java_lang_ref_SoftReference::set_clock(now);
  }
  // Else leave clock stalled at its old value until time progresses
  // past clock value.
}

// Sum of the lengths of the first _max_num_q queues in lists[].
size_t ReferenceProcessor::total_count(DiscoveredList lists[]) {
  size_t total = 0;
  for (uint i = 0; i < _max_num_q; ++i) {
    total += lists[i].length();
  }
  return total;
}

// Main entry point for reference processing at the end of a collection:
// processes the discovered Soft, Weak, Final, Phantom (+Cleaner) lists in
// strength order, then JNI weak globals, and returns per-kind counts.
// Cleaner refs are folded into the phantom count (see comment below).
ReferenceProcessorStats ReferenceProcessor::process_discovered_references(
  BoolObjectClosure*           is_alive,
  OopClosure*                  keep_alive,
  VoidClosure*                 complete_gc,
  AbstractRefProcTaskExecutor* task_executor,
  GCTimer*                     gc_timer,
  GCId                         gc_id) {
  NOT_PRODUCT(verify_ok_to_handle_reflists());

  assert(!enqueuing_is_done(), "If here enqueuing should not be complete");
  // Stop treating discovered references specially.
  disable_discovery();

  // If discovery was concurrent, someone could have modified
  // the value of the static field in the j.l.r.SoftReference
  // class that holds the soft reference timestamp clock using
  // reflection or Unsafe between when discovery was enabled and
  // now. Unconditionally update the static field in ReferenceProcessor
  // here so that we use the new value during processing of the
  // discovered soft refs.

  _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock();

  bool trace_time = PrintGCDetails && PrintReferenceGC;

  // Soft references
  size_t soft_count = 0;
  {
    GCTraceTime tt("SoftReference", trace_time, false, gc_timer, gc_id);
    soft_count =
      process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true,
                                 is_alive, keep_alive, complete_gc, task_executor);
  }

  update_soft_ref_master_clock();

  // Weak references
  size_t weak_count = 0;
  {
    GCTraceTime tt("WeakReference", trace_time, false, gc_timer, gc_id);
    weak_count =
      process_discovered_reflist(_discoveredWeakRefs, NULL, true,
                                 is_alive, keep_alive, complete_gc, task_executor);
  }

  // Final references
  size_t final_count = 0;
  {
    GCTraceTime tt("FinalReference", trace_time, false, gc_timer, gc_id);
    final_count =
      process_discovered_reflist(_discoveredFinalRefs, NULL, false,
                                 is_alive, keep_alive, complete_gc, task_executor);
  }

  // Phantom references
  size_t phantom_count = 0;
  {
    GCTraceTime tt("PhantomReference", trace_time, false, gc_timer, gc_id);
    phantom_count =
      process_discovered_reflist(_discoveredPhantomRefs, NULL, false,
                                 is_alive, keep_alive, complete_gc, task_executor);

    // Process cleaners, but include them in phantom statistics. We expect
    // Cleaner references to be temporary, and don't want to deal with
    // possible incompatibilities arising from making it more visible.
    phantom_count +=
      process_discovered_reflist(_discoveredCleanerRefs, NULL, true,
                                 is_alive, keep_alive, complete_gc, task_executor);
  }

  // Weak global JNI references. It would make more sense (semantically) to
  // traverse these simultaneously with the regular weak references above, but
  // that is not how the JDK1.2 specification is. See #4126360. Native code can
  // thus use JNI weak references to circumvent the phantom references and
  // resurrect a "post-mortem" object.
  {
    GCTraceTime tt("JNI Weak Reference", trace_time, false, gc_timer, gc_id);
    if (task_executor != NULL) {
      task_executor->set_single_threaded_mode();
    }
    process_phaseJNI(is_alive, keep_alive, complete_gc);
  }

  return ReferenceProcessorStats(soft_count, weak_count, final_count, phantom_count);
}

#ifndef PRODUCT
// Calculate the number of jni handles.
uint ReferenceProcessor::count_jni_refs() {
  class AlwaysAliveClosure: public BoolObjectClosure {
  public:
    virtual bool do_object_b(oop obj) { return true; }
  };

  class CountHandleClosure: public OopClosure {
  private:
    int _count;
  public:
    CountHandleClosure(): _count(0) {}
    void do_oop(oop* unused)       { _count++; }
    void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
    int count() { return _count; }
  };
  CountHandleClosure global_handle_count;
  AlwaysAliveClosure always_alive;
  JNIHandles::weak_oops_do(&always_alive, &global_handle_count);
  return global_handle_count.count();
}
#endif

// Process JNI weak global references: keep alive those whose referents are
// still reachable (per is_alive), then close the transitive set.
void ReferenceProcessor::process_phaseJNI(BoolObjectClosure* is_alive,
                                          OopClosure*        keep_alive,
                                          VoidClosure*       complete_gc) {
#ifndef PRODUCT
  if (PrintGCDetails && PrintReferenceGC) {
    unsigned int count = count_jni_refs();
    gclog_or_tty->print(", %u refs", count);
  }
#endif
  if (UseShenandoahGC) {
    // Workaround bugs with JNI weak reference processing, by pessimistically
    // assuming all JNI weak refs are alive. This effectively makes JNI weak refs
    // non-reclaimable.
    // TODO: Fix this properly
    class AlwaysAliveClosure: public BoolObjectClosure {
    public:
      virtual bool do_object_b(oop obj) { return true; }
    };

    AlwaysAliveClosure always_alive;
    JNIHandles::weak_oops_do(&always_alive, keep_alive);
  } else {
    JNIHandles::weak_oops_do(is_alive, keep_alive);
  }
  complete_gc->do_void();
}


// Templated on the (narrow/wide) oop flavor of the pending-list head slot.
// Returns true iff the pending list changed, i.e. new references were
// enqueued and the reference handler thread should be notified.
template <class T>
bool enqueue_discovered_ref_helper(ReferenceProcessor* ref,
                                   AbstractRefProcTaskExecutor* task_executor) {

  // Remember old value of pending references list
  T* pending_list_addr = (T*)java_lang_ref_Reference::pending_list_addr();
  T old_pending_list_value = *pending_list_addr;

  // Enqueue references that are not made active again, and
  // clear the decks for the next collection (cycle).
  ref->enqueue_discovered_reflists((HeapWord*)pending_list_addr, task_executor);
  // Do the post-barrier on pending_list_addr missed in
  // enqueue_discovered_reflist.
  oopDesc::bs()->write_ref_field(pending_list_addr, oopDesc::load_decode_heap_oop(pending_list_addr));

  // Stop treating discovered references specially.
  ref->disable_discovery();

  // Return true if new pending references were added
  return !oopDesc::safe_equals(old_pending_list_value, *pending_list_addr);
}

// Dispatch to the compressed/uncompressed helper; see above.
bool ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor) {
  NOT_PRODUCT(verify_ok_to_handle_reflists());
  if (UseCompressedOops) {
    return enqueue_discovered_ref_helper<narrowOop>(this, task_executor);
  } else {
    return enqueue_discovered_ref_helper<oop>(this, task_executor);
  }
}

void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list,
                                                    HeapWord* pending_list_addr) {
  // Given a list of refs linked through the "discovered" field
  // (java.lang.ref.Reference.discovered), self-loop their "next" field
  // thus distinguishing them from active References, then
  // prepend them to the pending list.
  //
  // The Java threads will see the Reference objects linked together through
  // the discovered field. Instead of trying to do the write barrier updates
  // in all places in the reference processor where we manipulate the discovered
  // field we make sure to do the barrier here where we anyway iterate through
  // all linked Reference objects. Note that it is important to not dirty any
  // cards during reference processing since this will cause card table
  // verification to fail for G1.
  //
  // BKWRD COMPATIBILITY NOTE: For older JDKs (prior to the fix for 4956777),
  // the "next" field is used to chain the pending list, not the discovered
  // field.
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr("ReferenceProcessor::enqueue_discovered_reflist list "
                           INTPTR_FORMAT, (address)refs_list.head());
  }

  oop obj = NULL;
  oop next_d = refs_list.head();
  if (pending_list_uses_discovered_field()) { // New behavior
    // Walk down the list, self-looping the next field
    // so that the References are not considered active.
    // Discovered lists are terminated by a self-loop, hence the
    // obj == next_d loop exit test.
    while (!oopDesc::safe_equals(obj, next_d)) {
      obj = next_d;
      assert(obj->is_instanceRef(), "should be reference object");
      next_d = java_lang_ref_Reference::discovered(obj);
      if (TraceReferenceGC && PrintGCDetails) {
        gclog_or_tty->print_cr("        obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT,
                               (void *)obj, (void *)next_d);
      }
      assert(java_lang_ref_Reference::next(obj) == NULL,
             "Reference not active; should not be discovered");
      // Self-loop next, so as to make Ref not active.
      java_lang_ref_Reference::set_next_raw(obj, obj);
      if (!oopDesc::safe_equals(next_d, obj)) {
        oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), next_d);
      } else {
        // This is the last object.
        // Swap refs_list into pending_list_addr and
        // set obj's discovered to what we read from pending_list_addr.
        oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
        // Need post-barrier on pending_list_addr. See enqueue_discovered_ref_helper() above.
        java_lang_ref_Reference::set_discovered_raw(obj, old); // old may be NULL
        oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), old);
      }
    }
  } else { // Old behaviour
    // Walk down the list, copying the discovered field into
    // the next field and clearing the discovered field.
    while (obj != next_d) {
      obj = next_d;
      assert(obj->is_instanceRef(), "should be reference object");
      next_d = java_lang_ref_Reference::discovered(obj);
      if (TraceReferenceGC && PrintGCDetails) {
        gclog_or_tty->print_cr("        obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT,
                               (void *)obj, (void *)next_d);
      }
      assert(java_lang_ref_Reference::next(obj) == NULL,
             "The reference should not be enqueued");
      if (next_d == obj) {  // obj is last
        // Swap refs_list into pending_list_addr and
        // set obj's next to what we read from pending_list_addr.
        oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
        // Need oop_check on pending_list_addr above;
        // see special oop-check code at the end of
        // enqueue_discovered_reflists() further below.
        if (old == NULL) {
          // obj should be made to point to itself, since
          // pending list was empty.
          java_lang_ref_Reference::set_next(obj, obj);
        } else {
          java_lang_ref_Reference::set_next(obj, old);
        }
      } else {
        java_lang_ref_Reference::set_next(obj, next_d);
      }
      java_lang_ref_Reference::set_discovered(obj, (oop) NULL);
    }
  }
}

// Parallel enqueue task
class RefProcEnqueueTask: public AbstractRefProcTaskExecutor::EnqueueTask {
public:
  RefProcEnqueueTask(ReferenceProcessor& ref_processor,
                     DiscoveredList      discovered_refs[],
                     HeapWord*           pending_list_addr,
                     int                 n_queues)
    : EnqueueTask(ref_processor, discovered_refs,
                  pending_list_addr, n_queues)
  { }

  virtual void work(unsigned int work_id) {
    assert(work_id < (unsigned int)_ref_processor.max_num_q(), "Index out-of-bounds");
    // Simplest first cut: static partitioning.
    int index = work_id;
    // The increment on "index" must correspond to the maximum number of queues
    // (n_queues) with which that ReferenceProcessor was created.  That
    // is because of the "clever" way the discovered references lists were
    // allocated and are indexed into.
    assert(_n_queues == (int) _ref_processor.max_num_q(), "Different number not expected");
    for (int j = 0;
         j < ReferenceProcessor::number_of_subclasses_of_ref();
         j++, index += _n_queues) {
      _ref_processor.enqueue_discovered_reflist(
        _refs_lists[index], _pending_list_addr);
      _refs_lists[index].set_head(NULL);
      _refs_lists[index].set_length(0);
    }
  }
};

// Enqueue references that are not made active again
void ReferenceProcessor::enqueue_discovered_reflists(HeapWord* pending_list_addr,
  AbstractRefProcTaskExecutor* task_executor) {
  if (_processing_is_mt && task_executor != NULL) {
    // Parallel code
    RefProcEnqueueTask tsk(*this, _discovered_refs,
                           pending_list_addr, _max_num_q);
    task_executor->execute(tsk);
  } else {
    // Serial code: call the parent class's implementation
    for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
      enqueue_discovered_reflist(_discovered_refs[i], pending_list_addr);
      _discovered_refs[i].set_head(NULL);
      _discovered_refs[i].set_length(0);
    }
  }
}

// Load the discovered/referent fields of the current Reference into the
// iterator's cursor state. allow_null_referent (debug only) permits a NULL
// referent, which can legitimately occur during concurrent discovery.
void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
  _discovered_addr = java_lang_ref_Reference::discovered_addr(_ref);
  oop discovered = java_lang_ref_Reference::discovered(_ref);
  assert(_discovered_addr && discovered->is_oop_or_null(),
         "discovered field is bad");
  _next = discovered;
  _referent_addr = java_lang_ref_Reference::referent_addr(_ref);
  _referent = java_lang_ref_Reference::referent(_ref);
  assert(Universe::heap()->is_in_reserved_or_null(_referent),
         "Wrong oop found in java.lang.Reference object");
  assert(allow_null_referent ?
             _referent->is_oop_or_null()
           : _referent->is_oop(),
         "bad referent");
}

// Unlink the current Reference from the discovered list, clearing its
// discovered field and splicing _prev_next past it.
void DiscoveredListIterator::remove() {
  assert(_ref->is_oop(), "Dropping a bad reference");
  oop_store_raw(_discovered_addr, NULL);

  // First _prev_next ref actually points into DiscoveredList (gross).
  oop new_next;
  if (oopDesc::safe_equals(_next, _ref)) {
    // At the end of the list, we should make _prev point to itself.
    // If _ref is the first ref, then _prev_next will be in the DiscoveredList,
    // and _prev will be NULL.
    new_next = _prev;
  } else {
    new_next = _next;
  }
  // Remove Reference object from discovered list. Note that G1 does not need a
  // pre-barrier here because we know the Reference has already been found/marked,
  // that's how it ended up in the discovered list in the first place.
  oop_store_raw(_prev_next, new_next);
  NOT_PRODUCT(_removed++);
  _refs_list.dec_length(1);
}

// Make the Reference object active again.
void DiscoveredListIterator::make_active() {
  // The pre barrier for G1 is probably just needed for the old
  // reference processing behavior. Should we guard this with
  // ReferenceProcessor::pending_list_uses_discovered_field() ?
  if (UseG1GC) {
    HeapWord* next_addr = java_lang_ref_Reference::next_addr(_ref);
    if (UseCompressedOops) {
      oopDesc::bs()->write_ref_field_pre((narrowOop*)next_addr, NULL);
    } else {
      oopDesc::bs()->write_ref_field_pre((oop*)next_addr, NULL);
    }
  }
  // A NULL next field marks the Reference as active.
  java_lang_ref_Reference::set_next_raw(_ref, NULL);
}

void DiscoveredListIterator::clear_referent() {
  oop_store_raw(_referent_addr, NULL);
}

// NOTE: process_phase*() are largely similar, and at a high level
// merely iterate over the extant list applying a predicate to
// each of its elements and possibly removing that element from the
// list and applying some further closures to that element.
// We should consider the possibility of replacing these
// process_phase*() methods by abstracting them into
// a single general iterator invocation that receives appropriate
// closures that accomplish this work.

// (SoftReferences only) Traverse the list and remove any SoftReferences whose
// referents are not alive, but that should be kept alive for policy reasons.
// Keep alive the transitive closure of all such referents.
void
ReferenceProcessor::process_phase1(DiscoveredList&    refs_list,
                                   ReferencePolicy*   policy,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc) {
  assert(policy != NULL, "Must have a non-NULL policy");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  // Decide which softly reachable refs should be kept alive.
  while (iter.has_next()) {
    // A NULL referent is only possible with non-atomic (concurrent) discovery.
    iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
    bool referent_is_dead = (iter.referent() != NULL) && !iter.is_referent_alive();
    if (referent_is_dead &&
        !policy->should_clear_reference(iter.obj(), _soft_ref_timestamp_clock)) {
      // Policy says keep this soft ref alive: drop it from the discovered
      // list, reactivate it, and keep its referent (and closure) alive.
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Dropping reference (" INTPTR_FORMAT ": %s"  ") by policy",
                               (void *)iter.obj(), iter.obj()->klass()->internal_name());
      }
      // Remove Reference object from list
      iter.remove();
      // Make the Reference object active again
      iter.make_active();
      // keep the referent around
      iter.make_referent_alive();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Close the reachable set
  complete_gc->do_void();
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC) {
      gclog_or_tty->print_cr(" Dropped %d dead Refs out of %d "
        "discovered Refs by policy, from list " INTPTR_FORMAT,
        iter.removed(), iter.processed(), (address)refs_list.head());
    }
  )
}

// Traverse the list and remove any Refs that are not active, or
// whose referents are either alive or NULL.
void
ReferenceProcessor::pp2_work(DiscoveredList&    refs_list,
                             BoolObjectClosure* is_alive,
                             OopClosure*        keep_alive) {
  // Atomic (stop-the-world) discovery variant of phase 2: drop refs whose
  // referents turned out to be reachable after all.
  assert(discovery_is_atomic(), "Error");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
    DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());)
    assert(next == NULL, "Should not discover inactive Reference");
    if (iter.is_referent_alive()) {
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)",
                               (void *)iter.obj(), iter.obj()->klass()->internal_name());
      }
      // The referent is reachable after all.
      // Remove Reference object from list.
      iter.remove();
      // Update the referent pointer as necessary: Note that this
      // should not entail any recursive marking because the
      // referent must already have been traversed.
      iter.make_referent_alive();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC && (iter.processed() > 0)) {
      gclog_or_tty->print_cr(" Dropped %d active Refs out of %d "
        "Refs in discovered list " INTPTR_FORMAT,
        iter.removed(), iter.processed(), (address)refs_list.head());
    }
  )
}

// Concurrent-discovery variant of phase 2: in addition to live referents,
// also drop refs that became active (non-NULL next) or whose referent was
// cleared concurrently, then close the newly reachable set.
void
ReferenceProcessor::pp2_work_concurrent_discovery(DiscoveredList&    refs_list,
                                                  BoolObjectClosure* is_alive,
                                                  OopClosure*        keep_alive,
                                                  VoidClosure*       complete_gc) {
  assert(!discovery_is_atomic(), "Error");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    HeapWord* next_addr = java_lang_ref_Reference::next_addr(iter.obj());
    oop next = java_lang_ref_Reference::next(iter.obj());
    if ((iter.referent() == NULL || iter.is_referent_alive() ||
         next != NULL)) {
      assert(next->is_oop_or_null(), "bad next field");
      // Remove Reference object from list
      iter.remove();
      // Trace the cohorts
      iter.make_referent_alive();
      // Keep the next field alive too: a non-NULL next means some mutator
      // enqueued/activated the ref concurrently.
      if (UseCompressedOops) {
        keep_alive->do_oop((narrowOop*)next_addr);
      } else {
        keep_alive->do_oop((oop*)next_addr);
      }
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Now close the newly reachable set
  complete_gc->do_void();
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC && (iter.processed() > 0)) {
      gclog_or_tty->print_cr(" Dropped %d active Refs out of %d "
        "Refs in discovered list " INTPTR_FORMAT,
        iter.removed(), iter.processed(), (address)refs_list.head());
    }
  )
}

// Traverse the list and process the referents, by either
// clearing them or keeping them (and their reachable
// closure) alive.
// Phase 3: for each remaining discovered ref, either NULL out the referent
// (clear_referent == true) or keep it (and its closure) alive, leaving the
// refs chained through "discovered" ready for enqueueing.
void
ReferenceProcessor::process_phase3(DiscoveredList&    refs_list,
                                   bool               clear_referent,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc) {
  ResourceMark rm;
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    // Keep the discovered ("next in list") pointer up to date as refs move.
    iter.update_discovered();
    iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
    if (clear_referent) {
      // NULL out referent pointer
      iter.clear_referent();
    } else {
      // keep the referent around
      iter.make_referent_alive();
    }
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Adding %sreference (" INTPTR_FORMAT ": %s) as pending",
                             clear_referent ? "cleared " : "",
                             (void *)iter.obj(), iter.obj()->klass()->internal_name());
    }
    assert(iter.obj()->is_oop(UseConcMarkSweepGC), "Adding a bad reference");
    iter.next();
  }
  // Remember to update the next pointer of the last ref.
  iter.update_discovered();
  // Close the reachable set
  complete_gc->do_void();
}

// Walk a discovered list (terminated by a self-loop), clearing each ref's
// discovered field, then reset the list to empty.
void
ReferenceProcessor::clear_discovered_references(DiscoveredList& refs_list) {
  oop obj = NULL;
  oop next = refs_list.head();
  while (!oopDesc::safe_equals(next, obj)) {
    obj = next;
    next = java_lang_ref_Reference::discovered(obj);
    java_lang_ref_Reference::set_discovered_raw(obj, NULL);
  }
  refs_list.set_head(NULL);
  refs_list.set_length(0);
}

void
ReferenceProcessor::abandon_partial_discovered_list(DiscoveredList& refs_list) {
  clear_discovered_references(refs_list);
}

// Drop every partially discovered list (all kinds, all queues), e.g. when a
// concurrent cycle is abandoned.
void ReferenceProcessor::abandon_partial_discovery() {
  // loop over the lists
  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
      gclog_or_tty->print_cr("\nAbandoning %s discovered list", list_name(i));
    }
    abandon_partial_discovered_list(_discovered_refs[i]);
  }
}

// Parallel task wrapping process_phase1; each worker handles the queue
// matching its WorkerThread id.
class RefProcPhase1Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase1Task(ReferenceProcessor& ref_processor,
                    DiscoveredList      refs_lists[],
                    ReferencePolicy*    policy,
                    bool                marks_oops_alive)
    : ProcessTask(ref_processor, refs_lists, marks_oops_alive),
      _policy(policy)
  { }
  virtual void work(unsigned int i, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    Thread* thr = Thread::current();
    int refs_list_index = ((WorkerThread*)thr)->id();
    _ref_processor.process_phase1(_refs_lists[refs_list_index], _policy,
                                  &is_alive, &keep_alive, &complete_gc);
  }
private:
  ReferencePolicy* _policy;
};

// Parallel task wrapping process_phase2.
class RefProcPhase2Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase2Task(ReferenceProcessor& ref_processor,
                    DiscoveredList      refs_lists[],
                    bool                marks_oops_alive)
    : ProcessTask(ref_processor, refs_lists, marks_oops_alive)
  { }
  virtual void work(unsigned int i, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    _ref_processor.process_phase2(_refs_lists[i],
                                  &is_alive, &keep_alive, &complete_gc);
  }
};

// Parallel task wrapping process_phase3; indexed by work id because
// balance_queues() has compacted the refs into the first n queues.
class RefProcPhase3Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase3Task(ReferenceProcessor& ref_processor,
                    DiscoveredList      refs_lists[],
                    bool                clear_referent,
                    bool                marks_oops_alive)
    : ProcessTask(ref_processor, refs_lists, marks_oops_alive),
      _clear_referent(clear_referent)
  { }
  virtual void work(unsigned int i, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    // Don't use "refs_list_index" calculated in this way because
    // balance_queues() has moved the Ref's into the first n queues.
    // Thread* thr = Thread::current();
    // int refs_list_index = ((WorkerThread*)thr)->id();
    // _ref_processor.process_phase3(_refs_lists[refs_list_index], _clear_referent,
    _ref_processor.process_phase3(_refs_lists[i], _clear_referent,
                                  &is_alive, &keep_alive, &complete_gc);
  }
private:
  bool _clear_referent;
};

// Balances reference queues.
// Move entries from all queues[0, 1, ..., _max_num_q-1] to
// queues[0, 1, ..., _num_q-1] because only the first _num_q
// corresponding to the active workers will be processed.
810 void ReferenceProcessor::balance_queues(DiscoveredList ref_lists[]) 811 { 812 // calculate total length 813 size_t total_refs = 0; 814 if (TraceReferenceGC && PrintGCDetails) { 815 gclog_or_tty->print_cr("\nBalance ref_lists "); 816 } 817 818 for (uint i = 0; i < _max_num_q; ++i) { 819 total_refs += ref_lists[i].length(); 820 if (TraceReferenceGC && PrintGCDetails) { 821 gclog_or_tty->print("%d ", ref_lists[i].length()); 822 } 823 } 824 if (TraceReferenceGC && PrintGCDetails) { 825 gclog_or_tty->print_cr(" = %d", total_refs); 826 } 827 size_t avg_refs = total_refs / _num_q + 1; 828 uint to_idx = 0; 829 for (uint from_idx = 0; from_idx < _max_num_q; from_idx++) { 830 bool move_all = false; 831 if (from_idx >= _num_q) { 832 move_all = ref_lists[from_idx].length() > 0; 833 } 834 while ((ref_lists[from_idx].length() > avg_refs) || 835 move_all) { 836 assert(to_idx < _num_q, "Sanity Check!"); 837 if (ref_lists[to_idx].length() < avg_refs) { 838 // move superfluous refs 839 size_t refs_to_move; 840 // Move all the Ref's if the from queue will not be processed. 841 if (move_all) { 842 refs_to_move = MIN2(ref_lists[from_idx].length(), 843 avg_refs - ref_lists[to_idx].length()); 844 } else { 845 refs_to_move = MIN2(ref_lists[from_idx].length() - avg_refs, 846 avg_refs - ref_lists[to_idx].length()); 847 } 848 849 assert(refs_to_move > 0, "otherwise the code below will fail"); 850 851 oop move_head = ref_lists[from_idx].head(); 852 oop move_tail = move_head; 853 oop new_head = move_head; 854 // find an element to split the list on 855 for (size_t j = 0; j < refs_to_move; ++j) { 856 move_tail = new_head; 857 new_head = java_lang_ref_Reference::discovered(new_head); 858 } 859 860 // Add the chain to the to list. 861 if (ref_lists[to_idx].head() == NULL) { 862 // to list is empty. Make a loop at the end. 
          java_lang_ref_Reference::set_discovered_raw(move_tail, move_tail);
        } else {
          java_lang_ref_Reference::set_discovered_raw(move_tail, ref_lists[to_idx].head());
        }
        ref_lists[to_idx].set_head(move_head);
        ref_lists[to_idx].inc_length(refs_to_move);

        // Remove the chain from the from list.
        if (oopDesc::safe_equals(move_tail, new_head)) {
          // We found the end of the from list.
          ref_lists[from_idx].set_head(NULL);
        } else {
          ref_lists[from_idx].set_head(new_head);
        }
        ref_lists[from_idx].dec_length(refs_to_move);
        if (ref_lists[from_idx].length() == 0) {
          break;
        }
      } else {
        // Current destination queue is full enough; try the next one.
        to_idx = (to_idx + 1) % _num_q;
      }
    }
  }
#ifdef ASSERT
  // Verify that balancing neither lost nor duplicated any references.
  size_t balanced_total_refs = 0;
  for (uint i = 0; i < _max_num_q; ++i) {
    balanced_total_refs += ref_lists[i].length();
    if (TraceReferenceGC && PrintGCDetails) {
      gclog_or_tty->print("%d ", ref_lists[i].length());
    }
  }
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr(" = %d", balanced_total_refs);
    gclog_or_tty->flush();
  }
  assert(total_refs == balanced_total_refs, "Balancing was incomplete");
#endif
}

// Balance each of the per-reference-type discovered queue sets.
void ReferenceProcessor::balance_all_queues() {
  balance_queues(_discoveredSoftRefs);
  balance_queues(_discoveredWeakRefs);
  balance_queues(_discoveredFinalRefs);
  balance_queues(_discoveredPhantomRefs);
  balance_queues(_discoveredCleanerRefs);
}

// Process one set of discovered lists in (up to) three phases and return the
// total number of references processed.  Phase 1 applies the soft-ref policy,
// phase 2 drops refs with live referents, phase 3 processes the remainder.
size_t
ReferenceProcessor::process_discovered_reflist(
  DiscoveredList               refs_lists[],
  ReferencePolicy*             policy,
  bool                         clear_referent,
  BoolObjectClosure*           is_alive,
  OopClosure*                  keep_alive,
  VoidClosure*                 complete_gc,
  AbstractRefProcTaskExecutor* task_executor)
{
  bool mt_processing = task_executor != NULL && _processing_is_mt;
  // If discovery used MT and a dynamic number of GC threads, then
  // the queues must be balanced for correctness if fewer than the
  // maximum number of queues were
used. The number of queue used 924 // during discovery may be different than the number to be used 925 // for processing so don't depend of _num_q < _max_num_q as part 926 // of the test. 927 bool must_balance = _discovery_is_mt; 928 929 if ((mt_processing && ParallelRefProcBalancingEnabled) || 930 must_balance) { 931 balance_queues(refs_lists); 932 } 933 934 size_t total_list_count = total_count(refs_lists); 935 936 if (PrintReferenceGC && PrintGCDetails) { 937 gclog_or_tty->print(", %u refs", total_list_count); 938 } 939 940 // Phase 1 (soft refs only): 941 // . Traverse the list and remove any SoftReferences whose 942 // referents are not alive, but that should be kept alive for 943 // policy reasons. Keep alive the transitive closure of all 944 // such referents. 945 if (policy != NULL) { 946 if (mt_processing) { 947 RefProcPhase1Task phase1(*this, refs_lists, policy, true /*marks_oops_alive*/); 948 task_executor->execute(phase1); 949 } else { 950 for (uint i = 0; i < _max_num_q; i++) { 951 process_phase1(refs_lists[i], policy, 952 is_alive, keep_alive, complete_gc); 953 } 954 } 955 } else { // policy == NULL 956 assert(refs_lists != _discoveredSoftRefs, 957 "Policy must be specified for soft references."); 958 } 959 960 // Phase 2: 961 // . Traverse the list and remove any refs whose referents are alive. 962 if (mt_processing) { 963 RefProcPhase2Task phase2(*this, refs_lists, !discovery_is_atomic() /*marks_oops_alive*/); 964 task_executor->execute(phase2); 965 } else { 966 for (uint i = 0; i < _max_num_q; i++) { 967 process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc); 968 } 969 } 970 971 // Phase 3: 972 // . Traverse the list and process referents as appropriate. 
  if (mt_processing) {
    RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/);
    task_executor->execute(phase3);
  } else {
    for (uint i = 0; i < _max_num_q; i++) {
      process_phase3(refs_lists[i], clear_referent,
                     is_alive, keep_alive, complete_gc);
    }
  }

  return total_list_count;
}

// Scrub every discovered list, dropping Reference objects whose referents
// have been cleared or which are no longer active.  Only meaningful for
// non-atomic (concurrent) discovery; see the assert in the helper below.
void ReferenceProcessor::clean_up_discovered_references() {
  // loop over the lists
  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    // Trace once per reference type (lists are grouped _max_num_q per type).
    if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
      gclog_or_tty->print_cr(
        "\nScrubbing %s discovered list of Null referents",
        list_name(i));
    }
    clean_up_discovered_reflist(_discovered_refs[i]);
  }
}

// Walk one discovered list and unlink entries whose referent is NULL or
// whose Reference is no longer active (next field is non-NULL).
void ReferenceProcessor::clean_up_discovered_reflist(DiscoveredList& refs_list) {
  assert(!discovery_is_atomic(), "Else why call this method?");
  DiscoveredListIterator iter(refs_list, NULL, NULL);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    oop next = java_lang_ref_Reference::next(iter.obj());
    assert(next->is_oop_or_null(), "bad next field");
    // If referent has been cleared or Reference is not active,
    // drop it.
    if (iter.referent() == NULL || next != NULL) {
      debug_only(
        if (PrintGCDetails && TraceReferenceGC) {
          gclog_or_tty->print_cr("clean_up_discovered_list: Dropping Reference: "
                                 INTPTR_FORMAT " with next field: " INTPTR_FORMAT
                                 " and referent: " INTPTR_FORMAT,
                                 (void *)iter.obj(), (void *)next, (void *)iter.referent());
        }
      )
      // Remove Reference object from list
      iter.remove();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC) {
      gclog_or_tty->print(
        " Removed %d Refs with NULL referents out of %d discovered Refs",
        iter.removed(), iter.processed());
    }
  )
}

// Select the discovered list (per reference type, per worker queue) that the
// current thread should append to; may return NULL for REF_OTHER, meaning no
// special treatment is needed.
inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt) {
  uint id = 0;
  // Determine the queue index to use for this object.
  if (_discovery_is_mt) {
    // During a multi-threaded discovery phase,
    // each thread saves to its "own" list.
    Thread* thr = Thread::current();
    id = thr->as_Worker_thread()->id();
  } else {
    // single-threaded discovery, we save in round-robin
    // fashion to each of the lists.
    if (_processing_is_mt) {
      id = next_id();
    }
  }
  assert(0 <= id && id < _max_num_q, "Id is out-of-bounds (call Freud?)");

  // Get the discovered queue to which we will add
  DiscoveredList* list = NULL;
  switch (rt) {
    case REF_OTHER:
      // Unknown reference type, no special treatment
      break;
    case REF_SOFT:
      list = &_discoveredSoftRefs[id];
      break;
    case REF_WEAK:
      list = &_discoveredWeakRefs[id];
      break;
    case REF_FINAL:
      list = &_discoveredFinalRefs[id];
      break;
    case REF_PHANTOM:
      list = &_discoveredPhantomRefs[id];
      break;
    case REF_CLEANER:
      list = &_discoveredCleanerRefs[id];
      break;
    case REF_NONE:
      // we should not reach here if we are an InstanceRefKlass
    default:
      ShouldNotReachHere();
  }
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr("Thread %d gets list " INTPTR_FORMAT, id, list);
  }
  return list;
}

// Thread-safe discovery: atomically claim the Reference by CAS-ing its
// discovered field from NULL, then push it on this thread's private list.
inline void
ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
                                              oop             obj,
                                              HeapWord*       discovered_addr) {
  assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller");
  // First we must make sure this object is only enqueued once. CAS in a non null
  // discovered_addr.
  oop current_head = refs_list.head();
  // The last ref must have its discovered field pointing to itself.
  oop next_discovered = (current_head != NULL) ? current_head : obj;

  oop retest = oopDesc::atomic_compare_exchange_oop(next_discovered, discovered_addr,
                                                    NULL);
  if (retest == NULL) {
    // This thread just won the right to enqueue the object.
    // We have separate lists for enqueueing, so no synchronization
    // is necessary.
    refs_list.set_head(obj);
    refs_list.inc_length(1);

    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Discovered reference (mt) (" INTPTR_FORMAT ": %s)",
                             (void *)obj, obj->klass()->internal_name());
    }
  } else {
    // If retest was non NULL, another thread beat us to it:
    // The reference has already been discovered...
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Already discovered reference (" INTPTR_FORMAT ": %s)",
                             (void *)obj, obj->klass()->internal_name());
    }
  }
}

#ifndef PRODUCT
// Non-atomic (i.e. concurrent) discovery might allow us
// to observe j.l.References with NULL referents, being those
// cleared concurrently by mutators during (or after) discovery.
// Hence: with atomic discovery the referent must be a valid oop,
// otherwise NULL is also acceptable.
void ReferenceProcessor::verify_referent(oop obj) {
  bool da = discovery_is_atomic();
  oop referent = java_lang_ref_Reference::referent(obj);
  assert(da ? referent->is_oop() : referent->is_oop_or_null(),
         err_msg("Bad referent " INTPTR_FORMAT " found in Reference "
                 INTPTR_FORMAT " during %satomic discovery ",
                 (void *)referent, (void *)obj, da ? "" : "non-"));
}
#endif

// We mention two of several possible choices here:
// #0: if the reference object is not in the "originating generation"
//     (or part of the heap being collected, indicated by our "span"
//     we don't treat it specially (i.e. we scan it as we would
//     a normal oop, treating its references as strong references).
//     This means that references can't be discovered unless their
//     referent is also in the same span. This is the simplest,
//     most "local" and most conservative approach, albeit one
//     that may cause weak references to be enqueued least promptly.
//     We call this choice the "ReferenceBasedDiscovery" policy.
// #1: the reference object may be in any generation (span), but if
//     the referent is in the generation (span) being currently collected
//     then we can discover the reference object, provided
//     the object has not already been discovered by
//     a different concurrently running collector (as may be the
//     case, for instance, if the reference object is in CMS and
//     the referent in DefNewGeneration), and provided the processing
//     of this reference object by the current collector will
//     appear atomic to every other collector in the system.
//     (Thus, for instance, a concurrent collector may not
//     discover references in other generations even if the
//     referent is in its own generation). This policy may,
//     in certain cases, enqueue references somewhat sooner than
//     might Policy #0 above, but at marginally increased cost
//     and complexity in processing these references.
//     We call this choice the "ReferentBasedDiscovery" policy.
//
// Returns true iff this Reference object was added to a discovered list.
bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
  // Make sure we are discovering refs (rather than processing discovered refs).
  if (!_discovering_refs || !RegisterReferences) {
    return false;
  }
  DEBUG_ONLY(oopDesc::bs()->verify_safe_oop(obj);)
  // We only discover active references.
  oop next = java_lang_ref_Reference::next(obj);
  if (next != NULL) {   // Ref is no longer active
    return false;
  }

  HeapWord* obj_addr = (HeapWord*)obj;
  if (RefDiscoveryPolicy == ReferenceBasedDiscovery &&
      !_span.contains(obj_addr)) {
    // Reference is not in the originating generation;
    // don't treat it specially (i.e. we want to scan it as a normal
    // object with strong references).
    return false;
  }

  // We only discover references whose referents are not (yet)
  // known to be strongly reachable.
  if (is_alive_non_header() != NULL) {
    verify_referent(obj);
    if (is_alive_non_header()->do_object_b(java_lang_ref_Reference::referent(obj))) {
      return false;  // referent is reachable
    }
  }
  if (rt == REF_SOFT) {
    // For soft refs we can decide now if these are not
    // current candidates for clearing, in which case we
    // can mark through them now, rather than delaying that
    // to the reference-processing phase. Since all current
    // time-stamp policies advance the soft-ref clock only
    // at a major collection cycle, this is always currently
    // accurate.
    if (!_current_soft_ref_policy->should_clear_reference(obj, _soft_ref_timestamp_clock)) {
      return false;
    }
  }

  ResourceMark rm;      // Needed for tracing.

  HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
  const oop  discovered = java_lang_ref_Reference::discovered(obj);
  assert(discovered->is_oop_or_null(), "bad discovered field");
  if (discovered != NULL) {
    // The reference has already been discovered...
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Already discovered reference (" INTPTR_FORMAT ": %s)",
                             (void *)obj, obj->klass()->internal_name());
    }
    if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
      // assumes that an object is not processed twice;
      // if it's been already discovered it must be on another
      // generation's discovered list; so we won't discover it.
      return false;
    } else {
      assert(RefDiscoveryPolicy == ReferenceBasedDiscovery,
             "Unrecognized policy");
      // Check assumption that an object is not potentially
      // discovered twice except by concurrent collectors that potentially
      // trace the same Reference object twice.
      assert(UseConcMarkSweepGC || UseG1GC || UseShenandoahGC,
             "Only possible with a concurrent marking collector");
      return true;
    }
  }

  if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
    verify_referent(obj);
    // Discover if and only if EITHER:
    // .. reference is in our span, OR
    // .. we are an atomic collector and referent is in our span
    if (_span.contains(obj_addr) ||
        (discovery_is_atomic() &&
         _span.contains(java_lang_ref_Reference::referent(obj)))) {
      // should_enqueue = true;
    } else {
      return false;
    }
  } else {
    assert(RefDiscoveryPolicy == ReferenceBasedDiscovery &&
           _span.contains(obj_addr), "code inconsistency");
  }

  // Get the right type of discovered queue head.
  DiscoveredList* list = get_discovered_list(rt);
  if (list == NULL) {
    return false;   // nothing special needs to be done
  }

  if (_discovery_is_mt) {
    add_to_discovered_list_mt(*list, obj, discovered_addr);
  } else {
    // We do a raw store here: the field will be visited later when processing
    // the discovered references.
    oop current_head = list->head();
    // The last ref must have its discovered field pointing to itself.
    oop next_discovered = (current_head != NULL) ? current_head : obj;

    assert(discovered == NULL, "control point invariant");
    oop_store_raw(discovered_addr, next_discovered);
    list->set_head(obj);
    list->inc_length(1);

    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Discovered reference (" INTPTR_FORMAT ": %s)",
                             (void *)obj, obj->klass()->internal_name());
    }
  }
  assert(obj->is_oop(), "Discovered a bad reference");
  verify_referent(obj);
  return true;
}

// Preclean the discovered references by removing those
// whose referents are alive, and by marking from those that
// are not active.
These lists can be handled here
// in any order and, indeed, concurrently.
// Yields to the caller (abandoning remaining work) whenever the
// YieldClosure requests it.
void ReferenceProcessor::preclean_discovered_references(
  BoolObjectClosure* is_alive,
  OopClosure*        keep_alive,
  VoidClosure*       complete_gc,
  YieldClosure*      yield,
  GCTimer*           gc_timer,
  GCId               gc_id) {

  NOT_PRODUCT(verify_ok_to_handle_reflists());

  // Soft references
  {
    GCTraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC,
                   false, gc_timer, gc_id);
    for (uint i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredSoftRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Weak references
  {
    GCTraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC,
                   false, gc_timer, gc_id);
    for (uint i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredWeakRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Final references
  {
    GCTraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC,
                   false, gc_timer, gc_id);
    for (uint i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredFinalRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Phantom references
  {
    GCTraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC,
                   false, gc_timer, gc_id);
    for (uint i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredPhantomRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }

    // Cleaner references. Included in timing for phantom references.
We
    // expect Cleaner references to be temporary, and don't want to deal with
    // possible incompatibilities arising from making it more visible.
    for (uint i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredCleanerRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }
}

// Walk the given discovered ref list, and remove all reference objects
// whose referents are still alive, whose referents are NULL or which
// are not active (have a non-NULL next field). NOTE: When we are
// thus precleaning the ref lists (which happens single-threaded today),
// we do not disable refs discovery to honour the correct semantics of
// java.lang.Reference. As a result, we need to be careful below
// that ref removal steps interleave safely with ref discovery steps
// (in this thread).
void
ReferenceProcessor::preclean_discovered_reflist(DiscoveredList&    refs_list,
                                                BoolObjectClosure* is_alive,
                                                OopClosure*        keep_alive,
                                                VoidClosure*       complete_gc,
                                                YieldClosure*      yield) {
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    oop obj = iter.obj();
    oop next = java_lang_ref_Reference::next(obj);
    if (iter.referent() == NULL || iter.is_referent_alive() ||
        next != NULL) {
      // The referent has been cleared, or is alive, or the Reference is not
      // active; we need to trace and mark its cohort.
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Precleaning Reference (" INTPTR_FORMAT ": %s)",
                               (void *)iter.obj(), iter.obj()->klass()->internal_name());
      }
      // Remove Reference object from list
      iter.remove();
      // Keep alive its cohort.
1380 iter.make_referent_alive(); 1381 if (UseCompressedOops) { 1382 narrowOop* next_addr = (narrowOop*)java_lang_ref_Reference::next_addr(obj); 1383 keep_alive->do_oop(next_addr); 1384 } else { 1385 oop* next_addr = (oop*)java_lang_ref_Reference::next_addr(obj); 1386 keep_alive->do_oop(next_addr); 1387 } 1388 iter.move_to_next(); 1389 } else { 1390 iter.next(); 1391 } 1392 } 1393 // Close the reachable set 1394 complete_gc->do_void(); 1395 1396 NOT_PRODUCT( 1397 if (PrintGCDetails && PrintReferenceGC && (iter.processed() > 0)) { 1398 gclog_or_tty->print_cr(" Dropped %d Refs out of %d " 1399 "Refs in discovered list " INTPTR_FORMAT, 1400 iter.removed(), iter.processed(), (address)refs_list.head()); 1401 } 1402 ) 1403 } 1404 1405 const char* ReferenceProcessor::list_name(uint i) { 1406 assert(i >= 0 && i <= _max_num_q * number_of_subclasses_of_ref(), 1407 "Out of bounds index"); 1408 1409 int j = i / _max_num_q; 1410 switch (j) { 1411 case 0: return "SoftRef"; 1412 case 1: return "WeakRef"; 1413 case 2: return "FinalRef"; 1414 case 3: return "PhantomRef"; 1415 case 4: return "CleanerRef"; 1416 } 1417 ShouldNotReachHere(); 1418 return NULL; 1419 } 1420 1421 #ifndef PRODUCT 1422 void ReferenceProcessor::verify_ok_to_handle_reflists() { 1423 // empty for now 1424 } 1425 #endif 1426 1427 #ifndef PRODUCT 1428 void ReferenceProcessor::clear_discovered_references() { 1429 guarantee(!_discovering_refs, "Discovering refs?"); 1430 for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) { 1431 clear_discovered_references(_discovered_refs[i]); 1432 } 1433 } 1434 1435 #endif // PRODUCT