/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/referenceProcessor.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/jniHandles.hpp"

ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy = NULL;
bool ReferenceProcessor::_pending_list_uses_discovered_field = false;
jlong ReferenceProcessor::_soft_ref_timestamp_clock = 0;

void referenceProcessor_init() {
  ReferenceProcessor::init_statics();
}

void ReferenceProcessor::init_statics() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;

  // Initialize the soft ref timestamp clock.
  _soft_ref_timestamp_clock = now;
  // Also update the soft ref clock in j.l.r.SoftReference
  java_lang_ref_SoftReference::set_clock(_soft_ref_timestamp_clock);

  _always_clear_soft_ref_policy = new AlwaysClearPolicy();
  _default_soft_ref_policy      = new COMPILER2_PRESENT(LRUMaxHeapPolicy())
                                      NOT_COMPILER2(LRUCurrentHeapPolicy());
  if (_always_clear_soft_ref_policy == NULL || _default_soft_ref_policy == NULL) {
    vm_exit_during_initialization("Could not allocate reference policy object");
  }
  guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery ||
            RefDiscoveryPolicy == ReferentBasedDiscovery,
            "Unrecognized RefDiscoveryPolicy");
  _pending_list_uses_discovered_field = JDK_Version::current().pending_list_uses_discovered_field();
}

void ReferenceProcessor::enable_discovery(bool verify_disabled, bool check_no_refs) {
#ifdef ASSERT
  // Verify that we're not currently discovering refs
  assert(!verify_disabled || !_discovering_refs, "nested call?");

  if (check_no_refs) {
    // Verify that the discovered lists are empty
    verify_no_references_recorded();
  }
#endif // ASSERT

  // Someone could have modified the value of the static
  // field in the j.l.r.SoftReference class that holds the
  // soft reference timestamp clock using reflection or
  // Unsafe between GCs.
  // Unconditionally update the static
  // field in ReferenceProcessor here so that we use the new
  // value during reference discovery.

  _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock();
  _discovering_refs = true;
}

ReferenceProcessor::ReferenceProcessor(MemRegion span,
                                       bool      mt_processing,
                                       uint      mt_processing_degree,
                                       bool      mt_discovery,
                                       uint      mt_discovery_degree,
                                       bool      atomic_discovery,
                                       BoolObjectClosure* is_alive_non_header,
                                       bool      discovered_list_needs_barrier) :
  _discovering_refs(false),
  _enqueuing_is_done(false),
  _is_alive_non_header(is_alive_non_header),
  _discovered_list_needs_barrier(discovered_list_needs_barrier),
  _bs(NULL),
  _processing_is_mt(mt_processing),
  _next_id(0)
{
  _span = span;
  _discovery_is_atomic = atomic_discovery;
  _discovery_is_mt     = mt_discovery;
  _num_q               = MAX2(1U, mt_processing_degree);
  _max_num_q           = MAX2(_num_q, mt_discovery_degree);
  _discovered_refs     = NEW_C_HEAP_ARRAY(DiscoveredList,
                                          _max_num_q * number_of_subclasses_of_ref(), mtGC);

  if (_discovered_refs == NULL) {
    vm_exit_during_initialization("Could not allocate RefProc Array");
  }
  _discoveredSoftRefs    = &_discovered_refs[0];
  _discoveredWeakRefs    = &_discoveredSoftRefs[_max_num_q];
  _discoveredFinalRefs   = &_discoveredWeakRefs[_max_num_q];
  _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_q];

  // Initialize all entries to NULL
  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    _discovered_refs[i].set_head(NULL);
    _discovered_refs[i].set_length(0);
  }

  // If we do barriers, cache a copy of the barrier set.
  if (discovered_list_needs_barrier) {
    _bs = Universe::heap()->barrier_set();
  }
  setup_policy(false /* default soft ref policy */);
}

#ifndef PRODUCT
void ReferenceProcessor::verify_no_references_recorded() {
  guarantee(!_discovering_refs, "Discovering refs?");
  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    guarantee(_discovered_refs[i].is_empty(),
              "Found non-empty discovered list");
  }
}
#endif

void ReferenceProcessor::weak_oops_do(OopClosure* f) {
  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    if (UseCompressedOops) {
      f->do_oop((narrowOop*)_discovered_refs[i].adr_head());
    } else {
      f->do_oop((oop*)_discovered_refs[i].adr_head());
    }
  }
}

void ReferenceProcessor::update_soft_ref_master_clock() {
  // Update (advance) the soft ref master clock field. This must be done
  // after processing the soft ref list.

  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  jlong soft_ref_clock = java_lang_ref_SoftReference::clock();
  assert(soft_ref_clock == _soft_ref_timestamp_clock, "soft ref clocks out of sync");

  NOT_PRODUCT(
    if (now < _soft_ref_timestamp_clock) {
      warning("time warp: "INT64_FORMAT" to "INT64_FORMAT,
              _soft_ref_timestamp_clock, now);
    }
  )
  // The values of now and _soft_ref_timestamp_clock are set using
  // javaTimeNanos(), which is guaranteed to be monotonically
  // non-decreasing provided the underlying platform provides such
  // a time source (and it is bug free).
  // In product mode, however, protect ourselves from non-monotonicity.
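  // (A worked example, for illustration: a javaTimeNanos() reading of
  // 1500000000 ns yields now == 1500 ms; if the clock already holds a larger
  // value, the smaller reading is simply ignored by the guard below.)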
  if (now > _soft_ref_timestamp_clock) {
    _soft_ref_timestamp_clock = now;
    java_lang_ref_SoftReference::set_clock(now);
  }
  // Else leave clock stalled at its old value until time progresses
  // past clock value.
}

void ReferenceProcessor::process_discovered_references(
  BoolObjectClosure*           is_alive,
  OopClosure*                  keep_alive,
  VoidClosure*                 complete_gc,
  AbstractRefProcTaskExecutor* task_executor) {
  NOT_PRODUCT(verify_ok_to_handle_reflists());

  assert(!enqueuing_is_done(), "If here enqueuing should not be complete");
  // Stop treating discovered references specially.
  disable_discovery();

  // If discovery was concurrent, someone could have modified
  // the value of the static field in the j.l.r.SoftReference
  // class that holds the soft reference timestamp clock using
  // reflection or Unsafe between when discovery was enabled and
  // now. Unconditionally update the static field in ReferenceProcessor
  // here so that we use the new value during processing of the
  // discovered soft refs.

  _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock();

  bool trace_time = PrintGCDetails && PrintReferenceGC;
  // Soft references
  {
    TraceTime tt("SoftReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  update_soft_ref_master_clock();

  // Weak references
  {
    TraceTime tt("WeakReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredWeakRefs, NULL, true,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  // Final references
  {
    TraceTime tt("FinalReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredFinalRefs, NULL, false,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  // Phantom references
  {
    TraceTime tt("PhantomReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredPhantomRefs, NULL, false,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  // Weak global JNI references. It would make more sense (semantically) to
  // traverse these simultaneously with the regular weak references above, but
  // that is not what the JDK 1.2 specification calls for. See #4126360. Native
  // code can thus use JNI weak references to circumvent the phantom references
  // and resurrect a "post-mortem" object.
  {
    TraceTime tt("JNI Weak Reference", trace_time, false, gclog_or_tty);
    if (task_executor != NULL) {
      task_executor->set_single_threaded_mode();
    }
    process_phaseJNI(is_alive, keep_alive, complete_gc);
  }
}

#ifndef PRODUCT
// Calculate the number of jni handles.
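// (Debug-only helper: it walks the weak JNI handles with an always-true
// liveness closure and a counting closure, so nothing is kept alive or moved.)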
uint ReferenceProcessor::count_jni_refs() {
  class AlwaysAliveClosure: public BoolObjectClosure {
  public:
    virtual bool do_object_b(oop obj) { return true; }
  };

  class CountHandleClosure: public OopClosure {
  private:
    int _count;
  public:
    CountHandleClosure(): _count(0) {}
    void do_oop(oop* unused)       { _count++; }
    void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
    int count() { return _count; }
  };
  CountHandleClosure global_handle_count;
  AlwaysAliveClosure always_alive;
  JNIHandles::weak_oops_do(&always_alive, &global_handle_count);
  return global_handle_count.count();
}
#endif

void ReferenceProcessor::process_phaseJNI(BoolObjectClosure* is_alive,
                                          OopClosure*        keep_alive,
                                          VoidClosure*       complete_gc) {
#ifndef PRODUCT
  if (PrintGCDetails && PrintReferenceGC) {
    unsigned int count = count_jni_refs();
    gclog_or_tty->print(", %u refs", count);
  }
#endif
  JNIHandles::weak_oops_do(is_alive, keep_alive);
  complete_gc->do_void();
}


template <class T>
bool enqueue_discovered_ref_helper(ReferenceProcessor* ref,
                                   AbstractRefProcTaskExecutor* task_executor) {

  // Remember old value of pending references list
  T* pending_list_addr = (T*)java_lang_ref_Reference::pending_list_addr();
  T old_pending_list_value = *pending_list_addr;

  // Enqueue references that are not made active again, and
  // clear the decks for the next collection (cycle).
  ref->enqueue_discovered_reflists((HeapWord*)pending_list_addr, task_executor);
  // Do the oop-check on pending_list_addr missed in
  // enqueue_discovered_reflist. We should probably
  // do a raw oop_check so that future such idempotent
  // oop_stores relying on the oop-check side-effect
  // may be elided automatically and safely without
  // affecting correctness.
  oop_store(pending_list_addr, oopDesc::load_decode_heap_oop(pending_list_addr));

  // Stop treating discovered references specially.
  ref->disable_discovery();

  // Return true if new pending references were added
  return old_pending_list_value != *pending_list_addr;
}

bool ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor) {
  NOT_PRODUCT(verify_ok_to_handle_reflists());
  if (UseCompressedOops) {
    return enqueue_discovered_ref_helper<narrowOop>(this, task_executor);
  } else {
    return enqueue_discovered_ref_helper<oop>(this, task_executor);
  }
}

void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list,
                                                    HeapWord* pending_list_addr) {
  // Given a list of refs linked through the "discovered" field
  // (java.lang.ref.Reference.discovered), self-loop their "next" field
  // thus distinguishing them from active References, then
  // prepend them to the pending list.
  // BKWRD COMPATIBILITY NOTE: For older JDKs (prior to the fix for 4956777),
  // the "next" field is used to chain the pending list, not the discovered
  // field.

  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr("ReferenceProcessor::enqueue_discovered_reflist list "
                           INTPTR_FORMAT, (address)refs_list.head());
  }

  oop obj = NULL;
  oop next_d = refs_list.head();
  if (pending_list_uses_discovered_field()) { // New behaviour
    // Walk down the list, self-looping the next field
    // so that the References are not considered active.
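    // For example, a discovered chain A -> B -> C (with C self-looped to mark
    // the end) leaves A, B and C each self-looped through "next" (inactive),
    // C's discovered field pointing at the old pending-list head, and A
    // installed as the new head of the pending list.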
    while (obj != next_d) {
      obj = next_d;
      assert(obj->is_instanceRef(), "should be reference object");
      next_d = java_lang_ref_Reference::discovered(obj);
      if (TraceReferenceGC && PrintGCDetails) {
        gclog_or_tty->print_cr(" obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT,
                               obj, next_d);
      }
      assert(java_lang_ref_Reference::next(obj) == NULL,
             "Reference not active; should not be discovered");
      // Self-loop next, so as to make Ref not active.
      java_lang_ref_Reference::set_next(obj, obj);
      if (next_d == obj) { // obj is last
        // Swap refs_list into pending_list_addr and
        // set obj's discovered to what we read from pending_list_addr.
        oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
        // Need oop_check on pending_list_addr above;
        // see special oop-check code at the end of
        // enqueue_discovered_reflists() further below.
        java_lang_ref_Reference::set_discovered(obj, old); // old may be NULL
      }
    }
  } else { // Old behaviour
    // Walk down the list, copying the discovered field into
    // the next field and clearing the discovered field.
    while (obj != next_d) {
      obj = next_d;
      assert(obj->is_instanceRef(), "should be reference object");
      next_d = java_lang_ref_Reference::discovered(obj);
      if (TraceReferenceGC && PrintGCDetails) {
        gclog_or_tty->print_cr(" obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT,
                               obj, next_d);
      }
      assert(java_lang_ref_Reference::next(obj) == NULL,
             "The reference should not be enqueued");
      if (next_d == obj) { // obj is last
        // Swap refs_list into pending_list_addr and
        // set obj's next to what we read from pending_list_addr.
        oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
        // Need oop_check on pending_list_addr above;
        // see special oop-check code at the end of
        // enqueue_discovered_reflists() further below.
        if (old == NULL) {
          // obj should be made to point to itself, since
          // pending list was empty.
          java_lang_ref_Reference::set_next(obj, obj);
        } else {
          java_lang_ref_Reference::set_next(obj, old);
        }
      } else {
        java_lang_ref_Reference::set_next(obj, next_d);
      }
      java_lang_ref_Reference::set_discovered(obj, (oop) NULL);
    }
  }
}

// Parallel enqueue task
class RefProcEnqueueTask: public AbstractRefProcTaskExecutor::EnqueueTask {
public:
  RefProcEnqueueTask(ReferenceProcessor& ref_processor,
                     DiscoveredList      discovered_refs[],
                     HeapWord*           pending_list_addr,
                     int                 n_queues)
    : EnqueueTask(ref_processor, discovered_refs,
                  pending_list_addr, n_queues)
  { }

  virtual void work(unsigned int work_id) {
    assert(work_id < (unsigned int)_ref_processor.max_num_q(), "Index out-of-bounds");
    // Simplest first cut: static partitioning.
    int index = work_id;
    // The increment on "index" must correspond to the maximum number of queues
    // (n_queues) with which that ReferenceProcessor was created. That
    // is because of the "clever" way the discovered references lists were
    // allocated and are indexed into.
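    // The flat array is laid out as [subclass][queue]; e.g. with the four
    // j.l.r.Reference subclasses and _n_queues == 8, worker 2 visits
    // indices 2, 10, 18 and 26.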
    assert(_n_queues == (int) _ref_processor.max_num_q(), "Different number not expected");
    for (int j = 0;
         j < ReferenceProcessor::number_of_subclasses_of_ref();
         j++, index += _n_queues) {
      _ref_processor.enqueue_discovered_reflist(
        _refs_lists[index], _pending_list_addr);
      _refs_lists[index].set_head(NULL);
      _refs_lists[index].set_length(0);
    }
  }
};

// Enqueue references that are not made active again
void ReferenceProcessor::enqueue_discovered_reflists(HeapWord* pending_list_addr,
                                                     AbstractRefProcTaskExecutor* task_executor) {
  if (_processing_is_mt && task_executor != NULL) {
    // Parallel code
    RefProcEnqueueTask tsk(*this, _discovered_refs,
                           pending_list_addr, _max_num_q);
    task_executor->execute(tsk);
  } else {
    // Serial code: call the parent class's implementation
    for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
      enqueue_discovered_reflist(_discovered_refs[i], pending_list_addr);
      _discovered_refs[i].set_head(NULL);
      _discovered_refs[i].set_length(0);
    }
  }
}

void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
  _discovered_addr = java_lang_ref_Reference::discovered_addr(_ref);
  oop discovered = java_lang_ref_Reference::discovered(_ref);
  assert(_discovered_addr && discovered->is_oop_or_null(),
         "discovered field is bad");
  _next = discovered;
  _referent_addr = java_lang_ref_Reference::referent_addr(_ref);
  _referent = java_lang_ref_Reference::referent(_ref);
  assert(Universe::heap()->is_in_reserved_or_null(_referent),
         "Wrong oop found in java.lang.Reference object");
  assert(allow_null_referent ?
             _referent->is_oop_or_null()
           : _referent->is_oop(),
         "bad referent");
}

void DiscoveredListIterator::remove() {
  assert(_ref->is_oop(), "Dropping a bad reference");
  oop_store_raw(_discovered_addr, NULL);

  // First _prev_next ref actually points into DiscoveredList (gross).
  oop new_next;
  if (_next == _ref) {
    // At the end of the list, we should make _prev point to itself.
    // If _ref is the first ref, then _prev_next will be in the DiscoveredList,
    // and _prev will be NULL.
    new_next = _prev;
  } else {
    new_next = _next;
  }

  if (UseCompressedOops) {
    // Remove Reference object from list.
    oopDesc::encode_store_heap_oop((narrowOop*)_prev_next, new_next);
  } else {
    // Remove Reference object from list.
    oopDesc::store_heap_oop((oop*)_prev_next, new_next);
  }
  NOT_PRODUCT(_removed++);
  _refs_list.dec_length(1);
}

// Make the Reference object active again.
void DiscoveredListIterator::make_active() {
  // For G1 we don't want to use set_next - it
  // will dirty the card for the next field of
  // the reference object and will fail
  // CT verification.
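  // (The explicit pre-barrier below still records the old "next" value for
  // G1's SATB marking; set_next_raw then clears the field without dirtying
  // the card.)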
  if (UseG1GC) {
    BarrierSet* bs = oopDesc::bs();
    HeapWord* next_addr = java_lang_ref_Reference::next_addr(_ref);

    if (UseCompressedOops) {
      bs->write_ref_field_pre((narrowOop*)next_addr, NULL);
    } else {
      bs->write_ref_field_pre((oop*)next_addr, NULL);
    }
    java_lang_ref_Reference::set_next_raw(_ref, NULL);
  } else {
    java_lang_ref_Reference::set_next(_ref, NULL);
  }
}

void DiscoveredListIterator::clear_referent() {
  oop_store_raw(_referent_addr, NULL);
}

// NOTE: process_phase*() are largely similar, and at a high level
// merely iterate over the extant list applying a predicate to
// each of its elements and possibly removing that element from the
// list and applying some further closures to that element.
// We should consider the possibility of replacing these
// process_phase*() methods by abstracting them into
// a single general iterator invocation that receives appropriate
// closures that accomplish this work.

// (SoftReferences only) Traverse the list and remove any SoftReferences whose
// referents are not alive, but that should be kept alive for policy reasons.
// Keep alive the transitive closure of all such referents.
void
ReferenceProcessor::process_phase1(DiscoveredList&    refs_list,
                                   ReferencePolicy*   policy,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc) {
  assert(policy != NULL, "Must have a non-NULL policy");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  // Decide which softly reachable refs should be kept alive.
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
    bool referent_is_dead = (iter.referent() != NULL) && !iter.is_referent_alive();
    if (referent_is_dead &&
        !policy->should_clear_reference(iter.obj(), _soft_ref_timestamp_clock)) {
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Dropping reference (" INTPTR_FORMAT ": %s" ") by policy",
                               iter.obj(), iter.obj()->klass()->internal_name());
      }
      // Remove Reference object from list
      iter.remove();
      // Make the Reference object active again
      iter.make_active();
      // keep the referent around
      iter.make_referent_alive();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Close the reachable set
  complete_gc->do_void();
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC) {
      gclog_or_tty->print_cr(" Dropped %d dead Refs out of %d "
        "discovered Refs by policy, from list " INTPTR_FORMAT,
        iter.removed(), iter.processed(), (address)refs_list.head());
    }
  )
}

// Traverse the list and remove any Refs that are not active, or
// whose referents are either alive or NULL.
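// (Two variants follow: pp2_work for collectors whose discovery is atomic,
// and pp2_work_concurrent_discovery for concurrent discoverers, which must
// also tolerate NULL referents and keep the "next" field's cohort alive.)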
void
ReferenceProcessor::pp2_work(DiscoveredList&    refs_list,
                             BoolObjectClosure* is_alive,
                             OopClosure*        keep_alive) {
  assert(discovery_is_atomic(), "Error");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
    DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());)
    assert(next == NULL, "Should not discover inactive Reference");
    if (iter.is_referent_alive()) {
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)",
                               iter.obj(), iter.obj()->klass()->internal_name());
      }
      // The referent is reachable after all.
      // Remove Reference object from list.
      iter.remove();
      // Update the referent pointer as necessary: Note that this
      // should not entail any recursive marking because the
      // referent must already have been traversed.
      iter.make_referent_alive();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC && (iter.processed() > 0)) {
      gclog_or_tty->print_cr(" Dropped %d active Refs out of %d "
        "Refs in discovered list " INTPTR_FORMAT,
        iter.removed(), iter.processed(), (address)refs_list.head());
    }
  )
}

void
ReferenceProcessor::pp2_work_concurrent_discovery(DiscoveredList&    refs_list,
                                                  BoolObjectClosure* is_alive,
                                                  OopClosure*        keep_alive,
                                                  VoidClosure*       complete_gc) {
  assert(!discovery_is_atomic(), "Error");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    HeapWord* next_addr = java_lang_ref_Reference::next_addr(iter.obj());
    oop next = java_lang_ref_Reference::next(iter.obj());
    if ((iter.referent() == NULL || iter.is_referent_alive() ||
         next != NULL)) {
      assert(next->is_oop_or_null(), "bad next field");
      // Remove Reference object from list
      iter.remove();
      // Trace the cohorts
      iter.make_referent_alive();
      if (UseCompressedOops) {
        keep_alive->do_oop((narrowOop*)next_addr);
      } else {
        keep_alive->do_oop((oop*)next_addr);
      }
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Now close the newly reachable set
  complete_gc->do_void();
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC && (iter.processed() > 0)) {
      gclog_or_tty->print_cr(" Dropped %d active Refs out of %d "
        "Refs in discovered list " INTPTR_FORMAT,
        iter.removed(), iter.processed(), (address)refs_list.head());
    }
  )
}

// Traverse the list and process the referents, by either
// clearing them or keeping them (and their reachable
// closure) alive.
void
ReferenceProcessor::process_phase3(DiscoveredList&    refs_list,
                                   bool               clear_referent,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc) {
  ResourceMark rm;
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.update_discovered();
    iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
    if (clear_referent) {
      // NULL out referent pointer
      iter.clear_referent();
    } else {
      // keep the referent around
      iter.make_referent_alive();
    }
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Adding %sreference (" INTPTR_FORMAT ": %s) as pending",
                             clear_referent ?
"cleared " : "", 668 iter.obj(), iter.obj()->klass()->internal_name()); 669 } 670 assert(iter.obj()->is_oop(UseConcMarkSweepGC), "Adding a bad reference"); 671 iter.next(); 672 } 673 // Remember to update the next pointer of the last ref. 674 iter.update_discovered(); 675 // Close the reachable set 676 complete_gc->do_void(); 677 } 678 679 void 680 ReferenceProcessor::clear_discovered_references(DiscoveredList& refs_list) { 681 oop obj = NULL; 682 oop next = refs_list.head(); 683 while (next != obj) { 684 obj = next; 685 next = java_lang_ref_Reference::discovered(obj); 686 java_lang_ref_Reference::set_discovered_raw(obj, NULL); 687 } 688 refs_list.set_head(NULL); 689 refs_list.set_length(0); 690 } 691 692 void 693 ReferenceProcessor::abandon_partial_discovered_list(DiscoveredList& refs_list) { 694 clear_discovered_references(refs_list); 695 } 696 697 void ReferenceProcessor::abandon_partial_discovery() { 698 // loop over the lists 699 for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) { 700 if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) { 701 gclog_or_tty->print_cr("\nAbandoning %s discovered list", list_name(i)); 702 } 703 abandon_partial_discovered_list(_discovered_refs[i]); 704 } 705 } 706 707 class RefProcPhase1Task: public AbstractRefProcTaskExecutor::ProcessTask { 708 public: 709 RefProcPhase1Task(ReferenceProcessor& ref_processor, 710 DiscoveredList refs_lists[], 711 ReferencePolicy* policy, 712 bool marks_oops_alive) 713 : ProcessTask(ref_processor, refs_lists, marks_oops_alive), 714 _policy(policy) 715 { } 716 virtual void work(unsigned int i, BoolObjectClosure& is_alive, 717 OopClosure& keep_alive, 718 VoidClosure& complete_gc) 719 { 720 Thread* thr = Thread::current(); 721 int refs_list_index = ((WorkerThread*)thr)->id(); 722 _ref_processor.process_phase1(_refs_lists[refs_list_index], _policy, 723 &is_alive, &keep_alive, &complete_gc); 724 } 725 private: 726 ReferencePolicy* _policy; 727 }; 728 729 class RefProcPhase2Task: public AbstractRefProcTaskExecutor::ProcessTask { 730 public: 731 RefProcPhase2Task(ReferenceProcessor& ref_processor, 732 DiscoveredList refs_lists[], 733 bool marks_oops_alive) 734 : ProcessTask(ref_processor, refs_lists, marks_oops_alive) 735 { } 736 virtual void work(unsigned int i, BoolObjectClosure& is_alive, 737 OopClosure& keep_alive, 738 VoidClosure& complete_gc) 739 { 740 _ref_processor.process_phase2(_refs_lists[i], 741 &is_alive, &keep_alive, &complete_gc); 742 } 743 }; 744 745 class RefProcPhase3Task: public AbstractRefProcTaskExecutor::ProcessTask { 746 public: 747 RefProcPhase3Task(ReferenceProcessor& ref_processor, 748 DiscoveredList refs_lists[], 749 bool clear_referent, 750 bool marks_oops_alive) 751 : ProcessTask(ref_processor, refs_lists, marks_oops_alive), 752 _clear_referent(clear_referent) 753 { } 754 virtual void work(unsigned int i, BoolObjectClosure& is_alive, 755 OopClosure& keep_alive, 756 VoidClosure& complete_gc) 757 { 758 // Don't use "refs_list_index" calculated in this way because 759 // balance_queues() has moved the Ref's into the first n queues. 
    // Thread* thr = Thread::current();
    // int refs_list_index = ((WorkerThread*)thr)->id();
    // _ref_processor.process_phase3(_refs_lists[refs_list_index], _clear_referent,
    _ref_processor.process_phase3(_refs_lists[i], _clear_referent,
                                  &is_alive, &keep_alive, &complete_gc);
  }
private:
  bool _clear_referent;
};

void ReferenceProcessor::set_discovered(oop ref, oop value) {
  if (_discovered_list_needs_barrier) {
    java_lang_ref_Reference::set_discovered(ref, value);
  } else {
    java_lang_ref_Reference::set_discovered_raw(ref, value);
  }
}

// Balances reference queues.
// Move entries from all queues[0, 1, ..., _max_num_q-1] to
// queues[0, 1, ..., _num_q-1] because only the first _num_q
// corresponding to the active workers will be processed.
void ReferenceProcessor::balance_queues(DiscoveredList ref_lists[])
{
  // calculate total length
  size_t total_refs = 0;
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr("\nBalance ref_lists ");
  }

  for (uint i = 0; i < _max_num_q; ++i) {
    total_refs += ref_lists[i].length();
    if (TraceReferenceGC && PrintGCDetails) {
      gclog_or_tty->print("%d ", ref_lists[i].length());
    }
  }
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr(" = %d", total_refs);
  }
  size_t avg_refs = total_refs / _num_q + 1;
  uint to_idx = 0;
  for (uint from_idx = 0; from_idx < _max_num_q; from_idx++) {
    bool move_all = false;
    if (from_idx >= _num_q) {
      move_all = ref_lists[from_idx].length() > 0;
    }
    while ((ref_lists[from_idx].length() > avg_refs) ||
           move_all) {
      assert(to_idx < _num_q, "Sanity Check!");
      if (ref_lists[to_idx].length() < avg_refs) {
        // move superfluous refs
        size_t refs_to_move;
        // Move all the Refs if the from queue will not be processed.
        if (move_all) {
          refs_to_move = MIN2(ref_lists[from_idx].length(),
                              avg_refs - ref_lists[to_idx].length());
        } else {
          refs_to_move = MIN2(ref_lists[from_idx].length() - avg_refs,
                              avg_refs - ref_lists[to_idx].length());
        }

        assert(refs_to_move > 0, "otherwise the code below will fail");

        oop move_head = ref_lists[from_idx].head();
        oop move_tail = move_head;
        oop new_head  = move_head;
        // find an element to split the list on
        for (size_t j = 0; j < refs_to_move; ++j) {
          move_tail = new_head;
          new_head = java_lang_ref_Reference::discovered(new_head);
        }

        // Add the chain to the to list.
        if (ref_lists[to_idx].head() == NULL) {
          // to list is empty. Make a loop at the end.
          set_discovered(move_tail, move_tail);
        } else {
          set_discovered(move_tail, ref_lists[to_idx].head());
        }
        ref_lists[to_idx].set_head(move_head);
        ref_lists[to_idx].inc_length(refs_to_move);

        // Remove the chain from the from list.
        if (move_tail == new_head) {
          // We found the end of the from list.
          ref_lists[from_idx].set_head(NULL);
        } else {
          ref_lists[from_idx].set_head(new_head);
        }
        ref_lists[from_idx].dec_length(refs_to_move);
        if (ref_lists[from_idx].length() == 0) {
          break;
        }
      } else {
        to_idx = (to_idx + 1) % _num_q;
      }
    }
  }
#ifdef ASSERT
  size_t balanced_total_refs = 0;
  for (uint i = 0; i < _max_num_q; ++i) {
    balanced_total_refs += ref_lists[i].length();
    if (TraceReferenceGC && PrintGCDetails) {
      gclog_or_tty->print("%d ", ref_lists[i].length());
    }
  }
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr(" = %d", balanced_total_refs);
    gclog_or_tty->flush();
  }
  assert(total_refs == balanced_total_refs, "Balancing was incomplete");
#endif
}

void ReferenceProcessor::balance_all_queues() {
  balance_queues(_discoveredSoftRefs);
  balance_queues(_discoveredWeakRefs);
  balance_queues(_discoveredFinalRefs);
  balance_queues(_discoveredPhantomRefs);
}

void
ReferenceProcessor::process_discovered_reflist(
  DiscoveredList               refs_lists[],
  ReferencePolicy*             policy,
  bool                         clear_referent,
  BoolObjectClosure*           is_alive,
  OopClosure*                  keep_alive,
  VoidClosure*                 complete_gc,
  AbstractRefProcTaskExecutor* task_executor)
{
  bool mt_processing = task_executor != NULL && _processing_is_mt;
  // If discovery used MT and a dynamic number of GC threads, then
  // the queues must be balanced for correctness if fewer than the
  // maximum number of queues were used. The number of queues used
  // during discovery may be different from the number to be used
  // for processing, so don't depend on _num_q < _max_num_q as part
  // of the test.
  bool must_balance = _discovery_is_mt;

  if ((mt_processing && ParallelRefProcBalancingEnabled) ||
      must_balance) {
    balance_queues(refs_lists);
  }
  if (PrintReferenceGC && PrintGCDetails) {
    size_t total = 0;
    for (uint i = 0; i < _max_num_q; ++i) {
      total += refs_lists[i].length();
    }
    gclog_or_tty->print(", %u refs", total);
  }

  // Phase 1 (soft refs only):
  // . Traverse the list and remove any SoftReferences whose
  //   referents are not alive, but that should be kept alive for
  //   policy reasons. Keep alive the transitive closure of all
  //   such referents.
  if (policy != NULL) {
    if (mt_processing) {
      RefProcPhase1Task phase1(*this, refs_lists, policy, true /*marks_oops_alive*/);
      task_executor->execute(phase1);
    } else {
      for (uint i = 0; i < _max_num_q; i++) {
        process_phase1(refs_lists[i], policy,
                       is_alive, keep_alive, complete_gc);
      }
    }
  } else { // policy == NULL
    assert(refs_lists != _discoveredSoftRefs,
           "Policy must be specified for soft references.");
  }

  // Phase 2:
  // . Traverse the list and remove any refs whose referents are alive.
  if (mt_processing) {
    RefProcPhase2Task phase2(*this, refs_lists, !discovery_is_atomic() /*marks_oops_alive*/);
    task_executor->execute(phase2);
  } else {
    for (uint i = 0; i < _max_num_q; i++) {
      process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc);
    }
  }

  // Phase 3:
  // . Traverse the list and process referents as appropriate.
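  //   ("As appropriate" follows the clear_referent flag: true for Soft and
  //   Weak references, false for Final and Phantom references; see the calls
  //   in process_discovered_references() above.)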
  if (mt_processing) {
    RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/);
    task_executor->execute(phase3);
  } else {
    for (uint i = 0; i < _max_num_q; i++) {
      process_phase3(refs_lists[i], clear_referent,
                     is_alive, keep_alive, complete_gc);
    }
  }
}

void ReferenceProcessor::clean_up_discovered_references() {
  // loop over the lists
  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
      gclog_or_tty->print_cr(
        "\nScrubbing %s discovered list of Null referents",
        list_name(i));
    }
    clean_up_discovered_reflist(_discovered_refs[i]);
  }
}

void ReferenceProcessor::clean_up_discovered_reflist(DiscoveredList& refs_list) {
  assert(!discovery_is_atomic(), "Else why call this method?");
  DiscoveredListIterator iter(refs_list, NULL, NULL);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    oop next = java_lang_ref_Reference::next(iter.obj());
    assert(next->is_oop_or_null(), "bad next field");
    // If referent has been cleared or Reference is not active,
    // drop it.
    if (iter.referent() == NULL || next != NULL) {
      debug_only(
        if (PrintGCDetails && TraceReferenceGC) {
          gclog_or_tty->print_cr("clean_up_discovered_list: Dropping Reference: "
            INTPTR_FORMAT " with next field: " INTPTR_FORMAT
            " and referent: " INTPTR_FORMAT,
            iter.obj(), next, iter.referent());
        }
      )
      // Remove Reference object from list
      iter.remove();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC) {
      gclog_or_tty->print(
        " Removed %d Refs with NULL referents out of %d discovered Refs",
        iter.removed(), iter.processed());
    }
  )
}

inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt) {
  uint id = 0;
  // Determine the queue index to use for this object.
  if (_discovery_is_mt) {
    // During a multi-threaded discovery phase,
    // each thread saves to its "own" list.
    Thread* thr = Thread::current();
    id = thr->as_Worker_thread()->id();
  } else {
    // single-threaded discovery, we save in round-robin
    // fashion to each of the lists.
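    // (For example, with MT processing and _num_q == 4, successive
    // single-threaded discoveries land on queues 0, 1, 2, 3, 0, 1, ...,
    // assuming next_id() cycles through [0, _num_q).)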
    if (_processing_is_mt) {
      id = next_id();
    }
  }
  assert(0 <= id && id < _max_num_q, "Id is out-of-bounds (call Freud?)");

  // Get the discovered queue to which we will add
  DiscoveredList* list = NULL;
  switch (rt) {
    case REF_OTHER:
      // Unknown reference type, no special treatment
      break;
    case REF_SOFT:
      list = &_discoveredSoftRefs[id];
      break;
    case REF_WEAK:
      list = &_discoveredWeakRefs[id];
      break;
    case REF_FINAL:
      list = &_discoveredFinalRefs[id];
      break;
    case REF_PHANTOM:
      list = &_discoveredPhantomRefs[id];
      break;
    case REF_NONE:
      // we should not reach here if we are an InstanceRefKlass
    default:
      ShouldNotReachHere();
  }
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr("Thread %d gets list " INTPTR_FORMAT, id, list);
  }
  return list;
}

inline void
ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
                                              oop             obj,
                                              HeapWord*       discovered_addr) {
  assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller");
  // First we must make sure this object is only enqueued once. CAS in a non-null
  // discovered_addr.
  oop current_head = refs_list.head();
  // The last ref must have its discovered field pointing to itself.
  oop next_discovered = (current_head != NULL) ? current_head : obj;

  // Note: In the case of G1, this specific pre-barrier is strictly
  // not necessary because the only case we are interested in
  // here is when *discovered_addr is NULL (see the CAS further below),
  // so this will expand to nothing. As a result, we have manually
  // elided this out for G1, but left in the test for some future
  // collector that might have need for a pre-barrier here, e.g.:-
  // _bs->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered);
  assert(!_discovered_list_needs_barrier || UseG1GC,
         "Need to check non-G1 collector: "
         "may need a pre-write-barrier for CAS from NULL below");
  oop retest = oopDesc::atomic_compare_exchange_oop(next_discovered, discovered_addr,
                                                    NULL);
  if (retest == NULL) {
    // This thread just won the right to enqueue the object.
    // We have separate lists for enqueueing, so no synchronization
    // is necessary.
    refs_list.set_head(obj);
    refs_list.inc_length(1);
    if (_discovered_list_needs_barrier) {
      _bs->write_ref_field((void*)discovered_addr, next_discovered);
    }

    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Discovered reference (mt) (" INTPTR_FORMAT ": %s)",
                             obj, obj->klass()->internal_name());
    }
  } else {
    // If retest was non-NULL, another thread beat us to it:
    // The reference has already been discovered...
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Already discovered reference (" INTPTR_FORMAT ": %s)",
                             obj, obj->klass()->internal_name());
    }
  }
}

#ifndef PRODUCT
// Non-atomic (i.e. concurrent) discovery might allow us
// to observe j.l.References with NULL referents, being those
// cleared concurrently by mutators during (or after) discovery.
void ReferenceProcessor::verify_referent(oop obj) {
  bool da = discovery_is_atomic();
  oop referent = java_lang_ref_Reference::referent(obj);
  assert(da ?
             referent->is_oop()
           : referent->is_oop_or_null(),
         err_msg("Bad referent " INTPTR_FORMAT " found in Reference "
                 INTPTR_FORMAT " during %satomic discovery ",
                 (intptr_t)referent, (intptr_t)obj, da ? "" : "non-"));
}
#endif

// We mention two of several possible choices here:
// #0: if the reference object is not in the "originating generation"
//     (or part of the heap being collected, indicated by our "span")
//     we don't treat it specially (i.e. we scan it as we would
//     a normal oop, treating its references as strong references).
//     This means that references can't be discovered unless their
//     referent is also in the same span. This is the simplest,
//     most "local" and most conservative approach, albeit one
//     that may cause weak references to be enqueued least promptly.
//     We call this choice the "ReferenceBasedDiscovery" policy.
// #1: the reference object may be in any generation (span), but if
//     the referent is in the generation (span) being currently collected
//     then we can discover the reference object, provided
//     the object has not already been discovered by
//     a different concurrently running collector (as may be the
//     case, for instance, if the reference object is in CMS and
//     the referent in DefNewGeneration), and provided the processing
//     of this reference object by the current collector will
//     appear atomic to every other collector in the system.
//     (Thus, for instance, a concurrent collector may not
//     discover references in other generations even if the
//     referent is in its own generation). This policy may,
//     in certain cases, enqueue references somewhat sooner than
//     might Policy #0 above, but at marginally increased cost
//     and complexity in processing these references.
//     We call this choice the "ReferentBasedDiscovery" policy.
bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
  // Make sure we are discovering refs (rather than processing discovered refs).
  if (!_discovering_refs || !RegisterReferences) {
    return false;
  }
  // We only discover active references.
  oop next = java_lang_ref_Reference::next(obj);
  if (next != NULL) { // Ref is no longer active
    return false;
  }

  HeapWord* obj_addr = (HeapWord*)obj;
  if (RefDiscoveryPolicy == ReferenceBasedDiscovery &&
      !_span.contains(obj_addr)) {
    // Reference is not in the originating generation;
    // don't treat it specially (i.e. we want to scan it as a normal
    // object with strong references).
    return false;
  }

  // We only discover references whose referents are not (yet)
  // known to be strongly reachable.
  if (is_alive_non_header() != NULL) {
    verify_referent(obj);
    if (is_alive_non_header()->do_object_b(java_lang_ref_Reference::referent(obj))) {
      return false; // referent is reachable
    }
  }
  if (rt == REF_SOFT) {
    // For soft refs we can decide now if these are not
    // current candidates for clearing, in which case we
    // can mark through them now, rather than delaying that
    // to the reference-processing phase. Since all current
    // time-stamp policies advance the soft-ref clock only
    // at a major collection cycle, this is always currently
    // accurate.
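    // (For instance, the LRU-based policies keep a soft reference whose
    // referent was used recently relative to the soft-ref clock, so such a
    // reference is simply not discovered and gets marked through normally.)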
    if (!_current_soft_ref_policy->should_clear_reference(obj, _soft_ref_timestamp_clock)) {
      return false;
    }
  }

  ResourceMark rm; // Needed for tracing.

  HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
  const oop discovered = java_lang_ref_Reference::discovered(obj);
  assert(discovered->is_oop_or_null(), "bad discovered field");
  if (discovered != NULL) {
    // The reference has already been discovered...
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Already discovered reference (" INTPTR_FORMAT ": %s)",
                             obj, obj->klass()->internal_name());
    }
    if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
      // assumes that an object is not processed twice;
      // if it's been already discovered it must be on another
      // generation's discovered list; so we won't discover it.
      return false;
    } else {
      assert(RefDiscoveryPolicy == ReferenceBasedDiscovery,
             "Unrecognized policy");
      // Check assumption that an object is not potentially
      // discovered twice except by concurrent collectors that potentially
      // trace the same Reference object twice.
      assert(UseConcMarkSweepGC || UseG1GC,
             "Only possible with a concurrent marking collector");
      return true;
    }
  }

  if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
    verify_referent(obj);
    // Discover if and only if EITHER:
    // .. reference is in our span, OR
    // .. we are an atomic collector and referent is in our span
    if (_span.contains(obj_addr) ||
        (discovery_is_atomic() &&
         _span.contains(java_lang_ref_Reference::referent(obj)))) {
      // should_enqueue = true;
    } else {
      return false;
    }
  } else {
    assert(RefDiscoveryPolicy == ReferenceBasedDiscovery &&
           _span.contains(obj_addr), "code inconsistency");
  }

  // Get the right type of discovered queue head.
  DiscoveredList* list = get_discovered_list(rt);
  if (list == NULL) {
    return false; // nothing special needs to be done
  }

  if (_discovery_is_mt) {
    add_to_discovered_list_mt(*list, obj, discovered_addr);
  } else {
    // If "_discovered_list_needs_barrier", we do write barriers when
    // updating the discovered reference list. Otherwise, we do a raw store
    // here: the field will be visited later when processing the discovered
    // references.
    oop current_head = list->head();
    // The last ref must have its discovered field pointing to itself.
    oop next_discovered = (current_head != NULL) ? current_head : obj;

    // As in the case further above, since we are over-writing a NULL
    // pre-value, we can safely elide the pre-barrier here for the case of G1.
    // e.g.:- _bs->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered);
    assert(discovered == NULL, "control point invariant");
    assert(!_discovered_list_needs_barrier || UseG1GC,
           "For non-G1 collector, may need a pre-write-barrier for CAS from NULL below");
    oop_store_raw(discovered_addr, next_discovered);
    if (_discovered_list_needs_barrier) {
      _bs->write_ref_field((void*)discovered_addr, next_discovered);
    }
    list->set_head(obj);
    list->inc_length(1);

    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Discovered reference (" INTPTR_FORMAT ": %s)",
                             obj, obj->klass()->internal_name());
    }
  }
  assert(obj->is_oop(), "Discovered a bad reference");
  verify_referent(obj);
  return true;
}

// Preclean the discovered references by removing those
// whose referents are alive, and by marking from those that
// are not active. These lists can be handled here
// in any order and, indeed, concurrently.
void ReferenceProcessor::preclean_discovered_references(
  BoolObjectClosure* is_alive,
  OopClosure* keep_alive,
  VoidClosure* complete_gc,
  YieldClosure* yield) {

  NOT_PRODUCT(verify_ok_to_handle_reflists());

  // Soft references
  {
    TraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (uint i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredSoftRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Weak references
  {
    TraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (uint i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredWeakRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Final references
  {
    TraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (uint i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredFinalRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Phantom references
  {
    TraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (uint i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredPhantomRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }
}

// Walk the given discovered ref list, and remove all reference objects
// whose referents are still alive, whose referents are NULL or which
// are not active (have a non-NULL next field). NOTE: When we are
// thus precleaning the ref lists (which happens single-threaded today),
// we do not disable refs discovery to honour the correct semantics of
// java.lang.Reference. As a result, we need to be careful below
// that ref removal steps interleave safely with ref discovery steps
// (in this thread).
void
ReferenceProcessor::preclean_discovered_reflist(DiscoveredList&    refs_list,
                                                BoolObjectClosure* is_alive,
                                                OopClosure*        keep_alive,
                                                VoidClosure*       complete_gc,
                                                YieldClosure*      yield) {
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    oop obj = iter.obj();
    oop next = java_lang_ref_Reference::next(obj);
    if (iter.referent() == NULL || iter.is_referent_alive() ||
        next != NULL) {
      // The referent has been cleared, or is alive, or the Reference is not
      // active; we need to trace and mark its cohort.
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Precleaning Reference (" INTPTR_FORMAT ": %s)",
                               iter.obj(), iter.obj()->klass()->internal_name());
      }
      // Remove Reference object from list
      iter.remove();
      // Keep alive its cohort.
      iter.make_referent_alive();
      if (UseCompressedOops) {
        narrowOop* next_addr = (narrowOop*)java_lang_ref_Reference::next_addr(obj);
        keep_alive->do_oop(next_addr);
      } else {
        oop* next_addr = (oop*)java_lang_ref_Reference::next_addr(obj);
        keep_alive->do_oop(next_addr);
      }
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Close the reachable set
  complete_gc->do_void();

  NOT_PRODUCT(
    if (PrintGCDetails && PrintReferenceGC && (iter.processed() > 0)) {
      gclog_or_tty->print_cr(" Dropped %d Refs out of %d "
        "Refs in discovered list " INTPTR_FORMAT,
        iter.removed(), iter.processed(), (address)refs_list.head());
    }
  )
}

const char* ReferenceProcessor::list_name(uint i) {
  // Note: i is unsigned, so i >= 0 is vacuous; the index must be strictly
  // less than the number of discovered lists.
  assert(i < _max_num_q * number_of_subclasses_of_ref(),
         "Out of bounds index");

  int j = i / _max_num_q;
  switch (j) {
    case 0: return "SoftRef";
    case 1: return "WeakRef";
    case 2: return "FinalRef";
    case 3: return "PhantomRef";
  }
  ShouldNotReachHere();
  return NULL;
}

#ifndef PRODUCT
void ReferenceProcessor::verify_ok_to_handle_reflists() {
  // empty for now
}
#endif

#ifndef PRODUCT
void ReferenceProcessor::clear_discovered_references() {
  guarantee(!_discovering_refs, "Discovering refs?");
  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    clear_discovered_references(_discovered_refs[i]);
  }
}

#endif // PRODUCT