/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/referenceProcessor.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/jniHandles.hpp"

ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy      = NULL;
bool             ReferenceProcessor::_pending_list_uses_discovered_field = false;

void referenceProcessor_init() {
  ReferenceProcessor::init_statics();
}

void ReferenceProcessor::init_statics() {
  // Initialize the master soft ref clock.
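  // (The clock is a static field on java.lang.ref.SoftReference that the
  // VM advances at collection time; SoftReference.get() stamps each
  // instance, and the LRU policies below clear a reference roughly when
  // the interval between the clock and its timestamp exceeds the
  // policy's computed threshold.)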
  java_lang_ref_SoftReference::set_clock(os::javaTimeMillis());

  _always_clear_soft_ref_policy = new AlwaysClearPolicy();
  _default_soft_ref_policy      = new COMPILER2_PRESENT(LRUMaxHeapPolicy())
                                      NOT_COMPILER2(LRUCurrentHeapPolicy());
  if (_always_clear_soft_ref_policy == NULL || _default_soft_ref_policy == NULL) {
    vm_exit_during_initialization("Could not allocate reference policy object");
  }
  guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery ||
            RefDiscoveryPolicy == ReferentBasedDiscovery,
            "Unrecognized RefDiscoveryPolicy");
  _pending_list_uses_discovered_field = JDK_Version::current().pending_list_uses_discovered_field();
}

ReferenceProcessor::ReferenceProcessor(MemRegion span,
                                       bool      mt_processing,
                                       int       mt_processing_degree,
                                       bool      mt_discovery,
                                       int       mt_discovery_degree,
                                       bool      atomic_discovery,
                                       BoolObjectClosure* is_alive_non_header,
                                       bool      discovered_list_needs_barrier) :
  _discovering_refs(false),
  _enqueuing_is_done(false),
  _is_alive_non_header(is_alive_non_header),
  _discovered_list_needs_barrier(discovered_list_needs_barrier),
  _bs(NULL),
  _processing_is_mt(mt_processing),
  _next_id(0)
{
  _span = span;
  _discovery_is_atomic = atomic_discovery;
  _discovery_is_mt     = mt_discovery;
  _num_q               = MAX2(1, mt_processing_degree);
  _max_num_q           = MAX2(_num_q, mt_discovery_degree);
  _discoveredSoftRefs = NEW_C_HEAP_ARRAY(DiscoveredList,
                                         _max_num_q * number_of_subclasses_of_ref());
  if (_discoveredSoftRefs == NULL) {
    vm_exit_during_initialization("Could not allocate RefProc Array");
  }
  _discoveredWeakRefs    = &_discoveredSoftRefs[_max_num_q];
  _discoveredFinalRefs   = &_discoveredWeakRefs[_max_num_q];
  _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_q];
  // Initialize all entries to NULL
  for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    _discoveredSoftRefs[i].set_head(NULL);
    _discoveredSoftRefs[i].set_length(0);
  }
  // If we do barriers, cache a copy of the barrier set.
  if (discovered_list_needs_barrier) {
    _bs = Universe::heap()->barrier_set();
  }
  setup_policy(false /* default soft ref policy */);
}

#ifndef PRODUCT
void ReferenceProcessor::verify_no_references_recorded() {
  guarantee(!_discovering_refs, "Discovering refs?");
  for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    guarantee(_discoveredSoftRefs[i].is_empty(),
              "Found non-empty discovered list");
  }
}
#endif

void ReferenceProcessor::weak_oops_do(OopClosure* f) {
  // An alternative implementation of this routine
  // could use the following nested loop:
  //
  // for (int i = 0; i < number_of_subclasses_of_ref(); i++) {
  //   for (int j = 0; j < _num_q; j++) {
  //     int index = i * _max_num_q + j;

  for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    if (UseCompressedOops) {
      f->do_oop((narrowOop*)_discoveredSoftRefs[i].adr_head());
    } else {
      f->do_oop((oop*)_discoveredSoftRefs[i].adr_head());
    }
  }
}

void ReferenceProcessor::update_soft_ref_master_clock() {
  // Update (advance) the soft ref master clock field. This must be done
  // after processing the soft ref list.
  jlong now = os::javaTimeMillis();
  jlong clock = java_lang_ref_SoftReference::clock();
  NOT_PRODUCT(
  if (now < clock) {
    warning("time warp: " INT64_FORMAT " to " INT64_FORMAT, clock, now);
  }
  )
  // In product mode, protect ourselves from system time being adjusted
  // externally and going backward; see note in the implementation of
  // GenCollectedHeap::time_since_last_gc() for the right way to fix
  // this uniformly throughout the VM; see bug-id 4741166. XXX
  if (now > clock) {
    java_lang_ref_SoftReference::set_clock(now);
  }
  // Else leave clock stalled at its old value until time progresses
  // past clock value.
}

void ReferenceProcessor::process_discovered_references(
  BoolObjectClosure*           is_alive,
  OopClosure*                  keep_alive,
  VoidClosure*                 complete_gc,
  AbstractRefProcTaskExecutor* task_executor) {
  NOT_PRODUCT(verify_ok_to_handle_reflists());

  assert(!enqueuing_is_done(), "If here enqueuing should not be complete");
  // Stop treating discovered references specially.
  disable_discovery();

  bool trace_time = PrintGCDetails && PrintReferenceGC;
  // Soft references
  {
    TraceTime tt("SoftReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  update_soft_ref_master_clock();

  // Weak references
  {
    TraceTime tt("WeakReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredWeakRefs, NULL, true,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  // Final references
  {
    TraceTime tt("FinalReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredFinalRefs, NULL, false,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  // Phantom references
  {
    TraceTime tt("PhantomReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredPhantomRefs, NULL, false,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  // Weak global JNI references. It would make more sense (semantically) to
  // traverse these simultaneously with the regular weak references above, but
  // that is not how the JDK 1.2 specification is written; see #4126360. Native
  // code can thus use JNI weak references to circumvent the phantom references
  // and resurrect a "post-mortem" object.
  {
    TraceTime tt("JNI Weak Reference", trace_time, false, gclog_or_tty);
    if (task_executor != NULL) {
      task_executor->set_single_threaded_mode();
    }
    process_phaseJNI(is_alive, keep_alive, complete_gc);
  }
}

#ifndef PRODUCT
// Calculate the number of jni handles.
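// (Debug-only diagnostic: used by process_phaseJNI below to report how many
// weak JNI handles were traversed when PrintGCDetails and PrintReferenceGC
// are both enabled.)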
uint ReferenceProcessor::count_jni_refs() {
  class AlwaysAliveClosure: public BoolObjectClosure {
  public:
    virtual bool do_object_b(oop obj) { return true; }
    virtual void do_object(oop obj) { assert(false, "Don't call"); }
  };

  class CountHandleClosure: public OopClosure {
  private:
    int _count;
  public:
    CountHandleClosure(): _count(0) {}
    void do_oop(oop* unused)       { _count++; }
    void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
    int count() { return _count; }
  };
  CountHandleClosure global_handle_count;
  AlwaysAliveClosure always_alive;
  JNIHandles::weak_oops_do(&always_alive, &global_handle_count);
  return global_handle_count.count();
}
#endif

void ReferenceProcessor::process_phaseJNI(BoolObjectClosure* is_alive,
                                          OopClosure*        keep_alive,
                                          VoidClosure*       complete_gc) {
#ifndef PRODUCT
  if (PrintGCDetails && PrintReferenceGC) {
    unsigned int count = count_jni_refs();
    gclog_or_tty->print(", %u refs", count);
  }
#endif
  JNIHandles::weak_oops_do(is_alive, keep_alive);
  complete_gc->do_void();
}


template <class T>
bool enqueue_discovered_ref_helper(ReferenceProcessor* ref,
                                   AbstractRefProcTaskExecutor* task_executor) {

  // Remember old value of pending references list
  T* pending_list_addr = (T*)java_lang_ref_Reference::pending_list_addr();
  T old_pending_list_value = *pending_list_addr;

  // Enqueue references that are not made active again, and
  // clear the decks for the next collection (cycle).
  ref->enqueue_discovered_reflists((HeapWord*)pending_list_addr, task_executor);
  // Do the oop-check on pending_list_addr missed in
  // enqueue_discovered_reflist. We should probably
  // do a raw oop_check so that future such idempotent
  // oop_stores relying on the oop-check side-effect
  // may be elided automatically and safely without
  // affecting correctness.
  oop_store(pending_list_addr, oopDesc::load_decode_heap_oop(pending_list_addr));

  // Stop treating discovered references specially.
  ref->disable_discovery();

  // Return true if new pending references were added
  return old_pending_list_value != *pending_list_addr;
}

bool ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor) {
  NOT_PRODUCT(verify_ok_to_handle_reflists());
  if (UseCompressedOops) {
    return enqueue_discovered_ref_helper<narrowOop>(this, task_executor);
  } else {
    return enqueue_discovered_ref_helper<oop>(this, task_executor);
  }
}

void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list,
                                                    HeapWord* pending_list_addr) {
  // Given a list of refs linked through the "discovered" field
  // (java.lang.ref.Reference.discovered), self-loop their "next" field
  // thus distinguishing them from active References, then
  // prepend them to the pending list.
  // BKWRD COMPATIBILITY NOTE: For older JDKs (prior to the fix for 4956777),
  // the "next" field is used to chain the pending list, not the discovered
  // field.
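  //
  // Sketch of the new behaviour for a discovered list r1 -> r2 -> r3
  // (linked through "discovered"; r3.discovered == r3 marks the tail):
  //
  //   before: each ref's next field is NULL (the Refs are still active)
  //   after:  each ref's next field is self-looped (r1.next == r1, ...),
  //           r3.discovered points at the old head of the pending list,
  //           and *pending_list_addr == r1.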

  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr("ReferenceProcessor::enqueue_discovered_reflist list "
                           INTPTR_FORMAT, (address)refs_list.head());
  }

  oop obj = NULL;
  oop next_d = refs_list.head();
  if (pending_list_uses_discovered_field()) { // New behaviour
    // Walk down the list, self-looping the next field
    // so that the References are not considered active.
    while (obj != next_d) {
      obj = next_d;
      assert(obj->is_instanceRef(), "should be reference object");
      next_d = java_lang_ref_Reference::discovered(obj);
      if (TraceReferenceGC && PrintGCDetails) {
        gclog_or_tty->print_cr(" obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT,
                               obj, next_d);
      }
      assert(java_lang_ref_Reference::next(obj) == NULL,
             "Reference not active; should not be discovered");
      // Self-loop next, so as to make Ref not active.
      java_lang_ref_Reference::set_next(obj, obj);
      if (next_d == obj) {  // obj is last
        // Swap refs_list into pending_list_addr and
        // set obj's discovered to what we read from pending_list_addr.
        oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
        // Need oop_check on pending_list_addr above;
        // see special oop-check code at the end of
        // enqueue_discovered_reflists() further below.
        java_lang_ref_Reference::set_discovered(obj, old); // old may be NULL
      }
    }
  } else { // Old behaviour
    // Walk down the list, copying the discovered field into
    // the next field and clearing the discovered field.
    while (obj != next_d) {
      obj = next_d;
      assert(obj->is_instanceRef(), "should be reference object");
      next_d = java_lang_ref_Reference::discovered(obj);
      if (TraceReferenceGC && PrintGCDetails) {
        gclog_or_tty->print_cr(" obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT,
                               obj, next_d);
      }
      assert(java_lang_ref_Reference::next(obj) == NULL,
             "The reference should not be enqueued");
      if (next_d == obj) {  // obj is last
        // Swap refs_list into pending_list_addr and
        // set obj's next to what we read from pending_list_addr.
        oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
        // Need oop_check on pending_list_addr above;
        // see special oop-check code at the end of
        // enqueue_discovered_reflists() further below.
        if (old == NULL) {
          // obj should be made to point to itself, since
          // pending list was empty.
          java_lang_ref_Reference::set_next(obj, obj);
        } else {
          java_lang_ref_Reference::set_next(obj, old);
        }
      } else {
        java_lang_ref_Reference::set_next(obj, next_d);
      }
      java_lang_ref_Reference::set_discovered(obj, (oop) NULL);
    }
  }
}

// Parallel enqueue task
class RefProcEnqueueTask: public AbstractRefProcTaskExecutor::EnqueueTask {
public:
  RefProcEnqueueTask(ReferenceProcessor& ref_processor,
                     DiscoveredList      discovered_refs[],
                     HeapWord*           pending_list_addr,
                     int                 n_queues)
    : EnqueueTask(ref_processor, discovered_refs,
                  pending_list_addr, n_queues)
  { }

  virtual void work(unsigned int work_id) {
    assert(work_id < (unsigned int)_ref_processor.max_num_q(), "Index out-of-bounds");
    // Simplest first cut: static partitioning.
    int index = work_id;
    // The increment on "index" must correspond to the maximum number of queues
    // (n_queues) with which that ReferenceProcessor was created. That
    // is because of the "clever" way the discovered references lists were
    // allocated and are indexed into.
    assert(_n_queues == (int) _ref_processor.max_num_q(), "Different number not expected");
    for (int j = 0;
         j < ReferenceProcessor::number_of_subclasses_of_ref();
         j++, index += _n_queues) {
      _ref_processor.enqueue_discovered_reflist(
        _refs_lists[index], _pending_list_addr);
      _refs_lists[index].set_head(NULL);
      _refs_lists[index].set_length(0);
    }
  }
};

// Enqueue references that are not made active again
void ReferenceProcessor::enqueue_discovered_reflists(HeapWord* pending_list_addr,
  AbstractRefProcTaskExecutor* task_executor) {
  if (_processing_is_mt && task_executor != NULL) {
    // Parallel code
    RefProcEnqueueTask tsk(*this, _discoveredSoftRefs,
                           pending_list_addr, _max_num_q);
    task_executor->execute(tsk);
  } else {
    // Serial code: enqueue each discovered list in turn.
    for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
      enqueue_discovered_reflist(_discoveredSoftRefs[i], pending_list_addr);
      _discoveredSoftRefs[i].set_head(NULL);
      _discoveredSoftRefs[i].set_length(0);
    }
  }
}

void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
  _discovered_addr = java_lang_ref_Reference::discovered_addr(_ref);
  oop discovered = java_lang_ref_Reference::discovered(_ref);
  assert(_discovered_addr && discovered->is_oop_or_null(),
         "discovered field is bad");
  _next = discovered;
  _referent_addr = java_lang_ref_Reference::referent_addr(_ref);
  _referent = java_lang_ref_Reference::referent(_ref);
  assert(Universe::heap()->is_in_reserved_or_null(_referent),
         "Wrong oop found in java.lang.Reference object");
  assert(allow_null_referent ?
             _referent->is_oop_or_null()
           : _referent->is_oop(),
         "bad referent");
}

void DiscoveredListIterator::remove() {
  assert(_ref->is_oop(), "Dropping a bad reference");
  oop_store_raw(_discovered_addr, NULL);

  // First _prev_next ref actually points into DiscoveredList (gross).
  oop new_next;
  if (_next == _ref) {
    // At the end of the list, we should make _prev point to itself.
    // If _ref is the first ref, then _prev_next will be in the DiscoveredList,
    // and _prev will be NULL.
    new_next = _prev;
  } else {
    new_next = _next;
  }

  if (UseCompressedOops) {
    // Remove Reference object from list.
    oopDesc::encode_store_heap_oop((narrowOop*)_prev_next, new_next);
  } else {
    // Remove Reference object from list.
    oopDesc::store_heap_oop((oop*)_prev_next, new_next);
  }
  NOT_PRODUCT(_removed++);
  _refs_list.dec_length(1);
}

// Make the Reference object active again.
void DiscoveredListIterator::make_active() {
  // For G1 we don't want to use set_next - it
  // will dirty the card for the next field of
  // the reference object and will fail
  // CT verification.
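  // Instead, record the value being overwritten with an explicit SATB
  // pre-barrier (for G1 only the old value at next_addr matters here),
  // then store with set_next_raw, which skips the card-dirtying
  // post-barrier.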
  if (UseG1GC) {
    BarrierSet* bs = oopDesc::bs();
    HeapWord* next_addr = java_lang_ref_Reference::next_addr(_ref);

    if (UseCompressedOops) {
      bs->write_ref_field_pre((narrowOop*)next_addr, NULL);
    } else {
      bs->write_ref_field_pre((oop*)next_addr, NULL);
    }
    java_lang_ref_Reference::set_next_raw(_ref, NULL);
  } else {
    java_lang_ref_Reference::set_next(_ref, NULL);
  }
}

void DiscoveredListIterator::clear_referent() {
  oop_store_raw(_referent_addr, NULL);
}

// NOTE: process_phase*() are largely similar, and at a high level
// merely iterate over the extant list applying a predicate to
// each of its elements and possibly removing that element from the
// list and applying some further closures to that element.
// We should consider the possibility of replacing these
// process_phase*() methods by abstracting them into
// a single general iterator invocation that receives appropriate
// closures that accomplish this work.

// (SoftReferences only) Traverse the list and remove any SoftReferences whose
// referents are not alive, but that should be kept alive for policy reasons.
// Keep alive the transitive closure of all such referents.
void
ReferenceProcessor::process_phase1(DiscoveredList&    refs_list,
                                   ReferencePolicy*   policy,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc) {
  assert(policy != NULL, "Must have a non-NULL policy");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  // Decide which softly reachable refs should be kept alive.
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
    bool referent_is_dead = (iter.referent() != NULL) && !iter.is_referent_alive();
    if (referent_is_dead && !policy->should_clear_reference(iter.obj())) {
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Dropping reference (" INTPTR_FORMAT ": %s" ") by policy",
                               iter.obj(), iter.obj()->blueprint()->internal_name());
      }
      // Remove Reference object from list
      iter.remove();
      // Make the Reference object active again
      iter.make_active();
      // keep the referent around
      iter.make_referent_alive();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Close the reachable set
  complete_gc->do_void();
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC) {
      gclog_or_tty->print_cr(" Dropped %d dead Refs out of %d "
        "discovered Refs by policy, from list " INTPTR_FORMAT,
        iter.removed(), iter.processed(), (address)refs_list.head());
    }
  )
}

// Traverse the list and remove any Refs that are not active, or
// whose referents are either alive or NULL.
void
ReferenceProcessor::pp2_work(DiscoveredList&    refs_list,
                             BoolObjectClosure* is_alive,
                             OopClosure*        keep_alive) {
  assert(discovery_is_atomic(), "Error");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
    DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());)
    assert(next == NULL, "Should not discover inactive Reference");
    if (iter.is_referent_alive()) {
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)",
                               iter.obj(), iter.obj()->blueprint()->internal_name());
      }
      // The referent is reachable after all.
      // Remove Reference object from list.
      iter.remove();
      // Update the referent pointer as necessary: Note that this
      // should not entail any recursive marking because the
      // referent must already have been traversed.
      iter.make_referent_alive();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC && (iter.processed() > 0)) {
      gclog_or_tty->print_cr(" Dropped %d active Refs out of %d "
        "Refs in discovered list " INTPTR_FORMAT,
        iter.removed(), iter.processed(), (address)refs_list.head());
    }
  )
}

void
ReferenceProcessor::pp2_work_concurrent_discovery(DiscoveredList&    refs_list,
                                                  BoolObjectClosure* is_alive,
                                                  OopClosure*        keep_alive,
                                                  VoidClosure*       complete_gc) {
  assert(!discovery_is_atomic(), "Error");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    HeapWord* next_addr = java_lang_ref_Reference::next_addr(iter.obj());
    oop next = java_lang_ref_Reference::next(iter.obj());
    if ((iter.referent() == NULL || iter.is_referent_alive() ||
         next != NULL)) {
      assert(next->is_oop_or_null(), "bad next field");
      // Remove Reference object from list
      iter.remove();
      // Trace the cohorts
      iter.make_referent_alive();
      if (UseCompressedOops) {
        keep_alive->do_oop((narrowOop*)next_addr);
      } else {
        keep_alive->do_oop((oop*)next_addr);
      }
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Now close the newly reachable set
  complete_gc->do_void();
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC && (iter.processed() > 0)) {
      gclog_or_tty->print_cr(" Dropped %d active Refs out of %d "
        "Refs in discovered list " INTPTR_FORMAT,
        iter.removed(), iter.processed(), (address)refs_list.head());
    }
  )
}

// Traverse the list and process the referents, by either
// clearing them or keeping them (and their reachable
// closure) alive.
void
ReferenceProcessor::process_phase3(DiscoveredList&    refs_list,
                                   bool               clear_referent,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc) {
  ResourceMark rm;
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.update_discovered();
    iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
    if (clear_referent) {
      // NULL out referent pointer
      iter.clear_referent();
    } else {
      // keep the referent around
      iter.make_referent_alive();
    }
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Adding %sreference (" INTPTR_FORMAT ": %s) as pending",
                             clear_referent ? "cleared " : "",
                             iter.obj(), iter.obj()->blueprint()->internal_name());
    }
    assert(iter.obj()->is_oop(UseConcMarkSweepGC), "Adding a bad reference");
    iter.next();
  }
  // Remember to update the next pointer of the last ref.
  iter.update_discovered();
  // Close the reachable set
  complete_gc->do_void();
}

void
ReferenceProcessor::clear_discovered_references(DiscoveredList& refs_list) {
  oop obj = NULL;
  oop next = refs_list.head();
  while (next != obj) {
    obj = next;
    next = java_lang_ref_Reference::discovered(obj);
    java_lang_ref_Reference::set_discovered_raw(obj, NULL);
  }
  refs_list.set_head(NULL);
  refs_list.set_length(0);
}

void
ReferenceProcessor::abandon_partial_discovered_list(DiscoveredList& refs_list) {
  clear_discovered_references(refs_list);
}

void ReferenceProcessor::abandon_partial_discovery() {
  // loop over the lists
  for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
      gclog_or_tty->print_cr("\nAbandoning %s discovered list", list_name(i));
    }
    abandon_partial_discovered_list(_discoveredSoftRefs[i]);
  }
}

class RefProcPhase1Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase1Task(ReferenceProcessor& ref_processor,
                    DiscoveredList      refs_lists[],
                    ReferencePolicy*    policy,
                    bool                marks_oops_alive)
    : ProcessTask(ref_processor, refs_lists, marks_oops_alive),
      _policy(policy)
  { }
  virtual void work(unsigned int i, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    Thread* thr = Thread::current();
    int refs_list_index = ((WorkerThread*)thr)->id();
    _ref_processor.process_phase1(_refs_lists[refs_list_index], _policy,
                                  &is_alive, &keep_alive, &complete_gc);
  }
private:
  ReferencePolicy* _policy;
};

class RefProcPhase2Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase2Task(ReferenceProcessor& ref_processor,
                    DiscoveredList      refs_lists[],
                    bool                marks_oops_alive)
    : ProcessTask(ref_processor, refs_lists, marks_oops_alive)
  { }
  virtual void work(unsigned int i, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    _ref_processor.process_phase2(_refs_lists[i],
                                  &is_alive, &keep_alive, &complete_gc);
  }
};

class RefProcPhase3Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase3Task(ReferenceProcessor& ref_processor,
                    DiscoveredList      refs_lists[],
                    bool                clear_referent,
                    bool                marks_oops_alive)
    : ProcessTask(ref_processor, refs_lists, marks_oops_alive),
      _clear_referent(clear_referent)
  { }
  virtual void work(unsigned int i, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    // Don't use "refs_list_index" calculated in this way because
    // balance_queues() has moved the Ref's into the first n queues.
    // Thread* thr = Thread::current();
    // int refs_list_index = ((WorkerThread*)thr)->id();
    // _ref_processor.process_phase3(_refs_lists[refs_list_index], _clear_referent,
    _ref_processor.process_phase3(_refs_lists[i], _clear_referent,
                                  &is_alive, &keep_alive, &complete_gc);
  }
private:
  bool _clear_referent;
};

void ReferenceProcessor::set_discovered(oop ref, oop value) {
  if (_discovered_list_needs_barrier) {
    java_lang_ref_Reference::set_discovered(ref, value);
  } else {
    java_lang_ref_Reference::set_discovered_raw(ref, value);
  }
}

// Balances reference queues.
// Move entries from all queues[0, 1, ..., _max_num_q-1] to
// queues[0, 1, ..., _num_q-1] because only the first _num_q
// corresponding to the active workers will be processed.
void ReferenceProcessor::balance_queues(DiscoveredList ref_lists[])
{
  // calculate total length
  size_t total_refs = 0;
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr("\nBalance ref_lists ");
  }

  for (int i = 0; i < _max_num_q; ++i) {
    total_refs += ref_lists[i].length();
    if (TraceReferenceGC && PrintGCDetails) {
      gclog_or_tty->print(SIZE_FORMAT " ", ref_lists[i].length());
    }
  }
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr(" = " SIZE_FORMAT, total_refs);
  }
  size_t avg_refs = total_refs / _num_q + 1;
  int to_idx = 0;
  for (int from_idx = 0; from_idx < _max_num_q; from_idx++) {
    bool move_all = false;
    if (from_idx >= _num_q) {
      move_all = ref_lists[from_idx].length() > 0;
    }
    while ((ref_lists[from_idx].length() > avg_refs) ||
           move_all) {
      assert(to_idx < _num_q, "Sanity Check!");
      if (ref_lists[to_idx].length() < avg_refs) {
        // move superfluous refs
        size_t refs_to_move;
        // Move all the Ref's if the from queue will not be processed.
        if (move_all) {
          refs_to_move = MIN2(ref_lists[from_idx].length(),
                              avg_refs - ref_lists[to_idx].length());
        } else {
          refs_to_move = MIN2(ref_lists[from_idx].length() - avg_refs,
                              avg_refs - ref_lists[to_idx].length());
        }

        assert(refs_to_move > 0, "otherwise the code below will fail");

        oop move_head = ref_lists[from_idx].head();
        oop move_tail = move_head;
        oop new_head  = move_head;
        // find an element to split the list on
        for (size_t j = 0; j < refs_to_move; ++j) {
          move_tail = new_head;
          new_head = java_lang_ref_Reference::discovered(new_head);
        }

        // Add the chain to the to list.
        if (ref_lists[to_idx].head() == NULL) {
          // to list is empty. Make a loop at the end.
          set_discovered(move_tail, move_tail);
        } else {
          set_discovered(move_tail, ref_lists[to_idx].head());
        }
        ref_lists[to_idx].set_head(move_head);
        ref_lists[to_idx].inc_length(refs_to_move);

        // Remove the chain from the from list.
        if (move_tail == new_head) {
          // We found the end of the from list.
          ref_lists[from_idx].set_head(NULL);
        } else {
          ref_lists[from_idx].set_head(new_head);
        }
        ref_lists[from_idx].dec_length(refs_to_move);
        if (ref_lists[from_idx].length() == 0) {
          break;
        }
      } else {
        to_idx = (to_idx + 1) % _num_q;
      }
    }
  }
#ifdef ASSERT
  size_t balanced_total_refs = 0;
  for (int i = 0; i < _max_num_q; ++i) {
    balanced_total_refs += ref_lists[i].length();
    if (TraceReferenceGC && PrintGCDetails) {
      gclog_or_tty->print(SIZE_FORMAT " ", ref_lists[i].length());
    }
  }
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr(" = " SIZE_FORMAT, balanced_total_refs);
    gclog_or_tty->flush();
  }
  assert(total_refs == balanced_total_refs, "Balancing was incomplete");
#endif
}

void ReferenceProcessor::balance_all_queues() {
  balance_queues(_discoveredSoftRefs);
  balance_queues(_discoveredWeakRefs);
  balance_queues(_discoveredFinalRefs);
  balance_queues(_discoveredPhantomRefs);
}

void
ReferenceProcessor::process_discovered_reflist(
  DiscoveredList               refs_lists[],
  ReferencePolicy*             policy,
  bool                         clear_referent,
  BoolObjectClosure*           is_alive,
  OopClosure*                  keep_alive,
  VoidClosure*                 complete_gc,
  AbstractRefProcTaskExecutor* task_executor)
{
  bool mt_processing = task_executor != NULL && _processing_is_mt;
  // If discovery used MT and a dynamic number of GC threads, then
  // the queues must be balanced for correctness if fewer than the
  // maximum number of queues were used. The number of queues used
  // during discovery may differ from the number to be used for
  // processing, so don't depend on _num_q < _max_num_q as part
  // of the test.
  bool must_balance = _discovery_is_mt;

  if ((mt_processing && ParallelRefProcBalancingEnabled) ||
      must_balance) {
    balance_queues(refs_lists);
  }
  if (PrintReferenceGC && PrintGCDetails) {
    size_t total = 0;
    for (int i = 0; i < _max_num_q; ++i) {
      total += refs_lists[i].length();
    }
    gclog_or_tty->print(", " SIZE_FORMAT " refs", total);
  }

  // Phase 1 (soft refs only):
  // . Traverse the list and remove any SoftReferences whose
  //   referents are not alive, but that should be kept alive for
  //   policy reasons. Keep alive the transitive closure of all
  //   such referents.
  if (policy != NULL) {
    if (mt_processing) {
      RefProcPhase1Task phase1(*this, refs_lists, policy, true /*marks_oops_alive*/);
      task_executor->execute(phase1);
    } else {
      for (int i = 0; i < _max_num_q; i++) {
        process_phase1(refs_lists[i], policy,
                       is_alive, keep_alive, complete_gc);
      }
    }
  } else { // policy == NULL
    assert(refs_lists != _discoveredSoftRefs,
           "Policy must be specified for soft references.");
  }

  // Phase 2:
  // . Traverse the list and remove any refs whose referents are alive.
  if (mt_processing) {
    RefProcPhase2Task phase2(*this, refs_lists, !discovery_is_atomic() /*marks_oops_alive*/);
    task_executor->execute(phase2);
  } else {
    for (int i = 0; i < _max_num_q; i++) {
      process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc);
    }
  }

  // Phase 3:
  // . Traverse the list and process referents as appropriate.
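  //   (clear_referent is true for Soft and Weak refs and false for Final
  //   and Phantom refs; see the calls in process_discovered_references().)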
  if (mt_processing) {
    RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/);
    task_executor->execute(phase3);
  } else {
    for (int i = 0; i < _max_num_q; i++) {
      process_phase3(refs_lists[i], clear_referent,
                     is_alive, keep_alive, complete_gc);
    }
  }
}

void ReferenceProcessor::clean_up_discovered_references() {
  // loop over the lists

  // An alternative implementation of this routine could
  // use the following nested loop:
  //
  // for (int i = 0; i < number_of_subclasses_of_ref(); i++) {
  //   for (int j = 0; j < _num_q; j++) {
  //     int index = i * _max_num_q + j;

  for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
      gclog_or_tty->print_cr(
        "\nScrubbing %s discovered list of NULL referents",
        list_name(i));
    }
    clean_up_discovered_reflist(_discoveredSoftRefs[i]);
  }
}

void ReferenceProcessor::clean_up_discovered_reflist(DiscoveredList& refs_list) {
  assert(!discovery_is_atomic(), "Else why call this method?");
  DiscoveredListIterator iter(refs_list, NULL, NULL);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    oop next = java_lang_ref_Reference::next(iter.obj());
    assert(next->is_oop_or_null(), "bad next field");
    // If referent has been cleared or Reference is not active,
    // drop it.
    if (iter.referent() == NULL || next != NULL) {
      debug_only(
        if (PrintGCDetails && TraceReferenceGC) {
          gclog_or_tty->print_cr("clean_up_discovered_list: Dropping Reference: "
            INTPTR_FORMAT " with next field: " INTPTR_FORMAT
            " and referent: " INTPTR_FORMAT,
            iter.obj(), next, iter.referent());
        }
      )
      // Remove Reference object from list
      iter.remove();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC) {
      gclog_or_tty->print(
        " Removed %d Refs with NULL referents out of %d discovered Refs",
        iter.removed(), iter.processed());
    }
  )
}

inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt) {
  int id = 0;
  // Determine the queue index to use for this object.
  if (_discovery_is_mt) {
    // During a multi-threaded discovery phase,
    // each thread saves to its "own" list.
    Thread* thr = Thread::current();
    id = thr->as_Worker_thread()->id();
  } else {
    // single-threaded discovery, we save in round-robin
    // fashion to each of the lists.
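    // (Round-robin distribution only pays off when processing is MT;
    // with single-threaded processing everything stays in list 0.)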
    if (_processing_is_mt) {
      id = next_id();
    }
  }
  assert(0 <= id && id < _max_num_q, "Id is out-of-bounds (call Freud?)");

  // Get the discovered queue to which we will add
  DiscoveredList* list = NULL;
  switch (rt) {
    case REF_OTHER:
      // Unknown reference type, no special treatment
      break;
    case REF_SOFT:
      list = &_discoveredSoftRefs[id];
      break;
    case REF_WEAK:
      list = &_discoveredWeakRefs[id];
      break;
    case REF_FINAL:
      list = &_discoveredFinalRefs[id];
      break;
    case REF_PHANTOM:
      list = &_discoveredPhantomRefs[id];
      break;
    case REF_NONE:
      // we should not reach here if we are an instanceRefKlass
    default:
      ShouldNotReachHere();
  }
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr("Thread %d gets list " INTPTR_FORMAT, id, list);
  }
  return list;
}

inline void
ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
                                              oop             obj,
                                              HeapWord*       discovered_addr) {
  assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller");
  // First we must make sure this object is only enqueued once. CAS a
  // non-NULL value into the discovered field.
  oop current_head = refs_list.head();
  // The last ref must have its discovered field pointing to itself.
  oop next_discovered = (current_head != NULL) ? current_head : obj;

  // Note: In the case of G1, this specific pre-barrier is strictly
  // not necessary because the only case we are interested in
  // here is when *discovered_addr is NULL (see the CAS further below),
  // so this will expand to nothing. As a result, we have manually
  // elided this out for G1, but left in the test for some future
  // collector that might have need for a pre-barrier here, e.g.:-
  // _bs->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered);
  assert(!_discovered_list_needs_barrier || UseG1GC,
         "Need to check non-G1 collector: "
         "may need a pre-write-barrier for CAS from NULL below");
  oop retest = oopDesc::atomic_compare_exchange_oop(next_discovered, discovered_addr,
                                                    NULL);
  if (retest == NULL) {
    // This thread just won the right to enqueue the object.
    // We have separate lists for enqueueing, so no synchronization
    // is necessary.
    refs_list.set_head(obj);
    refs_list.inc_length(1);
    if (_discovered_list_needs_barrier) {
      _bs->write_ref_field((void*)discovered_addr, next_discovered);
    }

    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Discovered reference (mt) (" INTPTR_FORMAT ": %s)",
                             obj, obj->blueprint()->internal_name());
    }
  } else {
    // If retest was non-NULL, another thread beat us to it:
    // The reference has already been discovered...
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Already discovered reference (" INTPTR_FORMAT ": %s)",
                             obj, obj->blueprint()->internal_name());
    }
  }
}

#ifndef PRODUCT
// Non-atomic (i.e. concurrent) discovery might allow us
// to observe j.l.References with NULL referents, being those
// cleared concurrently by mutators during (or after) discovery.
void ReferenceProcessor::verify_referent(oop obj) {
  bool da = discovery_is_atomic();
  oop referent = java_lang_ref_Reference::referent(obj);
  assert(da ?
             referent->is_oop()
           : referent->is_oop_or_null(),
         err_msg("Bad referent " INTPTR_FORMAT " found in Reference "
                 INTPTR_FORMAT " during %satomic discovery ",
                 (intptr_t)referent, (intptr_t)obj, da ? "" : "non-"));
}
#endif

// We mention two of several possible choices here:
// #0: if the reference object is not in the "originating generation"
//     (or part of the heap being collected, indicated by our "span"),
//     we don't treat it specially (i.e. we scan it as we would
//     a normal oop, treating its references as strong references).
//     This means that references can't be discovered unless their
//     referent is also in the same span. This is the simplest,
//     most "local" and most conservative approach, albeit one
//     that may cause weak references to be enqueued least promptly.
//     We call this choice the "ReferenceBasedDiscovery" policy.
// #1: the reference object may be in any generation (span), but if
//     the referent is in the generation (span) being currently collected
//     then we can discover the reference object, provided
//     the object has not already been discovered by
//     a different concurrently running collector (as may be the
//     case, for instance, if the reference object is in CMS and
//     the referent in DefNewGeneration), and provided the processing
//     of this reference object by the current collector will
//     appear atomic to every other collector in the system.
//     (Thus, for instance, a concurrent collector may not
//     discover references in other generations even if the
//     referent is in its own generation). This policy may,
//     in certain cases, enqueue references somewhat sooner than
//     might Policy #0 above, but at marginally increased cost
//     and complexity in processing these references.
//     We call this choice the "ReferentBasedDiscovery" policy.
bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
  // Make sure we are discovering refs (rather than processing discovered refs).
  if (!_discovering_refs || !RegisterReferences) {
    return false;
  }
  // We only discover active references.
  oop next = java_lang_ref_Reference::next(obj);
  if (next != NULL) { // Ref is no longer active
    return false;
  }

  HeapWord* obj_addr = (HeapWord*)obj;
  if (RefDiscoveryPolicy == ReferenceBasedDiscovery &&
      !_span.contains(obj_addr)) {
    // Reference is not in the originating generation;
    // don't treat it specially (i.e. we want to scan it as a normal
    // object with strong references).
    return false;
  }

  // We only discover references whose referents are not (yet)
  // known to be strongly reachable.
  if (is_alive_non_header() != NULL) {
    verify_referent(obj);
    if (is_alive_non_header()->do_object_b(java_lang_ref_Reference::referent(obj))) {
      return false; // referent is reachable
    }
  }
  if (rt == REF_SOFT) {
    // For soft refs we can decide now if these are not
    // current candidates for clearing, in which case we
    // can mark through them now, rather than delaying that
    // to the reference-processing phase. Since all current
    // time-stamp policies advance the soft-ref clock only
    // at a major collection cycle, this is always currently
    // accurate.
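    // should_clear_reference() answers true when the policy regards this
    // soft ref as a clearing candidate; only those are discovered here.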
    if (!_current_soft_ref_policy->should_clear_reference(obj)) {
      return false;
    }
  }

  ResourceMark rm; // Needed for tracing.

  HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
  const oop  discovered = java_lang_ref_Reference::discovered(obj);
  assert(discovered->is_oop_or_null(), "bad discovered field");
  if (discovered != NULL) {
    // The reference has already been discovered...
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Already discovered reference (" INTPTR_FORMAT ": %s)",
                             obj, obj->blueprint()->internal_name());
    }
    if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
      // assumes that an object is not processed twice;
      // if it's been already discovered it must be on another
      // generation's discovered list; so we won't discover it.
      return false;
    } else {
      assert(RefDiscoveryPolicy == ReferenceBasedDiscovery,
             "Unrecognized policy");
      // Check assumption that an object is not potentially
      // discovered twice except by concurrent collectors that potentially
      // trace the same Reference object twice.
      assert(UseConcMarkSweepGC || UseG1GC,
             "Only possible with a concurrent marking collector");
      return true;
    }
  }

  if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
    verify_referent(obj);
    // Discover if and only if EITHER:
    // .. reference is in our span, OR
    // .. we are an atomic collector and referent is in our span
    if (_span.contains(obj_addr) ||
        (discovery_is_atomic() &&
         _span.contains(java_lang_ref_Reference::referent(obj)))) {
      // should_enqueue = true;
    } else {
      return false;
    }
  } else {
    assert(RefDiscoveryPolicy == ReferenceBasedDiscovery &&
           _span.contains(obj_addr), "code inconsistency");
  }

  // Get the right type of discovered queue head.
  DiscoveredList* list = get_discovered_list(rt);
  if (list == NULL) {
    return false; // nothing special needs to be done
  }

  if (_discovery_is_mt) {
    add_to_discovered_list_mt(*list, obj, discovered_addr);
  } else {
    // If "_discovered_list_needs_barrier", we do write barriers when
    // updating the discovered reference list. Otherwise, we do a raw store
    // here: the field will be visited later when processing the discovered
    // references.
    oop current_head = list->head();
    // The last ref must have its discovered field pointing to itself.
    oop next_discovered = (current_head != NULL) ? current_head : obj;

    // As in the case further above, since we are over-writing a NULL
    // pre-value, we can safely elide the pre-barrier here for the case of G1.
    // e.g.:- _bs->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered);
    assert(discovered == NULL, "control point invariant");
    assert(!_discovered_list_needs_barrier || UseG1GC,
           "For non-G1 collector, may need a pre-write-barrier for CAS from NULL below");
    oop_store_raw(discovered_addr, next_discovered);
    if (_discovered_list_needs_barrier) {
      _bs->write_ref_field((void*)discovered_addr, next_discovered);
    }
    list->set_head(obj);
    list->inc_length(1);

    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Discovered reference (" INTPTR_FORMAT ": %s)",
                             obj, obj->blueprint()->internal_name());
    }
  }
  assert(obj->is_oop(), "Discovered a bad reference");
  verify_referent(obj);
  return true;
}

// Preclean the discovered references by removing those
// whose referents are alive, and by marking from those that
// are not active. These lists can be handled here
// in any order and, indeed, concurrently.
void ReferenceProcessor::preclean_discovered_references(
  BoolObjectClosure* is_alive,
  OopClosure*        keep_alive,
  VoidClosure*       complete_gc,
  YieldClosure*      yield,
  bool               should_unload_classes) {

  NOT_PRODUCT(verify_ok_to_handle_reflists());

#ifdef ASSERT
  bool must_remember_klasses = ClassUnloading && !UseConcMarkSweepGC ||
                               CMSClassUnloadingEnabled && UseConcMarkSweepGC ||
                               ExplicitGCInvokesConcurrentAndUnloadsClasses &&
                                 UseConcMarkSweepGC && should_unload_classes;
  RememberKlassesChecker mx(must_remember_klasses);
#endif
  // Soft references
  {
    TraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (int i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredSoftRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Weak references
  {
    TraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (int i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredWeakRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Final references
  {
    TraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (int i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredFinalRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Phantom references
  {
    TraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (int i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredPhantomRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }
}

// Walk the given discovered ref list, and remove all reference objects
// whose referents are still alive, whose referents are NULL or which
// are not active (have a non-NULL next field). NOTE: When we are
// thus precleaning the ref lists (which happens single-threaded today),
// we do not disable refs discovery to honour the correct semantics of
// java.lang.Reference. As a result, we need to be careful below
// that ref removal steps interleave safely with ref discovery steps
// (in this thread).
void
ReferenceProcessor::preclean_discovered_reflist(DiscoveredList&    refs_list,
                                                BoolObjectClosure* is_alive,
                                                OopClosure*        keep_alive,
                                                VoidClosure*       complete_gc,
                                                YieldClosure*      yield) {
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    oop obj = iter.obj();
    oop next = java_lang_ref_Reference::next(obj);
    if (iter.referent() == NULL || iter.is_referent_alive() ||
        next != NULL) {
      // The referent has been cleared, or is alive, or the Reference is not
      // active; we need to trace and mark its cohort.
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Precleaning Reference (" INTPTR_FORMAT ": %s)",
                               iter.obj(), iter.obj()->blueprint()->internal_name());
      }
      // Remove Reference object from list
      iter.remove();
      // Keep alive its cohort.
      iter.make_referent_alive();
      if (UseCompressedOops) {
        narrowOop* next_addr = (narrowOop*)java_lang_ref_Reference::next_addr(obj);
        keep_alive->do_oop(next_addr);
      } else {
        oop* next_addr = (oop*)java_lang_ref_Reference::next_addr(obj);
        keep_alive->do_oop(next_addr);
      }
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Close the reachable set
  complete_gc->do_void();

  NOT_PRODUCT(
    if (PrintGCDetails && PrintReferenceGC && (iter.processed() > 0)) {
      gclog_or_tty->print_cr(" Dropped %d Refs out of %d "
        "Refs in discovered list " INTPTR_FORMAT,
        iter.removed(), iter.processed(), (address)refs_list.head());
    }
  )
}

const char* ReferenceProcessor::list_name(int i) {
  assert(i >= 0 && i < _max_num_q * number_of_subclasses_of_ref(),
         "Out of bounds index");

  int j = i / _max_num_q;
  switch (j) {
    case 0: return "SoftRef";
    case 1: return "WeakRef";
    case 2: return "FinalRef";
    case 3: return "PhantomRef";
  }
  ShouldNotReachHere();
  return NULL;
}

#ifndef PRODUCT
void ReferenceProcessor::verify_ok_to_handle_reflists() {
  // empty for now
}
#endif

#ifndef PRODUCT
void ReferenceProcessor::clear_discovered_references() {
  guarantee(!_discovering_refs, "Discovering refs?");
  for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    clear_discovered_references(_discoveredSoftRefs[i]);
  }
}

#endif // PRODUCT