1 /* 2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "classfile/javaClasses.hpp" 27 #include "classfile/systemDictionary.hpp" 28 #include "gc_interface/collectedHeap.hpp" 29 #include "gc_interface/collectedHeap.inline.hpp" 30 #include "memory/referencePolicy.hpp" 31 #include "memory/referenceProcessor.hpp" 32 #include "oops/oop.inline.hpp" 33 #include "runtime/java.hpp" 34 #include "runtime/jniHandles.hpp" 35 36 ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL; 37 ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy = NULL; 38 oop ReferenceProcessor::_sentinelRef = NULL; 39 const int subclasses_of_ref = REF_PHANTOM - REF_OTHER; 40 41 // List of discovered references. 42 class DiscoveredList { 43 public: 44 DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { } 45 oop head() const { 46 return UseCompressedOops ? oopDesc::decode_heap_oop_not_null(_compressed_head) : 47 _oop_head; 48 } 49 HeapWord* adr_head() { 50 return UseCompressedOops ? (HeapWord*)&_compressed_head : 51 (HeapWord*)&_oop_head; 52 } 53 void set_head(oop o) { 54 if (UseCompressedOops) { 55 // Must compress the head ptr. 56 _compressed_head = oopDesc::encode_heap_oop_not_null(o); 57 } else { 58 _oop_head = o; 59 } 60 } 61 bool empty() const { return head() == ReferenceProcessor::sentinel_ref(); } 62 size_t length() { return _len; } 63 void set_length(size_t len) { _len = len; } 64 void inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); } 65 void dec_length(size_t dec) { _len -= dec; } 66 private: 67 // Set value depending on UseCompressedOops. This could be a template class 68 // but then we have to fix all the instantiations and declarations that use this class. 69 oop _oop_head; 70 narrowOop _compressed_head; 71 size_t _len; 72 }; 73 74 void referenceProcessor_init() { 75 ReferenceProcessor::init_statics(); 76 } 77 78 void ReferenceProcessor::init_statics() { 79 assert(_sentinelRef == NULL, "should be initialized precisely once"); 80 EXCEPTION_MARK; 81 _sentinelRef = instanceKlass::cast( 82 SystemDictionary::Reference_klass())-> 83 allocate_permanent_instance(THREAD); 84 85 // Initialize the master soft ref clock. 86 java_lang_ref_SoftReference::set_clock(os::javaTimeMillis()); 87 88 if (HAS_PENDING_EXCEPTION) { 89 Handle ex(THREAD, PENDING_EXCEPTION); 90 vm_exit_during_initialization(ex); 91 } 92 assert(_sentinelRef != NULL && _sentinelRef->is_oop(), 93 "Just constructed it!"); 94 _always_clear_soft_ref_policy = new AlwaysClearPolicy(); 95 _default_soft_ref_policy = new COMPILER2_PRESENT(LRUMaxHeapPolicy()) 96 NOT_COMPILER2(LRUCurrentHeapPolicy()); 97 if (_always_clear_soft_ref_policy == NULL || _default_soft_ref_policy == NULL) { 98 vm_exit_during_initialization("Could not allocate reference policy object"); 99 } 100 guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery || 101 RefDiscoveryPolicy == ReferentBasedDiscovery, 102 "Unrecongnized RefDiscoveryPolicy"); 103 } 104 105 ReferenceProcessor* 106 ReferenceProcessor::create_ref_processor(MemRegion span, 107 bool atomic_discovery, 108 bool mt_discovery, 109 BoolObjectClosure* is_alive_non_header, 110 int parallel_gc_threads, 111 bool mt_processing, 112 bool dl_needs_barrier) { 113 int mt_degree = 1; 114 if (parallel_gc_threads > 1) { 115 mt_degree = parallel_gc_threads; 116 } 117 ReferenceProcessor* rp = 118 new ReferenceProcessor(span, atomic_discovery, 119 mt_discovery, mt_degree, 120 mt_processing && (parallel_gc_threads > 0), 121 dl_needs_barrier); 122 if (rp == NULL) { 123 vm_exit_during_initialization("Could not allocate ReferenceProcessor object"); 124 } 125 rp->set_is_alive_non_header(is_alive_non_header); 126 rp->setup_policy(false /* default soft ref policy */); 127 return rp; 128 } 129 130 ReferenceProcessor::ReferenceProcessor(MemRegion span, 131 bool atomic_discovery, 132 bool mt_discovery, 133 int mt_degree, 134 bool mt_processing, 135 bool discovered_list_needs_barrier) : 136 _discovering_refs(false), 137 _enqueuing_is_done(false), 138 _is_alive_non_header(NULL), 139 _discovered_list_needs_barrier(discovered_list_needs_barrier), 140 _bs(NULL), 141 _processing_is_mt(mt_processing), 142 _next_id(0) 143 { 144 _span = span; 145 _discovery_is_atomic = atomic_discovery; 146 _discovery_is_mt = mt_discovery; 147 _num_q = mt_degree; 148 _max_num_q = mt_degree; 149 _discoveredSoftRefs = NEW_C_HEAP_ARRAY(DiscoveredList, _max_num_q * subclasses_of_ref); 150 if (_discoveredSoftRefs == NULL) { 151 vm_exit_during_initialization("Could not allocated RefProc Array"); 152 } 153 _discoveredWeakRefs = &_discoveredSoftRefs[_max_num_q]; 154 _discoveredFinalRefs = &_discoveredWeakRefs[_max_num_q]; 155 _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_q]; 156 assert(sentinel_ref() != NULL, "_sentinelRef is NULL"); 157 // Initialized all entries to _sentinelRef 158 for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) { 159 _discoveredSoftRefs[i].set_head(sentinel_ref()); 160 _discoveredSoftRefs[i].set_length(0); 161 } 162 // If we do barreirs, cache a copy of the barrier set. 163 if (discovered_list_needs_barrier) { 164 _bs = Universe::heap()->barrier_set(); 165 } 166 } 167 168 #ifndef PRODUCT 169 void ReferenceProcessor::verify_no_references_recorded() { 170 guarantee(!_discovering_refs, "Discovering refs?"); 171 for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) { 172 guarantee(_discoveredSoftRefs[i].empty(), 173 "Found non-empty discovered list"); 174 } 175 } 176 #endif 177 178 void ReferenceProcessor::weak_oops_do(OopClosure* f) { 179 // Should this instead be 180 // for (int i = 0; i < subclasses_of_ref; i++_ { 181 // for (int j = 0; j < _num_q; j++) { 182 // int index = i * _max_num_q + j; 183 for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) { 184 if (UseCompressedOops) { 185 f->do_oop((narrowOop*)_discoveredSoftRefs[i].adr_head()); 186 } else { 187 f->do_oop((oop*)_discoveredSoftRefs[i].adr_head()); 188 } 189 } 190 } 191 192 void ReferenceProcessor::oops_do(OopClosure* f) { 193 f->do_oop(adr_sentinel_ref()); 194 } 195 196 void ReferenceProcessor::update_soft_ref_master_clock() { 197 // Update (advance) the soft ref master clock field. This must be done 198 // after processing the soft ref list. 199 jlong now = os::javaTimeMillis(); 200 jlong clock = java_lang_ref_SoftReference::clock(); 201 NOT_PRODUCT( 202 if (now < clock) { 203 warning("time warp: %d to %d", clock, now); 204 } 205 ) 206 // In product mode, protect ourselves from system time being adjusted 207 // externally and going backward; see note in the implementation of 208 // GenCollectedHeap::time_since_last_gc() for the right way to fix 209 // this uniformly throughout the VM; see bug-id 4741166. XXX 210 if (now > clock) { 211 java_lang_ref_SoftReference::set_clock(now); 212 } 213 // Else leave clock stalled at its old value until time progresses 214 // past clock value. 215 } 216 217 void ReferenceProcessor::process_discovered_references( 218 BoolObjectClosure* is_alive, 219 OopClosure* keep_alive, 220 VoidClosure* complete_gc, 221 AbstractRefProcTaskExecutor* task_executor) { 222 NOT_PRODUCT(verify_ok_to_handle_reflists()); 223 224 assert(!enqueuing_is_done(), "If here enqueuing should not be complete"); 225 // Stop treating discovered references specially. 226 disable_discovery(); 227 228 bool trace_time = PrintGCDetails && PrintReferenceGC; 229 // Soft references 230 { 231 TraceTime tt("SoftReference", trace_time, false, gclog_or_tty); 232 process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true, 233 is_alive, keep_alive, complete_gc, task_executor); 234 } 235 236 update_soft_ref_master_clock(); 237 238 // Weak references 239 { 240 TraceTime tt("WeakReference", trace_time, false, gclog_or_tty); 241 process_discovered_reflist(_discoveredWeakRefs, NULL, true, 242 is_alive, keep_alive, complete_gc, task_executor); 243 } 244 245 // Final references 246 { 247 TraceTime tt("FinalReference", trace_time, false, gclog_or_tty); 248 process_discovered_reflist(_discoveredFinalRefs, NULL, false, 249 is_alive, keep_alive, complete_gc, task_executor); 250 } 251 252 // Phantom references 253 { 254 TraceTime tt("PhantomReference", trace_time, false, gclog_or_tty); 255 process_discovered_reflist(_discoveredPhantomRefs, NULL, false, 256 is_alive, keep_alive, complete_gc, task_executor); 257 } 258 259 // Weak global JNI references. It would make more sense (semantically) to 260 // traverse these simultaneously with the regular weak references above, but 261 // that is not how the JDK1.2 specification is. See #4126360. Native code can 262 // thus use JNI weak references to circumvent the phantom references and 263 // resurrect a "post-mortem" object. 264 { 265 TraceTime tt("JNI Weak Reference", trace_time, false, gclog_or_tty); 266 if (task_executor != NULL) { 267 task_executor->set_single_threaded_mode(); 268 } 269 process_phaseJNI(is_alive, keep_alive, complete_gc); 270 } 271 } 272 273 #ifndef PRODUCT 274 // Calculate the number of jni handles. 275 uint ReferenceProcessor::count_jni_refs() { 276 class AlwaysAliveClosure: public BoolObjectClosure { 277 public: 278 virtual bool do_object_b(oop obj) { return true; } 279 virtual void do_object(oop obj) { assert(false, "Don't call"); } 280 }; 281 282 class CountHandleClosure: public OopClosure { 283 private: 284 int _count; 285 public: 286 CountHandleClosure(): _count(0) {} 287 void do_oop(oop* unused) { _count++; } 288 void do_oop(narrowOop* unused) { ShouldNotReachHere(); } 289 int count() { return _count; } 290 }; 291 CountHandleClosure global_handle_count; 292 AlwaysAliveClosure always_alive; 293 JNIHandles::weak_oops_do(&always_alive, &global_handle_count); 294 return global_handle_count.count(); 295 } 296 #endif 297 298 void ReferenceProcessor::process_phaseJNI(BoolObjectClosure* is_alive, 299 OopClosure* keep_alive, 300 VoidClosure* complete_gc) { 301 #ifndef PRODUCT 302 if (PrintGCDetails && PrintReferenceGC) { 303 unsigned int count = count_jni_refs(); 304 gclog_or_tty->print(", %u refs", count); 305 } 306 #endif 307 JNIHandles::weak_oops_do(is_alive, keep_alive); 308 // Finally remember to keep sentinel around 309 keep_alive->do_oop(adr_sentinel_ref()); 310 complete_gc->do_void(); 311 } 312 313 314 template <class T> 315 bool enqueue_discovered_ref_helper(ReferenceProcessor* ref, 316 AbstractRefProcTaskExecutor* task_executor) { 317 318 // Remember old value of pending references list 319 T* pending_list_addr = (T*)java_lang_ref_Reference::pending_list_addr(); 320 T old_pending_list_value = *pending_list_addr; 321 322 // Enqueue references that are not made active again, and 323 // clear the decks for the next collection (cycle). 324 ref->enqueue_discovered_reflists((HeapWord*)pending_list_addr, task_executor); 325 // Do the oop-check on pending_list_addr missed in 326 // enqueue_discovered_reflist. We should probably 327 // do a raw oop_check so that future such idempotent 328 // oop_stores relying on the oop-check side-effect 329 // may be elided automatically and safely without 330 // affecting correctness. 331 oop_store(pending_list_addr, oopDesc::load_decode_heap_oop(pending_list_addr)); 332 333 // Stop treating discovered references specially. 334 ref->disable_discovery(); 335 336 // Return true if new pending references were added 337 return old_pending_list_value != *pending_list_addr; 338 } 339 340 bool ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor) { 341 NOT_PRODUCT(verify_ok_to_handle_reflists()); 342 if (UseCompressedOops) { 343 return enqueue_discovered_ref_helper<narrowOop>(this, task_executor); 344 } else { 345 return enqueue_discovered_ref_helper<oop>(this, task_executor); 346 } 347 } 348 349 void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list, 350 HeapWord* pending_list_addr) { 351 // Given a list of refs linked through the "discovered" field 352 // (java.lang.ref.Reference.discovered) chain them through the 353 // "next" field (java.lang.ref.Reference.next) and prepend 354 // to the pending list. 355 if (TraceReferenceGC && PrintGCDetails) { 356 gclog_or_tty->print_cr("ReferenceProcessor::enqueue_discovered_reflist list " 357 INTPTR_FORMAT, (address)refs_list.head()); 358 } 359 oop obj = refs_list.head(); 360 // Walk down the list, copying the discovered field into 361 // the next field and clearing it (except for the last 362 // non-sentinel object which is treated specially to avoid 363 // confusion with an active reference). 364 while (obj != sentinel_ref()) { 365 assert(obj->is_instanceRef(), "should be reference object"); 366 oop next = java_lang_ref_Reference::discovered(obj); 367 if (TraceReferenceGC && PrintGCDetails) { 368 gclog_or_tty->print_cr(" obj " INTPTR_FORMAT "/next " INTPTR_FORMAT, 369 obj, next); 370 } 371 assert(java_lang_ref_Reference::next(obj) == NULL, 372 "The reference should not be enqueued"); 373 if (next == sentinel_ref()) { // obj is last 374 // Swap refs_list into pendling_list_addr and 375 // set obj's next to what we read from pending_list_addr. 376 oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr); 377 // Need oop_check on pending_list_addr above; 378 // see special oop-check code at the end of 379 // enqueue_discovered_reflists() further below. 380 if (old == NULL) { 381 // obj should be made to point to itself, since 382 // pending list was empty. 383 java_lang_ref_Reference::set_next(obj, obj); 384 } else { 385 java_lang_ref_Reference::set_next(obj, old); 386 } 387 } else { 388 java_lang_ref_Reference::set_next(obj, next); 389 } 390 java_lang_ref_Reference::set_discovered(obj, (oop) NULL); 391 obj = next; 392 } 393 } 394 395 // Parallel enqueue task 396 class RefProcEnqueueTask: public AbstractRefProcTaskExecutor::EnqueueTask { 397 public: 398 RefProcEnqueueTask(ReferenceProcessor& ref_processor, 399 DiscoveredList discovered_refs[], 400 HeapWord* pending_list_addr, 401 oop sentinel_ref, 402 int n_queues) 403 : EnqueueTask(ref_processor, discovered_refs, 404 pending_list_addr, sentinel_ref, n_queues) 405 { } 406 407 virtual void work(unsigned int work_id) { 408 assert(work_id < (unsigned int)_ref_processor.num_q(), "Index out-of-bounds"); 409 // Simplest first cut: static partitioning. 410 int index = work_id; 411 // The increment on "index" must correspond to the maximum number of queues 412 // (n_queues) with which that ReferenceProcessor was created. That 413 // is because of the "clever" way the discovered references lists were 414 // allocated and are indexed into. That number is ParallelGCThreads 415 // currently. Assert that. 416 assert(_n_queues == (int) ParallelGCThreads, "Different number not expected"); 417 for (int j = 0; 418 j < subclasses_of_ref; 419 j++, index += _n_queues) { 420 _ref_processor.enqueue_discovered_reflist( 421 _refs_lists[index], _pending_list_addr); 422 _refs_lists[index].set_head(_sentinel_ref); 423 _refs_lists[index].set_length(0); 424 } 425 } 426 }; 427 428 // Enqueue references that are not made active again 429 void ReferenceProcessor::enqueue_discovered_reflists(HeapWord* pending_list_addr, 430 AbstractRefProcTaskExecutor* task_executor) { 431 if (_processing_is_mt && task_executor != NULL) { 432 // Parallel code 433 RefProcEnqueueTask tsk(*this, _discoveredSoftRefs, 434 pending_list_addr, sentinel_ref(), _max_num_q); 435 task_executor->execute(tsk); 436 } else { 437 // Serial code: call the parent class's implementation 438 for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) { 439 enqueue_discovered_reflist(_discoveredSoftRefs[i], pending_list_addr); 440 _discoveredSoftRefs[i].set_head(sentinel_ref()); 441 _discoveredSoftRefs[i].set_length(0); 442 } 443 } 444 } 445 446 // Iterator for the list of discovered references. 447 class DiscoveredListIterator { 448 public: 449 inline DiscoveredListIterator(DiscoveredList& refs_list, 450 OopClosure* keep_alive, 451 BoolObjectClosure* is_alive); 452 453 // End Of List. 454 inline bool has_next() const { return _next != ReferenceProcessor::sentinel_ref(); } 455 456 // Get oop to the Reference object. 457 inline oop obj() const { return _ref; } 458 459 // Get oop to the referent object. 460 inline oop referent() const { return _referent; } 461 462 // Returns true if referent is alive. 463 inline bool is_referent_alive() const; 464 465 // Loads data for the current reference. 466 // The "allow_null_referent" argument tells us to allow for the possibility 467 // of a NULL referent in the discovered Reference object. This typically 468 // happens in the case of concurrent collectors that may have done the 469 // discovery concurrently, or interleaved, with mutator execution. 470 inline void load_ptrs(DEBUG_ONLY(bool allow_null_referent)); 471 472 // Move to the next discovered reference. 473 inline void next(); 474 475 // Remove the current reference from the list 476 inline void remove(); 477 478 // Make the Reference object active again. 479 inline void make_active() { java_lang_ref_Reference::set_next(_ref, NULL); } 480 481 // Make the referent alive. 482 inline void make_referent_alive() { 483 if (UseCompressedOops) { 484 _keep_alive->do_oop((narrowOop*)_referent_addr); 485 } else { 486 _keep_alive->do_oop((oop*)_referent_addr); 487 } 488 } 489 490 // Update the discovered field. 491 inline void update_discovered() { 492 // First _prev_next ref actually points into DiscoveredList (gross). 493 if (UseCompressedOops) { 494 _keep_alive->do_oop((narrowOop*)_prev_next); 495 } else { 496 _keep_alive->do_oop((oop*)_prev_next); 497 } 498 } 499 500 // NULL out referent pointer. 501 inline void clear_referent() { oop_store_raw(_referent_addr, NULL); } 502 503 // Statistics 504 NOT_PRODUCT( 505 inline size_t processed() const { return _processed; } 506 inline size_t removed() const { return _removed; } 507 ) 508 509 inline void move_to_next(); 510 511 private: 512 DiscoveredList& _refs_list; 513 HeapWord* _prev_next; 514 oop _ref; 515 HeapWord* _discovered_addr; 516 oop _next; 517 HeapWord* _referent_addr; 518 oop _referent; 519 OopClosure* _keep_alive; 520 BoolObjectClosure* _is_alive; 521 DEBUG_ONLY( 522 oop _first_seen; // cyclic linked list check 523 ) 524 NOT_PRODUCT( 525 size_t _processed; 526 size_t _removed; 527 ) 528 }; 529 530 inline DiscoveredListIterator::DiscoveredListIterator(DiscoveredList& refs_list, 531 OopClosure* keep_alive, 532 BoolObjectClosure* is_alive) 533 : _refs_list(refs_list), 534 _prev_next(refs_list.adr_head()), 535 _ref(refs_list.head()), 536 #ifdef ASSERT 537 _first_seen(refs_list.head()), 538 #endif 539 #ifndef PRODUCT 540 _processed(0), 541 _removed(0), 542 #endif 543 _next(refs_list.head()), 544 _keep_alive(keep_alive), 545 _is_alive(is_alive) 546 { } 547 548 inline bool DiscoveredListIterator::is_referent_alive() const { 549 return _is_alive->do_object_b(_referent); 550 } 551 552 inline void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) { 553 _discovered_addr = java_lang_ref_Reference::discovered_addr(_ref); 554 oop discovered = java_lang_ref_Reference::discovered(_ref); 555 assert(_discovered_addr && discovered->is_oop_or_null(), 556 "discovered field is bad"); 557 _next = discovered; 558 _referent_addr = java_lang_ref_Reference::referent_addr(_ref); 559 _referent = java_lang_ref_Reference::referent(_ref); 560 assert(Universe::heap()->is_in_reserved_or_null(_referent), 561 "Wrong oop found in java.lang.Reference object"); 562 assert(allow_null_referent ? 563 _referent->is_oop_or_null() 564 : _referent->is_oop(), 565 "bad referent"); 566 } 567 568 inline void DiscoveredListIterator::next() { 569 _prev_next = _discovered_addr; 570 move_to_next(); 571 } 572 573 inline void DiscoveredListIterator::remove() { 574 assert(_ref->is_oop(), "Dropping a bad reference"); 575 oop_store_raw(_discovered_addr, NULL); 576 // First _prev_next ref actually points into DiscoveredList (gross). 577 if (UseCompressedOops) { 578 // Remove Reference object from list. 579 oopDesc::encode_store_heap_oop_not_null((narrowOop*)_prev_next, _next); 580 } else { 581 // Remove Reference object from list. 582 oopDesc::store_heap_oop((oop*)_prev_next, _next); 583 } 584 NOT_PRODUCT(_removed++); 585 _refs_list.dec_length(1); 586 } 587 588 inline void DiscoveredListIterator::move_to_next() { 589 _ref = _next; 590 assert(_ref != _first_seen, "cyclic ref_list found"); 591 NOT_PRODUCT(_processed++); 592 } 593 594 // NOTE: process_phase*() are largely similar, and at a high level 595 // merely iterate over the extant list applying a predicate to 596 // each of its elements and possibly removing that element from the 597 // list and applying some further closures to that element. 598 // We should consider the possibility of replacing these 599 // process_phase*() methods by abstracting them into 600 // a single general iterator invocation that receives appropriate 601 // closures that accomplish this work. 602 603 // (SoftReferences only) Traverse the list and remove any SoftReferences whose 604 // referents are not alive, but that should be kept alive for policy reasons. 605 // Keep alive the transitive closure of all such referents. 606 void 607 ReferenceProcessor::process_phase1(DiscoveredList& refs_list, 608 ReferencePolicy* policy, 609 BoolObjectClosure* is_alive, 610 OopClosure* keep_alive, 611 VoidClosure* complete_gc) { 612 assert(policy != NULL, "Must have a non-NULL policy"); 613 DiscoveredListIterator iter(refs_list, keep_alive, is_alive); 614 // Decide which softly reachable refs should be kept alive. 615 while (iter.has_next()) { 616 iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */)); 617 bool referent_is_dead = (iter.referent() != NULL) && !iter.is_referent_alive(); 618 if (referent_is_dead && !policy->should_clear_reference(iter.obj())) { 619 if (TraceReferenceGC) { 620 gclog_or_tty->print_cr("Dropping reference (" INTPTR_FORMAT ": %s" ") by policy", 621 iter.obj(), iter.obj()->blueprint()->internal_name()); 622 } 623 // Remove Reference object from list 624 iter.remove(); 625 // Make the Reference object active again 626 iter.make_active(); 627 // keep the referent around 628 iter.make_referent_alive(); 629 iter.move_to_next(); 630 } else { 631 iter.next(); 632 } 633 } 634 // Close the reachable set 635 complete_gc->do_void(); 636 NOT_PRODUCT( 637 if (PrintGCDetails && TraceReferenceGC) { 638 gclog_or_tty->print_cr(" Dropped %d dead Refs out of %d " 639 "discovered Refs by policy list " INTPTR_FORMAT, 640 iter.removed(), iter.processed(), (address)refs_list.head()); 641 } 642 ) 643 } 644 645 // Traverse the list and remove any Refs that are not active, or 646 // whose referents are either alive or NULL. 647 void 648 ReferenceProcessor::pp2_work(DiscoveredList& refs_list, 649 BoolObjectClosure* is_alive, 650 OopClosure* keep_alive) { 651 assert(discovery_is_atomic(), "Error"); 652 DiscoveredListIterator iter(refs_list, keep_alive, is_alive); 653 while (iter.has_next()) { 654 iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */)); 655 DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());) 656 assert(next == NULL, "Should not discover inactive Reference"); 657 if (iter.is_referent_alive()) { 658 if (TraceReferenceGC) { 659 gclog_or_tty->print_cr("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)", 660 iter.obj(), iter.obj()->blueprint()->internal_name()); 661 } 662 // The referent is reachable after all. 663 // Remove Reference object from list. 664 iter.remove(); 665 // Update the referent pointer as necessary: Note that this 666 // should not entail any recursive marking because the 667 // referent must already have been traversed. 668 iter.make_referent_alive(); 669 iter.move_to_next(); 670 } else { 671 iter.next(); 672 } 673 } 674 NOT_PRODUCT( 675 if (PrintGCDetails && TraceReferenceGC) { 676 gclog_or_tty->print_cr(" Dropped %d active Refs out of %d " 677 "Refs in discovered list " INTPTR_FORMAT, 678 iter.removed(), iter.processed(), (address)refs_list.head()); 679 } 680 ) 681 } 682 683 void 684 ReferenceProcessor::pp2_work_concurrent_discovery(DiscoveredList& refs_list, 685 BoolObjectClosure* is_alive, 686 OopClosure* keep_alive, 687 VoidClosure* complete_gc) { 688 assert(!discovery_is_atomic(), "Error"); 689 DiscoveredListIterator iter(refs_list, keep_alive, is_alive); 690 while (iter.has_next()) { 691 iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */)); 692 HeapWord* next_addr = java_lang_ref_Reference::next_addr(iter.obj()); 693 oop next = java_lang_ref_Reference::next(iter.obj()); 694 if ((iter.referent() == NULL || iter.is_referent_alive() || 695 next != NULL)) { 696 assert(next->is_oop_or_null(), "bad next field"); 697 // Remove Reference object from list 698 iter.remove(); 699 // Trace the cohorts 700 iter.make_referent_alive(); 701 if (UseCompressedOops) { 702 keep_alive->do_oop((narrowOop*)next_addr); 703 } else { 704 keep_alive->do_oop((oop*)next_addr); 705 } 706 iter.move_to_next(); 707 } else { 708 iter.next(); 709 } 710 } 711 // Now close the newly reachable set 712 complete_gc->do_void(); 713 NOT_PRODUCT( 714 if (PrintGCDetails && TraceReferenceGC) { 715 gclog_or_tty->print_cr(" Dropped %d active Refs out of %d " 716 "Refs in discovered list " INTPTR_FORMAT, 717 iter.removed(), iter.processed(), (address)refs_list.head()); 718 } 719 ) 720 } 721 722 // Traverse the list and process the referents, by either 723 // clearing them or keeping them (and their reachable 724 // closure) alive. 725 void 726 ReferenceProcessor::process_phase3(DiscoveredList& refs_list, 727 bool clear_referent, 728 BoolObjectClosure* is_alive, 729 OopClosure* keep_alive, 730 VoidClosure* complete_gc) { 731 ResourceMark rm; 732 DiscoveredListIterator iter(refs_list, keep_alive, is_alive); 733 while (iter.has_next()) { 734 iter.update_discovered(); 735 iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */)); 736 if (clear_referent) { 737 // NULL out referent pointer 738 iter.clear_referent(); 739 } else { 740 // keep the referent around 741 iter.make_referent_alive(); 742 } 743 if (TraceReferenceGC) { 744 gclog_or_tty->print_cr("Adding %sreference (" INTPTR_FORMAT ": %s) as pending", 745 clear_referent ? "cleared " : "", 746 iter.obj(), iter.obj()->blueprint()->internal_name()); 747 } 748 assert(iter.obj()->is_oop(UseConcMarkSweepGC), "Adding a bad reference"); 749 iter.next(); 750 } 751 // Remember to keep sentinel pointer around 752 iter.update_discovered(); 753 // Close the reachable set 754 complete_gc->do_void(); 755 } 756 757 void 758 ReferenceProcessor::abandon_partial_discovered_list(DiscoveredList& refs_list) { 759 oop obj = refs_list.head(); 760 while (obj != sentinel_ref()) { 761 oop discovered = java_lang_ref_Reference::discovered(obj); 762 java_lang_ref_Reference::set_discovered_raw(obj, NULL); 763 obj = discovered; 764 } 765 refs_list.set_head(sentinel_ref()); 766 refs_list.set_length(0); 767 } 768 769 void ReferenceProcessor::abandon_partial_discovery() { 770 // loop over the lists 771 for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) { 772 if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) { 773 gclog_or_tty->print_cr( 774 "\nAbandoning %s discovered list", 775 list_name(i)); 776 } 777 abandon_partial_discovered_list(_discoveredSoftRefs[i]); 778 } 779 } 780 781 class RefProcPhase1Task: public AbstractRefProcTaskExecutor::ProcessTask { 782 public: 783 RefProcPhase1Task(ReferenceProcessor& ref_processor, 784 DiscoveredList refs_lists[], 785 ReferencePolicy* policy, 786 bool marks_oops_alive) 787 : ProcessTask(ref_processor, refs_lists, marks_oops_alive), 788 _policy(policy) 789 { } 790 virtual void work(unsigned int i, BoolObjectClosure& is_alive, 791 OopClosure& keep_alive, 792 VoidClosure& complete_gc) 793 { 794 Thread* thr = Thread::current(); 795 int refs_list_index = ((WorkerThread*)thr)->id(); 796 _ref_processor.process_phase1(_refs_lists[refs_list_index], _policy, 797 &is_alive, &keep_alive, &complete_gc); 798 } 799 private: 800 ReferencePolicy* _policy; 801 }; 802 803 class RefProcPhase2Task: public AbstractRefProcTaskExecutor::ProcessTask { 804 public: 805 RefProcPhase2Task(ReferenceProcessor& ref_processor, 806 DiscoveredList refs_lists[], 807 bool marks_oops_alive) 808 : ProcessTask(ref_processor, refs_lists, marks_oops_alive) 809 { } 810 virtual void work(unsigned int i, BoolObjectClosure& is_alive, 811 OopClosure& keep_alive, 812 VoidClosure& complete_gc) 813 { 814 _ref_processor.process_phase2(_refs_lists[i], 815 &is_alive, &keep_alive, &complete_gc); 816 } 817 }; 818 819 class RefProcPhase3Task: public AbstractRefProcTaskExecutor::ProcessTask { 820 public: 821 RefProcPhase3Task(ReferenceProcessor& ref_processor, 822 DiscoveredList refs_lists[], 823 bool clear_referent, 824 bool marks_oops_alive) 825 : ProcessTask(ref_processor, refs_lists, marks_oops_alive), 826 _clear_referent(clear_referent) 827 { } 828 virtual void work(unsigned int i, BoolObjectClosure& is_alive, 829 OopClosure& keep_alive, 830 VoidClosure& complete_gc) 831 { 832 // Don't use "refs_list_index" calculated in this way because 833 // balance_queues() has moved the Ref's into the first n queues. 834 // Thread* thr = Thread::current(); 835 // int refs_list_index = ((WorkerThread*)thr)->id(); 836 // _ref_processor.process_phase3(_refs_lists[refs_list_index], _clear_referent, 837 _ref_processor.process_phase3(_refs_lists[i], _clear_referent, 838 &is_alive, &keep_alive, &complete_gc); 839 } 840 private: 841 bool _clear_referent; 842 }; 843 844 // Balances reference queues. 845 // Move entries from all queues[0, 1, ..., _max_num_q-1] to 846 // queues[0, 1, ..., _num_q-1] because only the first _num_q 847 // corresponding to the active workers will be processed. 848 void ReferenceProcessor::balance_queues(DiscoveredList ref_lists[]) 849 { 850 // calculate total length 851 size_t total_refs = 0; 852 if (TraceReferenceGC && PrintGCDetails) { 853 gclog_or_tty->print_cr("\nBalance ref_lists "); 854 } 855 856 for (int i = 0; i < _max_num_q; ++i) { 857 total_refs += ref_lists[i].length(); 858 if (TraceReferenceGC && PrintGCDetails) { 859 gclog_or_tty->print("%d ", ref_lists[i].length()); 860 } 861 } 862 if (TraceReferenceGC && PrintGCDetails) { 863 gclog_or_tty->print_cr(" = %d", total_refs); 864 } 865 size_t avg_refs = total_refs / _num_q + 1; 866 int to_idx = 0; 867 for (int from_idx = 0; from_idx < _max_num_q; from_idx++) { 868 bool move_all = false; 869 if (from_idx >= _num_q) { 870 move_all = ref_lists[from_idx].length() > 0; 871 } 872 while ((ref_lists[from_idx].length() > avg_refs) || 873 move_all) { 874 assert(to_idx < _num_q, "Sanity Check!"); 875 if (ref_lists[to_idx].length() < avg_refs) { 876 // move superfluous refs 877 size_t refs_to_move; 878 // Move all the Ref's if the from queue will not be processed. 879 if (move_all) { 880 refs_to_move = MIN2(ref_lists[from_idx].length(), 881 avg_refs - ref_lists[to_idx].length()); 882 } else { 883 refs_to_move = MIN2(ref_lists[from_idx].length() - avg_refs, 884 avg_refs - ref_lists[to_idx].length()); 885 } 886 oop move_head = ref_lists[from_idx].head(); 887 oop move_tail = move_head; 888 oop new_head = move_head; 889 // find an element to split the list on 890 for (size_t j = 0; j < refs_to_move; ++j) { 891 move_tail = new_head; 892 new_head = java_lang_ref_Reference::discovered(new_head); 893 } 894 java_lang_ref_Reference::set_discovered(move_tail, ref_lists[to_idx].head()); 895 ref_lists[to_idx].set_head(move_head); 896 ref_lists[to_idx].inc_length(refs_to_move); 897 ref_lists[from_idx].set_head(new_head); 898 ref_lists[from_idx].dec_length(refs_to_move); 899 if (ref_lists[from_idx].length() == 0) { 900 break; 901 } 902 } else { 903 to_idx = (to_idx + 1) % _num_q; 904 } 905 } 906 } 907 #ifdef ASSERT 908 size_t balanced_total_refs = 0; 909 for (int i = 0; i < _max_num_q; ++i) { 910 balanced_total_refs += ref_lists[i].length(); 911 if (TraceReferenceGC && PrintGCDetails) { 912 gclog_or_tty->print("%d ", ref_lists[i].length()); 913 } 914 } 915 if (TraceReferenceGC && PrintGCDetails) { 916 gclog_or_tty->print_cr(" = %d", balanced_total_refs); 917 gclog_or_tty->flush(); 918 } 919 assert(total_refs == balanced_total_refs, "Balancing was incomplete"); 920 #endif 921 } 922 923 void ReferenceProcessor::balance_all_queues() { 924 balance_queues(_discoveredSoftRefs); 925 balance_queues(_discoveredWeakRefs); 926 balance_queues(_discoveredFinalRefs); 927 balance_queues(_discoveredPhantomRefs); 928 } 929 930 void 931 ReferenceProcessor::process_discovered_reflist( 932 DiscoveredList refs_lists[], 933 ReferencePolicy* policy, 934 bool clear_referent, 935 BoolObjectClosure* is_alive, 936 OopClosure* keep_alive, 937 VoidClosure* complete_gc, 938 AbstractRefProcTaskExecutor* task_executor) 939 { 940 bool mt_processing = task_executor != NULL && _processing_is_mt; 941 // If discovery used MT and a dynamic number of GC threads, then 942 // the queues must be balanced for correctness if fewer than the 943 // maximum number of queues were used. The number of queue used 944 // during discovery may be different than the number to be used 945 // for processing so don't depend of _num_q < _max_num_q as part 946 // of the test. 947 bool must_balance = _discovery_is_mt; 948 949 if ((mt_processing && ParallelRefProcBalancingEnabled) || 950 must_balance) { 951 balance_queues(refs_lists); 952 } 953 if (PrintReferenceGC && PrintGCDetails) { 954 size_t total = 0; 955 for (int i = 0; i < _num_q; ++i) { 956 total += refs_lists[i].length(); 957 } 958 gclog_or_tty->print(", %u refs", total); 959 } 960 961 // Phase 1 (soft refs only): 962 // . Traverse the list and remove any SoftReferences whose 963 // referents are not alive, but that should be kept alive for 964 // policy reasons. Keep alive the transitive closure of all 965 // such referents. 966 if (policy != NULL) { 967 if (mt_processing) { 968 RefProcPhase1Task phase1(*this, refs_lists, policy, true /*marks_oops_alive*/); 969 task_executor->execute(phase1); 970 } else { 971 for (int i = 0; i < _num_q; i++) { 972 process_phase1(refs_lists[i], policy, 973 is_alive, keep_alive, complete_gc); 974 } 975 } 976 } else { // policy == NULL 977 assert(refs_lists != _discoveredSoftRefs, 978 "Policy must be specified for soft references."); 979 } 980 981 // Phase 2: 982 // . Traverse the list and remove any refs whose referents are alive. 983 if (mt_processing) { 984 RefProcPhase2Task phase2(*this, refs_lists, !discovery_is_atomic() /*marks_oops_alive*/); 985 task_executor->execute(phase2); 986 } else { 987 for (int i = 0; i < _num_q; i++) { 988 process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc); 989 } 990 } 991 992 // Phase 3: 993 // . Traverse the list and process referents as appropriate. 994 if (mt_processing) { 995 RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/); 996 task_executor->execute(phase3); 997 } else { 998 for (int i = 0; i < _num_q; i++) { 999 process_phase3(refs_lists[i], clear_referent, 1000 is_alive, keep_alive, complete_gc); 1001 } 1002 } 1003 } 1004 1005 void ReferenceProcessor::clean_up_discovered_references() { 1006 // loop over the lists 1007 // Should this instead be 1008 // for (int i = 0; i < subclasses_of_ref; i++_ { 1009 // for (int j = 0; j < _num_q; j++) { 1010 // int index = i * _max_num_q + j; 1011 for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) { 1012 if (TraceReferenceGC && PrintGCDetails && ((i % _num_q) == 0)) { 1013 gclog_or_tty->print_cr( 1014 "\nScrubbing %s discovered list of Null referents", 1015 list_name(i)); 1016 } 1017 clean_up_discovered_reflist(_discoveredSoftRefs[i]); 1018 } 1019 } 1020 1021 void ReferenceProcessor::clean_up_discovered_reflist(DiscoveredList& refs_list) { 1022 assert(!discovery_is_atomic(), "Else why call this method?"); 1023 DiscoveredListIterator iter(refs_list, NULL, NULL); 1024 while (iter.has_next()) { 1025 iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */)); 1026 oop next = java_lang_ref_Reference::next(iter.obj()); 1027 assert(next->is_oop_or_null(), "bad next field"); 1028 // If referent has been cleared or Reference is not active, 1029 // drop it. 1030 if (iter.referent() == NULL || next != NULL) { 1031 debug_only( 1032 if (PrintGCDetails && TraceReferenceGC) { 1033 gclog_or_tty->print_cr("clean_up_discovered_list: Dropping Reference: " 1034 INTPTR_FORMAT " with next field: " INTPTR_FORMAT 1035 " and referent: " INTPTR_FORMAT, 1036 iter.obj(), next, iter.referent()); 1037 } 1038 ) 1039 // Remove Reference object from list 1040 iter.remove(); 1041 iter.move_to_next(); 1042 } else { 1043 iter.next(); 1044 } 1045 } 1046 NOT_PRODUCT( 1047 if (PrintGCDetails && TraceReferenceGC) { 1048 gclog_or_tty->print( 1049 " Removed %d Refs with NULL referents out of %d discovered Refs", 1050 iter.removed(), iter.processed()); 1051 } 1052 ) 1053 } 1054 1055 inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt) { 1056 int id = 0; 1057 // Determine the queue index to use for this object. 1058 if (_discovery_is_mt) { 1059 // During a multi-threaded discovery phase, 1060 // each thread saves to its "own" list. 1061 Thread* thr = Thread::current(); 1062 assert(thr->is_GC_task_thread(), 1063 "Dubious cast from Thread* to WorkerThread*?"); 1064 id = ((WorkerThread*)thr)->id(); 1065 } else { 1066 // single-threaded discovery, we save in round-robin 1067 // fashion to each of the lists. 1068 if (_processing_is_mt) { 1069 id = next_id(); 1070 } 1071 } 1072 assert(0 <= id && id < _max_num_q, "Id is out-of-bounds (call Freud?)"); 1073 1074 // Get the discovered queue to which we will add 1075 DiscoveredList* list = NULL; 1076 switch (rt) { 1077 case REF_OTHER: 1078 // Unknown reference type, no special treatment 1079 break; 1080 case REF_SOFT: 1081 list = &_discoveredSoftRefs[id]; 1082 break; 1083 case REF_WEAK: 1084 list = &_discoveredWeakRefs[id]; 1085 break; 1086 case REF_FINAL: 1087 list = &_discoveredFinalRefs[id]; 1088 break; 1089 case REF_PHANTOM: 1090 list = &_discoveredPhantomRefs[id]; 1091 break; 1092 case REF_NONE: 1093 // we should not reach here if we are an instanceRefKlass 1094 default: 1095 ShouldNotReachHere(); 1096 } 1097 if (TraceReferenceGC && PrintGCDetails) { 1098 gclog_or_tty->print_cr("Thread %d gets list " INTPTR_FORMAT, 1099 id, list); 1100 } 1101 return list; 1102 } 1103 1104 inline void 1105 ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list, 1106 oop obj, 1107 HeapWord* discovered_addr) { 1108 assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller"); 1109 // First we must make sure this object is only enqueued once. CAS in a non null 1110 // discovered_addr. 1111 oop current_head = refs_list.head(); 1112 1113 // Note: In the case of G1, this specific pre-barrier is strictly 1114 // not necessary because the only case we are interested in 1115 // here is when *discovered_addr is NULL (see the CAS further below), 1116 // so this will expand to nothing. As a result, we have manually 1117 // elided this out for G1, but left in the test for some future 1118 // collector that might have need for a pre-barrier here. 1119 if (_discovered_list_needs_barrier && !UseG1GC) { 1120 if (UseCompressedOops) { 1121 _bs->write_ref_field_pre((narrowOop*)discovered_addr, current_head); 1122 } else { 1123 _bs->write_ref_field_pre((oop*)discovered_addr, current_head); 1124 } 1125 guarantee(false, "Need to check non-G1 collector"); 1126 } 1127 oop retest = oopDesc::atomic_compare_exchange_oop(current_head, discovered_addr, 1128 NULL); 1129 if (retest == NULL) { 1130 // This thread just won the right to enqueue the object. 1131 // We have separate lists for enqueueing so no synchronization 1132 // is necessary. 1133 refs_list.set_head(obj); 1134 refs_list.inc_length(1); 1135 if (_discovered_list_needs_barrier) { 1136 _bs->write_ref_field((void*)discovered_addr, current_head); 1137 } 1138 } else { 1139 // If retest was non NULL, another thread beat us to it: 1140 // The reference has already been discovered... 1141 if (TraceReferenceGC) { 1142 gclog_or_tty->print_cr("Already enqueued reference (" INTPTR_FORMAT ": %s)", 1143 obj, obj->blueprint()->internal_name()); 1144 } 1145 } 1146 } 1147 1148 // We mention two of several possible choices here: 1149 // #0: if the reference object is not in the "originating generation" 1150 // (or part of the heap being collected, indicated by our "span" 1151 // we don't treat it specially (i.e. we scan it as we would 1152 // a normal oop, treating its references as strong references). 1153 // This means that references can't be enqueued unless their 1154 // referent is also in the same span. This is the simplest, 1155 // most "local" and most conservative approach, albeit one 1156 // that may cause weak references to be enqueued least promptly. 1157 // We call this choice the "ReferenceBasedDiscovery" policy. 1158 // #1: the reference object may be in any generation (span), but if 1159 // the referent is in the generation (span) being currently collected 1160 // then we can discover the reference object, provided 1161 // the object has not already been discovered by 1162 // a different concurrently running collector (as may be the 1163 // case, for instance, if the reference object is in CMS and 1164 // the referent in DefNewGeneration), and provided the processing 1165 // of this reference object by the current collector will 1166 // appear atomic to every other collector in the system. 1167 // (Thus, for instance, a concurrent collector may not 1168 // discover references in other generations even if the 1169 // referent is in its own generation). This policy may, 1170 // in certain cases, enqueue references somewhat sooner than 1171 // might Policy #0 above, but at marginally increased cost 1172 // and complexity in processing these references. 1173 // We call this choice the "RefeferentBasedDiscovery" policy. 1174 bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) { 1175 // We enqueue references only if we are discovering refs 1176 // (rather than processing discovered refs). 1177 if (!_discovering_refs || !RegisterReferences) { 1178 return false; 1179 } 1180 // We only enqueue active references. 1181 oop next = java_lang_ref_Reference::next(obj); 1182 if (next != NULL) { 1183 return false; 1184 } 1185 1186 HeapWord* obj_addr = (HeapWord*)obj; 1187 if (RefDiscoveryPolicy == ReferenceBasedDiscovery && 1188 !_span.contains(obj_addr)) { 1189 // Reference is not in the originating generation; 1190 // don't treat it specially (i.e. we want to scan it as a normal 1191 // object with strong references). 1192 return false; 1193 } 1194 1195 // We only enqueue references whose referents are not (yet) strongly 1196 // reachable. 1197 if (is_alive_non_header() != NULL) { 1198 oop referent = java_lang_ref_Reference::referent(obj); 1199 // In the case of non-concurrent discovery, the last 1200 // disjunct below should hold. It may not hold in the 1201 // case of concurrent discovery because mutators may 1202 // concurrently clear() a Reference. 1203 assert(UseConcMarkSweepGC || UseG1GC || referent != NULL, 1204 "Refs with null referents already filtered"); 1205 if (is_alive_non_header()->do_object_b(referent)) { 1206 return false; // referent is reachable 1207 } 1208 } 1209 if (rt == REF_SOFT) { 1210 // For soft refs we can decide now if these are not 1211 // current candidates for clearing, in which case we 1212 // can mark through them now, rather than delaying that 1213 // to the reference-processing phase. Since all current 1214 // time-stamp policies advance the soft-ref clock only 1215 // at a major collection cycle, this is always currently 1216 // accurate. 1217 if (!_current_soft_ref_policy->should_clear_reference(obj)) { 1218 return false; 1219 } 1220 } 1221 1222 HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(obj); 1223 const oop discovered = java_lang_ref_Reference::discovered(obj); 1224 assert(discovered->is_oop_or_null(), "bad discovered field"); 1225 if (discovered != NULL) { 1226 // The reference has already been discovered... 1227 if (TraceReferenceGC) { 1228 gclog_or_tty->print_cr("Already enqueued reference (" INTPTR_FORMAT ": %s)", 1229 obj, obj->blueprint()->internal_name()); 1230 } 1231 if (RefDiscoveryPolicy == ReferentBasedDiscovery) { 1232 // assumes that an object is not processed twice; 1233 // if it's been already discovered it must be on another 1234 // generation's discovered list; so we won't discover it. 1235 return false; 1236 } else { 1237 assert(RefDiscoveryPolicy == ReferenceBasedDiscovery, 1238 "Unrecognized policy"); 1239 // Check assumption that an object is not potentially 1240 // discovered twice except by concurrent collectors that potentially 1241 // trace the same Reference object twice. 1242 assert(UseConcMarkSweepGC, 1243 "Only possible with an incremental-update concurrent collector"); 1244 return true; 1245 } 1246 } 1247 1248 if (RefDiscoveryPolicy == ReferentBasedDiscovery) { 1249 oop referent = java_lang_ref_Reference::referent(obj); 1250 assert(referent->is_oop(), "bad referent"); 1251 // enqueue if and only if either: 1252 // reference is in our span or 1253 // we are an atomic collector and referent is in our span 1254 if (_span.contains(obj_addr) || 1255 (discovery_is_atomic() && _span.contains(referent))) { 1256 // should_enqueue = true; 1257 } else { 1258 return false; 1259 } 1260 } else { 1261 assert(RefDiscoveryPolicy == ReferenceBasedDiscovery && 1262 _span.contains(obj_addr), "code inconsistency"); 1263 } 1264 1265 // Get the right type of discovered queue head. 1266 DiscoveredList* list = get_discovered_list(rt); 1267 if (list == NULL) { 1268 return false; // nothing special needs to be done 1269 } 1270 1271 if (_discovery_is_mt) { 1272 add_to_discovered_list_mt(*list, obj, discovered_addr); 1273 } else { 1274 // If "_discovered_list_needs_barrier", we do write barriers when 1275 // updating the discovered reference list. Otherwise, we do a raw store 1276 // here: the field will be visited later when processing the discovered 1277 // references. 1278 oop current_head = list->head(); 1279 // As in the case further above, since we are over-writing a NULL 1280 // pre-value, we can safely elide the pre-barrier here for the case of G1. 1281 assert(discovered == NULL, "control point invariant"); 1282 if (_discovered_list_needs_barrier && !UseG1GC) { // safe to elide for G1 1283 if (UseCompressedOops) { 1284 _bs->write_ref_field_pre((narrowOop*)discovered_addr, current_head); 1285 } else { 1286 _bs->write_ref_field_pre((oop*)discovered_addr, current_head); 1287 } 1288 guarantee(false, "Need to check non-G1 collector"); 1289 } 1290 oop_store_raw(discovered_addr, current_head); 1291 if (_discovered_list_needs_barrier) { 1292 _bs->write_ref_field((void*)discovered_addr, current_head); 1293 } 1294 list->set_head(obj); 1295 list->inc_length(1); 1296 } 1297 1298 // In the MT discovery case, it is currently possible to see 1299 // the following message multiple times if several threads 1300 // discover a reference about the same time. Only one will 1301 // however have actually added it to the disocvered queue. 1302 // One could let add_to_discovered_list_mt() return an 1303 // indication for success in queueing (by 1 thread) or 1304 // failure (by all other threads), but I decided the extra 1305 // code was not worth the effort for something that is 1306 // only used for debugging support. 1307 if (TraceReferenceGC) { 1308 oop referent = java_lang_ref_Reference::referent(obj); 1309 if (PrintGCDetails) { 1310 gclog_or_tty->print_cr("Enqueued reference (" INTPTR_FORMAT ": %s)", 1311 obj, obj->blueprint()->internal_name()); 1312 } 1313 assert(referent->is_oop(), "Enqueued a bad referent"); 1314 } 1315 assert(obj->is_oop(), "Enqueued a bad reference"); 1316 return true; 1317 } 1318 1319 // Preclean the discovered references by removing those 1320 // whose referents are alive, and by marking from those that 1321 // are not active. These lists can be handled here 1322 // in any order and, indeed, concurrently. 1323 void ReferenceProcessor::preclean_discovered_references( 1324 BoolObjectClosure* is_alive, 1325 OopClosure* keep_alive, 1326 VoidClosure* complete_gc, 1327 YieldClosure* yield, 1328 bool should_unload_classes) { 1329 1330 NOT_PRODUCT(verify_ok_to_handle_reflists()); 1331 1332 #ifdef ASSERT 1333 bool must_remember_klasses = ClassUnloading && !UseConcMarkSweepGC || 1334 CMSClassUnloadingEnabled && UseConcMarkSweepGC || 1335 ExplicitGCInvokesConcurrentAndUnloadsClasses && 1336 UseConcMarkSweepGC && should_unload_classes; 1337 RememberKlassesChecker mx(must_remember_klasses); 1338 #endif 1339 // Soft references 1340 { 1341 TraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC, 1342 false, gclog_or_tty); 1343 for (int i = 0; i < _max_num_q; i++) { 1344 if (yield->should_return()) { 1345 return; 1346 } 1347 preclean_discovered_reflist(_discoveredSoftRefs[i], is_alive, 1348 keep_alive, complete_gc, yield); 1349 } 1350 } 1351 1352 // Weak references 1353 { 1354 TraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC, 1355 false, gclog_or_tty); 1356 for (int i = 0; i < _num_q; i++) { 1357 if (yield->should_return()) { 1358 return; 1359 } 1360 preclean_discovered_reflist(_discoveredWeakRefs[i], is_alive, 1361 keep_alive, complete_gc, yield); 1362 } 1363 } 1364 1365 // Final references 1366 { 1367 TraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC, 1368 false, gclog_or_tty); 1369 for (int i = 0; i < _num_q; i++) { 1370 if (yield->should_return()) { 1371 return; 1372 } 1373 preclean_discovered_reflist(_discoveredFinalRefs[i], is_alive, 1374 keep_alive, complete_gc, yield); 1375 } 1376 } 1377 1378 // Phantom references 1379 { 1380 TraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC, 1381 false, gclog_or_tty); 1382 for (int i = 0; i < _num_q; i++) { 1383 if (yield->should_return()) { 1384 return; 1385 } 1386 preclean_discovered_reflist(_discoveredPhantomRefs[i], is_alive, 1387 keep_alive, complete_gc, yield); 1388 } 1389 } 1390 } 1391 1392 // Walk the given discovered ref list, and remove all reference objects 1393 // whose referents are still alive, whose referents are NULL or which 1394 // are not active (have a non-NULL next field). NOTE: When we are 1395 // thus precleaning the ref lists (which happens single-threaded today), 1396 // we do not disable refs discovery to honour the correct semantics of 1397 // java.lang.Reference. As a result, we need to be careful below 1398 // that ref removal steps interleave safely with ref discovery steps 1399 // (in this thread). 1400 void 1401 ReferenceProcessor::preclean_discovered_reflist(DiscoveredList& refs_list, 1402 BoolObjectClosure* is_alive, 1403 OopClosure* keep_alive, 1404 VoidClosure* complete_gc, 1405 YieldClosure* yield) { 1406 DiscoveredListIterator iter(refs_list, keep_alive, is_alive); 1407 while (iter.has_next()) { 1408 iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */)); 1409 oop obj = iter.obj(); 1410 oop next = java_lang_ref_Reference::next(obj); 1411 if (iter.referent() == NULL || iter.is_referent_alive() || 1412 next != NULL) { 1413 // The referent has been cleared, or is alive, or the Reference is not 1414 // active; we need to trace and mark its cohort. 1415 if (TraceReferenceGC) { 1416 gclog_or_tty->print_cr("Precleaning Reference (" INTPTR_FORMAT ": %s)", 1417 iter.obj(), iter.obj()->blueprint()->internal_name()); 1418 } 1419 // Remove Reference object from list 1420 iter.remove(); 1421 // Keep alive its cohort. 1422 iter.make_referent_alive(); 1423 if (UseCompressedOops) { 1424 narrowOop* next_addr = (narrowOop*)java_lang_ref_Reference::next_addr(obj); 1425 keep_alive->do_oop(next_addr); 1426 } else { 1427 oop* next_addr = (oop*)java_lang_ref_Reference::next_addr(obj); 1428 keep_alive->do_oop(next_addr); 1429 } 1430 iter.move_to_next(); 1431 } else { 1432 iter.next(); 1433 } 1434 } 1435 // Close the reachable set 1436 complete_gc->do_void(); 1437 1438 NOT_PRODUCT( 1439 if (PrintGCDetails && PrintReferenceGC) { 1440 gclog_or_tty->print_cr(" Dropped %d Refs out of %d " 1441 "Refs in discovered list " INTPTR_FORMAT, 1442 iter.removed(), iter.processed(), (address)refs_list.head()); 1443 } 1444 ) 1445 } 1446 1447 const char* ReferenceProcessor::list_name(int i) { 1448 assert(i >= 0 && i <= _max_num_q * subclasses_of_ref, "Out of bounds index"); 1449 int j = i / _max_num_q; 1450 switch (j) { 1451 case 0: return "SoftRef"; 1452 case 1: return "WeakRef"; 1453 case 2: return "FinalRef"; 1454 case 3: return "PhantomRef"; 1455 } 1456 ShouldNotReachHere(); 1457 return NULL; 1458 } 1459 1460 #ifndef PRODUCT 1461 void ReferenceProcessor::verify_ok_to_handle_reflists() { 1462 // empty for now 1463 } 1464 #endif 1465 1466 void ReferenceProcessor::verify() { 1467 guarantee(sentinel_ref() != NULL && sentinel_ref()->is_oop(), "Lost _sentinelRef"); 1468 } 1469 1470 #ifndef PRODUCT 1471 void ReferenceProcessor::clear_discovered_references() { 1472 guarantee(!_discovering_refs, "Discovering refs?"); 1473 for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) { 1474 oop obj = _discoveredSoftRefs[i].head(); 1475 while (obj != sentinel_ref()) { 1476 oop next = java_lang_ref_Reference::discovered(obj); 1477 java_lang_ref_Reference::set_discovered(obj, (oop) NULL); 1478 obj = next; 1479 } 1480 _discoveredSoftRefs[i].set_head(sentinel_ref()); 1481 _discoveredSoftRefs[i].set_length(0); 1482 } 1483 } 1484 #endif // PRODUCT