1 /* 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "classfile/javaClasses.hpp" 27 #include "classfile/systemDictionary.hpp" 28 #include "gc_interface/collectedHeap.hpp" 29 #include "gc_interface/collectedHeap.inline.hpp" 30 #include "memory/referencePolicy.hpp" 31 #include "memory/referenceProcessor.hpp" 32 #include "oops/oop.inline.hpp" 33 #include "runtime/java.hpp" 34 #include "runtime/jniHandles.hpp" 35 36 ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL; 37 ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy = NULL; 38 const int subclasses_of_ref = REF_PHANTOM - REF_OTHER; 39 bool ReferenceProcessor::_pending_list_uses_discovered_field = false; 40 41 // List of discovered references. 42 class DiscoveredList { 43 public: 44 DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { } 45 oop head() const { 46 return UseCompressedOops ? 
oopDesc::decode_heap_oop(_compressed_head) : 47 _oop_head; 48 } 49 HeapWord* adr_head() { 50 return UseCompressedOops ? (HeapWord*)&_compressed_head : 51 (HeapWord*)&_oop_head; 52 } 53 void set_head(oop o) { 54 if (UseCompressedOops) { 55 // Must compress the head ptr. 56 _compressed_head = oopDesc::encode_heap_oop(o); 57 } else { 58 _oop_head = o; 59 } 60 } 61 bool empty() const { return head() == NULL; } 62 size_t length() { return _len; } 63 void set_length(size_t len) { _len = len; } 64 void inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); } 65 void dec_length(size_t dec) { _len -= dec; } 66 private: 67 // Set value depending on UseCompressedOops. This could be a template class 68 // but then we have to fix all the instantiations and declarations that use this class. 69 oop _oop_head; 70 narrowOop _compressed_head; 71 size_t _len; 72 }; 73 74 void referenceProcessor_init() { 75 ReferenceProcessor::init_statics(); 76 } 77 78 void ReferenceProcessor::init_statics() { 79 // Initialize the master soft ref clock. 
80 java_lang_ref_SoftReference::set_clock(os::javaTimeMillis()); 81 82 _always_clear_soft_ref_policy = new AlwaysClearPolicy(); 83 _default_soft_ref_policy = new COMPILER2_PRESENT(LRUMaxHeapPolicy()) 84 NOT_COMPILER2(LRUCurrentHeapPolicy()); 85 if (_always_clear_soft_ref_policy == NULL || _default_soft_ref_policy == NULL) { 86 vm_exit_during_initialization("Could not allocate reference policy object"); 87 } 88 guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery || 89 RefDiscoveryPolicy == ReferentBasedDiscovery, 90 "Unrecongnized RefDiscoveryPolicy"); 91 _pending_list_uses_discovered_field = JDK_Version::current().pending_list_uses_discovered_field(); 92 } 93 94 ReferenceProcessor::ReferenceProcessor(MemRegion span, 95 bool mt_processing, 96 int mt_processing_degree, 97 bool mt_discovery, 98 int mt_discovery_degree, 99 bool atomic_discovery, 100 BoolObjectClosure* is_alive_non_header, 101 bool discovered_list_needs_barrier) : 102 _discovering_refs(false), 103 _enqueuing_is_done(false), 104 _is_alive_non_header(is_alive_non_header), 105 _discovered_list_needs_barrier(discovered_list_needs_barrier), 106 _bs(NULL), 107 _processing_is_mt(mt_processing), 108 _next_id(0) 109 { 110 _span = span; 111 _discovery_is_atomic = atomic_discovery; 112 _discovery_is_mt = mt_discovery; 113 _num_q = MAX2(1, mt_processing_degree); 114 _max_num_q = MAX2(_num_q, mt_discovery_degree); 115 _discoveredSoftRefs = NEW_C_HEAP_ARRAY(DiscoveredList, _max_num_q * subclasses_of_ref); 116 if (_discoveredSoftRefs == NULL) { 117 vm_exit_during_initialization("Could not allocated RefProc Array"); 118 } 119 _discoveredWeakRefs = &_discoveredSoftRefs[_max_num_q]; 120 _discoveredFinalRefs = &_discoveredWeakRefs[_max_num_q]; 121 _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_q]; 122 // Initialized all entries to NULL 123 for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) { 124 _discoveredSoftRefs[i].set_head(NULL); 125 _discoveredSoftRefs[i].set_length(0); 126 } 127 // If we do 
barreirs, cache a copy of the barrier set. 128 if (discovered_list_needs_barrier) { 129 _bs = Universe::heap()->barrier_set(); 130 } 131 setup_policy(false /* default soft ref policy */); 132 } 133 134 #ifndef PRODUCT 135 void ReferenceProcessor::verify_no_references_recorded() { 136 guarantee(!_discovering_refs, "Discovering refs?"); 137 for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) { 138 guarantee(_discoveredSoftRefs[i].empty(), 139 "Found non-empty discovered list"); 140 } 141 } 142 #endif 143 144 void ReferenceProcessor::weak_oops_do(OopClosure* f) { 145 // Should this instead be 146 // for (int i = 0; i < subclasses_of_ref; i++_ { 147 // for (int j = 0; j < _num_q; j++) { 148 // int index = i * _max_num_q + j; 149 for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) { 150 if (UseCompressedOops) { 151 f->do_oop((narrowOop*)_discoveredSoftRefs[i].adr_head()); 152 } else { 153 f->do_oop((oop*)_discoveredSoftRefs[i].adr_head()); 154 } 155 } 156 } 157 158 void ReferenceProcessor::update_soft_ref_master_clock() { 159 // Update (advance) the soft ref master clock field. This must be done 160 // after processing the soft ref list. 161 jlong now = os::javaTimeMillis(); 162 jlong clock = java_lang_ref_SoftReference::clock(); 163 NOT_PRODUCT( 164 if (now < clock) { 165 warning("time warp: %d to %d", clock, now); 166 } 167 ) 168 // In product mode, protect ourselves from system time being adjusted 169 // externally and going backward; see note in the implementation of 170 // GenCollectedHeap::time_since_last_gc() for the right way to fix 171 // this uniformly throughout the VM; see bug-id 4741166. XXX 172 if (now > clock) { 173 java_lang_ref_SoftReference::set_clock(now); 174 } 175 // Else leave clock stalled at its old value until time progresses 176 // past clock value. 
177 } 178 179 void ReferenceProcessor::process_discovered_references( 180 BoolObjectClosure* is_alive, 181 OopClosure* keep_alive, 182 VoidClosure* complete_gc, 183 AbstractRefProcTaskExecutor* task_executor) { 184 NOT_PRODUCT(verify_ok_to_handle_reflists()); 185 186 assert(!enqueuing_is_done(), "If here enqueuing should not be complete"); 187 // Stop treating discovered references specially. 188 disable_discovery(); 189 190 bool trace_time = PrintGCDetails && PrintReferenceGC; 191 // Soft references 192 { 193 TraceTime tt("SoftReference", trace_time, false, gclog_or_tty); 194 process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true, 195 is_alive, keep_alive, complete_gc, task_executor); 196 } 197 198 update_soft_ref_master_clock(); 199 200 // Weak references 201 { 202 TraceTime tt("WeakReference", trace_time, false, gclog_or_tty); 203 process_discovered_reflist(_discoveredWeakRefs, NULL, true, 204 is_alive, keep_alive, complete_gc, task_executor); 205 } 206 207 // Final references 208 { 209 TraceTime tt("FinalReference", trace_time, false, gclog_or_tty); 210 process_discovered_reflist(_discoveredFinalRefs, NULL, false, 211 is_alive, keep_alive, complete_gc, task_executor); 212 } 213 214 // Phantom references 215 { 216 TraceTime tt("PhantomReference", trace_time, false, gclog_or_tty); 217 process_discovered_reflist(_discoveredPhantomRefs, NULL, false, 218 is_alive, keep_alive, complete_gc, task_executor); 219 } 220 221 // Weak global JNI references. It would make more sense (semantically) to 222 // traverse these simultaneously with the regular weak references above, but 223 // that is not how the JDK1.2 specification is. See #4126360. Native code can 224 // thus use JNI weak references to circumvent the phantom references and 225 // resurrect a "post-mortem" object. 
226 { 227 TraceTime tt("JNI Weak Reference", trace_time, false, gclog_or_tty); 228 if (task_executor != NULL) { 229 task_executor->set_single_threaded_mode(); 230 } 231 process_phaseJNI(is_alive, keep_alive, complete_gc); 232 } 233 } 234 235 #ifndef PRODUCT 236 // Calculate the number of jni handles. 237 uint ReferenceProcessor::count_jni_refs() { 238 class AlwaysAliveClosure: public BoolObjectClosure { 239 public: 240 virtual bool do_object_b(oop obj) { return true; } 241 virtual void do_object(oop obj) { assert(false, "Don't call"); } 242 }; 243 244 class CountHandleClosure: public OopClosure { 245 private: 246 int _count; 247 public: 248 CountHandleClosure(): _count(0) {} 249 void do_oop(oop* unused) { _count++; } 250 void do_oop(narrowOop* unused) { ShouldNotReachHere(); } 251 int count() { return _count; } 252 }; 253 CountHandleClosure global_handle_count; 254 AlwaysAliveClosure always_alive; 255 JNIHandles::weak_oops_do(&always_alive, &global_handle_count); 256 return global_handle_count.count(); 257 } 258 #endif 259 260 void ReferenceProcessor::process_phaseJNI(BoolObjectClosure* is_alive, 261 OopClosure* keep_alive, 262 VoidClosure* complete_gc) { 263 #ifndef PRODUCT 264 if (PrintGCDetails && PrintReferenceGC) { 265 unsigned int count = count_jni_refs(); 266 gclog_or_tty->print(", %u refs", count); 267 } 268 #endif 269 JNIHandles::weak_oops_do(is_alive, keep_alive); 270 complete_gc->do_void(); 271 } 272 273 274 template <class T> 275 bool enqueue_discovered_ref_helper(ReferenceProcessor* ref, 276 AbstractRefProcTaskExecutor* task_executor) { 277 278 // Remember old value of pending references list 279 T* pending_list_addr = (T*)java_lang_ref_Reference::pending_list_addr(); 280 T old_pending_list_value = *pending_list_addr; 281 282 // Enqueue references that are not made active again, and 283 // clear the decks for the next collection (cycle). 
284 ref->enqueue_discovered_reflists((HeapWord*)pending_list_addr, task_executor); 285 // Do the oop-check on pending_list_addr missed in 286 // enqueue_discovered_reflist. We should probably 287 // do a raw oop_check so that future such idempotent 288 // oop_stores relying on the oop-check side-effect 289 // may be elided automatically and safely without 290 // affecting correctness. 291 oop_store(pending_list_addr, oopDesc::load_decode_heap_oop(pending_list_addr)); 292 293 // Stop treating discovered references specially. 294 ref->disable_discovery(); 295 296 // Return true if new pending references were added 297 return old_pending_list_value != *pending_list_addr; 298 } 299 300 bool ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor) { 301 NOT_PRODUCT(verify_ok_to_handle_reflists()); 302 if (UseCompressedOops) { 303 return enqueue_discovered_ref_helper<narrowOop>(this, task_executor); 304 } else { 305 return enqueue_discovered_ref_helper<oop>(this, task_executor); 306 } 307 } 308 309 void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list, 310 HeapWord* pending_list_addr) { 311 // Given a list of refs linked through the "discovered" field 312 // (java.lang.ref.Reference.discovered), self-loop their "next" field 313 // thus distinguishing them from active References, then 314 // prepend them to the pending list. 315 // BKWRD COMPATIBILITY NOTE: For older JDKs (prior to the fix for 4956777), 316 // the "next" field is used to chain the pending list, not the discovered 317 // field. 318 319 if (TraceReferenceGC && PrintGCDetails) { 320 gclog_or_tty->print_cr("ReferenceProcessor::enqueue_discovered_reflist list " 321 INTPTR_FORMAT, (address)refs_list.head()); 322 } 323 324 oop obj = NULL; 325 oop next_d = refs_list.head(); 326 if (pending_list_uses_discovered_field()) { // New behaviour 327 // Walk down the list, self-looping the next field 328 // so that the References are not considered active. 
329 while (obj != next_d) { 330 obj = next_d; 331 assert(obj->is_instanceRef(), "should be reference object"); 332 next_d = java_lang_ref_Reference::discovered(obj); 333 if (TraceReferenceGC && PrintGCDetails) { 334 gclog_or_tty->print_cr(" obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT, 335 obj, next_d); 336 } 337 assert(java_lang_ref_Reference::next(obj) == NULL, 338 "Reference not active; should not be discovered"); 339 // Self-loop next, so as to make Ref not active. 340 java_lang_ref_Reference::set_next(obj, obj); 341 if (next_d == obj) { // obj is last 342 // Swap refs_list into pendling_list_addr and 343 // set obj's discovered to what we read from pending_list_addr. 344 oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr); 345 // Need oop_check on pending_list_addr above; 346 // see special oop-check code at the end of 347 // enqueue_discovered_reflists() further below. 348 java_lang_ref_Reference::set_discovered(obj, old); // old may be NULL 349 } 350 } 351 } else { // Old behaviour 352 // Walk down the list, copying the discovered field into 353 // the next field and clearing the discovered field. 354 while (obj != next_d) { 355 obj = next_d; 356 assert(obj->is_instanceRef(), "should be reference object"); 357 next_d = java_lang_ref_Reference::discovered(obj); 358 if (TraceReferenceGC && PrintGCDetails) { 359 gclog_or_tty->print_cr(" obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT, 360 obj, next_d); 361 } 362 assert(java_lang_ref_Reference::next(obj) == NULL, 363 "The reference should not be enqueued"); 364 if (next_d == obj) { // obj is last 365 // Swap refs_list into pendling_list_addr and 366 // set obj's next to what we read from pending_list_addr. 367 oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr); 368 // Need oop_check on pending_list_addr above; 369 // see special oop-check code at the end of 370 // enqueue_discovered_reflists() further below. 
371 if (old == NULL) { 372 // obj should be made to point to itself, since 373 // pending list was empty. 374 java_lang_ref_Reference::set_next(obj, obj); 375 } else { 376 java_lang_ref_Reference::set_next(obj, old); 377 } 378 } else { 379 java_lang_ref_Reference::set_next(obj, next_d); 380 } 381 java_lang_ref_Reference::set_discovered(obj, (oop) NULL); 382 } 383 } 384 } 385 386 // Parallel enqueue task 387 class RefProcEnqueueTask: public AbstractRefProcTaskExecutor::EnqueueTask { 388 public: 389 RefProcEnqueueTask(ReferenceProcessor& ref_processor, 390 DiscoveredList discovered_refs[], 391 HeapWord* pending_list_addr, 392 int n_queues) 393 : EnqueueTask(ref_processor, discovered_refs, 394 pending_list_addr, n_queues) 395 { } 396 397 virtual void work(unsigned int work_id) { 398 assert(work_id < (unsigned int)_ref_processor.max_num_q(), "Index out-of-bounds"); 399 // Simplest first cut: static partitioning. 400 int index = work_id; 401 // The increment on "index" must correspond to the maximum number of queues 402 // (n_queues) with which that ReferenceProcessor was created. That 403 // is because of the "clever" way the discovered references lists were 404 // allocated and are indexed into. 
405 assert(_n_queues == (int) _ref_processor.max_num_q(), "Different number not expected"); 406 for (int j = 0; 407 j < subclasses_of_ref; 408 j++, index += _n_queues) { 409 _ref_processor.enqueue_discovered_reflist( 410 _refs_lists[index], _pending_list_addr); 411 _refs_lists[index].set_head(NULL); 412 _refs_lists[index].set_length(0); 413 } 414 } 415 }; 416 417 // Enqueue references that are not made active again 418 void ReferenceProcessor::enqueue_discovered_reflists(HeapWord* pending_list_addr, 419 AbstractRefProcTaskExecutor* task_executor) { 420 if (_processing_is_mt && task_executor != NULL) { 421 // Parallel code 422 RefProcEnqueueTask tsk(*this, _discoveredSoftRefs, 423 pending_list_addr, _max_num_q); 424 task_executor->execute(tsk); 425 } else { 426 // Serial code: call the parent class's implementation 427 for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) { 428 enqueue_discovered_reflist(_discoveredSoftRefs[i], pending_list_addr); 429 _discoveredSoftRefs[i].set_head(NULL); 430 _discoveredSoftRefs[i].set_length(0); 431 } 432 } 433 } 434 435 // Iterator for the list of discovered references. 436 class DiscoveredListIterator { 437 public: 438 inline DiscoveredListIterator(DiscoveredList& refs_list, 439 OopClosure* keep_alive, 440 BoolObjectClosure* is_alive); 441 442 // End Of List. 443 inline bool has_next() const { return _ref != NULL; } 444 445 // Get oop to the Reference object. 446 inline oop obj() const { return _ref; } 447 448 // Get oop to the referent object. 449 inline oop referent() const { return _referent; } 450 451 // Returns true if referent is alive. 452 inline bool is_referent_alive() const; 453 454 // Loads data for the current reference. 455 // The "allow_null_referent" argument tells us to allow for the possibility 456 // of a NULL referent in the discovered Reference object. 
This typically 457 // happens in the case of concurrent collectors that may have done the 458 // discovery concurrently, or interleaved, with mutator execution. 459 inline void load_ptrs(DEBUG_ONLY(bool allow_null_referent)); 460 461 // Move to the next discovered reference. 462 inline void next(); 463 464 // Remove the current reference from the list 465 inline void remove(); 466 467 // Make the Reference object active again. 468 inline void make_active() { java_lang_ref_Reference::set_next(_ref, NULL); } 469 470 // Make the referent alive. 471 inline void make_referent_alive() { 472 if (UseCompressedOops) { 473 _keep_alive->do_oop((narrowOop*)_referent_addr); 474 } else { 475 _keep_alive->do_oop((oop*)_referent_addr); 476 } 477 } 478 479 // Update the discovered field. 480 inline void update_discovered() { 481 // First _prev_next ref actually points into DiscoveredList (gross). 482 if (UseCompressedOops) { 483 if (!oopDesc::is_null(*(narrowOop*)_prev_next)) { 484 _keep_alive->do_oop((narrowOop*)_prev_next); 485 } 486 } else { 487 if (!oopDesc::is_null(*(oop*)_prev_next)) { 488 _keep_alive->do_oop((oop*)_prev_next); 489 } 490 } 491 } 492 493 // NULL out referent pointer. 
494 inline void clear_referent() { oop_store_raw(_referent_addr, NULL); } 495 496 // Statistics 497 NOT_PRODUCT( 498 inline size_t processed() const { return _processed; } 499 inline size_t removed() const { return _removed; } 500 ) 501 502 inline void move_to_next(); 503 504 private: 505 DiscoveredList& _refs_list; 506 HeapWord* _prev_next; 507 oop _prev; 508 oop _ref; 509 HeapWord* _discovered_addr; 510 oop _next; 511 HeapWord* _referent_addr; 512 oop _referent; 513 OopClosure* _keep_alive; 514 BoolObjectClosure* _is_alive; 515 DEBUG_ONLY( 516 oop _first_seen; // cyclic linked list check 517 ) 518 NOT_PRODUCT( 519 size_t _processed; 520 size_t _removed; 521 ) 522 }; 523 524 inline DiscoveredListIterator::DiscoveredListIterator(DiscoveredList& refs_list, 525 OopClosure* keep_alive, 526 BoolObjectClosure* is_alive) 527 : _refs_list(refs_list), 528 _prev_next(refs_list.adr_head()), 529 _prev(NULL), 530 _ref(refs_list.head()), 531 #ifdef ASSERT 532 _first_seen(refs_list.head()), 533 #endif 534 #ifndef PRODUCT 535 _processed(0), 536 _removed(0), 537 #endif 538 _next(NULL), 539 _keep_alive(keep_alive), 540 _is_alive(is_alive) 541 { } 542 543 inline bool DiscoveredListIterator::is_referent_alive() const { 544 return _is_alive->do_object_b(_referent); 545 } 546 547 inline void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) { 548 _discovered_addr = java_lang_ref_Reference::discovered_addr(_ref); 549 oop discovered = java_lang_ref_Reference::discovered(_ref); 550 assert(_discovered_addr && discovered->is_oop_or_null(), 551 "discovered field is bad"); 552 _next = discovered; 553 _referent_addr = java_lang_ref_Reference::referent_addr(_ref); 554 _referent = java_lang_ref_Reference::referent(_ref); 555 assert(Universe::heap()->is_in_reserved_or_null(_referent), 556 "Wrong oop found in java.lang.Reference object"); 557 assert(allow_null_referent ? 
558 _referent->is_oop_or_null() 559 : _referent->is_oop(), 560 "bad referent"); 561 } 562 563 inline void DiscoveredListIterator::next() { 564 _prev_next = _discovered_addr; 565 _prev = _ref; 566 move_to_next(); 567 } 568 569 inline void DiscoveredListIterator::remove() { 570 assert(_ref->is_oop(), "Dropping a bad reference"); 571 oop_store_raw(_discovered_addr, NULL); 572 573 // First _prev_next ref actually points into DiscoveredList (gross). 574 oop new_next; 575 if (_next == _ref) { 576 // At the end of the list, we should make _prev point to itself. 577 // If _ref is the first ref, then _prev_next will be in the DiscoveredList, 578 // and _prev will be NULL. 579 new_next = _prev; 580 } else { 581 new_next = _next; 582 } 583 584 if (UseCompressedOops) { 585 // Remove Reference object from list. 586 oopDesc::encode_store_heap_oop((narrowOop*)_prev_next, new_next); 587 } else { 588 // Remove Reference object from list. 589 oopDesc::store_heap_oop((oop*)_prev_next, new_next); 590 } 591 NOT_PRODUCT(_removed++); 592 _refs_list.dec_length(1); 593 } 594 595 inline void DiscoveredListIterator::move_to_next() { 596 if (_ref == _next) { 597 // End of the list. 598 _ref = NULL; 599 } else { 600 _ref = _next; 601 } 602 assert(_ref != _first_seen, "cyclic ref_list found"); 603 NOT_PRODUCT(_processed++); 604 } 605 606 // NOTE: process_phase*() are largely similar, and at a high level 607 // merely iterate over the extant list applying a predicate to 608 // each of its elements and possibly removing that element from the 609 // list and applying some further closures to that element. 610 // We should consider the possibility of replacing these 611 // process_phase*() methods by abstracting them into 612 // a single general iterator invocation that receives appropriate 613 // closures that accomplish this work. 614 615 // (SoftReferences only) Traverse the list and remove any SoftReferences whose 616 // referents are not alive, but that should be kept alive for policy reasons. 
617 // Keep alive the transitive closure of all such referents. 618 void 619 ReferenceProcessor::process_phase1(DiscoveredList& refs_list, 620 ReferencePolicy* policy, 621 BoolObjectClosure* is_alive, 622 OopClosure* keep_alive, 623 VoidClosure* complete_gc) { 624 assert(policy != NULL, "Must have a non-NULL policy"); 625 DiscoveredListIterator iter(refs_list, keep_alive, is_alive); 626 // Decide which softly reachable refs should be kept alive. 627 while (iter.has_next()) { 628 iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */)); 629 bool referent_is_dead = (iter.referent() != NULL) && !iter.is_referent_alive(); 630 if (referent_is_dead && !policy->should_clear_reference(iter.obj())) { 631 if (TraceReferenceGC) { 632 gclog_or_tty->print_cr("Dropping reference (" INTPTR_FORMAT ": %s" ") by policy", 633 iter.obj(), iter.obj()->blueprint()->internal_name()); 634 } 635 // Remove Reference object from list 636 iter.remove(); 637 // Make the Reference object active again 638 iter.make_active(); 639 // keep the referent around 640 iter.make_referent_alive(); 641 iter.move_to_next(); 642 } else { 643 iter.next(); 644 } 645 } 646 // Close the reachable set 647 complete_gc->do_void(); 648 NOT_PRODUCT( 649 if (PrintGCDetails && TraceReferenceGC) { 650 gclog_or_tty->print_cr(" Dropped %d dead Refs out of %d " 651 "discovered Refs by policy, from list " INTPTR_FORMAT, 652 iter.removed(), iter.processed(), (address)refs_list.head()); 653 } 654 ) 655 } 656 657 // Traverse the list and remove any Refs that are not active, or 658 // whose referents are either alive or NULL. 
// Phase 2 (atomic-discovery case): remove from refs_list every Reference
// whose referent is still strongly reachable, forwarding the referent
// pointer through keep_alive so it is updated/copied as needed.
// With atomic discovery a NULL referent is not expected (asserted in
// load_ptrs) and no complete_gc closure is required since make_referent_alive
// should not trigger recursive marking here.
void
ReferenceProcessor::pp2_work(DiscoveredList&    refs_list,
                             BoolObjectClosure* is_alive,
                             OopClosure*        keep_alive) {
  assert(discovery_is_atomic(), "Error");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
    DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());)
    assert(next == NULL, "Should not discover inactive Reference");
    if (iter.is_referent_alive()) {
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)",
                               iter.obj(), iter.obj()->blueprint()->internal_name());
      }
      // The referent is reachable after all.
      // Remove Reference object from list.
      iter.remove();
      // Update the referent pointer as necessary: Note that this
      // should not entail any recursive marking because the
      // referent must already have been traversed.
      iter.make_referent_alive();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC && (iter.processed() > 0)) {
      gclog_or_tty->print_cr(" Dropped %d active Refs out of %d "
        "Refs in discovered list " INTPTR_FORMAT,
        iter.removed(), iter.processed(), (address)refs_list.head());
    }
  )
}

// Phase 2 (concurrent/non-atomic discovery case): like pp2_work, but a
// Reference may have been discovered while the mutator was still running,
// so the referent may be NULL and the Reference may have since become
// inactive (next != NULL). Such entries are dropped from the list, and
// both the referent and the next field are kept alive/updated.
void
ReferenceProcessor::pp2_work_concurrent_discovery(DiscoveredList&    refs_list,
                                                  BoolObjectClosure* is_alive,
                                                  OopClosure*        keep_alive,
                                                  VoidClosure*       complete_gc) {
  assert(!discovery_is_atomic(), "Error");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    HeapWord* next_addr = java_lang_ref_Reference::next_addr(iter.obj());
    oop next = java_lang_ref_Reference::next(iter.obj());
    // Drop the Reference if its referent was cleared, is still alive,
    // or the Reference has become inactive (self-looped/enqueued next).
    if ((iter.referent() == NULL || iter.is_referent_alive() ||
         next != NULL)) {
      assert(next->is_oop_or_null(), "bad next field");
      // Remove Reference object from list
      iter.remove();
      // Trace the cohorts
      iter.make_referent_alive();
      // Keep the next field up to date as well, since a concurrent
      // mutator may have enqueued this Reference already.
      if (UseCompressedOops) {
        keep_alive->do_oop((narrowOop*)next_addr);
      } else {
        keep_alive->do_oop((oop*)next_addr);
      }
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Now close the newly reachable set
  complete_gc->do_void();
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC && (iter.processed() > 0)) {
      gclog_or_tty->print_cr(" Dropped %d active Refs out of %d "
        "Refs in discovered list " INTPTR_FORMAT,
        iter.removed(), iter.processed(), (address)refs_list.head());
    }
  )
}

// Traverse the list and process the referents, by either
// clearing them or keeping them (and their reachable
// closure) alive.
void
ReferenceProcessor::process_phase3(DiscoveredList&    refs_list,
                                   bool               clear_referent,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc) {
  ResourceMark rm;
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    // Keep the discovered-list link (which may point into the
    // DiscoveredList header itself) up to date before loading fields.
    iter.update_discovered();
    iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
    if (clear_referent) {
      // NULL out referent pointer
      iter.clear_referent();
    } else {
      // keep the referent around
      iter.make_referent_alive();
    }
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Adding %sreference (" INTPTR_FORMAT ": %s) as pending",
                             clear_referent ? "cleared " : "",
                             iter.obj(), iter.obj()->blueprint()->internal_name());
    }
    assert(iter.obj()->is_oop(UseConcMarkSweepGC), "Adding a bad reference");
    iter.next();
  }
  // Remember to update the next pointer of the last ref.
  iter.update_discovered();
  // Close the reachable set
  complete_gc->do_void();
}

// Unlink every Reference on refs_list via its raw discovered field and
// empty the list. Used when abandoning partially-discovered lists.
void
ReferenceProcessor::clear_discovered_references(DiscoveredList& refs_list) {
  oop obj = NULL;
  oop next = refs_list.head();
  // The list is terminated by a self-loop in the discovered field.
  while (next != obj) {
    obj = next;
    next = java_lang_ref_Reference::discovered(obj);
    java_lang_ref_Reference::set_discovered_raw(obj, NULL);
  }
  refs_list.set_head(NULL);
  refs_list.set_length(0);
}

void
ReferenceProcessor::abandon_partial_discovered_list(DiscoveredList& refs_list) {
  clear_discovered_references(refs_list);
}

// Abandon every per-type discovered list (all subclasses, all queues).
void ReferenceProcessor::abandon_partial_discovery() {
  // loop over the lists
  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
    if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
      gclog_or_tty->print_cr("\nAbandoning %s discovered list",
                             list_name(i));
    }
    abandon_partial_discovered_list(_discoveredSoftRefs[i]);
  }
}

// Parallel task wrapper for process_phase1; each worker handles the list
// indexed by its own WorkerThread id.
class RefProcPhase1Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase1Task(ReferenceProcessor& ref_processor,
                    DiscoveredList      refs_lists[],
                    ReferencePolicy*    policy,
                    bool                marks_oops_alive)
    : ProcessTask(ref_processor, refs_lists, marks_oops_alive),
      _policy(policy)
  { }
  virtual void work(unsigned int i, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    // Index by the worker thread's own id rather than the task index i.
    Thread* thr = Thread::current();
    int refs_list_index = ((WorkerThread*)thr)->id();
    _ref_processor.process_phase1(_refs_lists[refs_list_index], _policy,
                                  &is_alive, &keep_alive, &complete_gc);
  }
private:
  ReferencePolicy* _policy;
};

// Parallel task wrapper for process_phase2 (dispatches to the atomic or
// concurrent-discovery variant inside process_phase2).
class RefProcPhase2Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase2Task(ReferenceProcessor& ref_processor,
                    DiscoveredList      refs_lists[],
                    bool                marks_oops_alive)
    : ProcessTask(ref_processor, refs_lists, marks_oops_alive)
  { }
  virtual void work(unsigned int i, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    _ref_processor.process_phase2(_refs_lists[i],
                                  &is_alive, &keep_alive, &complete_gc);
  }
};

// Parallel task wrapper for process_phase3.
class RefProcPhase3Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase3Task(ReferenceProcessor& ref_processor,
                    DiscoveredList      refs_lists[],
                    bool                clear_referent,
                    bool                marks_oops_alive)
    : ProcessTask(ref_processor, refs_lists, marks_oops_alive),
      _clear_referent(clear_referent)
  { }
  virtual void work(unsigned int i, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    // Don't use "refs_list_index" calculated in this way because
    // balance_queues() has moved the Ref's into the first n queues.
    // Thread* thr = Thread::current();
    // int refs_list_index = ((WorkerThread*)thr)->id();
    // _ref_processor.process_phase3(_refs_lists[refs_list_index], _clear_referent,
    _ref_processor.process_phase3(_refs_lists[i], _clear_referent,
                                  &is_alive, &keep_alive, &complete_gc);
  }
private:
  bool _clear_referent;
};

// Balances reference queues.
// Move entries from all queues[0, 1, ..., _max_num_q-1] to
// queues[0, 1, ..., _num_q-1] because only the first _num_q
// corresponding to the active workers will be processed.
// Redistribute the discovered References so that only the first _num_q
// queues (those that will actually be processed) hold entries, each
// holding roughly total/_num_q references. Lists are singly linked via
// the discovered field and terminated by a self-loop.
void ReferenceProcessor::balance_queues(DiscoveredList ref_lists[])
{
  // calculate total length
  size_t total_refs = 0;
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr("\nBalance ref_lists ");
  }

  for (int i = 0; i < _max_num_q; ++i) {
    total_refs += ref_lists[i].length();
    if (TraceReferenceGC && PrintGCDetails) {
      // NOTE(review): "%d" with a size_t argument; should be SIZE_FORMAT.
      gclog_or_tty->print("%d ", ref_lists[i].length());
    }
  }
  if (TraceReferenceGC && PrintGCDetails) {
    // NOTE(review): "%d" with a size_t argument; should be SIZE_FORMAT.
    gclog_or_tty->print_cr(" = %d", total_refs);
  }
  // Target per-queue length; +1 rounds up so the loop below terminates.
  size_t avg_refs = total_refs / _num_q + 1;
  int to_idx = 0;
  for (int from_idx = 0; from_idx < _max_num_q; from_idx++) {
    bool move_all = false;
    if (from_idx >= _num_q) {
      // Queues at or beyond _num_q will not be processed: drain them fully.
      move_all = ref_lists[from_idx].length() > 0;
    }
    while ((ref_lists[from_idx].length() > avg_refs) ||
           move_all) {
      assert(to_idx < _num_q, "Sanity Check!");
      if (ref_lists[to_idx].length() < avg_refs) {
        // move superfluous refs
        size_t refs_to_move;
        // Move all the Ref's if the from queue will not be processed.
        if (move_all) {
          refs_to_move = MIN2(ref_lists[from_idx].length(),
                              avg_refs - ref_lists[to_idx].length());
        } else {
          refs_to_move = MIN2(ref_lists[from_idx].length() - avg_refs,
                              avg_refs - ref_lists[to_idx].length());
        }

        assert(refs_to_move > 0, "otherwise the code below will fail");

        oop move_head = ref_lists[from_idx].head();
        oop move_tail = move_head;
        oop new_head  = move_head;
        // find an element to split the list on
        for (size_t j = 0; j < refs_to_move; ++j) {
          move_tail = new_head;
          new_head = java_lang_ref_Reference::discovered(new_head);
        }

        // Add the chain to the to list.
        if (ref_lists[to_idx].head() == NULL) {
          // to list is empty. Make a loop at the end.
          java_lang_ref_Reference::set_discovered(move_tail, move_tail);
        } else {
          java_lang_ref_Reference::set_discovered(move_tail, ref_lists[to_idx].head());
        }
        ref_lists[to_idx].set_head(move_head);
        ref_lists[to_idx].inc_length(refs_to_move);

        // Remove the chain from the from list.
        if (move_tail == new_head) {
          // We found the end of the from list.
          ref_lists[from_idx].set_head(NULL);
        } else {
          ref_lists[from_idx].set_head(new_head);
        }
        ref_lists[from_idx].dec_length(refs_to_move);
        if (ref_lists[from_idx].length() == 0) {
          break;
        }
      } else {
        // Current destination queue is full; round-robin to the next.
        to_idx = (to_idx + 1) % _num_q;
      }
    }
  }
#ifdef ASSERT
  // Verify the balancing conserved the total number of references.
  size_t balanced_total_refs = 0;
  for (int i = 0; i < _max_num_q; ++i) {
    balanced_total_refs += ref_lists[i].length();
    if (TraceReferenceGC && PrintGCDetails) {
      // NOTE(review): "%d" with a size_t argument; should be SIZE_FORMAT.
      gclog_or_tty->print("%d ", ref_lists[i].length());
    }
  }
  if (TraceReferenceGC && PrintGCDetails) {
    // NOTE(review): "%d" with a size_t argument; should be SIZE_FORMAT.
    gclog_or_tty->print_cr(" = %d", balanced_total_refs);
    gclog_or_tty->flush();
  }
  assert(total_refs == balanced_total_refs, "Balancing was incomplete");
#endif
}

// Balance the discovered lists of every Reference subclass.
void ReferenceProcessor::balance_all_queues() {
  balance_queues(_discoveredSoftRefs);
  balance_queues(_discoveredWeakRefs);
  balance_queues(_discoveredFinalRefs);
  balance_queues(_discoveredPhantomRefs);
}

void
ReferenceProcessor::process_discovered_reflist(
  DiscoveredList               refs_lists[],
  ReferencePolicy*             policy,
  bool                         clear_referent,
  BoolObjectClosure*           is_alive,
  OopClosure*                  keep_alive,
  VoidClosure*                 complete_gc,
  AbstractRefProcTaskExecutor* task_executor)
{
  bool mt_processing = task_executor != NULL && _processing_is_mt;
  // If discovery used MT and a dynamic number of GC threads, then
  // the queues must be balanced for correctness if fewer than the
  // maximum number of queues were used. The number of queue used
  // during discovery may be different than the number to be used
  // for processing so don't depend of _num_q < _max_num_q as part
  // of the test.
  bool must_balance = _discovery_is_mt;

  if ((mt_processing && ParallelRefProcBalancingEnabled) ||
      must_balance) {
    balance_queues(refs_lists);
  }
  if (PrintReferenceGC && PrintGCDetails) {
    size_t total = 0;
    for (int i = 0; i < _max_num_q; ++i) {
      total += refs_lists[i].length();
    }
    // NOTE(review): "%u" with a size_t argument; should be SIZE_FORMAT.
    gclog_or_tty->print(", %u refs", total);
  }

  // Phase 1 (soft refs only):
  // . Traverse the list and remove any SoftReferences whose
  //   referents are not alive, but that should be kept alive for
  //   policy reasons. Keep alive the transitive closure of all
  //   such referents.
  if (policy != NULL) {
    if (mt_processing) {
      RefProcPhase1Task phase1(*this, refs_lists, policy, true /*marks_oops_alive*/);
      task_executor->execute(phase1);
    } else {
      for (int i = 0; i < _max_num_q; i++) {
        process_phase1(refs_lists[i], policy,
                       is_alive, keep_alive, complete_gc);
      }
    }
  } else { // policy == NULL
    assert(refs_lists != _discoveredSoftRefs,
           "Policy must be specified for soft references.");
  }

  // Phase 2:
  // . Traverse the list and remove any refs whose referents are alive.
  if (mt_processing) {
    RefProcPhase2Task phase2(*this, refs_lists, !discovery_is_atomic() /*marks_oops_alive*/);
    task_executor->execute(phase2);
  } else {
    for (int i = 0; i < _max_num_q; i++) {
      process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc);
    }
  }

  // Phase 3:
  // . Traverse the list and process referents as appropriate.
1028 if (mt_processing) { 1029 RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/); 1030 task_executor->execute(phase3); 1031 } else { 1032 for (int i = 0; i < _max_num_q; i++) { 1033 process_phase3(refs_lists[i], clear_referent, 1034 is_alive, keep_alive, complete_gc); 1035 } 1036 } 1037 } 1038 1039 void ReferenceProcessor::clean_up_discovered_references() { 1040 // loop over the lists 1041 // Should this instead be 1042 // for (int i = 0; i < subclasses_of_ref; i++_ { 1043 // for (int j = 0; j < _num_q; j++) { 1044 // int index = i * _max_num_q + j; 1045 for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) { 1046 if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) { 1047 gclog_or_tty->print_cr( 1048 "\nScrubbing %s discovered list of Null referents", 1049 list_name(i)); 1050 } 1051 clean_up_discovered_reflist(_discoveredSoftRefs[i]); 1052 } 1053 } 1054 1055 void ReferenceProcessor::clean_up_discovered_reflist(DiscoveredList& refs_list) { 1056 assert(!discovery_is_atomic(), "Else why call this method?"); 1057 DiscoveredListIterator iter(refs_list, NULL, NULL); 1058 while (iter.has_next()) { 1059 iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */)); 1060 oop next = java_lang_ref_Reference::next(iter.obj()); 1061 assert(next->is_oop_or_null(), "bad next field"); 1062 // If referent has been cleared or Reference is not active, 1063 // drop it. 
    if (iter.referent() == NULL || next != NULL) {
      // Referent cleared by a mutator, or the Reference is no longer
      // active (next != NULL): unlink it from the discovered list.
      debug_only(
        if (PrintGCDetails && TraceReferenceGC) {
          gclog_or_tty->print_cr("clean_up_discovered_list: Dropping Reference: "
            INTPTR_FORMAT " with next field: " INTPTR_FORMAT
            " and referent: " INTPTR_FORMAT,
            iter.obj(), next, iter.referent());
        }
      )
      // Remove Reference object from list
      iter.remove();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC) {
      // NOTE(review): removed()/processed() appear to be size_t counters;
      // %d may truncate on LP64 -- consider SIZE_FORMAT.
      gclog_or_tty->print(
        " Removed %d Refs with NULL referents out of %d discovered Refs",
        iter.removed(), iter.processed());
    }
  )
}

// Select the discovered list (per reference type, per worker queue) onto
// which a newly discovered Reference of type rt should be pushed.
// Returns NULL for REF_OTHER, i.e. no special treatment.
inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt) {
  int id = 0;
  // Determine the queue index to use for this object.
  if (_discovery_is_mt) {
    // During a multi-threaded discovery phase,
    // each thread saves to its "own" list.
    Thread* thr = Thread::current();
    id = thr->as_Worker_thread()->id();
  } else {
    // single-threaded discovery, we save in round-robin
    // fashion to each of the lists.
    if (_processing_is_mt) {
      id = next_id();
    }
  }
  assert(0 <= id && id < _max_num_q, "Id is out-of-bounds (call Freud?)");

  // Get the discovered queue to which we will add
  DiscoveredList* list = NULL;
  switch (rt) {
    case REF_OTHER:
      // Unknown reference type, no special treatment
      break;
    case REF_SOFT:
      list = &_discoveredSoftRefs[id];
      break;
    case REF_WEAK:
      list = &_discoveredWeakRefs[id];
      break;
    case REF_FINAL:
      list = &_discoveredFinalRefs[id];
      break;
    case REF_PHANTOM:
      list = &_discoveredPhantomRefs[id];
      break;
    case REF_NONE:
      // we should not reach here if we are an instanceRefKlass
    default:
      ShouldNotReachHere();
  }
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr("Thread %d gets list " INTPTR_FORMAT, id, list);
  }
  return list;
}

// MT-safe insertion of obj at the head of refs_list: a CAS on obj's
// discovered field decides which thread gets to enqueue it (only one may).
inline void
ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
                                              oop             obj,
                                              HeapWord*       discovered_addr) {
  assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller");
  // First we must make sure this object is only enqueued once. CAS in a non null
  // discovered_addr.
  oop current_head = refs_list.head();
  // The last ref must have its discovered field pointing to itself.
  oop next_discovered = (current_head != NULL) ? current_head : obj;

  // Note: In the case of G1, this specific pre-barrier is strictly
  // not necessary because the only case we are interested in
  // here is when *discovered_addr is NULL (see the CAS further below),
  // so this will expand to nothing.
As a result, we have manually
  // elided this out for G1, but left in the test for some future
  // collector that might have need for a pre-barrier here, e.g.:-
  // _bs->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered);
  assert(!_discovered_list_needs_barrier || UseG1GC,
         "Need to check non-G1 collector: "
         "may need a pre-write-barrier for CAS from NULL below");
  // Only the thread whose CAS observes NULL (retest == NULL) wins the right
  // to enqueue; everyone else sees the winner's non-NULL value.
  oop retest = oopDesc::atomic_compare_exchange_oop(next_discovered, discovered_addr,
                                                    NULL);
  if (retest == NULL) {
    // This thread just won the right to enqueue the object.
    // We have separate lists for enqueueing, so no synchronization
    // is necessary.
    refs_list.set_head(obj);
    refs_list.inc_length(1);
    if (_discovered_list_needs_barrier) {
      // Post-barrier for the discovered-field store performed by the CAS.
      _bs->write_ref_field((void*)discovered_addr, next_discovered);
    }

    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Discovered reference (mt) (" INTPTR_FORMAT ": %s)",
                             obj, obj->blueprint()->internal_name());
    }
  } else {
    // If retest was non NULL, another thread beat us to it:
    // The reference has already been discovered...
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Already discovered reference (" INTPTR_FORMAT ": %s)",
                             obj, obj->blueprint()->internal_name());
    }
  }
}

#ifndef PRODUCT
// Non-atomic (i.e. concurrent) discovery might allow us
// to observe j.l.References with NULL referents, being those
// cleared concurrently by mutators during (or after) discovery.
void ReferenceProcessor::verify_referent(oop obj) {
  bool da = discovery_is_atomic();
  oop referent = java_lang_ref_Reference::referent(obj);
  // With atomic discovery the referent must be a real oop; with concurrent
  // discovery a mutator may have cleared it to NULL in the meantime.
  assert(da ? referent->is_oop() : referent->is_oop_or_null(),
         err_msg("Bad referent " INTPTR_FORMAT " found in Reference "
                 INTPTR_FORMAT " during %satomic discovery ",
                 (intptr_t)referent, (intptr_t)obj, da ? "" : "non-"));
}
#endif

// We mention two of several possible choices here:
// #0: if the reference object is not in the "originating generation"
//     (or part of the heap being collected, indicated by our "span"
//     we don't treat it specially (i.e. we scan it as we would
//     a normal oop, treating its references as strong references).
//     This means that references can't be discovered unless their
//     referent is also in the same span. This is the simplest,
//     most "local" and most conservative approach, albeit one
//     that may cause weak references to be enqueued least promptly.
//     We call this choice the "ReferenceBasedDiscovery" policy.
// #1: the reference object may be in any generation (span), but if
//     the referent is in the generation (span) being currently collected
//     then we can discover the reference object, provided
//     the object has not already been discovered by
//     a different concurrently running collector (as may be the
//     case, for instance, if the reference object is in CMS and
//     the referent in DefNewGeneration), and provided the processing
//     of this reference object by the current collector will
//     appear atomic to every other collector in the system.
//     (Thus, for instance, a concurrent collector may not
//     discover references in other generations even if the
//     referent is in its own generation). This policy may,
//     in certain cases, enqueue references somewhat sooner than
//     might Policy #0 above, but at marginally increased cost
//     and complexity in processing these references.
//     We call this choice the "ReferentBasedDiscovery" policy.
bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
  // Make sure we are discovering refs (rather than processing discovered refs).
  if (!_discovering_refs || !RegisterReferences) {
    return false;
  }
  // We only discover active references.
  oop next = java_lang_ref_Reference::next(obj);
  if (next != NULL) {   // Ref is no longer active
    return false;
  }

  HeapWord* obj_addr = (HeapWord*)obj;
  if (RefDiscoveryPolicy == ReferenceBasedDiscovery &&
      !_span.contains(obj_addr)) {
    // Reference is not in the originating generation;
    // don't treat it specially (i.e. we want to scan it as a normal
    // object with strong references).
    return false;
  }

  // We only discover references whose referents are not (yet)
  // known to be strongly reachable.
  if (is_alive_non_header() != NULL) {
    verify_referent(obj);
    if (is_alive_non_header()->do_object_b(java_lang_ref_Reference::referent(obj))) {
      return false;  // referent is reachable
    }
  }
  if (rt == REF_SOFT) {
    // For soft refs we can decide now if these are not
    // current candidates for clearing, in which case we
    // can mark through them now, rather than delaying that
    // to the reference-processing phase. Since all current
    // time-stamp policies advance the soft-ref clock only
    // at a major collection cycle, this is always currently
    // accurate.
    if (!_current_soft_ref_policy->should_clear_reference(obj)) {
      return false;
    }
  }

  HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
  const oop  discovered = java_lang_ref_Reference::discovered(obj);
  assert(discovered->is_oop_or_null(), "bad discovered field");
  if (discovered != NULL) {
    // The reference has already been discovered...
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Already discovered reference (" INTPTR_FORMAT ": %s)",
                             obj, obj->blueprint()->internal_name());
    }
    if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
      // assumes that an object is not processed twice;
      // if it's been already discovered it must be on another
      // generation's discovered list; so we won't discover it.
      return false;
    } else {
      assert(RefDiscoveryPolicy == ReferenceBasedDiscovery,
             "Unrecognized policy");
      // Check assumption that an object is not potentially
      // discovered twice except by concurrent collectors that potentially
      // trace the same Reference object twice.
      assert(UseConcMarkSweepGC || UseG1GC,
             "Only possible with a concurrent marking collector");
      return true;
    }
  }

  if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
    verify_referent(obj);
    // Discover if and only if EITHER:
    // .. reference is in our span, OR
    // .. we are an atomic collector and referent is in our span
    if (_span.contains(obj_addr) ||
        (discovery_is_atomic() &&
         _span.contains(java_lang_ref_Reference::referent(obj)))) {
      // should_enqueue = true;
    } else {
      return false;
    }
  } else {
    assert(RefDiscoveryPolicy == ReferenceBasedDiscovery &&
           _span.contains(obj_addr), "code inconsistency");
  }

  // Get the right type of discovered queue head.
  DiscoveredList* list = get_discovered_list(rt);
  if (list == NULL) {
    return false;   // nothing special needs to be done
  }

  if (_discovery_is_mt) {
    add_to_discovered_list_mt(*list, obj, discovered_addr);
  } else {
    // If "_discovered_list_needs_barrier", we do write barriers when
    // updating the discovered reference list.  Otherwise, we do a raw store
    // here: the field will be visited later when processing the discovered
    // references.
    oop current_head = list->head();
    // The last ref must have its discovered field pointing to itself.
    oop next_discovered = (current_head != NULL) ? current_head : obj;

    // As in the case further above, since we are over-writing a NULL
    // pre-value, we can safely elide the pre-barrier here for the case of G1.
    // e.g.:- _bs->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered);
    assert(discovered == NULL, "control point invariant");
    assert(!_discovered_list_needs_barrier || UseG1GC,
           "For non-G1 collector, may need a pre-write-barrier for CAS from NULL below");
    // Single-threaded discovery: a raw store (no CAS) suffices here.
    oop_store_raw(discovered_addr, next_discovered);
    if (_discovered_list_needs_barrier) {
      _bs->write_ref_field((void*)discovered_addr, next_discovered);
    }
    list->set_head(obj);
    list->inc_length(1);

    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Discovered reference (" INTPTR_FORMAT ": %s)",
                             obj, obj->blueprint()->internal_name());
    }
  }
  assert(obj->is_oop(), "Discovered a bad reference");
  verify_referent(obj);
  return true;
}

// Preclean the discovered references by removing those
// whose referents are alive, and by marking from those that
// are not active. These lists can be handled here
// in any order and, indeed, concurrently.
void ReferenceProcessor::preclean_discovered_references(
  BoolObjectClosure* is_alive,
  OopClosure* keep_alive,
  VoidClosure* complete_gc,
  YieldClosure* yield,
  bool should_unload_classes) {

  NOT_PRODUCT(verify_ok_to_handle_reflists());

#ifdef ASSERT
  // Note: relies on && binding tighter than || -- klasses must be
  // remembered for the class-unloading configurations listed below.
  bool must_remember_klasses = ClassUnloading && !UseConcMarkSweepGC ||
                               CMSClassUnloadingEnabled && UseConcMarkSweepGC ||
                               ExplicitGCInvokesConcurrentAndUnloadsClasses &&
                                 UseConcMarkSweepGC && should_unload_classes;
  RememberKlassesChecker mx(must_remember_klasses);
#endif
  // Soft references
  {
    TraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC,
              false, gclog_or_tty);
    for (int i = 0; i < _max_num_q; i++) {
      // Precleaning is best-effort: return as soon as we are asked to yield.
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredSoftRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Weak references
  {
    TraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC,
              false, gclog_or_tty);
    for (int i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredWeakRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Final references
  {
    TraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC,
              false, gclog_or_tty);
    for (int i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredFinalRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Phantom references
  {
    TraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC,
              false, gclog_or_tty);
    for (int i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredPhantomRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }
}

// Walk the given discovered ref list, and remove all reference objects
// whose referents are still alive, whose referents are NULL or which
// are not active (have a non-NULL next field). NOTE: When we are
// thus precleaning the ref lists (which happens single-threaded today),
// we do not disable refs discovery to honour the correct semantics of
// java.lang.Reference. As a result, we need to be careful below
// that ref removal steps interleave safely with ref discovery steps
// (in this thread).
void
ReferenceProcessor::preclean_discovered_reflist(DiscoveredList&    refs_list,
                                                BoolObjectClosure* is_alive,
                                                OopClosure*        keep_alive,
                                                VoidClosure*       complete_gc,
                                                YieldClosure*      yield) {
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    oop obj = iter.obj();
    oop next = java_lang_ref_Reference::next(obj);
    if (iter.referent() == NULL || iter.is_referent_alive() ||
        next != NULL) {
      // The referent has been cleared, or is alive, or the Reference is not
      // active; we need to trace and mark its cohort.
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Precleaning Reference (" INTPTR_FORMAT ": %s)",
                               iter.obj(), iter.obj()->blueprint()->internal_name());
      }
      // Remove Reference object from list
      iter.remove();
      // Keep alive its cohort.
1449 iter.make_referent_alive(); 1450 if (UseCompressedOops) { 1451 narrowOop* next_addr = (narrowOop*)java_lang_ref_Reference::next_addr(obj); 1452 keep_alive->do_oop(next_addr); 1453 } else { 1454 oop* next_addr = (oop*)java_lang_ref_Reference::next_addr(obj); 1455 keep_alive->do_oop(next_addr); 1456 } 1457 iter.move_to_next(); 1458 } else { 1459 iter.next(); 1460 } 1461 } 1462 // Close the reachable set 1463 complete_gc->do_void(); 1464 1465 NOT_PRODUCT( 1466 if (PrintGCDetails && PrintReferenceGC && (iter.processed() > 0)) { 1467 gclog_or_tty->print_cr(" Dropped %d Refs out of %d " 1468 "Refs in discovered list " INTPTR_FORMAT, 1469 iter.removed(), iter.processed(), (address)refs_list.head()); 1470 } 1471 ) 1472 } 1473 1474 const char* ReferenceProcessor::list_name(int i) { 1475 assert(i >= 0 && i <= _max_num_q * subclasses_of_ref, "Out of bounds index"); 1476 int j = i / _max_num_q; 1477 switch (j) { 1478 case 0: return "SoftRef"; 1479 case 1: return "WeakRef"; 1480 case 2: return "FinalRef"; 1481 case 3: return "PhantomRef"; 1482 } 1483 ShouldNotReachHere(); 1484 return NULL; 1485 } 1486 1487 #ifndef PRODUCT 1488 void ReferenceProcessor::verify_ok_to_handle_reflists() { 1489 // empty for now 1490 } 1491 #endif 1492 1493 #ifndef PRODUCT 1494 void ReferenceProcessor::clear_discovered_references() { 1495 guarantee(!_discovering_refs, "Discovering refs?"); 1496 for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) { 1497 clear_discovered_references(_discoveredSoftRefs[i]); 1498 } 1499 } 1500 1501 #endif // PRODUCT