/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/referenceProcessor.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/jniHandles.hpp"

ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy      = NULL;
const int        subclasses_of_ref = REF_PHANTOM - REF_OTHER;

// List of discovered references.
class DiscoveredList {
public:
  DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
  oop head() const {
    return UseCompressedOops ? oopDesc::decode_heap_oop(_compressed_head) :
                               _oop_head;
  }
  HeapWord* adr_head() {
    return UseCompressedOops ? (HeapWord*)&_compressed_head :
                               (HeapWord*)&_oop_head;
  }
  void set_head(oop o) {
    if (UseCompressedOops) {
      // Must compress the head ptr.
      _compressed_head = oopDesc::encode_heap_oop(o);
    } else {
      _oop_head = o;
    }
  }
  bool   empty() const          { return head() == NULL; }
  size_t length()               { return _len; }
  void   set_length(size_t len) { _len = len; }
  void   inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
  void   dec_length(size_t dec) { _len -= dec; }
private:
  // Set value depending on UseCompressedOops. This could be a template class
  // but then we have to fix all the instantiations and declarations that use this class.
  oop       _oop_head;
  narrowOop _compressed_head;
  size_t    _len;
};
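
// All discovered lists of one ReferenceProcessor live in a single C-heap
// array of DiscoveredLists (see the constructor below): one segment of
// _max_num_q entries per j.l.r.Reference subclass, laid out in the order
// Soft, Weak, Final, Phantom. _discoveredSoftRefs points at the start of
// the array and the other three _discovered* pointers are offsets into
// it, which is why loops of the form
//   for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) ...
// can walk every list by indexing off _discoveredSoftRefs alone.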

void referenceProcessor_init() {
  ReferenceProcessor::init_statics();
}

void ReferenceProcessor::init_statics() {
  // Initialize the master soft ref clock.
  java_lang_ref_SoftReference::set_clock(os::javaTimeMillis());

  _always_clear_soft_ref_policy = new AlwaysClearPolicy();
  _default_soft_ref_policy      = new COMPILER2_PRESENT(LRUMaxHeapPolicy())
                                      NOT_COMPILER2(LRUCurrentHeapPolicy());
  if (_always_clear_soft_ref_policy == NULL || _default_soft_ref_policy == NULL) {
    vm_exit_during_initialization("Could not allocate reference policy object");
  }
  guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery ||
            RefDiscoveryPolicy == ReferentBasedDiscovery,
            "Unrecognized RefDiscoveryPolicy");
}

ReferenceProcessor::ReferenceProcessor(MemRegion span,
                                       bool      mt_processing,
                                       int       mt_processing_degree,
                                       bool      mt_discovery,
                                       int       mt_discovery_degree,
                                       bool      atomic_discovery,
                                       BoolObjectClosure* is_alive_non_header,
                                       bool      discovered_list_needs_barrier) :
  _discovering_refs(false),
  _enqueuing_is_done(false),
  _is_alive_non_header(is_alive_non_header),
  _discovered_list_needs_barrier(discovered_list_needs_barrier),
  _bs(NULL),
  _processing_is_mt(mt_processing),
  _next_id(0)
{
  _span = span;
  _discovery_is_atomic = atomic_discovery;
  _discovery_is_mt     = mt_discovery;
  _num_q               = MAX2(1, mt_processing_degree);
  _max_num_q           = MAX2(_num_q, mt_discovery_degree);
  _discoveredSoftRefs = NEW_C_HEAP_ARRAY(DiscoveredList, _max_num_q * subclasses_of_ref);
  if (_discoveredSoftRefs == NULL) {
    vm_exit_during_initialization("Could not allocate RefProc Array");
  }
  _discoveredWeakRefs    = &_discoveredSoftRefs[_max_num_q];
  _discoveredFinalRefs   = &_discoveredWeakRefs[_max_num_q];
  _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_q];
  // Initialize all entries to NULL
  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
    _discoveredSoftRefs[i].set_head(NULL);
    _discoveredSoftRefs[i].set_length(0);
  }
  // If we do barriers, cache a copy of the barrier set.
  if (discovered_list_needs_barrier) {
    _bs = Universe::heap()->barrier_set();
  }
  setup_policy(false /* default soft ref policy */);
}

#ifndef PRODUCT
void ReferenceProcessor::verify_no_references_recorded() {
  guarantee(!_discovering_refs, "Discovering refs?");
  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
    guarantee(_discoveredSoftRefs[i].empty(),
              "Found non-empty discovered list");
  }
}
#endif

void ReferenceProcessor::weak_oops_do(OopClosure* f) {
  // Should this instead be
  // for (int i = 0; i < subclasses_of_ref; i++) {
  //   for (int j = 0; j < _num_q; j++) {
  //     int index = i * _max_num_q + j;
  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
    if (UseCompressedOops) {
      f->do_oop((narrowOop*)_discoveredSoftRefs[i].adr_head());
    } else {
      f->do_oop((oop*)_discoveredSoftRefs[i].adr_head());
    }
  }
}
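
// The "master clock" is a long (milliseconds since the epoch) held in the
// static clock field of java.lang.ref.SoftReference. The LRU soft-ref
// policies compare it against each SoftReference's per-instance timestamp
// field to estimate how long ago the softly reachable referent was last
// accessed; the GC stamps the clock at startup (in init_statics() above)
// and advances it here after each soft-ref processing pass.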
void ReferenceProcessor::update_soft_ref_master_clock() {
  // Update (advance) the soft ref master clock field. This must be done
  // after processing the soft ref list.
  jlong now = os::javaTimeMillis();
  jlong clock = java_lang_ref_SoftReference::clock();
  NOT_PRODUCT(
  if (now < clock) {
    warning("time warp: " INT64_FORMAT " to " INT64_FORMAT, clock, now);
  }
  )
  // In product mode, protect ourselves from system time being adjusted
  // externally and going backward; see note in the implementation of
  // GenCollectedHeap::time_since_last_gc() for the right way to fix
  // this uniformly throughout the VM; see bug-id 4741166. XXX
  if (now > clock) {
    java_lang_ref_SoftReference::set_clock(now);
  }
  // Else leave clock stalled at its old value until time progresses
  // past clock value.
}

void ReferenceProcessor::process_discovered_references(
  BoolObjectClosure*           is_alive,
  OopClosure*                  keep_alive,
  VoidClosure*                 complete_gc,
  AbstractRefProcTaskExecutor* task_executor) {
  NOT_PRODUCT(verify_ok_to_handle_reflists());

  assert(!enqueuing_is_done(), "If here enqueuing should not be complete");
  // Stop treating discovered references specially.
  disable_discovery();

  bool trace_time = PrintGCDetails && PrintReferenceGC;
  // Soft references
  {
    TraceTime tt("SoftReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  update_soft_ref_master_clock();

  // Weak references
  {
    TraceTime tt("WeakReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredWeakRefs, NULL, true,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  // Final references
  {
    TraceTime tt("FinalReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredFinalRefs, NULL, false,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  // Phantom references
  {
    TraceTime tt("PhantomReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredPhantomRefs, NULL, false,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  // Weak global JNI references. It would make more sense (semantically) to
  // traverse these simultaneously with the regular weak references above, but
  // that is not how the JDK 1.2 specification is phrased; see #4126360. Native
  // code can thus use JNI weak references to circumvent the phantom references
  // and resurrect a "post-mortem" object.
  {
    TraceTime tt("JNI Weak Reference", trace_time, false, gclog_or_tty);
    if (task_executor != NULL) {
      task_executor->set_single_threaded_mode();
    }
    process_phaseJNI(is_alive, keep_alive, complete_gc);
  }
}
#ifndef PRODUCT
// Calculate the number of jni handles.
uint ReferenceProcessor::count_jni_refs() {
  class AlwaysAliveClosure: public BoolObjectClosure {
  public:
    virtual bool do_object_b(oop obj) { return true; }
    virtual void do_object(oop obj)   { assert(false, "Don't call"); }
  };

  class CountHandleClosure: public OopClosure {
  private:
    int _count;
  public:
    CountHandleClosure(): _count(0) {}
    void do_oop(oop* unused)       { _count++; }
    void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
    int count() { return _count; }
  };
  CountHandleClosure global_handle_count;
  AlwaysAliveClosure always_alive;
  JNIHandles::weak_oops_do(&always_alive, &global_handle_count);
  return global_handle_count.count();
}
#endif

void ReferenceProcessor::process_phaseJNI(BoolObjectClosure* is_alive,
                                          OopClosure*        keep_alive,
                                          VoidClosure*       complete_gc) {
#ifndef PRODUCT
  if (PrintGCDetails && PrintReferenceGC) {
    unsigned int count = count_jni_refs();
    gclog_or_tty->print(", %u refs", count);
  }
#endif
  JNIHandles::weak_oops_do(is_alive, keep_alive);
  complete_gc->do_void();
}


template <class T>
bool enqueue_discovered_ref_helper(ReferenceProcessor* ref,
                                   AbstractRefProcTaskExecutor* task_executor) {

  // Remember old value of pending references list
  T* pending_list_addr = (T*)java_lang_ref_Reference::pending_list_addr();
  T old_pending_list_value = *pending_list_addr;

  // Enqueue references that are not made active again, and
  // clear the decks for the next collection (cycle).
  ref->enqueue_discovered_reflists((HeapWord*)pending_list_addr, task_executor);
  // Do the oop-check on pending_list_addr missed in
  // enqueue_discovered_reflist. We should probably
  // do a raw oop_check so that future such idempotent
  // oop_stores relying on the oop-check side-effect
  // may be elided automatically and safely without
  // affecting correctness.
  oop_store(pending_list_addr, oopDesc::load_decode_heap_oop(pending_list_addr));

  // Stop treating discovered references specially.
  ref->disable_discovery();

  // Return true if new pending references were added
  return old_pending_list_value != *pending_list_addr;
}

bool ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor) {
  NOT_PRODUCT(verify_ok_to_handle_reflists());
  if (UseCompressedOops) {
    return enqueue_discovered_ref_helper<narrowOop>(this, task_executor);
  } else {
    return enqueue_discovered_ref_helper<oop>(this, task_executor);
  }
}
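
// Note on list encoding: within a discovered list the References are
// linked through their discovered fields, and the last element is marked
// by a self-loop, i.e. its discovered field points back at the Reference
// itself (see add_to_discovered_list_mt() and balance_queues() below).
// The walkers in this file terminate on that self-loop, which is why they
// compare obj against next rather than testing next against NULL.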
void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list,
                                                    HeapWord* pending_list_addr) {
  // Given a list of refs linked through the "discovered" field
  // (java.lang.ref.Reference.discovered), chain them through the
  // "next" field (java.lang.ref.Reference.next) and prepend
  // to the pending list.
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr("ReferenceProcessor::enqueue_discovered_reflist list "
                           INTPTR_FORMAT, (address)refs_list.head());
  }

  oop obj = NULL;
  oop next = refs_list.head();
  // Walk down the list, copying the discovered field into
  // the next field and clearing it.
  while (obj != next) {
    obj = next;
    assert(obj->is_instanceRef(), "should be reference object");
    next = java_lang_ref_Reference::discovered(obj);
    if (TraceReferenceGC && PrintGCDetails) {
      gclog_or_tty->print_cr("        obj " INTPTR_FORMAT "/next " INTPTR_FORMAT,
                             obj, next);
    }
    assert(java_lang_ref_Reference::next(obj) == NULL,
           "The reference should not be enqueued");
    if (next == obj) {  // obj is last
      // Swap refs_list into pending_list_addr and
      // set obj's next to what we read from pending_list_addr.
      oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
      // Need oop_check on pending_list_addr above;
      // see special oop-check code at the end of
      // enqueue_discovered_reflists() further below.
      if (old == NULL) {
        // obj should be made to point to itself, since
        // pending list was empty.
        java_lang_ref_Reference::set_next(obj, obj);
      } else {
        java_lang_ref_Reference::set_next(obj, old);
      }
    } else {
      java_lang_ref_Reference::set_next(obj, next);
    }
    java_lang_ref_Reference::set_discovered(obj, (oop) NULL);
  }
}

// Parallel enqueue task
class RefProcEnqueueTask: public AbstractRefProcTaskExecutor::EnqueueTask {
public:
  RefProcEnqueueTask(ReferenceProcessor& ref_processor,
                     DiscoveredList      discovered_refs[],
                     HeapWord*           pending_list_addr,
                     int                 n_queues)
    : EnqueueTask(ref_processor, discovered_refs,
                  pending_list_addr, n_queues)
  { }

  virtual void work(unsigned int work_id) {
    assert(work_id < (unsigned int)_ref_processor.max_num_q(), "Index out-of-bounds");
    // Simplest first cut: static partitioning.
    int index = work_id;
    // The increment on "index" must correspond to the maximum number of queues
    // (n_queues) with which that ReferenceProcessor was created. That
    // is because of the "clever" way the discovered references lists were
    // allocated and are indexed into.
    assert(_n_queues == (int) _ref_processor.max_num_q(), "Different number not expected");
    for (int j = 0;
         j < subclasses_of_ref;
         j++, index += _n_queues) {
      _ref_processor.enqueue_discovered_reflist(
        _refs_lists[index], _pending_list_addr);
      _refs_lists[index].set_head(NULL);
      _refs_lists[index].set_length(0);
    }
  }
};

// Enqueue references that are not made active again
void ReferenceProcessor::enqueue_discovered_reflists(HeapWord* pending_list_addr,
  AbstractRefProcTaskExecutor* task_executor) {
  if (_processing_is_mt && task_executor != NULL) {
    // Parallel code
    RefProcEnqueueTask tsk(*this, _discoveredSoftRefs,
                           pending_list_addr, _max_num_q);
    task_executor->execute(tsk);
  } else {
    // Serial code: call the parent class's implementation
    for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
      enqueue_discovered_reflist(_discoveredSoftRefs[i], pending_list_addr);
      _discoveredSoftRefs[i].set_head(NULL);
      _discoveredSoftRefs[i].set_length(0);
    }
  }
}
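
// The DiscoveredListIterator defined next factors out the traversal
// pattern shared by the process_phase*() and preclean methods below.
// A typical walk (cf. process_phase1()) looks like:
//
//   DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
//   while (iter.has_next()) {
//     iter.load_ptrs(DEBUG_ONLY(allow_null_referent));
//     if (/* this ref should be dropped from the list */) {
//       iter.remove();
//       ...                // e.g. make_active(), make_referent_alive()
//       iter.move_to_next();
//     } else {
//       iter.next();
//     }
//   }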
// Iterator for the list of discovered references.
class DiscoveredListIterator {
public:
  inline DiscoveredListIterator(DiscoveredList&    refs_list,
                                OopClosure*        keep_alive,
                                BoolObjectClosure* is_alive);

  // End Of List.
  inline bool has_next() const { return _ref != NULL; }

  // Get oop to the Reference object.
  inline oop obj() const { return _ref; }

  // Get oop to the referent object.
  inline oop referent() const { return _referent; }

  // Returns true if referent is alive.
  inline bool is_referent_alive() const;

  // Loads data for the current reference.
  // The "allow_null_referent" argument tells us to allow for the possibility
  // of a NULL referent in the discovered Reference object. This typically
  // happens in the case of concurrent collectors that may have done the
  // discovery concurrently, or interleaved, with mutator execution.
  inline void load_ptrs(DEBUG_ONLY(bool allow_null_referent));

  // Move to the next discovered reference.
  inline void next();

  // Remove the current reference from the list
  inline void remove();

  // Make the Reference object active again.
  inline void make_active() { java_lang_ref_Reference::set_next(_ref, NULL); }

  // Make the referent alive.
  inline void make_referent_alive() {
    if (UseCompressedOops) {
      _keep_alive->do_oop((narrowOop*)_referent_addr);
    } else {
      _keep_alive->do_oop((oop*)_referent_addr);
    }
  }

  // Update the discovered field.
  inline void update_discovered() {
    // First _prev_next ref actually points into DiscoveredList (gross).
    if (UseCompressedOops) {
      if (!oopDesc::is_null(*(narrowOop*)_prev_next)) {
        _keep_alive->do_oop((narrowOop*)_prev_next);
      }
    } else {
      if (!oopDesc::is_null(*(oop*)_prev_next)) {
        _keep_alive->do_oop((oop*)_prev_next);
      }
    }
  }

  // NULL out referent pointer.
  inline void clear_referent() { oop_store_raw(_referent_addr, NULL); }

  // Statistics
  NOT_PRODUCT(
  inline size_t processed() const { return _processed; }
  inline size_t removed() const   { return _removed; }
  )

  inline void move_to_next();

private:
  DiscoveredList&    _refs_list;
  HeapWord*          _prev_next;
  oop                _prev;
  oop                _ref;
  HeapWord*          _discovered_addr;
  oop                _next;
  HeapWord*          _referent_addr;
  oop                _referent;
  OopClosure*        _keep_alive;
  BoolObjectClosure* _is_alive;
  DEBUG_ONLY(
  oop                _first_seen; // cyclic linked list check
  )
  NOT_PRODUCT(
  size_t             _processed;
  size_t             _removed;
  )
};

inline DiscoveredListIterator::DiscoveredListIterator(DiscoveredList&    refs_list,
                                                      OopClosure*        keep_alive,
                                                      BoolObjectClosure* is_alive)
  : _refs_list(refs_list),
    _prev_next(refs_list.adr_head()),
    _prev(NULL),
    _ref(refs_list.head()),
#ifdef ASSERT
    _first_seen(refs_list.head()),
#endif
#ifndef PRODUCT
    _processed(0),
    _removed(0),
#endif
    _next(NULL),
    _keep_alive(keep_alive),
    _is_alive(is_alive)
{ }

inline bool DiscoveredListIterator::is_referent_alive() const {
  return _is_alive->do_object_b(_referent);
}

inline void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
  _discovered_addr = java_lang_ref_Reference::discovered_addr(_ref);
  oop discovered = java_lang_ref_Reference::discovered(_ref);
  assert(_discovered_addr && discovered->is_oop_or_null(),
         "discovered field is bad");
  _next = discovered;
  _referent_addr = java_lang_ref_Reference::referent_addr(_ref);
  _referent = java_lang_ref_Reference::referent(_ref);
  assert(Universe::heap()->is_in_reserved_or_null(_referent),
         "Wrong oop found in java.lang.Reference object");
  assert(allow_null_referent ?
             _referent->is_oop_or_null()
           : _referent->is_oop(),
         "bad referent");
}

inline void DiscoveredListIterator::next() {
  _prev_next = _discovered_addr;
  _prev = _ref;
  move_to_next();
}
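
// remove() unlinks the current Reference by storing the appropriate
// successor through _prev_next, which points either at the DiscoveredList
// head slot (when the current ref is first in the list) or at the previous
// Reference's discovered field. When the current ref is last in the list,
// its predecessor becomes the new self-looped tail.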
inline void DiscoveredListIterator::remove() {
  assert(_ref->is_oop(), "Dropping a bad reference");
  oop_store_raw(_discovered_addr, NULL);

  // First _prev_next ref actually points into DiscoveredList (gross).
  oop new_next;
  if (_next == _ref) {
    // At the end of the list, we should make _prev point to itself.
    // If _ref is the first ref, then _prev_next will be in the DiscoveredList,
    // and _prev will be NULL.
    new_next = _prev;
  } else {
    new_next = _next;
  }

  if (UseCompressedOops) {
    // Remove Reference object from list.
    oopDesc::encode_store_heap_oop((narrowOop*)_prev_next, new_next);
  } else {
    // Remove Reference object from list.
    oopDesc::store_heap_oop((oop*)_prev_next, new_next);
  }
  NOT_PRODUCT(_removed++);
  _refs_list.dec_length(1);
}

inline void DiscoveredListIterator::move_to_next() {
  if (_ref == _next) {
    // End of the list.
    _ref = NULL;
  } else {
    _ref = _next;
  }
  assert(_ref != _first_seen, "cyclic ref_list found");
  NOT_PRODUCT(_processed++);
}

// NOTE: process_phase*() are largely similar, and at a high level
// merely iterate over the extant list applying a predicate to
// each of its elements and possibly removing that element from the
// list and applying some further closures to that element.
// We should consider the possibility of replacing these
// process_phase*() methods by abstracting them into
// a single general iterator invocation that receives appropriate
// closures that accomplish this work.
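
// The three phases, run in order on each family of lists by
// process_discovered_reflist() further below, are:
//   Phase 1 (soft refs only): drop from the list, and keep alive, any
//     SoftReferences whose dead referents the policy says to retain.
//   Phase 2: drop from the list any Reference whose referent turns out
//     to be alive after all (keeping the referent alive as needed).
//   Phase 3: for what remains, either clear the referent or mark it
//     alive, leaving the list ready to be enqueued as pending.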

// (SoftReferences only) Traverse the list and remove any SoftReferences whose
// referents are not alive, but that should be kept alive for policy reasons.
// Keep alive the transitive closure of all such referents.
void
ReferenceProcessor::process_phase1(DiscoveredList&    refs_list,
                                   ReferencePolicy*   policy,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc) {
  assert(policy != NULL, "Must have a non-NULL policy");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  // Decide which softly reachable refs should be kept alive.
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
    bool referent_is_dead = (iter.referent() != NULL) && !iter.is_referent_alive();
    if (referent_is_dead && !policy->should_clear_reference(iter.obj())) {
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Dropping reference (" INTPTR_FORMAT ": %s) by policy",
                               iter.obj(), iter.obj()->blueprint()->internal_name());
      }
      // Remove Reference object from list
      iter.remove();
      // Make the Reference object active again
      iter.make_active();
      // keep the referent around
      iter.make_referent_alive();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Close the reachable set
  complete_gc->do_void();
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC) {
      gclog_or_tty->print_cr(" Dropped " SIZE_FORMAT " dead Refs out of " SIZE_FORMAT
        " discovered Refs by policy, list " INTPTR_FORMAT,
        iter.removed(), iter.processed(), (address)refs_list.head());
    }
  )
}

// Traverse the list and remove any Refs that are not active, or
// whose referents are either alive or NULL.
void
ReferenceProcessor::pp2_work(DiscoveredList&    refs_list,
                             BoolObjectClosure* is_alive,
                             OopClosure*        keep_alive) {
  assert(discovery_is_atomic(), "Error");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
    DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());)
    assert(next == NULL, "Should not discover inactive Reference");
    if (iter.is_referent_alive()) {
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)",
                               iter.obj(), iter.obj()->blueprint()->internal_name());
      }
      // The referent is reachable after all.
      // Remove Reference object from list.
      iter.remove();
      // Update the referent pointer as necessary: Note that this
      // should not entail any recursive marking because the
      // referent must already have been traversed.
      iter.make_referent_alive();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC && (iter.processed() > 0)) {
      gclog_or_tty->print_cr(" Dropped " SIZE_FORMAT " active Refs out of " SIZE_FORMAT
        " Refs in discovered list " INTPTR_FORMAT,
        iter.removed(), iter.processed(), (address)refs_list.head());
    }
  )
}

void
ReferenceProcessor::pp2_work_concurrent_discovery(DiscoveredList&    refs_list,
                                                  BoolObjectClosure* is_alive,
                                                  OopClosure*        keep_alive,
                                                  VoidClosure*       complete_gc) {
  assert(!discovery_is_atomic(), "Error");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    HeapWord* next_addr = java_lang_ref_Reference::next_addr(iter.obj());
    oop next = java_lang_ref_Reference::next(iter.obj());
    if ((iter.referent() == NULL || iter.is_referent_alive() ||
         next != NULL)) {
      assert(next->is_oop_or_null(), "bad next field");
      // Remove Reference object from list
      iter.remove();
      // Trace the cohorts
      iter.make_referent_alive();
      if (UseCompressedOops) {
        keep_alive->do_oop((narrowOop*)next_addr);
      } else {
        keep_alive->do_oop((oop*)next_addr);
      }
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Now close the newly reachable set
  complete_gc->do_void();
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC && (iter.processed() > 0)) {
      gclog_or_tty->print_cr(" Dropped " SIZE_FORMAT " active Refs out of " SIZE_FORMAT
        " Refs in discovered list " INTPTR_FORMAT,
        iter.removed(), iter.processed(), (address)refs_list.head());
    }
  )
}

// Traverse the list and process the referents, by either
// clearing them or keeping them (and their reachable
// closure) alive.
void
ReferenceProcessor::process_phase3(DiscoveredList&    refs_list,
                                   bool               clear_referent,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc) {
  ResourceMark rm;
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.update_discovered();
    iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
    if (clear_referent) {
      // NULL out referent pointer
      iter.clear_referent();
    } else {
      // keep the referent around
      iter.make_referent_alive();
    }
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Adding %sreference (" INTPTR_FORMAT ": %s) as pending",
                             clear_referent ? "cleared " : "",
                             iter.obj(), iter.obj()->blueprint()->internal_name());
    }
    assert(iter.obj()->is_oop(UseConcMarkSweepGC), "Adding a bad reference");
    iter.next();
  }
  // Remember to update the next pointer of the last ref.
  iter.update_discovered();
  // Close the reachable set
  complete_gc->do_void();
}

void
ReferenceProcessor::clear_discovered_references(DiscoveredList& refs_list) {
  oop obj = NULL;
  oop next = refs_list.head();
  while (next != obj) {
    obj = next;
    next = java_lang_ref_Reference::discovered(obj);
    java_lang_ref_Reference::set_discovered_raw(obj, NULL);
  }
  refs_list.set_head(NULL);
  refs_list.set_length(0);
}

void
ReferenceProcessor::abandon_partial_discovered_list(DiscoveredList& refs_list) {
  clear_discovered_references(refs_list);
}

void ReferenceProcessor::abandon_partial_discovery() {
  // loop over the lists
  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
    if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
      gclog_or_tty->print_cr("\nAbandoning %s discovered list",
                             list_name(i));
    }
    abandon_partial_discovered_list(_discoveredSoftRefs[i]);
  }
}

class RefProcPhase1Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase1Task(ReferenceProcessor& ref_processor,
                    DiscoveredList      refs_lists[],
                    ReferencePolicy*    policy,
                    bool                marks_oops_alive)
    : ProcessTask(ref_processor, refs_lists, marks_oops_alive),
      _policy(policy)
  { }
  virtual void work(unsigned int i, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    Thread* thr = Thread::current();
    int refs_list_index = ((WorkerThread*)thr)->id();
    _ref_processor.process_phase1(_refs_lists[refs_list_index], _policy,
                                  &is_alive, &keep_alive, &complete_gc);
  }
private:
  ReferencePolicy* _policy;
};

class RefProcPhase2Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase2Task(ReferenceProcessor& ref_processor,
                    DiscoveredList      refs_lists[],
                    bool                marks_oops_alive)
    : ProcessTask(ref_processor, refs_lists, marks_oops_alive)
  { }
  virtual void work(unsigned int i, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    _ref_processor.process_phase2(_refs_lists[i],
                                  &is_alive, &keep_alive, &complete_gc);
  }
};

class RefProcPhase3Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase3Task(ReferenceProcessor& ref_processor,
                    DiscoveredList      refs_lists[],
                    bool                clear_referent,
                    bool                marks_oops_alive)
    : ProcessTask(ref_processor, refs_lists, marks_oops_alive),
      _clear_referent(clear_referent)
  { }
  virtual void work(unsigned int i, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    // Don't use "refs_list_index" calculated in this way because
    // balance_queues() has moved the Ref's into the first n queues.
    // Thread* thr = Thread::current();
    // int refs_list_index = ((WorkerThread*)thr)->id();
    // _ref_processor.process_phase3(_refs_lists[refs_list_index], _clear_referent,
    _ref_processor.process_phase3(_refs_lists[i], _clear_referent,
                                  &is_alive, &keep_alive, &complete_gc);
  }
private:
  bool _clear_referent;
};
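
// Example of the balancing performed by balance_queues() below: with
// _num_q == 2, _max_num_q == 4 and list lengths {8, 2, 0, 6}, total_refs
// is 16 and avg_refs is 16/2 + 1 = 9. Queues 0 and 1 are already at or
// below avg_refs, queue 2 has nothing to move, and queue 3 (>= _num_q,
// so move_all) first tops queue 0 up to 9 refs and then moves its
// remaining 5 refs to queue 1, leaving {9, 7, 0, 0}.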

// Balances reference queues.
// Move entries from all queues[0, 1, ..., _max_num_q-1] to
// queues[0, 1, ..., _num_q-1] because only the first _num_q
// corresponding to the active workers will be processed.
void ReferenceProcessor::balance_queues(DiscoveredList ref_lists[])
{
  // calculate total length
  size_t total_refs = 0;
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr("\nBalance ref_lists ");
  }

  for (int i = 0; i < _max_num_q; ++i) {
    total_refs += ref_lists[i].length();
    if (TraceReferenceGC && PrintGCDetails) {
      gclog_or_tty->print(SIZE_FORMAT " ", ref_lists[i].length());
    }
  }
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr(" = " SIZE_FORMAT, total_refs);
  }
  size_t avg_refs = total_refs / _num_q + 1;
  int to_idx = 0;
  for (int from_idx = 0; from_idx < _max_num_q; from_idx++) {
    bool move_all = false;
    if (from_idx >= _num_q) {
      move_all = ref_lists[from_idx].length() > 0;
    }
    while ((ref_lists[from_idx].length() > avg_refs) ||
           move_all) {
      assert(to_idx < _num_q, "Sanity Check!");
      if (ref_lists[to_idx].length() < avg_refs) {
        // move superfluous refs
        size_t refs_to_move;
        // Move all the Ref's if the from queue will not be processed.
        if (move_all) {
          refs_to_move = MIN2(ref_lists[from_idx].length(),
                              avg_refs - ref_lists[to_idx].length());
        } else {
          refs_to_move = MIN2(ref_lists[from_idx].length() - avg_refs,
                              avg_refs - ref_lists[to_idx].length());
        }

        assert(refs_to_move > 0, "otherwise the code below will fail");

        oop move_head = ref_lists[from_idx].head();
        oop move_tail = move_head;
        oop new_head  = move_head;
        // find an element to split the list on
        for (size_t j = 0; j < refs_to_move; ++j) {
          move_tail = new_head;
          new_head = java_lang_ref_Reference::discovered(new_head);
        }

        // Add the chain to the to list.
        if (ref_lists[to_idx].head() == NULL) {
          // to list is empty. Make a loop at the end.
          java_lang_ref_Reference::set_discovered(move_tail, move_tail);
        } else {
          java_lang_ref_Reference::set_discovered(move_tail, ref_lists[to_idx].head());
        }
        ref_lists[to_idx].set_head(move_head);
        ref_lists[to_idx].inc_length(refs_to_move);

        // Remove the chain from the from list.
        if (move_tail == new_head) {
          // We found the end of the from list.
          ref_lists[from_idx].set_head(NULL);
        } else {
          ref_lists[from_idx].set_head(new_head);
        }
        ref_lists[from_idx].dec_length(refs_to_move);
        if (ref_lists[from_idx].length() == 0) {
          break;
        }
      } else {
        to_idx = (to_idx + 1) % _num_q;
      }
    }
  }
#ifdef ASSERT
  size_t balanced_total_refs = 0;
  for (int i = 0; i < _max_num_q; ++i) {
    balanced_total_refs += ref_lists[i].length();
    if (TraceReferenceGC && PrintGCDetails) {
      gclog_or_tty->print(SIZE_FORMAT " ", ref_lists[i].length());
    }
  }
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr(" = " SIZE_FORMAT, balanced_total_refs);
    gclog_or_tty->flush();
  }
  assert(total_refs == balanced_total_refs, "Balancing was incomplete");
#endif
}

void ReferenceProcessor::balance_all_queues() {
  balance_queues(_discoveredSoftRefs);
  balance_queues(_discoveredWeakRefs);
  balance_queues(_discoveredFinalRefs);
  balance_queues(_discoveredPhantomRefs);
}
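
// Process one family of discovered lists (one entry per queue) through
// the phases described above: balance the queues if required, run
// phase 1 when a policy is supplied (i.e. for soft references), then
// phases 2 and 3, each either serially or via the task executor.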
void
ReferenceProcessor::process_discovered_reflist(
  DiscoveredList               refs_lists[],
  ReferencePolicy*             policy,
  bool                         clear_referent,
  BoolObjectClosure*           is_alive,
  OopClosure*                  keep_alive,
  VoidClosure*                 complete_gc,
  AbstractRefProcTaskExecutor* task_executor)
{
  bool mt_processing = task_executor != NULL && _processing_is_mt;
  // If discovery used MT and a dynamic number of GC threads, then
  // the queues must be balanced for correctness if fewer than the
  // maximum number of queues were used. The number of queues used
  // during discovery may be different from the number to be used
  // for processing, so don't depend on _num_q < _max_num_q as part
  // of the test.
  bool must_balance = _discovery_is_mt;

  if ((mt_processing && ParallelRefProcBalancingEnabled) ||
      must_balance) {
    balance_queues(refs_lists);
  }
  if (PrintReferenceGC && PrintGCDetails) {
    size_t total = 0;
    for (int i = 0; i < _max_num_q; ++i) {
      total += refs_lists[i].length();
    }
    gclog_or_tty->print(", " SIZE_FORMAT " refs", total);
  }

  // Phase 1 (soft refs only):
  // . Traverse the list and remove any SoftReferences whose
  //   referents are not alive, but that should be kept alive for
  //   policy reasons. Keep alive the transitive closure of all
  //   such referents.
  if (policy != NULL) {
    if (mt_processing) {
      RefProcPhase1Task phase1(*this, refs_lists, policy, true /*marks_oops_alive*/);
      task_executor->execute(phase1);
    } else {
      for (int i = 0; i < _max_num_q; i++) {
        process_phase1(refs_lists[i], policy,
                       is_alive, keep_alive, complete_gc);
      }
    }
  } else { // policy == NULL
    assert(refs_lists != _discoveredSoftRefs,
           "Policy must be specified for soft references.");
  }

  // Phase 2:
  // . Traverse the list and remove any refs whose referents are alive.
  if (mt_processing) {
    RefProcPhase2Task phase2(*this, refs_lists, !discovery_is_atomic() /*marks_oops_alive*/);
    task_executor->execute(phase2);
  } else {
    for (int i = 0; i < _max_num_q; i++) {
      process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc);
    }
  }

  // Phase 3:
  // . Traverse the list and process referents as appropriate.
  if (mt_processing) {
    RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/);
    task_executor->execute(phase3);
  } else {
    for (int i = 0; i < _max_num_q; i++) {
      process_phase3(refs_lists[i], clear_referent,
                     is_alive, keep_alive, complete_gc);
    }
  }
}

void ReferenceProcessor::clean_up_discovered_references() {
  // loop over the lists
  // Should this instead be
  // for (int i = 0; i < subclasses_of_ref; i++) {
  //   for (int j = 0; j < _num_q; j++) {
  //     int index = i * _max_num_q + j;
  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
    if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
      gclog_or_tty->print_cr(
        "\nScrubbing %s discovered list of Null referents",
        list_name(i));
    }
    clean_up_discovered_reflist(_discoveredSoftRefs[i]);
  }
}

void ReferenceProcessor::clean_up_discovered_reflist(DiscoveredList& refs_list) {
  assert(!discovery_is_atomic(), "Else why call this method?");
  DiscoveredListIterator iter(refs_list, NULL, NULL);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    oop next = java_lang_ref_Reference::next(iter.obj());
    assert(next->is_oop_or_null(), "bad next field");
    // If referent has been cleared or Reference is not active,
    // drop it.
    if (iter.referent() == NULL || next != NULL) {
      debug_only(
        if (PrintGCDetails && TraceReferenceGC) {
          gclog_or_tty->print_cr("clean_up_discovered_list: Dropping Reference: "
            INTPTR_FORMAT " with next field: " INTPTR_FORMAT
            " and referent: " INTPTR_FORMAT,
            iter.obj(), next, iter.referent());
        }
      )
      // Remove Reference object from list
      iter.remove();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC) {
      gclog_or_tty->print(
        " Removed " SIZE_FORMAT " Refs with NULL referents out of " SIZE_FORMAT
        " discovered Refs", iter.removed(), iter.processed());
    }
  )
}

inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt) {
  int id = 0;
  // Determine the queue index to use for this object.
  if (_discovery_is_mt) {
    // During a multi-threaded discovery phase,
    // each thread saves to its "own" list.
    Thread* thr = Thread::current();
    id = thr->as_Worker_thread()->id();
  } else {
    // single-threaded discovery, we save in round-robin
    // fashion to each of the lists.
    if (_processing_is_mt) {
      id = next_id();
    }
  }
  assert(0 <= id && id < _max_num_q, "Id is out-of-bounds (call Freud?)");

  // Get the discovered queue to which we will add
  DiscoveredList* list = NULL;
  switch (rt) {
    case REF_OTHER:
      // Unknown reference type, no special treatment
      break;
    case REF_SOFT:
      list = &_discoveredSoftRefs[id];
      break;
    case REF_WEAK:
      list = &_discoveredWeakRefs[id];
      break;
    case REF_FINAL:
      list = &_discoveredFinalRefs[id];
      break;
    case REF_PHANTOM:
      list = &_discoveredPhantomRefs[id];
      break;
    case REF_NONE:
      // we should not reach here if we are an instanceRefKlass
    default:
      ShouldNotReachHere();
  }
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr("Thread %d gets list " INTPTR_FORMAT, id, list);
  }
  return list;
}
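
// Under MT discovery several threads may try to discover the same
// Reference concurrently, so the list insert below must first win a CAS
// on the Reference's discovered field (NULL -> next_discovered); the
// losing thread(s) simply drop out, leaving the object on the winner's
// list. Each discovering thread has its own list (see above), so only
// that one field needs atomic treatment.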
inline void
ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
                                              oop             obj,
                                              HeapWord*       discovered_addr) {
  assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller");
  // First we must make sure this object is only enqueued once. CAS in a non null
  // discovered_addr.
  oop current_head = refs_list.head();
  // The last ref must have its discovered field pointing to itself.
  oop next_discovered = (current_head != NULL) ? current_head : obj;

  // Note: In the case of G1, this specific pre-barrier is strictly
  // not necessary because the only case we are interested in
  // here is when *discovered_addr is NULL (see the CAS further below),
  // so this will expand to nothing. As a result, we have manually
  // elided this out for G1, but left in the test for some future
  // collector that might have need for a pre-barrier here.
  if (_discovered_list_needs_barrier && !UseG1GC) {
    if (UseCompressedOops) {
      _bs->write_ref_field_pre((narrowOop*)discovered_addr, next_discovered);
    } else {
      _bs->write_ref_field_pre((oop*)discovered_addr, next_discovered);
    }
    guarantee(false, "Need to check non-G1 collector");
  }
  oop retest = oopDesc::atomic_compare_exchange_oop(next_discovered, discovered_addr,
                                                    NULL);
  if (retest == NULL) {
    // This thread just won the right to enqueue the object.
    // We have separate lists for enqueueing so no synchronization
    // is necessary.
    refs_list.set_head(obj);
    refs_list.inc_length(1);
    if (_discovered_list_needs_barrier) {
      _bs->write_ref_field((void*)discovered_addr, next_discovered);
    }

    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Enqueued reference (mt) (" INTPTR_FORMAT ": %s)",
                             obj, obj->blueprint()->internal_name());
    }
  } else {
    // If retest was non NULL, another thread beat us to it:
    // The reference has already been discovered...
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Already enqueued reference (" INTPTR_FORMAT ": %s)",
                             obj, obj->blueprint()->internal_name());
    }
  }
}

#ifndef PRODUCT
// Non-atomic (i.e. concurrent) discovery might allow us
// to observe j.l.References with NULL referents, being those
// cleared concurrently by mutators during (or after) discovery.
void ReferenceProcessor::verify_referent(oop obj) {
  bool da = discovery_is_atomic();
  oop referent = java_lang_ref_Reference::referent(obj);
  assert(da ? referent->is_oop() : referent->is_oop_or_null(),
         err_msg("Bad referent " INTPTR_FORMAT " found in Reference "
                 INTPTR_FORMAT " during %satomic discovery ",
                 (intptr_t)referent, (intptr_t)obj, da ? "" : "non-"));
}
#endif

// We mention two of several possible choices here:
// #0: if the reference object is not in the "originating generation"
//     (or part of the heap being collected, indicated by our "span"),
//     we don't treat it specially (i.e. we scan it as we would
//     a normal oop, treating its references as strong references).
//     This means that references can't be enqueued unless their
//     referent is also in the same span. This is the simplest,
//     most "local" and most conservative approach, albeit one
//     that may cause weak references to be enqueued least promptly.
//     We call this choice the "ReferenceBasedDiscovery" policy.
// #1: the reference object may be in any generation (span), but if
//     the referent is in the generation (span) being currently collected
//     then we can discover the reference object, provided
//     the object has not already been discovered by
//     a different concurrently running collector (as may be the
//     case, for instance, if the reference object is in CMS and
//     the referent in DefNewGeneration), and provided the processing
//     of this reference object by the current collector will
//     appear atomic to every other collector in the system.
//     (Thus, for instance, a concurrent collector may not
//     discover references in other generations even if the
//     referent is in its own generation). This policy may,
//     in certain cases, enqueue references somewhat sooner than
//     might Policy #0 above, but at marginally increased cost
//     and complexity in processing these references.
//     We call this choice the "ReferentBasedDiscovery" policy.
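// For example, consider a minor collection whose span is the young
// generation. Under ReferenceBasedDiscovery a Reference object living in
// the old generation, with a referent in the young generation, is outside
// the span: it is scanned as an ordinary object, so its referent field is
// treated as a strong root and the Reference is not discovered. Under
// ReferentBasedDiscovery the same Reference can be discovered, subject to
// the atomicity provisos above, because its referent lies in the span
// being collected.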
bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
  // We enqueue references only if we are discovering refs
  // (rather than processing discovered refs).
  if (!_discovering_refs || !RegisterReferences) {
    return false;
  }
  // We only enqueue active references.
  oop next = java_lang_ref_Reference::next(obj);
  if (next != NULL) {
    return false;
  }

  HeapWord* obj_addr = (HeapWord*)obj;
  if (RefDiscoveryPolicy == ReferenceBasedDiscovery &&
      !_span.contains(obj_addr)) {
    // Reference is not in the originating generation;
    // don't treat it specially (i.e. we want to scan it as a normal
    // object with strong references).
    return false;
  }

  // We only enqueue references whose referents are not (yet) strongly
  // reachable.
  if (is_alive_non_header() != NULL) {
    verify_referent(obj);
    if (is_alive_non_header()->do_object_b(java_lang_ref_Reference::referent(obj))) {
      return false;  // referent is reachable
    }
  }
  if (rt == REF_SOFT) {
    // For soft refs we can decide now if these are not
    // current candidates for clearing, in which case we
    // can mark through them now, rather than delaying that
    // to the reference-processing phase. Since all current
    // time-stamp policies advance the soft-ref clock only
    // at a major collection cycle, this is always currently
    // accurate.
    if (!_current_soft_ref_policy->should_clear_reference(obj)) {
      return false;
    }
  }

  HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
  const oop  discovered = java_lang_ref_Reference::discovered(obj);
  assert(discovered->is_oop_or_null(), "bad discovered field");
  if (discovered != NULL) {
    // The reference has already been discovered...
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Already enqueued reference (" INTPTR_FORMAT ": %s)",
                             obj, obj->blueprint()->internal_name());
    }
    if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
      // assumes that an object is not processed twice;
      // if it's been already discovered it must be on another
      // generation's discovered list; so we won't discover it.
      return false;
    } else {
      assert(RefDiscoveryPolicy == ReferenceBasedDiscovery,
             "Unrecognized policy");
      // Check assumption that an object is not potentially
      // discovered twice except by concurrent collectors that potentially
      // trace the same Reference object twice.
      assert(UseConcMarkSweepGC || UseG1GC,
             "Only possible with a concurrent marking collector");
      return true;
    }
  }

  if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
    verify_referent(obj);
    // enqueue if and only if either:
    // reference is in our span or
    // we are an atomic collector and referent is in our span
    if (_span.contains(obj_addr) ||
        (discovery_is_atomic() &&
         _span.contains(java_lang_ref_Reference::referent(obj)))) {
      // should_enqueue = true;
    } else {
      return false;
    }
  } else {
    assert(RefDiscoveryPolicy == ReferenceBasedDiscovery &&
           _span.contains(obj_addr), "code inconsistency");
  }

  // Get the right type of discovered queue head.
  DiscoveredList* list = get_discovered_list(rt);
  if (list == NULL) {
    return false;   // nothing special needs to be done
  }

  if (_discovery_is_mt) {
    add_to_discovered_list_mt(*list, obj, discovered_addr);
  } else {
    // If "_discovered_list_needs_barrier", we do write barriers when
    // updating the discovered reference list. Otherwise, we do a raw store
    // here: the field will be visited later when processing the discovered
    // references.
    oop current_head = list->head();
    // The last ref must have its discovered field pointing to itself.
    oop next_discovered = (current_head != NULL) ? current_head : obj;

    // As in the case further above, since we are over-writing a NULL
    // pre-value, we can safely elide the pre-barrier here for the case of G1.
    assert(discovered == NULL, "control point invariant");
    if (_discovered_list_needs_barrier && !UseG1GC) { // safe to elide for G1
      if (UseCompressedOops) {
        _bs->write_ref_field_pre((narrowOop*)discovered_addr, next_discovered);
      } else {
        _bs->write_ref_field_pre((oop*)discovered_addr, next_discovered);
      }
      guarantee(false, "Need to check non-G1 collector");
    }
    oop_store_raw(discovered_addr, next_discovered);
    if (_discovered_list_needs_barrier) {
      _bs->write_ref_field((void*)discovered_addr, next_discovered);
    }
    list->set_head(obj);
    list->inc_length(1);

    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Enqueued reference (" INTPTR_FORMAT ": %s)",
                             obj, obj->blueprint()->internal_name());
    }
  }
  assert(obj->is_oop(), "Enqueued a bad reference");
  verify_referent(obj);
  return true;
}

// Preclean the discovered references by removing those
// whose referents are alive, and by marking from those that
// are not active. These lists can be handled here
// in any order and, indeed, concurrently.
void ReferenceProcessor::preclean_discovered_references(
  BoolObjectClosure* is_alive,
  OopClosure* keep_alive,
  VoidClosure* complete_gc,
  YieldClosure* yield,
  bool should_unload_classes) {

  NOT_PRODUCT(verify_ok_to_handle_reflists());

#ifdef ASSERT
  bool must_remember_klasses = ClassUnloading && !UseConcMarkSweepGC ||
                               CMSClassUnloadingEnabled && UseConcMarkSweepGC ||
                               ExplicitGCInvokesConcurrentAndUnloadsClasses &&
                                 UseConcMarkSweepGC && should_unload_classes;
  RememberKlassesChecker mx(must_remember_klasses);
#endif
  // Soft references
  {
    TraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (int i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredSoftRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Weak references
  {
    TraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (int i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredWeakRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Final references
  {
    TraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (int i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredFinalRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Phantom references
  {
    TraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (int i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredPhantomRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }
}

// Walk the given discovered ref list, and remove all reference objects
// whose referents are still alive, whose referents are NULL or which
// are not active (have a non-NULL next field). NOTE: When we are
// thus precleaning the ref lists (which happens single-threaded today),
// we do not disable refs discovery to honour the correct semantics of
// java.lang.Reference. As a result, we need to be careful below
// that ref removal steps interleave safely with ref discovery steps
// (in this thread).
void
ReferenceProcessor::preclean_discovered_reflist(DiscoveredList&    refs_list,
                                                BoolObjectClosure* is_alive,
                                                OopClosure*        keep_alive,
                                                VoidClosure*       complete_gc,
                                                YieldClosure*      yield) {
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    oop obj = iter.obj();
    oop next = java_lang_ref_Reference::next(obj);
    if (iter.referent() == NULL || iter.is_referent_alive() ||
        next != NULL) {
      // The referent has been cleared, or is alive, or the Reference is not
      // active; we need to trace and mark its cohort.
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Precleaning Reference (" INTPTR_FORMAT ": %s)",
                               iter.obj(), iter.obj()->blueprint()->internal_name());
      }
      // Remove Reference object from list
      iter.remove();
      // Keep alive its cohort.
      iter.make_referent_alive();
      if (UseCompressedOops) {
        narrowOop* next_addr = (narrowOop*)java_lang_ref_Reference::next_addr(obj);
        keep_alive->do_oop(next_addr);
      } else {
        oop* next_addr = (oop*)java_lang_ref_Reference::next_addr(obj);
        keep_alive->do_oop(next_addr);
      }
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Close the reachable set
  complete_gc->do_void();

  NOT_PRODUCT(
    if (PrintGCDetails && PrintReferenceGC && (iter.processed() > 0)) {
      gclog_or_tty->print_cr(" Dropped " SIZE_FORMAT " Refs out of " SIZE_FORMAT
        " Refs in discovered list " INTPTR_FORMAT,
        iter.removed(), iter.processed(), (address)refs_list.head());
    }
  )
}

const char* ReferenceProcessor::list_name(int i) {
  assert(i >= 0 && i < _max_num_q * subclasses_of_ref, "Out of bounds index");
  int j = i / _max_num_q;
  switch (j) {
    case 0: return "SoftRef";
    case 1: return "WeakRef";
    case 2: return "FinalRef";
    case 3: return "PhantomRef";
  }
  ShouldNotReachHere();
  return NULL;
}

#ifndef PRODUCT
void ReferenceProcessor::verify_ok_to_handle_reflists() {
  // empty for now
}
#endif

#ifndef PRODUCT
void ReferenceProcessor::clear_discovered_references() {
  guarantee(!_discovering_refs, "Discovering refs?");
  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
    clear_discovered_references(_discoveredSoftRefs[i]);
  }
}

#endif // PRODUCT