#ifdef USE_PRAGMA_IDENT_SRC
#pragma ident "@(#)referenceProcessor.cpp 1.57 07/08/17 12:30:18 JVM"
#endif
/*
 * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_referenceProcessor.cpp.incl"

ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy      = NULL;
oop              ReferenceProcessor::_sentinelRef = NULL;
const int        subclasses_of_ref                = REF_PHANTOM - REF_OTHER;

// List of discovered references.
class DiscoveredList {
public:
  DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
  oop head() const {
    return UseCompressedOops ? oopDesc::decode_heap_oop_not_null(_compressed_head) :
                               _oop_head;
  }
  HeapWord* adr_head() {
    return UseCompressedOops ? (HeapWord*)&_compressed_head :
                               (HeapWord*)&_oop_head;
  }
  void set_head(oop o) {
    if (UseCompressedOops) {
      // Must compress the head ptr.
      _compressed_head = oopDesc::encode_heap_oop_not_null(o);
    } else {
      _oop_head = o;
    }
  }
  bool   empty() const          { return head() == ReferenceProcessor::sentinel_ref(); }
  size_t length()               { return _len; }
  void   set_length(size_t len) { _len = len;  }
  void   inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
  void   dec_length(size_t dec) { _len -= dec; }
private:
  // Set value depending on UseCompressedOops. This could be a template class
  // but then we have to fix all the instantiations and declarations that use this class.
  oop       _oop_head;
  narrowOop _compressed_head;
  size_t    _len;
};

void referenceProcessor_init() {
  ReferenceProcessor::init_statics();
}

void ReferenceProcessor::init_statics() {
  assert(_sentinelRef == NULL, "should be initialized precisely once");
  EXCEPTION_MARK;
  _sentinelRef = instanceKlass::cast(
                   SystemDictionary::reference_klass())->
                     allocate_permanent_instance(THREAD);

  // Initialize the master soft ref clock.
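  // The clock records the time of the most recent soft reference processing;
  // the soft ref clearing policies compare it against the per-SoftReference
  // timestamp field to estimate how recently a referent was last used.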
  java_lang_ref_SoftReference::set_clock(os::javaTimeMillis());

  if (HAS_PENDING_EXCEPTION) {
    Handle ex(THREAD, PENDING_EXCEPTION);
    vm_exit_during_initialization(ex);
  }
  assert(_sentinelRef != NULL && _sentinelRef->is_oop(),
         "Just constructed it!");
  _always_clear_soft_ref_policy = new AlwaysClearPolicy();
  _default_soft_ref_policy      = new COMPILER2_PRESENT(LRUMaxHeapPolicy())
                                      NOT_COMPILER2(LRUCurrentHeapPolicy());
  if (_always_clear_soft_ref_policy == NULL || _default_soft_ref_policy == NULL) {
    vm_exit_during_initialization("Could not allocate reference policy object");
  }
  guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery ||
            RefDiscoveryPolicy == ReferentBasedDiscovery,
            "Unrecognized RefDiscoveryPolicy");
}

ReferenceProcessor*
ReferenceProcessor::create_ref_processor(MemRegion          span,
                                         bool               atomic_discovery,
                                         bool               mt_discovery,
                                         BoolObjectClosure* is_alive_non_header,
                                         int                parallel_gc_threads,
                                         bool               mt_processing,
                                         bool               dl_needs_barrier) {
  int mt_degree = 1;
  if (parallel_gc_threads > 1) {
    mt_degree = parallel_gc_threads;
  }
  ReferenceProcessor* rp =
    new ReferenceProcessor(span, atomic_discovery,
                           mt_discovery, mt_degree,
                           mt_processing && (parallel_gc_threads > 0),
                           dl_needs_barrier);
  if (rp == NULL) {
    vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
  }
  rp->set_is_alive_non_header(is_alive_non_header);
  rp->setup_policy(false /* default soft ref policy */);
  return rp;
}

ReferenceProcessor::ReferenceProcessor(MemRegion span,
                                       bool      atomic_discovery,
                                       bool      mt_discovery,
                                       int       mt_degree,
                                       bool      mt_processing,
                                       bool      discovered_list_needs_barrier) :
  _discovering_refs(false),
  _enqueuing_is_done(false),
  _is_alive_non_header(NULL),
  _discovered_list_needs_barrier(discovered_list_needs_barrier),
  _bs(NULL),
  _processing_is_mt(mt_processing),
  _next_id(0)
{
  _span = span;
  _discovery_is_atomic = atomic_discovery;
  _discovery_is_mt     = mt_discovery;
  _num_q               = mt_degree;
  _discoveredSoftRefs  = NEW_C_HEAP_ARRAY(DiscoveredList, _num_q * subclasses_of_ref);
  if (_discoveredSoftRefs == NULL) {
    vm_exit_during_initialization("Could not allocate RefProc array");
  }
  _discoveredWeakRefs    = &_discoveredSoftRefs[_num_q];
  _discoveredFinalRefs   = &_discoveredWeakRefs[_num_q];
  _discoveredPhantomRefs = &_discoveredFinalRefs[_num_q];
  assert(sentinel_ref() != NULL, "_sentinelRef is NULL");
  // Initialize all entries to _sentinelRef.
  for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
    _discoveredSoftRefs[i].set_head(sentinel_ref());
    _discoveredSoftRefs[i].set_length(0);
  }
  // If we do barriers, cache a copy of the barrier set.
  if (discovered_list_needs_barrier) {
    _bs = Universe::heap()->barrier_set();
  }
}

#ifndef PRODUCT
void ReferenceProcessor::verify_no_references_recorded() {
  guarantee(!_discovering_refs, "Discovering refs?");
  for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
    guarantee(_discoveredSoftRefs[i].empty(),
              "Found non-empty discovered list");
  }
}
#endif

void ReferenceProcessor::weak_oops_do(OopClosure* f) {
  for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
    if (UseCompressedOops) {
      f->do_oop((narrowOop*)_discoveredSoftRefs[i].adr_head());
    } else {
      f->do_oop((oop*)_discoveredSoftRefs[i].adr_head());
    }
  }
}

void ReferenceProcessor::oops_do(OopClosure* f) {
  f->do_oop(adr_sentinel_ref());
}

void ReferenceProcessor::update_soft_ref_master_clock() {
  // Update (advance) the soft ref master clock field. This must be done
  // after processing the soft ref list.
  jlong now = os::javaTimeMillis();
  jlong clock = java_lang_ref_SoftReference::clock();
  NOT_PRODUCT(
    if (now < clock) {
      warning("time warp: " INT64_FORMAT " to " INT64_FORMAT, clock, now);
    }
  )
  // In product mode, protect ourselves from system time being adjusted
  // externally and going backward; see note in the implementation of
  // GenCollectedHeap::time_since_last_gc() for the right way to fix
  // this uniformly throughout the VM; see bug-id 4741166. XXX
  if (now > clock) {
    java_lang_ref_SoftReference::set_clock(now);
  }
  // Else leave clock stalled at its old value until time progresses
  // past clock value.
}

void ReferenceProcessor::process_discovered_references(
  BoolObjectClosure*           is_alive,
  OopClosure*                  keep_alive,
  VoidClosure*                 complete_gc,
  AbstractRefProcTaskExecutor* task_executor) {
  NOT_PRODUCT(verify_ok_to_handle_reflists());

  assert(!enqueuing_is_done(), "If here enqueuing should not be complete");
  // Stop treating discovered references specially.
  disable_discovery();

  bool trace_time = PrintGCDetails && PrintReferenceGC;
  // Soft references
  {
    TraceTime tt("SoftReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  update_soft_ref_master_clock();

  // Weak references
  {
    TraceTime tt("WeakReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredWeakRefs, NULL, true,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  // Final references
  {
    TraceTime tt("FinalReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredFinalRefs, NULL, false,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  // Phantom references
  {
    TraceTime tt("PhantomReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredPhantomRefs, NULL, false,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  // Weak global JNI references. It would make more sense (semantically) to
  // traverse these simultaneously with the regular weak references above, but
  // that is not how the JDK 1.2 specification is written; see #4126360. Native
  // code can thus use JNI weak references to circumvent the phantom references
  // and resurrect a "post-mortem" object.
  {
    TraceTime tt("JNI Weak Reference", trace_time, false, gclog_or_tty);
    if (task_executor != NULL) {
      task_executor->set_single_threaded_mode();
    }
    process_phaseJNI(is_alive, keep_alive, complete_gc);
  }
}

#ifndef PRODUCT
// Calculate the number of JNI handles.
uint ReferenceProcessor::count_jni_refs() {
  class AlwaysAliveClosure: public BoolObjectClosure {
  public:
    virtual bool do_object_b(oop obj) { return true; }
    virtual void do_object(oop obj) { assert(false, "Don't call"); }
  };

  class CountHandleClosure: public OopClosure {
  private:
    int _count;
  public:
    CountHandleClosure(): _count(0) {}
    void do_oop(oop* unused)       { _count++; }
    void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
    int count() { return _count; }
  };
  CountHandleClosure global_handle_count;
  AlwaysAliveClosure always_alive;
  JNIHandles::weak_oops_do(&always_alive, &global_handle_count);
  return global_handle_count.count();
}
#endif

void ReferenceProcessor::process_phaseJNI(BoolObjectClosure* is_alive,
                                          OopClosure*        keep_alive,
                                          VoidClosure*       complete_gc) {
#ifndef PRODUCT
  if (PrintGCDetails && PrintReferenceGC) {
    unsigned int count = count_jni_refs();
    gclog_or_tty->print(", %u refs", count);
  }
#endif
  JNIHandles::weak_oops_do(is_alive, keep_alive);
  // Finally remember to keep the sentinel around.
  keep_alive->do_oop(adr_sentinel_ref());
  complete_gc->do_void();
}


template <class T>
static bool enqueue_discovered_ref_helper(ReferenceProcessor* ref,
                                          AbstractRefProcTaskExecutor* task_executor) {

  // Remember old value of pending references list
  T* pending_list_addr = (T*)java_lang_ref_Reference::pending_list_addr();
  T old_pending_list_value = *pending_list_addr;

  // Enqueue references that are not made active again, and
  // clear the decks for the next collection (cycle).
  ref->enqueue_discovered_reflists((HeapWord*)pending_list_addr, task_executor);
  // Do the oop-check on pending_list_addr missed in
  // enqueue_discovered_reflist. We should probably
  // do a raw oop_check so that future such idempotent
  // oop_stores relying on the oop-check side-effect
  // may be elided automatically and safely without
  // affecting correctness.
  oop_store(pending_list_addr, oopDesc::load_decode_heap_oop(pending_list_addr));

  // Stop treating discovered references specially.
  ref->disable_discovery();

  // Return true if new pending references were added.
  return old_pending_list_value != *pending_list_addr;
}

bool ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor) {
  NOT_PRODUCT(verify_ok_to_handle_reflists());
  if (UseCompressedOops) {
    return enqueue_discovered_ref_helper<narrowOop>(this, task_executor);
  } else {
    return enqueue_discovered_ref_helper<oop>(this, task_executor);
  }
}

void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list,
                                                    HeapWord* pending_list_addr) {
  // Given a list of refs linked through the "discovered" field
  // (java.lang.ref.Reference.discovered), chain them through the
  // "next" field (java.lang.ref.Reference.next) and prepend
  // them to the pending list.
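  // For example, a discovered list
  //     head -> r1 -> r2 -> sentinel      (linked through "discovered")
  // with a pre-existing pending list P becomes
  //     r1 -> r2 -> P                     (linked through "next"),
  // with r2's next pointing at r2 itself if P was empty.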
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr("ReferenceProcessor::enqueue_discovered_reflist list "
                           INTPTR_FORMAT, (address)refs_list.head());
  }
  oop obj = refs_list.head();
  // Walk down the list, copying the discovered field into
  // the next field and clearing it (except for the last
  // non-sentinel object which is treated specially to avoid
  // confusion with an active reference).
  while (obj != sentinel_ref()) {
    assert(obj->is_instanceRef(), "should be reference object");
    oop next = java_lang_ref_Reference::discovered(obj);
    if (TraceReferenceGC && PrintGCDetails) {
      gclog_or_tty->print_cr("        obj " INTPTR_FORMAT "/next " INTPTR_FORMAT,
                             obj, next);
    }
    assert(java_lang_ref_Reference::next(obj) == NULL,
           "The reference should not be enqueued");
    if (next == sentinel_ref()) {  // obj is last
      // Swap refs_list into pending_list_addr and
      // set obj's next to what we read from pending_list_addr.
      oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
      // Need oop_check on pending_list_addr above;
      // see special oop-check code at the end of
      // enqueue_discovered_reflists() further below.
      if (old == NULL) {
        // obj should be made to point to itself, since
        // pending list was empty.
        java_lang_ref_Reference::set_next(obj, obj);
      } else {
        java_lang_ref_Reference::set_next(obj, old);
      }
    } else {
      java_lang_ref_Reference::set_next(obj, next);
    }
    java_lang_ref_Reference::set_discovered(obj, (oop) NULL);
    obj = next;
  }
}

// Parallel enqueue task
class RefProcEnqueueTask: public AbstractRefProcTaskExecutor::EnqueueTask {
public:
  RefProcEnqueueTask(ReferenceProcessor& ref_processor,
                     DiscoveredList      discovered_refs[],
                     HeapWord*           pending_list_addr,
                     oop                 sentinel_ref,
                     int                 n_queues)
    : EnqueueTask(ref_processor, discovered_refs,
                  pending_list_addr, sentinel_ref, n_queues)
  { }

  virtual void work(unsigned int work_id) {
    assert(work_id < (unsigned int)_ref_processor.num_q(), "Index out-of-bounds");
    // Simplest first cut: static partitioning.
    int index = work_id;
    for (int j = 0; j < subclasses_of_ref; j++, index += _n_queues) {
      _ref_processor.enqueue_discovered_reflist(
        _refs_lists[index], _pending_list_addr);
      _refs_lists[index].set_head(_sentinel_ref);
      _refs_lists[index].set_length(0);
    }
  }
};

// Enqueue references that are not made active again
void ReferenceProcessor::enqueue_discovered_reflists(HeapWord* pending_list_addr,
                                                     AbstractRefProcTaskExecutor* task_executor) {
  if (_processing_is_mt && task_executor != NULL) {
    // Parallel code
    RefProcEnqueueTask tsk(*this, _discoveredSoftRefs,
                           pending_list_addr, sentinel_ref(), _num_q);
    task_executor->execute(tsk);
  } else {
    // Serial code: call the parent class's implementation
    for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
      enqueue_discovered_reflist(_discoveredSoftRefs[i], pending_list_addr);
      _discoveredSoftRefs[i].set_head(sentinel_ref());
      _discoveredSoftRefs[i].set_length(0);
    }
  }
}

// Iterator for the list of discovered references.
class DiscoveredListIterator {
public:
  inline DiscoveredListIterator(DiscoveredList&    refs_list,
                                OopClosure*        keep_alive,
                                BoolObjectClosure* is_alive);

  // End Of List.
  inline bool has_next() const { return _next != ReferenceProcessor::sentinel_ref(); }

  // Get oop to the Reference object.
  inline oop obj() const { return _ref; }

  // Get oop to the referent object.
  inline oop referent() const { return _referent; }

  // Returns true if referent is alive.
  inline bool is_referent_alive() const;

  // Loads data for the current reference.
  // The "allow_null_referent" argument tells us to allow for the possibility
  // of a NULL referent in the discovered Reference object. This typically
  // happens in the case of concurrent collectors that may have done the
  // discovery concurrently, or interleaved, with mutator execution.
  inline void load_ptrs(DEBUG_ONLY(bool allow_null_referent));

  // Move to the next discovered reference.
  inline void next();

  // Remove the current reference from the list.
  inline void remove();

  // Make the Reference object active again.
  inline void make_active() { java_lang_ref_Reference::set_next(_ref, NULL); }

  // Make the referent alive.
  inline void make_referent_alive() {
    if (UseCompressedOops) {
      _keep_alive->do_oop((narrowOop*)_referent_addr);
    } else {
      _keep_alive->do_oop((oop*)_referent_addr);
    }
  }

  // Update the discovered field.
  inline void update_discovered() {
    // First _prev_next ref actually points into DiscoveredList (gross).
    if (UseCompressedOops) {
      _keep_alive->do_oop((narrowOop*)_prev_next);
    } else {
      _keep_alive->do_oop((oop*)_prev_next);
    }
  }

  // NULL out referent pointer.
  inline void clear_referent() { oop_store_raw(_referent_addr, NULL); }

  // Statistics
  NOT_PRODUCT(
  inline size_t processed() const { return _processed; }
  inline size_t removed() const   { return _removed; }
  )

  inline void move_to_next();

private:
  DiscoveredList&    _refs_list;
  HeapWord*          _prev_next;
  oop                _ref;
  HeapWord*          _discovered_addr;
  oop                _next;
  HeapWord*          _referent_addr;
  oop                _referent;
  OopClosure*        _keep_alive;
  BoolObjectClosure* _is_alive;
  DEBUG_ONLY(
  oop                _first_seen; // cyclic linked list check
  )
  NOT_PRODUCT(
  size_t             _processed;
  size_t             _removed;
  )
};

inline DiscoveredListIterator::DiscoveredListIterator(DiscoveredList&    refs_list,
                                                      OopClosure*        keep_alive,
                                                      BoolObjectClosure* is_alive)
  : _refs_list(refs_list),
    _prev_next(refs_list.adr_head()),
    _ref(refs_list.head()),
#ifdef ASSERT
    _first_seen(refs_list.head()),
#endif
#ifndef PRODUCT
    _processed(0),
    _removed(0),
#endif
    _next(refs_list.head()),
    _keep_alive(keep_alive),
    _is_alive(is_alive)
{ }

inline bool DiscoveredListIterator::is_referent_alive() const {
  return _is_alive->do_object_b(_referent);
}

inline void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
  _discovered_addr = java_lang_ref_Reference::discovered_addr(_ref);
  oop discovered = java_lang_ref_Reference::discovered(_ref);
  assert(_discovered_addr && discovered->is_oop_or_null(),
         "discovered field is bad");
  _next = discovered;
  _referent_addr = java_lang_ref_Reference::referent_addr(_ref);
  _referent = java_lang_ref_Reference::referent(_ref);
  assert(Universe::heap()->is_in_reserved_or_null(_referent),
         "Wrong oop found in java.lang.Reference object");
  assert(allow_null_referent ?
             _referent->is_oop_or_null()
           : _referent->is_oop(),
         "bad referent");
}

inline void DiscoveredListIterator::next() {
  _prev_next = _discovered_addr;
  move_to_next();
}

inline void DiscoveredListIterator::remove() {
  assert(_ref->is_oop(), "Dropping a bad reference");
  oop_store_raw(_discovered_addr, NULL);
  // First _prev_next ref actually points into DiscoveredList (gross).
  if (UseCompressedOops) {
    // Remove Reference object from list.
    oopDesc::encode_store_heap_oop_not_null((narrowOop*)_prev_next, _next);
  } else {
    // Remove Reference object from list.
    oopDesc::store_heap_oop((oop*)_prev_next, _next);
  }
  NOT_PRODUCT(_removed++);
  _refs_list.dec_length(1);
}

inline void DiscoveredListIterator::move_to_next() {
  _ref = _next;
  assert(_ref != _first_seen, "cyclic ref_list found");
  NOT_PRODUCT(_processed++);
}

// NOTE: process_phase*() are largely similar, and at a high level
// merely iterate over the extant list applying a predicate to
// each of its elements and possibly removing that element from the
// list and applying some further closures to that element.
// We should consider the possibility of replacing these
// process_phase*() methods by abstracting them into
// a single general iterator invocation that receives appropriate
// closures that accomplish this work.

// (SoftReferences only) Traverse the list and remove any SoftReferences whose
// referents are not alive, but that should be kept alive for policy reasons.
// Keep alive the transitive closure of all such referents.
void
ReferenceProcessor::process_phase1(DiscoveredList&    refs_list,
                                   ReferencePolicy*   policy,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc) {
  assert(policy != NULL, "Must have a non-NULL policy");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  // Decide which softly reachable refs should be kept alive.
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
    bool referent_is_dead = (iter.referent() != NULL) && !iter.is_referent_alive();
    if (referent_is_dead && !policy->should_clear_reference(iter.obj())) {
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Dropping reference (" INTPTR_FORMAT ": %s) by policy",
                               iter.obj(), iter.obj()->blueprint()->internal_name());
      }
      // Remove Reference object from list.
      iter.remove();
      // Make the Reference object active again.
      iter.make_active();
      // Keep the referent around.
      iter.make_referent_alive();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Close the reachable set.
  complete_gc->do_void();
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC) {
      gclog_or_tty->print(" Dropped " SIZE_FORMAT " dead Refs out of " SIZE_FORMAT
                          " discovered Refs by policy ", iter.removed(), iter.processed());
    }
  )
}

// Traverse the list and remove any Refs that are not active, or
// whose referents are either alive or NULL.
void
ReferenceProcessor::pp2_work(DiscoveredList&    refs_list,
                             BoolObjectClosure* is_alive,
                             OopClosure*        keep_alive) {
  assert(discovery_is_atomic(), "Error");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
    DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());)
    assert(next == NULL, "Should not discover inactive Reference");
    if (iter.is_referent_alive()) {
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)",
                               iter.obj(), iter.obj()->blueprint()->internal_name());
      }
      // The referent is reachable after all.
      // Remove Reference object from list.
      iter.remove();
      // Update the referent pointer as necessary: Note that this
      // should not entail any recursive marking because the
      // referent must already have been traversed.
      iter.make_referent_alive();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC) {
      gclog_or_tty->print(" Dropped " SIZE_FORMAT " active Refs out of " SIZE_FORMAT
                          " Refs in discovered list ", iter.removed(), iter.processed());
    }
  )
}

void
ReferenceProcessor::pp2_work_concurrent_discovery(DiscoveredList&    refs_list,
                                                  BoolObjectClosure* is_alive,
                                                  OopClosure*        keep_alive,
                                                  VoidClosure*       complete_gc) {
  assert(!discovery_is_atomic(), "Error");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    HeapWord* next_addr = java_lang_ref_Reference::next_addr(iter.obj());
    oop next = java_lang_ref_Reference::next(iter.obj());
    if ((iter.referent() == NULL || iter.is_referent_alive() ||
         next != NULL)) {
      assert(next->is_oop_or_null(), "bad next field");
      // Remove Reference object from list.
      iter.remove();
      // Trace the cohorts.
      iter.make_referent_alive();
      if (UseCompressedOops) {
        keep_alive->do_oop((narrowOop*)next_addr);
      } else {
        keep_alive->do_oop((oop*)next_addr);
      }
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Now close the newly reachable set.
  complete_gc->do_void();
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC) {
      gclog_or_tty->print(" Dropped " SIZE_FORMAT " active Refs out of " SIZE_FORMAT
                          " Refs in discovered list ", iter.removed(), iter.processed());
    }
  )
}

// Traverse the list and process the referents, by either
// clearing them or keeping them (and their reachable
// closure) alive.
void
ReferenceProcessor::process_phase3(DiscoveredList&    refs_list,
                                   bool               clear_referent,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc) {
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.update_discovered();
    iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
    if (clear_referent) {
      // NULL out referent pointer.
      iter.clear_referent();
    } else {
      // Keep the referent around.
      iter.make_referent_alive();
    }
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Adding %sreference (" INTPTR_FORMAT ": %s) as pending",
                             clear_referent ? "cleared " : "",
                             iter.obj(), iter.obj()->blueprint()->internal_name());
    }
    assert(iter.obj()->is_oop(UseConcMarkSweepGC), "Adding a bad reference");
    // If discovery is concurrent, we may have objects with null referents,
    // being those that were concurrently cleared after they were discovered
    // (and not subsequently precleaned).
    assert(   (discovery_is_atomic() && iter.referent()->is_oop())
           || (!discovery_is_atomic() && iter.referent()->is_oop_or_null(UseConcMarkSweepGC)),
           "Adding a bad referent");
    iter.next();
  }
  // Remember to keep the sentinel pointer around.
  iter.update_discovered();
  // Close the reachable set.
  complete_gc->do_void();
}

void
ReferenceProcessor::abandon_partial_discovered_list(DiscoveredList& refs_list) {
  oop obj = refs_list.head();
  while (obj != sentinel_ref()) {
    oop discovered = java_lang_ref_Reference::discovered(obj);
    java_lang_ref_Reference::set_discovered_raw(obj, NULL);
    obj = discovered;
  }
  refs_list.set_head(sentinel_ref());
  refs_list.set_length(0);
}

void ReferenceProcessor::abandon_partial_discovery() {
  // loop over the lists
  for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
    if (TraceReferenceGC && PrintGCDetails && ((i % _num_q) == 0)) {
      gclog_or_tty->print_cr(
        "\nAbandoning %s discovered list",
        list_name(i));
    }
    abandon_partial_discovered_list(_discoveredSoftRefs[i]);
  }
}

class RefProcPhase1Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase1Task(ReferenceProcessor& ref_processor,
                    DiscoveredList      refs_lists[],
                    ReferencePolicy*    policy,
                    bool                marks_oops_alive)
    : ProcessTask(ref_processor, refs_lists, marks_oops_alive),
      _policy(policy)
  { }
  virtual void work(unsigned int i, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    _ref_processor.process_phase1(_refs_lists[i], _policy,
                                  &is_alive, &keep_alive, &complete_gc);
  }
private:
  ReferencePolicy* _policy;
};

class RefProcPhase2Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase2Task(ReferenceProcessor& ref_processor,
                    DiscoveredList      refs_lists[],
                    bool                marks_oops_alive)
    : ProcessTask(ref_processor, refs_lists, marks_oops_alive)
  { }
  virtual void work(unsigned int i, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    _ref_processor.process_phase2(_refs_lists[i],
                                  &is_alive, &keep_alive, &complete_gc);
  }
};

class RefProcPhase3Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase3Task(ReferenceProcessor& ref_processor,
                    DiscoveredList      refs_lists[],
                    bool                clear_referent,
                    bool                marks_oops_alive)
    : ProcessTask(ref_processor, refs_lists, marks_oops_alive),
      _clear_referent(clear_referent)
  { }
  virtual void work(unsigned int i, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    _ref_processor.process_phase3(_refs_lists[i], _clear_referent,
                                  &is_alive, &keep_alive, &complete_gc);
  }
private:
  bool _clear_referent;
};

// Balances reference queues.
void ReferenceProcessor::balance_queues(DiscoveredList ref_lists[])
{
  // calculate total length
  size_t total_refs = 0;
  for (int i = 0; i < _num_q; ++i) {
    total_refs += ref_lists[i].length();
  }
  size_t avg_refs = total_refs / _num_q + 1;
  int to_idx = 0;
  for (int from_idx = 0; from_idx < _num_q; from_idx++) {
    while (ref_lists[from_idx].length() > avg_refs) {
      assert(to_idx < _num_q, "Sanity Check!");
      if (ref_lists[to_idx].length() < avg_refs) {
        // move superfluous refs
        size_t refs_to_move =
          MIN2(ref_lists[from_idx].length() - avg_refs,
               avg_refs - ref_lists[to_idx].length());
        oop move_head = ref_lists[from_idx].head();
        oop move_tail = move_head;
        oop new_head  = move_head;
        // find an element to split the list on
        for (size_t j = 0; j < refs_to_move; ++j) {
          move_tail = new_head;
          new_head = java_lang_ref_Reference::discovered(new_head);
        }
        java_lang_ref_Reference::set_discovered(move_tail, ref_lists[to_idx].head());
        ref_lists[to_idx].set_head(move_head);
        ref_lists[to_idx].inc_length(refs_to_move);
        ref_lists[from_idx].set_head(new_head);
        ref_lists[from_idx].dec_length(refs_to_move);
      } else {
        ++to_idx;
      }
    }
  }
}

void
ReferenceProcessor::process_discovered_reflist(
  DiscoveredList               refs_lists[],
  ReferencePolicy*             policy,
  bool                         clear_referent,
  BoolObjectClosure*           is_alive,
  OopClosure*                  keep_alive,
  VoidClosure*                 complete_gc,
  AbstractRefProcTaskExecutor* task_executor)
{
  bool mt = task_executor != NULL && _processing_is_mt;
  if (mt && ParallelRefProcBalancingEnabled) {
    balance_queues(refs_lists);
  }
  if (PrintReferenceGC && PrintGCDetails) {
    size_t total = 0;
    for (int i = 0; i < _num_q; ++i) {
      total += refs_lists[i].length();
    }
    gclog_or_tty->print(", " SIZE_FORMAT " refs", total);
  }

  // Phase 1 (soft refs only):
  // . Traverse the list and remove any SoftReferences whose
  //   referents are not alive, but that should be kept alive for
  //   policy reasons. Keep alive the transitive closure of all
  //   such referents.
  if (policy != NULL) {
    if (mt) {
      RefProcPhase1Task phase1(*this, refs_lists, policy, true /*marks_oops_alive*/);
      task_executor->execute(phase1);
    } else {
      for (int i = 0; i < _num_q; i++) {
        process_phase1(refs_lists[i], policy,
                       is_alive, keep_alive, complete_gc);
      }
    }
  } else { // policy == NULL
    assert(refs_lists != _discoveredSoftRefs,
           "Policy must be specified for soft references.");
  }

  // Phase 2:
  // . Traverse the list and remove any refs whose referents are alive.
  if (mt) {
    RefProcPhase2Task phase2(*this, refs_lists, !discovery_is_atomic() /*marks_oops_alive*/);
    task_executor->execute(phase2);
  } else {
    for (int i = 0; i < _num_q; i++) {
      process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc);
    }
  }

  // Phase 3:
  // . Traverse the list and process referents as appropriate.
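  //   (clear_referent is true for the soft and weak lists and false for
  //   the final and phantom lists; see process_discovered_references above.)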
  if (mt) {
    RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/);
    task_executor->execute(phase3);
  } else {
    for (int i = 0; i < _num_q; i++) {
      process_phase3(refs_lists[i], clear_referent,
                     is_alive, keep_alive, complete_gc);
    }
  }
}

void ReferenceProcessor::clean_up_discovered_references() {
  // loop over the lists
  for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
    if (TraceReferenceGC && PrintGCDetails && ((i % _num_q) == 0)) {
      gclog_or_tty->print_cr(
        "\nScrubbing %s discovered list of NULL referents",
        list_name(i));
    }
    clean_up_discovered_reflist(_discoveredSoftRefs[i]);
  }
}

void ReferenceProcessor::clean_up_discovered_reflist(DiscoveredList& refs_list) {
  assert(!discovery_is_atomic(), "Else why call this method?");
  DiscoveredListIterator iter(refs_list, NULL, NULL);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    oop next = java_lang_ref_Reference::next(iter.obj());
    assert(next->is_oop_or_null(), "bad next field");
    // If referent has been cleared or Reference is not active,
    // drop it.
    if (iter.referent() == NULL || next != NULL) {
      debug_only(
        if (PrintGCDetails && TraceReferenceGC) {
          gclog_or_tty->print_cr("clean_up_discovered_list: Dropping Reference: "
                                 INTPTR_FORMAT " with next field: " INTPTR_FORMAT
                                 " and referent: " INTPTR_FORMAT,
                                 iter.obj(), next, iter.referent());
        }
      )
      // Remove Reference object from list.
      iter.remove();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC) {
      gclog_or_tty->print(
        " Removed " SIZE_FORMAT " Refs with NULL referents out of " SIZE_FORMAT
        " discovered Refs", iter.removed(), iter.processed());
    }
  )
}

inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt) {
  int id = 0;
  // Determine the queue index to use for this object.
  if (_discovery_is_mt) {
    // During a multi-threaded discovery phase,
    // each thread saves to its "own" list.
    Thread* thr = Thread::current();
    assert(thr->is_GC_task_thread(),
           "Dubious cast from Thread* to WorkerThread*?");
    id = ((WorkerThread*)thr)->id();
  } else {
    // single-threaded discovery, we save in round-robin
    // fashion to each of the lists.
    if (_processing_is_mt) {
      id = next_id();
    }
  }
  assert(0 <= id && id < _num_q, "Id is out-of-bounds (call Freud?)");

  // Get the discovered queue to which we will add
  DiscoveredList* list = NULL;
  switch (rt) {
    case REF_OTHER:
      // Unknown reference type, no special treatment
      break;
    case REF_SOFT:
      list = &_discoveredSoftRefs[id];
      break;
    case REF_WEAK:
      list = &_discoveredWeakRefs[id];
      break;
    case REF_FINAL:
      list = &_discoveredFinalRefs[id];
      break;
    case REF_PHANTOM:
      list = &_discoveredPhantomRefs[id];
      break;
    case REF_NONE:
      // we should not reach here if we are an instanceRefKlass
    default:
      ShouldNotReachHere();
  }
  return list;
}

inline void
ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
                                              oop             obj,
                                              HeapWord*       discovered_addr) {
  assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller");
  // First we must make sure this object is only enqueued once. CAS in a non-NULL
  // discovered_addr.
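  // The thread whose CAS of the discovered field (from NULL to the current
  // list head) succeeds is the one that gets to link obj into its list; all
  // other racing threads observe a non-NULL value and back off.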
  oop current_head = refs_list.head();

  // Note: In the case of G1, this pre-barrier is strictly
  // not necessary because the only case we are interested in
  // here is when *discovered_addr is NULL, so this will expand to
  // nothing. As a result, I am just manually eliding this out for G1.
  if (_discovered_list_needs_barrier && !UseG1GC) {
    _bs->write_ref_field_pre((void*)discovered_addr, current_head); guarantee(false, "Needs to be fixed: YSR");
  }
  oop retest = oopDesc::atomic_compare_exchange_oop(current_head, discovered_addr,
                                                    NULL);
  if (retest == NULL) {
    // This thread just won the right to enqueue the object.
    // We have separate lists for enqueueing so no synchronization
    // is necessary.
    refs_list.set_head(obj);
    refs_list.inc_length(1);
    if (_discovered_list_needs_barrier) {
      _bs->write_ref_field((void*)discovered_addr, current_head); guarantee(false, "Needs to be fixed: YSR");
    }

  } else {
    // If retest was non-NULL, another thread beat us to it:
    // The reference has already been discovered...
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Already enqueued reference (" INTPTR_FORMAT ": %s)",
                             obj, obj->blueprint()->internal_name());
    }
  }
}

// We mention two of several possible choices here:
// #0: if the reference object is not in the "originating generation"
//     (or part of the heap being collected, indicated by our "span")
//     we don't treat it specially (i.e. we scan it as we would
//     a normal oop, treating its references as strong references).
//     This means that references can't be enqueued unless their
//     referent is also in the same span. This is the simplest,
//     most "local" and most conservative approach, albeit one
//     that may cause weak references to be enqueued least promptly.
//     We call this choice the "ReferenceBasedDiscovery" policy.
// #1: the reference object may be in any generation (span), but if
//     the referent is in the generation (span) being currently collected
//     then we can discover the reference object, provided
//     the object has not already been discovered by
//     a different concurrently running collector (as may be the
//     case, for instance, if the reference object is in CMS and
//     the referent in DefNewGeneration), and provided the processing
//     of this reference object by the current collector will
//     appear atomic to every other collector in the system.
//     (Thus, for instance, a concurrent collector may not
//     discover references in other generations even if the
//     referent is in its own generation). This policy may,
//     in certain cases, enqueue references somewhat sooner than
//     might Policy #0 above, but at marginally increased cost
//     and complexity in processing these references.
//     We call this choice the "ReferentBasedDiscovery" policy.
bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
  // We enqueue references only if we are discovering refs
  // (rather than processing discovered refs).
  if (!_discovering_refs || !RegisterReferences) {
    return false;
  }
  // We only enqueue active references.
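  // (An inactive Reference, i.e. one that has already been enqueued or is
  // pending enqueueing, has a non-NULL next field.)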
  oop next = java_lang_ref_Reference::next(obj);
  if (next != NULL) {
    return false;
  }

  HeapWord* obj_addr = (HeapWord*)obj;
  if (RefDiscoveryPolicy == ReferenceBasedDiscovery &&
      !_span.contains(obj_addr)) {
    // Reference is not in the originating generation;
    // don't treat it specially (i.e. we want to scan it as a normal
    // object with strong references).
    return false;
  }

  // We only enqueue references whose referents are not (yet) strongly
  // reachable.
  if (is_alive_non_header() != NULL) {
    oop referent = java_lang_ref_Reference::referent(obj);
    // In the case of non-concurrent discovery, the last
    // disjunct below should hold. It may not hold in the
    // case of concurrent discovery because mutators may
    // concurrently clear() a Reference.
    assert(UseConcMarkSweepGC || UseG1GC || referent != NULL,
           "Refs with null referents already filtered");
    if (is_alive_non_header()->do_object_b(referent)) {
      return false;  // referent is reachable
    }
  }
  if (rt == REF_SOFT) {
    // For soft refs we can decide now if these are not
    // current candidates for clearing, in which case we
    // can mark through them now, rather than delaying that
    // to the reference-processing phase. Since all current
    // time-stamp policies advance the soft-ref clock only
    // at a major collection cycle, this is always currently
    // accurate.
    if (!_current_soft_ref_policy->should_clear_reference(obj)) {
      return false;
    }
  }

  HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
  const oop  discovered = java_lang_ref_Reference::discovered(obj);
  assert(discovered->is_oop_or_null(), "bad discovered field");
  if (discovered != NULL) {
    // The reference has already been discovered...
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Already enqueued reference (" INTPTR_FORMAT ": %s)",
                             obj, obj->blueprint()->internal_name());
    }
    if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
      // assumes that an object is not processed twice;
      // if it's been already discovered it must be on another
      // generation's discovered list; so we won't discover it.
      return false;
    } else {
      assert(RefDiscoveryPolicy == ReferenceBasedDiscovery,
             "Unrecognized policy");
      // Check assumption that an object is not potentially
      // discovered twice except by concurrent collectors that potentially
      // trace the same Reference object twice.
      assert(UseConcMarkSweepGC,
             "Only possible with an incremental-update concurrent collector");
      return true;
    }
  }

  if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
    oop referent = java_lang_ref_Reference::referent(obj);
    assert(referent->is_oop(), "bad referent");
    // enqueue if and only if either:
    //   reference is in our span, or
    //   we are an atomic collector and referent is in our span
    if (_span.contains(obj_addr) ||
        (discovery_is_atomic() && _span.contains(referent))) {
      // should_enqueue = true;
    } else {
      return false;
    }
  } else {
    assert(RefDiscoveryPolicy == ReferenceBasedDiscovery &&
           _span.contains(obj_addr), "code inconsistency");
  }

  // Get the right type of discovered queue head.
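  // (get_discovered_list() returns NULL for REF_OTHER, in which case the
  // object gets no special treatment.)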
  DiscoveredList* list = get_discovered_list(rt);
  if (list == NULL) {
    return false;   // nothing special needs to be done
  }

  if (_discovery_is_mt) {
    add_to_discovered_list_mt(*list, obj, discovered_addr);
  } else {
    // If "_discovered_list_needs_barrier", we do write barriers when
    // updating the discovered reference list. Otherwise, we do a raw store
    // here: the field will be visited later when processing the discovered
    // references.
    oop current_head = list->head();
    // As in the case further above, since we are over-writing a NULL
    // pre-value, we can safely elide the pre-barrier here for the case of G1.
    assert(discovered == NULL, "control point invariant");
    if (_discovered_list_needs_barrier && !UseG1GC) { // safe to elide for G1
      _bs->write_ref_field_pre((oop*)discovered_addr, current_head);
    }
    oop_store_raw(discovered_addr, current_head);
    if (_discovered_list_needs_barrier) {
      _bs->write_ref_field((oop*)discovered_addr, current_head);
    }
    list->set_head(obj);
    list->inc_length(1);
  }

  // In the MT discovery case, it is currently possible to see
  // the following message multiple times if several threads
  // discover a reference about the same time. Only one will
  // however have actually added it to the discovered queue.
  // One could let add_to_discovered_list_mt() return an
  // indication for success in queueing (by 1 thread) or
  // failure (by all other threads), but I decided the extra
  // code was not worth the effort for something that is
  // only used for debugging support.
  if (TraceReferenceGC) {
    oop referent = java_lang_ref_Reference::referent(obj);
    if (PrintGCDetails) {
      gclog_or_tty->print_cr("Enqueued reference (" INTPTR_FORMAT ": %s)",
                             obj, obj->blueprint()->internal_name());
    }
    assert(referent->is_oop(), "Enqueued a bad referent");
  }
  assert(obj->is_oop(), "Enqueued a bad reference");
  return true;
}

// Preclean the discovered references by removing those
// whose referents are alive, and by marking from those that
// are not active. These lists can be handled here
// in any order and, indeed, concurrently.
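// The "yield" closure lets the caller abort precleaning between list
// segments, e.g. when a concurrent collector needs to yield to a
// foreground collection.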
void ReferenceProcessor::preclean_discovered_references(
  BoolObjectClosure* is_alive,
  OopClosure*        keep_alive,
  VoidClosure*       complete_gc,
  YieldClosure*      yield) {

  NOT_PRODUCT(verify_ok_to_handle_reflists());

  // Soft references
  {
    TraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (int i = 0; i < _num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredSoftRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Weak references
  {
    TraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (int i = 0; i < _num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredWeakRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Final references
  {
    TraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (int i = 0; i < _num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredFinalRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Phantom references
  {
    TraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (int i = 0; i < _num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredPhantomRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }
}

// Walk the given discovered ref list, and remove all reference objects
// whose referents are still alive, whose referents are NULL or which
// are not active (have a non-NULL next field). NOTE: When we are
// thus precleaning the ref lists (which happens single-threaded today),
// we do not disable refs discovery to honour the correct semantics of
// java.lang.Reference. As a result, we need to be careful below
// that ref removal steps interleave safely with ref discovery steps
// (in this thread).
void
ReferenceProcessor::preclean_discovered_reflist(DiscoveredList&    refs_list,
                                                BoolObjectClosure* is_alive,
                                                OopClosure*        keep_alive,
                                                VoidClosure*       complete_gc,
                                                YieldClosure*      yield) {
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    oop obj = iter.obj();
    oop next = java_lang_ref_Reference::next(obj);
    if (iter.referent() == NULL || iter.is_referent_alive() ||
        next != NULL) {
      // The referent has been cleared, or is alive, or the Reference is not
      // active; we need to trace and mark its cohort.
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Precleaning Reference (" INTPTR_FORMAT ": %s)",
                               iter.obj(), iter.obj()->blueprint()->internal_name());
      }
      // Remove Reference object from list.
      iter.remove();
      // Keep alive its cohort.
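      // (That is, keep both the referent and whatever the next field
      // points at reachable.)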
      iter.make_referent_alive();
      if (UseCompressedOops) {
        narrowOop* next_addr = (narrowOop*)java_lang_ref_Reference::next_addr(obj);
        keep_alive->do_oop(next_addr);
      } else {
        oop* next_addr = (oop*)java_lang_ref_Reference::next_addr(obj);
        keep_alive->do_oop(next_addr);
      }
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Close the reachable set.
  complete_gc->do_void();

  NOT_PRODUCT(
    if (PrintGCDetails && PrintReferenceGC) {
      gclog_or_tty->print(" Dropped " SIZE_FORMAT " Refs out of " SIZE_FORMAT
                          " Refs in discovered list ", iter.removed(), iter.processed());
    }
  )
}

const char* ReferenceProcessor::list_name(int i) {
  assert(i >= 0 && i < _num_q * subclasses_of_ref, "Out of bounds index");
  int j = i / _num_q;
  switch (j) {
    case 0: return "SoftRef";
    case 1: return "WeakRef";
    case 2: return "FinalRef";
    case 3: return "PhantomRef";
  }
  ShouldNotReachHere();
  return NULL;
}

#ifndef PRODUCT
void ReferenceProcessor::verify_ok_to_handle_reflists() {
  // empty for now
}
#endif

void ReferenceProcessor::verify() {
  guarantee(sentinel_ref() != NULL && sentinel_ref()->is_oop(), "Lost _sentinelRef");
}

#ifndef PRODUCT
void ReferenceProcessor::clear_discovered_references() {
  guarantee(!_discovering_refs, "Discovering refs?");
  for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
    oop obj = _discoveredSoftRefs[i].head();
    while (obj != sentinel_ref()) {
      oop next = java_lang_ref_Reference::discovered(obj);
      java_lang_ref_Reference::set_discovered(obj, (oop) NULL);
      obj = next;
    }
    _discoveredSoftRefs[i].set_head(sentinel_ref());
    _discoveredSoftRefs[i].set_length(0);
  }
}
#endif // PRODUCT