/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/referenceProcessor.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/jniHandles.hpp"

ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy      = NULL;
oop              ReferenceProcessor::_sentinelRef                  = NULL;
const int        subclasses_of_ref                                 = REF_PHANTOM - REF_OTHER;

// List of discovered references.
class DiscoveredList {
public:
  DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
  oop head() const {
    return UseCompressedOops ? oopDesc::decode_heap_oop_not_null(_compressed_head) :
                               _oop_head;
  }
  HeapWord* adr_head() {
    return UseCompressedOops ? (HeapWord*)&_compressed_head :
                               (HeapWord*)&_oop_head;
  }
  void set_head(oop o) {
    if (UseCompressedOops) {
      // Must compress the head ptr.
      _compressed_head = oopDesc::encode_heap_oop_not_null(o);
    } else {
      _oop_head = o;
    }
  }
  bool   empty() const          { return head() == ReferenceProcessor::sentinel_ref(); }
  size_t length()               { return _len; }
  void   set_length(size_t len) { _len = len; }
  void   inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
  void   dec_length(size_t dec) { _len -= dec; }
private:
  // Set value depending on UseCompressedOops. This could be a template class
  // but then we have to fix all the instantiations and declarations that use this class.
  oop       _oop_head;
  narrowOop _compressed_head;
  size_t    _len;
};

void referenceProcessor_init() {
  ReferenceProcessor::init_statics();
}

void ReferenceProcessor::init_statics() {
  assert(_sentinelRef == NULL, "should be initialized precisely once");
  EXCEPTION_MARK;
  _sentinelRef = instanceKlass::cast(
                   SystemDictionary::Reference_klass())->
                     allocate_permanent_instance(THREAD);

  // Initialize the master soft ref clock.
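  // (The master clock mirrors the static java.lang.ref.SoftReference.clock
  // field; the LRU soft-ref policies compare each SoftReference's timestamp
  // against it to estimate how long its referent has gone unreferenced.)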
  java_lang_ref_SoftReference::set_clock(os::javaTimeMillis());

  if (HAS_PENDING_EXCEPTION) {
    Handle ex(THREAD, PENDING_EXCEPTION);
    vm_exit_during_initialization(ex);
  }
  assert(_sentinelRef != NULL && _sentinelRef->is_oop(),
         "Just constructed it!");
  _always_clear_soft_ref_policy = new AlwaysClearPolicy();
  _default_soft_ref_policy      = new COMPILER2_PRESENT(LRUMaxHeapPolicy())
                                      NOT_COMPILER2(LRUCurrentHeapPolicy());
  if (_always_clear_soft_ref_policy == NULL || _default_soft_ref_policy == NULL) {
    vm_exit_during_initialization("Could not allocate reference policy object");
  }
  guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery ||
            RefDiscoveryPolicy == ReferentBasedDiscovery,
            "Unrecognized RefDiscoveryPolicy");
}

ReferenceProcessor*
ReferenceProcessor::create_ref_processor(MemRegion          span,
                                         bool               atomic_discovery,
                                         bool               mt_discovery,
                                         BoolObjectClosure* is_alive_non_header,
                                         int                parallel_gc_threads,
                                         bool               mt_processing,
                                         bool               dl_needs_barrier) {
  int mt_degree = 1;
  if (parallel_gc_threads > 1) {
    mt_degree = parallel_gc_threads;
  }
  ReferenceProcessor* rp =
    new ReferenceProcessor(span, atomic_discovery,
                           mt_discovery, mt_degree,
                           mt_processing && (parallel_gc_threads > 0),
                           dl_needs_barrier);
  if (rp == NULL) {
    vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
  }
  rp->set_is_alive_non_header(is_alive_non_header);
  rp->setup_policy(false /* default soft ref policy */);
  return rp;
}

ReferenceProcessor::ReferenceProcessor(MemRegion span,
                                       bool      atomic_discovery,
                                       bool      mt_discovery,
                                       int       mt_degree,
                                       bool      mt_processing,
                                       bool      discovered_list_needs_barrier) :
  _discovering_refs(false),
  _enqueuing_is_done(false),
  _is_alive_non_header(NULL),
  _discovered_list_needs_barrier(discovered_list_needs_barrier),
  _bs(NULL),
  _processing_is_mt(mt_processing),
  _next_id(0)
{
  _span = span;
  _discovery_is_atomic = atomic_discovery;
  _discovery_is_mt     = mt_discovery;
  _num_q               = mt_degree;
  _discoveredSoftRefs  = NEW_C_HEAP_ARRAY(DiscoveredList, _num_q * subclasses_of_ref);
  if (_discoveredSoftRefs == NULL) {
    vm_exit_during_initialization("Could not allocate RefProc Array");
  }
  _discoveredWeakRefs    = &_discoveredSoftRefs[_num_q];
  _discoveredFinalRefs   = &_discoveredWeakRefs[_num_q];
  _discoveredPhantomRefs = &_discoveredFinalRefs[_num_q];
  assert(sentinel_ref() != NULL, "_sentinelRef is NULL");
  // Initialize all entries to _sentinelRef.
  for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
    _discoveredSoftRefs[i].set_head(sentinel_ref());
    _discoveredSoftRefs[i].set_length(0);
  }
  // If we do barriers, cache a copy of the barrier set.
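  // (Caching _bs here saves a Universe::heap()->barrier_set() lookup on every
  // write to a discovered field during reference discovery.)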
  if (discovered_list_needs_barrier) {
    _bs = Universe::heap()->barrier_set();
  }
}

#ifndef PRODUCT
void ReferenceProcessor::verify_no_references_recorded() {
  guarantee(!_discovering_refs, "Discovering refs?");
  for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
    guarantee(_discoveredSoftRefs[i].empty(),
              "Found non-empty discovered list");
  }
}
#endif

void ReferenceProcessor::weak_oops_do(OopClosure* f) {
  for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
    if (UseCompressedOops) {
      f->do_oop((narrowOop*)_discoveredSoftRefs[i].adr_head());
    } else {
      f->do_oop((oop*)_discoveredSoftRefs[i].adr_head());
    }
  }
}

void ReferenceProcessor::oops_do(OopClosure* f) {
  f->do_oop(adr_sentinel_ref());
}

void ReferenceProcessor::update_soft_ref_master_clock() {
  // Update (advance) the soft ref master clock field. This must be done
  // after processing the soft ref list.
  jlong now = os::javaTimeMillis();
  jlong clock = java_lang_ref_SoftReference::clock();
  NOT_PRODUCT(
  if (now < clock) {
    warning("time warp: " INT64_FORMAT " to " INT64_FORMAT, clock, now);
  }
  )
  // In product mode, protect ourselves from system time being adjusted
  // externally and going backward; see note in the implementation of
  // GenCollectedHeap::time_since_last_gc() for the right way to fix
  // this uniformly throughout the VM; see bug-id 4741166. XXX
  if (now > clock) {
    java_lang_ref_SoftReference::set_clock(now);
  }
  // Else leave clock stalled at its old value until time progresses
  // past clock value.
}

void ReferenceProcessor::process_discovered_references(
  BoolObjectClosure*           is_alive,
  OopClosure*                  keep_alive,
  VoidClosure*                 complete_gc,
  AbstractRefProcTaskExecutor* task_executor) {
  NOT_PRODUCT(verify_ok_to_handle_reflists());

  assert(!enqueuing_is_done(), "If here enqueuing should not be complete");
  // Stop treating discovered references specially.
  disable_discovery();

  bool trace_time = PrintGCDetails && PrintReferenceGC;
  // Soft references
  {
    TraceTime tt("SoftReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  update_soft_ref_master_clock();

  // Weak references
  {
    TraceTime tt("WeakReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredWeakRefs, NULL, true,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  // Final references
  {
    TraceTime tt("FinalReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredFinalRefs, NULL, false,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  // Phantom references
  {
    TraceTime tt("PhantomReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredPhantomRefs, NULL, false,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  // Weak global JNI references. It would make more sense (semantically) to
  // traverse these simultaneously with the regular weak references above, but
  // that is not how the JDK 1.2 specification is phrased. See #4126360. Native
  // code can thus use JNI weak references to circumvent the phantom references
  // and resurrect a "post-mortem" object.
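  // (JNI weak reference processing is always done single-threaded, hence the
  // explicit switch of the task executor to single-threaded mode below.)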
  {
    TraceTime tt("JNI Weak Reference", trace_time, false, gclog_or_tty);
    if (task_executor != NULL) {
      task_executor->set_single_threaded_mode();
    }
    process_phaseJNI(is_alive, keep_alive, complete_gc);
  }
}

#ifndef PRODUCT
// Calculate the number of jni handles.
uint ReferenceProcessor::count_jni_refs() {
  class AlwaysAliveClosure: public BoolObjectClosure {
  public:
    virtual bool do_object_b(oop obj) { return true; }
    virtual void do_object(oop obj)   { assert(false, "Don't call"); }
  };

  class CountHandleClosure: public OopClosure {
  private:
    int _count;
  public:
    CountHandleClosure(): _count(0) {}
    void do_oop(oop* unused)       { _count++; }
    void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
    int count() { return _count; }
  };
  CountHandleClosure global_handle_count;
  AlwaysAliveClosure always_alive;
  JNIHandles::weak_oops_do(&always_alive, &global_handle_count);
  return global_handle_count.count();
}
#endif

void ReferenceProcessor::process_phaseJNI(BoolObjectClosure* is_alive,
                                          OopClosure*        keep_alive,
                                          VoidClosure*       complete_gc) {
#ifndef PRODUCT
  if (PrintGCDetails && PrintReferenceGC) {
    unsigned int count = count_jni_refs();
    gclog_or_tty->print(", %u refs", count);
  }
#endif
  JNIHandles::weak_oops_do(is_alive, keep_alive);
  // Finally remember to keep sentinel around
  keep_alive->do_oop(adr_sentinel_ref());
  complete_gc->do_void();
}


template <class T>
bool enqueue_discovered_ref_helper(ReferenceProcessor* ref,
                                   AbstractRefProcTaskExecutor* task_executor) {

  // Remember old value of pending references list
  T* pending_list_addr = (T*)java_lang_ref_Reference::pending_list_addr();
  T old_pending_list_value = *pending_list_addr;

  // Enqueue references that are not made active again, and
  // clear the decks for the next collection (cycle).
  ref->enqueue_discovered_reflists((HeapWord*)pending_list_addr, task_executor);
  // Do the oop-check on pending_list_addr missed in
  // enqueue_discovered_reflist. We should probably
  // do a raw oop_check so that future such idempotent
  // oop_stores relying on the oop-check side-effect
  // may be elided automatically and safely without
  // affecting correctness.
  oop_store(pending_list_addr, oopDesc::load_decode_heap_oop(pending_list_addr));

  // Stop treating discovered references specially.
  ref->disable_discovery();

  // Return true if new pending references were added
  return old_pending_list_value != *pending_list_addr;
}

bool ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor) {
  NOT_PRODUCT(verify_ok_to_handle_reflists());
  if (UseCompressedOops) {
    return enqueue_discovered_ref_helper<narrowOop>(this, task_executor);
  } else {
    return enqueue_discovered_ref_helper<oop>(this, task_executor);
  }
}

void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list,
                                                    HeapWord* pending_list_addr) {
  // Given a list of refs linked through the "discovered" field
  // (java.lang.ref.Reference.discovered) chain them through the
  // "next" field (java.lang.ref.Reference.next) and prepend
  // to the pending list.
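  // Roughly: for a discovered list d1 -> d2 -> ... -> dN -> sentinel and an
  // existing pending list P, the result is the "next"-linked chain
  // d1 -> d2 -> ... -> dN -> P (dN points to itself if P was empty), with
  // every "discovered" field cleared back to NULL.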
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr("ReferenceProcessor::enqueue_discovered_reflist list "
                           INTPTR_FORMAT, (address)refs_list.head());
  }
  oop obj = refs_list.head();
  // Walk down the list, copying the discovered field into
  // the next field and clearing it (except for the last
  // non-sentinel object which is treated specially to avoid
  // confusion with an active reference).
  while (obj != sentinel_ref()) {
    assert(obj->is_instanceRef(), "should be reference object");
    oop next = java_lang_ref_Reference::discovered(obj);
    if (TraceReferenceGC && PrintGCDetails) {
      gclog_or_tty->print_cr("        obj " INTPTR_FORMAT "/next " INTPTR_FORMAT,
                             obj, next);
    }
    assert(java_lang_ref_Reference::next(obj) == NULL,
           "The reference should not be enqueued");
    if (next == sentinel_ref()) {  // obj is last
      // Swap refs_list into pending_list_addr and
      // set obj's next to what we read from pending_list_addr.
      oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
      // Need oop_check on pending_list_addr above;
      // see special oop-check code at the end of
      // enqueue_discovered_reflists() further below.
      if (old == NULL) {
        // obj should be made to point to itself, since
        // pending list was empty.
        java_lang_ref_Reference::set_next(obj, obj);
      } else {
        java_lang_ref_Reference::set_next(obj, old);
      }
    } else {
      java_lang_ref_Reference::set_next(obj, next);
    }
    java_lang_ref_Reference::set_discovered(obj, (oop) NULL);
    obj = next;
  }
}

// Parallel enqueue task
class RefProcEnqueueTask: public AbstractRefProcTaskExecutor::EnqueueTask {
public:
  RefProcEnqueueTask(ReferenceProcessor& ref_processor,
                     DiscoveredList      discovered_refs[],
                     HeapWord*           pending_list_addr,
                     oop                 sentinel_ref,
                     int                 n_queues)
    : EnqueueTask(ref_processor, discovered_refs,
                  pending_list_addr, sentinel_ref, n_queues)
  { }

  virtual void work(unsigned int work_id) {
    assert(work_id < (unsigned int)_ref_processor.num_q(), "Index out-of-bounds");
    // Simplest first cut: static partitioning.
    int index = work_id;
    for (int j = 0; j < subclasses_of_ref; j++, index += _n_queues) {
      _ref_processor.enqueue_discovered_reflist(
        _refs_lists[index], _pending_list_addr);
      _refs_lists[index].set_head(_sentinel_ref);
      _refs_lists[index].set_length(0);
    }
  }
};

// Enqueue references that are not made active again
void ReferenceProcessor::enqueue_discovered_reflists(HeapWord* pending_list_addr,
  AbstractRefProcTaskExecutor* task_executor) {
  if (_processing_is_mt && task_executor != NULL) {
    // Parallel code
    RefProcEnqueueTask tsk(*this, _discoveredSoftRefs,
                           pending_list_addr, sentinel_ref(), _num_q);
    task_executor->execute(tsk);
  } else {
    // Serial code: call the parent class's implementation
    for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
      enqueue_discovered_reflist(_discoveredSoftRefs[i], pending_list_addr);
      _discoveredSoftRefs[i].set_head(sentinel_ref());
      _discoveredSoftRefs[i].set_length(0);
    }
  }
}

// Iterator for the list of discovered references.
class DiscoveredListIterator {
public:
  inline DiscoveredListIterator(DiscoveredList&    refs_list,
                                OopClosure*        keep_alive,
                                BoolObjectClosure* is_alive);

  // End Of List.
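  // (Every discovered list is terminated by the distinguished sentinel
  // object rather than by NULL, so end-of-list is a sentinel comparison.)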
  inline bool has_next() const { return _next != ReferenceProcessor::sentinel_ref(); }

  // Get oop to the Reference object.
  inline oop obj() const { return _ref; }

  // Get oop to the referent object.
  inline oop referent() const { return _referent; }

  // Returns true if referent is alive.
  inline bool is_referent_alive() const;

  // Loads data for the current reference.
  // The "allow_null_referent" argument tells us to allow for the possibility
  // of a NULL referent in the discovered Reference object. This typically
  // happens in the case of concurrent collectors that may have done the
  // discovery concurrently, or interleaved, with mutator execution.
  inline void load_ptrs(DEBUG_ONLY(bool allow_null_referent));

  // Move to the next discovered reference.
  inline void next();

  // Remove the current reference from the list
  inline void remove();

  // Make the Reference object active again.
  inline void make_active() { java_lang_ref_Reference::set_next(_ref, NULL); }

  // Make the referent alive.
  inline void make_referent_alive() {
    if (UseCompressedOops) {
      _keep_alive->do_oop((narrowOop*)_referent_addr);
    } else {
      _keep_alive->do_oop((oop*)_referent_addr);
    }
  }

  // Update the discovered field.
  inline void update_discovered() {
    // First _prev_next ref actually points into DiscoveredList (gross).
    if (UseCompressedOops) {
      _keep_alive->do_oop((narrowOop*)_prev_next);
    } else {
      _keep_alive->do_oop((oop*)_prev_next);
    }
  }

  // NULL out referent pointer.
  inline void clear_referent() { oop_store_raw(_referent_addr, NULL); }

  // Statistics
  NOT_PRODUCT(
  inline size_t processed() const { return _processed; }
  inline size_t removed() const   { return _removed; }
  )

  inline void move_to_next();

private:
  DiscoveredList&    _refs_list;
  HeapWord*          _prev_next;
  oop                _ref;
  HeapWord*          _discovered_addr;
  oop                _next;
  HeapWord*          _referent_addr;
  oop                _referent;
  OopClosure*        _keep_alive;
  BoolObjectClosure* _is_alive;
  DEBUG_ONLY(
  oop _first_seen; // cyclic linked list check
  )
  NOT_PRODUCT(
  size_t _processed;
  size_t _removed;
  )
};

inline DiscoveredListIterator::DiscoveredListIterator(DiscoveredList&    refs_list,
                                                      OopClosure*        keep_alive,
                                                      BoolObjectClosure* is_alive)
  : _refs_list(refs_list),
    _prev_next(refs_list.adr_head()),
    _ref(refs_list.head()),
#ifdef ASSERT
    _first_seen(refs_list.head()),
#endif
#ifndef PRODUCT
    _processed(0),
    _removed(0),
#endif
    _next(refs_list.head()),
    _keep_alive(keep_alive),
    _is_alive(is_alive)
{ }

inline bool DiscoveredListIterator::is_referent_alive() const {
  return _is_alive->do_object_b(_referent);
}

inline void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
  _discovered_addr = java_lang_ref_Reference::discovered_addr(_ref);
  oop discovered = java_lang_ref_Reference::discovered(_ref);
  assert(_discovered_addr && discovered->is_oop_or_null(),
         "discovered field is bad");
  _next = discovered;
  _referent_addr = java_lang_ref_Reference::referent_addr(_ref);
  _referent = java_lang_ref_Reference::referent(_ref);
  assert(Universe::heap()->is_in_reserved_or_null(_referent),
         "Wrong oop found in java.lang.Reference object");
  assert(allow_null_referent ?
             _referent->is_oop_or_null()
           : _referent->is_oop(),
         "bad referent");
}

inline void DiscoveredListIterator::next() {
  _prev_next = _discovered_addr;
  move_to_next();
}

inline void DiscoveredListIterator::remove() {
  assert(_ref->is_oop(), "Dropping a bad reference");
  oop_store_raw(_discovered_addr, NULL);
  // First _prev_next ref actually points into DiscoveredList (gross).
  if (UseCompressedOops) {
    // Remove Reference object from list.
    oopDesc::encode_store_heap_oop_not_null((narrowOop*)_prev_next, _next);
  } else {
    // Remove Reference object from list.
    oopDesc::store_heap_oop((oop*)_prev_next, _next);
  }
  NOT_PRODUCT(_removed++);
  _refs_list.dec_length(1);
}

inline void DiscoveredListIterator::move_to_next() {
  _ref = _next;
  assert(_ref != _first_seen, "cyclic ref_list found");
  NOT_PRODUCT(_processed++);
}

// NOTE: process_phase*() are largely similar, and at a high level
// merely iterate over the extant list applying a predicate to
// each of its elements and possibly removing that element from the
// list and applying some further closures to that element.
// We should consider the possibility of replacing these
// process_phase*() methods by abstracting them into
// a single general iterator invocation that receives appropriate
// closures that accomplish this work.

// (SoftReferences only) Traverse the list and remove any SoftReferences whose
// referents are not alive, but that should be kept alive for policy reasons.
// Keep alive the transitive closure of all such referents.
void
ReferenceProcessor::process_phase1(DiscoveredList&    refs_list,
                                   ReferencePolicy*   policy,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc) {
  assert(policy != NULL, "Must have a non-NULL policy");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  // Decide which softly reachable refs should be kept alive.
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
    bool referent_is_dead = (iter.referent() != NULL) && !iter.is_referent_alive();
    if (referent_is_dead && !policy->should_clear_reference(iter.obj())) {
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Dropping reference (" INTPTR_FORMAT ": %s" ") by policy",
                               iter.obj(), iter.obj()->blueprint()->internal_name());
      }
      // Remove Reference object from list
      iter.remove();
      // Make the Reference object active again
      iter.make_active();
      // keep the referent around
      iter.make_referent_alive();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Close the reachable set
  complete_gc->do_void();
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC) {
      gclog_or_tty->print(" Dropped " SIZE_FORMAT " dead Refs out of " SIZE_FORMAT
                          " discovered Refs by policy ", iter.removed(), iter.processed());
    }
  )
}

// Traverse the list and remove any Refs that are not active, or
// whose referents are either alive or NULL.
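// pp2_work is the variant used when discovery is atomic with respect to
// mutators (stop-the-world collectors): every discovered Reference is then
// known to be active and to have a non-NULL referent. The
// pp2_work_concurrent_discovery variant below must additionally tolerate
// NULL referents and inactive References produced by mutators running
// concurrently with discovery.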
void
ReferenceProcessor::pp2_work(DiscoveredList&    refs_list,
                             BoolObjectClosure* is_alive,
                             OopClosure*        keep_alive) {
  assert(discovery_is_atomic(), "Error");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
    DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());)
    assert(next == NULL, "Should not discover inactive Reference");
    if (iter.is_referent_alive()) {
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)",
                               iter.obj(), iter.obj()->blueprint()->internal_name());
      }
      // The referent is reachable after all.
      // Remove Reference object from list.
      iter.remove();
      // Update the referent pointer as necessary: Note that this
      // should not entail any recursive marking because the
      // referent must already have been traversed.
      iter.make_referent_alive();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC) {
      gclog_or_tty->print(" Dropped " SIZE_FORMAT " active Refs out of " SIZE_FORMAT
                          " Refs in discovered list ", iter.removed(), iter.processed());
    }
  )
}

void
ReferenceProcessor::pp2_work_concurrent_discovery(DiscoveredList&    refs_list,
                                                  BoolObjectClosure* is_alive,
                                                  OopClosure*        keep_alive,
                                                  VoidClosure*       complete_gc) {
  assert(!discovery_is_atomic(), "Error");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    HeapWord* next_addr = java_lang_ref_Reference::next_addr(iter.obj());
    oop next = java_lang_ref_Reference::next(iter.obj());
    if ((iter.referent() == NULL || iter.is_referent_alive() ||
         next != NULL)) {
      assert(next->is_oop_or_null(), "bad next field");
      // Remove Reference object from list
      iter.remove();
      // Trace the cohorts
      iter.make_referent_alive();
      if (UseCompressedOops) {
        keep_alive->do_oop((narrowOop*)next_addr);
      } else {
        keep_alive->do_oop((oop*)next_addr);
      }
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Now close the newly reachable set
  complete_gc->do_void();
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC) {
      gclog_or_tty->print(" Dropped " SIZE_FORMAT " active Refs out of " SIZE_FORMAT
                          " Refs in discovered list ", iter.removed(), iter.processed());
    }
  )
}

// Traverse the list and process the referents, by either
// clearing them or keeping them (and their reachable
// closure) alive.
void
ReferenceProcessor::process_phase3(DiscoveredList&    refs_list,
                                   bool               clear_referent,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc) {
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.update_discovered();
    iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
    if (clear_referent) {
      // NULL out referent pointer
      iter.clear_referent();
    } else {
      // keep the referent around
      iter.make_referent_alive();
    }
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Adding %sreference (" INTPTR_FORMAT ": %s) as pending",
                             clear_referent ?
"cleared " : "", 729 iter.obj(), iter.obj()->blueprint()->internal_name()); 730 } 731 assert(iter.obj()->is_oop(UseConcMarkSweepGC), "Adding a bad reference"); 732 iter.next(); 733 } 734 // Remember to keep sentinel pointer around 735 iter.update_discovered(); 736 // Close the reachable set 737 complete_gc->do_void(); 738 } 739 740 void 741 ReferenceProcessor::abandon_partial_discovered_list(DiscoveredList& refs_list) { 742 oop obj = refs_list.head(); 743 while (obj != sentinel_ref()) { 744 oop discovered = java_lang_ref_Reference::discovered(obj); 745 java_lang_ref_Reference::set_discovered_raw(obj, NULL); 746 obj = discovered; 747 } 748 refs_list.set_head(sentinel_ref()); 749 refs_list.set_length(0); 750 } 751 752 void ReferenceProcessor::abandon_partial_discovery() { 753 // loop over the lists 754 for (int i = 0; i < _num_q * subclasses_of_ref; i++) { 755 if (TraceReferenceGC && PrintGCDetails && ((i % _num_q) == 0)) { 756 gclog_or_tty->print_cr( 757 "\nAbandoning %s discovered list", 758 list_name(i)); 759 } 760 abandon_partial_discovered_list(_discoveredSoftRefs[i]); 761 } 762 } 763 764 class RefProcPhase1Task: public AbstractRefProcTaskExecutor::ProcessTask { 765 public: 766 RefProcPhase1Task(ReferenceProcessor& ref_processor, 767 DiscoveredList refs_lists[], 768 ReferencePolicy* policy, 769 bool marks_oops_alive) 770 : ProcessTask(ref_processor, refs_lists, marks_oops_alive), 771 _policy(policy) 772 { } 773 virtual void work(unsigned int i, BoolObjectClosure& is_alive, 774 OopClosure& keep_alive, 775 VoidClosure& complete_gc) 776 { 777 _ref_processor.process_phase1(_refs_lists[i], _policy, 778 &is_alive, &keep_alive, &complete_gc); 779 } 780 private: 781 ReferencePolicy* _policy; 782 }; 783 784 class RefProcPhase2Task: public AbstractRefProcTaskExecutor::ProcessTask { 785 public: 786 RefProcPhase2Task(ReferenceProcessor& ref_processor, 787 DiscoveredList refs_lists[], 788 bool marks_oops_alive) 789 : ProcessTask(ref_processor, refs_lists, marks_oops_alive) 790 { } 791 virtual void work(unsigned int i, BoolObjectClosure& is_alive, 792 OopClosure& keep_alive, 793 VoidClosure& complete_gc) 794 { 795 _ref_processor.process_phase2(_refs_lists[i], 796 &is_alive, &keep_alive, &complete_gc); 797 } 798 }; 799 800 class RefProcPhase3Task: public AbstractRefProcTaskExecutor::ProcessTask { 801 public: 802 RefProcPhase3Task(ReferenceProcessor& ref_processor, 803 DiscoveredList refs_lists[], 804 bool clear_referent, 805 bool marks_oops_alive) 806 : ProcessTask(ref_processor, refs_lists, marks_oops_alive), 807 _clear_referent(clear_referent) 808 { } 809 virtual void work(unsigned int i, BoolObjectClosure& is_alive, 810 OopClosure& keep_alive, 811 VoidClosure& complete_gc) 812 { 813 _ref_processor.process_phase3(_refs_lists[i], _clear_referent, 814 &is_alive, &keep_alive, &complete_gc); 815 } 816 private: 817 bool _clear_referent; 818 }; 819 820 // Balances reference queues. 
void ReferenceProcessor::balance_queues(DiscoveredList ref_lists[])
{
  // calculate total length
  size_t total_refs = 0;
  for (int i = 0; i < _num_q; ++i) {
    total_refs += ref_lists[i].length();
  }
  size_t avg_refs = total_refs / _num_q + 1;
  int to_idx = 0;
  for (int from_idx = 0; from_idx < _num_q; from_idx++) {
    while (ref_lists[from_idx].length() > avg_refs) {
      assert(to_idx < _num_q, "Sanity Check!");
      if (ref_lists[to_idx].length() < avg_refs) {
        // move superfluous refs
        size_t refs_to_move =
          MIN2(ref_lists[from_idx].length() - avg_refs,
               avg_refs - ref_lists[to_idx].length());
        oop move_head = ref_lists[from_idx].head();
        oop move_tail = move_head;
        oop new_head  = move_head;
        // find an element to split the list on
        for (size_t j = 0; j < refs_to_move; ++j) {
          move_tail = new_head;
          new_head = java_lang_ref_Reference::discovered(new_head);
        }
        java_lang_ref_Reference::set_discovered(move_tail, ref_lists[to_idx].head());
        ref_lists[to_idx].set_head(move_head);
        ref_lists[to_idx].inc_length(refs_to_move);
        ref_lists[from_idx].set_head(new_head);
        ref_lists[from_idx].dec_length(refs_to_move);
      } else {
        ++to_idx;
      }
    }
  }
}

void
ReferenceProcessor::process_discovered_reflist(
  DiscoveredList               refs_lists[],
  ReferencePolicy*             policy,
  bool                         clear_referent,
  BoolObjectClosure*           is_alive,
  OopClosure*                  keep_alive,
  VoidClosure*                 complete_gc,
  AbstractRefProcTaskExecutor* task_executor)
{
  bool mt = task_executor != NULL && _processing_is_mt;
  if (mt && ParallelRefProcBalancingEnabled) {
    balance_queues(refs_lists);
  }
  if (PrintReferenceGC && PrintGCDetails) {
    size_t total = 0;
    for (int i = 0; i < _num_q; ++i) {
      total += refs_lists[i].length();
    }
    gclog_or_tty->print(", " SIZE_FORMAT " refs", total);
  }

  // Phase 1 (soft refs only):
  // . Traverse the list and remove any SoftReferences whose
  //   referents are not alive, but that should be kept alive for
  //   policy reasons. Keep alive the transitive closure of all
  //   such referents.
  if (policy != NULL) {
    if (mt) {
      RefProcPhase1Task phase1(*this, refs_lists, policy, true /*marks_oops_alive*/);
      task_executor->execute(phase1);
    } else {
      for (int i = 0; i < _num_q; i++) {
        process_phase1(refs_lists[i], policy,
                       is_alive, keep_alive, complete_gc);
      }
    }
  } else { // policy == NULL
    assert(refs_lists != _discoveredSoftRefs,
           "Policy must be specified for soft references.");
  }

  // Phase 2:
  // . Traverse the list and remove any refs whose referents are alive.
  if (mt) {
    RefProcPhase2Task phase2(*this, refs_lists, !discovery_is_atomic() /*marks_oops_alive*/);
    task_executor->execute(phase2);
  } else {
    for (int i = 0; i < _num_q; i++) {
      process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc);
    }
  }

  // Phase 3:
  // . Traverse the list and process referents as appropriate.
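  //   (Soft and weak lists are processed with clear_referent == true, so
  //   their referents are NULLed out; final and phantom lists keep their
  //   referents alive; see process_discovered_references() above.)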
  if (mt) {
    RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/);
    task_executor->execute(phase3);
  } else {
    for (int i = 0; i < _num_q; i++) {
      process_phase3(refs_lists[i], clear_referent,
                     is_alive, keep_alive, complete_gc);
    }
  }
}

void ReferenceProcessor::clean_up_discovered_references() {
  // loop over the lists
  for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
    if (TraceReferenceGC && PrintGCDetails && ((i % _num_q) == 0)) {
      gclog_or_tty->print_cr(
        "\nScrubbing %s discovered list of Null referents",
        list_name(i));
    }
    clean_up_discovered_reflist(_discoveredSoftRefs[i]);
  }
}

void ReferenceProcessor::clean_up_discovered_reflist(DiscoveredList& refs_list) {
  assert(!discovery_is_atomic(), "Else why call this method?");
  DiscoveredListIterator iter(refs_list, NULL, NULL);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    oop next = java_lang_ref_Reference::next(iter.obj());
    assert(next->is_oop_or_null(), "bad next field");
    // If referent has been cleared or Reference is not active,
    // drop it.
    if (iter.referent() == NULL || next != NULL) {
      debug_only(
        if (PrintGCDetails && TraceReferenceGC) {
          gclog_or_tty->print_cr("clean_up_discovered_list: Dropping Reference: "
            INTPTR_FORMAT " with next field: " INTPTR_FORMAT
            " and referent: " INTPTR_FORMAT,
            iter.obj(), next, iter.referent());
        }
      )
      // Remove Reference object from list
      iter.remove();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC) {
      gclog_or_tty->print(
        " Removed " SIZE_FORMAT " Refs with NULL referents out of " SIZE_FORMAT
        " discovered Refs", iter.removed(), iter.processed());
    }
  )
}

inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt) {
  int id = 0;
  // Determine the queue index to use for this object.
  if (_discovery_is_mt) {
    // During a multi-threaded discovery phase,
    // each thread saves to its "own" list.
    Thread* thr = Thread::current();
    assert(thr->is_GC_task_thread(),
           "Dubious cast from Thread* to WorkerThread*?");
    id = ((WorkerThread*)thr)->id();
  } else {
    // single-threaded discovery, we save in round-robin
    // fashion to each of the lists.
    if (_processing_is_mt) {
      id = next_id();
    }
  }
  assert(0 <= id && id < _num_q, "Id is out-of-bounds (call Freud?)");

  // Get the discovered queue to which we will add
  DiscoveredList* list = NULL;
  switch (rt) {
    case REF_OTHER:
      // Unknown reference type, no special treatment
      break;
    case REF_SOFT:
      list = &_discoveredSoftRefs[id];
      break;
    case REF_WEAK:
      list = &_discoveredWeakRefs[id];
      break;
    case REF_FINAL:
      list = &_discoveredFinalRefs[id];
      break;
    case REF_PHANTOM:
      list = &_discoveredPhantomRefs[id];
      break;
    case REF_NONE:
      // we should not reach here if we are an instanceRefKlass
    default:
      ShouldNotReachHere();
  }
  return list;
}

inline void
ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
                                              oop             obj,
                                              HeapWord*       discovered_addr) {
  assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller");
  // First we must make sure this object is only enqueued once. CAS in a
  // non-NULL discovered_addr.
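  // (Threads that lose the race see a non-NULL value returned by the CAS
  // below and simply drop the reference; see the retest != NULL branch.)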
  oop current_head = refs_list.head();

  // Note: In the case of G1, this specific pre-barrier is strictly
  // not necessary because the only case we are interested in
  // here is when *discovered_addr is NULL (see the CAS further below),
  // so this will expand to nothing. As a result, we have manually
  // elided this out for G1, but left in the test for some future
  // collector that might have need for a pre-barrier here.
  if (_discovered_list_needs_barrier && !UseG1GC) {
    if (UseCompressedOops) {
      _bs->write_ref_field_pre((narrowOop*)discovered_addr, current_head);
    } else {
      _bs->write_ref_field_pre((oop*)discovered_addr, current_head);
    }
    guarantee(false, "Need to check non-G1 collector");
  }
  oop retest = oopDesc::atomic_compare_exchange_oop(current_head, discovered_addr,
                                                    NULL);
  if (retest == NULL) {
    // This thread just won the right to enqueue the object.
    // We have separate lists for enqueueing so no synchronization
    // is necessary.
    refs_list.set_head(obj);
    refs_list.inc_length(1);
    if (_discovered_list_needs_barrier) {
      _bs->write_ref_field((void*)discovered_addr, current_head);
    }
  } else {
    // If retest was non-NULL, another thread beat us to it:
    // The reference has already been discovered...
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Already enqueued reference (" INTPTR_FORMAT ": %s)",
                             obj, obj->blueprint()->internal_name());
    }
  }
}

// We mention two of several possible choices here:
// #0: if the reference object is not in the "originating generation"
//     (or part of the heap being collected, indicated by our "span")
//     we don't treat it specially (i.e. we scan it as we would
//     a normal oop, treating its references as strong references).
//     This means that references can't be enqueued unless their
//     referent is also in the same span. This is the simplest,
//     most "local" and most conservative approach, albeit one
//     that may cause weak references to be enqueued least promptly.
//     We call this choice the "ReferenceBasedDiscovery" policy.
// #1: the reference object may be in any generation (span), but if
//     the referent is in the generation (span) being currently collected
//     then we can discover the reference object, provided
//     the object has not already been discovered by
//     a different concurrently running collector (as may be the
//     case, for instance, if the reference object is in CMS and
//     the referent in DefNewGeneration), and provided the processing
//     of this reference object by the current collector will
//     appear atomic to every other collector in the system.
//     (Thus, for instance, a concurrent collector may not
//     discover references in other generations even if the
//     referent is in its own generation). This policy may,
//     in certain cases, enqueue references somewhat sooner than
//     might Policy #0 above, but at marginally increased cost
//     and complexity in processing these references.
//     We call this choice the "ReferentBasedDiscovery" policy.
bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
  // We enqueue references only if we are discovering refs
  // (rather than processing discovered refs).
  if (!_discovering_refs || !RegisterReferences) {
    return false;
  }
  // We only enqueue active references.
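  // (A Reference is active while its "next" field is NULL; once it is
  // pending or enqueued, next is non-NULL, so such references are skipped.)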
  oop next = java_lang_ref_Reference::next(obj);
  if (next != NULL) {
    return false;
  }

  HeapWord* obj_addr = (HeapWord*)obj;
  if (RefDiscoveryPolicy == ReferenceBasedDiscovery &&
      !_span.contains(obj_addr)) {
    // Reference is not in the originating generation;
    // don't treat it specially (i.e. we want to scan it as a normal
    // object with strong references).
    return false;
  }

  // We only enqueue references whose referents are not (yet) strongly
  // reachable.
  if (is_alive_non_header() != NULL) {
    oop referent = java_lang_ref_Reference::referent(obj);
    // In the case of non-concurrent discovery, the last
    // disjunct below should hold. It may not hold in the
    // case of concurrent discovery because mutators may
    // concurrently clear() a Reference.
    assert(UseConcMarkSweepGC || UseG1GC || referent != NULL,
           "Refs with null referents already filtered");
    if (is_alive_non_header()->do_object_b(referent)) {
      return false;  // referent is reachable
    }
  }
  if (rt == REF_SOFT) {
    // For soft refs we can decide now if these are not
    // current candidates for clearing, in which case we
    // can mark through them now, rather than delaying that
    // to the reference-processing phase. Since all current
    // time-stamp policies advance the soft-ref clock only
    // at a major collection cycle, this is always currently
    // accurate.
    if (!_current_soft_ref_policy->should_clear_reference(obj)) {
      return false;
    }
  }

  HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
  const oop  discovered = java_lang_ref_Reference::discovered(obj);
  assert(discovered->is_oop_or_null(), "bad discovered field");
  if (discovered != NULL) {
    // The reference has already been discovered...
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Already enqueued reference (" INTPTR_FORMAT ": %s)",
                             obj, obj->blueprint()->internal_name());
    }
    if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
      // assumes that an object is not processed twice;
      // if it's been already discovered it must be on another
      // generation's discovered list; so we won't discover it.
      return false;
    } else {
      assert(RefDiscoveryPolicy == ReferenceBasedDiscovery,
             "Unrecognized policy");
      // Check assumption that an object is not potentially
      // discovered twice except by concurrent collectors that potentially
      // trace the same Reference object twice.
      assert(UseConcMarkSweepGC,
             "Only possible with an incremental-update concurrent collector");
      return true;
    }
  }

  if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
    oop referent = java_lang_ref_Reference::referent(obj);
    assert(referent->is_oop(), "bad referent");
    // enqueue if and only if either:
    //   reference is in our span, or
    //   we are an atomic collector and referent is in our span
    if (_span.contains(obj_addr) ||
        (discovery_is_atomic() && _span.contains(referent))) {
      // should_enqueue = true;
    } else {
      return false;
    }
  } else {
    assert(RefDiscoveryPolicy == ReferenceBasedDiscovery &&
           _span.contains(obj_addr), "code inconsistency");
  }

  // Get the right type of discovered queue head.
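  // (get_discovered_list() returns NULL for REF_OTHER, in which case the
  // object gets no special treatment and is scanned as an ordinary oop.)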
  DiscoveredList* list = get_discovered_list(rt);
  if (list == NULL) {
    return false;   // nothing special needs to be done
  }

  if (_discovery_is_mt) {
    add_to_discovered_list_mt(*list, obj, discovered_addr);
  } else {
    // If "_discovered_list_needs_barrier", we do write barriers when
    // updating the discovered reference list. Otherwise, we do a raw store
    // here: the field will be visited later when processing the discovered
    // references.
    oop current_head = list->head();
    // As in the case further above, since we are over-writing a NULL
    // pre-value, we can safely elide the pre-barrier here for the case of G1.
    assert(discovered == NULL, "control point invariant");
    if (_discovered_list_needs_barrier && !UseG1GC) { // safe to elide for G1
      if (UseCompressedOops) {
        _bs->write_ref_field_pre((narrowOop*)discovered_addr, current_head);
      } else {
        _bs->write_ref_field_pre((oop*)discovered_addr, current_head);
      }
      guarantee(false, "Need to check non-G1 collector");
    }
    oop_store_raw(discovered_addr, current_head);
    if (_discovered_list_needs_barrier) {
      _bs->write_ref_field((void*)discovered_addr, current_head);
    }
    list->set_head(obj);
    list->inc_length(1);
  }

  // In the MT discovery case, it is currently possible to see
  // the following message multiple times if several threads
  // discover a reference at about the same time. Only one will
  // however have actually added it to the discovered queue.
  // One could let add_to_discovered_list_mt() return an
  // indication for success in queueing (by 1 thread) or
  // failure (by all other threads), but I decided the extra
  // code was not worth the effort for something that is
  // only used for debugging support.
  if (TraceReferenceGC) {
    oop referent = java_lang_ref_Reference::referent(obj);
    if (PrintGCDetails) {
      gclog_or_tty->print_cr("Enqueued reference (" INTPTR_FORMAT ": %s)",
                             obj, obj->blueprint()->internal_name());
    }
    assert(referent->is_oop(), "Enqueued a bad referent");
  }
  assert(obj->is_oop(), "Enqueued a bad reference");
  return true;
}

// Preclean the discovered references by removing those
// whose referents are alive, and by marking from those that
// are not active. These lists can be handled here
// in any order and, indeed, concurrently.
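// (Precleaning is typically invoked by a concurrent collector such as CMS
// to shrink the discovered lists before the stop-the-world
// reference-processing phase, trading concurrent work for a shorter pause.)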
void ReferenceProcessor::preclean_discovered_references(
  BoolObjectClosure* is_alive,
  OopClosure*        keep_alive,
  VoidClosure*       complete_gc,
  YieldClosure*      yield,
  bool               should_unload_classes) {

  NOT_PRODUCT(verify_ok_to_handle_reflists());

#ifdef ASSERT
  bool must_remember_klasses = ClassUnloading && !UseConcMarkSweepGC ||
                               CMSClassUnloadingEnabled && UseConcMarkSweepGC ||
                               ExplicitGCInvokesConcurrentAndUnloadsClasses &&
                                 UseConcMarkSweepGC && should_unload_classes;
  RememberKlassesChecker mx(must_remember_klasses);
#endif
  // Soft references
  {
    TraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (int i = 0; i < _num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredSoftRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Weak references
  {
    TraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (int i = 0; i < _num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredWeakRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Final references
  {
    TraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (int i = 0; i < _num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredFinalRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Phantom references
  {
    TraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (int i = 0; i < _num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredPhantomRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }
}

// Walk the given discovered ref list, and remove all reference objects
// whose referents are still alive, whose referents are NULL or which
// are not active (have a non-NULL next field). NOTE: When we are
// thus precleaning the ref lists (which happens single-threaded today),
// we do not disable refs discovery to honour the correct semantics of
// java.lang.Reference. As a result, we need to be careful below
// that ref removal steps interleave safely with ref discovery steps
// (in this thread).
void
ReferenceProcessor::preclean_discovered_reflist(DiscoveredList&    refs_list,
                                                BoolObjectClosure* is_alive,
                                                OopClosure*        keep_alive,
                                                VoidClosure*       complete_gc,
                                                YieldClosure*      yield) {
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    oop obj = iter.obj();
    oop next = java_lang_ref_Reference::next(obj);
    if (iter.referent() == NULL || iter.is_referent_alive() ||
        next != NULL) {
      // The referent has been cleared, or is alive, or the Reference is not
      // active; we need to trace and mark its cohort.
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Precleaning Reference (" INTPTR_FORMAT ": %s)",
                               iter.obj(), iter.obj()->blueprint()->internal_name());
      }
      // Remove Reference object from list
      iter.remove();
      // Keep alive its cohort.
      iter.make_referent_alive();
      if (UseCompressedOops) {
        narrowOop* next_addr = (narrowOop*)java_lang_ref_Reference::next_addr(obj);
        keep_alive->do_oop(next_addr);
      } else {
        oop* next_addr = (oop*)java_lang_ref_Reference::next_addr(obj);
        keep_alive->do_oop(next_addr);
      }
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Close the reachable set
  complete_gc->do_void();

  NOT_PRODUCT(
    if (PrintGCDetails && PrintReferenceGC) {
      gclog_or_tty->print(" Dropped " SIZE_FORMAT " Refs out of " SIZE_FORMAT
                          " Refs in discovered list ", iter.removed(), iter.processed());
    }
  )
}

const char* ReferenceProcessor::list_name(int i) {
  assert(i >= 0 && i < _num_q * subclasses_of_ref, "Out of bounds index");
  int j = i / _num_q;
  switch (j) {
    case 0: return "SoftRef";
    case 1: return "WeakRef";
    case 2: return "FinalRef";
    case 3: return "PhantomRef";
  }
  ShouldNotReachHere();
  return NULL;
}

#ifndef PRODUCT
void ReferenceProcessor::verify_ok_to_handle_reflists() {
  // empty for now
}
#endif

void ReferenceProcessor::verify() {
  guarantee(sentinel_ref() != NULL && sentinel_ref()->is_oop(), "Lost _sentinelRef");
}

#ifndef PRODUCT
void ReferenceProcessor::clear_discovered_references() {
  guarantee(!_discovering_refs, "Discovering refs?");
  for (int i = 0; i < _num_q * subclasses_of_ref; i++) {
    oop obj = _discoveredSoftRefs[i].head();
    while (obj != sentinel_ref()) {
      oop next = java_lang_ref_Reference::discovered(obj);
      java_lang_ref_Reference::set_discovered(obj, (oop) NULL);
      obj = next;
    }
    _discoveredSoftRefs[i].set_head(sentinel_ref());
    _discoveredSoftRefs[i].set_length(0);
  }
}
#endif // PRODUCT