/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/referencePolicy.hpp"
#include "memory/referenceProcessor.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/jniHandles.hpp"

ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy      = NULL;
oop              ReferenceProcessor::_sentinelRef                  = NULL;

bool DiscoveredList::empty() const {
  return head() == ReferenceProcessor::sentinel_ref();
}

void referenceProcessor_init() {
  ReferenceProcessor::init_statics();
}

void ReferenceProcessor::init_statics() {
  assert(_sentinelRef == NULL, "should be initialized precisely once");
  EXCEPTION_MARK;
  _sentinelRef = instanceKlass::cast(
                   SystemDictionary::Reference_klass())->
                     allocate_permanent_instance(THREAD);

  // Initialize the master soft ref clock.
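  // (SoftReference.get() stamps each soft reference with the clock value
  // current at that time; the LRU policies later compare that timestamp
  // against the advanced clock to estimate how recently a referent was
  // used, clearing only sufficiently stale references.)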
  java_lang_ref_SoftReference::set_clock(os::javaTimeMillis());

  if (HAS_PENDING_EXCEPTION) {
    Handle ex(THREAD, PENDING_EXCEPTION);
    vm_exit_during_initialization(ex);
  }
  assert(_sentinelRef != NULL && _sentinelRef->is_oop(),
         "Just constructed it!");
  _always_clear_soft_ref_policy = new AlwaysClearPolicy();
  _default_soft_ref_policy      = new COMPILER2_PRESENT(LRUMaxHeapPolicy())
                                      NOT_COMPILER2(LRUCurrentHeapPolicy());
  if (_always_clear_soft_ref_policy == NULL || _default_soft_ref_policy == NULL) {
    vm_exit_during_initialization("Could not allocate reference policy object");
  }
  guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery ||
            RefDiscoveryPolicy == ReferentBasedDiscovery,
            "Unrecognized RefDiscoveryPolicy");
}

ReferenceProcessor::ReferenceProcessor(MemRegion span,
                                       bool      mt_processing,
                                       int       mt_processing_degree,
                                       bool      mt_discovery,
                                       int       mt_discovery_degree,
                                       bool      atomic_discovery,
                                       BoolObjectClosure* is_alive_non_header,
                                       bool      discovered_list_needs_barrier) :
  _discovering_refs(false),
  _enqueuing_is_done(false),
  _is_alive_non_header(is_alive_non_header),
  _discovered_list_needs_barrier(discovered_list_needs_barrier),
  _bs(NULL),
  _processing_is_mt(mt_processing),
  _next_id(0)
{
  _span = span;
  _discovery_is_atomic = atomic_discovery;
  _discovery_is_mt     = mt_discovery;
  _num_q               = MAX2(1, mt_processing_degree);
  _max_num_q           = MAX2(_num_q, mt_discovery_degree);
  _discoveredSoftRefs = NEW_C_HEAP_ARRAY(DiscoveredList, _max_num_q * subclasses_of_ref());
  if (_discoveredSoftRefs == NULL) {
    vm_exit_during_initialization("Could not allocate RefProc Array");
  }
  _discoveredWeakRefs    = &_discoveredSoftRefs[_max_num_q];
  _discoveredFinalRefs   = &_discoveredWeakRefs[_max_num_q];
  _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_q];
  assert(sentinel_ref() != NULL, "_sentinelRef is NULL");
  // Initialize all entries to _sentinelRef
  for (int i = 0; i < _max_num_q * subclasses_of_ref(); i++) {
    _discoveredSoftRefs[i].set_head(sentinel_ref());
    _discoveredSoftRefs[i].set_length(0);
  }
  // If we do barriers, cache a copy of the barrier set.
  if (discovered_list_needs_barrier) {
    _bs = Universe::heap()->barrier_set();
  }
  setup_policy(false /* default soft ref policy */);
}

#ifndef PRODUCT
void ReferenceProcessor::verify_no_references_recorded() {
  guarantee(!_discovering_refs, "Discovering refs?");
  for (int i = 0; i < _max_num_q * subclasses_of_ref(); i++) {
    guarantee(_discoveredSoftRefs[i].empty(),
              "Found non-empty discovered list");
  }
}
#endif

void ReferenceProcessor::weak_oops_do(OopClosure* f) {
  // Should this instead be
  // for (int i = 0; i < subclasses_of_ref(); i++) {
  //   for (int j = 0; j < _num_q; j++) {
  //     int index = i * _max_num_q + j;
  for (int i = 0; i < _max_num_q * subclasses_of_ref(); i++) {
    if (UseCompressedOops) {
      f->do_oop((narrowOop*)_discoveredSoftRefs[i].adr_head());
    } else {
      f->do_oop((oop*)_discoveredSoftRefs[i].adr_head());
    }
  }
}

void ReferenceProcessor::oops_do(OopClosure* f) {
  f->do_oop(adr_sentinel_ref());
}

void ReferenceProcessor::update_soft_ref_master_clock() {
  // Update (advance) the soft ref master clock field. This must be done
  // after processing the soft ref list.
  jlong now = os::javaTimeMillis();
  jlong clock = java_lang_ref_SoftReference::clock();
  NOT_PRODUCT(
  if (now < clock) {
    warning("time warp: " INT64_FORMAT " to " INT64_FORMAT,
            (int64_t)clock, (int64_t)now);
  }
  )
  // In product mode, protect ourselves from system time being adjusted
  // externally and going backward; see note in the implementation of
  // GenCollectedHeap::time_since_last_gc() for the right way to fix
  // this uniformly throughout the VM; see bug-id 4741166. XXX
  if (now > clock) {
    java_lang_ref_SoftReference::set_clock(now);
  }
  // Else leave clock stalled at its old value until time progresses
  // past clock value.
}

void ReferenceProcessor::process_discovered_references(
  BoolObjectClosure*           is_alive,
  OopClosure*                  keep_alive,
  VoidClosure*                 complete_gc,
  AbstractRefProcTaskExecutor* task_executor) {
  NOT_PRODUCT(verify_ok_to_handle_reflists());

  assert(!enqueuing_is_done(), "If here enqueuing should not be complete");
  // Stop treating discovered references specially.
  disable_discovery();

  bool trace_time = PrintGCDetails && PrintReferenceGC;
  // Soft references
  {
    TraceTime tt("SoftReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  update_soft_ref_master_clock();

  // Weak references
  {
    TraceTime tt("WeakReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredWeakRefs, NULL, true,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  // Final references
  {
    TraceTime tt("FinalReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredFinalRefs, NULL, false,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  // Phantom references
  {
    TraceTime tt("PhantomReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredPhantomRefs, NULL, false,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  // Weak global JNI references. It would make more sense (semantically) to
  // traverse these simultaneously with the regular weak references above,
  // but that is not what the JDK 1.2 specification calls for; see #4126360.
  // Native code can thus use JNI weak references to circumvent the phantom
  // references and resurrect a "post-mortem" object.
  {
    TraceTime tt("JNI Weak Reference", trace_time, false, gclog_or_tty);
    if (task_executor != NULL) {
      task_executor->set_single_threaded_mode();
    }
    process_phaseJNI(is_alive, keep_alive, complete_gc);
  }
}

#ifndef PRODUCT
// Calculate the number of jni handles.
uint ReferenceProcessor::count_jni_refs() {
  class AlwaysAliveClosure: public BoolObjectClosure {
  public:
    virtual bool do_object_b(oop obj) { return true; }
    virtual void do_object(oop obj) { assert(false, "Don't call"); }
  };

  class CountHandleClosure: public OopClosure {
  private:
    int _count;
  public:
    CountHandleClosure(): _count(0) {}
    void do_oop(oop* unused)       { _count++; }
    void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
    int count() { return _count; }
  };
  CountHandleClosure global_handle_count;
  AlwaysAliveClosure always_alive;
  JNIHandles::weak_oops_do(&always_alive, &global_handle_count);
  return global_handle_count.count();
}
#endif

void ReferenceProcessor::process_phaseJNI(BoolObjectClosure* is_alive,
                                          OopClosure*        keep_alive,
                                          VoidClosure*       complete_gc) {
#ifndef PRODUCT
  if (PrintGCDetails && PrintReferenceGC) {
    unsigned int count = count_jni_refs();
    gclog_or_tty->print(", %u refs", count);
  }
#endif
  JNIHandles::weak_oops_do(is_alive, keep_alive);
  // Finally remember to keep sentinel around
  keep_alive->do_oop(adr_sentinel_ref());
  complete_gc->do_void();
}


template <class T>
bool enqueue_discovered_ref_helper(ReferenceProcessor* ref,
                                   AbstractRefProcTaskExecutor* task_executor) {

  // Remember old value of pending references list
  T* pending_list_addr = (T*)java_lang_ref_Reference::pending_list_addr();
  T old_pending_list_value = *pending_list_addr;

  // Enqueue references that are not made active again, and
  // clear the decks for the next collection (cycle).
  ref->enqueue_discovered_reflists((HeapWord*)pending_list_addr, task_executor);
  // Do the oop-check on pending_list_addr missed in
  // enqueue_discovered_reflist. We should probably
  // do a raw oop_check so that future such idempotent
  // oop_stores relying on the oop-check side-effect
  // may be elided automatically and safely without
  // affecting correctness.
  oop_store(pending_list_addr, oopDesc::load_decode_heap_oop(pending_list_addr));

  // Stop treating discovered references specially.
  ref->disable_discovery();

  // Return true if new pending references were added
  return old_pending_list_value != *pending_list_addr;
}

bool ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor) {
  NOT_PRODUCT(verify_ok_to_handle_reflists());
  if (UseCompressedOops) {
    return enqueue_discovered_ref_helper<narrowOop>(this, task_executor);
  } else {
    return enqueue_discovered_ref_helper<oop>(this, task_executor);
  }
}

void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list,
                                                    HeapWord* pending_list_addr) {
  // Given a list of refs linked through the "discovered" field
  // (java.lang.ref.Reference.discovered) chain them through the
  // "next" field (java.lang.ref.Reference.next) and prepend
  // to the pending list.
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr("ReferenceProcessor::enqueue_discovered_reflist list "
                           INTPTR_FORMAT, (address)refs_list.head());
  }
  oop obj = refs_list.head();
  // Walk down the list, copying the discovered field into
  // the next field and clearing it (except for the last
  // non-sentinel object which is treated specially to avoid
  // confusion with an active reference).
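  // Illustrative sketch: a discovered list R1 -> R2 -> R3 -> sentinel
  // (linked through 'discovered') becomes R1.next = R2, R2.next = R3,
  // and R3.next = <old pending-list head, or R3 itself if the pending
  // list was empty>, with every 'discovered' field reset to NULL.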
  while (obj != sentinel_ref()) {
    assert(obj->is_instanceRef(), "should be reference object");
    oop next = java_lang_ref_Reference::discovered(obj);
    if (TraceReferenceGC && PrintGCDetails) {
      gclog_or_tty->print_cr(" obj " INTPTR_FORMAT "/next " INTPTR_FORMAT,
                             obj, next);
    }
    assert(java_lang_ref_Reference::next(obj) == NULL,
           "The reference should not be enqueued");
    if (next == sentinel_ref()) {  // obj is last
      // Swap refs_list into pending_list_addr and
      // set obj's next to what we read from pending_list_addr.
      oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
      // Need oop_check on pending_list_addr above;
      // see special oop-check code at the end of
      // enqueue_discovered_reflists() further below.
      if (old == NULL) {
        // obj should be made to point to itself, since
        // pending list was empty.
        java_lang_ref_Reference::set_next(obj, obj);
      } else {
        java_lang_ref_Reference::set_next(obj, old);
      }
    } else {
      java_lang_ref_Reference::set_next(obj, next);
    }
    java_lang_ref_Reference::set_discovered(obj, (oop) NULL);
    obj = next;
  }
}

// Parallel enqueue task
class RefProcEnqueueTask: public AbstractRefProcTaskExecutor::EnqueueTask {
public:
  RefProcEnqueueTask(ReferenceProcessor& ref_processor,
                     DiscoveredList      discovered_refs[],
                     HeapWord*           pending_list_addr,
                     oop                 sentinel_ref,
                     int                 n_queues)
    : EnqueueTask(ref_processor, discovered_refs,
                  pending_list_addr, sentinel_ref, n_queues)
  { }

  virtual void work(unsigned int work_id) {
    assert(work_id < (unsigned int)_ref_processor.max_num_q(), "Index out-of-bounds");
    // Simplest first cut: static partitioning.
    int index = work_id;
    // The increment on "index" must correspond to the maximum number of queues
    // (n_queues) with which that ReferenceProcessor was created.  That
    // is because of the "clever" way the discovered references lists were
    // allocated and are indexed into.
    assert(_n_queues == (int) _ref_processor.max_num_q(), "Different number not expected");
    for (int j = 0;
         j < ReferenceProcessor::subclasses_of_ref();
         j++, index += _n_queues) {
      _ref_processor.enqueue_discovered_reflist(
        _refs_lists[index], _pending_list_addr);
      _refs_lists[index].set_head(_sentinel_ref);
      _refs_lists[index].set_length(0);
    }
  }
};

// Enqueue references that are not made active again
void ReferenceProcessor::enqueue_discovered_reflists(HeapWord*                    pending_list_addr,
                                                     AbstractRefProcTaskExecutor* task_executor) {
  if (_processing_is_mt && task_executor != NULL) {
    // Parallel code
    RefProcEnqueueTask tsk(*this, _discoveredSoftRefs,
                           pending_list_addr, sentinel_ref(), _max_num_q);
    task_executor->execute(tsk);
  } else {
    // Serial code: call the parent class's implementation
    for (int i = 0; i < _max_num_q * subclasses_of_ref(); i++) {
      enqueue_discovered_reflist(_discoveredSoftRefs[i], pending_list_addr);
      _discoveredSoftRefs[i].set_head(sentinel_ref());
      _discoveredSoftRefs[i].set_length(0);
    }
  }
}

// Iterator for the list of discovered references.
class DiscoveredListIterator {
public:
  inline DiscoveredListIterator(DiscoveredList&    refs_list,
                                OopClosure*        keep_alive,
                                BoolObjectClosure* is_alive);

  // End Of List.
  inline bool has_next() const { return _next != ReferenceProcessor::sentinel_ref(); }

  // Get oop to the Reference object.
  inline oop obj() const { return _ref; }

  // Get oop to the referent object.
  inline oop referent() const { return _referent; }

  // Returns true if referent is alive.
  inline bool is_referent_alive() const;

  // Loads data for the current reference.
  // The "allow_null_referent" argument tells us to allow for the possibility
  // of a NULL referent in the discovered Reference object. This typically
  // happens in the case of concurrent collectors that may have done the
  // discovery concurrently, or interleaved, with mutator execution.
  inline void load_ptrs(DEBUG_ONLY(bool allow_null_referent));

  // Move to the next discovered reference.
  inline void next();

  // Remove the current reference from the list
  inline void remove();

  // Make the Reference object active again.
  inline void make_active() {
    // For G1 we don't want to use set_next - it
    // will dirty the card for the next field of
    // the reference object and will fail
    // CT verification.
    if (UseG1GC) {
      BarrierSet* bs = oopDesc::bs();
      HeapWord* next_addr = java_lang_ref_Reference::next_addr(_ref);

      if (UseCompressedOops) {
        bs->write_ref_field_pre((narrowOop*)next_addr, NULL);
      } else {
        bs->write_ref_field_pre((oop*)next_addr, NULL);
      }
      java_lang_ref_Reference::set_next_raw(_ref, NULL);
    } else {
      java_lang_ref_Reference::set_next(_ref, NULL);
    }
  }

  // Make the referent alive.
  inline void make_referent_alive() {
    if (UseCompressedOops) {
      _keep_alive->do_oop((narrowOop*)_referent_addr);
    } else {
      _keep_alive->do_oop((oop*)_referent_addr);
    }
  }

  // Update the discovered field.
  inline void update_discovered() {
    // First _prev_next ref actually points into DiscoveredList (gross).
    if (UseCompressedOops) {
      _keep_alive->do_oop((narrowOop*)_prev_next);
    } else {
      _keep_alive->do_oop((oop*)_prev_next);
    }
  }

  // NULL out referent pointer.
  inline void clear_referent() { oop_store_raw(_referent_addr, NULL); }

  // Statistics
  NOT_PRODUCT(
  inline size_t processed() const { return _processed; }
  inline size_t removed() const   { return _removed; }
  )

  inline void move_to_next();

private:
  DiscoveredList&    _refs_list;
  HeapWord*          _prev_next;
  oop                _ref;
  HeapWord*          _discovered_addr;
  oop                _next;
  HeapWord*          _referent_addr;
  oop                _referent;
  OopClosure*        _keep_alive;
  BoolObjectClosure* _is_alive;

  DEBUG_ONLY(
  oop _first_seen; // cyclic linked list check
  )

  NOT_PRODUCT(
  size_t _processed;
  size_t _removed;
  )
};

inline DiscoveredListIterator::DiscoveredListIterator(DiscoveredList&    refs_list,
                                                      OopClosure*        keep_alive,
                                                      BoolObjectClosure* is_alive) :
    _refs_list(refs_list),
    _prev_next(refs_list.adr_head()),
    _ref(refs_list.head()),
#ifdef ASSERT
    _first_seen(refs_list.head()),
#endif
#ifndef PRODUCT
    _processed(0),
    _removed(0),
#endif
    _next(refs_list.head()),
    _keep_alive(keep_alive),
    _is_alive(is_alive)
{ }

inline bool DiscoveredListIterator::is_referent_alive() const {
  return _is_alive->do_object_b(_referent);
}

inline void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
  _discovered_addr = java_lang_ref_Reference::discovered_addr(_ref);
  oop discovered = java_lang_ref_Reference::discovered(_ref);
  assert(_discovered_addr && discovered->is_oop_or_null(),
         "discovered field is bad");
  _next = discovered;
  _referent_addr = java_lang_ref_Reference::referent_addr(_ref);
  _referent = java_lang_ref_Reference::referent(_ref);
  assert(Universe::heap()->is_in_reserved_or_null(_referent),
         "Wrong oop found in java.lang.Reference object");
  assert(allow_null_referent ?
             _referent->is_oop_or_null()
           : _referent->is_oop(),
         "bad referent");
}

inline void DiscoveredListIterator::next() {
  _prev_next = _discovered_addr;
  move_to_next();
}

inline void DiscoveredListIterator::remove() {
  assert(_ref->is_oop(), "Dropping a bad reference");
  oop_store_raw(_discovered_addr, NULL);
  // First _prev_next ref actually points into DiscoveredList (gross).
  if (UseCompressedOops) {
    // Remove Reference object from list.
    oopDesc::encode_store_heap_oop_not_null((narrowOop*)_prev_next, _next);
  } else {
    // Remove Reference object from list.
    oopDesc::store_heap_oop((oop*)_prev_next, _next);
  }
  NOT_PRODUCT(_removed++);
  _refs_list.dec_length(1);
}

inline void DiscoveredListIterator::move_to_next() {
  _ref = _next;
  assert(_ref != _first_seen, "cyclic ref_list found");
  NOT_PRODUCT(_processed++);
}

// NOTE: process_phase*() are largely similar, and at a high level
// merely iterate over the extant list applying a predicate to
// each of its elements and possibly removing that element from the
// list and applying some further closures to that element.
// We should consider the possibility of replacing these
// process_phase*() methods by abstracting them into
// a single general iterator invocation that receives appropriate
// closures that accomplish this work.

// (SoftReferences only) Traverse the list and remove any SoftReferences whose
// referents are not alive, but that should be kept alive for policy reasons.
// Keep alive the transitive closure of all such referents.
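// For example (illustrative): under an LRU policy, a SoftReference whose
// referent is no longer strongly reachable but was stamped recently
// (relative to the master clock and the allowed idle interval) fails
// should_clear_reference(); the loop below then removes it from the
// discovered list, re-activates it, and marks its referent
// (transitively) alive.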
void
ReferenceProcessor::process_phase1(DiscoveredList&    refs_list,
                                   ReferencePolicy*   policy,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc) {
  assert(policy != NULL, "Must have a non-NULL policy");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  // Decide which softly reachable refs should be kept alive.
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
    bool referent_is_dead = (iter.referent() != NULL) && !iter.is_referent_alive();
    if (referent_is_dead && !policy->should_clear_reference(iter.obj())) {
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Dropping reference (" INTPTR_FORMAT ": %s) by policy",
                               iter.obj(), iter.obj()->blueprint()->internal_name());
      }
      // Remove Reference object from list
      iter.remove();
      // Make the Reference object active again
      iter.make_active();
      // keep the referent around
      iter.make_referent_alive();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Close the reachable set
  complete_gc->do_void();
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC) {
      gclog_or_tty->print_cr(" Dropped %d dead Refs out of %d "
        "discovered Refs by policy list " INTPTR_FORMAT,
        iter.removed(), iter.processed(), (address)refs_list.head());
    }
  )
}

// Traverse the list and remove any Refs that are not active, or
// whose referents are either alive or NULL.
void
ReferenceProcessor::pp2_work(DiscoveredList&    refs_list,
                             BoolObjectClosure* is_alive,
                             OopClosure*        keep_alive) {
  assert(discovery_is_atomic(), "Error");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
    DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());)
    assert(next == NULL, "Should not discover inactive Reference");
    if (iter.is_referent_alive()) {
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)",
                               iter.obj(), iter.obj()->blueprint()->internal_name());
      }
      // The referent is reachable after all.
      // Remove Reference object from list.
      iter.remove();
      // Update the referent pointer as necessary: Note that this
      // should not entail any recursive marking because the
      // referent must already have been traversed.
      iter.make_referent_alive();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC && (iter.processed() > 0)) {
      gclog_or_tty->print_cr(" Dropped %d active Refs out of %d "
        "Refs in discovered list " INTPTR_FORMAT,
        iter.removed(), iter.processed(), (address)refs_list.head());
    }
  )
}

void
ReferenceProcessor::pp2_work_concurrent_discovery(DiscoveredList&    refs_list,
                                                  BoolObjectClosure* is_alive,
                                                  OopClosure*        keep_alive,
                                                  VoidClosure*       complete_gc) {
  assert(!discovery_is_atomic(), "Error");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    HeapWord* next_addr = java_lang_ref_Reference::next_addr(iter.obj());
    oop next = java_lang_ref_Reference::next(iter.obj());
    if ((iter.referent() == NULL || iter.is_referent_alive() ||
         next != NULL)) {
      assert(next->is_oop_or_null(), "bad next field");
      // Remove Reference object from list
      iter.remove();
      // Trace the cohorts
      iter.make_referent_alive();
      if (UseCompressedOops) {
        keep_alive->do_oop((narrowOop*)next_addr);
      } else {
        keep_alive->do_oop((oop*)next_addr);
      }
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Now close the newly reachable set
  complete_gc->do_void();
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC && (iter.processed() > 0)) {
      gclog_or_tty->print_cr(" Dropped %d active Refs out of %d "
        "Refs in discovered list " INTPTR_FORMAT,
        iter.removed(), iter.processed(), (address)refs_list.head());
    }
  )
}

// Traverse the list and process the referents, by either
// clearing them or keeping them (and their reachable
// closure) alive.
void
ReferenceProcessor::process_phase3(DiscoveredList&    refs_list,
                                   bool               clear_referent,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc) {
  ResourceMark rm;
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.update_discovered();
    iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
    if (clear_referent) {
      // NULL out referent pointer
      iter.clear_referent();
    } else {
      // keep the referent around
      iter.make_referent_alive();
    }
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Adding %sreference (" INTPTR_FORMAT ": %s) as pending",
                             clear_referent ?
"cleared " : "", 713 iter.obj(), iter.obj()->blueprint()->internal_name()); 714 } 715 assert(iter.obj()->is_oop(UseConcMarkSweepGC), "Adding a bad reference"); 716 iter.next(); 717 } 718 // Remember to keep sentinel pointer around 719 iter.update_discovered(); 720 // Close the reachable set 721 complete_gc->do_void(); 722 } 723 724 void 725 ReferenceProcessor::abandon_partial_discovered_list(DiscoveredList& refs_list) { 726 oop obj = refs_list.head(); 727 while (obj != sentinel_ref()) { 728 oop discovered = java_lang_ref_Reference::discovered(obj); 729 java_lang_ref_Reference::set_discovered_raw(obj, NULL); 730 obj = discovered; 731 } 732 refs_list.set_head(sentinel_ref()); 733 refs_list.set_length(0); 734 } 735 736 void ReferenceProcessor::abandon_partial_discovery() { 737 // loop over the lists 738 for (int i = 0; i < _max_num_q * subclasses_of_ref(); i++) { 739 if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) { 740 gclog_or_tty->print_cr("\nAbandoning %s discovered list", list_name(i)); 741 } 742 abandon_partial_discovered_list(_discoveredSoftRefs[i]); 743 } 744 } 745 746 class RefProcPhase1Task: public AbstractRefProcTaskExecutor::ProcessTask { 747 public: 748 RefProcPhase1Task(ReferenceProcessor& ref_processor, 749 DiscoveredList refs_lists[], 750 ReferencePolicy* policy, 751 bool marks_oops_alive) 752 : ProcessTask(ref_processor, refs_lists, marks_oops_alive), 753 _policy(policy) 754 { } 755 virtual void work(unsigned int i, BoolObjectClosure& is_alive, 756 OopClosure& keep_alive, 757 VoidClosure& complete_gc) 758 { 759 Thread* thr = Thread::current(); 760 int refs_list_index = ((WorkerThread*)thr)->id(); 761 _ref_processor.process_phase1(_refs_lists[refs_list_index], _policy, 762 &is_alive, &keep_alive, &complete_gc); 763 } 764 private: 765 ReferencePolicy* _policy; 766 }; 767 768 class RefProcPhase2Task: public AbstractRefProcTaskExecutor::ProcessTask { 769 public: 770 RefProcPhase2Task(ReferenceProcessor& ref_processor, 771 DiscoveredList refs_lists[], 772 bool marks_oops_alive) 773 : ProcessTask(ref_processor, refs_lists, marks_oops_alive) 774 { } 775 virtual void work(unsigned int i, BoolObjectClosure& is_alive, 776 OopClosure& keep_alive, 777 VoidClosure& complete_gc) 778 { 779 _ref_processor.process_phase2(_refs_lists[i], 780 &is_alive, &keep_alive, &complete_gc); 781 } 782 }; 783 784 class RefProcPhase3Task: public AbstractRefProcTaskExecutor::ProcessTask { 785 public: 786 RefProcPhase3Task(ReferenceProcessor& ref_processor, 787 DiscoveredList refs_lists[], 788 bool clear_referent, 789 bool marks_oops_alive) 790 : ProcessTask(ref_processor, refs_lists, marks_oops_alive), 791 _clear_referent(clear_referent) 792 { } 793 virtual void work(unsigned int i, BoolObjectClosure& is_alive, 794 OopClosure& keep_alive, 795 VoidClosure& complete_gc) 796 { 797 // Don't use "refs_list_index" calculated in this way because 798 // balance_queues() has moved the Ref's into the first n queues. 799 // Thread* thr = Thread::current(); 800 // int refs_list_index = ((WorkerThread*)thr)->id(); 801 // _ref_processor.process_phase3(_refs_lists[refs_list_index], _clear_referent, 802 _ref_processor.process_phase3(_refs_lists[i], _clear_referent, 803 &is_alive, &keep_alive, &complete_gc); 804 } 805 private: 806 bool _clear_referent; 807 }; 808 809 // Balances reference queues. 810 // Move entries from all queues[0, 1, ..., _max_num_q-1] to 811 // queues[0, 1, ..., _num_q-1] because only the first _num_q 812 // corresponding to the active workers will be processed. 
void ReferenceProcessor::balance_queues(DiscoveredList ref_lists[])
{
  // calculate total length
  size_t total_refs = 0;
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr("\nBalance ref_lists ");
  }

  for (int i = 0; i < _max_num_q; ++i) {
    total_refs += ref_lists[i].length();
    if (TraceReferenceGC && PrintGCDetails) {
      gclog_or_tty->print("%d ", ref_lists[i].length());
    }
  }
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr(" = %d", total_refs);
  }
  size_t avg_refs = total_refs / _num_q + 1;
  int to_idx = 0;
  for (int from_idx = 0; from_idx < _max_num_q; from_idx++) {
    bool move_all = false;
    if (from_idx >= _num_q) {
      move_all = ref_lists[from_idx].length() > 0;
    }
    while ((ref_lists[from_idx].length() > avg_refs) ||
           move_all) {
      assert(to_idx < _num_q, "Sanity Check!");
      if (ref_lists[to_idx].length() < avg_refs) {
        // move superfluous refs
        size_t refs_to_move;
        // Move all the Ref's if the from queue will not be processed.
        if (move_all) {
          refs_to_move = MIN2(ref_lists[from_idx].length(),
                              avg_refs - ref_lists[to_idx].length());
        } else {
          refs_to_move = MIN2(ref_lists[from_idx].length() - avg_refs,
                              avg_refs - ref_lists[to_idx].length());
        }
        oop move_head = ref_lists[from_idx].head();
        oop move_tail = move_head;
        oop new_head  = move_head;
        // find an element to split the list on
        for (size_t j = 0; j < refs_to_move; ++j) {
          move_tail = new_head;
          new_head = java_lang_ref_Reference::discovered(new_head);
        }

        if (_discovered_list_needs_barrier) {
          java_lang_ref_Reference::set_discovered(move_tail, ref_lists[to_idx].head());
        } else {
          HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(move_tail);
          oop_store_raw(discovered_addr, ref_lists[to_idx].head());
        }

        ref_lists[to_idx].set_head(move_head);
        ref_lists[to_idx].inc_length(refs_to_move);
        ref_lists[from_idx].set_head(new_head);
        ref_lists[from_idx].dec_length(refs_to_move);
        if (ref_lists[from_idx].length() == 0) {
          break;
        }
      } else {
        to_idx = (to_idx + 1) % _num_q;
      }
    }
  }
#ifdef ASSERT
  size_t balanced_total_refs = 0;
  for (int i = 0; i < _max_num_q; ++i) {
    balanced_total_refs += ref_lists[i].length();
    if (TraceReferenceGC && PrintGCDetails) {
      gclog_or_tty->print("%d ", ref_lists[i].length());
    }
  }
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr(" = %d", balanced_total_refs);
    gclog_or_tty->flush();
  }
  assert(total_refs == balanced_total_refs, "Balancing was incomplete");
#endif
}

void ReferenceProcessor::balance_all_queues() {
  balance_queues(_discoveredSoftRefs);
  balance_queues(_discoveredWeakRefs);
  balance_queues(_discoveredFinalRefs);
  balance_queues(_discoveredPhantomRefs);
}

void
ReferenceProcessor::process_discovered_reflist(
  DiscoveredList               refs_lists[],
  ReferencePolicy*             policy,
  bool                         clear_referent,
  BoolObjectClosure*           is_alive,
  OopClosure*                  keep_alive,
  VoidClosure*                 complete_gc,
  AbstractRefProcTaskExecutor* task_executor)
{
  bool mt_processing = task_executor != NULL && _processing_is_mt;
  // If discovery used MT and a dynamic number of GC threads, then
  // the queues must be balanced for correctness if fewer than the
  // maximum number of queues were used.
  // The number of queues used
  // during discovery may be different than the number to be used
  // for processing, so don't depend on _num_q < _max_num_q as part
  // of the test.
  bool must_balance = _discovery_is_mt;

  if ((mt_processing && ParallelRefProcBalancingEnabled) ||
      must_balance) {
    balance_queues(refs_lists);
  }
  if (PrintReferenceGC && PrintGCDetails) {
    size_t total = 0;
    for (int i = 0; i < _max_num_q; ++i) {
      total += refs_lists[i].length();
    }
    gclog_or_tty->print(", " SIZE_FORMAT " refs", total);
  }

  // Phase 1 (soft refs only):
  // . Traverse the list and remove any SoftReferences whose
  //   referents are not alive, but that should be kept alive for
  //   policy reasons. Keep alive the transitive closure of all
  //   such referents.
  if (policy != NULL) {
    if (mt_processing) {
      RefProcPhase1Task phase1(*this, refs_lists, policy, true /*marks_oops_alive*/);
      task_executor->execute(phase1);
    } else {
      for (int i = 0; i < _max_num_q; i++) {
        process_phase1(refs_lists[i], policy,
                       is_alive, keep_alive, complete_gc);
      }
    }
  } else { // policy == NULL
    assert(refs_lists != _discoveredSoftRefs,
           "Policy must be specified for soft references.");
  }

  // Phase 2:
  // . Traverse the list and remove any refs whose referents are alive.
  if (mt_processing) {
    RefProcPhase2Task phase2(*this, refs_lists, !discovery_is_atomic() /*marks_oops_alive*/);
    task_executor->execute(phase2);
  } else {
    for (int i = 0; i < _max_num_q; i++) {
      process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc);
    }
  }

  // Phase 3:
  // . Traverse the list and process referents as appropriate.
  if (mt_processing) {
    RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/);
    task_executor->execute(phase3);
  } else {
    for (int i = 0; i < _max_num_q; i++) {
      process_phase3(refs_lists[i], clear_referent,
                     is_alive, keep_alive, complete_gc);
    }
  }
}

void ReferenceProcessor::clean_up_discovered_references() {
  // loop over the lists
  // Should this instead be
  // for (int i = 0; i < subclasses_of_ref(); i++) {
  //   for (int j = 0; j < _num_q; j++) {
  //     int index = i * _max_num_q + j;
  for (int i = 0; i < _max_num_q * subclasses_of_ref(); i++) {
    if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
      gclog_or_tty->print_cr(
        "\nScrubbing %s discovered list of Null referents",
        list_name(i));
    }
    clean_up_discovered_reflist(_discoveredSoftRefs[i]);
  }
}

void ReferenceProcessor::clean_up_discovered_reflist(DiscoveredList& refs_list) {
  assert(!discovery_is_atomic(), "Else why call this method?");
  DiscoveredListIterator iter(refs_list, NULL, NULL);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    oop next = java_lang_ref_Reference::next(iter.obj());
    assert(next->is_oop_or_null(), "bad next field");
    // If referent has been cleared or Reference is not active,
    // drop it.
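    // (Both cases can arise here precisely because discovery was
    // concurrent with, or interleaved with, mutator execution: the
    // referent may have been cleared, or the Reference enqueued,
    // after it was discovered.)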
    if (iter.referent() == NULL || next != NULL) {
      debug_only(
        if (PrintGCDetails && TraceReferenceGC) {
          gclog_or_tty->print_cr("clean_up_discovered_list: Dropping Reference: "
            INTPTR_FORMAT " with next field: " INTPTR_FORMAT
            " and referent: " INTPTR_FORMAT,
            iter.obj(), next, iter.referent());
        }
      )
      // Remove Reference object from list
      iter.remove();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC) {
      gclog_or_tty->print(
        " Removed %d Refs with NULL referents out of %d discovered Refs",
        iter.removed(), iter.processed());
    }
  )
}

inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt) {
  int id = 0;
  // Determine the queue index to use for this object.
  if (_discovery_is_mt) {
    // During a multi-threaded discovery phase,
    // each thread saves to its "own" list.
    Thread* thr = Thread::current();
    id = thr->as_Worker_thread()->id();
  } else {
    // single-threaded discovery, we save in round-robin
    // fashion to each of the lists.
    if (_processing_is_mt) {
      id = next_id();
    }
  }
  assert(0 <= id && id < _max_num_q, "Id is out-of-bounds (call Freud?)");

  // Get the discovered queue to which we will add
  DiscoveredList* list = NULL;
  switch (rt) {
    case REF_OTHER:
      // Unknown reference type, no special treatment
      break;
    case REF_SOFT:
      list = &_discoveredSoftRefs[id];
      break;
    case REF_WEAK:
      list = &_discoveredWeakRefs[id];
      break;
    case REF_FINAL:
      list = &_discoveredFinalRefs[id];
      break;
    case REF_PHANTOM:
      list = &_discoveredPhantomRefs[id];
      break;
    case REF_NONE:
      // we should not reach here if we are an instanceRefKlass
    default:
      ShouldNotReachHere();
  }
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr("Thread %d gets list " INTPTR_FORMAT, id, list);
  }
  return list;
}

inline void
ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
                                              oop             obj,
                                              HeapWord*       discovered_addr) {
  assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller");
  // First we must make sure this object is only enqueued once. CAS in a non null
  // discovered_addr.
  oop current_head = refs_list.head();

  // Note: In the case of G1, this specific pre-barrier is strictly
  // not necessary because the only case we are interested in
  // here is when *discovered_addr is NULL (see the CAS further below),
  // so this will expand to nothing. As a result, we have manually
  // elided this out for G1, but left in the test for some future
  // collector that might have need for a pre-barrier here.
  if (_discovered_list_needs_barrier && !UseG1GC) {
    if (UseCompressedOops) {
      _bs->write_ref_field_pre((narrowOop*)discovered_addr, current_head);
    } else {
      _bs->write_ref_field_pre((oop*)discovered_addr, current_head);
    }
    guarantee(false, "Need to check non-G1 collector");
  }
  oop retest = oopDesc::atomic_compare_exchange_oop(current_head, discovered_addr,
                                                    NULL);
  if (retest == NULL) {
    // This thread just won the right to enqueue the object.
    // We have separate lists for enqueueing so no synchronization
    // is necessary.
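    // (The winning CAS stored the old list head into obj's 'discovered'
    // field, so completing the push below only requires updating this
    // thread's own list head; losing threads simply skip the object.)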
    refs_list.set_head(obj);
    refs_list.inc_length(1);
    if (_discovered_list_needs_barrier) {
      _bs->write_ref_field((void*)discovered_addr, current_head);
    }

    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Enqueued reference (mt) (" INTPTR_FORMAT ": %s)",
                             obj, obj->blueprint()->internal_name());
    }
  } else {
    // If retest was non NULL, another thread beat us to it:
    // The reference has already been discovered...
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Already enqueued reference (" INTPTR_FORMAT ": %s)",
                             obj, obj->blueprint()->internal_name());
    }
  }
}

#ifndef PRODUCT
// Non-atomic (i.e. concurrent) discovery might allow us
// to observe j.l.References with NULL referents, being those
// cleared concurrently by mutators during (or after) discovery.
void ReferenceProcessor::verify_referent(oop obj) {
  bool da = discovery_is_atomic();
  oop referent = java_lang_ref_Reference::referent(obj);
  assert(da ? referent->is_oop() : referent->is_oop_or_null(),
         err_msg("Bad referent " INTPTR_FORMAT " found in Reference "
                 INTPTR_FORMAT " during %satomic discovery ",
                 (intptr_t)referent, (intptr_t)obj, da ? "" : "non-"));
}
#endif

// We mention two of several possible choices here:
// #0: if the reference object is not in the "originating generation"
//     (or part of the heap being collected, indicated by our "span")
//     we don't treat it specially (i.e. we scan it as we would
//     a normal oop, treating its references as strong references).
//     This means that references can't be enqueued unless their
//     referent is also in the same span. This is the simplest,
//     most "local" and most conservative approach, albeit one
//     that may cause weak references to be enqueued least promptly.
//     We call this choice the "ReferenceBasedDiscovery" policy.
// #1: the reference object may be in any generation (span), but if
//     the referent is in the generation (span) being currently collected
//     then we can discover the reference object, provided
//     the object has not already been discovered by
//     a different concurrently running collector (as may be the
//     case, for instance, if the reference object is in CMS and
//     the referent in DefNewGeneration), and provided the processing
//     of this reference object by the current collector will
//     appear atomic to every other collector in the system.
//     (Thus, for instance, a concurrent collector may not
//     discover references in other generations even if the
//     referent is in its own generation). This policy may,
//     in certain cases, enqueue references somewhat sooner than
//     might Policy #0 above, but at marginally increased cost
//     and complexity in processing these references.
//     We call this choice the "ReferentBasedDiscovery" policy.
bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
  // We enqueue references only if we are discovering refs
  // (rather than processing discovered refs).
  if (!_discovering_refs || !RegisterReferences) {
    return false;
  }
  // We only enqueue active references.
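  // (A Reference is active precisely when its 'next' field is NULL; the
  // field becomes non-NULL once the reference has been enqueued, so
  // inactive references are skipped here.)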
  oop next = java_lang_ref_Reference::next(obj);
  if (next != NULL) {
    return false;
  }

  HeapWord* obj_addr = (HeapWord*)obj;
  if (RefDiscoveryPolicy == ReferenceBasedDiscovery &&
      !_span.contains(obj_addr)) {
    // Reference is not in the originating generation;
    // don't treat it specially (i.e. we want to scan it as a normal
    // object with strong references).
    return false;
  }

  // We only enqueue references whose referents are not (yet) strongly
  // reachable.
  if (is_alive_non_header() != NULL) {
    verify_referent(obj);
    if (is_alive_non_header()->do_object_b(java_lang_ref_Reference::referent(obj))) {
      return false;  // referent is reachable
    }
  }
  if (rt == REF_SOFT) {
    // For soft refs we can decide now if these are not
    // current candidates for clearing, in which case we
    // can mark through them now, rather than delaying that
    // to the reference-processing phase. Since all current
    // time-stamp policies advance the soft-ref clock only
    // at a major collection cycle, this is always currently
    // accurate.
    if (!_current_soft_ref_policy->should_clear_reference(obj)) {
      return false;
    }
  }

  ResourceMark rm;      // Needed for tracing.

  HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
  const oop  discovered = java_lang_ref_Reference::discovered(obj);
  assert(discovered->is_oop_or_null(), "bad discovered field");
  if (discovered != NULL) {
    // The reference has already been discovered...
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Already enqueued reference (" INTPTR_FORMAT ": %s)",
                             obj, obj->blueprint()->internal_name());
    }
    if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
      // assumes that an object is not processed twice;
      // if it's been already discovered it must be on another
      // generation's discovered list; so we won't discover it.
      return false;
    } else {
      assert(RefDiscoveryPolicy == ReferenceBasedDiscovery,
             "Unrecognized policy");
      // Check assumption that an object is not potentially
      // discovered twice except by concurrent collectors that potentially
      // trace the same Reference object twice.
      assert(UseConcMarkSweepGC || UseG1GC,
             "Only possible with a concurrent marking collector");
      return true;
    }
  }

  if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
    verify_referent(obj);
    // enqueue if and only if either:
    // reference is in our span or
    // we are an atomic collector and referent is in our span
    if (_span.contains(obj_addr) ||
        (discovery_is_atomic() &&
         _span.contains(java_lang_ref_Reference::referent(obj)))) {
      // should_enqueue = true;
    } else {
      return false;
    }
  } else {
    assert(RefDiscoveryPolicy == ReferenceBasedDiscovery &&
           _span.contains(obj_addr), "code inconsistency");
  }

  // Get the right type of discovered queue head.
  DiscoveredList* list = get_discovered_list(rt);
  if (list == NULL) {
    return false;   // nothing special needs to be done
  }

  if (_discovery_is_mt) {
    add_to_discovered_list_mt(*list, obj, discovered_addr);
  } else {
    // If "_discovered_list_needs_barrier", we do write barriers when
    // updating the discovered reference list.
    // Otherwise, we do a raw store
    // here: the field will be visited later when processing the discovered
    // references.
    oop current_head = list->head();
    // As in the case further above, since we are over-writing a NULL
    // pre-value, we can safely elide the pre-barrier here for the case of G1.
    assert(discovered == NULL, "control point invariant");
    if (_discovered_list_needs_barrier && !UseG1GC) { // safe to elide for G1
      if (UseCompressedOops) {
        _bs->write_ref_field_pre((narrowOop*)discovered_addr, current_head);
      } else {
        _bs->write_ref_field_pre((oop*)discovered_addr, current_head);
      }
      guarantee(false, "Need to check non-G1 collector");
    }
    oop_store_raw(discovered_addr, current_head);
    if (_discovered_list_needs_barrier) {
      _bs->write_ref_field((void*)discovered_addr, current_head);
    }
    list->set_head(obj);
    list->inc_length(1);

    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Enqueued reference (" INTPTR_FORMAT ": %s)",
                             obj, obj->blueprint()->internal_name());
    }
  }
  assert(obj->is_oop(), "Enqueued a bad reference");
  verify_referent(obj);
  return true;
}

// Preclean the discovered references by removing those
// whose referents are alive, and by marking from those that
// are not active. These lists can be handled here
// in any order and, indeed, concurrently.
void ReferenceProcessor::preclean_discovered_references(
  BoolObjectClosure* is_alive,
  OopClosure*        keep_alive,
  VoidClosure*       complete_gc,
  YieldClosure*      yield,
  bool               should_unload_classes) {

  NOT_PRODUCT(verify_ok_to_handle_reflists());

#ifdef ASSERT
  bool must_remember_klasses = ClassUnloading && !UseConcMarkSweepGC ||
                               CMSClassUnloadingEnabled && UseConcMarkSweepGC ||
                               ExplicitGCInvokesConcurrentAndUnloadsClasses &&
                                 UseConcMarkSweepGC && should_unload_classes;
  RememberKlassesChecker mx(must_remember_klasses);
#endif
  // Soft references
  {
    TraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (int i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredSoftRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Weak references
  {
    TraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (int i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredWeakRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Final references
  {
    TraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (int i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredFinalRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Phantom references
  {
    TraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (int i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredPhantomRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }
}

// Walk the given discovered ref list, and
// remove all reference objects whose referents are still alive, whose
// referents are NULL or which are not active (have a non-NULL next field).
// NOTE: When we are thus precleaning the ref lists (which happens
// single-threaded today), we do not disable refs discovery to honour the
// correct semantics of java.lang.Reference. As a result, we need to be
// careful below that ref removal steps interleave safely with ref
// discovery steps (in this thread).
void
ReferenceProcessor::preclean_discovered_reflist(DiscoveredList&    refs_list,
                                                BoolObjectClosure* is_alive,
                                                OopClosure*        keep_alive,
                                                VoidClosure*       complete_gc,
                                                YieldClosure*      yield) {
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    oop obj = iter.obj();
    oop next = java_lang_ref_Reference::next(obj);
    if (iter.referent() == NULL || iter.is_referent_alive() ||
        next != NULL) {
      // The referent has been cleared, or is alive, or the Reference is not
      // active; we need to trace and mark its cohort.
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Precleaning Reference (" INTPTR_FORMAT ": %s)",
                               iter.obj(), iter.obj()->blueprint()->internal_name());
      }
      // Remove Reference object from list
      iter.remove();
      // Keep alive its cohort.
      iter.make_referent_alive();
      if (UseCompressedOops) {
        narrowOop* next_addr = (narrowOop*)java_lang_ref_Reference::next_addr(obj);
        keep_alive->do_oop(next_addr);
      } else {
        oop* next_addr = (oop*)java_lang_ref_Reference::next_addr(obj);
        keep_alive->do_oop(next_addr);
      }
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Close the reachable set
  complete_gc->do_void();

  NOT_PRODUCT(
    if (PrintGCDetails && PrintReferenceGC && (iter.processed() > 0)) {
      gclog_or_tty->print_cr(" Dropped %d Refs out of %d "
        "Refs in discovered list " INTPTR_FORMAT,
        iter.removed(), iter.processed(), (address)refs_list.head());
    }
  )
}

const char* ReferenceProcessor::list_name(int i) {
  assert(i >= 0 && i < _max_num_q * subclasses_of_ref(), "Out of bounds index");
  int j = i / _max_num_q;
  switch (j) {
    case 0: return "SoftRef";
    case 1: return "WeakRef";
    case 2: return "FinalRef";
    case 3: return "PhantomRef";
  }
  ShouldNotReachHere();
  return NULL;
}

#ifndef PRODUCT
void ReferenceProcessor::verify_ok_to_handle_reflists() {
  // empty for now
}
#endif

void ReferenceProcessor::verify() {
  guarantee(sentinel_ref() != NULL && sentinel_ref()->is_oop(), "Lost _sentinelRef");
}

#ifndef PRODUCT
void ReferenceProcessor::clear_discovered_references() {
  guarantee(!_discovering_refs, "Discovering refs?");
  for (int i = 0; i < _max_num_q * subclasses_of_ref(); i++) {
    oop obj = _discoveredSoftRefs[i].head();
    while (obj != sentinel_ref()) {
      oop next = java_lang_ref_Reference::discovered(obj);
      java_lang_ref_Reference::set_discovered(obj, (oop) NULL);
      obj = next;
    }
    _discoveredSoftRefs[i].set_head(sentinel_ref());
    _discoveredSoftRefs[i].set_length(0);
  }
}
#endif // PRODUCT