/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_referenceProcessor.cpp.incl"

ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy = NULL;
oop ReferenceProcessor::_sentinelRef = NULL;
const int subclasses_of_ref = REF_PHANTOM - REF_OTHER;

// List of discovered references.
class DiscoveredList {
public:
  DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
  oop head() const {
    return UseCompressedOops ? oopDesc::decode_heap_oop_not_null(_compressed_head) :
                               _oop_head;
  }
  HeapWord* adr_head() {
    return UseCompressedOops ? (HeapWord*)&_compressed_head :
                               (HeapWord*)&_oop_head;
  }
  void set_head(oop o) {
    if (UseCompressedOops) {
      // Must compress the head ptr.
      _compressed_head = oopDesc::encode_heap_oop_not_null(o);
    } else {
      _oop_head = o;
    }
  }
  bool empty() const { return head() == ReferenceProcessor::sentinel_ref(); }
  size_t length() { return _len; }
  void set_length(size_t len) { _len = len; }
  void inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
  void dec_length(size_t dec) { _len -= dec; }
private:
  // Set value depending on UseCompressedOops. This could be a template class
  // but then we have to fix all the instantiations and declarations that use this class.
  oop       _oop_head;
  narrowOop _compressed_head;
  size_t    _len;
};

void referenceProcessor_init() {
  ReferenceProcessor::init_statics();
}

void ReferenceProcessor::init_statics() {
  assert(_sentinelRef == NULL, "should be initialized precisely once");
  EXCEPTION_MARK;
  _sentinelRef = instanceKlass::cast(
                   SystemDictionary::Reference_klass())->
                   allocate_permanent_instance(THREAD);

  // Initialize the master soft ref clock.
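  // (Editorial note: the master clock lives in the static
  // java.lang.ref.SoftReference.clock field; LRU-style policies compare it
  // against each SoftReference's per-instance timestamp to estimate how long
  // a softly reachable referent has gone unused.)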
  java_lang_ref_SoftReference::set_clock(os::javaTimeMillis());

  if (HAS_PENDING_EXCEPTION) {
    Handle ex(THREAD, PENDING_EXCEPTION);
    vm_exit_during_initialization(ex);
  }
  assert(_sentinelRef != NULL && _sentinelRef->is_oop(),
         "Just constructed it!");
  _always_clear_soft_ref_policy = new AlwaysClearPolicy();
  _default_soft_ref_policy      = new COMPILER2_PRESENT(LRUMaxHeapPolicy())
                                      NOT_COMPILER2(LRUCurrentHeapPolicy());
  if (_always_clear_soft_ref_policy == NULL || _default_soft_ref_policy == NULL) {
    vm_exit_during_initialization("Could not allocate reference policy object");
  }
  guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery ||
            RefDiscoveryPolicy == ReferentBasedDiscovery,
            "Unrecognized RefDiscoveryPolicy");
}

ReferenceProcessor*
ReferenceProcessor::create_ref_processor(MemRegion          span,
                                         bool               atomic_discovery,
                                         bool               mt_discovery,
                                         BoolObjectClosure* is_alive_non_header,
                                         int                parallel_gc_threads,
                                         bool               mt_processing,
                                         bool               dl_needs_barrier) {
  int mt_degree = 1;
  if (parallel_gc_threads > 1) {
    mt_degree = parallel_gc_threads;
  }
  ReferenceProcessor* rp =
    new ReferenceProcessor(span, atomic_discovery,
                           mt_discovery, mt_degree,
                           mt_processing && (parallel_gc_threads > 0),
                           dl_needs_barrier);
  if (rp == NULL) {
    vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
  }
  rp->set_is_alive_non_header(is_alive_non_header);
  rp->setup_policy(false /* default soft ref policy */);
  return rp;
}

ReferenceProcessor::ReferenceProcessor(MemRegion span,
                                       bool      atomic_discovery,
                                       bool      mt_discovery,
                                       int       mt_degree,
                                       bool      mt_processing,
                                       bool      discovered_list_needs_barrier) :
  _discovering_refs(false),
  _enqueuing_is_done(false),
  _is_alive_non_header(NULL),
  _discovered_list_needs_barrier(discovered_list_needs_barrier),
  _bs(NULL),
  _processing_is_mt(mt_processing),
  _next_id(0)
{
  _span = span;
  _discovery_is_atomic = atomic_discovery;
  _discovery_is_mt     = mt_discovery;
  _num_q               = mt_degree;
  _max_num_q           = mt_degree;
  _discoveredSoftRefs = NEW_C_HEAP_ARRAY(DiscoveredList, _max_num_q * subclasses_of_ref);
  if (_discoveredSoftRefs == NULL) {
    vm_exit_during_initialization("Could not allocate RefProc Array");
  }
  _discoveredWeakRefs    = &_discoveredSoftRefs[_max_num_q];
  _discoveredFinalRefs   = &_discoveredWeakRefs[_max_num_q];
  _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_q];
  assert(sentinel_ref() != NULL, "_sentinelRef is NULL");
  // Initialize all entries to _sentinelRef
  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
    _discoveredSoftRefs[i].set_head(sentinel_ref());
    _discoveredSoftRefs[i].set_length(0);
  }
  // If we do barriers, cache a copy of the barrier set.
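  // (Editorial note: collectors whose discovered lists need remembered-set
  // maintenance construct us with discovered_list_needs_barrier = true;
  // caching the barrier set once here avoids re-fetching it from the heap
  // on every discovered-list update.)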
  if (discovered_list_needs_barrier) {
    _bs = Universe::heap()->barrier_set();
  }
}

#ifndef PRODUCT
void ReferenceProcessor::verify_no_references_recorded() {
  guarantee(!_discovering_refs, "Discovering refs?");
  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
    guarantee(_discoveredSoftRefs[i].empty(),
              "Found non-empty discovered list");
  }
}
#endif

void ReferenceProcessor::weak_oops_do(OopClosure* f) {
  // Should this instead be
  // for (int i = 0; i < subclasses_of_ref; i++) {
  //   for (int j = 0; j < _num_q; j++) {
  //     int index = i * _max_num_q + j;
  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
    if (UseCompressedOops) {
      f->do_oop((narrowOop*)_discoveredSoftRefs[i].adr_head());
    } else {
      f->do_oop((oop*)_discoveredSoftRefs[i].adr_head());
    }
  }
}

void ReferenceProcessor::oops_do(OopClosure* f) {
  f->do_oop(adr_sentinel_ref());
}

void ReferenceProcessor::update_soft_ref_master_clock() {
  // Update (advance) the soft ref master clock field. This must be done
  // after processing the soft ref list.
  jlong now = os::javaTimeMillis();
  jlong clock = java_lang_ref_SoftReference::clock();
  NOT_PRODUCT(
    if (now < clock) {
      warning("time warp: " INT64_FORMAT " to " INT64_FORMAT, clock, now);
    }
  )
  // In product mode, protect ourselves from system time being adjusted
  // externally and going backward; see note in the implementation of
  // GenCollectedHeap::time_since_last_gc() for the right way to fix
  // this uniformly throughout the VM; see bug-id 4741166. XXX
  if (now > clock) {
    java_lang_ref_SoftReference::set_clock(now);
  }
  // Else leave clock stalled at its old value until time progresses
  // past clock value.
}

void ReferenceProcessor::process_discovered_references(
  BoolObjectClosure*           is_alive,
  OopClosure*                  keep_alive,
  VoidClosure*                 complete_gc,
  AbstractRefProcTaskExecutor* task_executor) {
  NOT_PRODUCT(verify_ok_to_handle_reflists());

  assert(!enqueuing_is_done(), "If here enqueuing should not be complete");
  // Stop treating discovered references specially.
  disable_discovery();

  bool trace_time = PrintGCDetails && PrintReferenceGC;
  // Soft references
  {
    TraceTime tt("SoftReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  update_soft_ref_master_clock();

  // Weak references
  {
    TraceTime tt("WeakReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredWeakRefs, NULL, true,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  // Final references
  {
    TraceTime tt("FinalReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredFinalRefs, NULL, false,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  // Phantom references
  {
    TraceTime tt("PhantomReference", trace_time, false, gclog_or_tty);
    process_discovered_reflist(_discoveredPhantomRefs, NULL, false,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  // Weak global JNI references. It would make more sense (semantically) to
  // traverse these simultaneously with the regular weak references above, but
  // that is not how the JDK 1.2 specification is. See #4126360. Native code can
  // thus use JNI weak references to circumvent the phantom references and
  // resurrect a "post-mortem" object.
  {
    TraceTime tt("JNI Weak Reference", trace_time, false, gclog_or_tty);
    if (task_executor != NULL) {
      task_executor->set_single_threaded_mode();
    }
    process_phaseJNI(is_alive, keep_alive, complete_gc);
  }
}

#ifndef PRODUCT
// Calculate the number of jni handles.
uint ReferenceProcessor::count_jni_refs() {
  class AlwaysAliveClosure: public BoolObjectClosure {
  public:
    virtual bool do_object_b(oop obj) { return true; }
    virtual void do_object(oop obj) { assert(false, "Don't call"); }
  };

  class CountHandleClosure: public OopClosure {
  private:
    int _count;
  public:
    CountHandleClosure(): _count(0) {}
    void do_oop(oop* unused)       { _count++; }
    void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
    int count() { return _count; }
  };
  CountHandleClosure global_handle_count;
  AlwaysAliveClosure always_alive;
  JNIHandles::weak_oops_do(&always_alive, &global_handle_count);
  return global_handle_count.count();
}
#endif

void ReferenceProcessor::process_phaseJNI(BoolObjectClosure* is_alive,
                                          OopClosure*        keep_alive,
                                          VoidClosure*       complete_gc) {
#ifndef PRODUCT
  if (PrintGCDetails && PrintReferenceGC) {
    unsigned int count = count_jni_refs();
    gclog_or_tty->print(", %u refs", count);
  }
#endif
  JNIHandles::weak_oops_do(is_alive, keep_alive);
  // Finally remember to keep sentinel around
  keep_alive->do_oop(adr_sentinel_ref());
  complete_gc->do_void();
}


template <class T>
bool enqueue_discovered_ref_helper(ReferenceProcessor* ref,
                                   AbstractRefProcTaskExecutor* task_executor) {

  // Remember old value of pending references list
  T* pending_list_addr = (T*)java_lang_ref_Reference::pending_list_addr();
  T old_pending_list_value = *pending_list_addr;

  // Enqueue references that are not made active again, and
  // clear the decks for the next collection (cycle).
  ref->enqueue_discovered_reflists((HeapWord*)pending_list_addr, task_executor);
  // Do the oop-check on pending_list_addr missed in
  // enqueue_discovered_reflist. We should probably
  // do a raw oop_check so that future such idempotent
  // oop_stores relying on the oop-check side-effect
  // may be elided automatically and safely without
  // affecting correctness.
  oop_store(pending_list_addr, oopDesc::load_decode_heap_oop(pending_list_addr));

  // Stop treating discovered references specially.
  ref->disable_discovery();

  // Return true if new pending references were added
  return old_pending_list_value != *pending_list_addr;
}

bool ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor) {
  NOT_PRODUCT(verify_ok_to_handle_reflists());
  if (UseCompressedOops) {
    return enqueue_discovered_ref_helper<narrowOop>(this, task_executor);
  } else {
    return enqueue_discovered_ref_helper<oop>(this, task_executor);
  }
}

void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list,
                                                    HeapWord* pending_list_addr) {
  // Given a list of refs linked through the "discovered" field
  // (java.lang.ref.Reference.discovered) chain them through the
  // "next" field (java.lang.ref.Reference.next) and prepend
  // to the pending list.
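  // (Illustrative sketch: given a discovered list
  //      refs_list.head -> r1 -discovered-> r2 -discovered-> sentinel
  //  and an existing pending list P, afterwards *pending_list_addr == r1 with
  //      r1 -next-> r2 -next-> P   (or r2 -next-> r2 if P was empty),
  //  and every discovered field on the chain reset to NULL.)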
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr("ReferenceProcessor::enqueue_discovered_reflist list "
                           INTPTR_FORMAT, (address)refs_list.head());
  }
  oop obj = refs_list.head();
  // Walk down the list, copying the discovered field into
  // the next field and clearing it (except for the last
  // non-sentinel object which is treated specially to avoid
  // confusion with an active reference).
  while (obj != sentinel_ref()) {
    assert(obj->is_instanceRef(), "should be reference object");
    oop next = java_lang_ref_Reference::discovered(obj);
    if (TraceReferenceGC && PrintGCDetails) {
      gclog_or_tty->print_cr("        obj " INTPTR_FORMAT "/next " INTPTR_FORMAT,
                             obj, next);
    }
    assert(java_lang_ref_Reference::next(obj) == NULL,
           "The reference should not be enqueued");
    if (next == sentinel_ref()) {  // obj is last
      // Swap refs_list into pending_list_addr and
      // set obj's next to what we read from pending_list_addr.
      oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
      // Need oop_check on pending_list_addr above;
      // see special oop-check code at the end of
      // enqueue_discovered_reflists() further below.
      if (old == NULL) {
        // obj should be made to point to itself, since
        // pending list was empty.
        java_lang_ref_Reference::set_next(obj, obj);
      } else {
        java_lang_ref_Reference::set_next(obj, old);
      }
    } else {
      java_lang_ref_Reference::set_next(obj, next);
    }
    java_lang_ref_Reference::set_discovered(obj, (oop) NULL);
    obj = next;
  }
}

// Parallel enqueue task
class RefProcEnqueueTask: public AbstractRefProcTaskExecutor::EnqueueTask {
public:
  RefProcEnqueueTask(ReferenceProcessor& ref_processor,
                     DiscoveredList      discovered_refs[],
                     HeapWord*           pending_list_addr,
                     oop                 sentinel_ref,
                     int                 n_queues)
    : EnqueueTask(ref_processor, discovered_refs,
                  pending_list_addr, sentinel_ref, n_queues)
  { }

  virtual void work(unsigned int work_id) {
    assert(work_id < (unsigned int)_ref_processor.num_q(), "Index out-of-bounds");
    // Simplest first cut: static partitioning.
    int index = work_id;
    // The increment on "index" must correspond to the maximum number of queues
    // (n_queues) with which that ReferenceProcessor was created. That
    // is because of the "clever" way the discovered references lists were
    // allocated and are indexed into. That number is ParallelGCThreads
    // currently. Assert that.
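    // (Editorial note, illustrative: the discovered lists form a flattened
    // [subclasses_of_ref x _max_num_q] array, so worker "work_id" visits
    // indices work_id, work_id + _n_queues, work_id + 2*_n_queues, ... --
    // one queue per reference subclass.)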
    assert(_n_queues == (int) ParallelGCThreads, "Different number not expected");
    for (int j = 0;
         j < subclasses_of_ref;
         j++, index += _n_queues) {
      _ref_processor.enqueue_discovered_reflist(
        _refs_lists[index], _pending_list_addr);
      _refs_lists[index].set_head(_sentinel_ref);
      _refs_lists[index].set_length(0);
    }
  }
};

// Enqueue references that are not made active again
void ReferenceProcessor::enqueue_discovered_reflists(HeapWord* pending_list_addr,
                                                     AbstractRefProcTaskExecutor* task_executor) {
  if (_processing_is_mt && task_executor != NULL) {
    // Parallel code
    RefProcEnqueueTask tsk(*this, _discoveredSoftRefs,
                           pending_list_addr, sentinel_ref(), _max_num_q);
    task_executor->execute(tsk);
  } else {
    // Serial code: call the parent class's implementation
    for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
      enqueue_discovered_reflist(_discoveredSoftRefs[i], pending_list_addr);
      _discoveredSoftRefs[i].set_head(sentinel_ref());
      _discoveredSoftRefs[i].set_length(0);
    }
  }
}

// Iterator for the list of discovered references.
class DiscoveredListIterator {
public:
  inline DiscoveredListIterator(DiscoveredList&    refs_list,
                                OopClosure*        keep_alive,
                                BoolObjectClosure* is_alive);

  // End Of List.
  inline bool has_next() const { return _next != ReferenceProcessor::sentinel_ref(); }

  // Get oop to the Reference object.
  inline oop obj() const { return _ref; }

  // Get oop to the referent object.
  inline oop referent() const { return _referent; }

  // Returns true if referent is alive.
  inline bool is_referent_alive() const;

  // Loads data for the current reference.
  // The "allow_null_referent" argument tells us to allow for the possibility
  // of a NULL referent in the discovered Reference object. This typically
  // happens in the case of concurrent collectors that may have done the
  // discovery concurrently, or interleaved, with mutator execution.
  inline void load_ptrs(DEBUG_ONLY(bool allow_null_referent));

  // Move to the next discovered reference.
  inline void next();

  // Remove the current reference from the list
  inline void remove();

  // Make the Reference object active again.
  inline void make_active() { java_lang_ref_Reference::set_next(_ref, NULL); }

  // Make the referent alive.
  inline void make_referent_alive() {
    if (UseCompressedOops) {
      _keep_alive->do_oop((narrowOop*)_referent_addr);
    } else {
      _keep_alive->do_oop((oop*)_referent_addr);
    }
  }

  // Update the discovered field.
  inline void update_discovered() {
    // First _prev_next ref actually points into DiscoveredList (gross).
    if (UseCompressedOops) {
      _keep_alive->do_oop((narrowOop*)_prev_next);
    } else {
      _keep_alive->do_oop((oop*)_prev_next);
    }
  }

  // NULL out referent pointer.
  inline void clear_referent() { oop_store_raw(_referent_addr, NULL); }

  // Statistics
  NOT_PRODUCT(
  inline size_t processed() const { return _processed; }
  inline size_t removed() const   { return _removed; }
  )

  inline void move_to_next();

private:
  DiscoveredList&    _refs_list;
  HeapWord*          _prev_next;
  oop                _ref;
  HeapWord*          _discovered_addr;
  oop                _next;
  HeapWord*          _referent_addr;
  oop                _referent;
  OopClosure*        _keep_alive;
  BoolObjectClosure* _is_alive;
  DEBUG_ONLY(
  oop                _first_seen; // cyclic linked list check
  )
  NOT_PRODUCT(
  size_t             _processed;
  size_t             _removed;
  )
};

inline DiscoveredListIterator::DiscoveredListIterator(DiscoveredList&    refs_list,
                                                      OopClosure*        keep_alive,
                                                      BoolObjectClosure* is_alive)
  : _refs_list(refs_list),
    _prev_next(refs_list.adr_head()),
    _ref(refs_list.head()),
#ifdef ASSERT
    _first_seen(refs_list.head()),
#endif
#ifndef PRODUCT
    _processed(0),
    _removed(0),
#endif
    _next(refs_list.head()),
    _keep_alive(keep_alive),
    _is_alive(is_alive)
{ }

inline bool DiscoveredListIterator::is_referent_alive() const {
  return _is_alive->do_object_b(_referent);
}

inline void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
  _discovered_addr = java_lang_ref_Reference::discovered_addr(_ref);
  oop discovered = java_lang_ref_Reference::discovered(_ref);
  assert(_discovered_addr && discovered->is_oop_or_null(),
         "discovered field is bad");
  _next = discovered;
  _referent_addr = java_lang_ref_Reference::referent_addr(_ref);
  _referent = java_lang_ref_Reference::referent(_ref);
  assert(Universe::heap()->is_in_reserved_or_null(_referent),
         "Wrong oop found in java.lang.Reference object");
  assert(allow_null_referent ?
             _referent->is_oop_or_null()
           : _referent->is_oop(),
         "bad referent");
}

inline void DiscoveredListIterator::next() {
  _prev_next = _discovered_addr;
  move_to_next();
}

inline void DiscoveredListIterator::remove() {
  assert(_ref->is_oop(), "Dropping a bad reference");
  oop_store_raw(_discovered_addr, NULL);
  // First _prev_next ref actually points into DiscoveredList (gross).
  if (UseCompressedOops) {
    // Remove Reference object from list.
    oopDesc::encode_store_heap_oop_not_null((narrowOop*)_prev_next, _next);
  } else {
    // Remove Reference object from list.
    oopDesc::store_heap_oop((oop*)_prev_next, _next);
  }
  NOT_PRODUCT(_removed++);
  _refs_list.dec_length(1);
}

inline void DiscoveredListIterator::move_to_next() {
  _ref = _next;
  assert(_ref != _first_seen, "cyclic ref_list found");
  NOT_PRODUCT(_processed++);
}

// NOTE: process_phase*() are largely similar, and at a high level
// merely iterate over the extant list applying a predicate to
// each of its elements and possibly removing that element from the
// list and applying some further closures to that element.
// We should consider the possibility of replacing these
// process_phase*() methods by abstracting them into
// a single general iterator invocation that receives appropriate
// closures that accomplish this work.

// (SoftReferences only) Traverse the list and remove any SoftReferences whose
// referents are not alive, but that should be kept alive for policy reasons.
// Keep alive the transitive closure of all such referents.
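// (Editorial summary of the phases: phase 1 applies the soft-ref policy,
// phase 2 drops References whose referents turn out to be alive, and
// phase 3 either clears the remaining referents (the clear_referent case,
// used for soft and weak refs) or keeps them alive (final and phantom refs),
// before the survivors are chained onto the pending list.)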
void
ReferenceProcessor::process_phase1(DiscoveredList&    refs_list,
                                   ReferencePolicy*   policy,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc) {
  assert(policy != NULL, "Must have a non-NULL policy");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  // Decide which softly reachable refs should be kept alive.
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
    bool referent_is_dead = (iter.referent() != NULL) && !iter.is_referent_alive();
    if (referent_is_dead && !policy->should_clear_reference(iter.obj())) {
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Dropping reference (" INTPTR_FORMAT ": %s) by policy",
                               iter.obj(), iter.obj()->blueprint()->internal_name());
      }
      // Remove Reference object from list
      iter.remove();
      // Make the Reference object active again
      iter.make_active();
      // keep the referent around
      iter.make_referent_alive();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Close the reachable set
  complete_gc->do_void();
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC) {
      gclog_or_tty->print_cr(" Dropped %d dead Refs out of %d "
        "discovered Refs by policy list " INTPTR_FORMAT,
        iter.removed(), iter.processed(), (address)refs_list.head());
    }
  )
}

// Traverse the list and remove any Refs that are not active, or
// whose referents are either alive or NULL.
void
ReferenceProcessor::pp2_work(DiscoveredList&    refs_list,
                             BoolObjectClosure* is_alive,
                             OopClosure*        keep_alive) {
  assert(discovery_is_atomic(), "Error");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
    DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());)
    assert(next == NULL, "Should not discover inactive Reference");
    if (iter.is_referent_alive()) {
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)",
                               iter.obj(), iter.obj()->blueprint()->internal_name());
      }
      // The referent is reachable after all.
      // Remove Reference object from list.
      iter.remove();
      // Update the referent pointer as necessary: Note that this
      // should not entail any recursive marking because the
      // referent must already have been traversed.
      iter.make_referent_alive();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC) {
      gclog_or_tty->print_cr(" Dropped %d active Refs out of %d "
        "Refs in discovered list " INTPTR_FORMAT,
        iter.removed(), iter.processed(), (address)refs_list.head());
    }
  )
}

void
ReferenceProcessor::pp2_work_concurrent_discovery(DiscoveredList&    refs_list,
                                                  BoolObjectClosure* is_alive,
                                                  OopClosure*        keep_alive,
                                                  VoidClosure*       complete_gc) {
  assert(!discovery_is_atomic(), "Error");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    HeapWord* next_addr = java_lang_ref_Reference::next_addr(iter.obj());
    oop next = java_lang_ref_Reference::next(iter.obj());
    if ((iter.referent() == NULL || iter.is_referent_alive() ||
         next != NULL)) {
      assert(next->is_oop_or_null(), "bad next field");
      // Remove Reference object from list
      iter.remove();
      // Trace the cohorts
      iter.make_referent_alive();
      if (UseCompressedOops) {
        keep_alive->do_oop((narrowOop*)next_addr);
      } else {
        keep_alive->do_oop((oop*)next_addr);
      }
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Now close the newly reachable set
  complete_gc->do_void();
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC) {
      gclog_or_tty->print_cr(" Dropped %d active Refs out of %d "
        "Refs in discovered list " INTPTR_FORMAT,
        iter.removed(), iter.processed(), (address)refs_list.head());
    }
  )
}

// Traverse the list and process the referents, by either
// clearing them or keeping them (and their reachable
// closure) alive.
void
ReferenceProcessor::process_phase3(DiscoveredList&    refs_list,
                                   bool               clear_referent,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc) {
  ResourceMark rm;
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.update_discovered();
    iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
    if (clear_referent) {
      // NULL out referent pointer
      iter.clear_referent();
    } else {
      // keep the referent around
      iter.make_referent_alive();
    }
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Adding %sreference (" INTPTR_FORMAT ": %s) as pending",
                             clear_referent ? "cleared " : "",
"cleared " : "", 738 iter.obj(), iter.obj()->blueprint()->internal_name()); 739 } 740 assert(iter.obj()->is_oop(UseConcMarkSweepGC), "Adding a bad reference"); 741 iter.next(); 742 } 743 // Remember to keep sentinel pointer around 744 iter.update_discovered(); 745 // Close the reachable set 746 complete_gc->do_void(); 747 } 748 749 void 750 ReferenceProcessor::abandon_partial_discovered_list(DiscoveredList& refs_list) { 751 oop obj = refs_list.head(); 752 while (obj != sentinel_ref()) { 753 oop discovered = java_lang_ref_Reference::discovered(obj); 754 java_lang_ref_Reference::set_discovered_raw(obj, NULL); 755 obj = discovered; 756 } 757 refs_list.set_head(sentinel_ref()); 758 refs_list.set_length(0); 759 } 760 761 void ReferenceProcessor::abandon_partial_discovery() { 762 // loop over the lists 763 for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) { 764 if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) { 765 gclog_or_tty->print_cr( 766 "\nAbandoning %s discovered list", 767 list_name(i)); 768 } 769 abandon_partial_discovered_list(_discoveredSoftRefs[i]); 770 } 771 } 772 773 class RefProcPhase1Task: public AbstractRefProcTaskExecutor::ProcessTask { 774 public: 775 RefProcPhase1Task(ReferenceProcessor& ref_processor, 776 DiscoveredList refs_lists[], 777 ReferencePolicy* policy, 778 bool marks_oops_alive) 779 : ProcessTask(ref_processor, refs_lists, marks_oops_alive), 780 _policy(policy) 781 { } 782 virtual void work(unsigned int i, BoolObjectClosure& is_alive, 783 OopClosure& keep_alive, 784 VoidClosure& complete_gc) 785 { 786 Thread* thr = Thread::current(); 787 int refs_list_index = ((WorkerThread*)thr)->id(); 788 _ref_processor.process_phase1(_refs_lists[refs_list_index], _policy, 789 &is_alive, &keep_alive, &complete_gc); 790 } 791 private: 792 ReferencePolicy* _policy; 793 }; 794 795 class RefProcPhase2Task: public AbstractRefProcTaskExecutor::ProcessTask { 796 public: 797 RefProcPhase2Task(ReferenceProcessor& ref_processor, 798 DiscoveredList refs_lists[], 799 bool marks_oops_alive) 800 : ProcessTask(ref_processor, refs_lists, marks_oops_alive) 801 { } 802 virtual void work(unsigned int i, BoolObjectClosure& is_alive, 803 OopClosure& keep_alive, 804 VoidClosure& complete_gc) 805 { 806 _ref_processor.process_phase2(_refs_lists[i], 807 &is_alive, &keep_alive, &complete_gc); 808 } 809 }; 810 811 class RefProcPhase3Task: public AbstractRefProcTaskExecutor::ProcessTask { 812 public: 813 RefProcPhase3Task(ReferenceProcessor& ref_processor, 814 DiscoveredList refs_lists[], 815 bool clear_referent, 816 bool marks_oops_alive) 817 : ProcessTask(ref_processor, refs_lists, marks_oops_alive), 818 _clear_referent(clear_referent) 819 { } 820 virtual void work(unsigned int i, BoolObjectClosure& is_alive, 821 OopClosure& keep_alive, 822 VoidClosure& complete_gc) 823 { 824 // Don't use "refs_list_index" calculated in this way because 825 // balance_queues() has moved the Ref's into the first n queues. 826 // Thread* thr = Thread::current(); 827 // int refs_list_index = ((WorkerThread*)thr)->id(); 828 // _ref_processor.process_phase3(_refs_lists[refs_list_index], _clear_referent, 829 _ref_processor.process_phase3(_refs_lists[i], _clear_referent, 830 &is_alive, &keep_alive, &complete_gc); 831 } 832 private: 833 bool _clear_referent; 834 }; 835 836 // Balances reference queues. 837 // Move entries from all queues[0, 1, ..., _max_num_q-1] to 838 // queues[0, 1, ..., _num_q-1] because only the first _num_q 839 // corresponding to the active workers will be processed. 
void ReferenceProcessor::balance_queues(DiscoveredList ref_lists[])
{
  // calculate total length
  size_t total_refs = 0;
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr("\nBalance ref_lists ");
  }

  for (int i = 0; i < _max_num_q; ++i) {
    total_refs += ref_lists[i].length();
    if (TraceReferenceGC && PrintGCDetails) {
      gclog_or_tty->print("%d ", ref_lists[i].length());
    }
  }
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr(" = %d", total_refs);
  }
  size_t avg_refs = total_refs / _num_q + 1;
  int to_idx = 0;
  for (int from_idx = 0; from_idx < _max_num_q; from_idx++) {
    bool move_all = false;
    if (from_idx >= _num_q) {
      move_all = ref_lists[from_idx].length() > 0;
    }
    while ((ref_lists[from_idx].length() > avg_refs) ||
           move_all) {
      assert(to_idx < _num_q, "Sanity Check!");
      if (ref_lists[to_idx].length() < avg_refs) {
        // move superfluous refs
        size_t refs_to_move;
        // Move all the Ref's if the from queue will not be processed.
        if (move_all) {
          refs_to_move = MIN2(ref_lists[from_idx].length(),
                              avg_refs - ref_lists[to_idx].length());
        } else {
          refs_to_move = MIN2(ref_lists[from_idx].length() - avg_refs,
                              avg_refs - ref_lists[to_idx].length());
        }
        oop move_head = ref_lists[from_idx].head();
        oop move_tail = move_head;
        oop new_head  = move_head;
        // find an element to split the list on
        for (size_t j = 0; j < refs_to_move; ++j) {
          move_tail = new_head;
          new_head = java_lang_ref_Reference::discovered(new_head);
        }
        java_lang_ref_Reference::set_discovered(move_tail, ref_lists[to_idx].head());
        ref_lists[to_idx].set_head(move_head);
        ref_lists[to_idx].inc_length(refs_to_move);
        ref_lists[from_idx].set_head(new_head);
        ref_lists[from_idx].dec_length(refs_to_move);
        if (ref_lists[from_idx].length() == 0) {
          break;
        }
      } else {
        to_idx = (to_idx + 1) % _num_q;
      }
    }
  }
#ifdef ASSERT
  size_t balanced_total_refs = 0;
  for (int i = 0; i < _max_num_q; ++i) {
    balanced_total_refs += ref_lists[i].length();
    if (TraceReferenceGC && PrintGCDetails) {
      gclog_or_tty->print("%d ", ref_lists[i].length());
    }
  }
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr(" = %d", balanced_total_refs);
    gclog_or_tty->flush();
  }
  assert(total_refs == balanced_total_refs, "Balancing was incomplete");
#endif
}

void ReferenceProcessor::balance_all_queues() {
  balance_queues(_discoveredSoftRefs);
  balance_queues(_discoveredWeakRefs);
  balance_queues(_discoveredFinalRefs);
  balance_queues(_discoveredPhantomRefs);
}

void
ReferenceProcessor::process_discovered_reflist(
  DiscoveredList               refs_lists[],
  ReferencePolicy*             policy,
  bool                         clear_referent,
  BoolObjectClosure*           is_alive,
  OopClosure*                  keep_alive,
  VoidClosure*                 complete_gc,
  AbstractRefProcTaskExecutor* task_executor)
{
  bool mt_processing = task_executor != NULL && _processing_is_mt;
  // If discovery used MT and a dynamic number of GC threads, then
  // the queues must be balanced for correctness if fewer than the
  // maximum number of queues were used. The number of queues used
  // during discovery may be different than the number to be used
  // for processing so don't depend on _num_q < _max_num_q as part
  // of the test.
  bool must_balance = _discovery_is_mt;

  if ((mt_processing && ParallelRefProcBalancingEnabled) ||
      must_balance) {
    balance_queues(refs_lists);
  }
  if (PrintReferenceGC && PrintGCDetails) {
    size_t total = 0;
    for (int i = 0; i < _num_q; ++i) {
      total += refs_lists[i].length();
    }
    gclog_or_tty->print(", %u refs", total);
  }

  // Phase 1 (soft refs only):
  // . Traverse the list and remove any SoftReferences whose
  //   referents are not alive, but that should be kept alive for
  //   policy reasons. Keep alive the transitive closure of all
  //   such referents.
  if (policy != NULL) {
    if (mt_processing) {
      RefProcPhase1Task phase1(*this, refs_lists, policy, true /*marks_oops_alive*/);
      task_executor->execute(phase1);
    } else {
      for (int i = 0; i < _num_q; i++) {
        process_phase1(refs_lists[i], policy,
                       is_alive, keep_alive, complete_gc);
      }
    }
  } else { // policy == NULL
    assert(refs_lists != _discoveredSoftRefs,
           "Policy must be specified for soft references.");
  }

  // Phase 2:
  // . Traverse the list and remove any refs whose referents are alive.
  if (mt_processing) {
    RefProcPhase2Task phase2(*this, refs_lists, !discovery_is_atomic() /*marks_oops_alive*/);
    task_executor->execute(phase2);
  } else {
    for (int i = 0; i < _num_q; i++) {
      process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc);
    }
  }

  // Phase 3:
  // . Traverse the list and process referents as appropriate.
  if (mt_processing) {
    RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/);
    task_executor->execute(phase3);
  } else {
    for (int i = 0; i < _num_q; i++) {
      process_phase3(refs_lists[i], clear_referent,
                     is_alive, keep_alive, complete_gc);
    }
  }
}

void ReferenceProcessor::clean_up_discovered_references() {
  // loop over the lists
  // Should this instead be
  // for (int i = 0; i < subclasses_of_ref; i++) {
  //   for (int j = 0; j < _num_q; j++) {
  //     int index = i * _max_num_q + j;
  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
    if (TraceReferenceGC && PrintGCDetails && ((i % _num_q) == 0)) {
      gclog_or_tty->print_cr(
        "\nScrubbing %s discovered list of Null referents",
        list_name(i));
    }
    clean_up_discovered_reflist(_discoveredSoftRefs[i]);
  }
}

void ReferenceProcessor::clean_up_discovered_reflist(DiscoveredList& refs_list) {
  assert(!discovery_is_atomic(), "Else why call this method?");
  DiscoveredListIterator iter(refs_list, NULL, NULL);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    oop next = java_lang_ref_Reference::next(iter.obj());
    assert(next->is_oop_or_null(), "bad next field");
    // If referent has been cleared or Reference is not active,
    // drop it.
    if (iter.referent() == NULL || next != NULL) {
      debug_only(
        if (PrintGCDetails && TraceReferenceGC) {
          gclog_or_tty->print_cr("clean_up_discovered_list: Dropping Reference: "
            INTPTR_FORMAT " with next field: " INTPTR_FORMAT
            " and referent: " INTPTR_FORMAT,
            iter.obj(), next, iter.referent());
        }
      )
      // Remove Reference object from list
      iter.remove();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC) {
      gclog_or_tty->print(
        " Removed %d Refs with NULL referents out of %d discovered Refs",
        iter.removed(), iter.processed());
    }
  )
}

inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt) {
  int id = 0;
  // Determine the queue index to use for this object.
  if (_discovery_is_mt) {
    // During a multi-threaded discovery phase,
    // each thread saves to its "own" list.
    Thread* thr = Thread::current();
    assert(thr->is_GC_task_thread(),
           "Dubious cast from Thread* to WorkerThread*?");
    id = ((WorkerThread*)thr)->id();
  } else {
    // single-threaded discovery, we save in round-robin
    // fashion to each of the lists.
    if (_processing_is_mt) {
      id = next_id();
    }
  }
  assert(0 <= id && id < _max_num_q, "Id is out-of-bounds (call Freud?)");

  // Get the discovered queue to which we will add
  DiscoveredList* list = NULL;
  switch (rt) {
    case REF_OTHER:
      // Unknown reference type, no special treatment
      break;
    case REF_SOFT:
      list = &_discoveredSoftRefs[id];
      break;
    case REF_WEAK:
      list = &_discoveredWeakRefs[id];
      break;
    case REF_FINAL:
      list = &_discoveredFinalRefs[id];
      break;
    case REF_PHANTOM:
      list = &_discoveredPhantomRefs[id];
      break;
    case REF_NONE:
      // we should not reach here if we are an instanceRefKlass
    default:
      ShouldNotReachHere();
  }
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr("Thread %d gets list " INTPTR_FORMAT,
                           id, list);
  }
  return list;
}

inline void
ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
                                              oop             obj,
                                              HeapWord*       discovered_addr) {
  assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller");
  // First we must make sure this object is only enqueued once. CAS in a non null
  // discovered_addr.
  oop current_head = refs_list.head();

  // Note: In the case of G1, this specific pre-barrier is strictly
  // not necessary because the only case we are interested in
  // here is when *discovered_addr is NULL (see the CAS further below),
  // so this will expand to nothing. As a result, we have manually
  // elided this out for G1, but left in the test for some future
  // collector that might have need for a pre-barrier here.
  if (_discovered_list_needs_barrier && !UseG1GC) {
    if (UseCompressedOops) {
      _bs->write_ref_field_pre((narrowOop*)discovered_addr, current_head);
    } else {
      _bs->write_ref_field_pre((oop*)discovered_addr, current_head);
    }
    guarantee(false, "Need to check non-G1 collector");
  }
  oop retest = oopDesc::atomic_compare_exchange_oop(current_head, discovered_addr,
                                                    NULL);
  if (retest == NULL) {
    // This thread just won the right to enqueue the object.
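    // (Editorial note: the CAS above installs current_head only if the
    // discovered field is still NULL, so exactly one thread succeeds;
    // losers observe a non-NULL retest and take the else branch below,
    // leaving the object on whichever list the winner chose.)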
    // We have separate lists for enqueueing so no synchronization
    // is necessary.
    refs_list.set_head(obj);
    refs_list.inc_length(1);
    if (_discovered_list_needs_barrier) {
      _bs->write_ref_field((void*)discovered_addr, current_head);
    }
  } else {
    // If retest was non NULL, another thread beat us to it:
    // The reference has already been discovered...
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Already enqueued reference (" INTPTR_FORMAT ": %s)",
                             obj, obj->blueprint()->internal_name());
    }
  }
}

// We mention two of several possible choices here:
// #0: if the reference object is not in the "originating generation"
//     (or part of the heap being collected, indicated by our "span")
//     we don't treat it specially (i.e. we scan it as we would
//     a normal oop, treating its references as strong references).
//     This means that references can't be enqueued unless their
//     referent is also in the same span. This is the simplest,
//     most "local" and most conservative approach, albeit one
//     that may cause weak references to be enqueued least promptly.
//     We call this choice the "ReferenceBasedDiscovery" policy.
// #1: the reference object may be in any generation (span), but if
//     the referent is in the generation (span) being currently collected
//     then we can discover the reference object, provided
//     the object has not already been discovered by
//     a different concurrently running collector (as may be the
//     case, for instance, if the reference object is in CMS and
//     the referent in DefNewGeneration), and provided the processing
//     of this reference object by the current collector will
//     appear atomic to every other collector in the system.
//     (Thus, for instance, a concurrent collector may not
//     discover references in other generations even if the
//     referent is in its own generation). This policy may,
//     in certain cases, enqueue references somewhat sooner than
//     might Policy #0 above, but at marginally increased cost
//     and complexity in processing these references.
//     We call this choice the "ReferentBasedDiscovery" policy.
bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
  // We enqueue references only if we are discovering refs
  // (rather than processing discovered refs).
  if (!_discovering_refs || !RegisterReferences) {
    return false;
  }
  // We only enqueue active references.
  oop next = java_lang_ref_Reference::next(obj);
  if (next != NULL) {
    return false;
  }

  HeapWord* obj_addr = (HeapWord*)obj;
  if (RefDiscoveryPolicy == ReferenceBasedDiscovery &&
      !_span.contains(obj_addr)) {
    // Reference is not in the originating generation;
    // don't treat it specially (i.e. we want to scan it as a normal
    // object with strong references).
    return false;
  }

  // We only enqueue references whose referents are not (yet) strongly
  // reachable.
  if (is_alive_non_header() != NULL) {
    oop referent = java_lang_ref_Reference::referent(obj);
    // In the case of non-concurrent discovery, the last
    // disjunct below should hold. It may not hold in the
    // case of concurrent discovery because mutators may
    // concurrently clear() a Reference.
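    // (Editorial note: with atomic discovery the scanning code has already
    // filtered out refs whose referent is NULL; under concurrent discovery a
    // mutator may NULL the referent between that filter and this point, which
    // is why the concurrent collectors are excluded from the assert below.)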
    assert(UseConcMarkSweepGC || UseG1GC || referent != NULL,
           "Refs with null referents already filtered");
    if (is_alive_non_header()->do_object_b(referent)) {
      return false;  // referent is reachable
    }
  }
  if (rt == REF_SOFT) {
    // For soft refs we can decide now if these are not
    // current candidates for clearing, in which case we
    // can mark through them now, rather than delaying that
    // to the reference-processing phase. Since all current
    // time-stamp policies advance the soft-ref clock only
    // at a major collection cycle, this is always currently
    // accurate.
    if (!_current_soft_ref_policy->should_clear_reference(obj)) {
      return false;
    }
  }

  HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
  const oop  discovered = java_lang_ref_Reference::discovered(obj);
  assert(discovered->is_oop_or_null(), "bad discovered field");
  if (discovered != NULL) {
    // The reference has already been discovered...
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Already enqueued reference (" INTPTR_FORMAT ": %s)",
                             obj, obj->blueprint()->internal_name());
    }
    if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
      // assumes that an object is not processed twice;
      // if it's been already discovered it must be on another
      // generation's discovered list; so we won't discover it.
      return false;
    } else {
      assert(RefDiscoveryPolicy == ReferenceBasedDiscovery,
             "Unrecognized policy");
      // Check assumption that an object is not potentially
      // discovered twice except by concurrent collectors that potentially
      // trace the same Reference object twice.
      assert(UseConcMarkSweepGC,
             "Only possible with an incremental-update concurrent collector");
      return true;
    }
  }

  if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
    oop referent = java_lang_ref_Reference::referent(obj);
    assert(referent->is_oop(), "bad referent");
    // enqueue if and only if either:
    // reference is in our span or
    // we are an atomic collector and referent is in our span
    if (_span.contains(obj_addr) ||
        (discovery_is_atomic() && _span.contains(referent))) {
      // should_enqueue = true;
    } else {
      return false;
    }
  } else {
    assert(RefDiscoveryPolicy == ReferenceBasedDiscovery &&
           _span.contains(obj_addr), "code inconsistency");
  }

  // Get the right type of discovered queue head.
  DiscoveredList* list = get_discovered_list(rt);
  if (list == NULL) {
    return false;   // nothing special needs to be done
  }

  if (_discovery_is_mt) {
    add_to_discovered_list_mt(*list, obj, discovered_addr);
  } else {
    // If "_discovered_list_needs_barrier", we do write barriers when
    // updating the discovered reference list. Otherwise, we do a raw store
    // here: the field will be visited later when processing the discovered
    // references.
    oop current_head = list->head();
    // As in the case further above, since we are over-writing a NULL
    // pre-value, we can safely elide the pre-barrier here for the case of G1.
    assert(discovered == NULL, "control point invariant");
    if (_discovered_list_needs_barrier && !UseG1GC) { // safe to elide for G1
      if (UseCompressedOops) {
        _bs->write_ref_field_pre((narrowOop*)discovered_addr, current_head);
      } else {
        _bs->write_ref_field_pre((oop*)discovered_addr, current_head);
      }
      guarantee(false, "Need to check non-G1 collector");
    }
    oop_store_raw(discovered_addr, current_head);
    if (_discovered_list_needs_barrier) {
      _bs->write_ref_field((void*)discovered_addr, current_head);
    }
    list->set_head(obj);
    list->inc_length(1);
  }

  // In the MT discovery case, it is currently possible to see
  // the following message multiple times if several threads
  // discover a reference about the same time. Only one will
  // however have actually added it to the discovered queue.
  // One could let add_to_discovered_list_mt() return an
  // indication of success in queueing (by 1 thread) or
  // failure (by all other threads), but I decided the extra
  // code was not worth the effort for something that is
  // only used for debugging support.
  if (TraceReferenceGC) {
    oop referent = java_lang_ref_Reference::referent(obj);
    if (PrintGCDetails) {
      gclog_or_tty->print_cr("Enqueued reference (" INTPTR_FORMAT ": %s)",
                             obj, obj->blueprint()->internal_name());
    }
    assert(referent->is_oop(), "Enqueued a bad referent");
  }
  assert(obj->is_oop(), "Enqueued a bad reference");
  return true;
}

// Preclean the discovered references by removing those
// whose referents are alive, and by marking from those that
// are not active. These lists can be handled here
// in any order and, indeed, concurrently.
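// (Editorial note: precleaning is typically run by a concurrent collector
// such as CMS between concurrent marking and the remark pause, to shrink the
// discovered lists that the pause must otherwise process; the yield closure
// lets the caller bail out early, e.g. when a safepoint is requested.)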
void ReferenceProcessor::preclean_discovered_references(
  BoolObjectClosure* is_alive,
  OopClosure*        keep_alive,
  VoidClosure*       complete_gc,
  YieldClosure*      yield,
  bool               should_unload_classes) {

  NOT_PRODUCT(verify_ok_to_handle_reflists());

#ifdef ASSERT
  bool must_remember_klasses = ClassUnloading && !UseConcMarkSweepGC ||
                               CMSClassUnloadingEnabled && UseConcMarkSweepGC ||
                               ExplicitGCInvokesConcurrentAndUnloadsClasses &&
                                 UseConcMarkSweepGC && should_unload_classes;
  RememberKlassesChecker mx(must_remember_klasses);
#endif
  // Soft references
  {
    TraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (int i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredSoftRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Weak references
  {
    TraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (int i = 0; i < _num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredWeakRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Final references
  {
    TraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (int i = 0; i < _num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredFinalRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Phantom references
  {
    TraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC,
                 false, gclog_or_tty);
    for (int i = 0; i < _num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredPhantomRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }
}

// Walk the given discovered ref list, and remove all reference objects
// whose referents are still alive, whose referents are NULL or which
// are not active (have a non-NULL next field). NOTE: When we are
// thus precleaning the ref lists (which happens single-threaded today),
// we do not disable refs discovery to honour the correct semantics of
// java.lang.Reference. As a result, we need to be careful below
// that ref removal steps interleave safely with ref discovery steps
// (in this thread).
void
ReferenceProcessor::preclean_discovered_reflist(DiscoveredList&    refs_list,
                                                BoolObjectClosure* is_alive,
                                                OopClosure*        keep_alive,
                                                VoidClosure*       complete_gc,
                                                YieldClosure*      yield) {
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    oop obj = iter.obj();
    oop next = java_lang_ref_Reference::next(obj);
    if (iter.referent() == NULL || iter.is_referent_alive() ||
        next != NULL) {
      // The referent has been cleared, or is alive, or the Reference is not
      // active; we need to trace and mark its cohort.
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Precleaning Reference (" INTPTR_FORMAT ": %s)",
                               iter.obj(), iter.obj()->blueprint()->internal_name());
      }
      // Remove Reference object from list
      iter.remove();
      // Keep alive its cohort.
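      // (Editorial note: "cohort" here covers both the referent and the next
      // field -- for an inactive Reference the follower on the pending chain
      // must also be kept alive, hence the explicit do_oop on next_addr below.)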
      iter.make_referent_alive();
      if (UseCompressedOops) {
        narrowOop* next_addr = (narrowOop*)java_lang_ref_Reference::next_addr(obj);
        keep_alive->do_oop(next_addr);
      } else {
        oop* next_addr = (oop*)java_lang_ref_Reference::next_addr(obj);
        keep_alive->do_oop(next_addr);
      }
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Close the reachable set
  complete_gc->do_void();

  NOT_PRODUCT(
    if (PrintGCDetails && PrintReferenceGC) {
      gclog_or_tty->print_cr(" Dropped %d Refs out of %d "
        "Refs in discovered list " INTPTR_FORMAT,
        iter.removed(), iter.processed(), (address)refs_list.head());
    }
  )
}

const char* ReferenceProcessor::list_name(int i) {
  assert(i >= 0 && i < _max_num_q * subclasses_of_ref, "Out of bounds index");
  int j = i / _max_num_q;
  switch (j) {
    case 0: return "SoftRef";
    case 1: return "WeakRef";
    case 2: return "FinalRef";
    case 3: return "PhantomRef";
  }
  ShouldNotReachHere();
  return NULL;
}

#ifndef PRODUCT
void ReferenceProcessor::verify_ok_to_handle_reflists() {
  // empty for now
}
#endif

void ReferenceProcessor::verify() {
  guarantee(sentinel_ref() != NULL && sentinel_ref()->is_oop(), "Lost _sentinelRef");
}

#ifndef PRODUCT
void ReferenceProcessor::clear_discovered_references() {
  guarantee(!_discovering_refs, "Discovering refs?");
  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
    oop obj = _discoveredSoftRefs[i].head();
    while (obj != sentinel_ref()) {
      oop next = java_lang_ref_Reference::discovered(obj);
      java_lang_ref_Reference::set_discovered(obj, (oop) NULL);
      obj = next;
    }
    _discoveredSoftRefs[i].set_head(sentinel_ref());
    _discoveredSoftRefs[i].set_length(0);
  }
}
#endif // PRODUCT