/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "memory/allocation.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/jniHandles.hpp"

ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy      = NULL;
jlong            ReferenceProcessor::_soft_ref_timestamp_clock = 0;

void referenceProcessor_init() {
  ReferenceProcessor::init_statics();
}

void ReferenceProcessor::init_statics() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;

  // Initialize the soft ref timestamp clock.
  _soft_ref_timestamp_clock = now;
  // Also update the soft ref clock in j.l.r.SoftReference
  java_lang_ref_SoftReference::set_clock(_soft_ref_timestamp_clock);

  _always_clear_soft_ref_policy = new AlwaysClearPolicy();
#if defined(COMPILER2) || INCLUDE_JVMCI
  _default_soft_ref_policy      = new LRUMaxHeapPolicy();
#else
  _default_soft_ref_policy      = new LRUCurrentHeapPolicy();
#endif
  if (_always_clear_soft_ref_policy == NULL || _default_soft_ref_policy == NULL) {
    vm_exit_during_initialization("Could not allocate reference policy object");
  }
  guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery ||
            RefDiscoveryPolicy == ReferentBasedDiscovery,
            "Unrecognized RefDiscoveryPolicy");
}

void ReferenceProcessor::enable_discovery(bool check_no_refs) {
#ifdef ASSERT
  // Verify that we're not currently discovering refs
  assert(!_discovering_refs, "nested call?");

  if (check_no_refs) {
    // Verify that the discovered lists are empty
    verify_no_references_recorded();
  }
#endif // ASSERT

  // Someone could have modified the value of the static
  // field in the j.l.r.SoftReference class that holds the
  // soft reference timestamp clock using reflection or
  // Unsafe between GCs.
  // Unconditionally update the static
  // field in ReferenceProcessor here so that we use the new
  // value during reference discovery.

  _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock();
  _discovering_refs = true;
}

ReferenceProcessor::ReferenceProcessor(MemRegion span,
                                       bool      mt_processing,
                                       uint      mt_processing_degree,
                                       bool      mt_discovery,
                                       uint      mt_discovery_degree,
                                       bool      atomic_discovery,
                                       BoolObjectClosure* is_alive_non_header) :
  _discovering_refs(false),
  _enqueuing_is_done(false),
  _is_alive_non_header(is_alive_non_header),
  _processing_is_mt(mt_processing),
  _next_id(0)
{
  _span = span;
  _discovery_is_atomic = atomic_discovery;
  _discovery_is_mt     = mt_discovery;
  _num_q               = MAX2(1U, mt_processing_degree);
  _max_num_q           = MAX2(_num_q, mt_discovery_degree);
  _discovered_refs     = NEW_C_HEAP_ARRAY(DiscoveredList,
            _max_num_q * number_of_subclasses_of_ref(), mtGC);

  if (_discovered_refs == NULL) {
    vm_exit_during_initialization("Could not allocate RefProc Array");
  }
  _discoveredSoftRefs    = &_discovered_refs[0];
  _discoveredWeakRefs    = &_discoveredSoftRefs[_max_num_q];
  _discoveredFinalRefs   = &_discoveredWeakRefs[_max_num_q];
  _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_q];
  _discoveredCleanerRefs = &_discoveredPhantomRefs[_max_num_q];

  // Initialize all entries to NULL
  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    _discovered_refs[i].set_head(NULL);
    _discovered_refs[i].set_length(0);
  }

  setup_policy(false /* default soft ref policy */);
}

#ifndef PRODUCT
void ReferenceProcessor::verify_no_references_recorded() {
  guarantee(!_discovering_refs, "Discovering refs?");
  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    guarantee(_discovered_refs[i].is_empty(),
              "Found non-empty discovered list");
  }
}
#endif

void ReferenceProcessor::weak_oops_do(OopClosure* f) {
  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    if (UseCompressedOops) {
      f->do_oop((narrowOop*)_discovered_refs[i].adr_head());
    } else {
      f->do_oop((oop*)_discovered_refs[i].adr_head());
    }
  }
}

void ReferenceProcessor::update_soft_ref_master_clock() {
  // Update (advance) the soft ref master clock field. This must be done
  // after processing the soft ref list.

  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  jlong soft_ref_clock = java_lang_ref_SoftReference::clock();
  assert(soft_ref_clock == _soft_ref_timestamp_clock, "soft ref clocks out of sync");

  NOT_PRODUCT(
  if (now < _soft_ref_timestamp_clock) {
    warning("time warp: " JLONG_FORMAT " to " JLONG_FORMAT,
            _soft_ref_timestamp_clock, now);
  }
  )
  // The values of now and _soft_ref_timestamp_clock are set using
  // javaTimeNanos(), which is guaranteed to be monotonically
  // non-decreasing provided the underlying platform provides such
  // a time source (and it is bug free).
  // In product mode, however, protect ourselves from non-monotonicity.
  if (now > _soft_ref_timestamp_clock) {
    _soft_ref_timestamp_clock = now;
    java_lang_ref_SoftReference::set_clock(now);
  }
  // Else leave clock stalled at its old value until time progresses
  // past clock value.
}

size_t ReferenceProcessor::total_count(DiscoveredList lists[]) {
  size_t total = 0;
  for (uint i = 0; i < _max_num_q; ++i) {
    total += lists[i].length();
  }
  return total;
}

static void log_ref_count(size_t count, bool doit) {
  if (doit) {
    gclog_or_tty->print(", " SIZE_FORMAT " refs", count);
  }
}

class GCRefTraceTime : public StackObj {
  GCTraceTimeImpl _gc_trace_time;
 public:
  GCRefTraceTime(const char* title, bool doit, GCTimer* timer, size_t count) :
    _gc_trace_time(title, doit, false, timer) {
    log_ref_count(count, doit);
  }
};

ReferenceProcessorStats ReferenceProcessor::process_discovered_references(
  BoolObjectClosure*           is_alive,
  OopClosure*                  keep_alive,
  VoidClosure*                 complete_gc,
  AbstractRefProcTaskExecutor* task_executor,
  GCTimer*                     gc_timer) {

  assert(!enqueuing_is_done(), "If here enqueuing should not be complete");
  // Stop treating discovered references specially.
  disable_discovery();

  // If discovery was concurrent, someone could have modified
  // the value of the static field in the j.l.r.SoftReference
  // class that holds the soft reference timestamp clock using
  // reflection or Unsafe between when discovery was enabled and
  // now. Unconditionally update the static field in ReferenceProcessor
  // here so that we use the new value during processing of the
  // discovered soft refs.

  _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock();

  bool trace_time = PrintGCDetails && PrintReferenceGC;

  // Include cleaners in phantom statistics. We expect Cleaner
  // references to be temporary, and don't want to deal with
  // possible incompatibilities arising from making it more visible.
  ReferenceProcessorStats stats(
      total_count(_discoveredSoftRefs),
      total_count(_discoveredWeakRefs),
      total_count(_discoveredFinalRefs),
      total_count(_discoveredPhantomRefs) + total_count(_discoveredCleanerRefs));

  // Soft references
  {
    GCRefTraceTime tt("SoftReference", trace_time, gc_timer, stats.soft_count());
    process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  update_soft_ref_master_clock();

  // Weak references
  {
    GCRefTraceTime tt("WeakReference", trace_time, gc_timer, stats.weak_count());
    process_discovered_reflist(_discoveredWeakRefs, NULL, true,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  // Final references
  {
    GCRefTraceTime tt("FinalReference", trace_time, gc_timer, stats.final_count());
    process_discovered_reflist(_discoveredFinalRefs, NULL, false,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  // Phantom references
  {
    GCRefTraceTime tt("PhantomReference", trace_time, gc_timer, stats.phantom_count());
    process_discovered_reflist(_discoveredPhantomRefs, NULL, false,
                               is_alive, keep_alive, complete_gc, task_executor);

    // Process cleaners, but include them in phantom timing. We expect
    // Cleaner references to be temporary, and don't want to deal with
    // possible incompatibilities arising from making it more visible.
    process_discovered_reflist(_discoveredCleanerRefs, NULL, true,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  // Weak global JNI references.
  // It would make more sense (semantically) to
  // traverse these simultaneously with the regular weak references above, but
  // that is not what the JDK 1.2 specification calls for; see #4126360. Native code can
  // thus use JNI weak references to circumvent the phantom references and
  // resurrect a "post-mortem" object.
  {
    GCTraceTime tt("JNI Weak Reference", trace_time, false, gc_timer);
    NOT_PRODUCT(log_ref_count(count_jni_refs(), trace_time);)
    if (task_executor != NULL) {
      task_executor->set_single_threaded_mode();
    }
    process_phaseJNI(is_alive, keep_alive, complete_gc);
  }

  return stats;
}

#ifndef PRODUCT
// Calculate the number of jni handles.
uint ReferenceProcessor::count_jni_refs() {
  class AlwaysAliveClosure: public BoolObjectClosure {
  public:
    virtual bool do_object_b(oop obj) { return true; }
  };

  class CountHandleClosure: public OopClosure {
  private:
    int _count;
  public:
    CountHandleClosure(): _count(0) {}
    void do_oop(oop* unused)       { _count++; }
    void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
    int count() { return _count; }
  };
  CountHandleClosure global_handle_count;
  AlwaysAliveClosure always_alive;
  JNIHandles::weak_oops_do(&always_alive, &global_handle_count);
  return global_handle_count.count();
}
#endif

void ReferenceProcessor::process_phaseJNI(BoolObjectClosure* is_alive,
                                          OopClosure*        keep_alive,
                                          VoidClosure*       complete_gc) {
  JNIHandles::weak_oops_do(is_alive, keep_alive);
  complete_gc->do_void();
}

template <class T>
bool enqueue_discovered_ref_helper(ReferenceProcessor* ref,
                                   AbstractRefProcTaskExecutor* task_executor) {

  // Remember old value of pending references list
  T* pending_list_addr = (T*)java_lang_ref_Reference::pending_list_addr();
  T old_pending_list_value = *pending_list_addr;

  // Enqueue references that are not made active again, and
  // clear the decks for the next collection (cycle).
  ref->enqueue_discovered_reflists((HeapWord*)pending_list_addr, task_executor);
  // Do the post-barrier on pending_list_addr missed in
  // enqueue_discovered_reflist.
  oopDesc::bs()->write_ref_field(pending_list_addr, oopDesc::load_decode_heap_oop(pending_list_addr));

  // Stop treating discovered references specially.
  ref->disable_discovery();

  // Return true if new pending references were added
  return old_pending_list_value != *pending_list_addr;
}

bool ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor) {
  if (UseCompressedOops) {
    return enqueue_discovered_ref_helper<narrowOop>(this, task_executor);
  } else {
    return enqueue_discovered_ref_helper<oop>(this, task_executor);
  }
}

void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list,
                                                    HeapWord* pending_list_addr) {
  // Given a list of refs linked through the "discovered" field
  // (java.lang.ref.Reference.discovered), self-loop their "next" field
  // thus distinguishing them from active References, then
  // prepend them to the pending list.
  //
  // The Java threads will see the Reference objects linked together through
  // the discovered field.
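  //
  // A sketch of the transformation for a discovered list of three
  // References (arrows are "discovered" links; each Reference also gets
  // its "next" field self-looped to mark it inactive):
  //
  //   R1 --> R2 --> R3 --> R3   (refs_list.head == R1; R3 self-loops)
  //
  // becomes, with P being the old pending-list head (possibly NULL):
  //
  //   R1 --> R2 --> R3 --> P    (the pending-list head is now R1)
  //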
  // Instead of trying to do the write barrier updates
  // in all places in the reference processor where we manipulate the discovered
  // field we make sure to do the barrier here where we anyway iterate through
  // all linked Reference objects. Note that it is important to not dirty any
  // cards during reference processing since this will cause card table
  // verification to fail for G1.
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr("ReferenceProcessor::enqueue_discovered_reflist list "
                           INTPTR_FORMAT, p2i(refs_list.head()));
  }

  oop obj = NULL;
  oop next_d = refs_list.head();
  // Walk down the list, self-looping the next field
  // so that the References are not considered active.
  while (obj != next_d) {
    obj = next_d;
    assert(obj->is_instance(), "should be an instance object");
    assert(InstanceKlass::cast(obj->klass())->is_reference_instance_klass(), "should be reference object");
    next_d = java_lang_ref_Reference::discovered(obj);
    if (TraceReferenceGC && PrintGCDetails) {
      gclog_or_tty->print_cr("        obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT,
                             p2i(obj), p2i(next_d));
    }
    assert(java_lang_ref_Reference::next(obj) == NULL,
           "Reference not active; should not be discovered");
    // Self-loop next, so as to make Ref not active.
    java_lang_ref_Reference::set_next_raw(obj, obj);
    if (next_d != obj) {
      oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), next_d);
    } else {
      // This is the last object.
      // Swap refs_list into pending_list_addr and
      // set obj's discovered to what we read from pending_list_addr.
      oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
      // Need post-barrier on pending_list_addr. See enqueue_discovered_ref_helper() above.
      java_lang_ref_Reference::set_discovered_raw(obj, old); // old may be NULL
      oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), old);
    }
  }
}

// Parallel enqueue task
class RefProcEnqueueTask: public AbstractRefProcTaskExecutor::EnqueueTask {
public:
  RefProcEnqueueTask(ReferenceProcessor& ref_processor,
                     DiscoveredList      discovered_refs[],
                     HeapWord*           pending_list_addr,
                     int                 n_queues)
    : EnqueueTask(ref_processor, discovered_refs,
                  pending_list_addr, n_queues)
  { }

  virtual void work(unsigned int work_id) {
    assert(work_id < (unsigned int)_ref_processor.max_num_q(), "Index out-of-bounds");
    // Simplest first cut: static partitioning.
    int index = work_id;
    // The increment on "index" must correspond to the maximum number of queues
    // (n_queues) with which that ReferenceProcessor was created. That
    // is because of the "clever" way the discovered references lists were
    // allocated and are indexed into.
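    //
    // Illustration (with a hypothetical _max_num_q of 4): the backing
    // array is laid out by subclass first, then by queue,
    //
    //   _discovered_refs: [S0 S1 S2 S3 | W0 W1 W2 W3 | F0 F1 F2 F3 | P0 P1 P2 P3 | C0 C1 C2 C3]
    //
    // so the worker with work_id 1 starts at S1 and strides by
    // _n_queues (== 4) to reach W1, F1, P1 and C1.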
    assert(_n_queues == (int) _ref_processor.max_num_q(), "Different number not expected");
    for (int j = 0;
         j < ReferenceProcessor::number_of_subclasses_of_ref();
         j++, index += _n_queues) {
      _ref_processor.enqueue_discovered_reflist(
        _refs_lists[index], _pending_list_addr);
      _refs_lists[index].set_head(NULL);
      _refs_lists[index].set_length(0);
    }
  }
};

// Enqueue references that are not made active again
void ReferenceProcessor::enqueue_discovered_reflists(HeapWord* pending_list_addr,
                                                     AbstractRefProcTaskExecutor* task_executor) {
  if (_processing_is_mt && task_executor != NULL) {
    // Parallel code
    RefProcEnqueueTask tsk(*this, _discovered_refs,
                           pending_list_addr, _max_num_q);
    task_executor->execute(tsk);
  } else {
    // Serial code: enqueue each discovered list in turn
    for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
      enqueue_discovered_reflist(_discovered_refs[i], pending_list_addr);
      _discovered_refs[i].set_head(NULL);
      _discovered_refs[i].set_length(0);
    }
  }
}

void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
  _discovered_addr = java_lang_ref_Reference::discovered_addr(_ref);
  oop discovered = java_lang_ref_Reference::discovered(_ref);
  assert(_discovered_addr && discovered->is_oop_or_null(),
         "Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered));
  _next = discovered;
  _referent_addr = java_lang_ref_Reference::referent_addr(_ref);
  _referent = java_lang_ref_Reference::referent(_ref);
  assert(Universe::heap()->is_in_reserved_or_null(_referent),
         "Wrong oop found in java.lang.Reference object");
  assert(allow_null_referent ?
             _referent->is_oop_or_null()
           : _referent->is_oop(),
         "Expected an oop%s for referent field at " PTR_FORMAT,
         (allow_null_referent ? " or NULL" : ""),
         p2i(_referent));
}

void DiscoveredListIterator::remove() {
  assert(_ref->is_oop(), "Dropping a bad reference");
  oop_store_raw(_discovered_addr, NULL);

  // First _prev_next ref actually points into DiscoveredList (gross).
  oop new_next;
  if (_next == _ref) {
    // At the end of the list, we should make _prev point to itself.
    // If _ref is the first ref, then _prev_next will be in the DiscoveredList,
    // and _prev will be NULL.
    new_next = _prev;
  } else {
    new_next = _next;
  }
  // Remove Reference object from discovered list. Note that G1 does not need a
  // pre-barrier here because we know the Reference has already been found/marked,
  // that's how it ended up in the discovered list in the first place.
  oop_store_raw(_prev_next, new_next);
  NOT_PRODUCT(_removed++);
  _refs_list.dec_length(1);
}

void DiscoveredListIterator::clear_referent() {
  oop_store_raw(_referent_addr, NULL);
}

// NOTE: process_phase*() are largely similar, and at a high level
// merely iterate over the extant list applying a predicate to
// each of its elements and possibly removing that element from the
// list and applying some further closures to that element.
// We should consider the possibility of replacing these
// process_phase*() methods by abstracting them into
// a single general iterator invocation that receives appropriate
// closures that accomplish this work.
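//
// A minimal sketch of what such an abstraction might look like; the
// names below are hypothetical, not an existing API:
//
//   template <typename ShouldRemove, typename OnRemoval>
//   void iterate_discovered_list(DiscoveredList& refs_list,
//                                BoolObjectClosure* is_alive,
//                                OopClosure* keep_alive,
//                                VoidClosure* complete_gc,
//                                ShouldRemove should_remove,
//                                OnRemoval on_removal) {
//     DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
//     while (iter.has_next()) {
//       iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
//       if (should_remove(iter)) {
//         iter.remove();
//         on_removal(iter);  // e.g. make_referent_alive() or clear_referent()
//         iter.move_to_next();
//       } else {
//         iter.next();
//       }
//     }
//     complete_gc->do_void();  // close the newly reachable set
//   }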

// (SoftReferences only) Traverse the list and remove any SoftReferences whose
// referents are not alive, but that should be kept alive for policy reasons.
// Keep alive the transitive closure of all such referents.
void
ReferenceProcessor::process_phase1(DiscoveredList&    refs_list,
                                   ReferencePolicy*   policy,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc) {
  assert(policy != NULL, "Must have a non-NULL policy");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  // Decide which softly reachable refs should be kept alive.
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
    bool referent_is_dead = (iter.referent() != NULL) && !iter.is_referent_alive();
    if (referent_is_dead &&
        !policy->should_clear_reference(iter.obj(), _soft_ref_timestamp_clock)) {
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Dropping reference (" INTPTR_FORMAT ": %s"  ") by policy",
                               p2i(iter.obj()), iter.obj()->klass()->internal_name());
      }
      // Remove Reference object from list
      iter.remove();
      // keep the referent around
      iter.make_referent_alive();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Close the reachable set
  complete_gc->do_void();
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC) {
      gclog_or_tty->print_cr(" Dropped " SIZE_FORMAT " dead Refs out of " SIZE_FORMAT
        " discovered Refs by policy, from list " INTPTR_FORMAT,
        iter.removed(), iter.processed(), p2i(refs_list.head()));
    }
  )
}

// Traverse the list and remove any Refs that are not active, or
// whose referents are either alive or NULL.
void
ReferenceProcessor::pp2_work(DiscoveredList&    refs_list,
                             BoolObjectClosure* is_alive,
                             OopClosure*        keep_alive) {
  assert(discovery_is_atomic(), "Error");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
    DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());)
    assert(next == NULL, "Should not discover inactive Reference");
    if (iter.is_referent_alive()) {
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)",
                               p2i(iter.obj()), iter.obj()->klass()->internal_name());
      }
      // The referent is reachable after all.
      // Remove Reference object from list.
      iter.remove();
      // Update the referent pointer as necessary: Note that this
      // should not entail any recursive marking because the
      // referent must already have been traversed.
      iter.make_referent_alive();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC && (iter.processed() > 0)) {
      gclog_or_tty->print_cr(" Dropped " SIZE_FORMAT " active Refs out of " SIZE_FORMAT
        " Refs in discovered list " INTPTR_FORMAT,
        iter.removed(), iter.processed(), p2i(refs_list.head()));
    }
  )
}

void
ReferenceProcessor::pp2_work_concurrent_discovery(DiscoveredList&    refs_list,
                                                  BoolObjectClosure* is_alive,
                                                  OopClosure*        keep_alive,
                                                  VoidClosure*       complete_gc) {
  assert(!discovery_is_atomic(), "Error");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    HeapWord* next_addr = java_lang_ref_Reference::next_addr(iter.obj());
    oop next = java_lang_ref_Reference::next(iter.obj());
    if ((iter.referent() == NULL || iter.is_referent_alive() ||
         next != NULL)) {
      assert(next->is_oop_or_null(), "Expected an oop or NULL for next field at " PTR_FORMAT, p2i(next));
      // Remove Reference object from list
      iter.remove();
      // Trace the cohorts
      iter.make_referent_alive();
      if (UseCompressedOops) {
        keep_alive->do_oop((narrowOop*)next_addr);
      } else {
        keep_alive->do_oop((oop*)next_addr);
      }
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Now close the newly reachable set
  complete_gc->do_void();
  NOT_PRODUCT(
    if (PrintGCDetails && TraceReferenceGC && (iter.processed() > 0)) {
      gclog_or_tty->print_cr(" Dropped " SIZE_FORMAT " active Refs out of " SIZE_FORMAT
        " Refs in discovered list " INTPTR_FORMAT,
        iter.removed(), iter.processed(), p2i(refs_list.head()));
    }
  )
}

// Traverse the list and process the referents, by either
// clearing them or keeping them (and their reachable
// closure) alive.
void
ReferenceProcessor::process_phase3(DiscoveredList&    refs_list,
                                   bool               clear_referent,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc) {
  ResourceMark rm;
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
    if (clear_referent) {
      // NULL out referent pointer
      iter.clear_referent();
    } else {
      // keep the referent around
      iter.make_referent_alive();
    }
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Adding %sreference (" INTPTR_FORMAT ": %s) as pending",
                             clear_referent ? "cleared " : "",
                             p2i(iter.obj()), iter.obj()->klass()->internal_name());
    }
    assert(iter.obj()->is_oop(UseConcMarkSweepGC), "Adding a bad reference");
    iter.next();
  }
  // Close the reachable set
  complete_gc->do_void();
}

void
ReferenceProcessor::clear_discovered_references(DiscoveredList& refs_list) {
  oop obj = NULL;
  oop next = refs_list.head();
  while (next != obj) {
    obj = next;
    next = java_lang_ref_Reference::discovered(obj);
    java_lang_ref_Reference::set_discovered_raw(obj, NULL);
  }
  refs_list.set_head(NULL);
  refs_list.set_length(0);
}

void ReferenceProcessor::abandon_partial_discovery() {
  // loop over the lists
  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
      gclog_or_tty->print_cr("\nAbandoning %s discovered list", list_name(i));
    }
    clear_discovered_references(_discovered_refs[i]);
  }
}

class RefProcPhase1Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase1Task(ReferenceProcessor& ref_processor,
                    DiscoveredList      refs_lists[],
                    ReferencePolicy*    policy,
                    bool                marks_oops_alive)
    : ProcessTask(ref_processor, refs_lists, marks_oops_alive),
      _policy(policy)
  { }
  virtual void work(unsigned int i, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    Thread* thr = Thread::current();
    int refs_list_index = ((WorkerThread*)thr)->id();
    _ref_processor.process_phase1(_refs_lists[refs_list_index], _policy,
                                  &is_alive, &keep_alive, &complete_gc);
  }
private:
  ReferencePolicy* _policy;
};

class RefProcPhase2Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase2Task(ReferenceProcessor& ref_processor,
                    DiscoveredList      refs_lists[],
                    bool                marks_oops_alive)
    : ProcessTask(ref_processor, refs_lists, marks_oops_alive)
  { }
  virtual void work(unsigned int i, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    _ref_processor.process_phase2(_refs_lists[i],
                                  &is_alive, &keep_alive, &complete_gc);
  }
};

class RefProcPhase3Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase3Task(ReferenceProcessor& ref_processor,
                    DiscoveredList      refs_lists[],
                    bool                clear_referent,
                    bool                marks_oops_alive)
    : ProcessTask(ref_processor, refs_lists, marks_oops_alive),
      _clear_referent(clear_referent)
  { }
  virtual void work(unsigned int i, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    // Don't use "refs_list_index" calculated in this way because
    // balance_queues() has moved the Ref's into the first n queues.
    // Thread* thr = Thread::current();
    // int refs_list_index = ((WorkerThread*)thr)->id();
    // _ref_processor.process_phase3(_refs_lists[refs_list_index], _clear_referent,
    _ref_processor.process_phase3(_refs_lists[i], _clear_referent,
                                  &is_alive, &keep_alive, &complete_gc);
  }
private:
  bool _clear_referent;
};

// Balances reference queues.
// Move entries from all queues[0, 1, ..., _max_num_q-1] to
// queues[0, 1, ..., _num_q-1], because only the first _num_q
// queues, corresponding to the active workers, will be processed.
void ReferenceProcessor::balance_queues(DiscoveredList ref_lists[])
{
  // calculate total length
  size_t total_refs = 0;
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr("\nBalance ref_lists ");
  }

  for (uint i = 0; i < _max_num_q; ++i) {
    total_refs += ref_lists[i].length();
    if (TraceReferenceGC && PrintGCDetails) {
      gclog_or_tty->print(SIZE_FORMAT " ", ref_lists[i].length());
    }
  }
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr(" = " SIZE_FORMAT, total_refs);
  }
  size_t avg_refs = total_refs / _num_q + 1;
  uint to_idx = 0;
  for (uint from_idx = 0; from_idx < _max_num_q; from_idx++) {
    bool move_all = false;
    if (from_idx >= _num_q) {
      move_all = ref_lists[from_idx].length() > 0;
    }
    while ((ref_lists[from_idx].length() > avg_refs) ||
           move_all) {
      assert(to_idx < _num_q, "Sanity Check!");
      if (ref_lists[to_idx].length() < avg_refs) {
        // move superfluous refs
        size_t refs_to_move;
        // Move all the Ref's if the from queue will not be processed.
        if (move_all) {
          refs_to_move = MIN2(ref_lists[from_idx].length(),
                              avg_refs - ref_lists[to_idx].length());
        } else {
          refs_to_move = MIN2(ref_lists[from_idx].length() - avg_refs,
                              avg_refs - ref_lists[to_idx].length());
        }

        assert(refs_to_move > 0, "otherwise the code below will fail");

        oop move_head = ref_lists[from_idx].head();
        oop move_tail = move_head;
        oop new_head  = move_head;
        // find an element to split the list on
        for (size_t j = 0; j < refs_to_move; ++j) {
          move_tail = new_head;
          new_head = java_lang_ref_Reference::discovered(new_head);
        }

        // Add the chain to the to list.
        if (ref_lists[to_idx].head() == NULL) {
          // to list is empty. Make a loop at the end.
          java_lang_ref_Reference::set_discovered_raw(move_tail, move_tail);
        } else {
          java_lang_ref_Reference::set_discovered_raw(move_tail, ref_lists[to_idx].head());
        }
        ref_lists[to_idx].set_head(move_head);
        ref_lists[to_idx].inc_length(refs_to_move);

        // Remove the chain from the from list.
        if (move_tail == new_head) {
          // We found the end of the from list.
          ref_lists[from_idx].set_head(NULL);
        } else {
          ref_lists[from_idx].set_head(new_head);
        }
        ref_lists[from_idx].dec_length(refs_to_move);
        if (ref_lists[from_idx].length() == 0) {
          break;
        }
      } else {
        to_idx = (to_idx + 1) % _num_q;
      }
    }
  }
#ifdef ASSERT
  size_t balanced_total_refs = 0;
  for (uint i = 0; i < _max_num_q; ++i) {
    balanced_total_refs += ref_lists[i].length();
    if (TraceReferenceGC && PrintGCDetails) {
      gclog_or_tty->print(SIZE_FORMAT " ", ref_lists[i].length());
    }
  }
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr(" = " SIZE_FORMAT, balanced_total_refs);
    gclog_or_tty->flush();
  }
  assert(total_refs == balanced_total_refs, "Balancing was incomplete");
#endif
}

void ReferenceProcessor::balance_all_queues() {
  balance_queues(_discoveredSoftRefs);
  balance_queues(_discoveredWeakRefs);
  balance_queues(_discoveredFinalRefs);
  balance_queues(_discoveredPhantomRefs);
  balance_queues(_discoveredCleanerRefs);
}

void ReferenceProcessor::process_discovered_reflist(
  DiscoveredList               refs_lists[],
  ReferencePolicy*             policy,
  bool                         clear_referent,
  BoolObjectClosure*           is_alive,
  OopClosure*                  keep_alive,
  VoidClosure*                 complete_gc,
  AbstractRefProcTaskExecutor* task_executor)
{
  bool mt_processing = task_executor != NULL && _processing_is_mt;
  // If discovery used MT and a dynamic number of GC threads, then
  // the queues must be balanced for correctness if fewer than the
  // maximum number of queues were used. The number of queues used
  // during discovery may differ from the number to be used
  // for processing, so don't depend on _num_q < _max_num_q as part
  // of the test.
  bool must_balance = _discovery_is_mt;

  if ((mt_processing && ParallelRefProcBalancingEnabled) ||
      must_balance) {
    balance_queues(refs_lists);
  }

  // Phase 1 (soft refs only):
  // . Traverse the list and remove any SoftReferences whose
  //   referents are not alive, but that should be kept alive for
  //   policy reasons. Keep alive the transitive closure of all
  //   such referents.
  if (policy != NULL) {
    if (mt_processing) {
      RefProcPhase1Task phase1(*this, refs_lists, policy, true /*marks_oops_alive*/);
      task_executor->execute(phase1);
    } else {
      for (uint i = 0; i < _max_num_q; i++) {
        process_phase1(refs_lists[i], policy,
                       is_alive, keep_alive, complete_gc);
      }
    }
  } else { // policy == NULL
    assert(refs_lists != _discoveredSoftRefs,
           "Policy must be specified for soft references.");
  }

  // Phase 2:
  // . Traverse the list and remove any refs whose referents are alive.
  if (mt_processing) {
    RefProcPhase2Task phase2(*this, refs_lists, !discovery_is_atomic() /*marks_oops_alive*/);
    task_executor->execute(phase2);
  } else {
    for (uint i = 0; i < _max_num_q; i++) {
      process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc);
    }
  }

  // Phase 3:
  // . Traverse the list and process referents as appropriate.
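  //   ("As appropriate" == clear the referent when clear_referent is
  //   true, keep it alive otherwise. The callers above pass true for
  //   Soft, Weak and Cleaner references and false for Final and
  //   Phantom references.)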
  if (mt_processing) {
    RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/);
    task_executor->execute(phase3);
  } else {
    for (uint i = 0; i < _max_num_q; i++) {
      process_phase3(refs_lists[i], clear_referent,
                     is_alive, keep_alive, complete_gc);
    }
  }
}

inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt) {
  uint id = 0;
  // Determine the queue index to use for this object.
  if (_discovery_is_mt) {
    // During a multi-threaded discovery phase,
    // each thread saves to its "own" list.
    Thread* thr = Thread::current();
    id = thr->as_Worker_thread()->id();
  } else {
    // single-threaded discovery, we save in round-robin
    // fashion to each of the lists.
    if (_processing_is_mt) {
      id = next_id();
    }
  }
  assert(id < _max_num_q, "Id is out-of-bounds (call Freud?)");

  // Get the discovered queue to which we will add
  DiscoveredList* list = NULL;
  switch (rt) {
    case REF_OTHER:
      // Unknown reference type, no special treatment
      break;
    case REF_SOFT:
      list = &_discoveredSoftRefs[id];
      break;
    case REF_WEAK:
      list = &_discoveredWeakRefs[id];
      break;
    case REF_FINAL:
      list = &_discoveredFinalRefs[id];
      break;
    case REF_PHANTOM:
      list = &_discoveredPhantomRefs[id];
      break;
    case REF_CLEANER:
      list = &_discoveredCleanerRefs[id];
      break;
    case REF_NONE:
      // we should not reach here if we are an InstanceRefKlass
    default:
      ShouldNotReachHere();
  }
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr("Thread %d gets list " INTPTR_FORMAT, id, p2i(list));
  }
  return list;
}

inline void
ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
                                              oop             obj,
                                              HeapWord*       discovered_addr) {
  assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller");
  // First we must make sure this object is only enqueued once. CAS in a non null
  // discovered_addr.
  oop current_head = refs_list.head();
  // The last ref must have its discovered field pointing to itself.
  oop next_discovered = (current_head != NULL) ? current_head : obj;

  oop retest = oopDesc::atomic_compare_exchange_oop(next_discovered, discovered_addr,
                                                    NULL);
  if (retest == NULL) {
    // This thread just won the right to enqueue the object.
    // We have separate lists for enqueueing, so no synchronization
    // is necessary.
    refs_list.set_head(obj);
    refs_list.inc_length(1);

    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Discovered reference (mt) (" INTPTR_FORMAT ": %s)",
                             p2i(obj), obj->klass()->internal_name());
    }
  } else {
    // If retest was non NULL, another thread beat us to it:
    // The reference has already been discovered...
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Already discovered reference (" INTPTR_FORMAT ": %s)",
                             p2i(obj), obj->klass()->internal_name());
    }
  }
}

#ifndef PRODUCT
// Non-atomic (i.e. concurrent) discovery might allow us
// to observe j.l.References with NULL referents, being those
// cleared concurrently by mutators during (or after) discovery.
void ReferenceProcessor::verify_referent(oop obj) {
  bool da = discovery_is_atomic();
  oop referent = java_lang_ref_Reference::referent(obj);
  assert(da ? referent->is_oop() : referent->is_oop_or_null(),
         "Bad referent " INTPTR_FORMAT " found in Reference "
         INTPTR_FORMAT " during %satomic discovery ",
         p2i(referent), p2i(obj), da ? "" : "non-");
}
#endif

// We mention two of several possible choices here:
// #0: if the reference object is not in the "originating generation"
//     (or part of the heap being collected, indicated by our "span")
//     we don't treat it specially (i.e. we scan it as we would
//     a normal oop, treating its references as strong references).
//     This means that references can't be discovered unless their
//     referent is also in the same span. This is the simplest,
//     most "local" and most conservative approach, albeit one
//     that may cause weak references to be enqueued least promptly.
//     We call this choice the "ReferenceBasedDiscovery" policy.
// #1: the reference object may be in any generation (span), but if
//     the referent is in the generation (span) being currently collected
//     then we can discover the reference object, provided
//     the object has not already been discovered by
//     a different concurrently running collector (as may be the
//     case, for instance, if the reference object is in CMS and
//     the referent in DefNewGeneration), and provided the processing
//     of this reference object by the current collector will
//     appear atomic to every other collector in the system.
//     (Thus, for instance, a concurrent collector may not
//     discover references in other generations even if the
//     referent is in its own generation). This policy may,
//     in certain cases, enqueue references somewhat sooner than
//     might Policy #0 above, but at marginally increased cost
//     and complexity in processing these references.
//     We call this choice the "ReferentBasedDiscovery" policy.
bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
  // Make sure we are discovering refs (rather than processing discovered refs).
  if (!_discovering_refs || !RegisterReferences) {
    return false;
  }
  // We only discover active references.
  oop next = java_lang_ref_Reference::next(obj);
  if (next != NULL) {   // Ref is no longer active
    return false;
  }

  HeapWord* obj_addr = (HeapWord*)obj;
  if (RefDiscoveryPolicy == ReferenceBasedDiscovery &&
      !_span.contains(obj_addr)) {
    // Reference is not in the originating generation;
    // don't treat it specially (i.e. we want to scan it as a normal
    // object with strong references).
    return false;
  }

  // We only discover references whose referents are not (yet)
  // known to be strongly reachable.
  if (is_alive_non_header() != NULL) {
    verify_referent(obj);
    if (is_alive_non_header()->do_object_b(java_lang_ref_Reference::referent(obj))) {
      return false;  // referent is reachable
    }
  }
  if (rt == REF_SOFT) {
    // For soft refs we can decide now if these are not
    // current candidates for clearing, in which case we
    // can mark through them now, rather than delaying that
    // to the reference-processing phase. Since all current
    // time-stamp policies advance the soft-ref clock only
    // at a full collection cycle, this is always currently
    // accurate.
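    // (For instance, with the default LRU policies a softly reachable
    // referent is kept unless it has gone unreferenced for roughly
    // SoftRefLRUPolicyMSPerMB milliseconds per megabyte of free heap;
    // the exact heap measure depends on the policy chosen in
    // init_statics() above.)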
    if (!_current_soft_ref_policy->should_clear_reference(obj, _soft_ref_timestamp_clock)) {
      return false;
    }
  }

  ResourceMark rm;      // Needed for tracing.

  HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
  const oop  discovered = java_lang_ref_Reference::discovered(obj);
  assert(discovered->is_oop_or_null(), "Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered));
  if (discovered != NULL) {
    // The reference has already been discovered...
    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Already discovered reference (" INTPTR_FORMAT ": %s)",
                             p2i(obj), obj->klass()->internal_name());
    }
    if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
      // assumes that an object is not processed twice;
      // if it's been already discovered it must be on another
      // generation's discovered list; so we won't discover it.
      return false;
    } else {
      assert(RefDiscoveryPolicy == ReferenceBasedDiscovery,
             "Unrecognized policy");
      // Check assumption that an object is not potentially
      // discovered twice except by concurrent collectors that potentially
      // trace the same Reference object twice.
      assert(UseConcMarkSweepGC || UseG1GC,
             "Only possible with a concurrent marking collector");
      return true;
    }
  }

  if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
    verify_referent(obj);
    // Discover if and only if EITHER:
    // .. reference is in our span, OR
    // .. we are an atomic collector and referent is in our span
    if (_span.contains(obj_addr) ||
        (discovery_is_atomic() &&
         _span.contains(java_lang_ref_Reference::referent(obj)))) {
      // should_enqueue = true;
    } else {
      return false;
    }
  } else {
    assert(RefDiscoveryPolicy == ReferenceBasedDiscovery &&
           _span.contains(obj_addr), "code inconsistency");
  }

  // Get the right type of discovered queue head.
  DiscoveredList* list = get_discovered_list(rt);
  if (list == NULL) {
    return false;   // nothing special needs to be done
  }

  if (_discovery_is_mt) {
    add_to_discovered_list_mt(*list, obj, discovered_addr);
  } else {
    // We do a raw store here: the field will be visited later when processing
    // the discovered references.
    oop current_head = list->head();
    // The last ref must have its discovered field pointing to itself.
    oop next_discovered = (current_head != NULL) ? current_head : obj;

    assert(discovered == NULL, "control point invariant");
    oop_store_raw(discovered_addr, next_discovered);
    list->set_head(obj);
    list->inc_length(1);

    if (TraceReferenceGC) {
      gclog_or_tty->print_cr("Discovered reference (" INTPTR_FORMAT ": %s)",
                             p2i(obj), obj->klass()->internal_name());
    }
  }
  assert(obj->is_oop(), "Discovered a bad reference");
  verify_referent(obj);
  return true;
}

// Preclean the discovered references by removing those
// whose referents are alive, and by marking from those that
// are not active. These lists can be handled here
// in any order and, indeed, concurrently.
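// (This entry point is used by concurrent collectors such as CMS, which
// preclean while mutators are running; the YieldClosure lets the work
// be cut short when the collector needs to yield or abort.)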
void ReferenceProcessor::preclean_discovered_references(
  BoolObjectClosure* is_alive,
  OopClosure*        keep_alive,
  VoidClosure*       complete_gc,
  YieldClosure*      yield,
  GCTimer*           gc_timer) {

  // Soft references
  {
    GCTraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC,
                   false, gc_timer);
    for (uint i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredSoftRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Weak references
  {
    GCTraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC,
                   false, gc_timer);
    for (uint i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredWeakRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Final references
  {
    GCTraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC,
                   false, gc_timer);
    for (uint i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredFinalRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Phantom references
  {
    GCTraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC,
                   false, gc_timer);
    for (uint i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredPhantomRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }

    // Cleaner references. Included in timing for phantom references. We
    // expect Cleaner references to be temporary, and don't want to deal with
    // possible incompatibilities arising from making it more visible.
    for (uint i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredCleanerRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }
}

// Walk the given discovered ref list, and remove all reference objects
// whose referents are still alive, whose referents are NULL or which
// are not active (have a non-NULL next field). NOTE: When we are
// thus precleaning the ref lists (which happens single-threaded today),
// we do not disable refs discovery to honor the correct semantics of
// java.lang.Reference. As a result, we need to be careful below
// that ref removal steps interleave safely with ref discovery steps
// (in this thread).
void
ReferenceProcessor::preclean_discovered_reflist(DiscoveredList&    refs_list,
                                                BoolObjectClosure* is_alive,
                                                OopClosure*        keep_alive,
                                                VoidClosure*       complete_gc,
                                                YieldClosure*      yield) {
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    oop obj = iter.obj();
    oop next = java_lang_ref_Reference::next(obj);
    if (iter.referent() == NULL || iter.is_referent_alive() ||
        next != NULL) {
      // The referent has been cleared, or is alive, or the Reference is not
      // active; we need to trace and mark its cohort.
      if (TraceReferenceGC) {
        gclog_or_tty->print_cr("Precleaning Reference (" INTPTR_FORMAT ": %s)",
                               p2i(iter.obj()), iter.obj()->klass()->internal_name());
      }
      // Remove Reference object from list
      iter.remove();
      // Keep alive its cohort.
      iter.make_referent_alive();
      if (UseCompressedOops) {
        narrowOop* next_addr = (narrowOop*)java_lang_ref_Reference::next_addr(obj);
        keep_alive->do_oop(next_addr);
      } else {
        oop* next_addr = (oop*)java_lang_ref_Reference::next_addr(obj);
        keep_alive->do_oop(next_addr);
      }
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Close the reachable set
  complete_gc->do_void();

  NOT_PRODUCT(
    if (PrintGCDetails && PrintReferenceGC && (iter.processed() > 0)) {
      gclog_or_tty->print_cr(" Dropped " SIZE_FORMAT " Refs out of " SIZE_FORMAT
        " Refs in discovered list " INTPTR_FORMAT,
        iter.removed(), iter.processed(), p2i(refs_list.head()));
    }
  )
}

const char* ReferenceProcessor::list_name(uint i) {
  assert(i < _max_num_q * number_of_subclasses_of_ref(),
         "Out of bounds index");

  int j = i / _max_num_q;
  switch (j) {
    case 0: return "SoftRef";
    case 1: return "WeakRef";
    case 2: return "FinalRef";
    case 3: return "PhantomRef";
    case 4: return "CleanerRef";
  }
  ShouldNotReachHere();
  return NULL;
}