/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.inline.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/jniHandles.hpp"

ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy      = NULL;
jlong            ReferenceProcessor::_soft_ref_timestamp_clock     = 0;

void referenceProcessor_init() {
  ReferenceProcessor::init_statics();
}

void ReferenceProcessor::init_statics() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;

  // Initialize the soft ref timestamp clock.
  _soft_ref_timestamp_clock = now;
  // Also update the soft ref clock in j.l.r.SoftReference
  java_lang_ref_SoftReference::set_clock(_soft_ref_timestamp_clock);

  _always_clear_soft_ref_policy = new AlwaysClearPolicy();
#if defined(COMPILER2) || INCLUDE_JVMCI
  _default_soft_ref_policy      = new LRUMaxHeapPolicy();
#else
  _default_soft_ref_policy      = new LRUCurrentHeapPolicy();
#endif
  if (_always_clear_soft_ref_policy == NULL || _default_soft_ref_policy == NULL) {
    vm_exit_during_initialization("Could not allocate reference policy object");
  }
  guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery ||
            RefDiscoveryPolicy == ReferentBasedDiscovery,
            "Unrecognized RefDiscoveryPolicy");
}
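// A minimal sketch (illustration only, not part of this file) of how a
// timestamp-based SoftReference policy consults the clock initialized
// above. The real policies (LRUMaxHeapPolicy, LRUCurrentHeapPolicy,
// declared in referencePolicy.hpp) derive the allowed interval from free
// heap and SoftRefLRUPolicyMSPerMB; the helper name below is hypothetical:
//
//   bool should_clear_sketch(jlong timestamp, jlong clock, jlong interval_ms) {
//     // Clear the referent if it has gone unreferenced for longer than
//     // the interval; "clock - timestamp" is the idle time in ms.
//     return clock - timestamp > interval_ms;
//   }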
void ReferenceProcessor::enable_discovery(bool check_no_refs) {
#ifdef ASSERT
  // Verify that we're not currently discovering refs
  assert(!_discovering_refs, "nested call?");

  if (check_no_refs) {
    // Verify that the discovered lists are empty
    verify_no_references_recorded();
  }
#endif // ASSERT

  // Someone could have modified the value of the static
  // field in the j.l.r.SoftReference class that holds the
  // soft reference timestamp clock using reflection or
  // Unsafe between GCs. Unconditionally update the static
  // field in ReferenceProcessor here so that we use the new
  // value during reference discovery.

  _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock();
  _discovering_refs = true;
}

ReferenceProcessor::ReferenceProcessor(MemRegion span,
                                       bool      mt_processing,
                                       uint      mt_processing_degree,
                                       bool      mt_discovery,
                                       uint      mt_discovery_degree,
                                       bool      atomic_discovery,
                                       BoolObjectClosure* is_alive_non_header) :
  _discovering_refs(false),
  _enqueuing_is_done(false),
  _is_alive_non_header(is_alive_non_header),
  _processing_is_mt(mt_processing),
  _next_id(0)
{
  _span = span;
  _discovery_is_atomic = atomic_discovery;
  _discovery_is_mt     = mt_discovery;
  _num_q               = MAX2(1U, mt_processing_degree);
  _max_num_q           = MAX2(_num_q, mt_discovery_degree);
  _discovered_refs     = NEW_C_HEAP_ARRAY(DiscoveredList,
            _max_num_q * number_of_subclasses_of_ref(), mtGC);

  if (_discovered_refs == NULL) {
    vm_exit_during_initialization("Could not allocate RefProc Array");
  }
  _discoveredSoftRefs    = &_discovered_refs[0];
  _discoveredWeakRefs    = &_discoveredSoftRefs[_max_num_q];
  _discoveredFinalRefs   = &_discoveredWeakRefs[_max_num_q];
  _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_q];

  // Initialize all entries to NULL
  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    _discovered_refs[i].set_head(NULL);
    _discovered_refs[i].set_length(0);
  }

  setup_policy(false /* default soft ref policy */);
}

#ifndef PRODUCT
void ReferenceProcessor::verify_no_references_recorded() {
  guarantee(!_discovering_refs, "Discovering refs?");
  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    guarantee(_discovered_refs[i].is_empty(),
              "Found non-empty discovered list at %u", i);
  }
}
#endif

void ReferenceProcessor::weak_oops_do(OopClosure* f) {
  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    if (UseCompressedOops) {
      f->do_oop((narrowOop*)_discovered_refs[i].adr_head());
    } else {
      f->do_oop((oop*)_discovered_refs[i].adr_head());
    }
  }
}

void ReferenceProcessor::update_soft_ref_master_clock() {
  // Update (advance) the soft ref master clock field. This must be done
  // after processing the soft ref list.

  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  jlong soft_ref_clock = java_lang_ref_SoftReference::clock();
  assert(soft_ref_clock == _soft_ref_timestamp_clock, "soft ref clocks out of sync");

  NOT_PRODUCT(
  if (now < _soft_ref_timestamp_clock) {
    log_warning(gc)("time warp: " JLONG_FORMAT " to " JLONG_FORMAT,
                    _soft_ref_timestamp_clock, now);
  }
  )
  // The values of now and _soft_ref_timestamp_clock are set using
  // javaTimeNanos(), which is guaranteed to be monotonically
  // non-decreasing provided the underlying platform provides such
  // a time source (and it is bug free).
  // In product mode, however, protect ourselves from non-monotonicity.
  if (now > _soft_ref_timestamp_clock) {
    _soft_ref_timestamp_clock = now;
    java_lang_ref_SoftReference::set_clock(now);
  }
  // Else leave clock stalled at its old value until time progresses
  // past clock value.
}
size_t ReferenceProcessor::total_count(DiscoveredList lists[]) {
  size_t total = 0;
  for (uint i = 0; i < _max_num_q; ++i) {
    total += lists[i].length();
  }
  return total;
}

ReferenceProcessorStats ReferenceProcessor::process_discovered_references(
  BoolObjectClosure*           is_alive,
  OopClosure*                  keep_alive,
  VoidClosure*                 complete_gc,
  AbstractRefProcTaskExecutor* task_executor,
  GCTimer*                     gc_timer) {

  assert(!enqueuing_is_done(), "If here enqueuing should not be complete");
  // Stop treating discovered references specially.
  disable_discovery();

  // If discovery was concurrent, someone could have modified
  // the value of the static field in the j.l.r.SoftReference
  // class that holds the soft reference timestamp clock using
  // reflection or Unsafe between when discovery was enabled and
  // now. Unconditionally update the static field in ReferenceProcessor
  // here so that we use the new value during processing of the
  // discovered soft refs.

  _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock();

  ReferenceProcessorStats stats(
      total_count(_discoveredSoftRefs),
      total_count(_discoveredWeakRefs),
      total_count(_discoveredFinalRefs),
      total_count(_discoveredPhantomRefs));

  // Soft references
  {
    GCTraceTime(Debug, gc, ref) tt("SoftReference", gc_timer);
    process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  update_soft_ref_master_clock();

  // Weak references
  {
    GCTraceTime(Debug, gc, ref) tt("WeakReference", gc_timer);
    process_discovered_reflist(_discoveredWeakRefs, NULL, true,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  // Final references
  {
    GCTraceTime(Debug, gc, ref) tt("FinalReference", gc_timer);
    process_discovered_reflist(_discoveredFinalRefs, NULL, false,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  // Phantom references
  {
    GCTraceTime(Debug, gc, ref) tt("PhantomReference", gc_timer);
    process_discovered_reflist(_discoveredPhantomRefs, NULL, true,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  // Weak global JNI references. It would make more sense (semantically) to
  // traverse these simultaneously with the regular weak references above, but
  // that is not how the JDK 1.2 specification is written. See #4126360. Native code can
  // thus use JNI weak references to circumvent the phantom references and
  // resurrect a "post-mortem" object.
  {
    GCTraceTime(Debug, gc, ref) tt("JNI Weak Reference", gc_timer);
    if (task_executor != NULL) {
      task_executor->set_single_threaded_mode();
    }
    process_phaseJNI(is_alive, keep_alive, complete_gc);
  }

  log_debug(gc, ref)("Ref Counts: Soft: " SIZE_FORMAT " Weak: " SIZE_FORMAT " Final: " SIZE_FORMAT " Phantom: " SIZE_FORMAT,
                     stats.soft_count(), stats.weak_count(), stats.final_count(), stats.phantom_count());
  log_develop_trace(gc, ref)("JNI Weak Reference count: " SIZE_FORMAT, count_jni_refs());

  return stats;
}
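// Each process_discovered_reflist() call above runs the same three-phase
// pipeline, defined further down in this file:
//   Phase 1 (soft refs only): drop References whose dead referents the
//            policy wants kept alive, and mark those referents.
//   Phase 2: drop References whose referents are still alive.
//   Phase 3: clear the remaining referents, or keep them alive (final
//            refs pass clear_referent == false above).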
#ifndef PRODUCT
// Calculate the number of jni handles.
size_t ReferenceProcessor::count_jni_refs() {
  class CountHandleClosure: public OopClosure {
  private:
    size_t _count;
  public:
    CountHandleClosure(): _count(0) {}
    void do_oop(oop* unused)       { _count++; }
    void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
    size_t count() { return _count; }
  };
  CountHandleClosure global_handle_count;
  JNIHandles::weak_oops_do(&global_handle_count);
  return global_handle_count.count();
}
#endif

void ReferenceProcessor::process_phaseJNI(BoolObjectClosure* is_alive,
                                          OopClosure*        keep_alive,
                                          VoidClosure*       complete_gc) {
  JNIHandles::weak_oops_do(is_alive, keep_alive);
  complete_gc->do_void();
}


template <class T>
bool enqueue_discovered_ref_helper(ReferenceProcessor* ref,
                                   AbstractRefProcTaskExecutor* task_executor) {

  // Remember old value of pending references list
  T* pending_list_addr = (T*)java_lang_ref_Reference::pending_list_addr();
  T old_pending_list_value = *pending_list_addr;

  // Enqueue references that are not made active again, and
  // clear the decks for the next collection (cycle).
  ref->enqueue_discovered_reflists((HeapWord*)pending_list_addr, task_executor);
  // Do the post-barrier on pending_list_addr missed in
  // enqueue_discovered_reflist.
  oopDesc::bs()->write_ref_field(pending_list_addr, oopDesc::load_decode_heap_oop(pending_list_addr));

  // Stop treating discovered references specially.
  ref->disable_discovery();

  // Return true if new pending references were added
  return old_pending_list_value != *pending_list_addr;
}

bool ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor) {
  if (UseCompressedOops) {
    return enqueue_discovered_ref_helper<narrowOop>(this, task_executor);
  } else {
    return enqueue_discovered_ref_helper<oop>(this, task_executor);
  }
}

void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list,
                                                    HeapWord* pending_list_addr) {
  // Given a list of refs linked through the "discovered" field
  // (java.lang.ref.Reference.discovered), self-loop their "next" field
  // thus distinguishing them from active References, then
  // prepend them to the pending list.
  //
  // The Java threads will see the Reference objects linked together through
  // the discovered field. Instead of trying to do the write barrier updates
  // in all places in the reference processor where we manipulate the discovered
  // field we make sure to do the barrier here where we anyway iterate through
  // all linked Reference objects. Note that it is important to not dirty any
  // cards during reference processing since this will cause card table
  // verification to fail for G1.
  log_develop_trace(gc, ref)("ReferenceProcessor::enqueue_discovered_reflist list " INTPTR_FORMAT, p2i(&refs_list));

  oop obj = NULL;
  oop next_d = refs_list.head();
  // Walk down the list, self-looping the next field
  // so that the References are not considered active.
  while (obj != next_d) {
    obj = next_d;
    assert(obj->is_instance(), "should be an instance object");
    assert(InstanceKlass::cast(obj->klass())->is_reference_instance_klass(), "should be reference object");
    next_d = java_lang_ref_Reference::discovered(obj);
    log_develop_trace(gc, ref)("        obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT, p2i(obj), p2i(next_d));
    assert(java_lang_ref_Reference::next(obj) == NULL,
           "Reference not active; should not be discovered");
    // Self-loop next, so as to make Ref not active.
    java_lang_ref_Reference::set_next_raw(obj, obj);
    if (next_d != obj) {
      oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), next_d);
    } else {
      // This is the last object.
      // Swap refs_list into pending_list_addr and
      // set obj's discovered to what we read from pending_list_addr.
      oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
      // Need post-barrier on pending_list_addr. See enqueue_discovered_ref_helper() above.
      java_lang_ref_Reference::set_discovered_raw(obj, old); // old may be NULL
      oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), old);
    }
  }
}
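// The "next" field encodes Reference state (the convention this file
// asserts on): an active Reference has next == NULL, which is why only
// active refs may be discovered; the walk above self-loops next so the
// Reference is no longer considered active once handed to the pending
// list. A hypothetical helper showing the test the asserts rely on:
//
//   static bool is_active_sketch(oop ref) {
//     return java_lang_ref_Reference::next(ref) == NULL;
//   }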
// Parallel enqueue task
class RefProcEnqueueTask: public AbstractRefProcTaskExecutor::EnqueueTask {
public:
  RefProcEnqueueTask(ReferenceProcessor& ref_processor,
                     DiscoveredList      discovered_refs[],
                     HeapWord*           pending_list_addr,
                     int                 n_queues)
    : EnqueueTask(ref_processor, discovered_refs,
                  pending_list_addr, n_queues)
  { }

  virtual void work(unsigned int work_id) {
    assert(work_id < (unsigned int)_ref_processor.max_num_q(), "Index out-of-bounds");
    // Simplest first cut: static partitioning.
    int index = work_id;
    // The increment on "index" must correspond to the maximum number of queues
    // (n_queues) with which that ReferenceProcessor was created. That
    // is because of the "clever" way the discovered references lists were
    // allocated and are indexed into.
    assert(_n_queues == (int) _ref_processor.max_num_q(), "Different number not expected");
    for (int j = 0;
         j < ReferenceProcessor::number_of_subclasses_of_ref();
         j++, index += _n_queues) {
      _ref_processor.enqueue_discovered_reflist(
        _refs_lists[index], _pending_list_addr);
      _refs_lists[index].set_head(NULL);
      _refs_lists[index].set_length(0);
    }
  }
};
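// The "clever" indexing referred to in work() above comes from the flat
// allocation of _discovered_refs in the constructor: one contiguous array
// holding _max_num_q lists per j.l.r.Reference subclass, laid out as
//
//   index:  [0 .. _max_num_q)               _discoveredSoftRefs
//           [_max_num_q .. 2*_max_num_q)    _discoveredWeakRefs
//           [2*_max_num_q .. 3*_max_num_q)  _discoveredFinalRefs
//           [3*_max_num_q .. 4*_max_num_q)  _discoveredPhantomRefs
//
// so queue q of subclass j lives at index j * _max_num_q + q, which is
// why work() strides by _n_queues and list_name() divides by _max_num_q.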
// Enqueue references that are not made active again
void ReferenceProcessor::enqueue_discovered_reflists(HeapWord* pending_list_addr,
                                                     AbstractRefProcTaskExecutor* task_executor) {
  if (_processing_is_mt && task_executor != NULL) {
    // Parallel code
    RefProcEnqueueTask tsk(*this, _discovered_refs,
                           pending_list_addr, _max_num_q);
    task_executor->execute(tsk);
  } else {
    // Serial code: call the parent class's implementation
    for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
      enqueue_discovered_reflist(_discovered_refs[i], pending_list_addr);
      _discovered_refs[i].set_head(NULL);
      _discovered_refs[i].set_length(0);
    }
  }
}

void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
  _discovered_addr = java_lang_ref_Reference::discovered_addr(_ref);
  oop discovered = java_lang_ref_Reference::discovered(_ref);
  assert(_discovered_addr && discovered->is_oop_or_null(),
         "Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered));
  _next = discovered;
  _referent_addr = java_lang_ref_Reference::referent_addr(_ref);
  _referent = java_lang_ref_Reference::referent(_ref);
  assert(Universe::heap()->is_in_reserved_or_null(_referent),
         "Wrong oop found in java.lang.Reference object");
  assert(allow_null_referent ?
             _referent->is_oop_or_null()
           : _referent->is_oop(),
         "Expected an oop%s for referent field at " PTR_FORMAT,
         (allow_null_referent ? " or NULL" : ""),
         p2i(_referent));
}

void DiscoveredListIterator::remove() {
  assert(_ref->is_oop(), "Dropping a bad reference");
  oop_store_raw(_discovered_addr, NULL);

  // First _prev_next ref actually points into DiscoveredList (gross).
  oop new_next;
  if (_next == _ref) {
    // At the end of the list, we should make _prev point to itself.
    // If _ref is the first ref, then _prev_next will be in the DiscoveredList,
    // and _prev will be NULL.
    new_next = _prev;
  } else {
    new_next = _next;
  }
  // Remove Reference object from discovered list. Note that G1 does not need a
  // pre-barrier here because we know the Reference has already been found/marked,
  // that's how it ended up in the discovered list in the first place.
  oop_store_raw(_prev_next, new_next);
  NOT_PRODUCT(_removed++);
  _refs_list.dec_length(1);
}

void DiscoveredListIterator::clear_referent() {
  oop_store_raw(_referent_addr, NULL);
}

// NOTE: process_phase*() are largely similar, and at a high level
// merely iterate over the extant list applying a predicate to
// each of its elements and possibly removing that element from the
// list and applying some further closures to that element.
// We should consider the possibility of replacing these
// process_phase*() methods by abstracting them into
// a single general iterator invocation that receives appropriate
// closures that accomplish this work.
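// A sketch of the splice DiscoveredListIterator::remove() above performs.
// The lists are singly linked through the "discovered" field and the last
// element self-loops, so given
//
//   head -> A -> B -> C -> ...   (iterator at B; _prev_next is A's
//                                 discovered field, _next is C)
//
// removing B stores C into A's discovered field and NULLs B's own
// discovered field; when B is the self-looped tail, A's discovered field
// is pointed back at A (_prev), making A the new tail. _prev_next points
// into the DiscoveredList header itself when B is the first element.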
// (SoftReferences only) Traverse the list and remove any SoftReferences whose
// referents are not alive, but that should be kept alive for policy reasons.
// Keep alive the transitive closure of all such referents.
void
ReferenceProcessor::process_phase1(DiscoveredList&    refs_list,
                                   ReferencePolicy*   policy,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc) {
  assert(policy != NULL, "Must have a non-NULL policy");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  // Decide which softly reachable refs should be kept alive.
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
    bool referent_is_dead = (iter.referent() != NULL) && !iter.is_referent_alive();
    if (referent_is_dead &&
        !policy->should_clear_reference(iter.obj(), _soft_ref_timestamp_clock)) {
      log_develop_trace(gc, ref)("Dropping reference (" INTPTR_FORMAT ": %s" ") by policy",
                                 p2i(iter.obj()), iter.obj()->klass()->internal_name());
      // Remove Reference object from list
      iter.remove();
      // keep the referent around
      iter.make_referent_alive();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Close the reachable set
  complete_gc->do_void();
  log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " dead Refs out of " SIZE_FORMAT " discovered Refs by policy, from list " INTPTR_FORMAT,
                             iter.removed(), iter.processed(), p2i(&refs_list));
}

// Traverse the list and remove any Refs that are not active, or
// whose referents are either alive or NULL.
void
ReferenceProcessor::pp2_work(DiscoveredList&    refs_list,
                             BoolObjectClosure* is_alive,
                             OopClosure*        keep_alive) {
  assert(discovery_is_atomic(), "Error");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
    DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());)
    assert(next == NULL, "Should not discover inactive Reference");
    if (iter.is_referent_alive()) {
      log_develop_trace(gc, ref)("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)",
                                 p2i(iter.obj()), iter.obj()->klass()->internal_name());
      // The referent is reachable after all.
      // Remove Reference object from list.
      iter.remove();
      // Update the referent pointer as necessary: Note that this
      // should not entail any recursive marking because the
      // referent must already have been traversed.
      iter.make_referent_alive();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  NOT_PRODUCT(
    if (iter.processed() > 0) {
      log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " active Refs out of " SIZE_FORMAT
        " Refs in discovered list " INTPTR_FORMAT,
        iter.removed(), iter.processed(), p2i(&refs_list));
    }
  )
}
void
ReferenceProcessor::pp2_work_concurrent_discovery(DiscoveredList&    refs_list,
                                                  BoolObjectClosure* is_alive,
                                                  OopClosure*        keep_alive,
                                                  VoidClosure*       complete_gc) {
  assert(!discovery_is_atomic(), "Error");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    HeapWord* next_addr = java_lang_ref_Reference::next_addr(iter.obj());
    oop next = java_lang_ref_Reference::next(iter.obj());
    if ((iter.referent() == NULL || iter.is_referent_alive() ||
         next != NULL)) {
      assert(next->is_oop_or_null(), "Expected an oop or NULL for next field at " PTR_FORMAT, p2i(next));
      // Remove Reference object from list
      iter.remove();
      // Trace the cohorts
      iter.make_referent_alive();
      if (UseCompressedOops) {
        keep_alive->do_oop((narrowOop*)next_addr);
      } else {
        keep_alive->do_oop((oop*)next_addr);
      }
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Now close the newly reachable set
  complete_gc->do_void();
  NOT_PRODUCT(
    if (iter.processed() > 0) {
      log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " active Refs out of " SIZE_FORMAT
        " Refs in discovered list " INTPTR_FORMAT,
        iter.removed(), iter.processed(), p2i(&refs_list));
    }
  )
}

// Traverse the list and process the referents, by either
// clearing them or keeping them (and their reachable
// closure) alive.
void
ReferenceProcessor::process_phase3(DiscoveredList&    refs_list,
                                   bool               clear_referent,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc) {
  ResourceMark rm;
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
    if (clear_referent) {
      // NULL out referent pointer
      iter.clear_referent();
    } else {
      // keep the referent around
      iter.make_referent_alive();
    }
    log_develop_trace(gc, ref)("Adding %sreference (" INTPTR_FORMAT ": %s) as pending",
                               clear_referent ? "cleared " : "", p2i(iter.obj()), iter.obj()->klass()->internal_name());
    assert(iter.obj()->is_oop(UseConcMarkSweepGC), "Adding a bad reference");
    iter.next();
  }
  // Close the reachable set
  complete_gc->do_void();
}

void
ReferenceProcessor::clear_discovered_references(DiscoveredList& refs_list) {
  oop obj = NULL;
  oop next = refs_list.head();
  while (next != obj) {
    obj = next;
    next = java_lang_ref_Reference::discovered(obj);
    java_lang_ref_Reference::set_discovered_raw(obj, NULL);
  }
  refs_list.set_head(NULL);
  refs_list.set_length(0);
}

void ReferenceProcessor::abandon_partial_discovery() {
  // loop over the lists
  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    if ((i % _max_num_q) == 0) {
      log_develop_trace(gc, ref)("Abandoning %s discovered list", list_name(i));
    }
    clear_discovered_references(_discovered_refs[i]);
  }
}

class RefProcPhase1Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase1Task(ReferenceProcessor& ref_processor,
                    DiscoveredList      refs_lists[],
                    ReferencePolicy*    policy,
                    bool                marks_oops_alive)
    : ProcessTask(ref_processor, refs_lists, marks_oops_alive),
      _policy(policy)
  { }
  virtual void work(unsigned int i, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    _ref_processor.process_phase1(_refs_lists[i], _policy,
                                  &is_alive, &keep_alive, &complete_gc);
  }
private:
  ReferencePolicy* _policy;
};

class RefProcPhase2Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase2Task(ReferenceProcessor& ref_processor,
                    DiscoveredList      refs_lists[],
                    bool                marks_oops_alive)
    : ProcessTask(ref_processor, refs_lists, marks_oops_alive)
  { }
  virtual void work(unsigned int i, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    _ref_processor.process_phase2(_refs_lists[i],
                                  &is_alive, &keep_alive, &complete_gc);
  }
};

class RefProcPhase3Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase3Task(ReferenceProcessor& ref_processor,
                    DiscoveredList      refs_lists[],
                    bool                clear_referent,
                    bool                marks_oops_alive)
    : ProcessTask(ref_processor, refs_lists, marks_oops_alive),
      _clear_referent(clear_referent)
  { }
  virtual void work(unsigned int i, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    _ref_processor.process_phase3(_refs_lists[i], _clear_referent,
                                  &is_alive, &keep_alive, &complete_gc);
  }
private:
  bool _clear_referent;
};

#ifndef PRODUCT
void ReferenceProcessor::log_reflist_counts(DiscoveredList ref_lists[], uint active_length, size_t total_refs) {
  if (!log_is_enabled(Trace, gc, ref)) {
    return;
  }

  stringStream st;
  for (uint i = 0; i < active_length; ++i) {
    st.print(SIZE_FORMAT " ", ref_lists[i].length());
  }
  log_develop_trace(gc, ref)("%s= " SIZE_FORMAT, st.as_string(), total_refs);
#ifdef ASSERT
  for (uint i = active_length; i < _max_num_q; i++) {
    assert(ref_lists[i].length() == 0, SIZE_FORMAT " unexpected References in %u",
           ref_lists[i].length(), i);
  }
#endif
}
#endif

void ReferenceProcessor::set_active_mt_degree(uint v) {
  _num_q = v;
  _next_id = 0;
}
// Balances reference queues.
// Move entries from all queues[0, 1, ..., _max_num_q-1] to
// queues[0, 1, ..., _num_q-1] because only the first _num_q
// queues, corresponding to the active workers, will be processed.
void ReferenceProcessor::balance_queues(DiscoveredList ref_lists[])
{
  // calculate total length
  size_t total_refs = 0;
  log_develop_trace(gc, ref)("Balance ref_lists ");

  for (uint i = 0; i < _max_num_q; ++i) {
    total_refs += ref_lists[i].length();
  }
  log_reflist_counts(ref_lists, _max_num_q, total_refs);
  size_t avg_refs = total_refs / _num_q + 1;
  uint to_idx = 0;
  for (uint from_idx = 0; from_idx < _max_num_q; from_idx++) {
    bool move_all = false;
    if (from_idx >= _num_q) {
      move_all = ref_lists[from_idx].length() > 0;
    }
    while ((ref_lists[from_idx].length() > avg_refs) ||
           move_all) {
      assert(to_idx < _num_q, "Sanity Check!");
      if (ref_lists[to_idx].length() < avg_refs) {
        // move superfluous refs
        size_t refs_to_move;
        // Move all the Ref's if the from queue will not be processed.
        if (move_all) {
          refs_to_move = MIN2(ref_lists[from_idx].length(),
                              avg_refs - ref_lists[to_idx].length());
        } else {
          refs_to_move = MIN2(ref_lists[from_idx].length() - avg_refs,
                              avg_refs - ref_lists[to_idx].length());
        }

        assert(refs_to_move > 0, "otherwise the code below will fail");

        oop move_head = ref_lists[from_idx].head();
        oop move_tail = move_head;
        oop new_head  = move_head;
        // find an element to split the list on
        for (size_t j = 0; j < refs_to_move; ++j) {
          move_tail = new_head;
          new_head = java_lang_ref_Reference::discovered(new_head);
        }

        // Add the chain to the to list.
        if (ref_lists[to_idx].head() == NULL) {
          // to list is empty. Make a loop at the end.
          java_lang_ref_Reference::set_discovered_raw(move_tail, move_tail);
        } else {
          java_lang_ref_Reference::set_discovered_raw(move_tail, ref_lists[to_idx].head());
        }
        ref_lists[to_idx].set_head(move_head);
        ref_lists[to_idx].inc_length(refs_to_move);

        // Remove the chain from the from list.
        if (move_tail == new_head) {
          // We found the end of the from list.
          ref_lists[from_idx].set_head(NULL);
        } else {
          ref_lists[from_idx].set_head(new_head);
        }
        ref_lists[from_idx].dec_length(refs_to_move);
        if (ref_lists[from_idx].length() == 0) {
          break;
        }
      } else {
        to_idx = (to_idx + 1) % _num_q;
      }
    }
  }
#ifdef ASSERT
  size_t balanced_total_refs = 0;
  for (uint i = 0; i < _num_q; ++i) {
    balanced_total_refs += ref_lists[i].length();
  }
  log_reflist_counts(ref_lists, _num_q, balanced_total_refs);
  assert(total_refs == balanced_total_refs, "Balancing was incomplete");
#endif
}
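// Worked example for balance_queues() above (hypothetical numbers): with
// _max_num_q == 4, _num_q == 2 and list lengths {8, 2, 4, 2}, total_refs
// is 16 and avg_refs == 16/2 + 1 == 9. Queues 0 and 1 are already at or
// below avg_refs, so nothing moves out of them; queues 2 and 3 are beyond
// _num_q, so move_all drains them into queues 0 and 1 in chunks bounded
// by avg_refs, ending at {9, 7, 0, 0} with the total preserved.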
void ReferenceProcessor::balance_all_queues() {
  balance_queues(_discoveredSoftRefs);
  balance_queues(_discoveredWeakRefs);
  balance_queues(_discoveredFinalRefs);
  balance_queues(_discoveredPhantomRefs);
}

void ReferenceProcessor::process_discovered_reflist(
  DiscoveredList               refs_lists[],
  ReferencePolicy*             policy,
  bool                         clear_referent,
  BoolObjectClosure*           is_alive,
  OopClosure*                  keep_alive,
  VoidClosure*                 complete_gc,
  AbstractRefProcTaskExecutor* task_executor)
{
  bool mt_processing = task_executor != NULL && _processing_is_mt;
  // If discovery used MT and a dynamic number of GC threads, then
  // the queues must be balanced for correctness if fewer than the
  // maximum number of queues were used. The number of queues used
  // during discovery may be different from the number to be used
  // for processing, so don't depend on _num_q < _max_num_q as part
  // of the test.
  bool must_balance = _discovery_is_mt;

  if ((mt_processing && ParallelRefProcBalancingEnabled) ||
      must_balance) {
    balance_queues(refs_lists);
  }

  // Phase 1 (soft refs only):
  // . Traverse the list and remove any SoftReferences whose
  //   referents are not alive, but that should be kept alive for
  //   policy reasons. Keep alive the transitive closure of all
  //   such referents.
  if (policy != NULL) {
    if (mt_processing) {
      RefProcPhase1Task phase1(*this, refs_lists, policy, true /*marks_oops_alive*/);
      task_executor->execute(phase1);
    } else {
      for (uint i = 0; i < _max_num_q; i++) {
        process_phase1(refs_lists[i], policy,
                       is_alive, keep_alive, complete_gc);
      }
    }
  } else { // policy == NULL
    assert(refs_lists != _discoveredSoftRefs,
           "Policy must be specified for soft references.");
  }

  // Phase 2:
  // . Traverse the list and remove any refs whose referents are alive.
  if (mt_processing) {
    RefProcPhase2Task phase2(*this, refs_lists, !discovery_is_atomic() /*marks_oops_alive*/);
    task_executor->execute(phase2);
  } else {
    for (uint i = 0; i < _max_num_q; i++) {
      process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc);
    }
  }

  // Phase 3:
  // . Traverse the list and process referents as appropriate.
  if (mt_processing) {
    RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/);
    task_executor->execute(phase3);
  } else {
    for (uint i = 0; i < _max_num_q; i++) {
      process_phase3(refs_lists[i], clear_referent,
                     is_alive, keep_alive, complete_gc);
    }
  }
}

inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt) {
  uint id = 0;
  // Determine the queue index to use for this object.
  if (_discovery_is_mt) {
    // During a multi-threaded discovery phase,
    // each thread saves to its "own" list.
    Thread* thr = Thread::current();
    id = thr->as_Worker_thread()->id();
  } else {
    // single-threaded discovery, we save in round-robin
    // fashion to each of the lists.
    if (_processing_is_mt) {
      id = next_id();
    }
  }
  assert(id < _max_num_q, "Id is out-of-bounds: id %u, max id %u", id, _max_num_q);

  // Get the discovered queue to which we will add
  DiscoveredList* list = NULL;
  switch (rt) {
    case REF_OTHER:
      // Unknown reference type, no special treatment
      break;
    case REF_SOFT:
      list = &_discoveredSoftRefs[id];
      break;
    case REF_WEAK:
      list = &_discoveredWeakRefs[id];
      break;
    case REF_FINAL:
      list = &_discoveredFinalRefs[id];
      break;
    case REF_PHANTOM:
      list = &_discoveredPhantomRefs[id];
      break;
    case REF_NONE:
      // we should not reach here if we are an InstanceRefKlass
    default:
      ShouldNotReachHere();
  }
  log_develop_trace(gc, ref)("Thread %d gets list " INTPTR_FORMAT, id, p2i(list));
  return list;
}
inline void
ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
                                              oop             obj,
                                              HeapWord*       discovered_addr) {
  assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller");
  // First we must make sure this object is only enqueued once. CAS in a non null
  // discovered_addr.
  oop current_head = refs_list.head();
  // The last ref must have its discovered field pointing to itself.
  oop next_discovered = (current_head != NULL) ? current_head : obj;

  oop retest = oopDesc::atomic_compare_exchange_oop(next_discovered, discovered_addr,
                                                    NULL);
  if (retest == NULL) {
    // This thread just won the right to enqueue the object.
    // We have separate lists for enqueueing, so no synchronization
    // is necessary.
    refs_list.set_head(obj);
    refs_list.inc_length(1);

    log_develop_trace(gc, ref)("Discovered reference (mt) (" INTPTR_FORMAT ": %s)",
                               p2i(obj), obj->klass()->internal_name());
  } else {
    // If retest was non NULL, another thread beat us to it:
    // The reference has already been discovered...
    log_develop_trace(gc, ref)("Already discovered reference (" INTPTR_FORMAT ": %s)",
                               p2i(obj), obj->klass()->internal_name());
  }
}

#ifndef PRODUCT
// Non-atomic (i.e. concurrent) discovery might allow us
// to observe j.l.References with NULL referents, being those
// cleared concurrently by mutators during (or after) discovery.
void ReferenceProcessor::verify_referent(oop obj) {
  bool da = discovery_is_atomic();
  oop referent = java_lang_ref_Reference::referent(obj);
  assert(da ? referent->is_oop() : referent->is_oop_or_null(),
         "Bad referent " INTPTR_FORMAT " found in Reference "
         INTPTR_FORMAT " during %satomic discovery ",
         p2i(referent), p2i(obj), da ? "" : "non-");
}
#endif
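// A sketch of the claim protocol add_to_discovered_list_mt() above relies
// on: concurrent discoverers race to CAS the Reference's discovered field
// from NULL to a non-NULL value (the current list head, or the object
// itself to form the self-looped tail). Exactly one thread sees NULL come
// back from the CAS, and only that thread prepends the object to its own
// list, so the per-thread lists need no further synchronization.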
// We mention two of several possible choices here:
// #0: if the reference object is not in the "originating generation"
//     (or part of the heap being collected, indicated by our "span"),
//     we don't treat it specially (i.e. we scan it as we would
//     a normal oop, treating its references as strong references).
//     This means that references can't be discovered unless their
//     referent is also in the same span. This is the simplest,
//     most "local" and most conservative approach, albeit one
//     that may cause weak references to be enqueued least promptly.
//     We call this choice the "ReferenceBasedDiscovery" policy.
// #1: the reference object may be in any generation (span), but if
//     the referent is in the generation (span) being currently collected
//     then we can discover the reference object, provided
//     the object has not already been discovered by
//     a different concurrently running collector (as may be the
//     case, for instance, if the reference object is in CMS and
//     the referent in DefNewGeneration), and provided the processing
//     of this reference object by the current collector will
//     appear atomic to every other collector in the system.
//     (Thus, for instance, a concurrent collector may not
//     discover references in other generations even if the
//     referent is in its own generation). This policy may,
//     in certain cases, enqueue references somewhat sooner than
//     might Policy #0 above, but at marginally increased cost
//     and complexity in processing these references.
//     We call this choice the "ReferentBasedDiscovery" policy.
bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
  // Make sure we are discovering refs (rather than processing discovered refs).
  if (!_discovering_refs || !RegisterReferences) {
    return false;
  }
  // We only discover active references.
  oop next = java_lang_ref_Reference::next(obj);
  if (next != NULL) {   // Ref is no longer active
    return false;
  }

  HeapWord* obj_addr = (HeapWord*)obj;
  if (RefDiscoveryPolicy == ReferenceBasedDiscovery &&
      !_span.contains(obj_addr)) {
    // Reference is not in the originating generation;
    // don't treat it specially (i.e. we want to scan it as a normal
    // object with strong references).
    return false;
  }

  // We only discover references whose referents are not (yet)
  // known to be strongly reachable.
  if (is_alive_non_header() != NULL) {
    verify_referent(obj);
    if (is_alive_non_header()->do_object_b(java_lang_ref_Reference::referent(obj))) {
      return false;  // referent is reachable
    }
  }
  if (rt == REF_SOFT) {
    // For soft refs we can decide now if these are not
    // current candidates for clearing, in which case we
    // can mark through them now, rather than delaying that
    // to the reference-processing phase. Since all current
    // time-stamp policies advance the soft-ref clock only
    // at a full collection cycle, this is always currently
    // accurate.
    if (!_current_soft_ref_policy->should_clear_reference(obj, _soft_ref_timestamp_clock)) {
      return false;
    }
  }

  ResourceMark rm;      // Needed for tracing.

  HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
  const oop discovered = java_lang_ref_Reference::discovered(obj);
  assert(discovered->is_oop_or_null(), "Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered));
  if (discovered != NULL) {
    // The reference has already been discovered...
    log_develop_trace(gc, ref)("Already discovered reference (" INTPTR_FORMAT ": %s)",
                               p2i(obj), obj->klass()->internal_name());
    if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
      // assumes that an object is not processed twice;
      // if it's been already discovered it must be on another
      // generation's discovered list; so we won't discover it.
      return false;
    } else {
      assert(RefDiscoveryPolicy == ReferenceBasedDiscovery,
             "Unrecognized policy");
      // Check assumption that an object is not potentially
      // discovered twice except by concurrent collectors that potentially
      // trace the same Reference object twice.
      assert(UseConcMarkSweepGC || UseG1GC,
             "Only possible with a concurrent marking collector");
      return true;
    }
  }

  if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
    verify_referent(obj);
    // Discover if and only if EITHER:
    // .. reference is in our span, OR
    // .. we are an atomic collector and referent is in our span
    if (_span.contains(obj_addr) ||
        (discovery_is_atomic() &&
         _span.contains(java_lang_ref_Reference::referent(obj)))) {
      // should_enqueue = true;
    } else {
      return false;
    }
  } else {
    assert(RefDiscoveryPolicy == ReferenceBasedDiscovery &&
           _span.contains(obj_addr), "code inconsistency");
  }

  // Get the right type of discovered queue head.
  DiscoveredList* list = get_discovered_list(rt);
  if (list == NULL) {
    return false;   // nothing special needs to be done
  }

  if (_discovery_is_mt) {
    add_to_discovered_list_mt(*list, obj, discovered_addr);
  } else {
    // We do a raw store here: the field will be visited later when processing
    // the discovered references.
    oop current_head = list->head();
    // The last ref must have its discovered field pointing to itself.
    oop next_discovered = (current_head != NULL) ? current_head : obj;

    assert(discovered == NULL, "control point invariant");
    oop_store_raw(discovered_addr, next_discovered);
    list->set_head(obj);
    list->inc_length(1);

    log_develop_trace(gc, ref)("Discovered reference (" INTPTR_FORMAT ": %s)", p2i(obj), obj->klass()->internal_name());
  }
  assert(obj->is_oop(), "Discovered a bad reference");
  verify_referent(obj);
  return true;
}
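// Summary of the early-out structure of discover_reference() above:
//   1. not discovering, or the Reference is no longer active   -> false
//   2. ReferenceBasedDiscovery and obj outside _span           -> false
//   3. referent already known to be strongly reachable         -> false
//   4. soft ref that the policy would not clear this cycle     -> false
//   5. already on some discovered list -> false (referent-based policy)
//                                         or true (reference-based policy)
//   6. otherwise, link obj onto the matching per-type queue    -> true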
// Preclean the discovered references by removing those
// whose referents are alive, and by marking from those that
// are not active. These lists can be handled here
// in any order and, indeed, concurrently.
void ReferenceProcessor::preclean_discovered_references(
  BoolObjectClosure* is_alive,
  OopClosure* keep_alive,
  VoidClosure* complete_gc,
  YieldClosure* yield,
  GCTimer* gc_timer) {

  // Soft references
  {
    GCTraceTime(Debug, gc, ref) tm("Preclean SoftReferences", gc_timer);
    for (uint i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredSoftRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Weak references
  {
    GCTraceTime(Debug, gc, ref) tm("Preclean WeakReferences", gc_timer);
    for (uint i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredWeakRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Final references
  {
    GCTraceTime(Debug, gc, ref) tm("Preclean FinalReferences", gc_timer);
    for (uint i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredFinalRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Phantom references
  {
    GCTraceTime(Debug, gc, ref) tm("Preclean PhantomReferences", gc_timer);
    for (uint i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredPhantomRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }
}
// Walk the given discovered ref list, and remove all reference objects
// whose referents are still alive, whose referents are NULL or which
// are not active (have a non-NULL next field). NOTE: When we are
// thus precleaning the ref lists (which happens single-threaded today),
// we do not disable refs discovery to honor the correct semantics of
// java.lang.Reference. As a result, we need to be careful below
// that ref removal steps interleave safely with ref discovery steps
// (in this thread).
void
ReferenceProcessor::preclean_discovered_reflist(DiscoveredList&    refs_list,
                                                BoolObjectClosure* is_alive,
                                                OopClosure*        keep_alive,
                                                VoidClosure*       complete_gc,
                                                YieldClosure*      yield) {
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    oop obj = iter.obj();
    oop next = java_lang_ref_Reference::next(obj);
    if (iter.referent() == NULL || iter.is_referent_alive() ||
        next != NULL) {
      // The referent has been cleared, or is alive, or the Reference is not
      // active; we need to trace and mark its cohort.
      log_develop_trace(gc, ref)("Precleaning Reference (" INTPTR_FORMAT ": %s)",
                                 p2i(iter.obj()), iter.obj()->klass()->internal_name());
      // Remove Reference object from list
      iter.remove();
      // Keep alive its cohort.
      iter.make_referent_alive();
      if (UseCompressedOops) {
        narrowOop* next_addr = (narrowOop*)java_lang_ref_Reference::next_addr(obj);
        keep_alive->do_oop(next_addr);
      } else {
        oop* next_addr = (oop*)java_lang_ref_Reference::next_addr(obj);
        keep_alive->do_oop(next_addr);
      }
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Close the reachable set
  complete_gc->do_void();

  NOT_PRODUCT(
    if (iter.processed() > 0) {
      log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " Refs out of " SIZE_FORMAT " Refs in discovered list " INTPTR_FORMAT,
        iter.removed(), iter.processed(), p2i(&refs_list));
    }
  )
}
const char* ReferenceProcessor::list_name(uint i) {
  assert(i < _max_num_q * number_of_subclasses_of_ref(),
         "Out of bounds index");

  int j = i / _max_num_q;
  switch (j) {
    case 0: return "SoftRef";
    case 1: return "WeakRef";
    case 2: return "FinalRef";
    case 3: return "PhantomRef";
  }
  ShouldNotReachHere();
  return NULL;
}