1 /* 2 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "classfile/javaClasses.inline.hpp" 27 #include "classfile/systemDictionary.hpp" 28 #include "gc/shared/collectedHeap.hpp" 29 #include "gc/shared/collectedHeap.inline.hpp" 30 #include "gc/shared/gcTimer.hpp" 31 #include "gc/shared/gcTraceTime.inline.hpp" 32 #include "gc/shared/referencePolicy.hpp" 33 #include "gc/shared/referenceProcessor.inline.hpp" 34 #include "logging/log.hpp" 35 #include "memory/allocation.hpp" 36 #include "oops/oop.inline.hpp" 37 #include "runtime/java.hpp" 38 #include "runtime/jniHandles.hpp" 39 40 ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL; 41 ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy = NULL; 42 jlong ReferenceProcessor::_soft_ref_timestamp_clock = 0; 43 44 void referenceProcessor_init() { 45 ReferenceProcessor::init_statics(); 46 } 47 48 void ReferenceProcessor::init_statics() { 49 // We need a monotonically non-decreasing time in ms but 50 // os::javaTimeMillis() does not guarantee monotonicity. 51 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; 52 53 // Initialize the soft ref timestamp clock. 54 _soft_ref_timestamp_clock = now; 55 // Also update the soft ref clock in j.l.r.SoftReference 56 java_lang_ref_SoftReference::set_clock(_soft_ref_timestamp_clock); 57 58 _always_clear_soft_ref_policy = new AlwaysClearPolicy(); 59 #if defined(COMPILER2) || INCLUDE_JVMCI 60 _default_soft_ref_policy = new LRUMaxHeapPolicy(); 61 #else 62 _default_soft_ref_policy = new LRUCurrentHeapPolicy(); 63 #endif 64 if (_always_clear_soft_ref_policy == NULL || _default_soft_ref_policy == NULL) { 65 vm_exit_during_initialization("Could not allocate reference policy object"); 66 } 67 guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery || 68 RefDiscoveryPolicy == ReferentBasedDiscovery, 69 "Unrecognized RefDiscoveryPolicy"); 70 } 71 72 void ReferenceProcessor::enable_discovery(bool check_no_refs) { 73 #ifdef ASSERT 74 // Verify that we're not currently discovering refs 75 assert(!_discovering_refs, "nested call?"); 76 77 if (check_no_refs) { 78 // Verify that the discovered lists are empty 79 verify_no_references_recorded(); 80 } 81 #endif // ASSERT 82 83 // Someone could have modified the value of the static 84 // field in the j.l.r.SoftReference class that holds the 85 // soft reference timestamp clock using reflection or 86 // Unsafe between GCs. Unconditionally update the static 87 // field in ReferenceProcessor here so that we use the new 88 // value during reference discovery. 89 90 _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock(); 91 _discovering_refs = true; 92 } 93 94 ReferenceProcessor::ReferenceProcessor(MemRegion span, 95 bool mt_processing, 96 uint mt_processing_degree, 97 bool mt_discovery, 98 uint mt_discovery_degree, 99 bool atomic_discovery, 100 BoolObjectClosure* is_alive_non_header) : 101 _discovering_refs(false), 102 _enqueuing_is_done(false), 103 _is_alive_non_header(is_alive_non_header), 104 _processing_is_mt(mt_processing), 105 _next_id(0) 106 { 107 _span = span; 108 _discovery_is_atomic = atomic_discovery; 109 _discovery_is_mt = mt_discovery; 110 _num_q = MAX2(1U, mt_processing_degree); 111 _max_num_q = MAX2(_num_q, mt_discovery_degree); 112 _discovered_refs = NEW_C_HEAP_ARRAY(DiscoveredList, 113 _max_num_q * number_of_subclasses_of_ref(), mtGC); 114 115 if (_discovered_refs == NULL) { 116 vm_exit_during_initialization("Could not allocated RefProc Array"); 117 } 118 _discoveredSoftRefs = &_discovered_refs[0]; 119 _discoveredWeakRefs = &_discoveredSoftRefs[_max_num_q]; 120 _discoveredFinalRefs = &_discoveredWeakRefs[_max_num_q]; 121 _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_q]; 122 123 // Initialize all entries to NULL 124 for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) { 125 _discovered_refs[i].set_head(NULL); 126 _discovered_refs[i].set_length(0); 127 } 128 129 setup_policy(false /* default soft ref policy */); 130 } 131 132 #ifndef PRODUCT 133 void ReferenceProcessor::verify_no_references_recorded() { 134 guarantee(!_discovering_refs, "Discovering refs?"); 135 for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) { 136 guarantee(_discovered_refs[i].is_empty(), 137 "Found non-empty discovered list"); 138 } 139 } 140 #endif 141 142 void ReferenceProcessor::weak_oops_do(OopClosure* f) { 143 for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) { 144 if (UseCompressedOops) { 145 f->do_oop((narrowOop*)_discovered_refs[i].adr_head()); 146 } else { 147 f->do_oop((oop*)_discovered_refs[i].adr_head()); 148 } 149 } 150 } 151 152 void ReferenceProcessor::update_soft_ref_master_clock() { 153 // Update (advance) the soft ref master clock field. This must be done 154 // after processing the soft ref list. 155 156 // We need a monotonically non-decreasing time in ms but 157 // os::javaTimeMillis() does not guarantee monotonicity. 158 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; 159 jlong soft_ref_clock = java_lang_ref_SoftReference::clock(); 160 assert(soft_ref_clock == _soft_ref_timestamp_clock, "soft ref clocks out of sync"); 161 162 NOT_PRODUCT( 163 if (now < _soft_ref_timestamp_clock) { 164 log_warning(gc)("time warp: " JLONG_FORMAT " to " JLONG_FORMAT, 165 _soft_ref_timestamp_clock, now); 166 } 167 ) 168 // The values of now and _soft_ref_timestamp_clock are set using 169 // javaTimeNanos(), which is guaranteed to be monotonically 170 // non-decreasing provided the underlying platform provides such 171 // a time source (and it is bug free). 172 // In product mode, however, protect ourselves from non-monotonicity. 173 if (now > _soft_ref_timestamp_clock) { 174 _soft_ref_timestamp_clock = now; 175 java_lang_ref_SoftReference::set_clock(now); 176 } 177 // Else leave clock stalled at its old value until time progresses 178 // past clock value. 179 } 180 181 size_t ReferenceProcessor::total_count(DiscoveredList lists[]) { 182 size_t total = 0; 183 for (uint i = 0; i < _max_num_q; ++i) { 184 total += lists[i].length(); 185 } 186 return total; 187 } 188 189 ReferenceProcessorStats ReferenceProcessor::process_discovered_references( 190 BoolObjectClosure* is_alive, 191 OopClosure* keep_alive, 192 VoidClosure* complete_gc, 193 AbstractRefProcTaskExecutor* task_executor, 194 GCTimer* gc_timer) { 195 196 assert(!enqueuing_is_done(), "If here enqueuing should not be complete"); 197 // Stop treating discovered references specially. 198 disable_discovery(); 199 200 // If discovery was concurrent, someone could have modified 201 // the value of the static field in the j.l.r.SoftReference 202 // class that holds the soft reference timestamp clock using 203 // reflection or Unsafe between when discovery was enabled and 204 // now. Unconditionally update the static field in ReferenceProcessor 205 // here so that we use the new value during processing of the 206 // discovered soft refs. 207 208 _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock(); 209 210 ReferenceProcessorStats stats( 211 total_count(_discoveredSoftRefs), 212 total_count(_discoveredWeakRefs), 213 total_count(_discoveredFinalRefs), 214 total_count(_discoveredPhantomRefs)); 215 216 // Soft references 217 { 218 GCTraceTime(Debug, gc, ref) tt("SoftReference", gc_timer); 219 process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true, 220 is_alive, keep_alive, complete_gc, task_executor); 221 } 222 223 update_soft_ref_master_clock(); 224 225 // Weak references 226 { 227 GCTraceTime(Debug, gc, ref) tt("WeakReference", gc_timer); 228 process_discovered_reflist(_discoveredWeakRefs, NULL, true, 229 is_alive, keep_alive, complete_gc, task_executor); 230 } 231 232 // Final references 233 { 234 GCTraceTime(Debug, gc, ref) tt("FinalReference", gc_timer); 235 process_discovered_reflist(_discoveredFinalRefs, NULL, false, 236 is_alive, keep_alive, complete_gc, task_executor); 237 } 238 239 // Phantom references 240 { 241 GCTraceTime(Debug, gc, ref) tt("PhantomReference", gc_timer); 242 process_discovered_reflist(_discoveredPhantomRefs, NULL, true, 243 is_alive, keep_alive, complete_gc, task_executor); 244 } 245 246 // Weak global JNI references. It would make more sense (semantically) to 247 // traverse these simultaneously with the regular weak references above, but 248 // that is not how the JDK1.2 specification is. See #4126360. Native code can 249 // thus use JNI weak references to circumvent the phantom references and 250 // resurrect a "post-mortem" object. 251 { 252 GCTraceTime(Debug, gc, ref) tt("JNI Weak Reference", gc_timer); 253 if (task_executor != NULL) { 254 task_executor->set_single_threaded_mode(); 255 } 256 process_phaseJNI(is_alive, keep_alive, complete_gc); 257 } 258 259 log_debug(gc, ref)("Ref Counts: Soft: " SIZE_FORMAT " Weak: " SIZE_FORMAT " Final: " SIZE_FORMAT " Phantom: " SIZE_FORMAT, 260 stats.soft_count(), stats.weak_count(), stats.final_count(), stats.phantom_count()); 261 log_develop_trace(gc, ref)("JNI Weak Reference count: " SIZE_FORMAT, count_jni_refs()); 262 263 return stats; 264 } 265 266 #ifndef PRODUCT 267 // Calculate the number of jni handles. 268 size_t ReferenceProcessor::count_jni_refs() { 269 class AlwaysAliveClosure: public BoolObjectClosure { 270 public: 271 virtual bool do_object_b(oop obj) { return true; } 272 }; 273 274 class CountHandleClosure: public OopClosure { 275 private: 276 size_t _count; 277 public: 278 CountHandleClosure(): _count(0) {} 279 void do_oop(oop* unused) { _count++; } 280 void do_oop(narrowOop* unused) { ShouldNotReachHere(); } 281 size_t count() { return _count; } 282 }; 283 CountHandleClosure global_handle_count; 284 AlwaysAliveClosure always_alive; 285 JNIHandles::weak_oops_do(&always_alive, &global_handle_count); 286 return global_handle_count.count(); 287 } 288 #endif 289 290 void ReferenceProcessor::process_phaseJNI(BoolObjectClosure* is_alive, 291 OopClosure* keep_alive, 292 VoidClosure* complete_gc) { 293 JNIHandles::weak_oops_do(is_alive, keep_alive); 294 complete_gc->do_void(); 295 } 296 297 298 template <class T> 299 bool enqueue_discovered_ref_helper(ReferenceProcessor* ref, 300 AbstractRefProcTaskExecutor* task_executor) { 301 302 // Remember old value of pending references list 303 T* pending_list_addr = (T*)java_lang_ref_Reference::pending_list_addr(); 304 T old_pending_list_value = *pending_list_addr; 305 306 // Enqueue references that are not made active again, and 307 // clear the decks for the next collection (cycle). 308 ref->enqueue_discovered_reflists((HeapWord*)pending_list_addr, task_executor); 309 // Do the post-barrier on pending_list_addr missed in 310 // enqueue_discovered_reflist. 311 oopDesc::bs()->write_ref_field(pending_list_addr, oopDesc::load_decode_heap_oop(pending_list_addr)); 312 313 // Stop treating discovered references specially. 314 ref->disable_discovery(); 315 316 // Return true if new pending references were added 317 return old_pending_list_value != *pending_list_addr; 318 } 319 320 bool ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor) { 321 if (UseCompressedOops) { 322 return enqueue_discovered_ref_helper<narrowOop>(this, task_executor); 323 } else { 324 return enqueue_discovered_ref_helper<oop>(this, task_executor); 325 } 326 } 327 328 void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list, 329 HeapWord* pending_list_addr) { 330 // Given a list of refs linked through the "discovered" field 331 // (java.lang.ref.Reference.discovered), self-loop their "next" field 332 // thus distinguishing them from active References, then 333 // prepend them to the pending list. 334 // 335 // The Java threads will see the Reference objects linked together through 336 // the discovered field. Instead of trying to do the write barrier updates 337 // in all places in the reference processor where we manipulate the discovered 338 // field we make sure to do the barrier here where we anyway iterate through 339 // all linked Reference objects. Note that it is important to not dirty any 340 // cards during reference processing since this will cause card table 341 // verification to fail for G1. 342 log_develop_trace(gc, ref)("ReferenceProcessor::enqueue_discovered_reflist list " INTPTR_FORMAT, p2i(&refs_list)); 343 344 oop obj = NULL; 345 oop next_d = refs_list.head(); 346 // Walk down the list, self-looping the next field 347 // so that the References are not considered active. 348 while (obj != next_d) { 349 obj = next_d; 350 assert(obj->is_instance(), "should be an instance object"); 351 assert(InstanceKlass::cast(obj->klass())->is_reference_instance_klass(), "should be reference object"); 352 next_d = java_lang_ref_Reference::discovered(obj); 353 log_develop_trace(gc, ref)(" obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT, p2i(obj), p2i(next_d)); 354 assert(java_lang_ref_Reference::next(obj) == NULL, 355 "Reference not active; should not be discovered"); 356 // Self-loop next, so as to make Ref not active. 357 java_lang_ref_Reference::set_next_raw(obj, obj); 358 if (next_d != obj) { 359 oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), next_d); 360 } else { 361 // This is the last object. 362 // Swap refs_list into pending_list_addr and 363 // set obj's discovered to what we read from pending_list_addr. 364 oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr); 365 // Need post-barrier on pending_list_addr. See enqueue_discovered_ref_helper() above. 366 java_lang_ref_Reference::set_discovered_raw(obj, old); // old may be NULL 367 oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), old); 368 } 369 } 370 } 371 372 // Parallel enqueue task 373 class RefProcEnqueueTask: public AbstractRefProcTaskExecutor::EnqueueTask { 374 public: 375 RefProcEnqueueTask(ReferenceProcessor& ref_processor, 376 DiscoveredList discovered_refs[], 377 HeapWord* pending_list_addr, 378 int n_queues) 379 : EnqueueTask(ref_processor, discovered_refs, 380 pending_list_addr, n_queues) 381 { } 382 383 virtual void work(unsigned int work_id) { 384 assert(work_id < (unsigned int)_ref_processor.max_num_q(), "Index out-of-bounds"); 385 // Simplest first cut: static partitioning. 386 int index = work_id; 387 // The increment on "index" must correspond to the maximum number of queues 388 // (n_queues) with which that ReferenceProcessor was created. That 389 // is because of the "clever" way the discovered references lists were 390 // allocated and are indexed into. 391 assert(_n_queues == (int) _ref_processor.max_num_q(), "Different number not expected"); 392 for (int j = 0; 393 j < ReferenceProcessor::number_of_subclasses_of_ref(); 394 j++, index += _n_queues) { 395 _ref_processor.enqueue_discovered_reflist( 396 _refs_lists[index], _pending_list_addr); 397 _refs_lists[index].set_head(NULL); 398 _refs_lists[index].set_length(0); 399 } 400 } 401 }; 402 403 // Enqueue references that are not made active again 404 void ReferenceProcessor::enqueue_discovered_reflists(HeapWord* pending_list_addr, 405 AbstractRefProcTaskExecutor* task_executor) { 406 if (_processing_is_mt && task_executor != NULL) { 407 // Parallel code 408 RefProcEnqueueTask tsk(*this, _discovered_refs, 409 pending_list_addr, _max_num_q); 410 task_executor->execute(tsk); 411 } else { 412 // Serial code: call the parent class's implementation 413 for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) { 414 enqueue_discovered_reflist(_discovered_refs[i], pending_list_addr); 415 _discovered_refs[i].set_head(NULL); 416 _discovered_refs[i].set_length(0); 417 } 418 } 419 } 420 421 void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) { 422 _discovered_addr = java_lang_ref_Reference::discovered_addr(_ref); 423 oop discovered = java_lang_ref_Reference::discovered(_ref); 424 assert(_discovered_addr && discovered->is_oop_or_null(), 425 "Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered)); 426 _next = discovered; 427 _referent_addr = java_lang_ref_Reference::referent_addr(_ref); 428 _referent = java_lang_ref_Reference::referent(_ref); 429 assert(Universe::heap()->is_in_reserved_or_null(_referent), 430 "Wrong oop found in java.lang.Reference object"); 431 assert(allow_null_referent ? 432 _referent->is_oop_or_null() 433 : _referent->is_oop(), 434 "Expected an oop%s for referent field at " PTR_FORMAT, 435 (allow_null_referent ? " or NULL" : ""), 436 p2i(_referent)); 437 } 438 439 void DiscoveredListIterator::remove() { 440 assert(_ref->is_oop(), "Dropping a bad reference"); 441 oop_store_raw(_discovered_addr, NULL); 442 443 // First _prev_next ref actually points into DiscoveredList (gross). 444 oop new_next; 445 if (_next == _ref) { 446 // At the end of the list, we should make _prev point to itself. 447 // If _ref is the first ref, then _prev_next will be in the DiscoveredList, 448 // and _prev will be NULL. 449 new_next = _prev; 450 } else { 451 new_next = _next; 452 } 453 // Remove Reference object from discovered list. Note that G1 does not need a 454 // pre-barrier here because we know the Reference has already been found/marked, 455 // that's how it ended up in the discovered list in the first place. 456 oop_store_raw(_prev_next, new_next); 457 NOT_PRODUCT(_removed++); 458 _refs_list.dec_length(1); 459 } 460 461 void DiscoveredListIterator::clear_referent() { 462 oop_store_raw(_referent_addr, NULL); 463 } 464 465 // NOTE: process_phase*() are largely similar, and at a high level 466 // merely iterate over the extant list applying a predicate to 467 // each of its elements and possibly removing that element from the 468 // list and applying some further closures to that element. 469 // We should consider the possibility of replacing these 470 // process_phase*() methods by abstracting them into 471 // a single general iterator invocation that receives appropriate 472 // closures that accomplish this work. 473 474 // (SoftReferences only) Traverse the list and remove any SoftReferences whose 475 // referents are not alive, but that should be kept alive for policy reasons. 476 // Keep alive the transitive closure of all such referents. 477 void 478 ReferenceProcessor::process_phase1(DiscoveredList& refs_list, 479 ReferencePolicy* policy, 480 BoolObjectClosure* is_alive, 481 OopClosure* keep_alive, 482 VoidClosure* complete_gc) { 483 assert(policy != NULL, "Must have a non-NULL policy"); 484 DiscoveredListIterator iter(refs_list, keep_alive, is_alive); 485 // Decide which softly reachable refs should be kept alive. 486 while (iter.has_next()) { 487 iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */)); 488 bool referent_is_dead = (iter.referent() != NULL) && !iter.is_referent_alive(); 489 if (referent_is_dead && 490 !policy->should_clear_reference(iter.obj(), _soft_ref_timestamp_clock)) { 491 log_develop_trace(gc, ref)("Dropping reference (" INTPTR_FORMAT ": %s" ") by policy", 492 p2i(iter.obj()), iter.obj()->klass()->internal_name()); 493 // Remove Reference object from list 494 iter.remove(); 495 // keep the referent around 496 iter.make_referent_alive(); 497 iter.move_to_next(); 498 } else { 499 iter.next(); 500 } 501 } 502 // Close the reachable set 503 complete_gc->do_void(); 504 log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " dead Refs out of " SIZE_FORMAT " discovered Refs by policy, from list " INTPTR_FORMAT, 505 iter.removed(), iter.processed(), p2i(&refs_list)); 506 } 507 508 // Traverse the list and remove any Refs that are not active, or 509 // whose referents are either alive or NULL. 510 void 511 ReferenceProcessor::pp2_work(DiscoveredList& refs_list, 512 BoolObjectClosure* is_alive, 513 OopClosure* keep_alive) { 514 assert(discovery_is_atomic(), "Error"); 515 DiscoveredListIterator iter(refs_list, keep_alive, is_alive); 516 while (iter.has_next()) { 517 iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */)); 518 DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());) 519 assert(next == NULL, "Should not discover inactive Reference"); 520 if (iter.is_referent_alive()) { 521 log_develop_trace(gc, ref)("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)", 522 p2i(iter.obj()), iter.obj()->klass()->internal_name()); 523 // The referent is reachable after all. 524 // Remove Reference object from list. 525 iter.remove(); 526 // Update the referent pointer as necessary: Note that this 527 // should not entail any recursive marking because the 528 // referent must already have been traversed. 529 iter.make_referent_alive(); 530 iter.move_to_next(); 531 } else { 532 iter.next(); 533 } 534 } 535 NOT_PRODUCT( 536 if (iter.processed() > 0) { 537 log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " active Refs out of " SIZE_FORMAT 538 " Refs in discovered list " INTPTR_FORMAT, 539 iter.removed(), iter.processed(), p2i(&refs_list)); 540 } 541 ) 542 } 543 544 void 545 ReferenceProcessor::pp2_work_concurrent_discovery(DiscoveredList& refs_list, 546 BoolObjectClosure* is_alive, 547 OopClosure* keep_alive, 548 VoidClosure* complete_gc) { 549 assert(!discovery_is_atomic(), "Error"); 550 DiscoveredListIterator iter(refs_list, keep_alive, is_alive); 551 while (iter.has_next()) { 552 iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */)); 553 HeapWord* next_addr = java_lang_ref_Reference::next_addr(iter.obj()); 554 oop next = java_lang_ref_Reference::next(iter.obj()); 555 if ((iter.referent() == NULL || iter.is_referent_alive() || 556 next != NULL)) { 557 assert(next->is_oop_or_null(), "Expected an oop or NULL for next field at " PTR_FORMAT, p2i(next)); 558 // Remove Reference object from list 559 iter.remove(); 560 // Trace the cohorts 561 iter.make_referent_alive(); 562 if (UseCompressedOops) { 563 keep_alive->do_oop((narrowOop*)next_addr); 564 } else { 565 keep_alive->do_oop((oop*)next_addr); 566 } 567 iter.move_to_next(); 568 } else { 569 iter.next(); 570 } 571 } 572 // Now close the newly reachable set 573 complete_gc->do_void(); 574 NOT_PRODUCT( 575 if (iter.processed() > 0) { 576 log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " active Refs out of " SIZE_FORMAT 577 " Refs in discovered list " INTPTR_FORMAT, 578 iter.removed(), iter.processed(), p2i(&refs_list)); 579 } 580 ) 581 } 582 583 // Traverse the list and process the referents, by either 584 // clearing them or keeping them (and their reachable 585 // closure) alive. 586 void 587 ReferenceProcessor::process_phase3(DiscoveredList& refs_list, 588 bool clear_referent, 589 BoolObjectClosure* is_alive, 590 OopClosure* keep_alive, 591 VoidClosure* complete_gc) { 592 ResourceMark rm; 593 DiscoveredListIterator iter(refs_list, keep_alive, is_alive); 594 while (iter.has_next()) { 595 iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */)); 596 if (clear_referent) { 597 // NULL out referent pointer 598 iter.clear_referent(); 599 } else { 600 // keep the referent around 601 iter.make_referent_alive(); 602 } 603 log_develop_trace(gc, ref)("Adding %sreference (" INTPTR_FORMAT ": %s) as pending", 604 clear_referent ? "cleared " : "", p2i(iter.obj()), iter.obj()->klass()->internal_name()); 605 assert(iter.obj()->is_oop(UseConcMarkSweepGC), "Adding a bad reference"); 606 iter.next(); 607 } 608 // Close the reachable set 609 complete_gc->do_void(); 610 } 611 612 void 613 ReferenceProcessor::clear_discovered_references(DiscoveredList& refs_list) { 614 oop obj = NULL; 615 oop next = refs_list.head(); 616 while (next != obj) { 617 obj = next; 618 next = java_lang_ref_Reference::discovered(obj); 619 java_lang_ref_Reference::set_discovered_raw(obj, NULL); 620 } 621 refs_list.set_head(NULL); 622 refs_list.set_length(0); 623 } 624 625 void ReferenceProcessor::abandon_partial_discovery() { 626 // loop over the lists 627 for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) { 628 if ((i % _max_num_q) == 0) { 629 log_develop_trace(gc, ref)("Abandoning %s discovered list", list_name(i)); 630 } 631 clear_discovered_references(_discovered_refs[i]); 632 } 633 } 634 635 class RefProcPhase1Task: public AbstractRefProcTaskExecutor::ProcessTask { 636 public: 637 RefProcPhase1Task(ReferenceProcessor& ref_processor, 638 DiscoveredList refs_lists[], 639 ReferencePolicy* policy, 640 bool marks_oops_alive) 641 : ProcessTask(ref_processor, refs_lists, marks_oops_alive), 642 _policy(policy) 643 { } 644 virtual void work(unsigned int i, BoolObjectClosure& is_alive, 645 OopClosure& keep_alive, 646 VoidClosure& complete_gc) 647 { 648 _ref_processor.process_phase1(_refs_lists[i], _policy, 649 &is_alive, &keep_alive, &complete_gc); 650 } 651 private: 652 ReferencePolicy* _policy; 653 }; 654 655 class RefProcPhase2Task: public AbstractRefProcTaskExecutor::ProcessTask { 656 public: 657 RefProcPhase2Task(ReferenceProcessor& ref_processor, 658 DiscoveredList refs_lists[], 659 bool marks_oops_alive) 660 : ProcessTask(ref_processor, refs_lists, marks_oops_alive) 661 { } 662 virtual void work(unsigned int i, BoolObjectClosure& is_alive, 663 OopClosure& keep_alive, 664 VoidClosure& complete_gc) 665 { 666 _ref_processor.process_phase2(_refs_lists[i], 667 &is_alive, &keep_alive, &complete_gc); 668 } 669 }; 670 671 class RefProcPhase3Task: public AbstractRefProcTaskExecutor::ProcessTask { 672 public: 673 RefProcPhase3Task(ReferenceProcessor& ref_processor, 674 DiscoveredList refs_lists[], 675 bool clear_referent, 676 bool marks_oops_alive) 677 : ProcessTask(ref_processor, refs_lists, marks_oops_alive), 678 _clear_referent(clear_referent) 679 { } 680 virtual void work(unsigned int i, BoolObjectClosure& is_alive, 681 OopClosure& keep_alive, 682 VoidClosure& complete_gc) 683 { 684 _ref_processor.process_phase3(_refs_lists[i], _clear_referent, 685 &is_alive, &keep_alive, &complete_gc); 686 } 687 private: 688 bool _clear_referent; 689 }; 690 691 #ifndef PRODUCT 692 void ReferenceProcessor::log_reflist_counts(DiscoveredList ref_lists[], size_t total_refs) { 693 if (!log_is_enabled(Trace, gc, ref)) { 694 return; 695 } 696 697 stringStream st; 698 for (uint i = 0; i < _max_num_q; ++i) { 699 st.print(SIZE_FORMAT " ", ref_lists[i].length()); 700 } 701 log_develop_trace(gc, ref)("%s= " SIZE_FORMAT, st.as_string(), total_refs); 702 } 703 #endif 704 705 // Balances reference queues. 706 // Move entries from all queues[0, 1, ..., _max_num_q-1] to 707 // queues[0, 1, ..., _num_q-1] because only the first _num_q 708 // corresponding to the active workers will be processed. 709 void ReferenceProcessor::balance_queues(DiscoveredList ref_lists[]) 710 { 711 // calculate total length 712 size_t total_refs = 0; 713 log_develop_trace(gc, ref)("Balance ref_lists "); 714 715 for (uint i = 0; i < _max_num_q; ++i) { 716 total_refs += ref_lists[i].length(); 717 } 718 log_reflist_counts(ref_lists, total_refs); 719 size_t avg_refs = total_refs / _num_q + 1; 720 uint to_idx = 0; 721 for (uint from_idx = 0; from_idx < _max_num_q; from_idx++) { 722 bool move_all = false; 723 if (from_idx >= _num_q) { 724 move_all = ref_lists[from_idx].length() > 0; 725 } 726 while ((ref_lists[from_idx].length() > avg_refs) || 727 move_all) { 728 assert(to_idx < _num_q, "Sanity Check!"); 729 if (ref_lists[to_idx].length() < avg_refs) { 730 // move superfluous refs 731 size_t refs_to_move; 732 // Move all the Ref's if the from queue will not be processed. 733 if (move_all) { 734 refs_to_move = MIN2(ref_lists[from_idx].length(), 735 avg_refs - ref_lists[to_idx].length()); 736 } else { 737 refs_to_move = MIN2(ref_lists[from_idx].length() - avg_refs, 738 avg_refs - ref_lists[to_idx].length()); 739 } 740 741 assert(refs_to_move > 0, "otherwise the code below will fail"); 742 743 oop move_head = ref_lists[from_idx].head(); 744 oop move_tail = move_head; 745 oop new_head = move_head; 746 // find an element to split the list on 747 for (size_t j = 0; j < refs_to_move; ++j) { 748 move_tail = new_head; 749 new_head = java_lang_ref_Reference::discovered(new_head); 750 } 751 752 // Add the chain to the to list. 753 if (ref_lists[to_idx].head() == NULL) { 754 // to list is empty. Make a loop at the end. 755 java_lang_ref_Reference::set_discovered_raw(move_tail, move_tail); 756 } else { 757 java_lang_ref_Reference::set_discovered_raw(move_tail, ref_lists[to_idx].head()); 758 } 759 ref_lists[to_idx].set_head(move_head); 760 ref_lists[to_idx].inc_length(refs_to_move); 761 762 // Remove the chain from the from list. 763 if (move_tail == new_head) { 764 // We found the end of the from list. 765 ref_lists[from_idx].set_head(NULL); 766 } else { 767 ref_lists[from_idx].set_head(new_head); 768 } 769 ref_lists[from_idx].dec_length(refs_to_move); 770 if (ref_lists[from_idx].length() == 0) { 771 break; 772 } 773 } else { 774 to_idx = (to_idx + 1) % _num_q; 775 } 776 } 777 } 778 #ifdef ASSERT 779 size_t balanced_total_refs = 0; 780 for (uint i = 0; i < _max_num_q; ++i) { 781 balanced_total_refs += ref_lists[i].length(); 782 } 783 log_reflist_counts(ref_lists, balanced_total_refs); 784 assert(total_refs == balanced_total_refs, "Balancing was incomplete"); 785 #endif 786 } 787 788 void ReferenceProcessor::balance_all_queues() { 789 balance_queues(_discoveredSoftRefs); 790 balance_queues(_discoveredWeakRefs); 791 balance_queues(_discoveredFinalRefs); 792 balance_queues(_discoveredPhantomRefs); 793 } 794 795 void ReferenceProcessor::process_discovered_reflist( 796 DiscoveredList refs_lists[], 797 ReferencePolicy* policy, 798 bool clear_referent, 799 BoolObjectClosure* is_alive, 800 OopClosure* keep_alive, 801 VoidClosure* complete_gc, 802 AbstractRefProcTaskExecutor* task_executor) 803 { 804 bool mt_processing = task_executor != NULL && _processing_is_mt; 805 // If discovery used MT and a dynamic number of GC threads, then 806 // the queues must be balanced for correctness if fewer than the 807 // maximum number of queues were used. The number of queue used 808 // during discovery may be different than the number to be used 809 // for processing so don't depend of _num_q < _max_num_q as part 810 // of the test. 811 bool must_balance = _discovery_is_mt; 812 813 if ((mt_processing && ParallelRefProcBalancingEnabled) || 814 must_balance) { 815 balance_queues(refs_lists); 816 } 817 818 // Phase 1 (soft refs only): 819 // . Traverse the list and remove any SoftReferences whose 820 // referents are not alive, but that should be kept alive for 821 // policy reasons. Keep alive the transitive closure of all 822 // such referents. 823 if (policy != NULL) { 824 if (mt_processing) { 825 RefProcPhase1Task phase1(*this, refs_lists, policy, true /*marks_oops_alive*/); 826 task_executor->execute(phase1); 827 } else { 828 for (uint i = 0; i < _max_num_q; i++) { 829 process_phase1(refs_lists[i], policy, 830 is_alive, keep_alive, complete_gc); 831 } 832 } 833 } else { // policy == NULL 834 assert(refs_lists != _discoveredSoftRefs, 835 "Policy must be specified for soft references."); 836 } 837 838 // Phase 2: 839 // . Traverse the list and remove any refs whose referents are alive. 840 if (mt_processing) { 841 RefProcPhase2Task phase2(*this, refs_lists, !discovery_is_atomic() /*marks_oops_alive*/); 842 task_executor->execute(phase2); 843 } else { 844 for (uint i = 0; i < _max_num_q; i++) { 845 process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc); 846 } 847 } 848 849 // Phase 3: 850 // . Traverse the list and process referents as appropriate. 851 if (mt_processing) { 852 RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/); 853 task_executor->execute(phase3); 854 } else { 855 for (uint i = 0; i < _max_num_q; i++) { 856 process_phase3(refs_lists[i], clear_referent, 857 is_alive, keep_alive, complete_gc); 858 } 859 } 860 } 861 862 inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt) { 863 uint id = 0; 864 // Determine the queue index to use for this object. 865 if (_discovery_is_mt) { 866 // During a multi-threaded discovery phase, 867 // each thread saves to its "own" list. 868 Thread* thr = Thread::current(); 869 id = thr->as_Worker_thread()->id(); 870 } else { 871 // single-threaded discovery, we save in round-robin 872 // fashion to each of the lists. 873 if (_processing_is_mt) { 874 id = next_id(); 875 } 876 } 877 assert(id < _max_num_q, "Id is out-of-bounds (call Freud?)"); 878 879 // Get the discovered queue to which we will add 880 DiscoveredList* list = NULL; 881 switch (rt) { 882 case REF_OTHER: 883 // Unknown reference type, no special treatment 884 break; 885 case REF_SOFT: 886 list = &_discoveredSoftRefs[id]; 887 break; 888 case REF_WEAK: 889 list = &_discoveredWeakRefs[id]; 890 break; 891 case REF_FINAL: 892 list = &_discoveredFinalRefs[id]; 893 break; 894 case REF_PHANTOM: 895 list = &_discoveredPhantomRefs[id]; 896 break; 897 case REF_NONE: 898 // we should not reach here if we are an InstanceRefKlass 899 default: 900 ShouldNotReachHere(); 901 } 902 log_develop_trace(gc, ref)("Thread %d gets list " INTPTR_FORMAT, id, p2i(list)); 903 return list; 904 } 905 906 inline void 907 ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list, 908 oop obj, 909 HeapWord* discovered_addr) { 910 assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller"); 911 // First we must make sure this object is only enqueued once. CAS in a non null 912 // discovered_addr. 913 oop current_head = refs_list.head(); 914 // The last ref must have its discovered field pointing to itself. 915 oop next_discovered = (current_head != NULL) ? current_head : obj; 916 917 oop retest = oopDesc::atomic_compare_exchange_oop(next_discovered, discovered_addr, 918 NULL); 919 if (retest == NULL) { 920 // This thread just won the right to enqueue the object. 921 // We have separate lists for enqueueing, so no synchronization 922 // is necessary. 923 refs_list.set_head(obj); 924 refs_list.inc_length(1); 925 926 log_develop_trace(gc, ref)("Discovered reference (mt) (" INTPTR_FORMAT ": %s)", 927 p2i(obj), obj->klass()->internal_name()); 928 } else { 929 // If retest was non NULL, another thread beat us to it: 930 // The reference has already been discovered... 931 log_develop_trace(gc, ref)("Already discovered reference (" INTPTR_FORMAT ": %s)", 932 p2i(obj), obj->klass()->internal_name()); 933 } 934 } 935 936 #ifndef PRODUCT 937 // Non-atomic (i.e. concurrent) discovery might allow us 938 // to observe j.l.References with NULL referents, being those 939 // cleared concurrently by mutators during (or after) discovery. 940 void ReferenceProcessor::verify_referent(oop obj) { 941 bool da = discovery_is_atomic(); 942 oop referent = java_lang_ref_Reference::referent(obj); 943 assert(da ? referent->is_oop() : referent->is_oop_or_null(), 944 "Bad referent " INTPTR_FORMAT " found in Reference " 945 INTPTR_FORMAT " during %satomic discovery ", 946 p2i(referent), p2i(obj), da ? "" : "non-"); 947 } 948 #endif 949 950 // We mention two of several possible choices here: 951 // #0: if the reference object is not in the "originating generation" 952 // (or part of the heap being collected, indicated by our "span" 953 // we don't treat it specially (i.e. we scan it as we would 954 // a normal oop, treating its references as strong references). 955 // This means that references can't be discovered unless their 956 // referent is also in the same span. This is the simplest, 957 // most "local" and most conservative approach, albeit one 958 // that may cause weak references to be enqueued least promptly. 959 // We call this choice the "ReferenceBasedDiscovery" policy. 960 // #1: the reference object may be in any generation (span), but if 961 // the referent is in the generation (span) being currently collected 962 // then we can discover the reference object, provided 963 // the object has not already been discovered by 964 // a different concurrently running collector (as may be the 965 // case, for instance, if the reference object is in CMS and 966 // the referent in DefNewGeneration), and provided the processing 967 // of this reference object by the current collector will 968 // appear atomic to every other collector in the system. 969 // (Thus, for instance, a concurrent collector may not 970 // discover references in other generations even if the 971 // referent is in its own generation). This policy may, 972 // in certain cases, enqueue references somewhat sooner than 973 // might Policy #0 above, but at marginally increased cost 974 // and complexity in processing these references. 975 // We call this choice the "RefeferentBasedDiscovery" policy. 976 bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) { 977 // Make sure we are discovering refs (rather than processing discovered refs). 978 if (!_discovering_refs || !RegisterReferences) { 979 return false; 980 } 981 // We only discover active references. 982 oop next = java_lang_ref_Reference::next(obj); 983 if (next != NULL) { // Ref is no longer active 984 return false; 985 } 986 987 HeapWord* obj_addr = (HeapWord*)obj; 988 if (RefDiscoveryPolicy == ReferenceBasedDiscovery && 989 !_span.contains(obj_addr)) { 990 // Reference is not in the originating generation; 991 // don't treat it specially (i.e. we want to scan it as a normal 992 // object with strong references). 993 return false; 994 } 995 996 // We only discover references whose referents are not (yet) 997 // known to be strongly reachable. 998 if (is_alive_non_header() != NULL) { 999 verify_referent(obj); 1000 if (is_alive_non_header()->do_object_b(java_lang_ref_Reference::referent(obj))) { 1001 return false; // referent is reachable 1002 } 1003 } 1004 if (rt == REF_SOFT) { 1005 // For soft refs we can decide now if these are not 1006 // current candidates for clearing, in which case we 1007 // can mark through them now, rather than delaying that 1008 // to the reference-processing phase. Since all current 1009 // time-stamp policies advance the soft-ref clock only 1010 // at a full collection cycle, this is always currently 1011 // accurate. 1012 if (!_current_soft_ref_policy->should_clear_reference(obj, _soft_ref_timestamp_clock)) { 1013 return false; 1014 } 1015 } 1016 1017 ResourceMark rm; // Needed for tracing. 1018 1019 HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(obj); 1020 const oop discovered = java_lang_ref_Reference::discovered(obj); 1021 assert(discovered->is_oop_or_null(), "Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered)); 1022 if (discovered != NULL) { 1023 // The reference has already been discovered... 1024 log_develop_trace(gc, ref)("Already discovered reference (" INTPTR_FORMAT ": %s)", 1025 p2i(obj), obj->klass()->internal_name()); 1026 if (RefDiscoveryPolicy == ReferentBasedDiscovery) { 1027 // assumes that an object is not processed twice; 1028 // if it's been already discovered it must be on another 1029 // generation's discovered list; so we won't discover it. 1030 return false; 1031 } else { 1032 assert(RefDiscoveryPolicy == ReferenceBasedDiscovery, 1033 "Unrecognized policy"); 1034 // Check assumption that an object is not potentially 1035 // discovered twice except by concurrent collectors that potentially 1036 // trace the same Reference object twice. 1037 assert(UseConcMarkSweepGC || UseG1GC, 1038 "Only possible with a concurrent marking collector"); 1039 return true; 1040 } 1041 } 1042 1043 if (RefDiscoveryPolicy == ReferentBasedDiscovery) { 1044 verify_referent(obj); 1045 // Discover if and only if EITHER: 1046 // .. reference is in our span, OR 1047 // .. we are an atomic collector and referent is in our span 1048 if (_span.contains(obj_addr) || 1049 (discovery_is_atomic() && 1050 _span.contains(java_lang_ref_Reference::referent(obj)))) { 1051 // should_enqueue = true; 1052 } else { 1053 return false; 1054 } 1055 } else { 1056 assert(RefDiscoveryPolicy == ReferenceBasedDiscovery && 1057 _span.contains(obj_addr), "code inconsistency"); 1058 } 1059 1060 // Get the right type of discovered queue head. 1061 DiscoveredList* list = get_discovered_list(rt); 1062 if (list == NULL) { 1063 return false; // nothing special needs to be done 1064 } 1065 1066 if (_discovery_is_mt) { 1067 add_to_discovered_list_mt(*list, obj, discovered_addr); 1068 } else { 1069 // We do a raw store here: the field will be visited later when processing 1070 // the discovered references. 1071 oop current_head = list->head(); 1072 // The last ref must have its discovered field pointing to itself. 1073 oop next_discovered = (current_head != NULL) ? current_head : obj; 1074 1075 assert(discovered == NULL, "control point invariant"); 1076 oop_store_raw(discovered_addr, next_discovered); 1077 list->set_head(obj); 1078 list->inc_length(1); 1079 1080 log_develop_trace(gc, ref)("Discovered reference (" INTPTR_FORMAT ": %s)", p2i(obj), obj->klass()->internal_name()); 1081 } 1082 assert(obj->is_oop(), "Discovered a bad reference"); 1083 verify_referent(obj); 1084 return true; 1085 } 1086 1087 // Preclean the discovered references by removing those 1088 // whose referents are alive, and by marking from those that 1089 // are not active. These lists can be handled here 1090 // in any order and, indeed, concurrently. 1091 void ReferenceProcessor::preclean_discovered_references( 1092 BoolObjectClosure* is_alive, 1093 OopClosure* keep_alive, 1094 VoidClosure* complete_gc, 1095 YieldClosure* yield, 1096 GCTimer* gc_timer) { 1097 1098 // Soft references 1099 { 1100 GCTraceTime(Debug, gc, ref) tm("Preclean SoftReferences", gc_timer); 1101 for (uint i = 0; i < _max_num_q; i++) { 1102 if (yield->should_return()) { 1103 return; 1104 } 1105 preclean_discovered_reflist(_discoveredSoftRefs[i], is_alive, 1106 keep_alive, complete_gc, yield); 1107 } 1108 } 1109 1110 // Weak references 1111 { 1112 GCTraceTime(Debug, gc, ref) tm("Preclean WeakReferences", gc_timer); 1113 for (uint i = 0; i < _max_num_q; i++) { 1114 if (yield->should_return()) { 1115 return; 1116 } 1117 preclean_discovered_reflist(_discoveredWeakRefs[i], is_alive, 1118 keep_alive, complete_gc, yield); 1119 } 1120 } 1121 1122 // Final references 1123 { 1124 GCTraceTime(Debug, gc, ref) tm("Preclean FinalReferences", gc_timer); 1125 for (uint i = 0; i < _max_num_q; i++) { 1126 if (yield->should_return()) { 1127 return; 1128 } 1129 preclean_discovered_reflist(_discoveredFinalRefs[i], is_alive, 1130 keep_alive, complete_gc, yield); 1131 } 1132 } 1133 1134 // Phantom references 1135 { 1136 GCTraceTime(Debug, gc, ref) tm("Preclean PhantomReferences", gc_timer); 1137 for (uint i = 0; i < _max_num_q; i++) { 1138 if (yield->should_return()) { 1139 return; 1140 } 1141 preclean_discovered_reflist(_discoveredPhantomRefs[i], is_alive, 1142 keep_alive, complete_gc, yield); 1143 } 1144 } 1145 } 1146 1147 // Walk the given discovered ref list, and remove all reference objects 1148 // whose referents are still alive, whose referents are NULL or which 1149 // are not active (have a non-NULL next field). NOTE: When we are 1150 // thus precleaning the ref lists (which happens single-threaded today), 1151 // we do not disable refs discovery to honor the correct semantics of 1152 // java.lang.Reference. As a result, we need to be careful below 1153 // that ref removal steps interleave safely with ref discovery steps 1154 // (in this thread). 1155 void 1156 ReferenceProcessor::preclean_discovered_reflist(DiscoveredList& refs_list, 1157 BoolObjectClosure* is_alive, 1158 OopClosure* keep_alive, 1159 VoidClosure* complete_gc, 1160 YieldClosure* yield) { 1161 DiscoveredListIterator iter(refs_list, keep_alive, is_alive); 1162 while (iter.has_next()) { 1163 iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */)); 1164 oop obj = iter.obj(); 1165 oop next = java_lang_ref_Reference::next(obj); 1166 if (iter.referent() == NULL || iter.is_referent_alive() || 1167 next != NULL) { 1168 // The referent has been cleared, or is alive, or the Reference is not 1169 // active; we need to trace and mark its cohort. 1170 log_develop_trace(gc, ref)("Precleaning Reference (" INTPTR_FORMAT ": %s)", 1171 p2i(iter.obj()), iter.obj()->klass()->internal_name()); 1172 // Remove Reference object from list 1173 iter.remove(); 1174 // Keep alive its cohort. 1175 iter.make_referent_alive(); 1176 if (UseCompressedOops) { 1177 narrowOop* next_addr = (narrowOop*)java_lang_ref_Reference::next_addr(obj); 1178 keep_alive->do_oop(next_addr); 1179 } else { 1180 oop* next_addr = (oop*)java_lang_ref_Reference::next_addr(obj); 1181 keep_alive->do_oop(next_addr); 1182 } 1183 iter.move_to_next(); 1184 } else { 1185 iter.next(); 1186 } 1187 } 1188 // Close the reachable set 1189 complete_gc->do_void(); 1190 1191 NOT_PRODUCT( 1192 if (iter.processed() > 0) { 1193 log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " Refs out of " SIZE_FORMAT " Refs in discovered list " INTPTR_FORMAT, 1194 iter.removed(), iter.processed(), p2i(&refs_list)); 1195 } 1196 ) 1197 } 1198 1199 const char* ReferenceProcessor::list_name(uint i) { 1200 assert(i <= _max_num_q * number_of_subclasses_of_ref(), 1201 "Out of bounds index"); 1202 1203 int j = i / _max_num_q; 1204 switch (j) { 1205 case 0: return "SoftRef"; 1206 case 1: return "WeakRef"; 1207 case 2: return "FinalRef"; 1208 case 3: return "PhantomRef"; 1209 } 1210 ShouldNotReachHere(); 1211 return NULL; 1212 } 1213