/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.inline.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/heapMonitoring.hpp"
#include "runtime/java.hpp"
#include "runtime/jniHandles.hpp"

ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy      = NULL;
jlong            ReferenceProcessor::_soft_ref_timestamp_clock = 0;

void referenceProcessor_init() {
  ReferenceProcessor::init_statics();
}

void ReferenceProcessor::init_statics() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;

  // Initialize the soft ref timestamp clock.
  _soft_ref_timestamp_clock = now;
  // Also update the soft ref clock in j.l.r.SoftReference.
  java_lang_ref_SoftReference::set_clock(_soft_ref_timestamp_clock);

  _always_clear_soft_ref_policy = new AlwaysClearPolicy();
  if (is_server_compilation_mode_vm()) {
    _default_soft_ref_policy = new LRUMaxHeapPolicy();
  } else {
    _default_soft_ref_policy = new LRUCurrentHeapPolicy();
  }
  if (_always_clear_soft_ref_policy == NULL || _default_soft_ref_policy == NULL) {
    vm_exit_during_initialization("Could not allocate reference policy object");
  }
  guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery ||
            RefDiscoveryPolicy == ReferentBasedDiscovery,
            "Unrecognized RefDiscoveryPolicy");
}

void ReferenceProcessor::enable_discovery(bool check_no_refs) {
#ifdef ASSERT
  // Verify that we're not currently discovering refs.
  assert(!_discovering_refs, "nested call?");

  if (check_no_refs) {
    // Verify that the discovered lists are empty.
    verify_no_references_recorded();
  }
#endif // ASSERT

  // Someone could have modified the value of the static
  // field in the j.l.r.SoftReference class that holds the
  // soft reference timestamp clock using reflection or
  // Unsafe between GCs. Unconditionally update the static
  // field in ReferenceProcessor here so that we use the new
  // value during reference discovery.

  _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock();
  _discovering_refs = true;
}

ReferenceProcessor::ReferenceProcessor(MemRegion span,
                                       bool      mt_processing,
                                       uint      mt_processing_degree,
                                       bool      mt_discovery,
                                       uint      mt_discovery_degree,
                                       bool      atomic_discovery,
                                       BoolObjectClosure* is_alive_non_header) :
  _discovering_refs(false),
  _enqueuing_is_done(false),
  _is_alive_non_header(is_alive_non_header),
  _processing_is_mt(mt_processing),
  _next_id(0)
{
  _span = span;
  _discovery_is_atomic = atomic_discovery;
  _discovery_is_mt     = mt_discovery;
  _num_q               = MAX2(1U, mt_processing_degree);
  _max_num_q           = MAX2(_num_q, mt_discovery_degree);
  _discovered_refs     = NEW_C_HEAP_ARRAY(DiscoveredList,
            _max_num_q * number_of_subclasses_of_ref(), mtGC);

  if (_discovered_refs == NULL) {
    vm_exit_during_initialization("Could not allocate RefProc Array");
  }
  _discoveredSoftRefs    = &_discovered_refs[0];
  _discoveredWeakRefs    = &_discoveredSoftRefs[_max_num_q];
  _discoveredFinalRefs   = &_discoveredWeakRefs[_max_num_q];
  _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_q];

  // Initialize all entries to NULL.
  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    _discovered_refs[i].set_head(NULL);
    _discovered_refs[i].set_length(0);
  }

  setup_policy(false /* default soft ref policy */);
}
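// The discovered lists above form one flattened 2-D array, indexed as
// [subclass * _max_num_q + queue]. For example, with _max_num_q == 4:
//
//   index:  0..3         4..7         8..11         12..15
//   lists:  Soft[0..3]   Weak[0..3]   Final[0..3]   Phantom[0..3]
//
// list_name() recovers the subclass as i / _max_num_q, and the parallel
// enqueue task below strides through the array in steps of _max_num_q.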
#ifndef PRODUCT
void ReferenceProcessor::verify_no_references_recorded() {
  guarantee(!_discovering_refs, "Discovering refs?");
  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    guarantee(_discovered_refs[i].is_empty(),
              "Found non-empty discovered list at %u", i);
  }
}
#endif

void ReferenceProcessor::weak_oops_do(OopClosure* f) {
  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    if (UseCompressedOops) {
      f->do_oop((narrowOop*)_discovered_refs[i].adr_head());
    } else {
      f->do_oop((oop*)_discovered_refs[i].adr_head());
    }
  }
}

void ReferenceProcessor::update_soft_ref_master_clock() {
  // Update (advance) the soft ref master clock field. This must be done
  // after processing the soft ref list.

  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  jlong soft_ref_clock = java_lang_ref_SoftReference::clock();
  assert(soft_ref_clock == _soft_ref_timestamp_clock, "soft ref clocks out of sync");

  NOT_PRODUCT(
  if (now < _soft_ref_timestamp_clock) {
    log_warning(gc)("time warp: " JLONG_FORMAT " to " JLONG_FORMAT,
                    _soft_ref_timestamp_clock, now);
  }
  )
  // The values of now and _soft_ref_timestamp_clock are set using
  // javaTimeNanos(), which is guaranteed to be monotonically
  // non-decreasing provided the underlying platform provides such
  // a time source (and it is bug free).
  // In product mode, however, protect ourselves from non-monotonicity.
  if (now > _soft_ref_timestamp_clock) {
    _soft_ref_timestamp_clock = now;
    java_lang_ref_SoftReference::set_clock(now);
  }
  // Else leave clock stalled at its old value until time progresses
  // past clock value.
}

size_t ReferenceProcessor::total_count(DiscoveredList lists[]) {
  size_t total = 0;
  for (uint i = 0; i < _max_num_q; ++i) {
    total += lists[i].length();
  }
  return total;
}
ReferenceProcessorStats ReferenceProcessor::process_discovered_references(
  BoolObjectClosure*            is_alive,
  OopClosure*                   keep_alive,
  VoidClosure*                  complete_gc,
  AbstractRefProcTaskExecutor*  task_executor,
  GCTimer*                      gc_timer) {

  assert(!enqueuing_is_done(), "If here enqueuing should not be complete");
  // Stop treating discovered references specially.
  disable_discovery();

  // If discovery was concurrent, someone could have modified
  // the value of the static field in the j.l.r.SoftReference
  // class that holds the soft reference timestamp clock using
  // reflection or Unsafe between when discovery was enabled and
  // now. Unconditionally update the static field in ReferenceProcessor
  // here so that we use the new value during processing of the
  // discovered soft refs.

  _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock();

  ReferenceProcessorStats stats(
      total_count(_discoveredSoftRefs),
      total_count(_discoveredWeakRefs),
      total_count(_discoveredFinalRefs),
      total_count(_discoveredPhantomRefs));

  // Soft references
  {
    GCTraceTime(Debug, gc, ref) tt("SoftReference", gc_timer);
    process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  update_soft_ref_master_clock();

  // Weak references
  {
    GCTraceTime(Debug, gc, ref) tt("WeakReference", gc_timer);
    process_discovered_reflist(_discoveredWeakRefs, NULL, true,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  // Final references
  {
    GCTraceTime(Debug, gc, ref) tt("FinalReference", gc_timer);
    process_discovered_reflist(_discoveredFinalRefs, NULL, false,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  // Phantom references
  {
    GCTraceTime(Debug, gc, ref) tt("PhantomReference", gc_timer);
    process_discovered_reflist(_discoveredPhantomRefs, NULL, true,
                               is_alive, keep_alive, complete_gc, task_executor);
  }

  // Weak global JNI references. It would make more sense (semantically) to
  // traverse these simultaneously with the regular weak references above, but
  // that is not what the JDK 1.2 specification requires. See #4126360. Native code
  // can thus use JNI weak references to circumvent the phantom references and
  // resurrect a "post-mortem" object.
  {
    GCTraceTime(Debug, gc, ref) tt("JNI Weak Reference", gc_timer);
    if (task_executor != NULL) {
      task_executor->set_single_threaded_mode();
    }
    process_phaseJNI(is_alive, keep_alive, complete_gc);
  }

  HeapMonitoring::weak_oops_do(task_executor, is_alive, keep_alive, complete_gc);
  log_debug(gc, ref)("Ref Counts: Soft: " SIZE_FORMAT " Weak: " SIZE_FORMAT " Final: " SIZE_FORMAT " Phantom: " SIZE_FORMAT,
                     stats.soft_count(), stats.weak_count(), stats.final_count(), stats.phantom_count());
  log_develop_trace(gc, ref)("JNI Weak Reference count: " SIZE_FORMAT, count_jni_refs());

  return stats;
}
#ifndef PRODUCT
// Calculate the number of jni handles.
size_t ReferenceProcessor::count_jni_refs() {
  class CountHandleClosure: public OopClosure {
  private:
    size_t _count;
  public:
    CountHandleClosure(): _count(0) {}
    void do_oop(oop* unused)       { _count++; }
    void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
    size_t count() { return _count; }
  };
  CountHandleClosure global_handle_count;
  JNIHandles::weak_oops_do(&global_handle_count);
  return global_handle_count.count();
}
#endif

void ReferenceProcessor::process_phaseJNI(BoolObjectClosure* is_alive,
                                          OopClosure*        keep_alive,
                                          VoidClosure*       complete_gc) {
  JNIHandles::weak_oops_do(is_alive, keep_alive);
  complete_gc->do_void();
}

void ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor) {
  // Enqueue references that are not made active again, and
  // clear the decks for the next collection (cycle).
  enqueue_discovered_reflists(task_executor);

  // Stop treating discovered references specially.
  disable_discovery();
}

void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list) {
  // Given a list of refs linked through the "discovered" field
  // (java.lang.ref.Reference.discovered), self-loop their "next" field
  // thus distinguishing them from active References, then
  // prepend them to the pending list.
  //
  // The Java threads will see the Reference objects linked together through
  // the discovered field. Instead of trying to do the write barrier updates
  // in all places in the reference processor where we manipulate the discovered
  // field we make sure to do the barrier here where we anyway iterate through
  // all linked Reference objects. Note that it is important to not dirty any
  // cards during reference processing since this will cause card table
  // verification to fail for G1.
  log_develop_trace(gc, ref)("ReferenceProcessor::enqueue_discovered_reflist list " INTPTR_FORMAT, p2i(&refs_list));

  oop obj = NULL;
  oop next_d = refs_list.head();
  // Walk down the list, self-looping the next field
  // so that the References are not considered active.
  while (obj != next_d) {
    obj = next_d;
    assert(obj->is_instance(), "should be an instance object");
    assert(InstanceKlass::cast(obj->klass())->is_reference_instance_klass(), "should be reference object");
    next_d = java_lang_ref_Reference::discovered(obj);
    log_develop_trace(gc, ref)(" obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT, p2i(obj), p2i(next_d));
    assert(java_lang_ref_Reference::next(obj) == NULL,
           "Reference not active; should not be discovered");
    // Self-loop next, so as to make Ref not active.
    java_lang_ref_Reference::set_next_raw(obj, obj);
    if (next_d != obj) {
      oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), next_d);
    } else {
      // This is the last object.
      // Swap refs_list into pending list and set obj's
      // discovered to what we read from the pending list.
      oop old = Universe::swap_reference_pending_list(refs_list.head());
      java_lang_ref_Reference::set_discovered_raw(obj, old); // old may be NULL
      oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), old);
    }
  }
}
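// Illustration of the transformation above, for a three-element list
// (P is the previous head of the pending list):
//
//   before:  refs_list.head -> r1 -discovered-> r2 -discovered-> r3 (self-loop)
//   after:   r1.next == r1, r2.next == r2, r3.next == r3      (all inactive)
//            r1 -discovered-> r2 -discovered-> r3 -discovered-> P
//            pending list head == r1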
// Parallel enqueue task
class RefProcEnqueueTask: public AbstractRefProcTaskExecutor::EnqueueTask {
public:
  RefProcEnqueueTask(ReferenceProcessor& ref_processor,
                     DiscoveredList      discovered_refs[],
                     int                 n_queues)
    : EnqueueTask(ref_processor, discovered_refs, n_queues)
  { }

  virtual void work(unsigned int work_id) {
    assert(work_id < (unsigned int)_ref_processor.max_num_q(), "Index out-of-bounds");
    // Simplest first cut: static partitioning.
    int index = work_id;
    // The increment on "index" must correspond to the maximum number of queues
    // (n_queues) with which that ReferenceProcessor was created. That
    // is because of the "clever" way the discovered references lists were
    // allocated and are indexed into.
    assert(_n_queues == (int) _ref_processor.max_num_q(), "Different number not expected");
    for (int j = 0;
         j < ReferenceProcessor::number_of_subclasses_of_ref();
         j++, index += _n_queues) {
      _ref_processor.enqueue_discovered_reflist(_refs_lists[index]);
      _refs_lists[index].set_head(NULL);
      _refs_lists[index].set_length(0);
    }
  }
};
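// For example, with _n_queues == 4 the worker with work_id == 1 enqueues
// indices 1, 5, 9 and 13, i.e. its own Soft, Weak, Final and Phantom lists
// in the flattened array described above.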
// Enqueue references that are not made active again
void ReferenceProcessor::enqueue_discovered_reflists(AbstractRefProcTaskExecutor* task_executor) {
  if (_processing_is_mt && task_executor != NULL) {
    // Parallel code
    RefProcEnqueueTask tsk(*this, _discovered_refs, _max_num_q);
    task_executor->execute(tsk);
  } else {
    // Serial code: call the parent class's implementation
    for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
      enqueue_discovered_reflist(_discovered_refs[i]);
      _discovered_refs[i].set_head(NULL);
      _discovered_refs[i].set_length(0);
    }
  }
}

void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
  _discovered_addr = java_lang_ref_Reference::discovered_addr(_ref);
  oop discovered = java_lang_ref_Reference::discovered(_ref);
  assert(_discovered_addr && discovered->is_oop_or_null(),
         "Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered));
  _next = discovered;
  _referent_addr = java_lang_ref_Reference::referent_addr(_ref);
  _referent = java_lang_ref_Reference::referent(_ref);
  assert(Universe::heap()->is_in_reserved_or_null(_referent),
         "Wrong oop found in java.lang.Reference object");
  assert(allow_null_referent ?
             _referent->is_oop_or_null()
           : _referent->is_oop(),
         "Expected an oop%s for referent field at " PTR_FORMAT,
         (allow_null_referent ? " or NULL" : ""),
         p2i(_referent));
}

void DiscoveredListIterator::remove() {
  assert(_ref->is_oop(), "Dropping a bad reference");
  oop_store_raw(_discovered_addr, NULL);

  // First _prev_next ref actually points into DiscoveredList (gross).
  oop new_next;
  if (_next == _ref) {
    // At the end of the list, we should make _prev point to itself.
    // If _ref is the first ref, then _prev_next will be in the DiscoveredList,
    // and _prev will be NULL.
    new_next = _prev;
  } else {
    new_next = _next;
  }
  // Remove Reference object from discovered list. Note that G1 does not need a
  // pre-barrier here because we know the Reference has already been found/marked,
  // that's how it ended up in the discovered list in the first place.
  oop_store_raw(_prev_next, new_next);
  NOT_PRODUCT(_removed++);
  _refs_list.dec_length(1);
}

void DiscoveredListIterator::clear_referent() {
  oop_store_raw(_referent_addr, NULL);
}

// NOTE: process_phase*() are largely similar, and at a high level
// merely iterate over the extant list applying a predicate to
// each of its elements and possibly removing that element from the
// list and applying some further closures to that element.
// We should consider the possibility of replacing these
// process_phase*() methods by abstracting them into
// a single general iterator invocation that receives appropriate
// closures that accomplish this work.

// (SoftReferences only) Traverse the list and remove any SoftReferences whose
// referents are not alive, but that should be kept alive for policy reasons.
// Keep alive the transitive closure of all such referents.
void
ReferenceProcessor::process_phase1(DiscoveredList&    refs_list,
                                   ReferencePolicy*   policy,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc) {
  assert(policy != NULL, "Must have a non-NULL policy");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  // Decide which softly reachable refs should be kept alive.
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
    bool referent_is_dead = (iter.referent() != NULL) && !iter.is_referent_alive();
    if (referent_is_dead &&
        !policy->should_clear_reference(iter.obj(), _soft_ref_timestamp_clock)) {
      log_develop_trace(gc, ref)("Dropping reference (" INTPTR_FORMAT ": %s) by policy",
                                 p2i(iter.obj()), iter.obj()->klass()->internal_name());
      // Remove Reference object from list
      iter.remove();
      // keep the referent around
      iter.make_referent_alive();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Close the reachable set
  complete_gc->do_void();
  log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " dead Refs out of " SIZE_FORMAT " discovered Refs by policy, from list " INTPTR_FORMAT,
                             iter.removed(), iter.processed(), p2i(&refs_list));
}
// Traverse the list and remove any Refs that are not active, or
// whose referents are either alive or NULL.
void
ReferenceProcessor::pp2_work(DiscoveredList&    refs_list,
                             BoolObjectClosure* is_alive,
                             OopClosure*        keep_alive) {
  assert(discovery_is_atomic(), "Error");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
    DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());)
    assert(next == NULL, "Should not discover inactive Reference");
    if (iter.is_referent_alive()) {
      log_develop_trace(gc, ref)("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)",
                                 p2i(iter.obj()), iter.obj()->klass()->internal_name());
      // The referent is reachable after all.
      // Remove Reference object from list.
      iter.remove();
      // Update the referent pointer as necessary: Note that this
      // should not entail any recursive marking because the
      // referent must already have been traversed.
      iter.make_referent_alive();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  NOT_PRODUCT(
    if (iter.processed() > 0) {
      log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " active Refs out of " SIZE_FORMAT
        " Refs in discovered list " INTPTR_FORMAT,
        iter.removed(), iter.processed(), p2i(&refs_list));
    }
  )
}

void
ReferenceProcessor::pp2_work_concurrent_discovery(DiscoveredList&    refs_list,
                                                  BoolObjectClosure* is_alive,
                                                  OopClosure*        keep_alive,
                                                  VoidClosure*       complete_gc) {
  assert(!discovery_is_atomic(), "Error");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    HeapWord* next_addr = java_lang_ref_Reference::next_addr(iter.obj());
    oop next = java_lang_ref_Reference::next(iter.obj());
    if ((iter.referent() == NULL || iter.is_referent_alive() ||
         next != NULL)) {
      assert(next->is_oop_or_null(), "Expected an oop or NULL for next field at " PTR_FORMAT, p2i(next));
      // Remove Reference object from list
      iter.remove();
      // Trace the cohorts
      iter.make_referent_alive();
      if (UseCompressedOops) {
        keep_alive->do_oop((narrowOop*)next_addr);
      } else {
        keep_alive->do_oop((oop*)next_addr);
      }
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Now close the newly reachable set
  complete_gc->do_void();
  NOT_PRODUCT(
    if (iter.processed() > 0) {
      log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " active Refs out of " SIZE_FORMAT
        " Refs in discovered list " INTPTR_FORMAT,
        iter.removed(), iter.processed(), p2i(&refs_list));
    }
  )
}
"cleared " : "", p2i(iter.obj()), iter.obj()->klass()->internal_name()); 573 assert(iter.obj()->is_oop(UseConcMarkSweepGC), "Adding a bad reference"); 574 iter.next(); 575 } 576 // Close the reachable set 577 complete_gc->do_void(); 578 } 579 580 void 581 ReferenceProcessor::clear_discovered_references(DiscoveredList& refs_list) { 582 oop obj = NULL; 583 oop next = refs_list.head(); 584 while (next != obj) { 585 obj = next; 586 next = java_lang_ref_Reference::discovered(obj); 587 java_lang_ref_Reference::set_discovered_raw(obj, NULL); 588 } 589 refs_list.set_head(NULL); 590 refs_list.set_length(0); 591 } 592 593 void ReferenceProcessor::abandon_partial_discovery() { 594 // loop over the lists 595 for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) { 596 if ((i % _max_num_q) == 0) { 597 log_develop_trace(gc, ref)("Abandoning %s discovered list", list_name(i)); 598 } 599 clear_discovered_references(_discovered_refs[i]); 600 } 601 } 602 603 class RefProcPhase1Task: public AbstractRefProcTaskExecutor::ProcessTask { 604 public: 605 RefProcPhase1Task(ReferenceProcessor& ref_processor, 606 DiscoveredList refs_lists[], 607 ReferencePolicy* policy, 608 bool marks_oops_alive) 609 : ProcessTask(ref_processor, refs_lists, marks_oops_alive), 610 _policy(policy) 611 { } 612 virtual void work(unsigned int i, BoolObjectClosure& is_alive, 613 OopClosure& keep_alive, 614 VoidClosure& complete_gc) 615 { 616 _ref_processor.process_phase1(_refs_lists[i], _policy, 617 &is_alive, &keep_alive, &complete_gc); 618 } 619 private: 620 ReferencePolicy* _policy; 621 }; 622 623 class RefProcPhase2Task: public AbstractRefProcTaskExecutor::ProcessTask { 624 public: 625 RefProcPhase2Task(ReferenceProcessor& ref_processor, 626 DiscoveredList refs_lists[], 627 bool marks_oops_alive) 628 : ProcessTask(ref_processor, refs_lists, marks_oops_alive) 629 { } 630 virtual void work(unsigned int i, BoolObjectClosure& is_alive, 631 OopClosure& keep_alive, 632 VoidClosure& complete_gc) 633 { 634 _ref_processor.process_phase2(_refs_lists[i], 635 &is_alive, &keep_alive, &complete_gc); 636 } 637 }; 638 639 class RefProcPhase3Task: public AbstractRefProcTaskExecutor::ProcessTask { 640 public: 641 RefProcPhase3Task(ReferenceProcessor& ref_processor, 642 DiscoveredList refs_lists[], 643 bool clear_referent, 644 bool marks_oops_alive) 645 : ProcessTask(ref_processor, refs_lists, marks_oops_alive), 646 _clear_referent(clear_referent) 647 { } 648 virtual void work(unsigned int i, BoolObjectClosure& is_alive, 649 OopClosure& keep_alive, 650 VoidClosure& complete_gc) 651 { 652 _ref_processor.process_phase3(_refs_lists[i], _clear_referent, 653 &is_alive, &keep_alive, &complete_gc); 654 } 655 private: 656 bool _clear_referent; 657 }; 658 659 #ifndef PRODUCT 660 void ReferenceProcessor::log_reflist_counts(DiscoveredList ref_lists[], uint active_length, size_t total_refs) { 661 if (!log_is_enabled(Trace, gc, ref)) { 662 return; 663 } 664 665 stringStream st; 666 for (uint i = 0; i < active_length; ++i) { 667 st.print(SIZE_FORMAT " ", ref_lists[i].length()); 668 } 669 log_develop_trace(gc, ref)("%s= " SIZE_FORMAT, st.as_string(), total_refs); 670 #ifdef ASSERT 671 for (uint i = active_length; i < _max_num_q; i++) { 672 assert(ref_lists[i].length() == 0, SIZE_FORMAT " unexpected References in %u", 673 ref_lists[i].length(), i); 674 } 675 #endif 676 } 677 #endif 678 679 void ReferenceProcessor::set_active_mt_degree(uint v) { 680 _num_q = v; 681 _next_id = 0; 682 } 683 684 // Balances reference queues. 
// Balances reference queues.
// Move entries from all queues[0, 1, ..., _max_num_q-1] to
// queues[0, 1, ..., _num_q-1], because only the first _num_q
// queues, corresponding to the active workers, will be processed.
void ReferenceProcessor::balance_queues(DiscoveredList ref_lists[])
{
  // calculate total length
  size_t total_refs = 0;
  log_develop_trace(gc, ref)("Balance ref_lists ");

  for (uint i = 0; i < _max_num_q; ++i) {
    total_refs += ref_lists[i].length();
  }
  log_reflist_counts(ref_lists, _max_num_q, total_refs);
  size_t avg_refs = total_refs / _num_q + 1;
  uint to_idx = 0;
  for (uint from_idx = 0; from_idx < _max_num_q; from_idx++) {
    bool move_all = false;
    if (from_idx >= _num_q) {
      move_all = ref_lists[from_idx].length() > 0;
    }
    while ((ref_lists[from_idx].length() > avg_refs) ||
           move_all) {
      assert(to_idx < _num_q, "Sanity Check!");
      if (ref_lists[to_idx].length() < avg_refs) {
        // move superfluous refs
        size_t refs_to_move;
        // Move all the Refs if the from queue will not be processed.
        if (move_all) {
          refs_to_move = MIN2(ref_lists[from_idx].length(),
                              avg_refs - ref_lists[to_idx].length());
        } else {
          refs_to_move = MIN2(ref_lists[from_idx].length() - avg_refs,
                              avg_refs - ref_lists[to_idx].length());
        }

        assert(refs_to_move > 0, "otherwise the code below will fail");

        oop move_head = ref_lists[from_idx].head();
        oop move_tail = move_head;
        oop new_head  = move_head;
        // find an element to split the list on
        for (size_t j = 0; j < refs_to_move; ++j) {
          move_tail = new_head;
          new_head = java_lang_ref_Reference::discovered(new_head);
        }

        // Add the chain to the to list.
        if (ref_lists[to_idx].head() == NULL) {
          // to list is empty. Make a loop at the end.
          java_lang_ref_Reference::set_discovered_raw(move_tail, move_tail);
        } else {
          java_lang_ref_Reference::set_discovered_raw(move_tail, ref_lists[to_idx].head());
        }
        ref_lists[to_idx].set_head(move_head);
        ref_lists[to_idx].inc_length(refs_to_move);

        // Remove the chain from the from list.
        if (move_tail == new_head) {
          // We found the end of the from list.
          ref_lists[from_idx].set_head(NULL);
        } else {
          ref_lists[from_idx].set_head(new_head);
        }
        ref_lists[from_idx].dec_length(refs_to_move);
        if (ref_lists[from_idx].length() == 0) {
          break;
        }
      } else {
        to_idx = (to_idx + 1) % _num_q;
      }
    }
  }
#ifdef ASSERT
  size_t balanced_total_refs = 0;
  for (uint i = 0; i < _num_q; ++i) {
    balanced_total_refs += ref_lists[i].length();
  }
  log_reflist_counts(ref_lists, _num_q, balanced_total_refs);
  assert(total_refs == balanced_total_refs, "Balancing was incomplete");
#endif
}
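// Worked example: with _max_num_q == 4, _num_q == 2 and list lengths
// {8, 0, 6, 0}, total_refs == 14 and avg_refs == 14/2 + 1 == 8. Queue 2 is
// outside the active range, so all 6 of its entries are moved to queue 1,
// giving {8, 6, 0, 0}; only queues 0 and 1 are then processed by the
// active workers, and the total is preserved.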
void ReferenceProcessor::balance_all_queues() {
  balance_queues(_discoveredSoftRefs);
  balance_queues(_discoveredWeakRefs);
  balance_queues(_discoveredFinalRefs);
  balance_queues(_discoveredPhantomRefs);
}

void ReferenceProcessor::process_discovered_reflist(
  DiscoveredList                refs_lists[],
  ReferencePolicy*              policy,
  bool                          clear_referent,
  BoolObjectClosure*            is_alive,
  OopClosure*                   keep_alive,
  VoidClosure*                  complete_gc,
  AbstractRefProcTaskExecutor*  task_executor)
{
  bool mt_processing = task_executor != NULL && _processing_is_mt;
  // If discovery used MT and a dynamic number of GC threads, then
  // the queues must be balanced for correctness if fewer than the
  // maximum number of queues were used. The number of queues used
  // during discovery may differ from the number used for processing,
  // so don't depend on _num_q < _max_num_q as part of the test.
  bool must_balance = _discovery_is_mt;

  if ((mt_processing && ParallelRefProcBalancingEnabled) ||
      must_balance) {
    balance_queues(refs_lists);
  }

  // Phase 1 (soft refs only):
  // . Traverse the list and remove any SoftReferences whose
  //   referents are not alive, but that should be kept alive for
  //   policy reasons. Keep alive the transitive closure of all
  //   such referents.
  if (policy != NULL) {
    if (mt_processing) {
      RefProcPhase1Task phase1(*this, refs_lists, policy, true /*marks_oops_alive*/);
      task_executor->execute(phase1);
    } else {
      for (uint i = 0; i < _max_num_q; i++) {
        process_phase1(refs_lists[i], policy,
                       is_alive, keep_alive, complete_gc);
      }
    }
  } else { // policy == NULL
    assert(refs_lists != _discoveredSoftRefs,
           "Policy must be specified for soft references.");
  }

  // Phase 2:
  // . Traverse the list and remove any refs whose referents are alive.
  if (mt_processing) {
    RefProcPhase2Task phase2(*this, refs_lists, !discovery_is_atomic() /*marks_oops_alive*/);
    task_executor->execute(phase2);
  } else {
    for (uint i = 0; i < _max_num_q; i++) {
      process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc);
    }
  }

  // Phase 3:
  // . Traverse the list and process referents as appropriate.
  if (mt_processing) {
    RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/);
    task_executor->execute(phase3);
  } else {
    for (uint i = 0; i < _max_num_q; i++) {
      process_phase3(refs_lists[i], clear_referent,
                     is_alive, keep_alive, complete_gc);
    }
  }
}
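// For reference, the callers in process_discovered_references() pass:
//   SoftReference:    policy == _current_soft_ref_policy, clear_referent == true
//   WeakReference:    policy == NULL,                     clear_referent == true
//   FinalReference:   policy == NULL,                     clear_referent == false
//   PhantomReference: policy == NULL,                     clear_referent == true
// so phase 1 runs only for soft references, and only final references keep
// their referents alive (for finalization) instead of clearing them.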
inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt) {
  uint id = 0;
  // Determine the queue index to use for this object.
  if (_discovery_is_mt) {
    // During a multi-threaded discovery phase,
    // each thread saves to its "own" list.
    Thread* thr = Thread::current();
    id = thr->as_Worker_thread()->id();
  } else {
    // single-threaded discovery, we save in round-robin
    // fashion to each of the lists.
    if (_processing_is_mt) {
      id = next_id();
    }
  }
  assert(id < _max_num_q, "Id is out-of-bounds (id %u, max id %u)", id, _max_num_q);

  // Get the discovered queue to which we will add
  DiscoveredList* list = NULL;
  switch (rt) {
    case REF_OTHER:
      // Unknown reference type, no special treatment
      break;
    case REF_SOFT:
      list = &_discoveredSoftRefs[id];
      break;
    case REF_WEAK:
      list = &_discoveredWeakRefs[id];
      break;
    case REF_FINAL:
      list = &_discoveredFinalRefs[id];
      break;
    case REF_PHANTOM:
      list = &_discoveredPhantomRefs[id];
      break;
    case REF_NONE:
      // we should not reach here if we are an InstanceRefKlass
    default:
      ShouldNotReachHere();
  }
  log_develop_trace(gc, ref)("Thread %u gets list " INTPTR_FORMAT, id, p2i(list));
  return list;
}

inline void
ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
                                              oop             obj,
                                              HeapWord*       discovered_addr) {
  assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller");
  // First we must make sure this object is only enqueued once. CAS in a non-NULL
  // discovered_addr.
  oop current_head = refs_list.head();
  // The last ref must have its discovered field pointing to itself.
  oop next_discovered = (current_head != NULL) ? current_head : obj;

  oop retest = oopDesc::atomic_compare_exchange_oop(next_discovered, discovered_addr,
                                                    NULL);
  if (retest == NULL) {
    // This thread just won the right to enqueue the object.
    // We have separate lists for enqueueing, so no synchronization
    // is necessary.
    refs_list.set_head(obj);
    refs_list.inc_length(1);

    log_develop_trace(gc, ref)("Discovered reference (mt) (" INTPTR_FORMAT ": %s)",
                               p2i(obj), obj->klass()->internal_name());
  } else {
    // If retest was non-NULL, another thread beat us to it:
    // The reference has already been discovered...
    log_develop_trace(gc, ref)("Already discovered reference (" INTPTR_FORMAT ": %s)",
                               p2i(obj), obj->klass()->internal_name());
  }
}
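// The CAS above is the only synchronization point for multi-threaded
// discovery: two threads racing to discover the same Reference each try to
// install a non-NULL value in its discovered field, exactly one CAS observes
// NULL (and wins), and the loser leaves the object on the winner's list.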
#ifndef PRODUCT
// Non-atomic (i.e. concurrent) discovery might allow us
// to observe j.l.References with NULL referents, being those
// cleared concurrently by mutators during (or after) discovery.
void ReferenceProcessor::verify_referent(oop obj) {
  bool da = discovery_is_atomic();
  oop referent = java_lang_ref_Reference::referent(obj);
  assert(da ? referent->is_oop() : referent->is_oop_or_null(),
         "Bad referent " INTPTR_FORMAT " found in Reference "
         INTPTR_FORMAT " during %satomic discovery ",
         p2i(referent), p2i(obj), da ? "" : "non-");
}
#endif

// We mention two of several possible choices here:
// #0: if the reference object is not in the "originating generation"
//     (or part of the heap being collected, indicated by our "span")
//     we don't treat it specially (i.e. we scan it as we would
//     a normal oop, treating its references as strong references).
//     This means that references can't be discovered unless their
//     referent is also in the same span. This is the simplest,
//     most "local" and most conservative approach, albeit one
//     that may cause weak references to be enqueued least promptly.
//     We call this choice the "ReferenceBasedDiscovery" policy.
// #1: the reference object may be in any generation (span), but if
//     the referent is in the generation (span) being currently collected
//     then we can discover the reference object, provided
//     the object has not already been discovered by
//     a different concurrently running collector (as may be the
//     case, for instance, if the reference object is in CMS and
//     the referent in DefNewGeneration), and provided the processing
//     of this reference object by the current collector will
//     appear atomic to every other collector in the system.
//     (Thus, for instance, a concurrent collector may not
//     discover references in other generations even if the
//     referent is in its own generation). This policy may,
//     in certain cases, enqueue references somewhat sooner than
//     might Policy #0 above, but at marginally increased cost
//     and complexity in processing these references.
//     We call this choice the "ReferentBasedDiscovery" policy.
bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
  // Make sure we are discovering refs (rather than processing discovered refs).
  if (!_discovering_refs || !RegisterReferences) {
    return false;
  }
  // We only discover active references.
  oop next = java_lang_ref_Reference::next(obj);
  if (next != NULL) {   // Ref is no longer active
    return false;
  }
  HeapWord* obj_addr = (HeapWord*)obj;
  if (RefDiscoveryPolicy == ReferenceBasedDiscovery &&
      !_span.contains(obj_addr)) {
    // Reference is not in the originating generation;
    // don't treat it specially (i.e. we want to scan it as a normal
    // object with strong references).
    return false;
  }

  // We only discover references whose referents are not (yet)
  // known to be strongly reachable.
  if (is_alive_non_header() != NULL) {
    verify_referent(obj);
    if (is_alive_non_header()->do_object_b(java_lang_ref_Reference::referent(obj))) {
      return false;  // referent is reachable
    }
  }
  if (rt == REF_SOFT) {
    // For soft refs we can decide now if these are not
    // current candidates for clearing, in which case we
    // can mark through them now, rather than delaying that
    // to the reference-processing phase. Since all current
    // time-stamp policies advance the soft-ref clock only
    // at a full collection cycle, this is always currently
    // accurate.
    if (!_current_soft_ref_policy->should_clear_reference(obj, _soft_ref_timestamp_clock)) {
      return false;
    }
  }

  ResourceMark rm;      // Needed for tracing.

  HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
  const oop  discovered = java_lang_ref_Reference::discovered(obj);
  assert(discovered->is_oop_or_null(), "Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered));
  if (discovered != NULL) {
    // The reference has already been discovered...
    log_develop_trace(gc, ref)("Already discovered reference (" INTPTR_FORMAT ": %s)",
                               p2i(obj), obj->klass()->internal_name());
    if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
      // assumes that an object is not processed twice;
      // if it's been already discovered it must be on another
      // generation's discovered list; so we won't discover it.
      return false;
    } else {
      assert(RefDiscoveryPolicy == ReferenceBasedDiscovery,
             "Unrecognized policy");
      // Check assumption that an object is not potentially
      // discovered twice except by concurrent collectors that potentially
      // trace the same Reference object twice.
      assert(UseConcMarkSweepGC || UseG1GC,
             "Only possible with a concurrent marking collector");
      return true;
    }
  }

  if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
    verify_referent(obj);
    // Discover if and only if EITHER:
    // .. reference is in our span, OR
    // .. we are an atomic collector and referent is in our span
    if (_span.contains(obj_addr) ||
        (discovery_is_atomic() &&
         _span.contains(java_lang_ref_Reference::referent(obj)))) {
      // should_enqueue = true;
    } else {
      return false;
    }
  } else {
    assert(RefDiscoveryPolicy == ReferenceBasedDiscovery &&
           _span.contains(obj_addr), "code inconsistency");
  }

  // Get the right type of discovered queue head.
  DiscoveredList* list = get_discovered_list(rt);
  if (list == NULL) {
    return false;   // nothing special needs to be done
  }

  if (_discovery_is_mt) {
    add_to_discovered_list_mt(*list, obj, discovered_addr);
  } else {
    // We do a raw store here: the field will be visited later when processing
    // the discovered references.
    oop current_head = list->head();
    // The last ref must have its discovered field pointing to itself.
    oop next_discovered = (current_head != NULL) ? current_head : obj;

    assert(discovered == NULL, "control point invariant");
    oop_store_raw(discovered_addr, next_discovered);
    list->set_head(obj);
    list->inc_length(1);

    log_develop_trace(gc, ref)("Discovered reference (" INTPTR_FORMAT ": %s)", p2i(obj), obj->klass()->internal_name());
  }
  assert(obj->is_oop(), "Discovered a bad reference");
  verify_referent(obj);
  return true;
}

bool ReferenceProcessor::has_discovered_references() {
  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
    if (!_discovered_refs[i].is_empty()) {
      return true;
    }
  }
  return false;
}
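// Summary of the discovery decision in discover_reference() above,
// ignoring the early filters (inactive Refs, reachable referents,
// soft-ref policy, already-discovered Refs):
//
//   RefDiscoveryPolicy        discover iff
//   ReferenceBasedDiscovery   the Reference object is in _span
//   ReferentBasedDiscovery    the Reference object is in _span, or discovery
//                             is atomic and the referent is in _span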
// Preclean the discovered references by removing those
// whose referents are alive, and by marking from those that
// are not active. These lists can be handled here
// in any order and, indeed, concurrently.
void ReferenceProcessor::preclean_discovered_references(
  BoolObjectClosure* is_alive,
  OopClosure* keep_alive,
  VoidClosure* complete_gc,
  YieldClosure* yield,
  GCTimer* gc_timer) {

  // Soft references
  {
    GCTraceTime(Debug, gc, ref) tm("Preclean SoftReferences", gc_timer);
    for (uint i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredSoftRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Weak references
  {
    GCTraceTime(Debug, gc, ref) tm("Preclean WeakReferences", gc_timer);
    for (uint i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredWeakRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Final references
  {
    GCTraceTime(Debug, gc, ref) tm("Preclean FinalReferences", gc_timer);
    for (uint i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredFinalRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }

  // Phantom references
  {
    GCTraceTime(Debug, gc, ref) tm("Preclean PhantomReferences", gc_timer);
    for (uint i = 0; i < _max_num_q; i++) {
      if (yield->should_return()) {
        return;
      }
      preclean_discovered_reflist(_discoveredPhantomRefs[i], is_alive,
                                  keep_alive, complete_gc, yield);
    }
  }
}
// Walk the given discovered ref list, and remove all reference objects
// whose referents are still alive, whose referents are NULL or which
// are not active (have a non-NULL next field). NOTE: When we are
// thus precleaning the ref lists (which happens single-threaded today),
// we do not disable refs discovery to honor the correct semantics of
// java.lang.Reference. As a result, we need to be careful below
// that ref removal steps interleave safely with ref discovery steps
// (in this thread).
void
ReferenceProcessor::preclean_discovered_reflist(DiscoveredList&    refs_list,
                                                BoolObjectClosure* is_alive,
                                                OopClosure*        keep_alive,
                                                VoidClosure*       complete_gc,
                                                YieldClosure*      yield) {
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    oop obj = iter.obj();
    oop next = java_lang_ref_Reference::next(obj);
    if (iter.referent() == NULL || iter.is_referent_alive() ||
        next != NULL) {
      // The referent has been cleared, or is alive, or the Reference is not
      // active; we need to trace and mark its cohort.
      log_develop_trace(gc, ref)("Precleaning Reference (" INTPTR_FORMAT ": %s)",
                                 p2i(iter.obj()), iter.obj()->klass()->internal_name());
      // Remove Reference object from list
      iter.remove();
      // Keep alive its cohort.
      iter.make_referent_alive();
      if (UseCompressedOops) {
        narrowOop* next_addr = (narrowOop*)java_lang_ref_Reference::next_addr(obj);
        keep_alive->do_oop(next_addr);
      } else {
        oop* next_addr = (oop*)java_lang_ref_Reference::next_addr(obj);
        keep_alive->do_oop(next_addr);
      }
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Close the reachable set
  complete_gc->do_void();

  NOT_PRODUCT(
    if (iter.processed() > 0) {
      log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " Refs out of " SIZE_FORMAT " Refs in discovered list " INTPTR_FORMAT,
                                 iter.removed(), iter.processed(), p2i(&refs_list));
    }
  )
}

const char* ReferenceProcessor::list_name(uint i) {
  assert(i < _max_num_q * number_of_subclasses_of_ref(),
         "Out of bounds index");

  int j = i / _max_num_q;
  switch (j) {
    case 0: return "SoftRef";
    case 1: return "WeakRef";
    case 2: return "FinalRef";
    case 3: return "PhantomRef";
  }
  ShouldNotReachHere();
  return NULL;
}