1 /* 2 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "classfile/javaClasses.inline.hpp" 27 #include "classfile/systemDictionary.hpp" 28 #include "gc/shared/collectedHeap.hpp" 29 #include "gc/shared/collectedHeap.inline.hpp" 30 #include "gc/shared/gcTimer.hpp" 31 #include "gc/shared/gcTraceTime.inline.hpp" 32 #include "gc/shared/referencePolicy.hpp" 33 #include "gc/shared/referenceProcessor.inline.hpp" 34 #include "logging/log.hpp" 35 #include "memory/allocation.hpp" 36 #include "memory/resourceArea.hpp" 37 #include "oops/oop.inline.hpp" 38 #include "runtime/heapMonitoring.hpp" 39 #include "runtime/java.hpp" 40 #include "runtime/jniHandles.hpp" 41 42 ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL; 43 ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy = NULL; 44 jlong ReferenceProcessor::_soft_ref_timestamp_clock = 0; 45 46 void referenceProcessor_init() { 47 ReferenceProcessor::init_statics(); 48 } 49 50 void ReferenceProcessor::init_statics() { 51 // We need a monotonically non-decreasing time in ms but 52 // os::javaTimeMillis() does not guarantee monotonicity. 53 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; 54 55 // Initialize the soft ref timestamp clock. 56 _soft_ref_timestamp_clock = now; 57 // Also update the soft ref clock in j.l.r.SoftReference 58 java_lang_ref_SoftReference::set_clock(_soft_ref_timestamp_clock); 59 60 _always_clear_soft_ref_policy = new AlwaysClearPolicy(); 61 if (is_server_compilation_mode_vm()) { 62 _default_soft_ref_policy = new LRUMaxHeapPolicy(); 63 } else { 64 _default_soft_ref_policy = new LRUCurrentHeapPolicy(); 65 } 66 if (_always_clear_soft_ref_policy == NULL || _default_soft_ref_policy == NULL) { 67 vm_exit_during_initialization("Could not allocate reference policy object"); 68 } 69 guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery || 70 RefDiscoveryPolicy == ReferentBasedDiscovery, 71 "Unrecognized RefDiscoveryPolicy"); 72 } 73 74 void ReferenceProcessor::enable_discovery(bool check_no_refs) { 75 #ifdef ASSERT 76 // Verify that we're not currently discovering refs 77 assert(!_discovering_refs, "nested call?"); 78 79 if (check_no_refs) { 80 // Verify that the discovered lists are empty 81 verify_no_references_recorded(); 82 } 83 #endif // ASSERT 84 85 // Someone could have modified the value of the static 86 // field in the j.l.r.SoftReference class that holds the 87 // soft reference timestamp clock using reflection or 88 // Unsafe between GCs. Unconditionally update the static 89 // field in ReferenceProcessor here so that we use the new 90 // value during reference discovery. 91 92 _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock(); 93 _discovering_refs = true; 94 } 95 96 ReferenceProcessor::ReferenceProcessor(MemRegion span, 97 bool mt_processing, 98 uint mt_processing_degree, 99 bool mt_discovery, 100 uint mt_discovery_degree, 101 bool atomic_discovery, 102 BoolObjectClosure* is_alive_non_header) : 103 _discovering_refs(false), 104 _enqueuing_is_done(false), 105 _is_alive_non_header(is_alive_non_header), 106 _processing_is_mt(mt_processing), 107 _next_id(0) 108 { 109 _span = span; 110 _discovery_is_atomic = atomic_discovery; 111 _discovery_is_mt = mt_discovery; 112 _num_q = MAX2(1U, mt_processing_degree); 113 _max_num_q = MAX2(_num_q, mt_discovery_degree); 114 _discovered_refs = NEW_C_HEAP_ARRAY(DiscoveredList, 115 _max_num_q * number_of_subclasses_of_ref(), mtGC); 116 117 if (_discovered_refs == NULL) { 118 vm_exit_during_initialization("Could not allocated RefProc Array"); 119 } 120 _discoveredSoftRefs = &_discovered_refs[0]; 121 _discoveredWeakRefs = &_discoveredSoftRefs[_max_num_q]; 122 _discoveredFinalRefs = &_discoveredWeakRefs[_max_num_q]; 123 _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_q]; 124 125 // Initialize all entries to NULL 126 for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) { 127 _discovered_refs[i].set_head(NULL); 128 _discovered_refs[i].set_length(0); 129 } 130 131 setup_policy(false /* default soft ref policy */); 132 } 133 134 #ifndef PRODUCT 135 void ReferenceProcessor::verify_no_references_recorded() { 136 guarantee(!_discovering_refs, "Discovering refs?"); 137 for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) { 138 guarantee(_discovered_refs[i].is_empty(), 139 "Found non-empty discovered list at %u", i); 140 } 141 } 142 #endif 143 144 void ReferenceProcessor::weak_oops_do(OopClosure* f) { 145 for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) { 146 if (UseCompressedOops) { 147 f->do_oop((narrowOop*)_discovered_refs[i].adr_head()); 148 } else { 149 f->do_oop((oop*)_discovered_refs[i].adr_head()); 150 } 151 } 152 } 153 154 void ReferenceProcessor::update_soft_ref_master_clock() { 155 // Update (advance) the soft ref master clock field. This must be done 156 // after processing the soft ref list. 157 158 // We need a monotonically non-decreasing time in ms but 159 // os::javaTimeMillis() does not guarantee monotonicity. 160 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; 161 jlong soft_ref_clock = java_lang_ref_SoftReference::clock(); 162 assert(soft_ref_clock == _soft_ref_timestamp_clock, "soft ref clocks out of sync"); 163 164 NOT_PRODUCT( 165 if (now < _soft_ref_timestamp_clock) { 166 log_warning(gc)("time warp: " JLONG_FORMAT " to " JLONG_FORMAT, 167 _soft_ref_timestamp_clock, now); 168 } 169 ) 170 // The values of now and _soft_ref_timestamp_clock are set using 171 // javaTimeNanos(), which is guaranteed to be monotonically 172 // non-decreasing provided the underlying platform provides such 173 // a time source (and it is bug free). 174 // In product mode, however, protect ourselves from non-monotonicity. 175 if (now > _soft_ref_timestamp_clock) { 176 _soft_ref_timestamp_clock = now; 177 java_lang_ref_SoftReference::set_clock(now); 178 } 179 // Else leave clock stalled at its old value until time progresses 180 // past clock value. 181 } 182 183 size_t ReferenceProcessor::total_count(DiscoveredList lists[]) const { 184 size_t total = 0; 185 for (uint i = 0; i < _max_num_q; ++i) { 186 total += lists[i].length(); 187 } 188 return total; 189 } 190 191 ReferenceProcessorStats ReferenceProcessor::process_discovered_references( 192 BoolObjectClosure* is_alive, 193 OopClosure* keep_alive, 194 VoidClosure* complete_gc, 195 AbstractRefProcTaskExecutor* task_executor, 196 ReferenceProcessorPhaseTimes* phase_times) { 197 198 double start_time = os::elapsedTime(); 199 200 assert(!enqueuing_is_done(), "If here enqueuing should not be complete"); 201 // Stop treating discovered references specially. 202 disable_discovery(); 203 204 // If discovery was concurrent, someone could have modified 205 // the value of the static field in the j.l.r.SoftReference 206 // class that holds the soft reference timestamp clock using 207 // reflection or Unsafe between when discovery was enabled and 208 // now. Unconditionally update the static field in ReferenceProcessor 209 // here so that we use the new value during processing of the 210 // discovered soft refs. 211 212 _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock(); 213 214 ReferenceProcessorStats stats(total_count(_discoveredSoftRefs), 215 total_count(_discoveredWeakRefs), 216 total_count(_discoveredFinalRefs), 217 total_count(_discoveredPhantomRefs)); 218 219 // Soft references 220 { 221 RefProcPhaseTimesTracker tt(REF_SOFT, phase_times, this); 222 process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true, 223 is_alive, keep_alive, complete_gc, task_executor, phase_times); 224 } 225 226 update_soft_ref_master_clock(); 227 228 // Weak references 229 { 230 RefProcPhaseTimesTracker tt(REF_WEAK, phase_times, this); 231 process_discovered_reflist(_discoveredWeakRefs, NULL, true, 232 is_alive, keep_alive, complete_gc, task_executor, phase_times); 233 } 234 235 // Final references 236 { 237 RefProcPhaseTimesTracker tt(REF_FINAL, phase_times, this); 238 process_discovered_reflist(_discoveredFinalRefs, NULL, false, 239 is_alive, keep_alive, complete_gc, task_executor, phase_times); 240 } 241 242 // Phantom references 243 { 244 RefProcPhaseTimesTracker tt(REF_PHANTOM, phase_times, this); 245 process_discovered_reflist(_discoveredPhantomRefs, NULL, true, 246 is_alive, keep_alive, complete_gc, task_executor, phase_times); 247 } 248 249 // Weak global JNI references. It would make more sense (semantically) to 250 // traverse these simultaneously with the regular weak references above, but 251 // that is not how the JDK1.2 specification is. See #4126360. Native code can 252 // thus use JNI weak references to circumvent the phantom references and 253 // resurrect a "post-mortem" object. 254 { 255 GCTraceTime(Debug, gc, ref) tt("JNI Weak Reference", phase_times->gc_timer()); 256 if (task_executor != NULL) { 257 task_executor->set_single_threaded_mode(); 258 } 259 process_phaseJNI(is_alive, keep_alive, complete_gc); 260 } 261 262 // Heap Monitoring references 263 size_t handled; 264 { 265 GCTraceTime(Debug, gc, ref) tt("Heap Sampler Weak Reference", phase_times->gc_timer()); 266 handled = process_phaseHeapSampling(is_alive, keep_alive, complete_gc); 267 } 268 269 phase_times->set_total_time_ms((os::elapsedTime() - start_time) * 1000); 270 271 log_develop_trace(gc, ref)("JNI Weak Reference count: " SIZE_FORMAT, count_jni_refs()); 272 log_develop_trace(gc, ref)("Heap Sampler Weak Reference handled: " SIZE_FORMAT, handled); 273 274 return stats; 275 } 276 277 #ifndef PRODUCT 278 // Calculate the number of jni handles. 279 size_t ReferenceProcessor::count_jni_refs() { 280 class CountHandleClosure: public OopClosure { 281 private: 282 size_t _count; 283 public: 284 CountHandleClosure(): _count(0) {} 285 void do_oop(oop* unused) { _count++; } 286 void do_oop(narrowOop* unused) { ShouldNotReachHere(); } 287 size_t count() { return _count; } 288 }; 289 CountHandleClosure global_handle_count; 290 JNIHandles::weak_oops_do(&global_handle_count); 291 return global_handle_count.count(); 292 } 293 #endif 294 295 void ReferenceProcessor::process_phaseJNI(BoolObjectClosure* is_alive, 296 OopClosure* keep_alive, 297 VoidClosure* complete_gc) { 298 JNIHandles::weak_oops_do(is_alive, keep_alive); 299 complete_gc->do_void(); 300 } 301 302 size_t ReferenceProcessor::process_phaseHeapSampling( 303 BoolObjectClosure* is_alive, 304 OopClosure* keep_alive, 305 VoidClosure* complete_gc) { 306 size_t count = 0; 307 if (HeapMonitoring::enabled()) { 308 count = HeapMonitoring::weak_oops_do(is_alive, keep_alive); 309 complete_gc->do_void(); 310 } 311 return count; 312 } 313 314 void ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor, 315 ReferenceProcessorPhaseTimes* phase_times) { 316 // Enqueue references that are not made active again, and 317 // clear the decks for the next collection (cycle). 318 enqueue_discovered_reflists(task_executor, phase_times); 319 320 // Stop treating discovered references specially. 321 disable_discovery(); 322 } 323 324 void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list) { 325 // Given a list of refs linked through the "discovered" field 326 // (java.lang.ref.Reference.discovered), self-loop their "next" field 327 // thus distinguishing them from active References, then 328 // prepend them to the pending list. 329 // 330 // The Java threads will see the Reference objects linked together through 331 // the discovered field. Instead of trying to do the write barrier updates 332 // in all places in the reference processor where we manipulate the discovered 333 // field we make sure to do the barrier here where we anyway iterate through 334 // all linked Reference objects. Note that it is important to not dirty any 335 // cards during reference processing since this will cause card table 336 // verification to fail for G1. 337 log_develop_trace(gc, ref)("ReferenceProcessor::enqueue_discovered_reflist list " INTPTR_FORMAT, p2i(&refs_list)); 338 339 oop obj = NULL; 340 oop next_d = refs_list.head(); 341 // Walk down the list, self-looping the next field 342 // so that the References are not considered active. 343 while (obj != next_d) { 344 obj = next_d; 345 assert(obj->is_instance(), "should be an instance object"); 346 assert(InstanceKlass::cast(obj->klass())->is_reference_instance_klass(), "should be reference object"); 347 next_d = java_lang_ref_Reference::discovered(obj); 348 log_develop_trace(gc, ref)(" obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT, p2i(obj), p2i(next_d)); 349 assert(java_lang_ref_Reference::next(obj) == NULL, 350 "Reference not active; should not be discovered"); 351 // Self-loop next, so as to make Ref not active. 352 java_lang_ref_Reference::set_next_raw(obj, obj); 353 if (next_d != obj) { 354 oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), next_d); 355 } else { 356 // This is the last object. 357 // Swap refs_list into pending list and set obj's 358 // discovered to what we read from the pending list. 359 oop old = Universe::swap_reference_pending_list(refs_list.head()); 360 java_lang_ref_Reference::set_discovered_raw(obj, old); // old may be NULL 361 oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), old); 362 } 363 } 364 } 365 366 // Parallel enqueue task 367 class RefProcEnqueueTask: public AbstractRefProcTaskExecutor::EnqueueTask { 368 public: 369 RefProcEnqueueTask(ReferenceProcessor& ref_processor, 370 DiscoveredList discovered_refs[], 371 int n_queues, 372 ReferenceProcessorPhaseTimes* phase_times) 373 : EnqueueTask(ref_processor, discovered_refs, n_queues, phase_times) 374 { } 375 376 virtual void work(unsigned int work_id) { 377 RefProcWorkerTimeTracker tt(ReferenceProcessorPhaseTimes::RefEnqueue, _phase_times, work_id); 378 379 assert(work_id < (unsigned int)_ref_processor.max_num_q(), "Index out-of-bounds"); 380 // Simplest first cut: static partitioning. 381 int index = work_id; 382 // The increment on "index" must correspond to the maximum number of queues 383 // (n_queues) with which that ReferenceProcessor was created. That 384 // is because of the "clever" way the discovered references lists were 385 // allocated and are indexed into. 386 assert(_n_queues == (int) _ref_processor.max_num_q(), "Different number not expected"); 387 for (int j = 0; 388 j < ReferenceProcessor::number_of_subclasses_of_ref(); 389 j++, index += _n_queues) { 390 _ref_processor.enqueue_discovered_reflist(_refs_lists[index]); 391 _refs_lists[index].set_head(NULL); 392 _refs_lists[index].set_length(0); 393 } 394 } 395 }; 396 397 // Enqueue references that are not made active again 398 void ReferenceProcessor::enqueue_discovered_reflists(AbstractRefProcTaskExecutor* task_executor, 399 ReferenceProcessorPhaseTimes* phase_times) { 400 401 ReferenceProcessorStats stats(total_count(_discoveredSoftRefs), 402 total_count(_discoveredWeakRefs), 403 total_count(_discoveredFinalRefs), 404 total_count(_discoveredPhantomRefs)); 405 406 RefProcEnqueueTimeTracker tt(phase_times, stats); 407 408 if (_processing_is_mt && task_executor != NULL) { 409 // Parallel code 410 RefProcEnqueueTask tsk(*this, _discovered_refs, _max_num_q, phase_times); 411 task_executor->execute(tsk); 412 } else { 413 // Serial code: call the parent class's implementation 414 for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) { 415 enqueue_discovered_reflist(_discovered_refs[i]); 416 _discovered_refs[i].set_head(NULL); 417 _discovered_refs[i].set_length(0); 418 } 419 } 420 } 421 422 void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) { 423 _discovered_addr = java_lang_ref_Reference::discovered_addr(_ref); 424 oop discovered = java_lang_ref_Reference::discovered(_ref); 425 assert(_discovered_addr && oopDesc::is_oop_or_null(discovered), 426 "Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered)); 427 _next = discovered; 428 _referent_addr = java_lang_ref_Reference::referent_addr(_ref); 429 _referent = java_lang_ref_Reference::referent(_ref); 430 assert(Universe::heap()->is_in_reserved_or_null(_referent), 431 "Wrong oop found in java.lang.Reference object"); 432 assert(allow_null_referent ? 433 oopDesc::is_oop_or_null(_referent) 434 : oopDesc::is_oop(_referent), 435 "Expected an oop%s for referent field at " PTR_FORMAT, 436 (allow_null_referent ? " or NULL" : ""), 437 p2i(_referent)); 438 } 439 440 void DiscoveredListIterator::remove() { 441 assert(oopDesc::is_oop(_ref), "Dropping a bad reference"); 442 oop_store_raw(_discovered_addr, NULL); 443 444 // First _prev_next ref actually points into DiscoveredList (gross). 445 oop new_next; 446 if (_next == _ref) { 447 // At the end of the list, we should make _prev point to itself. 448 // If _ref is the first ref, then _prev_next will be in the DiscoveredList, 449 // and _prev will be NULL. 450 new_next = _prev; 451 } else { 452 new_next = _next; 453 } 454 // Remove Reference object from discovered list. Note that G1 does not need a 455 // pre-barrier here because we know the Reference has already been found/marked, 456 // that's how it ended up in the discovered list in the first place. 457 oop_store_raw(_prev_next, new_next); 458 NOT_PRODUCT(_removed++); 459 _refs_list.dec_length(1); 460 } 461 462 void DiscoveredListIterator::clear_referent() { 463 oop_store_raw(_referent_addr, NULL); 464 } 465 466 // NOTE: process_phase*() are largely similar, and at a high level 467 // merely iterate over the extant list applying a predicate to 468 // each of its elements and possibly removing that element from the 469 // list and applying some further closures to that element. 470 // We should consider the possibility of replacing these 471 // process_phase*() methods by abstracting them into 472 // a single general iterator invocation that receives appropriate 473 // closures that accomplish this work. 474 475 // (SoftReferences only) Traverse the list and remove any SoftReferences whose 476 // referents are not alive, but that should be kept alive for policy reasons. 477 // Keep alive the transitive closure of all such referents. 478 void 479 ReferenceProcessor::process_phase1(DiscoveredList& refs_list, 480 ReferencePolicy* policy, 481 BoolObjectClosure* is_alive, 482 OopClosure* keep_alive, 483 VoidClosure* complete_gc) { 484 assert(policy != NULL, "Must have a non-NULL policy"); 485 DiscoveredListIterator iter(refs_list, keep_alive, is_alive); 486 // Decide which softly reachable refs should be kept alive. 487 while (iter.has_next()) { 488 iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */)); 489 bool referent_is_dead = (iter.referent() != NULL) && !iter.is_referent_alive(); 490 if (referent_is_dead && 491 !policy->should_clear_reference(iter.obj(), _soft_ref_timestamp_clock)) { 492 log_develop_trace(gc, ref)("Dropping reference (" INTPTR_FORMAT ": %s" ") by policy", 493 p2i(iter.obj()), iter.obj()->klass()->internal_name()); 494 // Remove Reference object from list 495 iter.remove(); 496 // keep the referent around 497 iter.make_referent_alive(); 498 iter.move_to_next(); 499 } else { 500 iter.next(); 501 } 502 } 503 // Close the reachable set 504 complete_gc->do_void(); 505 log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " dead Refs out of " SIZE_FORMAT " discovered Refs by policy, from list " INTPTR_FORMAT, 506 iter.removed(), iter.processed(), p2i(&refs_list)); 507 } 508 509 // Traverse the list and remove any Refs that are not active, or 510 // whose referents are either alive or NULL. 511 void 512 ReferenceProcessor::pp2_work(DiscoveredList& refs_list, 513 BoolObjectClosure* is_alive, 514 OopClosure* keep_alive) { 515 assert(discovery_is_atomic(), "Error"); 516 DiscoveredListIterator iter(refs_list, keep_alive, is_alive); 517 while (iter.has_next()) { 518 iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */)); 519 DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());) 520 assert(next == NULL, "Should not discover inactive Reference"); 521 if (iter.is_referent_alive()) { 522 log_develop_trace(gc, ref)("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)", 523 p2i(iter.obj()), iter.obj()->klass()->internal_name()); 524 // The referent is reachable after all. 525 // Remove Reference object from list. 526 iter.remove(); 527 // Update the referent pointer as necessary: Note that this 528 // should not entail any recursive marking because the 529 // referent must already have been traversed. 530 iter.make_referent_alive(); 531 iter.move_to_next(); 532 } else { 533 iter.next(); 534 } 535 } 536 NOT_PRODUCT( 537 if (iter.processed() > 0) { 538 log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " active Refs out of " SIZE_FORMAT 539 " Refs in discovered list " INTPTR_FORMAT, 540 iter.removed(), iter.processed(), p2i(&refs_list)); 541 } 542 ) 543 } 544 545 void 546 ReferenceProcessor::pp2_work_concurrent_discovery(DiscoveredList& refs_list, 547 BoolObjectClosure* is_alive, 548 OopClosure* keep_alive, 549 VoidClosure* complete_gc) { 550 assert(!discovery_is_atomic(), "Error"); 551 DiscoveredListIterator iter(refs_list, keep_alive, is_alive); 552 while (iter.has_next()) { 553 iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */)); 554 HeapWord* next_addr = java_lang_ref_Reference::next_addr(iter.obj()); 555 oop next = java_lang_ref_Reference::next(iter.obj()); 556 if ((iter.referent() == NULL || iter.is_referent_alive() || 557 next != NULL)) { 558 assert(oopDesc::is_oop_or_null(next), "Expected an oop or NULL for next field at " PTR_FORMAT, p2i(next)); 559 // Remove Reference object from list 560 iter.remove(); 561 // Trace the cohorts 562 iter.make_referent_alive(); 563 if (UseCompressedOops) { 564 keep_alive->do_oop((narrowOop*)next_addr); 565 } else { 566 keep_alive->do_oop((oop*)next_addr); 567 } 568 iter.move_to_next(); 569 } else { 570 iter.next(); 571 } 572 } 573 // Now close the newly reachable set 574 complete_gc->do_void(); 575 NOT_PRODUCT( 576 if (iter.processed() > 0) { 577 log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " active Refs out of " SIZE_FORMAT 578 " Refs in discovered list " INTPTR_FORMAT, 579 iter.removed(), iter.processed(), p2i(&refs_list)); 580 } 581 ) 582 } 583 584 // Traverse the list and process the referents, by either 585 // clearing them or keeping them (and their reachable 586 // closure) alive. 587 void 588 ReferenceProcessor::process_phase3(DiscoveredList& refs_list, 589 bool clear_referent, 590 BoolObjectClosure* is_alive, 591 OopClosure* keep_alive, 592 VoidClosure* complete_gc) { 593 ResourceMark rm; 594 DiscoveredListIterator iter(refs_list, keep_alive, is_alive); 595 while (iter.has_next()) { 596 iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */)); 597 if (clear_referent) { 598 // NULL out referent pointer 599 iter.clear_referent(); 600 } else { 601 // keep the referent around 602 iter.make_referent_alive(); 603 } 604 log_develop_trace(gc, ref)("Adding %sreference (" INTPTR_FORMAT ": %s) as pending", 605 clear_referent ? "cleared " : "", p2i(iter.obj()), iter.obj()->klass()->internal_name()); 606 assert(oopDesc::is_oop(iter.obj(), UseConcMarkSweepGC), "Adding a bad reference"); 607 iter.next(); 608 } 609 // Close the reachable set 610 complete_gc->do_void(); 611 } 612 613 void 614 ReferenceProcessor::clear_discovered_references(DiscoveredList& refs_list) { 615 oop obj = NULL; 616 oop next = refs_list.head(); 617 while (next != obj) { 618 obj = next; 619 next = java_lang_ref_Reference::discovered(obj); 620 java_lang_ref_Reference::set_discovered_raw(obj, NULL); 621 } 622 refs_list.set_head(NULL); 623 refs_list.set_length(0); 624 } 625 626 void ReferenceProcessor::abandon_partial_discovery() { 627 // loop over the lists 628 for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) { 629 if ((i % _max_num_q) == 0) { 630 log_develop_trace(gc, ref)("Abandoning %s discovered list", list_name(i)); 631 } 632 clear_discovered_references(_discovered_refs[i]); 633 } 634 } 635 636 size_t ReferenceProcessor::total_reference_count(ReferenceType type) const { 637 DiscoveredList* list = NULL; 638 639 switch (type) { 640 case REF_SOFT: 641 list = _discoveredSoftRefs; 642 break; 643 case REF_WEAK: 644 list = _discoveredWeakRefs; 645 break; 646 case REF_FINAL: 647 list = _discoveredFinalRefs; 648 break; 649 case REF_PHANTOM: 650 list = _discoveredPhantomRefs; 651 break; 652 case REF_OTHER: 653 case REF_NONE: 654 default: 655 ShouldNotReachHere(); 656 } 657 return total_count(list); 658 } 659 660 class RefProcPhase1Task: public AbstractRefProcTaskExecutor::ProcessTask { 661 public: 662 RefProcPhase1Task(ReferenceProcessor& ref_processor, 663 DiscoveredList refs_lists[], 664 ReferencePolicy* policy, 665 bool marks_oops_alive, 666 ReferenceProcessorPhaseTimes* phase_times) 667 : ProcessTask(ref_processor, refs_lists, marks_oops_alive, phase_times), 668 _policy(policy) 669 { } 670 virtual void work(unsigned int i, BoolObjectClosure& is_alive, 671 OopClosure& keep_alive, 672 VoidClosure& complete_gc) 673 { 674 RefProcWorkerTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase1, _phase_times, i); 675 676 _ref_processor.process_phase1(_refs_lists[i], _policy, 677 &is_alive, &keep_alive, &complete_gc); 678 } 679 private: 680 ReferencePolicy* _policy; 681 }; 682 683 class RefProcPhase2Task: public AbstractRefProcTaskExecutor::ProcessTask { 684 public: 685 RefProcPhase2Task(ReferenceProcessor& ref_processor, 686 DiscoveredList refs_lists[], 687 bool marks_oops_alive, 688 ReferenceProcessorPhaseTimes* phase_times) 689 : ProcessTask(ref_processor, refs_lists, marks_oops_alive, phase_times) 690 { } 691 virtual void work(unsigned int i, BoolObjectClosure& is_alive, 692 OopClosure& keep_alive, 693 VoidClosure& complete_gc) 694 { 695 RefProcWorkerTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase2, _phase_times, i); 696 697 _ref_processor.process_phase2(_refs_lists[i], 698 &is_alive, &keep_alive, &complete_gc); 699 } 700 }; 701 702 class RefProcPhase3Task: public AbstractRefProcTaskExecutor::ProcessTask { 703 public: 704 RefProcPhase3Task(ReferenceProcessor& ref_processor, 705 DiscoveredList refs_lists[], 706 bool clear_referent, 707 bool marks_oops_alive, 708 ReferenceProcessorPhaseTimes* phase_times) 709 : ProcessTask(ref_processor, refs_lists, marks_oops_alive, phase_times), 710 _clear_referent(clear_referent) 711 { } 712 virtual void work(unsigned int i, BoolObjectClosure& is_alive, 713 OopClosure& keep_alive, 714 VoidClosure& complete_gc) 715 { 716 RefProcWorkerTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase3, _phase_times, i); 717 718 _ref_processor.process_phase3(_refs_lists[i], _clear_referent, 719 &is_alive, &keep_alive, &complete_gc); 720 } 721 private: 722 bool _clear_referent; 723 }; 724 725 #ifndef PRODUCT 726 void ReferenceProcessor::log_reflist_counts(DiscoveredList ref_lists[], uint active_length, size_t total_refs) { 727 if (!log_is_enabled(Trace, gc, ref)) { 728 return; 729 } 730 731 stringStream st; 732 for (uint i = 0; i < active_length; ++i) { 733 st.print(SIZE_FORMAT " ", ref_lists[i].length()); 734 } 735 log_develop_trace(gc, ref)("%s= " SIZE_FORMAT, st.as_string(), total_refs); 736 #ifdef ASSERT 737 for (uint i = active_length; i < _max_num_q; i++) { 738 assert(ref_lists[i].length() == 0, SIZE_FORMAT " unexpected References in %u", 739 ref_lists[i].length(), i); 740 } 741 #endif 742 } 743 #endif 744 745 void ReferenceProcessor::set_active_mt_degree(uint v) { 746 _num_q = v; 747 _next_id = 0; 748 } 749 750 // Balances reference queues. 751 // Move entries from all queues[0, 1, ..., _max_num_q-1] to 752 // queues[0, 1, ..., _num_q-1] because only the first _num_q 753 // corresponding to the active workers will be processed. 754 void ReferenceProcessor::balance_queues(DiscoveredList ref_lists[]) 755 { 756 // calculate total length 757 size_t total_refs = 0; 758 log_develop_trace(gc, ref)("Balance ref_lists "); 759 760 for (uint i = 0; i < _max_num_q; ++i) { 761 total_refs += ref_lists[i].length(); 762 } 763 log_reflist_counts(ref_lists, _max_num_q, total_refs); 764 size_t avg_refs = total_refs / _num_q + 1; 765 uint to_idx = 0; 766 for (uint from_idx = 0; from_idx < _max_num_q; from_idx++) { 767 bool move_all = false; 768 if (from_idx >= _num_q) { 769 move_all = ref_lists[from_idx].length() > 0; 770 } 771 while ((ref_lists[from_idx].length() > avg_refs) || 772 move_all) { 773 assert(to_idx < _num_q, "Sanity Check!"); 774 if (ref_lists[to_idx].length() < avg_refs) { 775 // move superfluous refs 776 size_t refs_to_move; 777 // Move all the Ref's if the from queue will not be processed. 778 if (move_all) { 779 refs_to_move = MIN2(ref_lists[from_idx].length(), 780 avg_refs - ref_lists[to_idx].length()); 781 } else { 782 refs_to_move = MIN2(ref_lists[from_idx].length() - avg_refs, 783 avg_refs - ref_lists[to_idx].length()); 784 } 785 786 assert(refs_to_move > 0, "otherwise the code below will fail"); 787 788 oop move_head = ref_lists[from_idx].head(); 789 oop move_tail = move_head; 790 oop new_head = move_head; 791 // find an element to split the list on 792 for (size_t j = 0; j < refs_to_move; ++j) { 793 move_tail = new_head; 794 new_head = java_lang_ref_Reference::discovered(new_head); 795 } 796 797 // Add the chain to the to list. 798 if (ref_lists[to_idx].head() == NULL) { 799 // to list is empty. Make a loop at the end. 800 java_lang_ref_Reference::set_discovered_raw(move_tail, move_tail); 801 } else { 802 java_lang_ref_Reference::set_discovered_raw(move_tail, ref_lists[to_idx].head()); 803 } 804 ref_lists[to_idx].set_head(move_head); 805 ref_lists[to_idx].inc_length(refs_to_move); 806 807 // Remove the chain from the from list. 808 if (move_tail == new_head) { 809 // We found the end of the from list. 810 ref_lists[from_idx].set_head(NULL); 811 } else { 812 ref_lists[from_idx].set_head(new_head); 813 } 814 ref_lists[from_idx].dec_length(refs_to_move); 815 if (ref_lists[from_idx].length() == 0) { 816 break; 817 } 818 } else { 819 to_idx = (to_idx + 1) % _num_q; 820 } 821 } 822 } 823 #ifdef ASSERT 824 size_t balanced_total_refs = 0; 825 for (uint i = 0; i < _num_q; ++i) { 826 balanced_total_refs += ref_lists[i].length(); 827 } 828 log_reflist_counts(ref_lists, _num_q, balanced_total_refs); 829 assert(total_refs == balanced_total_refs, "Balancing was incomplete"); 830 #endif 831 } 832 833 void ReferenceProcessor::balance_all_queues() { 834 balance_queues(_discoveredSoftRefs); 835 balance_queues(_discoveredWeakRefs); 836 balance_queues(_discoveredFinalRefs); 837 balance_queues(_discoveredPhantomRefs); 838 } 839 840 void ReferenceProcessor::process_discovered_reflist( 841 DiscoveredList refs_lists[], 842 ReferencePolicy* policy, 843 bool clear_referent, 844 BoolObjectClosure* is_alive, 845 OopClosure* keep_alive, 846 VoidClosure* complete_gc, 847 AbstractRefProcTaskExecutor* task_executor, 848 ReferenceProcessorPhaseTimes* phase_times) 849 { 850 bool mt_processing = task_executor != NULL && _processing_is_mt; 851 852 phase_times->set_processing_is_mt(mt_processing); 853 854 // If discovery used MT and a dynamic number of GC threads, then 855 // the queues must be balanced for correctness if fewer than the 856 // maximum number of queues were used. The number of queue used 857 // during discovery may be different than the number to be used 858 // for processing so don't depend of _num_q < _max_num_q as part 859 // of the test. 860 bool must_balance = _discovery_is_mt; 861 862 if ((mt_processing && ParallelRefProcBalancingEnabled) || 863 must_balance) { 864 RefProcBalanceQueuesTimeTracker tt(phase_times); 865 balance_queues(refs_lists); 866 } 867 868 // Phase 1 (soft refs only): 869 // . Traverse the list and remove any SoftReferences whose 870 // referents are not alive, but that should be kept alive for 871 // policy reasons. Keep alive the transitive closure of all 872 // such referents. 873 if (policy != NULL) { 874 RefProcParPhaseTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase1, phase_times); 875 876 if (mt_processing) { 877 RefProcPhase1Task phase1(*this, refs_lists, policy, true /*marks_oops_alive*/, phase_times); 878 task_executor->execute(phase1); 879 } else { 880 for (uint i = 0; i < _max_num_q; i++) { 881 process_phase1(refs_lists[i], policy, 882 is_alive, keep_alive, complete_gc); 883 } 884 } 885 } else { // policy == NULL 886 assert(refs_lists != _discoveredSoftRefs, 887 "Policy must be specified for soft references."); 888 } 889 890 // Phase 2: 891 // . Traverse the list and remove any refs whose referents are alive. 892 { 893 RefProcParPhaseTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase2, phase_times); 894 895 if (mt_processing) { 896 RefProcPhase2Task phase2(*this, refs_lists, !discovery_is_atomic() /*marks_oops_alive*/, phase_times); 897 task_executor->execute(phase2); 898 } else { 899 for (uint i = 0; i < _max_num_q; i++) { 900 process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc); 901 } 902 } 903 } 904 905 // Phase 3: 906 // . Traverse the list and process referents as appropriate. 907 { 908 RefProcParPhaseTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase3, phase_times); 909 910 if (mt_processing) { 911 RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/, phase_times); 912 task_executor->execute(phase3); 913 } else { 914 for (uint i = 0; i < _max_num_q; i++) { 915 process_phase3(refs_lists[i], clear_referent, 916 is_alive, keep_alive, complete_gc); 917 } 918 } 919 } 920 } 921 922 inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt) { 923 uint id = 0; 924 // Determine the queue index to use for this object. 925 if (_discovery_is_mt) { 926 // During a multi-threaded discovery phase, 927 // each thread saves to its "own" list. 928 Thread* thr = Thread::current(); 929 id = thr->as_Worker_thread()->id(); 930 } else { 931 // single-threaded discovery, we save in round-robin 932 // fashion to each of the lists. 933 if (_processing_is_mt) { 934 id = next_id(); 935 } 936 } 937 assert(id < _max_num_q, "Id is out-of-bounds id %u and max id %u)", id, _max_num_q); 938 939 // Get the discovered queue to which we will add 940 DiscoveredList* list = NULL; 941 switch (rt) { 942 case REF_OTHER: 943 // Unknown reference type, no special treatment 944 break; 945 case REF_SOFT: 946 list = &_discoveredSoftRefs[id]; 947 break; 948 case REF_WEAK: 949 list = &_discoveredWeakRefs[id]; 950 break; 951 case REF_FINAL: 952 list = &_discoveredFinalRefs[id]; 953 break; 954 case REF_PHANTOM: 955 list = &_discoveredPhantomRefs[id]; 956 break; 957 case REF_NONE: 958 // we should not reach here if we are an InstanceRefKlass 959 default: 960 ShouldNotReachHere(); 961 } 962 log_develop_trace(gc, ref)("Thread %d gets list " INTPTR_FORMAT, id, p2i(list)); 963 return list; 964 } 965 966 inline void 967 ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list, 968 oop obj, 969 HeapWord* discovered_addr) { 970 assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller"); 971 // First we must make sure this object is only enqueued once. CAS in a non null 972 // discovered_addr. 973 oop current_head = refs_list.head(); 974 // The last ref must have its discovered field pointing to itself. 975 oop next_discovered = (current_head != NULL) ? current_head : obj; 976 977 oop retest = oopDesc::atomic_compare_exchange_oop(next_discovered, discovered_addr, 978 NULL); 979 if (retest == NULL) { 980 // This thread just won the right to enqueue the object. 981 // We have separate lists for enqueueing, so no synchronization 982 // is necessary. 983 refs_list.set_head(obj); 984 refs_list.inc_length(1); 985 986 log_develop_trace(gc, ref)("Discovered reference (mt) (" INTPTR_FORMAT ": %s)", 987 p2i(obj), obj->klass()->internal_name()); 988 } else { 989 // If retest was non NULL, another thread beat us to it: 990 // The reference has already been discovered... 991 log_develop_trace(gc, ref)("Already discovered reference (" INTPTR_FORMAT ": %s)", 992 p2i(obj), obj->klass()->internal_name()); 993 } 994 } 995 996 #ifndef PRODUCT 997 // Non-atomic (i.e. concurrent) discovery might allow us 998 // to observe j.l.References with NULL referents, being those 999 // cleared concurrently by mutators during (or after) discovery. 1000 void ReferenceProcessor::verify_referent(oop obj) { 1001 bool da = discovery_is_atomic(); 1002 oop referent = java_lang_ref_Reference::referent(obj); 1003 assert(da ? oopDesc::is_oop(referent) : oopDesc::is_oop_or_null(referent), 1004 "Bad referent " INTPTR_FORMAT " found in Reference " 1005 INTPTR_FORMAT " during %satomic discovery ", 1006 p2i(referent), p2i(obj), da ? "" : "non-"); 1007 } 1008 #endif 1009 1010 // We mention two of several possible choices here: 1011 // #0: if the reference object is not in the "originating generation" 1012 // (or part of the heap being collected, indicated by our "span" 1013 // we don't treat it specially (i.e. we scan it as we would 1014 // a normal oop, treating its references as strong references). 1015 // This means that references can't be discovered unless their 1016 // referent is also in the same span. This is the simplest, 1017 // most "local" and most conservative approach, albeit one 1018 // that may cause weak references to be enqueued least promptly. 1019 // We call this choice the "ReferenceBasedDiscovery" policy. 1020 // #1: the reference object may be in any generation (span), but if 1021 // the referent is in the generation (span) being currently collected 1022 // then we can discover the reference object, provided 1023 // the object has not already been discovered by 1024 // a different concurrently running collector (as may be the 1025 // case, for instance, if the reference object is in CMS and 1026 // the referent in DefNewGeneration), and provided the processing 1027 // of this reference object by the current collector will 1028 // appear atomic to every other collector in the system. 1029 // (Thus, for instance, a concurrent collector may not 1030 // discover references in other generations even if the 1031 // referent is in its own generation). This policy may, 1032 // in certain cases, enqueue references somewhat sooner than 1033 // might Policy #0 above, but at marginally increased cost 1034 // and complexity in processing these references. 1035 // We call this choice the "RefeferentBasedDiscovery" policy. 1036 bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) { 1037 // Make sure we are discovering refs (rather than processing discovered refs). 1038 if (!_discovering_refs || !RegisterReferences) { 1039 return false; 1040 } 1041 // We only discover active references. 1042 oop next = java_lang_ref_Reference::next(obj); 1043 if (next != NULL) { // Ref is no longer active 1044 return false; 1045 } 1046 1047 HeapWord* obj_addr = (HeapWord*)obj; 1048 if (RefDiscoveryPolicy == ReferenceBasedDiscovery && 1049 !_span.contains(obj_addr)) { 1050 // Reference is not in the originating generation; 1051 // don't treat it specially (i.e. we want to scan it as a normal 1052 // object with strong references). 1053 return false; 1054 } 1055 1056 // We only discover references whose referents are not (yet) 1057 // known to be strongly reachable. 1058 if (is_alive_non_header() != NULL) { 1059 verify_referent(obj); 1060 if (is_alive_non_header()->do_object_b(java_lang_ref_Reference::referent(obj))) { 1061 return false; // referent is reachable 1062 } 1063 } 1064 if (rt == REF_SOFT) { 1065 // For soft refs we can decide now if these are not 1066 // current candidates for clearing, in which case we 1067 // can mark through them now, rather than delaying that 1068 // to the reference-processing phase. Since all current 1069 // time-stamp policies advance the soft-ref clock only 1070 // at a full collection cycle, this is always currently 1071 // accurate. 1072 if (!_current_soft_ref_policy->should_clear_reference(obj, _soft_ref_timestamp_clock)) { 1073 return false; 1074 } 1075 } 1076 1077 ResourceMark rm; // Needed for tracing. 1078 1079 HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(obj); 1080 const oop discovered = java_lang_ref_Reference::discovered(obj); 1081 assert(oopDesc::is_oop_or_null(discovered), "Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered)); 1082 if (discovered != NULL) { 1083 // The reference has already been discovered... 1084 log_develop_trace(gc, ref)("Already discovered reference (" INTPTR_FORMAT ": %s)", 1085 p2i(obj), obj->klass()->internal_name()); 1086 if (RefDiscoveryPolicy == ReferentBasedDiscovery) { 1087 // assumes that an object is not processed twice; 1088 // if it's been already discovered it must be on another 1089 // generation's discovered list; so we won't discover it. 1090 return false; 1091 } else { 1092 assert(RefDiscoveryPolicy == ReferenceBasedDiscovery, 1093 "Unrecognized policy"); 1094 // Check assumption that an object is not potentially 1095 // discovered twice except by concurrent collectors that potentially 1096 // trace the same Reference object twice. 1097 assert(UseConcMarkSweepGC || UseG1GC, 1098 "Only possible with a concurrent marking collector"); 1099 return true; 1100 } 1101 } 1102 1103 if (RefDiscoveryPolicy == ReferentBasedDiscovery) { 1104 verify_referent(obj); 1105 // Discover if and only if EITHER: 1106 // .. reference is in our span, OR 1107 // .. we are an atomic collector and referent is in our span 1108 if (_span.contains(obj_addr) || 1109 (discovery_is_atomic() && 1110 _span.contains(java_lang_ref_Reference::referent(obj)))) { 1111 // should_enqueue = true; 1112 } else { 1113 return false; 1114 } 1115 } else { 1116 assert(RefDiscoveryPolicy == ReferenceBasedDiscovery && 1117 _span.contains(obj_addr), "code inconsistency"); 1118 } 1119 1120 // Get the right type of discovered queue head. 1121 DiscoveredList* list = get_discovered_list(rt); 1122 if (list == NULL) { 1123 return false; // nothing special needs to be done 1124 } 1125 1126 if (_discovery_is_mt) { 1127 add_to_discovered_list_mt(*list, obj, discovered_addr); 1128 } else { 1129 // We do a raw store here: the field will be visited later when processing 1130 // the discovered references. 1131 oop current_head = list->head(); 1132 // The last ref must have its discovered field pointing to itself. 1133 oop next_discovered = (current_head != NULL) ? current_head : obj; 1134 1135 assert(discovered == NULL, "control point invariant"); 1136 oop_store_raw(discovered_addr, next_discovered); 1137 list->set_head(obj); 1138 list->inc_length(1); 1139 1140 log_develop_trace(gc, ref)("Discovered reference (" INTPTR_FORMAT ": %s)", p2i(obj), obj->klass()->internal_name()); 1141 } 1142 assert(oopDesc::is_oop(obj), "Discovered a bad reference"); 1143 verify_referent(obj); 1144 return true; 1145 } 1146 1147 bool ReferenceProcessor::has_discovered_references() { 1148 for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) { 1149 if (!_discovered_refs[i].is_empty()) { 1150 return true; 1151 } 1152 } 1153 return false; 1154 } 1155 1156 // Preclean the discovered references by removing those 1157 // whose referents are alive, and by marking from those that 1158 // are not active. These lists can be handled here 1159 // in any order and, indeed, concurrently. 1160 void ReferenceProcessor::preclean_discovered_references( 1161 BoolObjectClosure* is_alive, 1162 OopClosure* keep_alive, 1163 VoidClosure* complete_gc, 1164 YieldClosure* yield, 1165 GCTimer* gc_timer) { 1166 1167 // Soft references 1168 { 1169 GCTraceTime(Debug, gc, ref) tm("Preclean SoftReferences", gc_timer); 1170 for (uint i = 0; i < _max_num_q; i++) { 1171 if (yield->should_return()) { 1172 return; 1173 } 1174 preclean_discovered_reflist(_discoveredSoftRefs[i], is_alive, 1175 keep_alive, complete_gc, yield); 1176 } 1177 } 1178 1179 // Weak references 1180 { 1181 GCTraceTime(Debug, gc, ref) tm("Preclean WeakReferences", gc_timer); 1182 for (uint i = 0; i < _max_num_q; i++) { 1183 if (yield->should_return()) { 1184 return; 1185 } 1186 preclean_discovered_reflist(_discoveredWeakRefs[i], is_alive, 1187 keep_alive, complete_gc, yield); 1188 } 1189 } 1190 1191 // Final references 1192 { 1193 GCTraceTime(Debug, gc, ref) tm("Preclean FinalReferences", gc_timer); 1194 for (uint i = 0; i < _max_num_q; i++) { 1195 if (yield->should_return()) { 1196 return; 1197 } 1198 preclean_discovered_reflist(_discoveredFinalRefs[i], is_alive, 1199 keep_alive, complete_gc, yield); 1200 } 1201 } 1202 1203 // Phantom references 1204 { 1205 GCTraceTime(Debug, gc, ref) tm("Preclean PhantomReferences", gc_timer); 1206 for (uint i = 0; i < _max_num_q; i++) { 1207 if (yield->should_return()) { 1208 return; 1209 } 1210 preclean_discovered_reflist(_discoveredPhantomRefs[i], is_alive, 1211 keep_alive, complete_gc, yield); 1212 } 1213 } 1214 } 1215 1216 // Walk the given discovered ref list, and remove all reference objects 1217 // whose referents are still alive, whose referents are NULL or which 1218 // are not active (have a non-NULL next field). NOTE: When we are 1219 // thus precleaning the ref lists (which happens single-threaded today), 1220 // we do not disable refs discovery to honor the correct semantics of 1221 // java.lang.Reference. As a result, we need to be careful below 1222 // that ref removal steps interleave safely with ref discovery steps 1223 // (in this thread). 1224 void 1225 ReferenceProcessor::preclean_discovered_reflist(DiscoveredList& refs_list, 1226 BoolObjectClosure* is_alive, 1227 OopClosure* keep_alive, 1228 VoidClosure* complete_gc, 1229 YieldClosure* yield) { 1230 DiscoveredListIterator iter(refs_list, keep_alive, is_alive); 1231 while (iter.has_next()) { 1232 iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */)); 1233 oop obj = iter.obj(); 1234 oop next = java_lang_ref_Reference::next(obj); 1235 if (iter.referent() == NULL || iter.is_referent_alive() || 1236 next != NULL) { 1237 // The referent has been cleared, or is alive, or the Reference is not 1238 // active; we need to trace and mark its cohort. 1239 log_develop_trace(gc, ref)("Precleaning Reference (" INTPTR_FORMAT ": %s)", 1240 p2i(iter.obj()), iter.obj()->klass()->internal_name()); 1241 // Remove Reference object from list 1242 iter.remove(); 1243 // Keep alive its cohort. 1244 iter.make_referent_alive(); 1245 if (UseCompressedOops) { 1246 narrowOop* next_addr = (narrowOop*)java_lang_ref_Reference::next_addr(obj); 1247 keep_alive->do_oop(next_addr); 1248 } else { 1249 oop* next_addr = (oop*)java_lang_ref_Reference::next_addr(obj); 1250 keep_alive->do_oop(next_addr); 1251 } 1252 iter.move_to_next(); 1253 } else { 1254 iter.next(); 1255 } 1256 } 1257 // Close the reachable set 1258 complete_gc->do_void(); 1259 1260 NOT_PRODUCT( 1261 if (iter.processed() > 0) { 1262 log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " Refs out of " SIZE_FORMAT " Refs in discovered list " INTPTR_FORMAT, 1263 iter.removed(), iter.processed(), p2i(&refs_list)); 1264 } 1265 ) 1266 } 1267 1268 const char* ReferenceProcessor::list_name(uint i) { 1269 assert(i <= _max_num_q * number_of_subclasses_of_ref(), 1270 "Out of bounds index"); 1271 1272 int j = i / _max_num_q; 1273 switch (j) { 1274 case 0: return "SoftRef"; 1275 case 1: return "WeakRef"; 1276 case 2: return "FinalRef"; 1277 case 3: return "PhantomRef"; 1278 } 1279 ShouldNotReachHere(); 1280 return NULL; 1281 }