1 /* 2 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "classfile/javaClasses.inline.hpp" 27 #include "classfile/systemDictionary.hpp" 28 #include "gc/shared/collectedHeap.hpp" 29 #include "gc/shared/collectedHeap.inline.hpp" 30 #include "gc/shared/gcTimer.hpp" 31 #include "gc/shared/gcTraceTime.inline.hpp" 32 #include "gc/shared/referencePolicy.hpp" 33 #include "gc/shared/referenceProcessor.inline.hpp" 34 #include "logging/log.hpp" 35 #include "memory/allocation.hpp" 36 #include "memory/resourceArea.hpp" 37 #include "oops/oop.inline.hpp" 38 #include "runtime/java.hpp" 39 #include "runtime/jniHandles.hpp" 40 41 ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL; 42 ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy = NULL; 43 jlong ReferenceProcessor::_soft_ref_timestamp_clock = 0; 44 45 void referenceProcessor_init() { 46 ReferenceProcessor::init_statics(); 47 } 48 49 void ReferenceProcessor::init_statics() { 50 // We need a monotonically non-decreasing time in ms but 51 // os::javaTimeMillis() does not guarantee monotonicity. 52 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; 53 54 // Initialize the soft ref timestamp clock. 55 _soft_ref_timestamp_clock = now; 56 // Also update the soft ref clock in j.l.r.SoftReference 57 java_lang_ref_SoftReference::set_clock(_soft_ref_timestamp_clock); 58 59 _always_clear_soft_ref_policy = new AlwaysClearPolicy(); 60 if (is_server_compilation_mode_vm()) { 61 _default_soft_ref_policy = new LRUMaxHeapPolicy(); 62 } else { 63 _default_soft_ref_policy = new LRUCurrentHeapPolicy(); 64 } 65 if (_always_clear_soft_ref_policy == NULL || _default_soft_ref_policy == NULL) { 66 vm_exit_during_initialization("Could not allocate reference policy object"); 67 } 68 guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery || 69 RefDiscoveryPolicy == ReferentBasedDiscovery, 70 "Unrecognized RefDiscoveryPolicy"); 71 } 72 73 void ReferenceProcessor::enable_discovery(bool check_no_refs) { 74 #ifdef ASSERT 75 // Verify that we're not currently discovering refs 76 assert(!_discovering_refs, "nested call?"); 77 78 if (check_no_refs) { 79 // Verify that the discovered lists are empty 80 verify_no_references_recorded(); 81 } 82 #endif // ASSERT 83 84 // Someone could have modified the value of the static 85 // field in the j.l.r.SoftReference class that holds the 86 // soft reference timestamp clock using reflection or 87 // Unsafe between GCs. Unconditionally update the static 88 // field in ReferenceProcessor here so that we use the new 89 // value during reference discovery. 90 91 _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock(); 92 _discovering_refs = true; 93 } 94 95 ReferenceProcessor::ReferenceProcessor(MemRegion span, 96 bool mt_processing, 97 uint mt_processing_degree, 98 bool mt_discovery, 99 uint mt_discovery_degree, 100 bool atomic_discovery, 101 BoolObjectClosure* is_alive_non_header) : 102 _discovering_refs(false), 103 _enqueuing_is_done(false), 104 _is_alive_non_header(is_alive_non_header), 105 _processing_is_mt(mt_processing), 106 _next_id(0) 107 { 108 _span = span; 109 _discovery_is_atomic = atomic_discovery; 110 _discovery_is_mt = mt_discovery; 111 _num_q = MAX2(1U, mt_processing_degree); 112 _max_num_q = MAX2(_num_q, mt_discovery_degree); 113 _discovered_refs = NEW_C_HEAP_ARRAY(DiscoveredList, 114 _max_num_q * number_of_subclasses_of_ref(), mtGC); 115 116 if (_discovered_refs == NULL) { 117 vm_exit_during_initialization("Could not allocated RefProc Array"); 118 } 119 _discoveredSoftRefs = &_discovered_refs[0]; 120 _discoveredWeakRefs = &_discoveredSoftRefs[_max_num_q]; 121 _discoveredFinalRefs = &_discoveredWeakRefs[_max_num_q]; 122 _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_q]; 123 124 // Initialize all entries to NULL 125 for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) { 126 _discovered_refs[i].set_head(NULL); 127 _discovered_refs[i].set_length(0); 128 } 129 130 _phase_times = new ReferenceProcessorPhaseTimes(_num_q, _processing_is_mt); 131 132 setup_policy(false /* default soft ref policy */); 133 } 134 135 #ifndef PRODUCT 136 void ReferenceProcessor::verify_no_references_recorded() { 137 guarantee(!_discovering_refs, "Discovering refs?"); 138 for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) { 139 guarantee(_discovered_refs[i].is_empty(), 140 "Found non-empty discovered list at %u", i); 141 } 142 } 143 #endif 144 145 void ReferenceProcessor::weak_oops_do(OopClosure* f) { 146 for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) { 147 if (UseCompressedOops) { 148 f->do_oop((narrowOop*)_discovered_refs[i].adr_head()); 149 } else { 150 f->do_oop((oop*)_discovered_refs[i].adr_head()); 151 } 152 } 153 } 154 155 void ReferenceProcessor::update_soft_ref_master_clock() { 156 // Update (advance) the soft ref master clock field. This must be done 157 // after processing the soft ref list. 158 159 // We need a monotonically non-decreasing time in ms but 160 // os::javaTimeMillis() does not guarantee monotonicity. 161 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; 162 jlong soft_ref_clock = java_lang_ref_SoftReference::clock(); 163 assert(soft_ref_clock == _soft_ref_timestamp_clock, "soft ref clocks out of sync"); 164 165 NOT_PRODUCT( 166 if (now < _soft_ref_timestamp_clock) { 167 log_warning(gc)("time warp: " JLONG_FORMAT " to " JLONG_FORMAT, 168 _soft_ref_timestamp_clock, now); 169 } 170 ) 171 // The values of now and _soft_ref_timestamp_clock are set using 172 // javaTimeNanos(), which is guaranteed to be monotonically 173 // non-decreasing provided the underlying platform provides such 174 // a time source (and it is bug free). 175 // In product mode, however, protect ourselves from non-monotonicity. 176 if (now > _soft_ref_timestamp_clock) { 177 _soft_ref_timestamp_clock = now; 178 java_lang_ref_SoftReference::set_clock(now); 179 } 180 // Else leave clock stalled at its old value until time progresses 181 // past clock value. 182 } 183 184 size_t ReferenceProcessor::total_count(DiscoveredList lists[]) { 185 size_t total = 0; 186 for (uint i = 0; i < _max_num_q; ++i) { 187 total += lists[i].length(); 188 } 189 return total; 190 } 191 192 ReferenceProcessorStats ReferenceProcessor::process_discovered_references( 193 BoolObjectClosure* is_alive, 194 OopClosure* keep_alive, 195 VoidClosure* complete_gc, 196 AbstractRefProcTaskExecutor* task_executor, 197 GCTimer* gc_timer) { 198 199 assert(!enqueuing_is_done(), "If here enqueuing should not be complete"); 200 // Stop treating discovered references specially. 201 disable_discovery(); 202 203 // If discovery was concurrent, someone could have modified 204 // the value of the static field in the j.l.r.SoftReference 205 // class that holds the soft reference timestamp clock using 206 // reflection or Unsafe between when discovery was enabled and 207 // now. Unconditionally update the static field in ReferenceProcessor 208 // here so that we use the new value during processing of the 209 // discovered soft refs. 210 211 _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock(); 212 213 ReferenceProcessorStats stats(total_count(_discoveredSoftRefs), 214 total_count(_discoveredWeakRefs), 215 total_count(_discoveredFinalRefs), 216 total_count(_discoveredPhantomRefs)); 217 218 // Soft references 219 { 220 RefProcPhaseTimesLogger tt("SoftReference", phase_times(), _discoveredSoftRefs, gc_timer); 221 process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true, 222 is_alive, keep_alive, complete_gc, task_executor, gc_timer); 223 } 224 225 update_soft_ref_master_clock(); 226 227 // Weak references 228 { 229 RefProcPhaseTimesLogger tt("WeakReference", phase_times(), _discoveredWeakRefs, gc_timer); 230 process_discovered_reflist(_discoveredWeakRefs, NULL, true, 231 is_alive, keep_alive, complete_gc, task_executor, gc_timer); 232 } 233 234 // Final references 235 { 236 RefProcPhaseTimesLogger tt("FinalReference", phase_times(), _discoveredFinalRefs, gc_timer); 237 process_discovered_reflist(_discoveredFinalRefs, NULL, false, 238 is_alive, keep_alive, complete_gc, task_executor, gc_timer); 239 } 240 241 // Phantom references 242 { 243 RefProcPhaseTimesLogger tt("PhantomReference", phase_times(), _discoveredPhantomRefs, gc_timer); 244 process_discovered_reflist(_discoveredPhantomRefs, NULL, true, 245 is_alive, keep_alive, complete_gc, task_executor, gc_timer); 246 } 247 248 // Weak global JNI references. It would make more sense (semantically) to 249 // traverse these simultaneously with the regular weak references above, but 250 // that is not how the JDK1.2 specification is. See #4126360. Native code can 251 // thus use JNI weak references to circumvent the phantom references and 252 // resurrect a "post-mortem" object. 253 { 254 GCTraceTime(Debug, gc, ref) tt("JNI Weak Reference", gc_timer); 255 if (task_executor != NULL) { 256 task_executor->set_single_threaded_mode(); 257 } 258 process_phaseJNI(is_alive, keep_alive, complete_gc); 259 } 260 261 log_develop_trace(gc, ref)("JNI Weak Reference count: " SIZE_FORMAT, count_jni_refs()); 262 263 return stats; 264 } 265 266 #ifndef PRODUCT 267 // Calculate the number of jni handles. 268 size_t ReferenceProcessor::count_jni_refs() { 269 class CountHandleClosure: public OopClosure { 270 private: 271 size_t _count; 272 public: 273 CountHandleClosure(): _count(0) {} 274 void do_oop(oop* unused) { _count++; } 275 void do_oop(narrowOop* unused) { ShouldNotReachHere(); } 276 size_t count() { return _count; } 277 }; 278 CountHandleClosure global_handle_count; 279 JNIHandles::weak_oops_do(&global_handle_count); 280 return global_handle_count.count(); 281 } 282 #endif 283 284 void ReferenceProcessor::process_phaseJNI(BoolObjectClosure* is_alive, 285 OopClosure* keep_alive, 286 VoidClosure* complete_gc) { 287 JNIHandles::weak_oops_do(is_alive, keep_alive); 288 complete_gc->do_void(); 289 } 290 291 void ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor, GCTimer* gc_timer) { 292 // Enqueue references that are not made active again, and 293 // clear the decks for the next collection (cycle). 294 enqueue_discovered_reflists(task_executor, gc_timer); 295 296 // Stop treating discovered references specially. 297 disable_discovery(); 298 } 299 300 void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list) { 301 // Given a list of refs linked through the "discovered" field 302 // (java.lang.ref.Reference.discovered), self-loop their "next" field 303 // thus distinguishing them from active References, then 304 // prepend them to the pending list. 305 // 306 // The Java threads will see the Reference objects linked together through 307 // the discovered field. Instead of trying to do the write barrier updates 308 // in all places in the reference processor where we manipulate the discovered 309 // field we make sure to do the barrier here where we anyway iterate through 310 // all linked Reference objects. Note that it is important to not dirty any 311 // cards during reference processing since this will cause card table 312 // verification to fail for G1. 313 log_develop_trace(gc, ref)("ReferenceProcessor::enqueue_discovered_reflist list " INTPTR_FORMAT, p2i(&refs_list)); 314 315 oop obj = NULL; 316 oop next_d = refs_list.head(); 317 // Walk down the list, self-looping the next field 318 // so that the References are not considered active. 319 while (obj != next_d) { 320 obj = next_d; 321 assert(obj->is_instance(), "should be an instance object"); 322 assert(InstanceKlass::cast(obj->klass())->is_reference_instance_klass(), "should be reference object"); 323 next_d = java_lang_ref_Reference::discovered(obj); 324 log_develop_trace(gc, ref)(" obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT, p2i(obj), p2i(next_d)); 325 assert(java_lang_ref_Reference::next(obj) == NULL, 326 "Reference not active; should not be discovered"); 327 // Self-loop next, so as to make Ref not active. 328 java_lang_ref_Reference::set_next_raw(obj, obj); 329 if (next_d != obj) { 330 oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), next_d); 331 } else { 332 // This is the last object. 333 // Swap refs_list into pending list and set obj's 334 // discovered to what we read from the pending list. 335 oop old = Universe::swap_reference_pending_list(refs_list.head()); 336 java_lang_ref_Reference::set_discovered_raw(obj, old); // old may be NULL 337 oopDesc::bs()->write_ref_field(java_lang_ref_Reference::discovered_addr(obj), old); 338 } 339 } 340 } 341 342 // Parallel enqueue task 343 class RefProcEnqueueTask: public AbstractRefProcTaskExecutor::EnqueueTask { 344 public: 345 RefProcEnqueueTask(ReferenceProcessor& ref_processor, 346 DiscoveredList discovered_refs[], 347 int n_queues) 348 : EnqueueTask(ref_processor, discovered_refs, n_queues) 349 { } 350 351 virtual void work(unsigned int work_id) { 352 RefProcWorkerTimeTracker tt(_ref_processor.phase_times()->worker_time_sec(ReferenceProcessorPhaseTimes::RefEnqueue), work_id); 353 354 assert(work_id < (unsigned int)_ref_processor.max_num_q(), "Index out-of-bounds"); 355 // Simplest first cut: static partitioning. 356 int index = work_id; 357 // The increment on "index" must correspond to the maximum number of queues 358 // (n_queues) with which that ReferenceProcessor was created. That 359 // is because of the "clever" way the discovered references lists were 360 // allocated and are indexed into. 361 assert(_n_queues == (int) _ref_processor.max_num_q(), "Different number not expected"); 362 for (int j = 0; 363 j < ReferenceProcessor::number_of_subclasses_of_ref(); 364 j++, index += _n_queues) { 365 _ref_processor.enqueue_discovered_reflist(_refs_lists[index]); 366 _refs_lists[index].set_head(NULL); 367 _refs_lists[index].set_length(0); 368 } 369 } 370 }; 371 372 // Enqueue references that are not made active again 373 void ReferenceProcessor::enqueue_discovered_reflists(AbstractRefProcTaskExecutor* task_executor, GCTimer* gc_timer) { 374 375 ReferenceProcessorStats stats(total_count(_discoveredSoftRefs), 376 total_count(_discoveredWeakRefs), 377 total_count(_discoveredFinalRefs), 378 total_count(_discoveredPhantomRefs)); 379 380 RefProcEnqueueTimeLogger tt(phase_times(), stats, gc_timer); 381 382 if (_processing_is_mt && task_executor != NULL) { 383 // Parallel code 384 RefProcEnqueueTask tsk(*this, _discovered_refs, _max_num_q); 385 task_executor->execute(tsk); 386 } else { 387 // Serial code: call the parent class's implementation 388 for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) { 389 enqueue_discovered_reflist(_discovered_refs[i]); 390 _discovered_refs[i].set_head(NULL); 391 _discovered_refs[i].set_length(0); 392 } 393 } 394 } 395 396 void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) { 397 _discovered_addr = java_lang_ref_Reference::discovered_addr(_ref); 398 oop discovered = java_lang_ref_Reference::discovered(_ref); 399 assert(_discovered_addr && discovered->is_oop_or_null(), 400 "Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered)); 401 _next = discovered; 402 _referent_addr = java_lang_ref_Reference::referent_addr(_ref); 403 _referent = java_lang_ref_Reference::referent(_ref); 404 assert(Universe::heap()->is_in_reserved_or_null(_referent), 405 "Wrong oop found in java.lang.Reference object"); 406 assert(allow_null_referent ? 407 _referent->is_oop_or_null() 408 : _referent->is_oop(), 409 "Expected an oop%s for referent field at " PTR_FORMAT, 410 (allow_null_referent ? " or NULL" : ""), 411 p2i(_referent)); 412 } 413 414 void DiscoveredListIterator::remove() { 415 assert(_ref->is_oop(), "Dropping a bad reference"); 416 oop_store_raw(_discovered_addr, NULL); 417 418 // First _prev_next ref actually points into DiscoveredList (gross). 419 oop new_next; 420 if (_next == _ref) { 421 // At the end of the list, we should make _prev point to itself. 422 // If _ref is the first ref, then _prev_next will be in the DiscoveredList, 423 // and _prev will be NULL. 424 new_next = _prev; 425 } else { 426 new_next = _next; 427 } 428 // Remove Reference object from discovered list. Note that G1 does not need a 429 // pre-barrier here because we know the Reference has already been found/marked, 430 // that's how it ended up in the discovered list in the first place. 431 oop_store_raw(_prev_next, new_next); 432 NOT_PRODUCT(_removed++); 433 _refs_list.dec_length(1); 434 } 435 436 void DiscoveredListIterator::clear_referent() { 437 oop_store_raw(_referent_addr, NULL); 438 } 439 440 // NOTE: process_phase*() are largely similar, and at a high level 441 // merely iterate over the extant list applying a predicate to 442 // each of its elements and possibly removing that element from the 443 // list and applying some further closures to that element. 444 // We should consider the possibility of replacing these 445 // process_phase*() methods by abstracting them into 446 // a single general iterator invocation that receives appropriate 447 // closures that accomplish this work. 448 449 // (SoftReferences only) Traverse the list and remove any SoftReferences whose 450 // referents are not alive, but that should be kept alive for policy reasons. 451 // Keep alive the transitive closure of all such referents. 452 void 453 ReferenceProcessor::process_phase1(DiscoveredList& refs_list, 454 ReferencePolicy* policy, 455 BoolObjectClosure* is_alive, 456 OopClosure* keep_alive, 457 VoidClosure* complete_gc) { 458 assert(policy != NULL, "Must have a non-NULL policy"); 459 DiscoveredListIterator iter(refs_list, keep_alive, is_alive); 460 // Decide which softly reachable refs should be kept alive. 461 while (iter.has_next()) { 462 iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */)); 463 bool referent_is_dead = (iter.referent() != NULL) && !iter.is_referent_alive(); 464 if (referent_is_dead && 465 !policy->should_clear_reference(iter.obj(), _soft_ref_timestamp_clock)) { 466 log_develop_trace(gc, ref)("Dropping reference (" INTPTR_FORMAT ": %s" ") by policy", 467 p2i(iter.obj()), iter.obj()->klass()->internal_name()); 468 // Remove Reference object from list 469 iter.remove(); 470 // keep the referent around 471 iter.make_referent_alive(); 472 iter.move_to_next(); 473 } else { 474 iter.next(); 475 } 476 } 477 // Close the reachable set 478 complete_gc->do_void(); 479 log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " dead Refs out of " SIZE_FORMAT " discovered Refs by policy, from list " INTPTR_FORMAT, 480 iter.removed(), iter.processed(), p2i(&refs_list)); 481 } 482 483 // Traverse the list and remove any Refs that are not active, or 484 // whose referents are either alive or NULL. 485 void 486 ReferenceProcessor::pp2_work(DiscoveredList& refs_list, 487 BoolObjectClosure* is_alive, 488 OopClosure* keep_alive) { 489 assert(discovery_is_atomic(), "Error"); 490 DiscoveredListIterator iter(refs_list, keep_alive, is_alive); 491 while (iter.has_next()) { 492 iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */)); 493 DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());) 494 assert(next == NULL, "Should not discover inactive Reference"); 495 if (iter.is_referent_alive()) { 496 log_develop_trace(gc, ref)("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)", 497 p2i(iter.obj()), iter.obj()->klass()->internal_name()); 498 // The referent is reachable after all. 499 // Remove Reference object from list. 500 iter.remove(); 501 // Update the referent pointer as necessary: Note that this 502 // should not entail any recursive marking because the 503 // referent must already have been traversed. 504 iter.make_referent_alive(); 505 iter.move_to_next(); 506 } else { 507 iter.next(); 508 } 509 } 510 NOT_PRODUCT( 511 if (iter.processed() > 0) { 512 log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " active Refs out of " SIZE_FORMAT 513 " Refs in discovered list " INTPTR_FORMAT, 514 iter.removed(), iter.processed(), p2i(&refs_list)); 515 } 516 ) 517 } 518 519 void 520 ReferenceProcessor::pp2_work_concurrent_discovery(DiscoveredList& refs_list, 521 BoolObjectClosure* is_alive, 522 OopClosure* keep_alive, 523 VoidClosure* complete_gc) { 524 assert(!discovery_is_atomic(), "Error"); 525 DiscoveredListIterator iter(refs_list, keep_alive, is_alive); 526 while (iter.has_next()) { 527 iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */)); 528 HeapWord* next_addr = java_lang_ref_Reference::next_addr(iter.obj()); 529 oop next = java_lang_ref_Reference::next(iter.obj()); 530 if ((iter.referent() == NULL || iter.is_referent_alive() || 531 next != NULL)) { 532 assert(next->is_oop_or_null(), "Expected an oop or NULL for next field at " PTR_FORMAT, p2i(next)); 533 // Remove Reference object from list 534 iter.remove(); 535 // Trace the cohorts 536 iter.make_referent_alive(); 537 if (UseCompressedOops) { 538 keep_alive->do_oop((narrowOop*)next_addr); 539 } else { 540 keep_alive->do_oop((oop*)next_addr); 541 } 542 iter.move_to_next(); 543 } else { 544 iter.next(); 545 } 546 } 547 // Now close the newly reachable set 548 complete_gc->do_void(); 549 NOT_PRODUCT( 550 if (iter.processed() > 0) { 551 log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " active Refs out of " SIZE_FORMAT 552 " Refs in discovered list " INTPTR_FORMAT, 553 iter.removed(), iter.processed(), p2i(&refs_list)); 554 } 555 ) 556 } 557 558 // Traverse the list and process the referents, by either 559 // clearing them or keeping them (and their reachable 560 // closure) alive. 561 void 562 ReferenceProcessor::process_phase3(DiscoveredList& refs_list, 563 bool clear_referent, 564 BoolObjectClosure* is_alive, 565 OopClosure* keep_alive, 566 VoidClosure* complete_gc) { 567 ResourceMark rm; 568 DiscoveredListIterator iter(refs_list, keep_alive, is_alive); 569 while (iter.has_next()) { 570 iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */)); 571 if (clear_referent) { 572 // NULL out referent pointer 573 iter.clear_referent(); 574 } else { 575 // keep the referent around 576 iter.make_referent_alive(); 577 } 578 log_develop_trace(gc, ref)("Adding %sreference (" INTPTR_FORMAT ": %s) as pending", 579 clear_referent ? "cleared " : "", p2i(iter.obj()), iter.obj()->klass()->internal_name()); 580 assert(iter.obj()->is_oop(UseConcMarkSweepGC), "Adding a bad reference"); 581 iter.next(); 582 } 583 // Close the reachable set 584 complete_gc->do_void(); 585 } 586 587 void 588 ReferenceProcessor::clear_discovered_references(DiscoveredList& refs_list) { 589 oop obj = NULL; 590 oop next = refs_list.head(); 591 while (next != obj) { 592 obj = next; 593 next = java_lang_ref_Reference::discovered(obj); 594 java_lang_ref_Reference::set_discovered_raw(obj, NULL); 595 } 596 refs_list.set_head(NULL); 597 refs_list.set_length(0); 598 } 599 600 void ReferenceProcessor::abandon_partial_discovery() { 601 // loop over the lists 602 for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) { 603 if ((i % _max_num_q) == 0) { 604 log_develop_trace(gc, ref)("Abandoning %s discovered list", list_name(i)); 605 } 606 clear_discovered_references(_discovered_refs[i]); 607 } 608 } 609 610 class RefProcPhase1Task: public AbstractRefProcTaskExecutor::ProcessTask { 611 public: 612 RefProcPhase1Task(ReferenceProcessor& ref_processor, 613 DiscoveredList refs_lists[], 614 ReferencePolicy* policy, 615 bool marks_oops_alive) 616 : ProcessTask(ref_processor, refs_lists, marks_oops_alive), 617 _policy(policy) 618 { } 619 virtual void work(unsigned int i, BoolObjectClosure& is_alive, 620 OopClosure& keep_alive, 621 VoidClosure& complete_gc) 622 { 623 RefProcWorkerTimeTracker tt(_ref_processor.phase_times()->worker_time_sec(ReferenceProcessorPhaseTimes::RefPhase1), i); 624 625 _ref_processor.process_phase1(_refs_lists[i], _policy, 626 &is_alive, &keep_alive, &complete_gc); 627 } 628 private: 629 ReferencePolicy* _policy; 630 }; 631 632 class RefProcPhase2Task: public AbstractRefProcTaskExecutor::ProcessTask { 633 public: 634 RefProcPhase2Task(ReferenceProcessor& ref_processor, 635 DiscoveredList refs_lists[], 636 bool marks_oops_alive) 637 : ProcessTask(ref_processor, refs_lists, marks_oops_alive) 638 { } 639 virtual void work(unsigned int i, BoolObjectClosure& is_alive, 640 OopClosure& keep_alive, 641 VoidClosure& complete_gc) 642 { 643 RefProcWorkerTimeTracker tt(_ref_processor.phase_times()->worker_time_sec(ReferenceProcessorPhaseTimes::RefPhase2), i); 644 645 _ref_processor.process_phase2(_refs_lists[i], 646 &is_alive, &keep_alive, &complete_gc); 647 } 648 }; 649 650 class RefProcPhase3Task: public AbstractRefProcTaskExecutor::ProcessTask { 651 public: 652 RefProcPhase3Task(ReferenceProcessor& ref_processor, 653 DiscoveredList refs_lists[], 654 bool clear_referent, 655 bool marks_oops_alive) 656 : ProcessTask(ref_processor, refs_lists, marks_oops_alive), 657 _clear_referent(clear_referent) 658 { } 659 virtual void work(unsigned int i, BoolObjectClosure& is_alive, 660 OopClosure& keep_alive, 661 VoidClosure& complete_gc) 662 { 663 RefProcWorkerTimeTracker tt(_ref_processor.phase_times()->worker_time_sec(ReferenceProcessorPhaseTimes::RefPhase3), i); 664 665 _ref_processor.process_phase3(_refs_lists[i], _clear_referent, 666 &is_alive, &keep_alive, &complete_gc); 667 } 668 private: 669 bool _clear_referent; 670 }; 671 672 #ifndef PRODUCT 673 void ReferenceProcessor::log_reflist_counts(DiscoveredList ref_lists[], uint active_length, size_t total_refs) { 674 if (!log_is_enabled(Trace, gc, ref)) { 675 return; 676 } 677 678 stringStream st; 679 for (uint i = 0; i < active_length; ++i) { 680 st.print(SIZE_FORMAT " ", ref_lists[i].length()); 681 } 682 log_develop_trace(gc, ref)("%s= " SIZE_FORMAT, st.as_string(), total_refs); 683 #ifdef ASSERT 684 for (uint i = active_length; i < _max_num_q; i++) { 685 assert(ref_lists[i].length() == 0, SIZE_FORMAT " unexpected References in %u", 686 ref_lists[i].length(), i); 687 } 688 #endif 689 } 690 #endif 691 692 void ReferenceProcessor::set_active_mt_degree(uint v) { 693 _num_q = v; 694 _next_id = 0; 695 } 696 697 // Balances reference queues. 698 // Move entries from all queues[0, 1, ..., _max_num_q-1] to 699 // queues[0, 1, ..., _num_q-1] because only the first _num_q 700 // corresponding to the active workers will be processed. 701 void ReferenceProcessor::balance_queues(DiscoveredList ref_lists[]) 702 { 703 // calculate total length 704 size_t total_refs = 0; 705 log_develop_trace(gc, ref)("Balance ref_lists "); 706 707 for (uint i = 0; i < _max_num_q; ++i) { 708 total_refs += ref_lists[i].length(); 709 } 710 log_reflist_counts(ref_lists, _max_num_q, total_refs); 711 size_t avg_refs = total_refs / _num_q + 1; 712 uint to_idx = 0; 713 for (uint from_idx = 0; from_idx < _max_num_q; from_idx++) { 714 bool move_all = false; 715 if (from_idx >= _num_q) { 716 move_all = ref_lists[from_idx].length() > 0; 717 } 718 while ((ref_lists[from_idx].length() > avg_refs) || 719 move_all) { 720 assert(to_idx < _num_q, "Sanity Check!"); 721 if (ref_lists[to_idx].length() < avg_refs) { 722 // move superfluous refs 723 size_t refs_to_move; 724 // Move all the Ref's if the from queue will not be processed. 725 if (move_all) { 726 refs_to_move = MIN2(ref_lists[from_idx].length(), 727 avg_refs - ref_lists[to_idx].length()); 728 } else { 729 refs_to_move = MIN2(ref_lists[from_idx].length() - avg_refs, 730 avg_refs - ref_lists[to_idx].length()); 731 } 732 733 assert(refs_to_move > 0, "otherwise the code below will fail"); 734 735 oop move_head = ref_lists[from_idx].head(); 736 oop move_tail = move_head; 737 oop new_head = move_head; 738 // find an element to split the list on 739 for (size_t j = 0; j < refs_to_move; ++j) { 740 move_tail = new_head; 741 new_head = java_lang_ref_Reference::discovered(new_head); 742 } 743 744 // Add the chain to the to list. 745 if (ref_lists[to_idx].head() == NULL) { 746 // to list is empty. Make a loop at the end. 747 java_lang_ref_Reference::set_discovered_raw(move_tail, move_tail); 748 } else { 749 java_lang_ref_Reference::set_discovered_raw(move_tail, ref_lists[to_idx].head()); 750 } 751 ref_lists[to_idx].set_head(move_head); 752 ref_lists[to_idx].inc_length(refs_to_move); 753 754 // Remove the chain from the from list. 755 if (move_tail == new_head) { 756 // We found the end of the from list. 757 ref_lists[from_idx].set_head(NULL); 758 } else { 759 ref_lists[from_idx].set_head(new_head); 760 } 761 ref_lists[from_idx].dec_length(refs_to_move); 762 if (ref_lists[from_idx].length() == 0) { 763 break; 764 } 765 } else { 766 to_idx = (to_idx + 1) % _num_q; 767 } 768 } 769 } 770 #ifdef ASSERT 771 size_t balanced_total_refs = 0; 772 for (uint i = 0; i < _num_q; ++i) { 773 balanced_total_refs += ref_lists[i].length(); 774 } 775 log_reflist_counts(ref_lists, _num_q, balanced_total_refs); 776 assert(total_refs == balanced_total_refs, "Balancing was incomplete"); 777 #endif 778 } 779 780 void ReferenceProcessor::balance_all_queues() { 781 balance_queues(_discoveredSoftRefs); 782 balance_queues(_discoveredWeakRefs); 783 balance_queues(_discoveredFinalRefs); 784 balance_queues(_discoveredPhantomRefs); 785 } 786 787 void ReferenceProcessor::process_discovered_reflist( 788 DiscoveredList refs_lists[], 789 ReferencePolicy* policy, 790 bool clear_referent, 791 BoolObjectClosure* is_alive, 792 OopClosure* keep_alive, 793 VoidClosure* complete_gc, 794 AbstractRefProcTaskExecutor* task_executor, 795 GCTimer* gc_timer) 796 { 797 bool mt_processing = task_executor != NULL && _processing_is_mt; 798 799 phase_times()->set_processing_is_mt(mt_processing); 800 801 // If discovery used MT and a dynamic number of GC threads, then 802 // the queues must be balanced for correctness if fewer than the 803 // maximum number of queues were used. The number of queue used 804 // during discovery may be different than the number to be used 805 // for processing so don't depend of _num_q < _max_num_q as part 806 // of the test. 807 bool must_balance = _discovery_is_mt; 808 809 if ((mt_processing && ParallelRefProcBalancingEnabled) || 810 must_balance) { 811 RefProcBalanceQueuesTimeTracker tt(phase_times(), gc_timer); 812 balance_queues(refs_lists); 813 } 814 815 // Phase 1 (soft refs only): 816 // . Traverse the list and remove any SoftReferences whose 817 // referents are not alive, but that should be kept alive for 818 // policy reasons. Keep alive the transitive closure of all 819 // such referents. 820 if (policy != NULL) { 821 RefProcPhaseTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase1, phase_times(), gc_timer); 822 823 if (mt_processing) { 824 RefProcPhase1Task phase1(*this, refs_lists, policy, true /*marks_oops_alive*/); 825 task_executor->execute(phase1); 826 } else { 827 for (uint i = 0; i < _max_num_q; i++) { 828 process_phase1(refs_lists[i], policy, 829 is_alive, keep_alive, complete_gc); 830 } 831 } 832 } else { // policy == NULL 833 assert(refs_lists != _discoveredSoftRefs, 834 "Policy must be specified for soft references."); 835 } 836 837 // Phase 2: 838 // . Traverse the list and remove any refs whose referents are alive. 839 { 840 RefProcPhaseTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase2, phase_times(), gc_timer); 841 842 if (mt_processing) { 843 RefProcPhase2Task phase2(*this, refs_lists, !discovery_is_atomic() /*marks_oops_alive*/); 844 task_executor->execute(phase2); 845 } else { 846 for (uint i = 0; i < _max_num_q; i++) { 847 process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc); 848 } 849 } 850 } 851 852 // Phase 3: 853 // . Traverse the list and process referents as appropriate. 854 { 855 RefProcPhaseTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase3, phase_times(), gc_timer); 856 857 if (mt_processing) { 858 RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/); 859 task_executor->execute(phase3); 860 } else { 861 for (uint i = 0; i < _max_num_q; i++) { 862 process_phase3(refs_lists[i], clear_referent, 863 is_alive, keep_alive, complete_gc); 864 } 865 } 866 } 867 } 868 869 inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt) { 870 uint id = 0; 871 // Determine the queue index to use for this object. 872 if (_discovery_is_mt) { 873 // During a multi-threaded discovery phase, 874 // each thread saves to its "own" list. 875 Thread* thr = Thread::current(); 876 id = thr->as_Worker_thread()->id(); 877 } else { 878 // single-threaded discovery, we save in round-robin 879 // fashion to each of the lists. 880 if (_processing_is_mt) { 881 id = next_id(); 882 } 883 } 884 assert(id < _max_num_q, "Id is out-of-bounds id %u and max id %u)", id, _max_num_q); 885 886 // Get the discovered queue to which we will add 887 DiscoveredList* list = NULL; 888 switch (rt) { 889 case REF_OTHER: 890 // Unknown reference type, no special treatment 891 break; 892 case REF_SOFT: 893 list = &_discoveredSoftRefs[id]; 894 break; 895 case REF_WEAK: 896 list = &_discoveredWeakRefs[id]; 897 break; 898 case REF_FINAL: 899 list = &_discoveredFinalRefs[id]; 900 break; 901 case REF_PHANTOM: 902 list = &_discoveredPhantomRefs[id]; 903 break; 904 case REF_NONE: 905 // we should not reach here if we are an InstanceRefKlass 906 default: 907 ShouldNotReachHere(); 908 } 909 log_develop_trace(gc, ref)("Thread %d gets list " INTPTR_FORMAT, id, p2i(list)); 910 return list; 911 } 912 913 inline void 914 ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list, 915 oop obj, 916 HeapWord* discovered_addr) { 917 assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller"); 918 // First we must make sure this object is only enqueued once. CAS in a non null 919 // discovered_addr. 920 oop current_head = refs_list.head(); 921 // The last ref must have its discovered field pointing to itself. 922 oop next_discovered = (current_head != NULL) ? current_head : obj; 923 924 oop retest = oopDesc::atomic_compare_exchange_oop(next_discovered, discovered_addr, 925 NULL); 926 if (retest == NULL) { 927 // This thread just won the right to enqueue the object. 928 // We have separate lists for enqueueing, so no synchronization 929 // is necessary. 930 refs_list.set_head(obj); 931 refs_list.inc_length(1); 932 933 log_develop_trace(gc, ref)("Discovered reference (mt) (" INTPTR_FORMAT ": %s)", 934 p2i(obj), obj->klass()->internal_name()); 935 } else { 936 // If retest was non NULL, another thread beat us to it: 937 // The reference has already been discovered... 938 log_develop_trace(gc, ref)("Already discovered reference (" INTPTR_FORMAT ": %s)", 939 p2i(obj), obj->klass()->internal_name()); 940 } 941 } 942 943 #ifndef PRODUCT 944 // Non-atomic (i.e. concurrent) discovery might allow us 945 // to observe j.l.References with NULL referents, being those 946 // cleared concurrently by mutators during (or after) discovery. 947 void ReferenceProcessor::verify_referent(oop obj) { 948 bool da = discovery_is_atomic(); 949 oop referent = java_lang_ref_Reference::referent(obj); 950 assert(da ? referent->is_oop() : referent->is_oop_or_null(), 951 "Bad referent " INTPTR_FORMAT " found in Reference " 952 INTPTR_FORMAT " during %satomic discovery ", 953 p2i(referent), p2i(obj), da ? "" : "non-"); 954 } 955 #endif 956 957 // We mention two of several possible choices here: 958 // #0: if the reference object is not in the "originating generation" 959 // (or part of the heap being collected, indicated by our "span" 960 // we don't treat it specially (i.e. we scan it as we would 961 // a normal oop, treating its references as strong references). 962 // This means that references can't be discovered unless their 963 // referent is also in the same span. This is the simplest, 964 // most "local" and most conservative approach, albeit one 965 // that may cause weak references to be enqueued least promptly. 966 // We call this choice the "ReferenceBasedDiscovery" policy. 967 // #1: the reference object may be in any generation (span), but if 968 // the referent is in the generation (span) being currently collected 969 // then we can discover the reference object, provided 970 // the object has not already been discovered by 971 // a different concurrently running collector (as may be the 972 // case, for instance, if the reference object is in CMS and 973 // the referent in DefNewGeneration), and provided the processing 974 // of this reference object by the current collector will 975 // appear atomic to every other collector in the system. 976 // (Thus, for instance, a concurrent collector may not 977 // discover references in other generations even if the 978 // referent is in its own generation). This policy may, 979 // in certain cases, enqueue references somewhat sooner than 980 // might Policy #0 above, but at marginally increased cost 981 // and complexity in processing these references. 982 // We call this choice the "RefeferentBasedDiscovery" policy. 983 bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) { 984 // Make sure we are discovering refs (rather than processing discovered refs). 985 if (!_discovering_refs || !RegisterReferences) { 986 return false; 987 } 988 // We only discover active references. 989 oop next = java_lang_ref_Reference::next(obj); 990 if (next != NULL) { // Ref is no longer active 991 return false; 992 } 993 994 HeapWord* obj_addr = (HeapWord*)obj; 995 if (RefDiscoveryPolicy == ReferenceBasedDiscovery && 996 !_span.contains(obj_addr)) { 997 // Reference is not in the originating generation; 998 // don't treat it specially (i.e. we want to scan it as a normal 999 // object with strong references). 1000 return false; 1001 } 1002 1003 // We only discover references whose referents are not (yet) 1004 // known to be strongly reachable. 1005 if (is_alive_non_header() != NULL) { 1006 verify_referent(obj); 1007 if (is_alive_non_header()->do_object_b(java_lang_ref_Reference::referent(obj))) { 1008 return false; // referent is reachable 1009 } 1010 } 1011 if (rt == REF_SOFT) { 1012 // For soft refs we can decide now if these are not 1013 // current candidates for clearing, in which case we 1014 // can mark through them now, rather than delaying that 1015 // to the reference-processing phase. Since all current 1016 // time-stamp policies advance the soft-ref clock only 1017 // at a full collection cycle, this is always currently 1018 // accurate. 1019 if (!_current_soft_ref_policy->should_clear_reference(obj, _soft_ref_timestamp_clock)) { 1020 return false; 1021 } 1022 } 1023 1024 ResourceMark rm; // Needed for tracing. 1025 1026 HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(obj); 1027 const oop discovered = java_lang_ref_Reference::discovered(obj); 1028 assert(discovered->is_oop_or_null(), "Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered)); 1029 if (discovered != NULL) { 1030 // The reference has already been discovered... 1031 log_develop_trace(gc, ref)("Already discovered reference (" INTPTR_FORMAT ": %s)", 1032 p2i(obj), obj->klass()->internal_name()); 1033 if (RefDiscoveryPolicy == ReferentBasedDiscovery) { 1034 // assumes that an object is not processed twice; 1035 // if it's been already discovered it must be on another 1036 // generation's discovered list; so we won't discover it. 1037 return false; 1038 } else { 1039 assert(RefDiscoveryPolicy == ReferenceBasedDiscovery, 1040 "Unrecognized policy"); 1041 // Check assumption that an object is not potentially 1042 // discovered twice except by concurrent collectors that potentially 1043 // trace the same Reference object twice. 1044 assert(UseConcMarkSweepGC || UseG1GC, 1045 "Only possible with a concurrent marking collector"); 1046 return true; 1047 } 1048 } 1049 1050 if (RefDiscoveryPolicy == ReferentBasedDiscovery) { 1051 verify_referent(obj); 1052 // Discover if and only if EITHER: 1053 // .. reference is in our span, OR 1054 // .. we are an atomic collector and referent is in our span 1055 if (_span.contains(obj_addr) || 1056 (discovery_is_atomic() && 1057 _span.contains(java_lang_ref_Reference::referent(obj)))) { 1058 // should_enqueue = true; 1059 } else { 1060 return false; 1061 } 1062 } else { 1063 assert(RefDiscoveryPolicy == ReferenceBasedDiscovery && 1064 _span.contains(obj_addr), "code inconsistency"); 1065 } 1066 1067 // Get the right type of discovered queue head. 1068 DiscoveredList* list = get_discovered_list(rt); 1069 if (list == NULL) { 1070 return false; // nothing special needs to be done 1071 } 1072 1073 if (_discovery_is_mt) { 1074 add_to_discovered_list_mt(*list, obj, discovered_addr); 1075 } else { 1076 // We do a raw store here: the field will be visited later when processing 1077 // the discovered references. 1078 oop current_head = list->head(); 1079 // The last ref must have its discovered field pointing to itself. 1080 oop next_discovered = (current_head != NULL) ? current_head : obj; 1081 1082 assert(discovered == NULL, "control point invariant"); 1083 oop_store_raw(discovered_addr, next_discovered); 1084 list->set_head(obj); 1085 list->inc_length(1); 1086 1087 log_develop_trace(gc, ref)("Discovered reference (" INTPTR_FORMAT ": %s)", p2i(obj), obj->klass()->internal_name()); 1088 } 1089 assert(obj->is_oop(), "Discovered a bad reference"); 1090 verify_referent(obj); 1091 return true; 1092 } 1093 1094 bool ReferenceProcessor::has_discovered_references() { 1095 for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) { 1096 if (!_discovered_refs[i].is_empty()) { 1097 return true; 1098 } 1099 } 1100 return false; 1101 } 1102 1103 // Preclean the discovered references by removing those 1104 // whose referents are alive, and by marking from those that 1105 // are not active. These lists can be handled here 1106 // in any order and, indeed, concurrently. 1107 void ReferenceProcessor::preclean_discovered_references( 1108 BoolObjectClosure* is_alive, 1109 OopClosure* keep_alive, 1110 VoidClosure* complete_gc, 1111 YieldClosure* yield, 1112 GCTimer* gc_timer) { 1113 1114 // Soft references 1115 { 1116 GCTraceTime(Debug, gc, ref) tm("Preclean SoftReferences", gc_timer); 1117 for (uint i = 0; i < _max_num_q; i++) { 1118 if (yield->should_return()) { 1119 return; 1120 } 1121 preclean_discovered_reflist(_discoveredSoftRefs[i], is_alive, 1122 keep_alive, complete_gc, yield); 1123 } 1124 } 1125 1126 // Weak references 1127 { 1128 GCTraceTime(Debug, gc, ref) tm("Preclean WeakReferences", gc_timer); 1129 for (uint i = 0; i < _max_num_q; i++) { 1130 if (yield->should_return()) { 1131 return; 1132 } 1133 preclean_discovered_reflist(_discoveredWeakRefs[i], is_alive, 1134 keep_alive, complete_gc, yield); 1135 } 1136 } 1137 1138 // Final references 1139 { 1140 GCTraceTime(Debug, gc, ref) tm("Preclean FinalReferences", gc_timer); 1141 for (uint i = 0; i < _max_num_q; i++) { 1142 if (yield->should_return()) { 1143 return; 1144 } 1145 preclean_discovered_reflist(_discoveredFinalRefs[i], is_alive, 1146 keep_alive, complete_gc, yield); 1147 } 1148 } 1149 1150 // Phantom references 1151 { 1152 GCTraceTime(Debug, gc, ref) tm("Preclean PhantomReferences", gc_timer); 1153 for (uint i = 0; i < _max_num_q; i++) { 1154 if (yield->should_return()) { 1155 return; 1156 } 1157 preclean_discovered_reflist(_discoveredPhantomRefs[i], is_alive, 1158 keep_alive, complete_gc, yield); 1159 } 1160 } 1161 } 1162 1163 // Walk the given discovered ref list, and remove all reference objects 1164 // whose referents are still alive, whose referents are NULL or which 1165 // are not active (have a non-NULL next field). NOTE: When we are 1166 // thus precleaning the ref lists (which happens single-threaded today), 1167 // we do not disable refs discovery to honor the correct semantics of 1168 // java.lang.Reference. As a result, we need to be careful below 1169 // that ref removal steps interleave safely with ref discovery steps 1170 // (in this thread). 1171 void 1172 ReferenceProcessor::preclean_discovered_reflist(DiscoveredList& refs_list, 1173 BoolObjectClosure* is_alive, 1174 OopClosure* keep_alive, 1175 VoidClosure* complete_gc, 1176 YieldClosure* yield) { 1177 DiscoveredListIterator iter(refs_list, keep_alive, is_alive); 1178 while (iter.has_next()) { 1179 iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */)); 1180 oop obj = iter.obj(); 1181 oop next = java_lang_ref_Reference::next(obj); 1182 if (iter.referent() == NULL || iter.is_referent_alive() || 1183 next != NULL) { 1184 // The referent has been cleared, or is alive, or the Reference is not 1185 // active; we need to trace and mark its cohort. 1186 log_develop_trace(gc, ref)("Precleaning Reference (" INTPTR_FORMAT ": %s)", 1187 p2i(iter.obj()), iter.obj()->klass()->internal_name()); 1188 // Remove Reference object from list 1189 iter.remove(); 1190 // Keep alive its cohort. 1191 iter.make_referent_alive(); 1192 if (UseCompressedOops) { 1193 narrowOop* next_addr = (narrowOop*)java_lang_ref_Reference::next_addr(obj); 1194 keep_alive->do_oop(next_addr); 1195 } else { 1196 oop* next_addr = (oop*)java_lang_ref_Reference::next_addr(obj); 1197 keep_alive->do_oop(next_addr); 1198 } 1199 iter.move_to_next(); 1200 } else { 1201 iter.next(); 1202 } 1203 } 1204 // Close the reachable set 1205 complete_gc->do_void(); 1206 1207 NOT_PRODUCT( 1208 if (iter.processed() > 0) { 1209 log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " Refs out of " SIZE_FORMAT " Refs in discovered list " INTPTR_FORMAT, 1210 iter.removed(), iter.processed(), p2i(&refs_list)); 1211 } 1212 ) 1213 } 1214 1215 const char* ReferenceProcessor::list_name(uint i) { 1216 assert(i <= _max_num_q * number_of_subclasses_of_ref(), 1217 "Out of bounds index"); 1218 1219 int j = i / _max_num_q; 1220 switch (j) { 1221 case 0: return "SoftRef"; 1222 case 1: return "WeakRef"; 1223 case 2: return "FinalRef"; 1224 case 3: return "PhantomRef"; 1225 } 1226 ShouldNotReachHere(); 1227 return NULL; 1228 }