1 /* 2 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "classfile/javaClasses.inline.hpp" 27 #include "classfile/systemDictionary.hpp" 28 #include "gc/shared/collectedHeap.hpp" 29 #include "gc/shared/collectedHeap.inline.hpp" 30 #include "gc/shared/gcTimer.hpp" 31 #include "gc/shared/gcTraceTime.inline.hpp" 32 #include "gc/shared/referencePolicy.hpp" 33 #include "gc/shared/referenceProcessor.inline.hpp" 34 #include "logging/log.hpp" 35 #include "memory/allocation.inline.hpp" 36 #include "memory/resourceArea.hpp" 37 #include "oops/access.inline.hpp" 38 #include "oops/oop.inline.hpp" 39 #include "runtime/java.hpp" 40 41 ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL; 42 ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy = NULL; 43 jlong ReferenceProcessor::_soft_ref_timestamp_clock = 0; 44 45 void referenceProcessor_init() { 46 ReferenceProcessor::init_statics(); 47 } 48 49 void ReferenceProcessor::init_statics() { 50 // We need a monotonically non-decreasing time in ms but 51 // os::javaTimeMillis() does not guarantee monotonicity. 52 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; 53 54 // Initialize the soft ref timestamp clock. 55 _soft_ref_timestamp_clock = now; 56 // Also update the soft ref clock in j.l.r.SoftReference 57 java_lang_ref_SoftReference::set_clock(_soft_ref_timestamp_clock); 58 59 _always_clear_soft_ref_policy = new AlwaysClearPolicy(); 60 if (is_server_compilation_mode_vm()) { 61 _default_soft_ref_policy = new LRUMaxHeapPolicy(); 62 } else { 63 _default_soft_ref_policy = new LRUCurrentHeapPolicy(); 64 } 65 if (_always_clear_soft_ref_policy == NULL || _default_soft_ref_policy == NULL) { 66 vm_exit_during_initialization("Could not allocate reference policy object"); 67 } 68 guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery || 69 RefDiscoveryPolicy == ReferentBasedDiscovery, 70 "Unrecognized RefDiscoveryPolicy"); 71 } 72 73 void ReferenceProcessor::enable_discovery(bool check_no_refs) { 74 #ifdef ASSERT 75 // Verify that we're not currently discovering refs 76 assert(!_discovering_refs, "nested call?"); 77 78 if (check_no_refs) { 79 // Verify that the discovered lists are empty 80 verify_no_references_recorded(); 81 } 82 #endif // ASSERT 83 84 // Someone could have modified the value of the static 85 // field in the j.l.r.SoftReference class that holds the 86 // soft reference timestamp clock using reflection or 87 // Unsafe between GCs. Unconditionally update the static 88 // field in ReferenceProcessor here so that we use the new 89 // value during reference discovery. 90 91 _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock(); 92 _discovering_refs = true; 93 } 94 95 ReferenceProcessor::ReferenceProcessor(BoolObjectClosure* is_subject_to_discovery, 96 bool mt_processing, 97 uint mt_processing_degree, 98 bool mt_discovery, 99 uint mt_discovery_degree, 100 bool atomic_discovery, 101 BoolObjectClosure* is_alive_non_header) : 102 _is_subject_to_discovery(is_subject_to_discovery), 103 _discovering_refs(false), 104 _enqueuing_is_done(false), 105 _is_alive_non_header(is_alive_non_header), 106 _processing_is_mt(mt_processing), 107 _next_id(0) 108 { 109 assert(is_subject_to_discovery != NULL, "must be set"); 110 111 _discovery_is_atomic = atomic_discovery; 112 _discovery_is_mt = mt_discovery; 113 _num_queues = MAX2(1U, mt_processing_degree); 114 _max_num_queues = MAX2(_num_queues, mt_discovery_degree); 115 _discovered_refs = NEW_C_HEAP_ARRAY(DiscoveredList, 116 _max_num_queues * number_of_subclasses_of_ref(), mtGC); 117 118 if (_discovered_refs == NULL) { 119 vm_exit_during_initialization("Could not allocated RefProc Array"); 120 } 121 _discoveredSoftRefs = &_discovered_refs[0]; 122 _discoveredWeakRefs = &_discoveredSoftRefs[_max_num_queues]; 123 _discoveredFinalRefs = &_discoveredWeakRefs[_max_num_queues]; 124 _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_queues]; 125 126 // Initialize all entries to NULL 127 for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) { 128 _discovered_refs[i].set_head(NULL); 129 _discovered_refs[i].set_length(0); 130 } 131 132 setup_policy(false /* default soft ref policy */); 133 } 134 135 SpanReferenceProcessor::SpanReferenceProcessor(MemRegion span, 136 bool mt_processing, 137 uint mt_processing_degree, 138 bool mt_discovery, 139 uint mt_discovery_degree, 140 bool atomic_discovery, 141 BoolObjectClosure* is_alive_non_header) : 142 ReferenceProcessor(&_span_based_discoverer, 143 mt_processing, 144 mt_processing_degree, 145 mt_discovery, 146 mt_discovery_degree, 147 atomic_discovery, 148 is_alive_non_header), 149 _span_based_discoverer(span) { 150 151 } 152 153 #ifndef PRODUCT 154 void ReferenceProcessor::verify_no_references_recorded() { 155 guarantee(!_discovering_refs, "Discovering refs?"); 156 for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) { 157 guarantee(_discovered_refs[i].is_empty(), 158 "Found non-empty discovered list at %u", i); 159 } 160 } 161 #endif 162 163 void ReferenceProcessor::weak_oops_do(OopClosure* f) { 164 for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) { 165 if (UseCompressedOops) { 166 f->do_oop((narrowOop*)_discovered_refs[i].adr_head()); 167 } else { 168 f->do_oop((oop*)_discovered_refs[i].adr_head()); 169 } 170 } 171 } 172 173 void ReferenceProcessor::update_soft_ref_master_clock() { 174 // Update (advance) the soft ref master clock field. This must be done 175 // after processing the soft ref list. 176 177 // We need a monotonically non-decreasing time in ms but 178 // os::javaTimeMillis() does not guarantee monotonicity. 179 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; 180 jlong soft_ref_clock = java_lang_ref_SoftReference::clock(); 181 assert(soft_ref_clock == _soft_ref_timestamp_clock, "soft ref clocks out of sync"); 182 183 NOT_PRODUCT( 184 if (now < _soft_ref_timestamp_clock) { 185 log_warning(gc)("time warp: " JLONG_FORMAT " to " JLONG_FORMAT, 186 _soft_ref_timestamp_clock, now); 187 } 188 ) 189 // The values of now and _soft_ref_timestamp_clock are set using 190 // javaTimeNanos(), which is guaranteed to be monotonically 191 // non-decreasing provided the underlying platform provides such 192 // a time source (and it is bug free). 193 // In product mode, however, protect ourselves from non-monotonicity. 194 if (now > _soft_ref_timestamp_clock) { 195 _soft_ref_timestamp_clock = now; 196 java_lang_ref_SoftReference::set_clock(now); 197 } 198 // Else leave clock stalled at its old value until time progresses 199 // past clock value. 200 } 201 202 size_t ReferenceProcessor::total_count(DiscoveredList lists[]) const { 203 size_t total = 0; 204 for (uint i = 0; i < _max_num_queues; ++i) { 205 total += lists[i].length(); 206 } 207 return total; 208 } 209 210 ReferenceProcessorStats ReferenceProcessor::process_discovered_references( 211 BoolObjectClosure* is_alive, 212 OopClosure* keep_alive, 213 VoidClosure* complete_gc, 214 AbstractRefProcTaskExecutor* task_executor, 215 ReferenceProcessorPhaseTimes* phase_times) { 216 217 double start_time = os::elapsedTime(); 218 219 assert(!enqueuing_is_done(), "If here enqueuing should not be complete"); 220 // Stop treating discovered references specially. 221 disable_discovery(); 222 223 // If discovery was concurrent, someone could have modified 224 // the value of the static field in the j.l.r.SoftReference 225 // class that holds the soft reference timestamp clock using 226 // reflection or Unsafe between when discovery was enabled and 227 // now. Unconditionally update the static field in ReferenceProcessor 228 // here so that we use the new value during processing of the 229 // discovered soft refs. 230 231 _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock(); 232 233 ReferenceProcessorStats stats(total_count(_discoveredSoftRefs), 234 total_count(_discoveredWeakRefs), 235 total_count(_discoveredFinalRefs), 236 total_count(_discoveredPhantomRefs)); 237 238 // Soft references 239 { 240 RefProcPhaseTimesTracker tt(REF_SOFT, phase_times, this); 241 process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true, 242 is_alive, keep_alive, complete_gc, task_executor, phase_times); 243 } 244 245 update_soft_ref_master_clock(); 246 247 // Weak references 248 { 249 RefProcPhaseTimesTracker tt(REF_WEAK, phase_times, this); 250 process_discovered_reflist(_discoveredWeakRefs, NULL, true, 251 is_alive, keep_alive, complete_gc, task_executor, phase_times); 252 } 253 254 // Final references 255 { 256 RefProcPhaseTimesTracker tt(REF_FINAL, phase_times, this); 257 process_discovered_reflist(_discoveredFinalRefs, NULL, false, 258 is_alive, keep_alive, complete_gc, task_executor, phase_times); 259 } 260 261 // Phantom references 262 { 263 RefProcPhaseTimesTracker tt(REF_PHANTOM, phase_times, this); 264 process_discovered_reflist(_discoveredPhantomRefs, NULL, true, 265 is_alive, keep_alive, complete_gc, task_executor, phase_times); 266 } 267 268 if (task_executor != NULL) { 269 // Record the work done by the parallel workers. 270 task_executor->set_single_threaded_mode(); 271 } 272 273 phase_times->set_total_time_ms((os::elapsedTime() - start_time) * 1000); 274 275 return stats; 276 } 277 278 void ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor, 279 ReferenceProcessorPhaseTimes* phase_times) { 280 // Enqueue references that are not made active again, and 281 // clear the decks for the next collection (cycle). 282 enqueue_discovered_reflists(task_executor, phase_times); 283 284 // Stop treating discovered references specially. 285 disable_discovery(); 286 } 287 288 void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list) { 289 // Given a list of refs linked through the "discovered" field 290 // (java.lang.ref.Reference.discovered), self-loop their "next" field 291 // thus distinguishing them from active References, then 292 // prepend them to the pending list. 293 // 294 // The Java threads will see the Reference objects linked together through 295 // the discovered field. Instead of trying to do the write barrier updates 296 // in all places in the reference processor where we manipulate the discovered 297 // field we make sure to do the barrier here where we anyway iterate through 298 // all linked Reference objects. Note that it is important to not dirty any 299 // cards during reference processing since this will cause card table 300 // verification to fail for G1. 301 log_develop_trace(gc, ref)("ReferenceProcessor::enqueue_discovered_reflist list " INTPTR_FORMAT, p2i(&refs_list)); 302 303 oop obj = NULL; 304 oop next_discovered = refs_list.head(); 305 // Walk down the list, self-looping the next field 306 // so that the References are not considered active. 307 while (obj != next_discovered) { 308 obj = next_discovered; 309 assert(obj->is_instance(), "should be an instance object"); 310 assert(InstanceKlass::cast(obj->klass())->is_reference_instance_klass(), "should be reference object"); 311 next_discovered = java_lang_ref_Reference::discovered(obj); 312 log_develop_trace(gc, ref)(" obj " INTPTR_FORMAT "/next_discovered " INTPTR_FORMAT, p2i(obj), p2i(next_discovered)); 313 assert(java_lang_ref_Reference::next(obj) == NULL, 314 "Reference not active; should not be discovered"); 315 // Self-loop next, so as to make Ref not active. 316 java_lang_ref_Reference::set_next_raw(obj, obj); 317 if (next_discovered != obj) { 318 HeapAccess<AS_NO_KEEPALIVE>::oop_store_at(obj, java_lang_ref_Reference::discovered_offset, next_discovered); 319 } else { 320 // This is the last object. 321 // Swap refs_list into pending list and set obj's 322 // discovered to what we read from the pending list. 323 oop old = Universe::swap_reference_pending_list(refs_list.head()); 324 HeapAccess<AS_NO_KEEPALIVE>::oop_store_at(obj, java_lang_ref_Reference::discovered_offset, old); 325 } 326 } 327 } 328 329 // Parallel enqueue task 330 class RefProcEnqueueTask: public AbstractRefProcTaskExecutor::EnqueueTask { 331 public: 332 RefProcEnqueueTask(ReferenceProcessor& ref_processor, 333 DiscoveredList discovered_refs[], 334 int n_queues, 335 ReferenceProcessorPhaseTimes* phase_times) 336 : EnqueueTask(ref_processor, discovered_refs, n_queues, phase_times) 337 { } 338 339 virtual void work(unsigned int work_id) { 340 RefProcWorkerTimeTracker tt(ReferenceProcessorPhaseTimes::RefEnqueue, _phase_times, work_id); 341 342 assert(work_id < (unsigned int)_ref_processor.max_num_queues(), "Index out-of-bounds"); 343 // Simplest first cut: static partitioning. 344 int index = work_id; 345 // The increment on "index" must correspond to the maximum number of queues 346 // (n_queues) with which that ReferenceProcessor was created. That 347 // is because of the "clever" way the discovered references lists were 348 // allocated and are indexed into. 349 assert(_n_queues == (int) _ref_processor.max_num_queues(), "Different number not expected"); 350 for (int j = 0; 351 j < ReferenceProcessor::number_of_subclasses_of_ref(); 352 j++, index += _n_queues) { 353 _ref_processor.enqueue_discovered_reflist(_refs_lists[index]); 354 _refs_lists[index].set_head(NULL); 355 _refs_lists[index].set_length(0); 356 } 357 } 358 }; 359 360 // Enqueue references that are not made active again 361 void ReferenceProcessor::enqueue_discovered_reflists(AbstractRefProcTaskExecutor* task_executor, 362 ReferenceProcessorPhaseTimes* phase_times) { 363 364 ReferenceProcessorStats stats(total_count(_discoveredSoftRefs), 365 total_count(_discoveredWeakRefs), 366 total_count(_discoveredFinalRefs), 367 total_count(_discoveredPhantomRefs)); 368 369 RefProcEnqueueTimeTracker tt(phase_times, stats); 370 371 if (_processing_is_mt && task_executor != NULL) { 372 // Parallel code 373 RefProcEnqueueTask tsk(*this, _discovered_refs, _max_num_queues, phase_times); 374 task_executor->execute(tsk); 375 } else { 376 // Serial code: call the parent class's implementation 377 for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) { 378 enqueue_discovered_reflist(_discovered_refs[i]); 379 _discovered_refs[i].set_head(NULL); 380 _discovered_refs[i].set_length(0); 381 } 382 } 383 } 384 385 void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) { 386 _discovered_addr = java_lang_ref_Reference::discovered_addr_raw(_current_discovered); 387 oop discovered = java_lang_ref_Reference::discovered(_current_discovered); 388 assert(_discovered_addr && oopDesc::is_oop_or_null(discovered), 389 "Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered)); 390 _next_discovered = discovered; 391 392 _referent_addr = java_lang_ref_Reference::referent_addr_raw(_current_discovered); 393 _referent = java_lang_ref_Reference::referent(_current_discovered); 394 assert(Universe::heap()->is_in_reserved_or_null(_referent), 395 "Wrong oop found in java.lang.Reference object"); 396 assert(allow_null_referent ? 397 oopDesc::is_oop_or_null(_referent) 398 : oopDesc::is_oop(_referent), 399 "Expected an oop%s for referent field at " PTR_FORMAT, 400 (allow_null_referent ? " or NULL" : ""), 401 p2i(_referent)); 402 } 403 404 void DiscoveredListIterator::remove() { 405 assert(oopDesc::is_oop(_current_discovered), "Dropping a bad reference"); 406 RawAccess<>::oop_store(_discovered_addr, oop(NULL)); 407 408 // First _prev_next ref actually points into DiscoveredList (gross). 409 oop new_next; 410 if (_next_discovered == _current_discovered) { 411 // At the end of the list, we should make _prev point to itself. 412 // If _ref is the first ref, then _prev_next will be in the DiscoveredList, 413 // and _prev will be NULL. 414 new_next = _prev_discovered; 415 } else { 416 new_next = _next_discovered; 417 } 418 // Remove Reference object from discovered list. Note that G1 does not need a 419 // pre-barrier here because we know the Reference has already been found/marked, 420 // that's how it ended up in the discovered list in the first place. 421 RawAccess<>::oop_store(_prev_discovered_addr, new_next); 422 NOT_PRODUCT(_removed++); 423 _refs_list.dec_length(1); 424 } 425 426 void DiscoveredListIterator::clear_referent() { 427 RawAccess<>::oop_store(_referent_addr, oop(NULL)); 428 } 429 430 // NOTE: process_phase*() are largely similar, and at a high level 431 // merely iterate over the extant list applying a predicate to 432 // each of its elements and possibly removing that element from the 433 // list and applying some further closures to that element. 434 // We should consider the possibility of replacing these 435 // process_phase*() methods by abstracting them into 436 // a single general iterator invocation that receives appropriate 437 // closures that accomplish this work. 438 439 // (SoftReferences only) Traverse the list and remove any SoftReferences whose 440 // referents are not alive, but that should be kept alive for policy reasons. 441 // Keep alive the transitive closure of all such referents. 442 void 443 ReferenceProcessor::process_phase1(DiscoveredList& refs_list, 444 ReferencePolicy* policy, 445 BoolObjectClosure* is_alive, 446 OopClosure* keep_alive, 447 VoidClosure* complete_gc) { 448 assert(policy != NULL, "Must have a non-NULL policy"); 449 DiscoveredListIterator iter(refs_list, keep_alive, is_alive); 450 // Decide which softly reachable refs should be kept alive. 451 while (iter.has_next()) { 452 iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */)); 453 bool referent_is_dead = (iter.referent() != NULL) && !iter.is_referent_alive(); 454 if (referent_is_dead && 455 !policy->should_clear_reference(iter.obj(), _soft_ref_timestamp_clock)) { 456 log_develop_trace(gc, ref)("Dropping reference (" INTPTR_FORMAT ": %s" ") by policy", 457 p2i(iter.obj()), iter.obj()->klass()->internal_name()); 458 // Remove Reference object from list 459 iter.remove(); 460 // keep the referent around 461 iter.make_referent_alive(); 462 iter.move_to_next(); 463 } else { 464 iter.next(); 465 } 466 } 467 // Close the reachable set 468 complete_gc->do_void(); 469 log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " dead Refs out of " SIZE_FORMAT " discovered Refs by policy, from list " INTPTR_FORMAT, 470 iter.removed(), iter.processed(), p2i(&refs_list)); 471 } 472 473 void ReferenceProcessor::process_phase2(DiscoveredList& refs_list, 474 BoolObjectClosure* is_alive, 475 OopClosure* keep_alive, 476 VoidClosure* complete_gc) { 477 if (discovery_is_atomic()) { 478 // complete_gc is ignored in this case for this phase 479 pp2_work(refs_list, is_alive, keep_alive); 480 } else { 481 assert(complete_gc != NULL, "Error"); 482 pp2_work_concurrent_discovery(refs_list, is_alive, 483 keep_alive, complete_gc); 484 } 485 } 486 // Traverse the list and remove any Refs that are not active, or 487 // whose referents are either alive or NULL. 488 void 489 ReferenceProcessor::pp2_work(DiscoveredList& refs_list, 490 BoolObjectClosure* is_alive, 491 OopClosure* keep_alive) { 492 assert(discovery_is_atomic(), "Error"); 493 DiscoveredListIterator iter(refs_list, keep_alive, is_alive); 494 while (iter.has_next()) { 495 iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */)); 496 DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());) 497 assert(next == NULL, "Should not discover inactive Reference"); 498 if (iter.is_referent_alive()) { 499 log_develop_trace(gc, ref)("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)", 500 p2i(iter.obj()), iter.obj()->klass()->internal_name()); 501 // The referent is reachable after all. 502 // Remove Reference object from list. 503 iter.remove(); 504 // Update the referent pointer as necessary: Note that this 505 // should not entail any recursive marking because the 506 // referent must already have been traversed. 507 iter.make_referent_alive(); 508 iter.move_to_next(); 509 } else { 510 iter.next(); 511 } 512 } 513 NOT_PRODUCT( 514 if (iter.processed() > 0) { 515 log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " active Refs out of " SIZE_FORMAT 516 " Refs in discovered list " INTPTR_FORMAT, 517 iter.removed(), iter.processed(), p2i(&refs_list)); 518 } 519 ) 520 } 521 522 void 523 ReferenceProcessor::pp2_work_concurrent_discovery(DiscoveredList& refs_list, 524 BoolObjectClosure* is_alive, 525 OopClosure* keep_alive, 526 VoidClosure* complete_gc) { 527 assert(!discovery_is_atomic(), "Error"); 528 DiscoveredListIterator iter(refs_list, keep_alive, is_alive); 529 while (iter.has_next()) { 530 iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */)); 531 HeapWord* next_addr = java_lang_ref_Reference::next_addr_raw(iter.obj()); 532 oop next = java_lang_ref_Reference::next(iter.obj()); 533 if ((iter.referent() == NULL || iter.is_referent_alive() || 534 next != NULL)) { 535 assert(oopDesc::is_oop_or_null(next), "Expected an oop or NULL for next field at " PTR_FORMAT, p2i(next)); 536 // Remove Reference object from list 537 iter.remove(); 538 // Trace the cohorts 539 iter.make_referent_alive(); 540 if (UseCompressedOops) { 541 keep_alive->do_oop((narrowOop*)next_addr); 542 } else { 543 keep_alive->do_oop((oop*)next_addr); 544 } 545 iter.move_to_next(); 546 } else { 547 iter.next(); 548 } 549 } 550 // Now close the newly reachable set 551 complete_gc->do_void(); 552 NOT_PRODUCT( 553 if (iter.processed() > 0) { 554 log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " active Refs out of " SIZE_FORMAT 555 " Refs in discovered list " INTPTR_FORMAT, 556 iter.removed(), iter.processed(), p2i(&refs_list)); 557 } 558 ) 559 } 560 561 void ReferenceProcessor::process_phase3(DiscoveredList& refs_list, 562 bool clear_referent, 563 BoolObjectClosure* is_alive, 564 OopClosure* keep_alive, 565 VoidClosure* complete_gc) { 566 ResourceMark rm; 567 DiscoveredListIterator iter(refs_list, keep_alive, is_alive); 568 while (iter.has_next()) { 569 iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */)); 570 if (clear_referent) { 571 // NULL out referent pointer 572 iter.clear_referent(); 573 } else { 574 // keep the referent around 575 iter.make_referent_alive(); 576 } 577 log_develop_trace(gc, ref)("Adding %sreference (" INTPTR_FORMAT ": %s) as pending", 578 clear_referent ? "cleared " : "", p2i(iter.obj()), iter.obj()->klass()->internal_name()); 579 assert(oopDesc::is_oop(iter.obj(), UseConcMarkSweepGC), "Adding a bad reference"); 580 iter.next(); 581 } 582 // Close the reachable set 583 complete_gc->do_void(); 584 } 585 586 void 587 ReferenceProcessor::clear_discovered_references(DiscoveredList& refs_list) { 588 oop obj = NULL; 589 oop next = refs_list.head(); 590 while (next != obj) { 591 obj = next; 592 next = java_lang_ref_Reference::discovered(obj); 593 java_lang_ref_Reference::set_discovered_raw(obj, NULL); 594 } 595 refs_list.set_head(NULL); 596 refs_list.set_length(0); 597 } 598 599 void ReferenceProcessor::abandon_partial_discovery() { 600 // loop over the lists 601 for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) { 602 if ((i % _max_num_queues) == 0) { 603 log_develop_trace(gc, ref)("Abandoning %s discovered list", list_name(i)); 604 } 605 clear_discovered_references(_discovered_refs[i]); 606 } 607 } 608 609 size_t ReferenceProcessor::total_reference_count(ReferenceType type) const { 610 DiscoveredList* list = NULL; 611 612 switch (type) { 613 case REF_SOFT: 614 list = _discoveredSoftRefs; 615 break; 616 case REF_WEAK: 617 list = _discoveredWeakRefs; 618 break; 619 case REF_FINAL: 620 list = _discoveredFinalRefs; 621 break; 622 case REF_PHANTOM: 623 list = _discoveredPhantomRefs; 624 break; 625 case REF_OTHER: 626 case REF_NONE: 627 default: 628 ShouldNotReachHere(); 629 } 630 return total_count(list); 631 } 632 633 class RefProcPhase1Task: public AbstractRefProcTaskExecutor::ProcessTask { 634 public: 635 RefProcPhase1Task(ReferenceProcessor& ref_processor, 636 DiscoveredList refs_lists[], 637 ReferencePolicy* policy, 638 bool marks_oops_alive, 639 ReferenceProcessorPhaseTimes* phase_times) 640 : ProcessTask(ref_processor, refs_lists, marks_oops_alive, phase_times), 641 _policy(policy) 642 { } 643 virtual void work(unsigned int i, BoolObjectClosure& is_alive, 644 OopClosure& keep_alive, 645 VoidClosure& complete_gc) 646 { 647 RefProcWorkerTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase1, _phase_times, i); 648 649 _ref_processor.process_phase1(_refs_lists[i], _policy, 650 &is_alive, &keep_alive, &complete_gc); 651 } 652 private: 653 ReferencePolicy* _policy; 654 }; 655 656 class RefProcPhase2Task: public AbstractRefProcTaskExecutor::ProcessTask { 657 public: 658 RefProcPhase2Task(ReferenceProcessor& ref_processor, 659 DiscoveredList refs_lists[], 660 bool marks_oops_alive, 661 ReferenceProcessorPhaseTimes* phase_times) 662 : ProcessTask(ref_processor, refs_lists, marks_oops_alive, phase_times) 663 { } 664 virtual void work(unsigned int i, BoolObjectClosure& is_alive, 665 OopClosure& keep_alive, 666 VoidClosure& complete_gc) 667 { 668 RefProcWorkerTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase2, _phase_times, i); 669 670 _ref_processor.process_phase2(_refs_lists[i], 671 &is_alive, &keep_alive, &complete_gc); 672 } 673 }; 674 675 class RefProcPhase3Task: public AbstractRefProcTaskExecutor::ProcessTask { 676 public: 677 RefProcPhase3Task(ReferenceProcessor& ref_processor, 678 DiscoveredList refs_lists[], 679 bool clear_referent, 680 bool marks_oops_alive, 681 ReferenceProcessorPhaseTimes* phase_times) 682 : ProcessTask(ref_processor, refs_lists, marks_oops_alive, phase_times), 683 _clear_referent(clear_referent) 684 { } 685 virtual void work(unsigned int i, BoolObjectClosure& is_alive, 686 OopClosure& keep_alive, 687 VoidClosure& complete_gc) 688 { 689 RefProcWorkerTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase3, _phase_times, i); 690 691 _ref_processor.process_phase3(_refs_lists[i], _clear_referent, 692 &is_alive, &keep_alive, &complete_gc); 693 } 694 private: 695 bool _clear_referent; 696 }; 697 698 #ifndef PRODUCT 699 void ReferenceProcessor::log_reflist_counts(DiscoveredList ref_lists[], uint active_length, size_t total_refs) { 700 if (!log_is_enabled(Trace, gc, ref)) { 701 return; 702 } 703 704 stringStream st; 705 for (uint i = 0; i < active_length; ++i) { 706 st.print(SIZE_FORMAT " ", ref_lists[i].length()); 707 } 708 log_develop_trace(gc, ref)("%s= " SIZE_FORMAT, st.as_string(), total_refs); 709 #ifdef ASSERT 710 for (uint i = active_length; i < _max_num_queues; i++) { 711 assert(ref_lists[i].length() == 0, SIZE_FORMAT " unexpected References in %u", 712 ref_lists[i].length(), i); 713 } 714 #endif 715 } 716 #endif 717 718 void ReferenceProcessor::set_active_mt_degree(uint v) { 719 _num_queues = v; 720 _next_id = 0; 721 } 722 723 // Balances reference queues. 724 // Move entries from all queues[0, 1, ..., _max_num_q-1] to 725 // queues[0, 1, ..., _num_q-1] because only the first _num_q 726 // corresponding to the active workers will be processed. 727 void ReferenceProcessor::balance_queues(DiscoveredList ref_lists[]) 728 { 729 // calculate total length 730 size_t total_refs = 0; 731 log_develop_trace(gc, ref)("Balance ref_lists "); 732 733 for (uint i = 0; i < _max_num_queues; ++i) { 734 total_refs += ref_lists[i].length(); 735 } 736 log_reflist_counts(ref_lists, _max_num_queues, total_refs); 737 size_t avg_refs = total_refs / _num_queues + 1; 738 uint to_idx = 0; 739 for (uint from_idx = 0; from_idx < _max_num_queues; from_idx++) { 740 bool move_all = false; 741 if (from_idx >= _num_queues) { 742 move_all = ref_lists[from_idx].length() > 0; 743 } 744 while ((ref_lists[from_idx].length() > avg_refs) || 745 move_all) { 746 assert(to_idx < _num_queues, "Sanity Check!"); 747 if (ref_lists[to_idx].length() < avg_refs) { 748 // move superfluous refs 749 size_t refs_to_move; 750 // Move all the Ref's if the from queue will not be processed. 751 if (move_all) { 752 refs_to_move = MIN2(ref_lists[from_idx].length(), 753 avg_refs - ref_lists[to_idx].length()); 754 } else { 755 refs_to_move = MIN2(ref_lists[from_idx].length() - avg_refs, 756 avg_refs - ref_lists[to_idx].length()); 757 } 758 759 assert(refs_to_move > 0, "otherwise the code below will fail"); 760 761 oop move_head = ref_lists[from_idx].head(); 762 oop move_tail = move_head; 763 oop new_head = move_head; 764 // find an element to split the list on 765 for (size_t j = 0; j < refs_to_move; ++j) { 766 move_tail = new_head; 767 new_head = java_lang_ref_Reference::discovered(new_head); 768 } 769 770 // Add the chain to the to list. 771 if (ref_lists[to_idx].head() == NULL) { 772 // to list is empty. Make a loop at the end. 773 java_lang_ref_Reference::set_discovered_raw(move_tail, move_tail); 774 } else { 775 java_lang_ref_Reference::set_discovered_raw(move_tail, ref_lists[to_idx].head()); 776 } 777 ref_lists[to_idx].set_head(move_head); 778 ref_lists[to_idx].inc_length(refs_to_move); 779 780 // Remove the chain from the from list. 781 if (move_tail == new_head) { 782 // We found the end of the from list. 783 ref_lists[from_idx].set_head(NULL); 784 } else { 785 ref_lists[from_idx].set_head(new_head); 786 } 787 ref_lists[from_idx].dec_length(refs_to_move); 788 if (ref_lists[from_idx].length() == 0) { 789 break; 790 } 791 } else { 792 to_idx = (to_idx + 1) % _num_queues; 793 } 794 } 795 } 796 #ifdef ASSERT 797 size_t balanced_total_refs = 0; 798 for (uint i = 0; i < _num_queues; ++i) { 799 balanced_total_refs += ref_lists[i].length(); 800 } 801 log_reflist_counts(ref_lists, _num_queues, balanced_total_refs); 802 assert(total_refs == balanced_total_refs, "Balancing was incomplete"); 803 #endif 804 } 805 806 void ReferenceProcessor::balance_all_queues() { 807 balance_queues(_discoveredSoftRefs); 808 balance_queues(_discoveredWeakRefs); 809 balance_queues(_discoveredFinalRefs); 810 balance_queues(_discoveredPhantomRefs); 811 } 812 813 void ReferenceProcessor::process_discovered_reflist( 814 DiscoveredList refs_lists[], 815 ReferencePolicy* policy, 816 bool clear_referent, 817 BoolObjectClosure* is_alive, 818 OopClosure* keep_alive, 819 VoidClosure* complete_gc, 820 AbstractRefProcTaskExecutor* task_executor, 821 ReferenceProcessorPhaseTimes* phase_times) 822 { 823 bool mt_processing = task_executor != NULL && _processing_is_mt; 824 825 phase_times->set_processing_is_mt(mt_processing); 826 827 if (mt_processing && ParallelRefProcBalancingEnabled) { 828 RefProcBalanceQueuesTimeTracker tt(phase_times); 829 balance_queues(refs_lists); 830 } 831 832 // Phase 1 (soft refs only): 833 // . Traverse the list and remove any SoftReferences whose 834 // referents are not alive, but that should be kept alive for 835 // policy reasons. Keep alive the transitive closure of all 836 // such referents. 837 if (policy != NULL) { 838 RefProcParPhaseTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase1, phase_times); 839 840 if (mt_processing) { 841 RefProcPhase1Task phase1(*this, refs_lists, policy, true /*marks_oops_alive*/, phase_times); 842 task_executor->execute(phase1); 843 } else { 844 for (uint i = 0; i < _max_num_queues; i++) { 845 process_phase1(refs_lists[i], policy, 846 is_alive, keep_alive, complete_gc); 847 } 848 } 849 } else { // policy == NULL 850 assert(refs_lists != _discoveredSoftRefs, 851 "Policy must be specified for soft references."); 852 } 853 854 // Phase 2: 855 // . Traverse the list and remove any refs whose referents are alive. 856 { 857 RefProcParPhaseTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase2, phase_times); 858 859 if (mt_processing) { 860 RefProcPhase2Task phase2(*this, refs_lists, !discovery_is_atomic() /*marks_oops_alive*/, phase_times); 861 task_executor->execute(phase2); 862 } else { 863 for (uint i = 0; i < _max_num_queues; i++) { 864 process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc); 865 } 866 } 867 } 868 869 // Phase 3: 870 // . Traverse the list and process referents as appropriate. 871 { 872 RefProcParPhaseTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase3, phase_times); 873 874 if (mt_processing) { 875 RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/, phase_times); 876 task_executor->execute(phase3); 877 } else { 878 for (uint i = 0; i < _max_num_queues; i++) { 879 process_phase3(refs_lists[i], clear_referent, 880 is_alive, keep_alive, complete_gc); 881 } 882 } 883 } 884 } 885 886 inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt) { 887 uint id = 0; 888 // Determine the queue index to use for this object. 889 if (_discovery_is_mt) { 890 // During a multi-threaded discovery phase, 891 // each thread saves to its "own" list. 892 Thread* thr = Thread::current(); 893 id = thr->as_Worker_thread()->id(); 894 } else { 895 // single-threaded discovery, we save in round-robin 896 // fashion to each of the lists. 897 if (_processing_is_mt) { 898 id = next_id(); 899 } 900 } 901 assert(id < _max_num_queues, "Id is out-of-bounds id %u and max id %u)", id, _max_num_queues); 902 903 // Get the discovered queue to which we will add 904 DiscoveredList* list = NULL; 905 switch (rt) { 906 case REF_OTHER: 907 // Unknown reference type, no special treatment 908 break; 909 case REF_SOFT: 910 list = &_discoveredSoftRefs[id]; 911 break; 912 case REF_WEAK: 913 list = &_discoveredWeakRefs[id]; 914 break; 915 case REF_FINAL: 916 list = &_discoveredFinalRefs[id]; 917 break; 918 case REF_PHANTOM: 919 list = &_discoveredPhantomRefs[id]; 920 break; 921 case REF_NONE: 922 // we should not reach here if we are an InstanceRefKlass 923 default: 924 ShouldNotReachHere(); 925 } 926 log_develop_trace(gc, ref)("Thread %d gets list " INTPTR_FORMAT, id, p2i(list)); 927 return list; 928 } 929 930 inline void 931 ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list, 932 oop obj, 933 HeapWord* discovered_addr) { 934 assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller"); 935 // First we must make sure this object is only enqueued once. CAS in a non null 936 // discovered_addr. 937 oop current_head = refs_list.head(); 938 // The last ref must have its discovered field pointing to itself. 939 oop next_discovered = (current_head != NULL) ? current_head : obj; 940 941 oop retest = RawAccess<>::oop_atomic_cmpxchg(next_discovered, discovered_addr, oop(NULL)); 942 943 if (retest == NULL) { 944 // This thread just won the right to enqueue the object. 945 // We have separate lists for enqueueing, so no synchronization 946 // is necessary. 947 refs_list.set_head(obj); 948 refs_list.inc_length(1); 949 950 log_develop_trace(gc, ref)("Discovered reference (mt) (" INTPTR_FORMAT ": %s)", 951 p2i(obj), obj->klass()->internal_name()); 952 } else { 953 // If retest was non NULL, another thread beat us to it: 954 // The reference has already been discovered... 955 log_develop_trace(gc, ref)("Already discovered reference (" INTPTR_FORMAT ": %s)", 956 p2i(obj), obj->klass()->internal_name()); 957 } 958 } 959 960 #ifndef PRODUCT 961 // Non-atomic (i.e. concurrent) discovery might allow us 962 // to observe j.l.References with NULL referents, being those 963 // cleared concurrently by mutators during (or after) discovery. 964 void ReferenceProcessor::verify_referent(oop obj) { 965 bool da = discovery_is_atomic(); 966 oop referent = java_lang_ref_Reference::referent(obj); 967 assert(da ? oopDesc::is_oop(referent) : oopDesc::is_oop_or_null(referent), 968 "Bad referent " INTPTR_FORMAT " found in Reference " 969 INTPTR_FORMAT " during %satomic discovery ", 970 p2i(referent), p2i(obj), da ? "" : "non-"); 971 } 972 #endif 973 974 template <class T> 975 bool ReferenceProcessor::is_subject_to_discovery(T const obj) const { 976 return _is_subject_to_discovery->do_object_b(obj); 977 } 978 979 // We mention two of several possible choices here: 980 // #0: if the reference object is not in the "originating generation" 981 // (or part of the heap being collected, indicated by our "span" 982 // we don't treat it specially (i.e. we scan it as we would 983 // a normal oop, treating its references as strong references). 984 // This means that references can't be discovered unless their 985 // referent is also in the same span. This is the simplest, 986 // most "local" and most conservative approach, albeit one 987 // that may cause weak references to be enqueued least promptly. 988 // We call this choice the "ReferenceBasedDiscovery" policy. 989 // #1: the reference object may be in any generation (span), but if 990 // the referent is in the generation (span) being currently collected 991 // then we can discover the reference object, provided 992 // the object has not already been discovered by 993 // a different concurrently running collector (as may be the 994 // case, for instance, if the reference object is in CMS and 995 // the referent in DefNewGeneration), and provided the processing 996 // of this reference object by the current collector will 997 // appear atomic to every other collector in the system. 998 // (Thus, for instance, a concurrent collector may not 999 // discover references in other generations even if the 1000 // referent is in its own generation). This policy may, 1001 // in certain cases, enqueue references somewhat sooner than 1002 // might Policy #0 above, but at marginally increased cost 1003 // and complexity in processing these references. 1004 // We call this choice the "RefeferentBasedDiscovery" policy. 1005 bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) { 1006 // Make sure we are discovering refs (rather than processing discovered refs). 1007 if (!_discovering_refs || !RegisterReferences) { 1008 return false; 1009 } 1010 // We only discover active references. 1011 oop next = java_lang_ref_Reference::next(obj); 1012 if (next != NULL) { // Ref is no longer active 1013 return false; 1014 } 1015 1016 if (RefDiscoveryPolicy == ReferenceBasedDiscovery && 1017 !is_subject_to_discovery(obj)) { 1018 // Reference is not in the originating generation; 1019 // don't treat it specially (i.e. we want to scan it as a normal 1020 // object with strong references). 1021 return false; 1022 } 1023 1024 // We only discover references whose referents are not (yet) 1025 // known to be strongly reachable. 1026 if (is_alive_non_header() != NULL) { 1027 verify_referent(obj); 1028 if (is_alive_non_header()->do_object_b(java_lang_ref_Reference::referent(obj))) { 1029 return false; // referent is reachable 1030 } 1031 } 1032 if (rt == REF_SOFT) { 1033 // For soft refs we can decide now if these are not 1034 // current candidates for clearing, in which case we 1035 // can mark through them now, rather than delaying that 1036 // to the reference-processing phase. Since all current 1037 // time-stamp policies advance the soft-ref clock only 1038 // at a full collection cycle, this is always currently 1039 // accurate. 1040 if (!_current_soft_ref_policy->should_clear_reference(obj, _soft_ref_timestamp_clock)) { 1041 return false; 1042 } 1043 } 1044 1045 ResourceMark rm; // Needed for tracing. 1046 1047 HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr_raw(obj); 1048 const oop discovered = java_lang_ref_Reference::discovered(obj); 1049 assert(oopDesc::is_oop_or_null(discovered), "Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered)); 1050 if (discovered != NULL) { 1051 // The reference has already been discovered... 1052 log_develop_trace(gc, ref)("Already discovered reference (" INTPTR_FORMAT ": %s)", 1053 p2i(obj), obj->klass()->internal_name()); 1054 if (RefDiscoveryPolicy == ReferentBasedDiscovery) { 1055 // assumes that an object is not processed twice; 1056 // if it's been already discovered it must be on another 1057 // generation's discovered list; so we won't discover it. 1058 return false; 1059 } else { 1060 assert(RefDiscoveryPolicy == ReferenceBasedDiscovery, 1061 "Unrecognized policy"); 1062 // Check assumption that an object is not potentially 1063 // discovered twice except by concurrent collectors that potentially 1064 // trace the same Reference object twice. 1065 assert(UseConcMarkSweepGC || UseG1GC, 1066 "Only possible with a concurrent marking collector"); 1067 return true; 1068 } 1069 } 1070 1071 if (RefDiscoveryPolicy == ReferentBasedDiscovery) { 1072 verify_referent(obj); 1073 // Discover if and only if EITHER: 1074 // .. reference is in our span, OR 1075 // .. we are an atomic collector and referent is in our span 1076 if (is_subject_to_discovery(obj) || 1077 (discovery_is_atomic() && 1078 is_subject_to_discovery(java_lang_ref_Reference::referent(obj)))) { 1079 } else { 1080 return false; 1081 } 1082 } else { 1083 assert(RefDiscoveryPolicy == ReferenceBasedDiscovery && 1084 is_subject_to_discovery(obj), "code inconsistency"); 1085 } 1086 1087 // Get the right type of discovered queue head. 1088 DiscoveredList* list = get_discovered_list(rt); 1089 if (list == NULL) { 1090 return false; // nothing special needs to be done 1091 } 1092 1093 if (_discovery_is_mt) { 1094 add_to_discovered_list_mt(*list, obj, discovered_addr); 1095 } else { 1096 // We do a raw store here: the field will be visited later when processing 1097 // the discovered references. 1098 oop current_head = list->head(); 1099 // The last ref must have its discovered field pointing to itself. 1100 oop next_discovered = (current_head != NULL) ? current_head : obj; 1101 1102 assert(discovered == NULL, "control point invariant"); 1103 RawAccess<>::oop_store(discovered_addr, next_discovered); 1104 list->set_head(obj); 1105 list->inc_length(1); 1106 1107 log_develop_trace(gc, ref)("Discovered reference (" INTPTR_FORMAT ": %s)", p2i(obj), obj->klass()->internal_name()); 1108 } 1109 assert(oopDesc::is_oop(obj), "Discovered a bad reference"); 1110 verify_referent(obj); 1111 return true; 1112 } 1113 1114 bool ReferenceProcessor::has_discovered_references() { 1115 for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) { 1116 if (!_discovered_refs[i].is_empty()) { 1117 return true; 1118 } 1119 } 1120 return false; 1121 } 1122 1123 // Preclean the discovered references by removing those 1124 // whose referents are alive, and by marking from those that 1125 // are not active. These lists can be handled here 1126 // in any order and, indeed, concurrently. 1127 void ReferenceProcessor::preclean_discovered_references( 1128 BoolObjectClosure* is_alive, 1129 OopClosure* keep_alive, 1130 VoidClosure* complete_gc, 1131 YieldClosure* yield, 1132 GCTimer* gc_timer) { 1133 1134 // Soft references 1135 { 1136 GCTraceTime(Debug, gc, ref) tm("Preclean SoftReferences", gc_timer); 1137 for (uint i = 0; i < _max_num_queues; i++) { 1138 if (yield->should_return()) { 1139 return; 1140 } 1141 preclean_discovered_reflist(_discoveredSoftRefs[i], is_alive, 1142 keep_alive, complete_gc, yield); 1143 } 1144 } 1145 1146 // Weak references 1147 { 1148 GCTraceTime(Debug, gc, ref) tm("Preclean WeakReferences", gc_timer); 1149 for (uint i = 0; i < _max_num_queues; i++) { 1150 if (yield->should_return()) { 1151 return; 1152 } 1153 preclean_discovered_reflist(_discoveredWeakRefs[i], is_alive, 1154 keep_alive, complete_gc, yield); 1155 } 1156 } 1157 1158 // Final references 1159 { 1160 GCTraceTime(Debug, gc, ref) tm("Preclean FinalReferences", gc_timer); 1161 for (uint i = 0; i < _max_num_queues; i++) { 1162 if (yield->should_return()) { 1163 return; 1164 } 1165 preclean_discovered_reflist(_discoveredFinalRefs[i], is_alive, 1166 keep_alive, complete_gc, yield); 1167 } 1168 } 1169 1170 // Phantom references 1171 { 1172 GCTraceTime(Debug, gc, ref) tm("Preclean PhantomReferences", gc_timer); 1173 for (uint i = 0; i < _max_num_queues; i++) { 1174 if (yield->should_return()) { 1175 return; 1176 } 1177 preclean_discovered_reflist(_discoveredPhantomRefs[i], is_alive, 1178 keep_alive, complete_gc, yield); 1179 } 1180 } 1181 } 1182 1183 // Walk the given discovered ref list, and remove all reference objects 1184 // whose referents are still alive, whose referents are NULL or which 1185 // are not active (have a non-NULL next field). NOTE: When we are 1186 // thus precleaning the ref lists (which happens single-threaded today), 1187 // we do not disable refs discovery to honor the correct semantics of 1188 // java.lang.Reference. As a result, we need to be careful below 1189 // that ref removal steps interleave safely with ref discovery steps 1190 // (in this thread). 1191 void 1192 ReferenceProcessor::preclean_discovered_reflist(DiscoveredList& refs_list, 1193 BoolObjectClosure* is_alive, 1194 OopClosure* keep_alive, 1195 VoidClosure* complete_gc, 1196 YieldClosure* yield) { 1197 DiscoveredListIterator iter(refs_list, keep_alive, is_alive); 1198 while (iter.has_next()) { 1199 iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */)); 1200 oop obj = iter.obj(); 1201 oop next = java_lang_ref_Reference::next(obj); 1202 if (iter.referent() == NULL || iter.is_referent_alive() || 1203 next != NULL) { 1204 // The referent has been cleared, or is alive, or the Reference is not 1205 // active; we need to trace and mark its cohort. 1206 log_develop_trace(gc, ref)("Precleaning Reference (" INTPTR_FORMAT ": %s)", 1207 p2i(iter.obj()), iter.obj()->klass()->internal_name()); 1208 // Remove Reference object from list 1209 iter.remove(); 1210 // Keep alive its cohort. 1211 iter.make_referent_alive(); 1212 if (UseCompressedOops) { 1213 narrowOop* next_addr = (narrowOop*)java_lang_ref_Reference::next_addr_raw(obj); 1214 keep_alive->do_oop(next_addr); 1215 } else { 1216 oop* next_addr = (oop*)java_lang_ref_Reference::next_addr_raw(obj); 1217 keep_alive->do_oop(next_addr); 1218 } 1219 iter.move_to_next(); 1220 } else { 1221 iter.next(); 1222 } 1223 } 1224 // Close the reachable set 1225 complete_gc->do_void(); 1226 1227 NOT_PRODUCT( 1228 if (iter.processed() > 0) { 1229 log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " Refs out of " SIZE_FORMAT " Refs in discovered list " INTPTR_FORMAT, 1230 iter.removed(), iter.processed(), p2i(&refs_list)); 1231 } 1232 ) 1233 } 1234 1235 const char* ReferenceProcessor::list_name(uint i) { 1236 assert(i <= _max_num_queues * number_of_subclasses_of_ref(), 1237 "Out of bounds index"); 1238 1239 int j = i / _max_num_queues; 1240 switch (j) { 1241 case 0: return "SoftRef"; 1242 case 1: return "WeakRef"; 1243 case 2: return "FinalRef"; 1244 case 3: return "PhantomRef"; 1245 } 1246 ShouldNotReachHere(); 1247 return NULL; 1248 }