/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.inline.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"

ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy = NULL;
jlong ReferenceProcessor::_soft_ref_timestamp_clock = 0;

void referenceProcessor_init() {
  ReferenceProcessor::init_statics();
}

void ReferenceProcessor::init_statics() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;

  // Initialize the soft ref timestamp clock.
  _soft_ref_timestamp_clock = now;
  // Also update the soft ref clock in j.l.r.SoftReference
  java_lang_ref_SoftReference::set_clock(_soft_ref_timestamp_clock);

  _always_clear_soft_ref_policy = new AlwaysClearPolicy();
  if (is_server_compilation_mode_vm()) {
    _default_soft_ref_policy = new LRUMaxHeapPolicy();
  } else {
    _default_soft_ref_policy = new LRUCurrentHeapPolicy();
  }
  if (_always_clear_soft_ref_policy == NULL || _default_soft_ref_policy == NULL) {
    vm_exit_during_initialization("Could not allocate reference policy object");
  }
  guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery ||
            RefDiscoveryPolicy == ReferentBasedDiscovery,
            "Unrecognized RefDiscoveryPolicy");
}

void ReferenceProcessor::enable_discovery(bool check_no_refs) {
#ifdef ASSERT
  // Verify that we're not currently discovering refs
  assert(!_discovering_refs, "nested call?");

  if (check_no_refs) {
    // Verify that the discovered lists are empty
    verify_no_references_recorded();
  }
#endif // ASSERT

  // Someone could have modified the value of the static
  // field in the j.l.r.SoftReference class that holds the
  // soft reference timestamp clock using reflection or
  // Unsafe between GCs. Unconditionally update the static
  // field in ReferenceProcessor here so that we use the new
  // value during reference discovery.

  _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock();
  _discovering_refs = true;
}

ReferenceProcessor::ReferenceProcessor(BoolObjectClosure* is_subject_to_discovery,
                                       bool mt_processing,
                                       uint mt_processing_degree,
                                       bool mt_discovery,
                                       uint mt_discovery_degree,
                                       bool atomic_discovery,
                                       BoolObjectClosure* is_alive_non_header,
                                       bool adjust_no_of_processing_threads) :
  _is_subject_to_discovery(is_subject_to_discovery),
  _discovering_refs(false),
  _enqueuing_is_done(false),
  _processing_is_mt(mt_processing),
  _next_id(0),
  _adjust_no_of_processing_threads(adjust_no_of_processing_threads),
  _is_alive_non_header(is_alive_non_header)
{
  assert(is_subject_to_discovery != NULL, "must be set");

  _discovery_is_atomic = atomic_discovery;
  _discovery_is_mt = mt_discovery;
  _num_queues = MAX2(1U, mt_processing_degree);
  _max_num_queues = MAX2(_num_queues, mt_discovery_degree);
  _discovered_refs = NEW_C_HEAP_ARRAY(DiscoveredList,
                                      _max_num_queues * number_of_subclasses_of_ref(), mtGC);

  if (_discovered_refs == NULL) {
    vm_exit_during_initialization("Could not allocate RefProc Array");
  }
  _discoveredSoftRefs = &_discovered_refs[0];
  _discoveredWeakRefs = &_discoveredSoftRefs[_max_num_queues];
  _discoveredFinalRefs = &_discoveredWeakRefs[_max_num_queues];
  _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_queues];

  // Initialize all entries to NULL
  for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) {
    _discovered_refs[i].clear();
  }

  setup_policy(false /* default soft ref policy */);
}

#ifndef PRODUCT
void ReferenceProcessor::verify_no_references_recorded() {
  guarantee(!_discovering_refs, "Discovering refs?");
  for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) {
    guarantee(_discovered_refs[i].is_empty(),
              "Found non-empty discovered list at %u", i);
  }
}
#endif

void ReferenceProcessor::weak_oops_do(OopClosure* f) {
  for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) {
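    // The list head is stored either as a narrowOop or as a regular oop,
    // depending on UseCompressedOops, so hand the closure the matching slot type.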
    if (UseCompressedOops) {
      f->do_oop((narrowOop*)_discovered_refs[i].adr_head());
    } else {
      f->do_oop((oop*)_discovered_refs[i].adr_head());
    }
  }
}

void ReferenceProcessor::update_soft_ref_master_clock() {
  // Update (advance) the soft ref master clock field. This must be done
  // after processing the soft ref list.

  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  jlong soft_ref_clock = java_lang_ref_SoftReference::clock();
  assert(soft_ref_clock == _soft_ref_timestamp_clock, "soft ref clocks out of sync");

  NOT_PRODUCT(
  if (now < _soft_ref_timestamp_clock) {
    log_warning(gc)("time warp: " JLONG_FORMAT " to " JLONG_FORMAT,
                    _soft_ref_timestamp_clock, now);
  }
  )
  // The values of now and _soft_ref_timestamp_clock are set using
  // javaTimeNanos(), which is guaranteed to be monotonically
  // non-decreasing provided the underlying platform provides such
  // a time source (and it is bug free).
  // In product mode, however, protect ourselves from non-monotonicity.
  if (now > _soft_ref_timestamp_clock) {
    _soft_ref_timestamp_clock = now;
    java_lang_ref_SoftReference::set_clock(now);
  }
  // Else leave clock stalled at its old value until time progresses
  // past clock value.
}

size_t ReferenceProcessor::total_count(DiscoveredList lists[]) const {
  size_t total = 0;
  for (uint i = 0; i < _max_num_queues; ++i) {
    total += lists[i].length();
  }
  return total;
}

#ifdef ASSERT
void ReferenceProcessor::verify_total_count_zero(DiscoveredList lists[], const char* type) {
  size_t count = total_count(lists);
  assert(count == 0, "%ss must be empty but has " SIZE_FORMAT " elements", type, count);
}
#endif

ReferenceProcessorStats ReferenceProcessor::process_discovered_references(
  BoolObjectClosure* is_alive,
  OopClosure* keep_alive,
  VoidClosure* complete_gc,
  AbstractRefProcTaskExecutor* task_executor,
  ReferenceProcessorPhaseTimes* phase_times) {

  double start_time = os::elapsedTime();

  assert(!enqueuing_is_done(), "If here enqueuing should not be complete");
  // Stop treating discovered references specially.
  disable_discovery();

  // If discovery was concurrent, someone could have modified
  // the value of the static field in the j.l.r.SoftReference
  // class that holds the soft reference timestamp clock using
  // reflection or Unsafe between when discovery was enabled and
  // now. Unconditionally update the static field in ReferenceProcessor
  // here so that we use the new value during processing of the
  // discovered soft refs.

  _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock();

  ReferenceProcessorStats stats(total_count(_discoveredSoftRefs),
                                total_count(_discoveredWeakRefs),
                                total_count(_discoveredFinalRefs),
                                total_count(_discoveredPhantomRefs));

  {
    RefProcTotalPhaseTimesTracker tt(RefPhase1, phase_times, this);
    process_soft_ref_reconsider(is_alive, keep_alive, complete_gc,
                                task_executor, phase_times);
  }

  update_soft_ref_master_clock();

  {
    RefProcTotalPhaseTimesTracker tt(RefPhase2, phase_times, this);
    process_soft_weak_final_refs(is_alive, keep_alive, complete_gc, task_executor, phase_times);
  }

  {
    RefProcTotalPhaseTimesTracker tt(RefPhase3, phase_times, this);
    process_final_keep_alive(keep_alive, complete_gc, task_executor, phase_times);
  }

  {
    RefProcTotalPhaseTimesTracker tt(RefPhase4, phase_times, this);
    process_phantom_refs(is_alive, keep_alive, complete_gc, task_executor, phase_times);
  }

  if (task_executor != NULL) {
    // Record the work done by the parallel workers.
    task_executor->set_single_threaded_mode();
  }

  phase_times->set_total_time_ms((os::elapsedTime() - start_time) * 1000);

  return stats;
}

void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
  _current_discovered_addr = java_lang_ref_Reference::discovered_addr_raw(_current_discovered);
  oop discovered = java_lang_ref_Reference::discovered(_current_discovered);
  assert(_current_discovered_addr && oopDesc::is_oop_or_null(discovered),
         "Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered));
  _next_discovered = discovered;

  _referent_addr = java_lang_ref_Reference::referent_addr_raw(_current_discovered);
  _referent = java_lang_ref_Reference::referent(_current_discovered);
  assert(Universe::heap()->is_in_reserved_or_null(_referent),
         "Wrong oop found in java.lang.Reference object");
  assert(allow_null_referent ?
           oopDesc::is_oop_or_null(_referent)
         : oopDesc::is_oop(_referent),
         "Expected an oop%s for referent field at " PTR_FORMAT,
         (allow_null_referent ? " or NULL" : ""),
         p2i(_referent));
}

void DiscoveredListIterator::remove() {
  assert(oopDesc::is_oop(_current_discovered), "Dropping a bad reference");
  RawAccess<>::oop_store(_current_discovered_addr, oop(NULL));

  // First _prev_next ref actually points into DiscoveredList (gross).
  oop new_next;
  if (oopDesc::equals_raw(_next_discovered, _current_discovered)) {
    // At the end of the list, we should make _prev point to itself.
    // If _ref is the first ref, then _prev_next will be in the DiscoveredList,
    // and _prev will be NULL.
    new_next = _prev_discovered;
  } else {
    new_next = _next_discovered;
  }
  // Remove Reference object from discovered list. Note that G1 does not need a
  // pre-barrier here because we know the Reference has already been found/marked,
  // that's how it ended up in the discovered list in the first place.
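  // Unlink _current_discovered: the previous element (or the list head, for the
  // first element) is made to point at new_next computed above.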
  RawAccess<>::oop_store(_prev_discovered_addr, new_next);
  _removed++;
  _refs_list.dec_length(1);
}

void DiscoveredListIterator::clear_referent() {
  RawAccess<>::oop_store(_referent_addr, oop(NULL));
}

void DiscoveredListIterator::enqueue() {
  HeapAccess<AS_NO_KEEPALIVE>::oop_store_at(_current_discovered,
                                            java_lang_ref_Reference::discovered_offset,
                                            _next_discovered);
}

void DiscoveredListIterator::complete_enqueue() {
  if (_prev_discovered != NULL) {
    // This is the last object.
    // Swap refs_list into pending list and set obj's
    // discovered to what we read from the pending list.
    oop old = Universe::swap_reference_pending_list(_refs_list.head());
    HeapAccess<AS_NO_KEEPALIVE>::oop_store_at(_prev_discovered, java_lang_ref_Reference::discovered_offset, old);
  }
}

inline void log_dropped_ref(const DiscoveredListIterator& iter, const char* reason) {
  if (log_develop_is_enabled(Trace, gc, ref)) {
    ResourceMark rm;
    log_develop_trace(gc, ref)("Dropping %s reference " PTR_FORMAT ": %s",
                               reason, p2i(iter.obj()),
                               iter.obj()->klass()->internal_name());
  }
}

inline void log_enqueued_ref(const DiscoveredListIterator& iter, const char* reason) {
  if (log_develop_is_enabled(Trace, gc, ref)) {
    ResourceMark rm;
    log_develop_trace(gc, ref)("Enqueue %s reference (" INTPTR_FORMAT ": %s)",
                               reason, p2i(iter.obj()), iter.obj()->klass()->internal_name());
  }
  assert(oopDesc::is_oop(iter.obj(), UseConcMarkSweepGC), "Adding a bad reference");
}

size_t ReferenceProcessor::process_soft_ref_reconsider_work(DiscoveredList& refs_list,
                                                            ReferencePolicy* policy,
                                                            BoolObjectClosure* is_alive,
                                                            OopClosure* keep_alive,
                                                            VoidClosure* complete_gc) {
  assert(policy != NULL, "Must have a non-NULL policy");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  // Decide which softly reachable refs should be kept alive.
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
    bool referent_is_dead = (iter.referent() != NULL) && !iter.is_referent_alive();
    if (referent_is_dead &&
        !policy->should_clear_reference(iter.obj(), _soft_ref_timestamp_clock)) {
      log_dropped_ref(iter, "by policy");
      // Remove Reference object from list
      iter.remove();
      // keep the referent around
      iter.make_referent_alive();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Close the reachable set
  complete_gc->do_void();

  log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " dead Refs out of " SIZE_FORMAT " discovered Refs by policy, from list " INTPTR_FORMAT,
                             iter.removed(), iter.processed(), p2i(&refs_list));
  return iter.removed();
}

size_t ReferenceProcessor::process_soft_weak_final_refs_work(DiscoveredList& refs_list,
                                                             BoolObjectClosure* is_alive,
                                                             OopClosure* keep_alive,
                                                             bool do_enqueue_and_clear) {
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
    if (iter.referent() == NULL) {
      // Reference has been cleared since discovery; only possible if
      // discovery is not atomic (checked by load_ptrs). Remove
      // reference from list.
      log_dropped_ref(iter, "cleared");
      iter.remove();
      iter.move_to_next();
    } else if (iter.is_referent_alive()) {
      // The referent is reachable after all.
      // Remove reference from list.
      log_dropped_ref(iter, "reachable");
      iter.remove();
      // Update the referent pointer as necessary. Note that this
      // should not entail any recursive marking because the
      // referent must already have been traversed.
      iter.make_referent_alive();
      iter.move_to_next();
    } else {
      if (do_enqueue_and_clear) {
        iter.clear_referent();
        iter.enqueue();
        log_enqueued_ref(iter, "cleared");
      }
      // Keep in discovered list
      iter.next();
    }
  }
  if (do_enqueue_and_clear) {
    iter.complete_enqueue();
    refs_list.clear();
  }

  log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " active Refs out of " SIZE_FORMAT
                             " Refs in discovered list " INTPTR_FORMAT,
                             iter.removed(), iter.processed(), p2i(&refs_list));
  return iter.removed();
}

size_t ReferenceProcessor::process_final_keep_alive_work(DiscoveredList& refs_list,
                                                         OopClosure* keep_alive,
                                                         VoidClosure* complete_gc) {
  DiscoveredListIterator iter(refs_list, keep_alive, NULL);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
    // keep the referent and followers around
    iter.make_referent_alive();

    // Self-loop next, to mark the FinalReference not active.
    assert(java_lang_ref_Reference::next(iter.obj()) == NULL, "enqueued FinalReference");
    java_lang_ref_Reference::set_next_raw(iter.obj(), iter.obj());

    iter.enqueue();
    log_enqueued_ref(iter, "Final");
    iter.next();
  }
  iter.complete_enqueue();
  // Close the reachable set
  complete_gc->do_void();
  refs_list.clear();

  assert(iter.removed() == 0, "This phase does not remove anything.");
  return iter.removed();
}

size_t ReferenceProcessor::process_phantom_refs_work(DiscoveredList& refs_list,
                                                     BoolObjectClosure* is_alive,
                                                     OopClosure* keep_alive,
                                                     VoidClosure* complete_gc) {
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));

    oop const referent = iter.referent();

    if (referent == NULL || iter.is_referent_alive()) {
      iter.make_referent_alive();
      iter.remove();
      iter.move_to_next();
    } else {
      iter.clear_referent();
      iter.enqueue();
      log_enqueued_ref(iter, "cleared Phantom");
      iter.next();
    }
  }
  iter.complete_enqueue();
  // Close the reachable set; needed for collectors whose keep_alive_closure does
  // not immediately complete its work.
  complete_gc->do_void();
  refs_list.clear();

  return iter.removed();
}

void
ReferenceProcessor::clear_discovered_references(DiscoveredList& refs_list) {
  oop obj = NULL;
  oop next = refs_list.head();
  while (!oopDesc::equals_raw(next, obj)) {
    obj = next;
    next = java_lang_ref_Reference::discovered(obj);
    java_lang_ref_Reference::set_discovered_raw(obj, NULL);
  }
  refs_list.clear();
}

void ReferenceProcessor::abandon_partial_discovery() {
  // loop over the lists
  for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) {
    if ((i % _max_num_queues) == 0) {
      log_develop_trace(gc, ref)("Abandoning %s discovered list", list_name(i));
    }
    clear_discovered_references(_discovered_refs[i]);
  }
}

size_t ReferenceProcessor::total_reference_count(ReferenceType type) const {
  DiscoveredList* list = NULL;

  switch (type) {
    case REF_SOFT:
      list = _discoveredSoftRefs;
      break;
    case REF_WEAK:
      list = _discoveredWeakRefs;
      break;
    case REF_FINAL:
      list = _discoveredFinalRefs;
      break;
    case REF_PHANTOM:
      list = _discoveredPhantomRefs;
      break;
    case REF_OTHER:
    case REF_NONE:
    default:
      ShouldNotReachHere();
  }
  return total_count(list);
}

class RefProcPhase1Task : public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase1Task(ReferenceProcessor& ref_processor,
                    ReferenceProcessorPhaseTimes* phase_times,
                    ReferencePolicy* policy)
    : ProcessTask(ref_processor, true /* marks_oops_alive */, phase_times),
      _policy(policy) { }

  virtual void work(uint worker_id,
                    BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    RefProcSubPhasesWorkerTimeTracker tt(ReferenceProcessor::SoftRefSubPhase1, _phase_times, worker_id);
    size_t const removed = _ref_processor.process_soft_ref_reconsider_work(_ref_processor._discoveredSoftRefs[worker_id],
                                                                           _policy,
                                                                           &is_alive,
                                                                           &keep_alive,
                                                                           &complete_gc);
    _phase_times->add_ref_cleared(REF_SOFT, removed);
  }
private:
  ReferencePolicy* _policy;
};

class RefProcPhase2Task: public AbstractRefProcTaskExecutor::ProcessTask {
  void run_phase2(uint worker_id,
                  DiscoveredList list[],
                  BoolObjectClosure& is_alive,
                  OopClosure& keep_alive,
                  bool do_enqueue_and_clear,
                  ReferenceType ref_type) {
    size_t const removed = _ref_processor.process_soft_weak_final_refs_work(list[worker_id],
                                                                            &is_alive,
                                                                            &keep_alive,
                                                                            do_enqueue_and_clear);
    _phase_times->add_ref_cleared(ref_type, removed);
  }

public:
  RefProcPhase2Task(ReferenceProcessor& ref_processor,
                    ReferenceProcessorPhaseTimes* phase_times)
    : ProcessTask(ref_processor, false /* marks_oops_alive */, phase_times) { }

  virtual void work(uint worker_id,
                    BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc) {
    RefProcWorkerTimeTracker t(_phase_times->phase2_worker_time_sec(), worker_id);
    {
      RefProcSubPhasesWorkerTimeTracker tt(ReferenceProcessor::SoftRefSubPhase2, _phase_times, worker_id);
      run_phase2(worker_id, _ref_processor._discoveredSoftRefs, is_alive, keep_alive, true /* do_enqueue_and_clear */, REF_SOFT);
    }
    {
      RefProcSubPhasesWorkerTimeTracker tt(ReferenceProcessor::WeakRefSubPhase2, _phase_times, worker_id);
      run_phase2(worker_id, _ref_processor._discoveredWeakRefs, is_alive, keep_alive, true /* do_enqueue_and_clear */, REF_WEAK);
    }
    {
      RefProcSubPhasesWorkerTimeTracker tt(ReferenceProcessor::FinalRefSubPhase2, _phase_times, worker_id);
      run_phase2(worker_id, _ref_processor._discoveredFinalRefs, is_alive, keep_alive, false /* do_enqueue_and_clear */, REF_FINAL);
    }
    // Close the reachable set; needed for collectors whose keep_alive_closure does
    // not immediately complete its work.
    complete_gc.do_void();
  }
};

class RefProcPhase3Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase3Task(ReferenceProcessor& ref_processor,
                    ReferenceProcessorPhaseTimes* phase_times)
    : ProcessTask(ref_processor, true /* marks_oops_alive */, phase_times) { }

  virtual void work(uint worker_id,
                    BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    RefProcSubPhasesWorkerTimeTracker tt(ReferenceProcessor::FinalRefSubPhase3, _phase_times, worker_id);
    _ref_processor.process_final_keep_alive_work(_ref_processor._discoveredFinalRefs[worker_id], &keep_alive, &complete_gc);
  }
};

class RefProcPhase4Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase4Task(ReferenceProcessor& ref_processor,
                    ReferenceProcessorPhaseTimes* phase_times)
    : ProcessTask(ref_processor, false /* marks_oops_alive */, phase_times) { }

  virtual void work(uint worker_id,
                    BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    RefProcSubPhasesWorkerTimeTracker tt(ReferenceProcessor::PhantomRefSubPhase4, _phase_times, worker_id);
    size_t const removed = _ref_processor.process_phantom_refs_work(_ref_processor._discoveredPhantomRefs[worker_id],
                                                                    &is_alive,
                                                                    &keep_alive,
                                                                    &complete_gc);
    _phase_times->add_ref_cleared(REF_PHANTOM, removed);
  }
};

void ReferenceProcessor::log_reflist(const char* prefix, DiscoveredList list[], uint num_active_queues) {
  LogTarget(Trace, gc, ref) lt;

  if (!lt.is_enabled()) {
    return;
  }

  size_t total = 0;

  LogStream ls(lt);
  ls.print("%s", prefix);
  for (uint i = 0; i < num_active_queues; i++) {
    ls.print(SIZE_FORMAT " ", list[i].length());
    total += list[i].length();
  }
  ls.print_cr("(" SIZE_FORMAT ")", total);
}

#ifndef PRODUCT
void ReferenceProcessor::log_reflist_counts(DiscoveredList ref_lists[], uint num_active_queues) {
  if (!log_is_enabled(Trace, gc, ref)) {
    return;
  }

  log_reflist("", ref_lists, num_active_queues);
#ifdef ASSERT
  for (uint i = num_active_queues; i < _max_num_queues; i++) {
    assert(ref_lists[i].length() == 0, SIZE_FORMAT " unexpected References in %u",
           ref_lists[i].length(), i);
  }
#endif
}
#endif

void ReferenceProcessor::set_active_mt_degree(uint v) {
  _num_queues = v;
  _next_id = 0;
}

bool ReferenceProcessor::need_balance_queues(DiscoveredList refs_lists[]) {
  assert(_processing_is_mt, "why balance non-mt processing?");
  // _num_queues is the processing degree. Only list entries up to
  // _num_queues will be processed, so any non-empty lists beyond
  // that must be redistributed to lists in that range. Even if not
  // needed for that, balancing may be desirable to eliminate poor
  // distribution of references among the lists.
  if (ParallelRefProcBalancingEnabled) {
    return true;  // Configuration says do it.
  } else {
    // Configuration says don't balance, but if there are non-empty
    // lists beyond the processing degree, then must ignore the
    // configuration and balance anyway.
    for (uint i = _num_queues; i < _max_num_queues; ++i) {
      if (!refs_lists[i].is_empty()) {
        return true;  // Must balance despite configuration.
      }
    }
    return false;  // Safe to obey configuration and not balance.
  }
}

void ReferenceProcessor::maybe_balance_queues(DiscoveredList refs_lists[]) {
  assert(_processing_is_mt, "Should not call this otherwise");
  if (need_balance_queues(refs_lists)) {
    balance_queues(refs_lists);
  }
}

// Balances reference queues.
// Move entries from all queues[0, 1, ..., _max_num_q-1] to
// queues[0, 1, ..., _num_q-1] because only the first _num_q
// corresponding to the active workers will be processed.
void ReferenceProcessor::balance_queues(DiscoveredList ref_lists[])
{
  // calculate total length
  size_t total_refs = 0;
  log_develop_trace(gc, ref)("Balance ref_lists ");

  log_reflist_counts(ref_lists, _max_num_queues);

  for (uint i = 0; i < _max_num_queues; ++i) {
    total_refs += ref_lists[i].length();
  }
  size_t avg_refs = total_refs / _num_queues + 1;
  uint to_idx = 0;
  for (uint from_idx = 0; from_idx < _max_num_queues; from_idx++) {
    bool move_all = false;
    if (from_idx >= _num_queues) {
      move_all = ref_lists[from_idx].length() > 0;
    }
    while ((ref_lists[from_idx].length() > avg_refs) ||
           move_all) {
      assert(to_idx < _num_queues, "Sanity Check!");
      if (ref_lists[to_idx].length() < avg_refs) {
        // move superfluous refs
        size_t refs_to_move;
        // Move all the Ref's if the from queue will not be processed.
        if (move_all) {
          refs_to_move = MIN2(ref_lists[from_idx].length(),
                              avg_refs - ref_lists[to_idx].length());
        } else {
          refs_to_move = MIN2(ref_lists[from_idx].length() - avg_refs,
                              avg_refs - ref_lists[to_idx].length());
        }

        assert(refs_to_move > 0, "otherwise the code below will fail");

        oop move_head = ref_lists[from_idx].head();
        oop move_tail = move_head;
        oop new_head = move_head;
        // find an element to split the list on
        for (size_t j = 0; j < refs_to_move; ++j) {
          move_tail = new_head;
          new_head = java_lang_ref_Reference::discovered(new_head);
        }

        // Add the chain to the to list.
        if (ref_lists[to_idx].head() == NULL) {
          // to list is empty. Make a loop at the end.
          java_lang_ref_Reference::set_discovered_raw(move_tail, move_tail);
        } else {
          java_lang_ref_Reference::set_discovered_raw(move_tail, ref_lists[to_idx].head());
        }
        ref_lists[to_idx].set_head(move_head);
        ref_lists[to_idx].inc_length(refs_to_move);

        // Remove the chain from the from list.
        if (oopDesc::equals_raw(move_tail, new_head)) {
          // We found the end of the from list.
          ref_lists[from_idx].set_head(NULL);
        } else {
          ref_lists[from_idx].set_head(new_head);
        }
        ref_lists[from_idx].dec_length(refs_to_move);
        if (ref_lists[from_idx].length() == 0) {
          break;
        }
      } else {
        to_idx = (to_idx + 1) % _num_queues;
      }
    }
  }
#ifdef ASSERT
  log_reflist_counts(ref_lists, _num_queues);
  size_t balanced_total_refs = 0;
  for (uint i = 0; i < _num_queues; ++i) {
    balanced_total_refs += ref_lists[i].length();
  }
  assert(total_refs == balanced_total_refs, "Balancing was incomplete");
#endif
}

bool ReferenceProcessor::is_mt_processing_set_up(AbstractRefProcTaskExecutor* task_executor) const {
  return task_executor != NULL && _processing_is_mt;
}

void ReferenceProcessor::process_soft_ref_reconsider(BoolObjectClosure* is_alive,
                                                     OopClosure* keep_alive,
                                                     VoidClosure* complete_gc,
                                                     AbstractRefProcTaskExecutor* task_executor,
                                                     ReferenceProcessorPhaseTimes* phase_times) {
  assert(!_processing_is_mt || task_executor != NULL, "Task executor must not be NULL when mt processing is set.");

  size_t const num_soft_refs = total_count(_discoveredSoftRefs);
  phase_times->set_ref_discovered(REF_SOFT, num_soft_refs);

  phase_times->set_processing_is_mt(_processing_is_mt);

  if (num_soft_refs == 0 || _current_soft_ref_policy == NULL) {
    log_debug(gc, ref)("Skipped phase1 of Reference Processing due to unavailable references");
    return;
  }

  RefProcMTDegreeAdjuster a(this, RefPhase1, num_soft_refs);

  if (_processing_is_mt) {
    RefProcBalanceQueuesTimeTracker tt(RefPhase1, phase_times);
    maybe_balance_queues(_discoveredSoftRefs);
  }

  RefProcPhaseTimeTracker tt(RefPhase1, phase_times);

  log_reflist("Phase1 Soft before", _discoveredSoftRefs, _max_num_queues);
  if (_processing_is_mt) {
    RefProcPhase1Task phase1(*this, phase_times, _current_soft_ref_policy);
    task_executor->execute(phase1, num_queues());
  } else {
    size_t removed = 0;

    RefProcSubPhasesWorkerTimeTracker tt2(SoftRefSubPhase1, phase_times, 0);
    for (uint i = 0; i < _max_num_queues; i++) {
      removed += process_soft_ref_reconsider_work(_discoveredSoftRefs[i], _current_soft_ref_policy,
                                                  is_alive, keep_alive, complete_gc);
    }

    phase_times->add_ref_cleared(REF_SOFT, removed);
  }
  log_reflist("Phase1 Soft after", _discoveredSoftRefs, _max_num_queues);
}

void ReferenceProcessor::process_soft_weak_final_refs(BoolObjectClosure* is_alive,
                                                      OopClosure* keep_alive,
                                                      VoidClosure* complete_gc,
                                                      AbstractRefProcTaskExecutor* task_executor,
                                                      ReferenceProcessorPhaseTimes* phase_times) {
  assert(!_processing_is_mt || task_executor != NULL, "Task executor must not be NULL when mt processing is set.");

  size_t const num_soft_refs = total_count(_discoveredSoftRefs);
  size_t const num_weak_refs = total_count(_discoveredWeakRefs);
  size_t const num_final_refs = total_count(_discoveredFinalRefs);
  size_t const num_total_refs = num_soft_refs + num_weak_refs + num_final_refs;
  phase_times->set_ref_discovered(REF_WEAK, num_weak_refs);
  phase_times->set_ref_discovered(REF_FINAL, num_final_refs);

  phase_times->set_processing_is_mt(_processing_is_mt);

  if (num_total_refs == 0) {
    log_debug(gc, ref)("Skipped phase2 of Reference Processing due to unavailable references");
    return;
  }

  RefProcMTDegreeAdjuster a(this, RefPhase2, num_total_refs);

  if (_processing_is_mt) {
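    // Redistribute the soft, weak and final lists across the active worker
    // queues before handing them to the parallel phase2 tasks.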
    RefProcBalanceQueuesTimeTracker tt(RefPhase2, phase_times);
    maybe_balance_queues(_discoveredSoftRefs);
    maybe_balance_queues(_discoveredWeakRefs);
    maybe_balance_queues(_discoveredFinalRefs);
  }

  RefProcPhaseTimeTracker tt(RefPhase2, phase_times);

  log_reflist("Phase2 Soft before", _discoveredSoftRefs, _max_num_queues);
  log_reflist("Phase2 Weak before", _discoveredWeakRefs, _max_num_queues);
  log_reflist("Phase2 Final before", _discoveredFinalRefs, _max_num_queues);
  if (_processing_is_mt) {
    RefProcPhase2Task phase2(*this, phase_times);
    task_executor->execute(phase2, num_queues());
  } else {
    RefProcWorkerTimeTracker t(phase_times->phase2_worker_time_sec(), 0);
    {
      size_t removed = 0;

      RefProcSubPhasesWorkerTimeTracker tt2(SoftRefSubPhase2, phase_times, 0);
      for (uint i = 0; i < _max_num_queues; i++) {
        removed += process_soft_weak_final_refs_work(_discoveredSoftRefs[i], is_alive, keep_alive, true /* do_enqueue */);
      }

      phase_times->add_ref_cleared(REF_SOFT, removed);
    }
    {
      size_t removed = 0;

      RefProcSubPhasesWorkerTimeTracker tt2(WeakRefSubPhase2, phase_times, 0);
      for (uint i = 0; i < _max_num_queues; i++) {
        removed += process_soft_weak_final_refs_work(_discoveredWeakRefs[i], is_alive, keep_alive, true /* do_enqueue */);
      }

      phase_times->add_ref_cleared(REF_WEAK, removed);
    }
    {
      size_t removed = 0;

      RefProcSubPhasesWorkerTimeTracker tt2(FinalRefSubPhase2, phase_times, 0);
      for (uint i = 0; i < _max_num_queues; i++) {
        removed += process_soft_weak_final_refs_work(_discoveredFinalRefs[i], is_alive, keep_alive, false /* do_enqueue */);
      }

      phase_times->add_ref_cleared(REF_FINAL, removed);
    }
    complete_gc->do_void();
  }
  verify_total_count_zero(_discoveredSoftRefs, "SoftReference");
  verify_total_count_zero(_discoveredWeakRefs, "WeakReference");
  log_reflist("Phase2 Final after", _discoveredFinalRefs, _max_num_queues);
}

void ReferenceProcessor::process_final_keep_alive(OopClosure* keep_alive,
                                                  VoidClosure* complete_gc,
                                                  AbstractRefProcTaskExecutor* task_executor,
                                                  ReferenceProcessorPhaseTimes* phase_times) {
  assert(!_processing_is_mt || task_executor != NULL, "Task executor must not be NULL when mt processing is set.");

  size_t const num_final_refs = total_count(_discoveredFinalRefs);

  phase_times->set_processing_is_mt(_processing_is_mt);

  if (num_final_refs == 0) {
    log_debug(gc, ref)("Skipped phase3 of Reference Processing due to unavailable references");
    return;
  }

  RefProcMTDegreeAdjuster a(this, RefPhase3, num_final_refs);

  if (_processing_is_mt) {
    RefProcBalanceQueuesTimeTracker tt(RefPhase3, phase_times);
    maybe_balance_queues(_discoveredFinalRefs);
  }

  // Phase 3:
  // . Traverse referents of final references and keep them and followers alive.
  RefProcPhaseTimeTracker tt(RefPhase3, phase_times);

  if (_processing_is_mt) {
    RefProcPhase3Task phase3(*this, phase_times);
    task_executor->execute(phase3, num_queues());
  } else {
    RefProcSubPhasesWorkerTimeTracker tt2(FinalRefSubPhase3, phase_times, 0);
    for (uint i = 0; i < _max_num_queues; i++) {
      process_final_keep_alive_work(_discoveredFinalRefs[i], keep_alive, complete_gc);
    }
  }
  verify_total_count_zero(_discoveredFinalRefs, "FinalReference");
}

void ReferenceProcessor::process_phantom_refs(BoolObjectClosure* is_alive,
                                              OopClosure* keep_alive,
                                              VoidClosure* complete_gc,
                                              AbstractRefProcTaskExecutor* task_executor,
                                              ReferenceProcessorPhaseTimes* phase_times) {
  assert(!_processing_is_mt || task_executor != NULL, "Task executor must not be NULL when mt processing is set.");

  size_t const num_phantom_refs = total_count(_discoveredPhantomRefs);
  phase_times->set_ref_discovered(REF_PHANTOM, num_phantom_refs);

  phase_times->set_processing_is_mt(_processing_is_mt);

  if (num_phantom_refs == 0) {
    log_debug(gc, ref)("Skipped phase4 of Reference Processing due to unavailable references");
    return;
  }

  RefProcMTDegreeAdjuster a(this, RefPhase4, num_phantom_refs);

  if (_processing_is_mt) {
    RefProcBalanceQueuesTimeTracker tt(RefPhase4, phase_times);
    maybe_balance_queues(_discoveredPhantomRefs);
  }

  // Phase 4: Walk phantom references appropriately.
  RefProcPhaseTimeTracker tt(RefPhase4, phase_times);

  log_reflist("Phase4 Phantom before", _discoveredPhantomRefs, _max_num_queues);
  if (_processing_is_mt) {
    RefProcPhase4Task phase4(*this, phase_times);
    task_executor->execute(phase4, num_queues());
  } else {
    size_t removed = 0;

    RefProcSubPhasesWorkerTimeTracker tt(PhantomRefSubPhase4, phase_times, 0);
    for (uint i = 0; i < _max_num_queues; i++) {
      removed += process_phantom_refs_work(_discoveredPhantomRefs[i], is_alive, keep_alive, complete_gc);
    }

    phase_times->add_ref_cleared(REF_PHANTOM, removed);
  }
  verify_total_count_zero(_discoveredPhantomRefs, "PhantomReference");
}

inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt) {
  uint id = 0;
  // Determine the queue index to use for this object.
  if (_discovery_is_mt) {
    // During a multi-threaded discovery phase,
    // each thread saves to its "own" list.
    Thread* thr = Thread::current();
    id = thr->as_Worker_thread()->id();
  } else {
    // single-threaded discovery, we save in round-robin
    // fashion to each of the lists.
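    // Round-robin across queues only pays off when processing is also
    // multi-threaded; otherwise everything lands on queue 0.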
    if (_processing_is_mt) {
      id = next_id();
    }
  }
  assert(id < _max_num_queues, "Id is out of bounds (id %u, max id %u)", id, _max_num_queues);

  // Get the discovered queue to which we will add
  DiscoveredList* list = NULL;
  switch (rt) {
    case REF_OTHER:
      // Unknown reference type, no special treatment
      break;
    case REF_SOFT:
      list = &_discoveredSoftRefs[id];
      break;
    case REF_WEAK:
      list = &_discoveredWeakRefs[id];
      break;
    case REF_FINAL:
      list = &_discoveredFinalRefs[id];
      break;
    case REF_PHANTOM:
      list = &_discoveredPhantomRefs[id];
      break;
    case REF_NONE:
      // we should not reach here if we are an InstanceRefKlass
    default:
      ShouldNotReachHere();
  }
  log_develop_trace(gc, ref)("Thread %d gets list " INTPTR_FORMAT, id, p2i(list));
  return list;
}

inline void
ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
                                              oop obj,
                                              HeapWord* discovered_addr) {
  assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller");
  // First we must make sure this object is only enqueued once. CAS in a non null
  // discovered_addr.
  oop current_head = refs_list.head();
  // The last ref must have its discovered field pointing to itself.
  oop next_discovered = (current_head != NULL) ? current_head : obj;

  oop retest = HeapAccess<AS_NO_KEEPALIVE>::oop_atomic_cmpxchg(next_discovered, discovered_addr, oop(NULL));

  if (retest == NULL) {
    // This thread just won the right to enqueue the object.
    // We have separate lists for enqueueing, so no synchronization
    // is necessary.
    refs_list.set_head(obj);
    refs_list.inc_length(1);

    log_develop_trace(gc, ref)("Discovered reference (mt) (" INTPTR_FORMAT ": %s)",
                               p2i(obj), obj->klass()->internal_name());
  } else {
    // If retest was non NULL, another thread beat us to it:
    // The reference has already been discovered...
    log_develop_trace(gc, ref)("Already discovered reference (" INTPTR_FORMAT ": %s)",
                               p2i(obj), obj->klass()->internal_name());
  }
}

#ifndef PRODUCT
// Non-atomic (i.e. concurrent) discovery might allow us
// to observe j.l.References with NULL referents, being those
// cleared concurrently by mutators during (or after) discovery.
void ReferenceProcessor::verify_referent(oop obj) {
  bool da = discovery_is_atomic();
  oop referent = java_lang_ref_Reference::referent(obj);
  assert(da ? oopDesc::is_oop(referent) : oopDesc::is_oop_or_null(referent),
         "Bad referent " INTPTR_FORMAT " found in Reference "
         INTPTR_FORMAT " during %satomic discovery ",
         p2i(referent), p2i(obj), da ? "" : "non-");
}
#endif

bool ReferenceProcessor::is_subject_to_discovery(oop const obj) const {
  return _is_subject_to_discovery->do_object_b(obj);
}

// We mention two of several possible choices here:
// #0: if the reference object is not in the "originating generation"
//     (or part of the heap being collected, indicated by our "span")
//     we don't treat it specially (i.e. we scan it as we would
//     a normal oop, treating its references as strong references).
//     This means that references can't be discovered unless their
//     referent is also in the same span. This is the simplest,
//     most "local" and most conservative approach, albeit one
//     that may cause weak references to be enqueued least promptly.
//     We call this choice the "ReferenceBasedDiscovery" policy.
// #1: the reference object may be in any generation (span), but if
//     the referent is in the generation (span) being currently collected
//     then we can discover the reference object, provided
//     the object has not already been discovered by
//     a different concurrently running collector (as may be the
//     case, for instance, if the reference object is in CMS and
//     the referent in DefNewGeneration), and provided the processing
//     of this reference object by the current collector will
//     appear atomic to every other collector in the system.
//     (Thus, for instance, a concurrent collector may not
//     discover references in other generations even if the
//     referent is in its own generation). This policy may,
//     in certain cases, enqueue references somewhat sooner than
//     might Policy #0 above, but at marginally increased cost
//     and complexity in processing these references.
//     We call this choice the "ReferentBasedDiscovery" policy.
bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
  // Make sure we are discovering refs (rather than processing discovered refs).
  if (!_discovering_refs || !RegisterReferences) {
    return false;
  }

  if ((rt == REF_FINAL) && (java_lang_ref_Reference::next(obj) != NULL)) {
    // Don't rediscover non-active FinalReferences.
    return false;
  }

  if (RefDiscoveryPolicy == ReferenceBasedDiscovery &&
      !is_subject_to_discovery(obj)) {
    // Reference is not in the originating generation;
    // don't treat it specially (i.e. we want to scan it as a normal
    // object with strong references).
    return false;
  }

  // We only discover references whose referents are not (yet)
  // known to be strongly reachable.
  if (is_alive_non_header() != NULL) {
    verify_referent(obj);
    if (is_alive_non_header()->do_object_b(java_lang_ref_Reference::referent(obj))) {
      return false;  // referent is reachable
    }
  }
  if (rt == REF_SOFT) {
    // For soft refs we can decide now if these are not
    // current candidates for clearing, in which case we
    // can mark through them now, rather than delaying that
    // to the reference-processing phase. Since all current
    // time-stamp policies advance the soft-ref clock only
    // at a full collection cycle, this is always currently
    // accurate.
    if (!_current_soft_ref_policy->should_clear_reference(obj, _soft_ref_timestamp_clock)) {
      return false;
    }
  }

  ResourceMark rm;  // Needed for tracing.

  HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr_raw(obj);
  const oop discovered = java_lang_ref_Reference::discovered(obj);
  assert(oopDesc::is_oop_or_null(discovered), "Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered));
  if (discovered != NULL) {
    // The reference has already been discovered...
    log_develop_trace(gc, ref)("Already discovered reference (" INTPTR_FORMAT ": %s)",
                               p2i(obj), obj->klass()->internal_name());
    if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
      // assumes that an object is not processed twice;
      // if it's been already discovered it must be on another
      // generation's discovered list; so we won't discover it.
      return false;
    } else {
      assert(RefDiscoveryPolicy == ReferenceBasedDiscovery,
             "Unrecognized policy");
      // Check assumption that an object is not potentially
      // discovered twice except by concurrent collectors that potentially
      // trace the same Reference object twice.
      assert(UseConcMarkSweepGC || UseG1GC,
             "Only possible with a concurrent marking collector");
      return true;
    }
  }

  if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
    verify_referent(obj);
    // Discover if and only if EITHER:
    // .. reference is in our span, OR
    // .. we are an atomic collector and referent is in our span
    if (is_subject_to_discovery(obj) ||
        (discovery_is_atomic() &&
         is_subject_to_discovery(java_lang_ref_Reference::referent(obj)))) {
    } else {
      return false;
    }
  } else {
    assert(RefDiscoveryPolicy == ReferenceBasedDiscovery &&
           is_subject_to_discovery(obj), "code inconsistency");
  }

  // Get the right type of discovered queue head.
  DiscoveredList* list = get_discovered_list(rt);
  if (list == NULL) {
    return false;  // nothing special needs to be done
  }

  if (_discovery_is_mt) {
    add_to_discovered_list_mt(*list, obj, discovered_addr);
  } else {
    // We do a raw store here: the field will be visited later when processing
    // the discovered references.
    oop current_head = list->head();
    // The last ref must have its discovered field pointing to itself.
    oop next_discovered = (current_head != NULL) ? current_head : obj;

    assert(discovered == NULL, "control point invariant");
    RawAccess<>::oop_store(discovered_addr, next_discovered);
    list->set_head(obj);
    list->inc_length(1);

    log_develop_trace(gc, ref)("Discovered reference (" INTPTR_FORMAT ": %s)", p2i(obj), obj->klass()->internal_name());
  }
  assert(oopDesc::is_oop(obj), "Discovered a bad reference");
  verify_referent(obj);
  return true;
}

bool ReferenceProcessor::has_discovered_references() {
  for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) {
    if (!_discovered_refs[i].is_empty()) {
      return true;
    }
  }
  return false;
}

void ReferenceProcessor::preclean_discovered_references(BoolObjectClosure* is_alive,
                                                        OopClosure* keep_alive,
                                                        VoidClosure* complete_gc,
                                                        YieldClosure* yield,
                                                        GCTimer* gc_timer) {
  // These lists can be handled here in any order and, indeed, concurrently.
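  // Each list is walked with periodic yield checks; if the yield closure
  // requests it, precleaning returns early and leaves the remaining lists
  // to regular reference processing.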

  // Soft references
  {
    GCTraceTime(Debug, gc, ref) tm("Preclean SoftReferences", gc_timer);
    log_reflist("SoftRef before: ", _discoveredSoftRefs, _max_num_queues);
    for (uint i = 0; i < _max_num_queues; i++) {
      if (yield->should_return()) {
        return;
      }
      if (preclean_discovered_reflist(_discoveredSoftRefs[i], is_alive,
                                      keep_alive, complete_gc, yield)) {
        log_reflist("SoftRef abort: ", _discoveredSoftRefs, _max_num_queues);
        return;
      }
    }
    log_reflist("SoftRef after: ", _discoveredSoftRefs, _max_num_queues);
  }

  // Weak references
  {
    GCTraceTime(Debug, gc, ref) tm("Preclean WeakReferences", gc_timer);
    log_reflist("WeakRef before: ", _discoveredWeakRefs, _max_num_queues);
    for (uint i = 0; i < _max_num_queues; i++) {
      if (yield->should_return()) {
        return;
      }
      if (preclean_discovered_reflist(_discoveredWeakRefs[i], is_alive,
                                      keep_alive, complete_gc, yield)) {
        log_reflist("WeakRef abort: ", _discoveredWeakRefs, _max_num_queues);
        return;
      }
    }
    log_reflist("WeakRef after: ", _discoveredWeakRefs, _max_num_queues);
  }

  // Final references
  {
    GCTraceTime(Debug, gc, ref) tm("Preclean FinalReferences", gc_timer);
    log_reflist("FinalRef before: ", _discoveredFinalRefs, _max_num_queues);
    for (uint i = 0; i < _max_num_queues; i++) {
      if (yield->should_return()) {
        return;
      }
      if (preclean_discovered_reflist(_discoveredFinalRefs[i], is_alive,
                                      keep_alive, complete_gc, yield)) {
        log_reflist("FinalRef abort: ", _discoveredFinalRefs, _max_num_queues);
        return;
      }
    }
    log_reflist("FinalRef after: ", _discoveredFinalRefs, _max_num_queues);
  }

  // Phantom references
  {
    GCTraceTime(Debug, gc, ref) tm("Preclean PhantomReferences", gc_timer);
    log_reflist("PhantomRef before: ", _discoveredPhantomRefs, _max_num_queues);
    for (uint i = 0; i < _max_num_queues; i++) {
      if (yield->should_return()) {
        return;
      }
      if (preclean_discovered_reflist(_discoveredPhantomRefs[i], is_alive,
                                      keep_alive, complete_gc, yield)) {
        log_reflist("PhantomRef abort: ", _discoveredPhantomRefs, _max_num_queues);
        return;
      }
    }
    log_reflist("PhantomRef after: ", _discoveredPhantomRefs, _max_num_queues);
  }
}

// Walk the given discovered ref list, and remove all reference objects
// whose referents are still alive, whose referents are NULL or which
// are not active (have a non-NULL next field). NOTE: When we are
// thus precleaning the ref lists (which happens single-threaded today),
// we do not disable refs discovery to honor the correct semantics of
// java.lang.Reference. As a result, we need to be careful below
// that ref removal steps interleave safely with ref discovery steps
// (in this thread).
bool ReferenceProcessor::preclean_discovered_reflist(DiscoveredList& refs_list,
                                                     BoolObjectClosure* is_alive,
                                                     OopClosure* keep_alive,
                                                     VoidClosure* complete_gc,
                                                     YieldClosure* yield) {
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    if (yield->should_return_fine_grain()) {
      return true;
    }
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    if (iter.referent() == NULL || iter.is_referent_alive()) {
      // The referent has been cleared, or is alive; we need to trace
      // and mark its cohort.
      log_develop_trace(gc, ref)("Precleaning Reference (" INTPTR_FORMAT ": %s)",
                                 p2i(iter.obj()), iter.obj()->klass()->internal_name());
      // Remove Reference object from list
      iter.remove();
      // Keep alive its cohort.
      iter.make_referent_alive();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Close the reachable set
  complete_gc->do_void();

  if (iter.processed() > 0) {
    log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " Refs out of " SIZE_FORMAT " Refs in discovered list " INTPTR_FORMAT,
                               iter.removed(), iter.processed(), p2i(&refs_list));
  }
  return false;
}

const char* ReferenceProcessor::list_name(uint i) {
  assert(i <= _max_num_queues * number_of_subclasses_of_ref(),
         "Out of bounds index");

  int j = i / _max_num_queues;
  switch (j) {
    case 0: return "SoftRef";
    case 1: return "WeakRef";
    case 2: return "FinalRef";
    case 3: return "PhantomRef";
  }
  ShouldNotReachHere();
  return NULL;
}

uint RefProcMTDegreeAdjuster::ergo_proc_thread_count(size_t ref_count,
                                                     uint max_threads,
                                                     RefProcPhases phase) const {
  assert(0 < max_threads, "must allow at least one thread");

  if (use_max_threads(phase) || (ReferencesPerThread == 0)) {
    return max_threads;
  }

  size_t thread_count = 1 + (ref_count / ReferencesPerThread);
  return (uint)MIN3(thread_count,
                    static_cast<size_t>(max_threads),
                    (size_t)os::active_processor_count());
}

bool RefProcMTDegreeAdjuster::use_max_threads(RefProcPhases phase) const {
  // Even a small number of references in either of those cases could produce large amounts of work.
  return (phase == ReferenceProcessor::RefPhase1 || phase == ReferenceProcessor::RefPhase3);
}

RefProcMTDegreeAdjuster::RefProcMTDegreeAdjuster(ReferenceProcessor* rp,
                                                 RefProcPhases phase,
                                                 size_t ref_count):
    _rp(rp),
    _saved_mt_processing(_rp->processing_is_mt()),
    _saved_num_queues(_rp->num_queues()) {
  if (!_rp->processing_is_mt() || !_rp->adjust_no_of_processing_threads() || (ReferencesPerThread == 0)) {
    return;
  }

  uint workers = ergo_proc_thread_count(ref_count, _rp->num_queues(), phase);

  _rp->set_mt_processing(workers > 1);
  _rp->set_active_mt_degree(workers);
}

RefProcMTDegreeAdjuster::~RefProcMTDegreeAdjuster() {
  // Revert to previous status.
  _rp->set_mt_processing(_saved_mt_processing);
  _rp->set_active_mt_degree(_saved_num_queues);
}