/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.inline.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"

ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy = NULL;
jlong ReferenceProcessor::_soft_ref_timestamp_clock = 0;

void referenceProcessor_init() {
  ReferenceProcessor::init_statics();
}

void ReferenceProcessor::init_statics() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;

  // Initialize the soft ref timestamp clock.
  _soft_ref_timestamp_clock = now;
  // Also update the soft ref clock in j.l.r.SoftReference
  java_lang_ref_SoftReference::set_clock(_soft_ref_timestamp_clock);

  _always_clear_soft_ref_policy = new AlwaysClearPolicy();
  if (is_server_compilation_mode_vm()) {
    _default_soft_ref_policy = new LRUMaxHeapPolicy();
  } else {
    _default_soft_ref_policy = new LRUCurrentHeapPolicy();
  }
  guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery ||
            RefDiscoveryPolicy == ReferentBasedDiscovery,
            "Unrecognized RefDiscoveryPolicy");
}

void ReferenceProcessor::enable_discovery(bool check_no_refs) {
#ifdef ASSERT
  // Verify that we're not currently discovering refs
  assert(!_discovering_refs, "nested call?");

  if (check_no_refs) {
    // Verify that the discovered lists are empty
    verify_no_references_recorded();
  }
#endif // ASSERT

  // Someone could have modified the value of the static
  // field in the j.l.r.SoftReference class that holds the
  // soft reference timestamp clock using reflection or
  // Unsafe between GCs.
  // Unconditionally update the static
  // field in ReferenceProcessor here so that we use the new
  // value during reference discovery.

  _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock();
  _discovering_refs = true;
}

ReferenceProcessor::ReferenceProcessor(BoolObjectClosure* is_subject_to_discovery,
                                       bool mt_processing,
                                       uint mt_processing_degree,
                                       bool mt_discovery,
                                       uint mt_discovery_degree,
                                       bool atomic_discovery,
                                       BoolObjectClosure* is_alive_non_header,
                                       bool adjust_no_of_processing_threads) :
  _is_subject_to_discovery(is_subject_to_discovery),
  _discovering_refs(false),
  _enqueuing_is_done(false),
  _processing_is_mt(mt_processing),
  _next_id(0),
  _adjust_no_of_processing_threads(adjust_no_of_processing_threads),
  _is_alive_non_header(is_alive_non_header)
{
  assert(is_subject_to_discovery != NULL, "must be set");

  _discovery_is_atomic = atomic_discovery;
  _discovery_is_mt = mt_discovery;
  _num_queues = MAX2(1U, mt_processing_degree);
  _max_num_queues = MAX2(_num_queues, mt_discovery_degree);
  _discovered_refs = NEW_C_HEAP_ARRAY(DiscoveredList,
                                      _max_num_queues * number_of_subclasses_of_ref(), mtGC);

  _discoveredSoftRefs    = &_discovered_refs[0];
  _discoveredWeakRefs    = &_discoveredSoftRefs[_max_num_queues];
  _discoveredFinalRefs   = &_discoveredWeakRefs[_max_num_queues];
  _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_queues];

  // Initialize all entries to NULL
  for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) {
    _discovered_refs[i].clear();
  }

  setup_policy(false /* default soft ref policy */);
}

#ifndef PRODUCT
void ReferenceProcessor::verify_no_references_recorded() {
  guarantee(!_discovering_refs, "Discovering refs?");
  for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) {
    guarantee(_discovered_refs[i].is_empty(),
              "Found non-empty discovered list at %u", i);
  }
}
#endif

void ReferenceProcessor::weak_oops_do(OopClosure* f) {
  for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) {
    if (UseCompressedOops) {
      f->do_oop((narrowOop*)_discovered_refs[i].adr_head());
    } else {
      f->do_oop((oop*)_discovered_refs[i].adr_head());
    }
  }
}

void ReferenceProcessor::update_soft_ref_master_clock() {
  // Update (advance) the soft ref master clock field. This must be done
  // after processing the soft ref list.

  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  jlong soft_ref_clock = java_lang_ref_SoftReference::clock();
  assert(soft_ref_clock == _soft_ref_timestamp_clock, "soft ref clocks out of sync");

  NOT_PRODUCT(
  if (now < _soft_ref_timestamp_clock) {
    log_warning(gc)("time warp: " JLONG_FORMAT " to " JLONG_FORMAT,
                    _soft_ref_timestamp_clock, now);
  }
  )
  // The values of now and _soft_ref_timestamp_clock are set using
  // javaTimeNanos(), which is guaranteed to be monotonically
  // non-decreasing provided the underlying platform provides such
  // a time source (and it is bug free).
  // In product mode, however, protect ourselves from non-monotonicity.
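  // Only ever advance the clock; never move it backwards.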
  if (now > _soft_ref_timestamp_clock) {
    _soft_ref_timestamp_clock = now;
    java_lang_ref_SoftReference::set_clock(now);
  }
  // Else leave clock stalled at its old value until time progresses
  // past clock value.
}

size_t ReferenceProcessor::total_count(DiscoveredList lists[]) const {
  size_t total = 0;
  for (uint i = 0; i < _max_num_queues; ++i) {
    total += lists[i].length();
  }
  return total;
}

#ifdef ASSERT
void ReferenceProcessor::verify_total_count_zero(DiscoveredList lists[], const char* type) {
  size_t count = total_count(lists);
  assert(count == 0, "%ss must be empty but has " SIZE_FORMAT " elements", type, count);
}
#endif

ReferenceProcessorStats ReferenceProcessor::process_discovered_references(
  BoolObjectClosure* is_alive,
  OopClosure* keep_alive,
  VoidClosure* complete_gc,
  AbstractRefProcTaskExecutor* task_executor,
  ReferenceProcessorPhaseTimes* phase_times) {

  double start_time = os::elapsedTime();

  assert(!enqueuing_is_done(), "If here enqueuing should not be complete");
  // Stop treating discovered references specially.
  disable_discovery();

  // If discovery was concurrent, someone could have modified
  // the value of the static field in the j.l.r.SoftReference
  // class that holds the soft reference timestamp clock using
  // reflection or Unsafe between when discovery was enabled and
  // now. Unconditionally update the static field in ReferenceProcessor
  // here so that we use the new value during processing of the
  // discovered soft refs.

  _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock();

  ReferenceProcessorStats stats(total_count(_discoveredSoftRefs),
                                total_count(_discoveredWeakRefs),
                                total_count(_discoveredFinalRefs),
                                total_count(_discoveredPhantomRefs));

  {
    RefProcTotalPhaseTimesTracker tt(RefPhase1, phase_times, this);
    process_soft_ref_reconsider(is_alive, keep_alive, complete_gc,
                                task_executor, phase_times);
  }

  update_soft_ref_master_clock();

  {
    RefProcTotalPhaseTimesTracker tt(RefPhase2, phase_times, this);
    process_soft_weak_final_refs(is_alive, keep_alive, complete_gc, task_executor, phase_times);
  }

  {
    RefProcTotalPhaseTimesTracker tt(RefPhase3, phase_times, this);
    process_final_keep_alive(keep_alive, complete_gc, task_executor, phase_times);
  }

  {
    RefProcTotalPhaseTimesTracker tt(RefPhase4, phase_times, this);
    process_phantom_refs(is_alive, keep_alive, complete_gc, task_executor, phase_times);
  }

  if (task_executor != NULL) {
    // Record the work done by the parallel workers.
    task_executor->set_single_threaded_mode();
  }

  phase_times->set_total_time_ms((os::elapsedTime() - start_time) * 1000);

  return stats;
}

void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
  _current_discovered_addr = java_lang_ref_Reference::discovered_addr_raw(_current_discovered);
  oop discovered = java_lang_ref_Reference::discovered(_current_discovered);
  assert(_current_discovered_addr && oopDesc::is_oop_or_null(discovered),
         "Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered));
  _next_discovered = discovered;

  _referent_addr = java_lang_ref_Reference::referent_addr_raw(_current_discovered);
  _referent = java_lang_ref_Reference::referent(_current_discovered);
  assert(Universe::heap()->is_in_or_null(_referent),
         "Wrong oop found in java.lang.Reference object");
  assert(allow_null_referent ?
             oopDesc::is_oop_or_null(_referent)
           : oopDesc::is_oop(_referent),
         "Expected an oop%s for referent field at " PTR_FORMAT,
         (allow_null_referent ? " or NULL" : ""),
         p2i(_referent));
}

void DiscoveredListIterator::remove() {
  assert(oopDesc::is_oop(_current_discovered), "Dropping a bad reference");
  RawAccess<>::oop_store(_current_discovered_addr, oop(NULL));

  // First _prev_next ref actually points into DiscoveredList (gross).
  oop new_next;
  if (_next_discovered == _current_discovered) {
    // At the end of the list, we should make _prev point to itself.
    // If _ref is the first ref, then _prev_next will be in the DiscoveredList,
    // and _prev will be NULL.
    new_next = _prev_discovered;
  } else {
    new_next = _next_discovered;
  }
  // Remove Reference object from discovered list. Note that G1 does not need a
  // pre-barrier here because we know the Reference has already been found/marked,
  // that's how it ended up in the discovered list in the first place.
  RawAccess<>::oop_store(_prev_discovered_addr, new_next);
  _removed++;
  _refs_list.dec_length(1);
}

void DiscoveredListIterator::clear_referent() {
  RawAccess<>::oop_store(_referent_addr, oop(NULL));
}

void DiscoveredListIterator::enqueue() {
  HeapAccess<AS_NO_KEEPALIVE>::oop_store_at(_current_discovered,
                                            java_lang_ref_Reference::discovered_offset,
                                            _next_discovered);
}

void DiscoveredListIterator::complete_enqueue() {
  if (_prev_discovered != NULL) {
    // This is the last object.
    // Swap refs_list into pending list and set obj's
    // discovered to what we read from the pending list.
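    // The discovered chain is spliced onto the global pending list in one
    // atomic swap; the previously pending References are then chained after
    // the last Reference we discovered (the store just below).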
    oop old = Universe::swap_reference_pending_list(_refs_list.head());
    HeapAccess<AS_NO_KEEPALIVE>::oop_store_at(_prev_discovered, java_lang_ref_Reference::discovered_offset, old);
  }
}

inline void log_dropped_ref(const DiscoveredListIterator& iter, const char* reason) {
  if (log_develop_is_enabled(Trace, gc, ref)) {
    ResourceMark rm;
    log_develop_trace(gc, ref)("Dropping %s reference " PTR_FORMAT ": %s",
                               reason, p2i(iter.obj()),
                               iter.obj()->klass()->internal_name());
  }
}

inline void log_enqueued_ref(const DiscoveredListIterator& iter, const char* reason) {
  if (log_develop_is_enabled(Trace, gc, ref)) {
    ResourceMark rm;
    log_develop_trace(gc, ref)("Enqueue %s reference (" INTPTR_FORMAT ": %s)",
                               reason, p2i(iter.obj()), iter.obj()->klass()->internal_name());
  }
  assert(oopDesc::is_oop(iter.obj()), "Adding a bad reference");
}

size_t ReferenceProcessor::process_soft_ref_reconsider_work(DiscoveredList& refs_list,
                                                            ReferencePolicy* policy,
                                                            BoolObjectClosure* is_alive,
                                                            OopClosure* keep_alive,
                                                            VoidClosure* complete_gc) {
  assert(policy != NULL, "Must have a non-NULL policy");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  // Decide which softly reachable refs should be kept alive.
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
    bool referent_is_dead = (iter.referent() != NULL) && !iter.is_referent_alive();
    if (referent_is_dead &&
        !policy->should_clear_reference(iter.obj(), _soft_ref_timestamp_clock)) {
      log_dropped_ref(iter, "by policy");
      // Remove Reference object from list
      iter.remove();
      // keep the referent around
      iter.make_referent_alive();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Close the reachable set
  complete_gc->do_void();

  log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " dead Refs out of " SIZE_FORMAT " discovered Refs by policy, from list " INTPTR_FORMAT,
                             iter.removed(), iter.processed(), p2i(&refs_list));
  return iter.removed();
}

size_t ReferenceProcessor::process_soft_weak_final_refs_work(DiscoveredList& refs_list,
                                                             BoolObjectClosure* is_alive,
                                                             OopClosure* keep_alive,
                                                             bool do_enqueue_and_clear) {
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
    if (iter.referent() == NULL) {
      // Reference has been cleared since discovery; only possible if
      // discovery is not atomic (checked by load_ptrs). Remove
      // reference from list.
      log_dropped_ref(iter, "cleared");
      iter.remove();
      iter.move_to_next();
    } else if (iter.is_referent_alive()) {
      // The referent is reachable after all.
      // Remove reference from list.
      log_dropped_ref(iter, "reachable");
      iter.remove();
      // Update the referent pointer as necessary. Note that this
      // should not entail any recursive marking because the
      // referent must already have been traversed.
      iter.make_referent_alive();
      iter.move_to_next();
    } else {
      if (do_enqueue_and_clear) {
        iter.clear_referent();
        iter.enqueue();
        log_enqueued_ref(iter, "cleared");
      }
      // Keep in discovered list
      iter.next();
    }
  }
  if (do_enqueue_and_clear) {
    iter.complete_enqueue();
    refs_list.clear();
  }

  log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " active Refs out of " SIZE_FORMAT
                             " Refs in discovered list " INTPTR_FORMAT,
                             iter.removed(), iter.processed(), p2i(&refs_list));
  return iter.removed();
}

size_t ReferenceProcessor::process_final_keep_alive_work(DiscoveredList& refs_list,
                                                         OopClosure* keep_alive,
                                                         VoidClosure* complete_gc) {
  DiscoveredListIterator iter(refs_list, keep_alive, NULL);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
    // keep the referent and followers around
    iter.make_referent_alive();

    // Self-loop next, to mark the FinalReference not active.
    assert(java_lang_ref_Reference::next(iter.obj()) == NULL, "enqueued FinalReference");
    java_lang_ref_Reference::set_next_raw(iter.obj(), iter.obj());

    iter.enqueue();
    log_enqueued_ref(iter, "Final");
    iter.next();
  }
  iter.complete_enqueue();
  // Close the reachable set
  complete_gc->do_void();
  refs_list.clear();

  assert(iter.removed() == 0, "This phase does not remove anything.");
  return iter.removed();
}

size_t ReferenceProcessor::process_phantom_refs_work(DiscoveredList& refs_list,
                                                     BoolObjectClosure* is_alive,
                                                     OopClosure* keep_alive,
                                                     VoidClosure* complete_gc) {
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));

    oop const referent = iter.referent();

    if (referent == NULL || iter.is_referent_alive()) {
      iter.make_referent_alive();
      iter.remove();
      iter.move_to_next();
    } else {
      iter.clear_referent();
      iter.enqueue();
      log_enqueued_ref(iter, "cleared Phantom");
      iter.next();
    }
  }
  iter.complete_enqueue();
  // Close the reachable set; needed for collectors whose keep-alive closures
  // do not immediately complete their work.
  complete_gc->do_void();
  refs_list.clear();

  return iter.removed();
}

void
ReferenceProcessor::clear_discovered_references(DiscoveredList& refs_list) {
  oop obj = NULL;
  oop next = refs_list.head();
  while (next != obj) {
    obj = next;
    next = java_lang_ref_Reference::discovered(obj);
    java_lang_ref_Reference::set_discovered_raw(obj, NULL);
  }
  refs_list.clear();
}

void ReferenceProcessor::abandon_partial_discovery() {
  // loop over the lists
  for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) {
    if ((i % _max_num_queues) == 0) {
      log_develop_trace(gc, ref)("Abandoning %s discovered list", list_name(i));
    }
    clear_discovered_references(_discovered_refs[i]);
  }
}

size_t ReferenceProcessor::total_reference_count(ReferenceType type) const {
  DiscoveredList* list = NULL;

  switch (type) {
    case REF_SOFT:
      list = _discoveredSoftRefs;
      break;
    case REF_WEAK:
      list = _discoveredWeakRefs;
      break;
    case REF_FINAL:
      list = _discoveredFinalRefs;
      break;
    case REF_PHANTOM:
      list = _discoveredPhantomRefs;
      break;
    case REF_OTHER:
    case REF_NONE:
    default:
      ShouldNotReachHere();
  }
  return total_count(list);
}

class RefProcPhase1Task : public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase1Task(ReferenceProcessor& ref_processor,
                    ReferenceProcessorPhaseTimes* phase_times,
                    ReferencePolicy* policy)
    : ProcessTask(ref_processor, true /* marks_oops_alive */, phase_times),
      _policy(policy) { }

  virtual void work(uint worker_id,
                    BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    RefProcSubPhasesWorkerTimeTracker tt(ReferenceProcessor::SoftRefSubPhase1, _phase_times, worker_id);
    size_t const removed = _ref_processor.process_soft_ref_reconsider_work(_ref_processor._discoveredSoftRefs[worker_id],
                                                                           _policy,
                                                                           &is_alive,
                                                                           &keep_alive,
                                                                           &complete_gc);
    _phase_times->add_ref_cleared(REF_SOFT, removed);
  }
private:
  ReferencePolicy* _policy;
};

class RefProcPhase2Task: public AbstractRefProcTaskExecutor::ProcessTask {
  void run_phase2(uint worker_id,
                  DiscoveredList list[],
                  BoolObjectClosure& is_alive,
                  OopClosure& keep_alive,
                  bool do_enqueue_and_clear,
                  ReferenceType ref_type) {
    size_t const removed = _ref_processor.process_soft_weak_final_refs_work(list[worker_id],
                                                                            &is_alive,
                                                                            &keep_alive,
                                                                            do_enqueue_and_clear);
    _phase_times->add_ref_cleared(ref_type, removed);
  }

public:
  RefProcPhase2Task(ReferenceProcessor& ref_processor,
                    ReferenceProcessorPhaseTimes* phase_times)
    : ProcessTask(ref_processor, false /* marks_oops_alive */, phase_times) { }

  virtual void work(uint worker_id,
                    BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc) {
    RefProcWorkerTimeTracker t(_phase_times->phase2_worker_time_sec(), worker_id);
    {
      RefProcSubPhasesWorkerTimeTracker tt(ReferenceProcessor::SoftRefSubPhase2, _phase_times, worker_id);
      run_phase2(worker_id, _ref_processor._discoveredSoftRefs, is_alive, keep_alive, true /* do_enqueue_and_clear */, REF_SOFT);
    }
    {
      RefProcSubPhasesWorkerTimeTracker tt(ReferenceProcessor::WeakRefSubPhase2, _phase_times, worker_id);
      run_phase2(worker_id, _ref_processor._discoveredWeakRefs, is_alive, keep_alive, true /* do_enqueue_and_clear */, REF_WEAK);
    }
    {
      RefProcSubPhasesWorkerTimeTracker tt(ReferenceProcessor::FinalRefSubPhase2, _phase_times, worker_id);
      run_phase2(worker_id, _ref_processor._discoveredFinalRefs, is_alive, keep_alive, false /* do_enqueue_and_clear */, REF_FINAL);
    }
    // Close the reachable set; needed for collectors whose keep-alive closures
    // do not immediately complete their work.
    complete_gc.do_void();
  }
};

class RefProcPhase3Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase3Task(ReferenceProcessor& ref_processor,
                    ReferenceProcessorPhaseTimes* phase_times)
    : ProcessTask(ref_processor, true /* marks_oops_alive */, phase_times) { }

  virtual void work(uint worker_id,
                    BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    RefProcSubPhasesWorkerTimeTracker tt(ReferenceProcessor::FinalRefSubPhase3, _phase_times, worker_id);
    _ref_processor.process_final_keep_alive_work(_ref_processor._discoveredFinalRefs[worker_id], &keep_alive, &complete_gc);
  }
};

class RefProcPhase4Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase4Task(ReferenceProcessor& ref_processor,
                    ReferenceProcessorPhaseTimes* phase_times)
    : ProcessTask(ref_processor, false /* marks_oops_alive */, phase_times) { }

  virtual void work(uint worker_id,
                    BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    RefProcSubPhasesWorkerTimeTracker tt(ReferenceProcessor::PhantomRefSubPhase4, _phase_times, worker_id);
    size_t const removed = _ref_processor.process_phantom_refs_work(_ref_processor._discoveredPhantomRefs[worker_id],
                                                                    &is_alive,
                                                                    &keep_alive,
                                                                    &complete_gc);
    _phase_times->add_ref_cleared(REF_PHANTOM, removed);
  }
};

void ReferenceProcessor::log_reflist(const char* prefix, DiscoveredList list[], uint num_active_queues) {
  LogTarget(Trace, gc, ref) lt;

  if (!lt.is_enabled()) {
    return;
  }

  size_t total = 0;

  LogStream ls(lt);
  ls.print("%s", prefix);
  for (uint i = 0; i < num_active_queues; i++) {
    ls.print(SIZE_FORMAT " ", list[i].length());
    total += list[i].length();
  }
  ls.print_cr("(" SIZE_FORMAT ")", total);
}

#ifndef PRODUCT
void ReferenceProcessor::log_reflist_counts(DiscoveredList ref_lists[], uint num_active_queues) {
  if (!log_is_enabled(Trace, gc, ref)) {
    return;
  }

  log_reflist("", ref_lists, num_active_queues);
#ifdef ASSERT
  for (uint i = num_active_queues; i < _max_num_queues; i++) {
    assert(ref_lists[i].length() == 0, SIZE_FORMAT " unexpected References in %u",
           ref_lists[i].length(), i);
  }
#endif
}
#endif

void ReferenceProcessor::set_active_mt_degree(uint v) {
  _num_queues = v;
  _next_id = 0;
}

bool ReferenceProcessor::need_balance_queues(DiscoveredList refs_lists[]) {
  assert(_processing_is_mt, "why balance non-mt processing?");
  // _num_queues is the processing degree. Only list entries up to
  // _num_queues will be processed, so any non-empty lists beyond
  // that must be redistributed to lists in that range. Even if not
  // needed for that, balancing may be desirable to eliminate poor
  // distribution of references among the lists.
  if (ParallelRefProcBalancingEnabled) {
    return true;  // Configuration says do it.
  } else {
    // Configuration says don't balance, but if there are non-empty
    // lists beyond the processing degree, then must ignore the
    // configuration and balance anyway.
    for (uint i = _num_queues; i < _max_num_queues; ++i) {
      if (!refs_lists[i].is_empty()) {
        return true;  // Must balance despite configuration.
      }
    }
    return false;  // Safe to obey configuration and not balance.
  }
}

void ReferenceProcessor::maybe_balance_queues(DiscoveredList refs_lists[]) {
  assert(_processing_is_mt, "Should not call this otherwise");
  if (need_balance_queues(refs_lists)) {
    balance_queues(refs_lists);
  }
}

// Balances reference queues.
// Move entries from all queues[0, 1, ..., _max_num_q-1] to
// queues[0, 1, ..., _num_q-1] because only the first _num_q
// corresponding to the active workers will be processed.
void ReferenceProcessor::balance_queues(DiscoveredList ref_lists[])
{
  // calculate total length
  size_t total_refs = 0;
  log_develop_trace(gc, ref)("Balance ref_lists ");

  log_reflist_counts(ref_lists, _max_num_queues);

  for (uint i = 0; i < _max_num_queues; ++i) {
    total_refs += ref_lists[i].length();
  }
  size_t avg_refs = total_refs / _num_queues + 1;
  uint to_idx = 0;
  for (uint from_idx = 0; from_idx < _max_num_queues; from_idx++) {
    bool move_all = false;
    if (from_idx >= _num_queues) {
      move_all = ref_lists[from_idx].length() > 0;
    }
    while ((ref_lists[from_idx].length() > avg_refs) ||
           move_all) {
      assert(to_idx < _num_queues, "Sanity Check!");
      if (ref_lists[to_idx].length() < avg_refs) {
        // move superfluous refs
        size_t refs_to_move;
        // Move all the Ref's if the from queue will not be processed.
        if (move_all) {
          refs_to_move = MIN2(ref_lists[from_idx].length(),
                              avg_refs - ref_lists[to_idx].length());
        } else {
          refs_to_move = MIN2(ref_lists[from_idx].length() - avg_refs,
                              avg_refs - ref_lists[to_idx].length());
        }

        assert(refs_to_move > 0, "otherwise the code below will fail");

        oop move_head = ref_lists[from_idx].head();
        oop move_tail = move_head;
        oop new_head = move_head;
        // find an element to split the list on
        for (size_t j = 0; j < refs_to_move; ++j) {
          move_tail = new_head;
          new_head = java_lang_ref_Reference::discovered(new_head);
        }

        // Add the chain to the to list.
        if (ref_lists[to_idx].head() == NULL) {
          // to list is empty. Make a loop at the end.
          java_lang_ref_Reference::set_discovered_raw(move_tail, move_tail);
        } else {
          java_lang_ref_Reference::set_discovered_raw(move_tail, ref_lists[to_idx].head());
        }
        ref_lists[to_idx].set_head(move_head);
        ref_lists[to_idx].inc_length(refs_to_move);

        // Remove the chain from the from list.
        if (move_tail == new_head) {
          // We found the end of the from list.
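          // (move_tail's discovered field pointed back to itself, i.e. it was
          // the self-looped terminator), so the from list is now empty.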
          ref_lists[from_idx].set_head(NULL);
        } else {
          ref_lists[from_idx].set_head(new_head);
        }
        ref_lists[from_idx].dec_length(refs_to_move);
        if (ref_lists[from_idx].length() == 0) {
          break;
        }
      } else {
        to_idx = (to_idx + 1) % _num_queues;
      }
    }
  }
#ifdef ASSERT
  log_reflist_counts(ref_lists, _num_queues);
  size_t balanced_total_refs = 0;
  for (uint i = 0; i < _num_queues; ++i) {
    balanced_total_refs += ref_lists[i].length();
  }
  assert(total_refs == balanced_total_refs, "Balancing was incomplete");
#endif
}

bool ReferenceProcessor::is_mt_processing_set_up(AbstractRefProcTaskExecutor* task_executor) const {
  return task_executor != NULL && _processing_is_mt;
}

void ReferenceProcessor::process_soft_ref_reconsider(BoolObjectClosure* is_alive,
                                                     OopClosure* keep_alive,
                                                     VoidClosure* complete_gc,
                                                     AbstractRefProcTaskExecutor* task_executor,
                                                     ReferenceProcessorPhaseTimes* phase_times) {
  assert(!_processing_is_mt || task_executor != NULL, "Task executor must not be NULL when mt processing is set.");

  size_t const num_soft_refs = total_count(_discoveredSoftRefs);
  phase_times->set_ref_discovered(REF_SOFT, num_soft_refs);

  phase_times->set_processing_is_mt(_processing_is_mt);

  if (num_soft_refs == 0 || _current_soft_ref_policy == NULL) {
    log_debug(gc, ref)("Skipped phase1 of Reference Processing due to unavailable references");
    return;
  }

  RefProcMTDegreeAdjuster a(this, RefPhase1, num_soft_refs);

  if (_processing_is_mt) {
    RefProcBalanceQueuesTimeTracker tt(RefPhase1, phase_times);
    maybe_balance_queues(_discoveredSoftRefs);
  }

  RefProcPhaseTimeTracker tt(RefPhase1, phase_times);

  log_reflist("Phase1 Soft before", _discoveredSoftRefs, _max_num_queues);
  if (_processing_is_mt) {
    RefProcPhase1Task phase1(*this, phase_times, _current_soft_ref_policy);
    task_executor->execute(phase1, num_queues());
  } else {
    size_t removed = 0;

    RefProcSubPhasesWorkerTimeTracker tt2(SoftRefSubPhase1, phase_times, 0);
    for (uint i = 0; i < _max_num_queues; i++) {
      removed += process_soft_ref_reconsider_work(_discoveredSoftRefs[i], _current_soft_ref_policy,
                                                  is_alive, keep_alive, complete_gc);
    }

    phase_times->add_ref_cleared(REF_SOFT, removed);
  }
  log_reflist("Phase1 Soft after", _discoveredSoftRefs, _max_num_queues);
}

void ReferenceProcessor::process_soft_weak_final_refs(BoolObjectClosure* is_alive,
                                                      OopClosure* keep_alive,
                                                      VoidClosure* complete_gc,
                                                      AbstractRefProcTaskExecutor* task_executor,
                                                      ReferenceProcessorPhaseTimes* phase_times) {
  assert(!_processing_is_mt || task_executor != NULL, "Task executor must not be NULL when mt processing is set.");

  size_t const num_soft_refs = total_count(_discoveredSoftRefs);
  size_t const num_weak_refs = total_count(_discoveredWeakRefs);
  size_t const num_final_refs = total_count(_discoveredFinalRefs);
  size_t const num_total_refs = num_soft_refs + num_weak_refs + num_final_refs;
  phase_times->set_ref_discovered(REF_WEAK, num_weak_refs);
  phase_times->set_ref_discovered(REF_FINAL, num_final_refs);

  phase_times->set_processing_is_mt(_processing_is_mt);

  if (num_total_refs == 0) {
    log_debug(gc, ref)("Skipped phase2 of Reference Processing due to unavailable references");
    return;
  }

  RefProcMTDegreeAdjuster a(this, RefPhase2, num_total_refs);

  if (_processing_is_mt) {
    RefProcBalanceQueuesTimeTracker tt(RefPhase2, phase_times);
    maybe_balance_queues(_discoveredSoftRefs);
    maybe_balance_queues(_discoveredWeakRefs);
    maybe_balance_queues(_discoveredFinalRefs);
  }

  RefProcPhaseTimeTracker tt(RefPhase2, phase_times);

  log_reflist("Phase2 Soft before", _discoveredSoftRefs, _max_num_queues);
  log_reflist("Phase2 Weak before", _discoveredWeakRefs, _max_num_queues);
  log_reflist("Phase2 Final before", _discoveredFinalRefs, _max_num_queues);
  if (_processing_is_mt) {
    RefProcPhase2Task phase2(*this, phase_times);
    task_executor->execute(phase2, num_queues());
  } else {
    RefProcWorkerTimeTracker t(phase_times->phase2_worker_time_sec(), 0);
    {
      size_t removed = 0;

      RefProcSubPhasesWorkerTimeTracker tt2(SoftRefSubPhase2, phase_times, 0);
      for (uint i = 0; i < _max_num_queues; i++) {
        removed += process_soft_weak_final_refs_work(_discoveredSoftRefs[i], is_alive, keep_alive, true /* do_enqueue */);
      }

      phase_times->add_ref_cleared(REF_SOFT, removed);
    }
    {
      size_t removed = 0;

      RefProcSubPhasesWorkerTimeTracker tt2(WeakRefSubPhase2, phase_times, 0);
      for (uint i = 0; i < _max_num_queues; i++) {
        removed += process_soft_weak_final_refs_work(_discoveredWeakRefs[i], is_alive, keep_alive, true /* do_enqueue */);
      }

      phase_times->add_ref_cleared(REF_WEAK, removed);
    }
    {
      size_t removed = 0;

      RefProcSubPhasesWorkerTimeTracker tt2(FinalRefSubPhase2, phase_times, 0);
      for (uint i = 0; i < _max_num_queues; i++) {
        removed += process_soft_weak_final_refs_work(_discoveredFinalRefs[i], is_alive, keep_alive, false /* do_enqueue */);
      }

      phase_times->add_ref_cleared(REF_FINAL, removed);
    }
    complete_gc->do_void();
  }
  verify_total_count_zero(_discoveredSoftRefs, "SoftReference");
  verify_total_count_zero(_discoveredWeakRefs, "WeakReference");
  log_reflist("Phase2 Final after", _discoveredFinalRefs, _max_num_queues);
}

void ReferenceProcessor::process_final_keep_alive(OopClosure* keep_alive,
                                                  VoidClosure* complete_gc,
                                                  AbstractRefProcTaskExecutor* task_executor,
                                                  ReferenceProcessorPhaseTimes* phase_times) {
  assert(!_processing_is_mt || task_executor != NULL, "Task executor must not be NULL when mt processing is set.");

  size_t const num_final_refs = total_count(_discoveredFinalRefs);

  phase_times->set_processing_is_mt(_processing_is_mt);

  if (num_final_refs == 0) {
    log_debug(gc, ref)("Skipped phase3 of Reference Processing due to unavailable references");
    return;
  }

  RefProcMTDegreeAdjuster a(this, RefPhase3, num_final_refs);

  if (_processing_is_mt) {
    RefProcBalanceQueuesTimeTracker tt(RefPhase3, phase_times);
    maybe_balance_queues(_discoveredFinalRefs);
  }

  // Phase 3:
  // . Traverse referents of final references and keep them and followers alive.
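  // . Enqueue the FinalReferences on the pending list, marking them inactive
  //   by self-looping their next field, so the java.lang.ref machinery will
  //   eventually run their finalizers.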
  RefProcPhaseTimeTracker tt(RefPhase3, phase_times);

  if (_processing_is_mt) {
    RefProcPhase3Task phase3(*this, phase_times);
    task_executor->execute(phase3, num_queues());
  } else {
    RefProcSubPhasesWorkerTimeTracker tt2(FinalRefSubPhase3, phase_times, 0);
    for (uint i = 0; i < _max_num_queues; i++) {
      process_final_keep_alive_work(_discoveredFinalRefs[i], keep_alive, complete_gc);
    }
  }
  verify_total_count_zero(_discoveredFinalRefs, "FinalReference");
}

void ReferenceProcessor::process_phantom_refs(BoolObjectClosure* is_alive,
                                              OopClosure* keep_alive,
                                              VoidClosure* complete_gc,
                                              AbstractRefProcTaskExecutor* task_executor,
                                              ReferenceProcessorPhaseTimes* phase_times) {
  assert(!_processing_is_mt || task_executor != NULL, "Task executor must not be NULL when mt processing is set.");

  size_t const num_phantom_refs = total_count(_discoveredPhantomRefs);
  phase_times->set_ref_discovered(REF_PHANTOM, num_phantom_refs);

  phase_times->set_processing_is_mt(_processing_is_mt);

  if (num_phantom_refs == 0) {
    log_debug(gc, ref)("Skipped phase4 of Reference Processing due to unavailable references");
    return;
  }

  RefProcMTDegreeAdjuster a(this, RefPhase4, num_phantom_refs);

  if (_processing_is_mt) {
    RefProcBalanceQueuesTimeTracker tt(RefPhase4, phase_times);
    maybe_balance_queues(_discoveredPhantomRefs);
  }

  // Phase 4: Walk phantom references appropriately.
  RefProcPhaseTimeTracker tt(RefPhase4, phase_times);

  log_reflist("Phase4 Phantom before", _discoveredPhantomRefs, _max_num_queues);
  if (_processing_is_mt) {
    RefProcPhase4Task phase4(*this, phase_times);
    task_executor->execute(phase4, num_queues());
  } else {
    size_t removed = 0;

    RefProcSubPhasesWorkerTimeTracker tt(PhantomRefSubPhase4, phase_times, 0);
    for (uint i = 0; i < _max_num_queues; i++) {
      removed += process_phantom_refs_work(_discoveredPhantomRefs[i], is_alive, keep_alive, complete_gc);
    }

    phase_times->add_ref_cleared(REF_PHANTOM, removed);
  }
  verify_total_count_zero(_discoveredPhantomRefs, "PhantomReference");
}

inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt) {
  uint id = 0;
  // Determine the queue index to use for this object.
  if (_discovery_is_mt) {
    // During a multi-threaded discovery phase,
    // each thread saves to its "own" list.
    Thread* thr = Thread::current();
    id = thr->as_Worker_thread()->id();
  } else {
    // single-threaded discovery, we save in round-robin
    // fashion to each of the lists.
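    // (Round-robin only matters when processing will be multi-threaded;
    // otherwise everything is discovered onto list 0.)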
    if (_processing_is_mt) {
      id = next_id();
    }
  }
  assert(id < _max_num_queues, "Id is out of bounds (id %u, max id %u)", id, _max_num_queues);

  // Get the discovered queue to which we will add
  DiscoveredList* list = NULL;
  switch (rt) {
    case REF_OTHER:
      // Unknown reference type, no special treatment
      break;
    case REF_SOFT:
      list = &_discoveredSoftRefs[id];
      break;
    case REF_WEAK:
      list = &_discoveredWeakRefs[id];
      break;
    case REF_FINAL:
      list = &_discoveredFinalRefs[id];
      break;
    case REF_PHANTOM:
      list = &_discoveredPhantomRefs[id];
      break;
    case REF_NONE:
      // we should not reach here if we are an InstanceRefKlass
    default:
      ShouldNotReachHere();
  }
  log_develop_trace(gc, ref)("Thread %d gets list " INTPTR_FORMAT, id, p2i(list));
  return list;
}

inline void
ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
                                              oop obj,
                                              HeapWord* discovered_addr) {
  assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller");
  // First we must make sure this object is only enqueued once. CAS in a non null
  // discovered_addr.
  oop current_head = refs_list.head();
  // The last ref must have its discovered field pointing to itself.
  oop next_discovered = (current_head != NULL) ? current_head : obj;

  oop retest = HeapAccess<AS_NO_KEEPALIVE>::oop_atomic_cmpxchg(discovered_addr, oop(NULL), next_discovered);

  if (retest == NULL) {
    // This thread just won the right to enqueue the object.
    // We have separate lists for enqueueing, so no synchronization
    // is necessary.
    refs_list.set_head(obj);
    refs_list.inc_length(1);

    log_develop_trace(gc, ref)("Discovered reference (mt) (" INTPTR_FORMAT ": %s)",
                               p2i(obj), obj->klass()->internal_name());
  } else {
    // If retest was non NULL, another thread beat us to it:
    // The reference has already been discovered...
    log_develop_trace(gc, ref)("Already discovered reference (" INTPTR_FORMAT ": %s)",
                               p2i(obj), obj->klass()->internal_name());
  }
}

#ifndef PRODUCT
// Non-atomic (i.e. concurrent) discovery might allow us
// to observe j.l.References with NULL referents, being those
// cleared concurrently by mutators during (or after) discovery.
void ReferenceProcessor::verify_referent(oop obj) {
  bool da = discovery_is_atomic();
  oop referent = java_lang_ref_Reference::referent(obj);
  assert(da ? oopDesc::is_oop(referent) : oopDesc::is_oop_or_null(referent),
         "Bad referent " INTPTR_FORMAT " found in Reference "
         INTPTR_FORMAT " during %satomic discovery ",
         p2i(referent), p2i(obj), da ? "" : "non-");
}
#endif

bool ReferenceProcessor::is_subject_to_discovery(oop const obj) const {
  return _is_subject_to_discovery->do_object_b(obj);
}

// We mention two of several possible choices here:
// #0: if the reference object is not in the "originating generation"
//     (or part of the heap being collected, indicated by our "span"),
//     we don't treat it specially (i.e. we scan it as we would
//     a normal oop, treating its references as strong references).
//     This means that references can't be discovered unless their
//     referent is also in the same span.
//     This is the simplest, most "local" and most conservative approach,
//     albeit one that may cause weak references to be enqueued least promptly.
//     We call this choice the "ReferenceBasedDiscovery" policy.
// #1: the reference object may be in any generation (span), but if
//     the referent is in the generation (span) being currently collected
//     then we can discover the reference object, provided
//     the object has not already been discovered by
//     a different concurrently running collector (as may be the
//     case, for instance, if the reference object is in CMS and
//     the referent in DefNewGeneration), and provided the processing
//     of this reference object by the current collector will
//     appear atomic to every other collector in the system.
//     (Thus, for instance, a concurrent collector may not
//     discover references in other generations even if the
//     referent is in its own generation). This policy may,
//     in certain cases, enqueue references somewhat sooner than
//     might Policy #0 above, but at marginally increased cost
//     and complexity in processing these references.
//     We call this choice the "ReferentBasedDiscovery" policy.
bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
  // Make sure we are discovering refs (rather than processing discovered refs).
  if (!_discovering_refs || !RegisterReferences) {
    return false;
  }

  if ((rt == REF_FINAL) && (java_lang_ref_Reference::next(obj) != NULL)) {
    // Don't rediscover non-active FinalReferences.
    return false;
  }

  if (RefDiscoveryPolicy == ReferenceBasedDiscovery &&
      !is_subject_to_discovery(obj)) {
    // Reference is not in the originating generation;
    // don't treat it specially (i.e. we want to scan it as a normal
    // object with strong references).
    return false;
  }

  // We only discover references whose referents are not (yet)
  // known to be strongly reachable.
  if (is_alive_non_header() != NULL) {
    verify_referent(obj);
    if (is_alive_non_header()->do_object_b(java_lang_ref_Reference::referent(obj))) {
      return false;  // referent is reachable
    }
  }
  if (rt == REF_SOFT) {
    // For soft refs we can decide now if these are not
    // current candidates for clearing, in which case we
    // can mark through them now, rather than delaying that
    // to the reference-processing phase. Since all current
    // time-stamp policies advance the soft-ref clock only
    // at a full collection cycle, this is always currently
    // accurate.
    if (!_current_soft_ref_policy->should_clear_reference(obj, _soft_ref_timestamp_clock)) {
      return false;
    }
  }

  ResourceMark rm;  // Needed for tracing.

  HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr_raw(obj);
  const oop discovered = java_lang_ref_Reference::discovered(obj);
  assert(oopDesc::is_oop_or_null(discovered), "Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered));
  if (discovered != NULL) {
    // The reference has already been discovered...
    log_develop_trace(gc, ref)("Already discovered reference (" INTPTR_FORMAT ": %s)",
                               p2i(obj), obj->klass()->internal_name());
    if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
      // assumes that an object is not processed twice;
      // if it's been already discovered it must be on another
      // generation's discovered list; so we won't discover it.
      return false;
    } else {
      assert(RefDiscoveryPolicy == ReferenceBasedDiscovery,
             "Unrecognized policy");
      // Check assumption that an object is not potentially
      // discovered twice except by concurrent collectors that potentially
      // trace the same Reference object twice.
      assert(UseG1GC || UseShenandoahGC,
             "Only possible with a concurrent marking collector");
      return true;
    }
  }

  if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
    verify_referent(obj);
    // Discover if and only if EITHER:
    // .. reference is in our span, OR
    // .. we are an atomic collector and referent is in our span
    if (is_subject_to_discovery(obj) ||
        (discovery_is_atomic() &&
         is_subject_to_discovery(java_lang_ref_Reference::referent(obj)))) {
    } else {
      return false;
    }
  } else {
    assert(RefDiscoveryPolicy == ReferenceBasedDiscovery &&
           is_subject_to_discovery(obj), "code inconsistency");
  }

  // Get the right type of discovered queue head.
  DiscoveredList* list = get_discovered_list(rt);
  if (list == NULL) {
    return false;  // nothing special needs to be done
  }

  if (_discovery_is_mt) {
    add_to_discovered_list_mt(*list, obj, discovered_addr);
  } else {
    // We do a raw store here: the field will be visited later when processing
    // the discovered references.
    oop current_head = list->head();
    // The last ref must have its discovered field pointing to itself.
    oop next_discovered = (current_head != NULL) ? current_head : obj;

    assert(discovered == NULL, "control point invariant");
    RawAccess<>::oop_store(discovered_addr, next_discovered);
    list->set_head(obj);
    list->inc_length(1);

    log_develop_trace(gc, ref)("Discovered reference (" INTPTR_FORMAT ": %s)", p2i(obj), obj->klass()->internal_name());
  }
  assert(oopDesc::is_oop(obj), "Discovered a bad reference");
  verify_referent(obj);
  return true;
}

bool ReferenceProcessor::has_discovered_references() {
  for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) {
    if (!_discovered_refs[i].is_empty()) {
      return true;
    }
  }
  return false;
}

void ReferenceProcessor::preclean_discovered_references(BoolObjectClosure* is_alive,
                                                        OopClosure* keep_alive,
                                                        VoidClosure* complete_gc,
                                                        YieldClosure* yield,
                                                        GCTimer* gc_timer) {
  // These lists can be handled here in any order and, indeed, concurrently.
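  // Each list is walked with preclean_discovered_reflist(), which drops
  // References whose referents are already NULL or still strongly reachable,
  // and returns early whenever the yield closure asks us to.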

  // Soft references
  {
    GCTraceTime(Debug, gc, ref) tm("Preclean SoftReferences", gc_timer);
    log_reflist("SoftRef before: ", _discoveredSoftRefs, _max_num_queues);
    for (uint i = 0; i < _max_num_queues; i++) {
      if (yield->should_return()) {
        return;
      }
      if (preclean_discovered_reflist(_discoveredSoftRefs[i], is_alive,
                                      keep_alive, complete_gc, yield)) {
        log_reflist("SoftRef abort: ", _discoveredSoftRefs, _max_num_queues);
        return;
      }
    }
    log_reflist("SoftRef after: ", _discoveredSoftRefs, _max_num_queues);
  }

  // Weak references
  {
    GCTraceTime(Debug, gc, ref) tm("Preclean WeakReferences", gc_timer);
    log_reflist("WeakRef before: ", _discoveredWeakRefs, _max_num_queues);
    for (uint i = 0; i < _max_num_queues; i++) {
      if (yield->should_return()) {
        return;
      }
      if (preclean_discovered_reflist(_discoveredWeakRefs[i], is_alive,
                                      keep_alive, complete_gc, yield)) {
        log_reflist("WeakRef abort: ", _discoveredWeakRefs, _max_num_queues);
        return;
      }
    }
    log_reflist("WeakRef after: ", _discoveredWeakRefs, _max_num_queues);
  }

  // Final references
  {
    GCTraceTime(Debug, gc, ref) tm("Preclean FinalReferences", gc_timer);
    log_reflist("FinalRef before: ", _discoveredFinalRefs, _max_num_queues);
    for (uint i = 0; i < _max_num_queues; i++) {
      if (yield->should_return()) {
        return;
      }
      if (preclean_discovered_reflist(_discoveredFinalRefs[i], is_alive,
                                      keep_alive, complete_gc, yield)) {
        log_reflist("FinalRef abort: ", _discoveredFinalRefs, _max_num_queues);
        return;
      }
    }
    log_reflist("FinalRef after: ", _discoveredFinalRefs, _max_num_queues);
  }

  // Phantom references
  {
    GCTraceTime(Debug, gc, ref) tm("Preclean PhantomReferences", gc_timer);
    log_reflist("PhantomRef before: ", _discoveredPhantomRefs, _max_num_queues);
    for (uint i = 0; i < _max_num_queues; i++) {
      if (yield->should_return()) {
        return;
      }
      if (preclean_discovered_reflist(_discoveredPhantomRefs[i], is_alive,
                                      keep_alive, complete_gc, yield)) {
        log_reflist("PhantomRef abort: ", _discoveredPhantomRefs, _max_num_queues);
        return;
      }
    }
    log_reflist("PhantomRef after: ", _discoveredPhantomRefs, _max_num_queues);
  }
}

// Walk the given discovered ref list, and remove all reference objects
// whose referents are still alive, whose referents are NULL or which
// are not active (have a non-NULL next field). NOTE: When we are
// thus precleaning the ref lists (which happens single-threaded today),
// we do not disable refs discovery to honor the correct semantics of
// java.lang.Reference. As a result, we need to be careful below
// that ref removal steps interleave safely with ref discovery steps
// (in this thread).
bool ReferenceProcessor::preclean_discovered_reflist(DiscoveredList& refs_list,
                                                     BoolObjectClosure* is_alive,
                                                     OopClosure* keep_alive,
                                                     VoidClosure* complete_gc,
                                                     YieldClosure* yield) {
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    if (yield->should_return_fine_grain()) {
      return true;
    }
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    if (iter.referent() == NULL || iter.is_referent_alive()) {
      // The referent has been cleared, or is alive; we need to trace
      // and mark its cohort.
      log_develop_trace(gc, ref)("Precleaning Reference (" INTPTR_FORMAT ": %s)",
                                 p2i(iter.obj()), iter.obj()->klass()->internal_name());
      // Remove Reference object from list
      iter.remove();
      // Keep alive its cohort.
      iter.make_referent_alive();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Close the reachable set
  complete_gc->do_void();

  if (iter.processed() > 0) {
    log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " Refs out of " SIZE_FORMAT " Refs in discovered list " INTPTR_FORMAT,
                               iter.removed(), iter.processed(), p2i(&refs_list));
  }
  return false;
}

const char* ReferenceProcessor::list_name(uint i) {
  assert(i <= _max_num_queues * number_of_subclasses_of_ref(),
         "Out of bounds index");

  int j = i / _max_num_queues;
  switch (j) {
    case 0: return "SoftRef";
    case 1: return "WeakRef";
    case 2: return "FinalRef";
    case 3: return "PhantomRef";
  }
  ShouldNotReachHere();
  return NULL;
}

uint RefProcMTDegreeAdjuster::ergo_proc_thread_count(size_t ref_count,
                                                     uint max_threads,
                                                     RefProcPhases phase) const {
  assert(0 < max_threads, "must allow at least one thread");

  if (use_max_threads(phase) || (ReferencesPerThread == 0)) {
    return max_threads;
  }

  size_t thread_count = 1 + (ref_count / ReferencesPerThread);
  return (uint)MIN3(thread_count,
                    static_cast<size_t>(max_threads),
                    (size_t)os::active_processor_count());
}

bool RefProcMTDegreeAdjuster::use_max_threads(RefProcPhases phase) const {
  // Even a small number of references in either of those cases could produce large amounts of work.
  return (phase == ReferenceProcessor::RefPhase1 || phase == ReferenceProcessor::RefPhase3);
}

RefProcMTDegreeAdjuster::RefProcMTDegreeAdjuster(ReferenceProcessor* rp,
                                                 RefProcPhases phase,
                                                 size_t ref_count):
  _rp(rp),
  _saved_mt_processing(_rp->processing_is_mt()),
  _saved_num_queues(_rp->num_queues()) {
  if (!_rp->processing_is_mt() || !_rp->adjust_no_of_processing_threads() || (ReferencesPerThread == 0)) {
    return;
  }

  uint workers = ergo_proc_thread_count(ref_count, _rp->num_queues(), phase);

  _rp->set_mt_processing(workers > 1);
  _rp->set_active_mt_degree(workers);
}

RefProcMTDegreeAdjuster::~RefProcMTDegreeAdjuster() {
  // Revert to previous status.
  _rp->set_mt_processing(_saved_mt_processing);
  _rp->set_active_mt_degree(_saved_num_queues);
}