/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.inline.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"

ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy      = NULL;
jlong            ReferenceProcessor::_soft_ref_timestamp_clock     = 0;

void referenceProcessor_init() {
  ReferenceProcessor::init_statics();
}

void ReferenceProcessor::init_statics() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;

  // Initialize the soft ref timestamp clock.
  _soft_ref_timestamp_clock = now;
  // Also update the soft ref clock in j.l.r.SoftReference
  java_lang_ref_SoftReference::set_clock(_soft_ref_timestamp_clock);

  _always_clear_soft_ref_policy = new AlwaysClearPolicy();
  if (is_server_compilation_mode_vm()) {
    _default_soft_ref_policy = new LRUMaxHeapPolicy();
  } else {
    _default_soft_ref_policy = new LRUCurrentHeapPolicy();
  }
  if (_always_clear_soft_ref_policy == NULL || _default_soft_ref_policy == NULL) {
    vm_exit_during_initialization("Could not allocate reference policy object");
  }
  guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery ||
            RefDiscoveryPolicy == ReferentBasedDiscovery,
            "Unrecognized RefDiscoveryPolicy");
}

void ReferenceProcessor::enable_discovery(bool check_no_refs) {
#ifdef ASSERT
  // Verify that we're not currently discovering refs
  assert(!_discovering_refs, "nested call?");

  if (check_no_refs) {
    // Verify that the discovered lists are empty
    verify_no_references_recorded();
  }
#endif // ASSERT

  // Someone could have modified the value of the static
  // field in the j.l.r.SoftReference class that holds the
  // soft reference timestamp clock using reflection or
  // Unsafe between GCs. Unconditionally update the static
  // field in ReferenceProcessor here so that we use the new
  // value during reference discovery.

  _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock();
  _discovering_refs = true;
}
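// Illustrative sketch (not a normative contract) of how a collector drives
// this class over a GC cycle, using only the entry points defined in this
// file:
//
//   rp->enable_discovery(true /* check_no_refs */);
//   ...   // trace the heap; discover_reference() captures eligible References
//   rp->process_discovered_references(is_alive, keep_alive, complete_gc,
//                                     task_executor, phase_times);
//
// enable_discovery() deliberately re-reads the soft reference clock, since
// mutators may have changed the j.l.r.SoftReference static field between GCs.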
ReferenceProcessor::ReferenceProcessor(BoolObjectClosure* is_subject_to_discovery,
                                       bool      mt_processing,
                                       uint      mt_processing_degree,
                                       bool      mt_discovery,
                                       uint      mt_discovery_degree,
                                       bool      atomic_discovery,
                                       BoolObjectClosure* is_alive_non_header) :
  _is_subject_to_discovery(is_subject_to_discovery),
  _discovering_refs(false),
  _enqueuing_is_done(false),
  _is_alive_non_header(is_alive_non_header),
  _processing_is_mt(mt_processing),
  _next_id(0)
{
  assert(is_subject_to_discovery != NULL, "must be set");

  _discovery_is_atomic = atomic_discovery;
  _discovery_is_mt     = mt_discovery;
  _num_queues          = MAX2(1U, mt_processing_degree);
  _max_num_queues      = MAX2(_num_queues, mt_discovery_degree);
  _discovered_refs     = NEW_C_HEAP_ARRAY(DiscoveredList,
            _max_num_queues * number_of_subclasses_of_ref(), mtGC);

  if (_discovered_refs == NULL) {
    vm_exit_during_initialization("Could not allocate RefProc Array");
  }
  _discoveredSoftRefs    = &_discovered_refs[0];
  _discoveredWeakRefs    = &_discoveredSoftRefs[_max_num_queues];
  _discoveredFinalRefs   = &_discoveredWeakRefs[_max_num_queues];
  _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_queues];

  // Initialize all entries to NULL
  for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) {
    _discovered_refs[i].set_head(NULL);
    _discovered_refs[i].set_length(0);
  }

  setup_policy(false /* default soft ref policy */);
}
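// The discovered lists are one flat C-heap array of
// _max_num_queues * number_of_subclasses_of_ref() DiscoveredLists, indexed
// first by reference type and then by queue, as set up above:
//
//   [ Soft_0 .. Soft_{max-1} | Weak_0 .. | Final_0 .. | Phantom_0 .. ]
//
// so, for example, _discoveredWeakRefs[i] aliases
// _discovered_refs[_max_num_queues + i].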
#ifndef PRODUCT
void ReferenceProcessor::verify_no_references_recorded() {
  guarantee(!_discovering_refs, "Discovering refs?");
  for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) {
    guarantee(_discovered_refs[i].is_empty(),
              "Found non-empty discovered list at %u", i);
  }
}
#endif

void ReferenceProcessor::weak_oops_do(OopClosure* f) {
  for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) {
    if (UseCompressedOops) {
      f->do_oop((narrowOop*)_discovered_refs[i].adr_head());
    } else {
      f->do_oop((oop*)_discovered_refs[i].adr_head());
    }
  }
}

void ReferenceProcessor::update_soft_ref_master_clock() {
  // Update (advance) the soft ref master clock field. This must be done
  // after processing the soft ref list.

  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  jlong soft_ref_clock = java_lang_ref_SoftReference::clock();
  assert(soft_ref_clock == _soft_ref_timestamp_clock, "soft ref clocks out of sync");

  NOT_PRODUCT(
  if (now < _soft_ref_timestamp_clock) {
    log_warning(gc)("time warp: " JLONG_FORMAT " to " JLONG_FORMAT,
                    _soft_ref_timestamp_clock, now);
  }
  )
  // The values of now and _soft_ref_timestamp_clock are set using
  // javaTimeNanos(), which is guaranteed to be monotonically
  // non-decreasing provided the underlying platform provides such
  // a time source (and it is bug free).
  // In product mode, however, protect ourselves from non-monotonicity.
  if (now > _soft_ref_timestamp_clock) {
    _soft_ref_timestamp_clock = now;
    java_lang_ref_SoftReference::set_clock(now);
  }
  // Else leave clock stalled at its old value until time progresses
  // past clock value.
}

size_t ReferenceProcessor::total_count(DiscoveredList lists[]) const {
  size_t total = 0;
  for (uint i = 0; i < _max_num_queues; ++i) {
    total += lists[i].length();
  }
  return total;
}

ReferenceProcessorStats ReferenceProcessor::process_discovered_references(
  BoolObjectClosure*            is_alive,
  OopClosure*                   keep_alive,
  VoidClosure*                  complete_gc,
  AbstractRefProcTaskExecutor*  task_executor,
  ReferenceProcessorPhaseTimes* phase_times) {

  double start_time = os::elapsedTime();

  assert(!enqueuing_is_done(), "If here enqueuing should not be complete");
  // Stop treating discovered references specially.
  disable_discovery();

  // If discovery was concurrent, someone could have modified
  // the value of the static field in the j.l.r.SoftReference
  // class that holds the soft reference timestamp clock using
  // reflection or Unsafe between when discovery was enabled and
  // now. Unconditionally update the static field in ReferenceProcessor
  // here so that we use the new value during processing of the
  // discovered soft refs.

  _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock();

  ReferenceProcessorStats stats(total_count(_discoveredSoftRefs),
                                total_count(_discoveredWeakRefs),
                                total_count(_discoveredFinalRefs),
                                total_count(_discoveredPhantomRefs));

  // Soft references
  {
    RefProcPhaseTimesTracker tt(REF_SOFT, phase_times, this);
    process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true,
                               is_alive, keep_alive, complete_gc, task_executor, phase_times);
  }

  update_soft_ref_master_clock();

  // Weak references
  {
    RefProcPhaseTimesTracker tt(REF_WEAK, phase_times, this);
    process_discovered_reflist(_discoveredWeakRefs, NULL, true,
                               is_alive, keep_alive, complete_gc, task_executor, phase_times);
  }

  // Final references
  {
    RefProcPhaseTimesTracker tt(REF_FINAL, phase_times, this);
    process_discovered_reflist(_discoveredFinalRefs, NULL, false,
                               is_alive, keep_alive, complete_gc, task_executor, phase_times);
  }

  // Phantom references
  {
    RefProcPhaseTimesTracker tt(REF_PHANTOM, phase_times, this);
    process_discovered_reflist(_discoveredPhantomRefs, NULL, true,
                               is_alive, keep_alive, complete_gc, task_executor, phase_times);
  }

  if (task_executor != NULL) {
    // Record the work done by the parallel workers.
    task_executor->set_single_threaded_mode();
  }

  phase_times->set_total_time_ms((os::elapsedTime() - start_time) * 1000);

  return stats;
}
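// A DiscoveredList is a singly-linked list threaded through the References'
// own discovered fields. The last element's discovered field points to the
// element itself (a self-loop) rather than to NULL, so a non-NULL discovered
// field doubles as the "already on some list" marker used during discovery
// (see discover_reference() and add_to_discovered_list_mt() below).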
void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
  _current_discovered_addr = java_lang_ref_Reference::discovered_addr_raw(_current_discovered);
  oop discovered = java_lang_ref_Reference::discovered(_current_discovered);
  assert(_current_discovered_addr && oopDesc::is_oop_or_null(discovered),
         "Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered));
  _next_discovered = discovered;

  _referent_addr = java_lang_ref_Reference::referent_addr_raw(_current_discovered);
  _referent = java_lang_ref_Reference::referent(_current_discovered);
  assert(Universe::heap()->is_in_reserved_or_null(_referent),
         "Wrong oop found in java.lang.Reference object");
  assert(allow_null_referent ?
             oopDesc::is_oop_or_null(_referent)
           : oopDesc::is_oop(_referent),
         "Expected an oop%s for referent field at " PTR_FORMAT,
         (allow_null_referent ? " or NULL" : ""),
         p2i(_referent));
}

void DiscoveredListIterator::remove() {
  assert(oopDesc::is_oop(_current_discovered), "Dropping a bad reference");
  RawAccess<>::oop_store(_current_discovered_addr, oop(NULL));

  // First _prev_next ref actually points into DiscoveredList (gross).
  oop new_next;
  if (_next_discovered == _current_discovered) {
    // At the end of the list, we should make _prev point to itself.
    // If _ref is the first ref, then _prev_next will be in the DiscoveredList,
    // and _prev will be NULL.
    new_next = _prev_discovered;
  } else {
    new_next = _next_discovered;
  }
  // Remove Reference object from discovered list. Note that G1 does not need a
  // pre-barrier here because we know the Reference has already been found/marked,
  // that's how it ended up in the discovered list in the first place.
  RawAccess<>::oop_store(_prev_discovered_addr, new_next);
  NOT_PRODUCT(_removed++);
  _refs_list.dec_length(1);
}

void DiscoveredListIterator::clear_referent() {
  RawAccess<>::oop_store(_referent_addr, oop(NULL));
}

void DiscoveredListIterator::enqueue() {
  // Self-loop next, so as to make Ref not active.
  java_lang_ref_Reference::set_next_raw(_current_discovered, _current_discovered);

  HeapAccess<AS_NO_KEEPALIVE>::oop_store_at(_current_discovered,
                                            java_lang_ref_Reference::discovered_offset,
                                            _next_discovered);
}

void DiscoveredListIterator::complete_enqueue() {
  if (_prev_discovered != NULL) {
    // This is the last object.
    // Swap refs_list into pending list and set obj's
    // discovered to what we read from the pending list.
    oop old = Universe::swap_reference_pending_list(_refs_list.head());
    HeapAccess<AS_NO_KEEPALIVE>::oop_store_at(_prev_discovered, java_lang_ref_Reference::discovered_offset, old);
  }
}

// NOTE: process_phase*() are largely similar, and at a high level
// merely iterate over the extant list applying a predicate to
// each of its elements and possibly removing that element from the
// list and applying some further closures to that element.
// We should consider the possibility of replacing these
// process_phase*() methods by abstracting them into
// a single general iterator invocation that receives appropriate
// closures that accomplish this work.
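// A hypothetical shape for such an abstraction (illustrative sketch only;
// no such helper exists in this file, and the predicate plumbing is
// simplified compared to what the phases below actually need):
//
//   template <typename KeepPredicate>
//   void iterate_and_filter(DiscoveredList& list, KeepPredicate keep,
//                           OopClosure* keep_alive, BoolObjectClosure* is_alive) {
//     DiscoveredListIterator iter(list, keep_alive, is_alive);
//     while (iter.has_next()) {
//       iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
//       if (keep(iter)) {
//         iter.next();
//       } else {
//         iter.remove();
//         iter.make_referent_alive();
//         iter.move_to_next();
//       }
//     }
//   }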
// (SoftReferences only) Traverse the list and remove any SoftReferences whose
// referents are not alive, but that should be kept alive for policy reasons.
// Keep alive the transitive closure of all such referents.
void
ReferenceProcessor::process_phase1(DiscoveredList&    refs_list,
                                   ReferencePolicy*   policy,
                                   BoolObjectClosure* is_alive,
                                   OopClosure*        keep_alive,
                                   VoidClosure*       complete_gc) {
  assert(policy != NULL, "Must have a non-NULL policy");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  // Decide which softly reachable refs should be kept alive.
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */));
    bool referent_is_dead = (iter.referent() != NULL) && !iter.is_referent_alive();
    if (referent_is_dead &&
        !policy->should_clear_reference(iter.obj(), _soft_ref_timestamp_clock)) {
      log_develop_trace(gc, ref)("Dropping reference (" INTPTR_FORMAT ": %s" ") by policy",
                                 p2i(iter.obj()), iter.obj()->klass()->internal_name());
      // Remove Reference object from list
      iter.remove();
      // keep the referent around
      iter.make_referent_alive();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Close the reachable set
  complete_gc->do_void();
  log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " dead Refs out of " SIZE_FORMAT " discovered Refs by policy, from list " INTPTR_FORMAT,
                             iter.removed(), iter.processed(), p2i(&refs_list));
}

void ReferenceProcessor::process_phase2(DiscoveredList&    refs_list,
                                        BoolObjectClosure* is_alive,
                                        OopClosure*        keep_alive,
                                        VoidClosure*       complete_gc) {
  if (discovery_is_atomic()) {
    // complete_gc is ignored in this case for this phase
    pp2_work(refs_list, is_alive, keep_alive);
  } else {
    assert(complete_gc != NULL, "Error");
    pp2_work_concurrent_discovery(refs_list, is_alive,
                                  keep_alive, complete_gc);
  }
}
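// "Atomic" discovery means discovery happens with mutators stopped, so a
// discovered Reference cannot have had its referent cleared, nor have become
// inactive, concurrently. With concurrent (non-atomic) discovery mutators may
// race with the collector, which is why pp2_work_concurrent_discovery() below
// must tolerate NULL referents and non-NULL next fields, and why it needs a
// complete_gc closure while pp2_work() does not.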
// Traverse the list and remove any Refs that are not active, or
// whose referents are either alive or NULL.
void
ReferenceProcessor::pp2_work(DiscoveredList&    refs_list,
                             BoolObjectClosure* is_alive,
                             OopClosure*        keep_alive) {
  assert(discovery_is_atomic(), "Error");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
    DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());)
    assert(next == NULL, "Should not discover inactive Reference");
    if (iter.is_referent_alive()) {
      log_develop_trace(gc, ref)("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)",
                                 p2i(iter.obj()), iter.obj()->klass()->internal_name());
      // The referent is reachable after all.
      // Remove Reference object from list.
      iter.remove();
      // Update the referent pointer as necessary: Note that this
      // should not entail any recursive marking because the
      // referent must already have been traversed.
      iter.make_referent_alive();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  NOT_PRODUCT(
    if (iter.processed() > 0) {
      log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " active Refs out of " SIZE_FORMAT
        " Refs in discovered list " INTPTR_FORMAT,
        iter.removed(), iter.processed(), p2i(&refs_list));
    }
  )
}

void
ReferenceProcessor::pp2_work_concurrent_discovery(DiscoveredList&    refs_list,
                                                  BoolObjectClosure* is_alive,
                                                  OopClosure*        keep_alive,
                                                  VoidClosure*       complete_gc) {
  assert(!discovery_is_atomic(), "Error");
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    HeapWord* next_addr = java_lang_ref_Reference::next_addr_raw(iter.obj());
    oop next = java_lang_ref_Reference::next(iter.obj());
    if ((iter.referent() == NULL || iter.is_referent_alive() ||
         next != NULL)) {
      assert(oopDesc::is_oop_or_null(next), "Expected an oop or NULL for next field at " PTR_FORMAT, p2i(next));
      // Remove Reference object from list
      iter.remove();
      // Trace the cohorts
      iter.make_referent_alive();
      if (UseCompressedOops) {
        keep_alive->do_oop((narrowOop*)next_addr);
      } else {
        keep_alive->do_oop((oop*)next_addr);
      }
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Now close the newly reachable set
  complete_gc->do_void();
  NOT_PRODUCT(
    if (iter.processed() > 0) {
      log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " active Refs out of " SIZE_FORMAT
        " Refs in discovered list " INTPTR_FORMAT,
        iter.removed(), iter.processed(), p2i(&refs_list));
    }
  )
}

void ReferenceProcessor::process_phase3(DiscoveredList&    refs_list,
                                        bool               clear_referent,
                                        BoolObjectClosure* is_alive,
                                        OopClosure*        keep_alive,
                                        VoidClosure*       complete_gc) {
  ResourceMark rm;
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
    if (clear_referent) {
      // NULL out referent pointer
      iter.clear_referent();
    } else {
      // keep the referent around
      iter.make_referent_alive();
    }
    iter.enqueue();
    log_develop_trace(gc, ref)("Adding %sreference (" INTPTR_FORMAT ": %s) as pending",
                               clear_referent ? "cleared " : "", p2i(iter.obj()), iter.obj()->klass()->internal_name());
    assert(oopDesc::is_oop(iter.obj(), UseConcMarkSweepGC), "Adding a bad reference");
    iter.next();
  }
  iter.complete_enqueue();
  // Close the reachable set
  complete_gc->do_void();
  // Clear the list.
  refs_list.set_head(NULL);
  refs_list.set_length(0);
}
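// Net effect of the three phases per reference type, given the arguments
// passed in process_discovered_references() above: Soft references are
// filtered by policy in phase 1 and cleared in phase 3; Weak and Phantom
// references are cleared in phase 3; Final references are not cleared
// (clear_referent == false) -- their referents are kept alive so that
// finalization can still run after the Reference is enqueued.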
void
ReferenceProcessor::clear_discovered_references(DiscoveredList& refs_list) {
  oop obj = NULL;
  oop next = refs_list.head();
  while (next != obj) {
    obj = next;
    next = java_lang_ref_Reference::discovered(obj);
    java_lang_ref_Reference::set_discovered_raw(obj, NULL);
  }
  refs_list.set_head(NULL);
  refs_list.set_length(0);
}

void ReferenceProcessor::abandon_partial_discovery() {
  // loop over the lists
  for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) {
    if ((i % _max_num_queues) == 0) {
      log_develop_trace(gc, ref)("Abandoning %s discovered list", list_name(i));
    }
    clear_discovered_references(_discovered_refs[i]);
  }
}

size_t ReferenceProcessor::total_reference_count(ReferenceType type) const {
  DiscoveredList* list = NULL;

  switch (type) {
    case REF_SOFT:
      list = _discoveredSoftRefs;
      break;
    case REF_WEAK:
      list = _discoveredWeakRefs;
      break;
    case REF_FINAL:
      list = _discoveredFinalRefs;
      break;
    case REF_PHANTOM:
      list = _discoveredPhantomRefs;
      break;
    case REF_OTHER:
    case REF_NONE:
    default:
      ShouldNotReachHere();
  }
  return total_count(list);
}

class RefProcPhase1Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase1Task(ReferenceProcessor&           ref_processor,
                    DiscoveredList                refs_lists[],
                    ReferencePolicy*              policy,
                    bool                          marks_oops_alive,
                    ReferenceProcessorPhaseTimes* phase_times)
    : ProcessTask(ref_processor, refs_lists, marks_oops_alive, phase_times),
      _policy(policy)
  { }
  virtual void work(unsigned int i, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    RefProcWorkerTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase1, _phase_times, i);

    _ref_processor.process_phase1(_refs_lists[i], _policy,
                                  &is_alive, &keep_alive, &complete_gc);
  }
private:
  ReferencePolicy* _policy;
};

class RefProcPhase2Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase2Task(ReferenceProcessor&           ref_processor,
                    DiscoveredList                refs_lists[],
                    bool                          marks_oops_alive,
                    ReferenceProcessorPhaseTimes* phase_times)
    : ProcessTask(ref_processor, refs_lists, marks_oops_alive, phase_times)
  { }
  virtual void work(unsigned int i, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    RefProcWorkerTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase2, _phase_times, i);

    _ref_processor.process_phase2(_refs_lists[i],
                                  &is_alive, &keep_alive, &complete_gc);
  }
};

class RefProcPhase3Task: public AbstractRefProcTaskExecutor::ProcessTask {
public:
  RefProcPhase3Task(ReferenceProcessor&           ref_processor,
                    DiscoveredList                refs_lists[],
                    bool                          clear_referent,
                    bool                          marks_oops_alive,
                    ReferenceProcessorPhaseTimes* phase_times)
    : ProcessTask(ref_processor, refs_lists, marks_oops_alive, phase_times),
      _clear_referent(clear_referent)
  { }
  virtual void work(unsigned int i, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc)
  {
    RefProcWorkerTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase3, _phase_times, i);

    _ref_processor.process_phase3(_refs_lists[i], _clear_referent,
                                  &is_alive, &keep_alive, &complete_gc);
  }
private:
  bool _clear_referent;
};
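// How these tasks are meant to be driven: an AbstractRefProcTaskExecutor
// runs a ProcessTask by invoking work(i, ...) once per worker with
// i < _num_queues, so each worker owns one DiscoveredList per phase. A
// minimal serial executor could therefore be sketched (hypothetically) as:
//
//   for (uint i = 0; i < num_queues; i++) {
//     task.work(i, is_alive, keep_alive, complete_gc);
//   }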
void ReferenceProcessor::log_reflist(const char* prefix, DiscoveredList list[], uint num_active_queues) {
  LogTarget(Trace, gc, ref) lt;

  if (!lt.is_enabled()) {
    return;
  }

  size_t total = 0;

  LogStream ls(lt);
  ls.print("%s", prefix);
  for (uint i = 0; i < num_active_queues; i++) {
    ls.print(SIZE_FORMAT " ", list[i].length());
    total += list[i].length();
  }
  ls.print_cr("(" SIZE_FORMAT ")", total);
}

#ifndef PRODUCT
void ReferenceProcessor::log_reflist_counts(DiscoveredList ref_lists[], uint num_active_queues) {
  if (!log_is_enabled(Trace, gc, ref)) {
    return;
  }

  log_reflist("", ref_lists, num_active_queues);
#ifdef ASSERT
  for (uint i = num_active_queues; i < _max_num_queues; i++) {
    assert(ref_lists[i].length() == 0, SIZE_FORMAT " unexpected References in %u",
           ref_lists[i].length(), i);
  }
#endif
}
#endif

void ReferenceProcessor::set_active_mt_degree(uint v) {
  _num_queues = v;
  _next_id = 0;
}

// Balances reference queues.
// Move entries from all queues[0, 1, ..., _max_num_queues-1] to
// queues[0, 1, ..., _num_queues-1], because only the first _num_queues
// queues, corresponding to the active workers, will be processed.
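// Worked example (hypothetical numbers): with _max_num_queues == 4,
// _num_queues == 2 and queue lengths {10, 2, 6, 6}, total_refs == 24 and
// avg_refs == 24/2 + 1 == 13. Queues 2 and 3 are inactive, so they are
// drained entirely: queue 2 donates 3 refs to queue 0 (filling it up to
// avg_refs) and its remaining 3 to queue 1; queue 3 then donates all 6 to
// queue 1, leaving lengths {13, 11, 0, 0}.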
void ReferenceProcessor::balance_queues(DiscoveredList ref_lists[])
{
  // calculate total length
  size_t total_refs = 0;
  log_develop_trace(gc, ref)("Balance ref_lists ");

  log_reflist_counts(ref_lists, _max_num_queues);

  for (uint i = 0; i < _max_num_queues; ++i) {
    total_refs += ref_lists[i].length();
  }
  size_t avg_refs = total_refs / _num_queues + 1;
  uint to_idx = 0;
  for (uint from_idx = 0; from_idx < _max_num_queues; from_idx++) {
    bool move_all = false;
    if (from_idx >= _num_queues) {
      move_all = ref_lists[from_idx].length() > 0;
    }
    while ((ref_lists[from_idx].length() > avg_refs) ||
           move_all) {
      assert(to_idx < _num_queues, "Sanity Check!");
      if (ref_lists[to_idx].length() < avg_refs) {
        // move superfluous refs
        size_t refs_to_move;
        // Move all the Ref's if the from queue will not be processed.
        if (move_all) {
          refs_to_move = MIN2(ref_lists[from_idx].length(),
                              avg_refs - ref_lists[to_idx].length());
        } else {
          refs_to_move = MIN2(ref_lists[from_idx].length() - avg_refs,
                              avg_refs - ref_lists[to_idx].length());
        }

        assert(refs_to_move > 0, "otherwise the code below will fail");

        oop move_head = ref_lists[from_idx].head();
        oop move_tail = move_head;
        oop new_head  = move_head;
        // find an element to split the list on
        for (size_t j = 0; j < refs_to_move; ++j) {
          move_tail = new_head;
          new_head = java_lang_ref_Reference::discovered(new_head);
        }

        // Add the chain to the to list.
        if (ref_lists[to_idx].head() == NULL) {
          // to list is empty. Make a loop at the end.
          java_lang_ref_Reference::set_discovered_raw(move_tail, move_tail);
        } else {
          java_lang_ref_Reference::set_discovered_raw(move_tail, ref_lists[to_idx].head());
        }
        ref_lists[to_idx].set_head(move_head);
        ref_lists[to_idx].inc_length(refs_to_move);

        // Remove the chain from the from list.
        if (move_tail == new_head) {
          // We found the end of the from list.
          ref_lists[from_idx].set_head(NULL);
        } else {
          ref_lists[from_idx].set_head(new_head);
        }
        ref_lists[from_idx].dec_length(refs_to_move);
        if (ref_lists[from_idx].length() == 0) {
          break;
        }
      } else {
        to_idx = (to_idx + 1) % _num_queues;
      }
    }
  }
#ifdef ASSERT
  log_reflist_counts(ref_lists, _num_queues);
  size_t balanced_total_refs = 0;
  for (uint i = 0; i < _num_queues; ++i) {
    balanced_total_refs += ref_lists[i].length();
  }
  assert(total_refs == balanced_total_refs, "Balancing was incomplete");
#endif
}

void ReferenceProcessor::process_discovered_reflist(
  DiscoveredList                refs_lists[],
  ReferencePolicy*              policy,
  bool                          clear_referent,
  BoolObjectClosure*            is_alive,
  OopClosure*                   keep_alive,
  VoidClosure*                  complete_gc,
  AbstractRefProcTaskExecutor*  task_executor,
  ReferenceProcessorPhaseTimes* phase_times)
{
  bool mt_processing = task_executor != NULL && _processing_is_mt;

  phase_times->set_processing_is_mt(mt_processing);

  if (mt_processing && ParallelRefProcBalancingEnabled) {
    RefProcBalanceQueuesTimeTracker tt(phase_times);
    balance_queues(refs_lists);
  }

  // Phase 1 (soft refs only):
  // . Traverse the list and remove any SoftReferences whose
  //   referents are not alive, but that should be kept alive for
  //   policy reasons. Keep alive the transitive closure of all
  //   such referents.
  if (policy != NULL) {
    RefProcParPhaseTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase1, phase_times);

    if (mt_processing) {
      RefProcPhase1Task phase1(*this, refs_lists, policy, true /*marks_oops_alive*/, phase_times);
      task_executor->execute(phase1);
    } else {
      for (uint i = 0; i < _max_num_queues; i++) {
        process_phase1(refs_lists[i], policy,
                       is_alive, keep_alive, complete_gc);
      }
    }
  } else { // policy == NULL
    assert(refs_lists != _discoveredSoftRefs,
           "Policy must be specified for soft references.");
  }

  // Phase 2:
  // . Traverse the list and remove any refs whose referents are alive.
  {
    RefProcParPhaseTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase2, phase_times);

    if (mt_processing) {
      RefProcPhase2Task phase2(*this, refs_lists, !discovery_is_atomic() /*marks_oops_alive*/, phase_times);
      task_executor->execute(phase2);
    } else {
      for (uint i = 0; i < _max_num_queues; i++) {
        process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc);
      }
    }
  }

  // Phase 3:
  // . Traverse the list and process referents as appropriate.
  {
    RefProcParPhaseTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase3, phase_times);

    if (mt_processing) {
      RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/, phase_times);
      task_executor->execute(phase3);
    } else {
      for (uint i = 0; i < _max_num_queues; i++) {
        process_phase3(refs_lists[i], clear_referent,
                       is_alive, keep_alive, complete_gc);
      }
    }
  }
}
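// Discovery spreads References across the per-type queues: with MT discovery
// each worker pushes onto the queue matching its own worker id, while
// single-threaded discovery combined with MT processing hands out queue ids
// round-robin via next_id(), so the balancing pass above has less to move.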
inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt) {
  uint id = 0;
  // Determine the queue index to use for this object.
  if (_discovery_is_mt) {
    // During a multi-threaded discovery phase,
    // each thread saves to its "own" list.
    Thread* thr = Thread::current();
    id = thr->as_Worker_thread()->id();
  } else {
    // single-threaded discovery, we save in round-robin
    // fashion to each of the lists.
    if (_processing_is_mt) {
      id = next_id();
    }
  }
  assert(id < _max_num_queues, "Id %u is out of bounds (max id %u)", id, _max_num_queues);

  // Get the discovered queue to which we will add
  DiscoveredList* list = NULL;
  switch (rt) {
    case REF_OTHER:
      // Unknown reference type, no special treatment
      break;
    case REF_SOFT:
      list = &_discoveredSoftRefs[id];
      break;
    case REF_WEAK:
      list = &_discoveredWeakRefs[id];
      break;
    case REF_FINAL:
      list = &_discoveredFinalRefs[id];
      break;
    case REF_PHANTOM:
      list = &_discoveredPhantomRefs[id];
      break;
    case REF_NONE:
      // we should not reach here if we are an InstanceRefKlass
    default:
      ShouldNotReachHere();
  }
  log_develop_trace(gc, ref)("Thread %d gets list " INTPTR_FORMAT, id, p2i(list));
  return list;
}

inline void
ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
                                              oop             obj,
                                              HeapWord*       discovered_addr) {
  assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller");
  // First we must make sure this object is only enqueued once. CAS in a non null
  // discovered_addr.
  oop current_head = refs_list.head();
  // The last ref must have its discovered field pointing to itself.
  oop next_discovered = (current_head != NULL) ? current_head : obj;

  oop retest = RawAccess<>::oop_atomic_cmpxchg(next_discovered, discovered_addr, oop(NULL));

  if (retest == NULL) {
    // This thread just won the right to enqueue the object.
    // We have separate lists for enqueueing, so no synchronization
    // is necessary.
    refs_list.set_head(obj);
    refs_list.inc_length(1);

    log_develop_trace(gc, ref)("Discovered reference (mt) (" INTPTR_FORMAT ": %s)",
                               p2i(obj), obj->klass()->internal_name());
  } else {
    // If retest was non NULL, another thread beat us to it:
    // The reference has already been discovered...
    log_develop_trace(gc, ref)("Already discovered reference (" INTPTR_FORMAT ": %s)",
                               p2i(obj), obj->klass()->internal_name());
  }
}
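// Note on add_to_discovered_list_mt(): the CAS on the discovered field is the
// claim marker. Whichever thread installs a non-NULL value first owns the
// Reference, and each thread pushes onto the head of its own per-worker list,
// so no further synchronization of the list itself is needed.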
#ifndef PRODUCT
// Non-atomic (i.e. concurrent) discovery might allow us
// to observe j.l.References with NULL referents, being those
// cleared concurrently by mutators during (or after) discovery.
void ReferenceProcessor::verify_referent(oop obj) {
  bool da = discovery_is_atomic();
  oop referent = java_lang_ref_Reference::referent(obj);
  assert(da ? oopDesc::is_oop(referent) : oopDesc::is_oop_or_null(referent),
         "Bad referent " INTPTR_FORMAT " found in Reference "
         INTPTR_FORMAT " during %satomic discovery ",
         p2i(referent), p2i(obj), da ? "" : "non-");
}
#endif

bool ReferenceProcessor::is_subject_to_discovery(oop const obj) const {
  return _is_subject_to_discovery->do_object_b(obj);
}

// We mention two of several possible choices here:
// #0: if the reference object is not in the "originating generation"
//     (or part of the heap being collected, indicated by our "span"),
//     we don't treat it specially (i.e. we scan it as we would
//     a normal oop, treating its references as strong references).
//     This means that references can't be discovered unless their
//     referent is also in the same span. This is the simplest,
//     most "local" and most conservative approach, albeit one
//     that may cause weak references to be enqueued least promptly.
//     We call this choice the "ReferenceBasedDiscovery" policy.
// #1: the reference object may be in any generation (span), but if
//     the referent is in the generation (span) being currently collected
//     then we can discover the reference object, provided
//     the object has not already been discovered by
//     a different concurrently running collector (as may be the
//     case, for instance, if the reference object is in CMS and
//     the referent in DefNewGeneration), and provided the processing
//     of this reference object by the current collector will
//     appear atomic to every other collector in the system.
//     (Thus, for instance, a concurrent collector may not
//     discover references in other generations even if the
//     referent is in its own generation). This policy may,
//     in certain cases, enqueue references somewhat sooner than
//     might Policy #0 above, but at marginally increased cost
//     and complexity in processing these references.
//     We call this choice the "ReferentBasedDiscovery" policy.
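// Illustrative scenario (hypothetical heap layout): during a young-only
// collection, a Reference object living in the old generation whose referent
// lives in the young generation is scanned as a normal object under
// ReferenceBasedDiscovery (the Reference itself is outside the collected
// span), but may be discovered under ReferentBasedDiscovery, since there
// only the referent needs to be in the span being collected.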
bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
  // Make sure we are discovering refs (rather than processing discovered refs).
  if (!_discovering_refs || !RegisterReferences) {
    return false;
  }
  // We only discover active references.
  oop next = java_lang_ref_Reference::next(obj);
  if (next != NULL) {   // Ref is no longer active
    return false;
  }

  if (RefDiscoveryPolicy == ReferenceBasedDiscovery &&
      !is_subject_to_discovery(obj)) {
    // Reference is not in the originating generation;
    // don't treat it specially (i.e. we want to scan it as a normal
    // object with strong references).
    return false;
  }

  // We only discover references whose referents are not (yet)
  // known to be strongly reachable.
  if (is_alive_non_header() != NULL) {
    verify_referent(obj);
    if (is_alive_non_header()->do_object_b(java_lang_ref_Reference::referent(obj))) {
      return false;  // referent is reachable
    }
  }
  if (rt == REF_SOFT) {
    // For soft refs we can decide now if these are not
    // current candidates for clearing, in which case we
    // can mark through them now, rather than delaying that
    // to the reference-processing phase. Since all current
    // time-stamp policies advance the soft-ref clock only
    // at a full collection cycle, this is always currently
    // accurate.
    if (!_current_soft_ref_policy->should_clear_reference(obj, _soft_ref_timestamp_clock)) {
      return false;
    }
  }

  ResourceMark rm;      // Needed for tracing.

  HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr_raw(obj);
  const oop  discovered = java_lang_ref_Reference::discovered(obj);
  assert(oopDesc::is_oop_or_null(discovered), "Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered));
  if (discovered != NULL) {
    // The reference has already been discovered...
    log_develop_trace(gc, ref)("Already discovered reference (" INTPTR_FORMAT ": %s)",
                               p2i(obj), obj->klass()->internal_name());
    if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
      // assumes that an object is not processed twice;
      // if it's been already discovered it must be on another
      // generation's discovered list; so we won't discover it.
      return false;
    } else {
      assert(RefDiscoveryPolicy == ReferenceBasedDiscovery,
             "Unrecognized policy");
      // Check assumption that an object is not potentially
      // discovered twice except by concurrent collectors that potentially
      // trace the same Reference object twice.
      assert(UseConcMarkSweepGC || UseG1GC,
             "Only possible with a concurrent marking collector");
      return true;
    }
  }

  if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
    verify_referent(obj);
    // Discover if and only if EITHER:
    // .. reference is in our span, OR
    // .. we are an atomic collector and referent is in our span
    if (is_subject_to_discovery(obj) ||
        (discovery_is_atomic() &&
         is_subject_to_discovery(java_lang_ref_Reference::referent(obj)))) {
    } else {
      return false;
    }
  } else {
    assert(RefDiscoveryPolicy == ReferenceBasedDiscovery &&
           is_subject_to_discovery(obj), "code inconsistency");
  }

  // Get the right type of discovered queue head.
  DiscoveredList* list = get_discovered_list(rt);
  if (list == NULL) {
    return false;   // nothing special needs to be done
  }

  if (_discovery_is_mt) {
    add_to_discovered_list_mt(*list, obj, discovered_addr);
  } else {
    // We do a raw store here: the field will be visited later when processing
    // the discovered references.
    oop current_head = list->head();
    // The last ref must have its discovered field pointing to itself.
    oop next_discovered = (current_head != NULL) ? current_head : obj;

    assert(discovered == NULL, "control point invariant");
    RawAccess<>::oop_store(discovered_addr, next_discovered);
    list->set_head(obj);
    list->inc_length(1);

    log_develop_trace(gc, ref)("Discovered reference (" INTPTR_FORMAT ": %s)", p2i(obj), obj->klass()->internal_name());
  }
  assert(oopDesc::is_oop(obj), "Discovered a bad reference");
  verify_referent(obj);
  return true;
}

bool ReferenceProcessor::has_discovered_references() {
  for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) {
    if (!_discovered_refs[i].is_empty()) {
      return true;
    }
  }
  return false;
}
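// Precleaning is meant for concurrent collectors: it walks the discovered
// lists while mutators may still be running and drops entries that will not
// need pause-time processing. The YieldClosure lets the caller yield or abort
// between and within lists, which is why every block below re-checks
// yield->should_return() and propagates an abort from
// preclean_discovered_reflist().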
void ReferenceProcessor::preclean_discovered_references(BoolObjectClosure* is_alive,
                                                        OopClosure*        keep_alive,
                                                        VoidClosure*       complete_gc,
                                                        YieldClosure*      yield,
                                                        GCTimer*           gc_timer) {
  // These lists can be handled here in any order and, indeed, concurrently.

  // Soft references
  {
    GCTraceTime(Debug, gc, ref) tm("Preclean SoftReferences", gc_timer);
    log_reflist("SoftRef before: ", _discoveredSoftRefs, _max_num_queues);
    for (uint i = 0; i < _max_num_queues; i++) {
      if (yield->should_return()) {
        return;
      }
      if (preclean_discovered_reflist(_discoveredSoftRefs[i], is_alive,
                                      keep_alive, complete_gc, yield)) {
        log_reflist("SoftRef abort: ", _discoveredSoftRefs, _max_num_queues);
        return;
      }
    }
    log_reflist("SoftRef after: ", _discoveredSoftRefs, _max_num_queues);
  }

  // Weak references
  {
    GCTraceTime(Debug, gc, ref) tm("Preclean WeakReferences", gc_timer);
    log_reflist("WeakRef before: ", _discoveredWeakRefs, _max_num_queues);
    for (uint i = 0; i < _max_num_queues; i++) {
      if (yield->should_return()) {
        return;
      }
      if (preclean_discovered_reflist(_discoveredWeakRefs[i], is_alive,
                                      keep_alive, complete_gc, yield)) {
        log_reflist("WeakRef abort: ", _discoveredWeakRefs, _max_num_queues);
        return;
      }
    }
    log_reflist("WeakRef after: ", _discoveredWeakRefs, _max_num_queues);
  }

  // Final references
  {
    GCTraceTime(Debug, gc, ref) tm("Preclean FinalReferences", gc_timer);
    log_reflist("FinalRef before: ", _discoveredFinalRefs, _max_num_queues);
    for (uint i = 0; i < _max_num_queues; i++) {
      if (yield->should_return()) {
        return;
      }
      if (preclean_discovered_reflist(_discoveredFinalRefs[i], is_alive,
                                      keep_alive, complete_gc, yield)) {
        log_reflist("FinalRef abort: ", _discoveredFinalRefs, _max_num_queues);
        return;
      }
    }
    log_reflist("FinalRef after: ", _discoveredFinalRefs, _max_num_queues);
  }

  // Phantom references
  {
    GCTraceTime(Debug, gc, ref) tm("Preclean PhantomReferences", gc_timer);
    log_reflist("PhantomRef before: ", _discoveredPhantomRefs, _max_num_queues);
    for (uint i = 0; i < _max_num_queues; i++) {
      if (yield->should_return()) {
        return;
      }
      if (preclean_discovered_reflist(_discoveredPhantomRefs[i], is_alive,
                                      keep_alive, complete_gc, yield)) {
        log_reflist("PhantomRef abort: ", _discoveredPhantomRefs, _max_num_queues);
        return;
      }
    }
    log_reflist("PhantomRef after: ", _discoveredPhantomRefs, _max_num_queues);
  }
}

// Walk the given discovered ref list, and remove all reference objects
// whose referents are still alive, whose referents are NULL or which
// are not active (have a non-NULL next field). NOTE: When we are
// thus precleaning the ref lists (which happens single-threaded today),
// we do not disable refs discovery to honor the correct semantics of
// java.lang.Reference. As a result, we need to be careful below
// that ref removal steps interleave safely with ref discovery steps
// (in this thread).
bool ReferenceProcessor::preclean_discovered_reflist(DiscoveredList&    refs_list,
                                                     BoolObjectClosure* is_alive,
                                                     OopClosure*        keep_alive,
                                                     VoidClosure*       complete_gc,
                                                     YieldClosure*      yield) {
  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    if (yield->should_return_fine_grain()) {
      return true;
    }
    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
    oop obj = iter.obj();
    oop next = java_lang_ref_Reference::next(obj);
    if (iter.referent() == NULL || iter.is_referent_alive() || next != NULL) {
      // The referent has been cleared, or is alive, or the Reference is not
      // active; we need to trace and mark its cohort.
      log_develop_trace(gc, ref)("Precleaning Reference (" INTPTR_FORMAT ": %s)",
                                 p2i(iter.obj()), iter.obj()->klass()->internal_name());
      // Remove Reference object from list
      iter.remove();
      // Keep alive its cohort.
      iter.make_referent_alive();
      if (UseCompressedOops) {
        narrowOop* next_addr = (narrowOop*)java_lang_ref_Reference::next_addr_raw(obj);
        keep_alive->do_oop(next_addr);
      } else {
        oop* next_addr = (oop*)java_lang_ref_Reference::next_addr_raw(obj);
        keep_alive->do_oop(next_addr);
      }
      iter.move_to_next();
    } else {
      iter.next();
    }
  }
  // Close the reachable set
  complete_gc->do_void();

  NOT_PRODUCT(
    if (iter.processed() > 0) {
      log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " Refs out of " SIZE_FORMAT " Refs in discovered list " INTPTR_FORMAT,
                                 iter.removed(), iter.processed(), p2i(&refs_list));
    }
  )
  return false;
}

const char* ReferenceProcessor::list_name(uint i) {
  assert(i <= _max_num_queues * number_of_subclasses_of_ref(),
         "Out of bounds index");

  int j = i / _max_num_queues;
  switch (j) {
    case 0: return "SoftRef";
    case 1: return "WeakRef";
    case 2: return "FinalRef";
    case 3: return "PhantomRef";
  }
  ShouldNotReachHere();
  return NULL;
}