1 /* 2 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "classfile/javaClasses.inline.hpp" 27 #include "classfile/systemDictionary.hpp" 28 #include "gc/shared/collectedHeap.hpp" 29 #include "gc/shared/collectedHeap.inline.hpp" 30 #include "gc/shared/gcTimer.hpp" 31 #include "gc/shared/gcTraceTime.inline.hpp" 32 #include "gc/shared/referencePolicy.hpp" 33 #include "gc/shared/referenceProcessor.inline.hpp" 34 #include "logging/log.hpp" 35 #include "memory/allocation.inline.hpp" 36 #include "memory/resourceArea.hpp" 37 #include "oops/access.inline.hpp" 38 #include "oops/oop.inline.hpp" 39 #include "runtime/java.hpp" 40 41 ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL; 42 ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy = NULL; 43 jlong ReferenceProcessor::_soft_ref_timestamp_clock = 0; 44 45 void referenceProcessor_init() { 46 ReferenceProcessor::init_statics(); 47 } 48 49 void ReferenceProcessor::init_statics() { 50 // We need a monotonically non-decreasing time in ms but 51 // os::javaTimeMillis() does not guarantee monotonicity. 52 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; 53 54 // Initialize the soft ref timestamp clock. 55 _soft_ref_timestamp_clock = now; 56 // Also update the soft ref clock in j.l.r.SoftReference 57 java_lang_ref_SoftReference::set_clock(_soft_ref_timestamp_clock); 58 59 _always_clear_soft_ref_policy = new AlwaysClearPolicy(); 60 if (is_server_compilation_mode_vm()) { 61 _default_soft_ref_policy = new LRUMaxHeapPolicy(); 62 } else { 63 _default_soft_ref_policy = new LRUCurrentHeapPolicy(); 64 } 65 if (_always_clear_soft_ref_policy == NULL || _default_soft_ref_policy == NULL) { 66 vm_exit_during_initialization("Could not allocate reference policy object"); 67 } 68 guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery || 69 RefDiscoveryPolicy == ReferentBasedDiscovery, 70 "Unrecognized RefDiscoveryPolicy"); 71 } 72 73 void ReferenceProcessor::enable_discovery(bool check_no_refs) { 74 #ifdef ASSERT 75 // Verify that we're not currently discovering refs 76 assert(!_discovering_refs, "nested call?"); 77 78 if (check_no_refs) { 79 // Verify that the discovered lists are empty 80 verify_no_references_recorded(); 81 } 82 #endif // ASSERT 83 84 // Someone could have modified the value of the static 85 // field in the j.l.r.SoftReference class that holds the 86 // soft reference timestamp clock using reflection or 87 // Unsafe between GCs. Unconditionally update the static 88 // field in ReferenceProcessor here so that we use the new 89 // value during reference discovery. 90 91 _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock(); 92 _discovering_refs = true; 93 } 94 95 ReferenceProcessor::ReferenceProcessor(BoolObjectClosure* is_subject_to_discovery, 96 bool mt_processing, 97 uint mt_processing_degree, 98 bool mt_discovery, 99 uint mt_discovery_degree, 100 bool atomic_discovery, 101 BoolObjectClosure* is_alive_non_header) : 102 _is_subject_to_discovery(is_subject_to_discovery), 103 _discovering_refs(false), 104 _enqueuing_is_done(false), 105 _is_alive_non_header(is_alive_non_header), 106 _processing_is_mt(mt_processing), 107 _next_id(0) 108 { 109 assert(is_subject_to_discovery != NULL, "must be set"); 110 111 _discovery_is_atomic = atomic_discovery; 112 _discovery_is_mt = mt_discovery; 113 _num_queues = MAX2(1U, mt_processing_degree); 114 _max_num_queues = MAX2(_num_queues, mt_discovery_degree); 115 _discovered_refs = NEW_C_HEAP_ARRAY(DiscoveredList, 116 _max_num_queues * number_of_subclasses_of_ref(), mtGC); 117 118 if (_discovered_refs == NULL) { 119 vm_exit_during_initialization("Could not allocated RefProc Array"); 120 } 121 _discoveredSoftRefs = &_discovered_refs[0]; 122 _discoveredWeakRefs = &_discoveredSoftRefs[_max_num_queues]; 123 _discoveredFinalRefs = &_discoveredWeakRefs[_max_num_queues]; 124 _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_queues]; 125 126 // Initialize all entries to NULL 127 for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) { 128 _discovered_refs[i].set_head(NULL); 129 _discovered_refs[i].set_length(0); 130 } 131 132 setup_policy(false /* default soft ref policy */); 133 } 134 135 SpanReferenceProcessor::SpanReferenceProcessor(MemRegion span, 136 bool mt_processing, 137 uint mt_processing_degree, 138 bool mt_discovery, 139 uint mt_discovery_degree, 140 bool atomic_discovery, 141 BoolObjectClosure* is_alive_non_header) : 142 ReferenceProcessor(&_span_based_discoverer, 143 mt_processing, 144 mt_processing_degree, 145 mt_discovery, 146 mt_discovery_degree, 147 atomic_discovery, 148 is_alive_non_header), 149 _span_based_discoverer(span) { 150 151 } 152 153 #ifndef PRODUCT 154 void ReferenceProcessor::verify_no_references_recorded() { 155 guarantee(!_discovering_refs, "Discovering refs?"); 156 for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) { 157 guarantee(_discovered_refs[i].is_empty(), 158 "Found non-empty discovered list at %u", i); 159 } 160 } 161 #endif 162 163 void ReferenceProcessor::weak_oops_do(OopClosure* f) { 164 for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) { 165 if (UseCompressedOops) { 166 f->do_oop((narrowOop*)_discovered_refs[i].adr_head()); 167 } else { 168 f->do_oop((oop*)_discovered_refs[i].adr_head()); 169 } 170 } 171 } 172 173 void ReferenceProcessor::update_soft_ref_master_clock() { 174 // Update (advance) the soft ref master clock field. This must be done 175 // after processing the soft ref list. 176 177 // We need a monotonically non-decreasing time in ms but 178 // os::javaTimeMillis() does not guarantee monotonicity. 179 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; 180 jlong soft_ref_clock = java_lang_ref_SoftReference::clock(); 181 assert(soft_ref_clock == _soft_ref_timestamp_clock, "soft ref clocks out of sync"); 182 183 NOT_PRODUCT( 184 if (now < _soft_ref_timestamp_clock) { 185 log_warning(gc)("time warp: " JLONG_FORMAT " to " JLONG_FORMAT, 186 _soft_ref_timestamp_clock, now); 187 } 188 ) 189 // The values of now and _soft_ref_timestamp_clock are set using 190 // javaTimeNanos(), which is guaranteed to be monotonically 191 // non-decreasing provided the underlying platform provides such 192 // a time source (and it is bug free). 193 // In product mode, however, protect ourselves from non-monotonicity. 194 if (now > _soft_ref_timestamp_clock) { 195 _soft_ref_timestamp_clock = now; 196 java_lang_ref_SoftReference::set_clock(now); 197 } 198 // Else leave clock stalled at its old value until time progresses 199 // past clock value. 200 } 201 202 size_t ReferenceProcessor::total_count(DiscoveredList lists[]) const { 203 size_t total = 0; 204 for (uint i = 0; i < _max_num_queues; ++i) { 205 total += lists[i].length(); 206 } 207 return total; 208 } 209 210 ReferenceProcessorStats ReferenceProcessor::process_discovered_references( 211 BoolObjectClosure* is_alive, 212 OopClosure* keep_alive, 213 VoidClosure* complete_gc, 214 AbstractRefProcTaskExecutor* task_executor, 215 ReferenceProcessorPhaseTimes* phase_times) { 216 217 double start_time = os::elapsedTime(); 218 219 assert(!enqueuing_is_done(), "If here enqueuing should not be complete"); 220 // Stop treating discovered references specially. 221 disable_discovery(); 222 223 // If discovery was concurrent, someone could have modified 224 // the value of the static field in the j.l.r.SoftReference 225 // class that holds the soft reference timestamp clock using 226 // reflection or Unsafe between when discovery was enabled and 227 // now. Unconditionally update the static field in ReferenceProcessor 228 // here so that we use the new value during processing of the 229 // discovered soft refs. 230 231 _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock(); 232 233 ReferenceProcessorStats stats(total_count(_discoveredSoftRefs), 234 total_count(_discoveredWeakRefs), 235 total_count(_discoveredFinalRefs), 236 total_count(_discoveredPhantomRefs)); 237 238 // Soft references 239 { 240 RefProcPhaseTimesTracker tt(REF_SOFT, phase_times, this); 241 process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true, 242 is_alive, keep_alive, complete_gc, task_executor, phase_times); 243 } 244 245 update_soft_ref_master_clock(); 246 247 // Weak references 248 { 249 RefProcPhaseTimesTracker tt(REF_WEAK, phase_times, this); 250 process_discovered_reflist(_discoveredWeakRefs, NULL, true, 251 is_alive, keep_alive, complete_gc, task_executor, phase_times); 252 } 253 254 // Final references 255 { 256 RefProcPhaseTimesTracker tt(REF_FINAL, phase_times, this); 257 process_discovered_reflist(_discoveredFinalRefs, NULL, false, 258 is_alive, keep_alive, complete_gc, task_executor, phase_times); 259 } 260 261 // Phantom references 262 { 263 RefProcPhaseTimesTracker tt(REF_PHANTOM, phase_times, this); 264 process_discovered_reflist(_discoveredPhantomRefs, NULL, true, 265 is_alive, keep_alive, complete_gc, task_executor, phase_times); 266 } 267 268 if (task_executor != NULL) { 269 // Record the work done by the parallel workers. 270 task_executor->set_single_threaded_mode(); 271 } 272 273 phase_times->set_total_time_ms((os::elapsedTime() - start_time) * 1000); 274 275 return stats; 276 } 277 278 void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) { 279 _current_discovered_addr = java_lang_ref_Reference::discovered_addr_raw(_current_discovered); 280 oop discovered = java_lang_ref_Reference::discovered(_current_discovered); 281 assert(_current_discovered_addr && oopDesc::is_oop_or_null(discovered), 282 "Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered)); 283 _next_discovered = discovered; 284 285 _referent_addr = java_lang_ref_Reference::referent_addr_raw(_current_discovered); 286 _referent = java_lang_ref_Reference::referent(_current_discovered); 287 assert(Universe::heap()->is_in_reserved_or_null(_referent), 288 "Wrong oop found in java.lang.Reference object"); 289 assert(allow_null_referent ? 290 oopDesc::is_oop_or_null(_referent) 291 : oopDesc::is_oop(_referent), 292 "Expected an oop%s for referent field at " PTR_FORMAT, 293 (allow_null_referent ? " or NULL" : ""), 294 p2i(_referent)); 295 } 296 297 void DiscoveredListIterator::remove() { 298 assert(oopDesc::is_oop(_current_discovered), "Dropping a bad reference"); 299 RawAccess<>::oop_store(_current_discovered_addr, oop(NULL)); 300 301 // First _prev_next ref actually points into DiscoveredList (gross). 302 oop new_next; 303 if (_next_discovered == _current_discovered) { 304 // At the end of the list, we should make _prev point to itself. 305 // If _ref is the first ref, then _prev_next will be in the DiscoveredList, 306 // and _prev will be NULL. 307 new_next = _prev_discovered; 308 } else { 309 new_next = _next_discovered; 310 } 311 // Remove Reference object from discovered list. Note that G1 does not need a 312 // pre-barrier here because we know the Reference has already been found/marked, 313 // that's how it ended up in the discovered list in the first place. 314 RawAccess<>::oop_store(_prev_discovered_addr, new_next); 315 NOT_PRODUCT(_removed++); 316 _refs_list.dec_length(1); 317 } 318 319 void DiscoveredListIterator::clear_referent() { 320 RawAccess<>::oop_store(_referent_addr, oop(NULL)); 321 } 322 323 void DiscoveredListIterator::enqueue() { 324 // Self-loop next, so as to make Ref not active. 325 java_lang_ref_Reference::set_next_raw(_current_discovered, _current_discovered); 326 327 HeapAccess<AS_NO_KEEPALIVE>::oop_store_at(_current_discovered, 328 java_lang_ref_Reference::discovered_offset, 329 _next_discovered); 330 } 331 332 void DiscoveredListIterator::complete_enqeue() { 333 if (_prev_discovered != NULL) { 334 // This is the last object. 335 // Swap refs_list into pending list and set obj's 336 // discovered to what we read from the pending list. 337 oop old = Universe::swap_reference_pending_list(_refs_list.head()); 338 HeapAccess<AS_NO_KEEPALIVE>::oop_store_at(_prev_discovered, java_lang_ref_Reference::discovered_offset, old); 339 } 340 } 341 342 // NOTE: process_phase*() are largely similar, and at a high level 343 // merely iterate over the extant list applying a predicate to 344 // each of its elements and possibly removing that element from the 345 // list and applying some further closures to that element. 346 // We should consider the possibility of replacing these 347 // process_phase*() methods by abstracting them into 348 // a single general iterator invocation that receives appropriate 349 // closures that accomplish this work. 350 351 // (SoftReferences only) Traverse the list and remove any SoftReferences whose 352 // referents are not alive, but that should be kept alive for policy reasons. 353 // Keep alive the transitive closure of all such referents. 354 void 355 ReferenceProcessor::process_phase1(DiscoveredList& refs_list, 356 ReferencePolicy* policy, 357 BoolObjectClosure* is_alive, 358 OopClosure* keep_alive, 359 VoidClosure* complete_gc) { 360 assert(policy != NULL, "Must have a non-NULL policy"); 361 DiscoveredListIterator iter(refs_list, keep_alive, is_alive); 362 // Decide which softly reachable refs should be kept alive. 363 while (iter.has_next()) { 364 iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */)); 365 bool referent_is_dead = (iter.referent() != NULL) && !iter.is_referent_alive(); 366 if (referent_is_dead && 367 !policy->should_clear_reference(iter.obj(), _soft_ref_timestamp_clock)) { 368 log_develop_trace(gc, ref)("Dropping reference (" INTPTR_FORMAT ": %s" ") by policy", 369 p2i(iter.obj()), iter.obj()->klass()->internal_name()); 370 // Remove Reference object from list 371 iter.remove(); 372 // keep the referent around 373 iter.make_referent_alive(); 374 iter.move_to_next(); 375 } else { 376 iter.next(); 377 } 378 } 379 // Close the reachable set 380 complete_gc->do_void(); 381 log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " dead Refs out of " SIZE_FORMAT " discovered Refs by policy, from list " INTPTR_FORMAT, 382 iter.removed(), iter.processed(), p2i(&refs_list)); 383 } 384 385 void ReferenceProcessor::process_phase2(DiscoveredList& refs_list, 386 BoolObjectClosure* is_alive, 387 OopClosure* keep_alive, 388 VoidClosure* complete_gc) { 389 if (discovery_is_atomic()) { 390 // complete_gc is ignored in this case for this phase 391 pp2_work(refs_list, is_alive, keep_alive); 392 } else { 393 assert(complete_gc != NULL, "Error"); 394 pp2_work_concurrent_discovery(refs_list, is_alive, 395 keep_alive, complete_gc); 396 } 397 } 398 // Traverse the list and remove any Refs that are not active, or 399 // whose referents are either alive or NULL. 400 void 401 ReferenceProcessor::pp2_work(DiscoveredList& refs_list, 402 BoolObjectClosure* is_alive, 403 OopClosure* keep_alive) { 404 assert(discovery_is_atomic(), "Error"); 405 DiscoveredListIterator iter(refs_list, keep_alive, is_alive); 406 while (iter.has_next()) { 407 iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */)); 408 DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());) 409 assert(next == NULL, "Should not discover inactive Reference"); 410 if (iter.is_referent_alive()) { 411 log_develop_trace(gc, ref)("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)", 412 p2i(iter.obj()), iter.obj()->klass()->internal_name()); 413 // The referent is reachable after all. 414 // Remove Reference object from list. 415 iter.remove(); 416 // Update the referent pointer as necessary: Note that this 417 // should not entail any recursive marking because the 418 // referent must already have been traversed. 419 iter.make_referent_alive(); 420 iter.move_to_next(); 421 } else { 422 iter.next(); 423 } 424 } 425 NOT_PRODUCT( 426 if (iter.processed() > 0) { 427 log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " active Refs out of " SIZE_FORMAT 428 " Refs in discovered list " INTPTR_FORMAT, 429 iter.removed(), iter.processed(), p2i(&refs_list)); 430 } 431 ) 432 } 433 434 void 435 ReferenceProcessor::pp2_work_concurrent_discovery(DiscoveredList& refs_list, 436 BoolObjectClosure* is_alive, 437 OopClosure* keep_alive, 438 VoidClosure* complete_gc) { 439 assert(!discovery_is_atomic(), "Error"); 440 DiscoveredListIterator iter(refs_list, keep_alive, is_alive); 441 while (iter.has_next()) { 442 iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */)); 443 HeapWord* next_addr = java_lang_ref_Reference::next_addr_raw(iter.obj()); 444 oop next = java_lang_ref_Reference::next(iter.obj()); 445 if ((iter.referent() == NULL || iter.is_referent_alive() || 446 next != NULL)) { 447 assert(oopDesc::is_oop_or_null(next), "Expected an oop or NULL for next field at " PTR_FORMAT, p2i(next)); 448 // Remove Reference object from list 449 iter.remove(); 450 // Trace the cohorts 451 iter.make_referent_alive(); 452 if (UseCompressedOops) { 453 keep_alive->do_oop((narrowOop*)next_addr); 454 } else { 455 keep_alive->do_oop((oop*)next_addr); 456 } 457 iter.move_to_next(); 458 } else { 459 iter.next(); 460 } 461 } 462 // Now close the newly reachable set 463 complete_gc->do_void(); 464 NOT_PRODUCT( 465 if (iter.processed() > 0) { 466 log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " active Refs out of " SIZE_FORMAT 467 " Refs in discovered list " INTPTR_FORMAT, 468 iter.removed(), iter.processed(), p2i(&refs_list)); 469 } 470 ) 471 } 472 473 void ReferenceProcessor::process_phase3(DiscoveredList& refs_list, 474 bool clear_referent, 475 BoolObjectClosure* is_alive, 476 OopClosure* keep_alive, 477 VoidClosure* complete_gc) { 478 ResourceMark rm; 479 DiscoveredListIterator iter(refs_list, keep_alive, is_alive); 480 while (iter.has_next()) { 481 iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */)); 482 if (clear_referent) { 483 // NULL out referent pointer 484 iter.clear_referent(); 485 } else { 486 // keep the referent around 487 iter.make_referent_alive(); 488 } 489 iter.enqueue(); 490 log_develop_trace(gc, ref)("Adding %sreference (" INTPTR_FORMAT ": %s) as pending", 491 clear_referent ? "cleared " : "", p2i(iter.obj()), iter.obj()->klass()->internal_name()); 492 assert(oopDesc::is_oop(iter.obj(), UseConcMarkSweepGC), "Adding a bad reference"); 493 iter.next(); 494 } 495 iter.complete_enqeue(); 496 // Close the reachable set 497 complete_gc->do_void(); 498 } 499 500 void 501 ReferenceProcessor::clear_discovered_references(DiscoveredList& refs_list) { 502 oop obj = NULL; 503 oop next = refs_list.head(); 504 while (next != obj) { 505 obj = next; 506 next = java_lang_ref_Reference::discovered(obj); 507 java_lang_ref_Reference::set_discovered_raw(obj, NULL); 508 } 509 refs_list.set_head(NULL); 510 refs_list.set_length(0); 511 } 512 513 void ReferenceProcessor::abandon_partial_discovery() { 514 // loop over the lists 515 for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) { 516 if ((i % _max_num_queues) == 0) { 517 log_develop_trace(gc, ref)("Abandoning %s discovered list", list_name(i)); 518 } 519 clear_discovered_references(_discovered_refs[i]); 520 } 521 } 522 523 size_t ReferenceProcessor::total_reference_count(ReferenceType type) const { 524 DiscoveredList* list = NULL; 525 526 switch (type) { 527 case REF_SOFT: 528 list = _discoveredSoftRefs; 529 break; 530 case REF_WEAK: 531 list = _discoveredWeakRefs; 532 break; 533 case REF_FINAL: 534 list = _discoveredFinalRefs; 535 break; 536 case REF_PHANTOM: 537 list = _discoveredPhantomRefs; 538 break; 539 case REF_OTHER: 540 case REF_NONE: 541 default: 542 ShouldNotReachHere(); 543 } 544 return total_count(list); 545 } 546 547 class RefProcPhase1Task: public AbstractRefProcTaskExecutor::ProcessTask { 548 public: 549 RefProcPhase1Task(ReferenceProcessor& ref_processor, 550 DiscoveredList refs_lists[], 551 ReferencePolicy* policy, 552 bool marks_oops_alive, 553 ReferenceProcessorPhaseTimes* phase_times) 554 : ProcessTask(ref_processor, refs_lists, marks_oops_alive, phase_times), 555 _policy(policy) 556 { } 557 virtual void work(unsigned int i, BoolObjectClosure& is_alive, 558 OopClosure& keep_alive, 559 VoidClosure& complete_gc) 560 { 561 RefProcWorkerTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase1, _phase_times, i); 562 563 _ref_processor.process_phase1(_refs_lists[i], _policy, 564 &is_alive, &keep_alive, &complete_gc); 565 } 566 private: 567 ReferencePolicy* _policy; 568 }; 569 570 class RefProcPhase2Task: public AbstractRefProcTaskExecutor::ProcessTask { 571 public: 572 RefProcPhase2Task(ReferenceProcessor& ref_processor, 573 DiscoveredList refs_lists[], 574 bool marks_oops_alive, 575 ReferenceProcessorPhaseTimes* phase_times) 576 : ProcessTask(ref_processor, refs_lists, marks_oops_alive, phase_times) 577 { } 578 virtual void work(unsigned int i, BoolObjectClosure& is_alive, 579 OopClosure& keep_alive, 580 VoidClosure& complete_gc) 581 { 582 RefProcWorkerTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase2, _phase_times, i); 583 584 _ref_processor.process_phase2(_refs_lists[i], 585 &is_alive, &keep_alive, &complete_gc); 586 } 587 }; 588 589 class RefProcPhase3Task: public AbstractRefProcTaskExecutor::ProcessTask { 590 public: 591 RefProcPhase3Task(ReferenceProcessor& ref_processor, 592 DiscoveredList refs_lists[], 593 bool clear_referent, 594 bool marks_oops_alive, 595 ReferenceProcessorPhaseTimes* phase_times) 596 : ProcessTask(ref_processor, refs_lists, marks_oops_alive, phase_times), 597 _clear_referent(clear_referent) 598 { } 599 virtual void work(unsigned int i, BoolObjectClosure& is_alive, 600 OopClosure& keep_alive, 601 VoidClosure& complete_gc) 602 { 603 RefProcWorkerTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase3, _phase_times, i); 604 605 _ref_processor.process_phase3(_refs_lists[i], _clear_referent, 606 &is_alive, &keep_alive, &complete_gc); 607 _refs_lists[i].set_head(NULL); 608 _refs_lists[i].set_length(0); 609 } 610 private: 611 bool _clear_referent; 612 }; 613 614 #ifndef PRODUCT 615 void ReferenceProcessor::log_reflist_counts(DiscoveredList ref_lists[], uint active_length, size_t total_refs) { 616 if (!log_is_enabled(Trace, gc, ref)) { 617 return; 618 } 619 620 stringStream st; 621 for (uint i = 0; i < active_length; ++i) { 622 st.print(SIZE_FORMAT " ", ref_lists[i].length()); 623 } 624 log_develop_trace(gc, ref)("%s= " SIZE_FORMAT, st.as_string(), total_refs); 625 #ifdef ASSERT 626 for (uint i = active_length; i < _max_num_queues; i++) { 627 assert(ref_lists[i].length() == 0, SIZE_FORMAT " unexpected References in %u", 628 ref_lists[i].length(), i); 629 } 630 #endif 631 } 632 #endif 633 634 void ReferenceProcessor::set_active_mt_degree(uint v) { 635 _num_queues = v; 636 _next_id = 0; 637 } 638 639 // Balances reference queues. 640 // Move entries from all queues[0, 1, ..., _max_num_q-1] to 641 // queues[0, 1, ..., _num_q-1] because only the first _num_q 642 // corresponding to the active workers will be processed. 643 void ReferenceProcessor::balance_queues(DiscoveredList ref_lists[]) 644 { 645 // calculate total length 646 size_t total_refs = 0; 647 log_develop_trace(gc, ref)("Balance ref_lists "); 648 649 for (uint i = 0; i < _max_num_queues; ++i) { 650 total_refs += ref_lists[i].length(); 651 } 652 log_reflist_counts(ref_lists, _max_num_queues, total_refs); 653 size_t avg_refs = total_refs / _num_queues + 1; 654 uint to_idx = 0; 655 for (uint from_idx = 0; from_idx < _max_num_queues; from_idx++) { 656 bool move_all = false; 657 if (from_idx >= _num_queues) { 658 move_all = ref_lists[from_idx].length() > 0; 659 } 660 while ((ref_lists[from_idx].length() > avg_refs) || 661 move_all) { 662 assert(to_idx < _num_queues, "Sanity Check!"); 663 if (ref_lists[to_idx].length() < avg_refs) { 664 // move superfluous refs 665 size_t refs_to_move; 666 // Move all the Ref's if the from queue will not be processed. 667 if (move_all) { 668 refs_to_move = MIN2(ref_lists[from_idx].length(), 669 avg_refs - ref_lists[to_idx].length()); 670 } else { 671 refs_to_move = MIN2(ref_lists[from_idx].length() - avg_refs, 672 avg_refs - ref_lists[to_idx].length()); 673 } 674 675 assert(refs_to_move > 0, "otherwise the code below will fail"); 676 677 oop move_head = ref_lists[from_idx].head(); 678 oop move_tail = move_head; 679 oop new_head = move_head; 680 // find an element to split the list on 681 for (size_t j = 0; j < refs_to_move; ++j) { 682 move_tail = new_head; 683 new_head = java_lang_ref_Reference::discovered(new_head); 684 } 685 686 // Add the chain to the to list. 687 if (ref_lists[to_idx].head() == NULL) { 688 // to list is empty. Make a loop at the end. 689 java_lang_ref_Reference::set_discovered_raw(move_tail, move_tail); 690 } else { 691 java_lang_ref_Reference::set_discovered_raw(move_tail, ref_lists[to_idx].head()); 692 } 693 ref_lists[to_idx].set_head(move_head); 694 ref_lists[to_idx].inc_length(refs_to_move); 695 696 // Remove the chain from the from list. 697 if (move_tail == new_head) { 698 // We found the end of the from list. 699 ref_lists[from_idx].set_head(NULL); 700 } else { 701 ref_lists[from_idx].set_head(new_head); 702 } 703 ref_lists[from_idx].dec_length(refs_to_move); 704 if (ref_lists[from_idx].length() == 0) { 705 break; 706 } 707 } else { 708 to_idx = (to_idx + 1) % _num_queues; 709 } 710 } 711 } 712 #ifdef ASSERT 713 size_t balanced_total_refs = 0; 714 for (uint i = 0; i < _num_queues; ++i) { 715 balanced_total_refs += ref_lists[i].length(); 716 } 717 log_reflist_counts(ref_lists, _num_queues, balanced_total_refs); 718 assert(total_refs == balanced_total_refs, "Balancing was incomplete"); 719 #endif 720 } 721 722 void ReferenceProcessor::process_discovered_reflist( 723 DiscoveredList refs_lists[], 724 ReferencePolicy* policy, 725 bool clear_referent, 726 BoolObjectClosure* is_alive, 727 OopClosure* keep_alive, 728 VoidClosure* complete_gc, 729 AbstractRefProcTaskExecutor* task_executor, 730 ReferenceProcessorPhaseTimes* phase_times) 731 { 732 bool mt_processing = task_executor != NULL && _processing_is_mt; 733 734 phase_times->set_processing_is_mt(mt_processing); 735 736 if (mt_processing && ParallelRefProcBalancingEnabled) { 737 RefProcBalanceQueuesTimeTracker tt(phase_times); 738 balance_queues(refs_lists); 739 } 740 741 // Phase 1 (soft refs only): 742 // . Traverse the list and remove any SoftReferences whose 743 // referents are not alive, but that should be kept alive for 744 // policy reasons. Keep alive the transitive closure of all 745 // such referents. 746 if (policy != NULL) { 747 RefProcParPhaseTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase1, phase_times); 748 749 if (mt_processing) { 750 RefProcPhase1Task phase1(*this, refs_lists, policy, true /*marks_oops_alive*/, phase_times); 751 task_executor->execute(phase1); 752 } else { 753 for (uint i = 0; i < _max_num_queues; i++) { 754 process_phase1(refs_lists[i], policy, 755 is_alive, keep_alive, complete_gc); 756 } 757 } 758 } else { // policy == NULL 759 assert(refs_lists != _discoveredSoftRefs, 760 "Policy must be specified for soft references."); 761 } 762 763 // Phase 2: 764 // . Traverse the list and remove any refs whose referents are alive. 765 { 766 RefProcParPhaseTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase2, phase_times); 767 768 if (mt_processing) { 769 RefProcPhase2Task phase2(*this, refs_lists, !discovery_is_atomic() /*marks_oops_alive*/, phase_times); 770 task_executor->execute(phase2); 771 } else { 772 for (uint i = 0; i < _max_num_queues; i++) { 773 process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc); 774 } 775 } 776 } 777 778 // Phase 3: 779 // . Traverse the list and process referents as appropriate. 780 { 781 RefProcParPhaseTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase3, phase_times); 782 783 if (mt_processing) { 784 RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/, phase_times); 785 task_executor->execute(phase3); 786 } else { 787 for (uint i = 0; i < _max_num_queues; i++) { 788 process_phase3(refs_lists[i], clear_referent, 789 is_alive, keep_alive, complete_gc); 790 refs_lists[i].set_head(NULL); 791 refs_lists[i].set_length(0); 792 } 793 } 794 } 795 } 796 797 inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt) { 798 uint id = 0; 799 // Determine the queue index to use for this object. 800 if (_discovery_is_mt) { 801 // During a multi-threaded discovery phase, 802 // each thread saves to its "own" list. 803 Thread* thr = Thread::current(); 804 id = thr->as_Worker_thread()->id(); 805 } else { 806 // single-threaded discovery, we save in round-robin 807 // fashion to each of the lists. 808 if (_processing_is_mt) { 809 id = next_id(); 810 } 811 } 812 assert(id < _max_num_queues, "Id is out-of-bounds id %u and max id %u)", id, _max_num_queues); 813 814 // Get the discovered queue to which we will add 815 DiscoveredList* list = NULL; 816 switch (rt) { 817 case REF_OTHER: 818 // Unknown reference type, no special treatment 819 break; 820 case REF_SOFT: 821 list = &_discoveredSoftRefs[id]; 822 break; 823 case REF_WEAK: 824 list = &_discoveredWeakRefs[id]; 825 break; 826 case REF_FINAL: 827 list = &_discoveredFinalRefs[id]; 828 break; 829 case REF_PHANTOM: 830 list = &_discoveredPhantomRefs[id]; 831 break; 832 case REF_NONE: 833 // we should not reach here if we are an InstanceRefKlass 834 default: 835 ShouldNotReachHere(); 836 } 837 log_develop_trace(gc, ref)("Thread %d gets list " INTPTR_FORMAT, id, p2i(list)); 838 return list; 839 } 840 841 inline void 842 ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list, 843 oop obj, 844 HeapWord* discovered_addr) { 845 assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller"); 846 // First we must make sure this object is only enqueued once. CAS in a non null 847 // discovered_addr. 848 oop current_head = refs_list.head(); 849 // The last ref must have its discovered field pointing to itself. 850 oop next_discovered = (current_head != NULL) ? current_head : obj; 851 852 oop retest = RawAccess<>::oop_atomic_cmpxchg(next_discovered, discovered_addr, oop(NULL)); 853 854 if (retest == NULL) { 855 // This thread just won the right to enqueue the object. 856 // We have separate lists for enqueueing, so no synchronization 857 // is necessary. 858 refs_list.set_head(obj); 859 refs_list.inc_length(1); 860 861 log_develop_trace(gc, ref)("Discovered reference (mt) (" INTPTR_FORMAT ": %s)", 862 p2i(obj), obj->klass()->internal_name()); 863 } else { 864 // If retest was non NULL, another thread beat us to it: 865 // The reference has already been discovered... 866 log_develop_trace(gc, ref)("Already discovered reference (" INTPTR_FORMAT ": %s)", 867 p2i(obj), obj->klass()->internal_name()); 868 } 869 } 870 871 #ifndef PRODUCT 872 // Non-atomic (i.e. concurrent) discovery might allow us 873 // to observe j.l.References with NULL referents, being those 874 // cleared concurrently by mutators during (or after) discovery. 875 void ReferenceProcessor::verify_referent(oop obj) { 876 bool da = discovery_is_atomic(); 877 oop referent = java_lang_ref_Reference::referent(obj); 878 assert(da ? oopDesc::is_oop(referent) : oopDesc::is_oop_or_null(referent), 879 "Bad referent " INTPTR_FORMAT " found in Reference " 880 INTPTR_FORMAT " during %satomic discovery ", 881 p2i(referent), p2i(obj), da ? "" : "non-"); 882 } 883 #endif 884 885 template <class T> 886 bool ReferenceProcessor::is_subject_to_discovery(T const obj) const { 887 return _is_subject_to_discovery->do_object_b(obj); 888 } 889 890 // We mention two of several possible choices here: 891 // #0: if the reference object is not in the "originating generation" 892 // (or part of the heap being collected, indicated by our "span" 893 // we don't treat it specially (i.e. we scan it as we would 894 // a normal oop, treating its references as strong references). 895 // This means that references can't be discovered unless their 896 // referent is also in the same span. This is the simplest, 897 // most "local" and most conservative approach, albeit one 898 // that may cause weak references to be enqueued least promptly. 899 // We call this choice the "ReferenceBasedDiscovery" policy. 900 // #1: the reference object may be in any generation (span), but if 901 // the referent is in the generation (span) being currently collected 902 // then we can discover the reference object, provided 903 // the object has not already been discovered by 904 // a different concurrently running collector (as may be the 905 // case, for instance, if the reference object is in CMS and 906 // the referent in DefNewGeneration), and provided the processing 907 // of this reference object by the current collector will 908 // appear atomic to every other collector in the system. 909 // (Thus, for instance, a concurrent collector may not 910 // discover references in other generations even if the 911 // referent is in its own generation). This policy may, 912 // in certain cases, enqueue references somewhat sooner than 913 // might Policy #0 above, but at marginally increased cost 914 // and complexity in processing these references. 915 // We call this choice the "RefeferentBasedDiscovery" policy. 916 bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) { 917 // Make sure we are discovering refs (rather than processing discovered refs). 918 if (!_discovering_refs || !RegisterReferences) { 919 return false; 920 } 921 // We only discover active references. 922 oop next = java_lang_ref_Reference::next(obj); 923 if (next != NULL) { // Ref is no longer active 924 return false; 925 } 926 927 if (RefDiscoveryPolicy == ReferenceBasedDiscovery && 928 !is_subject_to_discovery(obj)) { 929 // Reference is not in the originating generation; 930 // don't treat it specially (i.e. we want to scan it as a normal 931 // object with strong references). 932 return false; 933 } 934 935 // We only discover references whose referents are not (yet) 936 // known to be strongly reachable. 937 if (is_alive_non_header() != NULL) { 938 verify_referent(obj); 939 if (is_alive_non_header()->do_object_b(java_lang_ref_Reference::referent(obj))) { 940 return false; // referent is reachable 941 } 942 } 943 if (rt == REF_SOFT) { 944 // For soft refs we can decide now if these are not 945 // current candidates for clearing, in which case we 946 // can mark through them now, rather than delaying that 947 // to the reference-processing phase. Since all current 948 // time-stamp policies advance the soft-ref clock only 949 // at a full collection cycle, this is always currently 950 // accurate. 951 if (!_current_soft_ref_policy->should_clear_reference(obj, _soft_ref_timestamp_clock)) { 952 return false; 953 } 954 } 955 956 ResourceMark rm; // Needed for tracing. 957 958 HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr_raw(obj); 959 const oop discovered = java_lang_ref_Reference::discovered(obj); 960 assert(oopDesc::is_oop_or_null(discovered), "Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered)); 961 if (discovered != NULL) { 962 // The reference has already been discovered... 963 log_develop_trace(gc, ref)("Already discovered reference (" INTPTR_FORMAT ": %s)", 964 p2i(obj), obj->klass()->internal_name()); 965 if (RefDiscoveryPolicy == ReferentBasedDiscovery) { 966 // assumes that an object is not processed twice; 967 // if it's been already discovered it must be on another 968 // generation's discovered list; so we won't discover it. 969 return false; 970 } else { 971 assert(RefDiscoveryPolicy == ReferenceBasedDiscovery, 972 "Unrecognized policy"); 973 // Check assumption that an object is not potentially 974 // discovered twice except by concurrent collectors that potentially 975 // trace the same Reference object twice. 976 assert(UseConcMarkSweepGC || UseG1GC, 977 "Only possible with a concurrent marking collector"); 978 return true; 979 } 980 } 981 982 if (RefDiscoveryPolicy == ReferentBasedDiscovery) { 983 verify_referent(obj); 984 // Discover if and only if EITHER: 985 // .. reference is in our span, OR 986 // .. we are an atomic collector and referent is in our span 987 if (is_subject_to_discovery(obj) || 988 (discovery_is_atomic() && 989 is_subject_to_discovery(java_lang_ref_Reference::referent(obj)))) { 990 } else { 991 return false; 992 } 993 } else { 994 assert(RefDiscoveryPolicy == ReferenceBasedDiscovery && 995 is_subject_to_discovery(obj), "code inconsistency"); 996 } 997 998 // Get the right type of discovered queue head. 999 DiscoveredList* list = get_discovered_list(rt); 1000 if (list == NULL) { 1001 return false; // nothing special needs to be done 1002 } 1003 1004 if (_discovery_is_mt) { 1005 add_to_discovered_list_mt(*list, obj, discovered_addr); 1006 } else { 1007 // We do a raw store here: the field will be visited later when processing 1008 // the discovered references. 1009 oop current_head = list->head(); 1010 // The last ref must have its discovered field pointing to itself. 1011 oop next_discovered = (current_head != NULL) ? current_head : obj; 1012 1013 assert(discovered == NULL, "control point invariant"); 1014 RawAccess<>::oop_store(discovered_addr, next_discovered); 1015 list->set_head(obj); 1016 list->inc_length(1); 1017 1018 log_develop_trace(gc, ref)("Discovered reference (" INTPTR_FORMAT ": %s)", p2i(obj), obj->klass()->internal_name()); 1019 } 1020 assert(oopDesc::is_oop(obj), "Discovered a bad reference"); 1021 verify_referent(obj); 1022 return true; 1023 } 1024 1025 bool ReferenceProcessor::has_discovered_references() { 1026 for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) { 1027 if (!_discovered_refs[i].is_empty()) { 1028 return true; 1029 } 1030 } 1031 return false; 1032 } 1033 1034 // Preclean the discovered references by removing those 1035 // whose referents are alive, and by marking from those that 1036 // are not active. These lists can be handled here 1037 // in any order and, indeed, concurrently. 1038 void ReferenceProcessor::preclean_discovered_references( 1039 BoolObjectClosure* is_alive, 1040 OopClosure* keep_alive, 1041 VoidClosure* complete_gc, 1042 YieldClosure* yield, 1043 GCTimer* gc_timer) { 1044 1045 // Soft references 1046 { 1047 GCTraceTime(Debug, gc, ref) tm("Preclean SoftReferences", gc_timer); 1048 for (uint i = 0; i < _max_num_queues; i++) { 1049 if (yield->should_return()) { 1050 return; 1051 } 1052 preclean_discovered_reflist(_discoveredSoftRefs[i], is_alive, 1053 keep_alive, complete_gc, yield); 1054 } 1055 } 1056 1057 // Weak references 1058 { 1059 GCTraceTime(Debug, gc, ref) tm("Preclean WeakReferences", gc_timer); 1060 for (uint i = 0; i < _max_num_queues; i++) { 1061 if (yield->should_return()) { 1062 return; 1063 } 1064 preclean_discovered_reflist(_discoveredWeakRefs[i], is_alive, 1065 keep_alive, complete_gc, yield); 1066 } 1067 } 1068 1069 // Final references 1070 { 1071 GCTraceTime(Debug, gc, ref) tm("Preclean FinalReferences", gc_timer); 1072 for (uint i = 0; i < _max_num_queues; i++) { 1073 if (yield->should_return()) { 1074 return; 1075 } 1076 preclean_discovered_reflist(_discoveredFinalRefs[i], is_alive, 1077 keep_alive, complete_gc, yield); 1078 } 1079 } 1080 1081 // Phantom references 1082 { 1083 GCTraceTime(Debug, gc, ref) tm("Preclean PhantomReferences", gc_timer); 1084 for (uint i = 0; i < _max_num_queues; i++) { 1085 if (yield->should_return()) { 1086 return; 1087 } 1088 preclean_discovered_reflist(_discoveredPhantomRefs[i], is_alive, 1089 keep_alive, complete_gc, yield); 1090 } 1091 } 1092 } 1093 1094 // Walk the given discovered ref list, and remove all reference objects 1095 // whose referents are still alive, whose referents are NULL or which 1096 // are not active (have a non-NULL next field). NOTE: When we are 1097 // thus precleaning the ref lists (which happens single-threaded today), 1098 // we do not disable refs discovery to honor the correct semantics of 1099 // java.lang.Reference. As a result, we need to be careful below 1100 // that ref removal steps interleave safely with ref discovery steps 1101 // (in this thread). 1102 void 1103 ReferenceProcessor::preclean_discovered_reflist(DiscoveredList& refs_list, 1104 BoolObjectClosure* is_alive, 1105 OopClosure* keep_alive, 1106 VoidClosure* complete_gc, 1107 YieldClosure* yield) { 1108 DiscoveredListIterator iter(refs_list, keep_alive, is_alive); 1109 while (iter.has_next()) { 1110 iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */)); 1111 oop obj = iter.obj(); 1112 oop next = java_lang_ref_Reference::next(obj); 1113 if (iter.referent() == NULL || iter.is_referent_alive() || 1114 next != NULL) { 1115 // The referent has been cleared, or is alive, or the Reference is not 1116 // active; we need to trace and mark its cohort. 1117 log_develop_trace(gc, ref)("Precleaning Reference (" INTPTR_FORMAT ": %s)", 1118 p2i(iter.obj()), iter.obj()->klass()->internal_name()); 1119 // Remove Reference object from list 1120 iter.remove(); 1121 // Keep alive its cohort. 1122 iter.make_referent_alive(); 1123 if (UseCompressedOops) { 1124 narrowOop* next_addr = (narrowOop*)java_lang_ref_Reference::next_addr_raw(obj); 1125 keep_alive->do_oop(next_addr); 1126 } else { 1127 oop* next_addr = (oop*)java_lang_ref_Reference::next_addr_raw(obj); 1128 keep_alive->do_oop(next_addr); 1129 } 1130 iter.move_to_next(); 1131 } else { 1132 iter.next(); 1133 } 1134 } 1135 // Close the reachable set 1136 complete_gc->do_void(); 1137 1138 NOT_PRODUCT( 1139 if (iter.processed() > 0) { 1140 log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " Refs out of " SIZE_FORMAT " Refs in discovered list " INTPTR_FORMAT, 1141 iter.removed(), iter.processed(), p2i(&refs_list)); 1142 } 1143 ) 1144 } 1145 1146 const char* ReferenceProcessor::list_name(uint i) { 1147 assert(i <= _max_num_queues * number_of_subclasses_of_ref(), 1148 "Out of bounds index"); 1149 1150 int j = i / _max_num_queues; 1151 switch (j) { 1152 case 0: return "SoftRef"; 1153 case 1: return "WeakRef"; 1154 case 2: return "FinalRef"; 1155 case 3: return "PhantomRef"; 1156 } 1157 ShouldNotReachHere(); 1158 return NULL; 1159 }