1 /* 2 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "classfile/javaClasses.inline.hpp" 27 #include "classfile/systemDictionary.hpp" 28 #include "gc/shared/collectedHeap.hpp" 29 #include "gc/shared/collectedHeap.inline.hpp" 30 #include "gc/shared/gcTimer.hpp" 31 #include "gc/shared/gcTraceTime.inline.hpp" 32 #include "gc/shared/referencePolicy.hpp" 33 #include "gc/shared/referenceProcessor.inline.hpp" 34 #include "logging/log.hpp" 35 #include "memory/allocation.inline.hpp" 36 #include "memory/resourceArea.hpp" 37 #include "oops/access.inline.hpp" 38 #include "oops/oop.inline.hpp" 39 #include "runtime/java.hpp" 40 41 ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL; 42 ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy = NULL; 43 jlong ReferenceProcessor::_soft_ref_timestamp_clock = 0; 44 45 void referenceProcessor_init() { 46 ReferenceProcessor::init_statics(); 47 } 48 49 void ReferenceProcessor::init_statics() { 50 // We need a monotonically non-decreasing time in ms but 51 // os::javaTimeMillis() does not guarantee monotonicity. 52 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; 53 54 // Initialize the soft ref timestamp clock. 55 _soft_ref_timestamp_clock = now; 56 // Also update the soft ref clock in j.l.r.SoftReference 57 java_lang_ref_SoftReference::set_clock(_soft_ref_timestamp_clock); 58 59 _always_clear_soft_ref_policy = new AlwaysClearPolicy(); 60 if (is_server_compilation_mode_vm()) { 61 _default_soft_ref_policy = new LRUMaxHeapPolicy(); 62 } else { 63 _default_soft_ref_policy = new LRUCurrentHeapPolicy(); 64 } 65 if (_always_clear_soft_ref_policy == NULL || _default_soft_ref_policy == NULL) { 66 vm_exit_during_initialization("Could not allocate reference policy object"); 67 } 68 guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery || 69 RefDiscoveryPolicy == ReferentBasedDiscovery, 70 "Unrecognized RefDiscoveryPolicy"); 71 } 72 73 void ReferenceProcessor::enable_discovery(bool check_no_refs) { 74 #ifdef ASSERT 75 // Verify that we're not currently discovering refs 76 assert(!_discovering_refs, "nested call?"); 77 78 if (check_no_refs) { 79 // Verify that the discovered lists are empty 80 verify_no_references_recorded(); 81 } 82 #endif // ASSERT 83 84 // Someone could have modified the value of the static 85 // field in the j.l.r.SoftReference class that holds the 86 // soft reference timestamp clock using reflection or 87 // Unsafe between GCs. Unconditionally update the static 88 // field in ReferenceProcessor here so that we use the new 89 // value during reference discovery. 90 91 _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock(); 92 _discovering_refs = true; 93 } 94 95 ReferenceProcessor::ReferenceProcessor(BoolObjectClosure* is_subject_to_discovery, 96 bool mt_processing, 97 uint mt_processing_degree, 98 bool mt_discovery, 99 uint mt_discovery_degree, 100 bool atomic_discovery, 101 BoolObjectClosure* is_alive_non_header) : 102 _is_subject_to_discovery(is_subject_to_discovery), 103 _discovering_refs(false), 104 _enqueuing_is_done(false), 105 _is_alive_non_header(is_alive_non_header), 106 _processing_is_mt(mt_processing), 107 _next_id(0) 108 { 109 assert(is_subject_to_discovery != NULL, "must be set"); 110 111 _discovery_is_atomic = atomic_discovery; 112 _discovery_is_mt = mt_discovery; 113 _num_queues = MAX2(1U, mt_processing_degree); 114 _max_num_queues = MAX2(_num_queues, mt_discovery_degree); 115 _discovered_refs = NEW_C_HEAP_ARRAY(DiscoveredList, 116 _max_num_queues * number_of_subclasses_of_ref(), mtGC); 117 118 if (_discovered_refs == NULL) { 119 vm_exit_during_initialization("Could not allocated RefProc Array"); 120 } 121 _discoveredSoftRefs = &_discovered_refs[0]; 122 _discoveredWeakRefs = &_discoveredSoftRefs[_max_num_queues]; 123 _discoveredFinalRefs = &_discoveredWeakRefs[_max_num_queues]; 124 _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_queues]; 125 126 // Initialize all entries to NULL 127 for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) { 128 _discovered_refs[i].set_head(NULL); 129 _discovered_refs[i].set_length(0); 130 } 131 132 setup_policy(false /* default soft ref policy */); 133 } 134 135 #ifndef PRODUCT 136 void ReferenceProcessor::verify_no_references_recorded() { 137 guarantee(!_discovering_refs, "Discovering refs?"); 138 for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) { 139 guarantee(_discovered_refs[i].is_empty(), 140 "Found non-empty discovered list at %u", i); 141 } 142 } 143 #endif 144 145 void ReferenceProcessor::weak_oops_do(OopClosure* f) { 146 for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) { 147 if (UseCompressedOops) { 148 f->do_oop((narrowOop*)_discovered_refs[i].adr_head()); 149 } else { 150 f->do_oop((oop*)_discovered_refs[i].adr_head()); 151 } 152 } 153 } 154 155 void ReferenceProcessor::update_soft_ref_master_clock() { 156 // Update (advance) the soft ref master clock field. This must be done 157 // after processing the soft ref list. 158 159 // We need a monotonically non-decreasing time in ms but 160 // os::javaTimeMillis() does not guarantee monotonicity. 161 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; 162 jlong soft_ref_clock = java_lang_ref_SoftReference::clock(); 163 assert(soft_ref_clock == _soft_ref_timestamp_clock, "soft ref clocks out of sync"); 164 165 NOT_PRODUCT( 166 if (now < _soft_ref_timestamp_clock) { 167 log_warning(gc)("time warp: " JLONG_FORMAT " to " JLONG_FORMAT, 168 _soft_ref_timestamp_clock, now); 169 } 170 ) 171 // The values of now and _soft_ref_timestamp_clock are set using 172 // javaTimeNanos(), which is guaranteed to be monotonically 173 // non-decreasing provided the underlying platform provides such 174 // a time source (and it is bug free). 175 // In product mode, however, protect ourselves from non-monotonicity. 176 if (now > _soft_ref_timestamp_clock) { 177 _soft_ref_timestamp_clock = now; 178 java_lang_ref_SoftReference::set_clock(now); 179 } 180 // Else leave clock stalled at its old value until time progresses 181 // past clock value. 182 } 183 184 size_t ReferenceProcessor::total_count(DiscoveredList lists[]) const { 185 size_t total = 0; 186 for (uint i = 0; i < _max_num_queues; ++i) { 187 total += lists[i].length(); 188 } 189 return total; 190 } 191 192 ReferenceProcessorStats ReferenceProcessor::process_discovered_references( 193 BoolObjectClosure* is_alive, 194 OopClosure* keep_alive, 195 VoidClosure* complete_gc, 196 AbstractRefProcTaskExecutor* task_executor, 197 ReferenceProcessorPhaseTimes* phase_times) { 198 199 double start_time = os::elapsedTime(); 200 201 assert(!enqueuing_is_done(), "If here enqueuing should not be complete"); 202 // Stop treating discovered references specially. 203 disable_discovery(); 204 205 // If discovery was concurrent, someone could have modified 206 // the value of the static field in the j.l.r.SoftReference 207 // class that holds the soft reference timestamp clock using 208 // reflection or Unsafe between when discovery was enabled and 209 // now. Unconditionally update the static field in ReferenceProcessor 210 // here so that we use the new value during processing of the 211 // discovered soft refs. 212 213 _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock(); 214 215 ReferenceProcessorStats stats(total_count(_discoveredSoftRefs), 216 total_count(_discoveredWeakRefs), 217 total_count(_discoveredFinalRefs), 218 total_count(_discoveredPhantomRefs)); 219 220 // Soft references 221 { 222 RefProcPhaseTimesTracker tt(REF_SOFT, phase_times, this); 223 process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true, 224 is_alive, keep_alive, complete_gc, task_executor, phase_times); 225 } 226 227 update_soft_ref_master_clock(); 228 229 // Weak references 230 { 231 RefProcPhaseTimesTracker tt(REF_WEAK, phase_times, this); 232 process_discovered_reflist(_discoveredWeakRefs, NULL, true, 233 is_alive, keep_alive, complete_gc, task_executor, phase_times); 234 } 235 236 // Final references 237 { 238 RefProcPhaseTimesTracker tt(REF_FINAL, phase_times, this); 239 process_discovered_reflist(_discoveredFinalRefs, NULL, false, 240 is_alive, keep_alive, complete_gc, task_executor, phase_times); 241 } 242 243 // Phantom references 244 { 245 RefProcPhaseTimesTracker tt(REF_PHANTOM, phase_times, this); 246 process_discovered_reflist(_discoveredPhantomRefs, NULL, true, 247 is_alive, keep_alive, complete_gc, task_executor, phase_times); 248 } 249 250 if (task_executor != NULL) { 251 // Record the work done by the parallel workers. 252 task_executor->set_single_threaded_mode(); 253 } 254 255 phase_times->set_total_time_ms((os::elapsedTime() - start_time) * 1000); 256 257 return stats; 258 } 259 260 void ReferenceProcessor::enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor, 261 ReferenceProcessorPhaseTimes* phase_times) { 262 // Enqueue references that are not made active again, and 263 // clear the decks for the next collection (cycle). 264 enqueue_discovered_reflists(task_executor, phase_times); 265 266 // Stop treating discovered references specially. 267 disable_discovery(); 268 } 269 270 void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list) { 271 // Given a list of refs linked through the "discovered" field 272 // (java.lang.ref.Reference.discovered), self-loop their "next" field 273 // thus distinguishing them from active References, then 274 // prepend them to the pending list. 275 // 276 // The Java threads will see the Reference objects linked together through 277 // the discovered field. Instead of trying to do the write barrier updates 278 // in all places in the reference processor where we manipulate the discovered 279 // field we make sure to do the barrier here where we anyway iterate through 280 // all linked Reference objects. Note that it is important to not dirty any 281 // cards during reference processing since this will cause card table 282 // verification to fail for G1. 283 log_develop_trace(gc, ref)("ReferenceProcessor::enqueue_discovered_reflist list " INTPTR_FORMAT, p2i(&refs_list)); 284 285 oop obj = NULL; 286 oop next_discovered = refs_list.head(); 287 // Walk down the list, self-looping the next field 288 // so that the References are not considered active. 289 while (obj != next_discovered) { 290 obj = next_discovered; 291 assert(obj->is_instance(), "should be an instance object"); 292 assert(InstanceKlass::cast(obj->klass())->is_reference_instance_klass(), "should be reference object"); 293 next_discovered = java_lang_ref_Reference::discovered(obj); 294 log_develop_trace(gc, ref)(" obj " INTPTR_FORMAT "/next_discovered " INTPTR_FORMAT, p2i(obj), p2i(next_discovered)); 295 assert(java_lang_ref_Reference::next(obj) == NULL, 296 "Reference not active; should not be discovered"); 297 // Self-loop next, so as to make Ref not active. 298 java_lang_ref_Reference::set_next_raw(obj, obj); 299 if (next_discovered != obj) { 300 HeapAccess<AS_NO_KEEPALIVE>::oop_store_at(obj, java_lang_ref_Reference::discovered_offset, next_discovered); 301 } else { 302 // This is the last object. 303 // Swap refs_list into pending list and set obj's 304 // discovered to what we read from the pending list. 305 oop old = Universe::swap_reference_pending_list(refs_list.head()); 306 HeapAccess<AS_NO_KEEPALIVE>::oop_store_at(obj, java_lang_ref_Reference::discovered_offset, old); 307 } 308 } 309 } 310 311 // Parallel enqueue task 312 class RefProcEnqueueTask: public AbstractRefProcTaskExecutor::EnqueueTask { 313 public: 314 RefProcEnqueueTask(ReferenceProcessor& ref_processor, 315 DiscoveredList discovered_refs[], 316 int n_queues, 317 ReferenceProcessorPhaseTimes* phase_times) 318 : EnqueueTask(ref_processor, discovered_refs, n_queues, phase_times) 319 { } 320 321 virtual void work(unsigned int work_id) { 322 RefProcWorkerTimeTracker tt(ReferenceProcessorPhaseTimes::RefEnqueue, _phase_times, work_id); 323 324 assert(work_id < (unsigned int)_ref_processor.max_num_queues(), "Index out-of-bounds"); 325 // Simplest first cut: static partitioning. 326 int index = work_id; 327 // The increment on "index" must correspond to the maximum number of queues 328 // (n_queues) with which that ReferenceProcessor was created. That 329 // is because of the "clever" way the discovered references lists were 330 // allocated and are indexed into. 331 assert(_n_queues == (int) _ref_processor.max_num_queues(), "Different number not expected"); 332 for (int j = 0; 333 j < ReferenceProcessor::number_of_subclasses_of_ref(); 334 j++, index += _n_queues) { 335 _ref_processor.enqueue_discovered_reflist(_refs_lists[index]); 336 _refs_lists[index].set_head(NULL); 337 _refs_lists[index].set_length(0); 338 } 339 } 340 }; 341 342 // Enqueue references that are not made active again 343 void ReferenceProcessor::enqueue_discovered_reflists(AbstractRefProcTaskExecutor* task_executor, 344 ReferenceProcessorPhaseTimes* phase_times) { 345 346 ReferenceProcessorStats stats(total_count(_discoveredSoftRefs), 347 total_count(_discoveredWeakRefs), 348 total_count(_discoveredFinalRefs), 349 total_count(_discoveredPhantomRefs)); 350 351 RefProcEnqueueTimeTracker tt(phase_times, stats); 352 353 if (_processing_is_mt && task_executor != NULL) { 354 // Parallel code 355 RefProcEnqueueTask tsk(*this, _discovered_refs, _max_num_queues, phase_times); 356 task_executor->execute(tsk); 357 } else { 358 // Serial code: call the parent class's implementation 359 for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) { 360 enqueue_discovered_reflist(_discovered_refs[i]); 361 _discovered_refs[i].set_head(NULL); 362 _discovered_refs[i].set_length(0); 363 } 364 } 365 } 366 367 void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) { 368 _current_discovered_addr = java_lang_ref_Reference::discovered_addr_raw(_current_discovered); 369 oop discovered = java_lang_ref_Reference::discovered(_current_discovered); 370 assert(_current_discovered_addr && oopDesc::is_oop_or_null(discovered), 371 "Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered)); 372 _next_discovered = discovered; 373 374 _referent_addr = java_lang_ref_Reference::referent_addr_raw(_current_discovered); 375 _referent = java_lang_ref_Reference::referent(_current_discovered); 376 assert(Universe::heap()->is_in_reserved_or_null(_referent), 377 "Wrong oop found in java.lang.Reference object"); 378 assert(allow_null_referent ? 379 oopDesc::is_oop_or_null(_referent) 380 : oopDesc::is_oop(_referent), 381 "Expected an oop%s for referent field at " PTR_FORMAT, 382 (allow_null_referent ? " or NULL" : ""), 383 p2i(_referent)); 384 } 385 386 void DiscoveredListIterator::remove() { 387 assert(oopDesc::is_oop(_current_discovered), "Dropping a bad reference"); 388 RawAccess<>::oop_store(_current_discovered_addr, oop(NULL)); 389 390 // First _prev_next ref actually points into DiscoveredList (gross). 391 oop new_next; 392 if (_next_discovered == _current_discovered) { 393 // At the end of the list, we should make _prev point to itself. 394 // If _ref is the first ref, then _prev_next will be in the DiscoveredList, 395 // and _prev will be NULL. 396 new_next = _prev_discovered; 397 } else { 398 new_next = _next_discovered; 399 } 400 // Remove Reference object from discovered list. Note that G1 does not need a 401 // pre-barrier here because we know the Reference has already been found/marked, 402 // that's how it ended up in the discovered list in the first place. 403 RawAccess<>::oop_store(_prev_discovered_addr, new_next); 404 NOT_PRODUCT(_removed++); 405 _refs_list.dec_length(1); 406 } 407 408 void DiscoveredListIterator::clear_referent() { 409 RawAccess<>::oop_store(_referent_addr, oop(NULL)); 410 } 411 412 // NOTE: process_phase*() are largely similar, and at a high level 413 // merely iterate over the extant list applying a predicate to 414 // each of its elements and possibly removing that element from the 415 // list and applying some further closures to that element. 416 // We should consider the possibility of replacing these 417 // process_phase*() methods by abstracting them into 418 // a single general iterator invocation that receives appropriate 419 // closures that accomplish this work. 420 421 // (SoftReferences only) Traverse the list and remove any SoftReferences whose 422 // referents are not alive, but that should be kept alive for policy reasons. 423 // Keep alive the transitive closure of all such referents. 424 void 425 ReferenceProcessor::process_phase1(DiscoveredList& refs_list, 426 ReferencePolicy* policy, 427 BoolObjectClosure* is_alive, 428 OopClosure* keep_alive, 429 VoidClosure* complete_gc) { 430 assert(policy != NULL, "Must have a non-NULL policy"); 431 DiscoveredListIterator iter(refs_list, keep_alive, is_alive); 432 // Decide which softly reachable refs should be kept alive. 433 while (iter.has_next()) { 434 iter.load_ptrs(DEBUG_ONLY(!discovery_is_atomic() /* allow_null_referent */)); 435 bool referent_is_dead = (iter.referent() != NULL) && !iter.is_referent_alive(); 436 if (referent_is_dead && 437 !policy->should_clear_reference(iter.obj(), _soft_ref_timestamp_clock)) { 438 log_develop_trace(gc, ref)("Dropping reference (" INTPTR_FORMAT ": %s" ") by policy", 439 p2i(iter.obj()), iter.obj()->klass()->internal_name()); 440 // Remove Reference object from list 441 iter.remove(); 442 // keep the referent around 443 iter.make_referent_alive(); 444 iter.move_to_next(); 445 } else { 446 iter.next(); 447 } 448 } 449 // Close the reachable set 450 complete_gc->do_void(); 451 log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " dead Refs out of " SIZE_FORMAT " discovered Refs by policy, from list " INTPTR_FORMAT, 452 iter.removed(), iter.processed(), p2i(&refs_list)); 453 } 454 455 void ReferenceProcessor::process_phase2(DiscoveredList& refs_list, 456 BoolObjectClosure* is_alive, 457 OopClosure* keep_alive, 458 VoidClosure* complete_gc) { 459 if (discovery_is_atomic()) { 460 // complete_gc is ignored in this case for this phase 461 pp2_work(refs_list, is_alive, keep_alive); 462 } else { 463 assert(complete_gc != NULL, "Error"); 464 pp2_work_concurrent_discovery(refs_list, is_alive, 465 keep_alive, complete_gc); 466 } 467 } 468 // Traverse the list and remove any Refs that are not active, or 469 // whose referents are either alive or NULL. 470 void 471 ReferenceProcessor::pp2_work(DiscoveredList& refs_list, 472 BoolObjectClosure* is_alive, 473 OopClosure* keep_alive) { 474 assert(discovery_is_atomic(), "Error"); 475 DiscoveredListIterator iter(refs_list, keep_alive, is_alive); 476 while (iter.has_next()) { 477 iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */)); 478 DEBUG_ONLY(oop next = java_lang_ref_Reference::next(iter.obj());) 479 assert(next == NULL, "Should not discover inactive Reference"); 480 if (iter.is_referent_alive()) { 481 log_develop_trace(gc, ref)("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)", 482 p2i(iter.obj()), iter.obj()->klass()->internal_name()); 483 // The referent is reachable after all. 484 // Remove Reference object from list. 485 iter.remove(); 486 // Update the referent pointer as necessary: Note that this 487 // should not entail any recursive marking because the 488 // referent must already have been traversed. 489 iter.make_referent_alive(); 490 iter.move_to_next(); 491 } else { 492 iter.next(); 493 } 494 } 495 NOT_PRODUCT( 496 if (iter.processed() > 0) { 497 log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " active Refs out of " SIZE_FORMAT 498 " Refs in discovered list " INTPTR_FORMAT, 499 iter.removed(), iter.processed(), p2i(&refs_list)); 500 } 501 ) 502 } 503 504 void 505 ReferenceProcessor::pp2_work_concurrent_discovery(DiscoveredList& refs_list, 506 BoolObjectClosure* is_alive, 507 OopClosure* keep_alive, 508 VoidClosure* complete_gc) { 509 assert(!discovery_is_atomic(), "Error"); 510 DiscoveredListIterator iter(refs_list, keep_alive, is_alive); 511 while (iter.has_next()) { 512 iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */)); 513 HeapWord* next_addr = java_lang_ref_Reference::next_addr_raw(iter.obj()); 514 oop next = java_lang_ref_Reference::next(iter.obj()); 515 if ((iter.referent() == NULL || iter.is_referent_alive() || 516 next != NULL)) { 517 assert(oopDesc::is_oop_or_null(next), "Expected an oop or NULL for next field at " PTR_FORMAT, p2i(next)); 518 // Remove Reference object from list 519 iter.remove(); 520 // Trace the cohorts 521 iter.make_referent_alive(); 522 if (UseCompressedOops) { 523 keep_alive->do_oop((narrowOop*)next_addr); 524 } else { 525 keep_alive->do_oop((oop*)next_addr); 526 } 527 iter.move_to_next(); 528 } else { 529 iter.next(); 530 } 531 } 532 // Now close the newly reachable set 533 complete_gc->do_void(); 534 NOT_PRODUCT( 535 if (iter.processed() > 0) { 536 log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " active Refs out of " SIZE_FORMAT 537 " Refs in discovered list " INTPTR_FORMAT, 538 iter.removed(), iter.processed(), p2i(&refs_list)); 539 } 540 ) 541 } 542 543 void ReferenceProcessor::process_phase3(DiscoveredList& refs_list, 544 bool clear_referent, 545 BoolObjectClosure* is_alive, 546 OopClosure* keep_alive, 547 VoidClosure* complete_gc) { 548 ResourceMark rm; 549 DiscoveredListIterator iter(refs_list, keep_alive, is_alive); 550 while (iter.has_next()) { 551 iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */)); 552 if (clear_referent) { 553 // NULL out referent pointer 554 iter.clear_referent(); 555 } else { 556 // keep the referent around 557 iter.make_referent_alive(); 558 } 559 log_develop_trace(gc, ref)("Adding %sreference (" INTPTR_FORMAT ": %s) as pending", 560 clear_referent ? "cleared " : "", p2i(iter.obj()), iter.obj()->klass()->internal_name()); 561 assert(oopDesc::is_oop(iter.obj(), UseConcMarkSweepGC), "Adding a bad reference"); 562 iter.next(); 563 } 564 // Close the reachable set 565 complete_gc->do_void(); 566 } 567 568 void 569 ReferenceProcessor::clear_discovered_references(DiscoveredList& refs_list) { 570 oop obj = NULL; 571 oop next = refs_list.head(); 572 while (next != obj) { 573 obj = next; 574 next = java_lang_ref_Reference::discovered(obj); 575 java_lang_ref_Reference::set_discovered_raw(obj, NULL); 576 } 577 refs_list.set_head(NULL); 578 refs_list.set_length(0); 579 } 580 581 void ReferenceProcessor::abandon_partial_discovery() { 582 // loop over the lists 583 for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) { 584 if ((i % _max_num_queues) == 0) { 585 log_develop_trace(gc, ref)("Abandoning %s discovered list", list_name(i)); 586 } 587 clear_discovered_references(_discovered_refs[i]); 588 } 589 } 590 591 size_t ReferenceProcessor::total_reference_count(ReferenceType type) const { 592 DiscoveredList* list = NULL; 593 594 switch (type) { 595 case REF_SOFT: 596 list = _discoveredSoftRefs; 597 break; 598 case REF_WEAK: 599 list = _discoveredWeakRefs; 600 break; 601 case REF_FINAL: 602 list = _discoveredFinalRefs; 603 break; 604 case REF_PHANTOM: 605 list = _discoveredPhantomRefs; 606 break; 607 case REF_OTHER: 608 case REF_NONE: 609 default: 610 ShouldNotReachHere(); 611 } 612 return total_count(list); 613 } 614 615 class RefProcPhase1Task: public AbstractRefProcTaskExecutor::ProcessTask { 616 public: 617 RefProcPhase1Task(ReferenceProcessor& ref_processor, 618 DiscoveredList refs_lists[], 619 ReferencePolicy* policy, 620 bool marks_oops_alive, 621 ReferenceProcessorPhaseTimes* phase_times) 622 : ProcessTask(ref_processor, refs_lists, marks_oops_alive, phase_times), 623 _policy(policy) 624 { } 625 virtual void work(unsigned int i, BoolObjectClosure& is_alive, 626 OopClosure& keep_alive, 627 VoidClosure& complete_gc) 628 { 629 RefProcWorkerTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase1, _phase_times, i); 630 631 _ref_processor.process_phase1(_refs_lists[i], _policy, 632 &is_alive, &keep_alive, &complete_gc); 633 } 634 private: 635 ReferencePolicy* _policy; 636 }; 637 638 class RefProcPhase2Task: public AbstractRefProcTaskExecutor::ProcessTask { 639 public: 640 RefProcPhase2Task(ReferenceProcessor& ref_processor, 641 DiscoveredList refs_lists[], 642 bool marks_oops_alive, 643 ReferenceProcessorPhaseTimes* phase_times) 644 : ProcessTask(ref_processor, refs_lists, marks_oops_alive, phase_times) 645 { } 646 virtual void work(unsigned int i, BoolObjectClosure& is_alive, 647 OopClosure& keep_alive, 648 VoidClosure& complete_gc) 649 { 650 RefProcWorkerTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase2, _phase_times, i); 651 652 _ref_processor.process_phase2(_refs_lists[i], 653 &is_alive, &keep_alive, &complete_gc); 654 } 655 }; 656 657 class RefProcPhase3Task: public AbstractRefProcTaskExecutor::ProcessTask { 658 public: 659 RefProcPhase3Task(ReferenceProcessor& ref_processor, 660 DiscoveredList refs_lists[], 661 bool clear_referent, 662 bool marks_oops_alive, 663 ReferenceProcessorPhaseTimes* phase_times) 664 : ProcessTask(ref_processor, refs_lists, marks_oops_alive, phase_times), 665 _clear_referent(clear_referent) 666 { } 667 virtual void work(unsigned int i, BoolObjectClosure& is_alive, 668 OopClosure& keep_alive, 669 VoidClosure& complete_gc) 670 { 671 RefProcWorkerTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase3, _phase_times, i); 672 673 _ref_processor.process_phase3(_refs_lists[i], _clear_referent, 674 &is_alive, &keep_alive, &complete_gc); 675 } 676 private: 677 bool _clear_referent; 678 }; 679 680 #ifndef PRODUCT 681 void ReferenceProcessor::log_reflist_counts(DiscoveredList ref_lists[], uint active_length, size_t total_refs) { 682 if (!log_is_enabled(Trace, gc, ref)) { 683 return; 684 } 685 686 stringStream st; 687 for (uint i = 0; i < active_length; ++i) { 688 st.print(SIZE_FORMAT " ", ref_lists[i].length()); 689 } 690 log_develop_trace(gc, ref)("%s= " SIZE_FORMAT, st.as_string(), total_refs); 691 #ifdef ASSERT 692 for (uint i = active_length; i < _max_num_queues; i++) { 693 assert(ref_lists[i].length() == 0, SIZE_FORMAT " unexpected References in %u", 694 ref_lists[i].length(), i); 695 } 696 #endif 697 } 698 #endif 699 700 void ReferenceProcessor::set_active_mt_degree(uint v) { 701 _num_queues = v; 702 _next_id = 0; 703 } 704 705 // Balances reference queues. 706 // Move entries from all queues[0, 1, ..., _max_num_q-1] to 707 // queues[0, 1, ..., _num_q-1] because only the first _num_q 708 // corresponding to the active workers will be processed. 709 void ReferenceProcessor::balance_queues(DiscoveredList ref_lists[]) 710 { 711 // calculate total length 712 size_t total_refs = 0; 713 log_develop_trace(gc, ref)("Balance ref_lists "); 714 715 for (uint i = 0; i < _max_num_queues; ++i) { 716 total_refs += ref_lists[i].length(); 717 } 718 log_reflist_counts(ref_lists, _max_num_queues, total_refs); 719 size_t avg_refs = total_refs / _num_queues + 1; 720 uint to_idx = 0; 721 for (uint from_idx = 0; from_idx < _max_num_queues; from_idx++) { 722 bool move_all = false; 723 if (from_idx >= _num_queues) { 724 move_all = ref_lists[from_idx].length() > 0; 725 } 726 while ((ref_lists[from_idx].length() > avg_refs) || 727 move_all) { 728 assert(to_idx < _num_queues, "Sanity Check!"); 729 if (ref_lists[to_idx].length() < avg_refs) { 730 // move superfluous refs 731 size_t refs_to_move; 732 // Move all the Ref's if the from queue will not be processed. 733 if (move_all) { 734 refs_to_move = MIN2(ref_lists[from_idx].length(), 735 avg_refs - ref_lists[to_idx].length()); 736 } else { 737 refs_to_move = MIN2(ref_lists[from_idx].length() - avg_refs, 738 avg_refs - ref_lists[to_idx].length()); 739 } 740 741 assert(refs_to_move > 0, "otherwise the code below will fail"); 742 743 oop move_head = ref_lists[from_idx].head(); 744 oop move_tail = move_head; 745 oop new_head = move_head; 746 // find an element to split the list on 747 for (size_t j = 0; j < refs_to_move; ++j) { 748 move_tail = new_head; 749 new_head = java_lang_ref_Reference::discovered(new_head); 750 } 751 752 // Add the chain to the to list. 753 if (ref_lists[to_idx].head() == NULL) { 754 // to list is empty. Make a loop at the end. 755 java_lang_ref_Reference::set_discovered_raw(move_tail, move_tail); 756 } else { 757 java_lang_ref_Reference::set_discovered_raw(move_tail, ref_lists[to_idx].head()); 758 } 759 ref_lists[to_idx].set_head(move_head); 760 ref_lists[to_idx].inc_length(refs_to_move); 761 762 // Remove the chain from the from list. 763 if (move_tail == new_head) { 764 // We found the end of the from list. 765 ref_lists[from_idx].set_head(NULL); 766 } else { 767 ref_lists[from_idx].set_head(new_head); 768 } 769 ref_lists[from_idx].dec_length(refs_to_move); 770 if (ref_lists[from_idx].length() == 0) { 771 break; 772 } 773 } else { 774 to_idx = (to_idx + 1) % _num_queues; 775 } 776 } 777 } 778 #ifdef ASSERT 779 size_t balanced_total_refs = 0; 780 for (uint i = 0; i < _num_queues; ++i) { 781 balanced_total_refs += ref_lists[i].length(); 782 } 783 log_reflist_counts(ref_lists, _num_queues, balanced_total_refs); 784 assert(total_refs == balanced_total_refs, "Balancing was incomplete"); 785 #endif 786 } 787 788 void ReferenceProcessor::balance_all_queues() { 789 balance_queues(_discoveredSoftRefs); 790 balance_queues(_discoveredWeakRefs); 791 balance_queues(_discoveredFinalRefs); 792 balance_queues(_discoveredPhantomRefs); 793 } 794 795 void ReferenceProcessor::process_discovered_reflist( 796 DiscoveredList refs_lists[], 797 ReferencePolicy* policy, 798 bool clear_referent, 799 BoolObjectClosure* is_alive, 800 OopClosure* keep_alive, 801 VoidClosure* complete_gc, 802 AbstractRefProcTaskExecutor* task_executor, 803 ReferenceProcessorPhaseTimes* phase_times) 804 { 805 bool mt_processing = task_executor != NULL && _processing_is_mt; 806 807 phase_times->set_processing_is_mt(mt_processing); 808 809 if (mt_processing && ParallelRefProcBalancingEnabled) { 810 RefProcBalanceQueuesTimeTracker tt(phase_times); 811 balance_queues(refs_lists); 812 } 813 814 // Phase 1 (soft refs only): 815 // . Traverse the list and remove any SoftReferences whose 816 // referents are not alive, but that should be kept alive for 817 // policy reasons. Keep alive the transitive closure of all 818 // such referents. 819 if (policy != NULL) { 820 RefProcParPhaseTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase1, phase_times); 821 822 if (mt_processing) { 823 RefProcPhase1Task phase1(*this, refs_lists, policy, true /*marks_oops_alive*/, phase_times); 824 task_executor->execute(phase1); 825 } else { 826 for (uint i = 0; i < _max_num_queues; i++) { 827 process_phase1(refs_lists[i], policy, 828 is_alive, keep_alive, complete_gc); 829 } 830 } 831 } else { // policy == NULL 832 assert(refs_lists != _discoveredSoftRefs, 833 "Policy must be specified for soft references."); 834 } 835 836 // Phase 2: 837 // . Traverse the list and remove any refs whose referents are alive. 838 { 839 RefProcParPhaseTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase2, phase_times); 840 841 if (mt_processing) { 842 RefProcPhase2Task phase2(*this, refs_lists, !discovery_is_atomic() /*marks_oops_alive*/, phase_times); 843 task_executor->execute(phase2); 844 } else { 845 for (uint i = 0; i < _max_num_queues; i++) { 846 process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc); 847 } 848 } 849 } 850 851 // Phase 3: 852 // . Traverse the list and process referents as appropriate. 853 { 854 RefProcParPhaseTimeTracker tt(ReferenceProcessorPhaseTimes::RefPhase3, phase_times); 855 856 if (mt_processing) { 857 RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/, phase_times); 858 task_executor->execute(phase3); 859 } else { 860 for (uint i = 0; i < _max_num_queues; i++) { 861 process_phase3(refs_lists[i], clear_referent, 862 is_alive, keep_alive, complete_gc); 863 } 864 } 865 } 866 } 867 868 inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt) { 869 uint id = 0; 870 // Determine the queue index to use for this object. 871 if (_discovery_is_mt) { 872 // During a multi-threaded discovery phase, 873 // each thread saves to its "own" list. 874 Thread* thr = Thread::current(); 875 id = thr->as_Worker_thread()->id(); 876 } else { 877 // single-threaded discovery, we save in round-robin 878 // fashion to each of the lists. 879 if (_processing_is_mt) { 880 id = next_id(); 881 } 882 } 883 assert(id < _max_num_queues, "Id is out-of-bounds id %u and max id %u)", id, _max_num_queues); 884 885 // Get the discovered queue to which we will add 886 DiscoveredList* list = NULL; 887 switch (rt) { 888 case REF_OTHER: 889 // Unknown reference type, no special treatment 890 break; 891 case REF_SOFT: 892 list = &_discoveredSoftRefs[id]; 893 break; 894 case REF_WEAK: 895 list = &_discoveredWeakRefs[id]; 896 break; 897 case REF_FINAL: 898 list = &_discoveredFinalRefs[id]; 899 break; 900 case REF_PHANTOM: 901 list = &_discoveredPhantomRefs[id]; 902 break; 903 case REF_NONE: 904 // we should not reach here if we are an InstanceRefKlass 905 default: 906 ShouldNotReachHere(); 907 } 908 log_develop_trace(gc, ref)("Thread %d gets list " INTPTR_FORMAT, id, p2i(list)); 909 return list; 910 } 911 912 inline void 913 ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list, 914 oop obj, 915 HeapWord* discovered_addr) { 916 assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller"); 917 // First we must make sure this object is only enqueued once. CAS in a non null 918 // discovered_addr. 919 oop current_head = refs_list.head(); 920 // The last ref must have its discovered field pointing to itself. 921 oop next_discovered = (current_head != NULL) ? current_head : obj; 922 923 oop retest = RawAccess<>::oop_atomic_cmpxchg(next_discovered, discovered_addr, oop(NULL)); 924 925 if (retest == NULL) { 926 // This thread just won the right to enqueue the object. 927 // We have separate lists for enqueueing, so no synchronization 928 // is necessary. 929 refs_list.set_head(obj); 930 refs_list.inc_length(1); 931 932 log_develop_trace(gc, ref)("Discovered reference (mt) (" INTPTR_FORMAT ": %s)", 933 p2i(obj), obj->klass()->internal_name()); 934 } else { 935 // If retest was non NULL, another thread beat us to it: 936 // The reference has already been discovered... 937 log_develop_trace(gc, ref)("Already discovered reference (" INTPTR_FORMAT ": %s)", 938 p2i(obj), obj->klass()->internal_name()); 939 } 940 } 941 942 #ifndef PRODUCT 943 // Non-atomic (i.e. concurrent) discovery might allow us 944 // to observe j.l.References with NULL referents, being those 945 // cleared concurrently by mutators during (or after) discovery. 946 void ReferenceProcessor::verify_referent(oop obj) { 947 bool da = discovery_is_atomic(); 948 oop referent = java_lang_ref_Reference::referent(obj); 949 assert(da ? oopDesc::is_oop(referent) : oopDesc::is_oop_or_null(referent), 950 "Bad referent " INTPTR_FORMAT " found in Reference " 951 INTPTR_FORMAT " during %satomic discovery ", 952 p2i(referent), p2i(obj), da ? "" : "non-"); 953 } 954 #endif 955 956 bool ReferenceProcessor::is_subject_to_discovery(oop const obj) const { 957 return _is_subject_to_discovery->do_object_b(obj); 958 } 959 960 // We mention two of several possible choices here: 961 // #0: if the reference object is not in the "originating generation" 962 // (or part of the heap being collected, indicated by our "span" 963 // we don't treat it specially (i.e. we scan it as we would 964 // a normal oop, treating its references as strong references). 965 // This means that references can't be discovered unless their 966 // referent is also in the same span. This is the simplest, 967 // most "local" and most conservative approach, albeit one 968 // that may cause weak references to be enqueued least promptly. 969 // We call this choice the "ReferenceBasedDiscovery" policy. 970 // #1: the reference object may be in any generation (span), but if 971 // the referent is in the generation (span) being currently collected 972 // then we can discover the reference object, provided 973 // the object has not already been discovered by 974 // a different concurrently running collector (as may be the 975 // case, for instance, if the reference object is in CMS and 976 // the referent in DefNewGeneration), and provided the processing 977 // of this reference object by the current collector will 978 // appear atomic to every other collector in the system. 979 // (Thus, for instance, a concurrent collector may not 980 // discover references in other generations even if the 981 // referent is in its own generation). This policy may, 982 // in certain cases, enqueue references somewhat sooner than 983 // might Policy #0 above, but at marginally increased cost 984 // and complexity in processing these references. 985 // We call this choice the "RefeferentBasedDiscovery" policy. 986 bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) { 987 // Make sure we are discovering refs (rather than processing discovered refs). 988 if (!_discovering_refs || !RegisterReferences) { 989 return false; 990 } 991 // We only discover active references. 992 oop next = java_lang_ref_Reference::next(obj); 993 if (next != NULL) { // Ref is no longer active 994 return false; 995 } 996 997 if (RefDiscoveryPolicy == ReferenceBasedDiscovery && 998 !is_subject_to_discovery(obj)) { 999 // Reference is not in the originating generation; 1000 // don't treat it specially (i.e. we want to scan it as a normal 1001 // object with strong references). 1002 return false; 1003 } 1004 1005 // We only discover references whose referents are not (yet) 1006 // known to be strongly reachable. 1007 if (is_alive_non_header() != NULL) { 1008 verify_referent(obj); 1009 if (is_alive_non_header()->do_object_b(java_lang_ref_Reference::referent(obj))) { 1010 return false; // referent is reachable 1011 } 1012 } 1013 if (rt == REF_SOFT) { 1014 // For soft refs we can decide now if these are not 1015 // current candidates for clearing, in which case we 1016 // can mark through them now, rather than delaying that 1017 // to the reference-processing phase. Since all current 1018 // time-stamp policies advance the soft-ref clock only 1019 // at a full collection cycle, this is always currently 1020 // accurate. 1021 if (!_current_soft_ref_policy->should_clear_reference(obj, _soft_ref_timestamp_clock)) { 1022 return false; 1023 } 1024 } 1025 1026 ResourceMark rm; // Needed for tracing. 1027 1028 HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr_raw(obj); 1029 const oop discovered = java_lang_ref_Reference::discovered(obj); 1030 assert(oopDesc::is_oop_or_null(discovered), "Expected an oop or NULL for discovered field at " PTR_FORMAT, p2i(discovered)); 1031 if (discovered != NULL) { 1032 // The reference has already been discovered... 1033 log_develop_trace(gc, ref)("Already discovered reference (" INTPTR_FORMAT ": %s)", 1034 p2i(obj), obj->klass()->internal_name()); 1035 if (RefDiscoveryPolicy == ReferentBasedDiscovery) { 1036 // assumes that an object is not processed twice; 1037 // if it's been already discovered it must be on another 1038 // generation's discovered list; so we won't discover it. 1039 return false; 1040 } else { 1041 assert(RefDiscoveryPolicy == ReferenceBasedDiscovery, 1042 "Unrecognized policy"); 1043 // Check assumption that an object is not potentially 1044 // discovered twice except by concurrent collectors that potentially 1045 // trace the same Reference object twice. 1046 assert(UseConcMarkSweepGC || UseG1GC, 1047 "Only possible with a concurrent marking collector"); 1048 return true; 1049 } 1050 } 1051 1052 if (RefDiscoveryPolicy == ReferentBasedDiscovery) { 1053 verify_referent(obj); 1054 // Discover if and only if EITHER: 1055 // .. reference is in our span, OR 1056 // .. we are an atomic collector and referent is in our span 1057 if (is_subject_to_discovery(obj) || 1058 (discovery_is_atomic() && 1059 is_subject_to_discovery(java_lang_ref_Reference::referent(obj)))) { 1060 } else { 1061 return false; 1062 } 1063 } else { 1064 assert(RefDiscoveryPolicy == ReferenceBasedDiscovery && 1065 is_subject_to_discovery(obj), "code inconsistency"); 1066 } 1067 1068 // Get the right type of discovered queue head. 1069 DiscoveredList* list = get_discovered_list(rt); 1070 if (list == NULL) { 1071 return false; // nothing special needs to be done 1072 } 1073 1074 if (_discovery_is_mt) { 1075 add_to_discovered_list_mt(*list, obj, discovered_addr); 1076 } else { 1077 // We do a raw store here: the field will be visited later when processing 1078 // the discovered references. 1079 oop current_head = list->head(); 1080 // The last ref must have its discovered field pointing to itself. 1081 oop next_discovered = (current_head != NULL) ? current_head : obj; 1082 1083 assert(discovered == NULL, "control point invariant"); 1084 RawAccess<>::oop_store(discovered_addr, next_discovered); 1085 list->set_head(obj); 1086 list->inc_length(1); 1087 1088 log_develop_trace(gc, ref)("Discovered reference (" INTPTR_FORMAT ": %s)", p2i(obj), obj->klass()->internal_name()); 1089 } 1090 assert(oopDesc::is_oop(obj), "Discovered a bad reference"); 1091 verify_referent(obj); 1092 return true; 1093 } 1094 1095 bool ReferenceProcessor::has_discovered_references() { 1096 for (uint i = 0; i < _max_num_queues * number_of_subclasses_of_ref(); i++) { 1097 if (!_discovered_refs[i].is_empty()) { 1098 return true; 1099 } 1100 } 1101 return false; 1102 } 1103 1104 // Preclean the discovered references by removing those 1105 // whose referents are alive, and by marking from those that 1106 // are not active. These lists can be handled here 1107 // in any order and, indeed, concurrently. 1108 void ReferenceProcessor::preclean_discovered_references( 1109 BoolObjectClosure* is_alive, 1110 OopClosure* keep_alive, 1111 VoidClosure* complete_gc, 1112 YieldClosure* yield, 1113 GCTimer* gc_timer) { 1114 1115 // Soft references 1116 { 1117 GCTraceTime(Debug, gc, ref) tm("Preclean SoftReferences", gc_timer); 1118 for (uint i = 0; i < _max_num_queues; i++) { 1119 if (yield->should_return()) { 1120 return; 1121 } 1122 preclean_discovered_reflist(_discoveredSoftRefs[i], is_alive, 1123 keep_alive, complete_gc, yield); 1124 } 1125 } 1126 1127 // Weak references 1128 { 1129 GCTraceTime(Debug, gc, ref) tm("Preclean WeakReferences", gc_timer); 1130 for (uint i = 0; i < _max_num_queues; i++) { 1131 if (yield->should_return()) { 1132 return; 1133 } 1134 preclean_discovered_reflist(_discoveredWeakRefs[i], is_alive, 1135 keep_alive, complete_gc, yield); 1136 } 1137 } 1138 1139 // Final references 1140 { 1141 GCTraceTime(Debug, gc, ref) tm("Preclean FinalReferences", gc_timer); 1142 for (uint i = 0; i < _max_num_queues; i++) { 1143 if (yield->should_return()) { 1144 return; 1145 } 1146 preclean_discovered_reflist(_discoveredFinalRefs[i], is_alive, 1147 keep_alive, complete_gc, yield); 1148 } 1149 } 1150 1151 // Phantom references 1152 { 1153 GCTraceTime(Debug, gc, ref) tm("Preclean PhantomReferences", gc_timer); 1154 for (uint i = 0; i < _max_num_queues; i++) { 1155 if (yield->should_return()) { 1156 return; 1157 } 1158 preclean_discovered_reflist(_discoveredPhantomRefs[i], is_alive, 1159 keep_alive, complete_gc, yield); 1160 } 1161 } 1162 } 1163 1164 // Walk the given discovered ref list, and remove all reference objects 1165 // whose referents are still alive, whose referents are NULL or which 1166 // are not active (have a non-NULL next field). NOTE: When we are 1167 // thus precleaning the ref lists (which happens single-threaded today), 1168 // we do not disable refs discovery to honor the correct semantics of 1169 // java.lang.Reference. As a result, we need to be careful below 1170 // that ref removal steps interleave safely with ref discovery steps 1171 // (in this thread). 1172 void 1173 ReferenceProcessor::preclean_discovered_reflist(DiscoveredList& refs_list, 1174 BoolObjectClosure* is_alive, 1175 OopClosure* keep_alive, 1176 VoidClosure* complete_gc, 1177 YieldClosure* yield) { 1178 DiscoveredListIterator iter(refs_list, keep_alive, is_alive); 1179 while (iter.has_next()) { 1180 iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */)); 1181 oop obj = iter.obj(); 1182 oop next = java_lang_ref_Reference::next(obj); 1183 if (iter.referent() == NULL || iter.is_referent_alive() || 1184 next != NULL) { 1185 // The referent has been cleared, or is alive, or the Reference is not 1186 // active; we need to trace and mark its cohort. 1187 log_develop_trace(gc, ref)("Precleaning Reference (" INTPTR_FORMAT ": %s)", 1188 p2i(iter.obj()), iter.obj()->klass()->internal_name()); 1189 // Remove Reference object from list 1190 iter.remove(); 1191 // Keep alive its cohort. 1192 iter.make_referent_alive(); 1193 if (UseCompressedOops) { 1194 narrowOop* next_addr = (narrowOop*)java_lang_ref_Reference::next_addr_raw(obj); 1195 keep_alive->do_oop(next_addr); 1196 } else { 1197 oop* next_addr = (oop*)java_lang_ref_Reference::next_addr_raw(obj); 1198 keep_alive->do_oop(next_addr); 1199 } 1200 iter.move_to_next(); 1201 } else { 1202 iter.next(); 1203 } 1204 } 1205 // Close the reachable set 1206 complete_gc->do_void(); 1207 1208 NOT_PRODUCT( 1209 if (iter.processed() > 0) { 1210 log_develop_trace(gc, ref)(" Dropped " SIZE_FORMAT " Refs out of " SIZE_FORMAT " Refs in discovered list " INTPTR_FORMAT, 1211 iter.removed(), iter.processed(), p2i(&refs_list)); 1212 } 1213 ) 1214 } 1215 1216 const char* ReferenceProcessor::list_name(uint i) { 1217 assert(i <= _max_num_queues * number_of_subclasses_of_ref(), 1218 "Out of bounds index"); 1219 1220 int j = i / _max_num_queues; 1221 switch (j) { 1222 case 0: return "SoftRef"; 1223 case 1: return "WeakRef"; 1224 case 2: return "FinalRef"; 1225 case 3: return "PhantomRef"; 1226 } 1227 ShouldNotReachHere(); 1228 return NULL; 1229 }