/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc/cms/cmsGCStats.hpp"
#include "gc/cms/cmsHeap.hpp"
#include "gc/cms/cmsOopClosures.inline.hpp"
#include "gc/cms/cmsVMOperations.hpp"
#include "gc/cms/compactibleFreeListSpace.hpp"
#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
#include "gc/cms/concurrentMarkSweepThread.hpp"
#include "gc/cms/parNewGeneration.hpp"
#include "gc/cms/promotionInfo.inline.hpp"
#include "gc/serial/genMarkSweep.hpp"
#include "gc/serial/tenuredGeneration.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/cardGeneration.inline.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/owstTaskTerminator.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/space.inline.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.hpp"
#include "memory/binaryTreeDictionary.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/padded.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/atomic.hpp"
#include "runtime/flags/flagSetting.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/timer.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "services/runtimeService.hpp"
#include "utilities/align.hpp"
#include "utilities/stack.inline.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmci.hpp"
#endif

// statics
CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
bool CMSCollector::_full_gc_requested = false;
GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc;

//////////////////////////////////////////////////////////////////
// In support of CMS/VM thread synchronization
//////////////////////////////////////////////////////////////////
// We split use of the CGC_lock into 2 "levels".
// The low-level locking is of the usual CGC_lock monitor. We introduce
// a higher level "token" (hereafter "CMS token") built on top of the
// low level monitor (hereafter "CGC lock").
// The token-passing protocol gives priority to the VM thread. The
// CMS-lock doesn't provide any fairness guarantees, but clients
// should ensure that it is only held for very short, bounded
// durations.
//
// When either of the CMS thread or the VM thread is involved in
// collection operations during which it does not want the other
// thread to interfere, it obtains the CMS token.
//
// If either thread tries to get the token while the other has
// it, that thread waits. However, if the VM thread and CMS thread
// both want the token, then the VM thread gets priority while the
// CMS thread waits. This ensures, for instance, that the "concurrent"
// phases of the CMS thread's work do not block out the VM thread
// for long periods of time as the CMS thread continues to hog
// the token. (See bug 4616232).
//
// The baton-passing functions are, however, controlled by the
// flags _foregroundGCShouldWait and _foregroundGCIsActive,
// and here the low-level CMS lock, not the high level token,
// ensures mutual exclusion.
//
// Two important conditions that we have to satisfy:
// 1. if a thread does a low-level wait on the CMS lock, then it
//    relinquishes the CMS token if it were holding that token
//    when it acquired the low-level CMS lock.
// 2. any low-level notifications on the low-level lock
//    should only be sent when a thread has relinquished the token.
//
// In the absence of either property, we'd have potential deadlock.
//
// We protect each of the CMS (concurrent and sequential) phases
// with the CMS _token_, not the CMS _lock_.
//
// The only code protected by the CMS lock is the token acquisition code
// itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the
// baton-passing code.
//
// Unfortunately, I couldn't come up with a good abstraction to factor and
// hide the naked CGC_lock manipulation in the baton-passing code
// further below. That's something we should try to do. Also, the proof
// of correctness of this 2-level locking scheme is far from obvious,
// and potentially quite slippery. We have an uneasy suspicion, for instance,
// that there may be a theoretical possibility of delay/starvation in the
// low-level lock/wait/notify scheme used for the baton-passing because of
// potential interference with the priority scheme embodied in the
// CMS-token-passing protocol. See related comments at a CGC_lock->wait()
// invocation further below and marked with "XXX 20011219YSR".
// Indeed, as we note elsewhere, this may become yet more slippery
// in the presence of multiple CMS and/or multiple VM threads. XXX
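//
// A note on usage (an illustrative sketch, not a prescription): code that
// must hold the CMS token for the duration of a scope typically uses one of
// the two RAII helpers defined below, e.g.
//
//   {
//     CMSTokenSync ts(true /* is_cms_thread */);
//     ... // work done while holding the CMS token
//   }   // token released by the destructor
//
// CMSTokenSyncWithLocks additionally takes up to three mutexes that are
// acquired (without safepoint checks) after the token has been obtained.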

class CMSTokenSync: public StackObj {
 private:
  bool _is_cms_thread;
 public:
  CMSTokenSync(bool is_cms_thread):
    _is_cms_thread(is_cms_thread) {
    assert(is_cms_thread == Thread::current()->is_ConcurrentGC_thread(),
           "Incorrect argument to constructor");
    ConcurrentMarkSweepThread::synchronize(_is_cms_thread);
  }

  ~CMSTokenSync() {
    assert(_is_cms_thread ?
             ConcurrentMarkSweepThread::cms_thread_has_cms_token() :
             ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
           "Incorrect state");
    ConcurrentMarkSweepThread::desynchronize(_is_cms_thread);
  }
};

// Convenience class that does a CMSTokenSync, and then acquires
// up to three locks.
class CMSTokenSyncWithLocks: public CMSTokenSync {
 private:
  // Note: locks are acquired in textual declaration order
  // and released in the opposite order
  MutexLocker _locker1, _locker2, _locker3;
 public:
  CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
                        Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
    CMSTokenSync(is_cms_thread),
    _locker1(mutex1, Mutex::_no_safepoint_check_flag),
    _locker2(mutex2, Mutex::_no_safepoint_check_flag),
    _locker3(mutex3, Mutex::_no_safepoint_check_flag)
  { }
};


//////////////////////////////////////////////////////////////////
//  Concurrent Mark-Sweep Generation /////////////////////////////
//////////////////////////////////////////////////////////////////

NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)

// This struct contains per-thread things necessary to support parallel
// young-gen collection.
class CMSParGCThreadState: public CHeapObj<mtGC> {
 public:
  CompactibleFreeListSpaceLAB lab;
  PromotionInfo promo;

  // Constructor.
  CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
    promo.setSpace(cfls);
  }
};

ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
     ReservedSpace rs,
     size_t initial_byte_size,
     size_t min_byte_size,
     size_t max_byte_size,
     CardTableRS* ct) :
  CardGeneration(rs, initial_byte_size, ct),
  _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
  _did_compact(false)
{
  HeapWord* bottom = (HeapWord*) _virtual_space.low();
  HeapWord* end    = (HeapWord*) _virtual_space.high();

  _direct_allocated_words = 0;
  NOT_PRODUCT(
    _numObjectsPromoted = 0;
    _numWordsPromoted = 0;
    _numObjectsAllocated = 0;
    _numWordsAllocated = 0;
  )

  _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end));
  NOT_PRODUCT(debug_cms_space = _cmsSpace;)
  _cmsSpace->_old_gen = this;

  _gc_stats = new CMSGCStats();

  // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
  // offsets match. The ability to tell free chunks from objects
  // depends on this property.
  debug_only(
    FreeChunk* junk = NULL;
    assert(UseCompressedClassPointers ||
           junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
           "Offset of FreeChunk::_prev within FreeChunk must match"
           " that of OopDesc::_klass within OopDesc");
  )

  _par_gc_thread_states = NEW_C_HEAP_ARRAY(CMSParGCThreadState*, ParallelGCThreads, mtGC);
  for (uint i = 0; i < ParallelGCThreads; i++) {
    _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
  }

  _incremental_collection_failed = false;
  // The "dilatation_factor" is the expansion that can occur on
  // account of the fact that the minimum object size in the CMS
  // generation may be larger than that in, say, a contiguous young
  // generation.
  // Ideally, in the calculation below, we'd compute the dilatation
  // factor as: MinChunkSize/(promoting_gen's min object size)
  // Since we do not have such a general query interface for the
  // promoting generation, we'll instead just use the minimum
  // object size (which today is a header's worth of space);
  // note that all arithmetic is in units of HeapWords.
  assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
  assert(_dilatation_factor >= 1.0, "from previous assert");

  initialize_performance_counters(min_byte_size, max_byte_size);
}


// The field "_initiating_occupancy" represents the occupancy percentage
// at which we trigger a new collection cycle.  Unless explicitly specified
// via CMSInitiatingOccupancyFraction (argument "io" below), it
// is calculated by:
//
//   Let "f" be MinHeapFreeRatio in
//
//    _initiating_occupancy = 100-f +
//                           f * (CMSTriggerRatio/100)
//   where CMSTriggerRatio is the argument "tr" below.
//
// That is, if we assume the heap is at its desired maximum occupancy at the
// end of a collection, we let CMSTriggerRatio of the (purported) free
// space be allocated before initiating a new collection cycle.
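//
// For example, with MinHeapFreeRatio = 40 and CMSTriggerRatio = 80 (treat
// these numbers as illustrative), and no explicit
// CMSInitiatingOccupancyFraction:
//
//    _initiating_occupancy = (100 - 40 + 0.80 * 40) / 100 = 0.92
//
// i.e. a concurrent cycle is initiated once the old generation is about
// 92% occupied.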
//
void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, uintx tr) {
  assert(io <= 100 && tr <= 100, "Check the arguments");
  if (io >= 0) {
    _initiating_occupancy = (double)io / 100.0;
  } else {
    _initiating_occupancy = ((100 - MinHeapFreeRatio) +
                             (double)(tr * MinHeapFreeRatio) / 100.0)
                            / 100.0;
  }
}

void ConcurrentMarkSweepGeneration::ref_processor_init() {
  assert(collector() != NULL, "no collector");
  collector()->ref_processor_init();
}

void CMSCollector::ref_processor_init() {
  if (_ref_processor == NULL) {
    // Allocate and initialize a reference processor
    _ref_processor =
      new ReferenceProcessor(&_span_based_discoverer,
                             (ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing
                             ParallelGCThreads,                      // mt processing degree
                             _cmsGen->refs_discovery_is_mt(),        // mt discovery
                             MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
                             _cmsGen->refs_discovery_is_atomic(),    // discovery is not atomic
                             &_is_alive_closure,                     // closure for liveness info
                             false);                                 // disable adjusting number of processing threads
    // Initialize the _ref_processor field of CMSGen
    _cmsGen->set_ref_processor(_ref_processor);

  }
}

AdaptiveSizePolicy* CMSCollector::size_policy() {
  return CMSHeap::heap()->size_policy();
}

void ConcurrentMarkSweepGeneration::initialize_performance_counters(size_t min_old_size,
                                                                    size_t max_old_size) {

  const char* gen_name = "old";
  // Generation Counters - generation 1, 1 subspace
  _gen_counters = new GenerationCounters(gen_name, 1, 1,
      min_old_size, max_old_size, &_virtual_space);

  _space_counters = new GSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       this, _gen_counters);
}

CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
  _cms_gen(cms_gen)
{
  assert(alpha <= 100, "bad value");
  _saved_alpha = alpha;

  // Initialize the alphas to the bootstrap value of 100.
  _gc0_alpha = _cms_alpha = 100;

  _cms_begin_time.update();
  _cms_end_time.update();

  _gc0_duration = 0.0;
  _gc0_period = 0.0;
  _gc0_promoted = 0;

  _cms_duration = 0.0;
  _cms_period = 0.0;
  _cms_allocated = 0;

  _cms_used_at_gc0_begin = 0;
  _cms_used_at_gc0_end = 0;
  _allow_duty_cycle_reduction = false;
  _valid_bits = 0;
}

double CMSStats::cms_free_adjustment_factor(size_t free) const {
  // TBD: CR 6909490
  return 1.0;
}

void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
}

// If promotion failure handling is on use
// the padded average size of the promotion for each
// young generation collection.
double CMSStats::time_until_cms_gen_full() const {
  size_t cms_free = _cms_gen->cmsSpace()->free();
  CMSHeap* heap = CMSHeap::heap();
  size_t expected_promotion = MIN2(heap->young_gen()->capacity(),
                                   (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
  if (cms_free > expected_promotion) {
    // Start a cms collection if there isn't enough space to promote
    // for the next young collection.  Use the padded average as
    // a safety factor.
    cms_free -= expected_promotion;

    // Adjust by the safety factor.
    double cms_free_dbl = (double)cms_free;
    double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor) / 100.0;
    // Apply a further correction factor which tries to adjust
    // for recent occurrence of concurrent mode failures.
    cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
    cms_free_dbl = cms_free_dbl * cms_adjustment;

    log_trace(gc)("CMSStats::time_until_cms_gen_full: cms_free " SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
                  cms_free, expected_promotion);
    log_trace(gc)("  cms_free_dbl %f cms_consumption_rate %f", cms_free_dbl, cms_consumption_rate() + 1.0);
    // Add 1 in case the consumption rate goes to zero.
    return cms_free_dbl / (cms_consumption_rate() + 1.0);
  }
  return 0.0;
}

// Compare the duration of the cms collection to the
// time remaining before the cms generation is empty.
// Note that the time from the start of the cms collection
// to the start of the cms sweep (less than the total
// duration of the cms collection) can be used.  This
// has been tried and some applications experienced
// promotion failures early in execution.  This was
// possibly because the averages were not accurate
// enough at the beginning.
double CMSStats::time_until_cms_start() const {
  // We add "gc0_period" to the "work" calculation
  // below because this query is done (mostly) at the
  // end of a scavenge, so we need to conservatively
  // account for that much possible delay
  // in the query so as to avoid concurrent mode failures
  // due to starting the collection just a wee bit too
  // late.
  double work = cms_duration() + gc0_period();
  double deadline = time_until_cms_gen_full();
  // If a concurrent mode failure occurred recently, we want to be
  // more conservative and halve our expected time_until_cms_gen_full()
  if (work > deadline) {
    log_develop_trace(gc)("CMSCollector: collect because of anticipated promotion before full %3.7f + %3.7f > %3.7f ",
                          cms_duration(), gc0_period(), time_until_cms_gen_full());
    return 0.0;
  }
  return work - deadline;
}

#ifndef PRODUCT
void CMSStats::print_on(outputStream *st) const {
  st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
  st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
            gc0_duration(), gc0_period(), gc0_promoted());
  st->print(",cms_dur=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
            cms_duration(), cms_period(), cms_allocated());
  st->print(",cms_since_beg=%g,cms_since_end=%g",
            cms_time_since_begin(), cms_time_since_end());
  st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
            _cms_used_at_gc0_begin, _cms_used_at_gc0_end);

  if (valid()) {
    st->print(",promo_rate=%g,cms_alloc_rate=%g",
              promotion_rate(), cms_allocation_rate());
    st->print(",cms_consumption_rate=%g,time_until_full=%g",
              cms_consumption_rate(), time_until_cms_gen_full());
  }
  st->cr();
}
#endif // #ifndef PRODUCT

CMSCollector::CollectorState CMSCollector::_collectorState =
                             CMSCollector::Idling;
bool CMSCollector::_foregroundGCIsActive = false;
bool CMSCollector::_foregroundGCShouldWait = false;

CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
                           CardTableRS* ct):
  _overflow_list(NULL),
  _conc_workers(NULL),     // may be set later
  _completed_initialization(false),
  _collection_count_start(0),
  _should_unload_classes(CMSClassUnloadingEnabled),
  _concurrent_cycles_since_last_unload(0),
  _roots_scanning_options(GenCollectedHeap::SO_None),
  _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
  _verifying(false),
  _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
  _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _cms_start_registered(false),
  _cmsGen(cmsGen),
  // Adjust span to cover old (cms) gen
  _span(cmsGen->reserved()),
  _ct(ct),
  _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
  _modUnionTable((CardTable::card_shift - LogHeapWordSize),
                 -1 /* lock-free */, "No_lock" /* dummy */),
  _restart_addr(NULL),
  _ser_pmc_preclean_ovflw(0),
  _ser_pmc_remark_ovflw(0),
  _par_pmc_remark_ovflw(0),
  _ser_kac_preclean_ovflw(0),
  _ser_kac_ovflw(0),
  _par_kac_ovflw(0),
#ifndef PRODUCT
  _num_par_pushes(0),
#endif
  _span_based_discoverer(_span),
  _ref_processor(NULL),    // will be set later
  // Construct the is_alive_closure with _span & markBitMap
  _is_alive_closure(_span, &_markBitMap),
  _modUnionClosurePar(&_modUnionTable),
  _between_prologue_and_epilogue(false),
  _abort_preclean(false),
  _start_sampling(false),
  _stats(cmsGen),
  _eden_chunk_lock(new Mutex(Mutex::leaf + 1, "CMS_eden_chunk_lock", true,
                             // verify that this lock should be acquired with safepoint check.
                             Monitor::_safepoint_check_never)),
  _eden_chunk_array(NULL),     // may be set in ctor body
  _eden_chunk_index(0),        // -- ditto --
  _eden_chunk_capacity(0),     // -- ditto --
  _survivor_chunk_array(NULL), // -- ditto --
  _survivor_chunk_index(0),    // -- ditto --
  _survivor_chunk_capacity(0), // -- ditto --
  _survivor_plab_array(NULL)   // -- ditto --
{
  // Now expand the span and allocate the collection support structures
  // (MUT, marking bit map etc.) to cover both generations subject to
  // collection.

  // For use by dirty card to oop closures.
  _cmsGen->cmsSpace()->set_collector(this);

  // Allocate MUT and marking bit map
  {
    MutexLocker x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
    if (!_markBitMap.allocate(_span)) {
      log_warning(gc)("Failed to allocate CMS Bit Map");
      return;
    }
    assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
  }
  {
    _modUnionTable.allocate(_span);
    assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
  }

  if (!_markStack.allocate(MarkStackSize)) {
    log_warning(gc)("Failed to allocate CMS Marking Stack");
    return;
  }

  // Support for multi-threaded concurrent phases
  if (CMSConcurrentMTEnabled) {
    if (FLAG_IS_DEFAULT(ConcGCThreads)) {
      // just for now
      FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3) / 4);
    }
    if (ConcGCThreads > 1) {
      _conc_workers = new YieldingFlexibleWorkGang("CMS Thread",
                                 ConcGCThreads, true);
      if (_conc_workers == NULL) {
        log_warning(gc)("GC/CMS: _conc_workers allocation failure: forcing -CMSConcurrentMTEnabled");
        CMSConcurrentMTEnabled = false;
      } else {
        _conc_workers->initialize_workers();
      }
    } else {
      CMSConcurrentMTEnabled = false;
    }
  }
  if (!CMSConcurrentMTEnabled) {
    ConcGCThreads = 0;
  } else {
    // Turn off CMSCleanOnEnter optimization temporarily for
    // the MT case where it's not fixed yet; see 6178663.
    CMSCleanOnEnter = false;
  }
  assert((_conc_workers != NULL) == (ConcGCThreads > 1),
         "Inconsistency");
  log_debug(gc)("ConcGCThreads: %u", ConcGCThreads);
  log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);

  // Parallel task queues; these are shared for the
  // concurrent and stop-world phases of CMS, but
  // are not shared with parallel scavenge (ParNew).
  {
    uint i;
    uint num_queues = MAX2(ParallelGCThreads, ConcGCThreads);

    if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
         || ParallelRefProcEnabled)
        && num_queues > 0) {
      _task_queues = new OopTaskQueueSet(num_queues);
      if (_task_queues == NULL) {
        log_warning(gc)("task_queues allocation failure.");
        return;
      }
      typedef Padded<OopTaskQueue> PaddedOopTaskQueue;
      for (i = 0; i < num_queues; i++) {
        PaddedOopTaskQueue *q = new PaddedOopTaskQueue();
        if (q == NULL) {
          log_warning(gc)("work_queue allocation failure.");
          return;
        }
        _task_queues->register_queue(i, q);
      }
      for (i = 0; i < num_queues; i++) {
        _task_queues->queue(i)->initialize();
      }
    }
  }

  _cmsGen->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);

  // Clip CMSBootstrapOccupancy between 0 and 100.
  _bootstrap_occupancy = CMSBootstrapOccupancy / 100.0;

  // Now tell CMS generations the identity of their collector
  ConcurrentMarkSweepGeneration::set_collector(this);

  // Create & start a CMS thread for this CMS collector
  _cmsThread = ConcurrentMarkSweepThread::start(this);
  assert(cmsThread() != NULL, "CMS Thread should have been created");
  assert(cmsThread()->collector() == this,
         "CMS Thread should refer to this gen");
  assert(CGC_lock != NULL, "Where's the CGC_lock?");

  // Support for parallelizing young gen rescan
  CMSHeap* heap = CMSHeap::heap();
  _young_gen = heap->young_gen();
  if (heap->supports_inline_contig_alloc()) {
    _top_addr = heap->top_addr();
    _end_addr = heap->end_addr();
    assert(_young_gen != NULL, "no _young_gen");
    _eden_chunk_index = 0;
    _eden_chunk_capacity = (_young_gen->max_capacity() + CMSSamplingGrain) / CMSSamplingGrain;
    _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
  }

  // Support for parallelizing survivor space rescan
  if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
    const size_t max_plab_samples =
      _young_gen->max_survivor_size() / (PLAB::min_size() * HeapWordSize);

    _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
    _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
    _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
    _survivor_chunk_capacity = max_plab_samples;
    for (uint i = 0; i < ParallelGCThreads; i++) {
      HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
      ChunkArray* cur = ::new (&_survivor_plab_array[i]) ChunkArray(vec, max_plab_samples);
      assert(cur->end() == 0, "Should be 0");
      assert(cur->array() == vec, "Should be vec");
      assert(cur->capacity() == max_plab_samples, "Error");
    }
  }

  NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
  _gc_counters = new CollectorCounters("CMS full collection pauses", 1);
  _cgc_counters = new CollectorCounters("CMS concurrent cycle pauses", 2);
  _completed_initialization = true;
  _inter_sweep_timer.start();  // start of time
}

const char* ConcurrentMarkSweepGeneration::name() const {
  return "concurrent mark-sweep generation";
}
void ConcurrentMarkSweepGeneration::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

// This is an optimized version of update_counters(). It takes the
// used value as a parameter rather than computing it.
//
void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
  if (UsePerfData) {
    _space_counters->update_used(used);
    _space_counters->update_capacity();
    _gen_counters->update_all();
  }
}

void ConcurrentMarkSweepGeneration::print() const {
  Generation::print();
  cmsSpace()->print();
}

#ifndef PRODUCT
void ConcurrentMarkSweepGeneration::print_statistics() {
  cmsSpace()->printFLCensus(0);
}
#endif

size_t
ConcurrentMarkSweepGeneration::contiguous_available() const {
  // dld proposes an improvement in precision here. If the committed
  // part of the space ends in a free block we should add that to
  // uncommitted size in the calculation below. Will make this
  // change later, staying with the approximation below for the
  // time being. -- ysr.
  return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
}

size_t
ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
  return _cmsSpace->max_alloc_in_words() * HeapWordSize;
}

size_t ConcurrentMarkSweepGeneration::used_stable() const {
  return cmsSpace()->used_stable();
}

size_t ConcurrentMarkSweepGeneration::max_available() const {
  return free() + _virtual_space.uncommitted_size();
}

bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
  size_t available = max_available();
  size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
  bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
  log_trace(gc, promotion)("CMS: promo attempt is%s safe: available(" SIZE_FORMAT ") %s av_promo(" SIZE_FORMAT "), max_promo(" SIZE_FORMAT ")",
                           res? "":" not", available, res? ">=":"<", av_promo, max_promotion_in_bytes);
  return res;
}

// At a promotion failure dump information on block layout in heap
// (cms old generation).
void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
  Log(gc, promotion) log;
  if (log.is_trace()) {
    LogStream ls(log.trace());
    cmsSpace()->dump_at_safepoint_with_locks(collector(), &ls);
  }
}

void ConcurrentMarkSweepGeneration::reset_after_compaction() {
  // Clear the promotion information.  These pointers can be adjusted
  // along with all the other pointers into the heap but
  // compaction is expected to be a rare event with
  // a heap using cms so don't do it without seeing the need.
  for (uint i = 0; i < ParallelGCThreads; i++) {
    _par_gc_thread_states[i]->promo.reset();
  }
}

void ConcurrentMarkSweepGeneration::compute_new_size() {
  assert_locked_or_safepoint(Heap_lock);

  // If incremental collection failed, we just want to expand
  // to the limit.
  if (incremental_collection_failed()) {
    clear_incremental_collection_failed();
    grow_to_reserved();
    return;
  }

  // The heap has been compacted but not reset yet.
  // Any metric such as free() or used() will be incorrect.

  CardGeneration::compute_new_size();

  // Reset again after a possible resizing
  if (did_compact()) {
    cmsSpace()->reset_after_compaction();
  }
}

void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
  assert_locked_or_safepoint(Heap_lock);

  // If incremental collection failed, we just want to expand
  // to the limit.
  if (incremental_collection_failed()) {
    clear_incremental_collection_failed();
    grow_to_reserved();
    return;
  }

  double free_percentage = ((double) free()) / capacity();
  double desired_free_percentage = (double) MinHeapFreeRatio / 100;
  double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;

  // compute expansion delta needed for reaching desired free percentage
  if (free_percentage < desired_free_percentage) {
    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
    assert(desired_capacity >= capacity(), "invalid expansion size");
    size_t expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
    Log(gc) log;
    if (log.is_trace()) {
      size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
      log.trace("From compute_new_size: ");
      log.trace("  Free fraction %f", free_percentage);
      log.trace("  Desired free fraction %f", desired_free_percentage);
      log.trace("  Maximum free fraction %f", maximum_free_percentage);
      log.trace("  Capacity " SIZE_FORMAT, capacity() / 1000);
      log.trace("  Desired capacity " SIZE_FORMAT, desired_capacity / 1000);
      CMSHeap* heap = CMSHeap::heap();
      size_t young_size = heap->young_gen()->capacity();
      log.trace("  Young gen size " SIZE_FORMAT, young_size / 1000);
      log.trace("  unsafe_max_alloc_nogc " SIZE_FORMAT, unsafe_max_alloc_nogc() / 1000);
      log.trace("  contiguous available " SIZE_FORMAT, contiguous_available() / 1000);
      log.trace("  Expand by " SIZE_FORMAT " (bytes)", expand_bytes);
    }
    // safe if expansion fails
    expand_for_gc_cause(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
    log.trace("  Expanded free fraction %f", ((double) free()) / capacity());
  } else {
    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
    assert(desired_capacity <= capacity(), "invalid expansion size");
    size_t shrink_bytes = capacity() - desired_capacity;
    // Don't shrink unless the delta is greater than the minimum shrink we want
    if (shrink_bytes >= MinHeapDeltaBytes) {
      shrink_free_list_by(shrink_bytes);
    }
  }
}

Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
  return cmsSpace()->freelistLock();
}

HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size, bool tlab) {
  CMSSynchronousYieldRequest yr;
  MutexLocker x(freelistLock(), Mutex::_no_safepoint_check_flag);
  return have_lock_and_allocate(size, tlab);
}

HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
                                                                bool   tlab /* ignored */) {
  assert_lock_strong(freelistLock());
  size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
  HeapWord* res = cmsSpace()->allocate(adjustedSize);
  // Allocate the object live (grey) if the background collector has
  // started marking. This is necessary because the marker may
  // have passed this address and consequently this object will
  // not otherwise be greyed and would be incorrectly swept up.
  // Note that if this object contains references, the writing
  // of those references will dirty the card containing this object
  // allowing the object to be blackened (and its references scanned)
  // either during a preclean phase or at the final checkpoint.
  if (res != NULL) {
    // We may block here with an uninitialized object with
    // its mark-bit or P-bits not yet set. Such objects need
    // to be safely navigable by block_start().
    assert(oop(res)->klass_or_null() == NULL, "Object should be uninitialized here.");
    assert(!((FreeChunk*)res)->is_free(), "Error, block will look free but show wrong size");
    collector()->direct_allocated(res, adjustedSize);
    _direct_allocated_words += adjustedSize;
    // allocation counters
    NOT_PRODUCT(
      _numObjectsAllocated++;
      _numWordsAllocated += (int)adjustedSize;
    )
  }
  return res;
}

// In the case of direct allocation by mutators in a generation that
// is being concurrently collected, the object must be allocated
// live (grey) if the background collector has started marking.
// This is necessary because the marker may
// have passed this address and consequently this object will
// not otherwise be greyed and would be incorrectly swept up.
// Note that if this object contains references, the writing
// of those references will dirty the card containing this object
// allowing the object to be blackened (and its references scanned)
// either during a preclean phase or at the final checkpoint.
void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
  assert(_markBitMap.covers(start, size), "Out of bounds");
  if (_collectorState >= Marking) {
    MutexLocker y(_markBitMap.lock(),
                  Mutex::_no_safepoint_check_flag);
    // [see comments preceding SweepClosure::do_blk() below for details]
    //
    // Can the P-bits be deleted now?  JJJ
    //
    // 1. need to mark the object as live so it isn't collected
    // 2. need to mark the 2nd bit to indicate the object may be uninitialized
    // 3. need to mark the end of the object so marking, precleaning or sweeping
    //    can skip over uninitialized or unparsable objects. An allocated
    //    object is considered uninitialized for our purposes as long as
    //    its klass word is NULL.  All old gen objects are parsable
    //    as soon as they are initialized.)
    _markBitMap.mark(start);          // object is live
    _markBitMap.mark(start + 1);      // object is potentially uninitialized?
    _markBitMap.mark(start + size - 1);
                                      // mark end of object
  }
  // check that oop looks uninitialized
  assert(oop(start)->klass_or_null() == NULL, "_klass should be NULL");
}

void CMSCollector::promoted(bool par, HeapWord* start,
                            bool is_obj_array, size_t obj_size) {
  assert(_markBitMap.covers(start), "Out of bounds");
  // See comment in direct_allocated() about when objects should
  // be allocated live.
  if (_collectorState >= Marking) {
    // we already hold the marking bit map lock, taken in
    // the prologue
    if (par) {
      _markBitMap.par_mark(start);
    } else {
      _markBitMap.mark(start);
    }
    // We don't need to mark the object as uninitialized (as
    // in direct_allocated above) because this is being done with the
    // world stopped and the object will be initialized by the
    // time the marking, precleaning or sweeping get to look at it.
    // But see the code for copying objects into the CMS generation,
    // where we need to ensure that concurrent readers of the
    // block offset table are able to safely navigate a block that
    // is in flux from being free to being allocated (and in
    // transition while being copied into) and subsequently
    // becoming a bona-fide object when the copy/promotion is complete.
    assert(SafepointSynchronize::is_at_safepoint(),
           "expect promotion only at safepoints");

    if (_collectorState < Sweeping) {
      // Mark the appropriate cards in the modUnionTable, so that
      // this object gets scanned before the sweep. If this is
      // not done, CMS generation references in the object might
      // not get marked.
      // For the case of arrays, which are otherwise precisely
      // marked, we need to dirty the entire array, not just its head.
      if (is_obj_array) {
        // The [par_]mark_range() method expects mr.end() below to
        // be aligned to the granularity of a bit's representation
        // in the heap. In the case of the MUT below, that's a
        // card size.
        MemRegion mr(start,
                     align_up(start + obj_size,
                              CardTable::card_size /* bytes */));
        if (par) {
          _modUnionTable.par_mark_range(mr);
        } else {
          _modUnionTable.mark_range(mr);
        }
      } else {  // not an obj array; we can just mark the head
        if (par) {
          _modUnionTable.par_mark(start);
        } else {
          _modUnionTable.mark(start);
        }
      }
    }
  }
}

oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  // allocate, copy and if necessary update promoinfo --
  // delegate to underlying space.
  assert_lock_strong(freelistLock());

#ifndef PRODUCT
  if (CMSHeap::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT

  oop res = _cmsSpace->promote(obj, obj_size);
  if (res == NULL) {
    // expand and retry
    size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
    expand_for_gc_cause(s*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_promotion);
    // Since this is the old generation, we don't try to promote
    // into a more senior generation.
    res = _cmsSpace->promote(obj, obj_size);
  }
  if (res != NULL) {
    // See comment in allocate() about when objects should
    // be allocated live.
    assert(oopDesc::is_oop(obj), "Will dereference klass pointer below");
    collector()->promoted(false,           // Not parallel
                          (HeapWord*)res, obj->is_objArray(), obj_size);
    // promotion counters
    NOT_PRODUCT(
      _numObjectsPromoted++;
      _numWordsPromoted +=
        (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
    )
  }
  return res;
}


// IMPORTANT: Notes on object size recognition in CMS.
// ---------------------------------------------------
// A block of storage in the CMS generation is always in
// one of three states. A free block (FREE), an allocated
// object (OBJECT) whose size() method reports the correct size,
// and an intermediate state (TRANSIENT) in which its size cannot
// be accurately determined.
// STATE IDENTIFICATION: (32 bit and 64 bit w/o COOPS)
// -----------------------------------------------------
// FREE:      klass_word & 1 == 1; mark_word holds block size
//
// OBJECT:    klass_word installed; klass_word != 0 && klass_word & 1 == 0;
//            obj->size() computes correct size
//
// TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
//
// STATE IDENTIFICATION: (64 bit+COOPS)
// ------------------------------------
// FREE:      mark_word & CMS_FREE_BIT == 1; mark_word & ~CMS_FREE_BIT gives block_size
//
// OBJECT:    klass_word installed; klass_word != 0;
//            obj->size() computes correct size
//
// TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
//
//
// STATE TRANSITION DIAGRAM
//
//        mut / parnew                     mut  /  parnew
// FREE --------------------> TRANSIENT ---------------------> OBJECT --|
//  ^                                                                   |
//  |------------------------ DEAD <------------------------------------|
//         sweep                            mut
//
// While a block is in TRANSIENT state its size cannot be determined
// so readers will either need to come back later or stall until
// the size can be determined. Note that for the case of direct
// allocation, P-bits, when available, may be used to determine the
// size of an object that may not yet have been initialized.

// Things to support parallel young-gen collection.
oop
ConcurrentMarkSweepGeneration::par_promote(int thread_num,
                                           oop old, markOop m,
                                           size_t word_sz) {
#ifndef PRODUCT
  if (CMSHeap::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT

  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  PromotionInfo* promoInfo = &ps->promo;
  // if we are tracking promotions, then first ensure space for
  // promotion (including spooling space for saving header if necessary).
  // then allocate and copy, then track promoted info if needed.
  // When tracking (see PromotionInfo::track()), the mark word may
  // be displaced and in this case restoration of the mark word
  // occurs in the (oop_since_save_marks_)iterate phase.
  if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
    // Out of space for allocating spooling buffers;
    // try expanding and allocating spooling buffers.
    if (!expand_and_ensure_spooling_space(promoInfo)) {
      return NULL;
    }
  }
  assert(!promoInfo->tracking() || promoInfo->has_spooling_space(), "Control point invariant");
  const size_t alloc_sz = CompactibleFreeListSpace::adjustObjectSize(word_sz);
  HeapWord* obj_ptr = ps->lab.alloc(alloc_sz);
  if (obj_ptr == NULL) {
    obj_ptr = expand_and_par_lab_allocate(ps, alloc_sz);
    if (obj_ptr == NULL) {
      return NULL;
    }
  }
  oop obj = oop(obj_ptr);
  OrderAccess::storestore();
  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
  // IMPORTANT: See note on object initialization for CMS above.
  // Otherwise, copy the object.  Here we must be careful to insert the
  // klass pointer last, since this marks the block as an allocated object.
  // Except with compressed oops it's the mark word.
  HeapWord* old_ptr = (HeapWord*)old;
  // Restore the mark word copied above.
  obj->set_mark_raw(m);
  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
  OrderAccess::storestore();

  if (UseCompressedClassPointers) {
    // Copy gap missed by (aligned) header size calculation below
    obj->set_klass_gap(old->klass_gap());
  }
  if (word_sz > (size_t)oopDesc::header_size()) {
    Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
                                 obj_ptr + oopDesc::header_size(),
                                 word_sz - oopDesc::header_size());
  }

  // Now we can track the promoted object, if necessary.  We take care
  // to delay the transition from uninitialized to full object
  // (i.e., insertion of klass pointer) until after, so that it
  // atomically becomes a promoted object.
  if (promoInfo->tracking()) {
    promoInfo->track((PromotedObject*)obj, old->klass());
  }
  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
  assert(oopDesc::is_oop(old), "Will use and dereference old klass ptr below");

  // Finally, install the klass pointer (this should be volatile).
  OrderAccess::storestore();
  obj->set_klass(old->klass());
  // We should now be able to calculate the right size for this object
  assert(oopDesc::is_oop(obj) && obj->size() == (int)word_sz, "Error, incorrect size computed for promoted object");

  collector()->promoted(true,          // parallel
                        obj_ptr, old->is_objArray(), word_sz);

  NOT_PRODUCT(
    Atomic::inc(&_numObjectsPromoted);
    Atomic::add(alloc_sz, &_numWordsPromoted);
  )

  return obj;
}

void
ConcurrentMarkSweepGeneration::
par_promote_alloc_done(int thread_num) {
  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  ps->lab.retire(thread_num);
}

void
ConcurrentMarkSweepGeneration::
par_oop_since_save_marks_iterate_done(int thread_num) {
  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  ParScanWithoutBarrierClosure* dummy_cl = NULL;
  ps->promo.promoted_oops_iterate(dummy_cl);

  // Because card-scanning has been completed, subsequent phases
  // (e.g., reference processing) will not need to recognize which
  // objects have been promoted during this GC. So, we can now disable
  // promotion tracking.
  ps->promo.stopTrackingPromotions();
}

bool ConcurrentMarkSweepGeneration::should_collect(bool   full,
                                                   size_t size,
                                                   bool   tlab)
{
  // We allow a STW collection only if a full
  // collection was requested.
  return full || should_allocate(size, tlab); // FIX ME !!!
  // This and promotion failure handling are connected at the
  // hip and should be fixed by untying them.
}

bool CMSCollector::shouldConcurrentCollect() {
  LogTarget(Trace, gc) log;

  if (_full_gc_requested) {
    log.print("CMSCollector: collect because of explicit gc request (or GCLocker)");
    return true;
  }

  FreelistLocker x(this);
  // ------------------------------------------------------------------
  // Print out lots of information which affects the initiation of
  // a collection.
  if (log.is_enabled() && stats().valid()) {
    log.print("CMSCollector shouldConcurrentCollect: ");

    LogStream out(log);
    stats().print_on(&out);

    log.print("time_until_cms_gen_full %3.7f", stats().time_until_cms_gen_full());
    log.print("free=" SIZE_FORMAT, _cmsGen->free());
    log.print("contiguous_available=" SIZE_FORMAT, _cmsGen->contiguous_available());
    log.print("promotion_rate=%g", stats().promotion_rate());
    log.print("cms_allocation_rate=%g", stats().cms_allocation_rate());
    log.print("occupancy=%3.7f", _cmsGen->occupancy());
    log.print("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
    log.print("cms_time_since_begin=%3.7f", stats().cms_time_since_begin());
    log.print("cms_time_since_end=%3.7f", stats().cms_time_since_end());
    log.print("metadata initialized %d", MetaspaceGC::should_concurrent_collect());
  }
  // ------------------------------------------------------------------

  // If the estimated time to complete a cms collection (cms_duration())
  // is less than the estimated time remaining until the cms generation
  // is full, start a collection.
  if (!UseCMSInitiatingOccupancyOnly) {
    if (stats().valid()) {
      if (stats().time_until_cms_start() == 0.0) {
        return true;
      }
    } else {
      // We want to conservatively collect somewhat early in order
      // to try and "bootstrap" our CMS/promotion statistics;
      // this branch will not fire after the first successful CMS
      // collection because the stats should then be valid.
      if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
        log.print(" CMSCollector: collect for bootstrapping statistics: occupancy = %f, boot occupancy = %f",
                  _cmsGen->occupancy(), _bootstrap_occupancy);
        return true;
      }
    }
  }

  // Otherwise, we start a collection cycle if the
  // old gen wants a collection cycle started. Each may use
  // an appropriate criterion for making this decision.
  // XXX We need to make sure that the gen expansion
  // criterion dovetails well with this. XXX NEED TO FIX THIS
  if (_cmsGen->should_concurrent_collect()) {
    log.print("CMS old gen initiated");
    return true;
  }

  // We start a collection if we believe an incremental collection may fail;
  // this is not likely to be productive in practice because it's probably too
  // late anyway.
  CMSHeap* heap = CMSHeap::heap();
  if (heap->incremental_collection_will_fail(true /* consult_young */)) {
    log.print("CMSCollector: collect because incremental collection will fail ");
    return true;
  }

  if (MetaspaceGC::should_concurrent_collect()) {
    log.print("CMSCollector: collect for metadata allocation ");
    return true;
  }

  // CMSTriggerInterval starts a CMS cycle if enough time has passed.
  if (CMSTriggerInterval >= 0) {
    if (CMSTriggerInterval == 0) {
      // Trigger always
      return true;
    }

    // Check the CMS time since begin (we do not check the stats validity
    // as we want to be able to trigger the first CMS cycle as well)
    if (stats().cms_time_since_begin() >= (CMSTriggerInterval / ((double) MILLIUNITS))) {
      if (stats().valid()) {
        log.print("CMSCollector: collect because of trigger interval (time since last begin %3.7f secs)",
                  stats().cms_time_since_begin());
      } else {
        log.print("CMSCollector: collect because of trigger interval (first collection)");
      }
      return true;
    }
  }

  return false;
}

void CMSCollector::set_did_compact(bool v) { _cmsGen->set_did_compact(v); }

// Clear _expansion_cause fields of constituent generations
void CMSCollector::clear_expansion_cause() {
  _cmsGen->clear_expansion_cause();
}

// We should be conservative in starting a collection cycle.  To
// start too eagerly runs the risk of collecting too often in the
// extreme.  To collect too rarely falls back on full collections,
// which works, even if not optimum in terms of concurrent work.
// As a workaround for too eagerly collecting, use the flag
// UseCMSInitiatingOccupancyOnly.  This also has the advantage of
// giving the user an easily understandable way of controlling the
// collections.
// We want to start a new collection cycle if any of the following
// conditions hold:
// . our current occupancy exceeds the configured initiating occupancy
//   for this generation, or
// . we recently needed to expand this space and have not, since that
//   expansion, done a collection of this generation, or
// . the underlying space believes that it may be a good idea to initiate
//   a concurrent collection (this may be based on criteria such as the
//   following: the space uses linear allocation and linear allocation is
//   going to fail, or there is believed to be excessive fragmentation in
//   the generation, etc... or ...
// [.(currently done by CMSCollector::shouldConcurrentCollect() only for
//   the case of the old generation; see CR 6543076):
//   we may be approaching a point at which allocation requests may fail because
//   we will be out of sufficient free space given allocation rate estimates.]
bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {

  assert_lock_strong(freelistLock());
  if (occupancy() > initiating_occupancy()) {
    log_trace(gc)(" %s: collect because of occupancy %f / %f  ",
                  short_name(), occupancy(), initiating_occupancy());
    return true;
  }
  if (UseCMSInitiatingOccupancyOnly) {
    return false;
  }
  if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
    log_trace(gc)(" %s: collect because expanded for allocation ", short_name());
    return true;
  }
  return false;
}

void ConcurrentMarkSweepGeneration::collect(bool   full,
                                            bool   clear_all_soft_refs,
                                            size_t size,
                                            bool   tlab)
{
  collector()->collect(full, clear_all_soft_refs, size, tlab);
}

void CMSCollector::collect(bool   full,
                           bool   clear_all_soft_refs,
                           size_t size,
                           bool   tlab)
{
  // The following "if" branch is present for defensive reasons.
  // In the current uses of this interface, it can be replaced with:
  // assert(!GCLocker::is_active(), "Can't be called otherwise");
  // But I am not placing that assert here to allow future
  // generality in invoking this interface.
  if (GCLocker::is_active()) {
    // A consistency test for GCLocker
    assert(GCLocker::needs_gc(), "Should have been set already");
    // Skip this foreground collection, instead
    // expanding the heap if necessary.
    // Need the free list locks for the call to free() in compute_new_size()
    compute_new_size();
    return;
  }
  acquire_control_and_collect(full, clear_all_soft_refs);
}

void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) {
  CMSHeap* heap = CMSHeap::heap();
  unsigned int gc_count = heap->total_full_collections();
  if (gc_count == full_gc_count) {
    MutexLocker y(CGC_lock, Mutex::_no_safepoint_check_flag);
    _full_gc_requested = true;
    _full_gc_cause = cause;
    CGC_lock->notify();   // nudge CMS thread
  } else {
    assert(gc_count > full_gc_count, "Error: causal loop");
  }
}

bool CMSCollector::is_external_interruption() {
  GCCause::Cause cause = CMSHeap::heap()->gc_cause();
  return GCCause::is_user_requested_gc(cause) ||
         GCCause::is_serviceability_requested_gc(cause);
}

void CMSCollector::report_concurrent_mode_interruption() {
  if (is_external_interruption()) {
    log_debug(gc)("Concurrent mode interrupted");
  } else {
    log_debug(gc)("Concurrent mode failure");
    _gc_tracer_cm->report_concurrent_mode_failure();
  }
}


// The foreground and background collectors need to coordinate in order
// to make sure that they do not mutually interfere with CMS collections.
// When a background collection is active,
// the foreground collector may need to take over (preempt) and
// synchronously complete an ongoing collection. Depending on the
// frequency of the background collections and the heap usage
// of the application, this preemption can be seldom or frequent.
// There are only certain
// points in the background collection that the "collection-baton"
// can be passed to the foreground collector.
//
// The foreground collector will wait for the baton before
// starting any part of the collection.  The foreground collector
// will only wait at one location.
//
// The background collector will yield the baton before starting a new
// phase of the collection (e.g., before initial marking, marking from roots,
// precleaning, final re-mark, sweep etc.)  This is normally done at the head
// of the loop which switches the phases. The background collector does some
// of the phases (initial mark, final re-mark) with the world stopped.
// Because of locking involved in stopping the world,
// the foreground collector should not block waiting for the background
// collector when it is doing a stop-the-world phase.  The background
// collector will yield the baton at an additional point just before
// it enters a stop-the-world phase.  Once the world is stopped, the
// background collector checks the phase of the collection.  If the
// phase has not changed, it proceeds with the collection.  If the
// phase has changed, it skips that phase of the collection.  See
// the comments on the use of the Heap_lock in collect_in_background().
//
// Variables used in baton passing.
//   _foregroundGCIsActive - Set to true by the foreground collector when
//      it wants the baton.  The foreground clears it when it has finished
//      the collection.
//   _foregroundGCShouldWait - Set to true by the background collector
//        when it is running.  The foreground collector waits while
//      _foregroundGCShouldWait is true.
//  CGC_lock - monitor used to protect access to the above variables
//      and to notify the foreground and background collectors.
//  _collectorState - current state of the CMS collection.
//
// The foreground collector
//   acquires the CGC_lock
//   sets _foregroundGCIsActive
//   waits on the CGC_lock for _foregroundGCShouldWait to be false
//     various locks acquired in preparation for the collection
//     are released so as not to block the background collector
//     that is in the midst of a collection
//   proceeds with the collection
//   clears _foregroundGCIsActive
//   returns
//
// The background collector in a loop iterating on the phases of the
//      collection
//   acquires the CGC_lock
//   sets _foregroundGCShouldWait
//   if _foregroundGCIsActive is set
//     clears _foregroundGCShouldWait, notifies _CGC_lock
//     waits on _CGC_lock for _foregroundGCIsActive to become false
//     and exits the loop.
//   otherwise
//     proceed with that phase of the collection
//     if the phase is a stop-the-world phase,
//       yield the baton once more just before enqueueing
//       the stop-world CMS operation (executed by the VM thread).
//   returns after all phases of the collection are done
//

void CMSCollector::acquire_control_and_collect(bool full,
                                               bool clear_all_soft_refs) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(!Thread::current()->is_ConcurrentGC_thread(),
         "shouldn't try to acquire control from self!");

  // Start the protocol for acquiring control of the
  // collection from the background collector (aka CMS thread).
  assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
         "VM thread should have CMS token");
  // Remember the possibly interrupted state of an ongoing
  // concurrent collection
  CollectorState first_state = _collectorState;

  // Signal to a possibly ongoing concurrent collection that
  // we want to do a foreground collection.
  _foregroundGCIsActive = true;

  // release locks and wait for a notify from the background collector
  // releasing the locks is only necessary for phases which
  // do yields to improve the granularity of the collection.
  assert_lock_strong(bitMapLock());
  // We need to lock the Free list lock for the space that we are
  // currently collecting.
  assert(haveFreelistLocks(), "Must be holding free list locks");
  bitMapLock()->unlock();
  releaseFreelistLocks();
  {
    MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
    if (_foregroundGCShouldWait) {
      // We are going to be waiting for action for the CMS thread;
      // it had better not be gone (for instance at shutdown)!
1440 assert(ConcurrentMarkSweepThread::cmst() != NULL && !ConcurrentMarkSweepThread::cmst()->has_terminated(), 1441 "CMS thread must be running"); 1442 // Wait here until the background collector gives us the go-ahead 1443 ConcurrentMarkSweepThread::clear_CMS_flag( 1444 ConcurrentMarkSweepThread::CMS_vm_has_token); // release token 1445 // Get a possibly blocked CMS thread going: 1446 // Note that we set _foregroundGCIsActive true above, 1447 // without protection of the CGC_lock. 1448 CGC_lock->notify(); 1449 assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(), 1450 "Possible deadlock"); 1451 while (_foregroundGCShouldWait) { 1452 // wait for notification 1453 CGC_lock->wait_without_safepoint_check(); 1454 // Possibility of delay/starvation here, since CMS token does 1455 // not know to give priority to VM thread? Actually, i think 1456 // there wouldn't be any delay/starvation, but the proof of 1457 // that "fact" (?) appears non-trivial. XXX 20011219YSR 1458 } 1459 ConcurrentMarkSweepThread::set_CMS_flag( 1460 ConcurrentMarkSweepThread::CMS_vm_has_token); 1461 } 1462 } 1463 // The CMS_token is already held. Get back the other locks. 1464 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(), 1465 "VM thread should have CMS token"); 1466 getFreelistLocks(); 1467 bitMapLock()->lock_without_safepoint_check(); 1468 log_debug(gc, state)("CMS foreground collector has asked for control " INTPTR_FORMAT " with first state %d", 1469 p2i(Thread::current()), first_state); 1470 log_debug(gc, state)(" gets control with state %d", _collectorState); 1471 1472 // Inform cms gen if this was due to partial collection failing. 1473 // The CMS gen may use this fact to determine its expansion policy. 1474 CMSHeap* heap = CMSHeap::heap(); 1475 if (heap->incremental_collection_will_fail(false /* don't consult_young */)) { 1476 assert(!_cmsGen->incremental_collection_failed(), 1477 "Should have been noticed, reacted to and cleared"); 1478 _cmsGen->set_incremental_collection_failed(); 1479 } 1480 1481 if (first_state > Idling) { 1482 report_concurrent_mode_interruption(); 1483 } 1484 1485 set_did_compact(true); 1486 1487 // If the collection is being acquired from the background 1488 // collector, there may be references on the discovered 1489 // references lists. Abandon those references, since some 1490 // of them may have become unreachable after concurrent 1491 // discovery; the STW compacting collector will redo discovery 1492 // more precisely, without being subject to floating garbage. 1493 // Leaving otherwise unreachable references in the discovered 1494 // lists would require special handling. 1495 ref_processor()->disable_discovery(); 1496 ref_processor()->abandon_partial_discovery(); 1497 ref_processor()->verify_no_references_recorded(); 1498 1499 if (first_state > Idling) { 1500 save_heap_summary(); 1501 } 1502 1503 do_compaction_work(clear_all_soft_refs); 1504 1505 // Has the GC time limit been exceeded? 1506 size_t max_eden_size = _young_gen->max_eden_size(); 1507 GCCause::Cause gc_cause = heap->gc_cause(); 1508 size_policy()->check_gc_overhead_limit(_young_gen->eden()->used(), 1509 _cmsGen->max_capacity(), 1510 max_eden_size, 1511 full, 1512 gc_cause, 1513 heap->soft_ref_policy()); 1514 1515 // Reset the expansion cause, now that we just completed 1516 // a collection cycle. 1517 clear_expansion_cause(); 1518 _foregroundGCIsActive = false; 1519 return; 1520 } 1521 1522 // Resize the tenured generation 1523 // after obtaining the free list locks for the 1524 // two generations. 
1525 void CMSCollector::compute_new_size() { 1526 assert_locked_or_safepoint(Heap_lock); 1527 FreelistLocker z(this); 1528 MetaspaceGC::compute_new_size(); 1529 _cmsGen->compute_new_size_free_list(); 1530 // recalculate CMS used space after CMS collection 1531 _cmsGen->cmsSpace()->recalculate_used_stable(); 1532 } 1533 1534 // A work method used by the foreground collector to do 1535 // a mark-sweep-compact. 1536 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) { 1537 CMSHeap* heap = CMSHeap::heap(); 1538 1539 STWGCTimer* gc_timer = GenMarkSweep::gc_timer(); 1540 gc_timer->register_gc_start(); 1541 1542 SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer(); 1543 gc_tracer->report_gc_start(heap->gc_cause(), gc_timer->gc_start()); 1544 1545 heap->pre_full_gc_dump(gc_timer); 1546 1547 GCTraceTime(Trace, gc, phases) t("CMS:MSC"); 1548 1549 // Temporarily widen the span of the weak reference processing to 1550 // the entire heap. 1551 MemRegion new_span(CMSHeap::heap()->reserved_region()); 1552 ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span); 1553 // Temporarily, clear the "is_alive_non_header" field of the 1554 // reference processor. 1555 ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL); 1556 // Temporarily make reference _processing_ single threaded (non-MT). 1557 ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false); 1558 // Temporarily make refs discovery atomic 1559 ReferenceProcessorAtomicMutator rp_mut_atomic(ref_processor(), true); 1560 // Temporarily make reference _discovery_ single threaded (non-MT) 1561 ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false); 1562 1563 ref_processor()->set_enqueuing_is_done(false); 1564 ref_processor()->enable_discovery(); 1565 ref_processor()->setup_policy(clear_all_soft_refs); 1566 // If an asynchronous collection finishes, the _modUnionTable is 1567 // all clear. If we are taking over the collection from an asynchronous 1568 // collection, clear the _modUnionTable. 1569 assert(_collectorState != Idling || _modUnionTable.isAllClear(), 1570 "_modUnionTable should be clear if the baton was not passed"); 1571 _modUnionTable.clear_all(); 1572 assert(_collectorState != Idling || _ct->cld_rem_set()->mod_union_is_clear(), 1573 "mod union for klasses should be clear if the baton was not passed"); 1574 _ct->cld_rem_set()->clear_mod_union(); 1575 1576 1577 // We must adjust the allocation statistics being maintained 1578 // in the free list space. We do so by reading and clearing 1579 // the sweep timer and updating the block flux rate estimates below. 1580 assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive"); 1581 if (_inter_sweep_timer.is_active()) { 1582 _inter_sweep_timer.stop(); 1583 // Note that we do not use this sample to update the _inter_sweep_estimate.
1584 _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()), 1585 _inter_sweep_estimate.padded_average(), 1586 _intra_sweep_estimate.padded_average()); 1587 } 1588 1589 GenMarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs); 1590 #ifdef ASSERT 1591 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace(); 1592 size_t free_size = cms_space->free(); 1593 assert(free_size == 1594 pointer_delta(cms_space->end(), cms_space->compaction_top()) 1595 * HeapWordSize, 1596 "All the free space should be compacted into one chunk at top"); 1597 assert(cms_space->dictionary()->total_chunk_size( 1598 debug_only(cms_space->freelistLock())) == 0 || 1599 cms_space->totalSizeInIndexedFreeLists() == 0, 1600 "All the free space should be in a single chunk"); 1601 size_t num = cms_space->totalCount(); 1602 assert((free_size == 0 && num == 0) || 1603 (free_size > 0 && (num == 1 || num == 2)), 1604 "There should be at most 2 free chunks after compaction"); 1605 #endif // ASSERT 1606 _collectorState = Resetting; 1607 assert(_restart_addr == NULL, 1608 "Should have been NULL'd before baton was passed"); 1609 reset_stw(); 1610 _cmsGen->reset_after_compaction(); 1611 _concurrent_cycles_since_last_unload = 0; 1612 1613 // Clear any data recorded in the PLAB chunk arrays. 1614 if (_survivor_plab_array != NULL) { 1615 reset_survivor_plab_arrays(); 1616 } 1617 1618 // Adjust the per-size allocation stats for the next epoch. 1619 _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */); 1620 // Restart the "inter sweep timer" for the next epoch. 1621 _inter_sweep_timer.reset(); 1622 _inter_sweep_timer.start(); 1623 1624 // No longer a need to do a concurrent collection for Metaspace. 1625 MetaspaceGC::set_should_concurrent_collect(false); 1626 1627 heap->post_full_gc_dump(gc_timer); 1628 1629 gc_timer->register_gc_end(); 1630 1631 gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions()); 1632 1633 // For a mark-sweep-compact, compute_new_size() will be called 1634 // in the heap's do_collection() method. 
1635 } 1636 1637 void CMSCollector::print_eden_and_survivor_chunk_arrays() { 1638 Log(gc, heap) log; 1639 if (!log.is_trace()) { 1640 return; 1641 } 1642 1643 ContiguousSpace* eden_space = _young_gen->eden(); 1644 ContiguousSpace* from_space = _young_gen->from(); 1645 ContiguousSpace* to_space = _young_gen->to(); 1646 // Eden 1647 if (_eden_chunk_array != NULL) { 1648 log.trace("eden " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")", 1649 p2i(eden_space->bottom()), p2i(eden_space->top()), 1650 p2i(eden_space->end()), eden_space->capacity()); 1651 log.trace("_eden_chunk_index=" SIZE_FORMAT ", _eden_chunk_capacity=" SIZE_FORMAT, 1652 _eden_chunk_index, _eden_chunk_capacity); 1653 for (size_t i = 0; i < _eden_chunk_index; i++) { 1654 log.trace("_eden_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT, i, p2i(_eden_chunk_array[i])); 1655 } 1656 } 1657 // Survivor 1658 if (_survivor_chunk_array != NULL) { 1659 log.trace("survivor " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")", 1660 p2i(from_space->bottom()), p2i(from_space->top()), 1661 p2i(from_space->end()), from_space->capacity()); 1662 log.trace("_survivor_chunk_index=" SIZE_FORMAT ", _survivor_chunk_capacity=" SIZE_FORMAT, 1663 _survivor_chunk_index, _survivor_chunk_capacity); 1664 for (size_t i = 0; i < _survivor_chunk_index; i++) { 1665 log.trace("_survivor_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT, i, p2i(_survivor_chunk_array[i])); 1666 } 1667 } 1668 } 1669 1670 void CMSCollector::getFreelistLocks() const { 1671 // Get locks for all free lists in all generations that this 1672 // collector is responsible for 1673 _cmsGen->freelistLock()->lock_without_safepoint_check(); 1674 } 1675 1676 void CMSCollector::releaseFreelistLocks() const { 1677 // Release locks for all free lists in all generations that this 1678 // collector is responsible for 1679 _cmsGen->freelistLock()->unlock(); 1680 } 1681 1682 bool CMSCollector::haveFreelistLocks() const { 1683 // Check locks for all free lists in all generations that this 1684 // collector is responsible for 1685 assert_lock_strong(_cmsGen->freelistLock()); 1686 PRODUCT_ONLY(ShouldNotReachHere()); 1687 return true; 1688 } 1689 1690 // A utility class that is used by the CMS collector to 1691 // temporarily "release" the foreground collector from its 1692 // usual obligation to wait for the background collector to 1693 // complete an ongoing phase before proceeding. 
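// The helper follows the usual RAII shape: the constructor drops the
// "should wait" flag (and notifies a possibly blocked foreground collector),
// and the destructor re-establishes the flag when the scope exits. As an
// illustrative sketch only -- the names below are hypothetical and not part
// of this file -- the skeleton, stripped of the CGC_lock protocol, is roughly:
//
//   class ScopedFlagRelease {
//     bool& _flag;
//    public:
//     explicit ScopedFlagRelease(bool& flag) : _flag(flag) { _flag = false; }
//     ~ScopedFlagRelease()                                  { _flag = true;  }
//   };
//
// The real class below additionally takes CGC_lock and notifies the waiting
// foreground collector, as described in the baton-passing protocol comments above.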
1694 class ReleaseForegroundGC: public StackObj { 1695 private: 1696 CMSCollector* _c; 1697 public: 1698 ReleaseForegroundGC(CMSCollector* c) : _c(c) { 1699 assert(_c->_foregroundGCShouldWait, "Else should not need to call"); 1700 MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag); 1701 // allow a potentially blocked foreground collector to proceed 1702 _c->_foregroundGCShouldWait = false; 1703 if (_c->_foregroundGCIsActive) { 1704 CGC_lock->notify(); 1705 } 1706 assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(), 1707 "Possible deadlock"); 1708 } 1709 1710 ~ReleaseForegroundGC() { 1711 assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?"); 1712 MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag); 1713 _c->_foregroundGCShouldWait = true; 1714 } 1715 }; 1716 1717 void CMSCollector::collect_in_background(GCCause::Cause cause) { 1718 assert(Thread::current()->is_ConcurrentGC_thread(), 1719 "A CMS asynchronous collection is only allowed on a CMS thread."); 1720 1721 CMSHeap* heap = CMSHeap::heap(); 1722 { 1723 MutexLocker hl(Heap_lock, Mutex::_no_safepoint_check_flag); 1724 FreelistLocker fll(this); 1725 MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag); 1726 if (_foregroundGCIsActive) { 1727 // The foreground collector is active. Skip this 1728 // background collection. 1729 assert(!_foregroundGCShouldWait, "Should be clear"); 1730 return; 1731 } else { 1732 assert(_collectorState == Idling, "Should be idling before start."); 1733 _collectorState = InitialMarking; 1734 register_gc_start(cause); 1735 // Reset the expansion cause, now that we are about to begin 1736 // a new cycle. 1737 clear_expansion_cause(); 1738 1739 // Clear the MetaspaceGC flag since a concurrent collection 1740 // is starting but also clear it after the collection. 1741 MetaspaceGC::set_should_concurrent_collect(false); 1742 } 1743 // Decide if we want to enable class unloading as part of the 1744 // ensuing concurrent GC cycle. 1745 update_should_unload_classes(); 1746 _full_gc_requested = false; // acks all outstanding full gc requests 1747 _full_gc_cause = GCCause::_no_gc; 1748 // Signal that we are about to start a collection 1749 heap->increment_total_full_collections(); // ... starting a collection cycle 1750 _collection_count_start = heap->total_full_collections(); 1751 } 1752 1753 size_t prev_used = _cmsGen->used(); 1754 _cmsGen->cmsSpace()->recalculate_used_stable(); 1755 1756 // The change of the collection state is normally done at this level; 1757 // the exceptions are phases that are executed while the world is 1758 // stopped. For those phases the change of state is done while the 1759 // world is stopped. For baton passing purposes this allows the 1760 // background collector to finish the phase and change state atomically. 1761 // The foreground collector cannot wait on a phase that is done 1762 // while the world is stopped because the foreground collector already 1763 // has the world stopped and would deadlock. 1764 while (_collectorState != Idling) { 1765 log_debug(gc, state)("Thread " INTPTR_FORMAT " in CMS state %d", 1766 p2i(Thread::current()), _collectorState); 1767 // The foreground collector 1768 // holds the Heap_lock throughout its collection. 1769 // holds the CMS token (but not the lock) 1770 // except while it is waiting for the background collector to yield. 1771 // 1772 // The foreground collector should be blocked (not for long) 1773 // if the background collector is about to start a phase 1774 // executed with world stopped.
If the background 1775 // collector has already started such a phase, the 1776 // foreground collector is blocked waiting for the 1777 // Heap_lock. The stop-world phases (InitialMarking and FinalMarking) 1778 // are executed in the VM thread. 1779 // 1780 // The locking order is 1781 // PendingListLock (PLL) -- if applicable (FinalMarking) 1782 // Heap_lock (both this & PLL locked in VM_CMS_Operation::prologue()) 1783 // CMS token (claimed in 1784 // stop_world_and_do() --> 1785 // safepoint_synchronize() --> 1786 // CMSThread::synchronize()) 1787 1788 { 1789 // Check if the FG collector wants us to yield. 1790 CMSTokenSync x(true); // is cms thread 1791 if (waitForForegroundGC()) { 1792 // We yielded to a foreground GC, nothing more to be 1793 // done this round. 1794 assert(_foregroundGCShouldWait == false, "We set it to false in " 1795 "waitForForegroundGC()"); 1796 log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " exiting collection CMS state %d", 1797 p2i(Thread::current()), _collectorState); 1798 return; 1799 } else { 1800 // The background collector can run but check to see if the 1801 // foreground collector has done a collection while the 1802 // background collector was waiting to get the CGC_lock 1803 // above. If yes, break so that _foregroundGCShouldWait 1804 // is cleared before returning. 1805 if (_collectorState == Idling) { 1806 break; 1807 } 1808 } 1809 } 1810 1811 assert(_foregroundGCShouldWait, "Foreground collector, if active, " 1812 "should be waiting"); 1813 1814 switch (_collectorState) { 1815 case InitialMarking: 1816 { 1817 ReleaseForegroundGC x(this); 1818 stats().record_cms_begin(); 1819 VM_CMS_Initial_Mark initial_mark_op(this); 1820 VMThread::execute(&initial_mark_op); 1821 } 1822 // The collector state may be any legal state at this point 1823 // since the background collector may have yielded to the 1824 // foreground collector. 1825 break; 1826 case Marking: 1827 // initial marking in checkpointRootsInitialWork has been completed 1828 if (markFromRoots()) { // we were successful 1829 assert(_collectorState == Precleaning, "Collector state should " 1830 "have changed"); 1831 } else { 1832 assert(_foregroundGCIsActive, "Internal state inconsistency"); 1833 } 1834 break; 1835 case Precleaning: 1836 // marking from roots in markFromRoots has been completed 1837 preclean(); 1838 assert(_collectorState == AbortablePreclean || 1839 _collectorState == FinalMarking, 1840 "Collector state should have changed"); 1841 break; 1842 case AbortablePreclean: 1843 abortable_preclean(); 1844 assert(_collectorState == FinalMarking, "Collector state should " 1845 "have changed"); 1846 break; 1847 case FinalMarking: 1848 { 1849 ReleaseForegroundGC x(this); 1850 1851 VM_CMS_Final_Remark final_remark_op(this); 1852 VMThread::execute(&final_remark_op); 1853 } 1854 assert(_foregroundGCShouldWait, "block post-condition"); 1855 break; 1856 case Sweeping: 1857 // final marking in checkpointRootsFinal has been completed 1858 sweep(); 1859 assert(_collectorState == Resizing, "Collector state change " 1860 "to Resizing must be done under the free_list_lock"); 1861 1862 case Resizing: { 1863 // Sweeping has been completed... 1864 // At this point the background collection has completed. 1865 // Don't move the call to compute_new_size() down 1866 // into code that might be executed if the background 1867 // collection was preempted. 
1868 { 1869 ReleaseForegroundGC x(this); // unblock FG collection 1870 MutexLocker y(Heap_lock, Mutex::_no_safepoint_check_flag); 1871 CMSTokenSync z(true); // not strictly needed. 1872 if (_collectorState == Resizing) { 1873 compute_new_size(); 1874 save_heap_summary(); 1875 _collectorState = Resetting; 1876 } else { 1877 assert(_collectorState == Idling, "The state should only change" 1878 " because the foreground collector has finished the collection"); 1879 } 1880 } 1881 break; 1882 } 1883 case Resetting: 1884 // CMS heap resizing has been completed 1885 reset_concurrent(); 1886 assert(_collectorState == Idling, "Collector state should " 1887 "have changed"); 1888 1889 MetaspaceGC::set_should_concurrent_collect(false); 1890 1891 stats().record_cms_end(); 1892 // Don't move the concurrent_phases_end() and compute_new_size() 1893 // calls to here because a preempted background collection 1894 // has it's state set to "Resetting". 1895 break; 1896 case Idling: 1897 default: 1898 ShouldNotReachHere(); 1899 break; 1900 } 1901 log_debug(gc, state)(" Thread " INTPTR_FORMAT " done - next CMS state %d", 1902 p2i(Thread::current()), _collectorState); 1903 assert(_foregroundGCShouldWait, "block post-condition"); 1904 } 1905 1906 // Should this be in gc_epilogue? 1907 heap->counters()->update_counters(); 1908 1909 { 1910 // Clear _foregroundGCShouldWait and, in the event that the 1911 // foreground collector is waiting, notify it, before 1912 // returning. 1913 MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag); 1914 _foregroundGCShouldWait = false; 1915 if (_foregroundGCIsActive) { 1916 CGC_lock->notify(); 1917 } 1918 assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(), 1919 "Possible deadlock"); 1920 } 1921 log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " exiting collection CMS state %d", 1922 p2i(Thread::current()), _collectorState); 1923 log_info(gc, heap)("Old: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)", 1924 prev_used / K, _cmsGen->used()/K, _cmsGen->capacity() /K); 1925 } 1926 1927 void CMSCollector::register_gc_start(GCCause::Cause cause) { 1928 _cms_start_registered = true; 1929 _gc_timer_cm->register_gc_start(); 1930 _gc_tracer_cm->report_gc_start(cause, _gc_timer_cm->gc_start()); 1931 } 1932 1933 void CMSCollector::register_gc_end() { 1934 if (_cms_start_registered) { 1935 report_heap_summary(GCWhen::AfterGC); 1936 1937 _gc_timer_cm->register_gc_end(); 1938 _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions()); 1939 _cms_start_registered = false; 1940 } 1941 } 1942 1943 void CMSCollector::save_heap_summary() { 1944 CMSHeap* heap = CMSHeap::heap(); 1945 _last_heap_summary = heap->create_heap_summary(); 1946 _last_metaspace_summary = heap->create_metaspace_summary(); 1947 } 1948 1949 void CMSCollector::report_heap_summary(GCWhen::Type when) { 1950 _gc_tracer_cm->report_gc_heap_summary(when, _last_heap_summary); 1951 _gc_tracer_cm->report_metaspace_summary(when, _last_metaspace_summary); 1952 } 1953 1954 bool CMSCollector::waitForForegroundGC() { 1955 bool res = false; 1956 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), 1957 "CMS thread should have CMS token"); 1958 // Block the foreground collector until the 1959 // background collectors decides whether to 1960 // yield. 
1961 MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag); 1962 _foregroundGCShouldWait = true; 1963 if (_foregroundGCIsActive) { 1964 // The background collector yields to the 1965 // foreground collector and returns a value 1966 // indicating that it has yielded. The foreground 1967 // collector can proceed. 1968 res = true; 1969 _foregroundGCShouldWait = false; 1970 ConcurrentMarkSweepThread::clear_CMS_flag( 1971 ConcurrentMarkSweepThread::CMS_cms_has_token); 1972 ConcurrentMarkSweepThread::set_CMS_flag( 1973 ConcurrentMarkSweepThread::CMS_cms_wants_token); 1974 // Get a possibly blocked foreground thread going 1975 CGC_lock->notify(); 1976 log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d", 1977 p2i(Thread::current()), _collectorState); 1978 while (_foregroundGCIsActive) { 1979 CGC_lock->wait_without_safepoint_check(); 1980 } 1981 ConcurrentMarkSweepThread::set_CMS_flag( 1982 ConcurrentMarkSweepThread::CMS_cms_has_token); 1983 ConcurrentMarkSweepThread::clear_CMS_flag( 1984 ConcurrentMarkSweepThread::CMS_cms_wants_token); 1985 } 1986 log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d", 1987 p2i(Thread::current()), _collectorState); 1988 return res; 1989 } 1990 1991 // Because of the need to lock the free lists and other structures in 1992 // the collector, common to all the generations that the collector is 1993 // collecting, we need the gc_prologues of individual CMS generations 1994 // to delegate to their collector. It may have been simpler had the 1995 // current infrastructure allowed one to call a prologue on a 1996 // collector. In the absence of that we have the generation's 1997 // prologue delegate to the collector, which delegates back 1998 // some "local" work to a worker method in the individual generations 1999 // that it's responsible for collecting, while itself doing any 2000 // work common to all generations it's responsible for. A similar 2001 // comment applies to the gc_epilogue()'s. 2002 // The role of the variable _between_prologue_and_epilogue is to 2003 // enforce the invocation protocol. 2004 void CMSCollector::gc_prologue(bool full) { 2005 // Call gc_prologue_work() for the CMSGen 2006 // we are responsible for. 2007 2008 // The following locking discipline assumes that we are only called 2009 // when the world is stopped. 2010 assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption"); 2011 2012 // The CMSCollector prologue must call the gc_prologues for the 2013 // "generations" that it's responsible 2014 // for. 2015 2016 assert( Thread::current()->is_VM_thread() 2017 || ( CMSScavengeBeforeRemark 2018 && Thread::current()->is_ConcurrentGC_thread()), 2019 "Incorrect thread type for prologue execution"); 2020 2021 if (_between_prologue_and_epilogue) { 2022 // We have already been invoked; this is a gc_prologue delegation 2023 // from yet another CMS generation that we are responsible for, just 2024 // ignore it since all relevant work has already been done. 2025 return; 2026 } 2027 2028 // set a bit saying prologue has been called; cleared in epilogue 2029 _between_prologue_and_epilogue = true; 2030 // Claim locks for common data structures, then call gc_prologue_work() 2031 // for each CMSGen.
2032 2033 getFreelistLocks(); // gets free list locks on constituent spaces 2034 bitMapLock()->lock_without_safepoint_check(); 2035 2036 // Should call gc_prologue_work() for all cms gens we are responsible for 2037 bool duringMarking = _collectorState >= Marking 2038 && _collectorState < Sweeping; 2039 2040 // The young collections clear the modified oops state, which tells if 2041 // there are any modified oops in the class. The remark phase also needs 2042 // that information. Tell the young collection to save the union of all 2043 // modified klasses. 2044 if (duringMarking) { 2045 _ct->cld_rem_set()->set_accumulate_modified_oops(true); 2046 } 2047 2048 bool registerClosure = duringMarking; 2049 2050 _cmsGen->gc_prologue_work(full, registerClosure, &_modUnionClosurePar); 2051 2052 if (!full) { 2053 stats().record_gc0_begin(); 2054 } 2055 } 2056 2057 void ConcurrentMarkSweepGeneration::gc_prologue(bool full) { 2058 2059 _capacity_at_prologue = capacity(); 2060 _used_at_prologue = used(); 2061 _cmsSpace->recalculate_used_stable(); 2062 2063 // We enable promotion tracking so that card-scanning can recognize 2064 // which objects have been promoted during this GC and skip them. 2065 for (uint i = 0; i < ParallelGCThreads; i++) { 2066 _par_gc_thread_states[i]->promo.startTrackingPromotions(); 2067 } 2068 2069 // Delegate to CMSCollector which knows how to coordinate between 2070 // this and any other CMS generations that it is responsible for 2071 // collecting. 2072 collector()->gc_prologue(full); 2073 } 2074 2075 // This is a "private" interface for use by this generation's CMSCollector. 2076 // Not to be called directly by any other entity (for instance, 2077 // GenCollectedHeap, which calls the "public" gc_prologue method above). 2078 void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full, 2079 bool registerClosure, ModUnionClosure* modUnionClosure) { 2080 assert(!incremental_collection_failed(), "Shouldn't be set yet"); 2081 assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL, 2082 "Should be NULL"); 2083 if (registerClosure) { 2084 cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure); 2085 } 2086 cmsSpace()->gc_prologue(); 2087 // Clear stat counters 2088 NOT_PRODUCT( 2089 assert(_numObjectsPromoted == 0, "check"); 2090 assert(_numWordsPromoted == 0, "check"); 2091 log_develop_trace(gc, alloc)("Allocated " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes concurrently", 2092 _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord)); 2093 _numObjectsAllocated = 0; 2094 _numWordsAllocated = 0; 2095 ) 2096 } 2097 2098 void CMSCollector::gc_epilogue(bool full) { 2099 // The following locking discipline assumes that we are only called 2100 // when the world is stopped. 2101 assert(SafepointSynchronize::is_at_safepoint(), 2102 "world is stopped assumption"); 2103 2104 // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks 2105 // if linear allocation blocks need to be appropriately marked to allow 2106 // the blocks to be parsable. We also check here whether we need to nudge the 2107 // CMS collector thread to start a new cycle (if it's not already active).
2108 assert( Thread::current()->is_VM_thread() 2109 || ( CMSScavengeBeforeRemark 2110 && Thread::current()->is_ConcurrentGC_thread()), 2111 "Incorrect thread type for epilogue execution"); 2112 2113 if (!_between_prologue_and_epilogue) { 2114 // We have already been invoked; this is a gc_epilogue delegation 2115 // from yet another CMS generation that we are responsible for, just 2116 // ignore it since all relevant work has already been done. 2117 return; 2118 } 2119 assert(haveFreelistLocks(), "must have freelist locks"); 2120 assert_lock_strong(bitMapLock()); 2121 2122 _ct->cld_rem_set()->set_accumulate_modified_oops(false); 2123 2124 _cmsGen->gc_epilogue_work(full); 2125 2126 if (_collectorState == AbortablePreclean || _collectorState == Precleaning) { 2127 // in case sampling was not already enabled, enable it 2128 _start_sampling = true; 2129 } 2130 // reset _eden_chunk_array so sampling starts afresh 2131 _eden_chunk_index = 0; 2132 2133 size_t cms_used = _cmsGen->cmsSpace()->used(); 2134 _cmsGen->cmsSpace()->recalculate_used_stable(); 2135 2136 // update performance counters - this uses a special version of 2137 // update_counters() that allows the utilization to be passed as a 2138 // parameter, avoiding multiple calls to used(). 2139 // 2140 _cmsGen->update_counters(cms_used); 2141 2142 bitMapLock()->unlock(); 2143 releaseFreelistLocks(); 2144 2145 if (!CleanChunkPoolAsync) { 2146 Chunk::clean_chunk_pool(); 2147 } 2148 2149 set_did_compact(false); 2150 _between_prologue_and_epilogue = false; // ready for next cycle 2151 } 2152 2153 void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) { 2154 collector()->gc_epilogue(full); 2155 2156 // When using ParNew, promotion tracking should have already been 2157 // disabled. However, the prologue (which enables promotion 2158 // tracking) and epilogue are called irrespective of the type of 2159 // GC. So they will also be called before and after Full GCs, during 2160 // which promotion tracking will not be explicitly disabled. So, 2161 // it's safer to also disable it here (to be symmetric with 2162 // enabling it in the prologue). 2163 for (uint i = 0; i < ParallelGCThreads; i++) { 2164 _par_gc_thread_states[i]->promo.stopTrackingPromotions(); 2165 } 2166 } 2167 2168 void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) { 2169 assert(!incremental_collection_failed(), "Should have been cleared"); 2170 cmsSpace()->setPreconsumptionDirtyCardClosure(NULL); 2171 cmsSpace()->gc_epilogue(); 2172 // Print stat counters 2173 NOT_PRODUCT( 2174 assert(_numObjectsAllocated == 0, "check"); 2175 assert(_numWordsAllocated == 0, "check"); 2176 log_develop_trace(gc, promotion)("Promoted " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes", 2177 _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord)); 2178 _numObjectsPromoted = 0; 2179 _numWordsPromoted = 0; 2180 ) 2181 2182 // The call down the chain in contiguous_available() needs the freelistLock, 2183 // so print this out before releasing the freelistLock.
2184 log_develop_trace(gc)(" Contiguous available " SIZE_FORMAT " bytes ", contiguous_available()); 2185 } 2186 2187 #ifndef PRODUCT 2188 bool CMSCollector::have_cms_token() { 2189 Thread* thr = Thread::current(); 2190 if (thr->is_VM_thread()) { 2191 return ConcurrentMarkSweepThread::vm_thread_has_cms_token(); 2192 } else if (thr->is_ConcurrentGC_thread()) { 2193 return ConcurrentMarkSweepThread::cms_thread_has_cms_token(); 2194 } else if (thr->is_GC_task_thread()) { 2195 return ConcurrentMarkSweepThread::vm_thread_has_cms_token() && 2196 ParGCRareEvent_lock->owned_by_self(); 2197 } 2198 return false; 2199 } 2200 2201 // Check reachability of the given heap address in CMS generation, 2202 // treating all other generations as roots. 2203 bool CMSCollector::is_cms_reachable(HeapWord* addr) { 2204 // We could "guarantee" below, rather than assert, but I'll 2205 // leave these as "asserts" so that an adventurous debugger 2206 // could try this in the product build provided some subset of 2207 // the conditions were met, provided they were interested in the 2208 // results and knew that the computation below wouldn't interfere 2209 // with other concurrent computations mutating the structures 2210 // being read or written. 2211 assert(SafepointSynchronize::is_at_safepoint(), 2212 "Else mutations in object graph will make answer suspect"); 2213 assert(have_cms_token(), "Should hold cms token"); 2214 assert(haveFreelistLocks(), "must hold free list locks"); 2215 assert_lock_strong(bitMapLock()); 2216 2217 // Clear the marking bit map array before starting, but, just 2218 // for kicks, first report if the given address is already marked 2219 tty->print_cr("Start: Address " PTR_FORMAT " is%s marked", p2i(addr), 2220 _markBitMap.isMarked(addr) ? "" : " not"); 2221 2222 if (verify_after_remark()) { 2223 MutexLocker x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag); 2224 bool result = verification_mark_bm()->isMarked(addr); 2225 tty->print_cr("TransitiveMark: Address " PTR_FORMAT " %s marked", p2i(addr), 2226 result ? "IS" : "is NOT"); 2227 return result; 2228 } else { 2229 tty->print_cr("Could not compute result"); 2230 return false; 2231 } 2232 } 2233 #endif 2234 2235 void 2236 CMSCollector::print_on_error(outputStream* st) { 2237 CMSCollector* collector = ConcurrentMarkSweepGeneration::_collector; 2238 if (collector != NULL) { 2239 CMSBitMap* bitmap = &collector->_markBitMap; 2240 st->print_cr("Marking Bits: (CMSBitMap*) " PTR_FORMAT, p2i(bitmap)); 2241 bitmap->print_on_error(st, " Bits: "); 2242 2243 st->cr(); 2244 2245 CMSBitMap* mut_bitmap = &collector->_modUnionTable; 2246 st->print_cr("Mod Union Table: (CMSBitMap*) " PTR_FORMAT, p2i(mut_bitmap)); 2247 mut_bitmap->print_on_error(st, " Bits: "); 2248 } 2249 } 2250 2251 //////////////////////////////////////////////////////// 2252 // CMS Verification Support 2253 //////////////////////////////////////////////////////// 2254 // Following the remark phase, the following invariant 2255 // should hold -- each object in the CMS heap which is 2256 // marked in the verification_mark_bm() should also be marked in markBitMap().
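// In other words, the verification marking must be a subset of the CMS
// marking; CMS may additionally have marked objects that have since become
// unreachable (floating garbage), which is why equality is not required.
// As an illustrative sketch only -- the names below are hypothetical and not
// part of this file -- the subset relation being checked, expressed over two
// plain bitsets, is:
//
//   #include <bitset>
//
//   template <size_t N>
//   bool marks_are_subset(const std::bitset<N>& verification_marks,
//                         const std::bitset<N>& cms_marks) {
//     // True iff every verification mark is also a CMS mark.
//     return (verification_marks & ~cms_marks).none();
//   }
//
// VerifyMarkedClosure below performs the same check bit by bit over the two
// CMSBitMaps and prints any object that violates it.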
2257 2258 class VerifyMarkedClosure: public BitMapClosure { 2259 CMSBitMap* _marks; 2260 bool _failed; 2261 2262 public: 2263 VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {} 2264 2265 bool do_bit(size_t offset) { 2266 HeapWord* addr = _marks->offsetToHeapWord(offset); 2267 if (!_marks->isMarked(addr)) { 2268 Log(gc, verify) log; 2269 ResourceMark rm; 2270 LogStream ls(log.error()); 2271 oop(addr)->print_on(&ls); 2272 log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr)); 2273 _failed = true; 2274 } 2275 return true; 2276 } 2277 2278 bool failed() { return _failed; } 2279 }; 2280 2281 bool CMSCollector::verify_after_remark() { 2282 GCTraceTime(Info, gc, phases, verify) tm("Verifying CMS Marking."); 2283 MutexLocker ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag); 2284 static bool init = false; 2285 2286 assert(SafepointSynchronize::is_at_safepoint(), 2287 "Else mutations in object graph will make answer suspect"); 2288 assert(have_cms_token(), 2289 "Else there may be mutual interference in use of " 2290 " verification data structures"); 2291 assert(_collectorState > Marking && _collectorState <= Sweeping, 2292 "Else marking info checked here may be obsolete"); 2293 assert(haveFreelistLocks(), "must hold free list locks"); 2294 assert_lock_strong(bitMapLock()); 2295 2296 2297 // Allocate marking bit map if not already allocated 2298 if (!init) { // first time 2299 if (!verification_mark_bm()->allocate(_span)) { 2300 return false; 2301 } 2302 init = true; 2303 } 2304 2305 assert(verification_mark_stack()->isEmpty(), "Should be empty"); 2306 2307 // Turn off refs discovery -- so we will be tracing through refs. 2308 // This is as intended, because by this time 2309 // GC must already have cleared any refs that need to be cleared, 2310 // and traced those that need to be marked; moreover, 2311 // the marking done here is not going to interfere in any 2312 // way with the marking information used by GC. 2313 NoRefDiscovery no_discovery(ref_processor()); 2314 2315 #if COMPILER2_OR_JVMCI 2316 DerivedPointerTableDeactivate dpt_deact; 2317 #endif 2318 2319 // Clear any marks from a previous round 2320 verification_mark_bm()->clear_all(); 2321 assert(verification_mark_stack()->isEmpty(), "markStack should be empty"); 2322 verify_work_stacks_empty(); 2323 2324 CMSHeap* heap = CMSHeap::heap(); 2325 heap->ensure_parsability(false); // fill TLABs, but no need to retire them 2326 // Update the saved marks which may affect the root scans. 2327 heap->save_marks(); 2328 2329 if (CMSRemarkVerifyVariant == 1) { 2330 // In this first variant of verification, we complete 2331 // all marking, then check if the new marks-vector is 2332 // a subset of the CMS marks-vector. 2333 verify_after_remark_work_1(); 2334 } else { 2335 guarantee(CMSRemarkVerifyVariant == 2, "Range checking for CMSRemarkVerifyVariant should guarantee 1 or 2"); 2336 // In this second variant of verification, we flag an error 2337 // (i.e. an object reachable in the new marks-vector not reachable 2338 // in the CMS marks-vector) immediately, also indicating the 2339 // identify of an object (A) that references the unmarked object (B) -- 2340 // presumably, a mutation to A failed to be picked up by preclean/remark? 
2341 verify_after_remark_work_2(); 2342 } 2343 2344 return true; 2345 } 2346 2347 void CMSCollector::verify_after_remark_work_1() { 2348 ResourceMark rm; 2349 HandleMark hm; 2350 CMSHeap* heap = CMSHeap::heap(); 2351 2352 // Get a clear set of claim bits for the roots processing to work with. 2353 ClassLoaderDataGraph::clear_claimed_marks(); 2354 2355 // Mark from roots one level into CMS 2356 MarkRefsIntoClosure notOlder(_span, verification_mark_bm()); 2357 heap->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel. 2358 2359 { 2360 StrongRootsScope srs(1); 2361 2362 heap->cms_process_roots(&srs, 2363 true, // young gen as roots 2364 GenCollectedHeap::ScanningOption(roots_scanning_options()), 2365 should_unload_classes(), 2366 ¬Older, 2367 NULL); 2368 } 2369 2370 // Now mark from the roots 2371 MarkFromRootsClosure markFromRootsClosure(this, _span, 2372 verification_mark_bm(), verification_mark_stack(), 2373 false /* don't yield */, true /* verifying */); 2374 assert(_restart_addr == NULL, "Expected pre-condition"); 2375 verification_mark_bm()->iterate(&markFromRootsClosure); 2376 while (_restart_addr != NULL) { 2377 // Deal with stack overflow: by restarting at the indicated 2378 // address. 2379 HeapWord* ra = _restart_addr; 2380 markFromRootsClosure.reset(ra); 2381 _restart_addr = NULL; 2382 verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end()); 2383 } 2384 assert(verification_mark_stack()->isEmpty(), "Should have been drained"); 2385 verify_work_stacks_empty(); 2386 2387 // Marking completed -- now verify that each bit marked in 2388 // verification_mark_bm() is also marked in markBitMap(); flag all 2389 // errors by printing corresponding objects. 2390 VerifyMarkedClosure vcl(markBitMap()); 2391 verification_mark_bm()->iterate(&vcl); 2392 if (vcl.failed()) { 2393 Log(gc, verify) log; 2394 log.error("Failed marking verification after remark"); 2395 ResourceMark rm; 2396 LogStream ls(log.error()); 2397 heap->print_on(&ls); 2398 fatal("CMS: failed marking verification after remark"); 2399 } 2400 } 2401 2402 class VerifyCLDOopsCLDClosure : public CLDClosure { 2403 class VerifyCLDOopsClosure : public OopClosure { 2404 CMSBitMap* _bitmap; 2405 public: 2406 VerifyCLDOopsClosure(CMSBitMap* bitmap) : _bitmap(bitmap) { } 2407 void do_oop(oop* p) { guarantee(*p == NULL || _bitmap->isMarked((HeapWord*) *p), "Should be marked"); } 2408 void do_oop(narrowOop* p) { ShouldNotReachHere(); } 2409 } _oop_closure; 2410 public: 2411 VerifyCLDOopsCLDClosure(CMSBitMap* bitmap) : _oop_closure(bitmap) {} 2412 void do_cld(ClassLoaderData* cld) { 2413 cld->oops_do(&_oop_closure, ClassLoaderData::_claim_none, false); 2414 } 2415 }; 2416 2417 void CMSCollector::verify_after_remark_work_2() { 2418 ResourceMark rm; 2419 HandleMark hm; 2420 CMSHeap* heap = CMSHeap::heap(); 2421 2422 // Get a clear set of claim bits for the roots processing to work with. 2423 ClassLoaderDataGraph::clear_claimed_marks(); 2424 2425 // Mark from roots one level into CMS 2426 MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(), 2427 markBitMap()); 2428 CLDToOopClosure cld_closure(¬Older, ClassLoaderData::_claim_strong); 2429 2430 heap->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel. 
2431 2432 { 2433 StrongRootsScope srs(1); 2434 2435 heap->cms_process_roots(&srs, 2436 true, // young gen as roots 2437 GenCollectedHeap::ScanningOption(roots_scanning_options()), 2438 should_unload_classes(), 2439 ¬Older, 2440 &cld_closure); 2441 } 2442 2443 // Now mark from the roots 2444 MarkFromRootsVerifyClosure markFromRootsClosure(this, _span, 2445 verification_mark_bm(), markBitMap(), verification_mark_stack()); 2446 assert(_restart_addr == NULL, "Expected pre-condition"); 2447 verification_mark_bm()->iterate(&markFromRootsClosure); 2448 while (_restart_addr != NULL) { 2449 // Deal with stack overflow: by restarting at the indicated 2450 // address. 2451 HeapWord* ra = _restart_addr; 2452 markFromRootsClosure.reset(ra); 2453 _restart_addr = NULL; 2454 verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end()); 2455 } 2456 assert(verification_mark_stack()->isEmpty(), "Should have been drained"); 2457 verify_work_stacks_empty(); 2458 2459 VerifyCLDOopsCLDClosure verify_cld_oops(verification_mark_bm()); 2460 ClassLoaderDataGraph::cld_do(&verify_cld_oops); 2461 2462 // Marking completed -- now verify that each bit marked in 2463 // verification_mark_bm() is also marked in markBitMap(); flag all 2464 // errors by printing corresponding objects. 2465 VerifyMarkedClosure vcl(markBitMap()); 2466 verification_mark_bm()->iterate(&vcl); 2467 assert(!vcl.failed(), "Else verification above should not have succeeded"); 2468 } 2469 2470 void ConcurrentMarkSweepGeneration::save_marks() { 2471 // delegate to CMS space 2472 cmsSpace()->save_marks(); 2473 } 2474 2475 bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() { 2476 return cmsSpace()->no_allocs_since_save_marks(); 2477 } 2478 2479 void 2480 ConcurrentMarkSweepGeneration::oop_iterate(OopIterateClosure* cl) { 2481 if (freelistLock()->owned_by_self()) { 2482 Generation::oop_iterate(cl); 2483 } else { 2484 MutexLocker x(freelistLock(), Mutex::_no_safepoint_check_flag); 2485 Generation::oop_iterate(cl); 2486 } 2487 } 2488 2489 void 2490 ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) { 2491 if (freelistLock()->owned_by_self()) { 2492 Generation::object_iterate(cl); 2493 } else { 2494 MutexLocker x(freelistLock(), Mutex::_no_safepoint_check_flag); 2495 Generation::object_iterate(cl); 2496 } 2497 } 2498 2499 void 2500 ConcurrentMarkSweepGeneration::safe_object_iterate(ObjectClosure* cl) { 2501 if (freelistLock()->owned_by_self()) { 2502 Generation::safe_object_iterate(cl); 2503 } else { 2504 MutexLocker x(freelistLock(), Mutex::_no_safepoint_check_flag); 2505 Generation::safe_object_iterate(cl); 2506 } 2507 } 2508 2509 void 2510 ConcurrentMarkSweepGeneration::post_compact() { 2511 } 2512 2513 void 2514 ConcurrentMarkSweepGeneration::prepare_for_verify() { 2515 // Fix the linear allocation blocks to look like free blocks. 2516 2517 // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those 2518 // are not called when the heap is verified during universe initialization and 2519 // at vm shutdown. 2520 if (freelistLock()->owned_by_self()) { 2521 cmsSpace()->prepare_for_verify(); 2522 } else { 2523 MutexLocker fll(freelistLock(), Mutex::_no_safepoint_check_flag); 2524 cmsSpace()->prepare_for_verify(); 2525 } 2526 } 2527 2528 void 2529 ConcurrentMarkSweepGeneration::verify() { 2530 // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those 2531 // are not called when the heap is verified during universe initialization and 2532 // at vm shutdown. 
2533 if (freelistLock()->owned_by_self()) { 2534 cmsSpace()->verify(); 2535 } else { 2536 MutexLocker fll(freelistLock(), Mutex::_no_safepoint_check_flag); 2537 cmsSpace()->verify(); 2538 } 2539 } 2540 2541 void CMSCollector::verify() { 2542 _cmsGen->verify(); 2543 } 2544 2545 #ifndef PRODUCT 2546 bool CMSCollector::overflow_list_is_empty() const { 2547 assert(_num_par_pushes >= 0, "Inconsistency"); 2548 if (_overflow_list == NULL) { 2549 assert(_num_par_pushes == 0, "Inconsistency"); 2550 } 2551 return _overflow_list == NULL; 2552 } 2553 2554 // The methods verify_work_stacks_empty() and verify_overflow_empty() 2555 // merely consolidate assertion checks that appear to occur together frequently. 2556 void CMSCollector::verify_work_stacks_empty() const { 2557 assert(_markStack.isEmpty(), "Marking stack should be empty"); 2558 assert(overflow_list_is_empty(), "Overflow list should be empty"); 2559 } 2560 2561 void CMSCollector::verify_overflow_empty() const { 2562 assert(overflow_list_is_empty(), "Overflow list should be empty"); 2563 assert(no_preserved_marks(), "No preserved marks"); 2564 } 2565 #endif // PRODUCT 2566 2567 // Decide if we want to enable class unloading as part of the 2568 // ensuing concurrent GC cycle. We will collect and 2569 // unload classes if it's the case that: 2570 // (a) class unloading is enabled at the command line, and 2571 // (b) old gen is getting really full 2572 // NOTE: Provided there is no change in the state of the heap between 2573 // calls to this method, it should have idempotent results. Moreover, 2574 // its results should be monotonically increasing (i.e. going from 0 to 1, 2575 // but not 1 to 0) between successive calls between which the heap was 2576 // not collected. The implementation below thus relies on 2577 // the property that concurrent_cycles_since_last_unload() 2578 // will not decrease unless a collection cycle happened, and that 2579 // _cmsGen->is_too_full() is 2580 // itself also monotonic in that sense. See check_monotonicity() 2581 // below. 2582 void CMSCollector::update_should_unload_classes() { 2583 _should_unload_classes = false; 2584 if (CMSClassUnloadingEnabled) { 2585 _should_unload_classes = (concurrent_cycles_since_last_unload() >= 2586 CMSClassUnloadingMaxInterval) 2587 || _cmsGen->is_too_full(); 2588 } 2589 } 2590 2591 bool ConcurrentMarkSweepGeneration::is_too_full() const { 2592 bool res = should_concurrent_collect(); 2593 res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0); 2594 return res; 2595 } 2596 2597 void CMSCollector::setup_cms_unloading_and_verification_state() { 2598 const bool should_verify = VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC 2599 || VerifyBeforeExit; 2600 const int rso = GenCollectedHeap::SO_AllCodeCache; 2601 2602 // We set the proper root for this CMS cycle here. 2603 if (should_unload_classes()) { // Should unload classes this cycle 2604 remove_root_scanning_option(rso); // Shrink the root set appropriately 2605 set_verifying(should_verify); // Set verification state for this cycle 2606 return; // Nothing else needs to be done at this time 2607 } 2608 2609 // Not unloading classes this cycle 2610 assert(!should_unload_classes(), "Inconsistency!"); 2611 2612 // If we are not unloading classes then add SO_AllCodeCache to root 2613 // scanning options.
2614 add_root_scanning_option(rso); 2615 2616 if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) { 2617 set_verifying(true); 2618 } else if (verifying() && !should_verify) { 2619 // We were verifying, but some verification flags got disabled. 2620 set_verifying(false); 2621 // Exclude symbols, strings and code cache elements from root scanning to 2622 // reduce IM and RM pauses. 2623 remove_root_scanning_option(rso); 2624 } 2625 } 2626 2627 2628 #ifndef PRODUCT 2629 HeapWord* CMSCollector::block_start(const void* p) const { 2630 const HeapWord* addr = (HeapWord*)p; 2631 if (_span.contains(p)) { 2632 if (_cmsGen->cmsSpace()->is_in_reserved(addr)) { 2633 return _cmsGen->cmsSpace()->block_start(p); 2634 } 2635 } 2636 return NULL; 2637 } 2638 #endif 2639 2640 HeapWord* 2641 ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size, 2642 bool tlab, 2643 bool parallel) { 2644 CMSSynchronousYieldRequest yr; 2645 assert(!tlab, "Can't deal with TLAB allocation"); 2646 MutexLocker x(freelistLock(), Mutex::_no_safepoint_check_flag); 2647 expand_for_gc_cause(word_size*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_allocation); 2648 if (GCExpandToAllocateDelayMillis > 0) { 2649 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false); 2650 } 2651 return have_lock_and_allocate(word_size, tlab); 2652 } 2653 2654 void ConcurrentMarkSweepGeneration::expand_for_gc_cause( 2655 size_t bytes, 2656 size_t expand_bytes, 2657 CMSExpansionCause::Cause cause) 2658 { 2659 2660 bool success = expand(bytes, expand_bytes); 2661 2662 // remember why we expanded; this information is used 2663 // by shouldConcurrentCollect() when making decisions on whether to start 2664 // a new CMS cycle. 2665 if (success) { 2666 set_expansion_cause(cause); 2667 log_trace(gc)("Expanded CMS gen for %s", CMSExpansionCause::to_string(cause)); 2668 } 2669 } 2670 2671 HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) { 2672 HeapWord* res = NULL; 2673 MutexLocker x(ParGCRareEvent_lock); 2674 while (true) { 2675 // Expansion by some other thread might make alloc OK now: 2676 res = ps->lab.alloc(word_sz); 2677 if (res != NULL) return res; 2678 // If there's not enough expansion space available, give up. 2679 if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) { 2680 return NULL; 2681 } 2682 // Otherwise, we try expansion. 2683 expand_for_gc_cause(word_sz*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_lab); 2684 // Now go around the loop and try alloc again; 2685 // A competing par_promote might beat us to the expansion space, 2686 // so we may go around the loop again if promotion fails again. 2687 if (GCExpandToAllocateDelayMillis > 0) { 2688 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false); 2689 } 2690 } 2691 } 2692 2693 2694 bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space( 2695 PromotionInfo* promo) { 2696 MutexLocker x(ParGCRareEvent_lock); 2697 size_t refill_size_bytes = promo->refillSize() * HeapWordSize; 2698 while (true) { 2699 // Expansion by some other thread might make alloc OK now: 2700 if (promo->ensure_spooling_space()) { 2701 assert(promo->has_spooling_space(), 2702 "Post-condition of successful ensure_spooling_space()"); 2703 return true; 2704 } 2705 // If there's not enough expansion space available, give up. 2706 if (_virtual_space.uncommitted_size() < refill_size_bytes) { 2707 return false; 2708 } 2709 // Otherwise, we try expansion. 
2710 expand_for_gc_cause(refill_size_bytes, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_spooling_space); 2711 // Now go around the loop and try alloc again; 2712 // A competing allocation might beat us to the expansion space, 2713 // so we may go around the loop again if allocation fails again. 2714 if (GCExpandToAllocateDelayMillis > 0) { 2715 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false); 2716 } 2717 } 2718 } 2719 2720 void ConcurrentMarkSweepGeneration::shrink(size_t bytes) { 2721 // Only shrink if a compaction was done so that all the free space 2722 // in the generation is in a contiguous block at the end. 2723 if (did_compact()) { 2724 CardGeneration::shrink(bytes); 2725 } 2726 } 2727 2728 void ConcurrentMarkSweepGeneration::assert_correct_size_change_locking() { 2729 assert_locked_or_safepoint(Heap_lock); 2730 } 2731 2732 void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) { 2733 assert_locked_or_safepoint(Heap_lock); 2734 assert_lock_strong(freelistLock()); 2735 log_trace(gc)("Shrinking of CMS not yet implemented"); 2736 return; 2737 } 2738 2739 2740 // Simple ctor/dtor wrapper for accounting & timer chores around concurrent 2741 // phases. 2742 class CMSPhaseAccounting: public StackObj { 2743 public: 2744 CMSPhaseAccounting(CMSCollector *collector, 2745 const char *title); 2746 ~CMSPhaseAccounting(); 2747 2748 private: 2749 CMSCollector *_collector; 2750 const char *_title; 2751 GCTraceConcTime(Info, gc) _trace_time; 2752 2753 public: 2754 // Not MT-safe; so do not pass around these StackObj's 2755 // where they may be accessed by other threads. 2756 double wallclock_millis() { 2757 return TimeHelper::counter_to_millis(os::elapsed_counter() - _trace_time.start_time()); 2758 } 2759 }; 2760 2761 CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector, 2762 const char *title) : 2763 _collector(collector), _title(title), _trace_time(title) { 2764 2765 _collector->resetYields(); 2766 _collector->resetTimer(); 2767 _collector->startTimer(); 2768 _collector->gc_timer_cm()->register_gc_concurrent_start(title); 2769 } 2770 2771 CMSPhaseAccounting::~CMSPhaseAccounting() { 2772 _collector->gc_timer_cm()->register_gc_concurrent_end(); 2773 _collector->stopTimer(); 2774 log_debug(gc)("Concurrent active time: %.3fms", TimeHelper::counter_to_millis(_collector->timerTicks())); 2775 log_trace(gc)(" (CMS %s yielded %d times)", _title, _collector->yields()); 2776 } 2777 2778 // CMS work 2779 2780 // The common parts of CMSParInitialMarkTask and CMSParRemarkTask. 2781 class CMSParMarkTask : public AbstractGangTask { 2782 protected: 2783 CMSCollector* _collector; 2784 uint _n_workers; 2785 CMSParMarkTask(const char* name, CMSCollector* collector, uint n_workers) : 2786 AbstractGangTask(name), 2787 _collector(collector), 2788 _n_workers(n_workers) {} 2789 // Work method in support of parallel rescan ... 
of young gen spaces 2790 void do_young_space_rescan(OopsInGenClosure* cl, 2791 ContiguousSpace* space, 2792 HeapWord** chunk_array, size_t chunk_top); 2793 void work_on_young_gen_roots(OopsInGenClosure* cl); 2794 }; 2795 2796 // Parallel initial mark task 2797 class CMSParInitialMarkTask: public CMSParMarkTask { 2798 StrongRootsScope* _strong_roots_scope; 2799 public: 2800 CMSParInitialMarkTask(CMSCollector* collector, StrongRootsScope* strong_roots_scope, uint n_workers) : 2801 CMSParMarkTask("Scan roots and young gen for initial mark in parallel", collector, n_workers), 2802 _strong_roots_scope(strong_roots_scope) {} 2803 void work(uint worker_id); 2804 }; 2805 2806 // Checkpoint the roots into this generation from outside 2807 // this generation. [Note this initial checkpoint need only 2808 // be approximate -- we'll do a catch up phase subsequently.] 2809 void CMSCollector::checkpointRootsInitial() { 2810 assert(_collectorState == InitialMarking, "Wrong collector state"); 2811 check_correct_thread_executing(); 2812 TraceCMSMemoryManagerStats tms(_collectorState, CMSHeap::heap()->gc_cause()); 2813 2814 save_heap_summary(); 2815 report_heap_summary(GCWhen::BeforeGC); 2816 2817 ReferenceProcessor* rp = ref_processor(); 2818 assert(_restart_addr == NULL, "Control point invariant"); 2819 { 2820 // acquire locks for subsequent manipulations 2821 MutexLocker x(bitMapLock(), 2822 Mutex::_no_safepoint_check_flag); 2823 checkpointRootsInitialWork(); 2824 // enable ("weak") refs discovery 2825 rp->enable_discovery(); 2826 _collectorState = Marking; 2827 } 2828 2829 _cmsGen->cmsSpace()->recalculate_used_stable(); 2830 } 2831 2832 void CMSCollector::checkpointRootsInitialWork() { 2833 assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped"); 2834 assert(_collectorState == InitialMarking, "just checking"); 2835 2836 // Already have locks. 2837 assert_lock_strong(bitMapLock()); 2838 assert(_markBitMap.isAllClear(), "was reset at end of previous cycle"); 2839 2840 // Setup the verification and class unloading state for this 2841 // CMS collection cycle. 2842 setup_cms_unloading_and_verification_state(); 2843 2844 GCTraceTime(Trace, gc, phases) ts("checkpointRootsInitialWork", _gc_timer_cm); 2845 2846 // Reset all the PLAB chunk arrays if necessary. 2847 if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) { 2848 reset_survivor_plab_arrays(); 2849 } 2850 2851 ResourceMark rm; 2852 HandleMark hm; 2853 2854 MarkRefsIntoClosure notOlder(_span, &_markBitMap); 2855 CMSHeap* heap = CMSHeap::heap(); 2856 2857 verify_work_stacks_empty(); 2858 verify_overflow_empty(); 2859 2860 heap->ensure_parsability(false); // fill TLABs, but no need to retire them 2861 // Update the saved marks which may affect the root scans. 2862 heap->save_marks(); 2863 2864 // weak reference processing has not started yet. 2865 ref_processor()->set_enqueuing_is_done(false); 2866 2867 // Need to remember all newly created CLDs, 2868 // so that we can guarantee that the remark finds them. 2869 ClassLoaderDataGraph::remember_new_clds(true); 2870 2871 // Whenever a CLD is found, it will be claimed before proceeding to mark 2872 // the klasses. The claimed marks need to be cleared before marking starts. 2873 ClassLoaderDataGraph::clear_claimed_marks(); 2874 2875 print_eden_and_survivor_chunk_arrays(); 2876 2877 { 2878 #if COMPILER2_OR_JVMCI 2879 DerivedPointerTableDeactivate dpt_deact; 2880 #endif 2881 if (CMSParallelInitialMarkEnabled) { 2882 // The parallel version. 
2883 WorkGang* workers = heap->workers(); 2884 assert(workers != NULL, "Need parallel worker threads."); 2885 uint n_workers = workers->active_workers(); 2886 2887 StrongRootsScope srs(n_workers); 2888 2889 CMSParInitialMarkTask tsk(this, &srs, n_workers); 2890 initialize_sequential_subtasks_for_young_gen_rescan(n_workers); 2891 // If the total workers is greater than 1, then multiple workers 2892 // may be used at some time and the initialization has been set 2893 // such that the single threaded path cannot be used. 2894 if (workers->total_workers() > 1) { 2895 workers->run_task(&tsk); 2896 } else { 2897 tsk.work(0); 2898 } 2899 } else { 2900 // The serial version. 2901 CLDToOopClosure cld_closure(&notOlder, ClassLoaderData::_claim_strong); 2902 heap->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel. 2903 2904 StrongRootsScope srs(1); 2905 2906 heap->cms_process_roots(&srs, 2907 true, // young gen as roots 2908 GenCollectedHeap::ScanningOption(roots_scanning_options()), 2909 should_unload_classes(), 2910 &notOlder, 2911 &cld_closure); 2912 } 2913 } 2914 2915 // Clear mod-union table; it will be dirtied in the prologue of 2916 // CMS generation per each young generation collection. 2917 2918 assert(_modUnionTable.isAllClear(), 2919 "Was cleared in most recent final checkpoint phase" 2920 " or no bits are set in the gc_prologue before the start of the next " 2921 "subsequent marking phase."); 2922 2923 assert(_ct->cld_rem_set()->mod_union_is_clear(), "Must be"); 2924 2925 // Save the end of the used_region of the constituent generations 2926 // to be used to limit the extent of sweep in each generation. 2927 save_sweep_limits(); 2928 verify_overflow_empty(); 2929 } 2930 2931 bool CMSCollector::markFromRoots() { 2932 // we might be tempted to assert that: 2933 // assert(!SafepointSynchronize::is_at_safepoint(), 2934 // "inconsistent argument?"); 2935 // However that wouldn't be right, because it's possible that 2936 // a safepoint is indeed in progress as a young generation 2937 // stop-the-world GC happens even as we mark in this generation. 2938 assert(_collectorState == Marking, "inconsistent state?"); 2939 check_correct_thread_executing(); 2940 verify_overflow_empty(); 2941 2942 // Weak ref discovery note: We may be discovering weak 2943 // refs in this generation concurrent (but interleaved) with 2944 // weak ref discovery by the young generation collector. 2945 2946 CMSTokenSyncWithLocks ts(true, bitMapLock()); 2947 GCTraceCPUTime tcpu; 2948 CMSPhaseAccounting pa(this, "Concurrent Mark"); 2949 bool res = markFromRootsWork(); 2950 if (res) { 2951 _collectorState = Precleaning; 2952 } else { // We failed and a foreground collection wants to take over 2953 assert(_foregroundGCIsActive, "internal state inconsistency"); 2954 assert(_restart_addr == NULL, "foreground will restart from scratch"); 2955 log_debug(gc)("bailing out to foreground collection"); 2956 } 2957 verify_overflow_empty(); 2958 return res; 2959 } 2960 2961 bool CMSCollector::markFromRootsWork() { 2962 // iterate over marked bits in bit map, doing a full scan and mark 2963 // from these roots using the following algorithm: 2964 // . if oop is to the right of the current scan pointer, 2965 // mark corresponding bit (we'll process it later) 2966 // . else (oop is to left of current scan pointer) 2967 // push oop on marking stack 2968 // .
drain the marking stack 2969 2970 // Note that when we do a marking step we need to hold the 2971 // bit map lock -- recall that direct allocation (by mutators) 2972 // and promotion (by the young generation collector) is also 2973 // marking the bit map. [the so-called allocate live policy.] 2974 // Because the implementation of bit map marking is not 2975 // robust wrt simultaneous marking of bits in the same word, 2976 // we need to make sure that there is no such interference 2977 // between concurrent such updates. 2978 2979 // already have locks 2980 assert_lock_strong(bitMapLock()); 2981 2982 verify_work_stacks_empty(); 2983 verify_overflow_empty(); 2984 bool result = false; 2985 if (CMSConcurrentMTEnabled && ConcGCThreads > 0) { 2986 result = do_marking_mt(); 2987 } else { 2988 result = do_marking_st(); 2989 } 2990 return result; 2991 } 2992 2993 // Forward decl 2994 class CMSConcMarkingTask; 2995 2996 class CMSConcMarkingParallelTerminator: public ParallelTaskTerminator { 2997 CMSCollector* _collector; 2998 CMSConcMarkingTask* _task; 2999 public: 3000 virtual void yield(); 3001 3002 // "n_threads" is the number of threads to be terminated. 3003 // "queue_set" is a set of work queues of other threads. 3004 // "collector" is the CMS collector associated with this task terminator. 3005 // "yield" indicates whether we need the gang as a whole to yield. 3006 CMSConcMarkingParallelTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) : 3007 ParallelTaskTerminator(n_threads, queue_set), 3008 _collector(collector) { } 3009 3010 void set_task(CMSConcMarkingTask* task) { 3011 _task = task; 3012 } 3013 }; 3014 3015 class CMSConcMarkingOWSTTerminator: public OWSTTaskTerminator { 3016 CMSCollector* _collector; 3017 CMSConcMarkingTask* _task; 3018 public: 3019 virtual void yield(); 3020 3021 // "n_threads" is the number of threads to be terminated. 3022 // "queue_set" is a set of work queues of other threads. 3023 // "collector" is the CMS collector associated with this task terminator. 3024 // "yield" indicates whether we need the gang as a whole to yield. 
3025 CMSConcMarkingOWSTTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) : 3026 OWSTTaskTerminator(n_threads, queue_set), 3027 _collector(collector) { } 3028 3029 void set_task(CMSConcMarkingTask* task) { 3030 _task = task; 3031 } 3032 }; 3033 3034 class CMSConcMarkingTaskTerminator { 3035 private: 3036 ParallelTaskTerminator* _term; 3037 public: 3038 CMSConcMarkingTaskTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) { 3039 if (UseOWSTTaskTerminator) { 3040 _term = new CMSConcMarkingOWSTTerminator(n_threads, queue_set, collector); 3041 } else { 3042 _term = new CMSConcMarkingParallelTerminator(n_threads, queue_set, collector); 3043 } 3044 } 3045 ~CMSConcMarkingTaskTerminator() { 3046 assert(_term != NULL, "Must not be NULL"); 3047 delete _term; 3048 } 3049 3050 void set_task(CMSConcMarkingTask* task); 3051 ParallelTaskTerminator* terminator() const { return _term; } 3052 }; 3053 3054 class CMSConcMarkingTerminatorTerminator: public TerminatorTerminator { 3055 CMSConcMarkingTask* _task; 3056 public: 3057 bool should_exit_termination(); 3058 void set_task(CMSConcMarkingTask* task) { 3059 _task = task; 3060 } 3061 }; 3062 3063 // MT Concurrent Marking Task 3064 class CMSConcMarkingTask: public YieldingFlexibleGangTask { 3065 CMSCollector* _collector; 3066 uint _n_workers; // requested/desired # workers 3067 bool _result; 3068 CompactibleFreeListSpace* _cms_space; 3069 char _pad_front[64]; // padding to ... 3070 HeapWord* volatile _global_finger; // ... avoid sharing cache line 3071 char _pad_back[64]; 3072 HeapWord* _restart_addr; 3073 3074 // Exposed here for yielding support 3075 Mutex* const _bit_map_lock; 3076 3077 // The per thread work queues, available here for stealing 3078 OopTaskQueueSet* _task_queues; 3079 3080 // Termination (and yielding) support 3081 CMSConcMarkingTaskTerminator _term; 3082 CMSConcMarkingTerminatorTerminator _term_term; 3083 3084 public: 3085 CMSConcMarkingTask(CMSCollector* collector, 3086 CompactibleFreeListSpace* cms_space, 3087 YieldingFlexibleWorkGang* workers, 3088 OopTaskQueueSet* task_queues): 3089 YieldingFlexibleGangTask("Concurrent marking done multi-threaded"), 3090 _collector(collector), 3091 _n_workers(0), 3092 _result(true), 3093 _cms_space(cms_space), 3094 _bit_map_lock(collector->bitMapLock()), 3095 _task_queues(task_queues), 3096 _term(_n_workers, task_queues, _collector) 3097 { 3098 _requested_size = _n_workers; 3099 _term.set_task(this); 3100 _term_term.set_task(this); 3101 _restart_addr = _global_finger = _cms_space->bottom(); 3102 } 3103 3104 3105 OopTaskQueueSet* task_queues() { return _task_queues; } 3106 3107 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); } 3108 3109 HeapWord* volatile* global_finger_addr() { return &_global_finger; } 3110 3111 ParallelTaskTerminator* terminator() { return _term.terminator(); } 3112 3113 virtual void set_for_termination(uint active_workers) { 3114 terminator()->reset_for_reuse(active_workers); 3115 } 3116 3117 void work(uint worker_id); 3118 bool should_yield() { 3119 return ConcurrentMarkSweepThread::should_yield() 3120 && !_collector->foregroundGCIsActive(); 3121 } 3122 3123 virtual void coordinator_yield(); // stuff done by coordinator 3124 bool result() { return _result; } 3125 3126 void reset(HeapWord* ra) { 3127 assert(_global_finger >= _cms_space->end(), "Postcondition of ::work(i)"); 3128 _restart_addr = _global_finger = ra; 3129 _term.terminator()->reset_for_reuse(); 3130 } 3131 3132 static bool 
get_work_from_overflow_stack(CMSMarkStack* ovflw_stk, 3133 OopTaskQueue* work_q); 3134 3135 private: 3136 void do_scan_and_mark(int i, CompactibleFreeListSpace* sp); 3137 void do_work_steal(int i); 3138 void bump_global_finger(HeapWord* f); 3139 }; 3140 3141 bool CMSConcMarkingTerminatorTerminator::should_exit_termination() { 3142 assert(_task != NULL, "Error"); 3143 return _task->yielding(); 3144 // Note that we do not need the disjunct || _task->should_yield() above 3145 // because we want terminating threads to yield only if the task 3146 // is already in the midst of yielding, which happens only after at least one 3147 // thread has yielded. 3148 } 3149 3150 void CMSConcMarkingParallelTerminator::yield() { 3151 if (_task->should_yield()) { 3152 _task->yield(); 3153 } else { 3154 ParallelTaskTerminator::yield(); 3155 } 3156 } 3157 3158 void CMSConcMarkingOWSTTerminator::yield() { 3159 if (_task->should_yield()) { 3160 _task->yield(); 3161 } else { 3162 OWSTTaskTerminator::yield(); 3163 } 3164 } 3165 3166 void CMSConcMarkingTaskTerminator::set_task(CMSConcMarkingTask* task) { 3167 if (UseOWSTTaskTerminator) { 3168 ((CMSConcMarkingOWSTTerminator*)_term)->set_task(task); 3169 } else { 3170 ((CMSConcMarkingParallelTerminator*)_term)->set_task(task); 3171 } 3172 } 3173 3174 //////////////////////////////////////////////////////////////// 3175 // Concurrent Marking Algorithm Sketch 3176 //////////////////////////////////////////////////////////////// 3177 // Until all tasks exhausted (both spaces): 3178 // -- claim next available chunk 3179 // -- bump global finger via CAS 3180 // -- find first object that starts in this chunk 3181 // and start scanning bitmap from that position 3182 // -- scan marked objects for oops 3183 // -- CAS-mark target, and if successful: 3184 // . if target oop is above global finger (volatile read) 3185 // nothing to do 3186 // . if target oop is in chunk and above local finger 3187 // then nothing to do 3188 // . else push on work-queue 3189 // -- Deal with possible overflow issues: 3190 // . local work-queue overflow causes stuff to be pushed on 3191 // global (common) overflow queue 3192 // . always first empty local work queue 3193 // . then get a batch of oops from global work queue if any 3194 // . then do work stealing 3195 // -- When all tasks claimed (both spaces) 3196 // and local work queue empty, 3197 // then in a loop do: 3198 // . check global overflow stack; steal a batch of oops and trace 3199 // . try to steal from other threads if GOS is empty 3200 // . if neither is available, offer termination 3201 // -- Terminate and return result 3202 // 3203 void CMSConcMarkingTask::work(uint worker_id) { 3204 elapsedTimer _timer; 3205 ResourceMark rm; 3206 HandleMark hm; 3207 3208 DEBUG_ONLY(_collector->verify_overflow_empty();) 3209 3210 // Before we begin work, our work queue should be empty 3211 assert(work_queue(worker_id)->size() == 0, "Expected to be empty"); 3212 // Scan the bitmap covering _cms_space, tracing through grey objects. 3213 _timer.start(); 3214 do_scan_and_mark(worker_id, _cms_space); 3215 _timer.stop(); 3216 log_trace(gc, task)("Finished cms space scanning in %dth thread: %3.3f sec", worker_id, _timer.seconds()); 3217 3218 // ...
do work stealing 3219 _timer.reset(); 3220 _timer.start(); 3221 do_work_steal(worker_id); 3222 _timer.stop(); 3223 log_trace(gc, task)("Finished work stealing in %dth thread: %3.3f sec", worker_id, _timer.seconds()); 3224 assert(_collector->_markStack.isEmpty(), "Should have been emptied"); 3225 assert(work_queue(worker_id)->size() == 0, "Should have been emptied"); 3226 // Note that under the current task protocol, the 3227 // following assertion is true even if the spaces 3228 // expanded since the completion of the concurrent 3229 // marking. XXX This will likely change under a strict 3230 // ABORT semantics. 3231 // After perm removal the comparison was changed to 3232 // greater than or equal to from strictly greater than. 3233 // Before perm removal the highest address sweep would 3234 // have been at the end of perm gen but now is at the 3235 // end of the tenured gen. 3236 assert(_global_finger >= _cms_space->end(), 3237 "All tasks have been completed"); 3238 DEBUG_ONLY(_collector->verify_overflow_empty();) 3239 } 3240 3241 void CMSConcMarkingTask::bump_global_finger(HeapWord* f) { 3242 HeapWord* read = _global_finger; 3243 HeapWord* cur = read; 3244 while (f > read) { 3245 cur = read; 3246 read = Atomic::cmpxchg(f, &_global_finger, cur); 3247 if (cur == read) { 3248 // our cas succeeded 3249 assert(_global_finger >= f, "protocol consistency"); 3250 break; 3251 } 3252 } 3253 } 3254 3255 // This is really inefficient, and should be redone by 3256 // using (not yet available) block-read and -write interfaces to the 3257 // stack and the work_queue. XXX FIX ME !!! 3258 bool CMSConcMarkingTask::get_work_from_overflow_stack(CMSMarkStack* ovflw_stk, 3259 OopTaskQueue* work_q) { 3260 // Fast lock-free check 3261 if (ovflw_stk->length() == 0) { 3262 return false; 3263 } 3264 assert(work_q->size() == 0, "Shouldn't steal"); 3265 MutexLocker ml(ovflw_stk->par_lock(), 3266 Mutex::_no_safepoint_check_flag); 3267 // Grab up to 1/4 the size of the work queue 3268 size_t num = MIN2((size_t)(work_q->max_elems() - work_q->size())/4, 3269 (size_t)ParGCDesiredObjsFromOverflowList); 3270 num = MIN2(num, ovflw_stk->length()); 3271 for (int i = (int) num; i > 0; i--) { 3272 oop cur = ovflw_stk->pop(); 3273 assert(cur != NULL, "Counted wrong?"); 3274 work_q->push(cur); 3275 } 3276 return num > 0; 3277 } 3278 3279 void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) { 3280 SequentialSubTasksDone* pst = sp->conc_par_seq_tasks(); 3281 int n_tasks = pst->n_tasks(); 3282 // We allow that there may be no tasks to do here because 3283 // we are restarting after a stack overflow. 3284 assert(pst->valid() || n_tasks == 0, "Uninitialized use?"); 3285 uint nth_task = 0; 3286 3287 HeapWord* aligned_start = sp->bottom(); 3288 if (sp->used_region().contains(_restart_addr)) { 3289 // Align down to a card boundary for the start of 0th task 3290 // for this space.
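// (Rounding the restart address down to a card boundary means the 0th
// task's chunk re-covers the card that was only partially scanned before
// the overflow; all chunk boundaries computed below are measured from this
// aligned start, so nothing at or above the restart point is skipped.)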
3291 aligned_start = align_down(_restart_addr, CardTable::card_size); 3292 } 3293 3294 size_t chunk_size = sp->marking_task_size(); 3295 while (pst->try_claim_task(/* reference */ nth_task)) { 3296 // Having claimed the nth task in this space, 3297 // compute the chunk that it corresponds to: 3298 MemRegion span = MemRegion(aligned_start + nth_task*chunk_size, 3299 aligned_start + (nth_task+1)*chunk_size); 3300 // Try and bump the global finger via a CAS; 3301 // note that we need to do the global finger bump 3302 // _before_ taking the intersection below, because 3303 // the task corresponding to that region will be 3304 // deemed done even if the used_region() expands 3305 // because of allocation -- as it almost certainly will 3306 // during start-up while the threads yield in the 3307 // closure below. 3308 HeapWord* finger = span.end(); 3309 bump_global_finger(finger); // atomically 3310 // There are null tasks here corresponding to chunks 3311 // beyond the "top" address of the space. 3312 span = span.intersection(sp->used_region()); 3313 if (!span.is_empty()) { // Non-null task 3314 HeapWord* prev_obj; 3315 assert(!span.contains(_restart_addr) || nth_task == 0, 3316 "Inconsistency"); 3317 if (nth_task == 0) { 3318 // For the 0th task, we'll not need to compute a block_start. 3319 if (span.contains(_restart_addr)) { 3320 // In the case of a restart because of stack overflow, 3321 // we might additionally skip a chunk prefix. 3322 prev_obj = _restart_addr; 3323 } else { 3324 prev_obj = span.start(); 3325 } 3326 } else { 3327 // We want to skip the first object because 3328 // the protocol is to scan any object in its entirety 3329 // that _starts_ in this span; a fortiori, any 3330 // object starting in an earlier span is scanned 3331 // as part of an earlier claimed task. 3332 // Below we use the "careful" version of block_start 3333 // so we do not try to navigate uninitialized objects. 3334 prev_obj = sp->block_start_careful(span.start()); 3335 // Below we use a variant of block_size that uses the 3336 // Printezis bits to avoid waiting for allocated 3337 // objects to become initialized/parsable. 3338 while (prev_obj < span.start()) { 3339 size_t sz = sp->block_size_no_stall(prev_obj, _collector); 3340 if (sz > 0) { 3341 prev_obj += sz; 3342 } else { 3343 // In this case we may end up doing a bit of redundant 3344 // scanning, but that appears unavoidable, short of 3345 // locking the free list locks; see bug 6324141. 3346 break; 3347 } 3348 } 3349 } 3350 if (prev_obj < span.end()) { 3351 MemRegion my_span = MemRegion(prev_obj, span.end()); 3352 // Do the marking work within a non-empty span -- 3353 // the last argument to the constructor indicates whether the 3354 // iteration should be incremental with periodic yields. 3355 ParMarkFromRootsClosure cl(this, _collector, my_span, 3356 &_collector->_markBitMap, 3357 work_queue(i), 3358 &_collector->_markStack); 3359 _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end()); 3360 } // else nothing to do for this task 3361 } // else nothing to do for this task 3362 } 3363 // We'd be tempted to assert here that since there are no 3364 // more tasks left to claim in this space, the global_finger 3365 // must exceed space->top() and a fortiori space->end(). 
However, 3366 // that would not quite be correct because the bumping of 3367 // global_finger occurs strictly after the claiming of a task, 3368 // so by the time we reach here the global finger may not yet 3369 // have been bumped up by the thread that claimed the last 3370 // task. 3371 pst->all_tasks_completed(); 3372 } 3373 3374 class ParConcMarkingClosure: public MetadataVisitingOopIterateClosure { 3375 private: 3376 CMSCollector* _collector; 3377 CMSConcMarkingTask* _task; 3378 MemRegion _span; 3379 CMSBitMap* _bit_map; 3380 CMSMarkStack* _overflow_stack; 3381 OopTaskQueue* _work_queue; 3382 protected: 3383 DO_OOP_WORK_DEFN 3384 public: 3385 ParConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue, 3386 CMSBitMap* bit_map, CMSMarkStack* overflow_stack): 3387 MetadataVisitingOopIterateClosure(collector->ref_processor()), 3388 _collector(collector), 3389 _task(task), 3390 _span(collector->_span), 3391 _bit_map(bit_map), 3392 _overflow_stack(overflow_stack), 3393 _work_queue(work_queue) 3394 { } 3395 virtual void do_oop(oop* p); 3396 virtual void do_oop(narrowOop* p); 3397 3398 void trim_queue(size_t max); 3399 void handle_stack_overflow(HeapWord* lost); 3400 void do_yield_check() { 3401 if (_task->should_yield()) { 3402 _task->yield(); 3403 } 3404 } 3405 }; 3406 3407 DO_OOP_WORK_IMPL(ParConcMarkingClosure) 3408 3409 // Grey object scanning during work stealing phase -- 3410 // the salient assumption here is that any references 3411 // that are in these stolen objects being scanned must 3412 // already have been initialized (else they would not have 3413 // been published), so we do not need to check for 3414 // uninitialized objects before pushing here. 3415 void ParConcMarkingClosure::do_oop(oop obj) { 3416 assert(oopDesc::is_oop_or_null(obj, true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj)); 3417 HeapWord* addr = (HeapWord*)obj; 3418 // Check if oop points into the CMS generation 3419 // and is not marked 3420 if (_span.contains(addr) && !_bit_map->isMarked(addr)) { 3421 // a white object ... 3422 // If we manage to "claim" the object, by being the 3423 // first thread to mark it, then we push it on our 3424 // marking stack 3425 if (_bit_map->par_mark(addr)) { // ... now grey 3426 // push on work queue (grey set) 3427 bool simulate_overflow = false; 3428 NOT_PRODUCT( 3429 if (CMSMarkStackOverflowALot && 3430 _collector->simulate_overflow()) { 3431 // simulate a stack overflow 3432 simulate_overflow = true; 3433 } 3434 ) 3435 if (simulate_overflow || 3436 !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) { 3437 // stack overflow 3438 log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _overflow_stack->capacity()); 3439 // We cannot assert that the overflow stack is full because 3440 // it may have been emptied since. 
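// The push above tries the local work queue first and falls back to the
// shared overflow stack; only when both fail (or an overflow is being
// simulated) do we take this slow path. handle_stack_overflow(), defined
// below, records the least discarded address via lower_restart_addr(),
// discards the overflow stack contents and attempts to expand the stack,
// so that marking can later restart from that address.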
3441 assert(simulate_overflow || 3442 _work_queue->size() == _work_queue->max_elems(), 3443 "Else push should have succeeded"); 3444 handle_stack_overflow(addr); 3445 } 3446 } // Else, some other thread got there first 3447 do_yield_check(); 3448 } 3449 } 3450 3451 void ParConcMarkingClosure::trim_queue(size_t max) { 3452 while (_work_queue->size() > max) { 3453 oop new_oop; 3454 if (_work_queue->pop_local(new_oop)) { 3455 assert(oopDesc::is_oop(new_oop), "Should be an oop"); 3456 assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object"); 3457 assert(_span.contains((HeapWord*)new_oop), "Not in span"); 3458 new_oop->oop_iterate(this); // do_oop() above 3459 do_yield_check(); 3460 } 3461 } 3462 } 3463 3464 // Upon stack overflow, we discard (part of) the stack, 3465 // remembering the least address amongst those discarded 3466 // in CMSCollector's _restart_address. 3467 void ParConcMarkingClosure::handle_stack_overflow(HeapWord* lost) { 3468 // We need to do this under a mutex to prevent other 3469 // workers from interfering with the work done below. 3470 MutexLocker ml(_overflow_stack->par_lock(), 3471 Mutex::_no_safepoint_check_flag); 3472 // Remember the least grey address discarded 3473 HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost); 3474 _collector->lower_restart_addr(ra); 3475 _overflow_stack->reset(); // discard stack contents 3476 _overflow_stack->expand(); // expand the stack if possible 3477 } 3478 3479 3480 void CMSConcMarkingTask::do_work_steal(int i) { 3481 OopTaskQueue* work_q = work_queue(i); 3482 oop obj_to_scan; 3483 CMSBitMap* bm = &(_collector->_markBitMap); 3484 CMSMarkStack* ovflw = &(_collector->_markStack); 3485 ParConcMarkingClosure cl(_collector, this, work_q, bm, ovflw); 3486 while (true) { 3487 cl.trim_queue(0); 3488 assert(work_q->size() == 0, "Should have been emptied above"); 3489 if (get_work_from_overflow_stack(ovflw, work_q)) { 3490 // Can't assert below because the work obtained from the 3491 // overflow stack may already have been stolen from us. 3492 // assert(work_q->size() > 0, "Work from overflow stack"); 3493 continue; 3494 } else if (task_queues()->steal(i, /* reference */ obj_to_scan)) { 3495 assert(oopDesc::is_oop(obj_to_scan), "Should be an oop"); 3496 assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object"); 3497 obj_to_scan->oop_iterate(&cl); 3498 } else if (terminator()->offer_termination(&_term_term)) { 3499 assert(work_q->size() == 0, "Impossible!"); 3500 break; 3501 } else if (yielding() || should_yield()) { 3502 yield(); 3503 } 3504 } 3505 } 3506 3507 // This is run by the CMS (coordinator) thread. 3508 void CMSConcMarkingTask::coordinator_yield() { 3509 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), 3510 "CMS thread should hold CMS token"); 3511 // First give up the locks, then yield, then re-lock 3512 // We should probably use a constructor/destructor idiom to 3513 // do this unlock/lock or modify the MutexUnlocker class to 3514 // serve our purpose. XXX 3515 assert_lock_strong(_bit_map_lock); 3516 _bit_map_lock->unlock(); 3517 ConcurrentMarkSweepThread::desynchronize(true); 3518 _collector->stopTimer(); 3519 _collector->incrementYields(); 3520 3521 // It is possible for whichever thread initiated the yield request 3522 // not to get a chance to wake up and take the bitmap lock between 3523 // this thread releasing it and reacquiring it. So, while the 3524 // should_yield() flag is on, let's sleep for a bit to give the 3525 // other thread a chance to wake up. 
The limit imposed on the number 3526 // of iterations is defensive, to avoid any unforeseen circumstances 3527 // putting us into an infinite loop. Since it's always been this 3528 // (coordinator_yield()) method that was observed to cause the 3529 // problem, we are using a parameter (CMSCoordinatorYieldSleepCount) 3530 // which is by default non-zero. For the other seven methods that 3531 // also perform the yield operation, we are using a different 3532 // parameter (CMSYieldSleepCount) which is by default zero. This way we 3533 // can enable the sleeping for those methods too, if necessary. 3534 // See 6442774. 3535 // 3536 // We really need to reconsider the synchronization between the GC 3537 // thread and the yield-requesting threads in the future and we 3538 // should really use wait/notify, which is the recommended 3539 // way of doing this type of interaction. Additionally, we should 3540 // consolidate the eight methods that do the yield operation, which 3541 // are almost identical, into one for better maintainability and 3542 // readability. See 6445193. 3543 // 3544 // Tony 2006.06.29 3545 for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount && 3546 ConcurrentMarkSweepThread::should_yield() && 3547 !CMSCollector::foregroundGCIsActive(); ++i) { 3548 os::sleep(Thread::current(), 1, false); 3549 } 3550 3551 ConcurrentMarkSweepThread::synchronize(true); 3552 _bit_map_lock->lock_without_safepoint_check(); 3553 _collector->startTimer(); 3554 } 3555 3556 bool CMSCollector::do_marking_mt() { 3557 assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition"); 3558 uint num_workers = WorkerPolicy::calc_active_conc_workers(conc_workers()->total_workers(), 3559 conc_workers()->active_workers(), 3560 Threads::number_of_non_daemon_threads()); 3561 num_workers = conc_workers()->update_active_workers(num_workers); 3562 log_info(gc,task)("Using %u workers of %u for marking", num_workers, conc_workers()->total_workers()); 3563 3564 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace(); 3565 3566 CMSConcMarkingTask tsk(this, 3567 cms_space, 3568 conc_workers(), 3569 task_queues()); 3570 3571 // Since the actual number of workers we get may be different 3572 // from the number we requested above, do we need to do anything different 3573 // below? In particular, maybe we need to subclass the SequentialSubTasksDone 3574 // class? XXX 3575 cms_space ->initialize_sequential_subtasks_for_marking(num_workers); 3576 3577 // Refs discovery is already non-atomic. 3578 assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic"); 3579 assert(ref_processor()->discovery_is_mt(), "Discovery should be MT"); 3580 conc_workers()->start_task(&tsk); 3581 while (tsk.yielded()) { 3582 tsk.coordinator_yield(); 3583 conc_workers()->continue_task(&tsk); 3584 } 3585 // If the task was aborted, _restart_addr will be non-NULL 3586 assert(tsk.completed() || _restart_addr != NULL, "Inconsistency"); 3587 while (_restart_addr != NULL) { 3588 // XXX For now we do not make use of ABORTED state and have not 3589 // yet implemented the right abort semantics (even in the original 3590 // single-threaded CMS case). That needs some more investigation 3591 // and is deferred for now; see CR# TBF. 07252005YSR. XXX 3592 assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency"); 3593 // If _restart_addr is non-NULL, a marking stack overflow 3594 // occurred; we need to do a fresh marking iteration from the 3595 // indicated restart address.
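// Roughly, the restart loop below proceeds as follows:
//   while (_restart_addr != NULL):
//     . if a foreground collection is active, clear _restart_addr and bail out
//     . otherwise reset the task and the marking subtasks to _restart_addr,
//       clear _restart_addr, and run the workers again (yielding to the
//       coordinator as needed)
// Each pass either completes marking or records a fresh restart address.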
3596 if (_foregroundGCIsActive) { 3597 // We may be running into repeated stack overflows, having 3598 // reached the limit of the stack size, while making very 3599 // slow forward progress. It may be best to bail out and 3600 // let the foreground collector do its job. 3601 // Clear _restart_addr, so that foreground GC 3602 // works from scratch. This avoids the headache of 3603 // a "rescan" which would otherwise be needed because 3604 // of the dirty mod union table & card table. 3605 _restart_addr = NULL; 3606 return false; 3607 } 3608 // Adjust the task to restart from _restart_addr 3609 tsk.reset(_restart_addr); 3610 cms_space ->initialize_sequential_subtasks_for_marking(num_workers, 3611 _restart_addr); 3612 _restart_addr = NULL; 3613 // Get the workers going again 3614 conc_workers()->start_task(&tsk); 3615 while (tsk.yielded()) { 3616 tsk.coordinator_yield(); 3617 conc_workers()->continue_task(&tsk); 3618 } 3619 } 3620 assert(tsk.completed(), "Inconsistency"); 3621 assert(tsk.result() == true, "Inconsistency"); 3622 return true; 3623 } 3624 3625 bool CMSCollector::do_marking_st() { 3626 ResourceMark rm; 3627 HandleMark hm; 3628 3629 // Temporarily make refs discovery single threaded (non-MT) 3630 ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false); 3631 MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap, 3632 &_markStack, CMSYield); 3633 // the last argument to iterate indicates whether the iteration 3634 // should be incremental with periodic yields. 3635 _markBitMap.iterate(&markFromRootsClosure); 3636 // If _restart_addr is non-NULL, a marking stack overflow 3637 // occurred; we need to do a fresh iteration from the 3638 // indicated restart address. 3639 while (_restart_addr != NULL) { 3640 if (_foregroundGCIsActive) { 3641 // We may be running into repeated stack overflows, having 3642 // reached the limit of the stack size, while making very 3643 // slow forward progress. It may be best to bail out and 3644 // let the foreground collector do its job. 3645 // Clear _restart_addr, so that foreground GC 3646 // works from scratch. This avoids the headache of 3647 // a "rescan" which would otherwise be needed because 3648 // of the dirty mod union table & card table. 3649 _restart_addr = NULL; 3650 return false; // indicating failure to complete marking 3651 } 3652 // Deal with stack overflow: 3653 // we restart marking from _restart_addr 3654 HeapWord* ra = _restart_addr; 3655 markFromRootsClosure.reset(ra); 3656 _restart_addr = NULL; 3657 _markBitMap.iterate(&markFromRootsClosure, ra, _span.end()); 3658 } 3659 return true; 3660 } 3661 3662 void CMSCollector::preclean() { 3663 check_correct_thread_executing(); 3664 assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread"); 3665 verify_work_stacks_empty(); 3666 verify_overflow_empty(); 3667 _abort_preclean = false; 3668 if (CMSPrecleaningEnabled) { 3669 if (!CMSEdenChunksRecordAlways) { 3670 _eden_chunk_index = 0; 3671 } 3672 size_t used = get_eden_used(); 3673 size_t capacity = get_eden_capacity(); 3674 // Don't start sampling unless we will get sufficiently 3675 // many samples. 
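// In other words, sampling starts only while eden occupancy is still below
// (capacity / CMSScheduleRemarkSamplingRatio / 100) * CMSScheduleRemarkEdenPenetration,
// i.e. a factor of CMSScheduleRemarkSamplingRatio below the occupancy at
// which sample_eden() will schedule the remark. As an illustration only
// (not a statement about the defaults), with a penetration of 50% and a
// sampling ratio of 5, sampling starts only while eden is under 10% full.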
3676 if (used < (((capacity / CMSScheduleRemarkSamplingRatio) / 100) 3677 * CMSScheduleRemarkEdenPenetration)) { 3678 _start_sampling = true; 3679 } else { 3680 _start_sampling = false; 3681 } 3682 GCTraceCPUTime tcpu; 3683 CMSPhaseAccounting pa(this, "Concurrent Preclean"); 3684 preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1); 3685 } 3686 CMSTokenSync x(true); // is cms thread 3687 if (CMSPrecleaningEnabled) { 3688 sample_eden(); 3689 _collectorState = AbortablePreclean; 3690 } else { 3691 _collectorState = FinalMarking; 3692 } 3693 verify_work_stacks_empty(); 3694 verify_overflow_empty(); 3695 } 3696 3697 // Try and schedule the remark such that young gen 3698 // occupancy is CMSScheduleRemarkEdenPenetration %. 3699 void CMSCollector::abortable_preclean() { 3700 check_correct_thread_executing(); 3701 assert(CMSPrecleaningEnabled, "Inconsistent control state"); 3702 assert(_collectorState == AbortablePreclean, "Inconsistent control state"); 3703 3704 // If Eden's current occupancy is below this threshold, 3705 // immediately schedule the remark; else preclean 3706 // past the next scavenge in an effort to 3707 // schedule the pause as described above. By choosing 3708 // CMSScheduleRemarkEdenSizeThreshold >= max eden size 3709 // we will never do an actual abortable preclean cycle. 3710 if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) { 3711 GCTraceCPUTime tcpu; 3712 CMSPhaseAccounting pa(this, "Concurrent Abortable Preclean"); 3713 // We need more smarts in the abortable preclean 3714 // loop below to deal with cases where allocation 3715 // in young gen is very very slow, and our precleaning 3716 // is running a losing race against a horde of 3717 // mutators intent on flooding us with CMS updates 3718 // (dirty cards). 3719 // One, admittedly dumb, strategy is to give up 3720 // after a certain number of abortable precleaning loops 3721 // or after a certain maximum time. We want to make 3722 // this smarter in the next iteration. 3723 // XXX FIX ME!!! YSR 3724 size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0; 3725 while (!(should_abort_preclean() || 3726 ConcurrentMarkSweepThread::cmst()->should_terminate())) { 3727 workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2); 3728 cumworkdone += workdone; 3729 loops++; 3730 // Voluntarily terminate abortable preclean phase if we have 3731 // been at it for too long. 3732 if ((CMSMaxAbortablePrecleanLoops != 0) && 3733 loops >= CMSMaxAbortablePrecleanLoops) { 3734 log_debug(gc)(" CMS: abort preclean due to loops "); 3735 break; 3736 } 3737 if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) { 3738 log_debug(gc)(" CMS: abort preclean due to time "); 3739 break; 3740 } 3741 // If we are doing little work each iteration, we should 3742 // take a short break. 3743 if (workdone < CMSAbortablePrecleanMinWorkPerIteration) { 3744 // Sleep for some time, waiting for work to accumulate 3745 stopTimer(); 3746 cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis); 3747 startTimer(); 3748 waited++; 3749 } 3750 } 3751 log_trace(gc)(" [" SIZE_FORMAT " iterations, " SIZE_FORMAT " waits, " SIZE_FORMAT " cards)] ", 3752 loops, waited, cumworkdone); 3753 } 3754 CMSTokenSync x(true); // is cms thread 3755 if (_collectorState != Idling) { 3756 assert(_collectorState == AbortablePreclean, 3757 "Spontaneous state transition?"); 3758 _collectorState = FinalMarking; 3759 } // Else, a foreground collection completed this CMS cycle. 
3760 return; 3761 } 3762 3763 // Respond to an Eden sampling opportunity 3764 void CMSCollector::sample_eden() { 3765 // Make sure a young gc cannot sneak in between our 3766 // reading and recording of a sample. 3767 assert(Thread::current()->is_ConcurrentGC_thread(), 3768 "Only the cms thread may collect Eden samples"); 3769 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), 3770 "Should collect samples while holding CMS token"); 3771 if (!_start_sampling) { 3772 return; 3773 } 3774 // When CMSEdenChunksRecordAlways is true, the eden chunk array 3775 // is populated by the young generation. 3776 if (_eden_chunk_array != NULL && !CMSEdenChunksRecordAlways) { 3777 if (_eden_chunk_index < _eden_chunk_capacity) { 3778 _eden_chunk_array[_eden_chunk_index] = *_top_addr; // take sample 3779 assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr, 3780 "Unexpected state of Eden"); 3781 // We'd like to check that what we just sampled is an oop-start address; 3782 // however, we cannot do that here since the object may not yet have been 3783 // initialized. So we'll instead do the check when we _use_ this sample 3784 // later. 3785 if (_eden_chunk_index == 0 || 3786 (pointer_delta(_eden_chunk_array[_eden_chunk_index], 3787 _eden_chunk_array[_eden_chunk_index-1]) 3788 >= CMSSamplingGrain)) { 3789 _eden_chunk_index++; // commit sample 3790 } 3791 } 3792 } 3793 if ((_collectorState == AbortablePreclean) && !_abort_preclean) { 3794 size_t used = get_eden_used(); 3795 size_t capacity = get_eden_capacity(); 3796 assert(used <= capacity, "Unexpected state of Eden"); 3797 if (used > (capacity/100 * CMSScheduleRemarkEdenPenetration)) { 3798 _abort_preclean = true; 3799 } 3800 } 3801 } 3802 3803 size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) { 3804 assert(_collectorState == Precleaning || 3805 _collectorState == AbortablePreclean, "incorrect state"); 3806 ResourceMark rm; 3807 HandleMark hm; 3808 3809 // Precleaning is currently not MT but the reference processor 3810 // may be set for MT. Disable it temporarily here. 3811 ReferenceProcessor* rp = ref_processor(); 3812 ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false); 3813 3814 // Do one pass of scrubbing the discovered reference lists 3815 // to remove any reference objects with strongly-reachable 3816 // referents. 3817 if (clean_refs) { 3818 CMSPrecleanRefsYieldClosure yield_cl(this); 3819 assert(_span_based_discoverer.span().equals(_span), "Spans should be equal"); 3820 CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap, 3821 &_markStack, true /* preclean */); 3822 CMSDrainMarkingStackClosure complete_trace(this, 3823 _span, &_markBitMap, &_markStack, 3824 &keep_alive, true /* preclean */); 3825 3826 // We don't want this step to interfere with a young 3827 // collection because we don't want to take CPU 3828 // or memory bandwidth away from the young GC threads 3829 // (which may be as many as there are CPUs). 3830 // Note that we don't need to protect ourselves from 3831 // interference with mutators because they can't 3832 // manipulate the discovered reference lists nor affect 3833 // the computed reachability of the referents, the 3834 // only properties manipulated by the precleaning 3835 // of these reference lists. 3836 stopTimer(); 3837 CMSTokenSyncWithLocks x(true /* is cms thread */, 3838 bitMapLock()); 3839 startTimer(); 3840 sample_eden(); 3841 3842 // The following will yield to allow foreground 3843 // collection to proceed promptly. 
XXX YSR: 3844 // The code in this method may need further 3845 // tweaking for better performance and some restructuring 3846 // for cleaner interfaces. 3847 GCTimer *gc_timer = NULL; // Currently not tracing concurrent phases 3848 rp->preclean_discovered_references( 3849 rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl, 3850 gc_timer); 3851 } 3852 3853 if (clean_survivor) { // preclean the active survivor space(s) 3854 PushAndMarkClosure pam_cl(this, _span, ref_processor(), 3855 &_markBitMap, &_modUnionTable, 3856 &_markStack, true /* precleaning phase */); 3857 stopTimer(); 3858 CMSTokenSyncWithLocks ts(true /* is cms thread */, 3859 bitMapLock()); 3860 startTimer(); 3861 unsigned int before_count = 3862 CMSHeap::heap()->total_collections(); 3863 SurvivorSpacePrecleanClosure 3864 sss_cl(this, _span, &_markBitMap, &_markStack, 3865 &pam_cl, before_count, CMSYield); 3866 _young_gen->from()->object_iterate_careful(&sss_cl); 3867 _young_gen->to()->object_iterate_careful(&sss_cl); 3868 } 3869 MarkRefsIntoAndScanClosure 3870 mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable, 3871 &_markStack, this, CMSYield, 3872 true /* precleaning phase */); 3873 // CAUTION: The following closure has persistent state that may need to 3874 // be reset upon a decrease in the sequence of addresses it 3875 // processes. 3876 ScanMarkedObjectsAgainCarefullyClosure 3877 smoac_cl(this, _span, 3878 &_markBitMap, &_markStack, &mrias_cl, CMSYield); 3879 3880 // Preclean dirty cards in ModUnionTable and CardTable using 3881 // appropriate convergence criterion; 3882 // repeat CMSPrecleanIter times unless we find that 3883 // we are losing. 3884 assert(CMSPrecleanIter < 10, "CMSPrecleanIter is too large"); 3885 assert(CMSPrecleanNumerator < CMSPrecleanDenominator, 3886 "Bad convergence multiplier"); 3887 assert(CMSPrecleanThreshold >= 100, 3888 "Unreasonably low CMSPrecleanThreshold"); 3889 3890 size_t numIter, cumNumCards, lastNumCards, curNumCards; 3891 for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0; 3892 numIter < CMSPrecleanIter; 3893 numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) { 3894 curNumCards = preclean_mod_union_table(_cmsGen, &smoac_cl); 3895 log_trace(gc)(" (modUnionTable: " SIZE_FORMAT " cards)", curNumCards); 3896 // Either there are very few dirty cards, so re-mark 3897 // pause will be small anyway, or our pre-cleaning isn't 3898 // that much faster than the rate at which cards are being 3899 // dirtied, so we might as well stop and re-mark since 3900 // precleaning won't improve our re-mark time by much. 3901 if (curNumCards <= CMSPrecleanThreshold || 3902 (numIter > 0 && 3903 (curNumCards * CMSPrecleanDenominator > 3904 lastNumCards * CMSPrecleanNumerator))) { 3905 numIter++; 3906 cumNumCards += curNumCards; 3907 break; 3908 } 3909 } 3910 3911 preclean_cld(&mrias_cl, _cmsGen->freelistLock()); 3912 3913 curNumCards = preclean_card_table(_cmsGen, &smoac_cl); 3914 cumNumCards += curNumCards; 3915 log_trace(gc)(" (cardTable: " SIZE_FORMAT " cards, re-scanned " SIZE_FORMAT " cards, " SIZE_FORMAT " iterations)", 3916 curNumCards, cumNumCards, numIter); 3917 return cumNumCards; // as a measure of useful work done 3918 } 3919 3920 // PRECLEANING NOTES: 3921 // Precleaning involves: 3922 // . reading the bits of the modUnionTable and clearing the set bits. 3923 // . For the cards corresponding to the set bits, we scan the 3924 // objects on those cards. 
This means we need the free_list_lock 3925 // so that we can safely iterate over the CMS space when scanning 3926 // for oops. 3927 // . When we scan the objects, we'll be both reading and setting 3928 // marks in the marking bit map, so we'll need the marking bit map. 3929 // . For protecting _collector_state transitions, we take the CGC_lock. 3930 // Note that any races in the reading of card table entries by the 3931 // CMS thread on the one hand and the clearing of those entries by the 3932 // VM thread or the setting of those entries by the mutator threads on the 3933 // other are quite benign. However, for efficiency it makes sense to keep 3934 // the VM thread from racing with the CMS thread while the latter is 3935 // processing dirty card info in the modUnionTable. We therefore also use the 3936 // CGC_lock to protect the reading of the card table and the mod union 3937 // table by the CM thread. 3938 // . We run concurrently with mutator updates, so scanning 3939 // needs to be done carefully -- we should not try to scan 3940 // potentially uninitialized objects. 3941 // 3942 // Locking strategy: While holding the CGC_lock, we scan over and 3943 // reset a maximal dirty range of the mod union / card tables, then lock 3944 // the free_list_lock and bitmap lock to do a full marking, then 3945 // release these locks; and repeat the cycle. This allows for a 3946 // certain amount of fairness in the sharing of these locks between 3947 // the CMS collector on the one hand, and the VM thread and the 3948 // mutators on the other. 3949 3950 // NOTE: preclean_mod_union_table() and preclean_card_table() 3951 // further below are largely identical; if you need to modify 3952 // one of these methods, please check the other method too. 3953 3954 size_t CMSCollector::preclean_mod_union_table( 3955 ConcurrentMarkSweepGeneration* old_gen, 3956 ScanMarkedObjectsAgainCarefullyClosure* cl) { 3957 verify_work_stacks_empty(); 3958 verify_overflow_empty(); 3959 3960 // strategy: starting with the first card, accumulate contiguous 3961 // ranges of dirty cards; clear these cards, then scan the region 3962 // covered by these cards. 3963 3964 // Since all of the MUT is committed ahead, we can just use 3965 // that, in case the generations expand while we are precleaning. 3966 // It might also be fine to just use the committed part of the 3967 // generation, but we might potentially miss cards when the 3968 // generation is rapidly expanding while we are in the midst 3969 // of precleaning. 3970 HeapWord* startAddr = old_gen->reserved().start(); 3971 HeapWord* endAddr = old_gen->reserved().end(); 3972 3973 cl->setFreelistLock(old_gen->freelistLock()); // needed for yielding 3974 3975 size_t numDirtyCards, cumNumDirtyCards; 3976 HeapWord *nextAddr, *lastAddr; 3977 for (cumNumDirtyCards = numDirtyCards = 0, 3978 nextAddr = lastAddr = startAddr; 3979 nextAddr < endAddr; 3980 nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) { 3981 3982 ResourceMark rm; 3983 HandleMark hm; 3984 3985 MemRegion dirtyRegion; 3986 { 3987 stopTimer(); 3988 // Potential yield point 3989 CMSTokenSync ts(true); 3990 startTimer(); 3991 sample_eden(); 3992 // Get dirty region starting at nextOffset (inclusive), 3993 // simultaneously clearing it. 3994 dirtyRegion = 3995 _modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr); 3996 assert(dirtyRegion.start() >= nextAddr, 3997 "returned region inconsistent?"); 3998 } 3999 // Remember where the next search should begin.
4000 // The returned region (if non-empty) is a right open interval, 4001 // so lastOffset is obtained from the right end of that 4002 // interval. 4003 lastAddr = dirtyRegion.end(); 4004 // Should do something more transparent and less hacky XXX 4005 numDirtyCards = 4006 _modUnionTable.heapWordDiffToOffsetDiff(dirtyRegion.word_size()); 4007 4008 // We'll scan the cards in the dirty region (with periodic 4009 // yields for foreground GC as needed). 4010 if (!dirtyRegion.is_empty()) { 4011 assert(numDirtyCards > 0, "consistency check"); 4012 HeapWord* stop_point = NULL; 4013 stopTimer(); 4014 // Potential yield point 4015 CMSTokenSyncWithLocks ts(true, old_gen->freelistLock(), 4016 bitMapLock()); 4017 startTimer(); 4018 { 4019 verify_work_stacks_empty(); 4020 verify_overflow_empty(); 4021 sample_eden(); 4022 stop_point = 4023 old_gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl); 4024 } 4025 if (stop_point != NULL) { 4026 // The careful iteration stopped early either because it found an 4027 // uninitialized object, or because we were in the midst of an 4028 // "abortable preclean", which should now be aborted. Redirty 4029 // the bits corresponding to the partially-scanned or unscanned 4030 // cards. We'll either restart at the next block boundary or 4031 // abort the preclean. 4032 assert((_collectorState == AbortablePreclean && should_abort_preclean()), 4033 "Should only be AbortablePreclean."); 4034 _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end())); 4035 if (should_abort_preclean()) { 4036 break; // out of preclean loop 4037 } else { 4038 // Compute the next address at which preclean should pick up; 4039 // might need bitMapLock in order to read P-bits. 4040 lastAddr = next_card_start_after_block(stop_point); 4041 } 4042 } 4043 } else { 4044 assert(lastAddr == endAddr, "consistency check"); 4045 assert(numDirtyCards == 0, "consistency check"); 4046 break; 4047 } 4048 } 4049 verify_work_stacks_empty(); 4050 verify_overflow_empty(); 4051 return cumNumDirtyCards; 4052 } 4053 4054 // NOTE: preclean_mod_union_table() above and preclean_card_table() 4055 // below are largely identical; if you need to modify 4056 // one of these methods, please check the other method too. 4057 4058 size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* old_gen, 4059 ScanMarkedObjectsAgainCarefullyClosure* cl) { 4060 // strategy: it's similar to preclean_mod_union_table above, in that 4061 // we accumulate contiguous ranges of dirty cards, mark these cards 4062 // precleaned, then scan the region covered by these cards. 4063 HeapWord* endAddr = (HeapWord*)(old_gen->_virtual_space.high()); 4064 HeapWord* startAddr = (HeapWord*)(old_gen->_virtual_space.low()); 4065 4066 cl->setFreelistLock(old_gen->freelistLock()); // needed for yielding 4067 4068 size_t numDirtyCards, cumNumDirtyCards; 4069 HeapWord *lastAddr, *nextAddr; 4070 4071 for (cumNumDirtyCards = numDirtyCards = 0, 4072 nextAddr = lastAddr = startAddr; 4073 nextAddr < endAddr; 4074 nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) { 4075 4076 ResourceMark rm; 4077 HandleMark hm; 4078 4079 MemRegion dirtyRegion; 4080 { 4081 // See comments in "Precleaning notes" above on why we 4082 // do this locking. XXX Could the locking overheads be 4083 // too high when dirty cards are sparse? [I don't think so.]
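// As in preclean_mod_union_table() above, the stopTimer() / CMSTokenSync /
// startTimer() sequences below are the CMS thread's potential yield points:
// the phase timer is stopped, the CMS token (and, where a
// CMSTokenSyncWithLocks is used, the named locks) is synchronized with the
// VM thread so that a pending young or foreground collection can get in,
// and the timer is restarted afterwards, presumably so that time spent
// blocked here is not charged to the concurrent phase.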
4084 stopTimer(); 4085 CMSTokenSync x(true); // is cms thread 4086 startTimer(); 4087 sample_eden(); 4088 // Get and clear dirty region from card table 4089 dirtyRegion = _ct->dirty_card_range_after_reset(MemRegion(nextAddr, endAddr), 4090 true, 4091 CardTable::precleaned_card_val()); 4092 4093 assert(dirtyRegion.start() >= nextAddr, 4094 "returned region inconsistent?"); 4095 } 4096 lastAddr = dirtyRegion.end(); 4097 numDirtyCards = 4098 dirtyRegion.word_size()/CardTable::card_size_in_words; 4099 4100 if (!dirtyRegion.is_empty()) { 4101 stopTimer(); 4102 CMSTokenSyncWithLocks ts(true, old_gen->freelistLock(), bitMapLock()); 4103 startTimer(); 4104 sample_eden(); 4105 verify_work_stacks_empty(); 4106 verify_overflow_empty(); 4107 HeapWord* stop_point = 4108 old_gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl); 4109 if (stop_point != NULL) { 4110 assert((_collectorState == AbortablePreclean && should_abort_preclean()), 4111 "Should only be AbortablePreclean."); 4112 _ct->invalidate(MemRegion(stop_point, dirtyRegion.end())); 4113 if (should_abort_preclean()) { 4114 break; // out of preclean loop 4115 } else { 4116 // Compute the next address at which preclean should pick up. 4117 lastAddr = next_card_start_after_block(stop_point); 4118 } 4119 } 4120 } else { 4121 break; 4122 } 4123 } 4124 verify_work_stacks_empty(); 4125 verify_overflow_empty(); 4126 return cumNumDirtyCards; 4127 } 4128 4129 class PrecleanCLDClosure : public CLDClosure { 4130 MetadataVisitingOopsInGenClosure* _cm_closure; 4131 public: 4132 PrecleanCLDClosure(MetadataVisitingOopsInGenClosure* oop_closure) : _cm_closure(oop_closure) {} 4133 void do_cld(ClassLoaderData* cld) { 4134 if (cld->has_accumulated_modified_oops()) { 4135 cld->clear_accumulated_modified_oops(); 4136 4137 _cm_closure->do_cld(cld); 4138 } 4139 } 4140 }; 4141 4142 // The freelist lock is needed to prevent asserts, is it really needed? 4143 void CMSCollector::preclean_cld(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock) { 4144 // Needed to walk CLDG 4145 MutexLocker ml(ClassLoaderDataGraph_lock); 4146 4147 cl->set_freelistLock(freelistLock); 4148 4149 CMSTokenSyncWithLocks ts(true, freelistLock, bitMapLock()); 4150 4151 // SSS: Add equivalent to ScanMarkedObjectsAgainCarefullyClosure::do_yield_check and should_abort_preclean? 4152 // SSS: We should probably check if precleaning should be aborted, at suitable intervals? 4153 PrecleanCLDClosure preclean_closure(cl); 4154 ClassLoaderDataGraph::cld_do(&preclean_closure); 4155 4156 verify_work_stacks_empty(); 4157 verify_overflow_empty(); 4158 } 4159 4160 void CMSCollector::checkpointRootsFinal() { 4161 assert(_collectorState == FinalMarking, "incorrect state transition?"); 4162 check_correct_thread_executing(); 4163 // world is stopped at this checkpoint 4164 assert(SafepointSynchronize::is_at_safepoint(), 4165 "world should be stopped"); 4166 TraceCMSMemoryManagerStats tms(_collectorState, CMSHeap::heap()->gc_cause()); 4167 4168 verify_work_stacks_empty(); 4169 verify_overflow_empty(); 4170 4171 log_debug(gc)("YG occupancy: " SIZE_FORMAT " K (" SIZE_FORMAT " K)", 4172 _young_gen->used() / K, _young_gen->capacity() / K); 4173 { 4174 if (CMSScavengeBeforeRemark) { 4175 CMSHeap* heap = CMSHeap::heap(); 4176 // Temporarily set flag to false, GCH->do_collection will 4177 // expect it to be false and set to true 4178 FlagSetting fl(heap->_is_gc_active, false); 4179 4180 heap->do_collection(true, // full (i.e. 
force, see below) 4181 false, // !clear_all_soft_refs 4182 0, // size 4183 false, // is_tlab 4184 GenCollectedHeap::YoungGen // type 4185 ); 4186 } 4187 FreelistLocker x(this); 4188 MutexLocker y(bitMapLock(), 4189 Mutex::_no_safepoint_check_flag); 4190 checkpointRootsFinalWork(); 4191 _cmsGen->cmsSpace()->recalculate_used_stable(); 4192 } 4193 verify_work_stacks_empty(); 4194 verify_overflow_empty(); 4195 } 4196 4197 void CMSCollector::checkpointRootsFinalWork() { 4198 GCTraceTime(Trace, gc, phases) tm("checkpointRootsFinalWork", _gc_timer_cm); 4199 4200 assert(haveFreelistLocks(), "must have free list locks"); 4201 assert_lock_strong(bitMapLock()); 4202 4203 ResourceMark rm; 4204 HandleMark hm; 4205 4206 CMSHeap* heap = CMSHeap::heap(); 4207 4208 assert(haveFreelistLocks(), "must have free list locks"); 4209 assert_lock_strong(bitMapLock()); 4210 4211 // We might assume that we need not fill TLAB's when 4212 // CMSScavengeBeforeRemark is set, because we may have just done 4213 // a scavenge which would have filled all TLAB's -- and besides 4214 // Eden would be empty. This however may not always be the case -- 4215 // for instance although we asked for a scavenge, it may not have 4216 // happened because of a JNI critical section. We probably need 4217 // a policy for deciding whether we can in that case wait until 4218 // the critical section releases and then do the remark following 4219 // the scavenge, and skip it here. In the absence of that policy, 4220 // or of an indication of whether the scavenge did indeed occur, 4221 // we cannot rely on TLAB's having been filled and must do 4222 // so here just in case a scavenge did not happen. 4223 heap->ensure_parsability(false); // fill TLAB's, but no need to retire them 4224 // Update the saved marks which may affect the root scans. 4225 heap->save_marks(); 4226 4227 print_eden_and_survivor_chunk_arrays(); 4228 4229 { 4230 #if COMPILER2_OR_JVMCI 4231 DerivedPointerTableDeactivate dpt_deact; 4232 #endif 4233 4234 // Note on the role of the mod union table: 4235 // Since the marker in "markFromRoots" marks concurrently with 4236 // mutators, it is possible for some reachable objects not to have been 4237 // scanned. For instance, an only reference to an object A was 4238 // placed in object B after the marker scanned B. Unless B is rescanned, 4239 // A would be collected. Such updates to references in marked objects 4240 // are detected via the mod union table which is the set of all cards 4241 // dirtied since the first checkpoint in this GC cycle and prior to 4242 // the most recent young generation GC, minus those cleaned up by the 4243 // concurrent precleaning. 4244 if (CMSParallelRemarkEnabled) { 4245 GCTraceTime(Debug, gc, phases) t("Rescan (parallel)", _gc_timer_cm); 4246 do_remark_parallel(); 4247 } else { 4248 GCTraceTime(Debug, gc, phases) t("Rescan (non-parallel)", _gc_timer_cm); 4249 do_remark_non_parallel(); 4250 } 4251 } 4252 verify_work_stacks_empty(); 4253 verify_overflow_empty(); 4254 4255 { 4256 GCTraceTime(Trace, gc, phases) ts("refProcessingWork", _gc_timer_cm); 4257 refProcessingWork(); 4258 } 4259 verify_work_stacks_empty(); 4260 verify_overflow_empty(); 4261 4262 if (should_unload_classes()) { 4263 heap->prune_scavengable_nmethods(); 4264 } 4265 JvmtiExport::gc_epilogue(); 4266 4267 // If we encountered any (marking stack / work queue) overflow 4268 // events during the current CMS cycle, take appropriate 4269 // remedial measures, where possible, so as to try and avoid 4270 // recurrence of that condition. 
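// Concretely, the remedial measures below are: if any of the serial-mode
// overflow counters (_ser_*_ovflw) are non-zero, the shared _markStack is
// expanded and the counters are reset; parallel-mode work queue overflows
// (_par_*_ovflw) are only logged and their counters reset; the stack's
// hit-limit and failed-doubling statistics are logged and then cleared.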
4271 assert(_markStack.isEmpty(), "No grey objects"); 4272 size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw + 4273 _ser_kac_ovflw + _ser_kac_preclean_ovflw; 4274 if (ser_ovflw > 0) { 4275 log_trace(gc)("Marking stack overflow (benign) (pmc_pc=" SIZE_FORMAT ", pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT ", kac_preclean=" SIZE_FORMAT ")", 4276 _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw, _ser_kac_ovflw, _ser_kac_preclean_ovflw); 4277 _markStack.expand(); 4278 _ser_pmc_remark_ovflw = 0; 4279 _ser_pmc_preclean_ovflw = 0; 4280 _ser_kac_preclean_ovflw = 0; 4281 _ser_kac_ovflw = 0; 4282 } 4283 if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) { 4284 log_trace(gc)("Work queue overflow (benign) (pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT ")", 4285 _par_pmc_remark_ovflw, _par_kac_ovflw); 4286 _par_pmc_remark_ovflw = 0; 4287 _par_kac_ovflw = 0; 4288 } 4289 if (_markStack._hit_limit > 0) { 4290 log_trace(gc)(" (benign) Hit max stack size limit (" SIZE_FORMAT ")", 4291 _markStack._hit_limit); 4292 } 4293 if (_markStack._failed_double > 0) { 4294 log_trace(gc)(" (benign) Failed stack doubling (" SIZE_FORMAT "), current capacity " SIZE_FORMAT, 4295 _markStack._failed_double, _markStack.capacity()); 4296 } 4297 _markStack._hit_limit = 0; 4298 _markStack._failed_double = 0; 4299 4300 if ((VerifyAfterGC || VerifyDuringGC) && 4301 CMSHeap::heap()->total_collections() >= VerifyGCStartAt) { 4302 verify_after_remark(); 4303 } 4304 4305 _gc_tracer_cm->report_object_count_after_gc(&_is_alive_closure); 4306 4307 // Change under the freelistLocks. 4308 _collectorState = Sweeping; 4309 // Call isAllClear() under bitMapLock 4310 assert(_modUnionTable.isAllClear(), 4311 "Should be clear by end of the final marking"); 4312 assert(_ct->cld_rem_set()->mod_union_is_clear(), 4313 "Should be clear by end of the final marking"); 4314 } 4315 4316 void CMSParInitialMarkTask::work(uint worker_id) { 4317 elapsedTimer _timer; 4318 ResourceMark rm; 4319 HandleMark hm; 4320 4321 // ---------- scan from roots -------------- 4322 _timer.start(); 4323 CMSHeap* heap = CMSHeap::heap(); 4324 ParMarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap)); 4325 4326 // ---------- young gen roots -------------- 4327 { 4328 work_on_young_gen_roots(&par_mri_cl); 4329 _timer.stop(); 4330 log_trace(gc, task)("Finished young gen initial mark scan work in %dth thread: %3.3f sec", worker_id, _timer.seconds()); 4331 } 4332 4333 // ---------- remaining roots -------------- 4334 _timer.reset(); 4335 _timer.start(); 4336 4337 CLDToOopClosure cld_closure(&par_mri_cl, ClassLoaderData::_claim_strong); 4338 4339 heap->cms_process_roots(_strong_roots_scope, 4340 false, // yg was scanned above 4341 GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()), 4342 _collector->should_unload_classes(), 4343 &par_mri_cl, 4344 &cld_closure); 4345 4346 assert(_collector->should_unload_classes() 4347 || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache), 4348 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops"); 4349 _timer.stop(); 4350 log_trace(gc, task)("Finished remaining root initial mark scan work in %dth thread: %3.3f sec", worker_id, _timer.seconds()); 4351 } 4352 4353 // Parallel remark task 4354 class CMSParRemarkTask: public CMSParMarkTask { 4355 CompactibleFreeListSpace* _cms_space; 4356 4357 // The per-thread work queues, available here for stealing. 
4358 OopTaskQueueSet* _task_queues; 4359 TaskTerminator _term; 4360 StrongRootsScope* _strong_roots_scope; 4361 4362 public: 4363 // A value of 0 passed to n_workers will cause the number of 4364 // workers to be taken from the active workers in the work gang. 4365 CMSParRemarkTask(CMSCollector* collector, 4366 CompactibleFreeListSpace* cms_space, 4367 uint n_workers, WorkGang* workers, 4368 OopTaskQueueSet* task_queues, 4369 StrongRootsScope* strong_roots_scope): 4370 CMSParMarkTask("Rescan roots and grey objects in parallel", 4371 collector, n_workers), 4372 _cms_space(cms_space), 4373 _task_queues(task_queues), 4374 _term(n_workers, task_queues), 4375 _strong_roots_scope(strong_roots_scope) { } 4376 4377 OopTaskQueueSet* task_queues() { return _task_queues; } 4378 4379 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); } 4380 4381 ParallelTaskTerminator* terminator() { return _term.terminator(); } 4382 uint n_workers() { return _n_workers; } 4383 4384 void work(uint worker_id); 4385 4386 private: 4387 // ... of dirty cards in old space 4388 void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i, 4389 ParMarkRefsIntoAndScanClosure* cl); 4390 4391 // ... work stealing for the above 4392 void do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl); 4393 }; 4394 4395 class RemarkCLDClosure : public CLDClosure { 4396 CLDToOopClosure _cm_closure; 4397 public: 4398 RemarkCLDClosure(OopClosure* oop_closure) : _cm_closure(oop_closure, ClassLoaderData::_claim_strong) {} 4399 void do_cld(ClassLoaderData* cld) { 4400 // Check if we have modified any oops in the CLD during the concurrent marking. 4401 if (cld->has_accumulated_modified_oops()) { 4402 cld->clear_accumulated_modified_oops(); 4403 4404 // We could have transferred the current modified marks to the accumulated marks, 4405 // like we do with the Card Table to Mod Union Table. But it's not really necessary. 4406 } else if (cld->has_modified_oops()) { 4407 // Don't clear anything; this info is needed by the next young collection. 4408 } else { 4409 // No modified oops in the ClassLoaderData. 4410 return; 4411 } 4412 4413 // The klass has modified fields, need to scan the klass. 4414 _cm_closure.do_cld(cld); 4415 } 4416 }; 4417 4418 void CMSParMarkTask::work_on_young_gen_roots(OopsInGenClosure* cl) { 4419 ParNewGeneration* young_gen = _collector->_young_gen; 4420 ContiguousSpace* eden_space = young_gen->eden(); 4421 ContiguousSpace* from_space = young_gen->from(); 4422 ContiguousSpace* to_space = young_gen->to(); 4423 4424 HeapWord** eca = _collector->_eden_chunk_array; 4425 size_t ect = _collector->_eden_chunk_index; 4426 HeapWord** sca = _collector->_survivor_chunk_array; 4427 size_t sct = _collector->_survivor_chunk_index; 4428 4429 assert(ect <= _collector->_eden_chunk_capacity, "out of bounds"); 4430 assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds"); 4431 4432 do_young_space_rescan(cl, to_space, NULL, 0); 4433 do_young_space_rescan(cl, from_space, sca, sct); 4434 do_young_space_rescan(cl, eden_space, eca, ect); 4435 } 4436 4437 // work_queue(i) is passed to the closure 4438 // ParMarkRefsIntoAndScanClosure. The "i" parameter 4439 // also is passed to do_dirty_card_rescan_tasks() and to 4440 // do_work_steal() to select the i-th task_queue.
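// In outline, work(worker_id) below runs these timed phases: young gen roots,
// the remaining strong roots, unhandled (newly created) CLDs and dirty CLDs
// (both currently single-threaded, done by worker 0), a dirty card rescan of
// the CMS space, and finally work stealing plus overflow list draining until
// termination.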
4441 4442 void CMSParRemarkTask::work(uint worker_id) { 4443 elapsedTimer _timer; 4444 ResourceMark rm; 4445 HandleMark hm; 4446 4447 // ---------- rescan from roots -------------- 4448 _timer.start(); 4449 CMSHeap* heap = CMSHeap::heap(); 4450 ParMarkRefsIntoAndScanClosure par_mrias_cl(_collector, 4451 _collector->_span, _collector->ref_processor(), 4452 &(_collector->_markBitMap), 4453 work_queue(worker_id)); 4454 4455 // Rescan young gen roots first since these are likely 4456 // coarsely partitioned and may, on that account, constitute 4457 // the critical path; thus, it's best to start off that 4458 // work first. 4459 // ---------- young gen roots -------------- 4460 { 4461 work_on_young_gen_roots(&par_mrias_cl); 4462 _timer.stop(); 4463 log_trace(gc, task)("Finished young gen rescan work in %dth thread: %3.3f sec", worker_id, _timer.seconds()); 4464 } 4465 4466 // ---------- remaining roots -------------- 4467 _timer.reset(); 4468 _timer.start(); 4469 heap->cms_process_roots(_strong_roots_scope, 4470 false, // yg was scanned above 4471 GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()), 4472 _collector->should_unload_classes(), 4473 &par_mrias_cl, 4474 NULL); // The dirty klasses will be handled below 4475 4476 assert(_collector->should_unload_classes() 4477 || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache), 4478 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops"); 4479 _timer.stop(); 4480 log_trace(gc, task)("Finished remaining root rescan work in %dth thread: %3.3f sec", worker_id, _timer.seconds()); 4481 4482 // ---------- unhandled CLD scanning ---------- 4483 if (worker_id == 0) { // Single threaded at the moment. 4484 _timer.reset(); 4485 _timer.start(); 4486 4487 // Scan all new class loader data objects and new dependencies that were 4488 // introduced during concurrent marking. 4489 ResourceMark rm; 4490 GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds(); 4491 for (int i = 0; i < array->length(); i++) { 4492 Devirtualizer::do_cld(&par_mrias_cl, array->at(i)); 4493 } 4494 4495 // We don't need to keep track of new CLDs anymore. 4496 ClassLoaderDataGraph::remember_new_clds(false); 4497 4498 _timer.stop(); 4499 log_trace(gc, task)("Finished unhandled CLD scanning work in %dth thread: %3.3f sec", worker_id, _timer.seconds()); 4500 } 4501 4502 // We might have added oops to ClassLoaderData::_handles during the 4503 // concurrent marking phase. These oops do not always point to newly allocated objects 4504 // that are guaranteed to be kept alive. Hence, 4505 // we do have to revisit the _handles block during the remark phase. 4506 4507 // ---------- dirty CLD scanning ---------- 4508 if (worker_id == 0) { // Single threaded at the moment. 4509 _timer.reset(); 4510 _timer.start(); 4511 4512 // Scan all classes that were dirtied during the concurrent marking phase. 4513 RemarkCLDClosure remark_closure(&par_mrias_cl); 4514 ClassLoaderDataGraph::cld_do(&remark_closure); 4515 4516 _timer.stop(); 4517 log_trace(gc, task)("Finished dirty CLD scanning work in %dth thread: %3.3f sec", worker_id, _timer.seconds()); 4518 } 4519 4520 4521 // ---------- rescan dirty cards ------------ 4522 _timer.reset(); 4523 _timer.start(); 4524 4525 // Do the rescan tasks for the CMS space 4526 // (cms_space).
4527 // "worker_id" is passed to select the task_queue for "worker_id" 4528 do_dirty_card_rescan_tasks(_cms_space, worker_id, &par_mrias_cl); 4529 _timer.stop(); 4530 log_trace(gc, task)("Finished dirty card rescan work in %dth thread: %3.3f sec", worker_id, _timer.seconds()); 4531 4532 // ---------- steal work from other threads ... 4533 // ---------- ... and drain overflow list. 4534 _timer.reset(); 4535 _timer.start(); 4536 do_work_steal(worker_id, &par_mrias_cl); 4537 _timer.stop(); 4538 log_trace(gc, task)("Finished work stealing in %dth thread: %3.3f sec", worker_id, _timer.seconds()); 4539 } 4540 4541 void 4542 CMSParMarkTask::do_young_space_rescan( 4543 OopsInGenClosure* cl, ContiguousSpace* space, 4544 HeapWord** chunk_array, size_t chunk_top) { 4545 // Until all tasks completed: 4546 // . claim an unclaimed task 4547 // . compute region boundaries corresponding to task claimed 4548 // using chunk_array 4549 // . par_oop_iterate(cl) over that region 4550 4551 ResourceMark rm; 4552 HandleMark hm; 4553 4554 SequentialSubTasksDone* pst = space->par_seq_tasks(); 4555 4556 uint nth_task = 0; 4557 uint n_tasks = pst->n_tasks(); 4558 4559 if (n_tasks > 0) { 4560 assert(pst->valid(), "Uninitialized use?"); 4561 HeapWord *start, *end; 4562 while (pst->try_claim_task(/* reference */ nth_task)) { 4563 // We claimed task # nth_task; compute its boundaries. 4564 if (chunk_top == 0) { // no samples were taken 4565 assert(nth_task == 0 && n_tasks == 1, "Can have only 1 eden task"); 4566 start = space->bottom(); 4567 end = space->top(); 4568 } else if (nth_task == 0) { 4569 start = space->bottom(); 4570 end = chunk_array[nth_task]; 4571 } else if (nth_task < (uint)chunk_top) { 4572 assert(nth_task >= 1, "Control point invariant"); 4573 start = chunk_array[nth_task - 1]; 4574 end = chunk_array[nth_task]; 4575 } else { 4576 assert(nth_task == (uint)chunk_top, "Control point invariant"); 4577 start = chunk_array[chunk_top - 1]; 4578 end = space->top(); 4579 } 4580 MemRegion mr(start, end); 4581 // Verify that mr is in space 4582 assert(mr.is_empty() || space->used_region().contains(mr), 4583 "Should be in space"); 4584 // Verify that "start" is an object boundary 4585 assert(mr.is_empty() || oopDesc::is_oop(oop(mr.start())), 4586 "Should be an oop"); 4587 space->par_oop_iterate(mr, cl); 4588 } 4589 pst->all_tasks_completed(); 4590 } 4591 } 4592 4593 void 4594 CMSParRemarkTask::do_dirty_card_rescan_tasks( 4595 CompactibleFreeListSpace* sp, int i, 4596 ParMarkRefsIntoAndScanClosure* cl) { 4597 // Until all tasks completed: 4598 // . claim an unclaimed task 4599 // . compute region boundaries corresponding to task claimed 4600 // . transfer dirty bits ct->mut for that region 4601 // . apply rescanclosure to dirty mut bits for that region 4602 4603 ResourceMark rm; 4604 HandleMark hm; 4605 4606 OopTaskQueue* work_q = work_queue(i); 4607 ModUnionClosure modUnionClosure(&(_collector->_modUnionTable)); 4608 // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! 4609 // CAUTION: This closure has state that persists across calls to 4610 // the work method dirty_range_iterate_clear() in that it has 4611 // embedded in it a (subtype of) UpwardsObjectClosure. The 4612 // use of that state in the embedded UpwardsObjectClosure instance 4613 // assumes that the cards are always iterated (even if in parallel 4614 // by several threads) in monotonically increasing order per each 4615 // thread. 
This is true of the implementation below which picks 4616 // card ranges (chunks) in monotonically increasing order globally 4617 // and, a-fortiori, in monotonically increasing order per thread 4618 // (the latter order being a subsequence of the former). 4619 // If the work code below is ever reorganized into a more chaotic 4620 // work-partitioning form than the current "sequential tasks" 4621 // paradigm, the use of that persistent state will have to be 4622 // revisited and modified appropriately. See also related 4623 // bug 4756801, work on which should examine this code to make 4624 // sure that the changes there do not run counter to the 4625 // assumptions made here and necessary for correctness and 4626 // efficiency. Note also that this code might yield inefficient 4627 // behavior in the case of very large objects that span one or 4628 // more work chunks. Such objects would potentially be scanned 4629 // several times redundantly. Work on 4756801 should try and 4630 // address that performance anomaly if at all possible. XXX 4631 MemRegion full_span = _collector->_span; 4632 CMSBitMap* bm = &(_collector->_markBitMap); // shared 4633 MarkFromDirtyCardsClosure 4634 greyRescanClosure(_collector, full_span, // entire span of interest 4635 sp, bm, work_q, cl); 4636 4637 SequentialSubTasksDone* pst = sp->conc_par_seq_tasks(); 4638 assert(pst->valid(), "Uninitialized use?"); 4639 uint nth_task = 0; 4640 const int alignment = CardTable::card_size * BitsPerWord; 4641 MemRegion span = sp->used_region(); 4642 HeapWord* start_addr = span.start(); 4643 HeapWord* end_addr = align_up(span.end(), alignment); 4644 const size_t chunk_size = sp->rescan_task_size(); // in HeapWord units 4645 assert(is_aligned(start_addr, alignment), "Check alignment"); 4646 assert(is_aligned(chunk_size, alignment), "Check alignment"); 4647 4648 while (pst->try_claim_task(/* reference */ nth_task)) { 4649 // Having claimed the nth_task, compute corresponding mem-region, 4650 // which is a-fortiori aligned correctly (i.e. at a MUT boundary). 4651 // The alignment restriction ensures that we do not need any 4652 // synchronization with other gang-workers while setting or 4653 // clearing bits in this chunk of the MUT. 4654 MemRegion this_span = MemRegion(start_addr + nth_task*chunk_size, 4655 start_addr + (nth_task+1)*chunk_size); 4656 // The last chunk's end might be way beyond the end of the 4657 // used region. In that case pull back appropriately. 4658 if (this_span.end() > end_addr) { 4659 this_span.set_end(end_addr); 4660 assert(!this_span.is_empty(), "Program logic (calculation of n_tasks)"); 4661 } 4662 // Iterate over the dirty cards covering this chunk, marking them 4663 // precleaned, and setting the corresponding bits in the mod union 4664 // table. Since we have been careful to partition at Card and MUT-word 4665 // boundaries no synchronization is needed between parallel threads. 4666 _collector->_ct->dirty_card_iterate(this_span, 4667 &modUnionClosure); 4668 4669 // Having transferred these marks into the modUnionTable, 4670 // rescan the marked objects on the dirty cards in the modUnionTable. 4671 // Even if this is at a synchronous collection, the initial marking 4672 // may have been done during an asynchronous collection so there 4673 // may be dirty bits in the mod-union table.
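// Note that dirty_range_iterate_clear() below both applies greyRescanClosure
// to each dirty range and clears the corresponding mod union bits as it goes;
// the verifyNoOneBitsInRange() call that follows is a sanity check whose
// non-product definition appears later in this file.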
4674 _collector->_modUnionTable.dirty_range_iterate_clear( 4675 this_span, &greyRescanClosure); 4676 _collector->_modUnionTable.verifyNoOneBitsInRange( 4677 this_span.start(), 4678 this_span.end()); 4679 } 4680 pst->all_tasks_completed(); // declare that i am done 4681 } 4682 4683 // . see if we can share work_queues with ParNew? XXX 4684 void 4685 CMSParRemarkTask::do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl) { 4686 OopTaskQueue* work_q = work_queue(i); 4687 NOT_PRODUCT(int num_steals = 0;) 4688 oop obj_to_scan; 4689 CMSBitMap* bm = &(_collector->_markBitMap); 4690 4691 while (true) { 4692 // Completely finish any left over work from (an) earlier round(s) 4693 cl->trim_queue(0); 4694 size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4, 4695 (size_t)ParGCDesiredObjsFromOverflowList); 4696 // Now check if there's any work in the overflow list 4697 // Passing ParallelGCThreads as the third parameter, no_of_gc_threads, 4698 // only affects the number of attempts made to get work from the 4699 // overflow list and does not affect the number of workers. Just 4700 // pass ParallelGCThreads so this behavior is unchanged. 4701 if (_collector->par_take_from_overflow_list(num_from_overflow_list, 4702 work_q, 4703 ParallelGCThreads)) { 4704 // found something in global overflow list; 4705 // not yet ready to go stealing work from others. 4706 // We'd like to assert(work_q->size() != 0, ...) 4707 // because we just took work from the overflow list, 4708 // but of course we can't since all of that could have 4709 // been already stolen from us. 4710 // "He giveth and He taketh away." 4711 continue; 4712 } 4713 // Verify that we have no work before we resort to stealing 4714 assert(work_q->size() == 0, "Have work, shouldn't steal"); 4715 // Try to steal from other queues that have work 4716 if (task_queues()->steal(i, /* reference */ obj_to_scan)) { 4717 NOT_PRODUCT(num_steals++;) 4718 assert(oopDesc::is_oop(obj_to_scan), "Oops, not an oop!"); 4719 assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?"); 4720 // Do scanning work 4721 obj_to_scan->oop_iterate(cl); 4722 // Loop around, finish this work, and try to steal some more 4723 } else if (terminator()->offer_termination()) { 4724 break; // nirvana from the infinite cycle 4725 } 4726 } 4727 log_develop_trace(gc, task)("\t(%d: stole %d oops)", i, num_steals); 4728 assert(work_q->size() == 0 && _collector->overflow_list_is_empty(), 4729 "Else our work is not yet done"); 4730 } 4731 4732 // Record object boundaries in _eden_chunk_array by sampling the eden 4733 // top in the slow-path eden object allocation code path and record 4734 // the boundaries, if CMSEdenChunksRecordAlways is true. If 4735 // CMSEdenChunksRecordAlways is false, we use the other asynchronous 4736 // sampling in sample_eden() that activates during the part of the 4737 // preclean phase. 4738 void CMSCollector::sample_eden_chunk() { 4739 if (CMSEdenChunksRecordAlways && _eden_chunk_array != NULL) { 4740 if (_eden_chunk_lock->try_lock()) { 4741 // Record a sample. This is the critical section. The contents 4742 // of the _eden_chunk_array have to be non-decreasing in the 4743 // address order. 
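// The sample taken below is committed (by bumping _eden_chunk_index) only if
// it is the first entry or lies at least CMSSamplingGrain words beyond the
// previously committed entry; otherwise the slot is simply overwritten by a
// later sample.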
4744 _eden_chunk_array[_eden_chunk_index] = *_top_addr; 4745 assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr, 4746 "Unexpected state of Eden"); 4747 if (_eden_chunk_index == 0 || 4748 ((_eden_chunk_array[_eden_chunk_index] > _eden_chunk_array[_eden_chunk_index-1]) && 4749 (pointer_delta(_eden_chunk_array[_eden_chunk_index], 4750 _eden_chunk_array[_eden_chunk_index-1]) >= CMSSamplingGrain))) { 4751 _eden_chunk_index++; // commit sample 4752 } 4753 _eden_chunk_lock->unlock(); 4754 } 4755 } 4756 } 4757 4758 // Return a thread-local PLAB recording array, as appropriate. 4759 void* CMSCollector::get_data_recorder(int thr_num) { 4760 if (_survivor_plab_array != NULL && 4761 (CMSPLABRecordAlways || 4762 (_collectorState > Marking && _collectorState < FinalMarking))) { 4763 assert(thr_num < (int)ParallelGCThreads, "thr_num is out of bounds"); 4764 ChunkArray* ca = &_survivor_plab_array[thr_num]; 4765 ca->reset(); // clear it so that fresh data is recorded 4766 return (void*) ca; 4767 } else { 4768 return NULL; 4769 } 4770 } 4771 4772 // Reset all the thread-local PLAB recording arrays 4773 void CMSCollector::reset_survivor_plab_arrays() { 4774 for (uint i = 0; i < ParallelGCThreads; i++) { 4775 _survivor_plab_array[i].reset(); 4776 } 4777 } 4778 4779 // Merge the per-thread plab arrays into the global survivor chunk 4780 // array which will provide the partitioning of the survivor space 4781 // for CMS initial scan and rescan. 4782 void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv, 4783 int no_of_gc_threads) { 4784 assert(_survivor_plab_array != NULL, "Error"); 4785 assert(_survivor_chunk_array != NULL, "Error"); 4786 assert(_collectorState == FinalMarking || 4787 (CMSParallelInitialMarkEnabled && _collectorState == InitialMarking), "Error"); 4788 for (int j = 0; j < no_of_gc_threads; j++) { 4789 _cursor[j] = 0; 4790 } 4791 HeapWord* top = surv->top(); 4792 size_t i; 4793 for (i = 0; i < _survivor_chunk_capacity; i++) { // all sca entries 4794 HeapWord* min_val = top; // Higher than any PLAB address 4795 uint min_tid = 0; // position of min_val this round 4796 for (int j = 0; j < no_of_gc_threads; j++) { 4797 ChunkArray* cur_sca = &_survivor_plab_array[j]; 4798 if (_cursor[j] == cur_sca->end()) { 4799 continue; 4800 } 4801 assert(_cursor[j] < cur_sca->end(), "ctl pt invariant"); 4802 HeapWord* cur_val = cur_sca->nth(_cursor[j]); 4803 assert(surv->used_region().contains(cur_val), "Out of bounds value"); 4804 if (cur_val < min_val) { 4805 min_tid = j; 4806 min_val = cur_val; 4807 } else { 4808 assert(cur_val < top, "All recorded addresses should be less"); 4809 } 4810 } 4811 // At this point min_val and min_tid are respectively 4812 // the least address in _survivor_plab_array[j]->nth(_cursor[j]) 4813 // and the thread (j) that witnesses that address. 4814 // We record this address in the _survivor_chunk_array[i] 4815 // and increment _cursor[min_tid] prior to the next round i. 
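// For illustration (hypothetical addresses): if two threads recorded PLAB
// boundaries { 0x1000, 0x3000 } and { 0x2000 }, the loop below performs an
// n-way merge and leaves _survivor_chunk_array = { 0x1000, 0x2000, 0x3000 }
// with _survivor_chunk_index == 3.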
4816 if (min_val == top) { 4817 break; 4818 } 4819 _survivor_chunk_array[i] = min_val; 4820 _cursor[min_tid]++; 4821 } 4822 // We are all done; record the size of the _survivor_chunk_array 4823 _survivor_chunk_index = i; // exclusive: [0, i) 4824 log_trace(gc, survivor)(" (Survivor:" SIZE_FORMAT "chunks) ", i); 4825 // Verify that we used up all the recorded entries 4826 #ifdef ASSERT 4827 size_t total = 0; 4828 for (int j = 0; j < no_of_gc_threads; j++) { 4829 assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant"); 4830 total += _cursor[j]; 4831 } 4832 assert(total == _survivor_chunk_index, "Ctl Pt Invariant"); 4833 // Check that the merged array is in sorted order 4834 if (total > 0) { 4835 for (size_t i = 0; i < total - 1; i++) { 4836 log_develop_trace(gc, survivor)(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ", 4837 i, p2i(_survivor_chunk_array[i])); 4838 assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1], 4839 "Not sorted"); 4840 } 4841 } 4842 #endif // ASSERT 4843 } 4844 4845 // Set up the space's par_seq_tasks structure for work claiming 4846 // for parallel initial scan and rescan of young gen. 4847 // See ParRescanTask where this is currently used. 4848 void 4849 CMSCollector:: 4850 initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) { 4851 assert(n_threads > 0, "Unexpected n_threads argument"); 4852 4853 // Eden space 4854 if (!_young_gen->eden()->is_empty()) { 4855 SequentialSubTasksDone* pst = _young_gen->eden()->par_seq_tasks(); 4856 assert(!pst->valid(), "Clobbering existing data?"); 4857 // Each valid entry in [0, _eden_chunk_index) represents a task. 4858 size_t n_tasks = _eden_chunk_index + 1; 4859 assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error"); 4860 // Sets the condition for completion of the subtask (how many threads 4861 // need to finish in order to be done). 4862 pst->set_n_threads(n_threads); 4863 pst->set_n_tasks((int)n_tasks); 4864 } 4865 4866 // Merge the survivor plab arrays into _survivor_chunk_array 4867 if (_survivor_plab_array != NULL) { 4868 merge_survivor_plab_arrays(_young_gen->from(), n_threads); 4869 } else { 4870 assert(_survivor_chunk_index == 0, "Error"); 4871 } 4872 4873 // To space 4874 { 4875 SequentialSubTasksDone* pst = _young_gen->to()->par_seq_tasks(); 4876 assert(!pst->valid(), "Clobbering existing data?"); 4877 // Sets the condition for completion of the subtask (how many threads 4878 // need to finish in order to be done). 4879 pst->set_n_threads(n_threads); 4880 pst->set_n_tasks(1); 4881 assert(pst->valid(), "Error"); 4882 } 4883 4884 // From space 4885 { 4886 SequentialSubTasksDone* pst = _young_gen->from()->par_seq_tasks(); 4887 assert(!pst->valid(), "Clobbering existing data?"); 4888 size_t n_tasks = _survivor_chunk_index + 1; 4889 assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error"); 4890 // Sets the condition for completion of the subtask (how many threads 4891 // need to finish in order to be done). 4892 pst->set_n_threads(n_threads); 4893 pst->set_n_tasks((int)n_tasks); 4894 assert(pst->valid(), "Error"); 4895 } 4896 } 4897 4898 // Parallel version of remark 4899 void CMSCollector::do_remark_parallel() { 4900 CMSHeap* heap = CMSHeap::heap(); 4901 WorkGang* workers = heap->workers(); 4902 assert(workers != NULL, "Need parallel worker threads."); 4903 // Choose to use the number of GC workers most recently set 4904 // into "active_workers". 
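// In outline, the rest of this method: set up a StrongRootsScope and a
// CMSParRemarkTask, carve the young gen and the CMS space into sequential
// sub-tasks, then either run the task on the work gang with MT reference
// discovery enabled, or (for a single worker) run it inline on this thread
// so that 1-thread timings stay repeatable.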
4905 uint n_workers = workers->active_workers(); 4906 4907 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace(); 4908 4909 StrongRootsScope srs(n_workers); 4910 4911 CMSParRemarkTask tsk(this, cms_space, n_workers, workers, task_queues(), &srs); 4912 4913 // We won't be iterating over the cards in the card table updating 4914 // the younger_gen cards, so we shouldn't call the following else 4915 // the verification code as well as subsequent younger_refs_iterate 4916 // code would get confused. XXX 4917 // heap->rem_set()->prepare_for_younger_refs_iterate(true); // parallel 4918 4919 // The young gen rescan work will not be done as part of 4920 // process_roots (which currently doesn't know how to 4921 // parallelize such a scan), but rather will be broken up into 4922 // a set of parallel tasks (via the sampling that the [abortable] 4923 // preclean phase did of eden, plus the [two] tasks of 4924 // scanning the [two] survivor spaces. Further fine-grain 4925 // parallelization of the scanning of the survivor spaces 4926 // themselves, and of precleaning of the young gen itself 4927 // is deferred to the future. 4928 initialize_sequential_subtasks_for_young_gen_rescan(n_workers); 4929 4930 // The dirty card rescan work is broken up into a "sequence" 4931 // of parallel tasks (per constituent space) that are dynamically 4932 // claimed by the parallel threads. 4933 cms_space->initialize_sequential_subtasks_for_rescan(n_workers); 4934 4935 // It turns out that even when we're using 1 thread, doing the work in a 4936 // separate thread causes wide variance in run times. We can't help this 4937 // in the multi-threaded case, but we special-case n=1 here to get 4938 // repeatable measurements of the 1-thread overhead of the parallel code. 4939 if (n_workers > 1) { 4940 // Make refs discovery MT-safe, if it isn't already: it may not 4941 // necessarily be so, since it's possible that we are doing 4942 // ST marking. 4943 ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true); 4944 workers->run_task(&tsk); 4945 } else { 4946 ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false); 4947 tsk.work(0); 4948 } 4949 4950 // restore, single-threaded for now, any preserved marks 4951 // as a result of work_q overflow 4952 restore_preserved_marks_if_any(); 4953 } 4954 4955 // Non-parallel version of remark 4956 void CMSCollector::do_remark_non_parallel() { 4957 ResourceMark rm; 4958 HandleMark hm; 4959 CMSHeap* heap = CMSHeap::heap(); 4960 ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false); 4961 4962 MarkRefsIntoAndScanClosure 4963 mrias_cl(_span, ref_processor(), &_markBitMap, NULL /* not precleaning */, 4964 &_markStack, this, 4965 false /* should_yield */, false /* not precleaning */); 4966 MarkFromDirtyCardsClosure 4967 markFromDirtyCardsClosure(this, _span, 4968 NULL, // space is set further below 4969 &_markBitMap, &_markStack, &mrias_cl); 4970 { 4971 GCTraceTime(Trace, gc, phases) t("Grey Object Rescan", _gc_timer_cm); 4972 // Iterate over the dirty cards, setting the corresponding bits in the 4973 // mod union table. 4974 { 4975 ModUnionClosure modUnionClosure(&_modUnionTable); 4976 _ct->dirty_card_iterate(_cmsGen->used_region(), 4977 &modUnionClosure); 4978 } 4979 // Having transferred these marks into the modUnionTable, we just need 4980 // to rescan the marked objects on the dirty cards in the modUnionTable. 4981 // The initial marking may have been done during an asynchronous 4982 // collection so there may be dirty bits in the mod-union table. 
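// As in the parallel rescan above, the upper bound of the range handed to the
// mod union iteration is aligned to card_size * BitsPerWord bytes, i.e. the
// heap extent covered by one mod-union-table word.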
4983 const int alignment = CardTable::card_size * BitsPerWord; 4984 { 4985 // ... First handle dirty cards in CMS gen 4986 markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace()); 4987 MemRegion ur = _cmsGen->used_region(); 4988 HeapWord* lb = ur.start(); 4989 HeapWord* ub = align_up(ur.end(), alignment); 4990 MemRegion cms_span(lb, ub); 4991 _modUnionTable.dirty_range_iterate_clear(cms_span, 4992 &markFromDirtyCardsClosure); 4993 verify_work_stacks_empty(); 4994 log_trace(gc)(" (re-scanned " SIZE_FORMAT " dirty cards in cms gen) ", markFromDirtyCardsClosure.num_dirty_cards()); 4995 } 4996 } 4997 if (VerifyDuringGC && 4998 CMSHeap::heap()->total_collections() >= VerifyGCStartAt) { 4999 HandleMark hm; // Discard invalid handles created during verification 5000 Universe::verify(); 5001 } 5002 { 5003 GCTraceTime(Trace, gc, phases) t("Root Rescan", _gc_timer_cm); 5004 5005 verify_work_stacks_empty(); 5006 5007 heap->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel. 5008 StrongRootsScope srs(1); 5009 5010 heap->cms_process_roots(&srs, 5011 true, // young gen as roots 5012 GenCollectedHeap::ScanningOption(roots_scanning_options()), 5013 should_unload_classes(), 5014 &mrias_cl, 5015 NULL); // The dirty klasses will be handled below 5016 5017 assert(should_unload_classes() 5018 || (roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache), 5019 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops"); 5020 } 5021 5022 { 5023 GCTraceTime(Trace, gc, phases) t("Visit Unhandled CLDs", _gc_timer_cm); 5024 5025 verify_work_stacks_empty(); 5026 5027 // Scan all class loader data objects that might have been introduced 5028 // during concurrent marking. 5029 ResourceMark rm; 5030 GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds(); 5031 for (int i = 0; i < array->length(); i++) { 5032 Devirtualizer::do_cld(&mrias_cl, array->at(i)); 5033 } 5034 5035 // We don't need to keep track of new CLDs anymore. 5036 ClassLoaderDataGraph::remember_new_clds(false); 5037 5038 verify_work_stacks_empty(); 5039 } 5040 5041 // We might have added oops to ClassLoaderData::_handles during the 5042 // concurrent marking phase. These oops do not point to newly allocated objects 5043 // that are guaranteed to be kept alive. Hence, 5044 // we do have to revisit the _handles block during the remark phase. 
5045 { 5046 GCTraceTime(Trace, gc, phases) t("Dirty CLD Scan", _gc_timer_cm); 5047 5048 verify_work_stacks_empty(); 5049 5050 RemarkCLDClosure remark_closure(&mrias_cl); 5051 ClassLoaderDataGraph::cld_do(&remark_closure); 5052 5053 verify_work_stacks_empty(); 5054 } 5055 5056 verify_work_stacks_empty(); 5057 // Restore evacuated mark words, if any, used for overflow list links 5058 restore_preserved_marks_if_any(); 5059 5060 verify_overflow_empty(); 5061 } 5062 5063 //////////////////////////////////////////////////////// 5064 // Parallel Reference Processing Task Proxy Class 5065 //////////////////////////////////////////////////////// 5066 class AbstractGangTaskWOopQueues : public AbstractGangTask { 5067 OopTaskQueueSet* _queues; 5068 TaskTerminator _terminator; 5069 public: 5070 AbstractGangTaskWOopQueues(const char* name, OopTaskQueueSet* queues, uint n_threads) : 5071 AbstractGangTask(name), _queues(queues), _terminator(n_threads, _queues) {} 5072 ParallelTaskTerminator* terminator() { return _terminator.terminator(); } 5073 OopTaskQueueSet* queues() { return _queues; } 5074 }; 5075 5076 class CMSRefProcTaskProxy: public AbstractGangTaskWOopQueues { 5077 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask; 5078 CMSCollector* _collector; 5079 CMSBitMap* _mark_bit_map; 5080 const MemRegion _span; 5081 ProcessTask& _task; 5082 5083 public: 5084 CMSRefProcTaskProxy(ProcessTask& task, 5085 CMSCollector* collector, 5086 const MemRegion& span, 5087 CMSBitMap* mark_bit_map, 5088 AbstractWorkGang* workers, 5089 OopTaskQueueSet* task_queues): 5090 AbstractGangTaskWOopQueues("Process referents by policy in parallel", 5091 task_queues, 5092 workers->active_workers()), 5093 _collector(collector), 5094 _mark_bit_map(mark_bit_map), 5095 _span(span), 5096 _task(task) 5097 { 5098 assert(_collector->_span.equals(_span) && !_span.is_empty(), 5099 "Inconsistency in _span"); 5100 } 5101 5102 OopTaskQueueSet* task_queues() { return queues(); } 5103 5104 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); } 5105 5106 void do_work_steal(int i, 5107 CMSParDrainMarkingStackClosure* drain, 5108 CMSParKeepAliveClosure* keep_alive); 5109 5110 virtual void work(uint worker_id); 5111 }; 5112 5113 void CMSRefProcTaskProxy::work(uint worker_id) { 5114 ResourceMark rm; 5115 HandleMark hm; 5116 assert(_collector->_span.equals(_span), "Inconsistency in _span"); 5117 CMSParKeepAliveClosure par_keep_alive(_collector, _span, 5118 _mark_bit_map, 5119 work_queue(worker_id)); 5120 CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span, 5121 _mark_bit_map, 5122 work_queue(worker_id)); 5123 CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map); 5124 _task.work(worker_id, is_alive_closure, par_keep_alive, par_drain_stack); 5125 if (_task.marks_oops_alive()) { 5126 do_work_steal(worker_id, &par_drain_stack, &par_keep_alive); 5127 } 5128 assert(work_queue(worker_id)->size() == 0, "work_queue should be empty"); 5129 assert(_collector->_overflow_list == NULL, "non-empty _overflow_list"); 5130 } 5131 5132 CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector, 5133 MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue): 5134 _span(span), 5135 _work_queue(work_queue), 5136 _bit_map(bit_map), 5137 _mark_and_push(collector, span, bit_map, work_queue), 5138 _low_water_mark(MIN2((work_queue->max_elems()/4), 5139 ((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads))) 5140 { } 5141 5142 // . see if we can share work_queues with ParNew? 
XXX 5143 void CMSRefProcTaskProxy::do_work_steal(int i, 5144 CMSParDrainMarkingStackClosure* drain, 5145 CMSParKeepAliveClosure* keep_alive) { 5146 OopTaskQueue* work_q = work_queue(i); 5147 NOT_PRODUCT(int num_steals = 0;) 5148 oop obj_to_scan; 5149 5150 while (true) { 5151 // Completely finish any left over work from (an) earlier round(s) 5152 drain->trim_queue(0); 5153 size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4, 5154 (size_t)ParGCDesiredObjsFromOverflowList); 5155 // Now check if there's any work in the overflow list 5156 // Passing ParallelGCThreads as the third parameter, no_of_gc_threads, 5157 // only affects the number of attempts made to get work from the 5158 // overflow list and does not affect the number of workers. Just 5159 // pass ParallelGCThreads so this behavior is unchanged. 5160 if (_collector->par_take_from_overflow_list(num_from_overflow_list, 5161 work_q, 5162 ParallelGCThreads)) { 5163 // Found something in global overflow list; 5164 // not yet ready to go stealing work from others. 5165 // We'd like to assert(work_q->size() != 0, ...) 5166 // because we just took work from the overflow list, 5167 // but of course we can't, since all of that might have 5168 // been already stolen from us. 5169 continue; 5170 } 5171 // Verify that we have no work before we resort to stealing 5172 assert(work_q->size() == 0, "Have work, shouldn't steal"); 5173 // Try to steal from other queues that have work 5174 if (task_queues()->steal(i, /* reference */ obj_to_scan)) { 5175 NOT_PRODUCT(num_steals++;) 5176 assert(oopDesc::is_oop(obj_to_scan), "Oops, not an oop!"); 5177 assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?"); 5178 // Do scanning work 5179 obj_to_scan->oop_iterate(keep_alive); 5180 // Loop around, finish this work, and try to steal some more 5181 } else if (terminator()->offer_termination()) { 5182 break; // nirvana from the infinite cycle 5183 } 5184 } 5185 log_develop_trace(gc, task)("\t(%d: stole %d oops)", i, num_steals); 5186 } 5187 5188 void CMSRefProcTaskExecutor::execute(ProcessTask& task, uint ergo_workers) { 5189 CMSHeap* heap = CMSHeap::heap(); 5190 WorkGang* workers = heap->workers(); 5191 assert(workers != NULL, "Need parallel worker threads."); 5192 assert(workers->active_workers() == ergo_workers, 5193 "Ergonomically chosen workers (%u) must be equal to active workers (%u)", 5194 ergo_workers, workers->active_workers()); 5195 CMSRefProcTaskProxy rp_task(task, &_collector, 5196 _collector.ref_processor_span(), 5197 _collector.markBitMap(), 5198 workers, _collector.task_queues()); 5199 workers->run_task(&rp_task, workers->active_workers()); 5200 } 5201 5202 void CMSCollector::refProcessingWork() { 5203 ResourceMark rm; 5204 HandleMark hm; 5205 5206 ReferenceProcessor* rp = ref_processor(); 5207 assert(_span_based_discoverer.span().equals(_span), "Spans should be equal"); 5208 assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete"); 5209 // Process weak references. 5210 rp->setup_policy(false); 5211 verify_work_stacks_empty(); 5212 5213 ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->max_num_queues()); 5214 { 5215 GCTraceTime(Debug, gc, phases) t("Reference Processing", _gc_timer_cm); 5216 5217 // Setup keep_alive and complete closures. 
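// Roughly: cmsKeepAliveClosure marks (and queues for tracing) referents that
// the reference policy decides to keep alive, while cmsDrainMarkingStackClosure
// is the "complete" closure that transitively drains the resulting marking work.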
5218 CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap, 5219 &_markStack, false /* !preclean */); 5220 CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this, 5221 _span, &_markBitMap, &_markStack, 5222 &cmsKeepAliveClosure, false /* !preclean */); 5223 5224 ReferenceProcessorStats stats; 5225 if (rp->processing_is_mt()) { 5226 // Set the degree of MT here. If the discovery is done MT, there 5227 // may have been a different number of threads doing the discovery 5228 // and a different number of discovered lists may have Ref objects. 5229 // That is OK as long as the Reference lists are balanced (see 5230 // balance_all_queues() and balance_queues()). 5231 CMSHeap* heap = CMSHeap::heap(); 5232 uint active_workers = ParallelGCThreads; 5233 WorkGang* workers = heap->workers(); 5234 if (workers != NULL) { 5235 active_workers = workers->active_workers(); 5236 // The expectation is that active_workers will have already 5237 // been set to a reasonable value. If it has not been set, 5238 // investigate. 5239 assert(active_workers > 0, "Should have been set during scavenge"); 5240 } 5241 rp->set_active_mt_degree(active_workers); 5242 CMSRefProcTaskExecutor task_executor(*this); 5243 stats = rp->process_discovered_references(&_is_alive_closure, 5244 &cmsKeepAliveClosure, 5245 &cmsDrainMarkingStackClosure, 5246 &task_executor, 5247 &pt); 5248 } else { 5249 stats = rp->process_discovered_references(&_is_alive_closure, 5250 &cmsKeepAliveClosure, 5251 &cmsDrainMarkingStackClosure, 5252 NULL, 5253 &pt); 5254 } 5255 _gc_tracer_cm->report_gc_reference_stats(stats); 5256 pt.print_all_references(); 5257 } 5258 5259 // This is the point where the entire marking should have completed. 5260 verify_work_stacks_empty(); 5261 5262 { 5263 GCTraceTime(Debug, gc, phases) t("Weak Processing", _gc_timer_cm); 5264 WeakProcessor::weak_oops_do(&_is_alive_closure, &do_nothing_cl); 5265 } 5266 5267 if (should_unload_classes()) { 5268 { 5269 GCTraceTime(Debug, gc, phases) t("Class Unloading", _gc_timer_cm); 5270 5271 // Unload classes and purge the SystemDictionary. 5272 bool purged_class = SystemDictionary::do_unloading(_gc_timer_cm); 5273 5274 // Unload nmethods. 5275 CodeCache::do_unloading(&_is_alive_closure, purged_class); 5276 5277 // Prune dead klasses from subklass/sibling/implementor lists. 5278 Klass::clean_weak_klass_links(purged_class); 5279 5280 // Clean JVMCI metadata handles. 5281 JVMCI_ONLY(JVMCI::do_unloading(purged_class)); 5282 } 5283 } 5284 5285 // Restore any preserved marks as a result of mark stack or 5286 // work queue overflow 5287 restore_preserved_marks_if_any(); // done single-threaded for now 5288 5289 rp->set_enqueuing_is_done(true); 5290 rp->verify_no_references_recorded(); 5291 } 5292 5293 #ifndef PRODUCT 5294 void CMSCollector::check_correct_thread_executing() { 5295 Thread* t = Thread::current(); 5296 // Only the VM thread or the CMS thread should be here. 5297 assert(t->is_ConcurrentGC_thread() || t->is_VM_thread(), 5298 "Unexpected thread type"); 5299 // If this is the vm thread, the foreground process 5300 // should not be waiting. Note that _foregroundGCIsActive is 5301 // true while the foreground collector is waiting. 5302 if (_foregroundGCShouldWait) { 5303 // We cannot be the VM thread 5304 assert(t->is_ConcurrentGC_thread(), 5305 "Should be CMS thread"); 5306 } else { 5307 // We can be the CMS thread only if we are in a stop-world 5308 // phase of CMS collection. 
5309 if (t->is_ConcurrentGC_thread()) { 5310 assert(_collectorState == InitialMarking || 5311 _collectorState == FinalMarking, 5312 "Should be a stop-world phase"); 5313 // The CMS thread should be holding the CMS_token. 5314 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), 5315 "Potential interference with concurrently " 5316 "executing VM thread"); 5317 } 5318 } 5319 } 5320 #endif 5321 5322 void CMSCollector::sweep() { 5323 assert(_collectorState == Sweeping, "just checking"); 5324 check_correct_thread_executing(); 5325 verify_work_stacks_empty(); 5326 verify_overflow_empty(); 5327 increment_sweep_count(); 5328 TraceCMSMemoryManagerStats tms(_collectorState, CMSHeap::heap()->gc_cause()); 5329 5330 _inter_sweep_timer.stop(); 5331 _inter_sweep_estimate.sample(_inter_sweep_timer.seconds()); 5332 5333 assert(!_intra_sweep_timer.is_active(), "Should not be active"); 5334 _intra_sweep_timer.reset(); 5335 _intra_sweep_timer.start(); 5336 { 5337 GCTraceCPUTime tcpu; 5338 CMSPhaseAccounting pa(this, "Concurrent Sweep"); 5339 // First sweep the old gen 5340 { 5341 CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(), 5342 bitMapLock()); 5343 sweepWork(_cmsGen); 5344 } 5345 5346 // Update Universe::_heap_*_at_gc figures. 5347 // We need all the free list locks to make the abstract state 5348 // transition from Sweeping to Resetting. See detailed note 5349 // further below. 5350 { 5351 CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock()); 5352 5353 // Update heap occupancy information which is used as 5354 // input to soft ref clearing policy at the next gc. 5355 Universe::update_heap_info_at_gc(); 5356 5357 // recalculate CMS used space after CMS collection 5358 _cmsGen->cmsSpace()->recalculate_used_stable(); 5359 5360 _collectorState = Resizing; 5361 } 5362 } 5363 verify_work_stacks_empty(); 5364 verify_overflow_empty(); 5365 5366 if (should_unload_classes()) { 5367 // Delay purge to the beginning of the next safepoint. Metaspace::contains 5368 // requires that the virtual spaces are stable and not deleted. 5369 ClassLoaderDataGraph::set_should_purge(true); 5370 } 5371 5372 _intra_sweep_timer.stop(); 5373 _intra_sweep_estimate.sample(_intra_sweep_timer.seconds()); 5374 5375 _inter_sweep_timer.reset(); 5376 _inter_sweep_timer.start(); 5377 5378 // We need to use a monotonically non-decreasing time in ms 5379 // or we will see time-warp warnings and os::javaTimeMillis() 5380 // does not guarantee monotonicity. 5381 jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC; 5382 update_time_of_last_gc(now); 5383 5384 // NOTE on abstract state transitions: 5385 // Mutators allocate-live and/or mark the mod-union table dirty 5386 // based on the state of the collection. The former is done in 5387 // the interval [Marking, Sweeping] and the latter in the interval 5388 // [Marking, Sweeping). Thus the transitions into the Marking state 5389 // and out of the Sweeping state must be synchronously visible 5390 // globally to the mutators. 5391 // The transition into the Marking state happens with the world 5392 // stopped so the mutators will globally see it. Sweeping is 5393 // done asynchronously by the background collector so the transition 5394 // from the Sweeping state to the Resizing state must be done 5395 // under the freelistLock (as is the check for whether to 5396 // allocate-live and whether to dirty the mod-union table). 
5397 assert(_collectorState == Resizing, "Change of collector state to" 5398 " Resizing must be done under the freelistLocks (plural)"); 5399 5400 // Now that sweeping has been completed, we clear 5401 // the incremental_collection_failed flag, 5402 // thus inviting a younger gen collection to promote into 5403 // this generation. If such a promotion may still fail, 5404 // the flag will be set again when a young collection is 5405 // attempted. 5406 CMSHeap* heap = CMSHeap::heap(); 5407 heap->clear_incremental_collection_failed(); // Worth retrying as fresh space may have been freed up 5408 heap->update_full_collections_completed(_collection_count_start); 5409 } 5410 5411 // FIX ME!!! Looks like this belongs in CFLSpace, with 5412 // CMSGen merely delegating to it. 5413 void ConcurrentMarkSweepGeneration::setNearLargestChunk() { 5414 double nearLargestPercent = FLSLargestBlockCoalesceProximity; 5415 HeapWord* minAddr = _cmsSpace->bottom(); 5416 HeapWord* largestAddr = 5417 (HeapWord*) _cmsSpace->dictionary()->find_largest_dict(); 5418 if (largestAddr == NULL) { 5419 // The dictionary appears to be empty. In this case 5420 // try to coalesce at the end of the heap. 5421 largestAddr = _cmsSpace->end(); 5422 } 5423 size_t largestOffset = pointer_delta(largestAddr, minAddr); 5424 size_t nearLargestOffset = 5425 (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize; 5426 log_debug(gc, freelist)("CMS: Large Block: " PTR_FORMAT "; Proximity: " PTR_FORMAT " -> " PTR_FORMAT, 5427 p2i(largestAddr), p2i(_cmsSpace->nearLargestChunk()), p2i(minAddr + nearLargestOffset)); 5428 _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset); 5429 } 5430 5431 bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) { 5432 return addr >= _cmsSpace->nearLargestChunk(); 5433 } 5434 5435 FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() { 5436 return _cmsSpace->find_chunk_at_end(); 5437 } 5438 5439 void ConcurrentMarkSweepGeneration::update_gc_stats(Generation* current_generation, 5440 bool full) { 5441 // If the young generation has been collected, gather any statistics 5442 // that are of interest at this point. 5443 bool current_is_young = CMSHeap::heap()->is_young_gen(current_generation); 5444 if (!full && current_is_young) { 5445 // Gather statistics on the young generation collection. 5446 collector()->stats().record_gc0_end(used()); 5447 } 5448 _cmsSpace->recalculate_used_stable(); 5449 } 5450 5451 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* old_gen) { 5452 // We iterate over the space(s) underlying this generation, 5453 // checking the mark bit map to see if the bits corresponding 5454 // to specific blocks are marked or not. Blocks that are 5455 // marked are live and are not swept up. All remaining blocks 5456 // are swept up, with coalescing on-the-fly as we sweep up 5457 // contiguous free and/or garbage blocks: 5458 // We need to ensure that the sweeper synchronizes with allocators 5459 // and stop-the-world collectors. In particular, the following 5460 // locks are used: 5461 // . CMS token: if this is held, a stop the world collection cannot occur 5462 // . freelistLock: if this is held no allocation can occur from this 5463 // generation by another thread 5464 // . 
bitMapLock: if this is held, no other thread can access or update 5465 // 5466 5467 // Note that we need to hold the freelistLock if we use 5468 // block iterate below; else the iterator might go awry if 5469 // a mutator (or promotion) causes block contents to change 5470 // (for instance if the allocator divvies up a block). 5471 // If we hold the free list lock, for all practical purposes 5472 // young generation GC's can't occur (they'll usually need to 5473 // promote), so we might as well prevent all young generation 5474 // GC's while we do a sweeping step. For the same reason, we might 5475 // as well take the bit map lock for the entire duration 5476 5477 // check that we hold the requisite locks 5478 assert(have_cms_token(), "Should hold cms token"); 5479 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), "Should possess CMS token to sweep"); 5480 assert_lock_strong(old_gen->freelistLock()); 5481 assert_lock_strong(bitMapLock()); 5482 5483 assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context"); 5484 assert(_intra_sweep_timer.is_active(), "Was switched on in an outer context"); 5485 old_gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()), 5486 _inter_sweep_estimate.padded_average(), 5487 _intra_sweep_estimate.padded_average()); 5488 old_gen->setNearLargestChunk(); 5489 5490 { 5491 SweepClosure sweepClosure(this, old_gen, &_markBitMap, CMSYield); 5492 old_gen->cmsSpace()->blk_iterate_careful(&sweepClosure); 5493 // We need to free-up/coalesce garbage/blocks from a 5494 // co-terminal free run. This is done in the SweepClosure 5495 // destructor; so, do not remove this scope, else the 5496 // end-of-sweep-census below will be off by a little bit. 5497 } 5498 old_gen->cmsSpace()->sweep_completed(); 5499 old_gen->cmsSpace()->endSweepFLCensus(sweep_count()); 5500 if (should_unload_classes()) { // unloaded classes this cycle, 5501 _concurrent_cycles_since_last_unload = 0; // ... reset count 5502 } else { // did not unload classes, 5503 _concurrent_cycles_since_last_unload++; // ... increment count 5504 } 5505 } 5506 5507 // Reset CMS data structures (for now just the marking bit map) 5508 // preparatory for the next cycle. 5509 void CMSCollector::reset_concurrent() { 5510 CMSTokenSyncWithLocks ts(true, bitMapLock()); 5511 5512 // If the state is not "Resetting", the foreground thread 5513 // has done a collection and the resetting. 5514 if (_collectorState != Resetting) { 5515 assert(_collectorState == Idling, "The state should only change" 5516 " because the foreground collector has finished the collection"); 5517 return; 5518 } 5519 5520 { 5521 // Clear the mark bitmap (no grey objects to start with) 5522 // for the next cycle. 
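// The bitmap is cleared in CMSBitMapYieldQuantum-sized chunks; between chunks
// the CMS thread may give up the bitMapLock and the CMS token (see the yield
// check below) so that mutators and a potential foreground collection are not
// held up for the whole clearing pass.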
5523 GCTraceCPUTime tcpu; 5524 CMSPhaseAccounting cmspa(this, "Concurrent Reset"); 5525 5526 HeapWord* curAddr = _markBitMap.startWord(); 5527 while (curAddr < _markBitMap.endWord()) { 5528 size_t remaining = pointer_delta(_markBitMap.endWord(), curAddr); 5529 MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining)); 5530 _markBitMap.clear_large_range(chunk); 5531 if (ConcurrentMarkSweepThread::should_yield() && 5532 !foregroundGCIsActive() && 5533 CMSYield) { 5534 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), 5535 "CMS thread should hold CMS token"); 5536 assert_lock_strong(bitMapLock()); 5537 bitMapLock()->unlock(); 5538 ConcurrentMarkSweepThread::desynchronize(true); 5539 stopTimer(); 5540 incrementYields(); 5541 5542 // See the comment in coordinator_yield() 5543 for (unsigned i = 0; i < CMSYieldSleepCount && 5544 ConcurrentMarkSweepThread::should_yield() && 5545 !CMSCollector::foregroundGCIsActive(); ++i) { 5546 os::sleep(Thread::current(), 1, false); 5547 } 5548 5549 ConcurrentMarkSweepThread::synchronize(true); 5550 bitMapLock()->lock_without_safepoint_check(); 5551 startTimer(); 5552 } 5553 curAddr = chunk.end(); 5554 } 5555 // A successful mostly concurrent collection has been done. 5556 // Because only the full (i.e., concurrent mode failure) collections 5557 // are being measured for gc overhead limits, clean the "near" flag 5558 // and count. 5559 size_policy()->reset_gc_overhead_limit_count(); 5560 _collectorState = Idling; 5561 } 5562 5563 register_gc_end(); 5564 } 5565 5566 // Same as above but for STW paths 5567 void CMSCollector::reset_stw() { 5568 // already have the lock 5569 assert(_collectorState == Resetting, "just checking"); 5570 assert_lock_strong(bitMapLock()); 5571 GCIdMark gc_id_mark(_cmsThread->gc_id()); 5572 _markBitMap.clear_all(); 5573 _collectorState = Idling; 5574 register_gc_end(); 5575 } 5576 5577 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) { 5578 GCTraceCPUTime tcpu; 5579 TraceCollectorStats tcs_cgc(cgc_counters()); 5580 5581 switch (op) { 5582 case CMS_op_checkpointRootsInitial: { 5583 GCTraceTime(Info, gc) t("Pause Initial Mark", NULL, GCCause::_no_gc, true); 5584 SvcGCMarker sgcm(SvcGCMarker::CONCURRENT); 5585 checkpointRootsInitial(); 5586 break; 5587 } 5588 case CMS_op_checkpointRootsFinal: { 5589 GCTraceTime(Info, gc) t("Pause Remark", NULL, GCCause::_no_gc, true); 5590 SvcGCMarker sgcm(SvcGCMarker::CONCURRENT); 5591 checkpointRootsFinal(); 5592 break; 5593 } 5594 default: 5595 fatal("No such CMS_op"); 5596 } 5597 } 5598 5599 #ifndef PRODUCT 5600 size_t const CMSCollector::skip_header_HeapWords() { 5601 return FreeChunk::header_size(); 5602 } 5603 5604 // Try and collect here conditions that should hold when 5605 // CMS thread is exiting. The idea is that the foreground GC 5606 // thread should not be blocked if it wants to terminate 5607 // the CMS thread and yet continue to run the VM for a while 5608 // after that. 5609 void CMSCollector::verify_ok_to_terminate() const { 5610 assert(Thread::current()->is_ConcurrentGC_thread(), 5611 "should be called by CMS thread"); 5612 assert(!_foregroundGCShouldWait, "should be false"); 5613 // We could check here that all the various low-level locks 5614 // are not held by the CMS thread, but that is overkill; see 5615 // also CMSThread::verify_ok_to_terminate() where the CGC_lock 5616 // is checked. 
5617 } 5618 #endif 5619 5620 size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const { 5621 assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1), 5622 "missing Printezis mark?"); 5623 HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2); 5624 size_t size = pointer_delta(nextOneAddr + 1, addr); 5625 assert(size == CompactibleFreeListSpace::adjustObjectSize(size), 5626 "alignment problem"); 5627 assert(size >= 3, "Necessary for Printezis marks to work"); 5628 return size; 5629 } 5630 5631 // A variant of the above (block_size_using_printezis_bits()) except 5632 // that we return 0 if the P-bits are not yet set. 5633 size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const { 5634 if (_markBitMap.isMarked(addr + 1)) { 5635 assert(_markBitMap.isMarked(addr), "P-bit can be set only for marked objects"); 5636 HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2); 5637 size_t size = pointer_delta(nextOneAddr + 1, addr); 5638 assert(size == CompactibleFreeListSpace::adjustObjectSize(size), 5639 "alignment problem"); 5640 assert(size >= 3, "Necessary for Printezis marks to work"); 5641 return size; 5642 } 5643 return 0; 5644 } 5645 5646 HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const { 5647 size_t sz = 0; 5648 oop p = (oop)addr; 5649 if (p->klass_or_null_acquire() != NULL) { 5650 sz = CompactibleFreeListSpace::adjustObjectSize(p->size()); 5651 } else { 5652 sz = block_size_using_printezis_bits(addr); 5653 } 5654 assert(sz > 0, "size must be nonzero"); 5655 HeapWord* next_block = addr + sz; 5656 HeapWord* next_card = align_up(next_block, CardTable::card_size); 5657 assert(align_down((uintptr_t)addr, CardTable::card_size) < 5658 align_down((uintptr_t)next_card, CardTable::card_size), 5659 "must be different cards"); 5660 return next_card; 5661 } 5662 5663 5664 // CMS Bit Map Wrapper ///////////////////////////////////////// 5665 5666 // Construct a CMS bit map infrastructure, but don't create the 5667 // bit vector itself. That is done by a separate call CMSBitMap::allocate() 5668 // further below. 5669 CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name): 5670 _shifter(shifter), 5671 _bm(), 5672 _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true, 5673 Monitor::_safepoint_check_never) : NULL) 5674 { 5675 _bmStartWord = 0; 5676 _bmWordSize = 0; 5677 } 5678 5679 bool CMSBitMap::allocate(MemRegion mr) { 5680 _bmStartWord = mr.start(); 5681 _bmWordSize = mr.word_size(); 5682 ReservedSpace brs(ReservedSpace::allocation_align_size_up( 5683 (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1)); 5684 if (!brs.is_reserved()) { 5685 log_warning(gc)("CMS bit map allocation failure"); 5686 return false; 5687 } 5688 // For now we'll just commit all of the bit map up front. 5689 // Later on we'll try to be more parsimonious with swap. 5690 if (!_virtual_space.initialize(brs, brs.size())) { 5691 log_warning(gc)("CMS bit map backing store failure"); 5692 return false; 5693 } 5694 assert(_virtual_space.committed_size() == brs.size(), 5695 "didn't reserve backing store for all of CMS bit map?"); 5696 assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >= 5697 _bmWordSize, "inconsistency in bit map sizing"); 5698 _bm = BitMapView((BitMap::bm_word_t*)_virtual_space.low(), _bmWordSize >> _shifter); 5699 5700 // bm.clear(); // can we rely on getting zero'd memory? 
verify below 5701 assert(isAllClear(), 5702 "Expected zero'd memory from ReservedSpace constructor"); 5703 assert(_bm.size() == heapWordDiffToOffsetDiff(sizeInWords()), 5704 "consistency check"); 5705 return true; 5706 } 5707 5708 void CMSBitMap::dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl) { 5709 HeapWord *next_addr, *end_addr, *last_addr; 5710 assert_locked(); 5711 assert(covers(mr), "out-of-range error"); 5712 // XXX assert that start and end are appropriately aligned 5713 for (next_addr = mr.start(), end_addr = mr.end(); 5714 next_addr < end_addr; next_addr = last_addr) { 5715 MemRegion dirty_region = getAndClearMarkedRegion(next_addr, end_addr); 5716 last_addr = dirty_region.end(); 5717 if (!dirty_region.is_empty()) { 5718 cl->do_MemRegion(dirty_region); 5719 } else { 5720 assert(last_addr == end_addr, "program logic"); 5721 return; 5722 } 5723 } 5724 } 5725 5726 void CMSBitMap::print_on_error(outputStream* st, const char* prefix) const { 5727 _bm.print_on_error(st, prefix); 5728 } 5729 5730 #ifndef PRODUCT 5731 void CMSBitMap::assert_locked() const { 5732 CMSLockVerifier::assert_locked(lock()); 5733 } 5734 5735 bool CMSBitMap::covers(MemRegion mr) const { 5736 // assert(_bm.map() == _virtual_space.low(), "map inconsistency"); 5737 assert((size_t)_bm.size() == (_bmWordSize >> _shifter), 5738 "size inconsistency"); 5739 return (mr.start() >= _bmStartWord) && 5740 (mr.end() <= endWord()); 5741 } 5742 5743 bool CMSBitMap::covers(HeapWord* start, size_t size) const { 5744 return (start >= _bmStartWord && (start + size) <= endWord()); 5745 } 5746 5747 void CMSBitMap::verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) { 5748 // verify that there are no 1 bits in the interval [left, right) 5749 FalseBitMapClosure falseBitMapClosure; 5750 iterate(&falseBitMapClosure, left, right); 5751 } 5752 5753 void CMSBitMap::region_invariant(MemRegion mr) 5754 { 5755 assert_locked(); 5756 // mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize)); 5757 assert(!mr.is_empty(), "unexpected empty region"); 5758 assert(covers(mr), "mr should be covered by bit map"); 5759 // convert address range into offset range 5760 size_t start_ofs = heapWordToOffset(mr.start()); 5761 // Make sure that end() is appropriately aligned 5762 assert(mr.end() == align_up(mr.end(), (1 << (_shifter+LogHeapWordSize))), 5763 "Misaligned mr.end()"); 5764 size_t end_ofs = heapWordToOffset(mr.end()); 5765 assert(end_ofs > start_ofs, "Should mark at least one bit"); 5766 } 5767 5768 #endif 5769 5770 bool CMSMarkStack::allocate(size_t size) { 5771 // allocate a stack of the requisite depth 5772 ReservedSpace rs(ReservedSpace::allocation_align_size_up( 5773 size * sizeof(oop))); 5774 if (!rs.is_reserved()) { 5775 log_warning(gc)("CMSMarkStack allocation failure"); 5776 return false; 5777 } 5778 if (!_virtual_space.initialize(rs, rs.size())) { 5779 log_warning(gc)("CMSMarkStack backing store failure"); 5780 return false; 5781 } 5782 assert(_virtual_space.committed_size() == rs.size(), 5783 "didn't reserve backing store for all of CMS stack?"); 5784 _base = (oop*)(_virtual_space.low()); 5785 _index = 0; 5786 _capacity = size; 5787 NOT_PRODUCT(_max_depth = 0); 5788 return true; 5789 } 5790 5791 // XXX FIX ME !!! In the MT case we come in here holding a 5792 // leaf lock. For printing we need to take a further lock 5793 // which has lower rank. We need to recalibrate the two 5794 // lock-ranks involved in order to be able to print the 5795 // messages below. (Or defer the printing to the caller. 
5796 // For now we take the expedient path of just disabling the 5797 // messages for the problematic case.) 5798 void CMSMarkStack::expand() { 5799 assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted"); 5800 if (_capacity == MarkStackSizeMax) { 5801 if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled) { 5802 // We print a warning message only once per CMS cycle. 5803 log_debug(gc)(" (benign) Hit CMSMarkStack max size limit"); 5804 } 5805 return; 5806 } 5807 // Double capacity if possible 5808 size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax); 5809 // Do not give up existing stack until we have managed to 5810 // get the double capacity that we desired. 5811 ReservedSpace rs(ReservedSpace::allocation_align_size_up( 5812 new_capacity * sizeof(oop))); 5813 if (rs.is_reserved()) { 5814 // Release the backing store associated with old stack 5815 _virtual_space.release(); 5816 // Reinitialize virtual space for new stack 5817 if (!_virtual_space.initialize(rs, rs.size())) { 5818 fatal("Not enough swap for expanded marking stack"); 5819 } 5820 _base = (oop*)(_virtual_space.low()); 5821 _index = 0; 5822 _capacity = new_capacity; 5823 } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled) { 5824 // Failed to double capacity, continue; 5825 // we print a detail message only once per CMS cycle. 5826 log_debug(gc)(" (benign) Failed to expand marking stack from " SIZE_FORMAT "K to " SIZE_FORMAT "K", 5827 _capacity / K, new_capacity / K); 5828 } 5829 } 5830 5831 5832 // Closures 5833 // XXX: there seems to be a lot of code duplication here; 5834 // should refactor and consolidate common code. 5835 5836 // This closure is used to mark refs into the CMS generation in 5837 // the CMS bit map. Called at the first checkpoint. This closure 5838 // assumes that we do not need to re-mark dirty cards; if the CMS 5839 // generation on which this is used is not an oldest 5840 // generation then this will lose younger_gen cards! 5841 5842 MarkRefsIntoClosure::MarkRefsIntoClosure( 5843 MemRegion span, CMSBitMap* bitMap): 5844 _span(span), 5845 _bitMap(bitMap) 5846 { 5847 assert(ref_discoverer() == NULL, "deliberately left NULL"); 5848 assert(_bitMap->covers(_span), "_bitMap/_span mismatch"); 5849 } 5850 5851 void MarkRefsIntoClosure::do_oop(oop obj) { 5852 // if p points into _span, then mark corresponding bit in _markBitMap 5853 assert(oopDesc::is_oop(obj), "expected an oop"); 5854 HeapWord* addr = (HeapWord*)obj; 5855 if (_span.contains(addr)) { 5856 // this should be made more efficient 5857 _bitMap->mark(addr); 5858 } 5859 } 5860 5861 ParMarkRefsIntoClosure::ParMarkRefsIntoClosure( 5862 MemRegion span, CMSBitMap* bitMap): 5863 _span(span), 5864 _bitMap(bitMap) 5865 { 5866 assert(ref_discoverer() == NULL, "deliberately left NULL"); 5867 assert(_bitMap->covers(_span), "_bitMap/_span mismatch"); 5868 } 5869 5870 void ParMarkRefsIntoClosure::do_oop(oop obj) { 5871 // if p points into _span, then mark corresponding bit in _markBitMap 5872 assert(oopDesc::is_oop(obj), "expected an oop"); 5873 HeapWord* addr = (HeapWord*)obj; 5874 if (_span.contains(addr)) { 5875 // this should be made more efficient 5876 _bitMap->par_mark(addr); 5877 } 5878 } 5879 5880 // A variant of the above, used for CMS marking verification. 
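// Unlike MarkRefsIntoClosure above, this variant marks reachable objects
// in a separate verification bit map and cross-checks them against the
// CMS bit map: an object reached here but not marked in the CMS bit map
// indicates that concurrent marking missed it, and do_oop() below prints
// the object and aborts.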
5881 MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure( 5882 MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm): 5883 _span(span), 5884 _verification_bm(verification_bm), 5885 _cms_bm(cms_bm) 5886 { 5887 assert(ref_discoverer() == NULL, "deliberately left NULL"); 5888 assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch"); 5889 } 5890 5891 void MarkRefsIntoVerifyClosure::do_oop(oop obj) { 5892 // if p points into _span, then mark corresponding bit in _markBitMap 5893 assert(oopDesc::is_oop(obj), "expected an oop"); 5894 HeapWord* addr = (HeapWord*)obj; 5895 if (_span.contains(addr)) { 5896 _verification_bm->mark(addr); 5897 if (!_cms_bm->isMarked(addr)) { 5898 Log(gc, verify) log; 5899 ResourceMark rm; 5900 LogStream ls(log.error()); 5901 oop(addr)->print_on(&ls); 5902 log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr)); 5903 fatal("... aborting"); 5904 } 5905 } 5906 } 5907 5908 ////////////////////////////////////////////////// 5909 // MarkRefsIntoAndScanClosure 5910 ////////////////////////////////////////////////// 5911 5912 MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span, 5913 ReferenceDiscoverer* rd, 5914 CMSBitMap* bit_map, 5915 CMSBitMap* mod_union_table, 5916 CMSMarkStack* mark_stack, 5917 CMSCollector* collector, 5918 bool should_yield, 5919 bool concurrent_precleaning): 5920 _span(span), 5921 _bit_map(bit_map), 5922 _mark_stack(mark_stack), 5923 _pushAndMarkClosure(collector, span, rd, bit_map, mod_union_table, 5924 mark_stack, concurrent_precleaning), 5925 _collector(collector), 5926 _freelistLock(NULL), 5927 _yield(should_yield), 5928 _concurrent_precleaning(concurrent_precleaning) 5929 { 5930 // FIXME: Should initialize in base class constructor. 5931 assert(rd != NULL, "ref_discoverer shouldn't be NULL"); 5932 set_ref_discoverer_internal(rd); 5933 } 5934 5935 // This closure is used to mark refs into the CMS generation at the 5936 // second (final) checkpoint, and to scan and transitively follow 5937 // the unmarked oops. It is also used during the concurrent precleaning 5938 // phase while scanning objects on dirty cards in the CMS generation. 5939 // The marks are made in the marking bit map and the marking stack is 5940 // used for keeping the (newly) grey objects during the scan. 5941 // The parallel version (Par_...) appears further below. 5942 void MarkRefsIntoAndScanClosure::do_oop(oop obj) { 5943 if (obj != NULL) { 5944 assert(oopDesc::is_oop(obj), "expected an oop"); 5945 HeapWord* addr = (HeapWord*)obj; 5946 assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)"); 5947 assert(_collector->overflow_list_is_empty(), 5948 "overflow list should be empty"); 5949 if (_span.contains(addr) && 5950 !_bit_map->isMarked(addr)) { 5951 // mark bit map (object is now grey) 5952 _bit_map->mark(addr); 5953 // push on marking stack (stack should be empty), and drain the 5954 // stack by applying this closure to the oops in the oops popped 5955 // from the stack (i.e. blacken the grey objects) 5956 bool res = _mark_stack->push(obj); 5957 assert(res, "Should have space to push on empty stack"); 5958 do { 5959 oop new_oop = _mark_stack->pop(); 5960 assert(new_oop != NULL && oopDesc::is_oop(new_oop), "Expected an oop"); 5961 assert(_bit_map->isMarked((HeapWord*)new_oop), 5962 "only grey objects on this stack"); 5963 // iterate over the oops in this oop, marking and pushing 5964 // the ones in CMS heap (i.e. in _span). 
5965 new_oop->oop_iterate(&_pushAndMarkClosure); 5966 // check if it's time to yield 5967 do_yield_check(); 5968 } while (!_mark_stack->isEmpty() || 5969 (!_concurrent_precleaning && take_from_overflow_list())); 5970 // if marking stack is empty, and we are not doing this 5971 // during precleaning, then check the overflow list 5972 } 5973 assert(_mark_stack->isEmpty(), "post-condition (eager drainage)"); 5974 assert(_collector->overflow_list_is_empty(), 5975 "overflow list was drained above"); 5976 5977 assert(_collector->no_preserved_marks(), 5978 "All preserved marks should have been restored above"); 5979 } 5980 } 5981 5982 void MarkRefsIntoAndScanClosure::do_yield_work() { 5983 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), 5984 "CMS thread should hold CMS token"); 5985 assert_lock_strong(_freelistLock); 5986 assert_lock_strong(_bit_map->lock()); 5987 // relinquish the free_list_lock and bitMaplock() 5988 _bit_map->lock()->unlock(); 5989 _freelistLock->unlock(); 5990 ConcurrentMarkSweepThread::desynchronize(true); 5991 _collector->stopTimer(); 5992 _collector->incrementYields(); 5993 5994 // See the comment in coordinator_yield() 5995 for (unsigned i = 0; 5996 i < CMSYieldSleepCount && 5997 ConcurrentMarkSweepThread::should_yield() && 5998 !CMSCollector::foregroundGCIsActive(); 5999 ++i) { 6000 os::sleep(Thread::current(), 1, false); 6001 } 6002 6003 ConcurrentMarkSweepThread::synchronize(true); 6004 _freelistLock->lock_without_safepoint_check(); 6005 _bit_map->lock()->lock_without_safepoint_check(); 6006 _collector->startTimer(); 6007 } 6008 6009 /////////////////////////////////////////////////////////// 6010 // ParMarkRefsIntoAndScanClosure: a parallel version of 6011 // MarkRefsIntoAndScanClosure 6012 /////////////////////////////////////////////////////////// 6013 ParMarkRefsIntoAndScanClosure::ParMarkRefsIntoAndScanClosure( 6014 CMSCollector* collector, MemRegion span, ReferenceDiscoverer* rd, 6015 CMSBitMap* bit_map, OopTaskQueue* work_queue): 6016 _span(span), 6017 _bit_map(bit_map), 6018 _work_queue(work_queue), 6019 _low_water_mark(MIN2((work_queue->max_elems()/4), 6020 ((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads))), 6021 _parPushAndMarkClosure(collector, span, rd, bit_map, work_queue) 6022 { 6023 // FIXME: Should initialize in base class constructor. 6024 assert(rd != NULL, "ref_discoverer shouldn't be NULL"); 6025 set_ref_discoverer_internal(rd); 6026 } 6027 6028 // This closure is used to mark refs into the CMS generation at the 6029 // second (final) checkpoint, and to scan and transitively follow 6030 // the unmarked oops. The marks are made in the marking bit map and 6031 // the work_queue is used for keeping the (newly) grey objects during 6032 // the scan phase whence they are also available for stealing by parallel 6033 // threads. Since the marking bit map is shared, updates are 6034 // synchronized (via CAS). 6035 void ParMarkRefsIntoAndScanClosure::do_oop(oop obj) { 6036 if (obj != NULL) { 6037 // Ignore mark word because this could be an already marked oop 6038 // that may be chained at the end of the overflow list. 
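    // (While an object sits on the global overflow list its mark word is
    // used as the list link -- see the comment in
    // ParPushAndMarkClosure::do_oop() further below -- so the header need
    // not look like a normal mark word; the verification below therefore
    // ignores it.)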
6039 assert(oopDesc::is_oop(obj, true), "expected an oop"); 6040 HeapWord* addr = (HeapWord*)obj; 6041 if (_span.contains(addr) && 6042 !_bit_map->isMarked(addr)) { 6043 // mark bit map (object will become grey): 6044 // It is possible for several threads to be 6045 // trying to "claim" this object concurrently; 6046 // the unique thread that succeeds in marking the 6047 // object first will do the subsequent push on 6048 // to the work queue (or overflow list). 6049 if (_bit_map->par_mark(addr)) { 6050 // push on work_queue (which may not be empty), and trim the 6051 // queue to an appropriate length by applying this closure to 6052 // the oops in the oops popped from the stack (i.e. blacken the 6053 // grey objects) 6054 bool res = _work_queue->push(obj); 6055 assert(res, "Low water mark should be less than capacity?"); 6056 trim_queue(_low_water_mark); 6057 } // Else, another thread claimed the object 6058 } 6059 } 6060 } 6061 6062 // This closure is used to rescan the marked objects on the dirty cards 6063 // in the mod union table and the card table proper. 6064 size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m( 6065 oop p, MemRegion mr) { 6066 6067 size_t size = 0; 6068 HeapWord* addr = (HeapWord*)p; 6069 DEBUG_ONLY(_collector->verify_work_stacks_empty();) 6070 assert(_span.contains(addr), "we are scanning the CMS generation"); 6071 // check if it's time to yield 6072 if (do_yield_check()) { 6073 // We yielded for some foreground stop-world work, 6074 // and we have been asked to abort this ongoing preclean cycle. 6075 return 0; 6076 } 6077 if (_bitMap->isMarked(addr)) { 6078 // it's marked; is it potentially uninitialized? 6079 if (p->klass_or_null_acquire() != NULL) { 6080 // an initialized object; ignore mark word in verification below 6081 // since we are running concurrent with mutators 6082 assert(oopDesc::is_oop(p, true), "should be an oop"); 6083 if (p->is_objArray()) { 6084 // objArrays are precisely marked; restrict scanning 6085 // to dirty cards only. 6086 size = CompactibleFreeListSpace::adjustObjectSize( 6087 p->oop_iterate_size(_scanningClosure, mr)); 6088 } else { 6089 // A non-array may have been imprecisely marked; we need 6090 // to scan object in its entirety. 6091 size = CompactibleFreeListSpace::adjustObjectSize( 6092 p->oop_iterate_size(_scanningClosure)); 6093 } 6094 #ifdef ASSERT 6095 size_t direct_size = 6096 CompactibleFreeListSpace::adjustObjectSize(p->size()); 6097 assert(size == direct_size, "Inconsistency in size"); 6098 assert(size >= 3, "Necessary for Printezis marks to work"); 6099 HeapWord* start_pbit = addr + 1; 6100 HeapWord* end_pbit = addr + size - 1; 6101 assert(_bitMap->isMarked(start_pbit) == _bitMap->isMarked(end_pbit), 6102 "inconsistent Printezis mark"); 6103 // Verify inner mark bits (between Printezis bits) are clear, 6104 // but don't repeat if there are multiple dirty regions for 6105 // the same object, to avoid potential O(N^2) performance. 6106 if (addr != _last_scanned_object) { 6107 _bitMap->verifyNoOneBitsInRange(start_pbit + 1, end_pbit); 6108 _last_scanned_object = addr; 6109 } 6110 #endif // ASSERT 6111 } else { 6112 // An uninitialized object. 6113 assert(_bitMap->isMarked(addr+1), "missing Printezis mark?"); 6114 HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2); 6115 size = pointer_delta(nextOneAddr + 1, addr); 6116 assert(size == CompactibleFreeListSpace::adjustObjectSize(size), 6117 "alignment problem"); 6118 // Note that pre-cleaning needn't redirty the card. 
OopDesc::set_klass() 6119 // will dirty the card when the klass pointer is installed in the 6120 // object (signaling the completion of initialization). 6121 } 6122 } else { 6123 // Either a not yet marked object or an uninitialized object 6124 if (p->klass_or_null_acquire() == NULL) { 6125 // An uninitialized object, skip to the next card, since 6126 // we may not be able to read its P-bits yet. 6127 assert(size == 0, "Initial value"); 6128 } else { 6129 // An object not (yet) reached by marking: we merely need to 6130 // compute its size so as to go look at the next block. 6131 assert(oopDesc::is_oop(p, true), "should be an oop"); 6132 size = CompactibleFreeListSpace::adjustObjectSize(p->size()); 6133 } 6134 } 6135 DEBUG_ONLY(_collector->verify_work_stacks_empty();) 6136 return size; 6137 } 6138 6139 void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() { 6140 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), 6141 "CMS thread should hold CMS token"); 6142 assert_lock_strong(_freelistLock); 6143 assert_lock_strong(_bitMap->lock()); 6144 // relinquish the free_list_lock and bitMaplock() 6145 _bitMap->lock()->unlock(); 6146 _freelistLock->unlock(); 6147 ConcurrentMarkSweepThread::desynchronize(true); 6148 _collector->stopTimer(); 6149 _collector->incrementYields(); 6150 6151 // See the comment in coordinator_yield() 6152 for (unsigned i = 0; i < CMSYieldSleepCount && 6153 ConcurrentMarkSweepThread::should_yield() && 6154 !CMSCollector::foregroundGCIsActive(); ++i) { 6155 os::sleep(Thread::current(), 1, false); 6156 } 6157 6158 ConcurrentMarkSweepThread::synchronize(true); 6159 _freelistLock->lock_without_safepoint_check(); 6160 _bitMap->lock()->lock_without_safepoint_check(); 6161 _collector->startTimer(); 6162 } 6163 6164 6165 ////////////////////////////////////////////////////////////////// 6166 // SurvivorSpacePrecleanClosure 6167 ////////////////////////////////////////////////////////////////// 6168 // This (single-threaded) closure is used to preclean the oops in 6169 // the survivor spaces. 6170 size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) { 6171 6172 HeapWord* addr = (HeapWord*)p; 6173 DEBUG_ONLY(_collector->verify_work_stacks_empty();) 6174 assert(!_span.contains(addr), "we are scanning the survivor spaces"); 6175 assert(p->klass_or_null() != NULL, "object should be initialized"); 6176 // an initialized object; ignore mark word in verification below 6177 // since we are running concurrent with mutators 6178 assert(oopDesc::is_oop(p, true), "should be an oop"); 6179 // Note that we do not yield while we iterate over 6180 // the interior oops of p, pushing the relevant ones 6181 // on our marking stack. 6182 size_t size = p->oop_iterate_size(_scanning_closure); 6183 do_yield_check(); 6184 // Observe that below, we do not abandon the preclean 6185 // phase as soon as we should; rather we empty the 6186 // marking stack before returning. This is to satisfy 6187 // some existing assertions. In general, it may be a 6188 // good idea to abort immediately and complete the marking 6189 // from the grey objects at a later time. 6190 while (!_mark_stack->isEmpty()) { 6191 oop new_oop = _mark_stack->pop(); 6192 assert(new_oop != NULL && oopDesc::is_oop(new_oop), "Expected an oop"); 6193 assert(_bit_map->isMarked((HeapWord*)new_oop), 6194 "only grey objects on this stack"); 6195 // iterate over the oops in this oop, marking and pushing 6196 // the ones in CMS heap (i.e. in _span). 
6197 new_oop->oop_iterate(_scanning_closure); 6198 // check if it's time to yield 6199 do_yield_check(); 6200 } 6201 unsigned int after_count = 6202 CMSHeap::heap()->total_collections(); 6203 bool abort = (_before_count != after_count) || 6204 _collector->should_abort_preclean(); 6205 return abort ? 0 : size; 6206 } 6207 6208 void SurvivorSpacePrecleanClosure::do_yield_work() { 6209 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), 6210 "CMS thread should hold CMS token"); 6211 assert_lock_strong(_bit_map->lock()); 6212 // Relinquish the bit map lock 6213 _bit_map->lock()->unlock(); 6214 ConcurrentMarkSweepThread::desynchronize(true); 6215 _collector->stopTimer(); 6216 _collector->incrementYields(); 6217 6218 // See the comment in coordinator_yield() 6219 for (unsigned i = 0; i < CMSYieldSleepCount && 6220 ConcurrentMarkSweepThread::should_yield() && 6221 !CMSCollector::foregroundGCIsActive(); ++i) { 6222 os::sleep(Thread::current(), 1, false); 6223 } 6224 6225 ConcurrentMarkSweepThread::synchronize(true); 6226 _bit_map->lock()->lock_without_safepoint_check(); 6227 _collector->startTimer(); 6228 } 6229 6230 // This closure is used to rescan the marked objects on the dirty cards 6231 // in the mod union table and the card table proper. In the parallel 6232 // case, although the bitMap is shared, we do a single read so the 6233 // isMarked() query is "safe". 6234 bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) { 6235 // Ignore mark word because we are running concurrent with mutators 6236 assert(oopDesc::is_oop_or_null(p, true), "Expected an oop or NULL at " PTR_FORMAT, p2i(p)); 6237 HeapWord* addr = (HeapWord*)p; 6238 assert(_span.contains(addr), "we are scanning the CMS generation"); 6239 bool is_obj_array = false; 6240 #ifdef ASSERT 6241 if (!_parallel) { 6242 assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)"); 6243 assert(_collector->overflow_list_is_empty(), 6244 "overflow list should be empty"); 6245 6246 } 6247 #endif // ASSERT 6248 if (_bit_map->isMarked(addr)) { 6249 // Obj arrays are precisely marked, non-arrays are not; 6250 // so we scan objArrays precisely and non-arrays in their 6251 // entirety. 
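    // For an objArray the iteration below is restricted to the dirty region
    // mr that prompted this rescan; a non-array may have been marked
    // imprecisely, so its whole body is iterated (compare the analogous
    // handling in ScanMarkedObjectsAgainCarefullyClosure above).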
6252 if (p->is_objArray()) { 6253 is_obj_array = true; 6254 if (_parallel) { 6255 p->oop_iterate(_par_scan_closure, mr); 6256 } else { 6257 p->oop_iterate(_scan_closure, mr); 6258 } 6259 } else { 6260 if (_parallel) { 6261 p->oop_iterate(_par_scan_closure); 6262 } else { 6263 p->oop_iterate(_scan_closure); 6264 } 6265 } 6266 } 6267 #ifdef ASSERT 6268 if (!_parallel) { 6269 assert(_mark_stack->isEmpty(), "post-condition (eager drainage)"); 6270 assert(_collector->overflow_list_is_empty(), 6271 "overflow list should be empty"); 6272 6273 } 6274 #endif // ASSERT 6275 return is_obj_array; 6276 } 6277 6278 MarkFromRootsClosure::MarkFromRootsClosure(CMSCollector* collector, 6279 MemRegion span, 6280 CMSBitMap* bitMap, CMSMarkStack* markStack, 6281 bool should_yield, bool verifying): 6282 _collector(collector), 6283 _span(span), 6284 _bitMap(bitMap), 6285 _mut(&collector->_modUnionTable), 6286 _markStack(markStack), 6287 _yield(should_yield), 6288 _skipBits(0) 6289 { 6290 assert(_markStack->isEmpty(), "stack should be empty"); 6291 _finger = _bitMap->startWord(); 6292 _threshold = _finger; 6293 assert(_collector->_restart_addr == NULL, "Sanity check"); 6294 assert(_span.contains(_finger), "Out of bounds _finger?"); 6295 DEBUG_ONLY(_verifying = verifying;) 6296 } 6297 6298 void MarkFromRootsClosure::reset(HeapWord* addr) { 6299 assert(_markStack->isEmpty(), "would cause duplicates on stack"); 6300 assert(_span.contains(addr), "Out of bounds _finger?"); 6301 _finger = addr; 6302 _threshold = align_up(_finger, CardTable::card_size); 6303 } 6304 6305 // Should revisit to see if this should be restructured for 6306 // greater efficiency. 6307 bool MarkFromRootsClosure::do_bit(size_t offset) { 6308 if (_skipBits > 0) { 6309 _skipBits--; 6310 return true; 6311 } 6312 // convert offset into a HeapWord* 6313 HeapWord* addr = _bitMap->startWord() + offset; 6314 assert(_bitMap->endWord() && addr < _bitMap->endWord(), 6315 "address out of range"); 6316 assert(_bitMap->isMarked(addr), "tautology"); 6317 if (_bitMap->isMarked(addr+1)) { 6318 // this is an allocated but not yet initialized object 6319 assert(_skipBits == 0, "tautology"); 6320 _skipBits = 2; // skip next two marked bits ("Printezis-marks") 6321 oop p = oop(addr); 6322 if (p->klass_or_null_acquire() == NULL) { 6323 DEBUG_ONLY(if (!_verifying) {) 6324 // We re-dirty the cards on which this object lies and increase 6325 // the _threshold so that we'll come back to scan this object 6326 // during the preclean or remark phase. (CMSCleanOnEnter) 6327 if (CMSCleanOnEnter) { 6328 size_t sz = _collector->block_size_using_printezis_bits(addr); 6329 HeapWord* end_card_addr = align_up(addr + sz, CardTable::card_size); 6330 MemRegion redirty_range = MemRegion(addr, end_card_addr); 6331 assert(!redirty_range.is_empty(), "Arithmetical tautology"); 6332 // Bump _threshold to end_card_addr; note that 6333 // _threshold cannot possibly exceed end_card_addr, anyhow. 6334 // This prevents future clearing of the card as the scan proceeds 6335 // to the right. 6336 assert(_threshold <= end_card_addr, 6337 "Because we are just scanning into this object"); 6338 if (_threshold < end_card_addr) { 6339 _threshold = end_card_addr; 6340 } 6341 if (p->klass_or_null_acquire() != NULL) { 6342 // Redirty the range of cards... 6343 _mut->mark_range(redirty_range); 6344 } // ...else the setting of klass will dirty the card anyway. 
6345 } 6346 DEBUG_ONLY(}) 6347 return true; 6348 } 6349 } 6350 scanOopsInOop(addr); 6351 return true; 6352 } 6353 6354 // We take a break if we've been at this for a while, 6355 // so as to avoid monopolizing the locks involved. 6356 void MarkFromRootsClosure::do_yield_work() { 6357 // First give up the locks, then yield, then re-lock 6358 // We should probably use a constructor/destructor idiom to 6359 // do this unlock/lock or modify the MutexUnlocker class to 6360 // serve our purpose. XXX 6361 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), 6362 "CMS thread should hold CMS token"); 6363 assert_lock_strong(_bitMap->lock()); 6364 _bitMap->lock()->unlock(); 6365 ConcurrentMarkSweepThread::desynchronize(true); 6366 _collector->stopTimer(); 6367 _collector->incrementYields(); 6368 6369 // See the comment in coordinator_yield() 6370 for (unsigned i = 0; i < CMSYieldSleepCount && 6371 ConcurrentMarkSweepThread::should_yield() && 6372 !CMSCollector::foregroundGCIsActive(); ++i) { 6373 os::sleep(Thread::current(), 1, false); 6374 } 6375 6376 ConcurrentMarkSweepThread::synchronize(true); 6377 _bitMap->lock()->lock_without_safepoint_check(); 6378 _collector->startTimer(); 6379 } 6380 6381 void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) { 6382 assert(_bitMap->isMarked(ptr), "expected bit to be set"); 6383 assert(_markStack->isEmpty(), 6384 "should drain stack to limit stack usage"); 6385 // convert ptr to an oop preparatory to scanning 6386 oop obj = oop(ptr); 6387 // Ignore mark word in verification below, since we 6388 // may be running concurrent with mutators. 6389 assert(oopDesc::is_oop(obj, true), "should be an oop"); 6390 assert(_finger <= ptr, "_finger runneth ahead"); 6391 // advance the finger to right end of this object 6392 _finger = ptr + obj->size(); 6393 assert(_finger > ptr, "we just incremented it above"); 6394 // On large heaps, it may take us some time to get through 6395 // the marking phase. During 6396 // this time it's possible that a lot of mutations have 6397 // accumulated in the card table and the mod union table -- 6398 // these mutation records are redundant until we have 6399 // actually traced into the corresponding card. 6400 // Here, we check whether advancing the finger would make 6401 // us cross into a new card, and if so clear corresponding 6402 // cards in the MUT (preclean them in the card-table in the 6403 // future). 6404 6405 DEBUG_ONLY(if (!_verifying) {) 6406 // The clean-on-enter optimization is disabled by default, 6407 // until we fix 6178663. 6408 if (CMSCleanOnEnter && (_finger > _threshold)) { 6409 // [_threshold, _finger) represents the interval 6410 // of cards to be cleared in MUT (or precleaned in card table). 6411 // The set of cards to be cleared is all those that overlap 6412 // with the interval [_threshold, _finger); note that 6413 // _threshold is always kept card-aligned but _finger isn't 6414 // always card-aligned. 6415 HeapWord* old_threshold = _threshold; 6416 assert(is_aligned(old_threshold, CardTable::card_size), 6417 "_threshold should always be card-aligned"); 6418 _threshold = align_up(_finger, CardTable::card_size); 6419 MemRegion mr(old_threshold, _threshold); 6420 assert(!mr.is_empty(), "Control point invariant"); 6421 assert(_span.contains(mr), "Should clear within span"); 6422 _mut->clear_range(mr); 6423 } 6424 DEBUG_ONLY(}) 6425 // Note: the finger doesn't advance while we drain 6426 // the stack below. 
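  // Only references below _finger need to be pushed by the closure
  // constructed below; anything at or to the right of _finger will still
  // be reached by the ongoing bit-map iteration (see the check in
  // PushOrMarkClosure::do_oop()).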
6427 PushOrMarkClosure pushOrMarkClosure(_collector, 6428 _span, _bitMap, _markStack, 6429 _finger, this); 6430 bool res = _markStack->push(obj); 6431 assert(res, "Empty non-zero size stack should have space for single push"); 6432 while (!_markStack->isEmpty()) { 6433 oop new_oop = _markStack->pop(); 6434 // Skip verifying header mark word below because we are 6435 // running concurrent with mutators. 6436 assert(oopDesc::is_oop(new_oop, true), "Oops! expected to pop an oop"); 6437 // now scan this oop's oops 6438 new_oop->oop_iterate(&pushOrMarkClosure); 6439 do_yield_check(); 6440 } 6441 assert(_markStack->isEmpty(), "tautology, emphasizing post-condition"); 6442 } 6443 6444 ParMarkFromRootsClosure::ParMarkFromRootsClosure(CMSConcMarkingTask* task, 6445 CMSCollector* collector, MemRegion span, 6446 CMSBitMap* bit_map, 6447 OopTaskQueue* work_queue, 6448 CMSMarkStack* overflow_stack): 6449 _collector(collector), 6450 _whole_span(collector->_span), 6451 _span(span), 6452 _bit_map(bit_map), 6453 _mut(&collector->_modUnionTable), 6454 _work_queue(work_queue), 6455 _overflow_stack(overflow_stack), 6456 _skip_bits(0), 6457 _task(task) 6458 { 6459 assert(_work_queue->size() == 0, "work_queue should be empty"); 6460 _finger = span.start(); 6461 _threshold = _finger; // XXX Defer clear-on-enter optimization for now 6462 assert(_span.contains(_finger), "Out of bounds _finger?"); 6463 } 6464 6465 // Should revisit to see if this should be restructured for 6466 // greater efficiency. 6467 bool ParMarkFromRootsClosure::do_bit(size_t offset) { 6468 if (_skip_bits > 0) { 6469 _skip_bits--; 6470 return true; 6471 } 6472 // convert offset into a HeapWord* 6473 HeapWord* addr = _bit_map->startWord() + offset; 6474 assert(_bit_map->endWord() && addr < _bit_map->endWord(), 6475 "address out of range"); 6476 assert(_bit_map->isMarked(addr), "tautology"); 6477 if (_bit_map->isMarked(addr+1)) { 6478 // this is an allocated object that might not yet be initialized 6479 assert(_skip_bits == 0, "tautology"); 6480 _skip_bits = 2; // skip next two marked bits ("Printezis-marks") 6481 oop p = oop(addr); 6482 if (p->klass_or_null_acquire() == NULL) { 6483 // in the case of Clean-on-Enter optimization, redirty card 6484 // and avoid clearing card by increasing the threshold. 6485 return true; 6486 } 6487 } 6488 scan_oops_in_oop(addr); 6489 return true; 6490 } 6491 6492 void ParMarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) { 6493 assert(_bit_map->isMarked(ptr), "expected bit to be set"); 6494 // Should we assert that our work queue is empty or 6495 // below some drain limit? 6496 assert(_work_queue->size() == 0, 6497 "should drain stack to limit stack usage"); 6498 // convert ptr to an oop preparatory to scanning 6499 oop obj = oop(ptr); 6500 // Ignore mark word in verification below, since we 6501 // may be running concurrent with mutators. 6502 assert(oopDesc::is_oop(obj, true), "should be an oop"); 6503 assert(_finger <= ptr, "_finger runneth ahead"); 6504 // advance the finger to right end of this object 6505 _finger = ptr + obj->size(); 6506 assert(_finger > ptr, "we just incremented it above"); 6507 // On large heaps, it may take us some time to get through 6508 // the marking phase. During 6509 // this time it's possible that a lot of mutations have 6510 // accumulated in the card table and the mod union table -- 6511 // these mutation records are redundant until we have 6512 // actually traced into the corresponding card. 
6513 // Here, we check whether advancing the finger would make 6514 // us cross into a new card, and if so clear corresponding 6515 // cards in the MUT (preclean them in the card-table in the 6516 // future). 6517 6518 // The clean-on-enter optimization is disabled by default, 6519 // until we fix 6178663. 6520 if (CMSCleanOnEnter && (_finger > _threshold)) { 6521 // [_threshold, _finger) represents the interval 6522 // of cards to be cleared in MUT (or precleaned in card table). 6523 // The set of cards to be cleared is all those that overlap 6524 // with the interval [_threshold, _finger); note that 6525 // _threshold is always kept card-aligned but _finger isn't 6526 // always card-aligned. 6527 HeapWord* old_threshold = _threshold; 6528 assert(is_aligned(old_threshold, CardTable::card_size), 6529 "_threshold should always be card-aligned"); 6530 _threshold = align_up(_finger, CardTable::card_size); 6531 MemRegion mr(old_threshold, _threshold); 6532 assert(!mr.is_empty(), "Control point invariant"); 6533 assert(_span.contains(mr), "Should clear within span"); // _whole_span ?? 6534 _mut->clear_range(mr); 6535 } 6536 6537 // Note: the local finger doesn't advance while we drain 6538 // the stack below, but the global finger sure can and will. 6539 HeapWord* volatile* gfa = _task->global_finger_addr(); 6540 ParPushOrMarkClosure pushOrMarkClosure(_collector, 6541 _span, _bit_map, 6542 _work_queue, 6543 _overflow_stack, 6544 _finger, 6545 gfa, this); 6546 bool res = _work_queue->push(obj); // overflow could occur here 6547 assert(res, "Will hold once we use workqueues"); 6548 while (true) { 6549 oop new_oop; 6550 if (!_work_queue->pop_local(new_oop)) { 6551 // We emptied our work_queue; check if there's stuff that can 6552 // be gotten from the overflow stack. 6553 if (CMSConcMarkingTask::get_work_from_overflow_stack( 6554 _overflow_stack, _work_queue)) { 6555 do_yield_check(); 6556 continue; 6557 } else { // done 6558 break; 6559 } 6560 } 6561 // Skip verifying header mark word below because we are 6562 // running concurrent with mutators. 6563 assert(oopDesc::is_oop(new_oop, true), "Oops! expected to pop an oop"); 6564 // now scan this oop's oops 6565 new_oop->oop_iterate(&pushOrMarkClosure); 6566 do_yield_check(); 6567 } 6568 assert(_work_queue->size() == 0, "tautology, emphasizing post-condition"); 6569 } 6570 6571 // Yield in response to a request from VM Thread or 6572 // from mutators. 6573 void ParMarkFromRootsClosure::do_yield_work() { 6574 assert(_task != NULL, "sanity"); 6575 _task->yield(); 6576 } 6577 6578 // A variant of the above used for verifying CMS marking work. 
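// It walks the objects already marked in the verification bit map and,
// via PushAndMarkVerifyClosure (below), transitively marks what they
// reach, reporting any reachable object that the CMS bit map failed to
// mark.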
6579 MarkFromRootsVerifyClosure::MarkFromRootsVerifyClosure(CMSCollector* collector, 6580 MemRegion span, 6581 CMSBitMap* verification_bm, CMSBitMap* cms_bm, 6582 CMSMarkStack* mark_stack): 6583 _collector(collector), 6584 _span(span), 6585 _verification_bm(verification_bm), 6586 _cms_bm(cms_bm), 6587 _mark_stack(mark_stack), 6588 _pam_verify_closure(collector, span, verification_bm, cms_bm, 6589 mark_stack) 6590 { 6591 assert(_mark_stack->isEmpty(), "stack should be empty"); 6592 _finger = _verification_bm->startWord(); 6593 assert(_collector->_restart_addr == NULL, "Sanity check"); 6594 assert(_span.contains(_finger), "Out of bounds _finger?"); 6595 } 6596 6597 void MarkFromRootsVerifyClosure::reset(HeapWord* addr) { 6598 assert(_mark_stack->isEmpty(), "would cause duplicates on stack"); 6599 assert(_span.contains(addr), "Out of bounds _finger?"); 6600 _finger = addr; 6601 } 6602 6603 // Should revisit to see if this should be restructured for 6604 // greater efficiency. 6605 bool MarkFromRootsVerifyClosure::do_bit(size_t offset) { 6606 // convert offset into a HeapWord* 6607 HeapWord* addr = _verification_bm->startWord() + offset; 6608 assert(_verification_bm->endWord() && addr < _verification_bm->endWord(), 6609 "address out of range"); 6610 assert(_verification_bm->isMarked(addr), "tautology"); 6611 assert(_cms_bm->isMarked(addr), "tautology"); 6612 6613 assert(_mark_stack->isEmpty(), 6614 "should drain stack to limit stack usage"); 6615 // convert addr to an oop preparatory to scanning 6616 oop obj = oop(addr); 6617 assert(oopDesc::is_oop(obj), "should be an oop"); 6618 assert(_finger <= addr, "_finger runneth ahead"); 6619 // advance the finger to right end of this object 6620 _finger = addr + obj->size(); 6621 assert(_finger > addr, "we just incremented it above"); 6622 // Note: the finger doesn't advance while we drain 6623 // the stack below. 6624 bool res = _mark_stack->push(obj); 6625 assert(res, "Empty non-zero size stack should have space for single push"); 6626 while (!_mark_stack->isEmpty()) { 6627 oop new_oop = _mark_stack->pop(); 6628 assert(oopDesc::is_oop(new_oop), "Oops! expected to pop an oop"); 6629 // now scan this oop's oops 6630 new_oop->oop_iterate(&_pam_verify_closure); 6631 } 6632 assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition"); 6633 return true; 6634 } 6635 6636 PushAndMarkVerifyClosure::PushAndMarkVerifyClosure( 6637 CMSCollector* collector, MemRegion span, 6638 CMSBitMap* verification_bm, CMSBitMap* cms_bm, 6639 CMSMarkStack* mark_stack): 6640 MetadataVisitingOopIterateClosure(collector->ref_processor()), 6641 _collector(collector), 6642 _span(span), 6643 _verification_bm(verification_bm), 6644 _cms_bm(cms_bm), 6645 _mark_stack(mark_stack) 6646 { } 6647 6648 template <class T> void PushAndMarkVerifyClosure::do_oop_work(T *p) { 6649 oop obj = RawAccess<>::oop_load(p); 6650 do_oop(obj); 6651 } 6652 6653 void PushAndMarkVerifyClosure::do_oop(oop* p) { PushAndMarkVerifyClosure::do_oop_work(p); } 6654 void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); } 6655 6656 // Upon stack overflow, we discard (part of) the stack, 6657 // remembering the least address amongst those discarded 6658 // in CMSCollector's _restart_address. 
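// The stack is then reset and, if possible, expanded; marking can later
// be restarted from _restart_addr so that the discarded entries are
// revisited rather than lost.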
6659 void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) { 6660 // Remember the least grey address discarded 6661 HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost); 6662 _collector->lower_restart_addr(ra); 6663 _mark_stack->reset(); // discard stack contents 6664 _mark_stack->expand(); // expand the stack if possible 6665 } 6666 6667 void PushAndMarkVerifyClosure::do_oop(oop obj) { 6668 assert(oopDesc::is_oop_or_null(obj), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj)); 6669 HeapWord* addr = (HeapWord*)obj; 6670 if (_span.contains(addr) && !_verification_bm->isMarked(addr)) { 6671 // Oop lies in _span and isn't yet grey or black 6672 _verification_bm->mark(addr); // now grey 6673 if (!_cms_bm->isMarked(addr)) { 6674 Log(gc, verify) log; 6675 ResourceMark rm; 6676 LogStream ls(log.error()); 6677 oop(addr)->print_on(&ls); 6678 log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr)); 6679 fatal("... aborting"); 6680 } 6681 6682 if (!_mark_stack->push(obj)) { // stack overflow 6683 log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _mark_stack->capacity()); 6684 assert(_mark_stack->isFull(), "Else push should have succeeded"); 6685 handle_stack_overflow(addr); 6686 } 6687 // anything including and to the right of _finger 6688 // will be scanned as we iterate over the remainder of the 6689 // bit map 6690 } 6691 } 6692 6693 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector, 6694 MemRegion span, 6695 CMSBitMap* bitMap, CMSMarkStack* markStack, 6696 HeapWord* finger, MarkFromRootsClosure* parent) : 6697 MetadataVisitingOopIterateClosure(collector->ref_processor()), 6698 _collector(collector), 6699 _span(span), 6700 _bitMap(bitMap), 6701 _markStack(markStack), 6702 _finger(finger), 6703 _parent(parent) 6704 { } 6705 6706 ParPushOrMarkClosure::ParPushOrMarkClosure(CMSCollector* collector, 6707 MemRegion span, 6708 CMSBitMap* bit_map, 6709 OopTaskQueue* work_queue, 6710 CMSMarkStack* overflow_stack, 6711 HeapWord* finger, 6712 HeapWord* volatile* global_finger_addr, 6713 ParMarkFromRootsClosure* parent) : 6714 MetadataVisitingOopIterateClosure(collector->ref_processor()), 6715 _collector(collector), 6716 _whole_span(collector->_span), 6717 _span(span), 6718 _bit_map(bit_map), 6719 _work_queue(work_queue), 6720 _overflow_stack(overflow_stack), 6721 _finger(finger), 6722 _global_finger_addr(global_finger_addr), 6723 _parent(parent) 6724 { } 6725 6726 // Assumes thread-safe access by callers, who are 6727 // responsible for mutual exclusion. 6728 void CMSCollector::lower_restart_addr(HeapWord* low) { 6729 assert(_span.contains(low), "Out of bounds addr"); 6730 if (_restart_addr == NULL) { 6731 _restart_addr = low; 6732 } else { 6733 _restart_addr = MIN2(_restart_addr, low); 6734 } 6735 } 6736 6737 // Upon stack overflow, we discard (part of) the stack, 6738 // remembering the least address amongst those discarded 6739 // in CMSCollector's _restart_address. 6740 void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) { 6741 // Remember the least grey address discarded 6742 HeapWord* ra = (HeapWord*)_markStack->least_value(lost); 6743 _collector->lower_restart_addr(ra); 6744 _markStack->reset(); // discard stack contents 6745 _markStack->expand(); // expand the stack if possible 6746 } 6747 6748 // Upon stack overflow, we discard (part of) the stack, 6749 // remembering the least address amongst those discarded 6750 // in CMSCollector's _restart_address. 
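// In this parallel variant several workers may overflow the shared
// overflow stack concurrently, so the reset and expansion below are done
// under the stack's par_lock.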
6751 void ParPushOrMarkClosure::handle_stack_overflow(HeapWord* lost) { 6752 // We need to do this under a mutex to prevent other 6753 // workers from interfering with the work done below. 6754 MutexLocker ml(_overflow_stack->par_lock(), 6755 Mutex::_no_safepoint_check_flag); 6756 // Remember the least grey address discarded 6757 HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost); 6758 _collector->lower_restart_addr(ra); 6759 _overflow_stack->reset(); // discard stack contents 6760 _overflow_stack->expand(); // expand the stack if possible 6761 } 6762 6763 void PushOrMarkClosure::do_oop(oop obj) { 6764 // Ignore mark word because we are running concurrent with mutators. 6765 assert(oopDesc::is_oop_or_null(obj, true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj)); 6766 HeapWord* addr = (HeapWord*)obj; 6767 if (_span.contains(addr) && !_bitMap->isMarked(addr)) { 6768 // Oop lies in _span and isn't yet grey or black 6769 _bitMap->mark(addr); // now grey 6770 if (addr < _finger) { 6771 // the bit map iteration has already either passed, or 6772 // sampled, this bit in the bit map; we'll need to 6773 // use the marking stack to scan this oop's oops. 6774 bool simulate_overflow = false; 6775 NOT_PRODUCT( 6776 if (CMSMarkStackOverflowALot && 6777 _collector->simulate_overflow()) { 6778 // simulate a stack overflow 6779 simulate_overflow = true; 6780 } 6781 ) 6782 if (simulate_overflow || !_markStack->push(obj)) { // stack overflow 6783 log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _markStack->capacity()); 6784 assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded"); 6785 handle_stack_overflow(addr); 6786 } 6787 } 6788 // anything including and to the right of _finger 6789 // will be scanned as we iterate over the remainder of the 6790 // bit map 6791 do_yield_check(); 6792 } 6793 } 6794 6795 void ParPushOrMarkClosure::do_oop(oop obj) { 6796 // Ignore mark word because we are running concurrent with mutators. 6797 assert(oopDesc::is_oop_or_null(obj, true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj)); 6798 HeapWord* addr = (HeapWord*)obj; 6799 if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) { 6800 // Oop lies in _span and isn't yet grey or black 6801 // We read the global_finger (volatile read) strictly after marking oop 6802 bool res = _bit_map->par_mark(addr); // now grey 6803 volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr; 6804 // Should we push this marked oop on our stack? 6805 // -- if someone else marked it, nothing to do 6806 // -- if target oop is above global finger nothing to do 6807 // -- if target oop is in chunk and above local finger 6808 // then nothing to do 6809 // -- else push on work queue 6810 if ( !res // someone else marked it, they will deal with it 6811 || (addr >= *gfa) // will be scanned in a later task 6812 || (_span.contains(addr) && addr >= _finger)) { // later in this chunk 6813 return; 6814 } 6815 // the bit map iteration has already either passed, or 6816 // sampled, this bit in the bit map; we'll need to 6817 // use the marking stack to scan this oop's oops. 
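    // (In non-product builds, CMSMarkStackOverflowALot can force
    // simulate_overflow below so that the overflow-handling path gets
    // exercised.)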
6818 bool simulate_overflow = false; 6819 NOT_PRODUCT( 6820 if (CMSMarkStackOverflowALot && 6821 _collector->simulate_overflow()) { 6822 // simulate a stack overflow 6823 simulate_overflow = true; 6824 } 6825 ) 6826 if (simulate_overflow || 6827 !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) { 6828 // stack overflow 6829 log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _overflow_stack->capacity()); 6830 // We cannot assert that the overflow stack is full because 6831 // it may have been emptied since. 6832 assert(simulate_overflow || 6833 _work_queue->size() == _work_queue->max_elems(), 6834 "Else push should have succeeded"); 6835 handle_stack_overflow(addr); 6836 } 6837 do_yield_check(); 6838 } 6839 } 6840 6841 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector, 6842 MemRegion span, 6843 ReferenceDiscoverer* rd, 6844 CMSBitMap* bit_map, 6845 CMSBitMap* mod_union_table, 6846 CMSMarkStack* mark_stack, 6847 bool concurrent_precleaning): 6848 MetadataVisitingOopIterateClosure(rd), 6849 _collector(collector), 6850 _span(span), 6851 _bit_map(bit_map), 6852 _mod_union_table(mod_union_table), 6853 _mark_stack(mark_stack), 6854 _concurrent_precleaning(concurrent_precleaning) 6855 { 6856 assert(ref_discoverer() != NULL, "ref_discoverer shouldn't be NULL"); 6857 } 6858 6859 // Grey object rescan during pre-cleaning and second checkpoint phases -- 6860 // the non-parallel version (the parallel version appears further below.) 6861 void PushAndMarkClosure::do_oop(oop obj) { 6862 // Ignore mark word verification. If during concurrent precleaning, 6863 // the object monitor may be locked. If during the checkpoint 6864 // phases, the object may already have been reached by a different 6865 // path and may be at the end of the global overflow list (so 6866 // the mark word may be NULL). 6867 assert(oopDesc::is_oop_or_null(obj, true /* ignore mark word */), 6868 "Expected an oop or NULL at " PTR_FORMAT, p2i(obj)); 6869 HeapWord* addr = (HeapWord*)obj; 6870 // Check if oop points into the CMS generation 6871 // and is not marked 6872 if (_span.contains(addr) && !_bit_map->isMarked(addr)) { 6873 // a white object ... 6874 _bit_map->mark(addr); // ... now grey 6875 // push on the marking stack (grey set) 6876 bool simulate_overflow = false; 6877 NOT_PRODUCT( 6878 if (CMSMarkStackOverflowALot && 6879 _collector->simulate_overflow()) { 6880 // simulate a stack overflow 6881 simulate_overflow = true; 6882 } 6883 ) 6884 if (simulate_overflow || !_mark_stack->push(obj)) { 6885 if (_concurrent_precleaning) { 6886 // During precleaning we can just dirty the appropriate card(s) 6887 // in the mod union table, thus ensuring that the object remains 6888 // in the grey set and continue. In the case of object arrays 6889 // we need to dirty all of the cards that the object spans, 6890 // since the rescan of object arrays will be limited to the 6891 // dirty cards. 6892 // Note that no one can be interfering with us in this action 6893 // of dirtying the mod union table, so no locking or atomics 6894 // are required. 
6895         if (obj->is_objArray()) {
6896           size_t sz = obj->size();
6897           HeapWord* end_card_addr = align_up(addr + sz, CardTable::card_size);
6898           MemRegion redirty_range = MemRegion(addr, end_card_addr);
6899           assert(!redirty_range.is_empty(), "Arithmetical tautology");
6900           _mod_union_table->mark_range(redirty_range);
6901         } else {
6902           _mod_union_table->mark(addr);
6903         }
6904         _collector->_ser_pmc_preclean_ovflw++;
6905       } else {
6906         // During the remark phase, we need to remember this oop
6907         // in the overflow list.
6908         _collector->push_on_overflow_list(obj);
6909         _collector->_ser_pmc_remark_ovflw++;
6910       }
6911     }
6912   }
6913 }
6914
6915 ParPushAndMarkClosure::ParPushAndMarkClosure(CMSCollector* collector,
6916                                              MemRegion span,
6917                                              ReferenceDiscoverer* rd,
6918                                              CMSBitMap* bit_map,
6919                                              OopTaskQueue* work_queue):
6920   MetadataVisitingOopIterateClosure(rd),
6921   _collector(collector),
6922   _span(span),
6923   _bit_map(bit_map),
6924   _work_queue(work_queue)
6925 {
6926   assert(ref_discoverer() != NULL, "ref_discoverer shouldn't be NULL");
6927 }
6928
6929 // Grey object rescan during second checkpoint phase --
6930 // the parallel version.
6931 void ParPushAndMarkClosure::do_oop(oop obj) {
6932   // In the assert below, we ignore the mark word because
6933   // this oop may point to an already visited object that is
6934   // on the overflow stack (in which case the mark word has
6935   // been hijacked for chaining into the overflow stack --
6936   // if this is the last object in the overflow stack then
6937   // its mark word will be NULL). Because this object may
6938   // have been subsequently popped off the global overflow
6939   // stack, and the mark word possibly restored to the prototypical
6940   // value, by the time we get to examine this failing assert in
6941   // the debugger, is_oop_or_null(false) may subsequently start
6942   // to hold.
6943   assert(oopDesc::is_oop_or_null(obj, true),
6944          "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6945   HeapWord* addr = (HeapWord*)obj;
6946   // Check if oop points into the CMS generation
6947   // and is not marked
6948   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
6949     // a white object ...
6950     // If we manage to "claim" the object, by being the
6951     // first thread to mark it, then we push it on our
6952     // marking stack
6953     if (_bit_map->par_mark(addr)) { // ...
now grey 6954 // push on work queue (grey set) 6955 bool simulate_overflow = false; 6956 NOT_PRODUCT( 6957 if (CMSMarkStackOverflowALot && 6958 _collector->par_simulate_overflow()) { 6959 // simulate a stack overflow 6960 simulate_overflow = true; 6961 } 6962 ) 6963 if (simulate_overflow || !_work_queue->push(obj)) { 6964 _collector->par_push_on_overflow_list(obj); 6965 _collector->_par_pmc_remark_ovflw++; // imprecise OK: no need to CAS 6966 } 6967 } // Else, some other thread got there first 6968 } 6969 } 6970 6971 void CMSPrecleanRefsYieldClosure::do_yield_work() { 6972 Mutex* bml = _collector->bitMapLock(); 6973 assert_lock_strong(bml); 6974 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), 6975 "CMS thread should hold CMS token"); 6976 6977 bml->unlock(); 6978 ConcurrentMarkSweepThread::desynchronize(true); 6979 6980 _collector->stopTimer(); 6981 _collector->incrementYields(); 6982 6983 // See the comment in coordinator_yield() 6984 for (unsigned i = 0; i < CMSYieldSleepCount && 6985 ConcurrentMarkSweepThread::should_yield() && 6986 !CMSCollector::foregroundGCIsActive(); ++i) { 6987 os::sleep(Thread::current(), 1, false); 6988 } 6989 6990 ConcurrentMarkSweepThread::synchronize(true); 6991 bml->lock(); 6992 6993 _collector->startTimer(); 6994 } 6995 6996 bool CMSPrecleanRefsYieldClosure::should_return() { 6997 if (ConcurrentMarkSweepThread::should_yield()) { 6998 do_yield_work(); 6999 } 7000 return _collector->foregroundGCIsActive(); 7001 } 7002 7003 void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) { 7004 assert(((size_t)mr.start())%CardTable::card_size_in_words == 0, 7005 "mr should be aligned to start at a card boundary"); 7006 // We'd like to assert: 7007 // assert(mr.word_size()%CardTable::card_size_in_words == 0, 7008 // "mr should be a range of cards"); 7009 // However, that would be too strong in one case -- the last 7010 // partition ends at _unallocated_block which, in general, can be 7011 // an arbitrary boundary, not necessarily card aligned. 
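  // Hence the count below simply divides the region's word size by the
  // card size rather than asserting that mr.end() is card-aligned.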
7012 _num_dirty_cards += mr.word_size()/CardTable::card_size_in_words; 7013 _space->object_iterate_mem(mr, &_scan_cl); 7014 } 7015 7016 SweepClosure::SweepClosure(CMSCollector* collector, 7017 ConcurrentMarkSweepGeneration* g, 7018 CMSBitMap* bitMap, bool should_yield) : 7019 _collector(collector), 7020 _g(g), 7021 _sp(g->cmsSpace()), 7022 _limit(_sp->sweep_limit()), 7023 _freelistLock(_sp->freelistLock()), 7024 _bitMap(bitMap), 7025 _inFreeRange(false), // No free range at beginning of sweep 7026 _freeRangeInFreeLists(false), // No free range at beginning of sweep 7027 _lastFreeRangeCoalesced(false), 7028 _yield(should_yield), 7029 _freeFinger(g->used_region().start()) 7030 { 7031 NOT_PRODUCT( 7032 _numObjectsFreed = 0; 7033 _numWordsFreed = 0; 7034 _numObjectsLive = 0; 7035 _numWordsLive = 0; 7036 _numObjectsAlreadyFree = 0; 7037 _numWordsAlreadyFree = 0; 7038 _last_fc = NULL; 7039 7040 _sp->initializeIndexedFreeListArrayReturnedBytes(); 7041 _sp->dictionary()->initialize_dict_returned_bytes(); 7042 ) 7043 assert(_limit >= _sp->bottom() && _limit <= _sp->end(), 7044 "sweep _limit out of bounds"); 7045 log_develop_trace(gc, sweep)("===================="); 7046 log_develop_trace(gc, sweep)("Starting new sweep with limit " PTR_FORMAT, p2i(_limit)); 7047 } 7048 7049 void SweepClosure::print_on(outputStream* st) const { 7050 st->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")", 7051 p2i(_sp->bottom()), p2i(_sp->end())); 7052 st->print_cr("_limit = " PTR_FORMAT, p2i(_limit)); 7053 st->print_cr("_freeFinger = " PTR_FORMAT, p2i(_freeFinger)); 7054 NOT_PRODUCT(st->print_cr("_last_fc = " PTR_FORMAT, p2i(_last_fc));) 7055 st->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d", 7056 _inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced); 7057 } 7058 7059 #ifndef PRODUCT 7060 // Assertion checking only: no useful work in product mode -- 7061 // however, if any of the flags below become product flags, 7062 // you may need to review this code to see if it needs to be 7063 // enabled in product mode. 
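// The destructor below checks that no free range was left open at the
// end of the sweep and, when gc+sweep logging is enabled, reports the
// freed/live/already-free object and byte counts accumulated in the
// NOT_PRODUCT counters initialized by the constructor.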
7064 SweepClosure::~SweepClosure() { 7065 assert_lock_strong(_freelistLock); 7066 assert(_limit >= _sp->bottom() && _limit <= _sp->end(), 7067 "sweep _limit out of bounds"); 7068 if (inFreeRange()) { 7069 Log(gc, sweep) log; 7070 log.error("inFreeRange() should have been reset; dumping state of SweepClosure"); 7071 ResourceMark rm; 7072 LogStream ls(log.error()); 7073 print_on(&ls); 7074 ShouldNotReachHere(); 7075 } 7076 7077 if (log_is_enabled(Debug, gc, sweep)) { 7078 log_debug(gc, sweep)("Collected " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes", 7079 _numObjectsFreed, _numWordsFreed*sizeof(HeapWord)); 7080 log_debug(gc, sweep)("Live " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes Already free " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes", 7081 _numObjectsLive, _numWordsLive*sizeof(HeapWord), _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord)); 7082 size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree) * sizeof(HeapWord); 7083 log_debug(gc, sweep)("Total sweep: " SIZE_FORMAT " bytes", totalBytes); 7084 } 7085 7086 if (log_is_enabled(Trace, gc, sweep) && CMSVerifyReturnedBytes) { 7087 size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes(); 7088 size_t dict_returned_bytes = _sp->dictionary()->sum_dict_returned_bytes(); 7089 size_t returned_bytes = indexListReturnedBytes + dict_returned_bytes; 7090 log_trace(gc, sweep)("Returned " SIZE_FORMAT " bytes Indexed List Returned " SIZE_FORMAT " bytes Dictionary Returned " SIZE_FORMAT " bytes", 7091 returned_bytes, indexListReturnedBytes, dict_returned_bytes); 7092 } 7093 log_develop_trace(gc, sweep)("end of sweep with _limit = " PTR_FORMAT, p2i(_limit)); 7094 log_develop_trace(gc, sweep)("================"); 7095 } 7096 #endif // PRODUCT 7097 7098 void SweepClosure::initialize_free_range(HeapWord* freeFinger, 7099 bool freeRangeInFreeLists) { 7100 log_develop_trace(gc, sweep)("---- Start free range at " PTR_FORMAT " with free block (%d)", 7101 p2i(freeFinger), freeRangeInFreeLists); 7102 assert(!inFreeRange(), "Trampling existing free range"); 7103 set_inFreeRange(true); 7104 set_lastFreeRangeCoalesced(false); 7105 7106 set_freeFinger(freeFinger); 7107 set_freeRangeInFreeLists(freeRangeInFreeLists); 7108 if (CMSTestInFreeList) { 7109 if (freeRangeInFreeLists) { 7110 FreeChunk* fc = (FreeChunk*) freeFinger; 7111 assert(fc->is_free(), "A chunk on the free list should be free."); 7112 assert(fc->size() > 0, "Free range should have a size"); 7113 assert(_sp->verify_chunk_in_free_list(fc), "Chunk is not in free lists"); 7114 } 7115 } 7116 } 7117 7118 // Note that the sweeper runs concurrently with mutators. Thus, 7119 // it is possible for direct allocation in this generation to happen 7120 // in the middle of the sweep. Note that the sweeper also coalesces 7121 // contiguous free blocks. Thus, unless the sweeper and the allocator 7122 // synchronize appropriately freshly allocated blocks may get swept up. 7123 // This is accomplished by the sweeper locking the free lists while 7124 // it is sweeping. Thus blocks that are determined to be free are 7125 // indeed free. There is however one additional complication: 7126 // blocks that have been allocated since the final checkpoint and 7127 // mark, will not have been marked and so would be treated as 7128 // unreachable and swept up. To prevent this, the allocator marks 7129 // the bit map when allocating during the sweep phase. 
This leads, 7130 // however, to a further complication -- objects may have been allocated 7131 // but not yet initialized -- in the sense that the header isn't yet 7132 // installed. The sweeper can not then determine the size of the block 7133 // in order to skip over it. To deal with this case, we use a technique 7134 // (due to Printezis) to encode such uninitialized block sizes in the 7135 // bit map. Since the bit map uses a bit per every HeapWord, but the 7136 // CMS generation has a minimum object size of 3 HeapWords, it follows 7137 // that "normal marks" won't be adjacent in the bit map (there will 7138 // always be at least two 0 bits between successive 1 bits). We make use 7139 // of these "unused" bits to represent uninitialized blocks -- the bit 7140 // corresponding to the start of the uninitialized object and the next 7141 // bit are both set. Finally, a 1 bit marks the end of the object that 7142 // started with the two consecutive 1 bits to indicate its potentially 7143 // uninitialized state. 7144 7145 size_t SweepClosure::do_blk_careful(HeapWord* addr) { 7146 FreeChunk* fc = (FreeChunk*)addr; 7147 size_t res; 7148 7149 // Check if we are done sweeping. Below we check "addr >= _limit" rather 7150 // than "addr == _limit" because although _limit was a block boundary when 7151 // we started the sweep, it may no longer be one because heap expansion 7152 // may have caused us to coalesce the block ending at the address _limit 7153 // with a newly expanded chunk (this happens when _limit was set to the 7154 // previous _end of the space), so we may have stepped past _limit: 7155 // see the following Zeno-like trail of CRs 6977970, 7008136, 7042740. 7156 if (addr >= _limit) { // we have swept up to or past the limit: finish up 7157 assert(_limit >= _sp->bottom() && _limit <= _sp->end(), 7158 "sweep _limit out of bounds"); 7159 assert(addr < _sp->end(), "addr out of bounds"); 7160 // Flush any free range we might be holding as a single 7161 // coalesced chunk to the appropriate free list. 7162 if (inFreeRange()) { 7163 assert(freeFinger() >= _sp->bottom() && freeFinger() < _limit, 7164 "freeFinger() " PTR_FORMAT " is out of bounds", p2i(freeFinger())); 7165 flush_cur_free_chunk(freeFinger(), 7166 pointer_delta(addr, freeFinger())); 7167 log_develop_trace(gc, sweep)("Sweep: last chunk: put_free_blk " PTR_FORMAT " (" SIZE_FORMAT ") [coalesced:%d]", 7168 p2i(freeFinger()), pointer_delta(addr, freeFinger()), 7169 lastFreeRangeCoalesced() ? 1 : 0); 7170 } 7171 7172 // help the iterator loop finish 7173 return pointer_delta(_sp->end(), addr); 7174 } 7175 7176 assert(addr < _limit, "sweep invariant"); 7177 // check if we should yield 7178 do_yield_check(addr); 7179 if (fc->is_free()) { 7180 // Chunk that is already free 7181 res = fc->size(); 7182 do_already_free_chunk(fc); 7183 debug_only(_sp->verifyFreeLists()); 7184 // If we flush the chunk at hand in lookahead_and_flush() 7185 // and it's coalesced with a preceding chunk, then the 7186 // process of "mangling" the payload of the coalesced block 7187 // will cause erasure of the size information from the 7188 // (erstwhile) header of all the coalesced blocks but the 7189 // first, so the first disjunct in the assert will not hold 7190 // in that specific case (in which case the second disjunct 7191 // will hold). 
7192 assert(res == fc->size() || ((HeapWord*)fc) + res >= _limit, 7193 "Otherwise the size info doesn't change at this step"); 7194 NOT_PRODUCT( 7195 _numObjectsAlreadyFree++; 7196 _numWordsAlreadyFree += res; 7197 ) 7198 NOT_PRODUCT(_last_fc = fc;) 7199 } else if (!_bitMap->isMarked(addr)) { 7200 // Chunk is fresh garbage 7201 res = do_garbage_chunk(fc); 7202 debug_only(_sp->verifyFreeLists()); 7203 NOT_PRODUCT( 7204 _numObjectsFreed++; 7205 _numWordsFreed += res; 7206 ) 7207 } else { 7208 // Chunk that is alive. 7209 res = do_live_chunk(fc); 7210 debug_only(_sp->verifyFreeLists()); 7211 NOT_PRODUCT( 7212 _numObjectsLive++; 7213 _numWordsLive += res; 7214 ) 7215 } 7216 return res; 7217 } 7218 7219 // For the smart allocation, record following 7220 // split deaths - a free chunk is removed from its free list because 7221 // it is being split into two or more chunks. 7222 // split birth - a free chunk is being added to its free list because 7223 // a larger free chunk has been split and resulted in this free chunk. 7224 // coal death - a free chunk is being removed from its free list because 7225 // it is being coalesced into a large free chunk. 7226 // coal birth - a free chunk is being added to its free list because 7227 // it was created when two or more free chunks where coalesced into 7228 // this free chunk. 7229 // 7230 // These statistics are used to determine the desired number of free 7231 // chunks of a given size. The desired number is chosen to be relative 7232 // to the end of a CMS sweep. The desired number at the end of a sweep 7233 // is the 7234 // count-at-end-of-previous-sweep (an amount that was enough) 7235 // - count-at-beginning-of-current-sweep (the excess) 7236 // + split-births (gains in this size during interval) 7237 // - split-deaths (demands on this size during interval) 7238 // where the interval is from the end of one sweep to the end of the 7239 // next. 7240 // 7241 // When sweeping the sweeper maintains an accumulated chunk which is 7242 // the chunk that is made up of chunks that have been coalesced. That 7243 // will be termed the left-hand chunk. A new chunk of garbage that 7244 // is being considered for coalescing will be referred to as the 7245 // right-hand chunk. 7246 // 7247 // When making a decision on whether to coalesce a right-hand chunk with 7248 // the current left-hand chunk, the current count vs. the desired count 7249 // of the left-hand chunk is considered. Also if the right-hand chunk 7250 // is near the large chunk at the end of the heap (see 7251 // ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the 7252 // left-hand chunk is coalesced. 7253 // 7254 // When making a decision about whether to split a chunk, the desired count 7255 // vs. the current count of the candidate to be split is also considered. 7256 // If the candidate is underpopulated (currently fewer chunks than desired) 7257 // a chunk of an overpopulated (currently more chunks than desired) size may 7258 // be chosen. The "hint" associated with a free list, if non-null, points 7259 // to a free list which may be overpopulated. 7260 // 7261 7262 void SweepClosure::do_already_free_chunk(FreeChunk* fc) { 7263 const size_t size = fc->size(); 7264 // Chunks that cannot be coalesced are not in the 7265 // free lists. 
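  // So only coalescable chunks are cross-checked against the free lists when
  // CMSTestInFreeList is set; the bit map, in contrast, must be clear over the
  // whole block in every case, as verified just below.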
7266 if (CMSTestInFreeList && !fc->cantCoalesce()) { 7267 assert(_sp->verify_chunk_in_free_list(fc), 7268 "free chunk should be in free lists"); 7269 } 7270 // a chunk that is already free, should not have been 7271 // marked in the bit map 7272 HeapWord* const addr = (HeapWord*) fc; 7273 assert(!_bitMap->isMarked(addr), "free chunk should be unmarked"); 7274 // Verify that the bit map has no bits marked between 7275 // addr and purported end of this block. 7276 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size); 7277 7278 // Some chunks cannot be coalesced under any circumstances. 7279 // See the definition of cantCoalesce(). 7280 if (!fc->cantCoalesce()) { 7281 // This chunk can potentially be coalesced. 7282 // All the work is done in 7283 do_post_free_or_garbage_chunk(fc, size); 7284 // Note that if the chunk is not coalescable (the else arm 7285 // below), we unconditionally flush, without needing to do 7286 // a "lookahead," as we do below. 7287 if (inFreeRange()) lookahead_and_flush(fc, size); 7288 } else { 7289 // Code path common to both original and adaptive free lists. 7290 7291 // cant coalesce with previous block; this should be treated 7292 // as the end of a free run if any 7293 if (inFreeRange()) { 7294 // we kicked some butt; time to pick up the garbage 7295 assert(freeFinger() < addr, "freeFinger points too high"); 7296 flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger())); 7297 } 7298 // else, nothing to do, just continue 7299 } 7300 } 7301 7302 size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) { 7303 // This is a chunk of garbage. It is not in any free list. 7304 // Add it to a free list or let it possibly be coalesced into 7305 // a larger chunk. 7306 HeapWord* const addr = (HeapWord*) fc; 7307 const size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()); 7308 7309 // Verify that the bit map has no bits marked between 7310 // addr and purported end of just dead object. 7311 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size); 7312 do_post_free_or_garbage_chunk(fc, size); 7313 7314 assert(_limit >= addr + size, 7315 "A freshly garbage chunk can't possibly straddle over _limit"); 7316 if (inFreeRange()) lookahead_and_flush(fc, size); 7317 return size; 7318 } 7319 7320 size_t SweepClosure::do_live_chunk(FreeChunk* fc) { 7321 HeapWord* addr = (HeapWord*) fc; 7322 // The sweeper has just found a live object. Return any accumulated 7323 // left hand chunk to the free lists. 7324 if (inFreeRange()) { 7325 assert(freeFinger() < addr, "freeFinger points too high"); 7326 flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger())); 7327 } 7328 7329 // This object is live: we'd normally expect this to be 7330 // an oop, and like to assert the following: 7331 // assert(oopDesc::is_oop(oop(addr)), "live block should be an oop"); 7332 // However, as we commented above, this may be an object whose 7333 // header hasn't yet been initialized. 7334 size_t size; 7335 assert(_bitMap->isMarked(addr), "Tautology for this control point"); 7336 if (_bitMap->isMarked(addr + 1)) { 7337 // Determine the size from the bit map, rather than trying to 7338 // compute it from the object header. 
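    // Printezis marks: the bits at addr and addr+1 are both set, so this may be
    // an object whose header has not yet been installed. Its last word was also
    // marked when it was allocated, so the block size can be recovered from the
    // bit map as (nextOneAddr + 1) - addr, as computed below.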
7339 HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2); 7340 size = pointer_delta(nextOneAddr + 1, addr); 7341 assert(size == CompactibleFreeListSpace::adjustObjectSize(size), 7342 "alignment problem"); 7343 7344 #ifdef ASSERT 7345 if (oop(addr)->klass_or_null_acquire() != NULL) { 7346 // Ignore mark word because we are running concurrent with mutators 7347 assert(oopDesc::is_oop(oop(addr), true), "live block should be an oop"); 7348 assert(size == 7349 CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()), 7350 "P-mark and computed size do not agree"); 7351 } 7352 #endif 7353 7354 } else { 7355 // This should be an initialized object that's alive. 7356 assert(oop(addr)->klass_or_null_acquire() != NULL, 7357 "Should be an initialized object"); 7358 // Ignore mark word because we are running concurrent with mutators 7359 assert(oopDesc::is_oop(oop(addr), true), "live block should be an oop"); 7360 // Verify that the bit map has no bits marked between 7361 // addr and purported end of this block. 7362 size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()); 7363 assert(size >= 3, "Necessary for Printezis marks to work"); 7364 assert(!_bitMap->isMarked(addr+1), "Tautology for this control point"); 7365 DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);) 7366 } 7367 return size; 7368 } 7369 7370 void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc, 7371 size_t chunkSize) { 7372 // do_post_free_or_garbage_chunk() should only be called in the case 7373 // of the adaptive free list allocator. 7374 const bool fcInFreeLists = fc->is_free(); 7375 assert((HeapWord*)fc <= _limit, "sweep invariant"); 7376 if (CMSTestInFreeList && fcInFreeLists) { 7377 assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists"); 7378 } 7379 7380 log_develop_trace(gc, sweep)(" -- pick up another chunk at " PTR_FORMAT " (" SIZE_FORMAT ")", p2i(fc), chunkSize); 7381 7382 HeapWord* const fc_addr = (HeapWord*) fc; 7383 7384 bool coalesce = false; 7385 const size_t left = pointer_delta(fc_addr, freeFinger()); 7386 const size_t right = chunkSize; 7387 switch (FLSCoalescePolicy) { 7388 // numeric value forms a coalition aggressiveness metric 7389 case 0: { // never coalesce 7390 coalesce = false; 7391 break; 7392 } 7393 case 1: { // coalesce if left & right chunks on overpopulated lists 7394 coalesce = _sp->coalOverPopulated(left) && 7395 _sp->coalOverPopulated(right); 7396 break; 7397 } 7398 case 2: { // coalesce if left chunk on overpopulated list (default) 7399 coalesce = _sp->coalOverPopulated(left); 7400 break; 7401 } 7402 case 3: { // coalesce if left OR right chunk on overpopulated list 7403 coalesce = _sp->coalOverPopulated(left) || 7404 _sp->coalOverPopulated(right); 7405 break; 7406 } 7407 case 4: { // always coalesce 7408 coalesce = true; 7409 break; 7410 } 7411 default: 7412 ShouldNotReachHere(); 7413 } 7414 7415 // Should the current free range be coalesced? 7416 // If the chunk is in a free range and either we decided to coalesce above 7417 // or the chunk is near the large block at the end of the heap 7418 // (isNearLargestChunk() returns true), then coalesce this chunk. 7419 const bool doCoalesce = inFreeRange() 7420 && (coalesce || _g->isNearLargestChunk(fc_addr)); 7421 if (doCoalesce) { 7422 // Coalesce the current free range on the left with the new 7423 // chunk on the right. If either is on a free list, 7424 // it must be removed from the list and stashed in the closure. 
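    // Handle the left-hand (accumulated) range first: if it is itself a chunk
    // sitting in the free lists, record a coalescence death and remove it; the
    // right-hand chunk fc is handled the same way just below.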
7425 if (freeRangeInFreeLists()) { 7426 FreeChunk* const ffc = (FreeChunk*)freeFinger(); 7427 assert(ffc->size() == pointer_delta(fc_addr, freeFinger()), 7428 "Size of free range is inconsistent with chunk size."); 7429 if (CMSTestInFreeList) { 7430 assert(_sp->verify_chunk_in_free_list(ffc), 7431 "Chunk is not in free lists"); 7432 } 7433 _sp->coalDeath(ffc->size()); 7434 _sp->removeFreeChunkFromFreeLists(ffc); 7435 set_freeRangeInFreeLists(false); 7436 } 7437 if (fcInFreeLists) { 7438 _sp->coalDeath(chunkSize); 7439 assert(fc->size() == chunkSize, 7440 "The chunk has the wrong size or is not in the free lists"); 7441 _sp->removeFreeChunkFromFreeLists(fc); 7442 } 7443 set_lastFreeRangeCoalesced(true); 7444 print_free_block_coalesced(fc); 7445 } else { // not in a free range and/or should not coalesce 7446 // Return the current free range and start a new one. 7447 if (inFreeRange()) { 7448 // In a free range but cannot coalesce with the right hand chunk. 7449 // Put the current free range into the free lists. 7450 flush_cur_free_chunk(freeFinger(), 7451 pointer_delta(fc_addr, freeFinger())); 7452 } 7453 // Set up for new free range. Pass along whether the right hand 7454 // chunk is in the free lists. 7455 initialize_free_range((HeapWord*)fc, fcInFreeLists); 7456 } 7457 } 7458 7459 // Lookahead flush: 7460 // If we are tracking a free range, and this is the last chunk that 7461 // we'll look at because its end crosses past _limit, we'll preemptively 7462 // flush it along with any free range we may be holding on to. Note that 7463 // this can be the case only for an already free or freshly garbage 7464 // chunk. If this block is an object, it can never straddle 7465 // over _limit. The "straddling" occurs when _limit is set at 7466 // the previous end of the space when this cycle started, and 7467 // a subsequent heap expansion caused the previously co-terminal 7468 // free block to be coalesced with the newly expanded portion, 7469 // thus rendering _limit a non-block-boundary making it dangerous 7470 // for the sweeper to step over and examine. 7471 void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) { 7472 assert(inFreeRange(), "Should only be called if currently in a free range."); 7473 HeapWord* const eob = ((HeapWord*)fc) + chunk_size; 7474 assert(_sp->used_region().contains(eob - 1), 7475 "eob = " PTR_FORMAT " eob-1 = " PTR_FORMAT " _limit = " PTR_FORMAT 7476 " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")" 7477 " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")", 7478 p2i(eob), p2i(eob-1), p2i(_limit), p2i(_sp->bottom()), p2i(_sp->end()), p2i(fc), chunk_size); 7479 if (eob >= _limit) { 7480 assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit"); 7481 log_develop_trace(gc, sweep)("_limit " PTR_FORMAT " reached or crossed by block " 7482 "[" PTR_FORMAT "," PTR_FORMAT ") in space " 7483 "[" PTR_FORMAT "," PTR_FORMAT ")", 7484 p2i(_limit), p2i(fc), p2i(eob), p2i(_sp->bottom()), p2i(_sp->end())); 7485 // Return the storage we are tracking back into the free lists. 7486 log_develop_trace(gc, sweep)("Flushing ... 
"); 7487 assert(freeFinger() < eob, "Error"); 7488 flush_cur_free_chunk( freeFinger(), pointer_delta(eob, freeFinger())); 7489 } 7490 } 7491 7492 void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) { 7493 assert(inFreeRange(), "Should only be called if currently in a free range."); 7494 assert(size > 0, 7495 "A zero sized chunk cannot be added to the free lists."); 7496 if (!freeRangeInFreeLists()) { 7497 if (CMSTestInFreeList) { 7498 FreeChunk* fc = (FreeChunk*) chunk; 7499 fc->set_size(size); 7500 assert(!_sp->verify_chunk_in_free_list(fc), 7501 "chunk should not be in free lists yet"); 7502 } 7503 log_develop_trace(gc, sweep)(" -- add free block " PTR_FORMAT " (" SIZE_FORMAT ") to free lists", p2i(chunk), size); 7504 // A new free range is going to be starting. The current 7505 // free range has not been added to the free lists yet or 7506 // was removed so add it back. 7507 // If the current free range was coalesced, then the death 7508 // of the free range was recorded. Record a birth now. 7509 if (lastFreeRangeCoalesced()) { 7510 _sp->coalBirth(size); 7511 } 7512 _sp->addChunkAndRepairOffsetTable(chunk, size, 7513 lastFreeRangeCoalesced()); 7514 } else { 7515 log_develop_trace(gc, sweep)("Already in free list: nothing to flush"); 7516 } 7517 set_inFreeRange(false); 7518 set_freeRangeInFreeLists(false); 7519 } 7520 7521 // We take a break if we've been at this for a while, 7522 // so as to avoid monopolizing the locks involved. 7523 void SweepClosure::do_yield_work(HeapWord* addr) { 7524 // Return current free chunk being used for coalescing (if any) 7525 // to the appropriate freelist. After yielding, the next 7526 // free block encountered will start a coalescing range of 7527 // free blocks. If the next free block is adjacent to the 7528 // chunk just flushed, they will need to wait for the next 7529 // sweep to be coalesced. 7530 if (inFreeRange()) { 7531 flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger())); 7532 } 7533 7534 // First give up the locks, then yield, then re-lock. 7535 // We should probably use a constructor/destructor idiom to 7536 // do this unlock/lock or modify the MutexUnlocker class to 7537 // serve our purpose. XXX 7538 assert_lock_strong(_bitMap->lock()); 7539 assert_lock_strong(_freelistLock); 7540 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), 7541 "CMS thread should hold CMS token"); 7542 _bitMap->lock()->unlock(); 7543 _freelistLock->unlock(); 7544 ConcurrentMarkSweepThread::desynchronize(true); 7545 _collector->stopTimer(); 7546 _collector->incrementYields(); 7547 7548 // See the comment in coordinator_yield() 7549 for (unsigned i = 0; i < CMSYieldSleepCount && 7550 ConcurrentMarkSweepThread::should_yield() && 7551 !CMSCollector::foregroundGCIsActive(); ++i) { 7552 os::sleep(Thread::current(), 1, false); 7553 } 7554 7555 ConcurrentMarkSweepThread::synchronize(true); 7556 _freelistLock->lock_without_safepoint_check(); 7557 _bitMap->lock()->lock_without_safepoint_check(); 7558 _collector->startTimer(); 7559 } 7560 7561 #ifndef PRODUCT 7562 // This is actually very useful in a product build if it can 7563 // be called from the debugger. Compile it into the product 7564 // as needed. 
7565 bool debug_verify_chunk_in_free_list(FreeChunk* fc) { 7566 return debug_cms_space->verify_chunk_in_free_list(fc); 7567 } 7568 #endif 7569 7570 void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const { 7571 log_develop_trace(gc, sweep)("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")", 7572 p2i(fc), fc->size()); 7573 } 7574 7575 // CMSIsAliveClosure 7576 bool CMSIsAliveClosure::do_object_b(oop obj) { 7577 HeapWord* addr = (HeapWord*)obj; 7578 return addr != NULL && 7579 (!_span.contains(addr) || _bit_map->isMarked(addr)); 7580 } 7581 7582 CMSKeepAliveClosure::CMSKeepAliveClosure( CMSCollector* collector, 7583 MemRegion span, 7584 CMSBitMap* bit_map, CMSMarkStack* mark_stack, 7585 bool cpc): 7586 _collector(collector), 7587 _span(span), 7588 _mark_stack(mark_stack), 7589 _bit_map(bit_map), 7590 _concurrent_precleaning(cpc) { 7591 assert(!_span.is_empty(), "Empty span could spell trouble"); 7592 } 7593 7594 7595 // CMSKeepAliveClosure: the serial version 7596 void CMSKeepAliveClosure::do_oop(oop obj) { 7597 HeapWord* addr = (HeapWord*)obj; 7598 if (_span.contains(addr) && 7599 !_bit_map->isMarked(addr)) { 7600 _bit_map->mark(addr); 7601 bool simulate_overflow = false; 7602 NOT_PRODUCT( 7603 if (CMSMarkStackOverflowALot && 7604 _collector->simulate_overflow()) { 7605 // simulate a stack overflow 7606 simulate_overflow = true; 7607 } 7608 ) 7609 if (simulate_overflow || !_mark_stack->push(obj)) { 7610 if (_concurrent_precleaning) { 7611 // We dirty the overflown object and let the remark 7612 // phase deal with it. 7613 assert(_collector->overflow_list_is_empty(), "Error"); 7614 // In the case of object arrays, we need to dirty all of 7615 // the cards that the object spans. No locking or atomics 7616 // are needed since no one else can be mutating the mod union 7617 // table. 7618 if (obj->is_objArray()) { 7619 size_t sz = obj->size(); 7620 HeapWord* end_card_addr = align_up(addr + sz, CardTable::card_size); 7621 MemRegion redirty_range = MemRegion(addr, end_card_addr); 7622 assert(!redirty_range.is_empty(), "Arithmetical tautology"); 7623 _collector->_modUnionTable.mark_range(redirty_range); 7624 } else { 7625 _collector->_modUnionTable.mark(addr); 7626 } 7627 _collector->_ser_kac_preclean_ovflw++; 7628 } else { 7629 _collector->push_on_overflow_list(obj); 7630 _collector->_ser_kac_ovflw++; 7631 } 7632 } 7633 } 7634 } 7635 7636 // CMSParKeepAliveClosure: a parallel version of the above. 7637 // The work queues are private to each closure (thread), 7638 // but (may be) available for stealing by other threads. 7639 void CMSParKeepAliveClosure::do_oop(oop obj) { 7640 HeapWord* addr = (HeapWord*)obj; 7641 if (_span.contains(addr) && 7642 !_bit_map->isMarked(addr)) { 7643 // In general, during recursive tracing, several threads 7644 // may be concurrently getting here; the first one to 7645 // "tag" it, claims it. 
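    // par_mark() performs an atomic test-and-set of the mark bit and returns
    // true only for the one thread that actually set it, so exactly one thread
    // claims the object and pushes it onto its work queue.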
7646 if (_bit_map->par_mark(addr)) { 7647 bool res = _work_queue->push(obj); 7648 assert(res, "Low water mark should be much less than capacity"); 7649 // Do a recursive trim in the hope that this will keep 7650 // stack usage lower, but leave some oops for potential stealers 7651 trim_queue(_low_water_mark); 7652 } // Else, another thread got there first 7653 } 7654 } 7655 7656 void CMSParKeepAliveClosure::trim_queue(uint max) { 7657 while (_work_queue->size() > max) { 7658 oop new_oop; 7659 if (_work_queue->pop_local(new_oop)) { 7660 assert(new_oop != NULL && oopDesc::is_oop(new_oop), "Expected an oop"); 7661 assert(_bit_map->isMarked((HeapWord*)new_oop), 7662 "no white objects on this stack!"); 7663 assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop"); 7664 // iterate over the oops in this oop, marking and pushing 7665 // the ones in CMS heap (i.e. in _span). 7666 new_oop->oop_iterate(&_mark_and_push); 7667 } 7668 } 7669 } 7670 7671 CMSInnerParMarkAndPushClosure::CMSInnerParMarkAndPushClosure( 7672 CMSCollector* collector, 7673 MemRegion span, CMSBitMap* bit_map, 7674 OopTaskQueue* work_queue): 7675 _collector(collector), 7676 _span(span), 7677 _work_queue(work_queue), 7678 _bit_map(bit_map) { } 7679 7680 void CMSInnerParMarkAndPushClosure::do_oop(oop obj) { 7681 HeapWord* addr = (HeapWord*)obj; 7682 if (_span.contains(addr) && 7683 !_bit_map->isMarked(addr)) { 7684 if (_bit_map->par_mark(addr)) { 7685 bool simulate_overflow = false; 7686 NOT_PRODUCT( 7687 if (CMSMarkStackOverflowALot && 7688 _collector->par_simulate_overflow()) { 7689 // simulate a stack overflow 7690 simulate_overflow = true; 7691 } 7692 ) 7693 if (simulate_overflow || !_work_queue->push(obj)) { 7694 _collector->par_push_on_overflow_list(obj); 7695 _collector->_par_kac_ovflw++; 7696 } 7697 } // Else another thread got there already 7698 } 7699 } 7700 7701 ////////////////////////////////////////////////////////////////// 7702 // CMSExpansionCause ///////////////////////////// 7703 ////////////////////////////////////////////////////////////////// 7704 const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) { 7705 switch (cause) { 7706 case _no_expansion: 7707 return "No expansion"; 7708 case _satisfy_free_ratio: 7709 return "Free ratio"; 7710 case _satisfy_promotion: 7711 return "Satisfy promotion"; 7712 case _satisfy_allocation: 7713 return "allocation"; 7714 case _allocate_par_lab: 7715 return "Par LAB"; 7716 case _allocate_par_spooling_space: 7717 return "Par Spooling Space"; 7718 case _adaptive_size_policy: 7719 return "Ergonomics"; 7720 default: 7721 return "unknown"; 7722 } 7723 } 7724 7725 void CMSDrainMarkingStackClosure::do_void() { 7726 // the max number to take from overflow list at a time 7727 const size_t num = _mark_stack->capacity()/4; 7728 assert(!_concurrent_precleaning || _collector->overflow_list_is_empty(), 7729 "Overflow list should be NULL during concurrent phases"); 7730 while (!_mark_stack->isEmpty() || 7731 // if stack is empty, check the overflow list 7732 _collector->take_from_overflow_list(num, _mark_stack)) { 7733 oop obj = _mark_stack->pop(); 7734 HeapWord* addr = (HeapWord*)obj; 7735 assert(_span.contains(addr), "Should be within span"); 7736 assert(_bit_map->isMarked(addr), "Should be marked"); 7737 assert(oopDesc::is_oop(obj), "Should be an oop"); 7738 obj->oop_iterate(_keep_alive); 7739 } 7740 } 7741 7742 void CMSParDrainMarkingStackClosure::do_void() { 7743 // drain queue 7744 trim_queue(0); 7745 } 7746 7747 // Trim our work_queue so its length is below 
max at return 7748 void CMSParDrainMarkingStackClosure::trim_queue(uint max) { 7749 while (_work_queue->size() > max) { 7750 oop new_oop; 7751 if (_work_queue->pop_local(new_oop)) { 7752 assert(oopDesc::is_oop(new_oop), "Expected an oop"); 7753 assert(_bit_map->isMarked((HeapWord*)new_oop), 7754 "no white objects on this stack!"); 7755 assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop"); 7756 // iterate over the oops in this oop, marking and pushing 7757 // the ones in CMS heap (i.e. in _span). 7758 new_oop->oop_iterate(&_mark_and_push); 7759 } 7760 } 7761 } 7762 7763 //////////////////////////////////////////////////////////////////// 7764 // Support for Marking Stack Overflow list handling and related code 7765 //////////////////////////////////////////////////////////////////// 7766 // Much of the following code is similar in shape and spirit to the 7767 // code used in ParNewGC. We should try and share that code 7768 // as much as possible in the future. 7769 7770 #ifndef PRODUCT 7771 // Debugging support for CMSStackOverflowALot 7772 7773 // It's OK to call this multi-threaded; the worst thing 7774 // that can happen is that we'll get a bunch of closely 7775 // spaced simulated overflows, but that's OK, in fact 7776 // probably good as it would exercise the overflow code 7777 // under contention. 7778 bool CMSCollector::simulate_overflow() { 7779 if (_overflow_counter-- <= 0) { // just being defensive 7780 _overflow_counter = CMSMarkStackOverflowInterval; 7781 return true; 7782 } else { 7783 return false; 7784 } 7785 } 7786 7787 bool CMSCollector::par_simulate_overflow() { 7788 return simulate_overflow(); 7789 } 7790 #endif 7791 7792 // Single-threaded 7793 bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) { 7794 assert(stack->isEmpty(), "Expected precondition"); 7795 assert(stack->capacity() > num, "Shouldn't bite more than can chew"); 7796 size_t i = num; 7797 oop cur = _overflow_list; 7798 const markOop proto = markOopDesc::prototype(); 7799 NOT_PRODUCT(ssize_t n = 0;) 7800 for (oop next; i > 0 && cur != NULL; cur = next, i--) { 7801 next = oop(cur->mark_raw()); 7802 cur->set_mark_raw(proto); // until proven otherwise 7803 assert(oopDesc::is_oop(cur), "Should be an oop"); 7804 bool res = stack->push(cur); 7805 assert(res, "Bit off more than can chew?"); 7806 NOT_PRODUCT(n++;) 7807 } 7808 _overflow_list = cur; 7809 #ifndef PRODUCT 7810 assert(_num_par_pushes >= n, "Too many pops?"); 7811 _num_par_pushes -=n; 7812 #endif 7813 return !stack->isEmpty(); 7814 } 7815 7816 #define BUSY (cast_to_oop<intptr_t>(0x1aff1aff)) 7817 // (MT-safe) Get a prefix of at most "num" from the list. 7818 // The overflow list is chained through the mark word of 7819 // each object in the list. We fetch the entire list, 7820 // break off a prefix of the right size and return the 7821 // remainder. If other threads try to take objects from 7822 // the overflow list at that time, they will wait for 7823 // some time to see if data becomes available. If (and 7824 // only if) another thread places one or more object(s) 7825 // on the global list before we have returned the suffix 7826 // to the global list, we will walk down our local list 7827 // to find its end and append the global list to 7828 // our suffix before returning it. This suffix walk can 7829 // prove to be expensive (quadratic in the amount of traffic) 7830 // when there are many objects in the overflow list and 7831 // there is much producer-consumer contention on the list. 
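// A rough sketch of the claim/return protocol used below (illustrative only,
// using the BUSY sentinel defined above):
//   prefix = Atomic::xchg(BUSY, &_overflow_list);        // claim the entire list
//   <detach up to "num" elements from the front of prefix>
//   Atomic::cmpxchg(suffix_head, &_overflow_list, BUSY); // fast path: return suffix
//   // If the CAS fails because another thread pushed in the meantime, walk to
//   // the suffix tail and splice the new global list behind it in a CAS loop.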
7832 // *NOTE*: The overflow list manipulation code here and 7833 // in ParNewGeneration:: are very similar in shape, 7834 // except that in the ParNew case we use the old (from/eden) 7835 // copy of the object to thread the list via its klass word. 7836 // Because of the common code, if you make any changes in 7837 // the code below, please check the ParNew version to see if 7838 // similar changes might be needed. 7839 // CR 6797058 has been filed to consolidate the common code. 7840 bool CMSCollector::par_take_from_overflow_list(size_t num, 7841 OopTaskQueue* work_q, 7842 int no_of_gc_threads) { 7843 assert(work_q->size() == 0, "First empty local work queue"); 7844 assert(num < work_q->max_elems(), "Can't bite more than we can chew"); 7845 if (_overflow_list == NULL) { 7846 return false; 7847 } 7848 // Grab the entire list; we'll put back a suffix 7849 oop prefix = cast_to_oop(Atomic::xchg((oopDesc*)BUSY, &_overflow_list)); 7850 Thread* tid = Thread::current(); 7851 // Before "no_of_gc_threads" was introduced CMSOverflowSpinCount was 7852 // set to ParallelGCThreads. 7853 size_t CMSOverflowSpinCount = (size_t) no_of_gc_threads; // was ParallelGCThreads; 7854 size_t sleep_time_millis = MAX2((size_t)1, num/100); 7855 // If the list is busy, we spin for a short while, 7856 // sleeping between attempts to get the list. 7857 for (size_t spin = 0; prefix == BUSY && spin < CMSOverflowSpinCount; spin++) { 7858 os::sleep(tid, sleep_time_millis, false); 7859 if (_overflow_list == NULL) { 7860 // Nothing left to take 7861 return false; 7862 } else if (_overflow_list != BUSY) { 7863 // Try and grab the prefix 7864 prefix = cast_to_oop(Atomic::xchg((oopDesc*)BUSY, &_overflow_list)); 7865 } 7866 } 7867 // If the list was found to be empty, or we spun long 7868 // enough, we give up and return empty-handed. If we leave 7869 // the list in the BUSY state below, it must be the case that 7870 // some other thread holds the overflow list and will set it 7871 // to a non-BUSY state in the future. 7872 if (prefix == NULL || prefix == BUSY) { 7873 // Nothing to take or waited long enough 7874 if (prefix == NULL) { 7875 // Write back the NULL in case we overwrote it with BUSY above 7876 // and it is still the same value. 7877 Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY); 7878 } 7879 return false; 7880 } 7881 assert(prefix != NULL && prefix != BUSY, "Error"); 7882 size_t i = num; 7883 oop cur = prefix; 7884 // Walk down the first "num" objects, unless we reach the end. 7885 for (; i > 1 && cur->mark_raw() != NULL; cur = oop(cur->mark_raw()), i--); 7886 if (cur->mark_raw() == NULL) { 7887 // We have "num" or fewer elements in the list, so there 7888 // is nothing to return to the global list. 7889 // Write back the NULL in lieu of the BUSY we wrote 7890 // above, if it is still the same value. 7891 if (_overflow_list == BUSY) { 7892 Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY); 7893 } 7894 } else { 7895 // Chop off the suffix and return it to the global list. 7896 assert(cur->mark_raw() != BUSY, "Error"); 7897 oop suffix_head = cur->mark_raw(); // suffix will be put back on global list 7898 cur->set_mark_raw(NULL); // break off suffix 7899 // It's possible that the list is still in the empty(busy) state 7900 // we left it in a short while ago; in that case we may be 7901 // able to place back the suffix without incurring the cost 7902 // of a walk down the list. 
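    // Fast path: while the global list still reads BUSY (or NULL), a single
    // successful cmpxchg installs suffix_head directly as the new list head.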
7903 oop observed_overflow_list = _overflow_list; 7904 oop cur_overflow_list = observed_overflow_list; 7905 bool attached = false; 7906 while (observed_overflow_list == BUSY || observed_overflow_list == NULL) { 7907 observed_overflow_list = 7908 Atomic::cmpxchg((oopDesc*)suffix_head, &_overflow_list, (oopDesc*)cur_overflow_list); 7909 if (cur_overflow_list == observed_overflow_list) { 7910 attached = true; 7911 break; 7912 } else cur_overflow_list = observed_overflow_list; 7913 } 7914 if (!attached) { 7915 // Too bad, someone else sneaked in (at least) an element; we'll need 7916 // to do a splice. Find tail of suffix so we can prepend suffix to global 7917 // list. 7918 for (cur = suffix_head; cur->mark_raw() != NULL; cur = (oop)(cur->mark_raw())); 7919 oop suffix_tail = cur; 7920 assert(suffix_tail != NULL && suffix_tail->mark_raw() == NULL, 7921 "Tautology"); 7922 observed_overflow_list = _overflow_list; 7923 do { 7924 cur_overflow_list = observed_overflow_list; 7925 if (cur_overflow_list != BUSY) { 7926 // Do the splice ... 7927 suffix_tail->set_mark_raw(markOop(cur_overflow_list)); 7928 } else { // cur_overflow_list == BUSY 7929 suffix_tail->set_mark_raw(NULL); 7930 } 7931 // ... and try to place spliced list back on overflow_list ... 7932 observed_overflow_list = 7933 Atomic::cmpxchg((oopDesc*)suffix_head, &_overflow_list, (oopDesc*)cur_overflow_list); 7934 } while (cur_overflow_list != observed_overflow_list); 7935 // ... until we have succeeded in doing so. 7936 } 7937 } 7938 7939 // Push the prefix elements on work_q 7940 assert(prefix != NULL, "control point invariant"); 7941 const markOop proto = markOopDesc::prototype(); 7942 oop next; 7943 NOT_PRODUCT(ssize_t n = 0;) 7944 for (cur = prefix; cur != NULL; cur = next) { 7945 next = oop(cur->mark_raw()); 7946 cur->set_mark_raw(proto); // until proven otherwise 7947 assert(oopDesc::is_oop(cur), "Should be an oop"); 7948 bool res = work_q->push(cur); 7949 assert(res, "Bit off more than we can chew?"); 7950 NOT_PRODUCT(n++;) 7951 } 7952 #ifndef PRODUCT 7953 assert(_num_par_pushes >= n, "Too many pops?"); 7954 Atomic::sub(n, &_num_par_pushes); 7955 #endif 7956 return true; 7957 } 7958 7959 // Single-threaded 7960 void CMSCollector::push_on_overflow_list(oop p) { 7961 NOT_PRODUCT(_num_par_pushes++;) 7962 assert(oopDesc::is_oop(p), "Not an oop"); 7963 preserve_mark_if_necessary(p); 7964 p->set_mark_raw((markOop)_overflow_list); 7965 _overflow_list = p; 7966 } 7967 7968 // Multi-threaded; use CAS to prepend to overflow list 7969 void CMSCollector::par_push_on_overflow_list(oop p) { 7970 NOT_PRODUCT(Atomic::inc(&_num_par_pushes);) 7971 assert(oopDesc::is_oop(p), "Not an oop"); 7972 par_preserve_mark_if_necessary(p); 7973 oop observed_overflow_list = _overflow_list; 7974 oop cur_overflow_list; 7975 do { 7976 cur_overflow_list = observed_overflow_list; 7977 if (cur_overflow_list != BUSY) { 7978 p->set_mark_raw(markOop(cur_overflow_list)); 7979 } else { 7980 p->set_mark_raw(NULL); 7981 } 7982 observed_overflow_list = 7983 Atomic::cmpxchg((oopDesc*)p, &_overflow_list, (oopDesc*)cur_overflow_list); 7984 } while (cur_overflow_list != observed_overflow_list); 7985 } 7986 #undef BUSY 7987 7988 // Single threaded 7989 // General Note on GrowableArray: pushes may silently fail 7990 // because we are (temporarily) out of C-heap for expanding 7991 // the stack. The problem is quite ubiquitous and affects 7992 // a lot of code in the JVM. The prudent thing for GrowableArray 7993 // to do (for now) is to exit with an error. 
However, that may 7994 // be too draconian in some cases because the caller may be 7995 // able to recover without much harm. For such cases, we 7996 // should probably introduce a "soft_push" method which returns 7997 // an indication of success or failure with the assumption that 7998 // the caller may be able to recover from a failure; code in 7999 // the VM can then be changed, incrementally, to deal with such 8000 // failures where possible, thus, incrementally hardening the VM 8001 // in such low resource situations. 8002 void CMSCollector::preserve_mark_work(oop p, markOop m) { 8003 _preserved_oop_stack.push(p); 8004 _preserved_mark_stack.push(m); 8005 assert(m == p->mark_raw(), "Mark word changed"); 8006 assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(), 8007 "bijection"); 8008 } 8009 8010 // Single threaded 8011 void CMSCollector::preserve_mark_if_necessary(oop p) { 8012 markOop m = p->mark_raw(); 8013 if (m->must_be_preserved(p)) { 8014 preserve_mark_work(p, m); 8015 } 8016 } 8017 8018 void CMSCollector::par_preserve_mark_if_necessary(oop p) { 8019 markOop m = p->mark_raw(); 8020 if (m->must_be_preserved(p)) { 8021 MutexLocker x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); 8022 // Even though we read the mark word without holding 8023 // the lock, we are assured that it will not change 8024 // because we "own" this oop, so no other thread can 8025 // be trying to push it on the overflow list; see 8026 // the assertion in preserve_mark_work() that checks 8027 // that m == p->mark_raw(). 8028 preserve_mark_work(p, m); 8029 } 8030 } 8031 8032 // We should be able to do this multi-threaded, 8033 // a chunk of stack being a task (this is 8034 // correct because each oop only ever appears 8035 // once in the overflow list. However, it's 8036 // not very easy to completely overlap this with 8037 // other operations, so will generally not be done 8038 // until all work's been completed. Because we 8039 // expect the preserved oop stack (set) to be small, 8040 // it's probably fine to do this single-threaded. 8041 // We can explore cleverer concurrent/overlapped/parallel 8042 // processing of preserved marks if we feel the 8043 // need for this in the future. Stack overflow should 8044 // be so rare in practice and, when it happens, its 8045 // effect on performance so great that this will 8046 // likely just be in the noise anyway. 8047 void CMSCollector::restore_preserved_marks_if_any() { 8048 assert(SafepointSynchronize::is_at_safepoint(), 8049 "world should be stopped"); 8050 assert(Thread::current()->is_ConcurrentGC_thread() || 8051 Thread::current()->is_VM_thread(), 8052 "should be single-threaded"); 8053 assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(), 8054 "bijection"); 8055 8056 while (!_preserved_oop_stack.is_empty()) { 8057 oop p = _preserved_oop_stack.pop(); 8058 assert(oopDesc::is_oop(p), "Should be an oop"); 8059 assert(_span.contains(p), "oop should be in _span"); 8060 assert(p->mark_raw() == markOopDesc::prototype(), 8061 "Set when taken from overflow list"); 8062 markOop m = _preserved_mark_stack.pop(); 8063 p->set_mark_raw(m); 8064 } 8065 assert(_preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty(), 8066 "stacks were cleared above"); 8067 } 8068 8069 #ifndef PRODUCT 8070 bool CMSCollector::no_preserved_marks() const { 8071 return _preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty(); 8072 } 8073 #endif 8074 8075 // Transfer some number of overflown objects to usual marking 8076 // stack. 
Return true if some objects were transferred. 8077 bool MarkRefsIntoAndScanClosure::take_from_overflow_list() { 8078 size_t num = MIN2((size_t)(_mark_stack->capacity() - _mark_stack->length())/4, 8079 (size_t)ParGCDesiredObjsFromOverflowList); 8080 8081 bool res = _collector->take_from_overflow_list(num, _mark_stack); 8082 assert(_collector->overflow_list_is_empty() || res, 8083 "If list is not empty, we should have taken something"); 8084 assert(!res || !_mark_stack->isEmpty(), 8085 "If we took something, it should now be on our stack"); 8086 return res; 8087 } 8088 8089 size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) { 8090 size_t res = _sp->block_size_no_stall(addr, _collector); 8091 if (_sp->block_is_obj(addr)) { 8092 if (_live_bit_map->isMarked(addr)) { 8093 // It can't have been dead in a previous cycle 8094 guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!"); 8095 } else { 8096 _dead_bit_map->mark(addr); // mark the dead object 8097 } 8098 } 8099 // Could be 0, if the block size could not be computed without stalling. 8100 return res; 8101 } 8102 8103 TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause): TraceMemoryManagerStats() { 8104 GCMemoryManager* manager = CMSHeap::heap()->old_manager(); 8105 switch (phase) { 8106 case CMSCollector::InitialMarking: 8107 initialize(manager /* GC manager */ , 8108 cause /* cause of the GC */, 8109 true /* allMemoryPoolsAffected */, 8110 true /* recordGCBeginTime */, 8111 true /* recordPreGCUsage */, 8112 false /* recordPeakUsage */, 8113 false /* recordPostGCusage */, 8114 true /* recordAccumulatedGCTime */, 8115 false /* recordGCEndTime */, 8116 false /* countCollection */ ); 8117 break; 8118 8119 case CMSCollector::FinalMarking: 8120 initialize(manager /* GC manager */ , 8121 cause /* cause of the GC */, 8122 true /* allMemoryPoolsAffected */, 8123 false /* recordGCBeginTime */, 8124 false /* recordPreGCUsage */, 8125 false /* recordPeakUsage */, 8126 false /* recordPostGCusage */, 8127 true /* recordAccumulatedGCTime */, 8128 false /* recordGCEndTime */, 8129 false /* countCollection */ ); 8130 break; 8131 8132 case CMSCollector::Sweeping: 8133 initialize(manager /* GC manager */ , 8134 cause /* cause of the GC */, 8135 true /* allMemoryPoolsAffected */, 8136 false /* recordGCBeginTime */, 8137 false /* recordPreGCUsage */, 8138 true /* recordPeakUsage */, 8139 true /* recordPostGCusage */, 8140 false /* recordAccumulatedGCTime */, 8141 true /* recordGCEndTime */, 8142 true /* countCollection */ ); 8143 break; 8144 8145 default: 8146 ShouldNotReachHere(); 8147 } 8148 }