/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_concurrentMarkSweepGeneration.cpp.incl"

// statics
CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
bool CMSCollector::_full_gc_requested = false;

//////////////////////////////////////////////////////////////////
// In support of CMS/VM thread synchronization
//////////////////////////////////////////////////////////////////
// We split use of the CGC_lock into 2 "levels".
// The low-level locking is of the usual CGC_lock monitor. We introduce
// a higher level "token" (hereafter "CMS token") built on top of the
// low level monitor (hereafter "CGC lock").
// The token-passing protocol gives priority to the VM thread. The
// CMS-lock doesn't provide any fairness guarantees, but clients
// should ensure that it is only held for very short, bounded
// durations.
//
// When either of the CMS thread or the VM thread is involved in
// collection operations during which it does not want the other
// thread to interfere, it obtains the CMS token.
//
// If either thread tries to get the token while the other has
// it, that thread waits. However, if the VM thread and CMS thread
// both want the token, then the VM thread gets priority while the
// CMS thread waits. This ensures, for instance, that the "concurrent"
// phases of the CMS thread's work do not block out the VM thread
// for long periods of time as the CMS thread continues to hog
// the token. (See bug 4616232).
//
// The baton-passing functions are, however, controlled by the
// flags _foregroundGCShouldWait and _foregroundGCIsActive,
// and here the low-level CMS lock, not the high level token,
// ensures mutual exclusion.
//
// Two important conditions that we have to satisfy:
// 1. if a thread does a low-level wait on the CMS lock, then it
//    relinquishes the CMS token if it were holding that token
//    when it acquired the low-level CMS lock.
// 2. any low-level notifications on the low-level lock
//    should only be sent when a thread has relinquished the token.
//
// In the absence of either property, we'd have potential deadlock.
//
// We protect each of the CMS (concurrent and sequential) phases
// with the CMS _token_, not the CMS _lock_.
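//
// As an illustrative sketch (assumed usage pattern, mirroring the
// CMSTokenSync class defined below): code that must hold the token for
// the duration of a phase brackets the phase with a stack object, e.g.
//
//   {
//     CMSTokenSync ts(true /* is_cms_thread */);
//     ... do work that must exclude the other thread ...
//   } // token relinquished by the destructor
//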
//
// The only code protected by CMS lock is the token acquisition code
// itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the
// baton-passing code.
//
// Unfortunately, I couldn't come up with a good abstraction to factor and
// hide the naked CGC_lock manipulation in the baton-passing code
// further below. That's something we should try to do. Also, the proof
// of correctness of this 2-level locking scheme is far from obvious,
// and potentially quite slippery. We have an uneasy suspicion, for instance,
// that there may be a theoretical possibility of delay/starvation in the
// low-level lock/wait/notify scheme used for the baton-passing because of
// potential interference with the priority scheme embodied in the
// CMS-token-passing protocol. See related comments at a CGC_lock->wait()
// invocation further below and marked with "XXX 20011219YSR".
// Indeed, as we note elsewhere, this may become yet more slippery
// in the presence of multiple CMS and/or multiple VM threads. XXX

class CMSTokenSync: public StackObj {
 private:
  bool _is_cms_thread;
 public:
  CMSTokenSync(bool is_cms_thread):
    _is_cms_thread(is_cms_thread) {
    assert(is_cms_thread == Thread::current()->is_ConcurrentGC_thread(),
           "Incorrect argument to constructor");
    ConcurrentMarkSweepThread::synchronize(_is_cms_thread);
  }

  ~CMSTokenSync() {
    assert(_is_cms_thread ?
             ConcurrentMarkSweepThread::cms_thread_has_cms_token() :
             ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
           "Incorrect state");
    ConcurrentMarkSweepThread::desynchronize(_is_cms_thread);
  }
};

// Convenience class that does a CMSTokenSync, and then acquires
// up to three locks.
class CMSTokenSyncWithLocks: public CMSTokenSync {
 private:
  // Note: locks are acquired in textual declaration order
  // and released in the opposite order
  MutexLockerEx _locker1, _locker2, _locker3;
 public:
  CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
                        Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
    CMSTokenSync(is_cms_thread),
    _locker1(mutex1, Mutex::_no_safepoint_check_flag),
    _locker2(mutex2, Mutex::_no_safepoint_check_flag),
    _locker3(mutex3, Mutex::_no_safepoint_check_flag)
  { }
};


// Wrapper class to temporarily disable icms during a foreground cms collection.
class ICMSDisabler: public StackObj {
 public:
  // The ctor disables icms and wakes up the thread so it notices the change;
  // the dtor re-enables icms.  Note that the CMSCollector methods will check
  // CMSIncrementalMode.
  ICMSDisabler()  { CMSCollector::disable_icms(); CMSCollector::start_icms(); }
  ~ICMSDisabler() { CMSCollector::enable_icms(); }
};

//////////////////////////////////////////////////////////////////
//  Concurrent Mark-Sweep Generation /////////////////////////////
//////////////////////////////////////////////////////////////////

NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)

// This struct contains per-thread things necessary to support parallel
// young-gen collection.
class CMSParGCThreadState: public CHeapObj {
 public:
  CFLS_LAB lab;
  PromotionInfo promo;

  // Constructor.
  CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
    promo.setSpace(cfls);
  }
};

ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
     ReservedSpace rs, size_t initial_byte_size, int level,
     CardTableRS* ct, bool use_adaptive_freelists,
     FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
  CardGeneration(rs, initial_byte_size, level, ct),
  _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
  _debug_collection_type(Concurrent_collection_type)
{
  HeapWord* bottom = (HeapWord*) _virtual_space.low();
  HeapWord* end    = (HeapWord*) _virtual_space.high();

  _direct_allocated_words = 0;
  NOT_PRODUCT(
    _numObjectsPromoted = 0;
    _numWordsPromoted = 0;
    _numObjectsAllocated = 0;
    _numWordsAllocated = 0;
  )

  _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end),
                                           use_adaptive_freelists,
                                           dictionaryChoice);
  NOT_PRODUCT(debug_cms_space = _cmsSpace;)
  if (_cmsSpace == NULL) {
    vm_exit_during_initialization(
      "CompactibleFreeListSpace allocation failure");
  }
  _cmsSpace->_gen = this;

  _gc_stats = new CMSGCStats();

  // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
  // offsets match. The ability to tell free chunks from objects
  // depends on this property.
  debug_only(
    FreeChunk* junk = NULL;
    assert(UseCompressedOops ||
           junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
           "Offset of FreeChunk::_prev within FreeChunk must match"
           " that of OopDesc::_klass within OopDesc");
  )
  if (ParallelGCThreads > 0) {
    typedef CMSParGCThreadState* CMSParGCThreadStatePtr;
    _par_gc_thread_states =
      NEW_C_HEAP_ARRAY(CMSParGCThreadStatePtr, ParallelGCThreads);
    if (_par_gc_thread_states == NULL) {
      vm_exit_during_initialization("Could not allocate par gc structs");
    }
    for (uint i = 0; i < ParallelGCThreads; i++) {
      _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
      if (_par_gc_thread_states[i] == NULL) {
        vm_exit_during_initialization("Could not allocate par gc structs");
      }
    }
  } else {
    _par_gc_thread_states = NULL;
  }
  _incremental_collection_failed = false;
  // The "dilatation_factor" is the expansion that can occur on
  // account of the fact that the minimum object size in the CMS
  // generation may be larger than that in, say, a contiguous young
  // generation.
  // Ideally, in the calculation below, we'd compute the dilatation
  // factor as: MinChunkSize/(promoting_gen's min object size)
  // Since we do not have such a general query interface for the
  // promoting generation, we'll instead just use the minimum
  // object size (which today is a header's worth of space);
  // note that all arithmetic is in units of HeapWords.
  assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
  assert(_dilatation_factor >= 1.0, "from previous assert");
}


// The field "_initiating_occupancy" represents the occupancy percentage
// at which we trigger a new collection cycle.  Unless explicitly specified
// via CMSInitiating[Perm]OccupancyFraction (argument "io" below), it
// is calculated by:
//
//   Let "f" be MinHeapFreeRatio in
//
//    _initiating_occupancy = 100-f +
//                            f * (CMSTrigger[Perm]Ratio/100)
//   where CMSTrigger[Perm]Ratio is the argument "tr" below.
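//
//   As an illustrative worked example (assumed numbers, not defaults):
//   with f = MinHeapFreeRatio = 40 and tr = CMSTrigger[Perm]Ratio = 80,
//     _initiating_occupancy = (100 - 40) + 40 * (80/100) = 92,
//   i.e. a new cycle is initiated once this generation is ~92% occupied.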
//
// That is, if we assume the heap is at its desired maximum occupancy at the
// end of a collection, we let CMSTrigger[Perm]Ratio of the (purported) free
// space be allocated before initiating a new collection cycle.
//
void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, intx tr) {
  assert(io <= 100 && tr >= 0 && tr <= 100, "Check the arguments");
  if (io >= 0) {
    _initiating_occupancy = (double)io / 100.0;
  } else {
    _initiating_occupancy = ((100 - MinHeapFreeRatio) +
                             (double)(tr * MinHeapFreeRatio) / 100.0)
                            / 100.0;
  }
}

void ConcurrentMarkSweepGeneration::ref_processor_init() {
  assert(collector() != NULL, "no collector");
  collector()->ref_processor_init();
}

void CMSCollector::ref_processor_init() {
  if (_ref_processor == NULL) {
    // Allocate and initialize a reference processor
    _ref_processor = ReferenceProcessor::create_ref_processor(
        _span,                               // span
        _cmsGen->refs_discovery_is_atomic(), // atomic_discovery
        _cmsGen->refs_discovery_is_mt(),     // mt_discovery
        &_is_alive_closure,
        ParallelGCThreads,
        ParallelRefProcEnabled);
    // Initialize the _ref_processor field of CMSGen
    _cmsGen->set_ref_processor(_ref_processor);

    // Allocate a dummy ref processor for perm gen.
    ReferenceProcessor* rp2 = new ReferenceProcessor();
    if (rp2 == NULL) {
      vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
    }
    _permGen->set_ref_processor(rp2);
  }
}

CMSAdaptiveSizePolicy* CMSCollector::size_policy() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
         "Wrong type of heap");
  CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
    gch->gen_policy()->size_policy();
  assert(sp->is_gc_cms_adaptive_size_policy(),
         "Wrong type of size policy");
  return sp;
}

CMSGCAdaptivePolicyCounters* CMSCollector::gc_adaptive_policy_counters() {
  CMSGCAdaptivePolicyCounters* results =
    (CMSGCAdaptivePolicyCounters*) collector_policy()->counters();
  assert(
    results->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
    "Wrong gc policy counter kind");
  return results;
}


void ConcurrentMarkSweepGeneration::initialize_performance_counters() {

  const char* gen_name = "old";

  // Generation Counters - generation 1, 1 subspace
  _gen_counters = new GenerationCounters(gen_name, 1, 1, &_virtual_space);

  _space_counters = new GSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       this, _gen_counters);
}

CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
  _cms_gen(cms_gen)
{
  assert(alpha <= 100, "bad value");
  _saved_alpha = alpha;

  // Initialize the alphas to the bootstrap value of 100.
  _gc0_alpha = _cms_alpha = 100;

  _cms_begin_time.update();
  _cms_end_time.update();

  _gc0_duration = 0.0;
  _gc0_period = 0.0;
  _gc0_promoted = 0;

  _cms_duration = 0.0;
  _cms_period = 0.0;
  _cms_allocated = 0;

  _cms_used_at_gc0_begin = 0;
  _cms_used_at_gc0_end = 0;
  _allow_duty_cycle_reduction = false;
  _valid_bits = 0;
  _icms_duty_cycle = CMSIncrementalDutyCycle;
}

double CMSStats::cms_free_adjustment_factor(size_t free) const {
  // TBD: CR 6909490
  return 1.0;
}

void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
}

// If promotion failure handling is on use
// the padded average size of the promotion for each
// young generation collection.
double CMSStats::time_until_cms_gen_full() const {
  size_t cms_free = _cms_gen->cmsSpace()->free();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  size_t expected_promotion = gch->get_gen(0)->capacity();
  if (HandlePromotionFailure) {
    expected_promotion = MIN2(
        (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average(),
        expected_promotion);
  }
  if (cms_free > expected_promotion) {
    // Start a cms collection if there isn't enough space to promote
    // for the next minor collection.  Use the padded average as
    // a safety factor.
    cms_free -= expected_promotion;

    // Adjust by the safety factor.
    double cms_free_dbl = (double)cms_free;
    double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor)/100.0;
    // Apply a further correction factor which tries to adjust
    // for recent occurrence of concurrent mode failures.
    cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
    cms_free_dbl = cms_free_dbl * cms_adjustment;

    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("CMSStats::time_until_cms_gen_full: cms_free "
        SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
        cms_free, expected_promotion);
      gclog_or_tty->print_cr("  cms_free_dbl %f cms_consumption_rate %f",
        cms_free_dbl, cms_consumption_rate() + 1.0);
    }
    // Add 1 in case the consumption rate goes to zero.
    return cms_free_dbl / (cms_consumption_rate() + 1.0);
  }
  return 0.0;
}

// Compare the duration of the cms collection to the
// time remaining before the cms generation is empty.
// Note that the time from the start of the cms collection
// to the start of the cms sweep (less than the total
// duration of the cms collection) can be used.  This
// has been tried and some applications experienced
// promotion failures early in execution.  This was
// possibly because the averages were not accurate
// enough at the beginning.
double CMSStats::time_until_cms_start() const {
  // We add "gc0_period" to the "work" calculation
  // below because this query is done (mostly) at the
  // end of a scavenge, so we need to conservatively
  // account for that much possible delay
  // in the query so as to avoid concurrent mode failures
  // due to starting the collection just a wee bit too
  // late.
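  //
  // Illustrative numbers (assumed, for exposition only): if an average
  // cms cycle takes 2.0s, scavenges come 0.5s apart, and the generation
  // is expected to fill in 2.0s, then work = 2.0 + 0.5 = 2.5 exceeds the
  // 2.0 deadline, so we return 0.0 to request that a cycle start now.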
  double work = cms_duration() + gc0_period();
  double deadline = time_until_cms_gen_full();
  // If a concurrent mode failure occurred recently, we want to be
  // more conservative and halve our expected time_until_cms_gen_full()
  if (work > deadline) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(
        " CMSCollector: collect because of anticipated promotion "
        "before full %3.7f + %3.7f > %3.7f ", cms_duration(),
        gc0_period(), time_until_cms_gen_full());
    }
    return 0.0;
  }
  return work - deadline;
}

// Return a duty cycle based on old_duty_cycle and new_duty_cycle, limiting the
// amount of change to prevent wild oscillation.
unsigned int CMSStats::icms_damped_duty_cycle(unsigned int old_duty_cycle,
                                              unsigned int new_duty_cycle) {
  assert(old_duty_cycle <= 100, "bad input value");
  assert(new_duty_cycle <= 100, "bad input value");

  // Note:  use subtraction with caution since it may underflow (values are
  // unsigned).  Addition is safe since we're in the range 0-100.
  unsigned int damped_duty_cycle = new_duty_cycle;
  if (new_duty_cycle < old_duty_cycle) {
    const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 5U);
    if (new_duty_cycle + largest_delta < old_duty_cycle) {
      damped_duty_cycle = old_duty_cycle - largest_delta;
    }
  } else if (new_duty_cycle > old_duty_cycle) {
    const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 15U);
    if (new_duty_cycle > old_duty_cycle + largest_delta) {
      damped_duty_cycle = MIN2(old_duty_cycle + largest_delta, 100U);
    }
  }
  assert(damped_duty_cycle <= 100, "invalid duty cycle computed");

  if (CMSTraceIncrementalPacing) {
    gclog_or_tty->print(" [icms_damped_duty_cycle(%d,%d) = %d] ",
                        old_duty_cycle, new_duty_cycle, damped_duty_cycle);
  }
  return damped_duty_cycle;
}

unsigned int CMSStats::icms_update_duty_cycle_impl() {
  assert(CMSIncrementalPacing && valid(),
         "should be handled in icms_update_duty_cycle()");

  double cms_time_so_far = cms_timer().seconds();
  double scaled_duration = cms_duration_per_mb() * _cms_used_at_gc0_end / M;
  double scaled_duration_remaining = fabsd(scaled_duration - cms_time_so_far);

  // Avoid division by 0.
  double time_until_full = MAX2(time_until_cms_gen_full(), 0.01);
  double duty_cycle_dbl = 100.0 * scaled_duration_remaining / time_until_full;

  unsigned int new_duty_cycle = MIN2((unsigned int)duty_cycle_dbl, 100U);
  if (new_duty_cycle > _icms_duty_cycle) {
    // Avoid very small duty cycles (1 or 2); 0 is allowed.
    if (new_duty_cycle > 2) {
      _icms_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle,
                                                new_duty_cycle);
    }
  } else if (_allow_duty_cycle_reduction) {
    // The duty cycle is reduced only once per cms cycle (see record_cms_end()).
    new_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle, new_duty_cycle);
    // Respect the minimum duty cycle.
    unsigned int min_duty_cycle = (unsigned int)CMSIncrementalDutyCycleMin;
    _icms_duty_cycle = MAX2(new_duty_cycle, min_duty_cycle);
  }

  if (PrintGCDetails || CMSTraceIncrementalPacing) {
    gclog_or_tty->print(" icms_dc=%d ", _icms_duty_cycle);
  }

  _allow_duty_cycle_reduction = false;
  return _icms_duty_cycle;
}

#ifndef PRODUCT
void CMSStats::print_on(outputStream *st) const {
  st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
  st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
            gc0_duration(), gc0_period(), gc0_promoted());
  st->print(",cms_dur=%g,cms_dur_per_mb=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
            cms_duration(), cms_duration_per_mb(),
            cms_period(), cms_allocated());
  st->print(",cms_since_beg=%g,cms_since_end=%g",
            cms_time_since_begin(), cms_time_since_end());
  st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
            _cms_used_at_gc0_begin, _cms_used_at_gc0_end);
  if (CMSIncrementalMode) {
    st->print(",dc=%d", icms_duty_cycle());
  }

  if (valid()) {
    st->print(",promo_rate=%g,cms_alloc_rate=%g",
              promotion_rate(), cms_allocation_rate());
    st->print(",cms_consumption_rate=%g,time_until_full=%g",
              cms_consumption_rate(), time_until_cms_gen_full());
  }
  st->print(" ");
}
#endif // #ifndef PRODUCT

CMSCollector::CollectorState CMSCollector::_collectorState =
                             CMSCollector::Idling;
bool CMSCollector::_foregroundGCIsActive = false;
bool CMSCollector::_foregroundGCShouldWait = false;

CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
                           ConcurrentMarkSweepGeneration* permGen,
                           CardTableRS*                   ct,
                           ConcurrentMarkSweepPolicy*     cp):
  _cmsGen(cmsGen),
  _permGen(permGen),
  _ct(ct),
  _ref_processor(NULL),    // will be set later
  _conc_workers(NULL),     // may be set later
  _abort_preclean(false),
  _start_sampling(false),
  _between_prologue_and_epilogue(false),
  _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
  _perm_gen_verify_bit_map(0, -1 /* no mutex */, "No_lock"),
  _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize),
                 -1 /* lock-free */, "No_lock" /* dummy */),
  _modUnionClosure(&_modUnionTable),
  _modUnionClosurePar(&_modUnionTable),
  // Adjust my span to cover old (cms) gen and perm gen
  _span(cmsGen->reserved()._union(permGen->reserved())),
  // Construct the is_alive_closure with _span & markBitMap
  _is_alive_closure(_span, &_markBitMap),
  _restart_addr(NULL),
  _overflow_list(NULL),
  _preserved_oop_stack(NULL),
  _preserved_mark_stack(NULL),
  _stats(cmsGen),
  _eden_chunk_array(NULL),     // may be set in ctor body
  _eden_chunk_capacity(0),     // -- ditto --
  _eden_chunk_index(0),        // -- ditto --
  _survivor_plab_array(NULL),  // -- ditto --
  _survivor_chunk_array(NULL), // -- ditto --
  _survivor_chunk_capacity(0), // -- ditto --
  _survivor_chunk_index(0),    // -- ditto --
  _ser_pmc_preclean_ovflw(0),
  _ser_kac_preclean_ovflw(0),
  _ser_pmc_remark_ovflw(0),
  _par_pmc_remark_ovflw(0),
  _ser_kac_ovflw(0),
  _par_kac_ovflw(0),
#ifndef PRODUCT
  _num_par_pushes(0),
#endif
  _collection_count_start(0),
  _verifying(false),
  _icms_start_limit(NULL),
  _icms_stop_limit(NULL),
  _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
  _completed_initialization(false),
  _collector_policy(cp),
  _should_unload_classes(false),
  _concurrent_cycles_since_last_unload(0),
  _roots_scanning_options(0),
  _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
  _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding)
{
  if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
    ExplicitGCInvokesConcurrent = true;
  }
  // Now expand the span and allocate the collection support structures
  // (MUT, marking bit map etc.) to cover both generations subject to
  // collection.

  // First check that _permGen is adjacent to _cmsGen and above it.
  assert(   _cmsGen->reserved().word_size() > 0
         && _permGen->reserved().word_size() > 0,
         "generations should not be of zero size");
  assert(_cmsGen->reserved().intersection(_permGen->reserved()).is_empty(),
         "_cmsGen and _permGen should not overlap");
  assert(_cmsGen->reserved().end() == _permGen->reserved().start(),
         "_cmsGen->end() different from _permGen->start()");

  // For use by dirty card to oop closures.
  _cmsGen->cmsSpace()->set_collector(this);
  _permGen->cmsSpace()->set_collector(this);

  // Allocate MUT and marking bit map
  {
    MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
    if (!_markBitMap.allocate(_span)) {
      warning("Failed to allocate CMS Bit Map");
      return;
    }
    assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
  }
  {
    _modUnionTable.allocate(_span);
    assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
  }

  if (!_markStack.allocate(MarkStackSize)) {
    warning("Failed to allocate CMS Marking Stack");
    return;
  }
  if (!_revisitStack.allocate(CMSRevisitStackSize)) {
    warning("Failed to allocate CMS Revisit Stack");
    return;
  }

  // Support for multi-threaded concurrent phases
  if (ParallelGCThreads > 0 && CMSConcurrentMTEnabled) {
    if (FLAG_IS_DEFAULT(ConcGCThreads)) {
      // just for now
      FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3)/4);
    }
    if (ConcGCThreads > 1) {
      _conc_workers = new YieldingFlexibleWorkGang("Parallel CMS Threads",
                                                   ConcGCThreads, true);
      if (_conc_workers == NULL) {
        warning("GC/CMS: _conc_workers allocation failure: "
                "forcing -CMSConcurrentMTEnabled");
        CMSConcurrentMTEnabled = false;
      }
    } else {
      CMSConcurrentMTEnabled = false;
    }
  }
  if (!CMSConcurrentMTEnabled) {
    ConcGCThreads = 0;
  } else {
    // Turn off CMSCleanOnEnter optimization temporarily for
    // the MT case where it's not fixed yet; see 6178663.
    CMSCleanOnEnter = false;
  }
  assert((_conc_workers != NULL) == (ConcGCThreads > 1),
         "Inconsistency");

  // Parallel task queues; these are shared for the
  // concurrent and stop-world phases of CMS, but
  // are not shared with parallel scavenge (ParNew).
  {
    uint i;
    uint num_queues = (uint) MAX2(ParallelGCThreads, ConcGCThreads);

    if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
         || ParallelRefProcEnabled)
        && num_queues > 0) {
      _task_queues = new OopTaskQueueSet(num_queues);
      if (_task_queues == NULL) {
        warning("task_queues allocation failure.");
        return;
      }
      _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues);
      if (_hash_seed == NULL) {
        warning("_hash_seed array allocation failure");
        return;
      }

      // XXX use a global constant instead of 64!
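      // (The 64 below is an assumed cache line size: padding each queue
      //  out to a full line keeps two concurrently operated queues from
      //  sharing a line, so one worker's pushes/pops do not invalidate
      //  another worker's cached queue state.)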
      typedef struct OopTaskQueuePadded {
        OopTaskQueue work_queue;
        char pad[64 - sizeof(OopTaskQueue)];  // prevent false sharing
      } OopTaskQueuePadded;

      for (i = 0; i < num_queues; i++) {
        OopTaskQueuePadded *q_padded = new OopTaskQueuePadded();
        if (q_padded == NULL) {
          warning("work_queue allocation failure.");
          return;
        }
        _task_queues->register_queue(i, &q_padded->work_queue);
      }
      for (i = 0; i < num_queues; i++) {
        _task_queues->queue(i)->initialize();
        _hash_seed[i] = 17;  // copied from ParNew
      }
    }
  }

  _cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
  _permGen->init_initiating_occupancy(CMSInitiatingPermOccupancyFraction, CMSTriggerPermRatio);

  // Clip CMSBootstrapOccupancy between 0 and 100.
  _bootstrap_occupancy = ((double)MIN2((uintx)100, MAX2((uintx)0, CMSBootstrapOccupancy)))
                         /(double)100;

  _full_gcs_since_conc_gc = 0;

  // Now tell CMS generations the identity of their collector
  ConcurrentMarkSweepGeneration::set_collector(this);

  // Create & start a CMS thread for this CMS collector
  _cmsThread = ConcurrentMarkSweepThread::start(this);
  assert(cmsThread() != NULL, "CMS Thread should have been created");
  assert(cmsThread()->collector() == this,
         "CMS Thread should refer to this gen");
  assert(CGC_lock != NULL, "Where's the CGC_lock?");

  // Support for parallelizing young gen rescan
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  _young_gen = gch->prev_gen(_cmsGen);
  if (gch->supports_inline_contig_alloc()) {
    _top_addr = gch->top_addr();
    _end_addr = gch->end_addr();
    assert(_young_gen != NULL, "no _young_gen");
    _eden_chunk_index = 0;
    _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
    _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity);
    if (_eden_chunk_array == NULL) {
      _eden_chunk_capacity = 0;
      warning("GC/CMS: _eden_chunk_array allocation failure");
    }
  }
  assert(_eden_chunk_array != NULL || _eden_chunk_capacity == 0, "Error");

  // Support for parallelizing survivor space rescan
  if (CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) {
    const size_t max_plab_samples =
      ((DefNewGeneration*)_young_gen)->max_survivor_size()/MinTLABSize;

    _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads);
    _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples);
    _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads);
    if (_survivor_plab_array == NULL || _survivor_chunk_array == NULL
        || _cursor == NULL) {
      warning("Failed to allocate survivor plab/chunk array");
      if (_survivor_plab_array != NULL) {
        FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array);
        _survivor_plab_array = NULL;
      }
      if (_survivor_chunk_array != NULL) {
        FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array);
        _survivor_chunk_array = NULL;
      }
      if (_cursor != NULL) {
        FREE_C_HEAP_ARRAY(size_t, _cursor);
        _cursor = NULL;
      }
    } else {
      _survivor_chunk_capacity = 2*max_plab_samples;
      for (uint i = 0; i < ParallelGCThreads; i++) {
        HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples);
        if (vec == NULL) {
          warning("Failed to allocate survivor plab array");
          for (int j = i; j > 0; j--) {
            FREE_C_HEAP_ARRAY(HeapWord*, _survivor_plab_array[j-1].array());
          }
          FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array);
          FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array);
          _survivor_plab_array = NULL;
          _survivor_chunk_array = NULL;
          _survivor_chunk_capacity = 0;
          break;
        } else {
          ChunkArray* cur =
            ::new (&_survivor_plab_array[i]) ChunkArray(vec,
                                                        max_plab_samples);
          assert(cur->end() == 0, "Should be 0");
          assert(cur->array() == vec, "Should be vec");
          assert(cur->capacity() == max_plab_samples, "Error");
        }
      }
    }
  }
  assert(   (   _survivor_plab_array  != NULL
             && _survivor_chunk_array != NULL)
         || (   _survivor_chunk_capacity == 0
             && _survivor_chunk_index == 0),
         "Error");

  // Choose what strong roots should be scanned depending on verification options
  // and perm gen collection mode.
  if (!CMSClassUnloadingEnabled) {
    // If class unloading is disabled we want to include all classes into the root set.
    add_root_scanning_option(SharedHeap::SO_AllClasses);
  } else {
    add_root_scanning_option(SharedHeap::SO_SystemClasses);
  }

  NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
  _gc_counters = new CollectorCounters("CMS", 1);
  _completed_initialization = true;
  _inter_sweep_timer.start();  // start of time
#ifdef SPARC
  // Issue a stern warning, but allow use for experimentation and debugging.
  if (VM_Version::is_sun4v() && UseMemSetInBOT) {
    assert(!FLAG_IS_DEFAULT(UseMemSetInBOT), "Error");
    warning("Experimental flag -XX:+UseMemSetInBOT is known to cause instability"
            " on sun4v; please understand that you are using at your own risk!");
  }
#endif
}

const char* ConcurrentMarkSweepGeneration::name() const {
  return "concurrent mark-sweep generation";
}
void ConcurrentMarkSweepGeneration::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

// this is an optimized version of update_counters(). it takes the
// used value as a parameter rather than computing it.
//
void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
  if (UsePerfData) {
    _space_counters->update_used(used);
    _space_counters->update_capacity();
    _gen_counters->update_all();
  }
}

void ConcurrentMarkSweepGeneration::print() const {
  Generation::print();
  cmsSpace()->print();
}

#ifndef PRODUCT
void ConcurrentMarkSweepGeneration::print_statistics() {
  cmsSpace()->printFLCensus(0);
}
#endif

void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (PrintGCDetails) {
    if (Verbose) {
      gclog_or_tty->print(" [%d %s-%s: "SIZE_FORMAT"("SIZE_FORMAT")]",
        level(), short_name(), s, used(), capacity());
    } else {
      gclog_or_tty->print(" [%d %s-%s: "SIZE_FORMAT"K("SIZE_FORMAT"K)]",
        level(), short_name(), s, used() / K, capacity() / K);
    }
  }
  if (Verbose) {
    gclog_or_tty->print(" "SIZE_FORMAT"("SIZE_FORMAT")",
      gch->used(), gch->capacity());
  } else {
    gclog_or_tty->print(" "SIZE_FORMAT"K("SIZE_FORMAT"K)",
      gch->used() / K, gch->capacity() / K);
  }
}

size_t
ConcurrentMarkSweepGeneration::contiguous_available() const {
  // dld proposes an improvement in precision here. If the committed
  // part of the space ends in a free block we should add that to
  // uncommitted size in the calculation below. Will make this
  // change later, staying with the approximation below for the
  // time being.
  // -- ysr.
  return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
}

size_t
ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
  return _cmsSpace->max_alloc_in_words() * HeapWordSize;
}

size_t ConcurrentMarkSweepGeneration::max_available() const {
  return free() + _virtual_space.uncommitted_size();
}

bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(
    size_t max_promotion_in_bytes,
    bool younger_handles_promotion_failure) const {

  // This is the most conservative test.  Full promotion is
  // guaranteed if this is used. The multiplicative factor is to
  // account for the worst case "dilatation".
  double adjusted_max_promo_bytes = _dilatation_factor * max_promotion_in_bytes;
  if (adjusted_max_promo_bytes > (double)max_uintx) { // larger than size_t
    adjusted_max_promo_bytes = (double)max_uintx;
  }
  bool result = (max_contiguous_available() >= (size_t)adjusted_max_promo_bytes);

  if (younger_handles_promotion_failure && !result) {
    // Full promotion is not guaranteed because fragmentation
    // of the cms generation can prevent the full promotion.
    result = (max_available() >= (size_t)adjusted_max_promo_bytes);

    if (!result) {
      // With promotion failure handling the test for the ability
      // to support the promotion does not have to be guaranteed.
      // Use an average of the amount promoted.
      result = max_available() >= (size_t)
        gc_stats()->avg_promoted()->padded_average();
      if (PrintGC && Verbose && result) {
        gclog_or_tty->print_cr(
          "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
          " max_available: " SIZE_FORMAT
          " avg_promoted: " SIZE_FORMAT,
          max_available(), (size_t)
          gc_stats()->avg_promoted()->padded_average());
      }
    } else {
      if (PrintGC && Verbose) {
        gclog_or_tty->print_cr(
          "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
          " max_available: " SIZE_FORMAT
          " adj_max_promo_bytes: " SIZE_FORMAT,
          max_available(), (size_t)adjusted_max_promo_bytes);
      }
    }
  } else {
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr(
        "\nConcurrentMarkSweepGeneration::promotion_attempt_is_safe"
        " contiguous_available: " SIZE_FORMAT
        " adj_max_promo_bytes: " SIZE_FORMAT,
        max_contiguous_available(), (size_t)adjusted_max_promo_bytes);
    }
  }
  return result;
}

// At a promotion failure dump information on block layout in heap
// (cms old generation).
void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
  if (CMSDumpAtPromotionFailure) {
    cmsSpace()->dump_at_safepoint_with_locks(collector(), gclog_or_tty);
  }
}

CompactibleSpace*
ConcurrentMarkSweepGeneration::first_compaction_space() const {
  return _cmsSpace;
}

void ConcurrentMarkSweepGeneration::reset_after_compaction() {
  // Clear the promotion information.  These pointers can be adjusted
  // along with all the other pointers into the heap but
  // compaction is expected to be a rare event with
  // a heap using cms so don't do it without seeing the need.
  if (ParallelGCThreads > 0) {
    for (uint i = 0; i < ParallelGCThreads; i++) {
      _par_gc_thread_states[i]->promo.reset();
    }
  }
}

void ConcurrentMarkSweepGeneration::space_iterate(SpaceClosure* blk, bool usedOnly) {
  blk->do_space(_cmsSpace);
}

void ConcurrentMarkSweepGeneration::compute_new_size() {
  assert_locked_or_safepoint(Heap_lock);

  // If incremental collection failed, we just want to expand
  // to the limit.
  if (incremental_collection_failed()) {
    clear_incremental_collection_failed();
    grow_to_reserved();
    return;
  }

  size_t expand_bytes = 0;
  double free_percentage = ((double) free()) / capacity();
  double desired_free_percentage = (double) MinHeapFreeRatio / 100;
  double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;

  // compute expansion delta needed for reaching desired free percentage
  if (free_percentage < desired_free_percentage) {
    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
    assert(desired_capacity >= capacity(), "invalid expansion size");
    expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
  }
  if (expand_bytes > 0) {
    if (PrintGCDetails && Verbose) {
      size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
      gclog_or_tty->print_cr("\nFrom compute_new_size: ");
      gclog_or_tty->print_cr("  Free fraction %f", free_percentage);
      gclog_or_tty->print_cr("  Desired free fraction %f",
        desired_free_percentage);
      gclog_or_tty->print_cr("  Maximum free fraction %f",
        maximum_free_percentage);
      gclog_or_tty->print_cr("  Capacity "SIZE_FORMAT, capacity()/1000);
      gclog_or_tty->print_cr("  Desired capacity "SIZE_FORMAT,
        desired_capacity/1000);
      int prev_level = level() - 1;
      if (prev_level >= 0) {
        size_t prev_size = 0;
        GenCollectedHeap* gch = GenCollectedHeap::heap();
        Generation* prev_gen = gch->_gens[prev_level];
        prev_size = prev_gen->capacity();
        gclog_or_tty->print_cr("  Younger gen size "SIZE_FORMAT,
          prev_size/1000);
      }
      gclog_or_tty->print_cr("  unsafe_max_alloc_nogc "SIZE_FORMAT,
        unsafe_max_alloc_nogc()/1000);
      gclog_or_tty->print_cr("  contiguous available "SIZE_FORMAT,
        contiguous_available()/1000);
      gclog_or_tty->print_cr("  Expand by "SIZE_FORMAT" (bytes)",
        expand_bytes);
    }
    // safe if expansion fails
    expand(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("  Expanded free fraction %f",
        ((double) free()) / capacity());
    }
  }
}

Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
  return cmsSpace()->freelistLock();
}

HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size,
                                                  bool tlab) {
  CMSSynchronousYieldRequest yr;
  MutexLockerEx x(freelistLock(),
                  Mutex::_no_safepoint_check_flag);
  return have_lock_and_allocate(size, tlab);
}

HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
                                                  bool tlab) {
  assert_lock_strong(freelistLock());
  size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
  HeapWord* res = cmsSpace()->allocate(adjustedSize);
  // Allocate the object live (grey) if the background collector has
  // started marking.
  // This is necessary because the marker may
  // have passed this address and consequently this object will
  // not otherwise be greyed and would be incorrectly swept up.
  // Note that if this object contains references, the writing
  // of those references will dirty the card containing this object
  // allowing the object to be blackened (and its references scanned)
  // either during a preclean phase or at the final checkpoint.
  if (res != NULL) {
    collector()->direct_allocated(res, adjustedSize);
    _direct_allocated_words += adjustedSize;
    // allocation counters
    NOT_PRODUCT(
      _numObjectsAllocated++;
      _numWordsAllocated += (int)adjustedSize;
    )
  }
  return res;
}

// In the case of direct allocation by mutators in a generation that
// is being concurrently collected, the object must be allocated
// live (grey) if the background collector has started marking.
// This is necessary because the marker may
// have passed this address and consequently this object will
// not otherwise be greyed and would be incorrectly swept up.
// Note that if this object contains references, the writing
// of those references will dirty the card containing this object
// allowing the object to be blackened (and its references scanned)
// either during a preclean phase or at the final checkpoint.
void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
  assert(_markBitMap.covers(start, size), "Out of bounds");
  if (_collectorState >= Marking) {
    MutexLockerEx y(_markBitMap.lock(),
                    Mutex::_no_safepoint_check_flag);
    // [see comments preceding SweepClosure::do_blk() below for details]
    // 1. need to mark the object as live so it isn't collected
    // 2. need to mark the 2nd bit to indicate the object may be uninitialized
    // 3. need to mark the end of the object so sweeper can skip over it
    //    if it's uninitialized when the sweeper reaches it.
    _markBitMap.mark(start);            // object is live
    _markBitMap.mark(start + 1);        // object is potentially uninitialized?
    _markBitMap.mark(start + size - 1); // mark end of object
  }
  // check that oop looks uninitialized
  assert(oop(start)->klass_or_null() == NULL, "_klass should be NULL");
}

void CMSCollector::promoted(bool par, HeapWord* start,
                            bool is_obj_array, size_t obj_size) {
  assert(_markBitMap.covers(start), "Out of bounds");
  // See comment in direct_allocated() about when objects should
  // be allocated live.
  if (_collectorState >= Marking) {
    // we already hold the marking bit map lock, taken in
    // the prologue
    if (par) {
      _markBitMap.par_mark(start);
    } else {
      _markBitMap.mark(start);
    }
    // We don't need to mark the object as uninitialized (as
    // in direct_allocated above) because this is being done with the
    // world stopped and the object will be initialized by the
    // time the sweeper gets to look at it.
    assert(SafepointSynchronize::is_at_safepoint(),
           "expect promotion only at safepoints");

    if (_collectorState < Sweeping) {
      // Mark the appropriate cards in the modUnionTable, so that
      // this object gets scanned before the sweep. If this is
      // not done, CMS generation references in the object might
      // not get marked.
      // For the case of arrays, which are otherwise precisely
      // marked, we need to dirty the entire array, not just its head.
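      // (Illustrative: with the usual 512-byte cards, an array occupying
      //  [start, start + obj_size) dirties every card from the one holding
      //  start up to the card-aligned address at or beyond start + obj_size,
      //  so precleaning/remark will rescan all of its elements.)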
      if (is_obj_array) {
        // The [par_]mark_range() method expects mr.end() below to
        // be aligned to the granularity of a bit's representation
        // in the heap. In the case of the MUT below, that's a
        // card size.
        MemRegion mr(start,
                     (HeapWord*)round_to((intptr_t)(start + obj_size),
                      CardTableModRefBS::card_size /* bytes */));
        if (par) {
          _modUnionTable.par_mark_range(mr);
        } else {
          _modUnionTable.mark_range(mr);
        }
      } else {  // not an obj array; we can just mark the head
        if (par) {
          _modUnionTable.par_mark(start);
        } else {
          _modUnionTable.mark(start);
        }
      }
    }
  }
}

static inline size_t percent_of_space(Space* space, HeapWord* addr)
{
  size_t delta = pointer_delta(addr, space->bottom());
  return (size_t)(delta * 100.0 / (space->capacity() / HeapWordSize));
}

void CMSCollector::icms_update_allocation_limits()
{
  Generation* gen0 = GenCollectedHeap::heap()->get_gen(0);
  EdenSpace* eden = gen0->as_DefNewGeneration()->eden();

  const unsigned int duty_cycle = stats().icms_update_duty_cycle();
  if (CMSTraceIncrementalPacing) {
    stats().print();
  }

  assert(duty_cycle <= 100, "invalid duty cycle");
  if (duty_cycle != 0) {
    // The duty_cycle is a percentage between 0 and 100; convert to words and
    // then compute the offset from the endpoints of the space.
    size_t free_words = eden->free() / HeapWordSize;
    double free_words_dbl = (double)free_words;
    size_t duty_cycle_words = (size_t)(free_words_dbl * duty_cycle / 100.0);
    size_t offset_words = (free_words - duty_cycle_words) / 2;

    _icms_start_limit = eden->top() + offset_words;
    _icms_stop_limit = eden->end() - offset_words;

    // The limits may be adjusted (shifted to the right) by
    // CMSIncrementalOffset, to allow the application more mutator time after a
    // young gen gc (when all mutators were stopped) and before CMS starts and
    // takes away one or more cpus.
    if (CMSIncrementalOffset != 0) {
      double adjustment_dbl = free_words_dbl * CMSIncrementalOffset / 100.0;
      size_t adjustment = (size_t)adjustment_dbl;
      HeapWord* tmp_stop = _icms_stop_limit + adjustment;
      if (tmp_stop > _icms_stop_limit && tmp_stop < eden->end()) {
        _icms_start_limit += adjustment;
        _icms_stop_limit = tmp_stop;
      }
    }
  }
  if (duty_cycle == 0 || (_icms_start_limit == _icms_stop_limit)) {
    _icms_start_limit = _icms_stop_limit = eden->end();
  }

  // Install the new start limit.
  eden->set_soft_end(_icms_start_limit);

  if (CMSTraceIncrementalMode) {
    gclog_or_tty->print(" icms alloc limits:  "
                        PTR_FORMAT "," PTR_FORMAT
                        " (" SIZE_FORMAT "%%," SIZE_FORMAT "%%) ",
                        _icms_start_limit, _icms_stop_limit,
                        percent_of_space(eden, _icms_start_limit),
                        percent_of_space(eden, _icms_stop_limit));
    if (Verbose) {
      gclog_or_tty->print("eden:  ");
      eden->print_on(gclog_or_tty);
    }
  }
}

// Any changes here should try to maintain the invariant
// that if this method is called with _icms_start_limit
// and _icms_stop_limit both NULL, then it should return NULL
// and not notify the icms thread.
HeapWord*
CMSCollector::allocation_limit_reached(Space* space, HeapWord* top,
                                       size_t word_size)
{
  // A start_limit equal to end() means the duty cycle is 0, so treat that as a
  // nop.
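  // (Summary of the cases below: crossing the start limit notifies the
  //  icms thread to begin work and moves the soft limit to the stop limit;
  //  crossing the stop limit stops icms and extends allocation to end();
  //  past both limits there is nothing left to do, so return NULL.)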
  if (CMSIncrementalMode && _icms_start_limit != space->end()) {
    if (top <= _icms_start_limit) {
      if (CMSTraceIncrementalMode) {
        space->print_on(gclog_or_tty);
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" start limit top=" PTR_FORMAT
                               ", new limit=" PTR_FORMAT
                               " (" SIZE_FORMAT "%%)",
                               top, _icms_stop_limit,
                               percent_of_space(space, _icms_stop_limit));
      }
      ConcurrentMarkSweepThread::start_icms();
      assert(top < _icms_stop_limit, "Tautology");
      if (word_size < pointer_delta(_icms_stop_limit, top)) {
        return _icms_stop_limit;
      }

      // The allocation will cross both the _start and _stop limits, so do the
      // stop notification also and return end().
      if (CMSTraceIncrementalMode) {
        space->print_on(gclog_or_tty);
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" +stop limit top=" PTR_FORMAT
                               ", new limit=" PTR_FORMAT
                               " (" SIZE_FORMAT "%%)",
                               top, space->end(),
                               percent_of_space(space, space->end()));
      }
      ConcurrentMarkSweepThread::stop_icms();
      return space->end();
    }

    if (top <= _icms_stop_limit) {
      if (CMSTraceIncrementalMode) {
        space->print_on(gclog_or_tty);
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" stop limit top=" PTR_FORMAT
                               ", new limit=" PTR_FORMAT
                               " (" SIZE_FORMAT "%%)",
                               top, space->end(),
                               percent_of_space(space, space->end()));
      }
      ConcurrentMarkSweepThread::stop_icms();
      return space->end();
    }

    if (CMSTraceIncrementalMode) {
      space->print_on(gclog_or_tty);
      gclog_or_tty->stamp();
      gclog_or_tty->print_cr(" end limit top=" PTR_FORMAT
                             ", new limit=" PTR_FORMAT,
                             top, NULL);
    }
  }

  return NULL;
}

oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  // allocate, copy and if necessary update promoinfo --
  // delegate to underlying space.
  assert_lock_strong(freelistLock());

#ifndef PRODUCT
  if (Universe::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT

  oop res = _cmsSpace->promote(obj, obj_size);
  if (res == NULL) {
    // expand and retry
    size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
    expand(s*HeapWordSize, MinHeapDeltaBytes,
      CMSExpansionCause::_satisfy_promotion);
    // Since there's currently no next generation, we don't try to promote
    // into a more senior generation.
    assert(next_gen() == NULL, "assumption, based upon which no attempt "
                               "is made to pass on a possibly failing "
                               "promotion to next generation");
    res = _cmsSpace->promote(obj, obj_size);
  }
  if (res != NULL) {
    // See comment in allocate() about when objects should
    // be allocated live.
    assert(obj->is_oop(), "Will dereference klass pointer below");
    collector()->promoted(false,           // Not parallel
                          (HeapWord*)res, obj->is_objArray(), obj_size);
    // promotion counters
    NOT_PRODUCT(
      _numObjectsPromoted++;
      _numWordsPromoted +=
        (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
    )
  }
  return res;
}


HeapWord*
ConcurrentMarkSweepGeneration::allocation_limit_reached(Space* space,
                                             HeapWord* top,
                                             size_t word_sz)
{
  return collector()->allocation_limit_reached(space, top, word_sz);
}

// Things to support parallel young-gen collection.
oop
ConcurrentMarkSweepGeneration::par_promote(int thread_num,
                                           oop old, markOop m,
                                           size_t word_sz) {
#ifndef PRODUCT
  if (Universe::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT

  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  PromotionInfo* promoInfo = &ps->promo;
  // if we are tracking promotions, then first ensure space for
  // promotion (including spooling space for saving header if necessary).
  // then allocate and copy, then track promoted info if needed.
  // When tracking (see PromotionInfo::track()), the mark word may
  // be displaced and in this case restoration of the mark word
  // occurs in the (oop_since_save_marks_)iterate phase.
  if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
    // Out of space for allocating spooling buffers;
    // try expanding and allocating spooling buffers.
    if (!expand_and_ensure_spooling_space(promoInfo)) {
      return NULL;
    }
  }
  assert(promoInfo->has_spooling_space(), "Control point invariant");
  HeapWord* obj_ptr = ps->lab.alloc(word_sz);
  if (obj_ptr == NULL) {
    obj_ptr = expand_and_par_lab_allocate(ps, word_sz);
    if (obj_ptr == NULL) {
      return NULL;
    }
  }
  oop obj = oop(obj_ptr);
  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
  // Otherwise, copy the object.  Here we must be careful to insert the
  // klass pointer last, since this marks the block as an allocated object.
  // Except with compressed oops it's the mark word.
  HeapWord* old_ptr = (HeapWord*)old;
  if (word_sz > (size_t)oopDesc::header_size()) {
    Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
                                 obj_ptr + oopDesc::header_size(),
                                 word_sz - oopDesc::header_size());
  }

  if (UseCompressedOops) {
    // Copy gap missed by (aligned) header size calculation above
    obj->set_klass_gap(old->klass_gap());
  }

  // Restore the mark word copied above.
  obj->set_mark(m);

  // Now we can track the promoted object, if necessary.  We take care
  // to delay the transition from uninitialized to full object
  // (i.e., insertion of klass pointer) until after, so that it
  // atomically becomes a promoted object.
  if (promoInfo->tracking()) {
    promoInfo->track((PromotedObject*)obj, old->klass());
  }

  // Finally, install the klass pointer (this should be volatile).
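  // (Until this store, concurrent observers such as the sweeper see a
  //  block with a NULL klass and treat it as uninitialized; installing
  //  the klass pointer last is what atomically publishes the copied
  //  object, per the ordering comments above.)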
  obj->set_klass(old->klass());

  assert(old->is_oop(), "Will dereference klass ptr below");
  collector()->promoted(true,          // parallel
                        obj_ptr, old->is_objArray(), word_sz);

  NOT_PRODUCT(
    Atomic::inc(&_numObjectsPromoted);
    Atomic::add((jint)CompactibleFreeListSpace::adjustObjectSize(obj->size()),
                &_numWordsPromoted);
  )

  return obj;
}

void
ConcurrentMarkSweepGeneration::
par_promote_alloc_undo(int thread_num,
                       HeapWord* obj, size_t word_sz) {
  // CMS does not support promotion undo.
  ShouldNotReachHere();
}

void
ConcurrentMarkSweepGeneration::
par_promote_alloc_done(int thread_num) {
  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  ps->lab.retire(thread_num);
}

void
ConcurrentMarkSweepGeneration::
par_oop_since_save_marks_iterate_done(int thread_num) {
  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  ParScanWithoutBarrierClosure* dummy_cl = NULL;
  ps->promo.promoted_oops_iterate_nv(dummy_cl);
}

// XXXPERM
bool ConcurrentMarkSweepGeneration::should_collect(bool   full,
                                                   size_t size,
                                                   bool   tlab)
{
  // We allow a STW collection only if a full
  // collection was requested.
  return full || should_allocate(size, tlab); // FIX ME !!!
  // This and promotion failure handling are connected at the
  // hip and should be fixed by untying them.
}

bool CMSCollector::shouldConcurrentCollect() {
  if (_full_gc_requested) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print_cr("CMSCollector: collect because of explicit "
                             " gc request (or gc_locker)");
    }
    return true;
  }

  // For debugging purposes, change the type of collection.
  // If the rotation is not on the concurrent collection
  // type, don't start a concurrent collection.
  NOT_PRODUCT(
    if (RotateCMSCollectionTypes &&
        (_cmsGen->debug_collection_type() !=
         ConcurrentMarkSweepGeneration::Concurrent_collection_type)) {
      assert(_cmsGen->debug_collection_type() !=
             ConcurrentMarkSweepGeneration::Unknown_collection_type,
             "Bad cms collection type");
      return false;
    }
  )

  FreelistLocker x(this);
  // ------------------------------------------------------------------
  // Print out lots of information which affects the initiation of
  // a collection.
  if (PrintCMSInitiationStatistics && stats().valid()) {
    gclog_or_tty->print("CMSCollector shouldConcurrentCollect: ");
    gclog_or_tty->stamp();
    gclog_or_tty->print_cr("");
    stats().print_on(gclog_or_tty);
    gclog_or_tty->print_cr("time_until_cms_gen_full %3.7f",
      stats().time_until_cms_gen_full());
    gclog_or_tty->print_cr("free="SIZE_FORMAT, _cmsGen->free());
    gclog_or_tty->print_cr("contiguous_available="SIZE_FORMAT,
      _cmsGen->contiguous_available());
    gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
    gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
    gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
    gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
    gclog_or_tty->print_cr("initiatingPermOccupancy=%3.7f", _permGen->initiating_occupancy());
  }
  // ------------------------------------------------------------------

  // If the estimated time to complete a cms collection (cms_duration())
  // is less than the estimated time remaining until the cms generation
  // is full, start a collection.
  if (!UseCMSInitiatingOccupancyOnly) {
    if (stats().valid()) {
      if (stats().time_until_cms_start() == 0.0) {
        return true;
      }
    } else {
      // We want to conservatively collect somewhat early in order
      // to try and "bootstrap" our CMS/promotion statistics;
      // this branch will not fire after the first successful CMS
      // collection because the stats should then be valid.
      if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
        if (Verbose && PrintGCDetails) {
          gclog_or_tty->print_cr(
            " CMSCollector: collect for bootstrapping statistics:"
            " occupancy = %f, boot occupancy = %f", _cmsGen->occupancy(),
            _bootstrap_occupancy);
        }
        return true;
      }
    }
  }

  // Otherwise, we start a collection cycle if either the perm gen or
  // old gen want a collection cycle started. Each may use
  // an appropriate criterion for making this decision.
  // XXX We need to make sure that the gen expansion
  // criterion dovetails well with this. XXX NEED TO FIX THIS
  if (_cmsGen->should_concurrent_collect()) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print_cr("CMS old gen initiated");
    }
    return true;
  }

  // We start a collection if we believe an incremental collection may fail;
  // this is not likely to be productive in practice because it's probably too
  // late anyway.
1510 GenCollectedHeap* gch = GenCollectedHeap::heap();
1511 assert(gch->collector_policy()->is_two_generation_policy(),
1512 "You may want to check the correctness of the following");
1513 if (gch->incremental_collection_will_fail()) {
1514 if (PrintGCDetails && Verbose) {
1515 gclog_or_tty->print("CMSCollector: collect because incremental collection will fail ");
1516 }
1517 return true;
1518 }
1519
1520 if (CMSClassUnloadingEnabled && _permGen->should_concurrent_collect()) {
1521 bool res = update_should_unload_classes();
1522 if (res) {
1523 if (Verbose && PrintGCDetails) {
1524 gclog_or_tty->print_cr("CMS perm gen initiated");
1525 }
1526 return true;
1527 }
1528 }
1529 return false;
1530 }
1531
1532 // Clear _expansion_cause fields of constituent generations
1533 void CMSCollector::clear_expansion_cause() {
1534 _cmsGen->clear_expansion_cause();
1535 _permGen->clear_expansion_cause();
1536 }
1537
1538 // We should be conservative in starting a collection cycle. Starting
1539 // too eagerly runs the risk of collecting far too often in the
1540 // extreme. Collecting too rarely falls back on full collections,
1541 // which works, even if not optimal in terms of concurrent work.
1542 // As a workaround for collecting too eagerly, use the flag
1543 // UseCMSInitiatingOccupancyOnly. This also has the advantage of
1544 // giving the user an easily understandable way of controlling the
1545 // collections.
1546 // We want to start a new collection cycle if any of the following
1547 // conditions hold (see also the sketch below):
1548 // . our current occupancy exceeds the configured initiating occupancy
1549 // for this generation, or
1550 // . we recently needed to expand this space and have not, since that
1551 // expansion, done a collection of this generation, or
1552 // . the underlying space believes that it may be a good idea to initiate
1553 // a concurrent collection (this may be based on criteria such as the
1554 // following: the space uses linear allocation and linear allocation is
1555 // going to fail, or there is believed to be excessive fragmentation in
1556 // the generation, etc... or ...
1557 // [.(currently done by CMSCollector::shouldConcurrentCollect() only for
1558 // the case of the old generation, not the perm generation; see CR 6543076):
1559 // we may be approaching a point at which allocation requests may fail because
1560 // we will be out of sufficient free space given allocation rate estimates.]
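// An illustrative distillation of the tests performed below
// (non-normative sketch; the function that follows is authoritative):
//
//   return occupancy() > initiating_occupancy()
//       || (!UseCMSInitiatingOccupancyOnly
//           && (expansion_cause() == CMSExpansionCause::_satisfy_allocation
//               || _cmsSpace->should_concurrent_collect()));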
1561 bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const { 1562 1563 assert_lock_strong(freelistLock()); 1564 if (occupancy() > initiating_occupancy()) { 1565 if (PrintGCDetails && Verbose) { 1566 gclog_or_tty->print(" %s: collect because of occupancy %f / %f ", 1567 short_name(), occupancy(), initiating_occupancy()); 1568 } 1569 return true; 1570 } 1571 if (UseCMSInitiatingOccupancyOnly) { 1572 return false; 1573 } 1574 if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) { 1575 if (PrintGCDetails && Verbose) { 1576 gclog_or_tty->print(" %s: collect because expanded for allocation ", 1577 short_name()); 1578 } 1579 return true; 1580 } 1581 if (_cmsSpace->should_concurrent_collect()) { 1582 if (PrintGCDetails && Verbose) { 1583 gclog_or_tty->print(" %s: collect because cmsSpace says so ", 1584 short_name()); 1585 } 1586 return true; 1587 } 1588 return false; 1589 } 1590 1591 void ConcurrentMarkSweepGeneration::collect(bool full, 1592 bool clear_all_soft_refs, 1593 size_t size, 1594 bool tlab) 1595 { 1596 collector()->collect(full, clear_all_soft_refs, size, tlab); 1597 } 1598 1599 void CMSCollector::collect(bool full, 1600 bool clear_all_soft_refs, 1601 size_t size, 1602 bool tlab) 1603 { 1604 if (!UseCMSCollectionPassing && _collectorState > Idling) { 1605 // For debugging purposes skip the collection if the state 1606 // is not currently idle 1607 if (TraceCMSState) { 1608 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " skipped full:%d CMS state %d", 1609 Thread::current(), full, _collectorState); 1610 } 1611 return; 1612 } 1613 1614 // The following "if" branch is present for defensive reasons. 1615 // In the current uses of this interface, it can be replaced with: 1616 // assert(!GC_locker.is_active(), "Can't be called otherwise"); 1617 // But I am not placing that assert here to allow future 1618 // generality in invoking this interface. 1619 if (GC_locker::is_active()) { 1620 // A consistency test for GC_locker 1621 assert(GC_locker::needs_gc(), "Should have been set already"); 1622 // Skip this foreground collection, instead 1623 // expanding the heap if necessary. 1624 // Need the free list locks for the call to free() in compute_new_size() 1625 compute_new_size(); 1626 return; 1627 } 1628 acquire_control_and_collect(full, clear_all_soft_refs); 1629 _full_gcs_since_conc_gc++; 1630 1631 } 1632 1633 void CMSCollector::request_full_gc(unsigned int full_gc_count) { 1634 GenCollectedHeap* gch = GenCollectedHeap::heap(); 1635 unsigned int gc_count = gch->total_full_collections(); 1636 if (gc_count == full_gc_count) { 1637 MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag); 1638 _full_gc_requested = true; 1639 CGC_lock->notify(); // nudge CMS thread 1640 } 1641 } 1642 1643 1644 // The foreground and background collectors need to coordinate in order 1645 // to make sure that they do not mutually interfere with CMS collections. 1646 // When a background collection is active, 1647 // the foreground collector may need to take over (preempt) and 1648 // synchronously complete an ongoing collection. Depending on the 1649 // frequency of the background collections and the heap usage 1650 // of the application, this preemption can be seldom or frequent. 1651 // There are only certain 1652 // points in the background collection that the "collection-baton" 1653 // can be passed to the foreground collector. 1654 // 1655 // The foreground collector will wait for the baton before 1656 // starting any part of the collection. 
The foreground collector
1657 // will only wait at one location.
1658 //
1659 // The background collector will yield the baton before starting a new
1660 // phase of the collection (e.g., before initial marking, marking from roots,
1661 // precleaning, final re-mark, sweep etc.) This is normally done at the head
1662 // of the loop which switches the phases. The background collector does some
1663 // of the phases (initial mark, final re-mark) with the world stopped.
1664 // Because of locking involved in stopping the world,
1665 // the foreground collector should not block waiting for the background
1666 // collector when it is doing a stop-the-world phase. The background
1667 // collector will yield the baton at an additional point just before
1668 // it enters a stop-the-world phase. Once the world is stopped, the
1669 // background collector checks the phase of the collection. If the
1670 // phase has not changed, it proceeds with the collection. If the
1671 // phase has changed, it skips that phase of the collection. See
1672 // the comments on the use of the Heap_lock in collect_in_background().
1673 //
1674 // Variables used in baton passing.
1675 // _foregroundGCIsActive - Set to true by the foreground collector when
1676 // it wants the baton. The foreground clears it when it has finished
1677 // the collection.
1678 // _foregroundGCShouldWait - Set to true by the background collector
1679 // when it is running. The foreground collector waits while
1680 // _foregroundGCShouldWait is true.
1681 // CGC_lock - monitor used to protect access to the above variables
1682 // and to notify the foreground and background collectors.
1683 // _collectorState - current state of the CMS collection.
1684 //
1685 // The foreground collector
1686 // acquires the CGC_lock
1687 // sets _foregroundGCIsActive
1688 // waits on the CGC_lock for _foregroundGCShouldWait to be false
1689 // various locks acquired in preparation for the collection
1690 // are released so as not to block the background collector
1691 // that is in the midst of a collection
1692 // proceeds with the collection
1693 // clears _foregroundGCIsActive
1694 // returns
1695 //
1696 // The background collector in a loop iterating on the phases of the
1697 // collection
1698 // acquires the CGC_lock
1699 // sets _foregroundGCShouldWait
1700 // if _foregroundGCIsActive is set
1701 // clears _foregroundGCShouldWait, notifies CGC_lock
1702 // waits on CGC_lock for _foregroundGCIsActive to become false
1703 // and exits the loop.
1704 // otherwise
1705 // proceeds with that phase of the collection
1706 // if the phase is a stop-the-world phase,
1707 // yields the baton once more just before enqueueing
1708 // the stop-world CMS operation (executed by the VM thread).
1709 // returns after all phases of the collection are done
1710 //
1711
1712 void CMSCollector::acquire_control_and_collect(bool full,
1713 bool clear_all_soft_refs) {
1714 assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1715 assert(!Thread::current()->is_ConcurrentGC_thread(),
1716 "shouldn't try to acquire control from self!");
1717
1718 // Start the protocol for acquiring control of the
1719 // collection from the background collector (aka CMS thread).
1720 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1721 "VM thread should have CMS token");
1722 // Remember the possibly interrupted state of an ongoing
1723 // concurrent collection
1724 CollectorState first_state = _collectorState;
1725
1726 // Signal to a possibly ongoing concurrent collection that
1727 // we want to do a foreground collection.
1728 _foregroundGCIsActive = true;
1729
1730 // Disable incremental mode during a foreground collection.
1731 ICMSDisabler icms_disabler;
1732
1733 // release locks and wait for a notify from the background collector;
1734 // releasing the locks is only necessary for phases which
1735 // yield, to improve the granularity of the collection.
1736 assert_lock_strong(bitMapLock());
1737 // We need to lock the Free list lock for the space that we are
1738 // currently collecting.
1739 assert(haveFreelistLocks(), "Must be holding free list locks");
1740 bitMapLock()->unlock();
1741 releaseFreelistLocks();
1742 {
1743 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1744 if (_foregroundGCShouldWait) {
1745 // We are going to be waiting for action from the CMS thread;
1746 // it had better not be gone (for instance at shutdown)!
1747 assert(ConcurrentMarkSweepThread::cmst() != NULL,
1748 "CMS thread must be running");
1749 // Wait here until the background collector gives us the go-ahead
1750 ConcurrentMarkSweepThread::clear_CMS_flag(
1751 ConcurrentMarkSweepThread::CMS_vm_has_token); // release token
1752 // Get a possibly blocked CMS thread going:
1753 // Note that we set _foregroundGCIsActive true above,
1754 // without protection of the CGC_lock.
1755 CGC_lock->notify();
1756 assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(),
1757 "Possible deadlock");
1758 while (_foregroundGCShouldWait) {
1759 // wait for notification
1760 CGC_lock->wait(Mutex::_no_safepoint_check_flag);
1761 // Possibility of delay/starvation here, since CMS token does
1762 // not know to give priority to VM thread? Actually, i think
1763 // there wouldn't be any delay/starvation, but the proof of
1764 // that "fact" (?) appears non-trivial. XXX 20011219YSR
1765 }
1766 ConcurrentMarkSweepThread::set_CMS_flag(
1767 ConcurrentMarkSweepThread::CMS_vm_has_token);
1768 }
1769 }
1770 // The CMS_token is already held. Get back the other locks.
1771 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1772 "VM thread should have CMS token");
1773 getFreelistLocks();
1774 bitMapLock()->lock_without_safepoint_check();
1775 if (TraceCMSState) {
1776 gclog_or_tty->print_cr("CMS foreground collector has asked for control "
1777 INTPTR_FORMAT " with first state %d", Thread::current(), first_state);
1778 gclog_or_tty->print_cr(" gets control with state %d", _collectorState);
1779 }
1780
1781 // Check if we need to do a compaction, or if not, whether
1782 // we need to start the mark-sweep from scratch.
1783 bool should_compact = false; 1784 bool should_start_over = false; 1785 decide_foreground_collection_type(clear_all_soft_refs, 1786 &should_compact, &should_start_over); 1787 1788 NOT_PRODUCT( 1789 if (RotateCMSCollectionTypes) { 1790 if (_cmsGen->debug_collection_type() == 1791 ConcurrentMarkSweepGeneration::MSC_foreground_collection_type) { 1792 should_compact = true; 1793 } else if (_cmsGen->debug_collection_type() == 1794 ConcurrentMarkSweepGeneration::MS_foreground_collection_type) { 1795 should_compact = false; 1796 } 1797 } 1798 ) 1799 1800 if (PrintGCDetails && first_state > Idling) { 1801 GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause(); 1802 if (GCCause::is_user_requested_gc(cause) || 1803 GCCause::is_serviceability_requested_gc(cause)) { 1804 gclog_or_tty->print(" (concurrent mode interrupted)"); 1805 } else { 1806 gclog_or_tty->print(" (concurrent mode failure)"); 1807 } 1808 } 1809 1810 if (should_compact) { 1811 // If the collection is being acquired from the background 1812 // collector, there may be references on the discovered 1813 // references lists that have NULL referents (being those 1814 // that were concurrently cleared by a mutator) or 1815 // that are no longer active (having been enqueued concurrently 1816 // by the mutator). 1817 // Scrub the list of those references because Mark-Sweep-Compact 1818 // code assumes referents are not NULL and that all discovered 1819 // Reference objects are active. 1820 ref_processor()->clean_up_discovered_references(); 1821 1822 do_compaction_work(clear_all_soft_refs); 1823 1824 // Has the GC time limit been exceeded? 1825 DefNewGeneration* young_gen = _young_gen->as_DefNewGeneration(); 1826 size_t max_eden_size = young_gen->max_capacity() - 1827 young_gen->to()->capacity() - 1828 young_gen->from()->capacity(); 1829 GenCollectedHeap* gch = GenCollectedHeap::heap(); 1830 GCCause::Cause gc_cause = gch->gc_cause(); 1831 size_policy()->check_gc_overhead_limit(_young_gen->used(), 1832 young_gen->eden()->used(), 1833 _cmsGen->max_capacity(), 1834 max_eden_size, 1835 full, 1836 gc_cause, 1837 gch->collector_policy()); 1838 } else { 1839 do_mark_sweep_work(clear_all_soft_refs, first_state, 1840 should_start_over); 1841 } 1842 // Reset the expansion cause, now that we just completed 1843 // a collection cycle. 1844 clear_expansion_cause(); 1845 _foregroundGCIsActive = false; 1846 return; 1847 } 1848 1849 // Resize the perm generation and the tenured generation 1850 // after obtaining the free list locks for the 1851 // two generations. 1852 void CMSCollector::compute_new_size() { 1853 assert_locked_or_safepoint(Heap_lock); 1854 FreelistLocker z(this); 1855 _permGen->compute_new_size(); 1856 _cmsGen->compute_new_size(); 1857 } 1858 1859 // A work method used by foreground collection to determine 1860 // what type of collection (compacting or not, continuing or fresh) 1861 // it should do. 1862 // NOTE: the intent is to make UseCMSCompactAtFullCollection 1863 // and CMSCompactWhenClearAllSoftRefs the default in the future 1864 // and do away with the flags after a suitable period. 
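// Purely as an illustration of how these flags interact (example command
// line, not a recommendation):
//   -XX:+UseCMSCompactAtFullCollection -XX:CMSFullGCsBeforeCompaction=1
// would make a foreground collection compact whenever at least one full
// gc has occurred since the last concurrent cycle; see the predicate
// computed for *should_compact in the method below.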
1865 void CMSCollector::decide_foreground_collection_type( 1866 bool clear_all_soft_refs, bool* should_compact, 1867 bool* should_start_over) { 1868 // Normally, we'll compact only if the UseCMSCompactAtFullCollection 1869 // flag is set, and we have either requested a System.gc() or 1870 // the number of full gc's since the last concurrent cycle 1871 // has exceeded the threshold set by CMSFullGCsBeforeCompaction, 1872 // or if an incremental collection has failed 1873 GenCollectedHeap* gch = GenCollectedHeap::heap(); 1874 assert(gch->collector_policy()->is_two_generation_policy(), 1875 "You may want to check the correctness of the following"); 1876 // Inform cms gen if this was due to partial collection failing. 1877 // The CMS gen may use this fact to determine its expansion policy. 1878 if (gch->incremental_collection_will_fail()) { 1879 assert(!_cmsGen->incremental_collection_failed(), 1880 "Should have been noticed, reacted to and cleared"); 1881 _cmsGen->set_incremental_collection_failed(); 1882 } 1883 *should_compact = 1884 UseCMSCompactAtFullCollection && 1885 ((_full_gcs_since_conc_gc >= CMSFullGCsBeforeCompaction) || 1886 GCCause::is_user_requested_gc(gch->gc_cause()) || 1887 gch->incremental_collection_will_fail()); 1888 *should_start_over = false; 1889 if (clear_all_soft_refs && !*should_compact) { 1890 // We are about to do a last ditch collection attempt 1891 // so it would normally make sense to do a compaction 1892 // to reclaim as much space as possible. 1893 if (CMSCompactWhenClearAllSoftRefs) { 1894 // Default: The rationale is that in this case either 1895 // we are past the final marking phase, in which case 1896 // we'd have to start over, or so little has been done 1897 // that there's little point in saving that work. Compaction 1898 // appears to be the sensible choice in either case. 1899 *should_compact = true; 1900 } else { 1901 // We have been asked to clear all soft refs, but not to 1902 // compact. Make sure that we aren't past the final checkpoint 1903 // phase, for that is where we process soft refs. If we are already 1904 // past that phase, we'll need to redo the refs discovery phase and 1905 // if necessary clear soft refs that weren't previously 1906 // cleared. We do so by remembering the phase in which 1907 // we came in, and if we are past the refs processing 1908 // phase, we'll choose to just redo the mark-sweep 1909 // collection from scratch. 1910 if (_collectorState > FinalMarking) { 1911 // We are past the refs processing phase; 1912 // start over and do a fresh synchronous CMS cycle 1913 _collectorState = Resetting; // skip to reset to start new cycle 1914 reset(false /* == !asynch */); 1915 *should_start_over = true; 1916 } // else we can continue a possibly ongoing current cycle 1917 } 1918 } 1919 } 1920 1921 // A work method used by the foreground collector to do 1922 // a mark-sweep-compact. 1923 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) { 1924 GenCollectedHeap* gch = GenCollectedHeap::heap(); 1925 TraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, gclog_or_tty); 1926 if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) { 1927 gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d " 1928 "collections passed to foreground collector", _full_gcs_since_conc_gc); 1929 } 1930 1931 // Sample collection interval time and reset for collection pause. 
1932 if (UseAdaptiveSizePolicy) { 1933 size_policy()->msc_collection_begin(); 1934 } 1935 1936 // Temporarily widen the span of the weak reference processing to 1937 // the entire heap. 1938 MemRegion new_span(GenCollectedHeap::heap()->reserved_region()); 1939 ReferenceProcessorSpanMutator x(ref_processor(), new_span); 1940 1941 // Temporarily, clear the "is_alive_non_header" field of the 1942 // reference processor. 1943 ReferenceProcessorIsAliveMutator y(ref_processor(), NULL); 1944 1945 // Temporarily make reference _processing_ single threaded (non-MT). 1946 ReferenceProcessorMTProcMutator z(ref_processor(), false); 1947 1948 // Temporarily make refs discovery atomic 1949 ReferenceProcessorAtomicMutator w(ref_processor(), true); 1950 1951 ref_processor()->set_enqueuing_is_done(false); 1952 ref_processor()->enable_discovery(); 1953 ref_processor()->setup_policy(clear_all_soft_refs); 1954 // If an asynchronous collection finishes, the _modUnionTable is 1955 // all clear. If we are assuming the collection from an asynchronous 1956 // collection, clear the _modUnionTable. 1957 assert(_collectorState != Idling || _modUnionTable.isAllClear(), 1958 "_modUnionTable should be clear if the baton was not passed"); 1959 _modUnionTable.clear_all(); 1960 1961 // We must adjust the allocation statistics being maintained 1962 // in the free list space. We do so by reading and clearing 1963 // the sweep timer and updating the block flux rate estimates below. 1964 assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive"); 1965 if (_inter_sweep_timer.is_active()) { 1966 _inter_sweep_timer.stop(); 1967 // Note that we do not use this sample to update the _inter_sweep_estimate. 1968 _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()), 1969 _inter_sweep_estimate.padded_average(), 1970 _intra_sweep_estimate.padded_average()); 1971 } 1972 1973 { 1974 TraceCMSMemoryManagerStats(); 1975 } 1976 GenMarkSweep::invoke_at_safepoint(_cmsGen->level(), 1977 ref_processor(), clear_all_soft_refs); 1978 #ifdef ASSERT 1979 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace(); 1980 size_t free_size = cms_space->free(); 1981 assert(free_size == 1982 pointer_delta(cms_space->end(), cms_space->compaction_top()) 1983 * HeapWordSize, 1984 "All the free space should be compacted into one chunk at top"); 1985 assert(cms_space->dictionary()->totalChunkSize( 1986 debug_only(cms_space->freelistLock())) == 0 || 1987 cms_space->totalSizeInIndexedFreeLists() == 0, 1988 "All the free space should be in a single chunk"); 1989 size_t num = cms_space->totalCount(); 1990 assert((free_size == 0 && num == 0) || 1991 (free_size > 0 && (num == 1 || num == 2)), 1992 "There should be at most 2 free chunks after compaction"); 1993 #endif // ASSERT 1994 _collectorState = Resetting; 1995 assert(_restart_addr == NULL, 1996 "Should have been NULL'd before baton was passed"); 1997 reset(false /* == !asynch */); 1998 _cmsGen->reset_after_compaction(); 1999 _concurrent_cycles_since_last_unload = 0; 2000 2001 if (verifying() && !should_unload_classes()) { 2002 perm_gen_verify_bit_map()->clear_all(); 2003 } 2004 2005 // Clear any data recorded in the PLAB chunk arrays. 2006 if (_survivor_plab_array != NULL) { 2007 reset_survivor_plab_arrays(); 2008 } 2009 2010 // Adjust the per-size allocation stats for the next epoch. 2011 _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */); 2012 // Restart the "inter sweep timer" for the next epoch. 
2013 _inter_sweep_timer.reset();
2014 _inter_sweep_timer.start();
2015
2016 // Sample collection pause time and reset for collection interval.
2017 if (UseAdaptiveSizePolicy) {
2018 size_policy()->msc_collection_end(gch->gc_cause());
2019 }
2020
2021 // For a mark-sweep-compact, compute_new_size() will be called
2022 // in the heap's do_collection() method.
2023 }
2024
2025 // A work method used by the foreground collector to do
2026 // a mark-sweep, after taking over from a possibly on-going
2027 // concurrent mark-sweep collection.
2028 void CMSCollector::do_mark_sweep_work(bool clear_all_soft_refs,
2029 CollectorState first_state, bool should_start_over) {
2030 if (PrintGC && Verbose) {
2031 gclog_or_tty->print_cr("Pass concurrent collection to foreground "
2032 "collector with count %d",
2033 _full_gcs_since_conc_gc);
2034 }
2035 switch (_collectorState) {
2036 case Idling:
2037 if (first_state == Idling || should_start_over) {
2038 // The background GC was not active, or should be
2039 // restarted from scratch; start the cycle.
2040 _collectorState = InitialMarking;
2041 }
2042 // If first_state was not Idling, then a background GC
2043 // was in progress and has now finished. No need to do it
2044 // again. Leave the state as Idling.
2045 break;
2046 case Precleaning:
2047 // In the foreground case don't do the precleaning since
2048 // it is not done concurrently and there is extra work
2049 // required.
2050 _collectorState = FinalMarking;
2051 }
2052 if (PrintGCDetails &&
2053 (_collectorState > Idling ||
2054 !GCCause::is_user_requested_gc(GenCollectedHeap::heap()->gc_cause()))) {
2055 gclog_or_tty->print(" (concurrent mode failure)");
2056 }
2057 collect_in_foreground(clear_all_soft_refs);
2058
2059 // For a mark-sweep, compute_new_size() will be called
2060 // in the heap's do_collection() method.
2061 }
2062
2063
2064 void CMSCollector::getFreelistLocks() const {
2065 // Get locks for all free lists in all generations that this
2066 // collector is responsible for
2067 _cmsGen->freelistLock()->lock_without_safepoint_check();
2068 _permGen->freelistLock()->lock_without_safepoint_check();
2069 }
2070
2071 void CMSCollector::releaseFreelistLocks() const {
2072 // Release locks for all free lists in all generations that this
2073 // collector is responsible for
2074 _cmsGen->freelistLock()->unlock();
2075 _permGen->freelistLock()->unlock();
2076 }
2077
2078 bool CMSCollector::haveFreelistLocks() const {
2079 // Check locks for all free lists in all generations that this
2080 // collector is responsible for
2081 assert_lock_strong(_cmsGen->freelistLock());
2082 assert_lock_strong(_permGen->freelistLock());
2083 PRODUCT_ONLY(ShouldNotReachHere());
2084 return true;
2085 }
2086
2087 // A utility class that is used by the CMS collector to
2088 // temporarily "release" the foreground collector from its
2089 // usual obligation to wait for the background collector to
2090 // complete an ongoing phase before proceeding.
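// Illustrative use, as in collect_in_background() further below:
//   {
//     ReleaseForegroundGC x(this);  // allows a waiting foreground GC to proceed
//     VM_CMS_Initial_Mark initial_mark_op(this);
//     VMThread::execute(&initial_mark_op);
//   }                               // dtor re-asserts _foregroundGCShouldWait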
2091 class ReleaseForegroundGC: public StackObj { 2092 private: 2093 CMSCollector* _c; 2094 public: 2095 ReleaseForegroundGC(CMSCollector* c) : _c(c) { 2096 assert(_c->_foregroundGCShouldWait, "Else should not need to call"); 2097 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag); 2098 // allow a potentially blocked foreground collector to proceed 2099 _c->_foregroundGCShouldWait = false; 2100 if (_c->_foregroundGCIsActive) { 2101 CGC_lock->notify(); 2102 } 2103 assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(), 2104 "Possible deadlock"); 2105 } 2106 2107 ~ReleaseForegroundGC() { 2108 assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?"); 2109 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag); 2110 _c->_foregroundGCShouldWait = true; 2111 } 2112 }; 2113 2114 // There are separate collect_in_background and collect_in_foreground because of 2115 // the different locking requirements of the background collector and the 2116 // foreground collector. There was originally an attempt to share 2117 // one "collect" method between the background collector and the foreground 2118 // collector but the if-then-else required made it cleaner to have 2119 // separate methods. 2120 void CMSCollector::collect_in_background(bool clear_all_soft_refs) { 2121 assert(Thread::current()->is_ConcurrentGC_thread(), 2122 "A CMS asynchronous collection is only allowed on a CMS thread."); 2123 2124 GenCollectedHeap* gch = GenCollectedHeap::heap(); 2125 { 2126 bool safepoint_check = Mutex::_no_safepoint_check_flag; 2127 MutexLockerEx hl(Heap_lock, safepoint_check); 2128 FreelistLocker fll(this); 2129 MutexLockerEx x(CGC_lock, safepoint_check); 2130 if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) { 2131 // The foreground collector is active or we're 2132 // not using asynchronous collections. Skip this 2133 // background collection. 2134 assert(!_foregroundGCShouldWait, "Should be clear"); 2135 return; 2136 } else { 2137 assert(_collectorState == Idling, "Should be idling before start."); 2138 _collectorState = InitialMarking; 2139 // Reset the expansion cause, now that we are about to begin 2140 // a new cycle. 2141 clear_expansion_cause(); 2142 } 2143 // Decide if we want to enable class unloading as part of the 2144 // ensuing concurrent GC cycle. 2145 update_should_unload_classes(); 2146 _full_gc_requested = false; // acks all outstanding full gc requests 2147 // Signal that we are about to start a collection 2148 gch->increment_total_full_collections(); // ... starting a collection cycle 2149 _collection_count_start = gch->total_full_collections(); 2150 } 2151 2152 // Used for PrintGC 2153 size_t prev_used; 2154 if (PrintGC && Verbose) { 2155 prev_used = _cmsGen->used(); // XXXPERM 2156 } 2157 2158 // The change of the collection state is normally done at this level; 2159 // the exceptions are phases that are executed while the world is 2160 // stopped. For those phases the change of state is done while the 2161 // world is stopped. For baton passing purposes this allows the 2162 // background collector to finish the phase and change state atomically. 2163 // The foreground collector cannot wait on a phase that is done 2164 // while the world is stopped because the foreground collector already 2165 // has the world stopped and would deadlock. 
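// The usual progression of an uninterrupted background cycle through the
// loop below is (sketch; AbortablePreclean may be skipped):
//   InitialMarking -> Marking -> Precleaning [-> AbortablePreclean]
//     -> FinalMarking -> Sweeping -> Resizing -> Resetting -> Idling
// A preempting foreground collection can cut this short at a yield point.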
2166 while (_collectorState != Idling) { 2167 if (TraceCMSState) { 2168 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d", 2169 Thread::current(), _collectorState); 2170 } 2171 // The foreground collector 2172 // holds the Heap_lock throughout its collection. 2173 // holds the CMS token (but not the lock) 2174 // except while it is waiting for the background collector to yield. 2175 // 2176 // The foreground collector should be blocked (not for long) 2177 // if the background collector is about to start a phase 2178 // executed with world stopped. If the background 2179 // collector has already started such a phase, the 2180 // foreground collector is blocked waiting for the 2181 // Heap_lock. The stop-world phases (InitialMarking and FinalMarking) 2182 // are executed in the VM thread. 2183 // 2184 // The locking order is 2185 // PendingListLock (PLL) -- if applicable (FinalMarking) 2186 // Heap_lock (both this & PLL locked in VM_CMS_Operation::prologue()) 2187 // CMS token (claimed in 2188 // stop_world_and_do() --> 2189 // safepoint_synchronize() --> 2190 // CMSThread::synchronize()) 2191 2192 { 2193 // Check if the FG collector wants us to yield. 2194 CMSTokenSync x(true); // is cms thread 2195 if (waitForForegroundGC()) { 2196 // We yielded to a foreground GC, nothing more to be 2197 // done this round. 2198 assert(_foregroundGCShouldWait == false, "We set it to false in " 2199 "waitForForegroundGC()"); 2200 if (TraceCMSState) { 2201 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT 2202 " exiting collection CMS state %d", 2203 Thread::current(), _collectorState); 2204 } 2205 return; 2206 } else { 2207 // The background collector can run but check to see if the 2208 // foreground collector has done a collection while the 2209 // background collector was waiting to get the CGC_lock 2210 // above. If yes, break so that _foregroundGCShouldWait 2211 // is cleared before returning. 2212 if (_collectorState == Idling) { 2213 break; 2214 } 2215 } 2216 } 2217 2218 assert(_foregroundGCShouldWait, "Foreground collector, if active, " 2219 "should be waiting"); 2220 2221 switch (_collectorState) { 2222 case InitialMarking: 2223 { 2224 ReleaseForegroundGC x(this); 2225 stats().record_cms_begin(); 2226 2227 VM_CMS_Initial_Mark initial_mark_op(this); 2228 VMThread::execute(&initial_mark_op); 2229 } 2230 // The collector state may be any legal state at this point 2231 // since the background collector may have yielded to the 2232 // foreground collector. 
2233 break;
2234 case Marking:
2235 // initial marking in checkpointRootsInitialWork has been completed
2236 if (markFromRoots(true)) { // we were successful
2237 assert(_collectorState == Precleaning, "Collector state should "
2238 "have changed");
2239 } else {
2240 assert(_foregroundGCIsActive, "Internal state inconsistency");
2241 }
2242 break;
2243 case Precleaning:
2244 if (UseAdaptiveSizePolicy) {
2245 size_policy()->concurrent_precleaning_begin();
2246 }
2247 // marking from roots in markFromRoots has been completed
2248 preclean();
2249 if (UseAdaptiveSizePolicy) {
2250 size_policy()->concurrent_precleaning_end();
2251 }
2252 assert(_collectorState == AbortablePreclean ||
2253 _collectorState == FinalMarking,
2254 "Collector state should have changed");
2255 break;
2256 case AbortablePreclean:
2257 if (UseAdaptiveSizePolicy) {
2258 size_policy()->concurrent_phases_resume();
2259 }
2260 abortable_preclean();
2261 if (UseAdaptiveSizePolicy) {
2262 size_policy()->concurrent_precleaning_end();
2263 }
2264 assert(_collectorState == FinalMarking, "Collector state should "
2265 "have changed");
2266 break;
2267 case FinalMarking:
2268 {
2269 ReleaseForegroundGC x(this);
2270
2271 VM_CMS_Final_Remark final_remark_op(this);
2272 VMThread::execute(&final_remark_op);
2273 }
2274 assert(_foregroundGCShouldWait, "block post-condition");
2275 break;
2276 case Sweeping:
2277 if (UseAdaptiveSizePolicy) {
2278 size_policy()->concurrent_sweeping_begin();
2279 }
2280 // final marking in checkpointRootsFinal has been completed
2281 sweep(true);
2282 assert(_collectorState == Resizing, "Collector state change "
2283 "to Resizing must be done under the free_list_lock");
2284 _full_gcs_since_conc_gc = 0;
2285
2286 // Stop the timers for adaptive size policy for the concurrent phases
2287 if (UseAdaptiveSizePolicy) {
2288 size_policy()->concurrent_sweeping_end();
2289 size_policy()->concurrent_phases_end(gch->gc_cause(),
2290 gch->prev_gen(_cmsGen)->capacity(),
2291 _cmsGen->free());
2292 }
2293 // Note: intentional fall-through from Sweeping into Resizing.
2294 case Resizing: {
2295 // Sweeping has been completed...
2296 // At this point the background collection has completed.
2297 // Don't move the call to compute_new_size() down
2298 // into code that might be executed if the background
2299 // collection was preempted.
2300 {
2301 ReleaseForegroundGC x(this); // unblock FG collection
2302 MutexLockerEx y(Heap_lock, Mutex::_no_safepoint_check_flag);
2303 CMSTokenSync z(true); // not strictly needed.
2304 if (_collectorState == Resizing) {
2305 compute_new_size();
2306 _collectorState = Resetting;
2307 } else {
2308 assert(_collectorState == Idling, "The state should only change"
2309 " because the foreground collector has finished the collection");
2310 }
2311 }
2312 break;
2313 }
2314 case Resetting:
2315 // CMS heap resizing has been completed
2316 reset(true);
2317 assert(_collectorState == Idling, "Collector state should "
2318 "have changed");
2319 stats().record_cms_end();
2320 // Don't move the concurrent_phases_end() and compute_new_size()
2321 // calls to here because a preempted background collection
2322 // has its state set to "Resetting".
2323 break;
2324 case Idling:
2325 default:
2326 ShouldNotReachHere();
2327 break;
2328 }
2329 if (TraceCMSState) {
2330 gclog_or_tty->print_cr(" Thread " INTPTR_FORMAT " done - next CMS state %d",
2331 Thread::current(), _collectorState);
2332 }
2333 assert(_foregroundGCShouldWait, "block post-condition");
2334 }
2335
2336 // Should this be in gc_epilogue?
2337 collector_policy()->counters()->update_counters();
2338
2339 {
2340 // Clear _foregroundGCShouldWait and, in the event that the
2341 // foreground collector is waiting, notify it, before
2342 // returning.
2343 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2344 _foregroundGCShouldWait = false;
2345 if (_foregroundGCIsActive) {
2346 CGC_lock->notify();
2347 }
2348 assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2349 "Possible deadlock");
2350 }
2351 if (TraceCMSState) {
2352 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2353 " exiting collection CMS state %d",
2354 Thread::current(), _collectorState);
2355 }
2356 if (PrintGC && Verbose) {
2357 _cmsGen->print_heap_change(prev_used);
2358 }
2359 }
2360
2361 void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) {
2362 assert(_foregroundGCIsActive && !_foregroundGCShouldWait,
2363 "Foreground collector should be active and not waiting");
2364 assert(Thread::current()->is_VM_thread(), "A foreground collection "
2365 "may only be done by the VM Thread with the world stopped");
2366 assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
2367 "VM thread should have CMS token");
2368
2369 NOT_PRODUCT(TraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
2370 true, gclog_or_tty);)
2371 if (UseAdaptiveSizePolicy) {
2372 size_policy()->ms_collection_begin();
2373 }
2374 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
2375
2376 HandleMark hm; // Discard invalid handles created during verification
2377
2378 if (VerifyBeforeGC &&
2379 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2380 Universe::verify(true);
2381 }
2382
2383 // Snapshot the soft reference policy to be used in this collection cycle.
2384 ref_processor()->setup_policy(clear_all_soft_refs);
2385
2386 bool init_mark_was_synchronous = false; // until proven otherwise
2387 while (_collectorState != Idling) {
2388 if (TraceCMSState) {
2389 gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
2390 Thread::current(), _collectorState);
2391 }
2392 switch (_collectorState) {
2393 case InitialMarking:
2394 init_mark_was_synchronous = true; // fact to be exploited in re-mark
2395 checkpointRootsInitial(false);
2396 assert(_collectorState == Marking, "Collector state should have changed"
2397 " within checkpointRootsInitial()");
2398 break;
2399 case Marking:
2400 // initial marking in checkpointRootsInitialWork has been completed
2401 if (VerifyDuringGC &&
2402 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2403 gclog_or_tty->print("Verify before mark from roots: ");
2404 Universe::verify(true);
2405 }
2406 {
2407 bool res = markFromRoots(false);
2408 assert(res && _collectorState == FinalMarking, "Collector state should "
2409 "have changed");
2410 break;
2411 }
2412 case FinalMarking:
2413 if (VerifyDuringGC &&
2414 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2415 gclog_or_tty->print("Verify before re-mark: ");
2416 Universe::verify(true);
2417 }
2418 checkpointRootsFinal(false, clear_all_soft_refs,
2419 init_mark_was_synchronous);
2420 assert(_collectorState == Sweeping, "Collector state should "
2421 "have changed within checkpointRootsFinal()");
2422 break;
2423 case Sweeping:
2424 // final marking in checkpointRootsFinal has been completed
2425 if (VerifyDuringGC &&
2426 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2427 gclog_or_tty->print("Verify before sweep: ");
2428 Universe::verify(true);
2429 }
2430
sweep(false);
2431 assert(_collectorState == Resizing, "Incorrect state");
2432 break;
2433 case Resizing: {
2434 // Sweeping has been completed; the actual resize in this case
2435 // is done separately; nothing to be done in this state.
2436 _collectorState = Resetting;
2437 break;
2438 }
2439 case Resetting:
2440 // The heap has been resized.
2441 if (VerifyDuringGC &&
2442 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2443 gclog_or_tty->print("Verify before reset: ");
2444 Universe::verify(true);
2445 }
2446 reset(false);
2447 assert(_collectorState == Idling, "Collector state should "
2448 "have changed");
2449 break;
2450 case Precleaning:
2451 case AbortablePreclean:
2452 // Elide the preclean phase
2453 _collectorState = FinalMarking;
2454 break;
2455 default:
2456 ShouldNotReachHere();
2457 }
2458 if (TraceCMSState) {
2459 gclog_or_tty->print_cr(" Thread " INTPTR_FORMAT " done - next CMS state %d",
2460 Thread::current(), _collectorState);
2461 }
2462 }
2463
2464 if (UseAdaptiveSizePolicy) {
2465 GenCollectedHeap* gch = GenCollectedHeap::heap();
2466 size_policy()->ms_collection_end(gch->gc_cause());
2467 }
2468
2469 if (VerifyAfterGC &&
2470 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2471 Universe::verify(true);
2472 }
2473 if (TraceCMSState) {
2474 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2475 " exiting collection CMS state %d",
2476 Thread::current(), _collectorState);
2477 }
2478 }
2479
2480 bool CMSCollector::waitForForegroundGC() {
2481 bool res = false;
2482 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2483 "CMS thread should have CMS token");
2484 // Block the foreground collector until the
2485 // background collector decides whether to
2486 // yield.
2487 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2488 _foregroundGCShouldWait = true;
2489 if (_foregroundGCIsActive) {
2490 // The background collector yields to the
2491 // foreground collector and returns a value
2492 // indicating that it has yielded. The foreground
2493 // collector can proceed.
2494 res = true;
2495 _foregroundGCShouldWait = false;
2496 ConcurrentMarkSweepThread::clear_CMS_flag(
2497 ConcurrentMarkSweepThread::CMS_cms_has_token);
2498 ConcurrentMarkSweepThread::set_CMS_flag(
2499 ConcurrentMarkSweepThread::CMS_cms_wants_token);
2500 // Get a possibly blocked foreground thread going
2501 CGC_lock->notify();
2502 if (TraceCMSState) {
2503 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
2504 Thread::current(), _collectorState);
2505 }
2506 while (_foregroundGCIsActive) {
2507 CGC_lock->wait(Mutex::_no_safepoint_check_flag);
2508 }
2509 ConcurrentMarkSweepThread::set_CMS_flag(
2510 ConcurrentMarkSweepThread::CMS_cms_has_token);
2511 ConcurrentMarkSweepThread::clear_CMS_flag(
2512 ConcurrentMarkSweepThread::CMS_cms_wants_token);
2513 }
2514 if (TraceCMSState) {
2515 gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
2516 Thread::current(), _collectorState);
2517 }
2518 return res;
2519 }
2520
2521 // Because of the need to lock the free lists and other structures in
2522 // the collector, common to all the generations that the collector is
2523 // collecting, we need the gc_prologues of individual CMS generations
2524 // to delegate to their collector. It may have been simpler had the
2525 // current infrastructure allowed one to call a prologue on a
2526 // collector.
In the absence of that we have the generation's
2527 // prologue delegate to the collector, which delegates back
2528 // some "local" work to a worker method in the individual generations
2529 // that it's responsible for collecting, while itself doing any
2530 // work common to all generations it's responsible for. A similar
2531 // comment applies to the gc_epilogue()'s.
2532 // The role of the variable _between_prologue_and_epilogue is to
2533 // enforce the invocation protocol.
2534 void CMSCollector::gc_prologue(bool full) {
2535 // Call gc_prologue_work() for each CMSGen and PermGen that
2536 // we are responsible for.
2537
2538 // The following locking discipline assumes that we are only called
2539 // when the world is stopped.
2540 assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption");
2541
2542 // The CMSCollector prologue must call the gc_prologues for the
2543 // "generations" (including PermGen if any) that it's responsible
2544 // for.
2545
2546 assert( Thread::current()->is_VM_thread()
2547 || ( CMSScavengeBeforeRemark
2548 && Thread::current()->is_ConcurrentGC_thread()),
2549 "Incorrect thread type for prologue execution");
2550
2551 if (_between_prologue_and_epilogue) {
2552 // We have already been invoked; this is a gc_prologue delegation
2553 // from yet another CMS generation that we are responsible for, just
2554 // ignore it since all relevant work has already been done.
2555 return;
2556 }
2557
2558 // set a bit saying prologue has been called; cleared in epilogue
2559 _between_prologue_and_epilogue = true;
2560 // Claim locks for common data structures, then call gc_prologue_work()
2561 // for each CMSGen and PermGen that we are responsible for.
2562
2563 getFreelistLocks(); // gets free list locks on constituent spaces
2564 bitMapLock()->lock_without_safepoint_check();
2565
2566 // Should call gc_prologue_work() for all cms gens we are responsible for
2567 bool registerClosure = _collectorState >= Marking
2568 && _collectorState < Sweeping;
2569 ModUnionClosure* muc = ParallelGCThreads > 0 ? &_modUnionClosurePar
2570 : &_modUnionClosure;
2571 _cmsGen->gc_prologue_work(full, registerClosure, muc);
2572 _permGen->gc_prologue_work(full, registerClosure, muc);
2573
2574 if (!full) {
2575 stats().record_gc0_begin();
2576 }
2577 }
2578
2579 void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
2580 // Delegate to CMSCollector which knows how to coordinate between
2581 // this and any other CMS generations that it is responsible for
2582 // collecting.
2583 collector()->gc_prologue(full);
2584 }
2585
2586 // This is a "private" interface for use by this generation's CMSCollector.
2587 // Not to be called directly by any other entity (for instance,
2588 // GenCollectedHeap, which calls the "public" gc_prologue method above).
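// Concretely, the delegation described above runs:
//   GenCollectedHeap
//     -> ConcurrentMarkSweepGeneration::gc_prologue(full)
//       -> CMSCollector::gc_prologue(full)      // common work, claims locks
//         -> _cmsGen->gc_prologue_work(...)     // per-generation work
//         -> _permGen->gc_prologue_work(...)
// with the gc_epilogue() chain mirroring it.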
2589 void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
2590 bool registerClosure, ModUnionClosure* modUnionClosure) {
2591 assert(!incremental_collection_failed(), "Shouldn't be set yet");
2592 assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,
2593 "Should be NULL");
2594 if (registerClosure) {
2595 cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure);
2596 }
2597 cmsSpace()->gc_prologue();
2598 // Clear stat counters
2599 NOT_PRODUCT(
2600 assert(_numObjectsPromoted == 0, "check");
2601 assert(_numWordsPromoted == 0, "check");
2602 if (Verbose && PrintGC) {
2603 gclog_or_tty->print("Allocated "SIZE_FORMAT" objects, "
2604 SIZE_FORMAT" bytes concurrently",
2605 _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord));
2606 }
2607 _numObjectsAllocated = 0;
2608 _numWordsAllocated = 0;
2609 )
2610 }
2611
2612 void CMSCollector::gc_epilogue(bool full) {
2613 // The following locking discipline assumes that we are only called
2614 // when the world is stopped.
2615 assert(SafepointSynchronize::is_at_safepoint(),
2616 "world is stopped assumption");
2617
2618 // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks
2619 // if linear allocation blocks need to be appropriately marked to allow
2620 // the blocks to be parsable. We also check here whether we need to nudge the
2621 // CMS collector thread to start a new cycle (if it's not already active).
2622 assert( Thread::current()->is_VM_thread()
2623 || ( CMSScavengeBeforeRemark
2624 && Thread::current()->is_ConcurrentGC_thread()),
2625 "Incorrect thread type for epilogue execution");
2626
2627 if (!_between_prologue_and_epilogue) {
2628 // We have already been invoked; this is a gc_epilogue delegation
2629 // from yet another CMS generation that we are responsible for, just
2630 // ignore it since all relevant work has already been done.
2631 return;
2632 }
2633 assert(haveFreelistLocks(), "must have freelist locks");
2634 assert_lock_strong(bitMapLock());
2635
2636 _cmsGen->gc_epilogue_work(full);
2637 _permGen->gc_epilogue_work(full);
2638
2639 if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
2640 // in case sampling was not already enabled, enable it
2641 _start_sampling = true;
2642 }
2643 // reset _eden_chunk_array so sampling starts afresh
2644 _eden_chunk_index = 0;
2645
2646 size_t cms_used = _cmsGen->cmsSpace()->used();
2647 size_t perm_used = _permGen->cmsSpace()->used();
2648
2649 // update performance counters - this uses a special version of
2650 // update_counters() that allows the utilization to be passed as a
2651 // parameter, avoiding multiple calls to used().
2652 //
2653 _cmsGen->update_counters(cms_used);
2654 _permGen->update_counters(perm_used);
2655
2656 if (CMSIncrementalMode) {
2657 icms_update_allocation_limits();
2658 }
2659
2660 bitMapLock()->unlock();
2661 releaseFreelistLocks();
2662
2663 _between_prologue_and_epilogue = false; // ready for next cycle
2664 }
2665
2666 void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
2667 collector()->gc_epilogue(full);
2668
2669 // Also reset promotion tracking in par gc thread states.
2670 if (ParallelGCThreads > 0) {
2671 for (uint i = 0; i < ParallelGCThreads; i++) {
2672 _par_gc_thread_states[i]->promo.stopTrackingPromotions(i);
2673 }
2674 }
2675 }
2676
2677 void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) {
2678 assert(!incremental_collection_failed(), "Should have been cleared");
2679 cmsSpace()->setPreconsumptionDirtyCardClosure(NULL);
2680 cmsSpace()->gc_epilogue();
2681 // Print stat counters
2682 NOT_PRODUCT(
2683 assert(_numObjectsAllocated == 0, "check");
2684 assert(_numWordsAllocated == 0, "check");
2685 if (Verbose && PrintGC) {
2686 gclog_or_tty->print("Promoted "SIZE_FORMAT" objects, "
2687 SIZE_FORMAT" bytes",
2688 _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord));
2689 }
2690 _numObjectsPromoted = 0;
2691 _numWordsPromoted = 0;
2692 )
2693
2694 if (PrintGC && Verbose) {
2695 // The call down the chain through contiguous_available() needs the
2696 // freelistLock, so print this out before releasing the freelistLock.
2697 gclog_or_tty->print(" Contiguous available "SIZE_FORMAT" bytes ",
2698 contiguous_available());
2699 }
2700 }
2701
2702 #ifndef PRODUCT
2703 bool CMSCollector::have_cms_token() {
2704 Thread* thr = Thread::current();
2705 if (thr->is_VM_thread()) {
2706 return ConcurrentMarkSweepThread::vm_thread_has_cms_token();
2707 } else if (thr->is_ConcurrentGC_thread()) {
2708 return ConcurrentMarkSweepThread::cms_thread_has_cms_token();
2709 } else if (thr->is_GC_task_thread()) {
2710 return ConcurrentMarkSweepThread::vm_thread_has_cms_token() &&
2711 ParGCRareEvent_lock->owned_by_self();
2712 }
2713 return false;
2714 }
2715 #endif
2716
2717 // Check reachability of the given heap address in CMS generation,
2718 // treating all other generations as roots.
2719 bool CMSCollector::is_cms_reachable(HeapWord* addr) {
2720 // We could "guarantee" below, rather than assert, but i'll
2721 // leave these as "asserts" so that an adventurous debugger
2722 // could try this in the product build provided some subset of
2723 // the conditions were met, provided they were interested in the
2724 // results and knew that the computation below wouldn't interfere
2725 // with other concurrent computations mutating the structures
2726 // being read or written.
2727 assert(SafepointSynchronize::is_at_safepoint(),
2728 "Else mutations in object graph will make answer suspect");
2729 assert(have_cms_token(), "Should hold cms token");
2730 assert(haveFreelistLocks(), "must hold free list locks");
2731 assert_lock_strong(bitMapLock());
2732
2733 // Clear the marking bit map array before starting, but, just
2734 // for kicks, first report if the given address is already marked
2735 gclog_or_tty->print_cr("Start: Address 0x%x is%s marked", addr,
2736 _markBitMap.isMarked(addr) ? "" : " not");
2737
2738 if (verify_after_remark()) {
2739 MutexLockerEx x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2740 bool result = verification_mark_bm()->isMarked(addr);
2741 gclog_or_tty->print_cr("TransitiveMark: Address 0x%x %s marked", addr,
2742 result ? "IS" : "is NOT");
2743 return result;
2744 } else {
2745 gclog_or_tty->print_cr("Could not compute result");
2746 return false;
2747 }
2748 }
2749
2750 ////////////////////////////////////////////////////////
2751 // CMS Verification Support
2752 ////////////////////////////////////////////////////////
2753 // Following the remark phase, the following invariant
2754 // should hold -- each object in the CMS heap which is
2755 // marked in the verification_mark_bm() should also be marked in markBitMap().
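// Equivalently (sketch): every bit set in verification_mark_bm() must also
// be set in markBitMap(). The converse need not hold: objects that died
// during the concurrent phases (floating garbage) remain marked in
// markBitMap() but are not reached by the verification marking.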
2756
2757 class VerifyMarkedClosure: public BitMapClosure {
2758 CMSBitMap* _marks;
2759 bool _failed;
2760
2761 public:
2762 VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
2763
2764 bool do_bit(size_t offset) {
2765 HeapWord* addr = _marks->offsetToHeapWord(offset);
2766 if (!_marks->isMarked(addr)) {
2767 oop(addr)->print_on(gclog_or_tty);
2768 gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
2769 _failed = true;
2770 }
2771 return true;
2772 }
2773
2774 bool failed() { return _failed; }
2775 };
2776
2777 bool CMSCollector::verify_after_remark() {
2778 gclog_or_tty->print(" [Verifying CMS Marking... ");
2779 MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2780 static bool init = false;
2781
2782 assert(SafepointSynchronize::is_at_safepoint(),
2783 "Else mutations in object graph will make answer suspect");
2784 assert(have_cms_token(),
2785 "Else there may be mutual interference in use of "
2786 "verification data structures");
2787 assert(_collectorState > Marking && _collectorState <= Sweeping,
2788 "Else marking info checked here may be obsolete");
2789 assert(haveFreelistLocks(), "must hold free list locks");
2790 assert_lock_strong(bitMapLock());
2791
2792
2793 // Allocate marking bit map if not already allocated
2794 if (!init) { // first time
2795 if (!verification_mark_bm()->allocate(_span)) {
2796 return false;
2797 }
2798 init = true;
2799 }
2800
2801 assert(verification_mark_stack()->isEmpty(), "Should be empty");
2802
2803 // Turn off refs discovery -- so we will be tracing through refs.
2804 // This is as intended, because by this time
2805 // GC must already have cleared any refs that need to be cleared,
2806 // and traced those that need to be marked; moreover,
2807 // the marking done here is not going to interfere in any
2808 // way with the marking information used by GC.
2809 NoRefDiscovery no_discovery(ref_processor());
2810
2811 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
2812
2813 // Clear any marks from a previous round
2814 verification_mark_bm()->clear_all();
2815 assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
2816 verify_work_stacks_empty();
2817
2818 GenCollectedHeap* gch = GenCollectedHeap::heap();
2819 gch->ensure_parsability(false); // fill TLABs, but no need to retire them
2820 // Update the saved marks which may affect the root scans.
2821 gch->save_marks();
2822
2823 if (CMSRemarkVerifyVariant == 1) {
2824 // In this first variant of verification, we complete
2825 // all marking, then check if the new marks-vector is
2826 // a subset of the CMS marks-vector.
2827 verify_after_remark_work_1();
2828 } else if (CMSRemarkVerifyVariant == 2) {
2829 // In this second variant of verification, we flag an error
2830 // (i.e. an object marked in the new marks-vector but not marked
2831 // in the CMS marks-vector) immediately, also indicating the
2832 // identity of an object (A) that references the unmarked object (B) --
2833 // presumably, a mutation to A failed to be picked up by preclean/remark?
2834 verify_after_remark_work_2(); 2835 } else { 2836 warning("Unrecognized value %d for CMSRemarkVerifyVariant", 2837 CMSRemarkVerifyVariant); 2838 } 2839 gclog_or_tty->print(" done] "); 2840 return true; 2841 } 2842 2843 void CMSCollector::verify_after_remark_work_1() { 2844 ResourceMark rm; 2845 HandleMark hm; 2846 GenCollectedHeap* gch = GenCollectedHeap::heap(); 2847 2848 // Mark from roots one level into CMS 2849 MarkRefsIntoClosure notOlder(_span, verification_mark_bm()); 2850 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel. 2851 2852 gch->gen_process_strong_roots(_cmsGen->level(), 2853 true, // younger gens are roots 2854 true, // activate StrongRootsScope 2855 true, // collecting perm gen 2856 SharedHeap::ScanningOption(roots_scanning_options()), 2857 ¬Older, 2858 true, // walk code active on stacks 2859 NULL); 2860 2861 // Now mark from the roots 2862 assert(_revisitStack.isEmpty(), "Should be empty"); 2863 MarkFromRootsClosure markFromRootsClosure(this, _span, 2864 verification_mark_bm(), verification_mark_stack(), &_revisitStack, 2865 false /* don't yield */, true /* verifying */); 2866 assert(_restart_addr == NULL, "Expected pre-condition"); 2867 verification_mark_bm()->iterate(&markFromRootsClosure); 2868 while (_restart_addr != NULL) { 2869 // Deal with stack overflow: by restarting at the indicated 2870 // address. 2871 HeapWord* ra = _restart_addr; 2872 markFromRootsClosure.reset(ra); 2873 _restart_addr = NULL; 2874 verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end()); 2875 } 2876 assert(verification_mark_stack()->isEmpty(), "Should have been drained"); 2877 verify_work_stacks_empty(); 2878 // Should reset the revisit stack above, since no class tree 2879 // surgery is forthcoming. 2880 _revisitStack.reset(); // throwing away all contents 2881 2882 // Marking completed -- now verify that each bit marked in 2883 // verification_mark_bm() is also marked in markBitMap(); flag all 2884 // errors by printing corresponding objects. 2885 VerifyMarkedClosure vcl(markBitMap()); 2886 verification_mark_bm()->iterate(&vcl); 2887 if (vcl.failed()) { 2888 gclog_or_tty->print("Verification failed"); 2889 Universe::heap()->print_on(gclog_or_tty); 2890 fatal("CMS: failed marking verification after remark"); 2891 } 2892 } 2893 2894 void CMSCollector::verify_after_remark_work_2() { 2895 ResourceMark rm; 2896 HandleMark hm; 2897 GenCollectedHeap* gch = GenCollectedHeap::heap(); 2898 2899 // Mark from roots one level into CMS 2900 MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(), 2901 markBitMap()); 2902 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel. 2903 gch->gen_process_strong_roots(_cmsGen->level(), 2904 true, // younger gens are roots 2905 true, // activate StrongRootsScope 2906 true, // collecting perm gen 2907 SharedHeap::ScanningOption(roots_scanning_options()), 2908 ¬Older, 2909 true, // walk code active on stacks 2910 NULL); 2911 2912 // Now mark from the roots 2913 assert(_revisitStack.isEmpty(), "Should be empty"); 2914 MarkFromRootsVerifyClosure markFromRootsClosure(this, _span, 2915 verification_mark_bm(), markBitMap(), verification_mark_stack()); 2916 assert(_restart_addr == NULL, "Expected pre-condition"); 2917 verification_mark_bm()->iterate(&markFromRootsClosure); 2918 while (_restart_addr != NULL) { 2919 // Deal with stack overflow: by restarting at the indicated 2920 // address. 
2921 HeapWord* ra = _restart_addr; 2922 markFromRootsClosure.reset(ra); 2923 _restart_addr = NULL; 2924 verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end()); 2925 } 2926 assert(verification_mark_stack()->isEmpty(), "Should have been drained"); 2927 verify_work_stacks_empty(); 2928 // Should reset the revisit stack above, since no class tree 2929 // surgery is forthcoming. 2930 _revisitStack.reset(); // throwing away all contents 2931 2932 // Marking completed -- now verify that each bit marked in 2933 // verification_mark_bm() is also marked in markBitMap(); flag all 2934 // errors by printing corresponding objects. 2935 VerifyMarkedClosure vcl(markBitMap()); 2936 verification_mark_bm()->iterate(&vcl); 2937 assert(!vcl.failed(), "Else verification above should not have succeeded"); 2938 } 2939 2940 void ConcurrentMarkSweepGeneration::save_marks() { 2941 // delegate to CMS space 2942 cmsSpace()->save_marks(); 2943 for (uint i = 0; i < ParallelGCThreads; i++) { 2944 _par_gc_thread_states[i]->promo.startTrackingPromotions(); 2945 } 2946 } 2947 2948 bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() { 2949 return cmsSpace()->no_allocs_since_save_marks(); 2950 } 2951 2952 #define CMS_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \ 2953 \ 2954 void ConcurrentMarkSweepGeneration:: \ 2955 oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \ 2956 cl->set_generation(this); \ 2957 cmsSpace()->oop_since_save_marks_iterate##nv_suffix(cl); \ 2958 cl->reset_generation(); \ 2959 save_marks(); \ 2960 } 2961 2962 ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN) 2963 2964 void 2965 ConcurrentMarkSweepGeneration::object_iterate_since_last_GC(ObjectClosure* blk) 2966 { 2967 // Not currently implemented; need to do the following. -- ysr. 2968 // dld -- I think that is used for some sort of allocation profiler. So it 2969 // really means the objects allocated by the mutator since the last 2970 // GC. We could potentially implement this cheaply by recording only 2971 // the direct allocations in a side data structure. 2972 // 2973 // I think we probably ought not to be required to support these 2974 // iterations at any arbitrary point; I think there ought to be some 2975 // call to enable/disable allocation profiling in a generation/space, 2976 // and the iterator ought to return the objects allocated in the 2977 // gen/space since the enable call, or the last iterator call (which 2978 // will probably be at a GC.) That way, for gens like CM&S that would 2979 // require some extra data structure to support this, we only pay the 2980 // cost when it's in use... 
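// A hypothetical shape for such an interface (all names below are
// invented for illustration and do not exist in this code base):
//
//   gen->enable_alloc_profiling();     // start recording direct allocs
//   ...                                // mutator allocates
//   gen->allocs_since_enable(blk);     // iterate recorded objects, then
//                                      // reset the recording
//
// where the recording might be a side array of [start, top) sub-regions
// captured on each slow-path allocation, as suggested above.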
2981 cmsSpace()->object_iterate_since_last_GC(blk); 2982 } 2983 2984 void 2985 ConcurrentMarkSweepGeneration::younger_refs_iterate(OopsInGenClosure* cl) { 2986 cl->set_generation(this); 2987 younger_refs_in_space_iterate(_cmsSpace, cl); 2988 cl->reset_generation(); 2989 } 2990 2991 void 2992 ConcurrentMarkSweepGeneration::oop_iterate(MemRegion mr, OopClosure* cl) { 2993 if (freelistLock()->owned_by_self()) { 2994 Generation::oop_iterate(mr, cl); 2995 } else { 2996 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag); 2997 Generation::oop_iterate(mr, cl); 2998 } 2999 } 3000 3001 void 3002 ConcurrentMarkSweepGeneration::oop_iterate(OopClosure* cl) { 3003 if (freelistLock()->owned_by_self()) { 3004 Generation::oop_iterate(cl); 3005 } else { 3006 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag); 3007 Generation::oop_iterate(cl); 3008 } 3009 } 3010 3011 void 3012 ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) { 3013 if (freelistLock()->owned_by_self()) { 3014 Generation::object_iterate(cl); 3015 } else { 3016 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag); 3017 Generation::object_iterate(cl); 3018 } 3019 } 3020 3021 void 3022 ConcurrentMarkSweepGeneration::safe_object_iterate(ObjectClosure* cl) { 3023 if (freelistLock()->owned_by_self()) { 3024 Generation::safe_object_iterate(cl); 3025 } else { 3026 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag); 3027 Generation::safe_object_iterate(cl); 3028 } 3029 } 3030 3031 void 3032 ConcurrentMarkSweepGeneration::pre_adjust_pointers() { 3033 } 3034 3035 void 3036 ConcurrentMarkSweepGeneration::post_compact() { 3037 } 3038 3039 void 3040 ConcurrentMarkSweepGeneration::prepare_for_verify() { 3041 // Fix the linear allocation blocks to look like free blocks. 3042 3043 // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those 3044 // are not called when the heap is verified during universe initialization and 3045 // at vm shutdown. 3046 if (freelistLock()->owned_by_self()) { 3047 cmsSpace()->prepare_for_verify(); 3048 } else { 3049 MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag); 3050 cmsSpace()->prepare_for_verify(); 3051 } 3052 } 3053 3054 void 3055 ConcurrentMarkSweepGeneration::verify(bool allow_dirty /* ignored */) { 3056 // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those 3057 // are not called when the heap is verified during universe initialization and 3058 // at vm shutdown. 3059 if (freelistLock()->owned_by_self()) { 3060 cmsSpace()->verify(false /* ignored */); 3061 } else { 3062 MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag); 3063 cmsSpace()->verify(false /* ignored */); 3064 } 3065 } 3066 3067 void CMSCollector::verify(bool allow_dirty /* ignored */) { 3068 _cmsGen->verify(allow_dirty); 3069 _permGen->verify(allow_dirty); 3070 } 3071 3072 #ifndef PRODUCT 3073 bool CMSCollector::overflow_list_is_empty() const { 3074 assert(_num_par_pushes >= 0, "Inconsistency"); 3075 if (_overflow_list == NULL) { 3076 assert(_num_par_pushes == 0, "Inconsistency"); 3077 } 3078 return _overflow_list == NULL; 3079 } 3080 3081 // The methods verify_work_stacks_empty() and verify_overflow_empty() 3082 // merely consolidate assertion checks that appear to occur together frequently. 
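// Typical use, as at the start and end of the precleaning and marking
// phases further below (both calls are assertion-only):
//
//   verify_work_stacks_empty();  // mark stack drained, overflow list empty
//   verify_overflow_empty();     // overflow list empty, no preserved marks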
3083 void CMSCollector::verify_work_stacks_empty() const { 3084 assert(_markStack.isEmpty(), "Marking stack should be empty"); 3085 assert(overflow_list_is_empty(), "Overflow list should be empty"); 3086 } 3087 3088 void CMSCollector::verify_overflow_empty() const { 3089 assert(overflow_list_is_empty(), "Overflow list should be empty"); 3090 assert(no_preserved_marks(), "No preserved marks"); 3091 } 3092 #endif // PRODUCT 3093 3094 // Decide if we want to enable class unloading as part of the 3095 // ensuing concurrent GC cycle. We will collect the perm gen and 3096 // unload classes if it's the case that: 3097 // (1) an explicit gc request has been made and the flag 3098 // ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR 3099 // (2) (a) class unloading is enabled at the command line, and 3100 // (b) (i) perm gen threshold has been crossed, or 3101 // (ii) old gen is getting really full, or 3102 // (iii) the previous N CMS collections did not collect the 3103 // perm gen 3104 // NOTE: Provided there is no change in the state of the heap between 3105 // calls to this method, it should have idempotent results. Moreover, 3106 // its results should be monotonically increasing (i.e. going from 0 to 1, 3107 // but not 1 to 0) between successive calls between which the heap was 3108 // not collected. For the implementation below, it must thus rely on 3109 // the property that concurrent_cycles_since_last_unload() 3110 // will not decrease unless a collection cycle happened and that 3111 // _permGen->should_concurrent_collect() and _cmsGen->is_too_full() are 3112 // themselves also monotonic in that sense. See check_monotonicity() 3113 // below. 3114 bool CMSCollector::update_should_unload_classes() { 3115 _should_unload_classes = false; 3116 // Condition 1 above 3117 if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) { 3118 _should_unload_classes = true; 3119 } else if (CMSClassUnloadingEnabled) { // Condition 2.a above 3120 // Disjuncts 2.b.(i,ii,iii) above 3121 _should_unload_classes = (concurrent_cycles_since_last_unload() >= 3122 CMSClassUnloadingMaxInterval) 3123 || _permGen->should_concurrent_collect() 3124 || _cmsGen->is_too_full(); 3125 } 3126 return _should_unload_classes; 3127 } 3128 3129 bool ConcurrentMarkSweepGeneration::is_too_full() const { 3130 bool res = should_concurrent_collect(); 3131 res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0); 3132 return res; 3133 } 3134 3135 void CMSCollector::setup_cms_unloading_and_verification_state() { 3136 const bool should_verify = VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC 3137 || VerifyBeforeExit; 3138 const int rso = SharedHeap::SO_Symbols | SharedHeap::SO_Strings 3139 | SharedHeap::SO_CodeCache; 3140 3141 if (should_unload_classes()) { // Should unload classes this cycle 3142 remove_root_scanning_option(rso); // Shrink the root set appropriately 3143 set_verifying(should_verify); // Set verification state for this cycle 3144 return; // Nothing else needs to be done at this time 3145 } 3146 3147 // Not unloading classes this cycle 3148 assert(!should_unload_classes(), "Inconsistency!"); 3149 if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) { 3150 // We were not verifying, or we _were_ unloading classes in the last cycle, 3151 // AND some verification options are enabled this cycle; in this case, 3152 // we must make sure that the deadness map is allocated if not already so, 3153 // and cleared (if already allocated previously -- 3154 // CMSBitMap::sizeInBits() is used to
determine if it's allocated). 3155 if (perm_gen_verify_bit_map()->sizeInBits() == 0) { 3156 if (!perm_gen_verify_bit_map()->allocate(_permGen->reserved())) { 3157 warning("Failed to allocate permanent generation verification CMS Bit Map;\n" 3158 "permanent generation verification disabled"); 3159 return; // Note that we leave verification disabled, so we'll retry this 3160 // allocation next cycle. We _could_ remember this failure 3161 // and skip further attempts and permanently disable verification 3162 // attempts if that is considered more desirable. 3163 } 3164 assert(perm_gen_verify_bit_map()->covers(_permGen->reserved()), 3165 "_perm_gen_ver_bit_map inconsistency?"); 3166 } else { 3167 perm_gen_verify_bit_map()->clear_all(); 3168 } 3169 // Include symbols, strings and code cache elements to prevent their resurrection. 3170 add_root_scanning_option(rso); 3171 set_verifying(true); 3172 } else if (verifying() && !should_verify) { 3173 // We were verifying, but some verification flags got disabled. 3174 set_verifying(false); 3175 // Exclude symbols, strings and code cache elements from root scanning to 3176 // reduce IM and RM pauses. 3177 remove_root_scanning_option(rso); 3178 } 3179 } 3180 3181 3182 #ifndef PRODUCT 3183 HeapWord* CMSCollector::block_start(const void* p) const { 3184 const HeapWord* addr = (HeapWord*)p; 3185 if (_span.contains(p)) { 3186 if (_cmsGen->cmsSpace()->is_in_reserved(addr)) { 3187 return _cmsGen->cmsSpace()->block_start(p); 3188 } else { 3189 assert(_permGen->cmsSpace()->is_in_reserved(addr), 3190 "Inconsistent _span?"); 3191 return _permGen->cmsSpace()->block_start(p); 3192 } 3193 } 3194 return NULL; 3195 } 3196 #endif 3197 3198 HeapWord* 3199 ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size, 3200 bool tlab, 3201 bool parallel) { 3202 assert(!tlab, "Can't deal with TLAB allocation"); 3203 MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag); 3204 expand(word_size*HeapWordSize, MinHeapDeltaBytes, 3205 CMSExpansionCause::_satisfy_allocation); 3206 if (GCExpandToAllocateDelayMillis > 0) { 3207 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false); 3208 } 3209 return have_lock_and_allocate(word_size, tlab); 3210 } 3211 3212 // YSR: All of this generation expansion/shrinking stuff is an exact copy of 3213 // OneContigSpaceCardGeneration, which makes me wonder if we should move this 3214 // to CardGeneration and share it... 3215 bool ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes) { 3216 return CardGeneration::expand(bytes, expand_bytes); 3217 } 3218 3219 void ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes, 3220 CMSExpansionCause::Cause cause) 3221 { 3222 3223 bool success = expand(bytes, expand_bytes); 3224 3225 // remember why we expanded; this information is used 3226 // by shouldConcurrentCollect() when making decisions on whether to start 3227 // a new CMS cycle. 
3228 if (success) { 3229 set_expansion_cause(cause); 3230 if (PrintGCDetails && Verbose) { 3231 gclog_or_tty->print_cr("Expanded CMS gen for %s", 3232 CMSExpansionCause::to_string(cause)); 3233 } 3234 } 3235 } 3236 3237 HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) { 3238 HeapWord* res = NULL; 3239 MutexLocker x(ParGCRareEvent_lock); 3240 while (true) { 3241 // Expansion by some other thread might make alloc OK now: 3242 res = ps->lab.alloc(word_sz); 3243 if (res != NULL) return res; 3244 // If there's not enough expansion space available, give up. 3245 if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) { 3246 return NULL; 3247 } 3248 // Otherwise, we try expansion. 3249 expand(word_sz*HeapWordSize, MinHeapDeltaBytes, 3250 CMSExpansionCause::_allocate_par_lab); 3251 // Now go around the loop and try alloc again; 3252 // A competing par_promote might beat us to the expansion space, 3253 // so we may go around the loop again if promotion fails again. 3254 if (GCExpandToAllocateDelayMillis > 0) { 3255 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false); 3256 } 3257 } 3258 } 3259 3260 3261 bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space( 3262 PromotionInfo* promo) { 3263 MutexLocker x(ParGCRareEvent_lock); 3264 size_t refill_size_bytes = promo->refillSize() * HeapWordSize; 3265 while (true) { 3266 // Expansion by some other thread might make alloc OK now: 3267 if (promo->ensure_spooling_space()) { 3268 assert(promo->has_spooling_space(), 3269 "Post-condition of successful ensure_spooling_space()"); 3270 return true; 3271 } 3272 // If there's not enough expansion space available, give up. 3273 if (_virtual_space.uncommitted_size() < refill_size_bytes) { 3274 return false; 3275 } 3276 // Otherwise, we try expansion. 3277 expand(refill_size_bytes, MinHeapDeltaBytes, 3278 CMSExpansionCause::_allocate_par_spooling_space); 3279 // Now go around the loop and try alloc again; 3280 // A competing allocation might beat us to the expansion space, 3281 // so we may go around the loop again if allocation fails again. 3282 if (GCExpandToAllocateDelayMillis > 0) { 3283 os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false); 3284 } 3285 } 3286 } 3287 3288 3289 3290 void ConcurrentMarkSweepGeneration::shrink(size_t bytes) { 3291 assert_locked_or_safepoint(Heap_lock); 3292 size_t size = ReservedSpace::page_align_size_down(bytes); 3293 if (size > 0) { 3294 shrink_by(size); 3295 } 3296 } 3297 3298 bool ConcurrentMarkSweepGeneration::grow_by(size_t bytes) { 3299 assert_locked_or_safepoint(Heap_lock); 3300 bool result = _virtual_space.expand_by(bytes); 3301 if (result) { 3302 HeapWord* old_end = _cmsSpace->end(); 3303 size_t new_word_size = 3304 heap_word_size(_virtual_space.committed_size()); 3305 MemRegion mr(_cmsSpace->bottom(), new_word_size); 3306 _bts->resize(new_word_size); // resize the block offset shared array 3307 Universe::heap()->barrier_set()->resize_covered_region(mr); 3308 // Hmmmm... why doesn't CFLS::set_end verify locking?
3309 // This is quite ugly; FIX ME XXX 3310 _cmsSpace->assert_locked(freelistLock()); 3311 _cmsSpace->set_end((HeapWord*)_virtual_space.high()); 3312 3313 // update the space and generation capacity counters 3314 if (UsePerfData) { 3315 _space_counters->update_capacity(); 3316 _gen_counters->update_all(); 3317 } 3318 3319 if (Verbose && PrintGC) { 3320 size_t new_mem_size = _virtual_space.committed_size(); 3321 size_t old_mem_size = new_mem_size - bytes; 3322 gclog_or_tty->print_cr("Expanding %s from %ldK by %ldK to %ldK", 3323 name(), old_mem_size/K, bytes/K, new_mem_size/K); 3324 } 3325 } 3326 return result; 3327 } 3328 3329 bool ConcurrentMarkSweepGeneration::grow_to_reserved() { 3330 assert_locked_or_safepoint(Heap_lock); 3331 bool success = true; 3332 const size_t remaining_bytes = _virtual_space.uncommitted_size(); 3333 if (remaining_bytes > 0) { 3334 success = grow_by(remaining_bytes); 3335 DEBUG_ONLY(if (!success) warning("grow to reserved failed");) 3336 } 3337 return success; 3338 } 3339 3340 void ConcurrentMarkSweepGeneration::shrink_by(size_t bytes) { 3341 assert_locked_or_safepoint(Heap_lock); 3342 assert_lock_strong(freelistLock()); 3343 // XXX Fix when compaction is implemented. 3344 warning("Shrinking of CMS not yet implemented"); 3345 return; 3346 } 3347 3348 3349 // Simple ctor/dtor wrapper for accounting & timer chores around concurrent 3350 // phases. 3351 class CMSPhaseAccounting: public StackObj { 3352 public: 3353 CMSPhaseAccounting(CMSCollector *collector, 3354 const char *phase, 3355 bool print_cr = true); 3356 ~CMSPhaseAccounting(); 3357 3358 private: 3359 CMSCollector *_collector; 3360 const char *_phase; 3361 elapsedTimer _wallclock; 3362 bool _print_cr; 3363 3364 public: 3365 // Not MT-safe; so do not pass around these StackObj's 3366 // where they may be accessed by other threads. 
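// Typical scoped use, as in markFromRoots() further below -- the
// constructor starts the phase timers and logs the phase start, and
// the destructor stops them and prints the "[%s-concurrent-%s: ...]"
// line:
//
//   {
//     CMSTokenSyncWithLocks ts(true, bitMapLock());
//     CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
//     ... do the concurrent work for the phase ...
//   } // 'pa' destroyed here; timing for the phase is reported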
3367 jlong wallclock_millis() { 3368 assert(_wallclock.is_active(), "Wall clock should not stop"); 3369 _wallclock.stop(); // to record time 3370 jlong ret = _wallclock.milliseconds(); 3371 _wallclock.start(); // restart 3372 return ret; 3373 } 3374 }; 3375 3376 CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector, 3377 const char *phase, 3378 bool print_cr) : 3379 _collector(collector), _phase(phase), _print_cr(print_cr) { 3380 3381 if (PrintCMSStatistics != 0) { 3382 _collector->resetYields(); 3383 } 3384 if (PrintGCDetails && PrintGCTimeStamps) { 3385 gclog_or_tty->date_stamp(PrintGCDateStamps); 3386 gclog_or_tty->stamp(); 3387 gclog_or_tty->print_cr(": [%s-concurrent-%s-start]", 3388 _collector->cmsGen()->short_name(), _phase); 3389 } 3390 _collector->resetTimer(); 3391 _wallclock.start(); 3392 _collector->startTimer(); 3393 } 3394 3395 CMSPhaseAccounting::~CMSPhaseAccounting() { 3396 assert(_wallclock.is_active(), "Wall clock should not have stopped"); 3397 _collector->stopTimer(); 3398 _wallclock.stop(); 3399 if (PrintGCDetails) { 3400 gclog_or_tty->date_stamp(PrintGCDateStamps); 3401 if (PrintGCTimeStamps) { 3402 gclog_or_tty->stamp(); 3403 gclog_or_tty->print(": "); 3404 } 3405 gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]", 3406 _collector->cmsGen()->short_name(), 3407 _phase, _collector->timerValue(), _wallclock.seconds()); 3408 if (_print_cr) { 3409 gclog_or_tty->print_cr(""); 3410 } 3411 if (PrintCMSStatistics != 0) { 3412 gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase, 3413 _collector->yields()); 3414 } 3415 } 3416 } 3417 3418 // CMS work 3419 3420 // Checkpoint the roots into this generation from outside 3421 // this generation. [Note this initial checkpoint need only 3422 // be approximate -- we'll do a catch up phase subsequently.] 3423 void CMSCollector::checkpointRootsInitial(bool asynch) { 3424 assert(_collectorState == InitialMarking, "Wrong collector state"); 3425 check_correct_thread_executing(); 3426 TraceCMSMemoryManagerStats tms(_collectorState); 3427 ReferenceProcessor* rp = ref_processor(); 3428 SpecializationStats::clear(); 3429 assert(_restart_addr == NULL, "Control point invariant"); 3430 if (asynch) { 3431 // acquire locks for subsequent manipulations 3432 MutexLockerEx x(bitMapLock(), 3433 Mutex::_no_safepoint_check_flag); 3434 checkpointRootsInitialWork(asynch); 3435 rp->verify_no_references_recorded(); 3436 rp->enable_discovery(); // enable ("weak") refs discovery 3437 _collectorState = Marking; 3438 } else { 3439 // (Weak) Refs discovery: this is controlled from genCollectedHeap::do_collection 3440 // which recognizes if we are a CMS generation, and doesn't try to turn on 3441 // discovery; verify that they aren't meddling. 
3442 assert(!rp->discovery_is_atomic(), 3443 "incorrect setting of discovery predicate"); 3444 assert(!rp->discovery_enabled(), "genCollectedHeap shouldn't control " 3445 "ref discovery for this generation kind"); 3446 // already have locks 3447 checkpointRootsInitialWork(asynch); 3448 rp->enable_discovery(); // now enable ("weak") refs discovery 3449 _collectorState = Marking; 3450 } 3451 SpecializationStats::print(); 3452 } 3453 3454 void CMSCollector::checkpointRootsInitialWork(bool asynch) { 3455 assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped"); 3456 assert(_collectorState == InitialMarking, "just checking"); 3457 3458 // If there has not been a GC[n-1] since last GC[n] cycle completed, 3459 // precede our marking with a collection of all 3460 // younger generations to keep floating garbage to a minimum. 3461 // XXX: we won't do this for now -- it's an optimization to be done later. 3462 3463 // already have locks 3464 assert_lock_strong(bitMapLock()); 3465 assert(_markBitMap.isAllClear(), "was reset at end of previous cycle"); 3466 3467 // Setup the verification and class unloading state for this 3468 // CMS collection cycle. 3469 setup_cms_unloading_and_verification_state(); 3470 3471 NOT_PRODUCT(TraceTime t("\ncheckpointRootsInitialWork", 3472 PrintGCDetails && Verbose, true, gclog_or_tty);) 3473 if (UseAdaptiveSizePolicy) { 3474 size_policy()->checkpoint_roots_initial_begin(); 3475 } 3476 3477 // Reset all the PLAB chunk arrays if necessary. 3478 if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) { 3479 reset_survivor_plab_arrays(); 3480 } 3481 3482 ResourceMark rm; 3483 HandleMark hm; 3484 3485 FalseClosure falseClosure; 3486 // In the case of a synchronous collection, we will elide the 3487 // remark step, so it's important to catch all the nmethod oops 3488 // in this step. 3489 // The final 'true' flag to gen_process_strong_roots will ensure this. 3490 // If 'async' is true, we can relax the nmethod tracing. 3491 MarkRefsIntoClosure notOlder(_span, &_markBitMap); 3492 GenCollectedHeap* gch = GenCollectedHeap::heap(); 3493 3494 verify_work_stacks_empty(); 3495 verify_overflow_empty(); 3496 3497 gch->ensure_parsability(false); // fill TLABs, but no need to retire them 3498 // Update the saved marks which may affect the root scans. 3499 gch->save_marks(); 3500 3501 // weak reference processing has not started yet. 3502 ref_processor()->set_enqueuing_is_done(false); 3503 3504 { 3505 // This is not needed. DEBUG_ONLY(RememberKlassesChecker imx(true);) 3506 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;) 3507 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel. 3508 gch->gen_process_strong_roots(_cmsGen->level(), 3509 true, // younger gens are roots 3510 true, // activate StrongRootsScope 3511 true, // collecting perm gen 3512 SharedHeap::ScanningOption(roots_scanning_options()), 3513 ¬Older, 3514 true, // walk all of code cache if (so & SO_CodeCache) 3515 NULL); 3516 } 3517 3518 // Clear mod-union table; it will be dirtied in the prologue of 3519 // CMS generation per each younger generation collection. 
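// (Conceptually, in each such prologue the dirty cards spanning this
// generation are ORed into _modUnionTable before the young collection
// resets them in the card table, i.e. roughly:
//
//   _modUnionTable |= dirty-card bits over the CMS generation;
//
// so mutations recorded before a young GC are not lost to remark.)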
3520 3521 assert(_modUnionTable.isAllClear(), 3522 "Was cleared in most recent final checkpoint phase" 3523 " or no bits are set in the gc_prologue before the start of the next " 3524 "subsequent marking phase."); 3525 3526 // Temporarily disabled, since pre/post-consumption closures don't 3527 // care about precleaned cards 3528 #if 0 3529 { 3530 MemRegion mr = MemRegion((HeapWord*)_virtual_space.low(), 3531 (HeapWord*)_virtual_space.high()); 3532 _ct->ct_bs()->preclean_dirty_cards(mr); 3533 } 3534 #endif 3535 3536 // Save the end of the used_region of the constituent generations 3537 // to be used to limit the extent of sweep in each generation. 3538 save_sweep_limits(); 3539 if (UseAdaptiveSizePolicy) { 3540 size_policy()->checkpoint_roots_initial_end(gch->gc_cause()); 3541 } 3542 verify_overflow_empty(); 3543 } 3544 3545 bool CMSCollector::markFromRoots(bool asynch) { 3546 // we might be tempted to assert that: 3547 // assert(asynch == !SafepointSynchronize::is_at_safepoint(), 3548 // "inconsistent argument?"); 3549 // However that wouldn't be right, because it's possible that 3550 // a safepoint is indeed in progress as a younger generation 3551 // stop-the-world GC happens even as we mark in this generation. 3552 assert(_collectorState == Marking, "inconsistent state?"); 3553 check_correct_thread_executing(); 3554 verify_overflow_empty(); 3555 3556 bool res; 3557 if (asynch) { 3558 3559 // Start the timers for adaptive size policy for the concurrent phases 3560 // Do it here so that the foreground MS can use the concurrent 3561 // timer since a foreground MS might have the sweep done concurrently 3562 // or STW. 3563 if (UseAdaptiveSizePolicy) { 3564 size_policy()->concurrent_marking_begin(); 3565 } 3566 3567 // Weak ref discovery note: We may be discovering weak 3568 // refs in this generation concurrent (but interleaved) with 3569 // weak ref discovery by a younger generation collector. 3570 3571 CMSTokenSyncWithLocks ts(true, bitMapLock()); 3572 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); 3573 CMSPhaseAccounting pa(this, "mark", !PrintGCDetails); 3574 res = markFromRootsWork(asynch); 3575 if (res) { 3576 _collectorState = Precleaning; 3577 } else { // We failed and a foreground collection wants to take over 3578 assert(_foregroundGCIsActive, "internal state inconsistency"); 3579 assert(_restart_addr == NULL, "foreground will restart from scratch"); 3580 if (PrintGCDetails) { 3581 gclog_or_tty->print_cr("bailing out to foreground collection"); 3582 } 3583 } 3584 if (UseAdaptiveSizePolicy) { 3585 size_policy()->concurrent_marking_end(); 3586 } 3587 } else { 3588 assert(SafepointSynchronize::is_at_safepoint(), 3589 "inconsistent with asynch == false"); 3590 if (UseAdaptiveSizePolicy) { 3591 size_policy()->ms_collection_marking_begin(); 3592 } 3593 // already have locks 3594 res = markFromRootsWork(asynch); 3595 _collectorState = FinalMarking; 3596 if (UseAdaptiveSizePolicy) { 3597 GenCollectedHeap* gch = GenCollectedHeap::heap(); 3598 size_policy()->ms_collection_marking_end(gch->gc_cause()); 3599 } 3600 } 3601 verify_overflow_empty(); 3602 return res; 3603 } 3604 3605 bool CMSCollector::markFromRootsWork(bool asynch) { 3606 // iterate over marked bits in bit map, doing a full scan and mark 3607 // from these roots using the following algorithm: 3608 // . if oop is to the right of the current scan pointer, 3609 // mark corresponding bit (we'll process it later) 3610 // . else (oop is to left of current scan pointer) 3611 // push oop on marking stack 3612 // .
drain the marking stack 3613 3614 // Note that when we do a marking step we need to hold the 3615 // bit map lock -- recall that direct allocation (by mutators) 3616 // and promotion (by younger generation collectors) is also 3617 // marking the bit map. [the so-called allocate live policy.] 3618 // Because the implementation of bit map marking is not 3619 // robust wrt simultaneous marking of bits in the same word, 3620 // we need to make sure that there is no such interference 3621 // between concurrent such updates. 3622 3623 // already have locks 3624 assert_lock_strong(bitMapLock()); 3625 3626 // Clear the revisit stack, just in case there are any 3627 // obsolete contents from a short-circuited previous CMS cycle. 3628 _revisitStack.reset(); 3629 verify_work_stacks_empty(); 3630 verify_overflow_empty(); 3631 assert(_revisitStack.isEmpty(), "tabula rasa"); 3632 DEBUG_ONLY(RememberKlassesChecker cmx(should_unload_classes());) 3633 bool result = false; 3634 if (CMSConcurrentMTEnabled && ConcGCThreads > 0) { 3635 result = do_marking_mt(asynch); 3636 } else { 3637 result = do_marking_st(asynch); 3638 } 3639 return result; 3640 } 3641 3642 // Forward decl 3643 class CMSConcMarkingTask; 3644 3645 class CMSConcMarkingTerminator: public ParallelTaskTerminator { 3646 CMSCollector* _collector; 3647 CMSConcMarkingTask* _task; 3648 bool _yield; 3649 protected: 3650 virtual void yield(); 3651 public: 3652 // "n_threads" is the number of threads to be terminated. 3653 // "queue_set" is a set of work queues of other threads. 3654 // "collector" is the CMS collector associated with this task terminator. 3655 // "yield" indicates whether we need the gang as a whole to yield. 3656 CMSConcMarkingTerminator(int n_threads, TaskQueueSetSuper* queue_set, 3657 CMSCollector* collector, bool yield) : 3658 ParallelTaskTerminator(n_threads, queue_set), 3659 _collector(collector), 3660 _yield(yield) { } 3661 3662 void set_task(CMSConcMarkingTask* task) { 3663 _task = task; 3664 } 3665 }; 3666 3667 // MT Concurrent Marking Task 3668 class CMSConcMarkingTask: public YieldingFlexibleGangTask { 3669 CMSCollector* _collector; 3670 YieldingFlexibleWorkGang* _workers; // the whole gang 3671 int _n_workers; // requested/desired # workers 3672 bool _asynch; 3673 bool _result; 3674 CompactibleFreeListSpace* _cms_space; 3675 CompactibleFreeListSpace* _perm_space; 3676 HeapWord* _global_finger; 3677 HeapWord* _restart_addr; 3678 3679 // Exposed here for yielding support 3680 Mutex* const _bit_map_lock; 3681 3682 // The per thread work queues, available here for stealing 3683 OopTaskQueueSet* _task_queues; 3684 CMSConcMarkingTerminator _term; 3685 3686 public: 3687 CMSConcMarkingTask(CMSCollector* collector, 3688 CompactibleFreeListSpace* cms_space, 3689 CompactibleFreeListSpace* perm_space, 3690 bool asynch, int n_workers, 3691 YieldingFlexibleWorkGang* workers, 3692 OopTaskQueueSet* task_queues): 3693 YieldingFlexibleGangTask("Concurrent marking done multi-threaded"), 3694 _collector(collector), 3695 _cms_space(cms_space), 3696 _perm_space(perm_space), 3697 _asynch(asynch), _n_workers(n_workers), _result(true), 3698 _workers(workers), _task_queues(task_queues), 3699 _term(n_workers, task_queues, _collector, asynch), 3700 _bit_map_lock(collector->bitMapLock()) 3701 { 3702 assert(n_workers <= workers->total_workers(), 3703 "Else termination won't work correctly today"); // XXX FIX ME! 
3704 _requested_size = n_workers; 3705 _term.set_task(this); 3706 assert(_cms_space->bottom() < _perm_space->bottom(), 3707 "Finger incorrectly initialized below"); 3708 _restart_addr = _global_finger = _cms_space->bottom(); 3709 } 3710 3711 3712 OopTaskQueueSet* task_queues() { return _task_queues; } 3713 3714 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); } 3715 3716 HeapWord** global_finger_addr() { return &_global_finger; } 3717 3718 CMSConcMarkingTerminator* terminator() { return &_term; } 3719 3720 void work(int i); 3721 3722 virtual void coordinator_yield(); // stuff done by coordinator 3723 bool result() { return _result; } 3724 3725 void reset(HeapWord* ra) { 3726 assert(_global_finger >= _cms_space->end(), "Postcondition of ::work(i)"); 3727 assert(_global_finger >= _perm_space->end(), "Postcondition of ::work(i)"); 3728 assert(ra < _perm_space->end(), "ra too large"); 3729 _restart_addr = _global_finger = ra; 3730 _term.reset_for_reuse(); 3731 } 3732 3733 static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk, 3734 OopTaskQueue* work_q); 3735 3736 private: 3737 void do_scan_and_mark(int i, CompactibleFreeListSpace* sp); 3738 void do_work_steal(int i); 3739 void bump_global_finger(HeapWord* f); 3740 }; 3741 3742 void CMSConcMarkingTerminator::yield() { 3743 if (ConcurrentMarkSweepThread::should_yield() && 3744 !_collector->foregroundGCIsActive() && 3745 _yield) { 3746 _task->yield(); 3747 } else { 3748 ParallelTaskTerminator::yield(); 3749 } 3750 } 3751 3752 //////////////////////////////////////////////////////////////// 3753 // Concurrent Marking Algorithm Sketch 3754 //////////////////////////////////////////////////////////////// 3755 // Until all tasks exhausted (both spaces): 3756 // -- claim next available chunk 3757 // -- bump global finger via CAS 3758 // -- find first object that starts in this chunk 3759 // and start scanning bitmap from that position 3760 // -- scan marked objects for oops 3761 // -- CAS-mark target, and if successful: 3762 // . if target oop is above global finger (volatile read) 3763 // nothing to do 3764 // . if target oop is in chunk and above local finger 3765 // then nothing to do 3766 // . else push on work-queue 3767 // -- Deal with possible overflow issues: 3768 // . local work-queue overflow causes stuff to be pushed on 3769 // global (common) overflow queue 3770 // . always first empty local work queue 3771 // . then get a batch of oops from global work queue if any 3772 // . then do work stealing 3773 // -- When all tasks claimed (both spaces) 3774 // and local work queue empty, 3775 // then in a loop do: 3776 // . check global overflow stack; steal a batch of oops and trace 3777 // . try to steal from other threads if GOS is empty 3778 // . if neither is available, offer termination 3779 // -- Terminate and return result 3780 // 3781 void CMSConcMarkingTask::work(int i) { 3782 elapsedTimer _timer; 3783 ResourceMark rm; 3784 HandleMark hm; 3785 3786 DEBUG_ONLY(_collector->verify_overflow_empty();) 3787 3788 // Before we begin work, our work queue should be empty 3789 assert(work_queue(i)->size() == 0, "Expected to be empty"); 3790 // Scan the bitmap covering _cms_space, tracing through grey objects. 3791 _timer.start(); 3792 do_scan_and_mark(i, _cms_space); 3793 _timer.stop(); 3794 if (PrintCMSStatistics != 0) { 3795 gclog_or_tty->print_cr("Finished cms space scanning in %dth thread: %3.3f sec", 3796 i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers 3797 } 3798 3799 // ...
do the same for the _perm_space 3800 _timer.reset(); 3801 _timer.start(); 3802 do_scan_and_mark(i, _perm_space); 3803 _timer.stop(); 3804 if (PrintCMSStatistics != 0) { 3805 gclog_or_tty->print_cr("Finished perm space scanning in %dth thread: %3.3f sec", 3806 i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers 3807 } 3808 3809 // ... do work stealing 3810 _timer.reset(); 3811 _timer.start(); 3812 do_work_steal(i); 3813 _timer.stop(); 3814 if (PrintCMSStatistics != 0) { 3815 gclog_or_tty->print_cr("Finished work stealing in %dth thread: %3.3f sec", 3816 i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers 3817 } 3818 assert(_collector->_markStack.isEmpty(), "Should have been emptied"); 3819 assert(work_queue(i)->size() == 0, "Should have been emptied"); 3820 // Note that under the current task protocol, the 3821 // following assertion is true even if the spaces 3822 // expanded since the completion of the concurrent 3823 // marking. XXX This will likely change under a strict 3824 // ABORT semantics. 3825 assert(_global_finger > _cms_space->end() && 3826 _global_finger >= _perm_space->end(), 3827 "All tasks have been completed"); 3828 DEBUG_ONLY(_collector->verify_overflow_empty();) 3829 } 3830 3831 void CMSConcMarkingTask::bump_global_finger(HeapWord* f) { 3832 HeapWord* read = _global_finger; 3833 HeapWord* cur = read; 3834 while (f > read) { 3835 cur = read; 3836 read = (HeapWord*) Atomic::cmpxchg_ptr(f, &_global_finger, cur); 3837 if (cur == read) { 3838 // our cas succeeded 3839 assert(_global_finger >= f, "protocol consistency"); 3840 break; 3841 } 3842 } 3843 } 3844 3845 // This is really inefficient, and should be redone by 3846 // using (not yet available) block-read and -write interfaces to the 3847 // stack and the work_queue. XXX FIX ME !!! 3848 bool CMSConcMarkingTask::get_work_from_overflow_stack(CMSMarkStack* ovflw_stk, 3849 OopTaskQueue* work_q) { 3850 // Fast lock-free check 3851 if (ovflw_stk->length() == 0) { 3852 return false; 3853 } 3854 assert(work_q->size() == 0, "Shouldn't steal"); 3855 MutexLockerEx ml(ovflw_stk->par_lock(), 3856 Mutex::_no_safepoint_check_flag); 3857 // Grab up to 1/4 the size of the work queue 3858 size_t num = MIN2((size_t)(work_q->max_elems() - work_q->size())/4, 3859 (size_t)ParGCDesiredObjsFromOverflowList); 3860 num = MIN2(num, ovflw_stk->length()); 3861 for (int i = (int) num; i > 0; i--) { 3862 oop cur = ovflw_stk->pop(); 3863 assert(cur != NULL, "Counted wrong?"); 3864 work_q->push(cur); 3865 } 3866 return num > 0; 3867 } 3868 3869 void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) { 3870 SequentialSubTasksDone* pst = sp->conc_par_seq_tasks(); 3871 int n_tasks = pst->n_tasks(); 3872 // We allow that there may be no tasks to do here because 3873 // we are restarting after a stack overflow. 3874 assert(pst->valid() || n_tasks == 0, "Uninitialized use?"); 3875 int nth_task = 0; 3876 3877 HeapWord* aligned_start = sp->bottom(); 3878 if (sp->used_region().contains(_restart_addr)) { 3879 // Align down to a card boundary for the start of 0th task 3880 // for this space.
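// align_size_down rounds the restart address down to its enclosing
// card boundary; e.g. with CardTableModRefBS::card_size == 512 (0x200):
//
//   align_size_down(0x12345, 0x200) == 0x12200   // addr & ~(0x200 - 1)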
3881 aligned_start = 3882 (HeapWord*)align_size_down((uintptr_t)_restart_addr, 3883 CardTableModRefBS::card_size); 3884 } 3885 3886 size_t chunk_size = sp->marking_task_size(); 3887 while (!pst->is_task_claimed(/* reference */ nth_task)) { 3888 // Having claimed the nth task in this space, 3889 // compute the chunk that it corresponds to: 3890 MemRegion span = MemRegion(aligned_start + nth_task*chunk_size, 3891 aligned_start + (nth_task+1)*chunk_size); 3892 // Try and bump the global finger via a CAS; 3893 // note that we need to do the global finger bump 3894 // _before_ taking the intersection below, because 3895 // the task corresponding to that region will be 3896 // deemed done even if the used_region() expands 3897 // because of allocation -- as it almost certainly will 3898 // during start-up while the threads yield in the 3899 // closure below. 3900 HeapWord* finger = span.end(); 3901 bump_global_finger(finger); // atomically 3902 // There are null tasks here corresponding to chunks 3903 // beyond the "top" address of the space. 3904 span = span.intersection(sp->used_region()); 3905 if (!span.is_empty()) { // Non-null task 3906 HeapWord* prev_obj; 3907 assert(!span.contains(_restart_addr) || nth_task == 0, 3908 "Inconsistency"); 3909 if (nth_task == 0) { 3910 // For the 0th task, we'll not need to compute a block_start. 3911 if (span.contains(_restart_addr)) { 3912 // In the case of a restart because of stack overflow, 3913 // we might additionally skip a chunk prefix. 3914 prev_obj = _restart_addr; 3915 } else { 3916 prev_obj = span.start(); 3917 } 3918 } else { 3919 // We want to skip the first object because 3920 // the protocol is to scan any object in its entirety 3921 // that _starts_ in this span; a fortiori, any 3922 // object starting in an earlier span is scanned 3923 // as part of an earlier claimed task. 3924 // Below we use the "careful" version of block_start 3925 // so we do not try to navigate uninitialized objects. 3926 prev_obj = sp->block_start_careful(span.start()); 3927 // Below we use a variant of block_size that uses the 3928 // Printezis bits to avoid waiting for allocated 3929 // objects to become initialized/parsable. 3930 while (prev_obj < span.start()) { 3931 size_t sz = sp->block_size_no_stall(prev_obj, _collector); 3932 if (sz > 0) { 3933 prev_obj += sz; 3934 } else { 3935 // In this case we may end up doing a bit of redundant 3936 // scanning, but that appears unavoidable, short of 3937 // locking the free list locks; see bug 6324141. 3938 break; 3939 } 3940 } 3941 } 3942 if (prev_obj < span.end()) { 3943 MemRegion my_span = MemRegion(prev_obj, span.end()); 3944 // Do the marking work within a non-empty span -- 3945 // the last argument to the constructor indicates whether the 3946 // iteration should be incremental with periodic yields. 3947 Par_MarkFromRootsClosure cl(this, _collector, my_span, 3948 &_collector->_markBitMap, 3949 work_queue(i), 3950 &_collector->_markStack, 3951 &_collector->_revisitStack, 3952 _asynch); 3953 _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end()); 3954 } // else nothing to do for this task 3955 } // else nothing to do for this task 3956 } 3957 // We'd be tempted to assert here that since there are no 3958 // more tasks left to claim in this space, the global_finger 3959 // must exceed space->top() and a fortiori space->end(). 
However, 3960 // that would not quite be correct because the bumping of 3961 // global_finger occurs strictly after the claiming of a task, 3962 // so by the time we reach here the global finger may not yet 3963 // have been bumped up by the thread that claimed the last 3964 // task. 3965 pst->all_tasks_completed(); 3966 } 3967 3968 class Par_ConcMarkingClosure: public Par_KlassRememberingOopClosure { 3969 private: 3970 MemRegion _span; 3971 CMSBitMap* _bit_map; 3972 CMSMarkStack* _overflow_stack; 3973 OopTaskQueue* _work_queue; 3974 protected: 3975 DO_OOP_WORK_DEFN 3976 public: 3977 Par_ConcMarkingClosure(CMSCollector* collector, OopTaskQueue* work_queue, 3978 CMSBitMap* bit_map, CMSMarkStack* overflow_stack, 3979 CMSMarkStack* revisit_stack): 3980 Par_KlassRememberingOopClosure(collector, NULL, revisit_stack), 3981 _span(_collector->_span), 3982 _work_queue(work_queue), 3983 _bit_map(bit_map), 3984 _overflow_stack(overflow_stack) 3985 { } 3986 virtual void do_oop(oop* p); 3987 virtual void do_oop(narrowOop* p); 3988 void trim_queue(size_t max); 3989 void handle_stack_overflow(HeapWord* lost); 3990 }; 3991 3992 // Grey object scanning during work stealing phase -- 3993 // the salient assumption here is that any references 3994 // that are in these stolen objects being scanned must 3995 // already have been initialized (else they would not have 3996 // been published), so we do not need to check for 3997 // uninitialized objects before pushing here. 3998 void Par_ConcMarkingClosure::do_oop(oop obj) { 3999 assert(obj->is_oop_or_null(true), "expected an oop or NULL"); 4000 HeapWord* addr = (HeapWord*)obj; 4001 // Check if oop points into the CMS generation 4002 // and is not marked 4003 if (_span.contains(addr) && !_bit_map->isMarked(addr)) { 4004 // a white object ... 4005 // If we manage to "claim" the object, by being the 4006 // first thread to mark it, then we push it on our 4007 // marking stack 4008 if (_bit_map->par_mark(addr)) { // ... now grey 4009 // push on work queue (grey set) 4010 bool simulate_overflow = false; 4011 NOT_PRODUCT( 4012 if (CMSMarkStackOverflowALot && 4013 _collector->simulate_overflow()) { 4014 // simulate a stack overflow 4015 simulate_overflow = true; 4016 } 4017 ) 4018 if (simulate_overflow || 4019 !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) { 4020 // stack overflow 4021 if (PrintCMSStatistics != 0) { 4022 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at " 4023 SIZE_FORMAT, _overflow_stack->capacity()); 4024 } 4025 // We cannot assert that the overflow stack is full because 4026 // it may have been emptied since. 
4027 assert(simulate_overflow || 4028 _work_queue->size() == _work_queue->max_elems(), 4029 "Else push should have succeeded"); 4030 handle_stack_overflow(addr); 4031 } 4032 } // Else, some other thread got there first 4033 } 4034 } 4035 4036 void Par_ConcMarkingClosure::do_oop(oop* p) { Par_ConcMarkingClosure::do_oop_work(p); } 4037 void Par_ConcMarkingClosure::do_oop(narrowOop* p) { Par_ConcMarkingClosure::do_oop_work(p); } 4038 4039 void Par_ConcMarkingClosure::trim_queue(size_t max) { 4040 while (_work_queue->size() > max) { 4041 oop new_oop; 4042 if (_work_queue->pop_local(new_oop)) { 4043 assert(new_oop->is_oop(), "Should be an oop"); 4044 assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object"); 4045 assert(_span.contains((HeapWord*)new_oop), "Not in span"); 4046 assert(new_oop->is_parsable(), "Should be parsable"); 4047 new_oop->oop_iterate(this); // do_oop() above 4048 } 4049 } 4050 } 4051 4052 // Upon stack overflow, we discard (part of) the stack, 4053 // remembering the least address amongst those discarded 4054 // in CMSCollector's _restart_address. 4055 void Par_ConcMarkingClosure::handle_stack_overflow(HeapWord* lost) { 4056 // We need to do this under a mutex to prevent other 4057 // workers from interfering with the work done below. 4058 MutexLockerEx ml(_overflow_stack->par_lock(), 4059 Mutex::_no_safepoint_check_flag); 4060 // Remember the least grey address discarded 4061 HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost); 4062 _collector->lower_restart_addr(ra); 4063 _overflow_stack->reset(); // discard stack contents 4064 _overflow_stack->expand(); // expand the stack if possible 4065 } 4066 4067 4068 void CMSConcMarkingTask::do_work_steal(int i) { 4069 OopTaskQueue* work_q = work_queue(i); 4070 oop obj_to_scan; 4071 CMSBitMap* bm = &(_collector->_markBitMap); 4072 CMSMarkStack* ovflw = &(_collector->_markStack); 4073 CMSMarkStack* revisit = &(_collector->_revisitStack); 4074 int* seed = _collector->hash_seed(i); 4075 Par_ConcMarkingClosure cl(_collector, work_q, bm, ovflw, revisit); 4076 while (true) { 4077 cl.trim_queue(0); 4078 assert(work_q->size() == 0, "Should have been emptied above"); 4079 if (get_work_from_overflow_stack(ovflw, work_q)) { 4080 // Can't assert below because the work obtained from the 4081 // overflow stack may already have been stolen from us. 4082 // assert(work_q->size() > 0, "Work from overflow stack"); 4083 continue; 4084 } else if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) { 4085 assert(obj_to_scan->is_oop(), "Should be an oop"); 4086 assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object"); 4087 obj_to_scan->oop_iterate(&cl); 4088 } else if (terminator()->offer_termination()) { 4089 assert(work_q->size() == 0, "Impossible!"); 4090 break; 4091 } 4092 } 4093 } 4094 4095 // This is run by the CMS (coordinator) thread. 4096 void CMSConcMarkingTask::coordinator_yield() { 4097 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), 4098 "CMS thread should hold CMS token"); 4099 DEBUG_ONLY(RememberKlassesChecker mux(false);) 4100 // First give up the locks, then yield, then re-lock 4101 // We should probably use a constructor/destructor idiom to 4102 // do this unlock/lock or modify the MutexUnlocker class to 4103 // serve our purpose. 
XXX 4104 assert_lock_strong(_bit_map_lock); 4105 _bit_map_lock->unlock(); 4106 ConcurrentMarkSweepThread::desynchronize(true); 4107 ConcurrentMarkSweepThread::acknowledge_yield_request(); 4108 _collector->stopTimer(); 4109 if (PrintCMSStatistics != 0) { 4110 _collector->incrementYields(); 4111 } 4112 _collector->icms_wait(); 4113 4114 // It is possible for whichever thread initiated the yield request 4115 // not to get a chance to wake up and take the bitmap lock between 4116 // this thread releasing it and reacquiring it. So, while the 4117 // should_yield() flag is on, let's sleep for a bit to give the 4118 // other thread a chance to wake up. The limit imposed on the number 4119 // of iterations is defensive, to avoid any unforeseen circumstances 4120 // putting us into an infinite loop. Since it's always been this 4121 // (coordinator_yield()) method that was observed to cause the 4122 // problem, we are using a parameter (CMSCoordinatorYieldSleepCount) 4123 // which is by default non-zero. For the other seven methods that 4124 // also perform the yield operation, we are using a different 4125 // parameter (CMSYieldSleepCount) which is by default zero. This way we 4126 // can enable the sleeping for those methods too, if necessary. 4127 // See 6442774. 4128 // 4129 // We really need to reconsider the synchronization between the GC 4130 // thread and the yield-requesting threads in the future and we 4131 // should really use wait/notify, which is the recommended 4132 // way of doing this type of interaction. Additionally, we should 4133 // consolidate the eight methods that do the yield operation, which 4134 // are almost identical, into one for better maintainability and 4135 // readability. See 6445193. 4136 // 4137 // Tony 2006.06.29 4138 for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount && 4139 ConcurrentMarkSweepThread::should_yield() && 4140 !CMSCollector::foregroundGCIsActive(); ++i) { 4141 os::sleep(Thread::current(), 1, false); 4142 ConcurrentMarkSweepThread::acknowledge_yield_request(); 4143 } 4144 4145 ConcurrentMarkSweepThread::synchronize(true); 4146 _bit_map_lock->lock_without_safepoint_check(); 4147 _collector->startTimer(); 4148 } 4149 4150 bool CMSCollector::do_marking_mt(bool asynch) { 4151 assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition"); 4152 // In the future this would be determined ergonomically, based 4153 // on #cpu's, # active mutator threads (and load), and mutation rate. 4154 int num_workers = ConcGCThreads; 4155 4156 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace(); 4157 CompactibleFreeListSpace* perm_space = _permGen->cmsSpace(); 4158 4159 CMSConcMarkingTask tsk(this, cms_space, perm_space, 4160 asynch, num_workers /* number requested XXX */, 4161 conc_workers(), task_queues()); 4162 4163 // Since the actual number of workers we get may be different 4164 // from the number we requested above, do we need to do anything different 4165 // below? In particular, maybe we need to subclass the SequentialSubTasksDone 4166 // class?? XXX 4167 cms_space ->initialize_sequential_subtasks_for_marking(num_workers); 4168 perm_space->initialize_sequential_subtasks_for_marking(num_workers); 4169 4170 // Refs discovery is already non-atomic. 4171 assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic"); 4172 // Mutate the Refs discovery so it is MT during the 4173 // multi-threaded marking phase.
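// ReferenceProcessorMTMutator (used just below) is an RAII toggle: its
// constructor flips the processor's MT-discovery mode (here, on iff we
// have more than one worker) and its destructor restores the previous
// setting when this scope exits. Schematically (member and accessor
// names here are illustrative, not the actual interface):
//
//   class ReferenceProcessorMTMutator: public StackObj {
//     ReferenceProcessor* _rp;
//     bool                _saved_mt;
//    public:
//     ReferenceProcessorMTMutator(ReferenceProcessor* rp, bool mt):
//       _rp(rp), _saved_mt(rp->mt_discovery()) { rp->set_mt_discovery(mt); }
//     ~ReferenceProcessorMTMutator() { _rp->set_mt_discovery(_saved_mt); }
//   };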
4174 ReferenceProcessorMTMutator mt(ref_processor(), num_workers > 1); 4175 DEBUG_ONLY(RememberKlassesChecker cmx(should_unload_classes());) 4176 conc_workers()->start_task(&tsk); 4177 while (tsk.yielded()) { 4178 tsk.coordinator_yield(); 4179 conc_workers()->continue_task(&tsk); 4180 } 4181 // If the task was aborted, _restart_addr will be non-NULL 4182 assert(tsk.completed() || _restart_addr != NULL, "Inconsistency"); 4183 while (_restart_addr != NULL) { 4184 // XXX For now we do not make use of ABORTED state and have not 4185 // yet implemented the right abort semantics (even in the original 4186 // single-threaded CMS case). That needs some more investigation 4187 // and is deferred for now; see CR# TBF. 07252005YSR. XXX 4188 assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency"); 4189 // If _restart_addr is non-NULL, a marking stack overflow 4190 // occurred; we need to do a fresh marking iteration from the 4191 // indicated restart address. 4192 if (_foregroundGCIsActive && asynch) { 4193 // We may be running into repeated stack overflows, having 4194 // reached the limit of the stack size, while making very 4195 // slow forward progress. It may be best to bail out and 4196 // let the foreground collector do its job. 4197 // Clear _restart_addr, so that foreground GC 4198 // works from scratch. This avoids the headache of 4199 // a "rescan" which would otherwise be needed because 4200 // of the dirty mod union table & card table. 4201 _restart_addr = NULL; 4202 return false; 4203 } 4204 // Adjust the task to restart from _restart_addr 4205 tsk.reset(_restart_addr); 4206 cms_space ->initialize_sequential_subtasks_for_marking(num_workers, 4207 _restart_addr); 4208 perm_space->initialize_sequential_subtasks_for_marking(num_workers, 4209 _restart_addr); 4210 _restart_addr = NULL; 4211 // Get the workers going again 4212 conc_workers()->start_task(&tsk); 4213 while (tsk.yielded()) { 4214 tsk.coordinator_yield(); 4215 conc_workers()->continue_task(&tsk); 4216 } 4217 } 4218 assert(tsk.completed(), "Inconsistency"); 4219 assert(tsk.result() == true, "Inconsistency"); 4220 return true; 4221 } 4222 4223 bool CMSCollector::do_marking_st(bool asynch) { 4224 ResourceMark rm; 4225 HandleMark hm; 4226 4227 MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap, 4228 &_markStack, &_revisitStack, CMSYield && asynch); 4229 // the last argument to iterate indicates whether the iteration 4230 // should be incremental with periodic yields. 4231 _markBitMap.iterate(&markFromRootsClosure); 4232 // If _restart_addr is non-NULL, a marking stack overflow 4233 // occurred; we need to do a fresh iteration from the 4234 // indicated restart address. 4235 while (_restart_addr != NULL) { 4236 if (_foregroundGCIsActive && asynch) { 4237 // We may be running into repeated stack overflows, having 4238 // reached the limit of the stack size, while making very 4239 // slow forward progress. It may be best to bail out and 4240 // let the foreground collector do its job. 4241 // Clear _restart_addr, so that foreground GC 4242 // works from scratch. This avoids the headache of 4243 // a "rescan" which would otherwise be needed because 4244 // of the dirty mod union table & card table. 
4245 _restart_addr = NULL; 4246 return false; // indicating failure to complete marking 4247 } 4248 // Deal with stack overflow: 4249 // we restart marking from _restart_addr 4250 HeapWord* ra = _restart_addr; 4251 markFromRootsClosure.reset(ra); 4252 _restart_addr = NULL; 4253 _markBitMap.iterate(&markFromRootsClosure, ra, _span.end()); 4254 } 4255 return true; 4256 } 4257 4258 void CMSCollector::preclean() { 4259 check_correct_thread_executing(); 4260 assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread"); 4261 verify_work_stacks_empty(); 4262 verify_overflow_empty(); 4263 _abort_preclean = false; 4264 if (CMSPrecleaningEnabled) { 4265 _eden_chunk_index = 0; 4266 size_t used = get_eden_used(); 4267 size_t capacity = get_eden_capacity(); 4268 // Don't start sampling unless we will get sufficiently 4269 // many samples. 4270 if (used < (capacity/(CMSScheduleRemarkSamplingRatio * 100) 4271 * CMSScheduleRemarkEdenPenetration)) { 4272 _start_sampling = true; 4273 } else { 4274 _start_sampling = false; 4275 } 4276 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); 4277 CMSPhaseAccounting pa(this, "preclean", !PrintGCDetails); 4278 preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1); 4279 } 4280 CMSTokenSync x(true); // is cms thread 4281 if (CMSPrecleaningEnabled) { 4282 sample_eden(); 4283 _collectorState = AbortablePreclean; 4284 } else { 4285 _collectorState = FinalMarking; 4286 } 4287 verify_work_stacks_empty(); 4288 verify_overflow_empty(); 4289 } 4290 4291 // Try and schedule the remark such that young gen 4292 // occupancy is CMSScheduleRemarkEdenPenetration %. 4293 void CMSCollector::abortable_preclean() { 4294 check_correct_thread_executing(); 4295 assert(CMSPrecleaningEnabled, "Inconsistent control state"); 4296 assert(_collectorState == AbortablePreclean, "Inconsistent control state"); 4297 4298 // If Eden's current occupancy is below this threshold, 4299 // immediately schedule the remark; else preclean 4300 // past the next scavenge in an effort to 4301 // schedule the pause as described above. By choosing 4302 // CMSScheduleRemarkEdenSizeThreshold >= max eden size 4303 // we will never do an actual abortable preclean cycle. 4304 if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) { 4305 TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty); 4306 CMSPhaseAccounting pa(this, "abortable-preclean", !PrintGCDetails); 4307 // We need more smarts in the abortable preclean 4308 // loop below to deal with cases where allocation 4309 // in young gen is very very slow, and our precleaning 4310 // is running a losing race against a horde of 4311 // mutators intent on flooding us with CMS updates 4312 // (dirty cards). 4313 // One, admittedly dumb, strategy is to give up 4314 // after a certain number of abortable precleaning loops 4315 // or after a certain maximum time. We want to make 4316 // this smarter in the next iteration. 4317 // XXX FIX ME!!! YSR 4318 size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0; 4319 while (!(should_abort_preclean() || 4320 ConcurrentMarkSweepThread::should_terminate())) { 4321 workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2); 4322 cumworkdone += workdone; 4323 loops++; 4324 // Voluntarily terminate abortable preclean phase if we have 4325 // been at it for too long.
4326 if ((CMSMaxAbortablePrecleanLoops != 0) && 4327 loops >= CMSMaxAbortablePrecleanLoops) { 4328 if (PrintGCDetails) { 4329 gclog_or_tty->print(" CMS: abort preclean due to loops "); 4330 } 4331 break; 4332 } 4333 if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) { 4334 if (PrintGCDetails) { 4335 gclog_or_tty->print(" CMS: abort preclean due to time "); 4336 } 4337 break; 4338 } 4339 // If we are doing little work each iteration, we should 4340 // take a short break. 4341 if (workdone < CMSAbortablePrecleanMinWorkPerIteration) { 4342 // Sleep for some time, waiting for work to accumulate 4343 stopTimer(); 4344 cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis); 4345 startTimer(); 4346 waited++; 4347 } 4348 } 4349 if (PrintCMSStatistics > 0) { 4350 gclog_or_tty->print(" [%d iterations, %d waits, %d cards)] ", 4351 loops, waited, cumworkdone); 4352 } 4353 } 4354 CMSTokenSync x(true); // is cms thread 4355 if (_collectorState != Idling) { 4356 assert(_collectorState == AbortablePreclean, 4357 "Spontaneous state transition?"); 4358 _collectorState = FinalMarking; 4359 } // Else, a foreground collection completed this CMS cycle. 4360 return; 4361 } 4362 4363 // Respond to an Eden sampling opportunity 4364 void CMSCollector::sample_eden() { 4365 // Make sure a young gc cannot sneak in between our 4366 // reading and recording of a sample. 4367 assert(Thread::current()->is_ConcurrentGC_thread(), 4368 "Only the cms thread may collect Eden samples"); 4369 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), 4370 "Should collect samples while holding CMS token"); 4371 if (!_start_sampling) { 4372 return; 4373 } 4374 if (_eden_chunk_array) { 4375 if (_eden_chunk_index < _eden_chunk_capacity) { 4376 _eden_chunk_array[_eden_chunk_index] = *_top_addr; // take sample 4377 assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr, 4378 "Unexpected state of Eden"); 4379 // We'd like to check that what we just sampled is an oop-start address; 4380 // however, we cannot do that here since the object may not yet have been 4381 // initialized. So we'll instead do the check when we _use_ this sample 4382 // later. 4383 if (_eden_chunk_index == 0 || 4384 (pointer_delta(_eden_chunk_array[_eden_chunk_index], 4385 _eden_chunk_array[_eden_chunk_index-1]) 4386 >= CMSSamplingGrain)) { 4387 _eden_chunk_index++; // commit sample 4388 } 4389 } 4390 } 4391 if ((_collectorState == AbortablePreclean) && !_abort_preclean) { 4392 size_t used = get_eden_used(); 4393 size_t capacity = get_eden_capacity(); 4394 assert(used <= capacity, "Unexpected state of Eden"); 4395 if (used > (capacity/100 * CMSScheduleRemarkEdenPenetration)) { 4396 _abort_preclean = true; 4397 } 4398 } 4399 } 4400 4401 4402 size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) { 4403 assert(_collectorState == Precleaning || 4404 _collectorState == AbortablePreclean, "incorrect state"); 4405 ResourceMark rm; 4406 HandleMark hm; 4407 // Do one pass of scrubbing the discovered reference lists 4408 // to remove any reference objects with strongly-reachable 4409 // referents. 
4410 if (clean_refs) { 4411 ReferenceProcessor* rp = ref_processor(); 4412 CMSPrecleanRefsYieldClosure yield_cl(this); 4413 assert(rp->span().equals(_span), "Spans should be equal"); 4414 CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap, 4415 &_markStack, &_revisitStack, 4416 true /* preclean */); 4417 CMSDrainMarkingStackClosure complete_trace(this, 4418 _span, &_markBitMap, &_markStack, 4419 &keep_alive, true /* preclean */); 4420 4421 // We don't want this step to interfere with a young 4422 // collection because we don't want to take CPU 4423 // or memory bandwidth away from the young GC threads 4424 // (which may be as many as there are CPUs). 4425 // Note that we don't need to protect ourselves from 4426 // interference with mutators because they can't 4427 // manipulate the discovered reference lists nor affect 4428 // the computed reachability of the referents, the 4429 // only properties manipulated by the precleaning 4430 // of these reference lists. 4431 stopTimer(); 4432 CMSTokenSyncWithLocks x(true /* is cms thread */, 4433 bitMapLock()); 4434 startTimer(); 4435 sample_eden(); 4436 4437 // The following will yield to allow foreground 4438 // collection to proceed promptly. XXX YSR: 4439 // The code in this method may need further 4440 // tweaking for better performance and some restructuring 4441 // for cleaner interfaces. 4442 rp->preclean_discovered_references( 4443 rp->is_alive_non_header(), &keep_alive, &complete_trace, 4444 &yield_cl, should_unload_classes()); 4445 } 4446 4447 if (clean_survivor) { // preclean the active survivor space(s) 4448 assert(_young_gen->kind() == Generation::DefNew || 4449 _young_gen->kind() == Generation::ParNew || 4450 _young_gen->kind() == Generation::ASParNew, 4451 "incorrect type for cast"); 4452 DefNewGeneration* dng = (DefNewGeneration*)_young_gen; 4453 PushAndMarkClosure pam_cl(this, _span, ref_processor(), 4454 &_markBitMap, &_modUnionTable, 4455 &_markStack, &_revisitStack, 4456 true /* precleaning phase */); 4457 stopTimer(); 4458 CMSTokenSyncWithLocks ts(true /* is cms thread */, 4459 bitMapLock()); 4460 startTimer(); 4461 unsigned int before_count = 4462 GenCollectedHeap::heap()->total_collections(); 4463 SurvivorSpacePrecleanClosure 4464 sss_cl(this, _span, &_markBitMap, &_markStack, 4465 &pam_cl, before_count, CMSYield); 4466 DEBUG_ONLY(RememberKlassesChecker mx(should_unload_classes());) 4467 dng->from()->object_iterate_careful(&sss_cl); 4468 dng->to()->object_iterate_careful(&sss_cl); 4469 } 4470 MarkRefsIntoAndScanClosure 4471 mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable, 4472 &_markStack, &_revisitStack, this, CMSYield, 4473 true /* precleaning phase */); 4474 // CAUTION: The following closure has persistent state that may need to 4475 // be reset upon a decrease in the sequence of addresses it 4476 // processes. 4477 ScanMarkedObjectsAgainCarefullyClosure 4478 smoac_cl(this, _span, 4479 &_markBitMap, &_markStack, &_revisitStack, &mrias_cl, CMSYield); 4480 4481 // Preclean dirty cards in ModUnionTable and CardTable using 4482 // appropriate convergence criterion; 4483 // repeat CMSPrecleanIter times unless we find that 4484 // we are losing. 
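// The convergence test in the loop below stops iterating as soon as // curNumCards * CMSPrecleanDenominator > lastNumCards * CMSPrecleanNumerator, // i.e. when an iteration fails to shrink the dirty card count to below // CMSPrecleanNumerator/CMSPrecleanDenominator of the previous count. // For example, with the default ratio of 2/3, a pass that leaves more // than two thirds of the previous pass's cards dirty ends precleaning.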
4485 assert(CMSPrecleanIter < 10, "CMSPrecleanIter is too large"); 4486 assert(CMSPrecleanNumerator < CMSPrecleanDenominator, 4487 "Bad convergence multiplier"); 4488 assert(CMSPrecleanThreshold >= 100, 4489 "Unreasonably low CMSPrecleanThreshold"); 4490 4491 size_t numIter, cumNumCards, lastNumCards, curNumCards; 4492 for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0; 4493 numIter < CMSPrecleanIter; 4494 numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) { 4495 curNumCards = preclean_mod_union_table(_cmsGen, &smoac_cl); 4496 if (CMSPermGenPrecleaningEnabled) { 4497 curNumCards += preclean_mod_union_table(_permGen, &smoac_cl); 4498 } 4499 if (Verbose && PrintGCDetails) { 4500 gclog_or_tty->print(" (modUnionTable: "SIZE_FORMAT" cards)", curNumCards); 4501 } 4502 // Either there are very few dirty cards, so re-mark 4503 // pause will be small anyway, or our pre-cleaning isn't 4504 // that much faster than the rate at which cards are being 4505 // dirtied, so we might as well stop and re-mark since 4506 // precleaning won't improve our re-mark time by much. 4507 if (curNumCards <= CMSPrecleanThreshold || 4508 (numIter > 0 && 4509 (curNumCards * CMSPrecleanDenominator > 4510 lastNumCards * CMSPrecleanNumerator))) { 4511 numIter++; 4512 cumNumCards += curNumCards; 4513 break; 4514 } 4515 } 4516 curNumCards = preclean_card_table(_cmsGen, &smoac_cl); 4517 if (CMSPermGenPrecleaningEnabled) { 4518 curNumCards += preclean_card_table(_permGen, &smoac_cl); 4519 } 4520 cumNumCards += curNumCards; 4521 if (PrintGCDetails && PrintCMSStatistics != 0) { 4522 gclog_or_tty->print_cr(" (cardTable: "SIZE_FORMAT" cards, re-scanned "SIZE_FORMAT" cards, "SIZE_FORMAT" iterations)", 4523 curNumCards, cumNumCards, numIter); 4524 } 4525 return cumNumCards; // as a measure of useful work done 4526 } 4527 4528 // PRECLEANING NOTES: 4529 // Precleaning involves: 4530 // . reading the bits of the modUnionTable and clearing the set bits. 4531 // . For the cards corresponding to the set bits, we scan the 4532 // objects on those cards. This means we need the free_list_lock 4533 // so that we can safely iterate over the CMS space when scanning 4534 // for oops. 4535 // . When we scan the objects, we'll be both reading and setting 4536 // marks in the marking bit map, so we'll need the marking bit map. 4537 // . For protecting _collector_state transitions, we take the CGC_lock. 4538 // Note that any races in the reading of card table entries by the 4539 // CMS thread on the one hand and the clearing of those entries by the 4540 // VM thread or the setting of those entries by the mutator threads on the 4541 // other are quite benign. However, for efficiency it makes sense to keep 4542 // the VM thread from racing with the CMS thread while the latter is 4543 // reading dirty card info from the modUnionTable. We therefore also use the 4544 // CGC_lock to protect the reading of the card table and the mod union 4545 // table by the CMS thread. 4546 // . We run concurrently with mutator updates, so scanning 4547 // needs to be done carefully -- we should not try to scan 4548 // potentially uninitialized objects. 4549 // 4550 // Locking strategy: While holding the CGC_lock, we scan over and 4551 // reset a maximal dirty range of the mod union / card tables, then lock 4552 // the free_list_lock and bitmap lock to do a full marking, then 4553 // release these locks; and repeat the cycle.
This allows for a 4554 // certain amount of fairness in the sharing of these locks between 4555 // the CMS collector on the one hand, and the VM thread and the 4556 // mutators on the other. 4557 4558 // NOTE: preclean_mod_union_table() and preclean_card_table() 4559 // further below are largely identical; if you need to modify 4560 // one of these methods, please check the other method too. 4561 4562 size_t CMSCollector::preclean_mod_union_table( 4563 ConcurrentMarkSweepGeneration* gen, 4564 ScanMarkedObjectsAgainCarefullyClosure* cl) { 4565 verify_work_stacks_empty(); 4566 verify_overflow_empty(); 4567 4568 // Turn off checking for this method but turn it back on 4569 // selectively. There are yield points in this method 4570 // but it is difficult to turn the checking off just around 4571 // the yield points. It is simpler to selectively turn 4572 // it on. 4573 DEBUG_ONLY(RememberKlassesChecker mux(false);) 4574 4575 // strategy: starting with the first card, accumulate contiguous 4576 // ranges of dirty cards; clear these cards, then scan the region 4577 // covered by these cards. 4578 4579 // Since all of the MUT is committed ahead, we can just use 4580 // that, in case the generations expand while we are precleaning. 4581 // It might also be fine to just use the committed part of the 4582 // generation, but we might potentially miss cards when the 4583 // generation is rapidly expanding while we are in the midst 4584 // of precleaning. 4585 HeapWord* startAddr = gen->reserved().start(); 4586 HeapWord* endAddr = gen->reserved().end(); 4587 4588 cl->setFreelistLock(gen->freelistLock()); // needed for yielding 4589 4590 size_t numDirtyCards, cumNumDirtyCards; 4591 HeapWord *nextAddr, *lastAddr; 4592 for (cumNumDirtyCards = numDirtyCards = 0, 4593 nextAddr = lastAddr = startAddr; 4594 nextAddr < endAddr; 4595 nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) { 4596 4597 ResourceMark rm; 4598 HandleMark hm; 4599 4600 MemRegion dirtyRegion; 4601 { 4602 stopTimer(); 4603 // Potential yield point 4604 CMSTokenSync ts(true); 4605 startTimer(); 4606 sample_eden(); 4607 // Get dirty region starting at nextAddr (inclusive), 4608 // simultaneously clearing it. 4609 dirtyRegion = 4610 _modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr); 4611 assert(dirtyRegion.start() >= nextAddr, 4612 "returned region inconsistent?"); 4613 } 4614 // Remember where the next search should begin. 4615 // The returned region (if non-empty) is a right open interval, 4616 // so lastAddr is obtained from the right end of that 4617 // interval. 4618 lastAddr = dirtyRegion.end(); 4619 // Should do something more transparent and less hacky XXX 4620 numDirtyCards = 4621 _modUnionTable.heapWordDiffToOffsetDiff(dirtyRegion.word_size()); 4622 4623 // We'll scan the cards in the dirty region (with periodic 4624 // yields for foreground GC as needed).
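// This is the expensive step: it is performed while holding both the // free list lock (so the CMS space can be parsed safely) and the bitmap // lock (since scanning may set marks), per the precleaning notes above.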
4625 if (!dirtyRegion.is_empty()) { 4626 assert(numDirtyCards > 0, "consistency check"); 4627 HeapWord* stop_point = NULL; 4628 stopTimer(); 4629 // Potential yield point 4630 CMSTokenSyncWithLocks ts(true, gen->freelistLock(), 4631 bitMapLock()); 4632 startTimer(); 4633 { 4634 verify_work_stacks_empty(); 4635 verify_overflow_empty(); 4636 sample_eden(); 4637 DEBUG_ONLY(RememberKlassesChecker mx(should_unload_classes());) 4638 stop_point = 4639 gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl); 4640 } 4641 if (stop_point != NULL) { 4642 // The careful iteration stopped early either because it found an 4643 // uninitialized object, or because we were in the midst of an 4644 // "abortable preclean", which should now be aborted. Redirty 4645 // the bits corresponding to the partially-scanned or unscanned 4646 // cards. We'll either restart at the next block boundary or 4647 // abort the preclean. 4648 assert((CMSPermGenPrecleaningEnabled && (gen == _permGen)) || 4649 (_collectorState == AbortablePreclean && should_abort_preclean()), 4650 "Unparsable objects should only be in perm gen."); 4651 _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end())); 4652 if (should_abort_preclean()) { 4653 break; // out of preclean loop 4654 } else { 4655 // Compute the next address at which preclean should pick up; 4656 // might need bitMapLock in order to read P-bits. 4657 lastAddr = next_card_start_after_block(stop_point); 4658 } 4659 } 4660 } else { 4661 assert(lastAddr == endAddr, "consistency check"); 4662 assert(numDirtyCards == 0, "consistency check"); 4663 break; 4664 } 4665 } 4666 verify_work_stacks_empty(); 4667 verify_overflow_empty(); 4668 return cumNumDirtyCards; 4669 } 4670 4671 // NOTE: preclean_mod_union_table() above and preclean_card_table() 4672 // below are largely identical; if you need to modify 4673 // one of these methods, please check the other method too. 4674 4675 size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* gen, 4676 ScanMarkedObjectsAgainCarefullyClosure* cl) { 4677 // strategy: it's similar to preclean_mod_union_table above, in that 4678 // we accumulate contiguous ranges of dirty cards, mark these cards 4679 // precleaned, then scan the region covered by these cards. 4680 HeapWord* endAddr = (HeapWord*)(gen->_virtual_space.high()); 4681 HeapWord* startAddr = (HeapWord*)(gen->_virtual_space.low()); 4682 4683 cl->setFreelistLock(gen->freelistLock()); // needed for yielding 4684 4685 size_t numDirtyCards, cumNumDirtyCards; 4686 HeapWord *lastAddr, *nextAddr; 4687 4688 for (cumNumDirtyCards = numDirtyCards = 0, 4689 nextAddr = lastAddr = startAddr; 4690 nextAddr < endAddr; 4691 nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) { 4692 4693 ResourceMark rm; 4694 HandleMark hm; 4695 4696 MemRegion dirtyRegion; 4697 { 4698 // See comments in "Precleaning notes" above on why we 4699 // do this locking. XXX Could the locking overheads be 4700 // too high when dirty cards are sparse? [I don't think so.]
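// Note that, unlike preclean_mod_union_table() above, the dirty cards // found here are not cleared outright: dirty_card_range_after_reset() // resets them to precleaned_card_val(), so a subsequent mutator store // can still re-dirty them for inspection at remark.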
4701 stopTimer(); 4702 CMSTokenSync x(true); // is cms thread 4703 startTimer(); 4704 sample_eden(); 4705 // Get and clear dirty region from card table 4706 dirtyRegion = _ct->ct_bs()->dirty_card_range_after_reset( 4707 MemRegion(nextAddr, endAddr), 4708 true, 4709 CardTableModRefBS::precleaned_card_val()); 4710 4711 assert(dirtyRegion.start() >= nextAddr, 4712 "returned region inconsistent?"); 4713 } 4714 lastAddr = dirtyRegion.end(); 4715 numDirtyCards = 4716 dirtyRegion.word_size()/CardTableModRefBS::card_size_in_words; 4717 4718 if (!dirtyRegion.is_empty()) { 4719 stopTimer(); 4720 CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock()); 4721 startTimer(); 4722 sample_eden(); 4723 verify_work_stacks_empty(); 4724 verify_overflow_empty(); 4725 DEBUG_ONLY(RememberKlassesChecker mx(should_unload_classes());) 4726 HeapWord* stop_point = 4727 gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl); 4728 if (stop_point != NULL) { 4729 // The careful iteration stopped early because it found an 4730 // uninitialized object. Redirty the bits corresponding to the 4731 // partially-scanned or unscanned cards, and start again at the 4732 // next block boundary. 4733 assert(CMSPermGenPrecleaningEnabled || 4734 (_collectorState == AbortablePreclean && should_abort_preclean()), 4735 "Unparsable objects should only be in perm gen."); 4736 _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end())); 4737 if (should_abort_preclean()) { 4738 break; // out of preclean loop 4739 } else { 4740 // Compute the next address at which preclean should pick up. 4741 lastAddr = next_card_start_after_block(stop_point); 4742 } 4743 } 4744 } else { 4745 break; 4746 } 4747 } 4748 verify_work_stacks_empty(); 4749 verify_overflow_empty(); 4750 return cumNumDirtyCards; 4751 } 4752 4753 void CMSCollector::checkpointRootsFinal(bool asynch, 4754 bool clear_all_soft_refs, bool init_mark_was_synchronous) { 4755 assert(_collectorState == FinalMarking, "incorrect state transition?"); 4756 check_correct_thread_executing(); 4757 // world is stopped at this checkpoint 4758 assert(SafepointSynchronize::is_at_safepoint(), 4759 "world should be stopped"); 4760 TraceCMSMemoryManagerStats tms(_collectorState); 4761 verify_work_stacks_empty(); 4762 verify_overflow_empty(); 4763 4764 SpecializationStats::clear(); 4765 if (PrintGCDetails) { 4766 gclog_or_tty->print("[YG occupancy: "SIZE_FORMAT" K ("SIZE_FORMAT" K)]", 4767 _young_gen->used() / K, 4768 _young_gen->capacity() / K); 4769 } 4770 if (asynch) { 4771 if (CMSScavengeBeforeRemark) { 4772 GenCollectedHeap* gch = GenCollectedHeap::heap(); 4773 // Temporarily set the flag to false; GCH->do_collection 4774 // expects it to be false and will set it to true 4775 FlagSetting fl(gch->_is_gc_active, false); 4776 NOT_PRODUCT(TraceTime t("Scavenge-Before-Remark", 4777 PrintGCDetails && Verbose, true, gclog_or_tty);) 4778 int level = _cmsGen->level() - 1; 4779 if (level >= 0) { 4780 gch->do_collection(true, // full (i.e.
force, see below) 4781 false, // !clear_all_soft_refs 4782 0, // size 4783 false, // is_tlab 4784 level // max_level 4785 ); 4786 } 4787 } 4788 FreelistLocker x(this); 4789 MutexLockerEx y(bitMapLock(), 4790 Mutex::_no_safepoint_check_flag); 4791 assert(!init_mark_was_synchronous, "but that's impossible!"); 4792 checkpointRootsFinalWork(asynch, clear_all_soft_refs, false); 4793 } else { 4794 // already have all the locks 4795 checkpointRootsFinalWork(asynch, clear_all_soft_refs, 4796 init_mark_was_synchronous); 4797 } 4798 verify_work_stacks_empty(); 4799 verify_overflow_empty(); 4800 SpecializationStats::print(); 4801 } 4802 4803 void CMSCollector::checkpointRootsFinalWork(bool asynch, 4804 bool clear_all_soft_refs, bool init_mark_was_synchronous) { 4805 4806 NOT_PRODUCT(TraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, gclog_or_tty);) 4807 4808 assert(haveFreelistLocks(), "must have free list locks"); 4809 assert_lock_strong(bitMapLock()); 4810 4811 if (UseAdaptiveSizePolicy) { 4812 size_policy()->checkpoint_roots_final_begin(); 4813 } 4814 4815 ResourceMark rm; 4816 HandleMark hm; 4817 4818 GenCollectedHeap* gch = GenCollectedHeap::heap(); 4819 4820 if (should_unload_classes()) { 4821 CodeCache::gc_prologue(); 4822 } 4823 assert(haveFreelistLocks(), "must have free list locks"); 4824 assert_lock_strong(bitMapLock()); 4825 4826 DEBUG_ONLY(RememberKlassesChecker fmx(should_unload_classes());) 4827 if (!init_mark_was_synchronous) { 4828 // We might assume that we need not fill TLAB's when 4829 // CMSScavengeBeforeRemark is set, because we may have just done 4830 // a scavenge which would have filled all TLAB's -- and besides 4831 // Eden would be empty. This, however, may not always be the case -- 4832 // for instance although we asked for a scavenge, it may not have 4833 // happened because of a JNI critical section. We probably need 4834 // a policy for deciding whether we can in that case wait until 4835 // the critical section releases and then do the remark following 4836 // the scavenge, and skip it here. In the absence of that policy, 4837 // or of an indication of whether the scavenge did indeed occur, 4838 // we cannot rely on TLAB's having been filled and must do 4839 // so here just in case a scavenge did not happen. 4840 gch->ensure_parsability(false); // fill TLAB's, but no need to retire them 4841 // Update the saved marks which may affect the root scans. 4842 gch->save_marks(); 4843 4844 { 4845 COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;) 4846 4847 // Note on the role of the mod union table: 4848 // Since the marker in "markFromRoots" marks concurrently with 4849 // mutators, it is possible for some reachable objects not to have been 4850 // scanned. For instance, the only reference to an object A may have been 4851 // placed in object B after the marker scanned B. Unless B is rescanned, 4852 // A would be collected. Such updates to references in marked objects 4853 // are detected via the mod union table which is the set of all cards 4854 // dirtied since the first checkpoint in this GC cycle and prior to 4855 // the most recent young generation GC, minus those cleaned up by the 4856 // concurrent precleaning.
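// Dispatch to the parallel rescan when CMSParallelRemarkEnabled is set // and parallel GC worker threads are available; otherwise fall back to // the serial rescan.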
4857 if (CMSParallelRemarkEnabled && ParallelGCThreads > 0) { 4858 TraceTime t("Rescan (parallel) ", PrintGCDetails, false, gclog_or_tty); 4859 do_remark_parallel(); 4860 } else { 4861 TraceTime t("Rescan (non-parallel) ", PrintGCDetails, false, 4862 gclog_or_tty); 4863 do_remark_non_parallel(); 4864 } 4865 } 4866 } else { 4867 assert(!asynch, "Can't have init_mark_was_synchronous in asynch mode"); 4868 // The initial mark was stop-world, so there's no rescanning to 4869 // do; go straight on to the next step below. 4870 } 4871 verify_work_stacks_empty(); 4872 verify_overflow_empty(); 4873 4874 { 4875 NOT_PRODUCT(TraceTime ts("refProcessingWork", PrintGCDetails, false, gclog_or_tty);) 4876 refProcessingWork(asynch, clear_all_soft_refs); 4877 } 4878 verify_work_stacks_empty(); 4879 verify_overflow_empty(); 4880 4881 if (should_unload_classes()) { 4882 CodeCache::gc_epilogue(); 4883 } 4884 4885 // If we encountered any (marking stack / work queue) overflow 4886 // events during the current CMS cycle, take appropriate 4887 // remedial measures, where possible, so as to try and avoid 4888 // recurrence of that condition. 4889 assert(_markStack.isEmpty(), "No grey objects"); 4890 size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw + 4891 _ser_kac_ovflw + _ser_kac_preclean_ovflw; 4892 if (ser_ovflw > 0) { 4893 if (PrintCMSStatistics != 0) { 4894 gclog_or_tty->print_cr("Marking stack overflow (benign) " 4895 "(pmc_pc="SIZE_FORMAT", pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT 4896 ", kac_preclean="SIZE_FORMAT")", 4897 _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw, 4898 _ser_kac_ovflw, _ser_kac_preclean_ovflw); 4899 } 4900 _markStack.expand(); 4901 _ser_pmc_remark_ovflw = 0; 4902 _ser_pmc_preclean_ovflw = 0; 4903 _ser_kac_preclean_ovflw = 0; 4904 _ser_kac_ovflw = 0; 4905 } 4906 if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) { 4907 if (PrintCMSStatistics != 0) { 4908 gclog_or_tty->print_cr("Work queue overflow (benign) " 4909 "(pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")", 4910 _par_pmc_remark_ovflw, _par_kac_ovflw); 4911 } 4912 _par_pmc_remark_ovflw = 0; 4913 _par_kac_ovflw = 0; 4914 } 4915 if (PrintCMSStatistics != 0) { 4916 if (_markStack._hit_limit > 0) { 4917 gclog_or_tty->print_cr(" (benign) Hit max stack size limit ("SIZE_FORMAT")", 4918 _markStack._hit_limit); 4919 } 4920 if (_markStack._failed_double > 0) { 4921 gclog_or_tty->print_cr(" (benign) Failed stack doubling ("SIZE_FORMAT")," 4922 " current capacity "SIZE_FORMAT, 4923 _markStack._failed_double, 4924 _markStack.capacity()); 4925 } 4926 } 4927 _markStack._hit_limit = 0; 4928 _markStack._failed_double = 0; 4929 4930 // Check that all the klasses have been checked 4931 assert(_revisitStack.isEmpty(), "Not all klasses revisited"); 4932 4933 if ((VerifyAfterGC || VerifyDuringGC) && 4934 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { 4935 verify_after_remark(); 4936 } 4937 4938 // Change under the freelistLocks. 
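// (See the note on abstract state transitions at the end of sweep(): // mutators read _collectorState under the free list locks when deciding // whether to allocate live and whether to dirty the mod union table, so // this transition must be made while those locks are held.)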
4939 _collectorState = Sweeping; 4940 // Call isAllClear() under bitMapLock 4941 assert(_modUnionTable.isAllClear(), "Should be clear by end of the" 4942 " final marking"); 4943 if (UseAdaptiveSizePolicy) { 4944 size_policy()->checkpoint_roots_final_end(gch->gc_cause()); 4945 } 4946 } 4947 4948 // Parallel remark task 4949 class CMSParRemarkTask: public AbstractGangTask { 4950 CMSCollector* _collector; 4951 WorkGang* _workers; 4952 int _n_workers; 4953 CompactibleFreeListSpace* _cms_space; 4954 CompactibleFreeListSpace* _perm_space; 4955 4956 // The per-thread work queues, available here for stealing. 4957 OopTaskQueueSet* _task_queues; 4958 ParallelTaskTerminator _term; 4959 4960 public: 4961 CMSParRemarkTask(CMSCollector* collector, 4962 CompactibleFreeListSpace* cms_space, 4963 CompactibleFreeListSpace* perm_space, 4964 int n_workers, WorkGang* workers, 4965 OopTaskQueueSet* task_queues): 4966 AbstractGangTask("Rescan roots and grey objects in parallel"), 4967 _collector(collector), 4968 _cms_space(cms_space), _perm_space(perm_space), 4969 _n_workers(n_workers), 4970 _workers(workers), 4971 _task_queues(task_queues), 4972 _term(workers->total_workers(), task_queues) { } 4973 4974 OopTaskQueueSet* task_queues() { return _task_queues; } 4975 4976 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); } 4977 4978 ParallelTaskTerminator* terminator() { return &_term; } 4979 4980 void work(int i); 4981 4982 private: 4983 // Work method in support of parallel rescan ... of young gen spaces 4984 void do_young_space_rescan(int i, Par_MarkRefsIntoAndScanClosure* cl, 4985 ContiguousSpace* space, 4986 HeapWord** chunk_array, size_t chunk_top); 4987 4988 // ... of dirty cards in old space 4989 void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i, 4990 Par_MarkRefsIntoAndScanClosure* cl); 4991 4992 // ... work stealing for the above 4993 void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed); 4994 }; 4995 4996 void CMSParRemarkTask::work(int i) { 4997 elapsedTimer _timer; 4998 ResourceMark rm; 4999 HandleMark hm; 5000 5001 // ---------- rescan from roots -------------- 5002 _timer.start(); 5003 GenCollectedHeap* gch = GenCollectedHeap::heap(); 5004 Par_MarkRefsIntoAndScanClosure par_mrias_cl(_collector, 5005 _collector->_span, _collector->ref_processor(), 5006 &(_collector->_markBitMap), 5007 work_queue(i), &(_collector->_revisitStack)); 5008 5009 // Rescan young gen roots first since these are likely 5010 // coarsely partitioned and may, on that account, constitute 5011 // the critical path; thus, it's best to start off that 5012 // work first. 
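// The eden and survivor chunk arrays recorded during (abortable) // precleaning supply the task boundaries that do_young_space_rescan() // uses below.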
5013 // ---------- young gen roots -------------- 5014 { 5015 DefNewGeneration* dng = _collector->_young_gen->as_DefNewGeneration(); 5016 EdenSpace* eden_space = dng->eden(); 5017 ContiguousSpace* from_space = dng->from(); 5018 ContiguousSpace* to_space = dng->to(); 5019 5020 HeapWord** eca = _collector->_eden_chunk_array; 5021 size_t ect = _collector->_eden_chunk_index; 5022 HeapWord** sca = _collector->_survivor_chunk_array; 5023 size_t sct = _collector->_survivor_chunk_index; 5024 5025 assert(ect <= _collector->_eden_chunk_capacity, "out of bounds"); 5026 assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds"); 5027 5028 do_young_space_rescan(i, &par_mrias_cl, to_space, NULL, 0); 5029 do_young_space_rescan(i, &par_mrias_cl, from_space, sca, sct); 5030 do_young_space_rescan(i, &par_mrias_cl, eden_space, eca, ect); 5031 5032 _timer.stop(); 5033 if (PrintCMSStatistics != 0) { 5034 gclog_or_tty->print_cr( 5035 "Finished young gen rescan work in %dth thread: %3.3f sec", 5036 i, _timer.seconds()); 5037 } 5038 } 5039 5040 // ---------- remaining roots -------------- 5041 _timer.reset(); 5042 _timer.start(); 5043 gch->gen_process_strong_roots(_collector->_cmsGen->level(), 5044 false, // yg was scanned above 5045 false, // this is parallel code 5046 true, // collecting perm gen 5047 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()), 5048 &par_mrias_cl, 5049 true, // walk all of code cache if (so & SO_CodeCache) 5050 NULL); 5051 assert(_collector->should_unload_classes() 5052 || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_CodeCache), 5053 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops"); 5054 _timer.stop(); 5055 if (PrintCMSStatistics != 0) { 5056 gclog_or_tty->print_cr( 5057 "Finished remaining root rescan work in %dth thread: %3.3f sec", 5058 i, _timer.seconds()); 5059 } 5060 5061 // ---------- rescan dirty cards ------------ 5062 _timer.reset(); 5063 _timer.start(); 5064 5065 // Do the rescan tasks for each of the two spaces 5066 // (cms_space and perm_space) in turn. 5067 do_dirty_card_rescan_tasks(_cms_space, i, &par_mrias_cl); 5068 do_dirty_card_rescan_tasks(_perm_space, i, &par_mrias_cl); 5069 _timer.stop(); 5070 if (PrintCMSStatistics != 0) { 5071 gclog_or_tty->print_cr( 5072 "Finished dirty card rescan work in %dth thread: %3.3f sec", 5073 i, _timer.seconds()); 5074 } 5075 5076 // ---------- steal work from other threads ... 5077 // ---------- ... and drain overflow list. 5078 _timer.reset(); 5079 _timer.start(); 5080 do_work_steal(i, &par_mrias_cl, _collector->hash_seed(i)); 5081 _timer.stop(); 5082 if (PrintCMSStatistics != 0) { 5083 gclog_or_tty->print_cr( 5084 "Finished work stealing in %dth thread: %3.3f sec", 5085 i, _timer.seconds()); 5086 } 5087 } 5088 5089 void 5090 CMSParRemarkTask::do_young_space_rescan(int i, 5091 Par_MarkRefsIntoAndScanClosure* cl, ContiguousSpace* space, 5092 HeapWord** chunk_array, size_t chunk_top) { 5093 // Until all tasks completed: 5094 // . claim an unclaimed task 5095 // . compute region boundaries corresponding to task claimed 5096 // using chunk_array 5097 // . 
par_oop_iterate(cl) over that region 5098 5099 ResourceMark rm; 5100 HandleMark hm; 5101 5102 SequentialSubTasksDone* pst = space->par_seq_tasks(); 5103 assert(pst->valid(), "Uninitialized use?"); 5104 5105 int nth_task = 0; 5106 int n_tasks = pst->n_tasks(); 5107 5108 HeapWord *start, *end; 5109 while (!pst->is_task_claimed(/* reference */ nth_task)) { 5110 // We claimed task # nth_task; compute its boundaries. 5111 if (chunk_top == 0) { // no samples were taken 5112 assert(nth_task == 0 && n_tasks == 1, "Can have only 1 EdenSpace task"); 5113 start = space->bottom(); 5114 end = space->top(); 5115 } else if (nth_task == 0) { 5116 start = space->bottom(); 5117 end = chunk_array[nth_task]; 5118 } else if (nth_task < (jint)chunk_top) { 5119 assert(nth_task >= 1, "Control point invariant"); 5120 start = chunk_array[nth_task - 1]; 5121 end = chunk_array[nth_task]; 5122 } else { 5123 assert(nth_task == (jint)chunk_top, "Control point invariant"); 5124 start = chunk_array[chunk_top - 1]; 5125 end = space->top(); 5126 } 5127 MemRegion mr(start, end); 5128 // Verify that mr is in space 5129 assert(mr.is_empty() || space->used_region().contains(mr), 5130 "Should be in space"); 5131 // Verify that "start" is an object boundary 5132 assert(mr.is_empty() || oop(mr.start())->is_oop(), 5133 "Should be an oop"); 5134 space->par_oop_iterate(mr, cl); 5135 } 5136 pst->all_tasks_completed(); 5137 } 5138 5139 void 5140 CMSParRemarkTask::do_dirty_card_rescan_tasks( 5141 CompactibleFreeListSpace* sp, int i, 5142 Par_MarkRefsIntoAndScanClosure* cl) { 5143 // Until all tasks completed: 5144 // . claim an unclaimed task 5145 // . compute region boundaries corresponding to task claimed 5146 // . transfer dirty bits ct->mut for that region 5147 // . apply rescanclosure to dirty mut bits for that region 5148 5149 ResourceMark rm; 5150 HandleMark hm; 5151 5152 OopTaskQueue* work_q = work_queue(i); 5153 ModUnionClosure modUnionClosure(&(_collector->_modUnionTable)); 5154 // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! 5155 // CAUTION: This closure has state that persists across calls to 5156 // the work method dirty_range_iterate_clear() in that it has 5157 // embedded in it a (subtype of) UpwardsObjectClosure. The 5158 // use of that state in the embedded UpwardsObjectClosure instance 5159 // assumes that the cards are always iterated (even if in parallel 5160 // by several threads) in monotonically increasing order per each 5161 // thread. This is true of the implementation below which picks 5162 // card ranges (chunks) in monotonically increasing order globally 5163 // and, a-fortiori, in monotonically increasing order per thread 5164 // (the latter order being a subsequence of the former). 5165 // If the work code below is ever reorganized into a more chaotic 5166 // work-partitioning form than the current "sequential tasks" 5167 // paradigm, the use of that persistent state will have to be 5168 // revisited and modified appropriately. See also related 5169 // bug 4756801, work on which should examine this code to make 5170 // sure that the changes there do not run counter to the 5171 // assumptions made here and necessary for correctness and 5172 // efficiency. Note also that this code might yield inefficient 5173 // behaviour in the case of very large objects that span one or 5174 // more work chunks. Such objects would potentially be scanned 5175 // several times redundantly. Work on 4756801 should try and 5176 // address that performance anomaly if at all possible.
XXX 5177 MemRegion full_span = _collector->_span; 5178 CMSBitMap* bm = &(_collector->_markBitMap); // shared 5179 CMSMarkStack* rs = &(_collector->_revisitStack); // shared 5180 MarkFromDirtyCardsClosure 5181 greyRescanClosure(_collector, full_span, // entire span of interest 5182 sp, bm, work_q, rs, cl); 5183 5184 SequentialSubTasksDone* pst = sp->conc_par_seq_tasks(); 5185 assert(pst->valid(), "Uninitialized use?"); 5186 int nth_task = 0; 5187 const int alignment = CardTableModRefBS::card_size * BitsPerWord; 5188 MemRegion span = sp->used_region(); 5189 HeapWord* start_addr = span.start(); 5190 HeapWord* end_addr = (HeapWord*)round_to((intptr_t)span.end(), 5191 alignment); 5192 const size_t chunk_size = sp->rescan_task_size(); // in HeapWord units 5193 assert((HeapWord*)round_to((intptr_t)start_addr, alignment) == 5194 start_addr, "Check alignment"); 5195 assert((size_t)round_to((intptr_t)chunk_size, alignment) == 5196 chunk_size, "Check alignment"); 5197 5198 while (!pst->is_task_claimed(/* reference */ nth_task)) { 5199 // Having claimed the nth_task, compute corresponding mem-region, 5200 // which is a-fortiori aligned correctly (i.e. at a MUT boundary). 5201 // The alignment restriction ensures that we do not need any 5202 // synchronization with other gang-workers while setting or 5203 // clearing bits in this chunk of the MUT. 5204 MemRegion this_span = MemRegion(start_addr + nth_task*chunk_size, 5205 start_addr + (nth_task+1)*chunk_size); 5206 // The last chunk's end might be way beyond end of the 5207 // used region. In that case pull back appropriately. 5208 if (this_span.end() > end_addr) { 5209 this_span.set_end(end_addr); 5210 assert(!this_span.is_empty(), "Program logic (calculation of n_tasks)"); 5211 } 5212 // Iterate over the dirty cards covering this chunk, marking them 5213 // precleaned, and setting the corresponding bits in the mod union 5214 // table. Since we have been careful to partition at Card and MUT-word 5215 // boundaries no synchronization is needed between parallel threads. 5216 _collector->_ct->ct_bs()->dirty_card_iterate(this_span, 5217 &modUnionClosure); 5218 5219 // Having transferred these marks into the modUnionTable, 5220 // rescan the marked objects on the dirty cards in the modUnionTable. 5221 // Even if this is at a synchronous collection, the initial marking 5222 // may have been done during an asynchronous collection so there 5223 // may be dirty bits in the mod-union table. 5224 _collector->_modUnionTable.dirty_range_iterate_clear( 5225 this_span, &greyRescanClosure); 5226 _collector->_modUnionTable.verifyNoOneBitsInRange( 5227 this_span.start(), 5228 this_span.end()); 5229 } 5230 pst->all_tasks_completed(); // declare that I am done 5231 } 5232 5233 // . see if we can share work_queues with ParNew?
XXX 5234 void 5235 CMSParRemarkTask::do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, 5236 int* seed) { 5237 OopTaskQueue* work_q = work_queue(i); 5238 NOT_PRODUCT(int num_steals = 0;) 5239 oop obj_to_scan; 5240 CMSBitMap* bm = &(_collector->_markBitMap); 5241 5242 while (true) { 5243 // Completely finish any left over work from (an) earlier round(s) 5244 cl->trim_queue(0); 5245 size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4, 5246 (size_t)ParGCDesiredObjsFromOverflowList); 5247 // Now check if there's any work in the overflow list 5248 if (_collector->par_take_from_overflow_list(num_from_overflow_list, 5249 work_q)) { 5250 // found something in global overflow list; 5251 // not yet ready to go stealing work from others. 5252 // We'd like to assert(work_q->size() != 0, ...) 5253 // because we just took work from the overflow list, 5254 // but of course we can't since all of that could have 5255 // been already stolen from us. 5256 // "He giveth and He taketh away." 5257 continue; 5258 } 5259 // Verify that we have no work before we resort to stealing 5260 assert(work_q->size() == 0, "Have work, shouldn't steal"); 5261 // Try to steal from other queues that have work 5262 if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) { 5263 NOT_PRODUCT(num_steals++;) 5264 assert(obj_to_scan->is_oop(), "Oops, not an oop!"); 5265 assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?"); 5266 // Do scanning work 5267 obj_to_scan->oop_iterate(cl); 5268 // Loop around, finish this work, and try to steal some more 5269 } else if (terminator()->offer_termination()) { 5270 break; // nirvana from the infinite cycle 5271 } 5272 } 5273 NOT_PRODUCT( 5274 if (PrintCMSStatistics != 0) { 5275 gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals); 5276 } 5277 ) 5278 assert(work_q->size() == 0 && _collector->overflow_list_is_empty(), 5279 "Else our work is not yet done"); 5280 } 5281 5282 // Return a thread-local PLAB recording array, as appropriate. 5283 void* CMSCollector::get_data_recorder(int thr_num) { 5284 if (_survivor_plab_array != NULL && 5285 (CMSPLABRecordAlways || 5286 (_collectorState > Marking && _collectorState < FinalMarking))) { 5287 assert(thr_num < (int)ParallelGCThreads, "thr_num is out of bounds"); 5288 ChunkArray* ca = &_survivor_plab_array[thr_num]; 5289 ca->reset(); // clear it so that fresh data is recorded 5290 return (void*) ca; 5291 } else { 5292 return NULL; 5293 } 5294 } 5295 5296 // Reset all the thread-local PLAB recording arrays 5297 void CMSCollector::reset_survivor_plab_arrays() { 5298 for (uint i = 0; i < ParallelGCThreads; i++) { 5299 _survivor_plab_array[i].reset(); 5300 } 5301 } 5302 5303 // Merge the per-thread plab arrays into the global survivor chunk 5304 // array which will provide the partitioning of the survivor space 5305 // for CMS rescan. 
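// This is a simple k-way merge, with k = ParallelGCThreads: each round // scans every per-thread cursor for the smallest unconsumed sample and // appends it to _survivor_chunk_array, so the result is sorted and the // cost is O(k) per emitted entry -- cheap at these array sizes.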
5306 void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv) { 5307 assert(_survivor_plab_array != NULL, "Error"); 5308 assert(_survivor_chunk_array != NULL, "Error"); 5309 assert(_collectorState == FinalMarking, "Error"); 5310 for (uint j = 0; j < ParallelGCThreads; j++) { 5311 _cursor[j] = 0; 5312 } 5313 HeapWord* top = surv->top(); 5314 size_t i; 5315 for (i = 0; i < _survivor_chunk_capacity; i++) { // all sca entries 5316 HeapWord* min_val = top; // Higher than any PLAB address 5317 uint min_tid = 0; // position of min_val this round 5318 for (uint j = 0; j < ParallelGCThreads; j++) { 5319 ChunkArray* cur_sca = &_survivor_plab_array[j]; 5320 if (_cursor[j] == cur_sca->end()) { 5321 continue; 5322 } 5323 assert(_cursor[j] < cur_sca->end(), "ctl pt invariant"); 5324 HeapWord* cur_val = cur_sca->nth(_cursor[j]); 5325 assert(surv->used_region().contains(cur_val), "Out of bounds value"); 5326 if (cur_val < min_val) { 5327 min_tid = j; 5328 min_val = cur_val; 5329 } else { 5330 assert(cur_val < top, "All recorded addresses should be less"); 5331 } 5332 } 5333 // At this point min_val and min_tid are respectively 5334 // the least address in _survivor_plab_array[j]->nth(_cursor[j]) 5335 // and the thread (j) that witnesses that address. 5336 // We record this address in the _survivor_chunk_array[i] 5337 // and increment _cursor[min_tid] prior to the next round i. 5338 if (min_val == top) { 5339 break; 5340 } 5341 _survivor_chunk_array[i] = min_val; 5342 _cursor[min_tid]++; 5343 } 5344 // We are all done; record the size of the _survivor_chunk_array 5345 _survivor_chunk_index = i; // exclusive: [0, i) 5346 if (PrintCMSStatistics > 0) { 5347 gclog_or_tty->print(" (Survivor: " SIZE_FORMAT " chunks) ", i); 5348 } 5349 // Verify that we used up all the recorded entries 5350 #ifdef ASSERT 5351 size_t total = 0; 5352 for (uint j = 0; j < ParallelGCThreads; j++) { 5353 assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant"); 5354 total += _cursor[j]; 5355 } 5356 assert(total == _survivor_chunk_index, "Ctl Pt Invariant"); 5357 // Check that the merged array is in sorted order 5358 if (total > 0) { 5359 for (size_t i = 0; i < total - 1; i++) { 5360 if (PrintCMSStatistics > 0) { 5361 gclog_or_tty->print(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ", 5362 i, _survivor_chunk_array[i]); 5363 } 5364 assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1], 5365 "Not sorted"); 5366 } 5367 } 5368 #endif // ASSERT 5369 } 5370 5371 // Set up the space's par_seq_tasks structure for work claiming 5372 // for parallel rescan of young gen. 5373 // See ParRescanTask where this is currently used. 5374 void 5375 CMSCollector:: 5376 initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) { 5377 assert(n_threads > 0, "Unexpected n_threads argument"); 5378 DefNewGeneration* dng = (DefNewGeneration*)_young_gen; 5379 5380 // Eden space 5381 { 5382 SequentialSubTasksDone* pst = dng->eden()->par_seq_tasks(); 5383 assert(!pst->valid(), "Clobbering existing data?"); 5384 // Each valid entry in [0, _eden_chunk_index) represents a task.
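// The +1 below adds the task for the tail of eden, from the last // recorded sample up to the space's top; see do_young_space_rescan().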
5385 size_t n_tasks = _eden_chunk_index + 1; 5386 assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error"); 5387 pst->set_par_threads(n_threads); 5388 pst->set_n_tasks((int)n_tasks); 5389 } 5390 5391 // Merge the survivor plab arrays into _survivor_chunk_array 5392 if (_survivor_plab_array != NULL) { 5393 merge_survivor_plab_arrays(dng->from()); 5394 } else { 5395 assert(_survivor_chunk_index == 0, "Error"); 5396 } 5397 5398 // To space 5399 { 5400 SequentialSubTasksDone* pst = dng->to()->par_seq_tasks(); 5401 assert(!pst->valid(), "Clobbering existing data?"); 5402 pst->set_par_threads(n_threads); 5403 pst->set_n_tasks(1); 5404 assert(pst->valid(), "Error"); 5405 } 5406 5407 // From space 5408 { 5409 SequentialSubTasksDone* pst = dng->from()->par_seq_tasks(); 5410 assert(!pst->valid(), "Clobbering existing data?"); 5411 size_t n_tasks = _survivor_chunk_index + 1; 5412 assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error"); 5413 pst->set_par_threads(n_threads); 5414 pst->set_n_tasks((int)n_tasks); 5415 assert(pst->valid(), "Error"); 5416 } 5417 } 5418 5419 // Parallel version of remark 5420 void CMSCollector::do_remark_parallel() { 5421 GenCollectedHeap* gch = GenCollectedHeap::heap(); 5422 WorkGang* workers = gch->workers(); 5423 assert(workers != NULL, "Need parallel worker threads."); 5424 int n_workers = workers->total_workers(); 5425 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace(); 5426 CompactibleFreeListSpace* perm_space = _permGen->cmsSpace(); 5427 5428 CMSParRemarkTask tsk(this, 5429 cms_space, perm_space, 5430 n_workers, workers, task_queues()); 5431 5432 // Set up for parallel process_strong_roots work. 5433 gch->set_par_threads(n_workers); 5434 // We won't be iterating over the cards in the card table updating 5435 // the younger_gen cards, so we shouldn't call the following else 5436 // the verification code as well as subsequent younger_refs_iterate 5437 // code would get confused. XXX 5438 // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel 5439 5440 // The young gen rescan work will not be done as part of 5441 // process_strong_roots (which currently doesn't know how to 5442 // parallelize such a scan), but rather will be broken up into 5443 // a set of parallel tasks (via the sampling that the [abortable] 5444 // preclean phase did of EdenSpace, plus the [two] tasks of 5445 // scanning the [two] survivor spaces). Further fine-grain 5446 // parallelization of the scanning of the survivor spaces 5447 // themselves, and of precleaning of the younger gen itself 5448 // is deferred to the future. 5449 initialize_sequential_subtasks_for_young_gen_rescan(n_workers); 5450 5451 // The dirty card rescan work is broken up into a "sequence" 5452 // of parallel tasks (per constituent space) that are dynamically 5453 // claimed by the parallel threads. 5454 cms_space->initialize_sequential_subtasks_for_rescan(n_workers); 5455 perm_space->initialize_sequential_subtasks_for_rescan(n_workers); 5456 5457 // It turns out that even when we're using 1 thread, doing the work in a 5458 // separate thread causes wide variance in run times. We can't help this 5459 // in the multi-threaded case, but we special-case n=1 here to get 5460 // repeatable measurements of the 1-thread overhead of the parallel code.
5461 if (n_workers > 1) { 5462 // Make refs discovery MT-safe 5463 ReferenceProcessorMTMutator mt(ref_processor(), true); 5464 GenCollectedHeap::StrongRootsScope srs(gch); 5465 workers->run_task(&tsk); 5466 } else { 5467 GenCollectedHeap::StrongRootsScope srs(gch); 5468 tsk.work(0); 5469 } 5470 gch->set_par_threads(0); // 0 ==> non-parallel. 5471 // restore, single-threaded for now, any preserved marks 5472 // as a result of work_q overflow 5473 restore_preserved_marks_if_any(); 5474 } 5475 5476 // Non-parallel version of remark 5477 void CMSCollector::do_remark_non_parallel() { 5478 ResourceMark rm; 5479 HandleMark hm; 5480 GenCollectedHeap* gch = GenCollectedHeap::heap(); 5481 MarkRefsIntoAndScanClosure 5482 mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable, 5483 &_markStack, &_revisitStack, this, 5484 false /* should_yield */, false /* not precleaning */); 5485 MarkFromDirtyCardsClosure 5486 markFromDirtyCardsClosure(this, _span, 5487 NULL, // space is set further below 5488 &_markBitMap, &_markStack, &_revisitStack, 5489 &mrias_cl); 5490 { 5491 TraceTime t("grey object rescan", PrintGCDetails, false, gclog_or_tty); 5492 // Iterate over the dirty cards, setting the corresponding bits in the 5493 // mod union table. 5494 { 5495 ModUnionClosure modUnionClosure(&_modUnionTable); 5496 _ct->ct_bs()->dirty_card_iterate( 5497 _cmsGen->used_region(), 5498 &modUnionClosure); 5499 _ct->ct_bs()->dirty_card_iterate( 5500 _permGen->used_region(), 5501 &modUnionClosure); 5502 } 5503 // Having transferred these marks into the modUnionTable, we just need 5504 // to rescan the marked objects on the dirty cards in the modUnionTable. 5505 // The initial marking may have been done during an asynchronous 5506 // collection so there may be dirty bits in the mod-union table. 5507 const int alignment = 5508 CardTableModRefBS::card_size * BitsPerWord; 5509 { 5510 // ... First handle dirty cards in CMS gen 5511 markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace()); 5512 MemRegion ur = _cmsGen->used_region(); 5513 HeapWord* lb = ur.start(); 5514 HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment); 5515 MemRegion cms_span(lb, ub); 5516 _modUnionTable.dirty_range_iterate_clear(cms_span, 5517 &markFromDirtyCardsClosure); 5518 verify_work_stacks_empty(); 5519 if (PrintCMSStatistics != 0) { 5520 gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in cms gen) ", 5521 markFromDirtyCardsClosure.num_dirty_cards()); 5522 } 5523 } 5524 { 5525 // .. and then repeat for dirty cards in perm gen 5526 markFromDirtyCardsClosure.set_space(_permGen->cmsSpace()); 5527 MemRegion ur = _permGen->used_region(); 5528 HeapWord* lb = ur.start(); 5529 HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment); 5530 MemRegion perm_span(lb, ub); 5531 _modUnionTable.dirty_range_iterate_clear(perm_span, 5532 &markFromDirtyCardsClosure); 5533 verify_work_stacks_empty(); 5534 if (PrintCMSStatistics != 0) { 5535 gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in perm gen) ", 5536 markFromDirtyCardsClosure.num_dirty_cards()); 5537 } 5538 } 5539 } 5540 if (VerifyDuringGC && 5541 GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) { 5542 HandleMark hm; // Discard invalid handles created during verification 5543 Universe::verify(true); 5544 } 5545 { 5546 TraceTime t("root rescan", PrintGCDetails, false, gclog_or_tty); 5547 5548 verify_work_stacks_empty(); 5549 5550 gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel. 
5551 GenCollectedHeap::StrongRootsScope srs(gch); 5552 gch->gen_process_strong_roots(_cmsGen->level(), 5553 true, // younger gens as roots 5554 false, // use the local StrongRootsScope 5555 true, // collecting perm gen 5556 SharedHeap::ScanningOption(roots_scanning_options()), 5557 &mrias_cl, 5558 true, // walk code active on stacks 5559 NULL); 5560 assert(should_unload_classes() 5561 || (roots_scanning_options() & SharedHeap::SO_CodeCache), 5562 "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops"); 5563 } 5564 verify_work_stacks_empty(); 5565 // Restore evacuated mark words, if any, used for overflow list links 5566 if (!CMSOverflowEarlyRestoration) { 5567 restore_preserved_marks_if_any(); 5568 } 5569 verify_overflow_empty(); 5570 } 5571 5572 //////////////////////////////////////////////////////// 5573 // Parallel Reference Processing Task Proxy Class 5574 //////////////////////////////////////////////////////// 5575 class CMSRefProcTaskProxy: public AbstractGangTask { 5576 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask; 5577 CMSCollector* _collector; 5578 CMSBitMap* _mark_bit_map; 5579 const MemRegion _span; 5580 OopTaskQueueSet* _task_queues; 5581 ParallelTaskTerminator _term; 5582 ProcessTask& _task; 5583 5584 public: 5585 CMSRefProcTaskProxy(ProcessTask& task, 5586 CMSCollector* collector, 5587 const MemRegion& span, 5588 CMSBitMap* mark_bit_map, 5589 int total_workers, 5590 OopTaskQueueSet* task_queues): 5591 AbstractGangTask("Process referents by policy in parallel"), 5592 _task(task), 5593 _collector(collector), _span(span), _mark_bit_map(mark_bit_map), 5594 _task_queues(task_queues), 5595 _term(total_workers, task_queues) 5596 { 5597 assert(_collector->_span.equals(_span) && !_span.is_empty(), 5598 "Inconsistency in _span"); 5599 } 5600 5601 OopTaskQueueSet* task_queues() { return _task_queues; } 5602 5603 OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); } 5604 5605 ParallelTaskTerminator* terminator() { return &_term; } 5606 5607 void do_work_steal(int i, 5608 CMSParDrainMarkingStackClosure* drain, 5609 CMSParKeepAliveClosure* keep_alive, 5610 int* seed); 5611 5612 virtual void work(int i); 5613 }; 5614 5615 void CMSRefProcTaskProxy::work(int i) { 5616 assert(_collector->_span.equals(_span), "Inconsistency in _span"); 5617 CMSParKeepAliveClosure par_keep_alive(_collector, _span, 5618 _mark_bit_map, 5619 &_collector->_revisitStack, 5620 work_queue(i)); 5621 CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span, 5622 _mark_bit_map, 5623 &_collector->_revisitStack, 5624 work_queue(i)); 5625 CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map); 5626 _task.work(i, is_alive_closure, par_keep_alive, par_drain_stack); 5627 if (_task.marks_oops_alive()) { 5628 do_work_steal(i, &par_drain_stack, &par_keep_alive, 5629 _collector->hash_seed(i)); 5630 } 5631 assert(work_queue(i)->size() == 0, "work_queue should be empty"); 5632 assert(_collector->_overflow_list == NULL, "non-empty _overflow_list"); 5633 } 5634 5635 class CMSRefEnqueueTaskProxy: public AbstractGangTask { 5636 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask; 5637 EnqueueTask& _task; 5638 5639 public: 5640 CMSRefEnqueueTaskProxy(EnqueueTask& task) 5641 : AbstractGangTask("Enqueue reference objects in parallel"), 5642 _task(task) 5643 { } 5644 5645 virtual void work(int i) 5646 { 5647 _task.work(i); 5648 } 5649 }; 5650 5651 CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector, 5652 MemRegion span, CMSBitMap* 
bit_map, CMSMarkStack* revisit_stack, 5653 OopTaskQueue* work_queue): 5654 Par_KlassRememberingOopClosure(collector, NULL, revisit_stack), 5655 _span(span), 5656 _bit_map(bit_map), 5657 _work_queue(work_queue), 5658 _mark_and_push(collector, span, bit_map, revisit_stack, work_queue), 5659 _low_water_mark(MIN2((uint)(work_queue->max_elems()/4), 5660 (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads))) 5661 { } 5662 5663 // . see if we can share work_queues with ParNew? XXX 5664 void CMSRefProcTaskProxy::do_work_steal(int i, 5665 CMSParDrainMarkingStackClosure* drain, 5666 CMSParKeepAliveClosure* keep_alive, 5667 int* seed) { 5668 OopTaskQueue* work_q = work_queue(i); 5669 NOT_PRODUCT(int num_steals = 0;) 5670 oop obj_to_scan; 5671 5672 while (true) { 5673 // Completely finish any left over work from (an) earlier round(s) 5674 drain->trim_queue(0); 5675 size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4, 5676 (size_t)ParGCDesiredObjsFromOverflowList); 5677 // Now check if there's any work in the overflow list 5678 if (_collector->par_take_from_overflow_list(num_from_overflow_list, 5679 work_q)) { 5680 // Found something in global overflow list; 5681 // not yet ready to go stealing work from others. 5682 // We'd like to assert(work_q->size() != 0, ...) 5683 // because we just took work from the overflow list, 5684 // but of course we can't, since all of that might have 5685 // been already stolen from us. 5686 continue; 5687 } 5688 // Verify that we have no work before we resort to stealing 5689 assert(work_q->size() == 0, "Have work, shouldn't steal"); 5690 // Try to steal from other queues that have work 5691 if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) { 5692 NOT_PRODUCT(num_steals++;) 5693 assert(obj_to_scan->is_oop(), "Oops, not an oop!"); 5694 assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?"); 5695 // Do scanning work 5696 obj_to_scan->oop_iterate(keep_alive); 5697 // Loop around, finish this work, and try to steal some more 5698 } else if (terminator()->offer_termination()) { 5699 break; // nirvana from the infinite cycle 5700 } 5701 } 5702 NOT_PRODUCT( 5703 if (PrintCMSStatistics != 0) { 5704 gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals); 5705 } 5706 ) 5707 } 5708 5709 void CMSRefProcTaskExecutor::execute(ProcessTask& task) 5710 { 5711 GenCollectedHeap* gch = GenCollectedHeap::heap(); 5712 WorkGang* workers = gch->workers(); 5713 assert(workers != NULL, "Need parallel worker threads."); 5714 int n_workers = workers->total_workers(); 5715 CMSRefProcTaskProxy rp_task(task, &_collector, 5716 _collector.ref_processor()->span(), 5717 _collector.markBitMap(), 5718 n_workers, _collector.task_queues()); 5719 workers->run_task(&rp_task); 5720 } 5721 5722 void CMSRefProcTaskExecutor::execute(EnqueueTask& task) 5723 { 5724 5725 GenCollectedHeap* gch = GenCollectedHeap::heap(); 5726 WorkGang* workers = gch->workers(); 5727 assert(workers != NULL, "Need parallel worker threads."); 5728 CMSRefEnqueueTaskProxy enq_task(task); 5729 workers->run_task(&enq_task); 5730 } 5731 5732 void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) { 5733 5734 ResourceMark rm; 5735 HandleMark hm; 5736 5737 ReferenceProcessor* rp = ref_processor(); 5738 assert(rp->span().equals(_span), "Spans should be equal"); 5739 assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete"); 5740 // Process weak references. 
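// setup_policy() below selects the SoftReference clearing policy for // this cycle; when clear_all_soft_refs is set (e.g. for a last-ditch // collection), all soft references are cleared regardless of age.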
5741 rp->setup_policy(clear_all_soft_refs); 5742 verify_work_stacks_empty(); 5743 5744 CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap, 5745 &_markStack, &_revisitStack, 5746 false /* !preclean */); 5747 CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this, 5748 _span, &_markBitMap, &_markStack, 5749 &cmsKeepAliveClosure, false /* !preclean */); 5750 { 5751 TraceTime t("weak refs processing", PrintGCDetails, false, gclog_or_tty); 5752 if (rp->processing_is_mt()) { 5753 CMSRefProcTaskExecutor task_executor(*this); 5754 rp->process_discovered_references(&_is_alive_closure, 5755 &cmsKeepAliveClosure, 5756 &cmsDrainMarkingStackClosure, 5757 &task_executor); 5758 } else { 5759 rp->process_discovered_references(&_is_alive_closure, 5760 &cmsKeepAliveClosure, 5761 &cmsDrainMarkingStackClosure, 5762 NULL); 5763 } 5764 verify_work_stacks_empty(); 5765 } 5766 5767 if (should_unload_classes()) { 5768 { 5769 TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty); 5770 5771 // Follow SystemDictionary roots and unload classes 5772 bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure); 5773 5774 // Follow CodeCache roots and unload any methods marked for unloading 5775 CodeCache::do_unloading(&_is_alive_closure, 5776 &cmsKeepAliveClosure, 5777 purged_class); 5778 5779 cmsDrainMarkingStackClosure.do_void(); 5780 verify_work_stacks_empty(); 5781 5782 // Update subklass/sibling/implementor links in KlassKlass descendants 5783 assert(!_revisitStack.isEmpty(), "revisit stack should not be empty"); 5784 oop k; 5785 while ((k = _revisitStack.pop()) != NULL) { 5786 ((Klass*)(oopDesc*)k)->follow_weak_klass_links( 5787 &_is_alive_closure, 5788 &cmsKeepAliveClosure); 5789 } 5790 assert(!ClassUnloading || 5791 (_markStack.isEmpty() && overflow_list_is_empty()), 5792 "Should not have found new reachable objects"); 5793 assert(_revisitStack.isEmpty(), "revisit stack should have been drained"); 5794 cmsDrainMarkingStackClosure.do_void(); 5795 verify_work_stacks_empty(); 5796 } 5797 5798 { 5799 TraceTime t("scrub symbol & string tables", PrintGCDetails, false, gclog_or_tty); 5800 // Now clean up stale oops in SymbolTable and StringTable 5801 SymbolTable::unlink(&_is_alive_closure); 5802 StringTable::unlink(&_is_alive_closure); 5803 } 5804 } 5805 5806 verify_work_stacks_empty(); 5807 // Restore any preserved marks as a result of mark stack or 5808 // work queue overflow 5809 restore_preserved_marks_if_any(); // done single-threaded for now 5810 5811 rp->set_enqueuing_is_done(true); 5812 if (rp->processing_is_mt()) { 5813 CMSRefProcTaskExecutor task_executor(*this); 5814 rp->enqueue_discovered_references(&task_executor); 5815 } else { 5816 rp->enqueue_discovered_references(NULL); 5817 } 5818 rp->verify_no_references_recorded(); 5819 assert(!rp->discovery_enabled(), "should have been disabled"); 5820 5821 // JVMTI object tagging is based on JNI weak refs. If any of these 5822 // refs were cleared then JVMTI needs to update its maps and 5823 // maybe post ObjectFrees to agents. 5824 JvmtiExport::cms_ref_processing_epilogue(); 5825 } 5826 5827 #ifndef PRODUCT 5828 void CMSCollector::check_correct_thread_executing() { 5829 Thread* t = Thread::current(); 5830 // Only the VM thread or the CMS thread should be here. 5831 assert(t->is_ConcurrentGC_thread() || t->is_VM_thread(), 5832 "Unexpected thread type"); 5833 // If this is the vm thread, the foreground process 5834 // should not be waiting. 
void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {

  ResourceMark rm;
  HandleMark   hm;

  ReferenceProcessor* rp = ref_processor();
  assert(rp->span().equals(_span), "Spans should be equal");
  assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
  // Process weak references.
  rp->setup_policy(clear_all_soft_refs);
  verify_work_stacks_empty();

  CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
                                          &_markStack, &_revisitStack,
                                          false /* !preclean */);
  CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
                                _span, &_markBitMap, &_markStack,
                                &cmsKeepAliveClosure, false /* !preclean */);
  {
    TraceTime t("weak refs processing", PrintGCDetails, false, gclog_or_tty);
    if (rp->processing_is_mt()) {
      CMSRefProcTaskExecutor task_executor(*this);
      rp->process_discovered_references(&_is_alive_closure,
                                        &cmsKeepAliveClosure,
                                        &cmsDrainMarkingStackClosure,
                                        &task_executor);
    } else {
      rp->process_discovered_references(&_is_alive_closure,
                                        &cmsKeepAliveClosure,
                                        &cmsDrainMarkingStackClosure,
                                        NULL);
    }
    verify_work_stacks_empty();
  }

  if (should_unload_classes()) {
    {
      TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty);

      // Follow SystemDictionary roots and unload classes
      bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);

      // Follow CodeCache roots and unload any methods marked for unloading
      CodeCache::do_unloading(&_is_alive_closure,
                              &cmsKeepAliveClosure,
                              purged_class);

      cmsDrainMarkingStackClosure.do_void();
      verify_work_stacks_empty();

      // Update subklass/sibling/implementor links in KlassKlass descendants
      assert(!_revisitStack.isEmpty(), "revisit stack should not be empty");
      oop k;
      while ((k = _revisitStack.pop()) != NULL) {
        ((Klass*)(oopDesc*)k)->follow_weak_klass_links(
                                &_is_alive_closure,
                                &cmsKeepAliveClosure);
      }
      assert(!ClassUnloading ||
             (_markStack.isEmpty() && overflow_list_is_empty()),
             "Should not have found new reachable objects");
      assert(_revisitStack.isEmpty(), "revisit stack should have been drained");
      cmsDrainMarkingStackClosure.do_void();
      verify_work_stacks_empty();
    }

    {
      TraceTime t("scrub symbol & string tables", PrintGCDetails, false, gclog_or_tty);
      // Now clean up stale oops in SymbolTable and StringTable
      SymbolTable::unlink(&_is_alive_closure);
      StringTable::unlink(&_is_alive_closure);
    }
  }

  verify_work_stacks_empty();
  // Restore any preserved marks as a result of mark stack or
  // work queue overflow
  restore_preserved_marks_if_any();  // done single-threaded for now

  rp->set_enqueuing_is_done(true);
  if (rp->processing_is_mt()) {
    CMSRefProcTaskExecutor task_executor(*this);
    rp->enqueue_discovered_references(&task_executor);
  } else {
    rp->enqueue_discovered_references(NULL);
  }
  rp->verify_no_references_recorded();
  assert(!rp->discovery_enabled(), "should have been disabled");

  // JVMTI object tagging is based on JNI weak refs. If any of these
  // refs were cleared then JVMTI needs to update its maps and
  // maybe post ObjectFrees to agents.
  JvmtiExport::cms_ref_processing_epilogue();
}
#ifndef PRODUCT
void CMSCollector::check_correct_thread_executing() {
  Thread* t = Thread::current();
  // Only the VM thread or the CMS thread should be here.
  assert(t->is_ConcurrentGC_thread() || t->is_VM_thread(),
         "Unexpected thread type");
  // If this is the VM thread, the foreground collector
  // should not be waiting.  Note that _foregroundGCIsActive is
  // true while the foreground collector is waiting.
  if (_foregroundGCShouldWait) {
    // We cannot be the VM thread
    assert(t->is_ConcurrentGC_thread(),
           "Should be CMS thread");
  } else {
    // We can be the CMS thread only if we are in a stop-world
    // phase of CMS collection.
    if (t->is_ConcurrentGC_thread()) {
      assert(_collectorState == InitialMarking ||
             _collectorState == FinalMarking,
             "Should be a stop-world phase");
      // The CMS thread should be holding the CMS_token.
      assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
             "Potential interference with concurrently "
             "executing VM thread");
    }
  }
}
#endif
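// A note on the timers used in sweep() below: _inter_sweep_timer measures
// mutator time between the end of one sweep and the start of the next,
// while _intra_sweep_timer measures the duration of the sweep itself.
// Both feed (padded) exponential averages that parameterize the free-list
// census taken in beginSweepFLCensus() during sweepWork().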
void CMSCollector::sweep(bool asynch) {
  assert(_collectorState == Sweeping, "just checking");
  check_correct_thread_executing();
  verify_work_stacks_empty();
  verify_overflow_empty();
  increment_sweep_count();
  TraceCMSMemoryManagerStats tms(_collectorState);

  _inter_sweep_timer.stop();
  _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
  size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free());

  // PermGen verification support: If perm gen sweeping is disabled in
  // this cycle, we preserve the perm gen object "deadness" information
  // in the perm_gen_verify_bit_map. In order to do that we traverse
  // all blocks in perm gen and mark all dead objects.
  if (verifying() && !should_unload_classes()) {
    assert(perm_gen_verify_bit_map()->sizeInBits() != 0,
           "Should have already been allocated");
    MarkDeadObjectsClosure mdo(this, _permGen->cmsSpace(),
                               markBitMap(), perm_gen_verify_bit_map());
    if (asynch) {
      CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
                               bitMapLock());
      _permGen->cmsSpace()->blk_iterate(&mdo);
    } else {
      // In the case of synchronous sweep, we already have
      // the requisite locks/tokens.
      _permGen->cmsSpace()->blk_iterate(&mdo);
    }
  }

  assert(!_intra_sweep_timer.is_active(), "Should not be active");
  _intra_sweep_timer.reset();
  _intra_sweep_timer.start();
  if (asynch) {
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails);
    // First sweep the old gen then the perm gen
    {
      CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
                               bitMapLock());
      sweepWork(_cmsGen, asynch);
    }

    // Now repeat for perm gen
    if (should_unload_classes()) {
      CMSTokenSyncWithLocks ts(true, _permGen->freelistLock(),
                               bitMapLock());
      sweepWork(_permGen, asynch);
    }

    // Update Universe::_heap_*_at_gc figures.
    // We need all the free list locks to make the abstract state
    // transition from Sweeping to Resetting. See detailed note
    // further below.
    {
      CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
                               _permGen->freelistLock());
      // Update heap occupancy information which is used as
      // input to soft ref clearing policy at the next gc.
      Universe::update_heap_info_at_gc();
      _collectorState = Resizing;
    }
  } else {
    // already have needed locks
    sweepWork(_cmsGen, asynch);

    if (should_unload_classes()) {
      sweepWork(_permGen, asynch);
    }
    // Update heap occupancy information which is used as
    // input to soft ref clearing policy at the next gc.
    Universe::update_heap_info_at_gc();
    _collectorState = Resizing;
  }
  verify_work_stacks_empty();
  verify_overflow_empty();

  _intra_sweep_timer.stop();
  _intra_sweep_estimate.sample(_intra_sweep_timer.seconds());

  _inter_sweep_timer.reset();
  _inter_sweep_timer.start();

  update_time_of_last_gc(os::javaTimeMillis());

  // NOTE on abstract state transitions:
  // Mutators allocate-live and/or mark the mod-union table dirty
  // based on the state of the collection.  The former is done in
  // the interval [Marking, Sweeping] and the latter in the interval
  // [Marking, Sweeping).  Thus the transitions into the Marking state
  // and out of the Sweeping state must be synchronously visible
  // globally to the mutators.
  // The transition into the Marking state happens with the world
  // stopped so the mutators will globally see it.  Sweeping is
  // done asynchronously by the background collector so the transition
  // from the Sweeping state to the Resizing state must be done
  // under the freelistLock (as is the check for whether to
  // allocate-live and whether to dirty the mod-union table).
  assert(_collectorState == Resizing, "Change of collector state to"
         " Resizing must be done under the freelistLocks (plural)");

  // Now that sweeping has been completed, if the GCH's
  // incremental_collection_will_fail flag is set, clear it,
  // thus inviting a younger gen collection to promote into
  // this generation. If such a promotion may still fail,
  // the flag will be set again when a young collection is
  // attempted.
  // I think the incremental_collection_will_fail flag's use
  // is specific to a two-generation collection policy, so I'll
  // assert that that's the configuration we are operating within.
  // The use of the flag can and should be generalized appropriately
  // in the future to deal with a general n-generation system.

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->collector_policy()->is_two_generation_policy(),
         "Resetting of incremental_collection_will_fail flag"
         " may be incorrect otherwise");
  gch->clear_incremental_collection_will_fail();
  gch->update_full_collections_completed(_collection_count_start);
}
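// Worked example for setNearLargestChunk() below (the numbers are purely
// illustrative, not defaults): if the largest free block sits at an offset
// of 384 MB from the bottom of the space and the proximity factor
// FLSLargestBlockCoalesceProximity is, say, 0.99, the "near largest"
// boundary is placed at roughly 0.99 * 384 MB - MinChunkSize past the
// bottom, i.e. just short of the largest block, so that the sweeper's
// coalescing decisions change as it approaches that block.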
// FIX ME!!! Looks like this belongs in CFLSpace, with
// CMSGen merely delegating to it.
void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
  double nearLargestPercent = FLSLargestBlockCoalesceProximity;
  HeapWord* minAddr     = _cmsSpace->bottom();
  HeapWord* largestAddr =
    (HeapWord*) _cmsSpace->dictionary()->findLargestDict();
  if (largestAddr == NULL) {
    // The dictionary appears to be empty.  In this case
    // try to coalesce at the end of the heap.
    largestAddr = _cmsSpace->end();
  }
  size_t largestOffset     = pointer_delta(largestAddr, minAddr);
  size_t nearLargestOffset =
    (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
  if (PrintFLSStatistics != 0) {
    gclog_or_tty->print_cr(
      "CMS: Large Block: " PTR_FORMAT ";"
      " Proximity: " PTR_FORMAT " -> " PTR_FORMAT,
      largestAddr,
      _cmsSpace->nearLargestChunk(), minAddr + nearLargestOffset);
  }
  _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
}

bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
  return addr >= _cmsSpace->nearLargestChunk();
}

FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
  return _cmsSpace->find_chunk_at_end();
}

void ConcurrentMarkSweepGeneration::update_gc_stats(int current_level,
                                                    bool full) {
  // The next lower level has been collected.  Gather any statistics
  // that are of interest at this point.
  if (!full && (current_level + 1) == level()) {
    // Gather statistics on the young generation collection.
    collector()->stats().record_gc0_end(used());
  }
}

CMSAdaptiveSizePolicy* ConcurrentMarkSweepGeneration::size_policy() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
         "Wrong type of heap");
  CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
    gch->gen_policy()->size_policy();
  assert(sp->is_gc_cms_adaptive_size_policy(),
         "Wrong type of size policy");
  return sp;
}

void ConcurrentMarkSweepGeneration::rotate_debug_collection_type() {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print("Rotate from %d ", _debug_collection_type);
  }
  _debug_collection_type = (CollectionTypes) (_debug_collection_type + 1);
  _debug_collection_type =
    (CollectionTypes) (_debug_collection_type % Unknown_collection_type);
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print_cr("to %d ", _debug_collection_type);
  }
}
void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen,
                             bool asynch) {
  // We iterate over the space(s) underlying this generation,
  // checking the mark bit map to see if the bits corresponding
  // to specific blocks are marked or not. Blocks that are
  // marked are live and are not swept up. All remaining blocks
  // are swept up, with coalescing on-the-fly as we sweep up
  // contiguous free and/or garbage blocks:
  // We need to ensure that the sweeper synchronizes with allocators
  // and stop-the-world collectors. In particular, the following
  // locks are used:
  // . CMS token: if this is held, a stop the world collection cannot occur
  // . freelistLock: if this is held no allocation can occur from this
  //                 generation by another thread
  // . bitMapLock: if this is held, no other thread can access or update
  //               the marking bit map
  //

  // Note that we need to hold the freelistLock if we use
  // block iterate below; else the iterator might go awry if
  // a mutator (or promotion) causes block contents to change
  // (for instance if the allocator divvies up a block).
  // If we hold the free list lock, for all practical purposes
  // young generation GC's can't occur (they'll usually need to
  // promote), so we might as well prevent all young generation
  // GC's while we do a sweeping step. For the same reason, we might
  // as well take the bit map lock for the entire duration.

  // check that we hold the requisite locks
  assert(have_cms_token(), "Should hold cms token");
  assert(   (asynch && ConcurrentMarkSweepThread::cms_thread_has_cms_token())
         || (!asynch && ConcurrentMarkSweepThread::vm_thread_has_cms_token()),
         "Should possess CMS token to sweep");
  assert_lock_strong(gen->freelistLock());
  assert_lock_strong(bitMapLock());

  assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context");
  assert(_intra_sweep_timer.is_active(),  "Was switched on  in an outer context");
  gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
                                      _inter_sweep_estimate.padded_average(),
                                      _intra_sweep_estimate.padded_average());
  gen->setNearLargestChunk();

  {
    SweepClosure sweepClosure(this, gen, &_markBitMap,
                              CMSYield && asynch);
    gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
    // We need to free-up/coalesce garbage/blocks from a
    // co-terminal free run. This is done in the SweepClosure
    // destructor; so, do not remove this scope, else the
    // end-of-sweep-census below will be off by a little bit.
  }
  gen->cmsSpace()->sweep_completed();
  gen->cmsSpace()->endSweepFLCensus(sweep_count());
  if (should_unload_classes()) {               // unloaded classes this cycle,
    _concurrent_cycles_since_last_unload = 0;  // ... reset count
  } else {                                     // did not unload classes,
    _concurrent_cycles_since_last_unload++;    // ... increment count
  }
}
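// A note on the reset code that follows: in the asynchronous case the
// mark bit map is cleared in chunks of CMSBitMapYieldQuantum words, with
// a yield check between chunks.  Yielding follows the usual protocol seen
// throughout this file: give up the bit map lock and the CMS token,
// briefly sleep so that the VM thread can make progress, then re-acquire
// both and continue.  In the synchronous case the caller already holds
// the needed locks and the map is cleared in one shot.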
// Reset CMS data structures (for now just the marking bit map)
// preparatory for the next cycle.
void CMSCollector::reset(bool asynch) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  CMSAdaptiveSizePolicy* sp = size_policy();
  AdaptiveSizePolicyOutput(sp, gch->total_collections());
  if (asynch) {
    CMSTokenSyncWithLocks ts(true, bitMapLock());

    // If the state is not "Resetting", the foreground thread
    // has done a collection and the resetting.
    if (_collectorState != Resetting) {
      assert(_collectorState == Idling, "The state should only change"
             " because the foreground collector has finished the collection");
      return;
    }

    // Clear the mark bitmap (no grey objects to start with)
    // for the next cycle.
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    CMSPhaseAccounting cmspa(this, "reset", !PrintGCDetails);

    HeapWord* curAddr = _markBitMap.startWord();
    while (curAddr < _markBitMap.endWord()) {
      size_t remaining = pointer_delta(_markBitMap.endWord(), curAddr);
      MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
      _markBitMap.clear_large_range(chunk);
      if (ConcurrentMarkSweepThread::should_yield() &&
          !foregroundGCIsActive() &&
          CMSYield) {
        assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
               "CMS thread should hold CMS token");
        assert_lock_strong(bitMapLock());
        bitMapLock()->unlock();
        ConcurrentMarkSweepThread::desynchronize(true);
        ConcurrentMarkSweepThread::acknowledge_yield_request();
        stopTimer();
        if (PrintCMSStatistics != 0) {
          incrementYields();
        }
        icms_wait();

        // See the comment in coordinator_yield()
        for (unsigned i = 0; i < CMSYieldSleepCount &&
                             ConcurrentMarkSweepThread::should_yield() &&
                             !CMSCollector::foregroundGCIsActive(); ++i) {
          os::sleep(Thread::current(), 1, false);
          ConcurrentMarkSweepThread::acknowledge_yield_request();
        }

        ConcurrentMarkSweepThread::synchronize(true);
        bitMapLock()->lock_without_safepoint_check();
        startTimer();
      }
      curAddr = chunk.end();
    }
    // A successful mostly concurrent collection has been done.
    // Because only the full (i.e., concurrent mode failure) collections
    // are being measured for gc overhead limits, clean the "near" flag
    // and count.
    sp->reset_gc_overhead_limit_count();
    _collectorState = Idling;
  } else {
    // already have the lock
    assert(_collectorState == Resetting, "just checking");
    assert_lock_strong(bitMapLock());
    _markBitMap.clear_all();
    _collectorState = Idling;
  }

  // Stop incremental mode after a cycle completes, so that any future cycles
  // are triggered by allocation.
  stop_icms();

  NOT_PRODUCT(
    if (RotateCMSCollectionTypes) {
      _cmsGen->rotate_debug_collection_type();
    }
  )
}
void CMSCollector::do_CMS_operation(CMS_op_type op) {
  gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
  TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
  TraceTime t("GC", PrintGC, !PrintGCDetails, gclog_or_tty);
  TraceCollectorStats tcs(counters());

  switch (op) {
    case CMS_op_checkpointRootsInitial: {
      checkpointRootsInitial(true);       // asynch
      if (PrintGC) {
        _cmsGen->printOccupancy("initial-mark");
      }
      break;
    }
    case CMS_op_checkpointRootsFinal: {
      checkpointRootsFinal(true,    // asynch
                           false,   // !clear_all_soft_refs
                           false);  // !init_mark_was_synchronous
      if (PrintGC) {
        _cmsGen->printOccupancy("remark");
      }
      break;
    }
    default:
      fatal("No such CMS_op");
  }
}

#ifndef PRODUCT
size_t const CMSCollector::skip_header_HeapWords() {
  return FreeChunk::header_size();
}

// Try to collect here the conditions that should hold when
// the CMS thread is exiting. The idea is that the foreground GC
// thread should not be blocked if it wants to terminate
// the CMS thread and yet continue to run the VM for a while
// after that.
void CMSCollector::verify_ok_to_terminate() const {
  assert(Thread::current()->is_ConcurrentGC_thread(),
         "should be called by CMS thread");
  assert(!_foregroundGCShouldWait, "should be false");
  // We could check here that all the various low-level locks
  // are not held by the CMS thread, but that is overkill; see
  // also CMSThread::verify_ok_to_terminate() where the CGC_lock
  // is checked.
}
#endif
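// A brief refresher on the "Printezis marks" used by the two block-size
// queries below (a descriptive summary of the code, not new behavior):
// for an allocated-but-not-yet-initialized block starting at addr, the
// collector sets mark bits at addr and addr + 1, and also at the last
// word of the block.  The block size can then be recovered, without
// parsing the (not yet installed) header, as
//   size = (next marked address after addr + 2) + 1 - addr
// For example, for a 5-word block at addr, bits are set at addr, addr + 1
// and addr + 4; the scan from addr + 2 finds addr + 4, giving size 5.
// This also explains the "size >= 3" assertions: the end bit must be
// distinguishable from the two bits at the start.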
size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const {
  assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1),
         "missing Printezis mark?");
  HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
  size_t size = pointer_delta(nextOneAddr + 1, addr);
  assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
         "alignment problem");
  assert(size >= 3, "Necessary for Printezis marks to work");
  return size;
}

// A variant of the above (block_size_using_printezis_bits()) except
// that we return 0 if the P-bits are not yet set.
size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const {
  if (_markBitMap.isMarked(addr)) {
    assert(_markBitMap.isMarked(addr + 1), "Missing Printezis bit?");
    HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
    size_t size = pointer_delta(nextOneAddr + 1, addr);
    assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
           "alignment problem");
    assert(size >= 3, "Necessary for Printezis marks to work");
    return size;
  } else {
    assert(!_markBitMap.isMarked(addr + 1), "Bit map inconsistency?");
    return 0;
  }
}

HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
  size_t sz = 0;
  oop p = (oop)addr;
  if (p->klass_or_null() != NULL && p->is_parsable()) {
    sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
  } else {
    sz = block_size_using_printezis_bits(addr);
  }
  assert(sz > 0, "size must be nonzero");
  HeapWord* next_block = addr + sz;
  HeapWord* next_card  = (HeapWord*)round_to((uintptr_t)next_block,
                                             CardTableModRefBS::card_size);
  assert(round_down((uintptr_t)addr,      CardTableModRefBS::card_size) <
         round_down((uintptr_t)next_card, CardTableModRefBS::card_size),
         "must be different cards");
  return next_card;
}


// CMS Bit Map Wrapper /////////////////////////////////////////

// Construct a CMS bit map infrastructure, but don't create the
// bit vector itself. That is done by a separate call CMSBitMap::allocate()
// further below.
CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
  _bm(),
  _shifter(shifter),
  _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true) : NULL)
{
  _bmStartWord = 0;
  _bmWordSize  = 0;
}
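// Sizing note for allocate() below: the bit map needs one bit for every
// (1 << _shifter) heap words, so the reservation is
//   (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1
// bytes, rounded up to the allocation granularity.  As a purely
// illustrative example: with _shifter == 0 on a 64-bit VM, a 1 GB
// (128 M-word) region needs 128 M bits, i.e. about 16 MB of backing store.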
bool CMSBitMap::allocate(MemRegion mr) {
  _bmStartWord = mr.start();
  _bmWordSize  = mr.word_size();
  ReservedSpace brs(ReservedSpace::allocation_align_size_up(
                     (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
  if (!brs.is_reserved()) {
    warning("CMS bit map allocation failure");
    return false;
  }
  // For now we'll just commit all of the bit map up front.
  // Later on we'll try to be more parsimonious with swap.
  if (!_virtual_space.initialize(brs, brs.size())) {
    warning("CMS bit map backing store failure");
    return false;
  }
  assert(_virtual_space.committed_size() == brs.size(),
         "didn't reserve backing store for all of CMS bit map?");
  _bm.set_map((BitMap::bm_word_t*)_virtual_space.low());
  assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
         _bmWordSize, "inconsistency in bit map sizing");
  _bm.set_size(_bmWordSize >> _shifter);

  // bm.clear(); // can we rely on getting zero'd memory? verify below
  assert(isAllClear(),
         "Expected zero'd memory from ReservedSpace constructor");
  assert(_bm.size() == heapWordDiffToOffsetDiff(sizeInWords()),
         "consistency check");
  return true;
}

void CMSBitMap::dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl) {
  HeapWord *next_addr, *end_addr, *last_addr;
  assert_locked();
  assert(covers(mr), "out-of-range error");
  // XXX assert that start and end are appropriately aligned
  for (next_addr = mr.start(), end_addr = mr.end();
       next_addr < end_addr; next_addr = last_addr) {
    MemRegion dirty_region = getAndClearMarkedRegion(next_addr, end_addr);
    last_addr = dirty_region.end();
    if (!dirty_region.is_empty()) {
      cl->do_MemRegion(dirty_region);
    } else {
      assert(last_addr == end_addr, "program logic");
      return;
    }
  }
}

#ifndef PRODUCT
void CMSBitMap::assert_locked() const {
  CMSLockVerifier::assert_locked(lock());
}

bool CMSBitMap::covers(MemRegion mr) const {
  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
  assert((size_t)_bm.size() == (_bmWordSize >> _shifter),
         "size inconsistency");
  return (mr.start() >= _bmStartWord) &&
         (mr.end()   <= endWord());
}

bool CMSBitMap::covers(HeapWord* start, size_t size) const {
  return (start >= _bmStartWord && (start + size) <= endWord());
}

void CMSBitMap::verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) {
  // verify that there are no 1 bits in the interval [left, right)
  FalseBitMapClosure falseBitMapClosure;
  iterate(&falseBitMapClosure, left, right);
}

void CMSBitMap::region_invariant(MemRegion mr)
{
  assert_locked();
  // mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
  assert(!mr.is_empty(), "unexpected empty region");
  assert(covers(mr), "mr should be covered by bit map");
  // convert address range into offset range
  size_t start_ofs = heapWordToOffset(mr.start());
  // Make sure that end() is appropriately aligned
  assert(mr.end() == (HeapWord*)round_to((intptr_t)mr.end(),
                       (1 << (_shifter+LogHeapWordSize))),
         "Misaligned mr.end()");
  size_t end_ofs = heapWordToOffset(mr.end());
  assert(end_ofs > start_ofs, "Should mark at least one bit");
}

#endif

bool CMSMarkStack::allocate(size_t size) {
  // allocate a stack of the requisite depth
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(
                     size * sizeof(oop)));
  if (!rs.is_reserved()) {
    warning("CMSMarkStack allocation failure");
    return false;
  }
  if (!_virtual_space.initialize(rs, rs.size())) {
    warning("CMSMarkStack backing store failure");
    return false;
  }
  assert(_virtual_space.committed_size() == rs.size(),
         "didn't reserve backing store for all of CMS stack?");
  _base = (oop*)(_virtual_space.low());
  _index = 0;
  _capacity = size;
  NOT_PRODUCT(_max_depth = 0);
  return true;
}
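// A note on expand() below: it does not copy the old stack's contents to
// the new backing store.  That is safe because callers such as
// PushAndMarkVerifyClosure::handle_stack_overflow() (further below) first
// discard the stack contents via reset(), after recording the least
// discarded address in the collector's restart address for a later
// re-scan.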
// XXX FIX ME !!! In the MT case we come in here holding a
// leaf lock. For printing we need to take a further lock
// which has lower rank. We need to recalibrate the two
// lock-ranks involved in order to be able to print the
// messages below. (Or defer the printing to the caller.
// For now we take the expedient path of just disabling the
// messages for the problematic case.)
void CMSMarkStack::expand() {
  assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted");
  if (_capacity == MarkStackSizeMax) {
    if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
      // We print a warning message only once per CMS cycle.
      gclog_or_tty->print_cr(" (benign) Hit CMSMarkStack max size limit");
    }
    return;
  }
  // Double capacity if possible
  size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax);
  // Do not give up existing stack until we have managed to
  // get the double capacity that we desired.
  ReservedSpace rs(ReservedSpace::allocation_align_size_up(
                     new_capacity * sizeof(oop)));
  if (rs.is_reserved()) {
    // Release the backing store associated with old stack
    _virtual_space.release();
    // Reinitialize virtual space for new stack
    if (!_virtual_space.initialize(rs, rs.size())) {
      fatal("Not enough swap for expanded marking stack");
    }
    _base = (oop*)(_virtual_space.low());
    _index = 0;
    _capacity = new_capacity;
  } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
    // Failed to double capacity, continue;
    // we print a detail message only once per CMS cycle.
    gclog_or_tty->print(" (benign) Failed to expand marking stack from "SIZE_FORMAT"K to "
            SIZE_FORMAT"K",
            _capacity / K, new_capacity / K);
  }
}


// Closures
// XXX: there seems to be a lot of code duplication here;
// should refactor and consolidate common code.

// This closure is used to mark refs into the CMS generation in
// the CMS bit map. Called at the first checkpoint. This closure
// assumes that we do not need to re-mark dirty cards; if the CMS
// generation on which this is used is not an oldest (modulo perm gen)
// generation then this will lose younger_gen cards!

MarkRefsIntoClosure::MarkRefsIntoClosure(
  MemRegion span, CMSBitMap* bitMap):
    _span(span),
    _bitMap(bitMap)
{
  assert(_ref_processor == NULL, "deliberately left NULL");
  assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
}

void MarkRefsIntoClosure::do_oop(oop obj) {
  // if p points into _span, then mark corresponding bit in _markBitMap
  assert(obj->is_oop(), "expected an oop");
  HeapWord* addr = (HeapWord*)obj;
  if (_span.contains(addr)) {
    // this should be made more efficient
    _bitMap->mark(addr);
  }
}

void MarkRefsIntoClosure::do_oop(oop* p)       { MarkRefsIntoClosure::do_oop_work(p); }
void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }

// A variant of the above, used for CMS marking verification.
MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
  MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
    _span(span),
    _verification_bm(verification_bm),
    _cms_bm(cms_bm)
{
  assert(_ref_processor == NULL, "deliberately left NULL");
  assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
}

void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
  // if p points into _span, then mark corresponding bit in _markBitMap
  assert(obj->is_oop(), "expected an oop");
  HeapWord* addr = (HeapWord*)obj;
  if (_span.contains(addr)) {
    _verification_bm->mark(addr);
    if (!_cms_bm->isMarked(addr)) {
      oop(addr)->print();
      gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", addr);
      fatal("... aborting");
    }
  }
}

void MarkRefsIntoVerifyClosure::do_oop(oop* p)       { MarkRefsIntoVerifyClosure::do_oop_work(p); }
void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }

//////////////////////////////////////////////////
// MarkRefsIntoAndScanClosure
//////////////////////////////////////////////////

MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
                                                       ReferenceProcessor* rp,
                                                       CMSBitMap* bit_map,
                                                       CMSBitMap* mod_union_table,
                                                       CMSMarkStack* mark_stack,
                                                       CMSMarkStack* revisit_stack,
                                                       CMSCollector* collector,
                                                       bool should_yield,
                                                       bool concurrent_precleaning):
  _collector(collector),
  _span(span),
  _bit_map(bit_map),
  _mark_stack(mark_stack),
  _pushAndMarkClosure(collector, span, rp, bit_map, mod_union_table,
                      mark_stack, revisit_stack, concurrent_precleaning),
  _yield(should_yield),
  _concurrent_precleaning(concurrent_precleaning),
  _freelistLock(NULL)
{
  _ref_processor = rp;
  assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
}
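// In tri-color terms, in the do_oop() below: an object whose bit is set in
// the mark bit map but whose interior oops have not yet been scanned is
// "grey" and sits on the mark stack; once its interior oops have been
// iterated over it is "black".  The stack is drained eagerly, so on entry
// and exit the mark stack (and, outside precleaning, the overflow list)
// is empty and no grey objects are recorded locally.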
// This closure is used to mark refs into the CMS generation at the
// second (final) checkpoint, and to scan and transitively follow
// the unmarked oops. It is also used during the concurrent precleaning
// phase while scanning objects on dirty cards in the CMS generation.
// The marks are made in the marking bit map and the marking stack is
// used for keeping the (newly) grey objects during the scan.
// The parallel version (Par_...) appears further below.
void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
  if (obj != NULL) {
    assert(obj->is_oop(), "expected an oop");
    HeapWord* addr = (HeapWord*)obj;
    assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
    assert(_collector->overflow_list_is_empty(),
           "overflow list should be empty");
    if (_span.contains(addr) &&
        !_bit_map->isMarked(addr)) {
      // mark bit map (object is now grey)
      _bit_map->mark(addr);
      // push on marking stack (stack should be empty), and drain the
      // stack by applying this closure to the oops in the oops popped
      // from the stack (i.e. blacken the grey objects)
      bool res = _mark_stack->push(obj);
      assert(res, "Should have space to push on empty stack");
      do {
        oop new_oop = _mark_stack->pop();
        assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
        assert(new_oop->is_parsable(), "Found unparsable oop");
        assert(_bit_map->isMarked((HeapWord*)new_oop),
               "only grey objects on this stack");
        // iterate over the oops in this oop, marking and pushing
        // the ones in CMS heap (i.e. in _span).
        new_oop->oop_iterate(&_pushAndMarkClosure);
        // check if it's time to yield
        do_yield_check();
      } while (!_mark_stack->isEmpty() ||
               (!_concurrent_precleaning && take_from_overflow_list()));
        // if marking stack is empty, and we are not doing this
        // during precleaning, then check the overflow list
    }
    assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
    assert(_collector->overflow_list_is_empty(),
           "overflow list was drained above");
    // We could restore evacuated mark words, if any, used for
    // overflow list links here because the overflow list is
    // provably empty here. That would reduce the maximum
    // size requirements for preserved_{oop,mark}_stack.
    // But we'll just postpone it until we are all done
    // so we can just stream through.
    if (!_concurrent_precleaning && CMSOverflowEarlyRestoration) {
      _collector->restore_preserved_marks_if_any();
      assert(_collector->no_preserved_marks(), "No preserved marks");
    }
    assert(!CMSOverflowEarlyRestoration || _collector->no_preserved_marks(),
           "All preserved marks should have been restored above");
  }
}

void MarkRefsIntoAndScanClosure::do_oop(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }

void MarkRefsIntoAndScanClosure::do_yield_work() {
  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "CMS thread should hold CMS token");
  assert_lock_strong(_freelistLock);
  assert_lock_strong(_bit_map->lock());
  // relinquish the free_list_lock and bitMapLock()
  DEBUG_ONLY(RememberKlassesChecker mux(false);)
  _bit_map->lock()->unlock();
  _freelistLock->unlock();
  ConcurrentMarkSweepThread::desynchronize(true);
  ConcurrentMarkSweepThread::acknowledge_yield_request();
  _collector->stopTimer();
  GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
  if (PrintCMSStatistics != 0) {
    _collector->incrementYields();
  }
  _collector->icms_wait();

  // See the comment in coordinator_yield()
  for (unsigned i = 0;
       i < CMSYieldSleepCount &&
       ConcurrentMarkSweepThread::should_yield() &&
       !CMSCollector::foregroundGCIsActive();
       ++i) {
    os::sleep(Thread::current(), 1, false);
    ConcurrentMarkSweepThread::acknowledge_yield_request();
  }

  ConcurrentMarkSweepThread::synchronize(true);
  _freelistLock->lock_without_safepoint_check();
  _bit_map->lock()->lock_without_safepoint_check();
  _collector->startTimer();
}
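// The parallel variant that follows differs from the serial closure above
// in two ways: newly greyed objects go to a per-thread work queue (with
// spill-over to a shared overflow list) rather than to the single mark
// stack, and the shared bit map is updated with par_mark(), a CAS, so
// that exactly one of several racing threads claims (and then pushes)
// each object.  The queue is trimmed back to _low_water_mark rather than
// fully drained, which keeps work available for stealing.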
///////////////////////////////////////////////////////////
// Par_MarkRefsIntoAndScanClosure: a parallel version of
// MarkRefsIntoAndScanClosure
///////////////////////////////////////////////////////////
Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
  CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
  CMSBitMap* bit_map, OopTaskQueue* work_queue, CMSMarkStack* revisit_stack):
  _span(span),
  _bit_map(bit_map),
  _work_queue(work_queue),
  _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
                       (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads))),
  _par_pushAndMarkClosure(collector, span, rp, bit_map, work_queue,
                          revisit_stack)
{
  _ref_processor = rp;
  assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
}

// This closure is used to mark refs into the CMS generation at the
// second (final) checkpoint, and to scan and transitively follow
// the unmarked oops. The marks are made in the marking bit map and
// the work_queue is used for keeping the (newly) grey objects during
// the scan phase whence they are also available for stealing by parallel
// threads. Since the marking bit map is shared, updates are
// synchronized (via CAS).
void Par_MarkRefsIntoAndScanClosure::do_oop(oop obj) {
  if (obj != NULL) {
    // Ignore mark word because this could be an already marked oop
    // that may be chained at the end of the overflow list.
    assert(obj->is_oop(true), "expected an oop");
    HeapWord* addr = (HeapWord*)obj;
    if (_span.contains(addr) &&
        !_bit_map->isMarked(addr)) {
      // mark bit map (object will become grey):
      // It is possible for several threads to be
      // trying to "claim" this object concurrently;
      // the unique thread that succeeds in marking the
      // object first will do the subsequent push on
      // to the work queue (or overflow list).
      if (_bit_map->par_mark(addr)) {
        // push on work_queue (which may not be empty), and trim the
        // queue to an appropriate length by applying this closure to
        // the oops in the oops popped from the stack (i.e. blacken the
        // grey objects)
        bool res = _work_queue->push(obj);
        assert(res, "Low water mark should be less than capacity?");
        trim_queue(_low_water_mark);
      } // Else, another thread claimed the object
    }
  }
}

void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p)       { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
void Par_MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
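// A note on the careful-rescan closure that follows: a return value of 0
// from do_object_careful_m() tells the caller either that we yielded and
// have been asked to abort the ongoing preclean cycle, or that the object
// could not be scanned yet (uninitialized, or not safe to parse outside a
// safepoint), in which case the corresponding card is redirtied or left
// for the remark phase.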
// This closure is used to rescan the marked objects on the dirty cards
// in the mod union table and the card table proper.
size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
  oop p, MemRegion mr) {

  size_t size = 0;
  HeapWord* addr = (HeapWord*)p;
  DEBUG_ONLY(_collector->verify_work_stacks_empty();)
  assert(_span.contains(addr), "we are scanning the CMS generation");
  // check if it's time to yield
  if (do_yield_check()) {
    // We yielded for some foreground stop-world work,
    // and we have been asked to abort this ongoing preclean cycle.
    return 0;
  }
  if (_bitMap->isMarked(addr)) {
    // it's marked; is it potentially uninitialized?
    if (p->klass_or_null() != NULL) {
      // If is_conc_safe is false, the object may be undergoing
      // change by the VM outside a safepoint.  Don't try to
      // scan it, but rather leave it for the remark phase.
      if (CMSPermGenPrecleaningEnabled &&
          (!p->is_conc_safe() || !p->is_parsable())) {
        // Signal precleaning to redirty the card since
        // the klass pointer is already installed.
        assert(size == 0, "Initial value");
      } else {
        assert(p->is_parsable(), "must be parsable.");
        // an initialized object; ignore mark word in verification below
        // since we are running concurrent with mutators
        assert(p->is_oop(true), "should be an oop");
        if (p->is_objArray()) {
          // objArrays are precisely marked; restrict scanning
          // to dirty cards only.
          size = CompactibleFreeListSpace::adjustObjectSize(
                   p->oop_iterate(_scanningClosure, mr));
        } else {
          // A non-array may have been imprecisely marked; we need
          // to scan object in its entirety.
          size = CompactibleFreeListSpace::adjustObjectSize(
                   p->oop_iterate(_scanningClosure));
        }
#ifdef DEBUG
        size_t direct_size =
          CompactibleFreeListSpace::adjustObjectSize(p->size());
        assert(size == direct_size, "Inconsistency in size");
        assert(size >= 3, "Necessary for Printezis marks to work");
        if (!_bitMap->isMarked(addr+1)) {
          _bitMap->verifyNoOneBitsInRange(addr+2, addr+size);
        } else {
          _bitMap->verifyNoOneBitsInRange(addr+2, addr+size-1);
          assert(_bitMap->isMarked(addr+size-1),
                 "inconsistent Printezis mark");
        }
#endif // DEBUG
      }
    } else {
      // an uninitialized object
      assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
      HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
      size = pointer_delta(nextOneAddr + 1, addr);
      assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
             "alignment problem");
      // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
      // will dirty the card when the klass pointer is installed in the
      // object (signalling the completion of initialization).
    }
  } else {
    // Either a not yet marked object or an uninitialized object
    if (p->klass_or_null() == NULL || !p->is_parsable()) {
      // An uninitialized object, skip to the next card, since
      // we may not be able to read its P-bits yet.
      assert(size == 0, "Initial value");
    } else {
      // An object not (yet) reached by marking: we merely need to
      // compute its size so as to go look at the next block.
      assert(p->is_oop(true), "should be an oop");
      size = CompactibleFreeListSpace::adjustObjectSize(p->size());
    }
  }
  DEBUG_ONLY(_collector->verify_work_stacks_empty();)
  return size;
}
void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "CMS thread should hold CMS token");
  assert_lock_strong(_freelistLock);
  assert_lock_strong(_bitMap->lock());
  DEBUG_ONLY(RememberKlassesChecker mux(false);)
  // relinquish the free_list_lock and bitMapLock()
  _bitMap->lock()->unlock();
  _freelistLock->unlock();
  ConcurrentMarkSweepThread::desynchronize(true);
  ConcurrentMarkSweepThread::acknowledge_yield_request();
  _collector->stopTimer();
  GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
  if (PrintCMSStatistics != 0) {
    _collector->incrementYields();
  }
  _collector->icms_wait();

  // See the comment in coordinator_yield()
  for (unsigned i = 0; i < CMSYieldSleepCount &&
                       ConcurrentMarkSweepThread::should_yield() &&
                       !CMSCollector::foregroundGCIsActive(); ++i) {
    os::sleep(Thread::current(), 1, false);
    ConcurrentMarkSweepThread::acknowledge_yield_request();
  }

  ConcurrentMarkSweepThread::synchronize(true);
  _freelistLock->lock_without_safepoint_check();
  _bitMap->lock()->lock_without_safepoint_check();
  _collector->startTimer();
}


//////////////////////////////////////////////////////////////////
// SurvivorSpacePrecleanClosure
//////////////////////////////////////////////////////////////////
// This (single-threaded) closure is used to preclean the oops in
// the survivor spaces.
size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {

  HeapWord* addr = (HeapWord*)p;
  DEBUG_ONLY(_collector->verify_work_stacks_empty();)
  assert(!_span.contains(addr), "we are scanning the survivor spaces");
  assert(p->klass_or_null() != NULL, "object should be initialized");
  assert(p->is_parsable(), "must be parsable.");
  // an initialized object; ignore mark word in verification below
  // since we are running concurrent with mutators
  assert(p->is_oop(true), "should be an oop");
  // Note that we do not yield while we iterate over
  // the interior oops of p, pushing the relevant ones
  // on our marking stack.
  size_t size = p->oop_iterate(_scanning_closure);
  do_yield_check();
  // Observe that below, we do not abandon the preclean
  // phase as soon as we should; rather we empty the
  // marking stack before returning. This is to satisfy
  // some existing assertions. In general, it may be a
  // good idea to abort immediately and complete the marking
  // from the grey objects at a later time.
  while (!_mark_stack->isEmpty()) {
    oop new_oop = _mark_stack->pop();
    assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
    assert(new_oop->is_parsable(), "Found unparsable oop");
    assert(_bit_map->isMarked((HeapWord*)new_oop),
           "only grey objects on this stack");
    // iterate over the oops in this oop, marking and pushing
    // the ones in CMS heap (i.e. in _span).
    new_oop->oop_iterate(_scanning_closure);
    // check if it's time to yield
    do_yield_check();
  }
  unsigned int after_count =
    GenCollectedHeap::heap()->total_collections();
  bool abort = (_before_count != after_count) ||
               _collector->should_abort_preclean();
  return abort ? 0 : size;
}
void SurvivorSpacePrecleanClosure::do_yield_work() {
  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "CMS thread should hold CMS token");
  assert_lock_strong(_bit_map->lock());
  DEBUG_ONLY(RememberKlassesChecker smx(false);)
  // Relinquish the bit map lock
  _bit_map->lock()->unlock();
  ConcurrentMarkSweepThread::desynchronize(true);
  ConcurrentMarkSweepThread::acknowledge_yield_request();
  _collector->stopTimer();
  GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
  if (PrintCMSStatistics != 0) {
    _collector->incrementYields();
  }
  _collector->icms_wait();

  // See the comment in coordinator_yield()
  for (unsigned i = 0; i < CMSYieldSleepCount &&
                       ConcurrentMarkSweepThread::should_yield() &&
                       !CMSCollector::foregroundGCIsActive(); ++i) {
    os::sleep(Thread::current(), 1, false);
    ConcurrentMarkSweepThread::acknowledge_yield_request();
  }

  ConcurrentMarkSweepThread::synchronize(true);
  _bit_map->lock()->lock_without_safepoint_check();
  _collector->startTimer();
}

// This closure is used to rescan the marked objects on the dirty cards
// in the mod union table and the card table proper. In the parallel
// case, although the bitMap is shared, we do a single read so the
// isMarked() query is "safe".
bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
  // Ignore mark word because we are running concurrent with mutators
  assert(p->is_oop_or_null(true), "expected an oop or null");
  HeapWord* addr = (HeapWord*)p;
  assert(_span.contains(addr), "we are scanning the CMS generation");
  bool is_obj_array = false;
#ifdef DEBUG
  if (!_parallel) {
    assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
    assert(_collector->overflow_list_is_empty(),
           "overflow list should be empty");
  }
#endif // DEBUG
  if (_bit_map->isMarked(addr)) {
    // Obj arrays are precisely marked, non-arrays are not;
    // so we scan objArrays precisely and non-arrays in their
    // entirety.
    if (p->is_objArray()) {
      is_obj_array = true;
      if (_parallel) {
        p->oop_iterate(_par_scan_closure, mr);
      } else {
        p->oop_iterate(_scan_closure, mr);
      }
    } else {
      if (_parallel) {
        p->oop_iterate(_par_scan_closure);
      } else {
        p->oop_iterate(_scan_closure);
      }
    }
  }
#ifdef DEBUG
  if (!_parallel) {
    assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
    assert(_collector->overflow_list_is_empty(),
           "overflow list should be empty");
  }
#endif // DEBUG
  return is_obj_array;
}
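// An overview of the closure that follows, summarizing its code: it walks
// the mark bit map left to right, tracking its progress in _finger.  When
// it lands on an object whose bit at addr+1 is also set (a Printezis mark
// for a not-yet-initialized object), it skips the remaining P-bits via
// _skipBits instead of scanning; otherwise it scans the object's interior
// oops with a PushOrMarkClosure, draining the mark stack fully before the
// bit-map iteration resumes.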
MarkFromRootsClosure::MarkFromRootsClosure(CMSCollector* collector,
                        MemRegion span,
                        CMSBitMap* bitMap, CMSMarkStack* markStack,
                        CMSMarkStack* revisitStack,
                        bool should_yield, bool verifying):
  _collector(collector),
  _span(span),
  _bitMap(bitMap),
  _mut(&collector->_modUnionTable),
  _markStack(markStack),
  _revisitStack(revisitStack),
  _yield(should_yield),
  _skipBits(0)
{
  assert(_markStack->isEmpty(), "stack should be empty");
  _finger = _bitMap->startWord();
  _threshold = _finger;
  assert(_collector->_restart_addr == NULL, "Sanity check");
  assert(_span.contains(_finger), "Out of bounds _finger?");
  DEBUG_ONLY(_verifying = verifying;)
}

void MarkFromRootsClosure::reset(HeapWord* addr) {
  assert(_markStack->isEmpty(), "would cause duplicates on stack");
  assert(_span.contains(addr), "Out of bounds _finger?");
  _finger = addr;
  _threshold = (HeapWord*)round_to(
                 (intptr_t)_finger, CardTableModRefBS::card_size);
}

// Should revisit to see if this should be restructured for
// greater efficiency.
bool MarkFromRootsClosure::do_bit(size_t offset) {
  if (_skipBits > 0) {
    _skipBits--;
    return true;
  }
  // convert offset into a HeapWord*
  HeapWord* addr = _bitMap->startWord() + offset;
  assert(_bitMap->endWord() && addr < _bitMap->endWord(),
         "address out of range");
  assert(_bitMap->isMarked(addr), "tautology");
  if (_bitMap->isMarked(addr+1)) {
    // this is an allocated but not yet initialized object
    assert(_skipBits == 0, "tautology");
    _skipBits = 2;  // skip next two marked bits ("Printezis-marks")
    oop p = oop(addr);
    if (p->klass_or_null() == NULL || !p->is_parsable()) {
      DEBUG_ONLY(if (!_verifying) {)
        // We re-dirty the cards on which this object lies and increase
        // the _threshold so that we'll come back to scan this object
        // during the preclean or remark phase. (CMSCleanOnEnter)
        if (CMSCleanOnEnter) {
          size_t sz = _collector->block_size_using_printezis_bits(addr);
          HeapWord* end_card_addr = (HeapWord*)round_to(
                                      (intptr_t)(addr+sz), CardTableModRefBS::card_size);
          MemRegion redirty_range = MemRegion(addr, end_card_addr);
          assert(!redirty_range.is_empty(), "Arithmetical tautology");
          // Bump _threshold to end_card_addr; note that
          // _threshold cannot possibly exceed end_card_addr, anyhow.
          // This prevents future clearing of the card as the scan proceeds
          // to the right.
          assert(_threshold <= end_card_addr,
                 "Because we are just scanning into this object");
          if (_threshold < end_card_addr) {
            _threshold = end_card_addr;
          }
          if (p->klass_or_null() != NULL) {
            // Redirty the range of cards...
            _mut->mark_range(redirty_range);
          } // ...else the setting of klass will dirty the card anyway.
        }
      DEBUG_ONLY(})
      return true;
    }
  }
  scanOopsInOop(addr);
  return true;
}
// We take a break if we've been at this for a while,
// so as to avoid monopolizing the locks involved.
void MarkFromRootsClosure::do_yield_work() {
  // First give up the locks, then yield, then re-lock
  // We should probably use a constructor/destructor idiom to
  // do this unlock/lock or modify the MutexUnlocker class to
  // serve our purpose. XXX
  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "CMS thread should hold CMS token");
  assert_lock_strong(_bitMap->lock());
  DEBUG_ONLY(RememberKlassesChecker mux(false);)
  _bitMap->lock()->unlock();
  ConcurrentMarkSweepThread::desynchronize(true);
  ConcurrentMarkSweepThread::acknowledge_yield_request();
  _collector->stopTimer();
  GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
  if (PrintCMSStatistics != 0) {
    _collector->incrementYields();
  }
  _collector->icms_wait();

  // See the comment in coordinator_yield()
  for (unsigned i = 0; i < CMSYieldSleepCount &&
                       ConcurrentMarkSweepThread::should_yield() &&
                       !CMSCollector::foregroundGCIsActive(); ++i) {
    os::sleep(Thread::current(), 1, false);
    ConcurrentMarkSweepThread::acknowledge_yield_request();
  }

  ConcurrentMarkSweepThread::synchronize(true);
  _bitMap->lock()->lock_without_safepoint_check();
  _collector->startTimer();
}

void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
  assert(_bitMap->isMarked(ptr), "expected bit to be set");
  assert(_markStack->isEmpty(),
         "should drain stack to limit stack usage");
  // convert ptr to an oop preparatory to scanning
  oop obj = oop(ptr);
  // Ignore mark word in verification below, since we
  // may be running concurrent with mutators.
  assert(obj->is_oop(true), "should be an oop");
  assert(_finger <= ptr, "_finger runneth ahead");
  // advance the finger to right end of this object
  _finger = ptr + obj->size();
  assert(_finger > ptr, "we just incremented it above");
  // On large heaps, it may take us some time to get through
  // the marking phase (especially if running iCMS). During
  // this time it's possible that a lot of mutations have
  // accumulated in the card table and the mod union table --
  // these mutation records are redundant until we have
  // actually traced into the corresponding card.
  // Here, we check whether advancing the finger would make
  // us cross into a new card, and if so clear corresponding
  // cards in the MUT (preclean them in the card-table in the
  // future).

  DEBUG_ONLY(if (!_verifying) {)
    // The clean-on-enter optimization is disabled by default,
    // until we fix 6178663.
    if (CMSCleanOnEnter && (_finger > _threshold)) {
      // [_threshold, _finger) represents the interval
      // of cards to be cleared in MUT (or precleaned in card table).
      // The set of cards to be cleared is all those that overlap
      // with the interval [_threshold, _finger); note that
      // _threshold is always kept card-aligned but _finger isn't
      // always card-aligned.
      HeapWord* old_threshold = _threshold;
      assert(old_threshold == (HeapWord*)round_to(
               (intptr_t)old_threshold, CardTableModRefBS::card_size),
             "_threshold should always be card-aligned");
      _threshold = (HeapWord*)round_to(
                     (intptr_t)_finger, CardTableModRefBS::card_size);
      MemRegion mr(old_threshold, _threshold);
      assert(!mr.is_empty(), "Control point invariant");
      assert(_span.contains(mr), "Should clear within span");
      // XXX When _finger crosses from old gen into perm gen
      // we may be doing unnecessary cleaning; do better in the
      // future by detecting that condition and clearing fewer
      // MUT/CT entries.
      _mut->clear_range(mr);
    }
  DEBUG_ONLY(})
  // Note: the finger doesn't advance while we drain
  // the stack below.
  PushOrMarkClosure pushOrMarkClosure(_collector,
                                      _span, _bitMap, _markStack,
                                      _revisitStack,
                                      _finger, this);
  bool res = _markStack->push(obj);
  assert(res, "Empty non-zero size stack should have space for single push");
  while (!_markStack->isEmpty()) {
    oop new_oop = _markStack->pop();
    // Skip verifying header mark word below because we are
    // running concurrent with mutators.
    assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
    // now scan this oop's oops
    new_oop->oop_iterate(&pushOrMarkClosure);
    do_yield_check();
  }
  assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
}
Par_MarkFromRootsClosure::Par_MarkFromRootsClosure(CMSConcMarkingTask* task,
                        CMSCollector* collector, MemRegion span,
                        CMSBitMap* bit_map,
                        OopTaskQueue* work_queue,
                        CMSMarkStack* overflow_stack,
                        CMSMarkStack* revisit_stack,
                        bool should_yield):
  _collector(collector),
  _whole_span(collector->_span),
  _span(span),
  _bit_map(bit_map),
  _mut(&collector->_modUnionTable),
  _work_queue(work_queue),
  _overflow_stack(overflow_stack),
  _revisit_stack(revisit_stack),
  _yield(should_yield),
  _skip_bits(0),
  _task(task)
{
  assert(_work_queue->size() == 0, "work_queue should be empty");
  _finger = span.start();
  _threshold = _finger;  // XXX Defer clear-on-enter optimization for now
  assert(_span.contains(_finger), "Out of bounds _finger?");
}

// Should revisit to see if this should be restructured for
// greater efficiency.
bool Par_MarkFromRootsClosure::do_bit(size_t offset) {
  if (_skip_bits > 0) {
    _skip_bits--;
    return true;
  }
  // convert offset into a HeapWord*
  HeapWord* addr = _bit_map->startWord() + offset;
  assert(_bit_map->endWord() && addr < _bit_map->endWord(),
         "address out of range");
  assert(_bit_map->isMarked(addr), "tautology");
  if (_bit_map->isMarked(addr+1)) {
    // this is an allocated object that might not yet be initialized
    assert(_skip_bits == 0, "tautology");
    _skip_bits = 2;  // skip next two marked bits ("Printezis-marks")
    oop p = oop(addr);
    if (p->klass_or_null() == NULL || !p->is_parsable()) {
      // in the case of Clean-on-Enter optimization, redirty card
      // and avoid clearing card by increasing the threshold.
      return true;
    }
  }
  scan_oops_in_oop(addr);
  return true;
}
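// A note on scan_oops_in_oop() below: each worker scans its own span, and
// pushes go to the worker's queue with spill-over to the shared overflow
// stack.  While the local finger does not advance during the drain loop,
// the task's global finger (obtained via global_finger_addr()) can, and
// it is this global finger that Par_PushOrMarkClosure consults when
// deciding which newly marked objects still need to be pushed.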
expected to pop an oop"); 7239 // now scan this oop's oops 7240 new_oop->oop_iterate(&pushOrMarkClosure); 7241 do_yield_check(); 7242 } 7243 assert(_work_queue->size() == 0, "tautology, emphasizing post-condition"); 7244 } 7245 7246 // Yield in response to a request from VM Thread or 7247 // from mutators. 7248 void Par_MarkFromRootsClosure::do_yield_work() { 7249 assert(_task != NULL, "sanity"); 7250 _task->yield(); 7251 } 7252 7253 // A variant of the above used for verifying CMS marking work. 7254 MarkFromRootsVerifyClosure::MarkFromRootsVerifyClosure(CMSCollector* collector, 7255 MemRegion span, 7256 CMSBitMap* verification_bm, CMSBitMap* cms_bm, 7257 CMSMarkStack* mark_stack): 7258 _collector(collector), 7259 _span(span), 7260 _verification_bm(verification_bm), 7261 _cms_bm(cms_bm), 7262 _mark_stack(mark_stack), 7263 _pam_verify_closure(collector, span, verification_bm, cms_bm, 7264 mark_stack) 7265 { 7266 assert(_mark_stack->isEmpty(), "stack should be empty"); 7267 _finger = _verification_bm->startWord(); 7268 assert(_collector->_restart_addr == NULL, "Sanity check"); 7269 assert(_span.contains(_finger), "Out of bounds _finger?"); 7270 } 7271 7272 void MarkFromRootsVerifyClosure::reset(HeapWord* addr) { 7273 assert(_mark_stack->isEmpty(), "would cause duplicates on stack"); 7274 assert(_span.contains(addr), "Out of bounds _finger?"); 7275 _finger = addr; 7276 } 7277 7278 // Should revisit to see if this should be restructured for 7279 // greater efficiency. 7280 bool MarkFromRootsVerifyClosure::do_bit(size_t offset) { 7281 // convert offset into a HeapWord* 7282 HeapWord* addr = _verification_bm->startWord() + offset; 7283 assert(_verification_bm->endWord() && addr < _verification_bm->endWord(), 7284 "address out of range"); 7285 assert(_verification_bm->isMarked(addr), "tautology"); 7286 assert(_cms_bm->isMarked(addr), "tautology"); 7287 7288 assert(_mark_stack->isEmpty(), 7289 "should drain stack to limit stack usage"); 7290 // convert addr to an oop preparatory to scanning 7291 oop obj = oop(addr); 7292 assert(obj->is_oop(), "should be an oop"); 7293 assert(_finger <= addr, "_finger runneth ahead"); 7294 // advance the finger to right end of this object 7295 _finger = addr + obj->size(); 7296 assert(_finger > addr, "we just incremented it above"); 7297 // Note: the finger doesn't advance while we drain 7298 // the stack below. 7299 bool res = _mark_stack->push(obj); 7300 assert(res, "Empty non-zero size stack should have space for single push"); 7301 while (!_mark_stack->isEmpty()) { 7302 oop new_oop = _mark_stack->pop(); 7303 assert(new_oop->is_oop(), "Oops! 
expected to pop an oop"); 7304 // now scan this oop's oops 7305 new_oop->oop_iterate(&_pam_verify_closure); 7306 } 7307 assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition"); 7308 return true; 7309 } 7310 7311 PushAndMarkVerifyClosure::PushAndMarkVerifyClosure( 7312 CMSCollector* collector, MemRegion span, 7313 CMSBitMap* verification_bm, CMSBitMap* cms_bm, 7314 CMSMarkStack* mark_stack): 7315 OopClosure(collector->ref_processor()), 7316 _collector(collector), 7317 _span(span), 7318 _verification_bm(verification_bm), 7319 _cms_bm(cms_bm), 7320 _mark_stack(mark_stack) 7321 { } 7322 7323 void PushAndMarkVerifyClosure::do_oop(oop* p) { PushAndMarkVerifyClosure::do_oop_work(p); } 7324 void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); } 7325 7326 // Upon stack overflow, we discard (part of) the stack, 7327 // remembering the least address amongst those discarded 7328 // in CMSCollector's _restart_address. 7329 void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) { 7330 // Remember the least grey address discarded 7331 HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost); 7332 _collector->lower_restart_addr(ra); 7333 _mark_stack->reset(); // discard stack contents 7334 _mark_stack->expand(); // expand the stack if possible 7335 } 7336 7337 void PushAndMarkVerifyClosure::do_oop(oop obj) { 7338 assert(obj->is_oop_or_null(), "expected an oop or NULL"); 7339 HeapWord* addr = (HeapWord*)obj; 7340 if (_span.contains(addr) && !_verification_bm->isMarked(addr)) { 7341 // Oop lies in _span and isn't yet grey or black 7342 _verification_bm->mark(addr); // now grey 7343 if (!_cms_bm->isMarked(addr)) { 7344 oop(addr)->print(); 7345 gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", 7346 addr); 7347 fatal("... aborting"); 7348 } 7349 7350 if (!_mark_stack->push(obj)) { // stack overflow 7351 if (PrintCMSStatistics != 0) { 7352 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at " 7353 SIZE_FORMAT, _mark_stack->capacity()); 7354 } 7355 assert(_mark_stack->isFull(), "Else push should have succeeded"); 7356 handle_stack_overflow(addr); 7357 } 7358 // anything including and to the right of _finger 7359 // will be scanned as we iterate over the remainder of the 7360 // bit map 7361 } 7362 } 7363 7364 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector, 7365 MemRegion span, 7366 CMSBitMap* bitMap, CMSMarkStack* markStack, 7367 CMSMarkStack* revisitStack, 7368 HeapWord* finger, MarkFromRootsClosure* parent) : 7369 KlassRememberingOopClosure(collector, collector->ref_processor(), revisitStack), 7370 _span(span), 7371 _bitMap(bitMap), 7372 _markStack(markStack), 7373 _finger(finger), 7374 _parent(parent) 7375 { } 7376 7377 Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector, 7378 MemRegion span, 7379 CMSBitMap* bit_map, 7380 OopTaskQueue* work_queue, 7381 CMSMarkStack* overflow_stack, 7382 CMSMarkStack* revisit_stack, 7383 HeapWord* finger, 7384 HeapWord** global_finger_addr, 7385 Par_MarkFromRootsClosure* parent) : 7386 Par_KlassRememberingOopClosure(collector, 7387 collector->ref_processor(), 7388 revisit_stack), 7389 _whole_span(collector->_span), 7390 _span(span), 7391 _bit_map(bit_map), 7392 _work_queue(work_queue), 7393 _overflow_stack(overflow_stack), 7394 _finger(finger), 7395 _global_finger_addr(global_finger_addr), 7396 _parent(parent) 7397 { } 7398 7399 // Assumes thread-safe access by callers, who are 7400 // responsible for mutual exclusion. 
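// (Added illustrative note, not from the original source: lower_restart_addr()
// below keeps the minimum over all "least discarded grey address" values
// reported by overflowing closures. For example, if two overflow events
// discard stack segments whose least grey addresses are 0x7f000 and 0x7e000,
// _restart_addr ends up at 0x7e000, and a marking pass restarted from there
// is guaranteed to re-visit every object that was dropped.)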
7401 void CMSCollector::lower_restart_addr(HeapWord* low) { 7402 assert(_span.contains(low), "Out of bounds addr"); 7403 if (_restart_addr == NULL) { 7404 _restart_addr = low; 7405 } else { 7406 _restart_addr = MIN2(_restart_addr, low); 7407 } 7408 } 7409 7410 // Upon stack overflow, we discard (part of) the stack, 7411 // remembering the least address amongst those discarded 7412 // in CMSCollector's _restart_address. 7413 void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) { 7414 // Remember the least grey address discarded 7415 HeapWord* ra = (HeapWord*)_markStack->least_value(lost); 7416 _collector->lower_restart_addr(ra); 7417 _markStack->reset(); // discard stack contents 7418 _markStack->expand(); // expand the stack if possible 7419 } 7420 7421 // Upon stack overflow, we discard (part of) the stack, 7422 // remembering the least address amongst those discarded 7423 // in CMSCollector's _restart_address. 7424 void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) { 7425 // We need to do this under a mutex to prevent other 7426 // workers from interfering with the work done below. 7427 MutexLockerEx ml(_overflow_stack->par_lock(), 7428 Mutex::_no_safepoint_check_flag); 7429 // Remember the least grey address discarded 7430 HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost); 7431 _collector->lower_restart_addr(ra); 7432 _overflow_stack->reset(); // discard stack contents 7433 _overflow_stack->expand(); // expand the stack if possible 7434 } 7435 7436 void PushOrMarkClosure::do_oop(oop obj) { 7437 // Ignore mark word because we are running concurrent with mutators. 7438 assert(obj->is_oop_or_null(true), "expected an oop or NULL"); 7439 HeapWord* addr = (HeapWord*)obj; 7440 if (_span.contains(addr) && !_bitMap->isMarked(addr)) { 7441 // Oop lies in _span and isn't yet grey or black 7442 _bitMap->mark(addr); // now grey 7443 if (addr < _finger) { 7444 // the bit map iteration has already either passed, or 7445 // sampled, this bit in the bit map; we'll need to 7446 // use the marking stack to scan this oop's oops. 7447 bool simulate_overflow = false; 7448 NOT_PRODUCT( 7449 if (CMSMarkStackOverflowALot && 7450 _collector->simulate_overflow()) { 7451 // simulate a stack overflow 7452 simulate_overflow = true; 7453 } 7454 ) 7455 if (simulate_overflow || !_markStack->push(obj)) { // stack overflow 7456 if (PrintCMSStatistics != 0) { 7457 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at " 7458 SIZE_FORMAT, _markStack->capacity()); 7459 } 7460 assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded"); 7461 handle_stack_overflow(addr); 7462 } 7463 } 7464 // anything including and to the right of _finger 7465 // will be scanned as we iterate over the remainder of the 7466 // bit map 7467 do_yield_check(); 7468 } 7469 } 7470 7471 void PushOrMarkClosure::do_oop(oop* p) { PushOrMarkClosure::do_oop_work(p); } 7472 void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); } 7473 7474 void Par_PushOrMarkClosure::do_oop(oop obj) { 7475 // Ignore mark word because we are running concurrent with mutators. 
7476 assert(obj->is_oop_or_null(true), "expected an oop or NULL");
7477 HeapWord* addr = (HeapWord*)obj;
7478 if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
7479 // Oop lies in _whole_span and isn't yet grey or black
7480 // We read the global_finger (volatile read) strictly after marking oop
7481 bool res = _bit_map->par_mark(addr); // now grey
7482 volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
7483 // Should we push this marked oop on our stack?
7484 // -- if someone else marked it, nothing to do
7485 // -- if target oop is above global finger nothing to do
7486 // -- if target oop is in chunk and above local finger
7487 // then nothing to do
7488 // -- else push on work queue
7489 if ( !res // someone else marked it, they will deal with it
7490 || (addr >= *gfa) // will be scanned in a later task
7491 || (_span.contains(addr) && addr >= _finger)) { // later in this chunk
7492 return;
7493 }
7494 // the bit map iteration has already either passed, or
7495 // sampled, this bit in the bit map; we'll need to
7496 // use the marking stack to scan this oop's oops.
7497 bool simulate_overflow = false;
7498 NOT_PRODUCT(
7499 if (CMSMarkStackOverflowALot &&
7500 _collector->simulate_overflow()) {
7501 // simulate a stack overflow
7502 simulate_overflow = true;
7503 }
7504 )
7505 if (simulate_overflow ||
7506 !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
7507 // stack overflow
7508 if (PrintCMSStatistics != 0) {
7509 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7510 SIZE_FORMAT, _overflow_stack->capacity());
7511 }
7512 // We cannot assert that the overflow stack is full because
7513 // it may have been emptied since.
7514 assert(simulate_overflow ||
7515 _work_queue->size() == _work_queue->max_elems(),
7516 "Else push should have succeeded");
7517 handle_stack_overflow(addr);
7518 }
7519 do_yield_check();
7520 }
7521 }
7522
7523 void Par_PushOrMarkClosure::do_oop(oop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
7524 void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
7525
7526 KlassRememberingOopClosure::KlassRememberingOopClosure(CMSCollector* collector,
7527 ReferenceProcessor* rp,
7528 CMSMarkStack* revisit_stack) :
7529 OopClosure(rp),
7530 _collector(collector),
7531 _revisit_stack(revisit_stack),
7532 _should_remember_klasses(collector->should_unload_classes()) {}
7533
7534 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
7535 MemRegion span,
7536 ReferenceProcessor* rp,
7537 CMSBitMap* bit_map,
7538 CMSBitMap* mod_union_table,
7539 CMSMarkStack* mark_stack,
7540 CMSMarkStack* revisit_stack,
7541 bool concurrent_precleaning):
7542 KlassRememberingOopClosure(collector, rp, revisit_stack),
7543 _span(span),
7544 _bit_map(bit_map),
7545 _mod_union_table(mod_union_table),
7546 _mark_stack(mark_stack),
7547 _concurrent_precleaning(concurrent_precleaning)
7548 {
7549 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7550 }
7551
7552 // Grey object rescan during pre-cleaning and second checkpoint phases --
7553 // the non-parallel version (the parallel version appears further below.)
7554 void PushAndMarkClosure::do_oop(oop obj) {
7555 // Ignore mark word verification. If during concurrent precleaning,
7556 // the object monitor may be locked. If during the checkpoint
7557 // phases, the object may already have been reached by a different
7558 // path and may be at the end of the global overflow list (so
7559 // the mark word may be NULL).
7560 assert(obj->is_oop_or_null(true /* ignore mark word */),
7561 "expected an oop or NULL");
7562 HeapWord* addr = (HeapWord*)obj;
7563 // Check if oop points into the CMS generation
7564 // and is not marked
7565 if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7566 // a white object ...
7567 _bit_map->mark(addr); // ... now grey
7568 // push on the marking stack (grey set)
7569 bool simulate_overflow = false;
7570 NOT_PRODUCT(
7571 if (CMSMarkStackOverflowALot &&
7572 _collector->simulate_overflow()) {
7573 // simulate a stack overflow
7574 simulate_overflow = true;
7575 }
7576 )
7577 if (simulate_overflow || !_mark_stack->push(obj)) {
7578 if (_concurrent_precleaning) {
7579 // During precleaning we can just dirty the appropriate card(s)
7580 // in the mod union table, thus ensuring that the object remains
7581 // in the grey set, and continue. In the case of object arrays
7582 // we need to dirty all of the cards that the object spans,
7583 // since the rescan of object arrays will be limited to the
7584 // dirty cards.
7585 // Note that no one can be interfering with us in this action
7586 // of dirtying the mod union table, so no locking or atomics
7587 // are required.
7588 if (obj->is_objArray()) {
7589 size_t sz = obj->size();
7590 HeapWord* end_card_addr = (HeapWord*)round_to(
7591 (intptr_t)(addr+sz), CardTableModRefBS::card_size);
7592 MemRegion redirty_range = MemRegion(addr, end_card_addr);
7593 assert(!redirty_range.is_empty(), "Arithmetical tautology");
7594 _mod_union_table->mark_range(redirty_range);
7595 } else {
7596 _mod_union_table->mark(addr);
7597 }
7598 _collector->_ser_pmc_preclean_ovflw++;
7599 } else {
7600 // During the remark phase, we need to remember this oop
7601 // in the overflow list.
7602 _collector->push_on_overflow_list(obj);
7603 _collector->_ser_pmc_remark_ovflw++;
7604 }
7605 }
7606 }
7607 }
7608
7609 Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
7610 MemRegion span,
7611 ReferenceProcessor* rp,
7612 CMSBitMap* bit_map,
7613 OopTaskQueue* work_queue,
7614 CMSMarkStack* revisit_stack):
7615 Par_KlassRememberingOopClosure(collector, rp, revisit_stack),
7616 _span(span),
7617 _bit_map(bit_map),
7618 _work_queue(work_queue)
7619 {
7620 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7621 }
7622
7623 void PushAndMarkClosure::do_oop(oop* p) { PushAndMarkClosure::do_oop_work(p); }
7624 void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
7625
7626 // Grey object rescan during second checkpoint phase --
7627 // the parallel version.
7628 void Par_PushAndMarkClosure::do_oop(oop obj) {
7629 // In the assert below, we ignore the mark word because
7630 // this oop may point to an already visited object that is
7631 // on the overflow stack (in which case the mark word has
7632 // been hijacked for chaining into the overflow stack --
7633 // if this is the last object in the overflow stack then
7634 // its mark word will be NULL). Because this object may
7635 // have been subsequently popped off the global overflow
7636 // stack, and the mark word possibly restored to the prototypical
7637 // value, by the time we get to examine this failing assert in
7638 // the debugger, is_oop_or_null(false) may start
7639 // to hold.
7640 assert(obj->is_oop_or_null(true), 7641 "expected an oop or NULL"); 7642 HeapWord* addr = (HeapWord*)obj; 7643 // Check if oop points into the CMS generation 7644 // and is not marked 7645 if (_span.contains(addr) && !_bit_map->isMarked(addr)) { 7646 // a white object ... 7647 // If we manage to "claim" the object, by being the 7648 // first thread to mark it, then we push it on our 7649 // marking stack 7650 if (_bit_map->par_mark(addr)) { // ... now grey 7651 // push on work queue (grey set) 7652 bool simulate_overflow = false; 7653 NOT_PRODUCT( 7654 if (CMSMarkStackOverflowALot && 7655 _collector->par_simulate_overflow()) { 7656 // simulate a stack overflow 7657 simulate_overflow = true; 7658 } 7659 ) 7660 if (simulate_overflow || !_work_queue->push(obj)) { 7661 _collector->par_push_on_overflow_list(obj); 7662 _collector->_par_pmc_remark_ovflw++; // imprecise OK: no need to CAS 7663 } 7664 } // Else, some other thread got there first 7665 } 7666 } 7667 7668 void Par_PushAndMarkClosure::do_oop(oop* p) { Par_PushAndMarkClosure::do_oop_work(p); } 7669 void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); } 7670 7671 void PushAndMarkClosure::remember_mdo(DataLayout* v) { 7672 // TBD 7673 } 7674 7675 void Par_PushAndMarkClosure::remember_mdo(DataLayout* v) { 7676 // TBD 7677 } 7678 7679 void CMSPrecleanRefsYieldClosure::do_yield_work() { 7680 DEBUG_ONLY(RememberKlassesChecker mux(false);) 7681 Mutex* bml = _collector->bitMapLock(); 7682 assert_lock_strong(bml); 7683 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), 7684 "CMS thread should hold CMS token"); 7685 7686 bml->unlock(); 7687 ConcurrentMarkSweepThread::desynchronize(true); 7688 7689 ConcurrentMarkSweepThread::acknowledge_yield_request(); 7690 7691 _collector->stopTimer(); 7692 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr()); 7693 if (PrintCMSStatistics != 0) { 7694 _collector->incrementYields(); 7695 } 7696 _collector->icms_wait(); 7697 7698 // See the comment in coordinator_yield() 7699 for (unsigned i = 0; i < CMSYieldSleepCount && 7700 ConcurrentMarkSweepThread::should_yield() && 7701 !CMSCollector::foregroundGCIsActive(); ++i) { 7702 os::sleep(Thread::current(), 1, false); 7703 ConcurrentMarkSweepThread::acknowledge_yield_request(); 7704 } 7705 7706 ConcurrentMarkSweepThread::synchronize(true); 7707 bml->lock(); 7708 7709 _collector->startTimer(); 7710 } 7711 7712 bool CMSPrecleanRefsYieldClosure::should_return() { 7713 if (ConcurrentMarkSweepThread::should_yield()) { 7714 do_yield_work(); 7715 } 7716 return _collector->foregroundGCIsActive(); 7717 } 7718 7719 void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) { 7720 assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0, 7721 "mr should be aligned to start at a card boundary"); 7722 // We'd like to assert: 7723 // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0, 7724 // "mr should be a range of cards"); 7725 // However, that would be too strong in one case -- the last 7726 // partition ends at _unallocated_block which, in general, can be 7727 // an arbitrary boundary, not necessarily card aligned. 
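// (Illustrative arithmetic, not in the original comment: with the usual
// 512-byte cards and 8-byte HeapWords on a 64-bit VM, card_size_in_words
// is 64, so a 1 MB MemRegion (131072 words) contributes 131072/64 = 2048
// to the _num_dirty_cards count computed below.)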
7728 if (PrintCMSStatistics != 0) { 7729 _num_dirty_cards += 7730 mr.word_size()/CardTableModRefBS::card_size_in_words; 7731 } 7732 _space->object_iterate_mem(mr, &_scan_cl); 7733 } 7734 7735 SweepClosure::SweepClosure(CMSCollector* collector, 7736 ConcurrentMarkSweepGeneration* g, 7737 CMSBitMap* bitMap, bool should_yield) : 7738 _collector(collector), 7739 _g(g), 7740 _sp(g->cmsSpace()), 7741 _limit(_sp->sweep_limit()), 7742 _freelistLock(_sp->freelistLock()), 7743 _bitMap(bitMap), 7744 _yield(should_yield), 7745 _inFreeRange(false), // No free range at beginning of sweep 7746 _freeRangeInFreeLists(false), // No free range at beginning of sweep 7747 _lastFreeRangeCoalesced(false), 7748 _freeFinger(g->used_region().start()) 7749 { 7750 NOT_PRODUCT( 7751 _numObjectsFreed = 0; 7752 _numWordsFreed = 0; 7753 _numObjectsLive = 0; 7754 _numWordsLive = 0; 7755 _numObjectsAlreadyFree = 0; 7756 _numWordsAlreadyFree = 0; 7757 _last_fc = NULL; 7758 7759 _sp->initializeIndexedFreeListArrayReturnedBytes(); 7760 _sp->dictionary()->initializeDictReturnedBytes(); 7761 ) 7762 assert(_limit >= _sp->bottom() && _limit <= _sp->end(), 7763 "sweep _limit out of bounds"); 7764 if (CMSTraceSweeper) { 7765 gclog_or_tty->print("\n====================\nStarting new sweep\n"); 7766 } 7767 } 7768 7769 // We need this destructor to reclaim any space at the end 7770 // of the space, which do_blk below may not have added back to 7771 // the free lists. [basically dealing with the "fringe effect"] 7772 SweepClosure::~SweepClosure() { 7773 assert_lock_strong(_freelistLock); 7774 // this should be treated as the end of a free run if any 7775 // The current free range should be returned to the free lists 7776 // as one coalesced chunk. 7777 if (inFreeRange()) { 7778 flushCurFreeChunk(freeFinger(), 7779 pointer_delta(_limit, freeFinger())); 7780 assert(freeFinger() < _limit, "the finger pointeth off base"); 7781 if (CMSTraceSweeper) { 7782 gclog_or_tty->print("destructor:"); 7783 gclog_or_tty->print("Sweep:put_free_blk 0x%x ("SIZE_FORMAT") " 7784 "[coalesced:"SIZE_FORMAT"]\n", 7785 freeFinger(), pointer_delta(_limit, freeFinger()), 7786 lastFreeRangeCoalesced()); 7787 } 7788 } 7789 NOT_PRODUCT( 7790 if (Verbose && PrintGC) { 7791 gclog_or_tty->print("Collected "SIZE_FORMAT" objects, " 7792 SIZE_FORMAT " bytes", 7793 _numObjectsFreed, _numWordsFreed*sizeof(HeapWord)); 7794 gclog_or_tty->print_cr("\nLive "SIZE_FORMAT" objects, " 7795 SIZE_FORMAT" bytes " 7796 "Already free "SIZE_FORMAT" objects, "SIZE_FORMAT" bytes", 7797 _numObjectsLive, _numWordsLive*sizeof(HeapWord), 7798 _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord)); 7799 size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree) * 7800 sizeof(HeapWord); 7801 gclog_or_tty->print_cr("Total sweep: "SIZE_FORMAT" bytes", totalBytes); 7802 7803 if (PrintCMSStatistics && CMSVerifyReturnedBytes) { 7804 size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes(); 7805 size_t dictReturnedBytes = _sp->dictionary()->sumDictReturnedBytes(); 7806 size_t returnedBytes = indexListReturnedBytes + dictReturnedBytes; 7807 gclog_or_tty->print("Returned "SIZE_FORMAT" bytes", returnedBytes); 7808 gclog_or_tty->print(" Indexed List Returned "SIZE_FORMAT" bytes", 7809 indexListReturnedBytes); 7810 gclog_or_tty->print_cr(" Dictionary Returned "SIZE_FORMAT" bytes", 7811 dictReturnedBytes); 7812 } 7813 } 7814 ) 7815 // Now, in debug mode, just null out the sweep_limit 7816 NOT_PRODUCT(_sp->clear_sweep_limit();) 7817 if (CMSTraceSweeper) { 7818 
gclog_or_tty->print("end of sweep\n================\n"); 7819 } 7820 } 7821 7822 void SweepClosure::initialize_free_range(HeapWord* freeFinger, 7823 bool freeRangeInFreeLists) { 7824 if (CMSTraceSweeper) { 7825 gclog_or_tty->print("---- Start free range 0x%x with free block [%d] (%d)\n", 7826 freeFinger, _sp->block_size(freeFinger), 7827 freeRangeInFreeLists); 7828 } 7829 assert(!inFreeRange(), "Trampling existing free range"); 7830 set_inFreeRange(true); 7831 set_lastFreeRangeCoalesced(false); 7832 7833 set_freeFinger(freeFinger); 7834 set_freeRangeInFreeLists(freeRangeInFreeLists); 7835 if (CMSTestInFreeList) { 7836 if (freeRangeInFreeLists) { 7837 FreeChunk* fc = (FreeChunk*) freeFinger; 7838 assert(fc->isFree(), "A chunk on the free list should be free."); 7839 assert(fc->size() > 0, "Free range should have a size"); 7840 assert(_sp->verifyChunkInFreeLists(fc), "Chunk is not in free lists"); 7841 } 7842 } 7843 } 7844 7845 // Note that the sweeper runs concurrently with mutators. Thus, 7846 // it is possible for direct allocation in this generation to happen 7847 // in the middle of the sweep. Note that the sweeper also coalesces 7848 // contiguous free blocks. Thus, unless the sweeper and the allocator 7849 // synchronize appropriately freshly allocated blocks may get swept up. 7850 // This is accomplished by the sweeper locking the free lists while 7851 // it is sweeping. Thus blocks that are determined to be free are 7852 // indeed free. There is however one additional complication: 7853 // blocks that have been allocated since the final checkpoint and 7854 // mark, will not have been marked and so would be treated as 7855 // unreachable and swept up. To prevent this, the allocator marks 7856 // the bit map when allocating during the sweep phase. This leads, 7857 // however, to a further complication -- objects may have been allocated 7858 // but not yet initialized -- in the sense that the header isn't yet 7859 // installed. The sweeper can not then determine the size of the block 7860 // in order to skip over it. To deal with this case, we use a technique 7861 // (due to Printezis) to encode such uninitialized block sizes in the 7862 // bit map. Since the bit map uses a bit per every HeapWord, but the 7863 // CMS generation has a minimum object size of 3 HeapWords, it follows 7864 // that "normal marks" won't be adjacent in the bit map (there will 7865 // always be at least two 0 bits between successive 1 bits). We make use 7866 // of these "unused" bits to represent uninitialized blocks -- the bit 7867 // corresponding to the start of the uninitialized object and the next 7868 // bit are both set. Finally, a 1 bit marks the end of the object that 7869 // started with the two consecutive 1 bits to indicate its potentially 7870 // uninitialized state. 
7871
7872 size_t SweepClosure::do_blk_careful(HeapWord* addr) {
7873 FreeChunk* fc = (FreeChunk*)addr;
7874 size_t res;
7875
7876 // check if we are done sweeping
7877 if (addr == _limit) { // we have swept up to the limit, do nothing more
7878 assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7879 "sweep _limit out of bounds");
7880 // help the closure application finish
7881 return pointer_delta(_sp->end(), _limit);
7882 }
7883 assert(addr <= _limit, "sweep invariant");
7884
7885 // check if we should yield
7886 do_yield_check(addr);
7887 if (fc->isFree()) {
7888 // Chunk that is already free
7889 res = fc->size();
7890 doAlreadyFreeChunk(fc);
7891 debug_only(_sp->verifyFreeLists());
7892 assert(res == fc->size(), "Don't expect the size to change");
7893 NOT_PRODUCT(
7894 _numObjectsAlreadyFree++;
7895 _numWordsAlreadyFree += res;
7896 )
7897 NOT_PRODUCT(_last_fc = fc;)
7898 } else if (!_bitMap->isMarked(addr)) {
7899 // Chunk is fresh garbage
7900 res = doGarbageChunk(fc);
7901 debug_only(_sp->verifyFreeLists());
7902 NOT_PRODUCT(
7903 _numObjectsFreed++;
7904 _numWordsFreed += res;
7905 )
7906 } else {
7907 // Chunk that is alive.
7908 res = doLiveChunk(fc);
7909 debug_only(_sp->verifyFreeLists());
7910 NOT_PRODUCT(
7911 _numObjectsLive++;
7912 _numWordsLive += res;
7913 )
7914 }
7915 return res;
7916 }
7917
7918 // For the smart allocation, record the following:
7919 // split deaths - a free chunk is removed from its free list because
7920 // it is being split into two or more chunks.
7921 // split birth - a free chunk is being added to its free list because
7922 // a larger free chunk has been split and resulted in this free chunk.
7923 // coal death - a free chunk is being removed from its free list because
7924 // it is being coalesced into a large free chunk.
7925 // coal birth - a free chunk is being added to its free list because
7926 // it was created when two or more free chunks were coalesced into
7927 // this free chunk.
7928 //
7929 // These statistics are used to determine the desired number of free
7930 // chunks of a given size. The desired number is chosen to be relative
7931 // to the end of a CMS sweep. The desired number at the end of a sweep
7932 // is the
7933 // count-at-end-of-previous-sweep (an amount that was enough)
7934 // - count-at-beginning-of-current-sweep (the excess)
7935 // + split-births (gains in this size during interval)
7936 // - split-deaths (demands on this size during interval)
7937 // where the interval is from the end of one sweep to the end of the
7938 // next.
7939 //
7940 // When sweeping, the sweeper maintains an accumulated chunk which is
7941 // the chunk that is made up of chunks that have been coalesced. That
7942 // will be termed the left-hand chunk. A new chunk of garbage that
7943 // is being considered for coalescing will be referred to as the
7944 // right-hand chunk.
7945 //
7946 // When making a decision on whether to coalesce a right-hand chunk with
7947 // the current left-hand chunk, the current count vs. the desired count
7948 // of the left-hand chunk is considered. Also if the right-hand chunk
7949 // is near the large chunk at the end of the heap (see
7950 // ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the
7951 // left-hand chunk is coalesced.
7952 //
7953 // When making a decision about whether to split a chunk, the desired count
7954 // vs. the current count of the candidate to be split is also considered.
7955 // If the candidate is underpopulated (currently fewer chunks than desired)
7956 // a chunk of an overpopulated (currently more chunks than desired) size may
7957 // be chosen. The "hint" associated with a free list, if non-null, points
7958 // to a free list which may be overpopulated.
7959 //
7960
7961 void SweepClosure::doAlreadyFreeChunk(FreeChunk* fc) {
7962 size_t size = fc->size();
7963 // Chunks that cannot be coalesced are not in the
7964 // free lists.
7965 if (CMSTestInFreeList && !fc->cantCoalesce()) {
7966 assert(_sp->verifyChunkInFreeLists(fc),
7967 "free chunk should be in free lists");
7968 }
7969 // a chunk that is already free, should not have been
7970 // marked in the bit map
7971 HeapWord* addr = (HeapWord*) fc;
7972 assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
7973 // Verify that the bit map has no bits marked between
7974 // addr and purported end of this block.
7975 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7976
7977 // Some chunks cannot be coalesced under any circumstances.
7978 // See the definition of cantCoalesce().
7979 if (!fc->cantCoalesce()) {
7980 // This chunk can potentially be coalesced.
7981 if (_sp->adaptive_freelists()) {
7982 // All the work is done in
7983 doPostIsFreeOrGarbageChunk(fc, size);
7984 } else { // Not adaptive free lists
7985 // this is a free chunk that can potentially be coalesced by the sweeper;
7986 if (!inFreeRange()) {
7987 // if the next chunk is a free block that can't be coalesced,
7988 // it doesn't make sense to remove this chunk from the free lists
7989 FreeChunk* nextChunk = (FreeChunk*)(addr + size);
7990 assert((HeapWord*)nextChunk <= _limit, "sweep invariant");
7991 if ((HeapWord*)nextChunk < _limit && // there's a next chunk...
7992 nextChunk->isFree() && // which is free...
7993 nextChunk->cantCoalesce()) { // ... but can't be coalesced
7994 // nothing to do
7995 } else {
7996 // Potentially the start of a new free range:
7997 // Don't eagerly remove it from the free lists.
7998 // No need to remove it if it will just be put
7999 // back again. (Also from a pragmatic point of view
8000 // if it is a free block in a region that is beyond
8001 // any allocated blocks, an assertion will fail)
8002 // Remember the start of a free run.
8003 initialize_free_range(addr, true);
8004 // end - can coalesce with next chunk
8005 }
8006 } else {
8007 // in the midst of a free range, we are coalescing
8008 debug_only(record_free_block_coalesced(fc);)
8009 if (CMSTraceSweeper) {
8010 gclog_or_tty->print(" -- pick up free block 0x%x (%d)\n", fc, size);
8011 }
8012 // remove it from the free lists
8013 _sp->removeFreeChunkFromFreeLists(fc);
8014 set_lastFreeRangeCoalesced(true);
8015 // If the chunk is being coalesced and the current free range is
8016 // in the free lists, remove the current free range so that it
8017 // will be returned to the free lists in its entirety - all
8018 // the coalesced pieces included.
8019 if (freeRangeInFreeLists()) {
8020 FreeChunk* ffc = (FreeChunk*) freeFinger();
8021 assert(ffc->size() == pointer_delta(addr, freeFinger()),
8022 "Size of free range is inconsistent with chunk size.");
8023 if (CMSTestInFreeList) {
8024 assert(_sp->verifyChunkInFreeLists(ffc),
8025 "free range is not in free lists");
8026 }
8027 _sp->removeFreeChunkFromFreeLists(ffc);
8028 set_freeRangeInFreeLists(false);
8029 }
8030 }
8031 }
8032 } else {
8033 // Code path common to both original and adaptive free lists.
8034
8035 // can't coalesce with previous block; this should be treated
8036 // as the end of a free run if any
8037 if (inFreeRange()) {
8038 // we kicked some butt; time to pick up the garbage
8039 assert(freeFinger() < addr, "the finger pointeth off base");
8040 flushCurFreeChunk(freeFinger(), pointer_delta(addr, freeFinger()));
8041 }
8042 // else, nothing to do, just continue
8043 }
8044 }
8045
8046 size_t SweepClosure::doGarbageChunk(FreeChunk* fc) {
8047 // This is a chunk of garbage. It is not in any free list.
8048 // Add it to a free list or let it possibly be coalesced into
8049 // a larger chunk.
8050 HeapWord* addr = (HeapWord*) fc;
8051 size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
8052
8053 if (_sp->adaptive_freelists()) {
8054 // Verify that the bit map has no bits marked between
8055 // addr and purported end of just dead object.
8056 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
8057
8058 doPostIsFreeOrGarbageChunk(fc, size);
8059 } else {
8060 if (!inFreeRange()) {
8061 // start of a new free range
8062 assert(size > 0, "A free range should have a size");
8063 initialize_free_range(addr, false);
8064
8065 } else {
8066 // this will be swept up when we hit the end of the
8067 // free range
8068 if (CMSTraceSweeper) {
8069 gclog_or_tty->print(" -- pick up garbage 0x%x (%d) \n", fc, size);
8070 }
8071 // If the chunk is being coalesced and the current free range is
8072 // in the free lists, remove the current free range so that it
8073 // will be returned to the free lists in its entirety - all
8074 // the coalesced pieces included.
8075 if (freeRangeInFreeLists()) {
8076 FreeChunk* ffc = (FreeChunk*)freeFinger();
8077 assert(ffc->size() == pointer_delta(addr, freeFinger()),
8078 "Size of free range is inconsistent with chunk size.");
8079 if (CMSTestInFreeList) {
8080 assert(_sp->verifyChunkInFreeLists(ffc),
8081 "free range is not in free lists");
8082 }
8083 _sp->removeFreeChunkFromFreeLists(ffc);
8084 set_freeRangeInFreeLists(false);
8085 }
8086 set_lastFreeRangeCoalesced(true);
8087 }
8088 // this will be swept up when we hit the end of the free range
8089
8090 // Verify that the bit map has no bits marked between
8091 // addr and purported end of just dead object.
8092 _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
8093 }
8094 return size;
8095 }
8096
8097 size_t SweepClosure::doLiveChunk(FreeChunk* fc) {
8098 HeapWord* addr = (HeapWord*) fc;
8099 // The sweeper has just found a live object. Return any accumulated
8100 // left hand chunk to the free lists.
8101 if (inFreeRange()) {
8102 if (_sp->adaptive_freelists()) {
8103 flushCurFreeChunk(freeFinger(),
8104 pointer_delta(addr, freeFinger()));
8105 } else { // not adaptive freelists
8106 set_inFreeRange(false);
8107 // Add the free range back to the free list if it is not already
8108 // there.
8109 if (!freeRangeInFreeLists()) {
8110 assert(freeFinger() < addr, "the finger pointeth off base");
8111 if (CMSTraceSweeper) {
8112 gclog_or_tty->print("Sweep:put_free_blk 0x%x (%d) "
8113 "[coalesced:%d]\n",
8114 freeFinger(), pointer_delta(addr, freeFinger()),
8115 lastFreeRangeCoalesced());
8116 }
8117 _sp->addChunkAndRepairOffsetTable(freeFinger(),
8118 pointer_delta(addr, freeFinger()), lastFreeRangeCoalesced());
8119 }
8120 }
8121 }
8122
8123 // Common code path for original and adaptive free lists.
8124
8125 // this object is live: we'd normally expect this to be
8126 // an oop, and would like to assert the following:
8127 // assert(oop(addr)->is_oop(), "live block should be an oop");
8128 // However, as we commented above, this may be an object whose
8129 // header hasn't yet been initialized.
8130 size_t size;
8131 assert(_bitMap->isMarked(addr), "Tautology for this control point");
8132 if (_bitMap->isMarked(addr + 1)) {
8133 // Determine the size from the bit map, rather than trying to
8134 // compute it from the object header.
8135 HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
8136 size = pointer_delta(nextOneAddr + 1, addr);
8137 assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
8138 "alignment problem");
8139
8140 #ifdef DEBUG
8141 if (oop(addr)->klass_or_null() != NULL &&
8142 ( !_collector->should_unload_classes()
8143 || (oop(addr)->is_parsable()) &&
8144 oop(addr)->is_conc_safe())) {
8145 // Ignore mark word because we are running concurrent with mutators
8146 assert(oop(addr)->is_oop(true), "live block should be an oop");
8147 // is_conc_safe is checked before performing this assertion
8148 // because an object that is not is_conc_safe may yet have
8149 // the return from size() correct.
8150 assert(size ==
8151 CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
8152 "P-mark and computed size do not agree");
8153 }
8154 #endif
8155
8156 } else {
8157 // This should be an initialized object that's alive.
8158 assert(oop(addr)->klass_or_null() != NULL &&
8159 (!_collector->should_unload_classes()
8160 || oop(addr)->is_parsable()),
8161 "Should be an initialized object");
8162 // Note that there are objects used during class redefinition
8163 // (e.g., merge_cp in VM_RedefineClasses::merge_cp_and_rewrite())
8164 // which are discarded with their is_conc_safe state still
8165 // false. These objects may be floating garbage and so may be
8166 // seen here. If they are floating garbage their size
8167 // should be attainable from their klass. Note that
8168 // is_conc_safe() is true for oop(addr).
8169 // Ignore mark word because we are running concurrent with mutators
8170 assert(oop(addr)->is_oop(true), "live block should be an oop");
8171 // Verify that the bit map has no bits marked between
8172 // addr and purported end of this block.
8173 size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
8174 assert(size >= 3, "Necessary for Printezis marks to work");
8175 assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
8176 DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
8177 }
8178 return size;
8179 }
8180
8181 void SweepClosure::doPostIsFreeOrGarbageChunk(FreeChunk* fc,
8182 size_t chunkSize) {
8183 // doPostIsFreeOrGarbageChunk() should only be called in the smart allocation
8184 // scheme.
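// (Illustrative summary, not from the original comments: under the
// default FLSCoalescePolicy == 2, the chunk at hand is coalesced into
// the running free range whenever the size class of the range
// accumulated so far -- pointer_delta(fc, freeFinger()) -- is currently
// overpopulated relative to the sweep-to-sweep demand model described
// above, or when the chunk is near the large block at the end of the
// heap.)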
8185 bool fcInFreeLists = fc->isFree(); 8186 assert(_sp->adaptive_freelists(), "Should only be used in this case."); 8187 assert((HeapWord*)fc <= _limit, "sweep invariant"); 8188 if (CMSTestInFreeList && fcInFreeLists) { 8189 assert(_sp->verifyChunkInFreeLists(fc), 8190 "free chunk is not in free lists"); 8191 } 8192 8193 8194 if (CMSTraceSweeper) { 8195 gclog_or_tty->print_cr(" -- pick up another chunk at 0x%x (%d)", fc, chunkSize); 8196 } 8197 8198 HeapWord* addr = (HeapWord*) fc; 8199 8200 bool coalesce; 8201 size_t left = pointer_delta(addr, freeFinger()); 8202 size_t right = chunkSize; 8203 switch (FLSCoalescePolicy) { 8204 // numeric value forms a coalition aggressiveness metric 8205 case 0: { // never coalesce 8206 coalesce = false; 8207 break; 8208 } 8209 case 1: { // coalesce if left & right chunks on overpopulated lists 8210 coalesce = _sp->coalOverPopulated(left) && 8211 _sp->coalOverPopulated(right); 8212 break; 8213 } 8214 case 2: { // coalesce if left chunk on overpopulated list (default) 8215 coalesce = _sp->coalOverPopulated(left); 8216 break; 8217 } 8218 case 3: { // coalesce if left OR right chunk on overpopulated list 8219 coalesce = _sp->coalOverPopulated(left) || 8220 _sp->coalOverPopulated(right); 8221 break; 8222 } 8223 case 4: { // always coalesce 8224 coalesce = true; 8225 break; 8226 } 8227 default: 8228 ShouldNotReachHere(); 8229 } 8230 8231 // Should the current free range be coalesced? 8232 // If the chunk is in a free range and either we decided to coalesce above 8233 // or the chunk is near the large block at the end of the heap 8234 // (isNearLargestChunk() returns true), then coalesce this chunk. 8235 bool doCoalesce = inFreeRange() && 8236 (coalesce || _g->isNearLargestChunk((HeapWord*)fc)); 8237 if (doCoalesce) { 8238 // Coalesce the current free range on the left with the new 8239 // chunk on the right. If either is on a free list, 8240 // it must be removed from the list and stashed in the closure. 8241 if (freeRangeInFreeLists()) { 8242 FreeChunk* ffc = (FreeChunk*)freeFinger(); 8243 assert(ffc->size() == pointer_delta(addr, freeFinger()), 8244 "Size of free range is inconsistent with chunk size."); 8245 if (CMSTestInFreeList) { 8246 assert(_sp->verifyChunkInFreeLists(ffc), 8247 "Chunk is not in free lists"); 8248 } 8249 _sp->coalDeath(ffc->size()); 8250 _sp->removeFreeChunkFromFreeLists(ffc); 8251 set_freeRangeInFreeLists(false); 8252 } 8253 if (fcInFreeLists) { 8254 _sp->coalDeath(chunkSize); 8255 assert(fc->size() == chunkSize, 8256 "The chunk has the wrong size or is not in the free lists"); 8257 _sp->removeFreeChunkFromFreeLists(fc); 8258 } 8259 set_lastFreeRangeCoalesced(true); 8260 } else { // not in a free range and/or should not coalesce 8261 // Return the current free range and start a new one. 8262 if (inFreeRange()) { 8263 // In a free range but cannot coalesce with the right hand chunk. 8264 // Put the current free range into the free lists. 8265 flushCurFreeChunk(freeFinger(), 8266 pointer_delta(addr, freeFinger())); 8267 } 8268 // Set up for new free range. Pass along whether the right hand 8269 // chunk is in the free lists. 
8270 initialize_free_range((HeapWord*)fc, fcInFreeLists); 8271 } 8272 } 8273 void SweepClosure::flushCurFreeChunk(HeapWord* chunk, size_t size) { 8274 assert(inFreeRange(), "Should only be called if currently in a free range."); 8275 assert(size > 0, 8276 "A zero sized chunk cannot be added to the free lists."); 8277 if (!freeRangeInFreeLists()) { 8278 if(CMSTestInFreeList) { 8279 FreeChunk* fc = (FreeChunk*) chunk; 8280 fc->setSize(size); 8281 assert(!_sp->verifyChunkInFreeLists(fc), 8282 "chunk should not be in free lists yet"); 8283 } 8284 if (CMSTraceSweeper) { 8285 gclog_or_tty->print_cr(" -- add free block 0x%x (%d) to free lists", 8286 chunk, size); 8287 } 8288 // A new free range is going to be starting. The current 8289 // free range has not been added to the free lists yet or 8290 // was removed so add it back. 8291 // If the current free range was coalesced, then the death 8292 // of the free range was recorded. Record a birth now. 8293 if (lastFreeRangeCoalesced()) { 8294 _sp->coalBirth(size); 8295 } 8296 _sp->addChunkAndRepairOffsetTable(chunk, size, 8297 lastFreeRangeCoalesced()); 8298 } 8299 set_inFreeRange(false); 8300 set_freeRangeInFreeLists(false); 8301 } 8302 8303 // We take a break if we've been at this for a while, 8304 // so as to avoid monopolizing the locks involved. 8305 void SweepClosure::do_yield_work(HeapWord* addr) { 8306 // Return current free chunk being used for coalescing (if any) 8307 // to the appropriate freelist. After yielding, the next 8308 // free block encountered will start a coalescing range of 8309 // free blocks. If the next free block is adjacent to the 8310 // chunk just flushed, they will need to wait for the next 8311 // sweep to be coalesced. 8312 if (inFreeRange()) { 8313 flushCurFreeChunk(freeFinger(), pointer_delta(addr, freeFinger())); 8314 } 8315 8316 // First give up the locks, then yield, then re-lock. 8317 // We should probably use a constructor/destructor idiom to 8318 // do this unlock/lock or modify the MutexUnlocker class to 8319 // serve our purpose. XXX 8320 assert_lock_strong(_bitMap->lock()); 8321 assert_lock_strong(_freelistLock); 8322 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), 8323 "CMS thread should hold CMS token"); 8324 _bitMap->lock()->unlock(); 8325 _freelistLock->unlock(); 8326 ConcurrentMarkSweepThread::desynchronize(true); 8327 ConcurrentMarkSweepThread::acknowledge_yield_request(); 8328 _collector->stopTimer(); 8329 GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr()); 8330 if (PrintCMSStatistics != 0) { 8331 _collector->incrementYields(); 8332 } 8333 _collector->icms_wait(); 8334 8335 // See the comment in coordinator_yield() 8336 for (unsigned i = 0; i < CMSYieldSleepCount && 8337 ConcurrentMarkSweepThread::should_yield() && 8338 !CMSCollector::foregroundGCIsActive(); ++i) { 8339 os::sleep(Thread::current(), 1, false); 8340 ConcurrentMarkSweepThread::acknowledge_yield_request(); 8341 } 8342 8343 ConcurrentMarkSweepThread::synchronize(true); 8344 _freelistLock->lock(); 8345 _bitMap->lock()->lock_without_safepoint_check(); 8346 _collector->startTimer(); 8347 } 8348 8349 #ifndef PRODUCT 8350 // This is actually very useful in a product build if it can 8351 // be called from the debugger. Compile it into the product 8352 // as needed. 
8353 bool debug_verifyChunkInFreeLists(FreeChunk* fc) { 8354 return debug_cms_space->verifyChunkInFreeLists(fc); 8355 } 8356 8357 void SweepClosure::record_free_block_coalesced(FreeChunk* fc) const { 8358 if (CMSTraceSweeper) { 8359 gclog_or_tty->print("Sweep:coal_free_blk 0x%x (%d)\n", fc, fc->size()); 8360 } 8361 } 8362 #endif 8363 8364 // CMSIsAliveClosure 8365 bool CMSIsAliveClosure::do_object_b(oop obj) { 8366 HeapWord* addr = (HeapWord*)obj; 8367 return addr != NULL && 8368 (!_span.contains(addr) || _bit_map->isMarked(addr)); 8369 } 8370 8371 CMSKeepAliveClosure::CMSKeepAliveClosure( CMSCollector* collector, 8372 MemRegion span, 8373 CMSBitMap* bit_map, CMSMarkStack* mark_stack, 8374 CMSMarkStack* revisit_stack, bool cpc): 8375 KlassRememberingOopClosure(collector, NULL, revisit_stack), 8376 _span(span), 8377 _bit_map(bit_map), 8378 _mark_stack(mark_stack), 8379 _concurrent_precleaning(cpc) { 8380 assert(!_span.is_empty(), "Empty span could spell trouble"); 8381 } 8382 8383 8384 // CMSKeepAliveClosure: the serial version 8385 void CMSKeepAliveClosure::do_oop(oop obj) { 8386 HeapWord* addr = (HeapWord*)obj; 8387 if (_span.contains(addr) && 8388 !_bit_map->isMarked(addr)) { 8389 _bit_map->mark(addr); 8390 bool simulate_overflow = false; 8391 NOT_PRODUCT( 8392 if (CMSMarkStackOverflowALot && 8393 _collector->simulate_overflow()) { 8394 // simulate a stack overflow 8395 simulate_overflow = true; 8396 } 8397 ) 8398 if (simulate_overflow || !_mark_stack->push(obj)) { 8399 if (_concurrent_precleaning) { 8400 // We dirty the overflown object and let the remark 8401 // phase deal with it. 8402 assert(_collector->overflow_list_is_empty(), "Error"); 8403 // In the case of object arrays, we need to dirty all of 8404 // the cards that the object spans. No locking or atomics 8405 // are needed since no one else can be mutating the mod union 8406 // table. 8407 if (obj->is_objArray()) { 8408 size_t sz = obj->size(); 8409 HeapWord* end_card_addr = 8410 (HeapWord*)round_to((intptr_t)(addr+sz), CardTableModRefBS::card_size); 8411 MemRegion redirty_range = MemRegion(addr, end_card_addr); 8412 assert(!redirty_range.is_empty(), "Arithmetical tautology"); 8413 _collector->_modUnionTable.mark_range(redirty_range); 8414 } else { 8415 _collector->_modUnionTable.mark(addr); 8416 } 8417 _collector->_ser_kac_preclean_ovflw++; 8418 } else { 8419 _collector->push_on_overflow_list(obj); 8420 _collector->_ser_kac_ovflw++; 8421 } 8422 } 8423 } 8424 } 8425 8426 void CMSKeepAliveClosure::do_oop(oop* p) { CMSKeepAliveClosure::do_oop_work(p); } 8427 void CMSKeepAliveClosure::do_oop(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); } 8428 8429 // CMSParKeepAliveClosure: a parallel version of the above. 8430 // The work queues are private to each closure (thread), 8431 // but (may be) available for stealing by other threads. 8432 void CMSParKeepAliveClosure::do_oop(oop obj) { 8433 HeapWord* addr = (HeapWord*)obj; 8434 if (_span.contains(addr) && 8435 !_bit_map->isMarked(addr)) { 8436 // In general, during recursive tracing, several threads 8437 // may be concurrently getting here; the first one to 8438 // "tag" it, claims it. 
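// (Added note, assuming CMSBitMap::par_mark() behaves as elsewhere in
// this file: it is an atomic test-and-set of the bit, so exactly one of
// the racing threads sees "true" and becomes responsible for pushing the
// object; the losers fall through, keeping each object on at most one
// work queue.)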
8439 if (_bit_map->par_mark(addr)) { 8440 bool res = _work_queue->push(obj); 8441 assert(res, "Low water mark should be much less than capacity"); 8442 // Do a recursive trim in the hope that this will keep 8443 // stack usage lower, but leave some oops for potential stealers 8444 trim_queue(_low_water_mark); 8445 } // Else, another thread got there first 8446 } 8447 } 8448 8449 void CMSParKeepAliveClosure::do_oop(oop* p) { CMSParKeepAliveClosure::do_oop_work(p); } 8450 void CMSParKeepAliveClosure::do_oop(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); } 8451 8452 void CMSParKeepAliveClosure::trim_queue(uint max) { 8453 while (_work_queue->size() > max) { 8454 oop new_oop; 8455 if (_work_queue->pop_local(new_oop)) { 8456 assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop"); 8457 assert(_bit_map->isMarked((HeapWord*)new_oop), 8458 "no white objects on this stack!"); 8459 assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop"); 8460 // iterate over the oops in this oop, marking and pushing 8461 // the ones in CMS heap (i.e. in _span). 8462 new_oop->oop_iterate(&_mark_and_push); 8463 } 8464 } 8465 } 8466 8467 CMSInnerParMarkAndPushClosure::CMSInnerParMarkAndPushClosure( 8468 CMSCollector* collector, 8469 MemRegion span, CMSBitMap* bit_map, 8470 CMSMarkStack* revisit_stack, 8471 OopTaskQueue* work_queue): 8472 Par_KlassRememberingOopClosure(collector, NULL, revisit_stack), 8473 _span(span), 8474 _bit_map(bit_map), 8475 _work_queue(work_queue) { } 8476 8477 void CMSInnerParMarkAndPushClosure::do_oop(oop obj) { 8478 HeapWord* addr = (HeapWord*)obj; 8479 if (_span.contains(addr) && 8480 !_bit_map->isMarked(addr)) { 8481 if (_bit_map->par_mark(addr)) { 8482 bool simulate_overflow = false; 8483 NOT_PRODUCT( 8484 if (CMSMarkStackOverflowALot && 8485 _collector->par_simulate_overflow()) { 8486 // simulate a stack overflow 8487 simulate_overflow = true; 8488 } 8489 ) 8490 if (simulate_overflow || !_work_queue->push(obj)) { 8491 _collector->par_push_on_overflow_list(obj); 8492 _collector->_par_kac_ovflw++; 8493 } 8494 } // Else another thread got there already 8495 } 8496 } 8497 8498 void CMSInnerParMarkAndPushClosure::do_oop(oop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); } 8499 void CMSInnerParMarkAndPushClosure::do_oop(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); } 8500 8501 ////////////////////////////////////////////////////////////////// 8502 // CMSExpansionCause ///////////////////////////// 8503 ////////////////////////////////////////////////////////////////// 8504 const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) { 8505 switch (cause) { 8506 case _no_expansion: 8507 return "No expansion"; 8508 case _satisfy_free_ratio: 8509 return "Free ratio"; 8510 case _satisfy_promotion: 8511 return "Satisfy promotion"; 8512 case _satisfy_allocation: 8513 return "allocation"; 8514 case _allocate_par_lab: 8515 return "Par LAB"; 8516 case _allocate_par_spooling_space: 8517 return "Par Spooling Space"; 8518 case _adaptive_size_policy: 8519 return "Ergonomics"; 8520 default: 8521 return "unknown"; 8522 } 8523 } 8524 8525 void CMSDrainMarkingStackClosure::do_void() { 8526 // the max number to take from overflow list at a time 8527 const size_t num = _mark_stack->capacity()/4; 8528 assert(!_concurrent_precleaning || _collector->overflow_list_is_empty(), 8529 "Overflow list should be NULL during concurrent phases"); 8530 while (!_mark_stack->isEmpty() || 8531 // if stack is empty, check the overflow list 8532 
_collector->take_from_overflow_list(num, _mark_stack)) {
8533 oop obj = _mark_stack->pop();
8534 HeapWord* addr = (HeapWord*)obj;
8535 assert(_span.contains(addr), "Should be within span");
8536 assert(_bit_map->isMarked(addr), "Should be marked");
8537 assert(obj->is_oop(), "Should be an oop");
8538 obj->oop_iterate(_keep_alive);
8539 }
8540 }
8541
8542 void CMSParDrainMarkingStackClosure::do_void() {
8543 // drain queue
8544 trim_queue(0);
8545 }
8546
8547 // Trim our work_queue so its length is below max at return
8548 void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
8549 while (_work_queue->size() > max) {
8550 oop new_oop;
8551 if (_work_queue->pop_local(new_oop)) {
8552 assert(new_oop->is_oop(), "Expected an oop");
8553 assert(_bit_map->isMarked((HeapWord*)new_oop),
8554 "no white objects on this stack!");
8555 assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
8556 // iterate over the oops in this oop, marking and pushing
8557 // the ones in CMS heap (i.e. in _span).
8558 new_oop->oop_iterate(&_mark_and_push);
8559 }
8560 }
8561 }
8562
8563 ////////////////////////////////////////////////////////////////////
8564 // Support for Marking Stack Overflow list handling and related code
8565 ////////////////////////////////////////////////////////////////////
8566 // Much of the following code is similar in shape and spirit to the
8567 // code used in ParNewGC. We should try to share that code
8568 // as much as possible in the future.
8569
8570 #ifndef PRODUCT
8571 // Debugging support for CMSStackOverflowALot
8572
8573 // It's OK to call this multi-threaded; the worst thing
8574 // that can happen is that we'll get a bunch of closely
8575 // spaced simulated overflows, but that's OK, in fact
8576 // probably good as it would exercise the overflow code
8577 // under contention.
8578 bool CMSCollector::simulate_overflow() {
8579 if (_overflow_counter-- <= 0) { // just being defensive
8580 _overflow_counter = CMSMarkStackOverflowInterval;
8581 return true;
8582 } else {
8583 return false;
8584 }
8585 }
8586
8587 bool CMSCollector::par_simulate_overflow() {
8588 return simulate_overflow();
8589 }
8590 #endif
8591
8592 // Single-threaded
8593 bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
8594 assert(stack->isEmpty(), "Expected precondition");
8595 assert(stack->capacity() > num, "Shouldn't bite more than can chew");
8596 size_t i = num;
8597 oop cur = _overflow_list;
8598 const markOop proto = markOopDesc::prototype();
8599 NOT_PRODUCT(ssize_t n = 0;)
8600 for (oop next; i > 0 && cur != NULL; cur = next, i--) {
8601 next = oop(cur->mark());
8602 cur->set_mark(proto); // until proven otherwise
8603 assert(cur->is_oop(), "Should be an oop");
8604 bool res = stack->push(cur);
8605 assert(res, "Bit off more than can chew?");
8606 NOT_PRODUCT(n++;)
8607 }
8608 _overflow_list = cur;
8609 #ifndef PRODUCT
8610 assert(_num_par_pushes >= n, "Too many pops?");
8611 _num_par_pushes -= n;
8612 #endif
8613 return !stack->isEmpty();
8614 }
8615
8616 #define BUSY (oop(0x1aff1aff))
8617 // (MT-safe) Get a prefix of at most "num" from the list.
8618 // The overflow list is chained through the mark word of
8619 // each object in the list. We fetch the entire list,
8620 // break off a prefix of the right size and return the
8621 // remainder. If other threads try to take objects from
8622 // the overflow list at that time, they will wait for
8623 // some time to see if data becomes available.
If (and
8624 // only if) another thread places one or more object(s)
8625 // on the global list before we have returned the suffix
8626 // to the global list, we will walk down our local list
8627 // to find its end and append the global list to
8628 // our suffix before returning it. This suffix walk can
8629 // prove to be expensive (quadratic in the amount of traffic)
8630 // when there are many objects in the overflow list and
8631 // there is much producer-consumer contention on the list.
8632 // *NOTE*: The overflow list manipulation code here and
8633 // in ParNewGeneration:: are very similar in shape,
8634 // except that in the ParNew case we use the old (from/eden)
8635 // copy of the object to thread the list via its klass word.
8636 // Because of the common code, if you make any changes in
8637 // the code below, please check the ParNew version to see if
8638 // similar changes might be needed.
8639 // CR 6797058 has been filed to consolidate the common code.
8640 bool CMSCollector::par_take_from_overflow_list(size_t num,
8641 OopTaskQueue* work_q) {
8642 assert(work_q->size() == 0, "First empty local work queue");
8643 assert(num < work_q->max_elems(), "Can't bite more than we can chew");
8644 if (_overflow_list == NULL) {
8645 return false;
8646 }
8647 // Grab the entire list; we'll put back a suffix
8648 oop prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
8649 Thread* tid = Thread::current();
8650 size_t CMSOverflowSpinCount = (size_t)ParallelGCThreads;
8651 size_t sleep_time_millis = MAX2((size_t)1, num/100);
8652 // If the list is busy, we spin for a short while,
8653 // sleeping between attempts to get the list.
8654 for (size_t spin = 0; prefix == BUSY && spin < CMSOverflowSpinCount; spin++) {
8655 os::sleep(tid, sleep_time_millis, false);
8656 if (_overflow_list == NULL) {
8657 // Nothing left to take
8658 return false;
8659 } else if (_overflow_list != BUSY) {
8660 // Try to grab the prefix
8661 prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
8662 }
8663 }
8664 // If the list was found to be empty, or we spun long
8665 // enough, we give up and return empty-handed. If we leave
8666 // the list in the BUSY state below, it must be the case that
8667 // some other thread holds the overflow list and will set it
8668 // to a non-BUSY state in the future.
8669 if (prefix == NULL || prefix == BUSY) {
8670 // Nothing to take or waited long enough
8671 if (prefix == NULL) {
8672 // Write back the NULL in case we overwrote it with BUSY above
8673 // and it is still the same value.
8674 (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
8675 }
8676 return false;
8677 }
8678 assert(prefix != NULL && prefix != BUSY, "Error");
8679 size_t i = num;
8680 oop cur = prefix;
8681 // Walk down the first "num" objects, unless we reach the end.
8682 for (; i > 1 && cur->mark() != NULL; cur = oop(cur->mark()), i--);
8683 if (cur->mark() == NULL) {
8684 // We have "num" or fewer elements in the list, so there
8685 // is nothing to return to the global list.
8686 // Write back the NULL in lieu of the BUSY we wrote
8687 // above, if it is still the same value.
8688 if (_overflow_list == BUSY) {
8689 (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
8690 }
8691 } else {
8692 // Chop off the suffix and return it to the global list.
8693     assert(cur->mark() != BUSY, "Error");
8694     oop suffix_head = cur->mark(); // suffix will be put back on global list
8695     cur->set_mark(NULL);           // break off suffix
8696     // It's possible that the list is still in the empty (BUSY) state
8697     // we left it in a short while ago; in that case we may be
8698     // able to place back the suffix without incurring the cost
8699     // of a walk down the list.
8700     oop observed_overflow_list = _overflow_list;
8701     oop cur_overflow_list = observed_overflow_list;
8702     bool attached = false;
8703     while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
8704       observed_overflow_list =
8705         (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
8706       if (cur_overflow_list == observed_overflow_list) {
8707         attached = true;
8708         break;
8709       } else cur_overflow_list = observed_overflow_list;
8710     }
8711     if (!attached) {
8712       // Too bad, someone else sneaked in (at least) an element; we'll need
8713       // to do a splice. Find tail of suffix so we can prepend suffix to global
8714       // list.
8715       for (cur = suffix_head; cur->mark() != NULL; cur = (oop)(cur->mark()));
8716       oop suffix_tail = cur;
8717       assert(suffix_tail != NULL && suffix_tail->mark() == NULL,
8718              "Tautology");
8719       observed_overflow_list = _overflow_list;
8720       do {
8721         cur_overflow_list = observed_overflow_list;
8722         if (cur_overflow_list != BUSY) {
8723           // Do the splice ...
8724           suffix_tail->set_mark(markOop(cur_overflow_list));
8725         } else { // cur_overflow_list == BUSY
8726           suffix_tail->set_mark(NULL);
8727         }
8728         // ... and try to place spliced list back on overflow_list ...
8729         observed_overflow_list =
8730           (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
8731       } while (cur_overflow_list != observed_overflow_list);
8732       // ... until we have succeeded in doing so.
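      // Note that a concurrent par_push_on_overflow_list() may still
      // prepend elements while we hold BUSY (a pusher treats BUSY as an
      // empty list and NULL-terminates its new element), which is why
      // the cmpxchg above must be retried with a freshly observed head.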
8733     }
8734   }
8735
8736   // Push the prefix elements on work_q
8737   assert(prefix != NULL, "control point invariant");
8738   const markOop proto = markOopDesc::prototype();
8739   oop next;
8740   NOT_PRODUCT(ssize_t n = 0;)
8741   for (cur = prefix; cur != NULL; cur = next) {
8742     next = oop(cur->mark());
8743     cur->set_mark(proto);   // until proven otherwise
8744     assert(cur->is_oop(), "Should be an oop");
8745     bool res = work_q->push(cur);
8746     assert(res, "Bit off more than we can chew?");
8747     NOT_PRODUCT(n++;)
8748   }
8749 #ifndef PRODUCT
8750   assert(_num_par_pushes >= n, "Too many pops?");
8751   Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
8752 #endif
8753   return true;
8754 }
8755
8756 // Single-threaded
8757 void CMSCollector::push_on_overflow_list(oop p) {
8758   NOT_PRODUCT(_num_par_pushes++;)
8759   assert(p->is_oop(), "Not an oop");
8760   preserve_mark_if_necessary(p);
8761   p->set_mark((markOop)_overflow_list);
8762   _overflow_list = p;
8763 }
8764
8765 // Multi-threaded; use CAS to prepend to overflow list
8766 void CMSCollector::par_push_on_overflow_list(oop p) {
8767   NOT_PRODUCT(Atomic::inc_ptr(&_num_par_pushes);)
8768   assert(p->is_oop(), "Not an oop");
8769   par_preserve_mark_if_necessary(p);
8770   oop observed_overflow_list = _overflow_list;
8771   oop cur_overflow_list;
8772   do {
8773     cur_overflow_list = observed_overflow_list;
8774     if (cur_overflow_list != BUSY) {
8775       p->set_mark(markOop(cur_overflow_list));
8776     } else {
8777       p->set_mark(NULL);
8778     }
8779     observed_overflow_list =
8780       (oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
8781   } while (cur_overflow_list != observed_overflow_list);
8782 }
8783 #undef BUSY
8784
8785 // Single-threaded
8786 // General Note on GrowableArray: pushes may silently fail
8787 // because we are (temporarily) out of C-heap for expanding
8788 // the stack. The problem is quite ubiquitous and affects
8789 // a lot of code in the JVM. The prudent thing for GrowableArray
8790 // to do (for now) is to exit with an error. However, that may
8791 // be too draconian in some cases because the caller may be
8792 // able to recover without much harm. For such cases, we
8793 // should probably introduce a "soft_push" method that returns
8794 // an indication of success or failure, on the assumption that
8795 // the caller may be able to recover from a failure; code in
8796 // the VM could then be changed, incrementally, to deal with
8797 // such failures where possible, thereby hardening the VM
8798 // in such low-resource situations.
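//
// For illustration only, a hypothetical shape such a method might
// take (soft_push/try_grow are not existing GrowableArray methods;
// member names loosely follow GrowableArray's internals):
//
//   template <class E>
//   bool GrowableArray<E>::soft_push(const E& elem) {
//     if (_len == _max && !try_grow()) {
//       return false;  // out of C-heap; report failure to the caller
//     }
//     _data[_len++] = elem;
//     return true;
//   }
//
// A caller such as preserve_mark_work() could then attempt recovery
// instead of exiting the VM.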
8799 void CMSCollector::preserve_mark_work(oop p, markOop m) {
8800   if (_preserved_oop_stack == NULL) {
8801     assert(_preserved_mark_stack == NULL,
8802            "bijection with preserved_oop_stack");
8803     // Allocate the stacks
8804     _preserved_oop_stack  = new (ResourceObj::C_HEAP)
8805       GrowableArray<oop>(PreserveMarkStackSize, true);
8806     _preserved_mark_stack = new (ResourceObj::C_HEAP)
8807       GrowableArray<markOop>(PreserveMarkStackSize, true);
8808     if (_preserved_oop_stack == NULL || _preserved_mark_stack == NULL) {
8809       vm_exit_out_of_memory(2 * PreserveMarkStackSize * sizeof(oop) /* punt */,
8810                             "Preserved Mark/Oop Stack for CMS (C-heap)");
8811     }
8812   }
8813   _preserved_oop_stack->push(p);
8814   _preserved_mark_stack->push(m);
8815   assert(m == p->mark(), "Mark word changed");
8816   assert(_preserved_oop_stack->length() == _preserved_mark_stack->length(),
8817          "bijection");
8818 }
8819
8820 // Single-threaded
8821 void CMSCollector::preserve_mark_if_necessary(oop p) {
8822   markOop m = p->mark();
8823   if (m->must_be_preserved(p)) {
8824     preserve_mark_work(p, m);
8825   }
8826 }
8827
8828 void CMSCollector::par_preserve_mark_if_necessary(oop p) {
8829   markOop m = p->mark();
8830   if (m->must_be_preserved(p)) {
8831     MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
8832     // Even though we read the mark word without holding
8833     // the lock, we are assured that it will not change
8834     // because we "own" this oop, so no other thread can
8835     // be trying to push it on the overflow list; see
8836     // the assertion in preserve_mark_work() that checks
8837     // that m == p->mark().
8838     preserve_mark_work(p, m);
8839   }
8840 }
8841
8842 // We should be able to do this multi-threaded, with a
8843 // chunk of the stack being a task (this is correct
8844 // because each oop only ever appears once in the
8845 // overflow list). However, it's not very easy to
8846 // completely overlap this with other operations, so it
8847 // will generally not be done until all work has been
8848 // completed. Because we expect the preserved oop
8849 // stack (set) to be small, it's probably fine to do this
8850 // single-threaded. We can explore cleverer
8851 // concurrent/overlapped/parallel processing of preserved
8852 // marks if we feel the need for this in the future.
8853 // Stack overflow should be so rare in practice, and its
8854 // effect on performance when it does happen so great,
8855 // that the cost of restoring the marks will likely just
8856 // be in the noise anyway.
8857 void CMSCollector::restore_preserved_marks_if_any() {
8858   if (_preserved_oop_stack == NULL) {
8859     assert(_preserved_mark_stack == NULL,
8860            "bijection with preserved_oop_stack");
8861     return;
8862   }
8863
8864   assert(SafepointSynchronize::is_at_safepoint(),
8865          "world should be stopped");
8866   assert(Thread::current()->is_ConcurrentGC_thread() ||
8867          Thread::current()->is_VM_thread(),
8868          "should be single-threaded");
8869
8870   int length = _preserved_oop_stack->length();
8871   assert(_preserved_mark_stack->length() == length, "bijection");
8872   for (int i = 0; i < length; i++) {
8873     oop p = _preserved_oop_stack->at(i);
8874     assert(p->is_oop(), "Should be an oop");
8875     assert(_span.contains(p), "oop should be in _span");
8876     assert(p->mark() == markOopDesc::prototype(),
8877            "Set when taken from overflow list");
8878     markOop m = _preserved_mark_stack->at(i);
8879     p->set_mark(m);
8880   }
8881   _preserved_mark_stack->clear();
8882   _preserved_oop_stack->clear();
8883   assert(_preserved_mark_stack->is_empty() &&
8884          _preserved_oop_stack->is_empty(),
8885          "stacks were cleared above");
8886 }
8887
8888 #ifndef PRODUCT
8889 bool CMSCollector::no_preserved_marks() const {
8890   return (   (_preserved_mark_stack == NULL
8891            && _preserved_oop_stack == NULL)
8892           || (_preserved_mark_stack->is_empty()
8893            && _preserved_oop_stack->is_empty()));
8894 }
8895 #endif
8896
8897 CMSAdaptiveSizePolicy* ASConcurrentMarkSweepGeneration::cms_size_policy() const
8898 {
8899   GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();
8900   CMSAdaptiveSizePolicy* size_policy =
8901     (CMSAdaptiveSizePolicy*) gch->gen_policy()->size_policy();
8902   assert(size_policy->is_gc_cms_adaptive_size_policy(),
8903          "Wrong type for size policy");
8904   return size_policy;
8905 }
8906
8907 void ASConcurrentMarkSweepGeneration::resize(size_t cur_promo_size,
8908                                              size_t desired_promo_size) {
8909   if (cur_promo_size < desired_promo_size) {
8910     size_t expand_bytes = desired_promo_size - cur_promo_size;
8911     if (PrintAdaptiveSizePolicy && Verbose) {
8912       gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
8913         "Expanding tenured generation by " SIZE_FORMAT " (bytes)",
8914         expand_bytes);
8915     }
8916     expand(expand_bytes,
8917            MinHeapDeltaBytes,
8918            CMSExpansionCause::_adaptive_size_policy);
8919   } else if (desired_promo_size < cur_promo_size) {
8920     size_t shrink_bytes = cur_promo_size - desired_promo_size;
8921     if (PrintAdaptiveSizePolicy && Verbose) {
8922       gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
8923         "Shrinking tenured generation by " SIZE_FORMAT " (bytes)",
8924         shrink_bytes);
8925     }
8926     shrink(shrink_bytes);
8927   }
8928 }
8929
8930 CMSGCAdaptivePolicyCounters* ASConcurrentMarkSweepGeneration::gc_adaptive_policy_counters() {
8931   GenCollectedHeap* gch = GenCollectedHeap::heap();
8932   CMSGCAdaptivePolicyCounters* counters =
8933     (CMSGCAdaptivePolicyCounters*) gch->collector_policy()->counters();
8934   assert(counters->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
8935          "Wrong kind of counters");
8936   return counters;
8937 }
8938
8939
8940 void ASConcurrentMarkSweepGeneration::update_counters() {
8941   if (UsePerfData) {
8942     _space_counters->update_all();
8943     _gen_counters->update_all();
8944     CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
8945     GenCollectedHeap* gch = GenCollectedHeap::heap();
8946     CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
8947     assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
type"); 8949 counters->update_counters(gc_stats_l); 8950 } 8951 } 8952 8953 void ASConcurrentMarkSweepGeneration::update_counters(size_t used) { 8954 if (UsePerfData) { 8955 _space_counters->update_used(used); 8956 _space_counters->update_capacity(); 8957 _gen_counters->update_all(); 8958 8959 CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters(); 8960 GenCollectedHeap* gch = GenCollectedHeap::heap(); 8961 CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats(); 8962 assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind, 8963 "Wrong gc statistics type"); 8964 counters->update_counters(gc_stats_l); 8965 } 8966 } 8967 8968 // The desired expansion delta is computed so that: 8969 // . desired free percentage or greater is used 8970 void ASConcurrentMarkSweepGeneration::compute_new_size() { 8971 assert_locked_or_safepoint(Heap_lock); 8972 8973 GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap(); 8974 8975 // If incremental collection failed, we just want to expand 8976 // to the limit. 8977 if (incremental_collection_failed()) { 8978 clear_incremental_collection_failed(); 8979 grow_to_reserved(); 8980 return; 8981 } 8982 8983 assert(UseAdaptiveSizePolicy, "Should be using adaptive sizing"); 8984 8985 assert(gch->kind() == CollectedHeap::GenCollectedHeap, 8986 "Wrong type of heap"); 8987 int prev_level = level() - 1; 8988 assert(prev_level >= 0, "The cms generation is the lowest generation"); 8989 Generation* prev_gen = gch->get_gen(prev_level); 8990 assert(prev_gen->kind() == Generation::ASParNew, 8991 "Wrong type of young generation"); 8992 ParNewGeneration* younger_gen = (ParNewGeneration*) prev_gen; 8993 size_t cur_eden = younger_gen->eden()->capacity(); 8994 CMSAdaptiveSizePolicy* size_policy = cms_size_policy(); 8995 size_t cur_promo = free(); 8996 size_policy->compute_tenured_generation_free_space(cur_promo, 8997 max_available(), 8998 cur_eden); 8999 resize(cur_promo, size_policy->promo_size()); 9000 9001 // Record the new size of the space in the cms generation 9002 // that is available for promotions. This is temporary. 9003 // It should be the desired promo size. 9004 size_policy->avg_cms_promo()->sample(free()); 9005 size_policy->avg_old_live()->sample(used()); 9006 9007 if (UsePerfData) { 9008 CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters(); 9009 counters->update_cms_capacity_counter(capacity()); 9010 } 9011 } 9012 9013 void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) { 9014 assert_locked_or_safepoint(Heap_lock); 9015 assert_lock_strong(freelistLock()); 9016 HeapWord* old_end = _cmsSpace->end(); 9017 HeapWord* unallocated_start = _cmsSpace->unallocated_block(); 9018 assert(old_end >= unallocated_start, "Miscalculation of unallocated_start"); 9019 FreeChunk* chunk_at_end = find_chunk_at_end(); 9020 if (chunk_at_end == NULL) { 9021 // No room to shrink 9022 if (PrintGCDetails && Verbose) { 9023 gclog_or_tty->print_cr("No room to shrink: old_end " 9024 PTR_FORMAT " unallocated_start " PTR_FORMAT 9025 " chunk_at_end " PTR_FORMAT, 9026 old_end, unallocated_start, chunk_at_end); 9027 } 9028 return; 9029 } else { 9030 9031 // Find the chunk at the end of the space and determine 9032 // how much it can be shrunk. 
9033     size_t shrinkable_size_in_bytes = chunk_at_end->size();
9034     size_t aligned_shrinkable_size_in_bytes =
9035       align_size_down(shrinkable_size_in_bytes, os::vm_page_size());
9036     assert(unallocated_start <= chunk_at_end->end(),
9037            "Inconsistent chunk at end of space");
9038     size_t bytes = MIN2(desired_bytes, aligned_shrinkable_size_in_bytes);
9039     size_t word_size_before = heap_word_size(_virtual_space.committed_size());
9040
9041     // Shrink the underlying space
9042     _virtual_space.shrink_by(bytes);
9043     if (PrintGCDetails && Verbose) {
9044       gclog_or_tty->print_cr("ASConcurrentMarkSweepGeneration::shrink_by:"
9045         " desired_bytes " SIZE_FORMAT
9046         " shrinkable_size_in_bytes " SIZE_FORMAT
9047         " aligned_shrinkable_size_in_bytes " SIZE_FORMAT
9048         " bytes " SIZE_FORMAT,
9049         desired_bytes, shrinkable_size_in_bytes,
9050         aligned_shrinkable_size_in_bytes, bytes);
9051       gclog_or_tty->print_cr("  old_end " PTR_FORMAT
9052         "  unallocated_start " PTR_FORMAT,
9053         old_end, unallocated_start);
9054     }
9055
9056     // If the space did shrink (shrinking is not guaranteed),
9057     // shrink the chunk at the end by the appropriate amount.
9058     if (((HeapWord*)_virtual_space.high()) < old_end) {
9059       size_t new_word_size =
9060         heap_word_size(_virtual_space.committed_size());
9061
9062       // Have to remove the chunk from the dictionary because it is
9063       // changing size and might be elsewhere in the dictionary.
9064
9065       // Get the chunk at the end, shrink it, and put it
9066       // back.
9067       _cmsSpace->removeChunkFromDictionary(chunk_at_end);
9068       size_t word_size_change = word_size_before - new_word_size;
9069       size_t chunk_at_end_old_size = chunk_at_end->size();
9070       assert(chunk_at_end_old_size >= word_size_change,
9071              "Shrink is too large");
9072       chunk_at_end->setSize(chunk_at_end_old_size -
9073                             word_size_change);
9074       _cmsSpace->freed((HeapWord*) chunk_at_end->end(),
9075                        word_size_change);
9076
9077       _cmsSpace->returnChunkToDictionary(chunk_at_end);
9078
9079       MemRegion mr(_cmsSpace->bottom(), new_word_size);
9080       _bts->resize(new_word_size);  // resize the block offset shared array
9081       Universe::heap()->barrier_set()->resize_covered_region(mr);
9082       _cmsSpace->assert_locked();
9083       _cmsSpace->set_end((HeapWord*)_virtual_space.high());
9084
9085       NOT_PRODUCT(_cmsSpace->dictionary()->verify());
9086
9087       // update the space and generation capacity counters
9088       if (UsePerfData) {
9089         _space_counters->update_capacity();
9090         _gen_counters->update_all();
9091       }
9092
9093       if (Verbose && PrintGCDetails) {
9094         size_t new_mem_size = _virtual_space.committed_size();
9095         size_t old_mem_size = new_mem_size + bytes;
9096         gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
9097           name(), old_mem_size/K, bytes/K, new_mem_size/K);
9098       }
9099     }
9100
9101     assert(_cmsSpace->unallocated_block() <= _cmsSpace->end(),
9102            "Inconsistency at end of space");
9103     assert(chunk_at_end->end() == _cmsSpace->end(),
9104            "Shrinking is inconsistent");
9105     return;
9106   }
9107 }
9108
9109 // Transfer some number of overflowed objects to the usual marking
9110 // stack. Return true if some objects were transferred.
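// (The number transferred is at most a quarter of the marking stack's
// spare capacity, further capped by ParGCDesiredObjsFromOverflowList;
// see the computation of "num" below.)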
9111 bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
9112   size_t num = MIN2((size_t)(_mark_stack->capacity() - _mark_stack->length())/4,
9113                     (size_t)ParGCDesiredObjsFromOverflowList);
9114
9115   bool res = _collector->take_from_overflow_list(num, _mark_stack);
9116   assert(_collector->overflow_list_is_empty() || res,
9117          "If list is not empty, we should have taken something");
9118   assert(!res || !_mark_stack->isEmpty(),
9119          "If we took something, it should now be on our stack");
9120   return res;
9121 }
9122
9123 size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
9124   size_t res = _sp->block_size_no_stall(addr, _collector);
9125   assert(res != 0, "Should always be able to compute a size");
9126   if (_sp->block_is_obj(addr)) {
9127     if (_live_bit_map->isMarked(addr)) {
9128       // It can't have been dead in a previous cycle
9129       guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");
9130     } else {
9131       _dead_bit_map->mark(addr);      // mark the dead object
9132     }
9133   }
9134   return res;
9135 }
9136
9137 TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase): TraceMemoryManagerStats() {
9138
9139   switch (phase) {
9140     case CMSCollector::InitialMarking:
9141       initialize(true  /* fullGC */,
9142                  true  /* recordGCBeginTime */,
9143                  true  /* recordPreGCUsage */,
9144                  false /* recordPeakUsage */,
9145                  false /* recordPostGCUsage */,
9146                  true  /* recordAccumulatedGCTime */,
9147                  false /* recordGCEndTime */,
9148                  false /* countCollection */);
9149       break;
9150
9151     case CMSCollector::FinalMarking:
9152       initialize(true  /* fullGC */,
9153                  false /* recordGCBeginTime */,
9154                  false /* recordPreGCUsage */,
9155                  false /* recordPeakUsage */,
9156                  false /* recordPostGCUsage */,
9157                  true  /* recordAccumulatedGCTime */,
9158                  false /* recordGCEndTime */,
9159                  false /* countCollection */);
9160       break;
9161
9162     case CMSCollector::Sweeping:
9163       initialize(true  /* fullGC */,
9164                  false /* recordGCBeginTime */,
9165                  false /* recordPreGCUsage */,
9166                  true  /* recordPeakUsage */,
9167                  true  /* recordPostGCUsage */,
9168                  false /* recordAccumulatedGCTime */,
9169                  true  /* recordGCEndTime */,
9170                  true  /* countCollection */);
9171       break;
9172
9173     default:
9174       ShouldNotReachHere();
9175   }
9176 }
9177
9178 // When bailing out of CMS due to a concurrent mode failure
9179 TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(): TraceMemoryManagerStats() {
9180   initialize(true /* fullGC */,
9181              true /* recordGCBeginTime */,
9182              true /* recordPreGCUsage */,
9183              true /* recordPeakUsage */,
9184              true /* recordPostGCUsage */,
9185              true /* recordAccumulatedGCTime */,
9186              true /* recordGCEndTime */,
9187              true /* countCollection */);
9188 }
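
// Illustrative usage note (assuming the usual RAII convention of the
// TraceMemoryManagerStats base class, whose destructor records the
// end-of-phase statistics): construct one of these on the stack at the
// start of a phase, e.g.
//   TraceCMSMemoryManagerStats tms(CMSCollector::InitialMarking);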