1 /*
   2  * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/classLoaderData.hpp"
  27 #include "classfile/symbolTable.hpp"
  28 #include "classfile/systemDictionary.hpp"
  29 #include "code/codeCache.hpp"
  30 #include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
  31 #include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
  32 #include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
  33 #include "gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp"
  34 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
  35 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
  36 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
  37 #include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
  38 #include "gc_implementation/parNew/parNewGeneration.hpp"
  39 #include "gc_implementation/shared/collectorCounters.hpp"
  40 #include "gc_implementation/shared/isGCActiveMark.hpp"
  41 #include "gc_interface/collectedHeap.inline.hpp"
  42 #include "memory/cardTableRS.hpp"
  43 #include "memory/collectorPolicy.hpp"
  44 #include "memory/gcLocker.inline.hpp"
  45 #include "memory/genCollectedHeap.hpp"
  46 #include "memory/genMarkSweep.hpp"
  47 #include "memory/genOopClosures.inline.hpp"
  48 #include "memory/iterator.hpp"
  49 #include "memory/referencePolicy.hpp"
  50 #include "memory/resourceArea.hpp"
  51 #include "oops/oop.inline.hpp"
  52 #include "prims/jvmtiExport.hpp"
  53 #include "runtime/globals_extension.hpp"
  54 #include "runtime/handles.inline.hpp"
  55 #include "runtime/java.hpp"
  56 #include "runtime/vmThread.hpp"
  57 #include "services/memoryService.hpp"
  58 #include "services/runtimeService.hpp"
  59 
  60 // statics
  61 CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
  62 bool          CMSCollector::_full_gc_requested          = false;
  63 
  64 //////////////////////////////////////////////////////////////////
  65 // In support of CMS/VM thread synchronization
  66 //////////////////////////////////////////////////////////////////
  67 // We split use of the CGC_lock into 2 "levels".
  68 // The low-level locking is of the usual CGC_lock monitor. We introduce
  69 // a higher level "token" (hereafter "CMS token") built on top of the
  70 // low level monitor (hereafter "CGC lock").
  71 // The token-passing protocol gives priority to the VM thread. The
  72 // CMS-lock doesn't provide any fairness guarantees, but clients
  73 // should ensure that it is only held for very short, bounded
  74 // durations.
  75 //
  76 // When either of the CMS thread or the VM thread is involved in
  77 // collection operations during which it does not want the other
  78 // thread to interfere, it obtains the CMS token.
  79 //
  80 // If either thread tries to get the token while the other has
  81 // it, that thread waits. However, if the VM thread and CMS thread
  82 // both want the token, then the VM thread gets priority while the
  83 // CMS thread waits. This ensures, for instance, that the "concurrent"
  84 // phases of the CMS thread's work do not block out the VM thread
  85 // for long periods of time as the CMS thread continues to hog
  86 // the token. (See bug 4616232).
  87 //
  88 // The baton-passing functions are, however, controlled by the
  89 // flags _foregroundGCShouldWait and _foregroundGCIsActive,
  90 // and here the low-level CMS lock, not the high level token,
  91 // ensures mutual exclusion.
  92 //
  93 // Two important conditions that we have to satisfy:
  94 // 1. if a thread does a low-level wait on the CMS lock, then it
  95 //    relinquishes the CMS token if it were holding that token
  96 //    when it acquired the low-level CMS lock.
  97 // 2. any low-level notifications on the low-level lock
  98 //    should only be sent when a thread has relinquished the token.
  99 //
 100 // In the absence of either property, we'd have potential deadlock.
 101 //
 102 // We protect each of the CMS (concurrent and sequential) phases
 103 // with the CMS _token_, not the CMS _lock_.
 104 //
 105 // The only code protected by CMS lock is the token acquisition code
 106 // itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the
 107 // baton-passing code.
 108 //
 109 // Unfortunately, I couldn't come up with a good abstraction to factor and
 110 // hide the naked CGC_lock manipulation in the baton-passing code
 111 // further below. That's something we should try to do. Also, the proof
 112 // of correctness of this 2-level locking scheme is far from obvious,
 113 // and potentially quite slippery. We have an uneasy suspicion, for instance,
 114 // that there may be a theoretical possibility of delay/starvation in the
 115 // low-level lock/wait/notify scheme used for the baton-passing because of
 116 // potential interference with the priority scheme embodied in the
 117 // CMS-token-passing protocol. See related comments at a CGC_lock->wait()
 118 // invocation further below and marked with "XXX 20011219YSR".
 119 // Indeed, as we note elsewhere, this may become yet more slippery
 120 // in the presence of multiple CMS and/or multiple VM threads. XXX
 121 
 122 class CMSTokenSync: public StackObj {
 123  private:
 124   bool _is_cms_thread;
 125  public:
 126   CMSTokenSync(bool is_cms_thread):
 127     _is_cms_thread(is_cms_thread) {
 128     assert(is_cms_thread == Thread::current()->is_ConcurrentGC_thread(),
 129            "Incorrect argument to constructor");
 130     ConcurrentMarkSweepThread::synchronize(_is_cms_thread);
 131   }
 132 
 133   ~CMSTokenSync() {
 134     assert(_is_cms_thread ?
 135              ConcurrentMarkSweepThread::cms_thread_has_cms_token() :
 136              ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
 137           "Incorrect state");
 138     ConcurrentMarkSweepThread::desynchronize(_is_cms_thread);
 139   }
 140 };
 141 
 142 // Convenience class that does a CMSTokenSync, and then acquires
 143 // up to three locks.
 144 class CMSTokenSyncWithLocks: public CMSTokenSync {
 145  private:
 146   // Note: locks are acquired in textual declaration order
 147   // and released in the opposite order
 148   MutexLockerEx _locker1, _locker2, _locker3;
 149  public:
 150   CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
 151                         Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
 152     CMSTokenSync(is_cms_thread),
 153     _locker1(mutex1, Mutex::_no_safepoint_check_flag),
 154     _locker2(mutex2, Mutex::_no_safepoint_check_flag),
 155     _locker3(mutex3, Mutex::_no_safepoint_check_flag)
 156   { }
 157 };
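// Illustrative sketch only (not an actual call site): a CMS-thread phase
// that must exclude the VM thread and also needs, say, the bit map lock
// would typically be bracketed as shown below; the specific locks passed
// vary from phase to phase in the code that follows.
//
//   {
//     CMSTokenSyncWithLocks ts(true /* is_cms_thread */, bitMapLock());
//     ... work that must not interleave with the VM thread ...
//   }  // locks released in reverse order, then the CMS token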
 158 
 159 
 160 // Wrapper class to temporarily disable icms during a foreground cms collection.
 161 class ICMSDisabler: public StackObj {
 162  public:
 163   // The ctor disables icms and wakes up the thread so it notices the change;
 164   // the dtor re-enables icms.  Note that the CMSCollector methods will check
 165   // CMSIncrementalMode.
 166   ICMSDisabler()  { CMSCollector::disable_icms(); CMSCollector::start_icms(); }
 167   ~ICMSDisabler() { CMSCollector::enable_icms(); }
 168 };
 169 
 170 //////////////////////////////////////////////////////////////////
 171 //  Concurrent Mark-Sweep Generation /////////////////////////////
 172 //////////////////////////////////////////////////////////////////
 173 
 174 NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)
 175 
 176 // This class contains the per-thread state necessary to support parallel
 177 // young-gen collection.
 178 class CMSParGCThreadState: public CHeapObj<mtGC> {
 179  public:
 180   CFLS_LAB lab;
 181   PromotionInfo promo;
 182 
 183   // Constructor.
 184   CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
 185     promo.setSpace(cfls);
 186   }
 187 };
 188 
 189 ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
 190      ReservedSpace rs, size_t initial_byte_size, int level,
 191      CardTableRS* ct, bool use_adaptive_freelists,
 192      FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
 193   CardGeneration(rs, initial_byte_size, level, ct),
 194   _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
 195   _debug_collection_type(Concurrent_collection_type)
 196 {
 197   HeapWord* bottom = (HeapWord*) _virtual_space.low();
 198   HeapWord* end    = (HeapWord*) _virtual_space.high();
 199 
 200   _direct_allocated_words = 0;
 201   NOT_PRODUCT(
 202     _numObjectsPromoted = 0;
 203     _numWordsPromoted = 0;
 204     _numObjectsAllocated = 0;
 205     _numWordsAllocated = 0;
 206   )
 207 
 208   _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end),
 209                                            use_adaptive_freelists,
 210                                            dictionaryChoice);
 211   NOT_PRODUCT(debug_cms_space = _cmsSpace;)
 212   if (_cmsSpace == NULL) {
 213     vm_exit_during_initialization(
 214       "CompactibleFreeListSpace allocation failure");
 215   }
 216   _cmsSpace->_gen = this;
 217 
 218   _gc_stats = new CMSGCStats();
 219 
 220   // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
 221   // offsets match. The ability to tell free chunks from objects
 222   // depends on this property.
 223   debug_only(
 224     FreeChunk* junk = NULL;
 225     assert(UseCompressedOops ||
 226            junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
 227            "Offset of FreeChunk::_prev within FreeChunk must match"
 228            "  that of OopDesc::_klass within OopDesc");
 229   )
 230   if (CollectedHeap::use_parallel_gc_threads()) {
 231     typedef CMSParGCThreadState* CMSParGCThreadStatePtr;
 232     _par_gc_thread_states =
 233       NEW_C_HEAP_ARRAY(CMSParGCThreadStatePtr, ParallelGCThreads, mtGC);
 234     if (_par_gc_thread_states == NULL) {
 235       vm_exit_during_initialization("Could not allocate par gc structs");
 236     }
 237     for (uint i = 0; i < ParallelGCThreads; i++) {
 238       _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
 239       if (_par_gc_thread_states[i] == NULL) {
 240         vm_exit_during_initialization("Could not allocate par gc structs");
 241       }
 242     }
 243   } else {
 244     _par_gc_thread_states = NULL;
 245   }
 246   _incremental_collection_failed = false;
 247   // The "dilatation_factor" is the expansion that can occur on
 248   // account of the fact that the minimum object size in the CMS
 249   // generation may be larger than that in, say, a contiguous young
 250   //  generation.
 251   // Ideally, in the calculation below, we'd compute the dilatation
 252   // factor as: MinChunkSize/(promoting_gen's min object size)
 253   // Since we do not have such a general query interface for the
 254 // promoting generation, we'll instead just use the minimum
 255   // object size (which today is a header's worth of space);
 256   // note that all arithmetic is in units of HeapWords.
 257   assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
 258   assert(_dilatation_factor >= 1.0, "from previous assert");
 259 }
 260 
 261 
 262 // The field "_initiating_occupancy" represents the occupancy percentage
 263 // at which we trigger a new collection cycle.  Unless explicitly specified
 264 // via CMSInitiatingOccupancyFraction (argument "io" below), it
 265 // is calculated by:
 266 //
 267 //   Let "f" be MinHeapFreeRatio in
 268 //
 269 //    _initiating_occupancy = 100-f +
 270 //                           f * (CMSTriggerRatio/100)
 271 //   where CMSTriggerRatio is the argument "tr" below.
 272 //
 273 // That is, if we assume the heap is at its desired maximum occupancy at the
 274 // end of a collection, we let CMSTriggerRatio of the (purported) free
 275 // space be allocated before initiating a new collection cycle.
 276 //
 277 void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, intx tr) {
 278   assert(io <= 100 && tr >= 0 && tr <= 100, "Check the arguments");
 279   if (io >= 0) {
 280     _initiating_occupancy = (double)io / 100.0;
 281   } else {
 282     _initiating_occupancy = ((100 - MinHeapFreeRatio) +
 283                              (double)(tr * MinHeapFreeRatio) / 100.0)
 284                             / 100.0;
 285   }
 286 }
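// Worked example (assuming the common defaults MinHeapFreeRatio = 40 and
// CMSTriggerRatio = 80, with CMSInitiatingOccupancyFraction left at its
// default of -1 so that the computed value is used):
//
//   _initiating_occupancy = ((100 - 40) + (80 * 40) / 100) / 100
//                         = (60 + 32) / 100
//                         = 0.92
//
// i.e. a concurrent cycle is initiated once the old generation is
// roughly 92% occupied.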
 287 
 288 void ConcurrentMarkSweepGeneration::ref_processor_init() {
 289   assert(collector() != NULL, "no collector");
 290   collector()->ref_processor_init();
 291 }
 292 
 293 void CMSCollector::ref_processor_init() {
 294   if (_ref_processor == NULL) {
 295     // Allocate and initialize a reference processor
 296     _ref_processor =
 297       new ReferenceProcessor(_span,                               // span
 298                              (ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing
 299                              (int) ParallelGCThreads,             // mt processing degree
 300                              _cmsGen->refs_discovery_is_mt(),     // mt discovery
 301                              (int) MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
 302                              _cmsGen->refs_discovery_is_atomic(), // discovery is not atomic
 303                              &_is_alive_closure,                  // closure for liveness info
 304                              false);                              // next field updates do not need write barrier
 305     // Initialize the _ref_processor field of CMSGen
 306     _cmsGen->set_ref_processor(_ref_processor);
 307 
 308   }
 309 }
 310 
 311 CMSAdaptiveSizePolicy* CMSCollector::size_policy() {
 312   GenCollectedHeap* gch = GenCollectedHeap::heap();
 313   assert(gch->kind() == CollectedHeap::GenCollectedHeap,
 314     "Wrong type of heap");
 315   CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
 316     gch->gen_policy()->size_policy();
 317   assert(sp->is_gc_cms_adaptive_size_policy(),
 318     "Wrong type of size policy");
 319   return sp;
 320 }
 321 
 322 CMSGCAdaptivePolicyCounters* CMSCollector::gc_adaptive_policy_counters() {
 323   CMSGCAdaptivePolicyCounters* results =
 324     (CMSGCAdaptivePolicyCounters*) collector_policy()->counters();
 325   assert(
 326     results->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
 327     "Wrong gc policy counter kind");
 328   return results;
 329 }
 330 
 331 
 332 void ConcurrentMarkSweepGeneration::initialize_performance_counters() {
 333 
 334   const char* gen_name = "old";
 335 
 336   // Generation Counters - generation 1, 1 subspace
 337   _gen_counters = new GenerationCounters(gen_name, 1, 1, &_virtual_space);
 338 
 339   _space_counters = new GSpaceCounters(gen_name, 0,
 340                                        _virtual_space.reserved_size(),
 341                                        this, _gen_counters);
 342 }
 343 
 344 CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
 345   _cms_gen(cms_gen)
 346 {
 347   assert(alpha <= 100, "bad value");
 348   _saved_alpha = alpha;
 349 
 350   // Initialize the alphas to the bootstrap value of 100.
 351   _gc0_alpha = _cms_alpha = 100;
 352 
 353   _cms_begin_time.update();
 354   _cms_end_time.update();
 355 
 356   _gc0_duration = 0.0;
 357   _gc0_period = 0.0;
 358   _gc0_promoted = 0;
 359 
 360   _cms_duration = 0.0;
 361   _cms_period = 0.0;
 362   _cms_allocated = 0;
 363 
 364   _cms_used_at_gc0_begin = 0;
 365   _cms_used_at_gc0_end = 0;
 366   _allow_duty_cycle_reduction = false;
 367   _valid_bits = 0;
 368   _icms_duty_cycle = CMSIncrementalDutyCycle;
 369 }
 370 
 371 double CMSStats::cms_free_adjustment_factor(size_t free) const {
 372   // TBD: CR 6909490
 373   return 1.0;
 374 }
 375 
 376 void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
 377 }
 378 
 379 // If promotion failure handling is on, use
 380 // the padded average size of the promotion for each
 381 // young generation collection.
 382 double CMSStats::time_until_cms_gen_full() const {
 383   size_t cms_free = _cms_gen->cmsSpace()->free();
 384   GenCollectedHeap* gch = GenCollectedHeap::heap();
 385   size_t expected_promotion = MIN2(gch->get_gen(0)->capacity(),
 386                                    (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
 387   if (cms_free > expected_promotion) {
 388     // Start a cms collection if there isn't enough space to promote
 389     // for the next minor collection.  Use the padded average as
 390     // a safety factor.
 391     cms_free -= expected_promotion;
 392 
 393     // Adjust by the safety factor.
 394     double cms_free_dbl = (double)cms_free;
 395     double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor)/100.0;
 396     // Apply a further correction factor which tries to adjust
 397     // for recent occurrences of concurrent mode failures.
 398     cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
 399     cms_free_dbl = cms_free_dbl * cms_adjustment;
 400 
 401     if (PrintGCDetails && Verbose) {
 402       gclog_or_tty->print_cr("CMSStats::time_until_cms_gen_full: cms_free "
 403         SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
 404         cms_free, expected_promotion);
 405       gclog_or_tty->print_cr("  cms_free_dbl %f cms_consumption_rate %f",
 406         cms_free_dbl, cms_consumption_rate() + 1.0);
 407     }
 408     // Add 1 in case the consumption rate goes to zero.
 409     return cms_free_dbl / (cms_consumption_rate() + 1.0);
 410   }
 411   return 0.0;
 412 }
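// Worked example (hypothetical numbers): with cms_free = 200M, an
// expected_promotion of 50M, and CMSIncrementalSafetyFactor = 10, the free
// space credited to the CMS generation is
//   (200M - 50M) * (100 - 10) / 100 = 135M
// (further scaled by cms_free_adjustment_factor(), currently 1.0), and the
// returned estimate is 135M / (cms_consumption_rate() + 1.0) seconds.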
 413 
 414 // Compare the duration of the cms collection to the
 415 // time remaining before the cms generation runs out of free space.
 416 // Note that the time from the start of the cms collection
 417 // to the start of the cms sweep (less than the total
 418 // duration of the cms collection) could be used instead.
 419 // That was tried, and some applications experienced
 420 // promotion failures early in execution, possibly
 421 // because the averages were not accurate enough
 422 // at the beginning of the run.
 423 double CMSStats::time_until_cms_start() const {
 424   // We add "gc0_period" to the "work" calculation
 425   // below because this query is done (mostly) at the
 426   // end of a scavenge, so we need to conservatively
 427   // account for that much possible delay
 428   // in the query so as to avoid concurrent mode failures
 429   // due to starting the collection just a wee bit too
 430   // late.
 431   double work = cms_duration() + gc0_period();
 432   double deadline = time_until_cms_gen_full();
 433   // If a concurrent mode failure occurred recently, we want to be
 434   // more conservative and halve our expected time_until_cms_gen_full()
 435   if (work > deadline) {
 436     if (Verbose && PrintGCDetails) {
 437       gclog_or_tty->print(
 438         " CMSCollector: collect because of anticipated promotion "
 439         "before full %3.7f + %3.7f > %3.7f ", cms_duration(),
 440         gc0_period(), time_until_cms_gen_full());
 441     }
 442     return 0.0;
 443   }
 444   return work - deadline;
 445 }
 446 
 447 // Return a duty cycle based on old_duty_cycle and new_duty_cycle, limiting the
 448 // amount of change to prevent wild oscillation.
 449 unsigned int CMSStats::icms_damped_duty_cycle(unsigned int old_duty_cycle,
 450                                               unsigned int new_duty_cycle) {
 451   assert(old_duty_cycle <= 100, "bad input value");
 452   assert(new_duty_cycle <= 100, "bad input value");
 453 
 454   // Note:  use subtraction with caution since it may underflow (values are
 455   // unsigned).  Addition is safe since we're in the range 0-100.
 456   unsigned int damped_duty_cycle = new_duty_cycle;
 457   if (new_duty_cycle < old_duty_cycle) {
 458     const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 5U);
 459     if (new_duty_cycle + largest_delta < old_duty_cycle) {
 460       damped_duty_cycle = old_duty_cycle - largest_delta;
 461     }
 462   } else if (new_duty_cycle > old_duty_cycle) {
 463     const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 15U);
 464     if (new_duty_cycle > old_duty_cycle + largest_delta) {
 465       damped_duty_cycle = MIN2(old_duty_cycle + largest_delta, 100U);
 466     }
 467   }
 468   assert(damped_duty_cycle <= 100, "invalid duty cycle computed");
 469 
 470   if (CMSTraceIncrementalPacing) {
 471     gclog_or_tty->print(" [icms_damped_duty_cycle(%d,%d) = %d] ",
 472                            old_duty_cycle, new_duty_cycle, damped_duty_cycle);
 473   }
 474   return damped_duty_cycle;
 475 }
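// Worked example: with old_duty_cycle = 80 and new_duty_cycle = 20, the
// downward step is limited to largest_delta = MAX2(80 / 4, 5U) = 20, and
// since 20 + 20 < 80 the damped result is 80 - 20 = 60 rather than 20.
// Upward moves use the larger floor MAX2(old_duty_cycle / 4, 15U), so small
// duty cycles can ramp up more quickly than they ramp down.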
 476 
 477 unsigned int CMSStats::icms_update_duty_cycle_impl() {
 478   assert(CMSIncrementalPacing && valid(),
 479          "should be handled in icms_update_duty_cycle()");
 480 
 481   double cms_time_so_far = cms_timer().seconds();
 482   double scaled_duration = cms_duration_per_mb() * _cms_used_at_gc0_end / M;
 483   double scaled_duration_remaining = fabsd(scaled_duration - cms_time_so_far);
 484 
 485   // Avoid division by 0.
 486   double time_until_full = MAX2(time_until_cms_gen_full(), 0.01);
 487   double duty_cycle_dbl = 100.0 * scaled_duration_remaining / time_until_full;
 488 
 489   unsigned int new_duty_cycle = MIN2((unsigned int)duty_cycle_dbl, 100U);
 490   if (new_duty_cycle > _icms_duty_cycle) {
 491     // Avoid very small duty cycles (1 or 2); 0 is allowed.
 492     if (new_duty_cycle > 2) {
 493       _icms_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle,
 494                                                 new_duty_cycle);
 495     }
 496   } else if (_allow_duty_cycle_reduction) {
 497     // The duty cycle is reduced only once per cms cycle (see record_cms_end()).
 498     new_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle, new_duty_cycle);
 499     // Respect the minimum duty cycle.
 500     unsigned int min_duty_cycle = (unsigned int)CMSIncrementalDutyCycleMin;
 501     _icms_duty_cycle = MAX2(new_duty_cycle, min_duty_cycle);
 502   }
 503 
 504   if (PrintGCDetails || CMSTraceIncrementalPacing) {
 505     gclog_or_tty->print(" icms_dc=%d ", _icms_duty_cycle);
 506   }
 507 
 508   _allow_duty_cycle_reduction = false;
 509   return _icms_duty_cycle;
 510 }
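// Worked example (hypothetical numbers): if cms_duration_per_mb() predicts
// 0.02 s/MB and _cms_used_at_gc0_end corresponds to 500 MB, the scaled
// duration is 10 s; with 4 s of concurrent work already done,
// scaled_duration_remaining = 6 s.  If time_until_full is 30 s, the raw
// duty cycle is 100 * 6 / 30 = 20, which is then raised or lowered in
// damped steps relative to the current _icms_duty_cycle via
// icms_damped_duty_cycle() above.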
 511 
 512 #ifndef PRODUCT
 513 void CMSStats::print_on(outputStream *st) const {
 514   st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
 515   st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
 516                gc0_duration(), gc0_period(), gc0_promoted());
 517   st->print(",cms_dur=%g,cms_dur_per_mb=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
 518             cms_duration(), cms_duration_per_mb(),
 519             cms_period(), cms_allocated());
 520   st->print(",cms_since_beg=%g,cms_since_end=%g",
 521             cms_time_since_begin(), cms_time_since_end());
 522   st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
 523             _cms_used_at_gc0_begin, _cms_used_at_gc0_end);
 524   if (CMSIncrementalMode) {
 525     st->print(",dc=%d", icms_duty_cycle());
 526   }
 527 
 528   if (valid()) {
 529     st->print(",promo_rate=%g,cms_alloc_rate=%g",
 530               promotion_rate(), cms_allocation_rate());
 531     st->print(",cms_consumption_rate=%g,time_until_full=%g",
 532               cms_consumption_rate(), time_until_cms_gen_full());
 533   }
 534   st->print(" ");
 535 }
 536 #endif // #ifndef PRODUCT
 537 
 538 CMSCollector::CollectorState CMSCollector::_collectorState =
 539                              CMSCollector::Idling;
 540 bool CMSCollector::_foregroundGCIsActive = false;
 541 bool CMSCollector::_foregroundGCShouldWait = false;
 542 
 543 CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
 544                            CardTableRS*                   ct,
 545                            ConcurrentMarkSweepPolicy*     cp):
 546   _cmsGen(cmsGen),
 547   _ct(ct),
 548   _ref_processor(NULL),    // will be set later
 549   _conc_workers(NULL),     // may be set later
 550   _abort_preclean(false),
 551   _start_sampling(false),
 552   _between_prologue_and_epilogue(false),
 553   _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
 554   _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize),
 555                  -1 /* lock-free */, "No_lock" /* dummy */),
 556   _modUnionClosure(&_modUnionTable),
 557   _modUnionClosurePar(&_modUnionTable),
 558   // Adjust my span to cover old (cms) gen
 559   _span(cmsGen->reserved()),
 560   // Construct the is_alive_closure with _span & markBitMap
 561   _is_alive_closure(_span, &_markBitMap),
 562   _restart_addr(NULL),
 563   _overflow_list(NULL),
 564   _stats(cmsGen),
 565   _eden_chunk_array(NULL),     // may be set in ctor body
 566   _eden_chunk_capacity(0),     // -- ditto --
 567   _eden_chunk_index(0),        // -- ditto --
 568   _survivor_plab_array(NULL),  // -- ditto --
 569   _survivor_chunk_array(NULL), // -- ditto --
 570   _survivor_chunk_capacity(0), // -- ditto --
 571   _survivor_chunk_index(0),    // -- ditto --
 572   _ser_pmc_preclean_ovflw(0),
 573   _ser_kac_preclean_ovflw(0),
 574   _ser_pmc_remark_ovflw(0),
 575   _par_pmc_remark_ovflw(0),
 576   _ser_kac_ovflw(0),
 577   _par_kac_ovflw(0),
 578 #ifndef PRODUCT
 579   _num_par_pushes(0),
 580 #endif
 581   _collection_count_start(0),
 582   _verifying(false),
 583   _icms_start_limit(NULL),
 584   _icms_stop_limit(NULL),
 585   _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
 586   _completed_initialization(false),
 587   _collector_policy(cp),
 588   _should_unload_classes(false),
 589   _concurrent_cycles_since_last_unload(0),
 590   _roots_scanning_options(0),
 591   _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
 592   _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding)
 593 {
 594   if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
 595     ExplicitGCInvokesConcurrent = true;
 596   }
 597   // Allocate the collection support structures (MUT, marking bit map
 598   // etc.) to cover the span of the generation subject to concurrent
 599   // collection.
 600 
 601   // For use by dirty card to oop closures.
 602   _cmsGen->cmsSpace()->set_collector(this);
 603 
 604   // Allocate MUT and marking bit map
 605   {
 606     MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
 607     if (!_markBitMap.allocate(_span)) {
 608       warning("Failed to allocate CMS Bit Map");
 609       return;
 610     }
 611     assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
 612   }
 613   {
 614     _modUnionTable.allocate(_span);
 615     assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
 616   }
 617 
 618   if (!_markStack.allocate(MarkStackSize)) {
 619     warning("Failed to allocate CMS Marking Stack");
 620     return;
 621   }
 622 
 623   // Support for multi-threaded concurrent phases
 624   if (CMSConcurrentMTEnabled) {
 625     if (FLAG_IS_DEFAULT(ConcGCThreads)) {
 626       // just for now
 627       FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3)/4);
 628     }
 629     if (ConcGCThreads > 1) {
 630       _conc_workers = new YieldingFlexibleWorkGang("Parallel CMS Threads",
 631                                  ConcGCThreads, true);
 632       if (_conc_workers == NULL) {
 633         warning("GC/CMS: _conc_workers allocation failure: "
 634               "forcing -CMSConcurrentMTEnabled");
 635         CMSConcurrentMTEnabled = false;
 636       } else {
 637         _conc_workers->initialize_workers();
 638       }
 639     } else {
 640       CMSConcurrentMTEnabled = false;
 641     }
 642   }
 643   if (!CMSConcurrentMTEnabled) {
 644     ConcGCThreads = 0;
 645   } else {
 646     // Turn off CMSCleanOnEnter optimization temporarily for
 647     // the MT case where it's not fixed yet; see 6178663.
 648     CMSCleanOnEnter = false;
 649   }
 650   assert((_conc_workers != NULL) == (ConcGCThreads > 1),
 651          "Inconsistency");
 652 
 653   // Parallel task queues; these are shared for the
 654   // concurrent and stop-world phases of CMS, but
 655   // are not shared with parallel scavenge (ParNew).
 656   {
 657     uint i;
 658     uint num_queues = (uint) MAX2(ParallelGCThreads, ConcGCThreads);
 659 
 660     if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
 661          || ParallelRefProcEnabled)
 662         && num_queues > 0) {
 663       _task_queues = new OopTaskQueueSet(num_queues);
 664       if (_task_queues == NULL) {
 665         warning("task_queues allocation failure.");
 666         return;
 667       }
 668       _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues, mtGC);
 669       if (_hash_seed == NULL) {
 670         warning("_hash_seed array allocation failure");
 671         return;
 672       }
 673 
 674       typedef Padded<OopTaskQueue> PaddedOopTaskQueue;
 675       for (i = 0; i < num_queues; i++) {
 676         PaddedOopTaskQueue *q = new PaddedOopTaskQueue();
 677         if (q == NULL) {
 678           warning("work_queue allocation failure.");
 679           return;
 680         }
 681         _task_queues->register_queue(i, q);
 682       }
 683       for (i = 0; i < num_queues; i++) {
 684         _task_queues->queue(i)->initialize();
 685         _hash_seed[i] = 17;  // copied from ParNew
 686       }
 687     }
 688   }
 689 
 690   _cmsGen ->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
 691 
 692   // Clip CMSBootstrapOccupancy between 0 and 100.
 693   _bootstrap_occupancy = ((double)MIN2((uintx)100, MAX2((uintx)0, CMSBootstrapOccupancy)))
 694                          /(double)100;
 695 
 696   _full_gcs_since_conc_gc = 0;
 697 
 698   // Now tell CMS generations the identity of their collector
 699   ConcurrentMarkSweepGeneration::set_collector(this);
 700 
 701   // Create & start a CMS thread for this CMS collector
 702   _cmsThread = ConcurrentMarkSweepThread::start(this);
 703   assert(cmsThread() != NULL, "CMS Thread should have been created");
 704   assert(cmsThread()->collector() == this,
 705          "CMS Thread should refer to this gen");
 706   assert(CGC_lock != NULL, "Where's the CGC_lock?");
 707 
 708   // Support for parallelizing young gen rescan
 709   GenCollectedHeap* gch = GenCollectedHeap::heap();
 710   _young_gen = gch->prev_gen(_cmsGen);
 711   if (gch->supports_inline_contig_alloc()) {
 712     _top_addr = gch->top_addr();
 713     _end_addr = gch->end_addr();
 714     assert(_young_gen != NULL, "no _young_gen");
 715     _eden_chunk_index = 0;
 716     _eden_chunk_capacity = (_young_gen->max_capacity()+CMSSamplingGrain)/CMSSamplingGrain;
 717     _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
 718     if (_eden_chunk_array == NULL) {
 719       _eden_chunk_capacity = 0;
 720       warning("GC/CMS: _eden_chunk_array allocation failure");
 721     }
 722   }
 723   assert(_eden_chunk_array != NULL || _eden_chunk_capacity == 0, "Error");
 724 
 725   // Support for parallelizing survivor space rescan
 726   if (CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) {
 727     const size_t max_plab_samples =
 728       ((DefNewGeneration*)_young_gen)->max_survivor_size()/MinTLABSize;
 729 
 730     _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
 731     _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, 2*max_plab_samples, mtGC);
 732     _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
 733     if (_survivor_plab_array == NULL || _survivor_chunk_array == NULL
 734         || _cursor == NULL) {
 735       warning("Failed to allocate survivor plab/chunk array");
 736       if (_survivor_plab_array  != NULL) {
 737         FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array, mtGC);
 738         _survivor_plab_array = NULL;
 739       }
 740       if (_survivor_chunk_array != NULL) {
 741         FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array, mtGC);
 742         _survivor_chunk_array = NULL;
 743       }
 744       if (_cursor != NULL) {
 745         FREE_C_HEAP_ARRAY(size_t, _cursor, mtGC);
 746         _cursor = NULL;
 747       }
 748     } else {
 749       _survivor_chunk_capacity = 2*max_plab_samples;
 750       for (uint i = 0; i < ParallelGCThreads; i++) {
 751         HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
 752         if (vec == NULL) {
 753           warning("Failed to allocate survivor plab array");
 754           for (int j = i; j > 0; j--) {
 755             FREE_C_HEAP_ARRAY(HeapWord*, _survivor_plab_array[j-1].array(), mtGC);
 756           }
 757           FREE_C_HEAP_ARRAY(ChunkArray, _survivor_plab_array, mtGC);
 758           FREE_C_HEAP_ARRAY(HeapWord*, _survivor_chunk_array, mtGC);
 759           _survivor_plab_array = NULL;
 760           _survivor_chunk_array = NULL;
 761           _survivor_chunk_capacity = 0;
 762           break;
 763         } else {
 764           ChunkArray* cur =
 765             ::new (&_survivor_plab_array[i]) ChunkArray(vec,
 766                                                         max_plab_samples);
 767           assert(cur->end() == 0, "Should be 0");
 768           assert(cur->array() == vec, "Should be vec");
 769           assert(cur->capacity() == max_plab_samples, "Error");
 770         }
 771       }
 772     }
 773   }
 774   assert(   (   _survivor_plab_array  != NULL
 775              && _survivor_chunk_array != NULL)
 776          || (   _survivor_chunk_capacity == 0
 777              && _survivor_chunk_index == 0),
 778          "Error");
 779 
 780   // Choose which strong roots should be scanned, depending on whether class unloading is enabled
 781   if (!CMSClassUnloadingEnabled) {
 782     // If class unloading is disabled we want to include all classes into the root set.
 783     add_root_scanning_option(SharedHeap::SO_AllClasses);
 784   } else {
 785     add_root_scanning_option(SharedHeap::SO_SystemClasses);
 786   }
 787 
 788   NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
 789   _gc_counters = new CollectorCounters("CMS", 1);
 790   _completed_initialization = true;
 791   _inter_sweep_timer.start();  // start of time
 792 }
 793 
 794 const char* ConcurrentMarkSweepGeneration::name() const {
 795   return "concurrent mark-sweep generation";
 796 }
 797 void ConcurrentMarkSweepGeneration::update_counters() {
 798   if (UsePerfData) {
 799     _space_counters->update_all();
 800     _gen_counters->update_all();
 801   }
 802 }
 803 
 804 // This is an optimized version of update_counters(); it takes the
 805 // used value as a parameter rather than computing it.
 806 //
 807 void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
 808   if (UsePerfData) {
 809     _space_counters->update_used(used);
 810     _space_counters->update_capacity();
 811     _gen_counters->update_all();
 812   }
 813 }
 814 
 815 void ConcurrentMarkSweepGeneration::print() const {
 816   Generation::print();
 817   cmsSpace()->print();
 818 }
 819 
 820 #ifndef PRODUCT
 821 void ConcurrentMarkSweepGeneration::print_statistics() {
 822   cmsSpace()->printFLCensus(0);
 823 }
 824 #endif
 825 
 826 void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) {
 827   GenCollectedHeap* gch = GenCollectedHeap::heap();
 828   if (PrintGCDetails) {
 829     if (Verbose) {
 830       gclog_or_tty->print(" [%d %s-%s: "SIZE_FORMAT"("SIZE_FORMAT")]",
 831         level(), short_name(), s, used(), capacity());
 832     } else {
 833       gclog_or_tty->print(" [%d %s-%s: "SIZE_FORMAT"K("SIZE_FORMAT"K)]",
 834         level(), short_name(), s, used() / K, capacity() / K);
 835     }
 836   }
 837   if (Verbose) {
 838     gclog_or_tty->print(" "SIZE_FORMAT"("SIZE_FORMAT")",
 839               gch->used(), gch->capacity());
 840   } else {
 841     gclog_or_tty->print(" "SIZE_FORMAT"K("SIZE_FORMAT"K)",
 842               gch->used() / K, gch->capacity() / K);
 843   }
 844 }
 845 
 846 size_t
 847 ConcurrentMarkSweepGeneration::contiguous_available() const {
 848   // dld proposes an improvement in precision here. If the committed
 849   // part of the space ends in a free block we should add that to
 850   // uncommitted size in the calculation below. Will make this
 851   // change later, staying with the approximation below for the
 852   // time being. -- ysr.
 853   return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
 854 }
 855 
 856 size_t
 857 ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
 858   return _cmsSpace->max_alloc_in_words() * HeapWordSize;
 859 }
 860 
 861 size_t ConcurrentMarkSweepGeneration::max_available() const {
 862   return free() + _virtual_space.uncommitted_size();
 863 }
 864 
 865 bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
 866   size_t available = max_available();
 867   size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
 868   bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
 869   if (Verbose && PrintGCDetails) {
 870     gclog_or_tty->print_cr(
 871       "CMS: promo attempt is%s safe: available("SIZE_FORMAT") %s av_promo("SIZE_FORMAT"),"
 872       "max_promo("SIZE_FORMAT")",
 873       res? "":" not", available, res? ">=":"<",
 874       av_promo, max_promotion_in_bytes);
 875   }
 876   return res;
 877 }
 878 
 879 // At a promotion failure dump information on block layout in heap
 880 // (cms old generation).
 881 void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
 882   if (CMSDumpAtPromotionFailure) {
 883     cmsSpace()->dump_at_safepoint_with_locks(collector(), gclog_or_tty);
 884   }
 885 }
 886 
 887 CompactibleSpace*
 888 ConcurrentMarkSweepGeneration::first_compaction_space() const {
 889   return _cmsSpace;
 890 }
 891 
 892 void ConcurrentMarkSweepGeneration::reset_after_compaction() {
 893   // Clear the promotion information.  These pointers can be adjusted
 894   // along with all the other pointers into the heap, but
 895   // compaction is expected to be a rare event for a heap
 896   // using cms, so don't do it without seeing the need.
 897   if (CollectedHeap::use_parallel_gc_threads()) {
 898     for (uint i = 0; i < ParallelGCThreads; i++) {
 899       _par_gc_thread_states[i]->promo.reset();
 900     }
 901   }
 902 }
 903 
 904 void ConcurrentMarkSweepGeneration::space_iterate(SpaceClosure* blk, bool usedOnly) {
 905   blk->do_space(_cmsSpace);
 906 }
 907 
 908 void ConcurrentMarkSweepGeneration::compute_new_size() {
 909   assert_locked_or_safepoint(Heap_lock);
 910 
 911   // If incremental collection failed, we just want to expand
 912   // to the limit.
 913   if (incremental_collection_failed()) {
 914     clear_incremental_collection_failed();
 915     grow_to_reserved();
 916     return;
 917   }
 918 
 919   size_t expand_bytes = 0;
 920   double free_percentage = ((double) free()) / capacity();
 921   double desired_free_percentage = (double) MinHeapFreeRatio / 100;
 922   double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;
 923 
 924   // compute expansion delta needed for reaching desired free percentage
 925   if (free_percentage < desired_free_percentage) {
 926     size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
 927     assert(desired_capacity >= capacity(), "invalid expansion size");
 928     expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
 929   }
 930   if (expand_bytes > 0) {
 931     if (PrintGCDetails && Verbose) {
 932       size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
 933       gclog_or_tty->print_cr("\nFrom compute_new_size: ");
 934       gclog_or_tty->print_cr("  Free fraction %f", free_percentage);
 935       gclog_or_tty->print_cr("  Desired free fraction %f",
 936         desired_free_percentage);
 937       gclog_or_tty->print_cr("  Maximum free fraction %f",
 938         maximum_free_percentage);
 939       gclog_or_tty->print_cr("  Capacity "SIZE_FORMAT, capacity()/1000);
 940       gclog_or_tty->print_cr("  Desired capacity "SIZE_FORMAT,
 941         desired_capacity/1000);
 942       int prev_level = level() - 1;
 943       if (prev_level >= 0) {
 944         size_t prev_size = 0;
 945         GenCollectedHeap* gch = GenCollectedHeap::heap();
 946         Generation* prev_gen = gch->_gens[prev_level];
 947         prev_size = prev_gen->capacity();
 948           gclog_or_tty->print_cr("  Younger gen size "SIZE_FORMAT,
 949                                  prev_size/1000);
 950       }
 951       gclog_or_tty->print_cr("  unsafe_max_alloc_nogc "SIZE_FORMAT,
 952         unsafe_max_alloc_nogc()/1000);
 953       gclog_or_tty->print_cr("  contiguous available "SIZE_FORMAT,
 954         contiguous_available()/1000);
 955       gclog_or_tty->print_cr("  Expand by "SIZE_FORMAT" (bytes)",
 956         expand_bytes);
 957     }
 958     // safe if expansion fails
 959     expand(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
 960     if (PrintGCDetails && Verbose) {
 961       gclog_or_tty->print_cr("  Expanded free fraction %f",
 962         ((double) free()) / capacity());
 963     }
 964   }
 965 }
 966 
 967 Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
 968   return cmsSpace()->freelistLock();
 969 }
 970 
 971 HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size,
 972                                                   bool   tlab) {
 973   CMSSynchronousYieldRequest yr;
 974   MutexLockerEx x(freelistLock(),
 975                   Mutex::_no_safepoint_check_flag);
 976   return have_lock_and_allocate(size, tlab);
 977 }
 978 
 979 HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
 980                                                   bool   tlab /* ignored */) {
 981   assert_lock_strong(freelistLock());
 982   size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
 983   HeapWord* res = cmsSpace()->allocate(adjustedSize);
 984   // Allocate the object live (grey) if the background collector has
 985   // started marking. This is necessary because the marker may
 986   // have passed this address and consequently this object will
 987   // not otherwise be greyed and would be incorrectly swept up.
 988   // Note that if this object contains references, the writing
 989   // of those references will dirty the card containing this object
 990   // allowing the object to be blackened (and its references scanned)
 991   // either during a preclean phase or at the final checkpoint.
 992   if (res != NULL) {
 993     // We may block here with an uninitialized object with
 994     // its mark-bit or P-bits not yet set. Such objects need
 995     // to be safely navigable by block_start().
 996     assert(oop(res)->klass_or_null() == NULL, "Object should be uninitialized here.");
 997     assert(!((FreeChunk*)res)->is_free(), "Error, block will look free but show wrong size");
 998     collector()->direct_allocated(res, adjustedSize);
 999     _direct_allocated_words += adjustedSize;
1000     // allocation counters
1001     NOT_PRODUCT(
1002       _numObjectsAllocated++;
1003       _numWordsAllocated += (int)adjustedSize;
1004     )
1005   }
1006   return res;
1007 }
1008 
1009 // In the case of direct allocation by mutators in a generation that
1010 // is being concurrently collected, the object must be allocated
1011 // live (grey) if the background collector has started marking.
1012 // This is necessary because the marker may
1013 // have passed this address and consequently this object will
1014 // not otherwise be greyed and would be incorrectly swept up.
1015 // Note that if this object contains references, the writing
1016 // of those references will dirty the card containing this object
1017 // allowing the object to be blackened (and its references scanned)
1018 // either during a preclean phase or at the final checkpoint.
1019 void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
1020   assert(_markBitMap.covers(start, size), "Out of bounds");
1021   if (_collectorState >= Marking) {
1022     MutexLockerEx y(_markBitMap.lock(),
1023                     Mutex::_no_safepoint_check_flag);
1024     // [see comments preceding SweepClosure::do_blk() below for details]
1025     //
1026     // Can the P-bits be deleted now?  JJJ
1027     //
1028     // 1. need to mark the object as live so it isn't collected
1029     // 2. need to mark the 2nd bit to indicate the object may be uninitialized
1030     // 3. need to mark the end of the object so marking, precleaning or sweeping
1031     //    can skip over uninitialized or unparsable objects. An allocated
1032     //    object is considered uninitialized for our purposes as long as
1033     //    its klass word is NULL.  All old gen objects are parsable
1034     //    as soon as they are initialized.
1035     _markBitMap.mark(start);          // object is live
1036     _markBitMap.mark(start + 1);      // object is potentially uninitialized?
1037     _markBitMap.mark(start + size - 1);
1038                                       // mark end of object
1039   }
1040   // check that oop looks uninitialized
1041   assert(oop(start)->klass_or_null() == NULL, "_klass should be NULL");
1042 }
1043 
1044 void CMSCollector::promoted(bool par, HeapWord* start,
1045                             bool is_obj_array, size_t obj_size) {
1046   assert(_markBitMap.covers(start), "Out of bounds");
1047   // See comment in direct_allocated() about when objects should
1048   // be allocated live.
1049   if (_collectorState >= Marking) {
1050     // we already hold the marking bit map lock, taken in
1051     // the prologue
1052     if (par) {
1053       _markBitMap.par_mark(start);
1054     } else {
1055       _markBitMap.mark(start);
1056     }
1057     // We don't need to mark the object as uninitialized (as
1058     // in direct_allocated above) because this is being done with the
1059     // world stopped and the object will be initialized by the
1060     // time the marking, precleaning or sweeping get to look at it.
1061     // But see the code for copying objects into the CMS generation,
1062     // where we need to ensure that concurrent readers of the
1063     // block offset table are able to safely navigate a block that
1064     // is in flux from being free to being allocated (and in
1065     // transition while being copied into) and subsequently
1066     // becoming a bona-fide object when the copy/promotion is complete.
1067     assert(SafepointSynchronize::is_at_safepoint(),
1068            "expect promotion only at safepoints");
1069 
1070     if (_collectorState < Sweeping) {
1071       // Mark the appropriate cards in the modUnionTable, so that
1072       // this object gets scanned before the sweep. If this is
1073       // not done, CMS generation references in the object might
1074       // not get marked.
1075       // For the case of arrays, which are otherwise precisely
1076       // marked, we need to dirty the entire array, not just its head.
1077       if (is_obj_array) {
1078         // The [par_]mark_range() method expects mr.end() below to
1079         // be aligned to the granularity of a bit's representation
1080         // in the heap. In the case of the MUT below, that's a
1081         // card size.
1082         MemRegion mr(start,
1083                      (HeapWord*)round_to((intptr_t)(start + obj_size),
1084                         CardTableModRefBS::card_size /* bytes */));
1085         if (par) {
1086           _modUnionTable.par_mark_range(mr);
1087         } else {
1088           _modUnionTable.mark_range(mr);
1089         }
1090       } else {  // not an obj array; we can just mark the head
1091         if (par) {
1092           _modUnionTable.par_mark(start);
1093         } else {
1094           _modUnionTable.mark(start);
1095         }
1096       }
1097     }
1098   }
1099 }
1100 
1101 static inline size_t percent_of_space(Space* space, HeapWord* addr)
1102 {
1103   size_t delta = pointer_delta(addr, space->bottom());
1104   return (size_t)(delta * 100.0 / (space->capacity() / HeapWordSize));
1105 }
1106 
1107 void CMSCollector::icms_update_allocation_limits()
1108 {
1109   Generation* gen0 = GenCollectedHeap::heap()->get_gen(0);
1110   EdenSpace* eden = gen0->as_DefNewGeneration()->eden();
1111 
1112   const unsigned int duty_cycle = stats().icms_update_duty_cycle();
1113   if (CMSTraceIncrementalPacing) {
1114     stats().print();
1115   }
1116 
1117   assert(duty_cycle <= 100, "invalid duty cycle");
1118   if (duty_cycle != 0) {
1119     // The duty_cycle is a percentage between 0 and 100; convert to words and
1120     // then compute the offset from the endpoints of the space.
1121     size_t free_words = eden->free() / HeapWordSize;
1122     double free_words_dbl = (double)free_words;
1123     size_t duty_cycle_words = (size_t)(free_words_dbl * duty_cycle / 100.0);
1124     size_t offset_words = (free_words - duty_cycle_words) / 2;
1125 
1126     _icms_start_limit = eden->top() + offset_words;
1127     _icms_stop_limit = eden->end() - offset_words;
1128 
1129     // The limits may be adjusted (shifted to the right) by
1130     // CMSIncrementalOffset, to allow the application more mutator time after a
1131     // young gen gc (when all mutators were stopped) and before CMS starts and
1132     // takes away one or more cpus.
1133     if (CMSIncrementalOffset != 0) {
1134       double adjustment_dbl = free_words_dbl * CMSIncrementalOffset / 100.0;
1135       size_t adjustment = (size_t)adjustment_dbl;
1136       HeapWord* tmp_stop = _icms_stop_limit + adjustment;
1137       if (tmp_stop > _icms_stop_limit && tmp_stop < eden->end()) {
1138         _icms_start_limit += adjustment;
1139         _icms_stop_limit = tmp_stop;
1140       }
1141     }
1142   }
1143   if (duty_cycle == 0 || (_icms_start_limit == _icms_stop_limit)) {
1144     _icms_start_limit = _icms_stop_limit = eden->end();
1145   }
1146 
1147   // Install the new start limit.
1148   eden->set_soft_end(_icms_start_limit);
1149 
1150   if (CMSTraceIncrementalMode) {
1151     gclog_or_tty->print(" icms alloc limits:  "
1152                            PTR_FORMAT "," PTR_FORMAT
1153                            " (" SIZE_FORMAT "%%," SIZE_FORMAT "%%) ",
1154                            _icms_start_limit, _icms_stop_limit,
1155                            percent_of_space(eden, _icms_start_limit),
1156                            percent_of_space(eden, _icms_stop_limit));
1157     if (Verbose) {
1158       gclog_or_tty->print("eden:  ");
1159       eden->print_on(gclog_or_tty);
1160     }
1161   }
1162 }
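// Worked example (hypothetical numbers): with 100M words free in eden and a
// duty cycle of 20, duty_cycle_words = 20M and offset_words = 40M, so the
// incremental-mode window [_icms_start_limit, _icms_stop_limit) covers the
// middle 20% of the free portion of eden (shifted towards eden->end() when
// CMSIncrementalOffset is non-zero).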
1163 
1164 // Any changes here should try to maintain the invariant
1165 // that if this method is called with _icms_start_limit
1166 // and _icms_stop_limit both NULL, then it should return NULL
1167 // and not notify the icms thread.
1168 HeapWord*
1169 CMSCollector::allocation_limit_reached(Space* space, HeapWord* top,
1170                                        size_t word_size)
1171 {
1172   // A start_limit equal to end() means the duty cycle is 0, so treat that as a
1173   // nop.
1174   if (CMSIncrementalMode && _icms_start_limit != space->end()) {
1175     if (top <= _icms_start_limit) {
1176       if (CMSTraceIncrementalMode) {
1177         space->print_on(gclog_or_tty);
1178         gclog_or_tty->stamp();
1179         gclog_or_tty->print_cr(" start limit top=" PTR_FORMAT
1180                                ", new limit=" PTR_FORMAT
1181                                " (" SIZE_FORMAT "%%)",
1182                                top, _icms_stop_limit,
1183                                percent_of_space(space, _icms_stop_limit));
1184       }
1185       ConcurrentMarkSweepThread::start_icms();
1186       assert(top < _icms_stop_limit, "Tautology");
1187       if (word_size < pointer_delta(_icms_stop_limit, top)) {
1188         return _icms_stop_limit;
1189       }
1190 
1191       // The allocation will cross both the _start and _stop limits, so do the
1192       // stop notification also and return end().
1193       if (CMSTraceIncrementalMode) {
1194         space->print_on(gclog_or_tty);
1195         gclog_or_tty->stamp();
1196         gclog_or_tty->print_cr(" +stop limit top=" PTR_FORMAT
1197                                ", new limit=" PTR_FORMAT
1198                                " (" SIZE_FORMAT "%%)",
1199                                top, space->end(),
1200                                percent_of_space(space, space->end()));
1201       }
1202       ConcurrentMarkSweepThread::stop_icms();
1203       return space->end();
1204     }
1205 
1206     if (top <= _icms_stop_limit) {
1207       if (CMSTraceIncrementalMode) {
1208         space->print_on(gclog_or_tty);
1209         gclog_or_tty->stamp();
1210         gclog_or_tty->print_cr(" stop limit top=" PTR_FORMAT
1211                                ", new limit=" PTR_FORMAT
1212                                " (" SIZE_FORMAT "%%)",
1213                                top, space->end(),
1214                                percent_of_space(space, space->end()));
1215       }
1216       ConcurrentMarkSweepThread::stop_icms();
1217       return space->end();
1218     }
1219 
1220     if (CMSTraceIncrementalMode) {
1221       space->print_on(gclog_or_tty);
1222       gclog_or_tty->stamp();
1223       gclog_or_tty->print_cr(" end limit top=" PTR_FORMAT
1224                              ", new limit=" PTR_FORMAT,
1225                              top, NULL);
1226     }
1227   }
1228 
1229   return NULL;
1230 }
1231 
1232 oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
1233   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1234   // allocate, copy and if necessary update promoinfo --
1235   // delegate to underlying space.
1236   assert_lock_strong(freelistLock());
1237 
1238 #ifndef PRODUCT
1239   if (Universe::heap()->promotion_should_fail()) {
1240     return NULL;
1241   }
1242 #endif  // #ifndef PRODUCT
1243 
1244   oop res = _cmsSpace->promote(obj, obj_size);
1245   if (res == NULL) {
1246     // expand and retry
1247     size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
1248     expand(s*HeapWordSize, MinHeapDeltaBytes,
1249       CMSExpansionCause::_satisfy_promotion);
1250     // Since there's currently no next generation, we don't try to promote
1251     // into a more senior generation.
1252     assert(next_gen() == NULL, "assumption, based upon which no attempt "
1253                                "is made to pass on a possibly failing "
1254                                "promotion to next generation");
1255     res = _cmsSpace->promote(obj, obj_size);
1256   }
1257   if (res != NULL) {
1258     // See comment in allocate() about when objects should
1259     // be allocated live.
1260     assert(obj->is_oop(), "Will dereference klass pointer below");
1261     collector()->promoted(false,           // Not parallel
1262                           (HeapWord*)res, obj->is_objArray(), obj_size);
1263     // promotion counters
1264     NOT_PRODUCT(
1265       _numObjectsPromoted++;
1266       _numWordsPromoted +=
1267         (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
1268     )
1269   }
1270   return res;
1271 }
1272 
1273 
1274 HeapWord*
1275 ConcurrentMarkSweepGeneration::allocation_limit_reached(Space* space,
1276                                              HeapWord* top,
1277                                              size_t word_sz)
1278 {
1279   return collector()->allocation_limit_reached(space, top, word_sz);
1280 }
1281 
1282 // IMPORTANT: Notes on object size recognition in CMS.
1283 // ---------------------------------------------------
1284 // A block of storage in the CMS generation is always in
1285 // one of three states. A free block (FREE), an allocated
1286 // object (OBJECT) whose size() method reports the correct size,
1287 // and an intermediate state (TRANSIENT) in which its size cannot
1288 // be accurately determined.
1289 // STATE IDENTIFICATION:   (32 bit and 64 bit w/o COOPS)
1290 // -----------------------------------------------------
1291 // FREE:      klass_word & 1 == 1; mark_word holds block size
1292 //
1293 // OBJECT:    klass_word installed; klass_word != 0 && klass_word & 1 == 0;
1294 //            obj->size() computes correct size
1295 //
1296 // TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
1297 //
1298 // STATE IDENTIFICATION: (64 bit+COOPS)
1299 // ------------------------------------
1300 // FREE:      mark_word & CMS_FREE_BIT == 1; mark_word & ~CMS_FREE_BIT gives block_size
1301 //
1302 // OBJECT:    klass_word installed; klass_word != 0;
1303 //            obj->size() computes correct size
1304 //
1305 // TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
1306 //
1307 //
1308 // STATE TRANSITION DIAGRAM
1309 //
1310 //        mut / parnew                     mut  /  parnew
1311 // FREE --------------------> TRANSIENT ---------------------> OBJECT --|
1312 //  ^                                                                   |
1313 //  |------------------------ DEAD <------------------------------------|
1314 //         sweep                            mut
1315 //
1316 // While a block is in TRANSIENT state its size cannot be determined
1317 // so readers will either need to come back later or stall until
1318 // the size can be determined. Note that for the case of direct
1319 // allocation, P-bits, when available, may be used to determine the
1320 // size of an object that may not yet have been initialized.
1321 
1322 // Things to support parallel young-gen collection.
1323 oop
1324 ConcurrentMarkSweepGeneration::par_promote(int thread_num,
1325                                            oop old, markOop m,
1326                                            size_t word_sz) {
1327 #ifndef PRODUCT
1328   if (Universe::heap()->promotion_should_fail()) {
1329     return NULL;
1330   }
1331 #endif  // #ifndef PRODUCT
1332 
1333   CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1334   PromotionInfo* promoInfo = &ps->promo;
1335   // if we are tracking promotions, then first ensure space for
1336   // promotion (including spooling space for saving header if necessary).
1337   // then allocate and copy, then track promoted info if needed.
1338   // When tracking (see PromotionInfo::track()), the mark word may
1339   // be displaced and in this case restoration of the mark word
1340   // occurs in the (oop_since_save_marks_)iterate phase.
1341   if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
1342     // Out of space for allocating spooling buffers;
1343     // try expanding and allocating spooling buffers.
1344     if (!expand_and_ensure_spooling_space(promoInfo)) {
1345       return NULL;
1346     }
1347   }
1348   assert(promoInfo->has_spooling_space(), "Control point invariant");
1349   const size_t alloc_sz = CompactibleFreeListSpace::adjustObjectSize(word_sz);
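  // First try this thread's promotion LAB; if that fails, fall back below to
  // expanding the generation and allocating via expand_and_par_lab_allocate().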
1350   HeapWord* obj_ptr = ps->lab.alloc(alloc_sz);
1351   if (obj_ptr == NULL) {
1352      obj_ptr = expand_and_par_lab_allocate(ps, alloc_sz);
1353      if (obj_ptr == NULL) {
1354        return NULL;
1355      }
1356   }
1357   oop obj = oop(obj_ptr);
1358   OrderAccess::storestore();
1359   assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1360   assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1361   // IMPORTANT: See note on object initialization for CMS above.
1362   // Otherwise, copy the object.  Here we must be careful to insert the
1363   // klass pointer last, since this marks the block as an allocated object.
  // (With compressed oops it is the mark word, rather than the klass word,
  // that distinguishes a free block from an allocated object; see the
  // state identification note above.)
1365   HeapWord* old_ptr = (HeapWord*)old;
1366   // Restore the mark word copied above.
1367   obj->set_mark(m);
1368   assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1369   assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1370   OrderAccess::storestore();
1371 
1372   if (UseCompressedKlassPointers) {
1373     // Copy gap missed by (aligned) header size calculation below
1374     obj->set_klass_gap(old->klass_gap());
1375   }
1376   if (word_sz > (size_t)oopDesc::header_size()) {
1377     Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
1378                                  obj_ptr + oopDesc::header_size(),
1379                                  word_sz - oopDesc::header_size());
1380   }
1381 
1382   // Now we can track the promoted object, if necessary.  We take care
1383   // to delay the transition from uninitialized to full object
1384   // (i.e., insertion of klass pointer) until after, so that it
1385   // atomically becomes a promoted object.
1386   if (promoInfo->tracking()) {
1387     promoInfo->track((PromotedObject*)obj, old->klass());
1388   }
1389   assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1390   assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1391   assert(old->is_oop(), "Will use and dereference old klass ptr below");
1392 
1393   // Finally, install the klass pointer (this should be volatile).
1394   OrderAccess::storestore();
1395   obj->set_klass(old->klass());
1396   // We should now be able to calculate the right size for this object
1397   assert(obj->is_oop() && obj->size() == (int)word_sz, "Error, incorrect size computed for promoted object");
1398 
1399   collector()->promoted(true,          // parallel
1400                         obj_ptr, old->is_objArray(), word_sz);
1401 
1402   NOT_PRODUCT(
1403     Atomic::inc_ptr(&_numObjectsPromoted);
1404     Atomic::add_ptr(alloc_sz, &_numWordsPromoted);
1405   )
1406 
1407   return obj;
1408 }
1409 
1410 void
1411 ConcurrentMarkSweepGeneration::
1412 par_promote_alloc_undo(int thread_num,
1413                        HeapWord* obj, size_t word_sz) {
1414   // CMS does not support promotion undo.
1415   ShouldNotReachHere();
1416 }
1417 
1418 void
1419 ConcurrentMarkSweepGeneration::
1420 par_promote_alloc_done(int thread_num) {
1421   CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
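  // Retire this thread's promotion LAB; any unused portion is returned
  // to the free lists for reuse.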
1422   ps->lab.retire(thread_num);
1423 }
1424 
1425 void
1426 ConcurrentMarkSweepGeneration::
1427 par_oop_since_save_marks_iterate_done(int thread_num) {
1428   CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1429   ParScanWithoutBarrierClosure* dummy_cl = NULL;
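  // Iterating over the promoted objects with a NULL closure performs only
  // the bookkeeping side of the iteration; in particular it restores any
  // mark words that were displaced while promotion tracking was in effect
  // (see the note in par_promote() above).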
1430   ps->promo.promoted_oops_iterate_nv(dummy_cl);
1431 }
1432 
1433 bool ConcurrentMarkSweepGeneration::should_collect(bool   full,
1434                                                    size_t size,
1435                                                    bool   tlab)
1436 {
1437   // We allow a STW collection only if a full
1438   // collection was requested.
1439   return full || should_allocate(size, tlab); // FIX ME !!!
1440   // This and promotion failure handling are connected at the
1441   // hip and should be fixed by untying them.
1442 }
1443 
1444 bool CMSCollector::shouldConcurrentCollect() {
1445   if (_full_gc_requested) {
1446     if (Verbose && PrintGCDetails) {
1447       gclog_or_tty->print_cr("CMSCollector: collect because of explicit "
1448                              " gc request (or gc_locker)");
1449     }
1450     return true;
1451   }
1452 
1453   // For debugging purposes, change the type of collection.
1454   // If the rotation is not on the concurrent collection
1455   // type, don't start a concurrent collection.
1456   NOT_PRODUCT(
1457     if (RotateCMSCollectionTypes &&
1458         (_cmsGen->debug_collection_type() !=
1459           ConcurrentMarkSweepGeneration::Concurrent_collection_type)) {
1460       assert(_cmsGen->debug_collection_type() !=
1461         ConcurrentMarkSweepGeneration::Unknown_collection_type,
1462         "Bad cms collection type");
1463       return false;
1464     }
1465   )
1466 
1467   FreelistLocker x(this);
1468   // ------------------------------------------------------------------
1469   // Print out lots of information which affects the initiation of
1470   // a collection.
1471   if (PrintCMSInitiationStatistics && stats().valid()) {
1472     gclog_or_tty->print("CMSCollector shouldConcurrentCollect: ");
1473     gclog_or_tty->stamp();
1474     gclog_or_tty->print_cr("");
1475     stats().print_on(gclog_or_tty);
1476     gclog_or_tty->print_cr("time_until_cms_gen_full %3.7f",
1477       stats().time_until_cms_gen_full());
1478     gclog_or_tty->print_cr("free="SIZE_FORMAT, _cmsGen->free());
1479     gclog_or_tty->print_cr("contiguous_available="SIZE_FORMAT,
1480                            _cmsGen->contiguous_available());
1481     gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
1482     gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
1483     gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
1484     gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
1485     gclog_or_tty->print_cr("metadata initialized %d",
1486       MetaspaceGC::should_concurrent_collect());
1487   }
1488   // ------------------------------------------------------------------
1489 
1490   // If the estimated time to complete a cms collection (cms_duration())
1491   // is less than the estimated time remaining until the cms generation
1492   // is full, start a collection.
1493   if (!UseCMSInitiatingOccupancyOnly) {
1494     if (stats().valid()) {
1495       if (stats().time_until_cms_start() == 0.0) {
1496         return true;
1497       }
1498     } else {
1499       // We want to conservatively collect somewhat early in order
1500       // to try and "bootstrap" our CMS/promotion statistics;
1501       // this branch will not fire after the first successful CMS
1502       // collection because the stats should then be valid.
1503       if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
1504         if (Verbose && PrintGCDetails) {
1505           gclog_or_tty->print_cr(
1506             " CMSCollector: collect for bootstrapping statistics:"
1507             " occupancy = %f, boot occupancy = %f", _cmsGen->occupancy(),
1508             _bootstrap_occupancy);
1509         }
1510         return true;
1511       }
1512     }
1513   }
1514 
  // Otherwise, we start a collection cycle if the
  // old gen wants a collection cycle started. It may use
  // an appropriate criterion for making this decision.
1518   // XXX We need to make sure that the gen expansion
1519   // criterion dovetails well with this. XXX NEED TO FIX THIS
1520   if (_cmsGen->should_concurrent_collect()) {
1521     if (Verbose && PrintGCDetails) {
1522       gclog_or_tty->print_cr("CMS old gen initiated");
1523     }
1524     return true;
1525   }
1526 
1527   // We start a collection if we believe an incremental collection may fail;
1528   // this is not likely to be productive in practice because it's probably too
1529   // late anyway.
1530   GenCollectedHeap* gch = GenCollectedHeap::heap();
1531   assert(gch->collector_policy()->is_two_generation_policy(),
1532          "You may want to check the correctness of the following");
1533   if (gch->incremental_collection_will_fail(true /* consult_young */)) {
1534     if (Verbose && PrintGCDetails) {
1535       gclog_or_tty->print("CMSCollector: collect because incremental collection will fail ");
1536     }
1537     return true;
1538   }
1539 
  if (MetaspaceGC::should_concurrent_collect()) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print("CMSCollector: collect for metadata allocation ");
    }
    return true;
  }
1546 
1547   return false;
1548 }
1549 
1550 // Clear _expansion_cause fields of constituent generations
1551 void CMSCollector::clear_expansion_cause() {
1552   _cmsGen->clear_expansion_cause();
1553 }
1554 
// We should be conservative in starting a collection cycle.  Starting
// too eagerly runs the risk of collecting too often in the
// extreme.  Collecting too rarely falls back on full collections,
// which works, even if not optimal in terms of concurrent work.
// As a workaround for collecting too eagerly, use the flag
// UseCMSInitiatingOccupancyOnly.  This also has the advantage of
// giving the user an easily understandable way of controlling the
// collections.
1563 // We want to start a new collection cycle if any of the following
1564 // conditions hold:
1565 // . our current occupancy exceeds the configured initiating occupancy
1566 //   for this generation, or
1567 // . we recently needed to expand this space and have not, since that
1568 //   expansion, done a collection of this generation, or
// . the underlying space believes that it may be a good idea to initiate
//   a concurrent collection (this may be based on criteria such as the
//   following: the space uses linear allocation and linear allocation is
//   going to fail, or there is believed to be excessive fragmentation in
//   the generation, etc.), or
// . (currently done by CMSCollector::shouldConcurrentCollect() only for
//   the case of the old generation; see CR 6543076)
//   we may be approaching a point at which allocation requests may fail
//   because we will be out of sufficient free space given allocation rate
//   estimates.
1578 bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
1579 
1580   assert_lock_strong(freelistLock());
1581   if (occupancy() > initiating_occupancy()) {
1582     if (PrintGCDetails && Verbose) {
1583       gclog_or_tty->print(" %s: collect because of occupancy %f / %f  ",
1584         short_name(), occupancy(), initiating_occupancy());
1585     }
1586     return true;
1587   }
1588   if (UseCMSInitiatingOccupancyOnly) {
1589     return false;
1590   }
1591   if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
1592     if (PrintGCDetails && Verbose) {
1593       gclog_or_tty->print(" %s: collect because expanded for allocation ",
1594         short_name());
1595     }
1596     return true;
1597   }
1598   if (_cmsSpace->should_concurrent_collect()) {
1599     if (PrintGCDetails && Verbose) {
1600       gclog_or_tty->print(" %s: collect because cmsSpace says so ",
1601         short_name());
1602     }
1603     return true;
1604   }
1605   return false;
1606 }
1607 
1608 void ConcurrentMarkSweepGeneration::collect(bool   full,
1609                                             bool   clear_all_soft_refs,
1610                                             size_t size,
1611                                             bool   tlab)
1612 {
1613   collector()->collect(full, clear_all_soft_refs, size, tlab);
1614 }
1615 
1616 void CMSCollector::collect(bool   full,
1617                            bool   clear_all_soft_refs,
1618                            size_t size,
1619                            bool   tlab)
1620 {
1621   if (!UseCMSCollectionPassing && _collectorState > Idling) {
1622     // For debugging purposes skip the collection if the state
1623     // is not currently idle
1624     if (TraceCMSState) {
1625       gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " skipped full:%d CMS state %d",
1626         Thread::current(), full, _collectorState);
1627     }
1628     return;
1629   }
1630 
1631   // The following "if" branch is present for defensive reasons.
1632   // In the current uses of this interface, it can be replaced with:
  // assert(!GC_locker::is_active(), "Can't be called otherwise");
1634   // But I am not placing that assert here to allow future
1635   // generality in invoking this interface.
1636   if (GC_locker::is_active()) {
1637     // A consistency test for GC_locker
1638     assert(GC_locker::needs_gc(), "Should have been set already");
1639     // Skip this foreground collection, instead
1640     // expanding the heap if necessary.
1641     // Need the free list locks for the call to free() in compute_new_size()
1642     compute_new_size();
1643     return;
1644   }
1645   acquire_control_and_collect(full, clear_all_soft_refs);
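  // This foreground collection counts toward the CMSFullGCsBeforeCompaction
  // threshold consulted in decide_foreground_collection_type().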
1646   _full_gcs_since_conc_gc++;
1647 
1648 }
1649 
1650 void CMSCollector::request_full_gc(unsigned int full_gc_count) {
1651   GenCollectedHeap* gch = GenCollectedHeap::heap();
1652   unsigned int gc_count = gch->total_full_collections();
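  // Only post the request if no full collection has started since the
  // caller sampled full_gc_count; otherwise a collection that satisfies
  // this request is already in progress (or has completed).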
1653   if (gc_count == full_gc_count) {
1654     MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
1655     _full_gc_requested = true;
1656     CGC_lock->notify();   // nudge CMS thread
1657   } else {
1658     assert(gc_count > full_gc_count, "Error: causal loop");
1659   }
1660 }
1661 
1662 
1663 // The foreground and background collectors need to coordinate in order
1664 // to make sure that they do not mutually interfere with CMS collections.
1665 // When a background collection is active,
1666 // the foreground collector may need to take over (preempt) and
1667 // synchronously complete an ongoing collection. Depending on the
1668 // frequency of the background collections and the heap usage
1669 // of the application, this preemption can be seldom or frequent.
1670 // There are only certain
1671 // points in the background collection that the "collection-baton"
1672 // can be passed to the foreground collector.
1673 //
1674 // The foreground collector will wait for the baton before
1675 // starting any part of the collection.  The foreground collector
1676 // will only wait at one location.
1677 //
1678 // The background collector will yield the baton before starting a new
1679 // phase of the collection (e.g., before initial marking, marking from roots,
1680 // precleaning, final re-mark, sweep etc.)  This is normally done at the head
1681 // of the loop which switches the phases. The background collector does some
1682 // of the phases (initial mark, final re-mark) with the world stopped.
1683 // Because of locking involved in stopping the world,
1684 // the foreground collector should not block waiting for the background
1685 // collector when it is doing a stop-the-world phase.  The background
1686 // collector will yield the baton at an additional point just before
1687 // it enters a stop-the-world phase.  Once the world is stopped, the
1688 // background collector checks the phase of the collection.  If the
1689 // phase has not changed, it proceeds with the collection.  If the
1690 // phase has changed, it skips that phase of the collection.  See
1691 // the comments on the use of the Heap_lock in collect_in_background().
1692 //
// Variables used in baton passing.
//   _foregroundGCIsActive - Set to true by the foreground collector when
//      it wants the baton.  The foreground clears it when it has finished
//      the collection.
//   _foregroundGCShouldWait - Set to true by the background collector
//      when it is running.  The foreground collector waits while
//      _foregroundGCShouldWait is true.
//   CGC_lock - monitor used to protect access to the above variables
//      and to notify the foreground and background collectors.
//   _collectorState - current state of the CMS collection.
1703 //
1704 // The foreground collector
1705 //   acquires the CGC_lock
1706 //   sets _foregroundGCIsActive
1707 //   waits on the CGC_lock for _foregroundGCShouldWait to be false
1708 //     various locks acquired in preparation for the collection
1709 //     are released so as not to block the background collector
1710 //     that is in the midst of a collection
1711 //   proceeds with the collection
1712 //   clears _foregroundGCIsActive
1713 //   returns
1714 //
1715 // The background collector in a loop iterating on the phases of the
1716 //      collection
1717 //   acquires the CGC_lock
1718 //   sets _foregroundGCShouldWait
1719 //   if _foregroundGCIsActive is set
//     clears _foregroundGCShouldWait, notifies CGC_lock
//     waits on CGC_lock for _foregroundGCIsActive to become false
1722 //     and exits the loop.
1723 //   otherwise
1724 //     proceed with that phase of the collection
1725 //     if the phase is a stop-the-world phase,
1726 //       yield the baton once more just before enqueueing
1727 //       the stop-world CMS operation (executed by the VM thread).
1728 //   returns after all phases of the collection are done
1729 //
1730 
1731 void CMSCollector::acquire_control_and_collect(bool full,
1732         bool clear_all_soft_refs) {
1733   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1734   assert(!Thread::current()->is_ConcurrentGC_thread(),
1735          "shouldn't try to acquire control from self!");
1736 
1737   // Start the protocol for acquiring control of the
1738   // collection from the background collector (aka CMS thread).
1739   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1740          "VM thread should have CMS token");
1741   // Remember the possibly interrupted state of an ongoing
1742   // concurrent collection
1743   CollectorState first_state = _collectorState;
1744 
1745   // Signal to a possibly ongoing concurrent collection that
1746   // we want to do a foreground collection.
1747   _foregroundGCIsActive = true;
1748 
1749   // Disable incremental mode during a foreground collection.
1750   ICMSDisabler icms_disabler;
1751 
  // Release locks and wait for a notify from the background collector.
  // Releasing the locks is only necessary for phases which
  // yield, to improve the granularity of the collection.
1755   assert_lock_strong(bitMapLock());
1756   // We need to lock the Free list lock for the space that we are
1757   // currently collecting.
1758   assert(haveFreelistLocks(), "Must be holding free list locks");
1759   bitMapLock()->unlock();
1760   releaseFreelistLocks();
1761   {
1762     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1763     if (_foregroundGCShouldWait) {
1764       // We are going to be waiting for action for the CMS thread;
1765       // it had better not be gone (for instance at shutdown)!
1766       assert(ConcurrentMarkSweepThread::cmst() != NULL,
1767              "CMS thread must be running");
1768       // Wait here until the background collector gives us the go-ahead
1769       ConcurrentMarkSweepThread::clear_CMS_flag(
1770         ConcurrentMarkSweepThread::CMS_vm_has_token);  // release token
1771       // Get a possibly blocked CMS thread going:
1772       //   Note that we set _foregroundGCIsActive true above,
1773       //   without protection of the CGC_lock.
1774       CGC_lock->notify();
1775       assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(),
1776              "Possible deadlock");
1777       while (_foregroundGCShouldWait) {
1778         // wait for notification
1779         CGC_lock->wait(Mutex::_no_safepoint_check_flag);
1780         // Possibility of delay/starvation here, since CMS token does
        // not know to give priority to VM thread? Actually, I think
1782         // there wouldn't be any delay/starvation, but the proof of
1783         // that "fact" (?) appears non-trivial. XXX 20011219YSR
1784       }
1785       ConcurrentMarkSweepThread::set_CMS_flag(
1786         ConcurrentMarkSweepThread::CMS_vm_has_token);
1787     }
1788   }
1789   // The CMS_token is already held.  Get back the other locks.
1790   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1791          "VM thread should have CMS token");
1792   getFreelistLocks();
1793   bitMapLock()->lock_without_safepoint_check();
1794   if (TraceCMSState) {
1795     gclog_or_tty->print_cr("CMS foreground collector has asked for control "
1796       INTPTR_FORMAT " with first state %d", Thread::current(), first_state);
1797     gclog_or_tty->print_cr("    gets control with state %d", _collectorState);
1798   }
1799 
1800   // Check if we need to do a compaction, or if not, whether
1801   // we need to start the mark-sweep from scratch.
1802   bool should_compact    = false;
1803   bool should_start_over = false;
1804   decide_foreground_collection_type(clear_all_soft_refs,
1805     &should_compact, &should_start_over);
1806 
1807 NOT_PRODUCT(
1808   if (RotateCMSCollectionTypes) {
1809     if (_cmsGen->debug_collection_type() ==
1810         ConcurrentMarkSweepGeneration::MSC_foreground_collection_type) {
1811       should_compact = true;
1812     } else if (_cmsGen->debug_collection_type() ==
1813                ConcurrentMarkSweepGeneration::MS_foreground_collection_type) {
1814       should_compact = false;
1815     }
1816   }
1817 )
1818 
1819   if (PrintGCDetails && first_state > Idling) {
1820     GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
1821     if (GCCause::is_user_requested_gc(cause) ||
1822         GCCause::is_serviceability_requested_gc(cause)) {
1823       gclog_or_tty->print(" (concurrent mode interrupted)");
1824     } else {
1825       gclog_or_tty->print(" (concurrent mode failure)");
1826     }
1827   }
1828 
1829   if (should_compact) {
1830     // If the collection is being acquired from the background
1831     // collector, there may be references on the discovered
1832     // references lists that have NULL referents (being those
1833     // that were concurrently cleared by a mutator) or
1834     // that are no longer active (having been enqueued concurrently
1835     // by the mutator).
1836     // Scrub the list of those references because Mark-Sweep-Compact
1837     // code assumes referents are not NULL and that all discovered
1838     // Reference objects are active.
1839     ref_processor()->clean_up_discovered_references();
1840 
1841     do_compaction_work(clear_all_soft_refs);
1842 
1843     // Has the GC time limit been exceeded?
1844     DefNewGeneration* young_gen = _young_gen->as_DefNewGeneration();
1845     size_t max_eden_size = young_gen->max_capacity() -
1846                            young_gen->to()->capacity() -
1847                            young_gen->from()->capacity();
1848     GenCollectedHeap* gch = GenCollectedHeap::heap();
1849     GCCause::Cause gc_cause = gch->gc_cause();
1850     size_policy()->check_gc_overhead_limit(_young_gen->used(),
1851                                            young_gen->eden()->used(),
1852                                            _cmsGen->max_capacity(),
1853                                            max_eden_size,
1854                                            full,
1855                                            gc_cause,
1856                                            gch->collector_policy());
1857   } else {
1858     do_mark_sweep_work(clear_all_soft_refs, first_state,
1859       should_start_over);
1860   }
1861   // Reset the expansion cause, now that we just completed
1862   // a collection cycle.
1863   clear_expansion_cause();
1864   _foregroundGCIsActive = false;
1865   return;
1866 }
1867 
1868 // Resize the tenured generation
1869 // after obtaining the free list locks for the
1870 // two generations.
1871 void CMSCollector::compute_new_size() {
1872   assert_locked_or_safepoint(Heap_lock);
1873   FreelistLocker z(this);
1874   MetaspaceGC::compute_new_size();
1875   _cmsGen->compute_new_size();
1876 }
1877 
1878 // A work method used by foreground collection to determine
1879 // what type of collection (compacting or not, continuing or fresh)
1880 // it should do.
1881 // NOTE: the intent is to make UseCMSCompactAtFullCollection
1882 // and CMSCompactWhenClearAllSoftRefs the default in the future
1883 // and do away with the flags after a suitable period.
1884 void CMSCollector::decide_foreground_collection_type(
1885   bool clear_all_soft_refs, bool* should_compact,
1886   bool* should_start_over) {
  // Normally, we'll compact only if the UseCMSCompactAtFullCollection
  // flag is set, and either a System.gc() has been requested, or
  // the number of full gc's since the last concurrent cycle
  // has exceeded the threshold set by CMSFullGCsBeforeCompaction,
  // or an incremental collection would fail.
1892   GenCollectedHeap* gch = GenCollectedHeap::heap();
1893   assert(gch->collector_policy()->is_two_generation_policy(),
1894          "You may want to check the correctness of the following");
1895   // Inform cms gen if this was due to partial collection failing.
1896   // The CMS gen may use this fact to determine its expansion policy.
1897   if (gch->incremental_collection_will_fail(false /* don't consult_young */)) {
1898     assert(!_cmsGen->incremental_collection_failed(),
1899            "Should have been noticed, reacted to and cleared");
1900     _cmsGen->set_incremental_collection_failed();
1901   }
1902   *should_compact =
1903     UseCMSCompactAtFullCollection &&
1904     ((_full_gcs_since_conc_gc >= CMSFullGCsBeforeCompaction) ||
1905      GCCause::is_user_requested_gc(gch->gc_cause()) ||
1906      gch->incremental_collection_will_fail(true /* consult_young */));
1907   *should_start_over = false;
1908   if (clear_all_soft_refs && !*should_compact) {
1909     // We are about to do a last ditch collection attempt
1910     // so it would normally make sense to do a compaction
1911     // to reclaim as much space as possible.
1912     if (CMSCompactWhenClearAllSoftRefs) {
1913       // Default: The rationale is that in this case either
1914       // we are past the final marking phase, in which case
1915       // we'd have to start over, or so little has been done
1916       // that there's little point in saving that work. Compaction
1917       // appears to be the sensible choice in either case.
1918       *should_compact = true;
1919     } else {
1920       // We have been asked to clear all soft refs, but not to
1921       // compact. Make sure that we aren't past the final checkpoint
1922       // phase, for that is where we process soft refs. If we are already
1923       // past that phase, we'll need to redo the refs discovery phase and
1924       // if necessary clear soft refs that weren't previously
1925       // cleared. We do so by remembering the phase in which
1926       // we came in, and if we are past the refs processing
1927       // phase, we'll choose to just redo the mark-sweep
1928       // collection from scratch.
1929       if (_collectorState > FinalMarking) {
1930         // We are past the refs processing phase;
1931         // start over and do a fresh synchronous CMS cycle
1932         _collectorState = Resetting; // skip to reset to start new cycle
1933         reset(false /* == !asynch */);
1934         *should_start_over = true;
1935       } // else we can continue a possibly ongoing current cycle
1936     }
1937   }
1938 }
1939 
1940 // A work method used by the foreground collector to do
1941 // a mark-sweep-compact.
1942 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
1943   GenCollectedHeap* gch = GenCollectedHeap::heap();
1944   TraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, gclog_or_tty);
1945   if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) {
1946     gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d "
1947       "collections passed to foreground collector", _full_gcs_since_conc_gc);
1948   }
1949 
1950   // Sample collection interval time and reset for collection pause.
1951   if (UseAdaptiveSizePolicy) {
1952     size_policy()->msc_collection_begin();
1953   }
1954 
1955   // Temporarily widen the span of the weak reference processing to
1956   // the entire heap.
1957   MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
1958   ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
1959   // Temporarily, clear the "is_alive_non_header" field of the
1960   // reference processor.
1961   ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL);
1962   // Temporarily make reference _processing_ single threaded (non-MT).
1963   ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false);
1964   // Temporarily make refs discovery atomic
1965   ReferenceProcessorAtomicMutator rp_mut_atomic(ref_processor(), true);
1966   // Temporarily make reference _discovery_ single threaded (non-MT)
1967   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
1968 
1969   ref_processor()->set_enqueuing_is_done(false);
1970   ref_processor()->enable_discovery(false /*verify_disabled*/, false /*check_no_refs*/);
1971   ref_processor()->setup_policy(clear_all_soft_refs);
  // If an asynchronous collection finishes, the _modUnionTable is
  // all clear.  If we are taking over the collection from an asynchronous
  // collection, clear the _modUnionTable.
  assert(_collectorState != Idling || _modUnionTable.isAllClear(),
    "_modUnionTable should be clear if the baton was not passed");
  _modUnionTable.clear_all();
  assert(_collectorState != Idling || _ct->klass_rem_set()->mod_union_is_clear(),
    "mod union for klasses should be clear if the baton was not passed");
1980   _ct->klass_rem_set()->clear_mod_union();
1981 
1982   // We must adjust the allocation statistics being maintained
1983   // in the free list space. We do so by reading and clearing
1984   // the sweep timer and updating the block flux rate estimates below.
1985   assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
1986   if (_inter_sweep_timer.is_active()) {
1987     _inter_sweep_timer.stop();
1988     // Note that we do not use this sample to update the _inter_sweep_estimate.
1989     _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
1990                                             _inter_sweep_estimate.padded_average(),
1991                                             _intra_sweep_estimate.padded_average());
1992   }
1993 
1994   GenMarkSweep::invoke_at_safepoint(_cmsGen->level(),
1995     ref_processor(), clear_all_soft_refs);
1996   #ifdef ASSERT
1997     CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
1998     size_t free_size = cms_space->free();
1999     assert(free_size ==
2000            pointer_delta(cms_space->end(), cms_space->compaction_top())
2001            * HeapWordSize,
2002       "All the free space should be compacted into one chunk at top");
2003     assert(cms_space->dictionary()->total_chunk_size(
2004                                       debug_only(cms_space->freelistLock())) == 0 ||
2005            cms_space->totalSizeInIndexedFreeLists() == 0,
2006       "All the free space should be in a single chunk");
2007     size_t num = cms_space->totalCount();
2008     assert((free_size == 0 && num == 0) ||
2009            (free_size > 0  && (num == 1 || num == 2)),
2010          "There should be at most 2 free chunks after compaction");
2011   #endif // ASSERT
2012   _collectorState = Resetting;
2013   assert(_restart_addr == NULL,
2014          "Should have been NULL'd before baton was passed");
2015   reset(false /* == !asynch */);
2016   _cmsGen->reset_after_compaction();
2017   _concurrent_cycles_since_last_unload = 0;
2018 
2019   // Clear any data recorded in the PLAB chunk arrays.
2020   if (_survivor_plab_array != NULL) {
2021     reset_survivor_plab_arrays();
2022   }
2023 
2024   // Adjust the per-size allocation stats for the next epoch.
2025   _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
2026   // Restart the "inter sweep timer" for the next epoch.
2027   _inter_sweep_timer.reset();
2028   _inter_sweep_timer.start();
2029 
2030   // Sample collection pause time and reset for collection interval.
2031   if (UseAdaptiveSizePolicy) {
2032     size_policy()->msc_collection_end(gch->gc_cause());
2033   }
2034 
2035   // For a mark-sweep-compact, compute_new_size() will be called
2036   // in the heap's do_collection() method.
2037 }
2038 
2039 // A work method used by the foreground collector to do
2040 // a mark-sweep, after taking over from a possibly on-going
2041 // concurrent mark-sweep collection.
2042 void CMSCollector::do_mark_sweep_work(bool clear_all_soft_refs,
2043   CollectorState first_state, bool should_start_over) {
2044   if (PrintGC && Verbose) {
2045     gclog_or_tty->print_cr("Pass concurrent collection to foreground "
2046       "collector with count %d",
2047       _full_gcs_since_conc_gc);
2048   }
2049   switch (_collectorState) {
2050     case Idling:
2051       if (first_state == Idling || should_start_over) {
        // The background GC was not active, or should be
        // restarted from scratch; start the cycle.
2054         _collectorState = InitialMarking;
2055       }
2056       // If first_state was not Idling, then a background GC
2057       // was in progress and has now finished.  No need to do it
2058       // again.  Leave the state as Idling.
2059       break;
2060     case Precleaning:
2061       // In the foreground case don't do the precleaning since
2062       // it is not done concurrently and there is extra work
2063       // required.
2064       _collectorState = FinalMarking;
2065   }
2066   if (PrintGCDetails &&
2067       (_collectorState > Idling ||
2068        !GCCause::is_user_requested_gc(GenCollectedHeap::heap()->gc_cause()))) {
2069     gclog_or_tty->print(" (concurrent mode failure)");
2070   }
2071   collect_in_foreground(clear_all_soft_refs);
2072 
2073   // For a mark-sweep, compute_new_size() will be called
2074   // in the heap's do_collection() method.
2075 }
2076 
2077 
2078 void CMSCollector::getFreelistLocks() const {
2079   // Get locks for all free lists in all generations that this
2080   // collector is responsible for
2081   _cmsGen->freelistLock()->lock_without_safepoint_check();
2082 }
2083 
2084 void CMSCollector::releaseFreelistLocks() const {
2085   // Release locks for all free lists in all generations that this
2086   // collector is responsible for
2087   _cmsGen->freelistLock()->unlock();
2088 }
2089 
2090 bool CMSCollector::haveFreelistLocks() const {
2091   // Check locks for all free lists in all generations that this
2092   // collector is responsible for
2093   assert_lock_strong(_cmsGen->freelistLock());
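  // This check is meant for use from asserts only; in a product build
  // reaching here is an error.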
2094   PRODUCT_ONLY(ShouldNotReachHere());
2095   return true;
2096 }
2097 
2098 // A utility class that is used by the CMS collector to
2099 // temporarily "release" the foreground collector from its
2100 // usual obligation to wait for the background collector to
2101 // complete an ongoing phase before proceeding.
2102 class ReleaseForegroundGC: public StackObj {
2103  private:
2104   CMSCollector* _c;
2105  public:
2106   ReleaseForegroundGC(CMSCollector* c) : _c(c) {
2107     assert(_c->_foregroundGCShouldWait, "Else should not need to call");
2108     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2109     // allow a potentially blocked foreground collector to proceed
2110     _c->_foregroundGCShouldWait = false;
2111     if (_c->_foregroundGCIsActive) {
2112       CGC_lock->notify();
2113     }
2114     assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2115            "Possible deadlock");
2116   }
2117 
2118   ~ReleaseForegroundGC() {
2119     assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
2120     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2121     _c->_foregroundGCShouldWait = true;
2122   }
2123 };
2124 
2125 // There are separate collect_in_background and collect_in_foreground because of
2126 // the different locking requirements of the background collector and the
// foreground collector.  There was originally an attempt to share
// one "collect" method between the background collector and the foreground
// collector, but the if-then-else logic this would have required made it
// cleaner to have separate methods.
2131 void CMSCollector::collect_in_background(bool clear_all_soft_refs) {
2132   assert(Thread::current()->is_ConcurrentGC_thread(),
2133     "A CMS asynchronous collection is only allowed on a CMS thread.");
2134 
2135   GenCollectedHeap* gch = GenCollectedHeap::heap();
2136   {
2137     bool safepoint_check = Mutex::_no_safepoint_check_flag;
2138     MutexLockerEx hl(Heap_lock, safepoint_check);
2139     FreelistLocker fll(this);
2140     MutexLockerEx x(CGC_lock, safepoint_check);
2141     if (_foregroundGCIsActive || !UseAsyncConcMarkSweepGC) {
2142       // The foreground collector is active or we're
2143       // not using asynchronous collections.  Skip this
2144       // background collection.
2145       assert(!_foregroundGCShouldWait, "Should be clear");
2146       return;
2147     } else {
2148       assert(_collectorState == Idling, "Should be idling before start.");
2149       _collectorState = InitialMarking;
2150       // Reset the expansion cause, now that we are about to begin
2151       // a new cycle.
2152       clear_expansion_cause();
2153 
2154       // Clear the MetaspaceGC flag since a concurrent collection
2155       // is starting but also clear it after the collection.
2156       MetaspaceGC::set_should_concurrent_collect(false);
2157     }
2158     // Decide if we want to enable class unloading as part of the
2159     // ensuing concurrent GC cycle.
2160     update_should_unload_classes();
2161     _full_gc_requested = false;           // acks all outstanding full gc requests
2162     // Signal that we are about to start a collection
2163     gch->increment_total_full_collections();  // ... starting a collection cycle
2164     _collection_count_start = gch->total_full_collections();
2165   }
2166 
2167   // Used for PrintGC
2168   size_t prev_used;
2169   if (PrintGC && Verbose) {
2170     prev_used = _cmsGen->used(); // XXXPERM
2171   }
2172 
2173   // The change of the collection state is normally done at this level;
2174   // the exceptions are phases that are executed while the world is
2175   // stopped.  For those phases the change of state is done while the
2176   // world is stopped.  For baton passing purposes this allows the
2177   // background collector to finish the phase and change state atomically.
2178   // The foreground collector cannot wait on a phase that is done
2179   // while the world is stopped because the foreground collector already
2180   // has the world stopped and would deadlock.
2181   while (_collectorState != Idling) {
2182     if (TraceCMSState) {
2183       gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
2184         Thread::current(), _collectorState);
2185     }
2186     // The foreground collector
2187     //   holds the Heap_lock throughout its collection.
2188     //   holds the CMS token (but not the lock)
2189     //     except while it is waiting for the background collector to yield.
2190     //
2191     // The foreground collector should be blocked (not for long)
2192     //   if the background collector is about to start a phase
2193     //   executed with world stopped.  If the background
2194     //   collector has already started such a phase, the
2195     //   foreground collector is blocked waiting for the
2196     //   Heap_lock.  The stop-world phases (InitialMarking and FinalMarking)
2197     //   are executed in the VM thread.
2198     //
2199     // The locking order is
2200     //   PendingListLock (PLL)  -- if applicable (FinalMarking)
2201     //   Heap_lock  (both this & PLL locked in VM_CMS_Operation::prologue())
2202     //   CMS token  (claimed in
2203     //                stop_world_and_do() -->
2204     //                  safepoint_synchronize() -->
2205     //                    CMSThread::synchronize())
2206 
2207     {
2208       // Check if the FG collector wants us to yield.
2209       CMSTokenSync x(true); // is cms thread
2210       if (waitForForegroundGC()) {
2211         // We yielded to a foreground GC, nothing more to be
2212         // done this round.
2213         assert(_foregroundGCShouldWait == false, "We set it to false in "
2214                "waitForForegroundGC()");
2215         if (TraceCMSState) {
2216           gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2217             " exiting collection CMS state %d",
2218             Thread::current(), _collectorState);
2219         }
2220         return;
2221       } else {
2222         // The background collector can run but check to see if the
2223         // foreground collector has done a collection while the
2224         // background collector was waiting to get the CGC_lock
2225         // above.  If yes, break so that _foregroundGCShouldWait
2226         // is cleared before returning.
2227         if (_collectorState == Idling) {
2228           break;
2229         }
2230       }
2231     }
2232 
2233     assert(_foregroundGCShouldWait, "Foreground collector, if active, "
2234       "should be waiting");
2235 
2236     switch (_collectorState) {
2237       case InitialMarking:
2238         {
2239           ReleaseForegroundGC x(this);
2240           stats().record_cms_begin();
2241 
2242           VM_CMS_Initial_Mark initial_mark_op(this);
2243           VMThread::execute(&initial_mark_op);
2244         }
2245         // The collector state may be any legal state at this point
2246         // since the background collector may have yielded to the
2247         // foreground collector.
2248         break;
2249       case Marking:
2250         // initial marking in checkpointRootsInitialWork has been completed
2251         if (markFromRoots(true)) { // we were successful
2252           assert(_collectorState == Precleaning, "Collector state should "
2253             "have changed");
2254         } else {
2255           assert(_foregroundGCIsActive, "Internal state inconsistency");
2256         }
2257         break;
2258       case Precleaning:
2259         if (UseAdaptiveSizePolicy) {
2260           size_policy()->concurrent_precleaning_begin();
2261         }
2262         // marking from roots in markFromRoots has been completed
2263         preclean();
2264         if (UseAdaptiveSizePolicy) {
2265           size_policy()->concurrent_precleaning_end();
2266         }
2267         assert(_collectorState == AbortablePreclean ||
2268                _collectorState == FinalMarking,
2269                "Collector state should have changed");
2270         break;
2271       case AbortablePreclean:
        if (UseAdaptiveSizePolicy) {
          size_policy()->concurrent_phases_resume();
        }
2275         abortable_preclean();
2276         if (UseAdaptiveSizePolicy) {
2277           size_policy()->concurrent_precleaning_end();
2278         }
2279         assert(_collectorState == FinalMarking, "Collector state should "
2280           "have changed");
2281         break;
2282       case FinalMarking:
2283         {
2284           ReleaseForegroundGC x(this);
2285 
2286           VM_CMS_Final_Remark final_remark_op(this);
2287           VMThread::execute(&final_remark_op);
2288         }
2289         assert(_foregroundGCShouldWait, "block post-condition");
2290         break;
2291       case Sweeping:
2292         if (UseAdaptiveSizePolicy) {
2293           size_policy()->concurrent_sweeping_begin();
2294         }
2295         // final marking in checkpointRootsFinal has been completed
2296         sweep(true);
2297         assert(_collectorState == Resizing, "Collector state change "
2298           "to Resizing must be done under the free_list_lock");
2299         _full_gcs_since_conc_gc = 0;
2300 
2301         // Stop the timers for adaptive size policy for the concurrent phases
2302         if (UseAdaptiveSizePolicy) {
2303           size_policy()->concurrent_sweeping_end();
          size_policy()->concurrent_phases_end(gch->gc_cause(),
                                               gch->prev_gen(_cmsGen)->capacity(),
                                               _cmsGen->free());
2307         }
2308 
2309       case Resizing: {
2310         // Sweeping has been completed...
2311         // At this point the background collection has completed.
2312         // Don't move the call to compute_new_size() down
2313         // into code that might be executed if the background
2314         // collection was preempted.
2315         {
2316           ReleaseForegroundGC x(this);   // unblock FG collection
2317           MutexLockerEx       y(Heap_lock, Mutex::_no_safepoint_check_flag);
2318           CMSTokenSync        z(true);   // not strictly needed.
2319           if (_collectorState == Resizing) {
2320             compute_new_size();
2321             _collectorState = Resetting;
2322           } else {
2323             assert(_collectorState == Idling, "The state should only change"
2324                    " because the foreground collector has finished the collection");
2325           }
2326         }
2327         break;
2328       }
2329       case Resetting:
2330         // CMS heap resizing has been completed
2331         reset(true);
2332         assert(_collectorState == Idling, "Collector state should "
2333           "have changed");
2334 
2335         MetaspaceGC::set_should_concurrent_collect(false);
2336 
2337         stats().record_cms_end();
2338         // Don't move the concurrent_phases_end() and compute_new_size()
2339         // calls to here because a preempted background collection
        // has its state set to "Resetting".
2341         break;
2342       case Idling:
2343       default:
2344         ShouldNotReachHere();
2345         break;
2346     }
2347     if (TraceCMSState) {
2348       gclog_or_tty->print_cr("  Thread " INTPTR_FORMAT " done - next CMS state %d",
2349         Thread::current(), _collectorState);
2350     }
2351     assert(_foregroundGCShouldWait, "block post-condition");
2352   }
2353 
2354   // Should this be in gc_epilogue?
2355   collector_policy()->counters()->update_counters();
2356 
2357   {
2358     // Clear _foregroundGCShouldWait and, in the event that the
2359     // foreground collector is waiting, notify it, before
2360     // returning.
2361     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2362     _foregroundGCShouldWait = false;
2363     if (_foregroundGCIsActive) {
2364       CGC_lock->notify();
2365     }
2366     assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2367            "Possible deadlock");
2368   }
2369   if (TraceCMSState) {
2370     gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2371       " exiting collection CMS state %d",
2372       Thread::current(), _collectorState);
2373   }
2374   if (PrintGC && Verbose) {
2375     _cmsGen->print_heap_change(prev_used);
2376   }
2377 }
2378 
2379 void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) {
2380   assert(_foregroundGCIsActive && !_foregroundGCShouldWait,
2381          "Foreground collector should be waiting, not executing");
  assert(Thread::current()->is_VM_thread(), "A foreground collection "
    "may only be done by the VM Thread with the world stopped");
2384   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
2385          "VM thread should have CMS token");
2386 
2387   NOT_PRODUCT(TraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
2388     true, gclog_or_tty);)
2389   if (UseAdaptiveSizePolicy) {
2390     size_policy()->ms_collection_begin();
2391   }
2392   COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
2393 
2394   HandleMark hm;  // Discard invalid handles created during verification
2395 
2396   if (VerifyBeforeGC &&
2397       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2398     Universe::verify(true);
2399   }
2400 
2401   // Snapshot the soft reference policy to be used in this collection cycle.
2402   ref_processor()->setup_policy(clear_all_soft_refs);
2403 
2404   bool init_mark_was_synchronous = false; // until proven otherwise
2405   while (_collectorState != Idling) {
2406     if (TraceCMSState) {
2407       gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
2408         Thread::current(), _collectorState);
2409     }
2410     switch (_collectorState) {
2411       case InitialMarking:
2412         init_mark_was_synchronous = true;  // fact to be exploited in re-mark
2413         checkpointRootsInitial(false);
2414         assert(_collectorState == Marking, "Collector state should have changed"
2415           " within checkpointRootsInitial()");
2416         break;
2417       case Marking:
2418         // initial marking in checkpointRootsInitialWork has been completed
2419         if (VerifyDuringGC &&
2420             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2421           gclog_or_tty->print("Verify before initial mark: ");
2422           Universe::verify(true);
2423         }
2424         {
2425           bool res = markFromRoots(false);
2426           assert(res && _collectorState == FinalMarking, "Collector state should "
2427             "have changed");
2428           break;
2429         }
2430       case FinalMarking:
2431         if (VerifyDuringGC &&
2432             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2433           gclog_or_tty->print("Verify before re-mark: ");
2434           Universe::verify(true);
2435         }
2436         checkpointRootsFinal(false, clear_all_soft_refs,
2437                              init_mark_was_synchronous);
2438         assert(_collectorState == Sweeping, "Collector state should not "
2439           "have changed within checkpointRootsFinal()");
2440         break;
2441       case Sweeping:
2442         // final marking in checkpointRootsFinal has been completed
2443         if (VerifyDuringGC &&
2444             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2445           gclog_or_tty->print("Verify before sweep: ");
2446           Universe::verify(true);
2447         }
2448         sweep(false);
2449         assert(_collectorState == Resizing, "Incorrect state");
2450         break;
2451       case Resizing: {
2452         // Sweeping has been completed; the actual resize in this case
2453         // is done separately; nothing to be done in this state.
2454         _collectorState = Resetting;
2455         break;
2456       }
2457       case Resetting:
2458         // The heap has been resized.
2459         if (VerifyDuringGC &&
2460             GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2461           gclog_or_tty->print("Verify before reset: ");
2462           Universe::verify(true);
2463         }
2464         reset(false);
2465         assert(_collectorState == Idling, "Collector state should "
2466           "have changed");
2467         break;
2468       case Precleaning:
2469       case AbortablePreclean:
2470         // Elide the preclean phase
2471         _collectorState = FinalMarking;
2472         break;
2473       default:
2474         ShouldNotReachHere();
2475     }
2476     if (TraceCMSState) {
2477       gclog_or_tty->print_cr("  Thread " INTPTR_FORMAT " done - next CMS state %d",
2478         Thread::current(), _collectorState);
2479     }
2480   }
2481 
2482   if (UseAdaptiveSizePolicy) {
2483     GenCollectedHeap* gch = GenCollectedHeap::heap();
2484     size_policy()->ms_collection_end(gch->gc_cause());
2485   }
2486 
2487   if (VerifyAfterGC &&
2488       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
2489     Universe::verify(true);
2490   }
2491   if (TraceCMSState) {
2492     gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
2493       " exiting collection CMS state %d",
2494       Thread::current(), _collectorState);
2495   }
2496 }
2497 
2498 bool CMSCollector::waitForForegroundGC() {
2499   bool res = false;
2500   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2501          "CMS thread should have CMS token");
  // Block the foreground collector until the
  // background collector decides whether to
  // yield.
2505   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2506   _foregroundGCShouldWait = true;
2507   if (_foregroundGCIsActive) {
2508     // The background collector yields to the
2509     // foreground collector and returns a value
2510     // indicating that it has yielded.  The foreground
2511     // collector can proceed.
2512     res = true;
2513     _foregroundGCShouldWait = false;
2514     ConcurrentMarkSweepThread::clear_CMS_flag(
2515       ConcurrentMarkSweepThread::CMS_cms_has_token);
2516     ConcurrentMarkSweepThread::set_CMS_flag(
2517       ConcurrentMarkSweepThread::CMS_cms_wants_token);
2518     // Get a possibly blocked foreground thread going
2519     CGC_lock->notify();
2520     if (TraceCMSState) {
2521       gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
2522         Thread::current(), _collectorState);
2523     }
2524     while (_foregroundGCIsActive) {
2525       CGC_lock->wait(Mutex::_no_safepoint_check_flag);
2526     }
2527     ConcurrentMarkSweepThread::set_CMS_flag(
2528       ConcurrentMarkSweepThread::CMS_cms_has_token);
2529     ConcurrentMarkSweepThread::clear_CMS_flag(
2530       ConcurrentMarkSweepThread::CMS_cms_wants_token);
2531   }
2532   if (TraceCMSState) {
2533     gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
2534       Thread::current(), _collectorState);
2535   }
2536   return res;
2537 }
2538 
2539 // Because of the need to lock the free lists and other structures in
2540 // the collector, common to all the generations that the collector is
2541 // collecting, we need the gc_prologues of individual CMS generations
2542 // to delegate to their collector. It may have been simpler had the
2543 // current infrastructure allowed one to call a prologue on a
2544 // collector. In the absence of that we have the generation's
2545 // prologue delegate to the collector, which delegates back
2546 // some "local" work to a worker method in the individual generations
2547 // that it's responsible for collecting, while itself doing any
2548 // work common to all generations it's responsible for. A similar
2549 // comment applies to the gc_epilogue()s.
2550 // The role of the variable _between_prologue_and_epilogue is to
2551 // enforce the invocation protocol.
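//
// An illustrative sketch of the protocol (inferred from gc_prologue() and
// gc_epilogue() below; not an additional code path):
//
//   for each CMS generation G (driven by GenCollectedHeap):
//     G->gc_prologue(full)
//       -> collector()->gc_prologue(full)
//            first delegation:  take the free list and bit map locks, set
//                               _between_prologue_and_epilogue, do the common
//                               work, call G->gc_prologue_work(...)
//            later delegations: flag already set, return immediately
//   ... collection work ...
//   G->gc_epilogue(full)
//     -> collector()->gc_epilogue(full)
//          first delegation:  do the common epilogue work, release the locks,
//                             clear _between_prologue_and_epilogue
//          later delegations: flag already cleared, return immediately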
2552 void CMSCollector::gc_prologue(bool full) {
2553   // Call gc_prologue_work() for the CMSGen
2554   // we are responsible for.
2555 
2556   // The following locking discipline assumes that we are only called
2557   // when the world is stopped.
2558   assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption");
2559 
2560   // The CMSCollector prologue must call the gc_prologues for the
2561   // "generations" that it's responsible
2562   // for.
2563 
2564   assert(   Thread::current()->is_VM_thread()
2565          || (   CMSScavengeBeforeRemark
2566              && Thread::current()->is_ConcurrentGC_thread()),
2567          "Incorrect thread type for prologue execution");
2568 
2569   if (_between_prologue_and_epilogue) {
2570     // We have already been invoked; this is a gc_prologue delegation
2571     // from yet another CMS generation that we are responsible for, just
2572     // ignore it since all relevant work has already been done.
2573     return;
2574   }
2575 
2576   // set a bit saying prologue has been called; cleared in epilogue
2577   _between_prologue_and_epilogue = true;
2578   // Claim locks for common data structures, then call gc_prologue_work()
2579   // for each CMSGen.
2580 
2581   getFreelistLocks();   // gets free list locks on constituent spaces
2582   bitMapLock()->lock_without_safepoint_check();
2583 
2584   // Should call gc_prologue_work() for all cms gens we are responsible for
2585   bool duringMarking =    _collectorState >= Marking
2586                          && _collectorState < Sweeping;
2587 
2588   // The young collections clear the modified oops state, which indicates
2589   // whether there are any modified oops in a klass. The remark phase also
2590   // needs that information. Tell the young collections to save the union
2591   // of all modified klasses.
2592   if (duringMarking) {
2593     _ct->klass_rem_set()->set_accumulate_modified_oops(true);
2594   }
2595 
2596   bool registerClosure = duringMarking;
2597 
2598   ModUnionClosure* muc = CollectedHeap::use_parallel_gc_threads() ?
2599                                                &_modUnionClosurePar
2600                                                : &_modUnionClosure;
2601   _cmsGen->gc_prologue_work(full, registerClosure, muc);
2602 
2603   if (!full) {
2604     stats().record_gc0_begin();
2605   }
2606 }
2607 
2608 void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
2609   // Delegate to CMScollector which knows how to coordinate between
2610   // this and any other CMS generations that it is responsible for
2611   // collecting.
2612   collector()->gc_prologue(full);
2613 }
2614 
2615 // This is a "private" interface for use by this generation's CMSCollector.
2616 // Not to be called directly by any other entity (for instance,
2617 // GenCollectedHeap, which calls the "public" gc_prologue method above).
2618 void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
2619   bool registerClosure, ModUnionClosure* modUnionClosure) {
2620   assert(!incremental_collection_failed(), "Shouldn't be set yet");
2621   assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,
2622     "Should be NULL");
2623   if (registerClosure) {
2624     cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure);
2625   }
2626   cmsSpace()->gc_prologue();
2627   // Clear stat counters
2628   NOT_PRODUCT(
2629     assert(_numObjectsPromoted == 0, "check");
2630     assert(_numWordsPromoted   == 0, "check");
2631     if (Verbose && PrintGC) {
2632       gclog_or_tty->print("Allocated "SIZE_FORMAT" objects, "
2633                           SIZE_FORMAT" bytes concurrently",
2634       _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord));
2635     }
2636     _numObjectsAllocated = 0;
2637     _numWordsAllocated   = 0;
2638   )
2639 }
2640 
2641 void CMSCollector::gc_epilogue(bool full) {
2642   // The following locking discipline assumes that we are only called
2643   // when the world is stopped.
2644   assert(SafepointSynchronize::is_at_safepoint(),
2645          "world is stopped assumption");
2646 
2647   // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks
2648   // if linear allocation blocks need to be appropriately marked to allow
2649   // the blocks to be parsable. We also check here whether we need to nudge the
2650   // CMS collector thread to start a new cycle (if it's not already active).
2651   assert(   Thread::current()->is_VM_thread()
2652          || (   CMSScavengeBeforeRemark
2653              && Thread::current()->is_ConcurrentGC_thread()),
2654          "Incorrect thread type for epilogue execution");
2655 
2656   if (!_between_prologue_and_epilogue) {
2657     // We have already been invoked; this is a gc_epilogue delegation
2658     // from yet another CMS generation that we are responsible for, just
2659     // ignore it since all relevant work has already been done.
2660     return;
2661   }
2662   assert(haveFreelistLocks(), "must have freelist locks");
2663   assert_lock_strong(bitMapLock());
2664 
2665   _ct->klass_rem_set()->set_accumulate_modified_oops(false);
2666 
2667   _cmsGen->gc_epilogue_work(full);
2668 
2669   if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
2670     // in case sampling was not already enabled, enable it
2671     _start_sampling = true;
2672   }
2673   // reset _eden_chunk_array so sampling starts afresh
2674   _eden_chunk_index = 0;
2675 
2676   size_t cms_used   = _cmsGen->cmsSpace()->used();
2677 
2678   // update performance counters - this uses a special version of
2679   // update_counters() that allows the utilization to be passed as a
2680   // parameter, avoiding multiple calls to used().
2681   //
2682   _cmsGen->update_counters(cms_used);
2683 
2684   if (CMSIncrementalMode) {
2685     icms_update_allocation_limits();
2686   }
2687 
2688   bitMapLock()->unlock();
2689   releaseFreelistLocks();
2690 
2691   if (!CleanChunkPoolAsync) {
2692     Chunk::clean_chunk_pool();
2693   }
2694 
2695   _between_prologue_and_epilogue = false;  // ready for next cycle
2696 }
2697 
2698 void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
2699   collector()->gc_epilogue(full);
2700 
2701   // Also reset promotion tracking in par gc thread states.
2702   if (CollectedHeap::use_parallel_gc_threads()) {
2703     for (uint i = 0; i < ParallelGCThreads; i++) {
2704       _par_gc_thread_states[i]->promo.stopTrackingPromotions(i);
2705     }
2706   }
2707 }
2708 
2709 void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) {
2710   assert(!incremental_collection_failed(), "Should have been cleared");
2711   cmsSpace()->setPreconsumptionDirtyCardClosure(NULL);
2712   cmsSpace()->gc_epilogue();
2713   // Print stat counters
2714   NOT_PRODUCT(
2715     assert(_numObjectsAllocated == 0, "check");
2716     assert(_numWordsAllocated == 0, "check");
2717     if (Verbose && PrintGC) {
2718       gclog_or_tty->print("Promoted "SIZE_FORMAT" objects, "
2719                           SIZE_FORMAT" bytes",
2720                  _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord));
2721     }
2722     _numObjectsPromoted = 0;
2723     _numWordsPromoted   = 0;
2724   )
2725 
2726   if (PrintGC && Verbose) {
2727     // The call down the chain in contiguous_available() needs the
2728     // freelistLock, so print this out before releasing the freelistLock.
2729     gclog_or_tty->print(" Contiguous available "SIZE_FORMAT" bytes ",
2730                         contiguous_available());
2731   }
2732 }
2733 
2734 #ifndef PRODUCT
2735 bool CMSCollector::have_cms_token() {
2736   Thread* thr = Thread::current();
2737   if (thr->is_VM_thread()) {
2738     return ConcurrentMarkSweepThread::vm_thread_has_cms_token();
2739   } else if (thr->is_ConcurrentGC_thread()) {
2740     return ConcurrentMarkSweepThread::cms_thread_has_cms_token();
2741   } else if (thr->is_GC_task_thread()) {
2742     return ConcurrentMarkSweepThread::vm_thread_has_cms_token() &&
2743            ParGCRareEvent_lock->owned_by_self();
2744   }
2745   return false;
2746 }
2747 #endif
2748 
2749 // Check reachability of the given heap address in CMS generation,
2750 // treating all other generations as roots.
2751 bool CMSCollector::is_cms_reachable(HeapWord* addr) {
2752   // We could "guarantee" below, rather than assert, but I'll
2753   // leave these as "asserts" so that an adventurous debugger
2754   // could try this in the product build provided some subset of
2755   // the conditions were met, and provided they were interested in the
2756   // results and knew that the computation below wouldn't interfere
2757   // with other concurrent computations mutating the structures
2758   // being read or written.
2759   assert(SafepointSynchronize::is_at_safepoint(),
2760          "Else mutations in object graph will make answer suspect");
2761   assert(have_cms_token(), "Should hold cms token");
2762   assert(haveFreelistLocks(), "must hold free list locks");
2763   assert_lock_strong(bitMapLock());
2764 
2765   // Before re-tracing from the roots below, just for kicks, first
2766   // report whether the given address is already marked in the CMS bit map.
2767   gclog_or_tty->print_cr("Start: Address " INTPTR_FORMAT " is%s marked", addr,
2768                 _markBitMap.isMarked(addr) ? "" : " not");
2769 
2770   if (verify_after_remark()) {
2771     MutexLockerEx x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2772     bool result = verification_mark_bm()->isMarked(addr);
2773     gclog_or_tty->print_cr("TransitiveMark: Address " INTPTR_FORMAT " %s marked", addr,
2774                            result ? "IS" : "is NOT");
2775     return result;
2776   } else {
2777     gclog_or_tty->print_cr("Could not compute result");
2778     return false;
2779   }
2780 }
2781 
2782 ////////////////////////////////////////////////////////
2783 // CMS Verification Support
2784 ////////////////////////////////////////////////////////
2785 // Following the remark phase, the following invariant
2786 // should hold -- each object in the CMS heap which is
2787 // marked in the verification_mark_bm() should also be marked in markBitMap().
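// In other words, the bits set in verification_mark_bm() by the fresh
// stop-the-world trace below must be a subset of the bits set in
// markBitMap(); an object reachable in the fresh trace but unmarked in
// markBitMap() would indicate an update that remark failed to pick up.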
2788 
2789 class VerifyMarkedClosure: public BitMapClosure {
2790   CMSBitMap* _marks;
2791   bool       _failed;
2792 
2793  public:
2794   VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
2795 
2796   bool do_bit(size_t offset) {
2797     HeapWord* addr = _marks->offsetToHeapWord(offset);
2798     if (!_marks->isMarked(addr)) {
2799       oop(addr)->print_on(gclog_or_tty);
2800       gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
2801       _failed = true;
2802     }
2803     return true;
2804   }
2805 
2806   bool failed() { return _failed; }
2807 };
2808 
2809 bool CMSCollector::verify_after_remark() {
2810   gclog_or_tty->print(" [Verifying CMS Marking... ");
2811   MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2812   static bool init = false;
2813 
2814   assert(SafepointSynchronize::is_at_safepoint(),
2815          "Else mutations in object graph will make answer suspect");
2816   assert(have_cms_token(),
2817          "Else there may be mutual interference in use of "
2818          " verification data structures");
2819   assert(_collectorState > Marking && _collectorState <= Sweeping,
2820          "Else marking info checked here may be obsolete");
2821   assert(haveFreelistLocks(), "must hold free list locks");
2822   assert_lock_strong(bitMapLock());
2823 
2824 
2825   // Allocate marking bit map if not already allocated
2826   if (!init) { // first time
2827     if (!verification_mark_bm()->allocate(_span)) {
2828       return false;
2829     }
2830     init = true;
2831   }
2832 
2833   assert(verification_mark_stack()->isEmpty(), "Should be empty");
2834 
2835   // Turn off refs discovery -- so we will be tracing through refs.
2836   // This is as intended, because by this time
2837   // GC must already have cleared any refs that need to be cleared,
2838   // and traced those that need to be marked; moreover,
2839   // the marking done here is not going to interfere in any
2840   // way with the marking information used by GC.
2841   NoRefDiscovery no_discovery(ref_processor());
2842 
2843   COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
2844 
2845   // Clear any marks from a previous round
2846   verification_mark_bm()->clear_all();
2847   assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
2848   verify_work_stacks_empty();
2849 
2850   GenCollectedHeap* gch = GenCollectedHeap::heap();
2851   gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
2852   // Update the saved marks which may affect the root scans.
2853   gch->save_marks();
2854 
2855   if (CMSRemarkVerifyVariant == 1) {
2856     // In this first variant of verification, we complete
2857     // all marking, then check if the new marks-vector is
2858     // a subset of the CMS marks-vector.
2859     verify_after_remark_work_1();
2860   } else if (CMSRemarkVerifyVariant == 2) {
2861     // In this second variant of verification, we flag an error
2862     // (i.e. an object reachable in the new marks-vector not reachable
2863     // in the CMS marks-vector) immediately, also indicating the
2864     // identity of an object (A) that references the unmarked object (B) --
2865     // presumably, a mutation to A failed to be picked up by preclean/remark?
2866     verify_after_remark_work_2();
2867   } else {
2868     warning("Unrecognized value %d for CMSRemarkVerifyVariant",
2869             CMSRemarkVerifyVariant);
2870   }
2871   gclog_or_tty->print(" done] ");
2872   return true;
2873 }
2874 
2875 void CMSCollector::verify_after_remark_work_1() {
2876   ResourceMark rm;
2877   HandleMark  hm;
2878   GenCollectedHeap* gch = GenCollectedHeap::heap();
2879 
2880   // Get a clear set of claim bits for the strong roots processing to work with.
2881   ClassLoaderDataGraph::clear_claimed_marks();
2882 
2883   // Mark from roots one level into CMS
2884   MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
2885   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2886 
2887   gch->gen_process_strong_roots(_cmsGen->level(),
2888                                 true,   // younger gens are roots
2889                                 true,   // activate StrongRootsScope
2890                                 false,  // not scavenging
2891                                 SharedHeap::ScanningOption(roots_scanning_options()),
2892                                 &notOlder,
2893                                 true,   // walk code active on stacks
2894                                 NULL,
2895                                 NULL); // SSS: Provide correct closure
2896 
2897   // Now mark from the roots
2898   MarkFromRootsClosure markFromRootsClosure(this, _span,
2899     verification_mark_bm(), verification_mark_stack(),
2900     false /* don't yield */, true /* verifying */);
2901   assert(_restart_addr == NULL, "Expected pre-condition");
2902   verification_mark_bm()->iterate(&markFromRootsClosure);
2903   while (_restart_addr != NULL) {
2904     // Deal with stack overflow: by restarting at the indicated
2905     // address.
2906     HeapWord* ra = _restart_addr;
2907     markFromRootsClosure.reset(ra);
2908     _restart_addr = NULL;
2909     verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2910   }
2911   assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2912   verify_work_stacks_empty();
2913 
2914   // Marking completed -- now verify that each bit marked in
2915   // verification_mark_bm() is also marked in markBitMap(); flag all
2916   // errors by printing corresponding objects.
2917   VerifyMarkedClosure vcl(markBitMap());
2918   verification_mark_bm()->iterate(&vcl);
2919   if (vcl.failed()) {
2920     gclog_or_tty->print("Verification failed");
2921     Universe::heap()->print_on(gclog_or_tty);
2922     fatal("CMS: failed marking verification after remark");
2923   }
2924 }
2925 
2926 class VerifyKlassOopsKlassClosure : public KlassClosure {
2927   class VerifyKlassOopsClosure : public OopClosure {
2928     CMSBitMap* _bitmap;
2929    public:
2930     VerifyKlassOopsClosure(CMSBitMap* bitmap) : _bitmap(bitmap) { }
2931     void do_oop(oop* p)       { guarantee(*p == NULL || _bitmap->isMarked((HeapWord*) *p), "Should be marked"); }
2932     void do_oop(narrowOop* p) { ShouldNotReachHere(); }
2933   } _oop_closure;
2934  public:
2935   VerifyKlassOopsKlassClosure(CMSBitMap* bitmap) : _oop_closure(bitmap) {}
2936   void do_klass(Klass* k) {
2937     k->oops_do(&_oop_closure);
2938   }
2939 };
2940 
2941 void CMSCollector::verify_after_remark_work_2() {
2942   ResourceMark rm;
2943   HandleMark  hm;
2944   GenCollectedHeap* gch = GenCollectedHeap::heap();
2945 
2946   // Get a clear set of claim bits for the strong roots processing to work with.
2947   ClassLoaderDataGraph::clear_claimed_marks();
2948 
2949   // Mark from roots one level into CMS
2950   MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
2951                                      markBitMap());
2952   CMKlassClosure klass_closure(&notOlder);
2953 
2954   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2955   gch->gen_process_strong_roots(_cmsGen->level(),
2956                                 true,   // younger gens are roots
2957                                 true,   // activate StrongRootsScope
2958                                 false,  // not scavenging
2959                                 SharedHeap::ScanningOption(roots_scanning_options()),
2960                                 &notOlder,
2961                                 true,   // walk code active on stacks
2962                                 NULL,
2963                                 &klass_closure);
2964 
2965   // Now mark from the roots
2966   MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
2967     verification_mark_bm(), markBitMap(), verification_mark_stack());
2968   assert(_restart_addr == NULL, "Expected pre-condition");
2969   verification_mark_bm()->iterate(&markFromRootsClosure);
2970   while (_restart_addr != NULL) {
2971     // Deal with stack overflow: by restarting at the indicated
2972     // address.
2973     HeapWord* ra = _restart_addr;
2974     markFromRootsClosure.reset(ra);
2975     _restart_addr = NULL;
2976     verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2977   }
2978   assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2979   verify_work_stacks_empty();
2980 
2981   VerifyKlassOopsKlassClosure verify_klass_oops(verification_mark_bm());
2982   ClassLoaderDataGraph::classes_do(&verify_klass_oops);
2983 
2984   // Marking completed -- now verify that each bit marked in
2985   // verification_mark_bm() is also marked in markBitMap(); flag all
2986   // errors by printing corresponding objects.
2987   VerifyMarkedClosure vcl(markBitMap());
2988   verification_mark_bm()->iterate(&vcl);
2989   assert(!vcl.failed(), "Else verification above should not have succeeded");
2990 }
2991 
2992 void ConcurrentMarkSweepGeneration::save_marks() {
2993   // delegate to CMS space
2994   cmsSpace()->save_marks();
2995   for (uint i = 0; i < ParallelGCThreads; i++) {
2996     _par_gc_thread_states[i]->promo.startTrackingPromotions();
2997   }
2998 }
2999 
3000 bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
3001   return cmsSpace()->no_allocs_since_save_marks();
3002 }
3003 
3004 #define CMS_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)    \
3005                                                                 \
3006 void ConcurrentMarkSweepGeneration::                            \
3007 oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
3008   cl->set_generation(this);                                     \
3009   cmsSpace()->oop_since_save_marks_iterate##nv_suffix(cl);      \
3010   cl->reset_generation();                                       \
3011   save_marks();                                                 \
3012 }
3013 
3014 ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN)
3015 
3016 void
3017 ConcurrentMarkSweepGeneration::object_iterate_since_last_GC(ObjectClosure* blk)
3018 {
3019   // Not currently implemented; need to do the following. -- ysr.
3020   // dld -- I think that is used for some sort of allocation profiler.  So it
3021   // really means the objects allocated by the mutator since the last
3022   // GC.  We could potentially implement this cheaply by recording only
3023   // the direct allocations in a side data structure.
3024   //
3025   // I think we probably ought not to be required to support these
3026   // iterations at any arbitrary point; I think there ought to be some
3027   // call to enable/disable allocation profiling in a generation/space,
3028   // and the iterator ought to return the objects allocated in the
3029   // gen/space since the enable call, or the last iterator call (which
3030   // will probably be at a GC.)  That way, for gens like CM&S that would
3031   // require some extra data structure to support this, we only pay the
3032   // cost when it's in use...
3033   cmsSpace()->object_iterate_since_last_GC(blk);
3034 }
3035 
3036 void
3037 ConcurrentMarkSweepGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
3038   cl->set_generation(this);
3039   younger_refs_in_space_iterate(_cmsSpace, cl);
3040   cl->reset_generation();
3041 }
3042 
3043 void
3044 ConcurrentMarkSweepGeneration::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
3045   if (freelistLock()->owned_by_self()) {
3046     Generation::oop_iterate(mr, cl);
3047   } else {
3048     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3049     Generation::oop_iterate(mr, cl);
3050   }
3051 }
3052 
3053 void
3054 ConcurrentMarkSweepGeneration::oop_iterate(ExtendedOopClosure* cl) {
3055   if (freelistLock()->owned_by_self()) {
3056     Generation::oop_iterate(cl);
3057   } else {
3058     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3059     Generation::oop_iterate(cl);
3060   }
3061 }
3062 
3063 void
3064 ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) {
3065   if (freelistLock()->owned_by_self()) {
3066     Generation::object_iterate(cl);
3067   } else {
3068     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3069     Generation::object_iterate(cl);
3070   }
3071 }
3072 
3073 void
3074 ConcurrentMarkSweepGeneration::safe_object_iterate(ObjectClosure* cl) {
3075   if (freelistLock()->owned_by_self()) {
3076     Generation::safe_object_iterate(cl);
3077   } else {
3078     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3079     Generation::safe_object_iterate(cl);
3080   }
3081 }
3082 
3083 void
3084 ConcurrentMarkSweepGeneration::post_compact() {
3085 }
3086 
3087 void
3088 ConcurrentMarkSweepGeneration::prepare_for_verify() {
3089   // Fix the linear allocation blocks to look like free blocks.
3090 
3091   // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
3092   // are not called when the heap is verified during universe initialization and
3093   // at vm shutdown.
3094   if (freelistLock()->owned_by_self()) {
3095     cmsSpace()->prepare_for_verify();
3096   } else {
3097     MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
3098     cmsSpace()->prepare_for_verify();
3099   }
3100 }
3101 
3102 void
3103 ConcurrentMarkSweepGeneration::verify() {
3104   // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
3105   // are not called when the heap is verified during universe initialization and
3106   // at vm shutdown.
3107   if (freelistLock()->owned_by_self()) {
3108     cmsSpace()->verify();
3109   } else {
3110     MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
3111     cmsSpace()->verify();
3112   }
3113 }
3114 
3115 void CMSCollector::verify() {
3116   _cmsGen->verify();
3117 }
3118 
3119 #ifndef PRODUCT
3120 bool CMSCollector::overflow_list_is_empty() const {
3121   assert(_num_par_pushes >= 0, "Inconsistency");
3122   if (_overflow_list == NULL) {
3123     assert(_num_par_pushes == 0, "Inconsistency");
3124   }
3125   return _overflow_list == NULL;
3126 }
3127 
3128 // The methods verify_work_stacks_empty() and verify_overflow_empty()
3129 // merely consolidate assertion checks that appear to occur together frequently.
3130 void CMSCollector::verify_work_stacks_empty() const {
3131   assert(_markStack.isEmpty(), "Marking stack should be empty");
3132   assert(overflow_list_is_empty(), "Overflow list should be empty");
3133 }
3134 
3135 void CMSCollector::verify_overflow_empty() const {
3136   assert(overflow_list_is_empty(), "Overflow list should be empty");
3137   assert(no_preserved_marks(), "No preserved marks");
3138 }
3139 #endif // PRODUCT
3140 
3141 // Decide if we want to enable class unloading as part of the
3142 // ensuing concurrent GC cycle. We will collect and
3143 // unload classes if it's the case that:
3144 // (1) an explicit gc request has been made and the flag
3145 //     ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR
3146 // (2) (a) class unloading is enabled at the command line, and
3147 //     (b) enough concurrent cycles have elapsed since the last class
3148 //         unload, or the old gen is getting really full.
3149 // NOTE: Provided there is no change in the state of the heap between
3150 // calls to this method, it should have idempotent results. Moreover,
3151 // its results should be monotonically increasing (i.e. going from 0 to 1,
3152 // but not 1 to 0) between successive calls between which the heap was
3153 // not collected. The implementation below thus relies on the property
3154 // that concurrent_cycles_since_last_unload() will not decrease unless a
3155 // collection cycle happened, and that _cmsGen->is_too_full() is itself
3156 // also monotonic in that sense. See check_monotonicity()
3157 // below.
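//
// As an illustrative restatement of the decision implemented below (not a
// separate policy), the predicate reduces to roughly:
//
//   _should_unload_classes =
//        (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses)
//     || (CMSClassUnloadingEnabled
//         && (concurrent_cycles_since_last_unload() >= CMSClassUnloadingMaxInterval
//             || _cmsGen->is_too_full()));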
3158 void CMSCollector::update_should_unload_classes() {
3159   _should_unload_classes = false;
3160   // Condition 1 above
3161   if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
3162     _should_unload_classes = true;
3163   } else if (CMSClassUnloadingEnabled) { // Condition 2.a above
3164     // Disjuncts in 2.b above
3165     _should_unload_classes = (concurrent_cycles_since_last_unload() >=
3166                               CMSClassUnloadingMaxInterval)
3167                            || _cmsGen->is_too_full();
3168   }
3169 }
3170 
3171 bool ConcurrentMarkSweepGeneration::is_too_full() const {
3172   bool res = should_concurrent_collect();
3173   res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
3174   return res;
3175 }
3176 
3177 void CMSCollector::setup_cms_unloading_and_verification_state() {
3178   const  bool should_verify =   VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
3179                              || VerifyBeforeExit;
3180   const  int  rso           =   SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
3181 
3182   if (should_unload_classes()) {   // Should unload classes this cycle
3183     remove_root_scanning_option(rso);  // Shrink the root set appropriately
3184     set_verifying(should_verify);    // Set verification state for this cycle
3185     return;                            // Nothing else needs to be done at this time
3186   }
3187 
3188   // Not unloading classes this cycle
3189   assert(!should_unload_classes(), "Inconsistency!");
3190   if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
3191     // Include symbols, strings and code cache elements to prevent their resurrection.
3192     add_root_scanning_option(rso);
3193     set_verifying(true);
3194   } else if (verifying() && !should_verify) {
3195     // We were verifying, but some verification flags got disabled.
3196     set_verifying(false);
3197     // Exclude symbols, strings and code cache elements from root scanning to
3198     // reduce IM and RM pauses.
3199     remove_root_scanning_option(rso);
3200   }
3201 }
3202 
3203 
3204 #ifndef PRODUCT
3205 HeapWord* CMSCollector::block_start(const void* p) const {
3206   const HeapWord* addr = (HeapWord*)p;
3207   if (_span.contains(p)) {
3208     if (_cmsGen->cmsSpace()->is_in_reserved(addr)) {
3209       return _cmsGen->cmsSpace()->block_start(p);
3210     }
3211   }
3212   return NULL;
3213 }
3214 #endif
3215 
3216 HeapWord*
3217 ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
3218                                                    bool   tlab,
3219                                                    bool   parallel) {
3220   CMSSynchronousYieldRequest yr;
3221   assert(!tlab, "Can't deal with TLAB allocation");
3222   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
3223   expand(word_size*HeapWordSize, MinHeapDeltaBytes,
3224     CMSExpansionCause::_satisfy_allocation);
3225   if (GCExpandToAllocateDelayMillis > 0) {
3226     os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3227   }
3228   return have_lock_and_allocate(word_size, tlab);
3229 }
3230 
3231 // YSR: All of this generation expansion/shrinking stuff is an exact copy of
3232 // OneContigSpaceCardGeneration, which makes me wonder if we should move this
3233 // to CardGeneration and share it...
3234 bool ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes) {
3235   return CardGeneration::expand(bytes, expand_bytes);
3236 }
3237 
3238 void ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes,
3239   CMSExpansionCause::Cause cause)
3240 {
3241 
3242   bool success = expand(bytes, expand_bytes);
3243 
3244   // remember why we expanded; this information is used
3245   // by shouldConcurrentCollect() when making decisions on whether to start
3246   // a new CMS cycle.
3247   if (success) {
3248     set_expansion_cause(cause);
3249     if (PrintGCDetails && Verbose) {
3250       gclog_or_tty->print_cr("Expanded CMS gen for %s",
3251         CMSExpansionCause::to_string(cause));
3252     }
3253   }
3254 }
3255 
3256 HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
3257   HeapWord* res = NULL;
3258   MutexLocker x(ParGCRareEvent_lock);
3259   while (true) {
3260     // Expansion by some other thread might make alloc OK now:
3261     res = ps->lab.alloc(word_sz);
3262     if (res != NULL) return res;
3263     // If there's not enough expansion space available, give up.
3264     if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
3265       return NULL;
3266     }
3267     // Otherwise, we try expansion.
3268     expand(word_sz*HeapWordSize, MinHeapDeltaBytes,
3269       CMSExpansionCause::_allocate_par_lab);
3270     // Now go around the loop and try alloc again;
3271     // A competing par_promote might beat us to the expansion space,
3272     // so we may go around the loop again if promotion fails again.
3273     if (GCExpandToAllocateDelayMillis > 0) {
3274       os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3275     }
3276   }
3277 }
3278 
3279 
3280 bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
3281   PromotionInfo* promo) {
3282   MutexLocker x(ParGCRareEvent_lock);
3283   size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
3284   while (true) {
3285     // Expansion by some other thread might make alloc OK now:
3286     if (promo->ensure_spooling_space()) {
3287       assert(promo->has_spooling_space(),
3288              "Post-condition of successful ensure_spooling_space()");
3289       return true;
3290     }
3291     // If there's not enough expansion space available, give up.
3292     if (_virtual_space.uncommitted_size() < refill_size_bytes) {
3293       return false;
3294     }
3295     // Otherwise, we try expansion.
3296     expand(refill_size_bytes, MinHeapDeltaBytes,
3297       CMSExpansionCause::_allocate_par_spooling_space);
3298     // Now go around the loop and try alloc again;
3299     // A competing allocation might beat us to the expansion space,
3300     // so we may go around the loop again if allocation fails again.
3301     if (GCExpandToAllocateDelayMillis > 0) {
3302       os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
3303     }
3304   }
3305 }
3306 
3307 
3308 
3309 void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
3310   assert_locked_or_safepoint(Heap_lock);
3311   size_t size = ReservedSpace::page_align_size_down(bytes);
3312   if (size > 0) {
3313     shrink_by(size);
3314   }
3315 }
3316 
3317 bool ConcurrentMarkSweepGeneration::grow_by(size_t bytes) {
3318   assert_locked_or_safepoint(Heap_lock);
3319   bool result = _virtual_space.expand_by(bytes);
3320   if (result) {
3321     HeapWord* old_end = _cmsSpace->end();
3322     size_t new_word_size =
3323       heap_word_size(_virtual_space.committed_size());
3324     MemRegion mr(_cmsSpace->bottom(), new_word_size);
3325     _bts->resize(new_word_size);  // resize the block offset shared array
3326     Universe::heap()->barrier_set()->resize_covered_region(mr);
3327     // Hmmmm... why doesn't CFLS::set_end verify locking?
3328     // This is quite ugly; FIX ME XXX
3329     _cmsSpace->assert_locked(freelistLock());
3330     _cmsSpace->set_end((HeapWord*)_virtual_space.high());
3331 
3332     // update the space and generation capacity counters
3333     if (UsePerfData) {
3334       _space_counters->update_capacity();
3335       _gen_counters->update_all();
3336     }
3337 
3338     if (Verbose && PrintGC) {
3339       size_t new_mem_size = _virtual_space.committed_size();
3340       size_t old_mem_size = new_mem_size - bytes;
3341       gclog_or_tty->print_cr("Expanding %s from %ldK by %ldK to %ldK",
3342                     name(), old_mem_size/K, bytes/K, new_mem_size/K);
3343     }
3344   }
3345   return result;
3346 }
3347 
3348 bool ConcurrentMarkSweepGeneration::grow_to_reserved() {
3349   assert_locked_or_safepoint(Heap_lock);
3350   bool success = true;
3351   const size_t remaining_bytes = _virtual_space.uncommitted_size();
3352   if (remaining_bytes > 0) {
3353     success = grow_by(remaining_bytes);
3354     DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
3355   }
3356   return success;
3357 }
3358 
3359 void ConcurrentMarkSweepGeneration::shrink_by(size_t bytes) {
3360   assert_locked_or_safepoint(Heap_lock);
3361   assert_lock_strong(freelistLock());
3362   // XXX Fix when compaction is implemented.
3363   warning("Shrinking of CMS not yet implemented");
3364   return;
3365 }
3366 
3367 
3368 // Simple ctor/dtor wrapper for accounting & timer chores around concurrent
3369 // phases.
3370 class CMSPhaseAccounting: public StackObj {
3371  public:
3372   CMSPhaseAccounting(CMSCollector *collector,
3373                      const char *phase,
3374                      bool print_cr = true);
3375   ~CMSPhaseAccounting();
3376 
3377  private:
3378   CMSCollector *_collector;
3379   const char *_phase;
3380   elapsedTimer _wallclock;
3381   bool _print_cr;
3382 
3383  public:
3384   // Not MT-safe; so do not pass around these StackObj's
3385   // where they may be accessed by other threads.
3386   jlong wallclock_millis() {
3387     assert(_wallclock.is_active(), "Wall clock should not stop");
3388     _wallclock.stop();  // to record time
3389     jlong ret = _wallclock.milliseconds();
3390     _wallclock.start(); // restart
3391     return ret;
3392   }
3393 };
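// Typical usage (see e.g. markFromRoots() below): construct a
// CMSPhaseAccounting object on the CMS thread's stack for the duration of a
// concurrent phase; the constructor and destructor then bracket the phase
// with the start/end log lines and the timer bookkeeping, e.g.
//
//   CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);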
3394 
3395 CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
3396                                        const char *phase,
3397                                        bool print_cr) :
3398   _collector(collector), _phase(phase), _print_cr(print_cr) {
3399 
3400   if (PrintCMSStatistics != 0) {
3401     _collector->resetYields();
3402   }
3403   if (PrintGCDetails && PrintGCTimeStamps) {
3404     gclog_or_tty->date_stamp(PrintGCDateStamps);
3405     gclog_or_tty->stamp();
3406     gclog_or_tty->print_cr(": [%s-concurrent-%s-start]",
3407       _collector->cmsGen()->short_name(), _phase);
3408   }
3409   _collector->resetTimer();
3410   _wallclock.start();
3411   _collector->startTimer();
3412 }
3413 
3414 CMSPhaseAccounting::~CMSPhaseAccounting() {
3415   assert(_wallclock.is_active(), "Wall clock should not have stopped");
3416   _collector->stopTimer();
3417   _wallclock.stop();
3418   if (PrintGCDetails) {
3419     gclog_or_tty->date_stamp(PrintGCDateStamps);
3420     gclog_or_tty->stamp(PrintGCTimeStamps);
3421     gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]",
3422                  _collector->cmsGen()->short_name(),
3423                  _phase, _collector->timerValue(), _wallclock.seconds());
3424     if (_print_cr) {
3425       gclog_or_tty->print_cr("");
3426     }
3427     if (PrintCMSStatistics != 0) {
3428       gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase,
3429                     _collector->yields());
3430     }
3431   }
3432 }
3433 
3434 // CMS work
3435 
3436 // Checkpoint the roots into this generation from outside
3437 // this generation. [Note this initial checkpoint need only
3438 // be approximate -- we'll do a catch up phase subsequently.]
3439 void CMSCollector::checkpointRootsInitial(bool asynch) {
3440   assert(_collectorState == InitialMarking, "Wrong collector state");
3441   check_correct_thread_executing();
3442   TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
3443 
3444   ReferenceProcessor* rp = ref_processor();
3445   SpecializationStats::clear();
3446   assert(_restart_addr == NULL, "Control point invariant");
3447   if (asynch) {
3448     // acquire locks for subsequent manipulations
3449     MutexLockerEx x(bitMapLock(),
3450                     Mutex::_no_safepoint_check_flag);
3451     checkpointRootsInitialWork(asynch);
3452     // enable ("weak") refs discovery
3453     rp->enable_discovery(true /*verify_disabled*/, true /*check_no_refs*/);
3454     _collectorState = Marking;
3455   } else {
3456     // (Weak) Refs discovery: this is controlled from GenCollectedHeap::do_collection
3457     // which recognizes if we are a CMS generation, and doesn't try to turn on
3458     // discovery; verify that they aren't meddling.
3459     assert(!rp->discovery_is_atomic(),
3460            "incorrect setting of discovery predicate");
3461     assert(!rp->discovery_enabled(), "genCollectedHeap shouldn't control "
3462            "ref discovery for this generation kind");
3463     // already have locks
3464     checkpointRootsInitialWork(asynch);
3465     // now enable ("weak") refs discovery
3466     rp->enable_discovery(true /*verify_disabled*/, false /*verify_no_refs*/);
3467     _collectorState = Marking;
3468   }
3469   SpecializationStats::print();
3470 }
3471 
3472 void CMSCollector::checkpointRootsInitialWork(bool asynch) {
3473   assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
3474   assert(_collectorState == InitialMarking, "just checking");
3475 
3476   // If there has not been a GC[n-1] since last GC[n] cycle completed,
3477   // precede our marking with a collection of all
3478   // younger generations to keep floating garbage to a minimum.
3479   // XXX: we won't do this for now -- it's an optimization to be done later.
3480 
3481   // already have locks
3482   assert_lock_strong(bitMapLock());
3483   assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
3484 
3485   // Setup the verification and class unloading state for this
3486   // CMS collection cycle.
3487   setup_cms_unloading_and_verification_state();
3488 
3489   NOT_PRODUCT(TraceTime t("\ncheckpointRootsInitialWork",
3490     PrintGCDetails && Verbose, true, gclog_or_tty);)
3491   if (UseAdaptiveSizePolicy) {
3492     size_policy()->checkpoint_roots_initial_begin();
3493   }
3494 
3495   // Reset all the PLAB chunk arrays if necessary.
3496   if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
3497     reset_survivor_plab_arrays();
3498   }
3499 
3500   ResourceMark rm;
3501   HandleMark  hm;
3502 
3503   FalseClosure falseClosure;
3504   // In the case of a synchronous collection, we will elide the
3505   // remark step, so it's important to catch all the nmethod oops
3506   // in this step.
3507   // The final 'true' flag to gen_process_strong_roots will ensure this.
3508   // If 'asynch' is true, we can relax the nmethod tracing.
3509   MarkRefsIntoClosure notOlder(_span, &_markBitMap);
3510   GenCollectedHeap* gch = GenCollectedHeap::heap();
3511 
3512   verify_work_stacks_empty();
3513   verify_overflow_empty();
3514 
3515   gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
3516   // Update the saved marks which may affect the root scans.
3517   gch->save_marks();
3518 
3519   // weak reference processing has not started yet.
3520   ref_processor()->set_enqueuing_is_done(false);
3521 
3522   // Need to remember all newly created CLDs,
3523   // so that we can guarantee that the remark finds them.
3524   ClassLoaderDataGraph::remember_new_clds(true);
3525 
3526   // Whenever a CLD is found, it will be claimed before proceeding to mark
3527   // the klasses. The claimed marks need to be cleared before marking starts.
3528   ClassLoaderDataGraph::clear_claimed_marks();
3529 
3530   CMKlassClosure klass_closure(&notOlder);
3531   {
3532     COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
3533     gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
3534     gch->gen_process_strong_roots(_cmsGen->level(),
3535                                   true,   // younger gens are roots
3536                                   true,   // activate StrongRootsScope
3537                                   false,  // not scavenging
3538                                   SharedHeap::ScanningOption(roots_scanning_options()),
3539                                   &notOlder,
3540                                   true,   // walk all of code cache if (so & SO_CodeCache)
3541                                   NULL,
3542                                   &klass_closure);
3543   }
3544 
3545   // Clear mod-union table; it will be dirtied in the prologue of
3546   // the CMS generation for each younger generation collection.
3547 
3548   assert(_modUnionTable.isAllClear(),
3549        "Was cleared in most recent final checkpoint phase"
3550        " or no bits are set in the gc_prologue before the start of the next "
3551        "subsequent marking phase.");
3552 
3553   assert(_ct->klass_rem_set()->mod_union_is_clear(), "Must be");
3554 
3555   // Save the end of the used_region of the constituent generations
3556   // to be used to limit the extent of sweep in each generation.
3557   save_sweep_limits();
3558   if (UseAdaptiveSizePolicy) {
3559     size_policy()->checkpoint_roots_initial_end(gch->gc_cause());
3560   }
3561   verify_overflow_empty();
3562 }
3563 
3564 bool CMSCollector::markFromRoots(bool asynch) {
3565   // we might be tempted to assert that:
3566   // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
3567   //        "inconsistent argument?");
3568   // However that wouldn't be right, because it's possible that
3569   // a safepoint is indeed in progress as a younger generation
3570   // stop-the-world GC happens even as we mark in this generation.
3571   assert(_collectorState == Marking, "inconsistent state?");
3572   check_correct_thread_executing();
3573   verify_overflow_empty();
3574 
3575   bool res;
3576   if (asynch) {
3577 
3578     // Start the timers for adaptive size policy for the concurrent phases.
3579     // Do it here so that the foreground MS can use the concurrent
3580     // timer since a foreground MS might have the sweep done concurrently
3581     // or STW.
3582     if (UseAdaptiveSizePolicy) {
3583       size_policy()->concurrent_marking_begin();
3584     }
3585 
3586     // Weak ref discovery note: We may be discovering weak
3587     // refs in this generation concurrently (but interleaved) with
3588     // weak ref discovery by a younger generation collector.
3589 
3590     CMSTokenSyncWithLocks ts(true, bitMapLock());
3591     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3592     CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
3593     res = markFromRootsWork(asynch);
3594     if (res) {
3595       _collectorState = Precleaning;
3596     } else { // We failed and a foreground collection wants to take over
3597       assert(_foregroundGCIsActive, "internal state inconsistency");
3598       assert(_restart_addr == NULL,  "foreground will restart from scratch");
3599       if (PrintGCDetails) {
3600         gclog_or_tty->print_cr("bailing out to foreground collection");
3601       }
3602     }
3603     if (UseAdaptiveSizePolicy) {
3604       size_policy()->concurrent_marking_end();
3605     }
3606   } else {
3607     assert(SafepointSynchronize::is_at_safepoint(),
3608            "inconsistent with asynch == false");
3609     if (UseAdaptiveSizePolicy) {
3610       size_policy()->ms_collection_marking_begin();
3611     }
3612     // already have locks
3613     res = markFromRootsWork(asynch);
3614     _collectorState = FinalMarking;
3615     if (UseAdaptiveSizePolicy) {
3616       GenCollectedHeap* gch = GenCollectedHeap::heap();
3617       size_policy()->ms_collection_marking_end(gch->gc_cause());
3618     }
3619   }
3620   verify_overflow_empty();
3621   return res;
3622 }
3623 
3624 bool CMSCollector::markFromRootsWork(bool asynch) {
3625   // iterate over marked bits in bit map, doing a full scan and mark
3626   // from these roots using the following algorithm:
3627   // . if oop is to the right of the current scan pointer,
3628   //   mark corresponding bit (we'll process it later)
3629   // . else (oop is to left of current scan pointer)
3630   //   push oop on marking stack
3631   // . drain the marking stack
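  //
  // Illustrative pseudocode for the scan rule above (not a separate code
  // path; "scan_ptr" here just names the current position of the bitmap
  // scan):
  //
  //   for each marked object obj reached by the bitmap scan:
  //     for each oop p referenced by obj:
  //       if (!isMarked(p)) {
  //         mark(p);
  //         if (p < scan_ptr) push p on the marking stack; // scan already passed p
  //         // else the scan will reach p later; marking the bit suffices
  //       }
  //     drain the marking stack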
3632 
3633   // Note that when we do a marking step we need to hold the
3634   // bit map lock -- recall that direct allocation (by mutators)
3635   // and promotion (by younger generation collectors) is also
3636   // marking the bit map. [the so-called allocate live policy.]
3637   // Because the implementation of bit map marking is not
3638   // robust wrt simultaneous marking of bits in the same word,
3639   // we need to make sure that there is no interference
3640   // between such concurrent updates.
3641 
3642   // already have locks
3643   assert_lock_strong(bitMapLock());
3644 
3645   verify_work_stacks_empty();
3646   verify_overflow_empty();
3647   bool result = false;
3648   if (CMSConcurrentMTEnabled && ConcGCThreads > 0) {
3649     result = do_marking_mt(asynch);
3650   } else {
3651     result = do_marking_st(asynch);
3652   }
3653   return result;
3654 }
3655 
3656 // Forward decl
3657 class CMSConcMarkingTask;
3658 
3659 class CMSConcMarkingTerminator: public ParallelTaskTerminator {
3660   CMSCollector*       _collector;
3661   CMSConcMarkingTask* _task;
3662  public:
3663   virtual void yield();
3664 
3665   // "n_threads" is the number of threads to be terminated.
3666   // "queue_set" is a set of work queues of other threads.
3667   // "collector" is the CMS collector associated with this task terminator.
3668   // "yield" indicates whether we need the gang as a whole to yield.
3669   CMSConcMarkingTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) :
3670     ParallelTaskTerminator(n_threads, queue_set),
3671     _collector(collector) { }
3672 
3673   void set_task(CMSConcMarkingTask* task) {
3674     _task = task;
3675   }
3676 };
3677 
3678 class CMSConcMarkingTerminatorTerminator: public TerminatorTerminator {
3679   CMSConcMarkingTask* _task;
3680  public:
3681   bool should_exit_termination();
3682   void set_task(CMSConcMarkingTask* task) {
3683     _task = task;
3684   }
3685 };
3686 
3687 // MT Concurrent Marking Task
3688 class CMSConcMarkingTask: public YieldingFlexibleGangTask {
3689   CMSCollector* _collector;
3690   int           _n_workers;                  // requested/desired # workers
3691   bool          _asynch;
3692   bool          _result;
3693   CompactibleFreeListSpace*  _cms_space;
3694   char          _pad_front[64];   // padding to ...
3695   HeapWord*     _global_finger;   // ... avoid sharing cache line
3696   char          _pad_back[64];
3697   HeapWord*     _restart_addr;
3698 
3699   //  Exposed here for yielding support
3700   Mutex* const _bit_map_lock;
3701 
3702   // The per thread work queues, available here for stealing
3703   OopTaskQueueSet*  _task_queues;
3704 
3705   // Termination (and yielding) support
3706   CMSConcMarkingTerminator _term;
3707   CMSConcMarkingTerminatorTerminator _term_term;
3708 
3709  public:
3710   CMSConcMarkingTask(CMSCollector* collector,
3711                  CompactibleFreeListSpace* cms_space,
3712                  bool asynch,
3713                  YieldingFlexibleWorkGang* workers,
3714                  OopTaskQueueSet* task_queues):
3715     YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
3716     _collector(collector),
3717     _cms_space(cms_space),
3718     _asynch(asynch), _n_workers(0), _result(true),
3719     _task_queues(task_queues),
3720     _term(_n_workers, task_queues, _collector),
3721     _bit_map_lock(collector->bitMapLock())
3722   {
3723     _requested_size = _n_workers;
3724     _term.set_task(this);
3725     _term_term.set_task(this);
3726     _restart_addr = _global_finger = _cms_space->bottom();
3727   }
3728 
3729 
3730   OopTaskQueueSet* task_queues()  { return _task_queues; }
3731 
3732   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
3733 
3734   HeapWord** global_finger_addr() { return &_global_finger; }
3735 
3736   CMSConcMarkingTerminator* terminator() { return &_term; }
3737 
3738   virtual void set_for_termination(int active_workers) {
3739     terminator()->reset_for_reuse(active_workers);
3740   }
3741 
3742   void work(uint worker_id);
3743   bool should_yield() {
3744     return    ConcurrentMarkSweepThread::should_yield()
3745            && !_collector->foregroundGCIsActive()
3746            && _asynch;
3747   }
3748 
3749   virtual void coordinator_yield();  // stuff done by coordinator
3750   bool result() { return _result; }
3751 
3752   void reset(HeapWord* ra) {
3753     assert(_global_finger >= _cms_space->end(),  "Postcondition of ::work(i)");
3754     _restart_addr = _global_finger = ra;
3755     _term.reset_for_reuse();
3756   }
3757 
3758   static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3759                                            OopTaskQueue* work_q);
3760 
3761  private:
3762   void do_scan_and_mark(int i, CompactibleFreeListSpace* sp);
3763   void do_work_steal(int i);
3764   void bump_global_finger(HeapWord* f);
3765 };
3766 
3767 bool CMSConcMarkingTerminatorTerminator::should_exit_termination() {
3768   assert(_task != NULL, "Error");
3769   return _task->yielding();
3770   // Note that we do not need the disjunct || _task->should_yield() above
3771   // because we want terminating threads to yield only if the task
3772   // is already in the midst of yielding, which happens only after at least one
3773   // thread has yielded.
3774 }
3775 
3776 void CMSConcMarkingTerminator::yield() {
3777   if (_task->should_yield()) {
3778     _task->yield();
3779   } else {
3780     ParallelTaskTerminator::yield();
3781   }
3782 }
3783 
3784 ////////////////////////////////////////////////////////////////
3785 // Concurrent Marking Algorithm Sketch
3786 ////////////////////////////////////////////////////////////////
3787 // Until all tasks exhausted (both spaces):
3788 // -- claim next available chunk
3789 // -- bump global finger via CAS
3790 // -- find first object that starts in this chunk
3791 //    and start scanning bitmap from that position
3792 // -- scan marked objects for oops
3793 // -- CAS-mark target, and if successful:
3794 //    . if target oop is above global finger (volatile read)
3795 //      nothing to do
3796 //    . if target oop is in chunk and above local finger
3797 //        then nothing to do
3798 //    . else push on work-queue
3799 // -- Deal with possible overflow issues:
3800 //    . local work-queue overflow causes stuff to be pushed on
3801 //      global (common) overflow queue
3802 //    . always first empty local work queue
3803 //    . then get a batch of oops from global work queue if any
3804 //    . then do work stealing
3805 // -- When all tasks claimed (both spaces)
3806 //    and local work queue empty,
3807 //    then in a loop do:
3808 //    . check global overflow stack; steal a batch of oops and trace
3809 //    . try to steal from other threads if the global overflow stack is empty
3810 //    . if neither is available, offer termination
3811 // -- Terminate and return result
3812 //
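// An illustrative restatement of the per-oop rule above (names such as
// my_span and my_local_finger are used here purely for illustration):
//
//   if (CAS-mark(obj) succeeded) {
//     if (obj >= _global_finger) {
//       // the task that claims the chunk containing obj will scan it; nothing to do
//     } else if (my_span.contains(obj) && obj >= my_local_finger) {
//       // my own bitmap scan will still reach obj; nothing to do
//     } else {
//       work_queue->push(obj);   // must be traced explicitly
//     }
//   }
//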
3813 void CMSConcMarkingTask::work(uint worker_id) {
3814   elapsedTimer _timer;
3815   ResourceMark rm;
3816   HandleMark hm;
3817 
3818   DEBUG_ONLY(_collector->verify_overflow_empty();)
3819 
3820   // Before we begin work, our work queue should be empty
3821   assert(work_queue(worker_id)->size() == 0, "Expected to be empty");
3822   // Scan the bitmap covering _cms_space, tracing through grey objects.
3823   _timer.start();
3824   do_scan_and_mark(worker_id, _cms_space);
3825   _timer.stop();
3826   if (PrintCMSStatistics != 0) {
3827     gclog_or_tty->print_cr("Finished cms space scanning in %dth thread: %3.3f sec",
3828       worker_id, _timer.seconds());
3829       // XXX: need xxx/xxx type of notation, two timers
3830   }
3831 
3832   // ... do work stealing
3833   _timer.reset();
3834   _timer.start();
3835   do_work_steal(worker_id);
3836   _timer.stop();
3837   if (PrintCMSStatistics != 0) {
3838     gclog_or_tty->print_cr("Finished work stealing in %dth thread: %3.3f sec",
3839       worker_id, _timer.seconds());
3840       // XXX: need xxx/xxx type of notation, two timers
3841   }
3842   assert(_collector->_markStack.isEmpty(), "Should have been emptied");
3843   assert(work_queue(worker_id)->size() == 0, "Should have been emptied");
3844   // Note that under the current task protocol, the
3845   // following assertion is true even if the spaces
3846   // expanded since the completion of the concurrent
3847   // marking. XXX This will likely change under strict
3848   // ABORT semantics.
3849   // After perm removal the comparison was changed to
3850   // greater than or equal to from strictly greater than.
3851   // Before perm removal the highest address sweep would
3852   // have been at the end of perm gen but now is at the
3853   // end of the tenured gen.
3854   assert(_global_finger >=  _cms_space->end(),
3855          "All tasks have been completed");
3856   DEBUG_ONLY(_collector->verify_overflow_empty();)
3857 }
3858 
3859 void CMSConcMarkingTask::bump_global_finger(HeapWord* f) {
3860   HeapWord* read = _global_finger;
3861   HeapWord* cur  = read;
3862   while (f > read) {
3863     cur = read;
3864     read = (HeapWord*) Atomic::cmpxchg_ptr(f, &_global_finger, cur);
3865     if (cur == read) {
3866       // our cas succeeded
3867       assert(_global_finger >= f, "protocol consistency");
3868       break;
3869     }
3870   }
3871 }
3872 
3873 // This is really inefficient, and should be redone by
3874 // using (not yet available) block-read and -write interfaces to the
3875 // stack and the work_queue. XXX FIX ME !!!
3876 bool CMSConcMarkingTask::get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3877                                                       OopTaskQueue* work_q) {
3878   // Fast lock-free check
3879   if (ovflw_stk->length() == 0) {
3880     return false;
3881   }
3882   assert(work_q->size() == 0, "Shouldn't steal");
3883   MutexLockerEx ml(ovflw_stk->par_lock(),
3884                    Mutex::_no_safepoint_check_flag);
3885   // Grab up to 1/4 the size of the work queue
3886   size_t num = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
3887                     (size_t)ParGCDesiredObjsFromOverflowList);
3888   num = MIN2(num, ovflw_stk->length());
3889   for (int i = (int) num; i > 0; i--) {
3890     oop cur = ovflw_stk->pop();
3891     assert(cur != NULL, "Counted wrong?");
3892     work_q->push(cur);
3893   }
3894   return num > 0;
3895 }
3896 
3897 void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
3898   SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
3899   int n_tasks = pst->n_tasks();
3900   // We allow that there may be no tasks to do here because
3901   // we are restarting after a stack overflow.
3902   assert(pst->valid() || n_tasks == 0, "Uninitialized use?");
3903   uint nth_task = 0;
3904 
3905   HeapWord* aligned_start = sp->bottom();
3906   if (sp->used_region().contains(_restart_addr)) {
3907     // Align down to a card boundary for the start of 0th task
3908     // for this space.
3909     aligned_start =
3910       (HeapWord*)align_size_down((uintptr_t)_restart_addr,
3911                                  CardTableModRefBS::card_size);
3912   }
3913 
3914   size_t chunk_size = sp->marking_task_size();
3915   while (!pst->is_task_claimed(/* reference */ nth_task)) {
3916     // Having claimed the nth task in this space,
3917     // compute the chunk that it corresponds to:
3918     MemRegion span = MemRegion(aligned_start + nth_task*chunk_size,
3919                                aligned_start + (nth_task+1)*chunk_size);
3920     // Try and bump the global finger via a CAS;
3921     // note that we need to do the global finger bump
3922     // _before_ taking the intersection below, because
3923     // the task corresponding to that region will be
3924     // deemed done even if the used_region() expands
3925     // because of allocation -- as it almost certainly will
3926     // during start-up while the threads yield in the
3927     // closure below.
3928     HeapWord* finger = span.end();
3929     bump_global_finger(finger);   // atomically
3930     // There are null tasks here corresponding to chunks
3931     // beyond the "top" address of the space.
3932     span = span.intersection(sp->used_region());
3933     if (!span.is_empty()) {  // Non-null task
3934       HeapWord* prev_obj;
3935       assert(!span.contains(_restart_addr) || nth_task == 0,
3936              "Inconsistency");
3937       if (nth_task == 0) {
3938         // For the 0th task, we'll not need to compute a block_start.
3939         if (span.contains(_restart_addr)) {
3940           // In the case of a restart because of stack overflow,
3941           // we might additionally skip a chunk prefix.
3942           prev_obj = _restart_addr;
3943         } else {
3944           prev_obj = span.start();
3945         }
3946       } else {
3947         // We want to skip the first object because
3948         // the protocol is to scan any object in its entirety
3949         // that _starts_ in this span; a fortiori, any
3950         // object starting in an earlier span is scanned
3951         // as part of an earlier claimed task.
3952         // Below we use the "careful" version of block_start
3953         // so we do not try to navigate uninitialized objects.
3954         prev_obj = sp->block_start_careful(span.start());
3955         // Below we use a variant of block_size that uses the
3956         // Printezis bits to avoid waiting for allocated
3957         // objects to become initialized/parsable.
3958         while (prev_obj < span.start()) {
3959           size_t sz = sp->block_size_no_stall(prev_obj, _collector);
3960           if (sz > 0) {
3961             prev_obj += sz;
3962           } else {
3963             // In this case we may end up doing a bit of redundant
3964             // scanning, but that appears unavoidable, short of
3965             // locking the free list locks; see bug 6324141.
3966             break;
3967           }
3968         }
3969       }
3970       if (prev_obj < span.end()) {
3971         MemRegion my_span = MemRegion(prev_obj, span.end());
3972         // Do the marking work within a non-empty span --
3973         // the last argument to the constructor indicates whether the
3974         // iteration should be incremental with periodic yields.
3975         Par_MarkFromRootsClosure cl(this, _collector, my_span,
3976                                     &_collector->_markBitMap,
3977                                     work_queue(i),
3978                                     &_collector->_markStack,
3979                                     _asynch);
3980         _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
3981       } // else nothing to do for this task
3982     }   // else nothing to do for this task
3983   }
3984   // We'd be tempted to assert here that since there are no
3985   // more tasks left to claim in this space, the global_finger
3986   // must exceed space->top() and a fortiori space->end(). However,
3987   // that would not quite be correct because the bumping of
3988   // global_finger occurs strictly after the claiming of a task,
3989   // so by the time we reach here the global finger may not yet
3990   // have been bumped up by the thread that claimed the last
3991   // task.
3992   pst->all_tasks_completed();
3993 }
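
// Editorial sketch (not collector code): the "skip to the first object that
// starts in my chunk" step performed by do_scan_and_mark() above, shown in
// isolation. block_start_at_or_before() and block_size_or_zero() are
// hypothetical stand-ins for block_start_careful() and block_size_no_stall().
// Excluded from compilation.
#if 0
#include <cstddef>

char* scan_start_for_chunk(char* chunk_start,
                           char* (*block_start_at_or_before)(char*),
                           size_t (*block_size_or_zero)(char*)) {
  // Start from an object boundary at or before the chunk start ...
  char* obj = block_start_at_or_before(chunk_start);
  // ... and walk forward object by object until we reach the chunk.
  while (obj < chunk_start) {
    size_t sz = block_size_or_zero(obj);
    if (sz == 0) {
      // Size unknown (object not yet parsable): accept a little redundant
      // scanning rather than waiting, just as the code above does.
      break;
    }
    obj += sz;
  }
  // Either the first object that starts at or after chunk_start, or a
  // slightly earlier boundary when an unparsable object was encountered.
  return obj;
}
#endif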
3994 
3995 class Par_ConcMarkingClosure: public CMSOopClosure {
3996  private:
3997   CMSCollector* _collector;
3998   CMSConcMarkingTask* _task;
3999   MemRegion     _span;
4000   CMSBitMap*    _bit_map;
4001   CMSMarkStack* _overflow_stack;
4002   OopTaskQueue* _work_queue;
4003  protected:
4004   DO_OOP_WORK_DEFN
4005  public:
4006   Par_ConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue,
4007                          CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
4008     CMSOopClosure(collector->ref_processor()),
4009     _collector(collector),
4010     _task(task),
4011     _span(collector->_span),
4012     _work_queue(work_queue),
4013     _bit_map(bit_map),
4014     _overflow_stack(overflow_stack)
4015   { }
4016   virtual void do_oop(oop* p);
4017   virtual void do_oop(narrowOop* p);
4018 
4019   void trim_queue(size_t max);
4020   void handle_stack_overflow(HeapWord* lost);
4021   void do_yield_check() {
4022     if (_task->should_yield()) {
4023       _task->yield();
4024     }
4025   }
4026 };
4027 
4028 // Grey object scanning during work stealing phase --
4029 // the salient assumption here is that any references
4030 // that are in these stolen objects being scanned must
4031 // already have been initialized (else they would not have
4032 // been published), so we do not need to check for
4033 // uninitialized objects before pushing here.
4034 void Par_ConcMarkingClosure::do_oop(oop obj) {
4035   assert(obj->is_oop_or_null(true), "expected an oop or NULL");
4036   HeapWord* addr = (HeapWord*)obj;
4037   // Check if oop points into the CMS generation
4038   // and is not marked
4039   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
4040     // a white object ...
4041     // If we manage to "claim" the object, by being the
4042     // first thread to mark it, then we push it on our
4043     // marking stack
4044     if (_bit_map->par_mark(addr)) {     // ... now grey
4045       // push on work queue (grey set)
4046       bool simulate_overflow = false;
4047       NOT_PRODUCT(
4048         if (CMSMarkStackOverflowALot &&
4049             _collector->simulate_overflow()) {
4050           // simulate a stack overflow
4051           simulate_overflow = true;
4052         }
4053       )
4054       if (simulate_overflow ||
4055           !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
4056         // stack overflow
4057         if (PrintCMSStatistics != 0) {
4058           gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
4059                                  SIZE_FORMAT, _overflow_stack->capacity());
4060         }
4061         // We cannot assert that the overflow stack is full because
4062         // it may have been emptied since.
4063         assert(simulate_overflow ||
4064                _work_queue->size() == _work_queue->max_elems(),
4065               "Else push should have succeeded");
4066         handle_stack_overflow(addr);
4067       }
4068     } // Else, some other thread got there first
4069     do_yield_check();
4070   }
4071 }
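
// Editorial sketch (not collector code): the essential claim-and-push step in
// Par_ConcMarkingClosure::do_oop() above -- atomically mark the object to
// claim it, then push it on the local queue, falling back to the shared
// overflow stack, and report an overflow if both are full. Bitmap, Queue and
// Stack are hypothetical types. Excluded from compilation.
#if 0
template <typename Bitmap, typename Queue, typename Stack>
void claim_and_push(void* obj, Bitmap& bits, Queue& local_q, Stack& overflow,
                    void (*on_overflow)(void* obj)) {
  if (bits.is_marked(obj)) {
    return;                      // already grey or black
  }
  if (!bits.par_mark(obj)) {
    return;                      // lost the race: another worker claimed obj
  }
  // We were first to mark obj (white -> grey); publish it for later scanning.
  if (!local_q.push(obj) && !overflow.par_push(obj)) {
    // Both the local queue and the shared overflow stack are full: record the
    // loss so that marking can be restarted from obj's address later.
    on_overflow(obj);
  }
}
#endif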
4072 
4073 void Par_ConcMarkingClosure::do_oop(oop* p)       { Par_ConcMarkingClosure::do_oop_work(p); }
4074 void Par_ConcMarkingClosure::do_oop(narrowOop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
4075 
4076 void Par_ConcMarkingClosure::trim_queue(size_t max) {
4077   while (_work_queue->size() > max) {
4078     oop new_oop;
4079     if (_work_queue->pop_local(new_oop)) {
4080       assert(new_oop->is_oop(), "Should be an oop");
4081       assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object");
4082       assert(_span.contains((HeapWord*)new_oop), "Not in span");
4083       new_oop->oop_iterate(this);  // do_oop() above
4084       do_yield_check();
4085     }
4086   }
4087 }
4088 
4089 // Upon stack overflow, we discard (part of) the stack,
4090 // remembering the least address amongst those discarded
4091 // in CMSCollector's _restart_address.
4092 void Par_ConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
4093   // We need to do this under a mutex to prevent other
4094   // workers from interfering with the work done below.
4095   MutexLockerEx ml(_overflow_stack->par_lock(),
4096                    Mutex::_no_safepoint_check_flag);
4097   // Remember the least grey address discarded
4098   HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
4099   _collector->lower_restart_addr(ra);
4100   _overflow_stack->reset();  // discard stack contents
4101   _overflow_stack->expand(); // expand the stack if possible
4102 }
4103 
4104 
4105 void CMSConcMarkingTask::do_work_steal(int i) {
4106   OopTaskQueue* work_q = work_queue(i);
4107   oop obj_to_scan;
4108   CMSBitMap* bm = &(_collector->_markBitMap);
4109   CMSMarkStack* ovflw = &(_collector->_markStack);
4110   int* seed = _collector->hash_seed(i);
4111   Par_ConcMarkingClosure cl(_collector, this, work_q, bm, ovflw);
4112   while (true) {
4113     cl.trim_queue(0);
4114     assert(work_q->size() == 0, "Should have been emptied above");
4115     if (get_work_from_overflow_stack(ovflw, work_q)) {
4116       // Can't assert below because the work obtained from the
4117       // overflow stack may already have been stolen from us.
4118       // assert(work_q->size() > 0, "Work from overflow stack");
4119       continue;
4120     } else if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
4121       assert(obj_to_scan->is_oop(), "Should be an oop");
4122       assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object");
4123       obj_to_scan->oop_iterate(&cl);
4124     } else if (terminator()->offer_termination(&_term_term)) {
4125       assert(work_q->size() == 0, "Impossible!");
4126       break;
4127     } else if (yielding() || should_yield()) {
4128       yield();
4129     }
4130   }
4131 }
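
// Editorial sketch (not collector code): the shape of the loop in
// do_work_steal() above -- drain the local queue, refill from the shared
// overflow stack, steal from peers, and offer termination only when neither
// source has work. All types and member functions are hypothetical.
// Excluded from compilation.
#if 0
template <typename Queue, typename Overflow, typename QueueSet, typename Terminator>
void steal_loop_sketch(int worker, Queue& local_q, Overflow& overflow,
                       QueueSet& all_queues, Terminator& term,
                       void (*scan)(void* obj)) {
  void* obj;
  for (;;) {
    while (local_q.pop_local(obj)) {
      scan(obj);                                  // scanning may push more work
    }
    if (overflow.transfer_batch_to(local_q)) {    // refill from the shared stack
      continue;
    }
    if (all_queues.steal(worker, obj)) {          // steal from another worker
      scan(obj);
      continue;
    }
    if (term.offer_termination()) {               // all workers idle: we are done
      break;
    }
    // Some worker still has work (or came back with more); try again.
  }
}
#endif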
4132 
4133 // This is run by the CMS (coordinator) thread.
4134 void CMSConcMarkingTask::coordinator_yield() {
4135   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
4136          "CMS thread should hold CMS token");
4137   // First give up the locks, then yield, then re-lock
4138   // We should probably use a constructor/destructor idiom to
4139   // do this unlock/lock or modify the MutexUnlocker class to
4140   // serve our purpose. XXX
4141   assert_lock_strong(_bit_map_lock);
4142   _bit_map_lock->unlock();
4143   ConcurrentMarkSweepThread::desynchronize(true);
4144   ConcurrentMarkSweepThread::acknowledge_yield_request();
4145   _collector->stopTimer();
4146   if (PrintCMSStatistics != 0) {
4147     _collector->incrementYields();
4148   }
4149   _collector->icms_wait();
4150 
4151   // It is possible for whichever thread initiated the yield request
4152   // not to get a chance to wake up and take the bitmap lock between
4153   // this thread releasing it and reacquiring it. So, while the
4154   // should_yield() flag is on, let's sleep for a bit to give the
4155   // other thread a chance to wake up. The limit imposed on the number
4156   // of iterations is defensive, to avoid any unforeseen circumstances
4157   // putting us into an infinite loop. Since it's always been this
4158   // (coordinator_yield()) method that was observed to cause the
4159   // problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
4160   // which is by default non-zero. For the other seven methods that
4161   // also perform the yield operation, we are using a different
4162   // parameter (CMSYieldSleepCount) which is by default zero. This way we
4163   // can enable the sleeping for those methods too, if necessary.
4164   // See 6442774.
4165   //
4166   // We really need to reconsider the synchronization between the GC
4167   // thread and the yield-requesting threads in the future and we
4168   // should really use wait/notify, which is the recommended
4169   // way of doing this type of interaction. Additionally, we should
4170   // consolidate the eight almost identical methods that do the yield
4171   // operation into one, for better maintainability and
4172   // readability. See 6445193.
4173   //
4174   // Tony 2006.06.29
4175   for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
4176                    ConcurrentMarkSweepThread::should_yield() &&
4177                    !CMSCollector::foregroundGCIsActive(); ++i) {
4178     os::sleep(Thread::current(), 1, false);
4179     ConcurrentMarkSweepThread::acknowledge_yield_request();
4180   }
4181 
4182   ConcurrentMarkSweepThread::synchronize(true);
4183   _bit_map_lock->lock_without_safepoint_check();
4184   _collector->startTimer();
4185 }
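
// Editorial sketch (not collector code): the constructor/destructor idiom the
// XXX note above asks for -- give up a lock for the duration of a scope and
// always reacquire it on exit -- which would keep the unlock/relock pairing in
// coordinator_yield() from ever getting out of sync. LockType is a
// hypothetical monitor offering unlock() and lock_without_safepoint_check().
// Excluded from compilation.
#if 0
template <typename LockType>
class ScopedUnlocker {
  LockType* _lock;
 public:
  explicit ScopedUnlocker(LockType* lock) : _lock(lock) {
    _lock->unlock();                        // release on scope entry ...
  }
  ~ScopedUnlocker() {
    _lock->lock_without_safepoint_check();  // ... reacquire on any scope exit
  }
};

// Hypothetical usage:
//   {
//     ScopedUnlocker<Monitor> u(_bit_map_lock);
//     /* yield, sleep and wait for the requesting thread here */
//   }  // the lock is reacquired here, on every exit path
#endif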
4186 
4187 bool CMSCollector::do_marking_mt(bool asynch) {
4188   assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
4189   int num_workers = AdaptiveSizePolicy::calc_active_conc_workers(
4190                                        conc_workers()->total_workers(),
4191                                        conc_workers()->active_workers(),
4192                                        Threads::number_of_non_daemon_threads());
4193   conc_workers()->set_active_workers(num_workers);
4194 
4195   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
4196 
4197   CMSConcMarkingTask tsk(this,
4198                          cms_space,
4199                          asynch,
4200                          conc_workers(),
4201                          task_queues());
4202 
4203   // Since the actual number of workers we get may be different
4204   // from the number we requested above, do we need to do anything different
4205   // below? In particular, maybe we need to subclass the SequentialSubTasksDone
4206   // class?? XXX
4207   cms_space->initialize_sequential_subtasks_for_marking(num_workers);
4208 
4209   // Refs discovery is already non-atomic.
4210   assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
4211   assert(ref_processor()->discovery_is_mt(), "Discovery should be MT");
4212   conc_workers()->start_task(&tsk);
4213   while (tsk.yielded()) {
4214     tsk.coordinator_yield();
4215     conc_workers()->continue_task(&tsk);
4216   }
4217   // If the task was aborted, _restart_addr will be non-NULL
4218   assert(tsk.completed() || _restart_addr != NULL, "Inconsistency");
4219   while (_restart_addr != NULL) {
4220     // XXX For now we do not make use of ABORTED state and have not
4221     // yet implemented the right abort semantics (even in the original
4222     // single-threaded CMS case). That needs some more investigation
4223     // and is deferred for now; see CR# TBF. 07252005YSR. XXX
4224     assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency");
4225     // If _restart_addr is non-NULL, a marking stack overflow
4226     // occurred; we need to do a fresh marking iteration from the
4227     // indicated restart address.
4228     if (_foregroundGCIsActive && asynch) {
4229       // We may be running into repeated stack overflows, having
4230       // reached the limit of the stack size, while making very
4231       // slow forward progress. It may be best to bail out and
4232       // let the foreground collector do its job.
4233       // Clear _restart_addr, so that foreground GC
4234       // works from scratch. This avoids the headache of
4235       // a "rescan" which would otherwise be needed because
4236       // of the dirty mod union table & card table.
4237       _restart_addr = NULL;
4238       return false;
4239     }
4240     // Adjust the task to restart from _restart_addr
4241     tsk.reset(_restart_addr);
4242     cms_space->initialize_sequential_subtasks_for_marking(num_workers,
4243                   _restart_addr);
4244     _restart_addr = NULL;
4245     // Get the workers going again
4246     conc_workers()->start_task(&tsk);
4247     while (tsk.yielded()) {
4248       tsk.coordinator_yield();
4249       conc_workers()->continue_task(&tsk);
4250     }
4251   }
4252   assert(tsk.completed(), "Inconsistency");
4253   assert(tsk.result() == true, "Inconsistency");
4254   return true;
4255 }
4256 
4257 bool CMSCollector::do_marking_st(bool asynch) {
4258   ResourceMark rm;
4259   HandleMark   hm;
4260 
4261   // Temporarily make refs discovery single threaded (non-MT)
4262   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
4263   MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
4264     &_markStack, CMSYield && asynch);
4265   // the last argument to iterate indicates whether the iteration
4266   // should be incremental with periodic yields.
4267   _markBitMap.iterate(&markFromRootsClosure);
4268   // If _restart_addr is non-NULL, a marking stack overflow
4269   // occurred; we need to do a fresh iteration from the
4270   // indicated restart address.
4271   while (_restart_addr != NULL) {
4272     if (_foregroundGCIsActive && asynch) {
4273       // We may be running into repeated stack overflows, having
4274       // reached the limit of the stack size, while making very
4275       // slow forward progress. It may be best to bail out and
4276       // let the foreground collector do its job.
4277       // Clear _restart_addr, so that foreground GC
4278       // works from scratch. This avoids the headache of
4279       // a "rescan" which would otherwise be needed because
4280       // of the dirty mod union table & card table.
4281       _restart_addr = NULL;
4282       return false;  // indicating failure to complete marking
4283     }
4284     // Deal with stack overflow:
4285     // we restart marking from _restart_addr
4286     HeapWord* ra = _restart_addr;
4287     markFromRootsClosure.reset(ra);
4288     _restart_addr = NULL;
4289     _markBitMap.iterate(&markFromRootsClosure, ra, _span.end());
4290   }
4291   return true;
4292 }
4293 
4294 void CMSCollector::preclean() {
4295   check_correct_thread_executing();
4296   assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
4297   verify_work_stacks_empty();
4298   verify_overflow_empty();
4299   _abort_preclean = false;
4300   if (CMSPrecleaningEnabled) {
4301     _eden_chunk_index = 0;
4302     size_t used = get_eden_used();
4303     size_t capacity = get_eden_capacity();
4304     // Don't start sampling unless we will get sufficiently
4305     // many samples.
4306     if (used < (capacity/(CMSScheduleRemarkSamplingRatio * 100)
4307                 * CMSScheduleRemarkEdenPenetration)) {
4308       _start_sampling = true;
4309     } else {
4310       _start_sampling = false;
4311     }
4312     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
4313     CMSPhaseAccounting pa(this, "preclean", !PrintGCDetails);
4314     preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
4315   }
4316   CMSTokenSync x(true); // is cms thread
4317   if (CMSPrecleaningEnabled) {
4318     sample_eden();
4319     _collectorState = AbortablePreclean;
4320   } else {
4321     _collectorState = FinalMarking;
4322   }
4323   verify_work_stacks_empty();
4324   verify_overflow_empty();
4325 }
4326 
4327 // Try and schedule the remark such that young gen
4328 // occupancy is CMSScheduleRemarkEdenPenetration %.
4329 void CMSCollector::abortable_preclean() {
4330   check_correct_thread_executing();
4331   assert(CMSPrecleaningEnabled,  "Inconsistent control state");
4332   assert(_collectorState == AbortablePreclean, "Inconsistent control state");
4333 
4334   // If Eden's current occupancy is below this threshold,
4335   // immediately schedule the remark; else preclean
4336   // past the next scavenge in an effort to
4337   // schedule the pause as described above. By choosing
4338   // CMSScheduleRemarkEdenSizeThreshold >= max eden size
4339   // we will never do an actual abortable preclean cycle.
4340   if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
4341     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
4342     CMSPhaseAccounting pa(this, "abortable-preclean", !PrintGCDetails);
4343     // We need more smarts in the abortable preclean
4344     // loop below to deal with cases where allocation
4345     // in young gen is very very slow, and our precleaning
4346     // is running a losing race against a horde of
4347     // mutators intent on flooding us with CMS updates
4348     // (dirty cards).
4349     // One, admittedly dumb, strategy is to give up
4350     // after a certain number of abortable precleaning loops
4351     // or after a certain maximum time. We want to make
4352     // this smarter in the next iteration.
4353     // XXX FIX ME!!! YSR
4354     size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
4355     while (!(should_abort_preclean() ||
4356              ConcurrentMarkSweepThread::should_terminate())) {
4357       workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
4358       cumworkdone += workdone;
4359       loops++;
4360       // Voluntarily terminate abortable preclean phase if we have
4361       // been at it for too long.
4362       if ((CMSMaxAbortablePrecleanLoops != 0) &&
4363           loops >= CMSMaxAbortablePrecleanLoops) {
4364         if (PrintGCDetails) {
4365           gclog_or_tty->print(" CMS: abort preclean due to loops ");
4366         }
4367         break;
4368       }
4369       if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) {
4370         if (PrintGCDetails) {
4371           gclog_or_tty->print(" CMS: abort preclean due to time ");
4372         }
4373         break;
4374       }
4375       // If we are doing little work each iteration, we should
4376       // take a short break.
4377       if (workdone < CMSAbortablePrecleanMinWorkPerIteration) {
4378         // Sleep for some time, waiting for work to accumulate
4379         stopTimer();
4380         cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis);
4381         startTimer();
4382         waited++;
4383       }
4384     }
4385     if (PrintCMSStatistics > 0) {
4386       gclog_or_tty->print(" ["SIZE_FORMAT" iterations, "SIZE_FORMAT" waits, "SIZE_FORMAT" cards] ",
4387                           loops, waited, cumworkdone);
4388     }
4389   }
4390   CMSTokenSync x(true); // is cms thread
4391   if (_collectorState != Idling) {
4392     assert(_collectorState == AbortablePreclean,
4393            "Spontaneous state transition?");
4394     _collectorState = FinalMarking;
4395   } // Else, a foreground collection completed this CMS cycle.
4396   return;
4397 }
4398 
4399 // Respond to an Eden sampling opportunity
4400 void CMSCollector::sample_eden() {
4401   // Make sure a young gc cannot sneak in between our
4402   // reading and recording of a sample.
4403   assert(Thread::current()->is_ConcurrentGC_thread(),
4404          "Only the cms thread may collect Eden samples");
4405   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
4406          "Should collect samples while holding CMS token");
4407   if (!_start_sampling) {
4408     return;
4409   }
4410   if (_eden_chunk_array) {
4411     if (_eden_chunk_index < _eden_chunk_capacity) {
4412       _eden_chunk_array[_eden_chunk_index] = *_top_addr;   // take sample
4413       assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
4414              "Unexpected state of Eden");
4415       // We'd like to check that what we just sampled is an oop-start address;
4416       // however, we cannot do that here since the object may not yet have been
4417       // initialized. So we'll instead do the check when we _use_ this sample
4418       // later.
4419       if (_eden_chunk_index == 0 ||
4420           (pointer_delta(_eden_chunk_array[_eden_chunk_index],
4421                          _eden_chunk_array[_eden_chunk_index-1])
4422            >= CMSSamplingGrain)) {
4423         _eden_chunk_index++;  // commit sample
4424       }
4425     }
4426   }
4427   if ((_collectorState == AbortablePreclean) && !_abort_preclean) {
4428     size_t used = get_eden_used();
4429     size_t capacity = get_eden_capacity();
4430     assert(used <= capacity, "Unexpected state of Eden");
4431     if (used >  (capacity/100 * CMSScheduleRemarkEdenPenetration)) {
4432       _abort_preclean = true;
4433     }
4434   }
4435 }
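
// Editorial sketch (not collector code): the sample-commit rule used by
// sample_eden() above -- record the current eden top, but only advance the
// sample index when the new sample lies at least one sampling grain past the
// previous one, so the chunk array splits eden into usefully sized pieces.
// All names are hypothetical. Excluded from compilation.
#if 0
#include <cstddef>

void take_eden_sample(char* eden_top, char** chunk_array, size_t chunk_capacity,
                      size_t& chunk_index, size_t grain_in_bytes) {
  if (chunk_index >= chunk_capacity) {
    return;                                  // sample array is full
  }
  chunk_array[chunk_index] = eden_top;       // tentatively record the sample
  if (chunk_index == 0 ||
      chunk_array[chunk_index] - chunk_array[chunk_index - 1] >= (ptrdiff_t)grain_in_bytes) {
    chunk_index++;                           // commit: far enough from the last sample
  }
  // else: leave the index alone; the slot is overwritten by the next sample.
}
#endif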
4436 
4437 
4438 size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
4439   assert(_collectorState == Precleaning ||
4440          _collectorState == AbortablePreclean, "incorrect state");
4441   ResourceMark rm;
4442   HandleMark   hm;
4443 
4444   // Precleaning is currently not MT but the reference processor
4445   // may be set for MT.  Disable it temporarily here.
4446   ReferenceProcessor* rp = ref_processor();
4447   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
4448 
4449   // Do one pass of scrubbing the discovered reference lists
4450   // to remove any reference objects with strongly-reachable
4451   // referents.
4452   if (clean_refs) {
4453     CMSPrecleanRefsYieldClosure yield_cl(this);
4454     assert(rp->span().equals(_span), "Spans should be equal");
4455     CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
4456                                    &_markStack, true /* preclean */);
4457     CMSDrainMarkingStackClosure complete_trace(this,
4458                                    _span, &_markBitMap, &_markStack,
4459                                    &keep_alive, true /* preclean */);
4460 
4461     // We don't want this step to interfere with a young
4462     // collection because we don't want to take CPU
4463     // or memory bandwidth away from the young GC threads
4464     // (which may be as many as there are CPUs).
4465     // Note that we don't need to protect ourselves from
4466     // interference with mutators because they can't
4467     // manipulate the discovered reference lists nor affect
4468     // the computed reachability of the referents, the
4469     // only properties manipulated by the precleaning
4470     // of these reference lists.
4471     stopTimer();
4472     CMSTokenSyncWithLocks x(true /* is cms thread */,
4473                             bitMapLock());
4474     startTimer();
4475     sample_eden();
4476 
4477     // The following will yield to allow foreground
4478     // collection to proceed promptly. XXX YSR:
4479     // The code in this method may need further
4480     // tweaking for better performance and some restructuring
4481     // for cleaner interfaces.
4482     rp->preclean_discovered_references(
4483           rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl);
4484   }
4485 
4486   if (clean_survivor) {  // preclean the active survivor space(s)
4487     assert(_young_gen->kind() == Generation::DefNew ||
4488            _young_gen->kind() == Generation::ParNew ||
4489            _young_gen->kind() == Generation::ASParNew,
4490          "incorrect type for cast");
4491     DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
4492     PushAndMarkClosure pam_cl(this, _span, ref_processor(),
4493                              &_markBitMap, &_modUnionTable,
4494                              &_markStack, true /* precleaning phase */);
4495     stopTimer();
4496     CMSTokenSyncWithLocks ts(true /* is cms thread */,
4497                              bitMapLock());
4498     startTimer();
4499     unsigned int before_count =
4500       GenCollectedHeap::heap()->total_collections();
4501     SurvivorSpacePrecleanClosure
4502       sss_cl(this, _span, &_markBitMap, &_markStack,
4503              &pam_cl, before_count, CMSYield);
4504     dng->from()->object_iterate_careful(&sss_cl);
4505     dng->to()->object_iterate_careful(&sss_cl);
4506   }
4507   MarkRefsIntoAndScanClosure
4508     mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
4509              &_markStack, this, CMSYield,
4510              true /* precleaning phase */);
4511   // CAUTION: The following closure has persistent state that may need to
4512   // be reset upon a decrease in the sequence of addresses it
4513   // processes.
4514   ScanMarkedObjectsAgainCarefullyClosure
4515     smoac_cl(this, _span,
4516       &_markBitMap, &_markStack, &mrias_cl, CMSYield);
4517 
4518   // Preclean dirty cards in ModUnionTable and CardTable using
4519   // appropriate convergence criterion;
4520   // repeat CMSPrecleanIter times unless we find that
4521   // we are losing.
4522   assert(CMSPrecleanIter < 10, "CMSPrecleanIter is too large");
4523   assert(CMSPrecleanNumerator < CMSPrecleanDenominator,
4524          "Bad convergence multiplier");
4525   assert(CMSPrecleanThreshold >= 100,
4526          "Unreasonably low CMSPrecleanThreshold");
4527 
4528   size_t numIter, cumNumCards, lastNumCards, curNumCards;
4529   for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0;
4530        numIter < CMSPrecleanIter;
4531        numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
4532     curNumCards  = preclean_mod_union_table(_cmsGen, &smoac_cl);
4533     if (Verbose && PrintGCDetails) {
4534       gclog_or_tty->print(" (modUnionTable: "SIZE_FORMAT" cards)", curNumCards);
4535     }
4536     // Either there are very few dirty cards, so re-mark
4537     // pause will be small anyway, or our pre-cleaning isn't
4538     // that much faster than the rate at which cards are being
4539     // dirtied, so we might as well stop and re-mark since
4540     // precleaning won't improve our re-mark time by much.
4541     if (curNumCards <= CMSPrecleanThreshold ||
4542         (numIter > 0 &&
4543          (curNumCards * CMSPrecleanDenominator >
4544          lastNumCards * CMSPrecleanNumerator))) {
4545       numIter++;
4546       cumNumCards += curNumCards;
4547       break;
4548     }
4549   }
4550 
4551   preclean_klasses(&mrias_cl, _cmsGen->freelistLock());
4552 
4553   curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
4554   cumNumCards += curNumCards;
4555   if (PrintGCDetails && PrintCMSStatistics != 0) {
4556     gclog_or_tty->print_cr(" (cardTable: "SIZE_FORMAT" cards, re-scanned "SIZE_FORMAT" cards, "SIZE_FORMAT" iterations)",
4557                   curNumCards, cumNumCards, numIter);
4558   }
4559   return cumNumCards;   // as a measure of useful work done
4560 }
4561 
4562 // PRECLEANING NOTES:
4563 // Precleaning involves:
4564 // . reading the bits of the modUnionTable and clearing the set bits.
4565 // . For the cards corresponding to the set bits, we scan the
4566 //   objects on those cards. This means we need the free_list_lock
4567 //   so that we can safely iterate over the CMS space when scanning
4568 //   for oops.
4569 // . When we scan the objects, we'll be both reading and setting
4570 //   marks in the marking bit map, so we'll need the marking bit map.
4571 // . For protecting _collector_state transitions, we take the CGC_lock.
4572 //   Note that any races in the reading of card table entries by the
4573 //   CMS thread on the one hand and the clearing of those entries by the
4574 //   VM thread or the setting of those entries by the mutator threads on the
4575 //   other are quite benign. However, for efficiency it makes sense to keep
4576 //   the VM thread from racing with the CMS thread while the latter is
4577 //   copying dirty card info to the modUnionTable. We therefore also use the
4578 //   CGC_lock to protect the reading of the card table and the mod union
4579 //   table by the CMS thread.
4580 // . We run concurrently with mutator updates, so scanning
4581 //   needs to be done carefully  -- we should not try to scan
4582 //   potentially uninitialized objects.
4583 //
4584 // Locking strategy: While holding the CGC_lock, we scan over and
4585 // reset a maximal dirty range of the mod union / card tables, then lock
4586 // the free_list_lock and bitmap lock to do a full marking, then
4587 // release these locks; and repeat the cycle. This allows for a
4588 // certain amount of fairness in the sharing of these locks between
4589 // the CMS collector on the one hand, and the VM thread and the
4590 // mutators on the other.
4591 
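// Editorial sketch (not collector code): the lock-cycling loop described in
// the notes above, stripped to its skeleton. Each pass holds only the CGC
// token while grabbing and clearing one maximal dirty range, then takes the
// heavier freelist and bitmap locks just long enough to scan that range,
// and then drops everything so the VM thread and mutators can get in.
// All types and member functions are hypothetical. Excluded from compilation.
#if 0
#include <cstddef>

template <typename CGCTokenScope, typename HeavyLockScope,
          typename DirtyTable, typename Scanner>
size_t preclean_loop_sketch(char* start, char* end,
                            DirtyTable& dirty, Scanner& scan_range) {
  size_t cards = 0;
  char* next = start;
  while (next < end) {
    char* range_start = 0;
    char* range_end   = 0;
    {
      CGCTokenScope token;                 // CGC token only: cheap, yields to the VM thread
      if (!dirty.get_and_clear_dirty_range(next, end, range_start, range_end)) {
        break;                             // no dirty cards remain
      }
    }
    {
      HeavyLockScope locks;                // freelist lock + bitmap lock
      cards += scan_range(range_start, range_end);  // trace objects on the dirty cards
    }
    next = range_end;                      // all locks dropped here before the next pass
  }
  return cards;
}
#endif
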
4592 // NOTE: preclean_mod_union_table() and preclean_card_table()
4593 // further below are largely identical; if you need to modify
4594 // one of these methods, please check the other method too.
4595 
4596 size_t CMSCollector::preclean_mod_union_table(
4597   ConcurrentMarkSweepGeneration* gen,
4598   ScanMarkedObjectsAgainCarefullyClosure* cl) {
4599   verify_work_stacks_empty();
4600   verify_overflow_empty();
4601 
4602   // strategy: starting with the first card, accumulate contiguous
4603   // ranges of dirty cards; clear these cards, then scan the region
4604   // covered by these cards.
4605 
4606   // Since all of the MUT is committed ahead, we can just use
4607   // that, in case the generations expand while we are precleaning.
4608   // It might also be fine to just use the committed part of the
4609   // generation, but we might potentially miss cards when the
4610   // generation is rapidly expanding while we are in the midst
4611   // of precleaning.
4612   HeapWord* startAddr = gen->reserved().start();
4613   HeapWord* endAddr   = gen->reserved().end();
4614 
4615   cl->setFreelistLock(gen->freelistLock());   // needed for yielding
4616 
4617   size_t numDirtyCards, cumNumDirtyCards;
4618   HeapWord *nextAddr, *lastAddr;
4619   for (cumNumDirtyCards = numDirtyCards = 0,
4620        nextAddr = lastAddr = startAddr;
4621        nextAddr < endAddr;
4622        nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4623 
4624     ResourceMark rm;
4625     HandleMark   hm;
4626 
4627     MemRegion dirtyRegion;
4628     {
4629       stopTimer();
4630       // Potential yield point
4631       CMSTokenSync ts(true);
4632       startTimer();
4633       sample_eden();
4634       // Get dirty region starting at nextAddr (inclusive),
4635       // simultaneously clearing it.
4636       dirtyRegion =
4637         _modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr);
4638       assert(dirtyRegion.start() >= nextAddr,
4639              "returned region inconsistent?");
4640     }
4641     // Remember where the next search should begin.
4642     // The returned region (if non-empty) is a right open interval,
4643     // so lastAddr is obtained from the right end of that
4644     // interval.
4645     lastAddr = dirtyRegion.end();
4646     // Should do something more transparent and less hacky XXX
4647     numDirtyCards =
4648       _modUnionTable.heapWordDiffToOffsetDiff(dirtyRegion.word_size());
4649 
4650     // We'll scan the cards in the dirty region (with periodic
4651     // yields for foreground GC as needed).
4652     if (!dirtyRegion.is_empty()) {
4653       assert(numDirtyCards > 0, "consistency check");
4654       HeapWord* stop_point = NULL;
4655       stopTimer();
4656       // Potential yield point
4657       CMSTokenSyncWithLocks ts(true, gen->freelistLock(),
4658                                bitMapLock());
4659       startTimer();
4660       {
4661         verify_work_stacks_empty();
4662         verify_overflow_empty();
4663         sample_eden();
4664         stop_point =
4665           gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4666       }
4667       if (stop_point != NULL) {
4668         // The careful iteration stopped early either because it found an
4669         // uninitialized object, or because we were in the midst of an
4670         // "abortable preclean", which should now be aborted. Redirty
4671         // the bits corresponding to the partially-scanned or unscanned
4672         // cards. We'll either restart at the next block boundary or
4673         // abort the preclean.
4674         assert((_collectorState == AbortablePreclean && should_abort_preclean()),
4675                "Should only be AbortablePreclean.");
4676         _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end()));
4677         if (should_abort_preclean()) {
4678           break; // out of preclean loop
4679         } else {
4680           // Compute the next address at which preclean should pick up;
4681           // might need bitMapLock in order to read P-bits.
4682           lastAddr = next_card_start_after_block(stop_point);
4683         }
4684       }
4685     } else {
4686       assert(lastAddr == endAddr, "consistency check");
4687       assert(numDirtyCards == 0, "consistency check");
4688       break;
4689     }
4690   }
4691   verify_work_stacks_empty();
4692   verify_overflow_empty();
4693   return cumNumDirtyCards;
4694 }
4695 
4696 // NOTE: preclean_mod_union_table() above and preclean_card_table()
4697 // below are largely identical; if you need to modify
4698 // one of these methods, please check the other method too.
4699 
4700 size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* gen,
4701   ScanMarkedObjectsAgainCarefullyClosure* cl) {
4702   // strategy: it's similar to preclean_mod_union_table above, in that
4703   // we accumulate contiguous ranges of dirty cards, mark these cards
4704   // precleaned, then scan the region covered by these cards.
4705   HeapWord* endAddr   = (HeapWord*)(gen->_virtual_space.high());
4706   HeapWord* startAddr = (HeapWord*)(gen->_virtual_space.low());
4707 
4708   cl->setFreelistLock(gen->freelistLock());   // needed for yielding
4709 
4710   size_t numDirtyCards, cumNumDirtyCards;
4711   HeapWord *lastAddr, *nextAddr;
4712 
4713   for (cumNumDirtyCards = numDirtyCards = 0,
4714        nextAddr = lastAddr = startAddr;
4715        nextAddr < endAddr;
4716        nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4717 
4718     ResourceMark rm;
4719     HandleMark   hm;
4720 
4721     MemRegion dirtyRegion;
4722     {
4723       // See comments in "Precleaning notes" above on why we
4724       // do this locking. XXX Could the locking overheads be
4725       // too high when dirty cards are sparse? [I don't think so.]
4726       stopTimer();
4727       CMSTokenSync x(true); // is cms thread
4728       startTimer();
4729       sample_eden();
4730       // Get and clear dirty region from card table
4731       dirtyRegion = _ct->ct_bs()->dirty_card_range_after_reset(
4732                                     MemRegion(nextAddr, endAddr),
4733                                     true,
4734                                     CardTableModRefBS::precleaned_card_val());
4735 
4736       assert(dirtyRegion.start() >= nextAddr,
4737              "returned region inconsistent?");
4738     }
4739     lastAddr = dirtyRegion.end();
4740     numDirtyCards =
4741       dirtyRegion.word_size()/CardTableModRefBS::card_size_in_words;
4742 
4743     if (!dirtyRegion.is_empty()) {
4744       stopTimer();
4745       CMSTokenSyncWithLocks ts(true, gen->freelistLock(), bitMapLock());
4746       startTimer();
4747       sample_eden();
4748       verify_work_stacks_empty();
4749       verify_overflow_empty();
4750       HeapWord* stop_point =
4751         gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4752       if (stop_point != NULL) {
4753         assert((_collectorState == AbortablePreclean && should_abort_preclean()),
4754                "Should only be AbortablePreclean.");
4755         _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end()));
4756         if (should_abort_preclean()) {
4757           break; // out of preclean loop
4758         } else {
4759           // Compute the next address at which preclean should pick up.
4760           lastAddr = next_card_start_after_block(stop_point);
4761         }
4762       }
4763     } else {
4764       break;
4765     }
4766   }
4767   verify_work_stacks_empty();
4768   verify_overflow_empty();
4769   return cumNumDirtyCards;
4770 }
4771 
4772 class PrecleanKlassClosure : public KlassClosure {
4773   CMKlassClosure _cm_klass_closure;
4774  public:
4775   PrecleanKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
4776   void do_klass(Klass* k) {
4777     if (k->has_accumulated_modified_oops()) {
4778       k->clear_accumulated_modified_oops();
4779 
4780       _cm_klass_closure.do_klass(k);
4781     }
4782   }
4783 };
4784 
4785 // The freelist lock is needed to prevent asserts; is it really needed?
4786 void CMSCollector::preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock) {
4787 
4788   cl->set_freelistLock(freelistLock);
4789 
4790   CMSTokenSyncWithLocks ts(true, freelistLock, bitMapLock());
4791 
4792   // SSS: Add equivalent to ScanMarkedObjectsAgainCarefullyClosure::do_yield_check and should_abort_preclean?
4793   // SSS: We should probably check if precleaning should be aborted, at suitable intervals?
4794   PrecleanKlassClosure preclean_klass_closure(cl);
4795   ClassLoaderDataGraph::classes_do(&preclean_klass_closure);
4796 
4797   verify_work_stacks_empty();
4798   verify_overflow_empty();
4799 }
4800 
4801 void CMSCollector::checkpointRootsFinal(bool asynch,
4802   bool clear_all_soft_refs, bool init_mark_was_synchronous) {
4803   assert(_collectorState == FinalMarking, "incorrect state transition?");
4804   check_correct_thread_executing();
4805   // world is stopped at this checkpoint
4806   assert(SafepointSynchronize::is_at_safepoint(),
4807          "world should be stopped");
4808   TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
4809 
4810   verify_work_stacks_empty();
4811   verify_overflow_empty();
4812 
4813   SpecializationStats::clear();
4814   if (PrintGCDetails) {
4815     gclog_or_tty->print("[YG occupancy: "SIZE_FORMAT" K ("SIZE_FORMAT" K)]",
4816                         _young_gen->used() / K,
4817                         _young_gen->capacity() / K);
4818   }
4819   if (asynch) {
4820     if (CMSScavengeBeforeRemark) {
4821       GenCollectedHeap* gch = GenCollectedHeap::heap();
4822       // Temporarily set the flag to false; GCH->do_collection() expects
4823       // it to be false and will set it to true.
4824       FlagSetting fl(gch->_is_gc_active, false);
4825       NOT_PRODUCT(TraceTime t("Scavenge-Before-Remark",
4826         PrintGCDetails && Verbose, true, gclog_or_tty);)
4827       int level = _cmsGen->level() - 1;
4828       if (level >= 0) {
4829         gch->do_collection(true,        // full (i.e. force, see below)
4830                            false,       // !clear_all_soft_refs
4831                            0,           // size
4832                            false,       // is_tlab
4833                            level        // max_level
4834                           );
4835       }
4836     }
4837     FreelistLocker x(this);
4838     MutexLockerEx y(bitMapLock(),
4839                     Mutex::_no_safepoint_check_flag);
4840     assert(!init_mark_was_synchronous, "but that's impossible!");
4841     checkpointRootsFinalWork(asynch, clear_all_soft_refs, false);
4842   } else {
4843     // already have all the locks
4844     checkpointRootsFinalWork(asynch, clear_all_soft_refs,
4845                              init_mark_was_synchronous);
4846   }
4847   verify_work_stacks_empty();
4848   verify_overflow_empty();
4849   SpecializationStats::print();
4850 }
4851 
4852 void CMSCollector::checkpointRootsFinalWork(bool asynch,
4853   bool clear_all_soft_refs, bool init_mark_was_synchronous) {
4854 
4855   NOT_PRODUCT(TraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, gclog_or_tty);)
4856 
4857   assert(haveFreelistLocks(), "must have free list locks");
4858   assert_lock_strong(bitMapLock());
4859 
4860   if (UseAdaptiveSizePolicy) {
4861     size_policy()->checkpoint_roots_final_begin();
4862   }
4863 
4864   ResourceMark rm;
4865   HandleMark   hm;
4866 
4867   GenCollectedHeap* gch = GenCollectedHeap::heap();
4868 
4869   if (should_unload_classes()) {
4870     CodeCache::gc_prologue();
4871   }
4872   assert(haveFreelistLocks(), "must have free list locks");
4873   assert_lock_strong(bitMapLock());
4874 
4875   if (!init_mark_was_synchronous) {
4876     // We might assume that we need not fill TLAB's when
4877     // CMSScavengeBeforeRemark is set, because we may have just done
4878     // a scavenge which would have filled all TLAB's -- and besides
4879     // Eden would be empty. This however may not always be the case --
4880     // for instance although we asked for a scavenge, it may not have
4881     // happened because of a JNI critical section. We probably need
4882     // a policy for deciding whether we can in that case wait until
4883     // the critical section releases and then do the remark following
4884     // the scavenge, and skip it here. In the absence of that policy,
4885     // or of an indication of whether the scavenge did indeed occur,
4886     // we cannot rely on TLAB's having been filled and must do
4887     // so here just in case a scavenge did not happen.
4888     gch->ensure_parsability(false);  // fill TLAB's, but no need to retire them
4889     // Update the saved marks which may affect the root scans.
4890     gch->save_marks();
4891 
4892     {
4893       COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
4894 
4895       // Note on the role of the mod union table:
4896       // Since the marker in "markFromRoots" marks concurrently with
4897       // mutators, it is possible for some reachable objects not to have been
4898       // scanned. For instance, an only reference to an object A was
4899       // placed in object B after the marker scanned B. Unless B is rescanned,
4900       // A would be collected. Such updates to references in marked objects
4901       // are detected via the mod union table which is the set of all cards
4902       // dirtied since the first checkpoint in this GC cycle and prior to
4903       // the most recent young generation GC, minus those cleaned up by the
4904       // concurrent precleaning.
4905       if (CMSParallelRemarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
4906         TraceTime t("Rescan (parallel) ", PrintGCDetails, false, gclog_or_tty);
4907         do_remark_parallel();
4908       } else {
4909         TraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
4910                     gclog_or_tty);
4911         do_remark_non_parallel();
4912       }
4913     }
4914   } else {
4915     assert(!asynch, "Can't have init_mark_was_synchronous in asynch mode");
4916     // The initial mark was stop-world, so there's no rescanning to
4917     // do; go straight on to the next step below.
4918   }
4919   verify_work_stacks_empty();
4920   verify_overflow_empty();
4921 
4922   {
4923     NOT_PRODUCT(TraceTime ts("refProcessingWork", PrintGCDetails, false, gclog_or_tty);)
4924     refProcessingWork(asynch, clear_all_soft_refs);
4925   }
4926   verify_work_stacks_empty();
4927   verify_overflow_empty();
4928 
4929   if (should_unload_classes()) {
4930     CodeCache::gc_epilogue();
4931   }
4932   JvmtiExport::gc_epilogue();
4933 
4934   // If we encountered any (marking stack / work queue) overflow
4935   // events during the current CMS cycle, take appropriate
4936   // remedial measures, where possible, so as to try and avoid
4937   // recurrence of that condition.
4938   assert(_markStack.isEmpty(), "No grey objects");
4939   size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
4940                      _ser_kac_ovflw        + _ser_kac_preclean_ovflw;
4941   if (ser_ovflw > 0) {
4942     if (PrintCMSStatistics != 0) {
4943       gclog_or_tty->print_cr("Marking stack overflow (benign) "
4944         "(pmc_pc="SIZE_FORMAT", pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT
4945         ", kac_preclean="SIZE_FORMAT")",
4946         _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw,
4947         _ser_kac_ovflw, _ser_kac_preclean_ovflw);
4948     }
4949     _markStack.expand();
4950     _ser_pmc_remark_ovflw = 0;
4951     _ser_pmc_preclean_ovflw = 0;
4952     _ser_kac_preclean_ovflw = 0;
4953     _ser_kac_ovflw = 0;
4954   }
4955   if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
4956     if (PrintCMSStatistics != 0) {
4957       gclog_or_tty->print_cr("Work queue overflow (benign) "
4958         "(pmc_rm="SIZE_FORMAT", kac="SIZE_FORMAT")",
4959         _par_pmc_remark_ovflw, _par_kac_ovflw);
4960     }
4961     _par_pmc_remark_ovflw = 0;
4962     _par_kac_ovflw = 0;
4963   }
4964   if (PrintCMSStatistics != 0) {
4965      if (_markStack._hit_limit > 0) {
4966        gclog_or_tty->print_cr(" (benign) Hit max stack size limit ("SIZE_FORMAT")",
4967                               _markStack._hit_limit);
4968      }
4969      if (_markStack._failed_double > 0) {
4970        gclog_or_tty->print_cr(" (benign) Failed stack doubling ("SIZE_FORMAT"),"
4971                               " current capacity "SIZE_FORMAT,
4972                               _markStack._failed_double,
4973                               _markStack.capacity());
4974      }
4975   }
4976   _markStack._hit_limit = 0;
4977   _markStack._failed_double = 0;
4978 
4979   if ((VerifyAfterGC || VerifyDuringGC) &&
4980       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
4981     verify_after_remark();
4982   }
4983 
4984   // Change under the freelistLocks.
4985   _collectorState = Sweeping;
4986   // Call isAllClear() under bitMapLock
4987   assert(_modUnionTable.isAllClear(),
4988       "Should be clear by end of the final marking");
4989   assert(_ct->klass_rem_set()->mod_union_is_clear(),
4990       "Should be clear by end of the final marking");
4991   if (UseAdaptiveSizePolicy) {
4992     size_policy()->checkpoint_roots_final_end(gch->gc_cause());
4993   }
4994 }
4995 
4996 // Parallel remark task
4997 class CMSParRemarkTask: public AbstractGangTask {
4998   CMSCollector* _collector;
4999   int           _n_workers;
5000   CompactibleFreeListSpace* _cms_space;
5001 
5002   // The per-thread work queues, available here for stealing.
5003   OopTaskQueueSet*       _task_queues;
5004   ParallelTaskTerminator _term;
5005 
5006  public:
5007   // A value of 0 passed to n_workers will cause the number of
5008   // workers to be taken from the active workers in the work gang.
5009   CMSParRemarkTask(CMSCollector* collector,
5010                    CompactibleFreeListSpace* cms_space,
5011                    int n_workers, FlexibleWorkGang* workers,
5012                    OopTaskQueueSet* task_queues):
5013     AbstractGangTask("Rescan roots and grey objects in parallel"),
5014     _collector(collector),
5015     _cms_space(cms_space),
5016     _n_workers(n_workers),
5017     _task_queues(task_queues),
5018     _term(n_workers, task_queues) { }
5019 
5020   OopTaskQueueSet* task_queues() { return _task_queues; }
5021 
5022   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
5023 
5024   ParallelTaskTerminator* terminator() { return &_term; }
5025   int n_workers() { return _n_workers; }
5026 
5027   void work(uint worker_id);
5028 
5029  private:
5030   // Work method in support of parallel rescan ... of young gen spaces
5031   void do_young_space_rescan(int i, Par_MarkRefsIntoAndScanClosure* cl,
5032                              ContiguousSpace* space,
5033                              HeapWord** chunk_array, size_t chunk_top);
5034 
5035   // ... of  dirty cards in old space
5036   void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
5037                                   Par_MarkRefsIntoAndScanClosure* cl);
5038 
5039   // ... work stealing for the above
5040   void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed);
5041 };
5042 
5043 class RemarkKlassClosure : public KlassClosure {
5044   CMKlassClosure _cm_klass_closure;
5045  public:
5046   RemarkKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
5047   void do_klass(Klass* k) {
5048     // Check if we have modified any oops in the Klass during the concurrent marking.
5049     if (k->has_accumulated_modified_oops()) {
5050       k->clear_accumulated_modified_oops();
5051 
5052     // We could have transferred the current modified marks to the accumulated marks,
5053       // like we do with the Card Table to Mod Union Table. But it's not really necessary.
5054     } else if (k->has_modified_oops()) {
5055       // Don't clear anything, this info is needed by the next young collection.
5056     } else {
5057       // No modified oops in the Klass.
5058       return;
5059     }
5060 
5061     // The klass has modified fields, need to scan the klass.
5062     _cm_klass_closure.do_klass(k);
5063   }
5064 };
5065 
5066 // work_queue(i) is passed to the closure
5067 // Par_MarkRefsIntoAndScanClosure.  The "i" parameter
5068 // also is passed to do_dirty_card_rescan_tasks() and to
5069 // do_work_steal() to select the i-th task_queue.
5070 
5071 void CMSParRemarkTask::work(uint worker_id) {
5072   elapsedTimer _timer;
5073   ResourceMark rm;
5074   HandleMark   hm;
5075 
5076   // ---------- rescan from roots --------------
5077   _timer.start();
5078   GenCollectedHeap* gch = GenCollectedHeap::heap();
5079   Par_MarkRefsIntoAndScanClosure par_mrias_cl(_collector,
5080     _collector->_span, _collector->ref_processor(),
5081     &(_collector->_markBitMap),
5082     work_queue(worker_id));
5083 
5084   // Rescan young gen roots first since these are likely
5085   // coarsely partitioned and may, on that account, constitute
5086   // the critical path; thus, it's best to start off that
5087   // work first.
5088   // ---------- young gen roots --------------
5089   {
5090     DefNewGeneration* dng = _collector->_young_gen->as_DefNewGeneration();
5091     EdenSpace* eden_space = dng->eden();
5092     ContiguousSpace* from_space = dng->from();
5093     ContiguousSpace* to_space   = dng->to();
5094 
5095     HeapWord** eca = _collector->_eden_chunk_array;
5096     size_t     ect = _collector->_eden_chunk_index;
5097     HeapWord** sca = _collector->_survivor_chunk_array;
5098     size_t     sct = _collector->_survivor_chunk_index;
5099 
5100     assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
5101     assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
5102 
5103     do_young_space_rescan(worker_id, &par_mrias_cl, to_space, NULL, 0);
5104     do_young_space_rescan(worker_id, &par_mrias_cl, from_space, sca, sct);
5105     do_young_space_rescan(worker_id, &par_mrias_cl, eden_space, eca, ect);
5106 
5107     _timer.stop();
5108     if (PrintCMSStatistics != 0) {
5109       gclog_or_tty->print_cr(
5110         "Finished young gen rescan work in %dth thread: %3.3f sec",
5111         worker_id, _timer.seconds());
5112     }
5113   }
5114 
5115   // ---------- remaining roots --------------
5116   _timer.reset();
5117   _timer.start();
5118   gch->gen_process_strong_roots(_collector->_cmsGen->level(),
5119                                 false,     // yg was scanned above
5120                                 false,     // this is parallel code
5121                                 false,     // not scavenging
5122                                 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
5123                                 &par_mrias_cl,
5124                                 true,   // walk all of code cache if (so & SO_CodeCache)
5125                                 NULL,
5126                                 NULL);     // The dirty klasses will be handled below
5127   assert(_collector->should_unload_classes()
5128          || (_collector->CMSCollector::roots_scanning_options() & SharedHeap::SO_CodeCache),
5129          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5130   _timer.stop();
5131   if (PrintCMSStatistics != 0) {
5132     gclog_or_tty->print_cr(
5133       "Finished remaining root rescan work in %dth thread: %3.3f sec",
5134       worker_id, _timer.seconds());
5135   }
5136 
5137   // ---------- unhandled CLD scanning ----------
5138   if (worker_id == 0) { // Single threaded at the moment.
5139     _timer.reset();
5140     _timer.start();
5141 
5142     // Scan all new class loader data objects and new dependencies that were
5143     // introduced during concurrent marking.
5144     ResourceMark rm;
5145     GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
5146     for (int i = 0; i < array->length(); i++) {
5147       par_mrias_cl.do_class_loader_data(array->at(i));
5148     }
5149 
5150     // We don't need to keep track of new CLDs anymore.
5151     ClassLoaderDataGraph::remember_new_clds(false);
5152 
5153     _timer.stop();
5154     if (PrintCMSStatistics != 0) {
5155       gclog_or_tty->print_cr(
5156           "Finished unhandled CLD scanning work in %dth thread: %3.3f sec",
5157           worker_id, _timer.seconds());
5158     }
5159   }
5160 
5161   // ---------- dirty klass scanning ----------
5162   if (worker_id == 0) { // Single threaded at the moment.
5163     _timer.reset();
5164     _timer.start();
5165 
5166     // Scan all classes that were dirtied during the concurrent marking phase.
5167     RemarkKlassClosure remark_klass_closure(&par_mrias_cl);
5168     ClassLoaderDataGraph::classes_do(&remark_klass_closure);
5169 
5170     _timer.stop();
5171     if (PrintCMSStatistics != 0) {
5172       gclog_or_tty->print_cr(
5173           "Finished dirty klass scanning work in %dth thread: %3.3f sec",
5174           worker_id, _timer.seconds());
5175     }
5176   }
5177 
5178   // We might have added oops to ClassLoaderData::_handles during the
5179   // concurrent marking phase. These oops point to newly allocated objects
5180   // that are guaranteed to be kept alive, either by the direct allocation
5181   // code or when the young collector processes the strong roots. Hence,
5182   // we don't have to revisit the _handles block during the remark phase.
5183 
5184   // ---------- rescan dirty cards ------------
5185   _timer.reset();
5186   _timer.start();
5187 
5188   // Do the rescan tasks for each of the two spaces
5189   // (cms_space) in turn.
5190   // "worker_id" is passed to select the task_queue for "worker_id"
5191   do_dirty_card_rescan_tasks(_cms_space, worker_id, &par_mrias_cl);
5192   _timer.stop();
5193   if (PrintCMSStatistics != 0) {
5194     gclog_or_tty->print_cr(
5195       "Finished dirty card rescan work in %dth thread: %3.3f sec",
5196       worker_id, _timer.seconds());
5197   }
5198 
5199   // ---------- steal work from other threads ...
5200   // ---------- ... and drain overflow list.
5201   _timer.reset();
5202   _timer.start();
5203   do_work_steal(worker_id, &par_mrias_cl, _collector->hash_seed(worker_id));
5204   _timer.stop();
5205   if (PrintCMSStatistics != 0) {
5206     gclog_or_tty->print_cr(
5207       "Finished work stealing in %dth thread: %3.3f sec",
5208       worker_id, _timer.seconds());
5209   }
5210 }
5211 
5212 // Note that parameter "i" is not used.
5213 void
5214 CMSParRemarkTask::do_young_space_rescan(int i,
5215   Par_MarkRefsIntoAndScanClosure* cl, ContiguousSpace* space,
5216   HeapWord** chunk_array, size_t chunk_top) {
5217   // Until all tasks completed:
5218   // . claim an unclaimed task
5219   // . compute region boundaries corresponding to task claimed
5220   //   using chunk_array
5221   // . par_oop_iterate(cl) over that region
5222 
5223   ResourceMark rm;
5224   HandleMark   hm;
5225 
5226   SequentialSubTasksDone* pst = space->par_seq_tasks();
5227   assert(pst->valid(), "Uninitialized use?");
5228 
5229   uint nth_task = 0;
5230   uint n_tasks  = pst->n_tasks();
5231 
5232   HeapWord *start, *end;
5233   while (!pst->is_task_claimed(/* reference */ nth_task)) {
5234     // We claimed task # nth_task; compute its boundaries.
5235     if (chunk_top == 0) {  // no samples were taken
5236       assert(nth_task == 0 && n_tasks == 1, "Can have only 1 EdenSpace task");
5237       start = space->bottom();
5238       end   = space->top();
5239     } else if (nth_task == 0) {
5240       start = space->bottom();
5241       end   = chunk_array[nth_task];
5242     } else if (nth_task < (uint)chunk_top) {
5243       assert(nth_task >= 1, "Control point invariant");
5244       start = chunk_array[nth_task - 1];
5245       end   = chunk_array[nth_task];
5246     } else {
5247       assert(nth_task == (uint)chunk_top, "Control point invariant");
5248       start = chunk_array[chunk_top - 1];
5249       end   = space->top();
5250     }
5251     MemRegion mr(start, end);
5252     // Verify that mr is in space
5253     assert(mr.is_empty() || space->used_region().contains(mr),
5254            "Should be in space");
5255     // Verify that "start" is an object boundary
5256     assert(mr.is_empty() || oop(mr.start())->is_oop(),
5257            "Should be an oop");
5258     space->par_oop_iterate(mr, cl);
5259   }
5260   pst->all_tasks_completed();
5261 }
5262 
5263 void
5264 CMSParRemarkTask::do_dirty_card_rescan_tasks(
5265   CompactibleFreeListSpace* sp, int i,
5266   Par_MarkRefsIntoAndScanClosure* cl) {
5267   // Until all tasks completed:
5268   // . claim an unclaimed task
5269   // . compute region boundaries corresponding to task claimed
5270   // . transfer dirty bits ct->mut for that region
5271   // . apply rescanclosure to dirty mut bits for that region
5272 
5273   ResourceMark rm;
5274   HandleMark   hm;
5275 
5276   OopTaskQueue* work_q = work_queue(i);
5277   ModUnionClosure modUnionClosure(&(_collector->_modUnionTable));
5278   // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
5279   // CAUTION: This closure has state that persists across calls to
5280   // the work method dirty_range_iterate_clear() in that it has
5281   // embedded in it a (subtype of) UpwardsObjectClosure. The
5282   // use of that state in the embedded UpwardsObjectClosure instance
5283   // assumes that the cards are always iterated (even if in parallel
5284   // by several threads) in monotonically increasing order per each
5285   // thread. This is true of the implementation below which picks
5286   // card ranges (chunks) in monotonically increasing order globally
5287   // and, a-fortiori, in monotonically increasing order per thread
5288   // (the latter order being a subsequence of the former).
5289   // If the work code below is ever reorganized into a more chaotic
5290   // work-partitioning form than the current "sequential tasks"
5291   // paradigm, the use of that persistent state will have to be
5292   // revisited and modified appropriately. See also related
5293   // bug 4756801; work on that bug should examine this code to make
5294   // sure that the changes there do not run counter to the
5295   // assumptions made here and necessary for correctness and
5296   // efficiency. Note also that this code might yield inefficient
5297   // behaviour in the case of very large objects that span one or
5298   // more work chunks. Such objects would potentially be scanned
5299   // several times redundantly. Work on 4756801 should try and
5300   // address that performance anomaly if at all possible. XXX
5301   MemRegion  full_span  = _collector->_span;
5302   CMSBitMap* bm    = &(_collector->_markBitMap);     // shared
5303   MarkFromDirtyCardsClosure
5304     greyRescanClosure(_collector, full_span, // entire span of interest
5305                       sp, bm, work_q, cl);
5306 
5307   SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
5308   assert(pst->valid(), "Uninitialized use?");
5309   uint nth_task = 0;
5310   const int alignment = CardTableModRefBS::card_size * BitsPerWord;
5311   MemRegion span = sp->used_region();
5312   HeapWord* start_addr = span.start();
5313   HeapWord* end_addr = (HeapWord*)round_to((intptr_t)span.end(),
5314                                            alignment);
5315   const size_t chunk_size = sp->rescan_task_size(); // in HeapWord units
5316   assert((HeapWord*)round_to((intptr_t)start_addr, alignment) ==
5317          start_addr, "Check alignment");
5318   assert((size_t)round_to((intptr_t)chunk_size, alignment) ==
5319          chunk_size, "Check alignment");
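  // (With the typical 512-byte cards and 64-bit HeapWords, "alignment" is
  // 512 * 64 = 32768 bytes, so every chunk covers a whole number of mod
  // union table words and no two workers ever touch the same MUT word.)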
5320 
5321   while (!pst->is_task_claimed(/* reference */ nth_task)) {
5322     // Having claimed the nth_task, compute corresponding mem-region,
5323     // which is a-fortiori aligned correctly (i.e. at a MUT boundary).
5324     // The alignment restriction ensures that we do not need any
5325     // synchronization with other gang-workers while setting or
5326     // clearing bits in this chunk of the MUT.
5327     MemRegion this_span = MemRegion(start_addr + nth_task*chunk_size,
5328                                     start_addr + (nth_task+1)*chunk_size);
5329     // The last chunk's end might be way beyond end of the
5330     // used region. In that case pull back appropriately.
5331     if (this_span.end() > end_addr) {
5332       this_span.set_end(end_addr);
5333       assert(!this_span.is_empty(), "Program logic (calculation of n_tasks)");
5334     }
5335     // Iterate over the dirty cards covering this chunk, marking them
5336     // precleaned, and setting the corresponding bits in the mod union
5337     // table. Since we have been careful to partition at Card and MUT-word
5338     // boundaries no synchronization is needed between parallel threads.
5339     _collector->_ct->ct_bs()->dirty_card_iterate(this_span,
5340                                                  &modUnionClosure);
5341 
5342     // Having transferred these marks into the modUnionTable,
5343     // rescan the marked objects on the dirty cards in the modUnionTable.
5344     // Even if this is a synchronous collection, the initial marking
5345     // may have been done during an asynchronous collection so there
5346     // may be dirty bits in the mod-union table.
5347     _collector->_modUnionTable.dirty_range_iterate_clear(
5348                   this_span, &greyRescanClosure);
5349     _collector->_modUnionTable.verifyNoOneBitsInRange(
5350                                  this_span.start(),
5351                                  this_span.end());
5352   }
5353   pst->all_tasks_completed();  // declare that I am done
5354 }
5355 
5356 // . see if we can share work_queues with ParNew? XXX
5357 void
5358 CMSParRemarkTask::do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl,
5359                                 int* seed) {
5360   OopTaskQueue* work_q = work_queue(i);
5361   NOT_PRODUCT(int num_steals = 0;)
5362   oop obj_to_scan;
5363   CMSBitMap* bm = &(_collector->_markBitMap);
5364 
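  // The loop below repeatedly (1) drains our own queue via trim_queue(),
  // (2) tries to refill it from the shared overflow list, and (3) failing
  // that, tries to steal from another worker's queue; only when all three
  // come up empty do we offer termination.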
5365   while (true) {
5366     // Completely finish any left over work from (an) earlier round(s)
5367     cl->trim_queue(0);
5368     size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
5369                                          (size_t)ParGCDesiredObjsFromOverflowList);
5370     // Now check if there's any work in the overflow list
5371     // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
5372     // only affects the number of attempts made to get work from the
5373     // overflow list and does not affect the number of workers.  Just
5374     // pass ParallelGCThreads so this behavior is unchanged.
5375     if (_collector->par_take_from_overflow_list(num_from_overflow_list,
5376                                                 work_q,
5377                                                 ParallelGCThreads)) {
5378       // found something in global overflow list;
5379       // not yet ready to go stealing work from others.
5380       // We'd like to assert(work_q->size() != 0, ...)
5381       // because we just took work from the overflow list,
5382       // but of course we can't since all of that could have
5383       // been already stolen from us.
5384       // "He giveth and He taketh away."
5385       continue;
5386     }
5387     // Verify that we have no work before we resort to stealing
5388     assert(work_q->size() == 0, "Have work, shouldn't steal");
5389     // Try to steal from other queues that have work
5390     if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
5391       NOT_PRODUCT(num_steals++;)
5392       assert(obj_to_scan->is_oop(), "Oops, not an oop!");
5393       assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5394       // Do scanning work
5395       obj_to_scan->oop_iterate(cl);
5396       // Loop around, finish this work, and try to steal some more
5397     } else if (terminator()->offer_termination()) {
5398         break;  // nirvana from the infinite cycle
5399     }
5400   }
5401   NOT_PRODUCT(
5402     if (PrintCMSStatistics != 0) {
5403       gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
5404     }
5405   )
5406   assert(work_q->size() == 0 && _collector->overflow_list_is_empty(),
5407          "Else our work is not yet done");
5408 }
5409 
5410 // Return a thread-local PLAB recording array, as appropriate.
5411 void* CMSCollector::get_data_recorder(int thr_num) {
5412   if (_survivor_plab_array != NULL &&
5413       (CMSPLABRecordAlways ||
5414        (_collectorState > Marking && _collectorState < FinalMarking))) {
5415     assert(thr_num < (int)ParallelGCThreads, "thr_num is out of bounds");
5416     ChunkArray* ca = &_survivor_plab_array[thr_num];
5417     ca->reset();   // clear it so that fresh data is recorded
5418     return (void*) ca;
5419   } else {
5420     return NULL;
5421   }
5422 }
5423 
5424 // Reset all the thread-local PLAB recording arrays
5425 void CMSCollector::reset_survivor_plab_arrays() {
5426   for (uint i = 0; i < ParallelGCThreads; i++) {
5427     _survivor_plab_array[i].reset();
5428   }
5429 }
5430 
5431 // Merge the per-thread plab arrays into the global survivor chunk
5432 // array which will provide the partitioning of the survivor space
5433 // for CMS rescan.
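// The merge below is a k-way merge: each thread's ChunkArray is assumed to
// hold its recorded PLAB boundaries in increasing address order, so each
// round picks the smallest not-yet-consumed address across all threads
// (the sortedness of the result is asserted at the end).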
5434 void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv,
5435                                               int no_of_gc_threads) {
5436   assert(_survivor_plab_array  != NULL, "Error");
5437   assert(_survivor_chunk_array != NULL, "Error");
5438   assert(_collectorState == FinalMarking, "Error");
5439   for (int j = 0; j < no_of_gc_threads; j++) {
5440     _cursor[j] = 0;
5441   }
5442   HeapWord* top = surv->top();
5443   size_t i;
5444   for (i = 0; i < _survivor_chunk_capacity; i++) {  // all sca entries
5445     HeapWord* min_val = top;          // Higher than any PLAB address
5446     uint      min_tid = 0;            // position of min_val this round
5447     for (int j = 0; j < no_of_gc_threads; j++) {
5448       ChunkArray* cur_sca = &_survivor_plab_array[j];
5449       if (_cursor[j] == cur_sca->end()) {
5450         continue;
5451       }
5452       assert(_cursor[j] < cur_sca->end(), "ctl pt invariant");
5453       HeapWord* cur_val = cur_sca->nth(_cursor[j]);
5454       assert(surv->used_region().contains(cur_val), "Out of bounds value");
5455       if (cur_val < min_val) {
5456         min_tid = j;
5457         min_val = cur_val;
5458       } else {
5459         assert(cur_val < top, "All recorded addresses should be less");
5460       }
5461     }
5462     // At this point min_val and min_tid are respectively
5463     // the least address among _survivor_plab_array[j].nth(_cursor[j])
5464     // over all threads j, and the thread j that contributed that address.
5465     // We record this address in the _survivor_chunk_array[i]
5466     // and increment _cursor[min_tid] prior to the next round i.
5467     if (min_val == top) {
5468       break;
5469     }
5470     _survivor_chunk_array[i] = min_val;
5471     _cursor[min_tid]++;
5472   }
5473   // We are all done; record the size of the _survivor_chunk_array
5474   _survivor_chunk_index = i; // exclusive: [0, i)
5475   if (PrintCMSStatistics > 0) {
5476     gclog_or_tty->print(" (Survivor:" SIZE_FORMAT "chunks) ", i);
5477   }
5478   // Verify that we used up all the recorded entries
5479   #ifdef ASSERT
5480     size_t total = 0;
5481     for (int j = 0; j < no_of_gc_threads; j++) {
5482       assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");
5483       total += _cursor[j];
5484     }
5485     assert(total == _survivor_chunk_index, "Ctl Pt Invariant");
5486     // Check that the merged array is in sorted order
5487     if (total > 0) {
5488       for (size_t i = 0; i < total - 1; i++) {
5489         if (PrintCMSStatistics > 0) {
5490           gclog_or_tty->print(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
5491                               i, _survivor_chunk_array[i]);
5492         }
5493         assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
5494                "Not sorted");
5495       }
5496     }
5497   #endif // ASSERT
5498 }
5499 
5500 // Set up the space's par_seq_tasks structure for work claiming
5501 // for parallel rescan of young gen.
5502 // See ParRescanTask where this is currently used.
5503 void
5504 CMSCollector::
5505 initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
5506   assert(n_threads > 0, "Unexpected n_threads argument");
5507   DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
5508 
5509   // Eden space
5510   {
5511     SequentialSubTasksDone* pst = dng->eden()->par_seq_tasks();
5512     assert(!pst->valid(), "Clobbering existing data?");
5513     // Each valid entry in [0, _eden_chunk_index) represents a task.
5514     size_t n_tasks = _eden_chunk_index + 1;
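    // (The recorded boundaries split eden into _eden_chunk_index + 1
    // regions: [bottom, c0), [c0, c1), ..., [c_last, top); see
    // do_young_space_rescan() above.)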
5515     assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error");
5516     // Sets the condition for completion of the subtask (how many threads
5517     // need to finish in order to be done).
5518     pst->set_n_threads(n_threads);
5519     pst->set_n_tasks((int)n_tasks);
5520   }
5521 
5522   // Merge the survivor plab arrays into _survivor_chunk_array
5523   if (_survivor_plab_array != NULL) {
5524     merge_survivor_plab_arrays(dng->from(), n_threads);
5525   } else {
5526     assert(_survivor_chunk_index == 0, "Error");
5527   }
5528 
5529   // To space
5530   {
5531     SequentialSubTasksDone* pst = dng->to()->par_seq_tasks();
5532     assert(!pst->valid(), "Clobbering existing data?");
5533     // Sets the condition for completion of the subtask (how many threads
5534     // need to finish in order to be done).
5535     pst->set_n_threads(n_threads);
5536     pst->set_n_tasks(1);
5537     assert(pst->valid(), "Error");
5538   }
5539 
5540   // From space
5541   {
5542     SequentialSubTasksDone* pst = dng->from()->par_seq_tasks();
5543     assert(!pst->valid(), "Clobbering existing data?");
5544     size_t n_tasks = _survivor_chunk_index + 1;
5545     assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error");
5546     // Sets the condition for completion of the subtask (how many threads
5547     // need to finish in order to be done).
5548     pst->set_n_threads(n_threads);
5549     pst->set_n_tasks((int)n_tasks);
5550     assert(pst->valid(), "Error");
5551   }
5552 }
5553 
5554 // Parallel version of remark
5555 void CMSCollector::do_remark_parallel() {
5556   GenCollectedHeap* gch = GenCollectedHeap::heap();
5557   FlexibleWorkGang* workers = gch->workers();
5558   assert(workers != NULL, "Need parallel worker threads.");
5559   // Choose to use the number of GC workers most recently set
5560   // into "active_workers".  If active_workers is not set, set it
5561   // to ParallelGCThreads.
5562   int n_workers = workers->active_workers();
5563   if (n_workers == 0) {
5564     assert(n_workers > 0, "Should have been set during scavenge");
5565     n_workers = ParallelGCThreads;
5566     workers->set_active_workers(n_workers);
5567   }
5568   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
5569 
5570   CMSParRemarkTask tsk(this,
5571     cms_space,
5572     n_workers, workers, task_queues());
5573 
5574   // Set up for parallel process_strong_roots work.
5575   gch->set_par_threads(n_workers);
5576   // We won't be iterating over the cards in the card table updating
5577   // the younger_gen cards, so we shouldn't call the following else
5578   // the verification code as well as subsequent younger_refs_iterate
5579   // code would get confused. XXX
5580   // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
5581 
5582   // The young gen rescan work will not be done as part of
5583   // process_strong_roots (which currently doesn't know how to
5584   // parallelize such a scan), but rather will be broken up into
5585   // a set of parallel tasks (via the sampling that the [abortable]
5586   // preclean phase did of EdenSpace), plus the [two] tasks of
5587   // scanning the [two] survivor spaces. Further fine-grain
5588   // parallelization of the scanning of the survivor spaces
5589   // themselves, and of precleaning of the younger gen itself
5590   // is deferred to the future.
5591   initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
5592 
5593   // The dirty card rescan work is broken up into a "sequence"
5594   // of parallel tasks (per constituent space) that are dynamically
5595   // claimed by the parallel threads.
5596   cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
5597 
5598   // It turns out that even when we're using 1 thread, doing the work in a
5599   // separate thread causes wide variance in run times.  We can't help this
5600   // in the multi-threaded case, but we special-case n=1 here to get
5601   // repeatable measurements of the 1-thread overhead of the parallel code.
5602   if (n_workers > 1) {
5603     // Make refs discovery MT-safe, if it isn't already: it may not
5604     // necessarily be so, since it's possible that we are doing
5605     // ST marking.
5606     ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true);
5607     GenCollectedHeap::StrongRootsScope srs(gch);
5608     workers->run_task(&tsk);
5609   } else {
5610     ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5611     GenCollectedHeap::StrongRootsScope srs(gch);
5612     tsk.work(0);
5613   }
5614 
5615   gch->set_par_threads(0);  // 0 ==> non-parallel.
5616   // restore, single-threaded for now, any preserved marks
5617   // as a result of work_q overflow
5618   restore_preserved_marks_if_any();
5619 }
5620 
5621 // Non-parallel version of remark
5622 void CMSCollector::do_remark_non_parallel() {
5623   ResourceMark rm;
5624   HandleMark   hm;
5625   GenCollectedHeap* gch = GenCollectedHeap::heap();
5626   ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5627 
5628   MarkRefsIntoAndScanClosure
5629     mrias_cl(_span, ref_processor(), &_markBitMap, NULL /* not precleaning */,
5630              &_markStack, this,
5631              false /* should_yield */, false /* not precleaning */);
5632   MarkFromDirtyCardsClosure
5633     markFromDirtyCardsClosure(this, _span,
5634                               NULL,  // space is set further below
5635                               &_markBitMap, &_markStack, &mrias_cl);
5636   {
5637     TraceTime t("grey object rescan", PrintGCDetails, false, gclog_or_tty);
5638     // Iterate over the dirty cards, setting the corresponding bits in the
5639     // mod union table.
5640     {
5641       ModUnionClosure modUnionClosure(&_modUnionTable);
5642       _ct->ct_bs()->dirty_card_iterate(
5643                       _cmsGen->used_region(),
5644                       &modUnionClosure);
5645     }
5646     // Having transferred these marks into the modUnionTable, we just need
5647     // to rescan the marked objects on the dirty cards in the modUnionTable.
5648     // The initial marking may have been done during an asynchronous
5649     // collection so there may be dirty bits in the mod-union table.
5650     const int alignment =
5651       CardTableModRefBS::card_size * BitsPerWord;
5652     {
5653       // ... First handle dirty cards in CMS gen
5654       markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
5655       MemRegion ur = _cmsGen->used_region();
5656       HeapWord* lb = ur.start();
5657       HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
5658       MemRegion cms_span(lb, ub);
5659       _modUnionTable.dirty_range_iterate_clear(cms_span,
5660                                                &markFromDirtyCardsClosure);
5661       verify_work_stacks_empty();
5662       if (PrintCMSStatistics != 0) {
5663         gclog_or_tty->print(" (re-scanned "SIZE_FORMAT" dirty cards in cms gen) ",
5664           markFromDirtyCardsClosure.num_dirty_cards());
5665       }
5666     }
5667   }
5668   if (VerifyDuringGC &&
5669       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5670     HandleMark hm;  // Discard invalid handles created during verification
5671     Universe::verify(true);
5672   }
5673   {
5674     TraceTime t("root rescan", PrintGCDetails, false, gclog_or_tty);
5675 
5676     verify_work_stacks_empty();
5677 
5678     gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
5679     GenCollectedHeap::StrongRootsScope srs(gch);
5680     gch->gen_process_strong_roots(_cmsGen->level(),
5681                                   true,  // younger gens as roots
5682                                   false, // use the local StrongRootsScope
5683                                   false, // not scavenging
5684                                   SharedHeap::ScanningOption(roots_scanning_options()),
5685                                   &mrias_cl,
5686                                   true,   // walk code active on stacks
5687                                   NULL,
5688                                   NULL);  // The dirty klasses will be handled below
5689 
5690     assert(should_unload_classes()
5691            || (roots_scanning_options() & SharedHeap::SO_CodeCache),
5692            "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5693   }
5694 
5695   {
5696     TraceTime t("visit unhandled CLDs", PrintGCDetails, false, gclog_or_tty);
5697 
5698     verify_work_stacks_empty();
5699 
5700     // Scan all class loader data objects that might have been introduced
5701     // during concurrent marking.
5702     ResourceMark rm;
5703     GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
5704     for (int i = 0; i < array->length(); i++) {
5705       mrias_cl.do_class_loader_data(array->at(i));
5706     }
5707 
5708     // We don't need to keep track of new CLDs anymore.
5709     ClassLoaderDataGraph::remember_new_clds(false);
5710 
5711     verify_work_stacks_empty();
5712   }
5713 
5714   {
5715     TraceTime t("dirty klass scan", PrintGCDetails, false, gclog_or_tty);
5716 
5717     verify_work_stacks_empty();
5718 
5719     RemarkKlassClosure remark_klass_closure(&mrias_cl);
5720     ClassLoaderDataGraph::classes_do(&remark_klass_closure);
5721 
5722     verify_work_stacks_empty();
5723   }
5724 
5725   // We might have added oops to ClassLoaderData::_handles during the
5726   // concurrent marking phase. These oops point to newly allocated objects
5727   // that are guaranteed to be kept alive, either by the direct allocation
5728   // code or when the young collector processes the strong roots. Hence,
5729   // we don't have to revisit the _handles block during the remark phase.
5730 
5731   verify_work_stacks_empty();
5732   // Restore evacuated mark words, if any, used for overflow list links
5733   if (!CMSOverflowEarlyRestoration) {
5734     restore_preserved_marks_if_any();
5735   }
5736   verify_overflow_empty();
5737 }
5738 
5739 ////////////////////////////////////////////////////////
5740 // Parallel Reference Processing Task Proxy Class
5741 ////////////////////////////////////////////////////////
5742 class CMSRefProcTaskProxy: public AbstractGangTaskWOopQueues {
5743   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
5744   CMSCollector*          _collector;
5745   CMSBitMap*             _mark_bit_map;
5746   const MemRegion        _span;
5747   ProcessTask&           _task;
5748 
5749 public:
5750   CMSRefProcTaskProxy(ProcessTask&     task,
5751                       CMSCollector*    collector,
5752                       const MemRegion& span,
5753                       CMSBitMap*       mark_bit_map,
5754                       AbstractWorkGang* workers,
5755                       OopTaskQueueSet* task_queues):
5756     // XXX Should superclass AGTWOQ also know about AWG since it knows
5757     // about the task_queues used by the AWG? Then it could initialize
5758     // the terminator() object. See 6984287. The set_for_termination()
5759     // below is a temporary band-aid for the regression in 6984287.
5760     AbstractGangTaskWOopQueues("Process referents by policy in parallel",
5761       task_queues),
5762     _task(task),
5763     _collector(collector), _span(span), _mark_bit_map(mark_bit_map)
5764   {
5765     assert(_collector->_span.equals(_span) && !_span.is_empty(),
5766            "Inconsistency in _span");
5767     set_for_termination(workers->active_workers());
5768   }
5769 
5770   OopTaskQueueSet* task_queues() { return queues(); }
5771 
5772   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
5773 
5774   void do_work_steal(int i,
5775                      CMSParDrainMarkingStackClosure* drain,
5776                      CMSParKeepAliveClosure* keep_alive,
5777                      int* seed);
5778 
5779   virtual void work(uint worker_id);
5780 };
5781 
5782 void CMSRefProcTaskProxy::work(uint worker_id) {
5783   assert(_collector->_span.equals(_span), "Inconsistency in _span");
5784   CMSParKeepAliveClosure par_keep_alive(_collector, _span,
5785                                         _mark_bit_map,
5786                                         work_queue(worker_id));
5787   CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
5788                                                  _mark_bit_map,
5789                                                  work_queue(worker_id));
5790   CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map);
5791   _task.work(worker_id, is_alive_closure, par_keep_alive, par_drain_stack);
5792   if (_task.marks_oops_alive()) {
5793     do_work_steal(worker_id, &par_drain_stack, &par_keep_alive,
5794                   _collector->hash_seed(worker_id));
5795   }
5796   assert(work_queue(worker_id)->size() == 0, "work_queue should be empty");
5797   assert(_collector->_overflow_list == NULL, "non-empty _overflow_list");
5798 }
5799 
5800 class CMSRefEnqueueTaskProxy: public AbstractGangTask {
5801   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
5802   EnqueueTask& _task;
5803 
5804 public:
5805   CMSRefEnqueueTaskProxy(EnqueueTask& task)
5806     : AbstractGangTask("Enqueue reference objects in parallel"),
5807       _task(task)
5808   { }
5809 
5810   virtual void work(uint worker_id)
5811   {
5812     _task.work(worker_id);
5813   }
5814 };
5815 
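// Note: _low_water_mark below is the smaller of a quarter of the work
// queue's capacity and CMSWorkQueueDrainThreshold * ParallelGCThreads;
// it bounds how much work the closure lets accumulate in its local
// queue before draining.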
5816 CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
5817   MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue):
5818    _span(span),
5819    _bit_map(bit_map),
5820    _work_queue(work_queue),
5821    _mark_and_push(collector, span, bit_map, work_queue),
5822    _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
5823                         (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads)))
5824 { }
5825 
5826 // . see if we can share work_queues with ParNew? XXX
5827 void CMSRefProcTaskProxy::do_work_steal(int i,
5828   CMSParDrainMarkingStackClosure* drain,
5829   CMSParKeepAliveClosure* keep_alive,
5830   int* seed) {
5831   OopTaskQueue* work_q = work_queue(i);
5832   NOT_PRODUCT(int num_steals = 0;)
5833   oop obj_to_scan;
5834 
5835   while (true) {
5836     // Completely finish any left over work from (an) earlier round(s)
5837     drain->trim_queue(0);
5838     size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
5839                                          (size_t)ParGCDesiredObjsFromOverflowList);
5840     // Now check if there's any work in the overflow list
5841     // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
5842     // only affects the number of attempts made to get work from the
5843     // overflow list and does not affect the number of workers.  Just
5844     // pass ParallelGCThreads so this behavior is unchanged.
5845     if (_collector->par_take_from_overflow_list(num_from_overflow_list,
5846                                                 work_q,
5847                                                 ParallelGCThreads)) {
5848       // Found something in global overflow list;
5849       // not yet ready to go stealing work from others.
5850       // We'd like to assert(work_q->size() != 0, ...)
5851       // because we just took work from the overflow list,
5852       // but of course we can't, since all of that might have
5853       // been already stolen from us.
5854       continue;
5855     }
5856     // Verify that we have no work before we resort to stealing
5857     assert(work_q->size() == 0, "Have work, shouldn't steal");
5858     // Try to steal from other queues that have work
5859     if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
5860       NOT_PRODUCT(num_steals++;)
5861       assert(obj_to_scan->is_oop(), "Oops, not an oop!");
5862       assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5863       // Do scanning work
5864       obj_to_scan->oop_iterate(keep_alive);
5865       // Loop around, finish this work, and try to steal some more
5866     } else if (terminator()->offer_termination()) {
5867       break;  // nirvana from the infinite cycle
5868     }
5869   }
5870   NOT_PRODUCT(
5871     if (PrintCMSStatistics != 0) {
5872       gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
5873     }
5874   )
5875 }
5876 
5877 void CMSRefProcTaskExecutor::execute(ProcessTask& task)
5878 {
5879   GenCollectedHeap* gch = GenCollectedHeap::heap();
5880   FlexibleWorkGang* workers = gch->workers();
5881   assert(workers != NULL, "Need parallel worker threads.");
5882   CMSRefProcTaskProxy rp_task(task, &_collector,
5883                               _collector.ref_processor()->span(),
5884                               _collector.markBitMap(),
5885                               workers, _collector.task_queues());
5886   workers->run_task(&rp_task);
5887 }
5888 
5889 void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
5890 {
5891 
5892   GenCollectedHeap* gch = GenCollectedHeap::heap();
5893   FlexibleWorkGang* workers = gch->workers();
5894   assert(workers != NULL, "Need parallel worker threads.");
5895   CMSRefEnqueueTaskProxy enq_task(task);
5896   workers->run_task(&enq_task);
5897 }
5898 
5899 void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
5900 
5901   ResourceMark rm;
5902   HandleMark   hm;
5903 
5904   ReferenceProcessor* rp = ref_processor();
5905   assert(rp->span().equals(_span), "Spans should be equal");
5906   assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
5907   // Process weak references.
5908   rp->setup_policy(clear_all_soft_refs);
5909   verify_work_stacks_empty();
5910 
5911   CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
5912                                           &_markStack, false /* !preclean */);
5913   CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
5914                                 _span, &_markBitMap, &_markStack,
5915                                 &cmsKeepAliveClosure, false /* !preclean */);
5916   {
5917     TraceTime t("weak refs processing", PrintGCDetails, false, gclog_or_tty);
5918     if (rp->processing_is_mt()) {
5919       // Set the degree of MT here.  If the discovery is done MT, there
5920       // may have been a different number of threads doing the discovery
5921       // and a different number of discovered lists may have Ref objects.
5922       // That is OK as long as the Reference lists are balanced (see
5923       // balance_all_queues() and balance_queues()).
5924       GenCollectedHeap* gch = GenCollectedHeap::heap();
5925       int active_workers = ParallelGCThreads;
5926       FlexibleWorkGang* workers = gch->workers();
5927       if (workers != NULL) {
5928         active_workers = workers->active_workers();
5929         // The expectation is that active_workers will have already
5930         // been set to a reasonable value.  If it has not been set,
5931         // investigate.
5932         assert(active_workers > 0, "Should have been set during scavenge");
5933       }
5934       rp->set_active_mt_degree(active_workers);
5935       CMSRefProcTaskExecutor task_executor(*this);
5936       rp->process_discovered_references(&_is_alive_closure,
5937                                         &cmsKeepAliveClosure,
5938                                         &cmsDrainMarkingStackClosure,
5939                                         &task_executor);
5940     } else {
5941       rp->process_discovered_references(&_is_alive_closure,
5942                                         &cmsKeepAliveClosure,
5943                                         &cmsDrainMarkingStackClosure,
5944                                         NULL);
5945     }
5946     verify_work_stacks_empty();
5947   }
5948 
5949   if (should_unload_classes()) {
5950     {
5951       TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty);
5952 
5953       // Follow SystemDictionary roots and unload classes
5954       bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
5955 
5956       // Follow CodeCache roots and unload any methods marked for unloading
5957       CodeCache::do_unloading(&_is_alive_closure, purged_class);
5958 
5959       cmsDrainMarkingStackClosure.do_void();
5960       verify_work_stacks_empty();
5961 
5962       // Update subklass/sibling/implementor links in KlassKlass descendants
5963       Klass::clean_weak_klass_links(&_is_alive_closure);
5964       // Nothing should have been pushed onto the working stacks.
5965       verify_work_stacks_empty();
5966     }
5967 
5968     {
5969       TraceTime t("scrub symbol table", PrintGCDetails, false, gclog_or_tty);
5970       // Clean up unreferenced symbols in symbol table.
5971       SymbolTable::unlink();
5972     }
5973   }
5974 
5975   // CMS doesn't use the StringTable as hard roots when class unloading is turned off.
5976   // Need to check if we really scanned the StringTable.
5977   if ((roots_scanning_options() & SharedHeap::SO_Strings) == 0) {
5978     TraceTime t("scrub string table", PrintGCDetails, false, gclog_or_tty);
5979     // Now clean up stale oops in StringTable
5980     StringTable::unlink(&_is_alive_closure);
5981   }
5982 
5983   verify_work_stacks_empty();
5984   // Restore any preserved marks as a result of mark stack or
5985   // work queue overflow
5986   restore_preserved_marks_if_any();  // done single-threaded for now
5987 
5988   rp->set_enqueuing_is_done(true);
5989   if (rp->processing_is_mt()) {
5990     rp->balance_all_queues();
5991     CMSRefProcTaskExecutor task_executor(*this);
5992     rp->enqueue_discovered_references(&task_executor);
5993   } else {
5994     rp->enqueue_discovered_references(NULL);
5995   }
5996   rp->verify_no_references_recorded();
5997   assert(!rp->discovery_enabled(), "should have been disabled");
5998 }
5999 
6000 #ifndef PRODUCT
6001 void CMSCollector::check_correct_thread_executing() {
6002   Thread* t = Thread::current();
6003   // Only the VM thread or the CMS thread should be here.
6004   assert(t->is_ConcurrentGC_thread() || t->is_VM_thread(),
6005          "Unexpected thread type");
6006   // If this is the vm thread, the foreground process
6007   // should not be waiting.  Note that _foregroundGCIsActive is
6008   // true while the foreground collector is waiting.
6009   if (_foregroundGCShouldWait) {
6010     // We cannot be the VM thread
6011     assert(t->is_ConcurrentGC_thread(),
6012            "Should be CMS thread");
6013   } else {
6014     // We can be the CMS thread only if we are in a stop-world
6015     // phase of CMS collection.
6016     if (t->is_ConcurrentGC_thread()) {
6017       assert(_collectorState == InitialMarking ||
6018              _collectorState == FinalMarking,
6019              "Should be a stop-world phase");
6020       // The CMS thread should be holding the CMS_token.
6021       assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6022              "Potential interference with concurrently "
6023              "executing VM thread");
6024     }
6025   }
6026 }
6027 #endif
6028 
6029 void CMSCollector::sweep(bool asynch) {
6030   assert(_collectorState == Sweeping, "just checking");
6031   check_correct_thread_executing();
6032   verify_work_stacks_empty();
6033   verify_overflow_empty();
6034   increment_sweep_count();
6035   TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
6036 
6037   _inter_sweep_timer.stop();
6038   _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
6039   size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free());
6040 
6041   assert(!_intra_sweep_timer.is_active(), "Should not be active");
6042   _intra_sweep_timer.reset();
6043   _intra_sweep_timer.start();
6044   if (asynch) {
6045     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6046     CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails);
6047     // First sweep the old gen
6048     {
6049       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
6050                                bitMapLock());
6051       sweepWork(_cmsGen, asynch);
6052     }
6053 
6054     // Update Universe::_heap_*_at_gc figures.
6055     // We need all the free list locks to make the abstract state
6056     // transition from Sweeping to Resizing. See detailed note
6057     // further below.
6058     {
6059       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock());
6060       // Update heap occupancy information which is used as
6061       // input to soft ref clearing policy at the next gc.
6062       Universe::update_heap_info_at_gc();
6063       _collectorState = Resizing;
6064     }
6065   } else {
6066     // already have needed locks
6067     sweepWork(_cmsGen,  asynch);
6068     // Update heap occupancy information which is used as
6069     // input to soft ref clearing policy at the next gc.
6070     Universe::update_heap_info_at_gc();
6071     _collectorState = Resizing;
6072   }
6073   verify_work_stacks_empty();
6074   verify_overflow_empty();
6075 
6076   _intra_sweep_timer.stop();
6077   _intra_sweep_estimate.sample(_intra_sweep_timer.seconds());
6078 
6079   _inter_sweep_timer.reset();
6080   _inter_sweep_timer.start();
6081 
6082   // We need to use a monotonically non-decreasing time in ms
6083   // or we will see time-warp warnings and os::javaTimeMillis()
6084   // does not guarantee monotonicity.
6085   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
6086   update_time_of_last_gc(now);
6087 
6088   // NOTE on abstract state transitions:
6089   // Mutators allocate-live and/or mark the mod-union table dirty
6090   // based on the state of the collection.  The former is done in
6091   // the interval [Marking, Sweeping] and the latter in the interval
6092   // [Marking, Sweeping).  Thus the transitions into the Marking state
6093   // and out of the Sweeping state must be synchronously visible
6094   // globally to the mutators.
6095   // The transition into the Marking state happens with the world
6096   // stopped so the mutators will globally see it.  Sweeping is
6097   // done asynchronously by the background collector so the transition
6098   // from the Sweeping state to the Resizing state must be done
6099   // under the freelistLock (as is the check for whether to
6100   // allocate-live and whether to dirty the mod-union table).
6101   assert(_collectorState == Resizing, "Change of collector state to"
6102     " Resizing must be done under the freelistLocks (plural)");
6103 
6104   // Now that sweeping has been completed, we clear
6105   // the incremental_collection_failed flag,
6106   // thus inviting a younger gen collection to promote into
6107   // this generation. If such a promotion may still fail,
6108   // the flag will be set again when a young collection is
6109   // attempted.
6110   GenCollectedHeap* gch = GenCollectedHeap::heap();
6111   gch->clear_incremental_collection_failed();  // Worth retrying as fresh space may have been freed up
6112   gch->update_full_collections_completed(_collection_count_start);
6113 }
6114 
6115 // FIX ME!!! Looks like this belongs in CFLSpace, with
6116 // CMSGen merely delegating to it.
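// Compute an address nearLargestPercent of the way from the bottom of the
// space to the largest free block (backed off by MinChunkSize) and record
// it; isNearLargestChunk() below answers whether a given address lies at
// or beyond that point.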
6117 void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
6118   double nearLargestPercent = FLSLargestBlockCoalesceProximity;
6119   HeapWord*  minAddr        = _cmsSpace->bottom();
6120   HeapWord*  largestAddr    =
6121     (HeapWord*) _cmsSpace->dictionary()->find_largest_dict();
6122   if (largestAddr == NULL) {
6123     // The dictionary appears to be empty.  In this case
6124     // try to coalesce at the end of the heap.
6125     largestAddr = _cmsSpace->end();
6126   }
6127   size_t largestOffset     = pointer_delta(largestAddr, minAddr);
6128   size_t nearLargestOffset =
6129     (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
6130   if (PrintFLSStatistics != 0) {
6131     gclog_or_tty->print_cr(
6132       "CMS: Large Block: " PTR_FORMAT ";"
6133       " Proximity: " PTR_FORMAT " -> " PTR_FORMAT,
6134       largestAddr,
6135       _cmsSpace->nearLargestChunk(), minAddr + nearLargestOffset);
6136   }
6137   _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
6138 }
6139 
6140 bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
6141   return addr >= _cmsSpace->nearLargestChunk();
6142 }
6143 
6144 FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
6145   return _cmsSpace->find_chunk_at_end();
6146 }
6147 
6148 void ConcurrentMarkSweepGeneration::update_gc_stats(int current_level,
6149                                                     bool full) {
6150   // The next lower level has been collected.  Gather any statistics
6151   // that are of interest at this point.
6152   if (!full && (current_level + 1) == level()) {
6153     // Gather statistics on the young generation collection.
6154     collector()->stats().record_gc0_end(used());
6155   }
6156 }
6157 
6158 CMSAdaptiveSizePolicy* ConcurrentMarkSweepGeneration::size_policy() {
6159   GenCollectedHeap* gch = GenCollectedHeap::heap();
6160   assert(gch->kind() == CollectedHeap::GenCollectedHeap,
6161     "Wrong type of heap");
6162   CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
6163     gch->gen_policy()->size_policy();
6164   assert(sp->is_gc_cms_adaptive_size_policy(),
6165     "Wrong type of size policy");
6166   return sp;
6167 }
6168 
6169 void ConcurrentMarkSweepGeneration::rotate_debug_collection_type() {
6170   if (PrintGCDetails && Verbose) {
6171     gclog_or_tty->print("Rotate from %d ", _debug_collection_type);
6172   }
6173   _debug_collection_type = (CollectionTypes) (_debug_collection_type + 1);
6174   _debug_collection_type =
6175     (CollectionTypes) (_debug_collection_type % Unknown_collection_type);
6176   if (PrintGCDetails && Verbose) {
6177     gclog_or_tty->print_cr("to %d ", _debug_collection_type);
6178   }
6179 }
6180 
6181 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen,
6182   bool asynch) {
6183   // We iterate over the space(s) underlying this generation,
6184   // checking the mark bit map to see if the bits corresponding
6185   // to specific blocks are marked or not. Blocks that are
6186   // marked are live and are not swept up. All remaining blocks
6187   // are swept up, with coalescing on-the-fly as we sweep up
6188   // contiguous free and/or garbage blocks:
6189   // We need to ensure that the sweeper synchronizes with allocators
6190   // and stop-the-world collectors. In particular, the following
6191   // locks are used:
6192   // . CMS token: if this is held, a stop the world collection cannot occur
6193   // . freelistLock: if this is held no allocation can occur from this
6194   //                 generation by another thread
6195   // . bitMapLock: if this is held, no other thread can access or update
6196   //               the marking bit map
6197 
6198   // Note that we need to hold the freelistLock if we use
6199   // block iterate below; else the iterator might go awry if
6200   // a mutator (or promotion) causes block contents to change
6201   // (for instance if the allocator divvies up a block).
6202   // If we hold the free list lock, for all practical purposes
6203   // young generation GC's can't occur (they'll usually need to
6204   // promote), so we might as well prevent all young generation
6205   // GC's while we do a sweeping step. For the same reason, we might
6206   // as well take the bit map lock for the entire duration
6207 
6208   // check that we hold the requisite locks
6209   assert(have_cms_token(), "Should hold cms token");
6210   assert(   (asynch && ConcurrentMarkSweepThread::cms_thread_has_cms_token())
6211          || (!asynch && ConcurrentMarkSweepThread::vm_thread_has_cms_token()),
6212         "Should possess CMS token to sweep");
6213   assert_lock_strong(gen->freelistLock());
6214   assert_lock_strong(bitMapLock());
6215 
6216   assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context");
6217   assert(_intra_sweep_timer.is_active(),  "Was switched on  in an outer context");
6218   gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
6219                                       _inter_sweep_estimate.padded_average(),
6220                                       _intra_sweep_estimate.padded_average());
6221   gen->setNearLargestChunk();
6222 
6223   {
6224     SweepClosure sweepClosure(this, gen, &_markBitMap,
6225                             CMSYield && asynch);
6226     gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
6227     // We need to free-up/coalesce garbage/blocks from a
6228     // co-terminal free run. This is done in the SweepClosure
6229     // destructor; so, do not remove this scope, else the
6230     // end-of-sweep-census below will be off by a little bit.
6231   }
6232   gen->cmsSpace()->sweep_completed();
6233   gen->cmsSpace()->endSweepFLCensus(sweep_count());
6234   if (should_unload_classes()) {                // unloaded classes this cycle,
6235     _concurrent_cycles_since_last_unload = 0;   // ... reset count
6236   } else {                                      // did not unload classes,
6237     _concurrent_cycles_since_last_unload++;     // ... increment count
6238   }
6239 }
6240 
6241 // Reset CMS data structures (for now just the marking bit map)
6242 // preparatory for the next cycle.
6243 void CMSCollector::reset(bool asynch) {
6244   GenCollectedHeap* gch = GenCollectedHeap::heap();
6245   CMSAdaptiveSizePolicy* sp = size_policy();
6246   AdaptiveSizePolicyOutput(sp, gch->total_collections());
6247   if (asynch) {
6248     CMSTokenSyncWithLocks ts(true, bitMapLock());
6249 
6250     // If the state is not "Resetting", the foreground thread
6251     // has done a collection and the resetting.
6252     if (_collectorState != Resetting) {
6253       assert(_collectorState == Idling, "The state should only change"
6254         " because the foreground collector has finished the collection");
6255       return;
6256     }
6257 
6258     // Clear the mark bitmap (no grey objects to start with)
6259     // for the next cycle.
6260     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6261     CMSPhaseAccounting cmspa(this, "reset", !PrintGCDetails);
6262 
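    // Clear the bit map one CMSBitMapYieldQuantum-sized chunk at a time,
    // giving up the bitMapLock and the CMS token between chunks whenever
    // a yield is requested and the foreground collector is not active.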
6263     HeapWord* curAddr = _markBitMap.startWord();
6264     while (curAddr < _markBitMap.endWord()) {
6265       size_t remaining  = pointer_delta(_markBitMap.endWord(), curAddr);
6266       MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
6267       _markBitMap.clear_large_range(chunk);
6268       if (ConcurrentMarkSweepThread::should_yield() &&
6269           !foregroundGCIsActive() &&
6270           CMSYield) {
6271         assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6272                "CMS thread should hold CMS token");
6273         assert_lock_strong(bitMapLock());
6274         bitMapLock()->unlock();
6275         ConcurrentMarkSweepThread::desynchronize(true);
6276         ConcurrentMarkSweepThread::acknowledge_yield_request();
6277         stopTimer();
6278         if (PrintCMSStatistics != 0) {
6279           incrementYields();
6280         }
6281         icms_wait();
6282 
6283         // See the comment in coordinator_yield()
6284         for (unsigned i = 0; i < CMSYieldSleepCount &&
6285                          ConcurrentMarkSweepThread::should_yield() &&
6286                          !CMSCollector::foregroundGCIsActive(); ++i) {
6287           os::sleep(Thread::current(), 1, false);
6288           ConcurrentMarkSweepThread::acknowledge_yield_request();
6289         }
6290 
6291         ConcurrentMarkSweepThread::synchronize(true);
6292         bitMapLock()->lock_without_safepoint_check();
6293         startTimer();
6294       }
6295       curAddr = chunk.end();
6296     }
6297     // A successful mostly concurrent collection has been done.
6298     // Because only the full (i.e., concurrent mode failure) collections
6299     // are being measured for gc overhead limits, clean the "near" flag
6300     // and count.
6301     sp->reset_gc_overhead_limit_count();
6302     _collectorState = Idling;
6303   } else {
6304     // already have the lock
6305     assert(_collectorState == Resetting, "just checking");
6306     assert_lock_strong(bitMapLock());
6307     _markBitMap.clear_all();
6308     _collectorState = Idling;
6309   }
6310 
6311   // Stop incremental mode after a cycle completes, so that any future cycles
6312   // are triggered by allocation.
6313   stop_icms();
6314 
6315   NOT_PRODUCT(
6316     if (RotateCMSCollectionTypes) {
6317       _cmsGen->rotate_debug_collection_type();
6318     }
6319   )
6320 }
6321 
6322 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
6323   gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
6324   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
6325   TraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, gclog_or_tty);
6326   TraceCollectorStats tcs(counters());
6327 
6328   switch (op) {
6329     case CMS_op_checkpointRootsInitial: {
6330       SvcGCMarker sgcm(SvcGCMarker::OTHER);
6331       checkpointRootsInitial(true);       // asynch
6332       if (PrintGC) {
6333         _cmsGen->printOccupancy("initial-mark");
6334       }
6335       break;
6336     }
6337     case CMS_op_checkpointRootsFinal: {
6338       SvcGCMarker sgcm(SvcGCMarker::OTHER);
6339       checkpointRootsFinal(true,    // asynch
6340                            false,   // !clear_all_soft_refs
6341                            false);  // !init_mark_was_synchronous
6342       if (PrintGC) {
6343         _cmsGen->printOccupancy("remark");
6344       }
6345       break;
6346     }
6347     default:
6348       fatal("No such CMS_op");
6349   }
6350 }
6351 
6352 #ifndef PRODUCT
6353 size_t const CMSCollector::skip_header_HeapWords() {
6354   return FreeChunk::header_size();
6355 }
6356 
6357 // Try to collect here the conditions that should hold when the
6358 // CMS thread is exiting. The idea is that the foreground GC
6359 // thread should not be blocked if it wants to terminate
6360 // the CMS thread and yet continue to run the VM for a while
6361 // after that.
6362 void CMSCollector::verify_ok_to_terminate() const {
6363   assert(Thread::current()->is_ConcurrentGC_thread(),
6364          "should be called by CMS thread");
6365   assert(!_foregroundGCShouldWait, "should be false");
6366   // We could check here that all the various low-level locks
6367   // are not held by the CMS thread, but that is overkill; see
6368   // also CMSThread::verify_ok_to_terminate() where the CGC_lock
6369   // is checked.
6370 }
6371 #endif
6372 
6373 size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const {
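       // "Printezis marks": a block allocated during concurrent marking whose
       // header has not yet been installed has the bit for its second word
       // (addr + 1) set in the mark bit map, together with the bit for its
       // last word (addr + size - 1). Its size is therefore the distance from
       // addr to the next marked word at or beyond addr + 2, plus one; this is
       // also why such blocks must be at least 3 words long (see the
       // assertions below).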
6374   assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1),
6375          "missing Printezis mark?");
6376   HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
6377   size_t size = pointer_delta(nextOneAddr + 1, addr);
6378   assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6379          "alignment problem");
6380   assert(size >= 3, "Necessary for Printezis marks to work");
6381   return size;
6382 }
6383 
6384 // A variant of the above (block_size_using_printezis_bits()) except
6385 // that we return 0 if the P-bits are not yet set.
6386 size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const {
6387   if (_markBitMap.isMarked(addr + 1)) {
6388     assert(_markBitMap.isMarked(addr), "P-bit can be set only for marked objects");
6389     HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
6390     size_t size = pointer_delta(nextOneAddr + 1, addr);
6391     assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6392            "alignment problem");
6393     assert(size >= 3, "Necessary for Printezis marks to work");
6394     return size;
6395   }
6396   return 0;
6397 }
6398 
6399 HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
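       // Return the start of the first card boundary strictly beyond the block
       // that begins at addr. The block's size comes from the object header
       // when the klass pointer has been installed, and from the Printezis
       // bits otherwise.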
6400   size_t sz = 0;
6401   oop p = (oop)addr;
6402   if (p->klass_or_null() != NULL) {
6403     sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
6404   } else {
6405     sz = block_size_using_printezis_bits(addr);
6406   }
6407   assert(sz > 0, "size must be nonzero");
6408   HeapWord* next_block = addr + sz;
6409   HeapWord* next_card  = (HeapWord*)round_to((uintptr_t)next_block,
6410                                              CardTableModRefBS::card_size);
6411   assert(round_down((uintptr_t)addr,      CardTableModRefBS::card_size) <
6412          round_down((uintptr_t)next_card, CardTableModRefBS::card_size),
6413          "must be different cards");
6414   return next_card;
6415 }
6416 
6417 
6418 // CMS Bit Map Wrapper /////////////////////////////////////////
6419 
6420 // Construct a CMS bit map infrastructure, but don't create the
6421 // bit vector itself. That is done by a separate call to CMSBitMap::allocate()
6422 // further below.
6423 CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
6424   _bm(),
6425   _shifter(shifter),
6426   _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true) : NULL)
6427 {
6428   _bmStartWord = 0;
6429   _bmWordSize  = 0;
6430 }
6431 
6432 bool CMSBitMap::allocate(MemRegion mr) {
6433   _bmStartWord = mr.start();
6434   _bmWordSize  = mr.word_size();
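       // Each bit of the map covers (1 << _shifter) heap words, so we need
       // (_bmWordSize >> _shifter) bits, i.e.
       // (_bmWordSize >> (_shifter + LogBitsPerByte)) bytes of backing store;
       // the "+ 1" below presumably rounds up for any remainder lost to the
       // shifts.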
6435   ReservedSpace brs(ReservedSpace::allocation_align_size_up(
6436                      (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
6437   if (!brs.is_reserved()) {
6438     warning("CMS bit map allocation failure");
6439     return false;
6440   }
6441   // For now we'll just commit all of the bit map up front.
6442   // Later on we'll try to be more parsimonious with swap.
6443   if (!_virtual_space.initialize(brs, brs.size())) {
6444     warning("CMS bit map backing store failure");
6445     return false;
6446   }
6447   assert(_virtual_space.committed_size() == brs.size(),
6448          "didn't reserve backing store for all of CMS bit map?");
6449   _bm.set_map((BitMap::bm_word_t*)_virtual_space.low());
6450   assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
6451          _bmWordSize, "inconsistency in bit map sizing");
6452   _bm.set_size(_bmWordSize >> _shifter);
6453 
6454   // bm.clear(); // can we rely on getting zero'd memory? verify below
6455   assert(isAllClear(),
6456          "Expected zero'd memory from ReservedSpace constructor");
6457   assert(_bm.size() == heapWordDiffToOffsetDiff(sizeInWords()),
6458          "consistency check");
6459   return true;
6460 }
6461 
6462 void CMSBitMap::dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl) {
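       // Apply cl to each maximal run of marked (dirty) bits within mr,
       // clearing the bits as we go. getAndClearMarkedRegion() returns an
       // empty region when no marked bits remain (in which case, as asserted
       // below, it ends at end_addr), terminating the iteration early.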
6463   HeapWord *next_addr, *end_addr, *last_addr;
6464   assert_locked();
6465   assert(covers(mr), "out-of-range error");
6466   // XXX assert that start and end are appropriately aligned
6467   for (next_addr = mr.start(), end_addr = mr.end();
6468        next_addr < end_addr; next_addr = last_addr) {
6469     MemRegion dirty_region = getAndClearMarkedRegion(next_addr, end_addr);
6470     last_addr = dirty_region.end();
6471     if (!dirty_region.is_empty()) {
6472       cl->do_MemRegion(dirty_region);
6473     } else {
6474       assert(last_addr == end_addr, "program logic");
6475       return;
6476     }
6477   }
6478 }
6479 
6480 #ifndef PRODUCT
6481 void CMSBitMap::assert_locked() const {
6482   CMSLockVerifier::assert_locked(lock());
6483 }
6484 
6485 bool CMSBitMap::covers(MemRegion mr) const {
6486   // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
6487   assert((size_t)_bm.size() == (_bmWordSize >> _shifter),
6488          "size inconsistency");
6489   return (mr.start() >= _bmStartWord) &&
6490          (mr.end()   <= endWord());
6491 }
6492 
6493 bool CMSBitMap::covers(HeapWord* start, size_t size) const {
6494   return (start >= _bmStartWord && (start + size) <= endWord());
6495 }
6496 
6497 void CMSBitMap::verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) {
6498   // verify that there are no 1 bits in the interval [left, right)
6499   FalseBitMapClosure falseBitMapClosure;
6500   iterate(&falseBitMapClosure, left, right);
6501 }
6502 
6503 void CMSBitMap::region_invariant(MemRegion mr)
6504 {
6505   assert_locked();
6506   // mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
6507   assert(!mr.is_empty(), "unexpected empty region");
6508   assert(covers(mr), "mr should be covered by bit map");
6509   // convert address range into offset range
6510   size_t start_ofs = heapWordToOffset(mr.start());
6511   // Make sure that end() is appropriately aligned
6512   assert(mr.end() == (HeapWord*)round_to((intptr_t)mr.end(),
6513                         (1 << (_shifter+LogHeapWordSize))),
6514          "Misaligned mr.end()");
6515   size_t end_ofs   = heapWordToOffset(mr.end());
6516   assert(end_ofs > start_ofs, "Should mark at least one bit");
6517 }
6518 
6519 #endif
6520 
6521 bool CMSMarkStack::allocate(size_t size) {
6522   // allocate a stack of the requisite depth
6523   ReservedSpace rs(ReservedSpace::allocation_align_size_up(
6524                    size * sizeof(oop)));
6525   if (!rs.is_reserved()) {
6526     warning("CMSMarkStack allocation failure");
6527     return false;
6528   }
6529   if (!_virtual_space.initialize(rs, rs.size())) {
6530     warning("CMSMarkStack backing store failure");
6531     return false;
6532   }
6533   assert(_virtual_space.committed_size() == rs.size(),
6534          "didn't reserve backing store for all of CMS stack?");
6535   _base = (oop*)(_virtual_space.low());
6536   _index = 0;
6537   _capacity = size;
6538   NOT_PRODUCT(_max_depth = 0);
6539   return true;
6540 }
6541 
6542 // XXX FIX ME !!! In the MT case we come in here holding a
6543 // leaf lock. For printing we need to take a further lock
6544 // which has lower rank. We need to recalibrate the two
6545 // lock-ranks involved in order to be able to print the
6546 // messages below. (Or defer the printing to the caller.
6547 // For now we take the expedient path of just disabling the
6548 // messages for the problematic case.)
6549 void CMSMarkStack::expand() {
6550   assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted");
6551   if (_capacity == MarkStackSizeMax) {
6552     if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
6553       // We print a warning message only once per CMS cycle.
6554       gclog_or_tty->print_cr(" (benign) Hit CMSMarkStack max size limit");
6555     }
6556     return;
6557   }
6558   // Double capacity if possible
6559   size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax);
6560   // Do not give up existing stack until we have managed to
6561   // get the double capacity that we desired.
6562   ReservedSpace rs(ReservedSpace::allocation_align_size_up(
6563                    new_capacity * sizeof(oop)));
6564   if (rs.is_reserved()) {
6565     // Release the backing store associated with old stack
6566     _virtual_space.release();
6567     // Reinitialize virtual space for new stack
6568     if (!_virtual_space.initialize(rs, rs.size())) {
6569       fatal("Not enough swap for expanded marking stack");
6570     }
6571     _base = (oop*)(_virtual_space.low());
6572     _index = 0;
6573     _capacity = new_capacity;
6574   } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
6575     // Failed to double the capacity; continue.
6576     // We print a detail message only once per CMS cycle.
6577     gclog_or_tty->print(" (benign) Failed to expand marking stack from "SIZE_FORMAT"K to "
6578             SIZE_FORMAT"K",
6579             _capacity / K, new_capacity / K);
6580   }
6581 }
6582 
6583 
6584 // Closures
6585 // XXX: there seems to be a lot of code duplication here;
6586 // should refactor and consolidate common code.
6587 
6588 // This closure is used to mark refs into the CMS generation in
6589 // the CMS bit map. Called at the first checkpoint. This closure
6590 // assumes that we do not need to re-mark dirty cards; if the CMS
6591 // generation on which this is used is not the oldest
6592 // generation, then this will lose younger_gen cards!
6593 
6594 MarkRefsIntoClosure::MarkRefsIntoClosure(
6595   MemRegion span, CMSBitMap* bitMap):
6596     _span(span),
6597     _bitMap(bitMap)
6598 {
6599     assert(_ref_processor == NULL, "deliberately left NULL");
6600     assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
6601 }
6602 
6603 void MarkRefsIntoClosure::do_oop(oop obj) {
6604   // if obj points into _span, then mark the corresponding bit in _bitMap
6605   assert(obj->is_oop(), "expected an oop");
6606   HeapWord* addr = (HeapWord*)obj;
6607   if (_span.contains(addr)) {
6608     // this should be made more efficient
6609     _bitMap->mark(addr);
6610   }
6611 }
6612 
6613 void MarkRefsIntoClosure::do_oop(oop* p)       { MarkRefsIntoClosure::do_oop_work(p); }
6614 void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
6615 
6616 // A variant of the above, used for CMS marking verification.
6617 MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
6618   MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
6619     _span(span),
6620     _verification_bm(verification_bm),
6621     _cms_bm(cms_bm)
6622 {
6623     assert(_ref_processor == NULL, "deliberately left NULL");
6624     assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
6625 }
6626 
6627 void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
6628   // if obj points into _span, then mark the corresponding bit in _verification_bm
6629   assert(obj->is_oop(), "expected an oop");
6630   HeapWord* addr = (HeapWord*)obj;
6631   if (_span.contains(addr)) {
6632     _verification_bm->mark(addr);
6633     if (!_cms_bm->isMarked(addr)) {
6634       oop(addr)->print();
6635       gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", addr);
6636       fatal("... aborting");
6637     }
6638   }
6639 }
6640 
6641 void MarkRefsIntoVerifyClosure::do_oop(oop* p)       { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6642 void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6643 
6644 //////////////////////////////////////////////////
6645 // MarkRefsIntoAndScanClosure
6646 //////////////////////////////////////////////////
6647 
6648 MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
6649                                                        ReferenceProcessor* rp,
6650                                                        CMSBitMap* bit_map,
6651                                                        CMSBitMap* mod_union_table,
6652                                                        CMSMarkStack*  mark_stack,
6653                                                        CMSCollector* collector,
6654                                                        bool should_yield,
6655                                                        bool concurrent_precleaning):
6656   _collector(collector),
6657   _span(span),
6658   _bit_map(bit_map),
6659   _mark_stack(mark_stack),
6660   _pushAndMarkClosure(collector, span, rp, bit_map, mod_union_table,
6661                       mark_stack, concurrent_precleaning),
6662   _yield(should_yield),
6663   _concurrent_precleaning(concurrent_precleaning),
6664   _freelistLock(NULL)
6665 {
6666   _ref_processor = rp;
6667   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
6668 }
6669 
6670 // This closure is used to mark refs into the CMS generation at the
6671 // second (final) checkpoint, and to scan and transitively follow
6672 // the unmarked oops. It is also used during the concurrent precleaning
6673 // phase while scanning objects on dirty cards in the CMS generation.
6674 // The marks are made in the marking bit map and the marking stack is
6675 // used for keeping the (newly) grey objects during the scan.
6676 // The parallel version (Par_...) appears further below.
6677 void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
6678   if (obj != NULL) {
6679     assert(obj->is_oop(), "expected an oop");
6680     HeapWord* addr = (HeapWord*)obj;
6681     assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
6682     assert(_collector->overflow_list_is_empty(),
6683            "overflow list should be empty");
6684     if (_span.contains(addr) &&
6685         !_bit_map->isMarked(addr)) {
6686       // mark bit map (object is now grey)
6687       _bit_map->mark(addr);
6688       // push on marking stack (stack should be empty), and drain the
6689       // stack by applying this closure to the oops in the oops popped
6690       // from the stack (i.e. blacken the grey objects)
6691       bool res = _mark_stack->push(obj);
6692       assert(res, "Should have space to push on empty stack");
6693       do {
6694         oop new_oop = _mark_stack->pop();
6695         assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
6696         assert(_bit_map->isMarked((HeapWord*)new_oop),
6697                "only grey objects on this stack");
6698         // iterate over the oops in this oop, marking and pushing
6699         // the ones in CMS heap (i.e. in _span).
6700         new_oop->oop_iterate(&_pushAndMarkClosure);
6701         // check if it's time to yield
6702         do_yield_check();
6703       } while (!_mark_stack->isEmpty() ||
6704                (!_concurrent_precleaning && take_from_overflow_list()));
6705         // if marking stack is empty, and we are not doing this
6706         // during precleaning, then check the overflow list
6707     }
6708     assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
6709     assert(_collector->overflow_list_is_empty(),
6710            "overflow list was drained above");
6711     // We could restore evacuated mark words, if any, used for
6712     // overflow list links here because the overflow list is
6713     // provably empty here. That would reduce the maximum
6714     // size requirements for preserved_{oop,mark}_stack.
6715     // But we'll just postpone it until we are all done
6716     // so we can just stream through.
6717     if (!_concurrent_precleaning && CMSOverflowEarlyRestoration) {
6718       _collector->restore_preserved_marks_if_any();
6719       assert(_collector->no_preserved_marks(), "No preserved marks");
6720     }
6721     assert(!CMSOverflowEarlyRestoration || _collector->no_preserved_marks(),
6722            "All preserved marks should have been restored above");
6723   }
6724 }
6725 
6726 void MarkRefsIntoAndScanClosure::do_oop(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
6727 void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
6728 
6729 void MarkRefsIntoAndScanClosure::do_yield_work() {
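       // Yield protocol (the other do_yield_work() methods below follow the
       // same pattern): release the locks held across the scan (the bit-map
       // lock and, where taken, the free-list lock), give up the CMS token,
       // stop the concurrent timer, sleep in 1 ms steps for up to
       // CMSYieldSleepCount iterations while a yield is still being requested,
       // then re-acquire the token and the locks and restart the timer.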
6730   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6731          "CMS thread should hold CMS token");
6732   assert_lock_strong(_freelistLock);
6733   assert_lock_strong(_bit_map->lock());
6734   // relinquish the free_list_lock and bitMapLock()
6735   _bit_map->lock()->unlock();
6736   _freelistLock->unlock();
6737   ConcurrentMarkSweepThread::desynchronize(true);
6738   ConcurrentMarkSweepThread::acknowledge_yield_request();
6739   _collector->stopTimer();
6740   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
6741   if (PrintCMSStatistics != 0) {
6742     _collector->incrementYields();
6743   }
6744   _collector->icms_wait();
6745 
6746   // See the comment in coordinator_yield()
6747   for (unsigned i = 0;
6748        i < CMSYieldSleepCount &&
6749        ConcurrentMarkSweepThread::should_yield() &&
6750        !CMSCollector::foregroundGCIsActive();
6751        ++i) {
6752     os::sleep(Thread::current(), 1, false);
6753     ConcurrentMarkSweepThread::acknowledge_yield_request();
6754   }
6755 
6756   ConcurrentMarkSweepThread::synchronize(true);
6757   _freelistLock->lock_without_safepoint_check();
6758   _bit_map->lock()->lock_without_safepoint_check();
6759   _collector->startTimer();
6760 }
6761 
6762 ///////////////////////////////////////////////////////////
6763 // Par_MarkRefsIntoAndScanClosure: a parallel version of
6764 //                                 MarkRefsIntoAndScanClosure
6765 ///////////////////////////////////////////////////////////
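     // Note: the work queue is trimmed back toward _low_water_mark entries
     // after each push (see the trim_queue() call in do_oop() below); the mark
     // is the smaller of a quarter of the queue's capacity and
     // CMSWorkQueueDrainThreshold * ParallelGCThreads.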
6766 Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
6767   CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
6768   CMSBitMap* bit_map, OopTaskQueue* work_queue):
6769   _span(span),
6770   _bit_map(bit_map),
6771   _work_queue(work_queue),
6772   _low_water_mark(MIN2((uint)(work_queue->max_elems()/4),
6773                        (uint)(CMSWorkQueueDrainThreshold * ParallelGCThreads))),
6774   _par_pushAndMarkClosure(collector, span, rp, bit_map, work_queue)
6775 {
6776   _ref_processor = rp;
6777   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
6778 }
6779 
6780 // This closure is used to mark refs into the CMS generation at the
6781 // second (final) checkpoint, and to scan and transitively follow
6782 // the unmarked oops. The marks are made in the marking bit map and
6783 // the work_queue is used for keeping the (newly) grey objects during
6784 // the scan phase whence they are also available for stealing by parallel
6785 // threads. Since the marking bit map is shared, updates are
6786 // synchronized (via CAS).
6787 void Par_MarkRefsIntoAndScanClosure::do_oop(oop obj) {
6788   if (obj != NULL) {
6789     // Ignore mark word because this could be an already marked oop
6790     // that may be chained at the end of the overflow list.
6791     assert(obj->is_oop(true), "expected an oop");
6792     HeapWord* addr = (HeapWord*)obj;
6793     if (_span.contains(addr) &&
6794         !_bit_map->isMarked(addr)) {
6795       // mark bit map (object will become grey):
6796       // It is possible for several threads to be
6797       // trying to "claim" this object concurrently;
6798       // the unique thread that succeeds in marking the
6799       // object first will do the subsequent push on
6800       // to the work queue (or overflow list).
6801       if (_bit_map->par_mark(addr)) {
6802         // push on work_queue (which may not be empty), and trim the
6803         // queue to an appropriate length by applying this closure to
6804         // the oops in the oops popped from the stack (i.e. blacken the
6805         // grey objects)
6806         bool res = _work_queue->push(obj);
6807         assert(res, "Low water mark should be less than capacity?");
6808         trim_queue(_low_water_mark);
6809       } // Else, another thread claimed the object
6810     }
6811   }
6812 }
6813 
6814 void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p)       { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
6815 void Par_MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
6816 
6817 // This closure is used to rescan the marked objects on the dirty cards
6818 // in the mod union table and the card table proper.
6819 size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
6820   oop p, MemRegion mr) {
6821 
6822   size_t size = 0;
6823   HeapWord* addr = (HeapWord*)p;
6824   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6825   assert(_span.contains(addr), "we are scanning the CMS generation");
6826   // check if it's time to yield
6827   if (do_yield_check()) {
6828     // We yielded for some foreground stop-world work,
6829     // and we have been asked to abort this ongoing preclean cycle.
6830     return 0;
6831   }
6832   if (_bitMap->isMarked(addr)) {
6833     // it's marked; is it potentially uninitialized?
6834     if (p->klass_or_null() != NULL) {
6835         // an initialized object; ignore mark word in verification below
6836         // since we are running concurrent with mutators
6837         assert(p->is_oop(true), "should be an oop");
6838         if (p->is_objArray()) {
6839           // objArrays are precisely marked; restrict scanning
6840           // to dirty cards only.
6841           size = CompactibleFreeListSpace::adjustObjectSize(
6842                    p->oop_iterate(_scanningClosure, mr));
6843         } else {
6844           // A non-array may have been imprecisely marked; we need
6845           // to scan object in its entirety.
6846           size = CompactibleFreeListSpace::adjustObjectSize(
6847                    p->oop_iterate(_scanningClosure));
6848         }
6849         #ifdef DEBUG
6850           size_t direct_size =
6851             CompactibleFreeListSpace::adjustObjectSize(p->size());
6852           assert(size == direct_size, "Inconsistency in size");
6853           assert(size >= 3, "Necessary for Printezis marks to work");
6854           if (!_bitMap->isMarked(addr+1)) {
6855             _bitMap->verifyNoOneBitsInRange(addr+2, addr+size);
6856           } else {
6857             _bitMap->verifyNoOneBitsInRange(addr+2, addr+size-1);
6858             assert(_bitMap->isMarked(addr+size-1),
6859                    "inconsistent Printezis mark");
6860           }
6861         #endif // DEBUG
6862     } else {
6863       // an uninitialized object
6864       assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
6865       HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
6866       size = pointer_delta(nextOneAddr + 1, addr);
6867       assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6868              "alignment problem");
6869       // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
6870       // will dirty the card when the klass pointer is installed in the
6871       // object (signalling the completion of initialization).
6872     }
6873   } else {
6874     // Either a not yet marked object or an uninitialized object
6875     if (p->klass_or_null() == NULL) {
6876       // An uninitialized object, skip to the next card, since
6877       // we may not be able to read its P-bits yet.
6878       assert(size == 0, "Initial value");
6879     } else {
6880       // An object not (yet) reached by marking: we merely need to
6881       // compute its size so as to go look at the next block.
6882       assert(p->is_oop(true), "should be an oop");
6883       size = CompactibleFreeListSpace::adjustObjectSize(p->size());
6884     }
6885   }
6886   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6887   return size;
6888 }
6889 
6890 void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
6891   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6892          "CMS thread should hold CMS token");
6893   assert_lock_strong(_freelistLock);
6894   assert_lock_strong(_bitMap->lock());
6895   // relinquish the free_list_lock and bitMapLock()
6896   _bitMap->lock()->unlock();
6897   _freelistLock->unlock();
6898   ConcurrentMarkSweepThread::desynchronize(true);
6899   ConcurrentMarkSweepThread::acknowledge_yield_request();
6900   _collector->stopTimer();
6901   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
6902   if (PrintCMSStatistics != 0) {
6903     _collector->incrementYields();
6904   }
6905   _collector->icms_wait();
6906 
6907   // See the comment in coordinator_yield()
6908   for (unsigned i = 0; i < CMSYieldSleepCount &&
6909                    ConcurrentMarkSweepThread::should_yield() &&
6910                    !CMSCollector::foregroundGCIsActive(); ++i) {
6911     os::sleep(Thread::current(), 1, false);
6912     ConcurrentMarkSweepThread::acknowledge_yield_request();
6913   }
6914 
6915   ConcurrentMarkSweepThread::synchronize(true);
6916   _freelistLock->lock_without_safepoint_check();
6917   _bitMap->lock()->lock_without_safepoint_check();
6918   _collector->startTimer();
6919 }
6920 
6921 
6922 //////////////////////////////////////////////////////////////////
6923 // SurvivorSpacePrecleanClosure
6924 //////////////////////////////////////////////////////////////////
6925 // This (single-threaded) closure is used to preclean the oops in
6926 // the survivor spaces.
6927 size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
6928 
6929   HeapWord* addr = (HeapWord*)p;
6930   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6931   assert(!_span.contains(addr), "we are scanning the survivor spaces");
6932   assert(p->klass_or_null() != NULL, "object should be initialized");
6933   // an initialized object; ignore mark word in verification below
6934   // since we are running concurrent with mutators
6935   assert(p->is_oop(true), "should be an oop");
6936   // Note that we do not yield while we iterate over
6937   // the interior oops of p, pushing the relevant ones
6938   // on our marking stack.
6939   size_t size = p->oop_iterate(_scanning_closure);
6940   do_yield_check();
6941   // Observe that below, we do not abandon the preclean
6942   // phase as soon as we should; rather we empty the
6943   // marking stack before returning. This is to satisfy
6944   // some existing assertions. In general, it may be a
6945   // good idea to abort immediately and complete the marking
6946   // from the grey objects at a later time.
6947   while (!_mark_stack->isEmpty()) {
6948     oop new_oop = _mark_stack->pop();
6949     assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
6950     assert(_bit_map->isMarked((HeapWord*)new_oop),
6951            "only grey objects on this stack");
6952     // iterate over the oops in this oop, marking and pushing
6953     // the ones in CMS heap (i.e. in _span).
6954     new_oop->oop_iterate(_scanning_closure);
6955     // check if it's time to yield
6956     do_yield_check();
6957   }
6958   unsigned int after_count =
6959     GenCollectedHeap::heap()->total_collections();
6960   bool abort = (_before_count != after_count) ||
6961                _collector->should_abort_preclean();
6962   return abort ? 0 : size;
6963 }
6964 
6965 void SurvivorSpacePrecleanClosure::do_yield_work() {
6966   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6967          "CMS thread should hold CMS token");
6968   assert_lock_strong(_bit_map->lock());
6969   // Relinquish the bit map lock
6970   _bit_map->lock()->unlock();
6971   ConcurrentMarkSweepThread::desynchronize(true);
6972   ConcurrentMarkSweepThread::acknowledge_yield_request();
6973   _collector->stopTimer();
6974   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
6975   if (PrintCMSStatistics != 0) {
6976     _collector->incrementYields();
6977   }
6978   _collector->icms_wait();
6979 
6980   // See the comment in coordinator_yield()
6981   for (unsigned i = 0; i < CMSYieldSleepCount &&
6982                        ConcurrentMarkSweepThread::should_yield() &&
6983                        !CMSCollector::foregroundGCIsActive(); ++i) {
6984     os::sleep(Thread::current(), 1, false);
6985     ConcurrentMarkSweepThread::acknowledge_yield_request();
6986   }
6987 
6988   ConcurrentMarkSweepThread::synchronize(true);
6989   _bit_map->lock()->lock_without_safepoint_check();
6990   _collector->startTimer();
6991 }
6992 
6993 // This closure is used to rescan the marked objects on the dirty cards
6994 // in the mod union table and the card table proper. In the parallel
6995 // case, although the bitMap is shared, we do a single read so the
6996 // isMarked() query is "safe".
6997 bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
6998   // Ignore mark word because we are running concurrent with mutators
6999   assert(p->is_oop_or_null(true), "expected an oop or null");
7000   HeapWord* addr = (HeapWord*)p;
7001   assert(_span.contains(addr), "we are scanning the CMS generation");
7002   bool is_obj_array = false;
7003   #ifdef DEBUG
7004     if (!_parallel) {
7005       assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
7006       assert(_collector->overflow_list_is_empty(),
7007              "overflow list should be empty");
7008 
7009     }
7010   #endif // DEBUG
7011   if (_bit_map->isMarked(addr)) {
7012     // Obj arrays are precisely marked, non-arrays are not;
7013     // so we scan objArrays precisely and non-arrays in their
7014     // entirety.
7015     if (p->is_objArray()) {
7016       is_obj_array = true;
7017       if (_parallel) {
7018         p->oop_iterate(_par_scan_closure, mr);
7019       } else {
7020         p->oop_iterate(_scan_closure, mr);
7021       }
7022     } else {
7023       if (_parallel) {
7024         p->oop_iterate(_par_scan_closure);
7025       } else {
7026         p->oop_iterate(_scan_closure);
7027       }
7028     }
7029   }
7030   #ifdef DEBUG
7031     if (!_parallel) {
7032       assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
7033       assert(_collector->overflow_list_is_empty(),
7034              "overflow list should be empty");
7035 
7036     }
7037   #endif // DEBUG
7038   return is_obj_array;
7039 }
7040 
7041 MarkFromRootsClosure::MarkFromRootsClosure(CMSCollector* collector,
7042                         MemRegion span,
7043                         CMSBitMap* bitMap, CMSMarkStack*  markStack,
7044                         bool should_yield, bool verifying):
7045   _collector(collector),
7046   _span(span),
7047   _bitMap(bitMap),
7048   _mut(&collector->_modUnionTable),
7049   _markStack(markStack),
7050   _yield(should_yield),
7051   _skipBits(0)
7052 {
7053   assert(_markStack->isEmpty(), "stack should be empty");
7054   _finger = _bitMap->startWord();
7055   _threshold = _finger;
7056   assert(_collector->_restart_addr == NULL, "Sanity check");
7057   assert(_span.contains(_finger), "Out of bounds _finger?");
7058   DEBUG_ONLY(_verifying = verifying;)
7059 }
7060 
7061 void MarkFromRootsClosure::reset(HeapWord* addr) {
7062   assert(_markStack->isEmpty(), "would cause duplicates on stack");
7063   assert(_span.contains(addr), "Out of bounds _finger?");
7064   _finger = addr;
7065   _threshold = (HeapWord*)round_to(
7066                  (intptr_t)_finger, CardTableModRefBS::card_size);
7067 }
7068 
7069 // Should revisit to see if this should be restructured for
7070 // greater efficiency.
7071 bool MarkFromRootsClosure::do_bit(size_t offset) {
7072   if (_skipBits > 0) {
7073     _skipBits--;
7074     return true;
7075   }
7076   // convert offset into a HeapWord*
7077   HeapWord* addr = _bitMap->startWord() + offset;
7078   assert(_bitMap->endWord() && addr < _bitMap->endWord(),
7079          "address out of range");
7080   assert(_bitMap->isMarked(addr), "tautology");
7081   if (_bitMap->isMarked(addr+1)) {
7082     // this is an allocated but not yet initialized object
7083     assert(_skipBits == 0, "tautology");
7084     _skipBits = 2;  // skip next two marked bits ("Printezis-marks")
7085     oop p = oop(addr);
7086     if (p->klass_or_null() == NULL) {
7087       DEBUG_ONLY(if (!_verifying) {)
7088         // We re-dirty the cards on which this object lies and increase
7089         // the _threshold so that we'll come back to scan this object
7090         // during the preclean or remark phase. (CMSCleanOnEnter)
7091         if (CMSCleanOnEnter) {
7092           size_t sz = _collector->block_size_using_printezis_bits(addr);
7093           HeapWord* end_card_addr   = (HeapWord*)round_to(
7094                                          (intptr_t)(addr+sz), CardTableModRefBS::card_size);
7095           MemRegion redirty_range = MemRegion(addr, end_card_addr);
7096           assert(!redirty_range.is_empty(), "Arithmetical tautology");
7097           // Bump _threshold to end_card_addr; note that
7098           // _threshold cannot possibly exceed end_card_addr, anyhow.
7099           // This prevents future clearing of the card as the scan proceeds
7100           // to the right.
7101           assert(_threshold <= end_card_addr,
7102                  "Because we are just scanning into this object");
7103           if (_threshold < end_card_addr) {
7104             _threshold = end_card_addr;
7105           }
7106           if (p->klass_or_null() != NULL) {
7107             // Redirty the range of cards...
7108             _mut->mark_range(redirty_range);
7109           } // ...else the setting of klass will dirty the card anyway.
7110         }
7111       DEBUG_ONLY(})
7112       return true;
7113     }
7114   }
7115   scanOopsInOop(addr);
7116   return true;
7117 }
7118 
7119 // We take a break if we've been at this for a while,
7120 // so as to avoid monopolizing the locks involved.
7121 void MarkFromRootsClosure::do_yield_work() {
7122   // First give up the locks, then yield, then re-lock
7123   // We should probably use a constructor/destructor idiom to
7124   // do this unlock/lock or modify the MutexUnlocker class to
7125   // serve our purpose. XXX
7126   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7127          "CMS thread should hold CMS token");
7128   assert_lock_strong(_bitMap->lock());
7129   _bitMap->lock()->unlock();
7130   ConcurrentMarkSweepThread::desynchronize(true);
7131   ConcurrentMarkSweepThread::acknowledge_yield_request();
7132   _collector->stopTimer();
7133   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7134   if (PrintCMSStatistics != 0) {
7135     _collector->incrementYields();
7136   }
7137   _collector->icms_wait();
7138 
7139   // See the comment in coordinator_yield()
7140   for (unsigned i = 0; i < CMSYieldSleepCount &&
7141                        ConcurrentMarkSweepThread::should_yield() &&
7142                        !CMSCollector::foregroundGCIsActive(); ++i) {
7143     os::sleep(Thread::current(), 1, false);
7144     ConcurrentMarkSweepThread::acknowledge_yield_request();
7145   }
7146 
7147   ConcurrentMarkSweepThread::synchronize(true);
7148   _bitMap->lock()->lock_without_safepoint_check();
7149   _collector->startTimer();
7150 }
7151 
7152 void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
7153   assert(_bitMap->isMarked(ptr), "expected bit to be set");
7154   assert(_markStack->isEmpty(),
7155          "should drain stack to limit stack usage");
7156   // convert ptr to an oop preparatory to scanning
7157   oop obj = oop(ptr);
7158   // Ignore mark word in verification below, since we
7159   // may be running concurrent with mutators.
7160   assert(obj->is_oop(true), "should be an oop");
7161   assert(_finger <= ptr, "_finger runneth ahead");
7162   // advance the finger to right end of this object
7163   _finger = ptr + obj->size();
7164   assert(_finger > ptr, "we just incremented it above");
7165   // On large heaps, it may take us some time to get through
7166   // the marking phase (especially if running iCMS). During
7167   // this time it's possible that a lot of mutations have
7168   // accumulated in the card table and the mod union table --
7169   // these mutation records are redundant until we have
7170   // actually traced into the corresponding card.
7171   // Here, we check whether advancing the finger would make
7172   // us cross into a new card, and if so clear corresponding
7173   // cards in the MUT (preclean them in the card-table in the
7174   // future).
7175 
7176   DEBUG_ONLY(if (!_verifying) {)
7177     // The clean-on-enter optimization is disabled by default,
7178     // until we fix 6178663.
7179     if (CMSCleanOnEnter && (_finger > _threshold)) {
7180       // [_threshold, _finger) represents the interval
7181       // of cards to be cleared in MUT (or precleaned in card table).
7182       // The set of cards to be cleared is all those that overlap
7183       // with the interval [_threshold, _finger); note that
7184       // _threshold is always kept card-aligned but _finger isn't
7185       // always card-aligned.
7186       HeapWord* old_threshold = _threshold;
7187       assert(old_threshold == (HeapWord*)round_to(
7188               (intptr_t)old_threshold, CardTableModRefBS::card_size),
7189              "_threshold should always be card-aligned");
7190       _threshold = (HeapWord*)round_to(
7191                      (intptr_t)_finger, CardTableModRefBS::card_size);
7192       MemRegion mr(old_threshold, _threshold);
7193       assert(!mr.is_empty(), "Control point invariant");
7194       assert(_span.contains(mr), "Should clear within span");
7195       _mut->clear_range(mr);
7196     }
7197   DEBUG_ONLY(})
7198   // Note: the finger doesn't advance while we drain
7199   // the stack below.
7200   PushOrMarkClosure pushOrMarkClosure(_collector,
7201                                       _span, _bitMap, _markStack,
7202                                       _finger, this);
7203   bool res = _markStack->push(obj);
7204   assert(res, "Empty non-zero size stack should have space for single push");
7205   while (!_markStack->isEmpty()) {
7206     oop new_oop = _markStack->pop();
7207     // Skip verifying header mark word below because we are
7208     // running concurrent with mutators.
7209     assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
7210     // now scan this oop's oops
7211     new_oop->oop_iterate(&pushOrMarkClosure);
7212     do_yield_check();
7213   }
7214   assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
7215 }
7216 
7217 Par_MarkFromRootsClosure::Par_MarkFromRootsClosure(CMSConcMarkingTask* task,
7218                        CMSCollector* collector, MemRegion span,
7219                        CMSBitMap* bit_map,
7220                        OopTaskQueue* work_queue,
7221                        CMSMarkStack*  overflow_stack,
7222                        bool should_yield):
7223   _collector(collector),
7224   _whole_span(collector->_span),
7225   _span(span),
7226   _bit_map(bit_map),
7227   _mut(&collector->_modUnionTable),
7228   _work_queue(work_queue),
7229   _overflow_stack(overflow_stack),
7230   _yield(should_yield),
7231   _skip_bits(0),
7232   _task(task)
7233 {
7234   assert(_work_queue->size() == 0, "work_queue should be empty");
7235   _finger = span.start();
7236   _threshold = _finger;     // XXX Defer clear-on-enter optimization for now
7237   assert(_span.contains(_finger), "Out of bounds _finger?");
7238 }
7239 
7240 // Should revisit to see if this should be restructured for
7241 // greater efficiency.
7242 bool Par_MarkFromRootsClosure::do_bit(size_t offset) {
7243   if (_skip_bits > 0) {
7244     _skip_bits--;
7245     return true;
7246   }
7247   // convert offset into a HeapWord*
7248   HeapWord* addr = _bit_map->startWord() + offset;
7249   assert(_bit_map->endWord() && addr < _bit_map->endWord(),
7250          "address out of range");
7251   assert(_bit_map->isMarked(addr), "tautology");
7252   if (_bit_map->isMarked(addr+1)) {
7253     // this is an allocated object that might not yet be initialized
7254     assert(_skip_bits == 0, "tautology");
7255     _skip_bits = 2;  // skip next two marked bits ("Printezis-marks")
7256     oop p = oop(addr);
7257     if (p->klass_or_null() == NULL) {
7258       // in the case of Clean-on-Enter optimization, redirty card
7259       // and avoid clearing card by increasing the threshold.
7260       return true;
7261     }
7262   }
7263   scan_oops_in_oop(addr);
7264   return true;
7265 }
7266 
7267 void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
7268   assert(_bit_map->isMarked(ptr), "expected bit to be set");
7269   // Should we assert that our work queue is empty or
7270   // below some drain limit?
7271   assert(_work_queue->size() == 0,
7272          "should drain stack to limit stack usage");
7273   // convert ptr to an oop preparatory to scanning
7274   oop obj = oop(ptr);
7275   // Ignore mark word in verification below, since we
7276   // may be running concurrent with mutators.
7277   assert(obj->is_oop(true), "should be an oop");
7278   assert(_finger <= ptr, "_finger runneth ahead");
7279   // advance the finger to right end of this object
7280   _finger = ptr + obj->size();
7281   assert(_finger > ptr, "we just incremented it above");
7282   // On large heaps, it may take us some time to get through
7283   // the marking phase (especially if running iCMS). During
7284   // this time it's possible that a lot of mutations have
7285   // accumulated in the card table and the mod union table --
7286   // these mutation records are redundant until we have
7287   // actually traced into the corresponding card.
7288   // Here, we check whether advancing the finger would make
7289   // us cross into a new card, and if so clear corresponding
7290   // cards in the MUT (preclean them in the card-table in the
7291   // future).
7292 
7293   // The clean-on-enter optimization is disabled by default,
7294   // until we fix 6178663.
7295   if (CMSCleanOnEnter && (_finger > _threshold)) {
7296     // [_threshold, _finger) represents the interval
7297     // of cards to be cleared in MUT (or precleaned in card table).
7298     // The set of cards to be cleared is all those that overlap
7299     // with the interval [_threshold, _finger); note that
7300     // _threshold is always kept card-aligned but _finger isn't
7301     // always card-aligned.
7302     HeapWord* old_threshold = _threshold;
7303     assert(old_threshold == (HeapWord*)round_to(
7304             (intptr_t)old_threshold, CardTableModRefBS::card_size),
7305            "_threshold should always be card-aligned");
7306     _threshold = (HeapWord*)round_to(
7307                    (intptr_t)_finger, CardTableModRefBS::card_size);
7308     MemRegion mr(old_threshold, _threshold);
7309     assert(!mr.is_empty(), "Control point invariant");
7310     assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
7311     _mut->clear_range(mr);
7312   }
7313 
7314   // Note: the local finger doesn't advance while we drain
7315   // the stack below, but the global finger sure can and will.
7316   HeapWord** gfa = _task->global_finger_addr();
7317   Par_PushOrMarkClosure pushOrMarkClosure(_collector,
7318                                       _span, _bit_map,
7319                                       _work_queue,
7320                                       _overflow_stack,
7321                                       _finger,
7322                                       gfa, this);
7323   bool res = _work_queue->push(obj);   // overflow could occur here
7324   assert(res, "Will hold once we use workqueues");
7325   while (true) {
7326     oop new_oop;
7327     if (!_work_queue->pop_local(new_oop)) {
7328       // We emptied our work_queue; check if there's stuff that can
7329       // be gotten from the overflow stack.
7330       if (CMSConcMarkingTask::get_work_from_overflow_stack(
7331             _overflow_stack, _work_queue)) {
7332         do_yield_check();
7333         continue;
7334       } else {  // done
7335         break;
7336       }
7337     }
7338     // Skip verifying header mark word below because we are
7339     // running concurrent with mutators.
7340     assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
7341     // now scan this oop's oops
7342     new_oop->oop_iterate(&pushOrMarkClosure);
7343     do_yield_check();
7344   }
7345   assert(_work_queue->size() == 0, "tautology, emphasizing post-condition");
7346 }
7347 
7348 // Yield in response to a request from VM Thread or
7349 // from mutators.
7350 void Par_MarkFromRootsClosure::do_yield_work() {
7351   assert(_task != NULL, "sanity");
7352   _task->yield();
7353 }
7354 
7355 // A variant of the above used for verifying CMS marking work.
7356 MarkFromRootsVerifyClosure::MarkFromRootsVerifyClosure(CMSCollector* collector,
7357                         MemRegion span,
7358                         CMSBitMap* verification_bm, CMSBitMap* cms_bm,
7359                         CMSMarkStack*  mark_stack):
7360   _collector(collector),
7361   _span(span),
7362   _verification_bm(verification_bm),
7363   _cms_bm(cms_bm),
7364   _mark_stack(mark_stack),
7365   _pam_verify_closure(collector, span, verification_bm, cms_bm,
7366                       mark_stack)
7367 {
7368   assert(_mark_stack->isEmpty(), "stack should be empty");
7369   _finger = _verification_bm->startWord();
7370   assert(_collector->_restart_addr == NULL, "Sanity check");
7371   assert(_span.contains(_finger), "Out of bounds _finger?");
7372 }
7373 
7374 void MarkFromRootsVerifyClosure::reset(HeapWord* addr) {
7375   assert(_mark_stack->isEmpty(), "would cause duplicates on stack");
7376   assert(_span.contains(addr), "Out of bounds _finger?");
7377   _finger = addr;
7378 }
7379 
7380 // Should revisit to see if this should be restructured for
7381 // greater efficiency.
7382 bool MarkFromRootsVerifyClosure::do_bit(size_t offset) {
7383   // convert offset into a HeapWord*
7384   HeapWord* addr = _verification_bm->startWord() + offset;
7385   assert(_verification_bm->endWord() && addr < _verification_bm->endWord(),
7386          "address out of range");
7387   assert(_verification_bm->isMarked(addr), "tautology");
7388   assert(_cms_bm->isMarked(addr), "tautology");
7389 
7390   assert(_mark_stack->isEmpty(),
7391          "should drain stack to limit stack usage");
7392   // convert addr to an oop preparatory to scanning
7393   oop obj = oop(addr);
7394   assert(obj->is_oop(), "should be an oop");
7395   assert(_finger <= addr, "_finger runneth ahead");
7396   // advance the finger to right end of this object
7397   _finger = addr + obj->size();
7398   assert(_finger > addr, "we just incremented it above");
7399   // Note: the finger doesn't advance while we drain
7400   // the stack below.
7401   bool res = _mark_stack->push(obj);
7402   assert(res, "Empty non-zero size stack should have space for single push");
7403   while (!_mark_stack->isEmpty()) {
7404     oop new_oop = _mark_stack->pop();
7405     assert(new_oop->is_oop(), "Oops! expected to pop an oop");
7406     // now scan this oop's oops
7407     new_oop->oop_iterate(&_pam_verify_closure);
7408   }
7409   assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
7410   return true;
7411 }
7412 
7413 PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
7414   CMSCollector* collector, MemRegion span,
7415   CMSBitMap* verification_bm, CMSBitMap* cms_bm,
7416   CMSMarkStack*  mark_stack):
7417   CMSOopClosure(collector->ref_processor()),
7418   _collector(collector),
7419   _span(span),
7420   _verification_bm(verification_bm),
7421   _cms_bm(cms_bm),
7422   _mark_stack(mark_stack)
7423 { }
7424 
7425 void PushAndMarkVerifyClosure::do_oop(oop* p)       { PushAndMarkVerifyClosure::do_oop_work(p); }
7426 void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
7427 
7428 // Upon stack overflow, we discard (part of) the stack,
7429 // remembering the least address amongst those discarded
7430 // in CMSCollector's _restart_address.
7431 void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
7432   // Remember the least grey address discarded
7433   HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
7434   _collector->lower_restart_addr(ra);
7435   _mark_stack->reset();  // discard stack contents
7436   _mark_stack->expand(); // expand the stack if possible
7437 }
7438 
7439 void PushAndMarkVerifyClosure::do_oop(oop obj) {
7440   assert(obj->is_oop_or_null(), "expected an oop or NULL");
7441   HeapWord* addr = (HeapWord*)obj;
7442   if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
7443     // Oop lies in _span and isn't yet grey or black
7444     _verification_bm->mark(addr);            // now grey
7445     if (!_cms_bm->isMarked(addr)) {
7446       oop(addr)->print();
7447       gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)",
7448                              addr);
7449       fatal("... aborting");
7450     }
7451 
7452     if (!_mark_stack->push(obj)) { // stack overflow
7453       if (PrintCMSStatistics != 0) {
7454         gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7455                                SIZE_FORMAT, _mark_stack->capacity());
7456       }
7457       assert(_mark_stack->isFull(), "Else push should have succeeded");
7458       handle_stack_overflow(addr);
7459     }
7460     // anything including and to the right of _finger
7461     // will be scanned as we iterate over the remainder of the
7462     // bit map
7463   }
7464 }
7465 
7466 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
7467                      MemRegion span,
7468                      CMSBitMap* bitMap, CMSMarkStack*  markStack,
7469                      HeapWord* finger, MarkFromRootsClosure* parent) :
7470   CMSOopClosure(collector->ref_processor()),
7471   _collector(collector),
7472   _span(span),
7473   _bitMap(bitMap),
7474   _markStack(markStack),
7475   _finger(finger),
7476   _parent(parent)
7477 { }
7478 
7479 Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
7480                      MemRegion span,
7481                      CMSBitMap* bit_map,
7482                      OopTaskQueue* work_queue,
7483                      CMSMarkStack*  overflow_stack,
7484                      HeapWord* finger,
7485                      HeapWord** global_finger_addr,
7486                      Par_MarkFromRootsClosure* parent) :
7487   CMSOopClosure(collector->ref_processor()),
7488   _collector(collector),
7489   _whole_span(collector->_span),
7490   _span(span),
7491   _bit_map(bit_map),
7492   _work_queue(work_queue),
7493   _overflow_stack(overflow_stack),
7494   _finger(finger),
7495   _global_finger_addr(global_finger_addr),
7496   _parent(parent)
7497 { }
7498 
7499 // Assumes thread-safe access by callers, who are
7500 // responsible for mutual exclusion.
7501 void CMSCollector::lower_restart_addr(HeapWord* low) {
7502   assert(_span.contains(low), "Out of bounds addr");
7503   if (_restart_addr == NULL) {
7504     _restart_addr = low;
7505   } else {
7506     _restart_addr = MIN2(_restart_addr, low);
7507   }
7508 }
7509 
7510 // Upon stack overflow, we discard (part of) the stack,
7511 // remembering the least address amongst those discarded
7512 // in CMSCollector's _restart_address.
7513 void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
7514   // Remember the least grey address discarded
7515   HeapWord* ra = (HeapWord*)_markStack->least_value(lost);
7516   _collector->lower_restart_addr(ra);
7517   _markStack->reset();  // discard stack contents
7518   _markStack->expand(); // expand the stack if possible
7519 }
7520 
7521 // Upon stack overflow, we discard (part of) the stack,
7522 // remembering the least address amongst those discarded
7523 // in CMSCollector's _restart_address.
7524 void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
7525   // We need to do this under a mutex to prevent other
7526   // workers from interfering with the work done below.
7527   MutexLockerEx ml(_overflow_stack->par_lock(),
7528                    Mutex::_no_safepoint_check_flag);
7529   // Remember the least grey address discarded
7530   HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
7531   _collector->lower_restart_addr(ra);
7532   _overflow_stack->reset();  // discard stack contents
7533   _overflow_stack->expand(); // expand the stack if possible
7534 }
7535 
7536 void CMKlassClosure::do_klass(Klass* k) {
7537   assert(_oop_closure != NULL, "Not initialized?");
7538   k->oops_do(_oop_closure);
7539 }
7540 
7541 void PushOrMarkClosure::do_oop(oop obj) {
7542   // Ignore mark word because we are running concurrent with mutators.
7543   assert(obj->is_oop_or_null(true), "expected an oop or NULL");
7544   HeapWord* addr = (HeapWord*)obj;
7545   if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
7546     // Oop lies in _span and isn't yet grey or black
7547     _bitMap->mark(addr);            // now grey
7548     if (addr < _finger) {
7549       // the bit map iteration has already either passed, or
7550       // sampled, this bit in the bit map; we'll need to
7551       // use the marking stack to scan this oop's oops.
7552       bool simulate_overflow = false;
7553       NOT_PRODUCT(
7554         if (CMSMarkStackOverflowALot &&
7555             _collector->simulate_overflow()) {
7556           // simulate a stack overflow
7557           simulate_overflow = true;
7558         }
7559       )
7560       if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
7561         if (PrintCMSStatistics != 0) {
7562           gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7563                                  SIZE_FORMAT, _markStack->capacity());
7564         }
7565         assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
7566         handle_stack_overflow(addr);
7567       }
7568     }
7569     // anything including and to the right of _finger
7570     // will be scanned as we iterate over the remainder of the
7571     // bit map
7572     do_yield_check();
7573   }
7574 }
7575 
7576 void PushOrMarkClosure::do_oop(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
7577 void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
7578 
7579 void Par_PushOrMarkClosure::do_oop(oop obj) {
7580   // Ignore mark word because we are running concurrent with mutators.
7581   assert(obj->is_oop_or_null(true), "expected an oop or NULL");
7582   HeapWord* addr = (HeapWord*)obj;
7583   if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
7584     // Oop lies in _whole_span and isn't yet grey or black
7585     // We read the global_finger (volatile read) strictly after marking oop
7586     bool res = _bit_map->par_mark(addr);    // now grey
7587     volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
7588     // Should we push this marked oop on our stack?
7589     // -- if someone else marked it, nothing to do
7590     // -- if target oop is above global finger nothing to do
7591     // -- if target oop is in chunk and above local finger
7592     //      then nothing to do
7593     // -- else push on work queue
7594     if (   !res       // someone else marked it, they will deal with it
7595         || (addr >= *gfa)  // will be scanned in a later task
7596         || (_span.contains(addr) && addr >= _finger)) { // later in this chunk
7597       return;
7598     }
7599     // the bit map iteration has already either passed, or
7600     // sampled, this bit in the bit map; we'll need to
7601     // use the marking stack to scan this oop's oops.
7602     bool simulate_overflow = false;
7603     NOT_PRODUCT(
7604       if (CMSMarkStackOverflowALot &&
7605           _collector->simulate_overflow()) {
7606         // simulate a stack overflow
7607         simulate_overflow = true;
7608       }
7609     )
7610     if (simulate_overflow ||
7611         !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
7612       // stack overflow
7613       if (PrintCMSStatistics != 0) {
7614         gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
7615                                SIZE_FORMAT, _overflow_stack->capacity());
7616       }
7617       // We cannot assert that the overflow stack is full because
7618       // it may have been emptied since.
7619       assert(simulate_overflow ||
7620              _work_queue->size() == _work_queue->max_elems(),
7621             "Else push should have succeeded");
7622       handle_stack_overflow(addr);
7623     }
7624     do_yield_check();
7625   }
7626 }
7627 
7628 void Par_PushOrMarkClosure::do_oop(oop* p)       { Par_PushOrMarkClosure::do_oop_work(p); }
7629 void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }
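// For the parallel closure above, the push decision can be summarized as a
// small table (a sketch of the cases already enumerated in the comments):
//
//   someone else marked it first        -> nothing more to do
//   addr >= *global_finger              -> a later task will scan it
//   in this chunk && addr >= _finger    -> this worker's own walk will
//                                          reach it
//   otherwise                           -> push on the work queue, falling
//                                          back to the shared overflow
//                                          stack if the queue is full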
7630 
7631 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
7632                                        MemRegion span,
7633                                        ReferenceProcessor* rp,
7634                                        CMSBitMap* bit_map,
7635                                        CMSBitMap* mod_union_table,
7636                                        CMSMarkStack*  mark_stack,
7637                                        bool           concurrent_precleaning):
7638   CMSOopClosure(rp),
7639   _collector(collector),
7640   _span(span),
7641   _bit_map(bit_map),
7642   _mod_union_table(mod_union_table),
7643   _mark_stack(mark_stack),
7644   _concurrent_precleaning(concurrent_precleaning)
7645 {
7646   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7647 }
7648 
7649 // Grey object rescan during pre-cleaning and second checkpoint phases --
7650 // the non-parallel version (the parallel version appears further below.)
7651 void PushAndMarkClosure::do_oop(oop obj) {
7652   // Ignore the mark word in the verification below. During concurrent
7653   // precleaning the object's monitor may be locked; during the checkpoint
7654   // phases the object may already have been reached by a different
7655   // path and may be at the end of the global overflow list (so
7656   // the mark word may be NULL).
7657   assert(obj->is_oop_or_null(true /* ignore mark word */),
7658          "expected an oop or NULL");
7659   HeapWord* addr = (HeapWord*)obj;
7660   // Check if oop points into the CMS generation
7661   // and is not marked
7662   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7663     // a white object ...
7664     _bit_map->mark(addr);         // ... now grey
7665     // push on the marking stack (grey set)
7666     bool simulate_overflow = false;
7667     NOT_PRODUCT(
7668       if (CMSMarkStackOverflowALot &&
7669           _collector->simulate_overflow()) {
7670         // simulate a stack overflow
7671         simulate_overflow = true;
7672       }
7673     )
7674     if (simulate_overflow || !_mark_stack->push(obj)) {
7675       if (_concurrent_precleaning) {
7676          // During precleaning we can just dirty the appropriate card(s)
7677          // in the mod union table, thus ensuring that the object remains
7678          // in the grey set, and continue. In the case of object arrays
7679          // we need to dirty all of the cards that the object spans,
7680          // since the rescan of object arrays will be limited to the
7681          // dirty cards.
7682          // Note that no one can be interfering with us in this action
7683          // of dirtying the mod union table, so no locking or atomics
7684          // are required.
7685          if (obj->is_objArray()) {
7686            size_t sz = obj->size();
7687            HeapWord* end_card_addr = (HeapWord*)round_to(
7688                                         (intptr_t)(addr+sz), CardTableModRefBS::card_size);
7689            MemRegion redirty_range = MemRegion(addr, end_card_addr);
7690            assert(!redirty_range.is_empty(), "Arithmetical tautology");
7691            _mod_union_table->mark_range(redirty_range);
7692          } else {
7693            _mod_union_table->mark(addr);
7694          }
7695          _collector->_ser_pmc_preclean_ovflw++;
7696       } else {
7697          // During the remark phase, we need to remember this oop
7698          // in the overflow list.
7699          _collector->push_on_overflow_list(obj);
7700          _collector->_ser_pmc_remark_ovflw++;
7701       }
7702     }
7703   }
7704 }
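// A worked example of the redirtying arithmetic above (addresses are
// hypothetical; the usual card size is 512 bytes): suppose an object array
// starts at byte address 0x1000 and ends at 0x1350. Then
//   round_to(0x1350, 512) == 0x1400
// so the mod union table is marked over [0x1000, 0x1400), i.e. every card
// the array overlaps. Dirtying only the first card would let the rescan,
// which is restricted to dirty cards, miss references stored in the later
// portions of the array.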
7705 
7706 Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
7707                                                MemRegion span,
7708                                                ReferenceProcessor* rp,
7709                                                CMSBitMap* bit_map,
7710                                                OopTaskQueue* work_queue):
7711   CMSOopClosure(rp),
7712   _collector(collector),
7713   _span(span),
7714   _bit_map(bit_map),
7715   _work_queue(work_queue)
7716 {
7717   assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
7718 }
7719 
7720 void PushAndMarkClosure::do_oop(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
7721 void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
7722 
7723 // Grey object rescan during second checkpoint phase --
7724 // the parallel version.
7725 void Par_PushAndMarkClosure::do_oop(oop obj) {
7726   // In the assert below, we ignore the mark word because
7727   // this oop may point to an already visited object that is
7728   // on the overflow stack (in which case the mark word has
7729   // been hijacked for chaining into the overflow stack --
7730   // if this is the last object in the overflow stack then
7731   // its mark word will be NULL). Because this object may
7732   // have been subsequently popped off the global overflow
7733   // stack, and its mark word possibly restored to the prototypical
7734   // value, by the time we get to examine this failing assert in
7735   // the debugger, is_oop_or_null(false) may already have started
7736   // to hold.
7737   assert(obj->is_oop_or_null(true),
7738          "expected an oop or NULL");
7739   HeapWord* addr = (HeapWord*)obj;
7740   // Check if oop points into the CMS generation
7741   // and is not marked
7742   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
7743     // a white object ...
7744     // If we manage to "claim" the object, by being the
7745     // first thread to mark it, then we push it on our
7746     // marking stack
7747     if (_bit_map->par_mark(addr)) {     // ... now grey
7748       // push on work queue (grey set)
7749       bool simulate_overflow = false;
7750       NOT_PRODUCT(
7751         if (CMSMarkStackOverflowALot &&
7752             _collector->par_simulate_overflow()) {
7753           // simulate a stack overflow
7754           simulate_overflow = true;
7755         }
7756       )
7757       if (simulate_overflow || !_work_queue->push(obj)) {
7758         _collector->par_push_on_overflow_list(obj);
7759         _collector->_par_pmc_remark_ovflw++; //  imprecise OK: no need to CAS
7760       }
7761     } // Else, some other thread got there first
7762   }
7763 }
7764 
7765 void Par_PushAndMarkClosure::do_oop(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
7766 void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }
7767 
7768 void CMSPrecleanRefsYieldClosure::do_yield_work() {
7769   Mutex* bml = _collector->bitMapLock();
7770   assert_lock_strong(bml);
7771   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7772          "CMS thread should hold CMS token");
7773 
7774   bml->unlock();
7775   ConcurrentMarkSweepThread::desynchronize(true);
7776 
7777   ConcurrentMarkSweepThread::acknowledge_yield_request();
7778 
7779   _collector->stopTimer();
7780   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
7781   if (PrintCMSStatistics != 0) {
7782     _collector->incrementYields();
7783   }
7784   _collector->icms_wait();
7785 
7786   // See the comment in coordinator_yield()
7787   for (unsigned i = 0; i < CMSYieldSleepCount &&
7788                        ConcurrentMarkSweepThread::should_yield() &&
7789                        !CMSCollector::foregroundGCIsActive(); ++i) {
7790     os::sleep(Thread::current(), 1, false);
7791     ConcurrentMarkSweepThread::acknowledge_yield_request();
7792   }
7793 
7794   ConcurrentMarkSweepThread::synchronize(true);
7795   bml->lock();
7796 
7797   _collector->startTimer();
7798 }
7799 
7800 bool CMSPrecleanRefsYieldClosure::should_return() {
7801   if (ConcurrentMarkSweepThread::should_yield()) {
7802     do_yield_work();
7803   }
7804   return _collector->foregroundGCIsActive();
7805 }
7806 
7807 void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
7808   assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0,
7809          "mr should be aligned to start at a card boundary");
7810   // We'd like to assert:
7811   // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0,
7812   //        "mr should be a range of cards");
7813   // However, that would be too strong in one case -- the last
7814   // partition ends at _unallocated_block which, in general, can be
7815   // an arbitrary boundary, not necessarily card aligned.
7816   if (PrintCMSStatistics != 0) {
7817     _num_dirty_cards +=
7818          mr.word_size()/CardTableModRefBS::card_size_in_words;
7819   }
7820   _space->object_iterate_mem(mr, &_scan_cl);
7821 }
7822 
7823 SweepClosure::SweepClosure(CMSCollector* collector,
7824                            ConcurrentMarkSweepGeneration* g,
7825                            CMSBitMap* bitMap, bool should_yield) :
7826   _collector(collector),
7827   _g(g),
7828   _sp(g->cmsSpace()),
7829   _limit(_sp->sweep_limit()),
7830   _freelistLock(_sp->freelistLock()),
7831   _bitMap(bitMap),
7832   _yield(should_yield),
7833   _inFreeRange(false),           // No free range at beginning of sweep
7834   _freeRangeInFreeLists(false),  // No free range at beginning of sweep
7835   _lastFreeRangeCoalesced(false),
7836   _freeFinger(g->used_region().start())
7837 {
7838   NOT_PRODUCT(
7839     _numObjectsFreed = 0;
7840     _numWordsFreed   = 0;
7841     _numObjectsLive = 0;
7842     _numWordsLive = 0;
7843     _numObjectsAlreadyFree = 0;
7844     _numWordsAlreadyFree = 0;
7845     _last_fc = NULL;
7846 
7847     _sp->initializeIndexedFreeListArrayReturnedBytes();
7848     _sp->dictionary()->initialize_dict_returned_bytes();
7849   )
7850   assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7851          "sweep _limit out of bounds");
7852   if (CMSTraceSweeper) {
7853     gclog_or_tty->print_cr("\n====================\nStarting new sweep with limit " PTR_FORMAT,
7854                         _limit);
7855   }
7856 }
7857 
7858 void SweepClosure::print_on(outputStream* st) const {
7859   tty->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")",
7860                 _sp->bottom(), _sp->end());
7861   tty->print_cr("_limit = " PTR_FORMAT, _limit);
7862   tty->print_cr("_freeFinger = " PTR_FORMAT, _freeFinger);
7863   NOT_PRODUCT(tty->print_cr("_last_fc = " PTR_FORMAT, _last_fc);)
7864   tty->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d",
7865                 _inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced);
7866 }
7867 
7868 #ifndef PRODUCT
7869 // Assertion checking only:  no useful work in product mode --
7870 // however, if any of the flags below become product flags,
7871 // you may need to review this code to see if it needs to be
7872 // enabled in product mode.
7873 SweepClosure::~SweepClosure() {
7874   assert_lock_strong(_freelistLock);
7875   assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7876          "sweep _limit out of bounds");
7877   if (inFreeRange()) {
7878     warning("inFreeRange() should have been reset; dumping state of SweepClosure");
7879     print();
7880     ShouldNotReachHere();
7881   }
7882   if (Verbose && PrintGC) {
7883     gclog_or_tty->print("Collected " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
7884                         _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
7885     gclog_or_tty->print_cr("\nLive " SIZE_FORMAT " objects,  "
7886                            SIZE_FORMAT " bytes  "
7887       "Already free " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
7888       _numObjectsLive, _numWordsLive*sizeof(HeapWord),
7889       _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
7890     size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree)
7891                         * sizeof(HeapWord);
7892     gclog_or_tty->print_cr("Total sweep: " SIZE_FORMAT " bytes", totalBytes);
7893
7894     if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
7895       size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
7896       size_t dict_returned_bytes = _sp->dictionary()->sum_dict_returned_bytes();
7897       size_t returned_bytes = indexListReturnedBytes + dict_returned_bytes;
7898       gclog_or_tty->print("Returned " SIZE_FORMAT " bytes", returned_bytes);
7899       gclog_or_tty->print("   Indexed List Returned " SIZE_FORMAT " bytes",
7900         indexListReturnedBytes);
7901       gclog_or_tty->print_cr("        Dictionary Returned " SIZE_FORMAT " bytes",
7902         dict_returned_bytes);
7903     }
7904   }
7905   if (CMSTraceSweeper) {
7906     gclog_or_tty->print_cr("end of sweep with _limit = " PTR_FORMAT "\n================",
7907                            _limit);
7908   }
7909 }
7910 #endif  // PRODUCT
7911 
7912 void SweepClosure::initialize_free_range(HeapWord* freeFinger,
7913     bool freeRangeInFreeLists) {
7914   if (CMSTraceSweeper) {
7915     gclog_or_tty->print("---- Start free range at " PTR_FORMAT " with free block (%d)\n",
7916                freeFinger, freeRangeInFreeLists);
7917   }
7918   assert(!inFreeRange(), "Trampling existing free range");
7919   set_inFreeRange(true);
7920   set_lastFreeRangeCoalesced(false);
7921 
7922   set_freeFinger(freeFinger);
7923   set_freeRangeInFreeLists(freeRangeInFreeLists);
7924   if (CMSTestInFreeList) {
7925     if (freeRangeInFreeLists) {
7926       FreeChunk* fc = (FreeChunk*) freeFinger;
7927       assert(fc->is_free(), "A chunk on the free list should be free.");
7928       assert(fc->size() > 0, "Free range should have a size");
7929       assert(_sp->verify_chunk_in_free_list(fc), "Chunk is not in free lists");
7930     }
7931   }
7932 }
7933 
7934 // Note that the sweeper runs concurrently with mutators. Thus,
7935 // it is possible for direct allocation in this generation to happen
7936 // in the middle of the sweep. Note that the sweeper also coalesces
7937 // contiguous free blocks. Thus, unless the sweeper and the allocator
7938 // synchronize appropriately freshly allocated blocks may get swept up.
7939 // This is accomplished by the sweeper locking the free lists while
7940 // it is sweeping. Thus blocks that are determined to be free are
7941 // indeed free. There is however one additional complication:
7942 // blocks that have been allocated since the final checkpoint and
7943 // mark, will not have been marked and so would be treated as
7944 // unreachable and swept up. To prevent this, the allocator marks
7945 // the bit map when allocating during the sweep phase. This leads,
7946 // however, to a further complication -- objects may have been allocated
7947 // but not yet initialized -- in the sense that the header isn't yet
7948 // installed. The sweeper can not then determine the size of the block
7949 // in order to skip over it. To deal with this case, we use a technique
7950 // (due to Printezis) to encode such uninitialized block sizes in the
7951 // bit map. Since the bit map uses a bit per every HeapWord, but the
7952 // CMS generation has a minimum object size of 3 HeapWords, it follows
7953 // that "normal marks" won't be adjacent in the bit map (there will
7954 // always be at least two 0 bits between successive 1 bits). We make use
7955 // of these "unused" bits to represent uninitialized blocks -- the bit
7956 // corresponding to the start of the uninitialized object and the next
7957 // bit are both set. Finally, a 1 bit marks the end of the object that
7958 // started with the two consecutive 1 bits to indicate its potentially
7959 // uninitialized state.
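//
// A hypothetical bit map fragment may make the encoding above concrete
// (one bit per HeapWord; '1' = set, '.' = clear):
//
//   ordinary live object of 4 words starting at word offset k:
//       offset:  k   k+1  k+2  k+3
//       bits:    1    .    .    .
//
//   uninitialized block of 5 words starting at word offset k:
//       offset:  k   k+1  k+2  k+3  k+4
//       bits:    1    1    .    .    1
//
// The two adjacent set bits flag the block as uninitialized, and the
// trailing set bit sits on the block's last word, so its size can be read
// off the bit map without touching the (possibly absent) header. Because
// ordinary objects are at least 3 words, an ordinary mark is never
// immediately followed by another mark, which keeps the pattern unambiguous.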
7960 
7961 size_t SweepClosure::do_blk_careful(HeapWord* addr) {
7962   FreeChunk* fc = (FreeChunk*)addr;
7963   size_t res;
7964 
7965   // Check if we are done sweeping. Below we check "addr >= _limit" rather
7966   // than "addr == _limit" because although _limit was a block boundary when
7967   // we started the sweep, it may no longer be one because heap expansion
7968   // may have caused us to coalesce the block ending at the address _limit
7969   // with a newly expanded chunk (this happens when _limit was set to the
7970   // previous _end of the space), so we may have stepped past _limit:
7971   // see the following Zeno-like trail of CRs 6977970, 7008136, 7042740.
7972   if (addr >= _limit) { // we have swept up to or past the limit: finish up
7973     assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7974            "sweep _limit out of bounds");
7975     assert(addr < _sp->end(), "addr out of bounds");
7976     // Flush any free range we might be holding as a single
7977     // coalesced chunk to the appropriate free list.
7978     if (inFreeRange()) {
7979       assert(freeFinger() >= _sp->bottom() && freeFinger() < _limit,
7980              err_msg("freeFinger() " PTR_FORMAT" is out-of-bounds", freeFinger()));
7981       flush_cur_free_chunk(freeFinger(),
7982                            pointer_delta(addr, freeFinger()));
7983       if (CMSTraceSweeper) {
7984         gclog_or_tty->print("Sweep: last chunk: ");
7985         gclog_or_tty->print("put_free_blk " PTR_FORMAT " (" SIZE_FORMAT ") "
7986                    "[coalesced:%d]\n",
7987                    freeFinger(), pointer_delta(addr, freeFinger()),
7988                    lastFreeRangeCoalesced());
7989       }
7990     }
7991 
7992     // help the iterator loop finish
7993     return pointer_delta(_sp->end(), addr);
7994   }
7995 
7996   assert(addr < _limit, "sweep invariant");
7997   // check if we should yield
7998   do_yield_check(addr);
7999   if (fc->is_free()) {
8000     // Chunk that is already free
8001     res = fc->size();
8002     do_already_free_chunk(fc);
8003     debug_only(_sp->verifyFreeLists());
8004     // If we flush the chunk at hand in lookahead_and_flush()
8005     // and it's coalesced with a preceding chunk, then the
8006     // process of "mangling" the payload of the coalesced block
8007     // will cause erasure of the size information from the
8008     // (erstwhile) header of all the coalesced blocks but the
8009     // first, so the first disjunct in the assert will not hold
8010     // in that specific case (in which case the second disjunct
8011     // will hold).
8012     assert(res == fc->size() || ((HeapWord*)fc) + res >= _limit,
8013            "Otherwise the size info doesn't change at this step");
8014     NOT_PRODUCT(
8015       _numObjectsAlreadyFree++;
8016       _numWordsAlreadyFree += res;
8017     )
8018     NOT_PRODUCT(_last_fc = fc;)
8019   } else if (!_bitMap->isMarked(addr)) {
8020     // Chunk is fresh garbage
8021     res = do_garbage_chunk(fc);
8022     debug_only(_sp->verifyFreeLists());
8023     NOT_PRODUCT(
8024       _numObjectsFreed++;
8025       _numWordsFreed += res;
8026     )
8027   } else {
8028     // Chunk that is alive.
8029     res = do_live_chunk(fc);
8030     debug_only(_sp->verifyFreeLists());
8031     NOT_PRODUCT(
8032         _numObjectsLive++;
8033         _numWordsLive += res;
8034     )
8035   }
8036   return res;
8037 }
8038 
8039 // For the smart allocation, record following
8040 //  split deaths - a free chunk is removed from its free list because
8041 //      it is being split into two or more chunks.
8042 //  split birth - a free chunk is being added to its free list because
8043 //      a larger free chunk has been split and resulted in this free chunk.
8044 //  coal death - a free chunk is being removed from its free list because
8045 //      it is being coalesced into a large free chunk.
8046 //  coal birth - a free chunk is being added to its free list because
8047 //      it was created when two or more free chunks were coalesced into
8048 //      this free chunk.
8049 //
8050 // These statistics are used to determine the desired number of free
8051 // chunks of a given size.  The desired number is chosen to be relative
8052 // to the end of a CMS sweep.  The desired number at the end of a sweep
8053 // is the
8054 //      count-at-end-of-previous-sweep (an amount that was enough)
8055 //              - count-at-beginning-of-current-sweep  (the excess)
8056 //              + split-births  (gains in this size during interval)
8057 //              - split-deaths  (demands on this size during interval)
8058 // where the interval is from the end of one sweep to the end of the
8059 // next.
8060 //
8061 // When sweeping the sweeper maintains an accumulated chunk which is
8062 // the chunk that is made up of chunks that have been coalesced.  That
8063 // will be termed the left-hand chunk.  A new chunk of garbage that
8064 // is being considered for coalescing will be referred to as the
8065 // right-hand chunk.
8066 //
8067 // When making a decision on whether to coalesce a right-hand chunk with
8068 // the current left-hand chunk, the current count vs. the desired count
8069 // of the left-hand chunk is considered.  Also if the right-hand chunk
8070 // is near the large chunk at the end of the heap (see
8071 // ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the
8072 // left-hand chunk is coalesced.
8073 //
8074 // When making a decision about whether to split a chunk, the desired count
8075 // vs. the current count of the candidate to be split is also considered.
8076 // If the candidate is underpopulated (currently fewer chunks than desired)
8077 // a chunk of an overpopulated (currently more chunks than desired) size may
8078 // be chosen.  The "hint" associated with a free list, if non-null, points
8079 // to a free list which may be overpopulated.
8080 //
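// A hypothetical instance of the desired-count computation above (all
// numbers illustrative): suppose the free list for 64-word chunks held 40
// chunks at the end of the previous sweep, holds 10 at the start of this
// sweep, and recorded 25 split births and 5 split deaths in between. Then
// the desired count at the end of this sweep is
//      40 - 10 + 25 - 5 = 50
// and the coalescing/splitting decisions below compare a list's current
// count against such a desired count to judge whether that size is over-
// or under-populated.
//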
8081 
8082 void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
8083   const size_t size = fc->size();
8084   // Chunks that cannot be coalesced are not in the
8085   // free lists.
8086   if (CMSTestInFreeList && !fc->cantCoalesce()) {
8087     assert(_sp->verify_chunk_in_free_list(fc),
8088       "free chunk should be in free lists");
8089   }
8090   // a chunk that is already free, should not have been
8091   // marked in the bit map
8092   HeapWord* const addr = (HeapWord*) fc;
8093   assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
8094   // Verify that the bit map has no bits marked between
8095   // addr and purported end of this block.
8096   _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
8097 
8098   // Some chunks cannot be coalesced under any circumstances.
8099   // See the definition of cantCoalesce().
8100   if (!fc->cantCoalesce()) {
8101     // This chunk can potentially be coalesced.
8102     if (_sp->adaptive_freelists()) {
8103       // All the work is done in
8104       do_post_free_or_garbage_chunk(fc, size);
8105     } else {  // Not adaptive free lists
8106       // this is a free chunk that can potentially be coalesced by the sweeper;
8107       if (!inFreeRange()) {
8108         // if the next chunk is a free block that can't be coalesced
8109         // it doesn't make sense to remove this chunk from the free lists
8110         FreeChunk* nextChunk = (FreeChunk*)(addr + size);
8111         assert((HeapWord*)nextChunk <= _sp->end(), "Chunk size out of bounds?");
8112         if ((HeapWord*)nextChunk < _sp->end() &&     // There is another free chunk to the right ...
8113             nextChunk->is_free()               &&     // ... which is free...
8114             nextChunk->cantCoalesce()) {             // ... but can't be coalesced
8115           // nothing to do
8116         } else {
8117           // Potentially the start of a new free range:
8118           // Don't eagerly remove it from the free lists.
8119           // No need to remove it if it will just be put
8120           // back again.  (Also from a pragmatic point of view
8121           // if it is a free block in a region that is beyond
8122           // any allocated blocks, an assertion will fail)
8123           // Remember the start of a free run.
8124           initialize_free_range(addr, true);
8125           // end - can coalesce with next chunk
8126         }
8127       } else {
8128         // the midst of a free range, we are coalescing
8129         print_free_block_coalesced(fc);
8130         if (CMSTraceSweeper) {
8131           gclog_or_tty->print("  -- pick up free block " PTR_FORMAT " (" SIZE_FORMAT ")\n", fc, size);
8132         }
8133         // remove it from the free lists
8134         _sp->removeFreeChunkFromFreeLists(fc);
8135         set_lastFreeRangeCoalesced(true);
8136         // If the chunk is being coalesced and the current free range is
8137         // in the free lists, remove the current free range so that it
8138         // will be returned to the free lists in its entirety - all
8139         // the coalesced pieces included.
8140         if (freeRangeInFreeLists()) {
8141           FreeChunk* ffc = (FreeChunk*) freeFinger();
8142           assert(ffc->size() == pointer_delta(addr, freeFinger()),
8143             "Size of free range is inconsistent with chunk size.");
8144           if (CMSTestInFreeList) {
8145             assert(_sp->verify_chunk_in_free_list(ffc),
8146               "free range is not in free lists");
8147           }
8148           _sp->removeFreeChunkFromFreeLists(ffc);
8149           set_freeRangeInFreeLists(false);
8150         }
8151       }
8152     }
8153     // Note that if the chunk is not coalescable (the else arm
8154     // below), we unconditionally flush, without needing to do
8155     // a "lookahead," as we do below.
8156     if (inFreeRange()) lookahead_and_flush(fc, size);
8157   } else {
8158     // Code path common to both original and adaptive free lists.
8159 
8160     // can't coalesce with previous block; this should be treated
8161     // as the end of a free run if any
8162     if (inFreeRange()) {
8163       // we kicked some butt; time to pick up the garbage
8164       assert(freeFinger() < addr, "freeFinger points too high");
8165       flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
8166     }
8167     // else, nothing to do, just continue
8168   }
8169 }
8170 
8171 size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
8172   // This is a chunk of garbage.  It is not in any free list.
8173   // Add it to a free list or let it possibly be coalesced into
8174   // a larger chunk.
8175   HeapWord* const addr = (HeapWord*) fc;
8176   const size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
8177 
8178   if (_sp->adaptive_freelists()) {
8179     // Verify that the bit map has no bits marked between
8180     // addr and purported end of just dead object.
8181     _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
8182 
8183     do_post_free_or_garbage_chunk(fc, size);
8184   } else {
8185     if (!inFreeRange()) {
8186       // start of a new free range
8187       assert(size > 0, "A free range should have a size");
8188       initialize_free_range(addr, false);
8189     } else {
8190       // this will be swept up when we hit the end of the
8191       // free range
8192       if (CMSTraceSweeper) {
8193         gclog_or_tty->print("  -- pick up garbage " PTR_FORMAT " (" SIZE_FORMAT ")\n", fc, size);
8194       }
8195       // If the chunk is being coalesced and the current free range is
8196       // in the free lists, remove the current free range so that it
8197       // will be returned to the free lists in its entirety - all
8198       // the coalesced pieces included.
8199       if (freeRangeInFreeLists()) {
8200         FreeChunk* ffc = (FreeChunk*)freeFinger();
8201         assert(ffc->size() == pointer_delta(addr, freeFinger()),
8202           "Size of free range is inconsistent with chunk size.");
8203         if (CMSTestInFreeList) {
8204           assert(_sp->verify_chunk_in_free_list(ffc),
8205             "free range is not in free lists");
8206         }
8207         _sp->removeFreeChunkFromFreeLists(ffc);
8208         set_freeRangeInFreeLists(false);
8209       }
8210       set_lastFreeRangeCoalesced(true);
8211     }
8212     // this will be swept up when we hit the end of the free range
8213 
8214     // Verify that the bit map has no bits marked between
8215     // addr and purported end of just dead object.
8216     _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
8217   }
8218   assert(_limit >= addr + size,
8219          "A freshly garbage chunk can't possibly straddle over _limit");
8220   if (inFreeRange()) lookahead_and_flush(fc, size);
8221   return size;
8222 }
8223 
8224 size_t SweepClosure::do_live_chunk(FreeChunk* fc) {
8225   HeapWord* addr = (HeapWord*) fc;
8226   // The sweeper has just found a live object. Return any accumulated
8227   // left hand chunk to the free lists.
8228   if (inFreeRange()) {
8229     assert(freeFinger() < addr, "freeFinger points too high");
8230     flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
8231   }
8232 
8233   // This object is live: we'd normally expect this to be
8234   // an oop, and like to assert the following:
8235   // assert(oop(addr)->is_oop(), "live block should be an oop");
8236   // However, as we commented above, this may be an object whose
8237   // header hasn't yet been initialized.
8238   size_t size;
8239   assert(_bitMap->isMarked(addr), "Tautology for this control point");
8240   if (_bitMap->isMarked(addr + 1)) {
8241     // Determine the size from the bit map, rather than trying to
8242     // compute it from the object header.
8243     HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
8244     size = pointer_delta(nextOneAddr + 1, addr);
8245     assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
8246            "alignment problem");
8247 
8248 #ifdef ASSERT
8249     if (oop(addr)->klass_or_null() != NULL) {
8250       // Ignore mark word because we are running concurrent with mutators
8251       assert(oop(addr)->is_oop(true), "live block should be an oop");
8252       assert(size ==
8253              CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
8254              "P-mark and computed size do not agree");
8255     }
8256 #endif
8257 
8258   } else {
8259     // This should be an initialized object that's alive.
8260     assert(oop(addr)->klass_or_null() != NULL,
8261            "Should be an initialized object");
8262     // Ignore mark word because we are running concurrent with mutators
8263     assert(oop(addr)->is_oop(true), "live block should be an oop");
8264     // Verify that the bit map has no bits marked between
8265     // addr and purported end of this block.
8266     size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
8267     assert(size >= 3, "Necessary for Printezis marks to work");
8268     assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
8269     DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
8270   }
8271   return size;
8272 }
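// Relating the Printezis case above to the encoding sketched before
// do_blk_careful(): for an uninitialized block whose first two bits sit at
// addr and addr + 1 and whose trailing bit sits at nextOneAddr, the size
// computed as pointer_delta(nextOneAddr + 1, addr) is exactly the block's
// extent. With the hypothetical 5-word block at word offset k, nextOneAddr
// is k + 4 and the computed size is (k + 4 + 1) - k = 5 words.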
8273 
8274 void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
8275                                                  size_t chunkSize) {
8276   // do_post_free_or_garbage_chunk() should only be called in the case
8277   // of the adaptive free list allocator.
8278   const bool fcInFreeLists = fc->is_free();
8279   assert(_sp->adaptive_freelists(), "Should only be used in this case.");
8280   assert((HeapWord*)fc <= _limit, "sweep invariant");
8281   if (CMSTestInFreeList && fcInFreeLists) {
8282     assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists");
8283   }
8284 
8285   if (CMSTraceSweeper) {
8286     gclog_or_tty->print_cr("  -- pick up another chunk at " PTR_FORMAT " (" SIZE_FORMAT ")", fc, chunkSize);
8287   }
8288 
8289   HeapWord* const fc_addr = (HeapWord*) fc;
8290 
8291   bool coalesce;
8292   const size_t left  = pointer_delta(fc_addr, freeFinger());
8293   const size_t right = chunkSize;
8294   switch (FLSCoalescePolicy) {
8295     // the numeric value forms a coalescing aggressiveness metric
8296     case 0:  { // never coalesce
8297       coalesce = false;
8298       break;
8299     }
8300     case 1: { // coalesce if left & right chunks on overpopulated lists
8301       coalesce = _sp->coalOverPopulated(left) &&
8302                  _sp->coalOverPopulated(right);
8303       break;
8304     }
8305     case 2: { // coalesce if left chunk on overpopulated list (default)
8306       coalesce = _sp->coalOverPopulated(left);
8307       break;
8308     }
8309     case 3: { // coalesce if left OR right chunk on overpopulated list
8310       coalesce = _sp->coalOverPopulated(left) ||
8311                  _sp->coalOverPopulated(right);
8312       break;
8313     }
8314     case 4: { // always coalesce
8315       coalesce = true;
8316       break;
8317     }
8318     default:
8319      ShouldNotReachHere();
8320   }
8321 
8322   // Should the current free range be coalesced?
8323   // If the chunk is in a free range and either we decided to coalesce above
8324   // or the chunk is near the large block at the end of the heap
8325   // (isNearLargestChunk() returns true), then coalesce this chunk.
8326   const bool doCoalesce = inFreeRange()
8327                           && (coalesce || _g->isNearLargestChunk(fc_addr));
8328   if (doCoalesce) {
8329     // Coalesce the current free range on the left with the new
8330     // chunk on the right.  If either is on a free list,
8331     // it must be removed from the list and stashed in the closure.
8332     if (freeRangeInFreeLists()) {
8333       FreeChunk* const ffc = (FreeChunk*)freeFinger();
8334       assert(ffc->size() == pointer_delta(fc_addr, freeFinger()),
8335         "Size of free range is inconsistent with chunk size.");
8336       if (CMSTestInFreeList) {
8337         assert(_sp->verify_chunk_in_free_list(ffc),
8338           "Chunk is not in free lists");
8339       }
8340       _sp->coalDeath(ffc->size());
8341       _sp->removeFreeChunkFromFreeLists(ffc);
8342       set_freeRangeInFreeLists(false);
8343     }
8344     if (fcInFreeLists) {
8345       _sp->coalDeath(chunkSize);
8346       assert(fc->size() == chunkSize,
8347         "The chunk has the wrong size or is not in the free lists");
8348       _sp->removeFreeChunkFromFreeLists(fc);
8349     }
8350     set_lastFreeRangeCoalesced(true);
8351     print_free_block_coalesced(fc);
8352   } else {  // not in a free range and/or should not coalesce
8353     // Return the current free range and start a new one.
8354     if (inFreeRange()) {
8355       // In a free range but cannot coalesce with the right hand chunk.
8356       // Put the current free range into the free lists.
8357       flush_cur_free_chunk(freeFinger(),
8358                            pointer_delta(fc_addr, freeFinger()));
8359     }
8360     // Set up for new free range.  Pass along whether the right hand
8361     // chunk is in the free lists.
8362     initialize_free_range((HeapWord*)fc, fcInFreeLists);
8363   }
8364 }
8365 
8366 // Lookahead flush:
8367 // If we are tracking a free range, and this is the last chunk that
8368 // we'll look at because its end crosses past _limit, we'll preemptively
8369 // flush it along with any free range we may be holding on to. Note that
8370 // this can be the case only for an already free or freshly garbage
8371 // chunk. If this block is an object, it can never straddle
8372 // over _limit. The "straddling" occurs when _limit is set at
8373 // the previous end of the space when this cycle started, and
8374 // a subsequent heap expansion caused the previously co-terminal
8375 // free block to be coalesced with the newly expanded portion,
8376 // thus rendering _limit a non-block-boundary making it dangerous
8377 // for the sweeper to step over and examine.
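//
// A hypothetical timeline for the straddling case described above: the
// sweep starts with _limit set to the space's end at that moment, say
// 0x9000, and the last block in the space is a free block [0x8c00, 0x9000).
// The heap then expands and the newly added storage coalesces with that
// free block, yielding a single free chunk [0x8c00, 0xa000). When the sweep
// reaches 0x8c00, the chunk's end (0xa000) lies at or past _limit (0x9000),
// so the chunk, together with any free range being tracked, is flushed here
// rather than letting the sweep step onto what is no longer a block
// boundary.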
8378 void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
8379   assert(inFreeRange(), "Should only be called if currently in a free range.");
8380   HeapWord* const eob = ((HeapWord*)fc) + chunk_size;
8381   assert(_sp->used_region().contains(eob - 1),
8382          err_msg("eob = " PTR_FORMAT " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
8383                  " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
8384                  eob, _sp->bottom(), _sp->end(), fc, chunk_size));
8385   if (eob >= _limit) {
8386     assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit");
8387     if (CMSTraceSweeper) {
8388       gclog_or_tty->print_cr("_limit " PTR_FORMAT " reached or crossed by block "
8389                              "[" PTR_FORMAT "," PTR_FORMAT ") in space "
8390                              "[" PTR_FORMAT "," PTR_FORMAT ")",
8391                              _limit, fc, eob, _sp->bottom(), _sp->end());
8392     }
8393     // Return the storage we are tracking back into the free lists.
8394     if (CMSTraceSweeper) {
8395       gclog_or_tty->print_cr("Flushing ... ");
8396     }
8397     assert(freeFinger() < eob, "Error");
8398     flush_cur_free_chunk( freeFinger(), pointer_delta(eob, freeFinger()));
8399   }
8400 }
8401 
8402 void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
8403   assert(inFreeRange(), "Should only be called if currently in a free range.");
8404   assert(size > 0,
8405     "A zero sized chunk cannot be added to the free lists.");
8406   if (!freeRangeInFreeLists()) {
8407     if (CMSTestInFreeList) {
8408       FreeChunk* fc = (FreeChunk*) chunk;
8409       fc->set_size(size);
8410       assert(!_sp->verify_chunk_in_free_list(fc),
8411         "chunk should not be in free lists yet");
8412     }
8413     if (CMSTraceSweeper) {
8414       gclog_or_tty->print_cr(" -- add free block " PTR_FORMAT " (" SIZE_FORMAT ") to free lists",
8415                     chunk, size);
8416     }
8417     // A new free range is going to be starting.  The current
8418     // free range has not been added to the free lists yet or
8419     // was removed so add it back.
8420     // If the current free range was coalesced, then the death
8421     // of the free range was recorded.  Record a birth now.
8422     if (lastFreeRangeCoalesced()) {
8423       _sp->coalBirth(size);
8424     }
8425     _sp->addChunkAndRepairOffsetTable(chunk, size,
8426             lastFreeRangeCoalesced());
8427   } else if (CMSTraceSweeper) {
8428     gclog_or_tty->print_cr("Already in free list: nothing to flush");
8429   }
8430   set_inFreeRange(false);
8431   set_freeRangeInFreeLists(false);
8432 }
8433 
8434 // We take a break if we've been at this for a while,
8435 // so as to avoid monopolizing the locks involved.
8436 void SweepClosure::do_yield_work(HeapWord* addr) {
8437   // Return current free chunk being used for coalescing (if any)
8438   // to the appropriate freelist.  After yielding, the next
8439   // free block encountered will start a coalescing range of
8440   // free blocks.  If the next free block is adjacent to the
8441   // chunk just flushed, they will need to wait for the next
8442   // sweep to be coalesced.
8443   if (inFreeRange()) {
8444     flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
8445   }
8446 
8447   // First give up the locks, then yield, then re-lock.
8448   // We should probably use a constructor/destructor idiom to
8449   // do this unlock/lock or modify the MutexUnlocker class to
8450   // serve our purpose. XXX
8451   assert_lock_strong(_bitMap->lock());
8452   assert_lock_strong(_freelistLock);
8453   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
8454          "CMS thread should hold CMS token");
8455   _bitMap->lock()->unlock();
8456   _freelistLock->unlock();
8457   ConcurrentMarkSweepThread::desynchronize(true);
8458   ConcurrentMarkSweepThread::acknowledge_yield_request();
8459   _collector->stopTimer();
8460   GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
8461   if (PrintCMSStatistics != 0) {
8462     _collector->incrementYields();
8463   }
8464   _collector->icms_wait();
8465 
8466   // See the comment in coordinator_yield()
8467   for (unsigned i = 0; i < CMSYieldSleepCount &&
8468                        ConcurrentMarkSweepThread::should_yield() &&
8469                        !CMSCollector::foregroundGCIsActive(); ++i) {
8470     os::sleep(Thread::current(), 1, false);
8471     ConcurrentMarkSweepThread::acknowledge_yield_request();
8472   }
8473 
8474   ConcurrentMarkSweepThread::synchronize(true);
8475   _freelistLock->lock();
8476   _bitMap->lock()->lock_without_safepoint_check();
8477   _collector->startTimer();
8478 }
8479 
8480 #ifndef PRODUCT
8481 // This is actually very useful in a product build if it can
8482 // be called from the debugger.  Compile it into the product
8483 // as needed.
8484 bool debug_verify_chunk_in_free_list(FreeChunk* fc) {
8485   return debug_cms_space->verify_chunk_in_free_list(fc);
8486 }
8487 #endif
8488 
8489 void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const {
8490   if (CMSTraceSweeper) {
8491     gclog_or_tty->print_cr("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")",
8492                            fc, fc->size());
8493   }
8494 }
8495 
8496 // CMSIsAliveClosure
8497 bool CMSIsAliveClosure::do_object_b(oop obj) {
8498   HeapWord* addr = (HeapWord*)obj;
8499   return addr != NULL &&
8500          (!_span.contains(addr) || _bit_map->isMarked(addr));
8501 }
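// For illustration: an oop outside _span (for example, one in the young
// generation) is not covered by this bit map and is conservatively treated
// as live; an oop inside _span is considered live only if its bit is set in
// the mark bit map. This is the liveness predicate consulted, e.g., during
// reference processing to decide whether a referent is still reachable.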
8502 
8503 
8504 CMSKeepAliveClosure::CMSKeepAliveClosure( CMSCollector* collector,
8505                       MemRegion span,
8506                       CMSBitMap* bit_map, CMSMarkStack* mark_stack,
8507                       bool cpc):
8508   _collector(collector),
8509   _span(span),
8510   _bit_map(bit_map),
8511   _mark_stack(mark_stack),
8512   _concurrent_precleaning(cpc) {
8513   assert(!_span.is_empty(), "Empty span could spell trouble");
8514 }
8515 
8516 
8517 // CMSKeepAliveClosure: the serial version
8518 void CMSKeepAliveClosure::do_oop(oop obj) {
8519   HeapWord* addr = (HeapWord*)obj;
8520   if (_span.contains(addr) &&
8521       !_bit_map->isMarked(addr)) {
8522     _bit_map->mark(addr);
8523     bool simulate_overflow = false;
8524     NOT_PRODUCT(
8525       if (CMSMarkStackOverflowALot &&
8526           _collector->simulate_overflow()) {
8527         // simulate a stack overflow
8528         simulate_overflow = true;
8529       }
8530     )
8531     if (simulate_overflow || !_mark_stack->push(obj)) {
8532       if (_concurrent_precleaning) {
8533         // We dirty the overflown object and let the remark
8534         // phase deal with it.
8535         assert(_collector->overflow_list_is_empty(), "Error");
8536         // In the case of object arrays, we need to dirty all of
8537         // the cards that the object spans. No locking or atomics
8538         // are needed since no one else can be mutating the mod union
8539         // table.
8540         if (obj->is_objArray()) {
8541           size_t sz = obj->size();
8542           HeapWord* end_card_addr =
8543             (HeapWord*)round_to((intptr_t)(addr+sz), CardTableModRefBS::card_size);
8544           MemRegion redirty_range = MemRegion(addr, end_card_addr);
8545           assert(!redirty_range.is_empty(), "Arithmetical tautology");
8546           _collector->_modUnionTable.mark_range(redirty_range);
8547         } else {
8548           _collector->_modUnionTable.mark(addr);
8549         }
8550         _collector->_ser_kac_preclean_ovflw++;
8551       } else {
8552         _collector->push_on_overflow_list(obj);
8553         _collector->_ser_kac_ovflw++;
8554       }
8555     }
8556   }
8557 }
8558 
8559 void CMSKeepAliveClosure::do_oop(oop* p)       { CMSKeepAliveClosure::do_oop_work(p); }
8560 void CMSKeepAliveClosure::do_oop(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
8561 
8562 // CMSParKeepAliveClosure: a parallel version of the above.
8563 // The work queues are private to each closure (thread),
8564 // but (may be) available for stealing by other threads.
8565 void CMSParKeepAliveClosure::do_oop(oop obj) {
8566   HeapWord* addr = (HeapWord*)obj;
8567   if (_span.contains(addr) &&
8568       !_bit_map->isMarked(addr)) {
8569     // In general, during recursive tracing, several threads
8570     // may be concurrently getting here; the first one to
8571     // "tag" it, claims it.
8572     if (_bit_map->par_mark(addr)) {
8573       bool res = _work_queue->push(obj);
8574       assert(res, "Low water mark should be much less than capacity");
8575       // Do a recursive trim in the hope that this will keep
8576       // stack usage lower, but leave some oops for potential stealers
8577       trim_queue(_low_water_mark);
8578     } // Else, another thread got there first
8579   }
8580 }
8581 
8582 void CMSParKeepAliveClosure::do_oop(oop* p)       { CMSParKeepAliveClosure::do_oop_work(p); }
8583 void CMSParKeepAliveClosure::do_oop(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
8584 
8585 void CMSParKeepAliveClosure::trim_queue(uint max) {
8586   while (_work_queue->size() > max) {
8587     oop new_oop;
8588     if (_work_queue->pop_local(new_oop)) {
8589       assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
8590       assert(_bit_map->isMarked((HeapWord*)new_oop),
8591              "no white objects on this stack!");
8592       assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
8593       // iterate over the oops in this oop, marking and pushing
8594       // the ones in CMS heap (i.e. in _span).
8595       new_oop->oop_iterate(&_mark_and_push);
8596     }
8597   }
8598 }
8599 
8600 CMSInnerParMarkAndPushClosure::CMSInnerParMarkAndPushClosure(
8601                                 CMSCollector* collector,
8602                                 MemRegion span, CMSBitMap* bit_map,
8603                                 OopTaskQueue* work_queue):
8604   _collector(collector),
8605   _span(span),
8606   _bit_map(bit_map),
8607   _work_queue(work_queue) { }
8608 
8609 void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
8610   HeapWord* addr = (HeapWord*)obj;
8611   if (_span.contains(addr) &&
8612       !_bit_map->isMarked(addr)) {
8613     if (_bit_map->par_mark(addr)) {
8614       bool simulate_overflow = false;
8615       NOT_PRODUCT(
8616         if (CMSMarkStackOverflowALot &&
8617             _collector->par_simulate_overflow()) {
8618           // simulate a stack overflow
8619           simulate_overflow = true;
8620         }
8621       )
8622       if (simulate_overflow || !_work_queue->push(obj)) {
8623         _collector->par_push_on_overflow_list(obj);
8624         _collector->_par_kac_ovflw++;
8625       }
8626     } // Else another thread got there already
8627   }
8628 }
8629 
8630 void CMSInnerParMarkAndPushClosure::do_oop(oop* p)       { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
8631 void CMSInnerParMarkAndPushClosure::do_oop(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
8632 
8633 //////////////////////////////////////////////////////////////////
8634 //  CMSExpansionCause                /////////////////////////////
8635 //////////////////////////////////////////////////////////////////
8636 const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) {
8637   switch (cause) {
8638     case _no_expansion:
8639       return "No expansion";
8640     case _satisfy_free_ratio:
8641       return "Free ratio";
8642     case _satisfy_promotion:
8643       return "Satisfy promotion";
8644     case _satisfy_allocation:
8645       return "allocation";
8646     case _allocate_par_lab:
8647       return "Par LAB";
8648     case _allocate_par_spooling_space:
8649       return "Par Spooling Space";
8650     case _adaptive_size_policy:
8651       return "Ergonomics";
8652     default:
8653       return "unknown";
8654   }
8655 }
8656 
8657 void CMSDrainMarkingStackClosure::do_void() {
8658   // the max number to take from overflow list at a time
8659   const size_t num = _mark_stack->capacity()/4;
8660   assert(!_concurrent_precleaning || _collector->overflow_list_is_empty(),
8661          "Overflow list should be NULL during concurrent phases");
8662   while (!_mark_stack->isEmpty() ||
8663          // if stack is empty, check the overflow list
8664          _collector->take_from_overflow_list(num, _mark_stack)) {
8665     oop obj = _mark_stack->pop();
8666     HeapWord* addr = (HeapWord*)obj;
8667     assert(_span.contains(addr), "Should be within span");
8668     assert(_bit_map->isMarked(addr), "Should be marked");
8669     assert(obj->is_oop(), "Should be an oop");
8670     obj->oop_iterate(_keep_alive);
8671   }
8672 }
8673 
8674 void CMSParDrainMarkingStackClosure::do_void() {
8675   // drain queue
8676   trim_queue(0);
8677 }
8678 
8679 // Trim our work_queue so its length is below max at return
8680 void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
8681   while (_work_queue->size() > max) {
8682     oop new_oop;
8683     if (_work_queue->pop_local(new_oop)) {
8684       assert(new_oop->is_oop(), "Expected an oop");
8685       assert(_bit_map->isMarked((HeapWord*)new_oop),
8686              "no white objects on this stack!");
8687       assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
8688       // iterate over the oops in this oop, marking and pushing
8689       // the ones in CMS heap (i.e. in _span).
8690       new_oop->oop_iterate(&_mark_and_push);
8691     }
8692   }
8693 }
8694 
8695 ////////////////////////////////////////////////////////////////////
8696 // Support for Marking Stack Overflow list handling and related code
8697 ////////////////////////////////////////////////////////////////////
8698 // Much of the following code is similar in shape and spirit to the
8699 // code used in ParNewGC. We should try and share that code
8700 // as much as possible in the future.
8701 
8702 #ifndef PRODUCT
8703 // Debugging support for CMSStackOverflowALot
8704 
8705 // It's OK to call this multi-threaded; the worst thing
8706 // that can happen is that we'll get a bunch of closely
8707 // spaced simulated overflows, but that's OK; in fact it is
8708 // probably good, as it would exercise the overflow code
8709 // under contention.
8710 bool CMSCollector::simulate_overflow() {
8711   if (_overflow_counter-- <= 0) { // just being defensive
8712     _overflow_counter = CMSMarkStackOverflowInterval;
8713     return true;
8714   } else {
8715     return false;
8716   }
8717 }
8718 
8719 bool CMSCollector::par_simulate_overflow() {
8720   return simulate_overflow();
8721 }
8722 #endif
8723 
8724 // Single-threaded
8725 bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
8726   assert(stack->isEmpty(), "Expected precondition");
8727   assert(stack->capacity() > num, "Shouldn't bite more than can chew");
8728   size_t i = num;
8729   oop  cur = _overflow_list;
8730   const markOop proto = markOopDesc::prototype();
8731   NOT_PRODUCT(ssize_t n = 0;)
8732   for (oop next; i > 0 && cur != NULL; cur = next, i--) {
8733     next = oop(cur->mark());
8734     cur->set_mark(proto);   // until proven otherwise
8735     assert(cur->is_oop(), "Should be an oop");
8736     bool res = stack->push(cur);
8737     assert(res, "Bit off more than can chew?");
8738     NOT_PRODUCT(n++;)
8739   }
8740   _overflow_list = cur;
8741 #ifndef PRODUCT
8742   assert(_num_par_pushes >= n, "Too many pops?");
8743   _num_par_pushes -=n;
8744 #endif
8745   return !stack->isEmpty();
8746 }
8747 
8748 #define BUSY  (oop(0x1aff1aff))
8749 // (MT-safe) Get a prefix of at most "num" from the list.
8750 // The overflow list is chained through the mark word of
8751 // each object in the list. We fetch the entire list,
8752 // break off a prefix of the right size and return the
8753 // remainder. If other threads try to take objects from
8754 // the overflow list at that time, they will wait for
8755 // some time to see if data becomes available. If (and
8756 // only if) another thread places one or more object(s)
8757 // on the global list before we have returned the suffix
8758 // to the global list, we will walk down our local list
8759 // to find its end and append the global list to
8760 // our suffix before returning it. This suffix walk can
8761 // prove to be expensive (quadratic in the amount of traffic)
8762 // when there are many objects in the overflow list and
8763 // there is much producer-consumer contention on the list.
8764 // *NOTE*: The overflow list manipulation code here and
8765 // in ParNewGeneration:: are very similar in shape,
8766 // except that in the ParNew case we use the old (from/eden)
8767 // copy of the object to thread the list via its klass word.
8768 // Because of the common code, if you make any changes in
8769 // the code below, please check the ParNew version to see if
8770 // similar changes might be needed.
8771 // CR 6797058 has been filed to consolidate the common code.
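// A hypothetical sequence may help picture the protocol above (A, B, C are
// objects chained through their mark words):
//
//   _overflow_list:  A -> B -> C -> NULL
//
//   1. A taker xchg's BUSY into _overflow_list and now privately holds the
//      whole chain A -> B -> C.
//   2. Wanting num = 2 elements, it walks to B, breaks the chain there, and
//      keeps A -> B as its prefix; C becomes the suffix to be returned.
//   3. It tries to CAS the suffix head (C) back into _overflow_list. If the
//      list is still BUSY (or NULL), the CAS installs C directly; otherwise
//      another thread has pushed new elements in the meantime, so the
//      suffix's tail is spliced onto whatever the list now holds before
//      retrying the CAS.
//   4. Finally the prefix elements A and B have their prototypical mark
//      words restored and are pushed onto the local work queue.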
8772 bool CMSCollector::par_take_from_overflow_list(size_t num,
8773                                                OopTaskQueue* work_q,
8774                                                int no_of_gc_threads) {
8775   assert(work_q->size() == 0, "First empty local work queue");
8776   assert(num < work_q->max_elems(), "Can't bite more than we can chew");
8777   if (_overflow_list == NULL) {
8778     return false;
8779   }
8780   // Grab the entire list; we'll put back a suffix
8781   oop prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
8782   Thread* tid = Thread::current();
8783   // Before "no_of_gc_threads" was introduced CMSOverflowSpinCount was
8784   // set to ParallelGCThreads.
8785   size_t CMSOverflowSpinCount = (size_t) no_of_gc_threads; // was ParallelGCThreads;
8786   size_t sleep_time_millis = MAX2((size_t)1, num/100);
8787   // If the list is busy, we spin for a short while,
8788   // sleeping between attempts to get the list.
8789   for (size_t spin = 0; prefix == BUSY && spin < CMSOverflowSpinCount; spin++) {
8790     os::sleep(tid, sleep_time_millis, false);
8791     if (_overflow_list == NULL) {
8792       // Nothing left to take
8793       return false;
8794     } else if (_overflow_list != BUSY) {
8795       // Try and grab the prefix
8796       prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
8797     }
8798   }
8799   // If the list was found to be empty, or we spun long
8800   // enough, we give up and return empty-handed. If we leave
8801   // the list in the BUSY state below, it must be the case that
8802   // some other thread holds the overflow list and will set it
8803   // to a non-BUSY state in the future.
8804   if (prefix == NULL || prefix == BUSY) {
8805      // Nothing to take or waited long enough
8806      if (prefix == NULL) {
8807        // Write back the NULL in case we overwrote it with BUSY above
8808        // and it is still the same value.
8809        (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
8810      }
8811      return false;
8812   }
8813   assert(prefix != NULL && prefix != BUSY, "Error");
8814   size_t i = num;
8815   oop cur = prefix;
8816   // Walk down the first "num" objects, unless we reach the end.
8817   for (; i > 1 && cur->mark() != NULL; cur = oop(cur->mark()), i--);
8818   if (cur->mark() == NULL) {
8819     // We have "num" or fewer elements in the list, so there
8820     // is nothing to return to the global list.
8821     // Write back the NULL in lieu of the BUSY we wrote
8822     // above, if it is still the same value.
8823     if (_overflow_list == BUSY) {
8824       (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
8825     }
8826   } else {
8827     // Chop off the suffix and return it to the global list.
8828     assert(cur->mark() != BUSY, "Error");
8829     oop suffix_head = cur->mark(); // suffix will be put back on global list
8830     cur->set_mark(NULL);           // break off suffix
8831     // It's possible that the list is still in the empty(busy) state
8832     // we left it in a short while ago; in that case we may be
8833     // able to place back the suffix without incurring the cost
8834     // of a walk down the list.
8835     oop observed_overflow_list = _overflow_list;
8836     oop cur_overflow_list = observed_overflow_list;
8837     bool attached = false;
8838     while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
8839       observed_overflow_list =
8840         (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
8841       if (cur_overflow_list == observed_overflow_list) {
8842         attached = true;
8843         break;
8844       } else cur_overflow_list = observed_overflow_list;
8845     }
8846     if (!attached) {
8847       // Too bad, someone else sneaked in (at least) an element; we'll need
8848       // to do a splice. Find tail of suffix so we can prepend suffix to global
8849       // list.
8850       for (cur = suffix_head; cur->mark() != NULL; cur = (oop)(cur->mark()));
8851       oop suffix_tail = cur;
8852       assert(suffix_tail != NULL && suffix_tail->mark() == NULL,
8853              "Tautology");
8854       observed_overflow_list = _overflow_list;
8855       do {
8856         cur_overflow_list = observed_overflow_list;
8857         if (cur_overflow_list != BUSY) {
8858           // Do the splice ...
8859           suffix_tail->set_mark(markOop(cur_overflow_list));
8860         } else { // cur_overflow_list == BUSY
8861           suffix_tail->set_mark(NULL);
8862         }
8863         // ... and try to place spliced list back on overflow_list ...
8864         observed_overflow_list =
8865           (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
8866       } while (cur_overflow_list != observed_overflow_list);
8867       // ... until we have succeeded in doing so.
8868     }
8869   }
8870 
8871   // Push the prefix elements on work_q
8872   assert(prefix != NULL, "control point invariant");
8873   const markOop proto = markOopDesc::prototype();
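       // While on the overflow list, each object's mark word held the link to
       // the next element (NULL at the tail). Restore the default prototype
       // mark as the objects move to the local work queue; any mark that had
       // to survive was saved by (par_)preserve_mark_if_necessary() at push time.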
8874   oop next;
8875   NOT_PRODUCT(ssize_t n = 0;)
8876   for (cur = prefix; cur != NULL; cur = next) {
8877     next = oop(cur->mark());
8878     cur->set_mark(proto);   // until proven otherwise
8879     assert(cur->is_oop(), "Should be an oop");
8880     bool res = work_q->push(cur);
8881     assert(res, "Bit off more than we can chew?");
8882     NOT_PRODUCT(n++;)
8883   }
8884 #ifndef PRODUCT
8885   assert(_num_par_pushes >= n, "Too many pops?");
8886   Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
8887 #endif
8888   return true;
8889 }
8890 
8891 // Single-threaded
8892 void CMSCollector::push_on_overflow_list(oop p) {
8893   NOT_PRODUCT(_num_par_pushes++;)
8894   assert(p->is_oop(), "Not an oop");
8895   preserve_mark_if_necessary(p);
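       // The overflow list is threaded through the mark word: the old list
       // head becomes this object's link and the object becomes the new head.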
8896   p->set_mark((markOop)_overflow_list);
8897   _overflow_list = p;
8898 }
8899 
8900 // Multi-threaded; use CAS to prepend to overflow list
8901 void CMSCollector::par_push_on_overflow_list(oop p) {
8902   NOT_PRODUCT(Atomic::inc_ptr(&_num_par_pushes);)
8903   assert(p->is_oop(), "Not an oop");
8904   par_preserve_mark_if_necessary(p);
8905   oop observed_overflow_list = _overflow_list;
8906   oop cur_overflow_list;
8907   do {
8908     cur_overflow_list = observed_overflow_list;
8909     if (cur_overflow_list != BUSY) {
8910       p->set_mark(markOop(cur_overflow_list));
8911     } else {
8912       p->set_mark(NULL);
8913     }
8914     observed_overflow_list =
8915       (oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
8916   } while (cur_overflow_list != observed_overflow_list);
8917 }
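
     // The loop above is the classic lock-free "prepend" idiom: link the new
     // element to the head we last observed, then CAS the element in; if some
     // other thread changed the head in the meantime, re-link against the
     // value the CAS observed and retry. (The BUSY sentinel is additionally
     // mapped to NULL here, since it denotes a temporarily claimed, logically
     // empty list.) A rough standalone analogue, an illustration only and not
     // HotSpot code, using C++11 std::atomic:
     //
     //   #include <atomic>
     //   struct Node { Node* next; };
     //   std::atomic<Node*> list_head(nullptr);
     //   void prepend(Node* n) {
     //     Node* observed = list_head.load();
     //     do {
     //       n->next = observed;  // link against the head we last saw
     //     } while (!list_head.compare_exchange_weak(observed, n));
     //   }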
8918 #undef BUSY
8919 
8920 // Single threaded
8921 // General Note on GrowableArray: pushes may silently fail
8922 // because we are (temporarily) out of C-heap for expanding
8923 // the stack. The problem is quite ubiquitous and affects
8924 // a lot of code in the JVM. The prudent thing for GrowableArray
8925 // to do (for now) is to exit with an error. However, that may
8926 // be too draconian in some cases because the caller may be
8927 // able to recover without much harm. For such cases, we
8928 // should probably introduce a "soft_push" method which returns
8929 // an indication of success or failure with the assumption that
8930 // the caller may be able to recover from a failure; code in
8931 // the VM can then be changed, incrementally, to deal with such
8932 // failures where possible, thus incrementally hardening the VM
8933 // for such low-resource situations.
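     //
     // A hypothetical shape for such a method (a sketch only; GrowableArray
     // does not currently provide a soft_push) might be:
     //
     //   // Returns false, rather than exiting the VM, when the C-heap
     //   // allocation needed to grow the backing array cannot be satisfied.
     //   bool soft_push(const E& elem);
     //
     // Callers that can tolerate a failed push could then recover, for
     // example by falling back to an overflow mechanism, instead of aborting.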
8934 void CMSCollector::preserve_mark_work(oop p, markOop m) {
8935   _preserved_oop_stack.push(p);
8936   _preserved_mark_stack.push(m);
8937   assert(m == p->mark(), "Mark word changed");
8938   assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
8939          "bijection");
8940 }
8941 
8942 // Single threaded
8943 void CMSCollector::preserve_mark_if_necessary(oop p) {
8944   markOop m = p->mark();
8945   if (m->must_be_preserved(p)) {
8946     preserve_mark_work(p, m);
8947   }
8948 }
8949 
8950 void CMSCollector::par_preserve_mark_if_necessary(oop p) {
8951   markOop m = p->mark();
8952   if (m->must_be_preserved(p)) {
8953     MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
8954     // Even though we read the mark word without holding
8955     // the lock, we are assured that it will not change
8956     // because we "own" this oop, so no other thread can
8957     // be trying to push it on the overflow list; see
8958     // the assertion in preserve_mark_work() that checks
8959     // that m == p->mark().
8960     preserve_mark_work(p, m);
8961   }
8962 }
8963 
8964 // We should be able to do this multi-threaded,
8965 // a chunk of stack being a task (this is
8966 // correct because each oop only ever appears
8967 // once in the overflow list). However, it's
8968 // not very easy to completely overlap this with
8969 // other operations, so it will generally not be done
8970 // until all work's been completed. Because we
8971 // expect the preserved oop stack (set) to be small,
8972 // it's probably fine to do this single-threaded.
8973 // We can explore cleverer concurrent/overlapped/parallel
8974 // processing of preserved marks if we feel the
8975 // need for this in the future. Stack overflow should
8976 // be so rare in practice and, when it happens, its
8977 // effect on performance so great that this will
8978 // likely just be in the noise anyway.
8979 void CMSCollector::restore_preserved_marks_if_any() {
8980   assert(SafepointSynchronize::is_at_safepoint(),
8981          "world should be stopped");
8982   assert(Thread::current()->is_ConcurrentGC_thread() ||
8983          Thread::current()->is_VM_thread(),
8984          "should be single-threaded");
8985   assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
8986          "bijection");
8987 
8988   while (!_preserved_oop_stack.is_empty()) {
8989     oop p = _preserved_oop_stack.pop();
8990     assert(p->is_oop(), "Should be an oop");
8991     assert(_span.contains(p), "oop should be in _span");
8992     assert(p->mark() == markOopDesc::prototype(),
8993            "Set when taken from overflow list");
8994     markOop m = _preserved_mark_stack.pop();
8995     p->set_mark(m);
8996   }
8997   assert(_preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty(),
8998          "stacks were cleared above");
8999 }
9000 
9001 #ifndef PRODUCT
9002 bool CMSCollector::no_preserved_marks() const {
9003   return _preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty();
9004 }
9005 #endif
9006 
9007 CMSAdaptiveSizePolicy* ASConcurrentMarkSweepGeneration::cms_size_policy() const
9008 {
9009   GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();
9010   CMSAdaptiveSizePolicy* size_policy =
9011     (CMSAdaptiveSizePolicy*) gch->gen_policy()->size_policy();
9012   assert(size_policy->is_gc_cms_adaptive_size_policy(),
9013     "Wrong type for size policy");
9014   return size_policy;
9015 }
9016 
9017 void ASConcurrentMarkSweepGeneration::resize(size_t cur_promo_size,
9018                                              size_t desired_promo_size) {
9019   if (cur_promo_size < desired_promo_size) {
9020     size_t expand_bytes = desired_promo_size - cur_promo_size;
9021     if (PrintAdaptiveSizePolicy && Verbose) {
9022       gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
9023         "Expanding tenured generation by " SIZE_FORMAT " (bytes)",
9024         expand_bytes);
9025     }
9026     expand(expand_bytes,
9027            MinHeapDeltaBytes,
9028            CMSExpansionCause::_adaptive_size_policy);
9029   } else if (desired_promo_size < cur_promo_size) {
9030     size_t shrink_bytes = cur_promo_size - desired_promo_size;
9031     if (PrintAdaptiveSizePolicy && Verbose) {
9032       gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
9033         "Shrinking tenured generation by " SIZE_FORMAT " (bytes)",
9034         shrink_bytes);
9035     }
9036     shrink(shrink_bytes);
9037   }
9038 }
9039 
9040 CMSGCAdaptivePolicyCounters* ASConcurrentMarkSweepGeneration::gc_adaptive_policy_counters() {
9041   GenCollectedHeap* gch = GenCollectedHeap::heap();
9042   CMSGCAdaptivePolicyCounters* counters =
9043     (CMSGCAdaptivePolicyCounters*) gch->collector_policy()->counters();
9044   assert(counters->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
9045     "Wrong kind of counters");
9046   return counters;
9047 }
9048 
9049 
9050 void ASConcurrentMarkSweepGeneration::update_counters() {
9051   if (UsePerfData) {
9052     _space_counters->update_all();
9053     _gen_counters->update_all();
9054     CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
9055     GenCollectedHeap* gch = GenCollectedHeap::heap();
9056     CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
9057     assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
9058       "Wrong gc statistics type");
9059     counters->update_counters(gc_stats_l);
9060   }
9061 }
9062 
9063 void ASConcurrentMarkSweepGeneration::update_counters(size_t used) {
9064   if (UsePerfData) {
9065     _space_counters->update_used(used);
9066     _space_counters->update_capacity();
9067     _gen_counters->update_all();
9068 
9069     CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
9070     GenCollectedHeap* gch = GenCollectedHeap::heap();
9071     CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
9072     assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
9073       "Wrong gc statistics type");
9074     counters->update_counters(gc_stats_l);
9075   }
9076 }
9077 
9078 // The desired expansion delta is computed so that:
9079 // . at least the desired free percentage of the generation remains free
9080 void ASConcurrentMarkSweepGeneration::compute_new_size() {
9081   assert_locked_or_safepoint(Heap_lock);
9082 
9083   GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();
9084 
9085   // If incremental collection failed, we just want to expand
9086   // to the limit.
9087   if (incremental_collection_failed()) {
9088     clear_incremental_collection_failed();
9089     grow_to_reserved();
9090     return;
9091   }
9092 
9093   assert(UseAdaptiveSizePolicy, "Should be using adaptive sizing");
9094 
9095   assert(gch->kind() == CollectedHeap::GenCollectedHeap,
9096     "Wrong type of heap");
9097   int prev_level = level() - 1;
9098   assert(prev_level >= 0, "The CMS generation should not be the lowest generation");
9099   Generation* prev_gen = gch->get_gen(prev_level);
9100   assert(prev_gen->kind() == Generation::ASParNew,
9101     "Wrong type of young generation");
9102   ParNewGeneration* younger_gen = (ParNewGeneration*) prev_gen;
9103   size_t cur_eden = younger_gen->eden()->capacity();
9104   CMSAdaptiveSizePolicy* size_policy = cms_size_policy();
9105   size_t cur_promo = free();
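       // The adaptive policy derives a desired tenured free-space (promo) size
       // from the current free space, the maximum space we could expand to,
       // and the current eden capacity; resize() below then moves this
       // generation toward that desired size.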
9106   size_policy->compute_tenured_generation_free_space(cur_promo,
9107                                                      max_available(),
9108                                                      cur_eden);
9109   resize(cur_promo, size_policy->promo_size());
9110 
9111   // Record the new size of the space in the cms generation
9112   // that is available for promotions.  This is temporary.
9113   // It should be the desired promo size.
9114   size_policy->avg_cms_promo()->sample(free());
9115   size_policy->avg_old_live()->sample(used());
9116 
9117   if (UsePerfData) {
9118     CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
9119     counters->update_cms_capacity_counter(capacity());
9120   }
9121 }
9122 
9123 void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) {
9124   assert_locked_or_safepoint(Heap_lock);
9125   assert_lock_strong(freelistLock());
9126   HeapWord* old_end = _cmsSpace->end();
9127   HeapWord* unallocated_start = _cmsSpace->unallocated_block();
9128   assert(old_end >= unallocated_start, "Miscalculation of unallocated_start");
9129   FreeChunk* chunk_at_end = find_chunk_at_end();
9130   if (chunk_at_end == NULL) {
9131     // No room to shrink
9132     if (PrintGCDetails && Verbose) {
9133       gclog_or_tty->print_cr("No room to shrink: old_end  "
9134         PTR_FORMAT "  unallocated_start  " PTR_FORMAT
9135         " chunk_at_end  " PTR_FORMAT,
9136         old_end, unallocated_start, chunk_at_end);
9137     }
9138     return;
9139   } else {
9140 
9141     // Find the chunk at the end of the space and determine
9142     // how much it can be shrunk.
9143     size_t shrinkable_size_in_bytes = chunk_at_end->size();
9144     size_t aligned_shrinkable_size_in_bytes =
9145       align_size_down(shrinkable_size_in_bytes, os::vm_page_size());
9146     assert(unallocated_start <= chunk_at_end->end(),
9147       "Inconsistent chunk at end of space");
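         // Committed memory is given back in whole pages, so we can shrink by
         // at most the page-aligned size of the trailing free chunk; cap the
         // requested amount accordingly.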
9148     size_t bytes = MIN2(desired_bytes, aligned_shrinkable_size_in_bytes);
9149     size_t word_size_before = heap_word_size(_virtual_space.committed_size());
9150 
9151     // Shrink the underlying space
9152     _virtual_space.shrink_by(bytes);
9153     if (PrintGCDetails && Verbose) {
9154       gclog_or_tty->print_cr("ASConcurrentMarkSweepGeneration::shrink_by:"
9155         " desired_bytes " SIZE_FORMAT
9156         " shrinkable_size_in_bytes " SIZE_FORMAT
9157         " aligned_shrinkable_size_in_bytes " SIZE_FORMAT
9158         "  bytes  " SIZE_FORMAT,
9159         desired_bytes, shrinkable_size_in_bytes,
9160         aligned_shrinkable_size_in_bytes, bytes);
9161       gclog_or_tty->print_cr("          old_end  " PTR_FORMAT
9162         "  unallocated_start  " PTR_FORMAT,
9163         old_end, unallocated_start);
9164     }
9165 
9166     // If the space did shrink (shrinking is not guaranteed),
9167     // shrink the chunk at the end by the appropriate amount.
9168     if (((HeapWord*)_virtual_space.high()) < old_end) {
9169       size_t new_word_size =
9170         heap_word_size(_virtual_space.committed_size());
9171 
9172       // Have to remove the chunk from the dictionary because it is changing
9173       // size and might belong at a different place in the dictionary.
9174 
9175       // Get the chunk at end, shrink it, and put it
9176       // back.
9177       _cmsSpace->removeChunkFromDictionary(chunk_at_end);
9178       size_t word_size_change = word_size_before - new_word_size;
9179       size_t chunk_at_end_old_size = chunk_at_end->size();
9180       assert(chunk_at_end_old_size >= word_size_change,
9181         "Shrink is too large");
9182       chunk_at_end->set_size(chunk_at_end_old_size - word_size_change);
9184       _cmsSpace->freed((HeapWord*) chunk_at_end->end(),
9185         word_size_change);
9186 
9187       _cmsSpace->returnChunkToDictionary(chunk_at_end);
9188 
9189       MemRegion mr(_cmsSpace->bottom(), new_word_size);
9190       _bts->resize(new_word_size);  // resize the block offset shared array
9191       Universe::heap()->barrier_set()->resize_covered_region(mr);
9192       _cmsSpace->assert_locked();
9193       _cmsSpace->set_end((HeapWord*)_virtual_space.high());
9194 
9195       NOT_PRODUCT(_cmsSpace->dictionary()->verify());
9196 
9197       // update the space and generation capacity counters
9198       if (UsePerfData) {
9199         _space_counters->update_capacity();
9200         _gen_counters->update_all();
9201       }
9202 
9203       if (Verbose && PrintGCDetails) {
9204         size_t new_mem_size = _virtual_space.committed_size();
9205         size_t old_mem_size = new_mem_size + bytes;
9206         gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K by "
9207           SIZE_FORMAT "K to " SIZE_FORMAT "K",
               name(), old_mem_size/K, bytes/K, new_mem_size/K);
9208       }
9209     }
9210 
9211     assert(_cmsSpace->unallocated_block() <= _cmsSpace->end(),
9212       "Inconsistency at end of space");
9213     assert(chunk_at_end->end() == _cmsSpace->end(),
9214       "Shrinking is inconsistent");
9215     return;
9216   }
9217 }
9218 
9219 // Transfer some number of overflown objects to usual marking
9220 // stack. Return true if some objects were transferred.
9221 bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
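       // Claim at most a quarter of the remaining mark stack capacity, and in
       // any case no more than ParGCDesiredObjsFromOverflowList objects.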
9222   size_t num = MIN2((size_t)(_mark_stack->capacity() - _mark_stack->length())/4,
9223                     (size_t)ParGCDesiredObjsFromOverflowList);
9224 
9225   bool res = _collector->take_from_overflow_list(num, _mark_stack);
9226   assert(_collector->overflow_list_is_empty() || res,
9227          "If list is not empty, we should have taken something");
9228   assert(!res || !_mark_stack->isEmpty(),
9229          "If we took something, it should now be on our stack");
9230   return res;
9231 }
9232 
9233 size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
9234   size_t res = _sp->block_size_no_stall(addr, _collector);
9235   if (_sp->block_is_obj(addr)) {
9236     if (_live_bit_map->isMarked(addr)) {
9237       // It can't have been dead in a previous cycle
9238       guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");
9239     } else {
9240       _dead_bit_map->mark(addr);      // mark the dead object
9241     }
9242   }
9243   // Could be 0, if the block size could not be computed without stalling.
9244   return res;
9245 }
9246 
9247 TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase,
                                                             GCCause::Cause cause) :
       TraceMemoryManagerStats() {
9248 
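       // Bracket the phases of a CMS cycle for the memory manager:
       // InitialMarking opens the collection (records the GC begin time,
       // pre-GC usage, and accumulated GC time); FinalMarking only adds to
       // the accumulated GC time; Sweeping closes it (records peak and
       // post-GC usage and the GC end time, and counts the collection).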
9249   switch (phase) {
9250     case CMSCollector::InitialMarking:
9251       initialize(true  /* fullGC */ ,
9252                  cause /* cause of the GC */,
9253                  true  /* recordGCBeginTime */,
9254                  true  /* recordPreGCUsage */,
9255                  false /* recordPeakUsage */,
9256                  false /* recordPostGCusage */,
9257                  true  /* recordAccumulatedGCTime */,
9258                  false /* recordGCEndTime */,
9259                  false /* countCollection */  );
9260       break;
9261 
9262     case CMSCollector::FinalMarking:
9263       initialize(true  /* fullGC */ ,
9264                  cause /* cause of the GC */,
9265                  false /* recordGCBeginTime */,
9266                  false /* recordPreGCUsage */,
9267                  false /* recordPeakUsage */,
9268                  false /* recordPostGCusage */,
9269                  true  /* recordAccumulatedGCTime */,
9270                  false /* recordGCEndTime */,
9271                  false /* countCollection */  );
9272       break;
9273 
9274     case CMSCollector::Sweeping:
9275       initialize(true  /* fullGC */ ,
9276                  cause /* cause of the GC */,
9277                  false /* recordGCBeginTime */,
9278                  false /* recordPreGCUsage */,
9279                  true  /* recordPeakUsage */,
9280                  true  /* recordPostGCusage */,
9281                  false /* recordAccumulatedGCTime */,
9282                  true  /* recordGCEndTime */,
9283                  true  /* countCollection */  );
9284       break;
9285 
9286     default:
9287       ShouldNotReachHere();
9288   }
9289 }
9290