1 /*
   2  * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/classLoaderDataGraph.hpp"
  27 #include "classfile/stringTable.hpp"
  28 #include "classfile/symbolTable.hpp"
  29 #include "classfile/systemDictionary.hpp"
  30 #include "code/codeCache.hpp"
  31 #include "gc/cms/cmsCollectorPolicy.hpp"
  32 #include "gc/cms/cmsGCStats.hpp"
  33 #include "gc/cms/cmsHeap.hpp"
  34 #include "gc/cms/cmsOopClosures.inline.hpp"
  35 #include "gc/cms/cmsVMOperations.hpp"
  36 #include "gc/cms/compactibleFreeListSpace.hpp"
  37 #include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
  38 #include "gc/cms/concurrentMarkSweepThread.hpp"
  39 #include "gc/cms/parNewGeneration.hpp"
  40 #include "gc/cms/promotionInfo.inline.hpp"
  41 #include "gc/serial/genMarkSweep.hpp"
  42 #include "gc/serial/tenuredGeneration.hpp"
  43 #include "gc/shared/adaptiveSizePolicy.hpp"
  44 #include "gc/shared/cardGeneration.inline.hpp"
  45 #include "gc/shared/cardTableRS.hpp"
  46 #include "gc/shared/collectedHeap.inline.hpp"
  47 #include "gc/shared/collectorCounters.hpp"
  48 #include "gc/shared/collectorPolicy.hpp"
  49 #include "gc/shared/gcLocker.hpp"
  50 #include "gc/shared/gcPolicyCounters.hpp"
  51 #include "gc/shared/gcTimer.hpp"
  52 #include "gc/shared/gcTrace.hpp"
  53 #include "gc/shared/gcTraceTime.inline.hpp"
  54 #include "gc/shared/genCollectedHeap.hpp"
  55 #include "gc/shared/genOopClosures.inline.hpp"
  56 #include "gc/shared/isGCActiveMark.hpp"
  57 #include "gc/shared/oopStorageParState.hpp"
  58 #include "gc/shared/owstTaskTerminator.hpp"
  59 #include "gc/shared/referencePolicy.hpp"
  60 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
  61 #include "gc/shared/space.inline.hpp"
  62 #include "gc/shared/strongRootsScope.hpp"
  63 #include "gc/shared/taskqueue.inline.hpp"
  64 #include "gc/shared/weakProcessor.hpp"
  65 #include "gc/shared/workerPolicy.hpp"
  66 #include "logging/log.hpp"
  67 #include "logging/logStream.hpp"
  68 #include "memory/allocation.hpp"
  69 #include "memory/binaryTreeDictionary.inline.hpp"
  70 #include "memory/iterator.inline.hpp"
  71 #include "memory/padded.hpp"
  72 #include "memory/resourceArea.hpp"
  73 #include "oops/access.inline.hpp"
  74 #include "oops/oop.inline.hpp"
  75 #include "prims/jvmtiExport.hpp"
  76 #include "runtime/atomic.hpp"
  77 #include "runtime/flags/flagSetting.hpp"
  78 #include "runtime/globals_extension.hpp"
  79 #include "runtime/handles.inline.hpp"
  80 #include "runtime/java.hpp"
  81 #include "runtime/orderAccess.hpp"
  82 #include "runtime/timer.hpp"
  83 #include "runtime/vmThread.hpp"
  84 #include "services/memoryService.hpp"
  85 #include "services/runtimeService.hpp"
  86 #include "utilities/align.hpp"
  87 #include "utilities/stack.inline.hpp"
  88 
  89 // statics
  90 CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
  91 bool CMSCollector::_full_gc_requested = false;
  92 GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc;
  93 
  94 //////////////////////////////////////////////////////////////////
  95 // In support of CMS/VM thread synchronization
  96 //////////////////////////////////////////////////////////////////
  97 // We split use of the CGC_lock into 2 "levels".
  98 // The low-level locking is of the usual CGC_lock monitor. We introduce
  99 // a higher level "token" (hereafter "CMS token") built on top of the
 100 // low level monitor (hereafter "CGC lock").
 101 // The token-passing protocol gives priority to the VM thread. The
 102 // CMS-lock doesn't provide any fairness guarantees, but clients
 103 // should ensure that it is only held for very short, bounded
 104 // durations.
 105 //
 106 // When either of the CMS thread or the VM thread is involved in
 107 // collection operations during which it does not want the other
 108 // thread to interfere, it obtains the CMS token.
 109 //
 110 // If either thread tries to get the token while the other has
 111 // it, that thread waits. However, if the VM thread and CMS thread
 112 // both want the token, then the VM thread gets priority while the
 113 // CMS thread waits. This ensures, for instance, that the "concurrent"
 114 // phases of the CMS thread's work do not block out the VM thread
 115 // for long periods of time as the CMS thread continues to hog
 116 // the token. (See bug 4616232).
 117 //
 118 // The baton-passing functions are, however, controlled by the
 119 // flags _foregroundGCShouldWait and _foregroundGCIsActive,
 120 // and here the low-level CMS lock, not the high level token,
 121 // ensures mutual exclusion.
 122 //
 123 // Two important conditions that we have to satisfy:
 124 // 1. if a thread does a low-level wait on the CMS lock, then it
 125 //    relinquishes the CMS token if it were holding that token
 126 //    when it acquired the low-level CMS lock.
 127 // 2. any low-level notifications on the low-level lock
 128 //    should only be sent when a thread has relinquished the token.
 129 //
 130 // In the absence of either property, we'd have potential deadlock.
 131 //
 132 // We protect each of the CMS (concurrent and sequential) phases
 133 // with the CMS _token_, not the CMS _lock_.
 134 //
 135 // The only code protected by CMS lock is the token acquisition code
 136 // itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the
 137 // baton-passing code.
 138 //
 139 // Unfortunately, I couldn't come up with a good abstraction to factor and
 140 // hide the naked CGC_lock manipulation in the baton-passing code
 141 // further below. That's something we should try to do. Also, the proof
 142 // of correctness of this 2-level locking scheme is far from obvious,
 143 // and potentially quite slippery. We have an uneasy suspicion, for instance,
 144 // that there may be a theoretical possibility of delay/starvation in the
 145 // low-level lock/wait/notify scheme used for the baton-passing because of
 146 // potential interference with the priority scheme embodied in the
 147 // CMS-token-passing protocol. See related comments at a CGC_lock->wait()
 148 // invocation further below and marked with "XXX 20011219YSR".
 149 // Indeed, as we note elsewhere, this may become yet more slippery
 150 // in the presence of multiple CMS and/or multiple VM threads. XXX
 151 
 152 class CMSTokenSync: public StackObj {
 153  private:
 154   bool _is_cms_thread;
 155  public:
 156   CMSTokenSync(bool is_cms_thread):
 157     _is_cms_thread(is_cms_thread) {
 158     assert(is_cms_thread == Thread::current()->is_ConcurrentGC_thread(),
 159            "Incorrect argument to constructor");
 160     ConcurrentMarkSweepThread::synchronize(_is_cms_thread);
 161   }
 162 
 163   ~CMSTokenSync() {
 164     assert(_is_cms_thread ?
 165              ConcurrentMarkSweepThread::cms_thread_has_cms_token() :
 166              ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
 167           "Incorrect state");
 168     ConcurrentMarkSweepThread::desynchronize(_is_cms_thread);
 169   }
 170 };
 171 
 172 // Convenience class that does a CMSTokenSync, and then acquires
 173 // up to three locks.
 174 class CMSTokenSyncWithLocks: public CMSTokenSync {
 175  private:
 176   // Note: locks are acquired in textual declaration order
 177   // and released in the opposite order
 178   MutexLockerEx _locker1, _locker2, _locker3;
 179  public:
 180   CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
 181                         Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
 182     CMSTokenSync(is_cms_thread),
 183     _locker1(mutex1, Mutex::_no_safepoint_check_flag),
 184     _locker2(mutex2, Mutex::_no_safepoint_check_flag),
 185     _locker3(mutex3, Mutex::_no_safepoint_check_flag)
 186   { }
 187 };
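// Illustrative usage sketch only (not a prescribed pattern): a phase run on
// the CMS thread that must exclude the VM thread while also holding the
// mark bit map lock might be bracketed as
//
//   {
//     CMSTokenSyncWithLocks ts(true /* is_cms_thread */, bitMapLock());
//     // ... work requiring the CMS token and the bit map lock ...
//   }
//
// On scope exit the member lockers release their mutexes in reverse
// declaration order and the base class then relinquishes the CMS token.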
 188 
 189 
 190 //////////////////////////////////////////////////////////////////
 191 //  Concurrent Mark-Sweep Generation /////////////////////////////
 192 //////////////////////////////////////////////////////////////////
 193 
 194 NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)
 195 
 196 // This struct contains per-thread things necessary to support parallel
 197 // young-gen collection.
 198 class CMSParGCThreadState: public CHeapObj<mtGC> {
 199  public:
 200   CompactibleFreeListSpaceLAB lab;
 201   PromotionInfo promo;
 202 
 203   // Constructor.
 204   CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
 205     promo.setSpace(cfls);
 206   }
 207 };
 208 
 209 ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
 210      ReservedSpace rs, size_t initial_byte_size, CardTableRS* ct) :
 211   CardGeneration(rs, initial_byte_size, ct),
 212   _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
 213   _did_compact(false)
 214 {
 215   HeapWord* bottom = (HeapWord*) _virtual_space.low();
 216   HeapWord* end    = (HeapWord*) _virtual_space.high();
 217 
 218   _direct_allocated_words = 0;
 219   NOT_PRODUCT(
 220     _numObjectsPromoted = 0;
 221     _numWordsPromoted = 0;
 222     _numObjectsAllocated = 0;
 223     _numWordsAllocated = 0;
 224   )
 225 
 226   _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end));
 227   NOT_PRODUCT(debug_cms_space = _cmsSpace;)
 228   _cmsSpace->_old_gen = this;
 229 
 230   _gc_stats = new CMSGCStats();
 231 
 232   // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
 233   // offsets match. The ability to tell free chunks from objects
 234   // depends on this property.
 235   debug_only(
 236     FreeChunk* junk = NULL;
 237     assert(UseCompressedClassPointers ||
 238            junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
 239            "Offset of FreeChunk::_prev within FreeChunk must match"
 240            "  that of OopDesc::_klass within OopDesc");
 241   )
 242 
 243   _par_gc_thread_states = NEW_C_HEAP_ARRAY(CMSParGCThreadState*, ParallelGCThreads, mtGC);
 244   for (uint i = 0; i < ParallelGCThreads; i++) {
 245     _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
 246   }
 247 
 248   _incremental_collection_failed = false;
 249   // The "dilatation_factor" is the expansion that can occur on
 250   // account of the fact that the minimum object size in the CMS
 251   // generation may be larger than that in, say, a contiguous young
 252   // generation.
 253   // Ideally, in the calculation below, we'd compute the dilatation
 254   // factor as: MinChunkSize/(promoting_gen's min object size)
 255   // Since we do not have such a general query interface for the
 256   // promoting generation, we'll instead just use the minimum
 257   // object size (which today is a header's worth of space);
 258   // note that all arithmetic is in units of HeapWords.
 259   assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
 260   assert(_dilatation_factor >= 1.0, "from previous assert");
 261 }
 262 
 263 
 264 // The field "_initiating_occupancy" represents the occupancy percentage
 265 // at which we trigger a new collection cycle.  Unless explicitly specified
 266 // via CMSInitiatingOccupancyFraction (argument "io" below), it
 267 // is calculated by:
 268 //
 269 //   Let "f" be MinHeapFreeRatio in
 270 //
 271 //    _initiating_occupancy = 100-f +
 272 //                           f * (CMSTriggerRatio/100)
 273 //   where CMSTriggerRatio is the argument "tr" below.
 274 //
 275 // That is, if we assume the heap is at its desired maximum occupancy at the
 276 // end of a collection, we let CMSTriggerRatio of the (purported) free
 277 // space be allocated before initiating a new collection cycle.
 278 //
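// A worked example with illustrative values: if MinHeapFreeRatio (f) is 40
// and CMSTriggerRatio (tr) is 80, then
//   _initiating_occupancy = (100 - 40) + 40 * (80/100) = 92,
// i.e. a new cycle is initiated once the generation is about 92% occupied
// (the code below stores this as the fraction 0.92).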
 279 void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, uintx tr) {
 280   assert(io <= 100 && tr <= 100, "Check the arguments");
 281   if (io >= 0) {
 282     _initiating_occupancy = (double)io / 100.0;
 283   } else {
 284     _initiating_occupancy = ((100 - MinHeapFreeRatio) +
 285                              (double)(tr * MinHeapFreeRatio) / 100.0)
 286                             / 100.0;
 287   }
 288 }
 289 
 290 void ConcurrentMarkSweepGeneration::ref_processor_init() {
 291   assert(collector() != NULL, "no collector");
 292   collector()->ref_processor_init();
 293 }
 294 
 295 void CMSCollector::ref_processor_init() {
 296   if (_ref_processor == NULL) {
 297     // Allocate and initialize a reference processor
 298     _ref_processor =
 299       new ReferenceProcessor(&_span_based_discoverer,
 300                              (ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing
 301                              ParallelGCThreads,                      // mt processing degree
 302                              _cmsGen->refs_discovery_is_mt(),        // mt discovery
 303                              MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
 304                              _cmsGen->refs_discovery_is_atomic(),    // discovery is not atomic
 305                              &_is_alive_closure,                     // closure for liveness info
 306                              false);                                 // disable adjusting number of processing threads
 307     // Initialize the _ref_processor field of CMSGen
 308     _cmsGen->set_ref_processor(_ref_processor);
 309 
 310   }
 311 }
 312 
 313 AdaptiveSizePolicy* CMSCollector::size_policy() {
 314   return CMSHeap::heap()->size_policy();
 315 }
 316 
 317 void ConcurrentMarkSweepGeneration::initialize_performance_counters() {
 318 
 319   const char* gen_name = "old";
 320   GenCollectorPolicy* gcp = CMSHeap::heap()->gen_policy();
 321   // Generation Counters - generation 1, 1 subspace
 322   _gen_counters = new GenerationCounters(gen_name, 1, 1,
 323       gcp->min_old_size(), gcp->max_old_size(), &_virtual_space);
 324 
 325   _space_counters = new GSpaceCounters(gen_name, 0,
 326                                        _virtual_space.reserved_size(),
 327                                        this, _gen_counters);
 328 }
 329 
 330 CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
 331   _cms_gen(cms_gen)
 332 {
 333   assert(alpha <= 100, "bad value");
 334   _saved_alpha = alpha;
 335 
 336   // Initialize the alphas to the bootstrap value of 100.
 337   _gc0_alpha = _cms_alpha = 100;
 338 
 339   _cms_begin_time.update();
 340   _cms_end_time.update();
 341 
 342   _gc0_duration = 0.0;
 343   _gc0_period = 0.0;
 344   _gc0_promoted = 0;
 345 
 346   _cms_duration = 0.0;
 347   _cms_period = 0.0;
 348   _cms_allocated = 0;
 349 
 350   _cms_used_at_gc0_begin = 0;
 351   _cms_used_at_gc0_end = 0;
 352   _allow_duty_cycle_reduction = false;
 353   _valid_bits = 0;
 354 }
 355 
 356 double CMSStats::cms_free_adjustment_factor(size_t free) const {
 357   // TBD: CR 6909490
 358   return 1.0;
 359 }
 360 
 361 void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
 362 }
 363 
 364 // If promotion failure handling is on, use
 365 // the padded average size of the promotion for each
 366 // young generation collection.
 367 double CMSStats::time_until_cms_gen_full() const {
 368   size_t cms_free = _cms_gen->cmsSpace()->free();
 369   CMSHeap* heap = CMSHeap::heap();
 370   size_t expected_promotion = MIN2(heap->young_gen()->capacity(),
 371                                    (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
 372   if (cms_free > expected_promotion) {
 373     // Start a cms collection if there isn't enough space to promote
 374     // for the next young collection.  Use the padded average as
 375     // a safety factor.
 376     cms_free -= expected_promotion;
 377 
 378     // Adjust by the safety factor.
 379     double cms_free_dbl = (double)cms_free;
 380     double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor) / 100.0;
 381     // Apply a further correction factor which tries to adjust
 382     // for recent occurrence of concurrent mode failures.
 383     cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
 384     cms_free_dbl = cms_free_dbl * cms_adjustment;
 385 
 386     log_trace(gc)("CMSStats::time_until_cms_gen_full: cms_free " SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
 387                   cms_free, expected_promotion);
 388     log_trace(gc)("  cms_free_dbl %f cms_consumption_rate %f", cms_free_dbl, cms_consumption_rate() + 1.0);
 389     // Add 1 in case the consumption rate goes to zero.
 390     return cms_free_dbl / (cms_consumption_rate() + 1.0);
 391   }
 392   return 0.0;
 393 }
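// A worked example with made-up numbers: with cms_free = 200 units and
// expected_promotion = 50 units, and assuming CMSIncrementalSafetyFactor
// is 10, the adjusted free space is (200 - 50) * 0.90 = 135 units; at a
// cms_consumption_rate() of 10 units/sec this predicts roughly
// 135 / (10 + 1) ~= 12.3 seconds until the generation fills up (the "+ 1"
// guards against a consumption rate of zero).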
 394 
 395 // Compare the duration of the cms collection to the
 396 // time remaining before the cms generation's free space
 397 // is exhausted.
 398 // Note that the time from the start of the cms collection
 399 // to the start of the cms sweep (less than the total
 400 // duration of the cms collection) could be used instead.
 401 // This has been tried, and some applications experienced
 402 // promotion failures early in execution, possibly because
 403 // the averages were not accurate enough at the beginning.
 404 double CMSStats::time_until_cms_start() const {
 405   // We add "gc0_period" to the "work" calculation
 406   // below because this query is done (mostly) at the
 407   // end of a scavenge, so we need to conservatively
 408   // account for that much possible delay
 409   // in the query so as to avoid concurrent mode failures
 410   // due to starting the collection just a wee bit too
 411   // late.
 412   double work = cms_duration() + gc0_period();
 413   double deadline = time_until_cms_gen_full();
 414   // If a concurrent mode failure occurred recently, we want to be
 415   // more conservative and halve our expected time_until_cms_gen_full()
 416   if (work > deadline) {
 417     log_develop_trace(gc)("CMSCollector: collect because of anticipated promotion before full %3.7f + %3.7f > %3.7f ",
 418                           cms_duration(), gc0_period(), time_until_cms_gen_full());
 419     return 0.0;
 420   }
 421   return work - deadline;
 422 }
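// A worked example with made-up numbers: if a concurrent cycle typically
// takes cms_duration() = 4.0s and scavenges arrive every gc0_period() = 1.0s,
// then work = 5.0s. While time_until_cms_gen_full() stays above 5.0s there
// is still slack; once it drops below 5.0s this method returns 0.0, which
// shouldConcurrentCollect() treats as "start a cycle now".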
 423 
 424 #ifndef PRODUCT
 425 void CMSStats::print_on(outputStream *st) const {
 426   st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
 427   st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
 428                gc0_duration(), gc0_period(), gc0_promoted());
 429   st->print(",cms_dur=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
 430             cms_duration(), cms_period(), cms_allocated());
 431   st->print(",cms_since_beg=%g,cms_since_end=%g",
 432             cms_time_since_begin(), cms_time_since_end());
 433   st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
 434             _cms_used_at_gc0_begin, _cms_used_at_gc0_end);
 435 
 436   if (valid()) {
 437     st->print(",promo_rate=%g,cms_alloc_rate=%g",
 438               promotion_rate(), cms_allocation_rate());
 439     st->print(",cms_consumption_rate=%g,time_until_full=%g",
 440               cms_consumption_rate(), time_until_cms_gen_full());
 441   }
 442   st->cr();
 443 }
 444 #endif // #ifndef PRODUCT
 445 
 446 CMSCollector::CollectorState CMSCollector::_collectorState =
 447                              CMSCollector::Idling;
 448 bool CMSCollector::_foregroundGCIsActive = false;
 449 bool CMSCollector::_foregroundGCShouldWait = false;
 450 
 451 CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
 452                            CardTableRS*                   ct,
 453                            ConcurrentMarkSweepPolicy*     cp):
 454   _overflow_list(NULL),
 455   _conc_workers(NULL),     // may be set later
 456   _completed_initialization(false),
 457   _collection_count_start(0),
 458   _should_unload_classes(CMSClassUnloadingEnabled),
 459   _concurrent_cycles_since_last_unload(0),
 460   _roots_scanning_options(GenCollectedHeap::SO_None),
 461   _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
 462   _verifying(false),
 463   _collector_policy(cp),
 464   _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
 465   _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
 466   _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
 467   _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
 468   _cms_start_registered(false),
 469   _cmsGen(cmsGen),
 470   // Adjust span to cover old (cms) gen
 471   _span(cmsGen->reserved()),
 472   _ct(ct),
 473   _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
 474   _modUnionTable((CardTable::card_shift - LogHeapWordSize),
 475                  -1 /* lock-free */, "No_lock" /* dummy */),
 476   _restart_addr(NULL),
 477   _ser_pmc_preclean_ovflw(0),
 478   _ser_pmc_remark_ovflw(0),
 479   _par_pmc_remark_ovflw(0),
 480   _ser_kac_preclean_ovflw(0),
 481   _ser_kac_ovflw(0),
 482   _par_kac_ovflw(0),
 483 #ifndef PRODUCT
 484   _num_par_pushes(0),
 485 #endif
 486   _span_based_discoverer(_span),
 487   _ref_processor(NULL),    // will be set later
 488   // Construct the is_alive_closure with _span & markBitMap
 489   _is_alive_closure(_span, &_markBitMap),
 490   _modUnionClosurePar(&_modUnionTable),
 491   _between_prologue_and_epilogue(false),
 492   _abort_preclean(false),
 493   _start_sampling(false),
 494   _stats(cmsGen),
 495   _eden_chunk_lock(new Mutex(Mutex::leaf + 1, "CMS_eden_chunk_lock", true,
 496                              // verify that this lock should be acquired with safepoint check.
 497                              Monitor::_safepoint_check_sometimes)),
 498   _eden_chunk_array(NULL),     // may be set in ctor body
 499   _eden_chunk_index(0),        // -- ditto --
 500   _eden_chunk_capacity(0),     // -- ditto --
 501   _survivor_chunk_array(NULL), // -- ditto --
 502   _survivor_chunk_index(0),    // -- ditto --
 503   _survivor_chunk_capacity(0), // -- ditto --
 504   _survivor_plab_array(NULL)   // -- ditto --
 505 {
 506   // Now expand the span and allocate the collection support structures
 507   // (MUT, marking bit map etc.) to cover the generation subject to
 508   // collection.
 509 
 510   // For use by dirty card to oop closures.
 511   _cmsGen->cmsSpace()->set_collector(this);
 512 
 513   // Allocate MUT and marking bit map
 514   {
 515     MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
 516     if (!_markBitMap.allocate(_span)) {
 517       log_warning(gc)("Failed to allocate CMS Bit Map");
 518       return;
 519     }
 520     assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
 521   }
 522   {
 523     _modUnionTable.allocate(_span);
 524     assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
 525   }
 526 
 527   if (!_markStack.allocate(MarkStackSize)) {
 528     log_warning(gc)("Failed to allocate CMS Marking Stack");
 529     return;
 530   }
 531 
 532   // Support for multi-threaded concurrent phases
 533   if (CMSConcurrentMTEnabled) {
 534     if (FLAG_IS_DEFAULT(ConcGCThreads)) {
 535       // just for now
 536       FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3) / 4);
 537     }
 538     if (ConcGCThreads > 1) {
 539       _conc_workers = new YieldingFlexibleWorkGang("CMS Thread",
 540                                  ConcGCThreads, true);
 541       if (_conc_workers == NULL) {
 542         log_warning(gc)("GC/CMS: _conc_workers allocation failure: forcing -CMSConcurrentMTEnabled");
 543         CMSConcurrentMTEnabled = false;
 544       } else {
 545         _conc_workers->initialize_workers();
 546       }
 547     } else {
 548       CMSConcurrentMTEnabled = false;
 549     }
 550   }
 551   if (!CMSConcurrentMTEnabled) {
 552     ConcGCThreads = 0;
 553   } else {
 554     // Turn off CMSCleanOnEnter optimization temporarily for
 555     // the MT case where it's not fixed yet; see 6178663.
 556     CMSCleanOnEnter = false;
 557   }
 558   assert((_conc_workers != NULL) == (ConcGCThreads > 1),
 559          "Inconsistency");
 560   log_debug(gc)("ConcGCThreads: %u", ConcGCThreads);
 561   log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);
 562 
 563   // Parallel task queues; these are shared for the
 564   // concurrent and stop-world phases of CMS, but
 565   // are not shared with parallel scavenge (ParNew).
 566   {
 567     uint i;
 568     uint num_queues = MAX2(ParallelGCThreads, ConcGCThreads);
 569 
 570     if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
 571          || ParallelRefProcEnabled)
 572         && num_queues > 0) {
 573       _task_queues = new OopTaskQueueSet(num_queues);
 574       if (_task_queues == NULL) {
 575         log_warning(gc)("task_queues allocation failure.");
 576         return;
 577       }
 578       typedef Padded<OopTaskQueue> PaddedOopTaskQueue;
 579       for (i = 0; i < num_queues; i++) {
 580         PaddedOopTaskQueue *q = new PaddedOopTaskQueue();
 581         if (q == NULL) {
 582           log_warning(gc)("work_queue allocation failure.");
 583           return;
 584         }
 585         _task_queues->register_queue(i, q);
 586       }
 587       for (i = 0; i < num_queues; i++) {
 588         _task_queues->queue(i)->initialize();
 589       }
 590     }
 591   }
 592 
 593   _cmsGen->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
 594 
 595   // CMSBootstrapOccupancy is a percentage (0..100); store it as a fraction.
 596   _bootstrap_occupancy = CMSBootstrapOccupancy / 100.0;
 597 
 598   // Now tell CMS generations the identity of their collector
 599   ConcurrentMarkSweepGeneration::set_collector(this);
 600 
 601   // Create & start a CMS thread for this CMS collector
 602   _cmsThread = ConcurrentMarkSweepThread::start(this);
 603   assert(cmsThread() != NULL, "CMS Thread should have been created");
 604   assert(cmsThread()->collector() == this,
 605          "CMS Thread should refer to this gen");
 606   assert(CGC_lock != NULL, "Where's the CGC_lock?");
 607 
 608   // Support for parallelizing young gen rescan
 609   CMSHeap* heap = CMSHeap::heap();
 610   _young_gen = heap->young_gen();
 611   if (heap->supports_inline_contig_alloc()) {
 612     _top_addr = heap->top_addr();
 613     _end_addr = heap->end_addr();
 614     assert(_young_gen != NULL, "no _young_gen");
 615     _eden_chunk_index = 0;
 616     _eden_chunk_capacity = (_young_gen->max_capacity() + CMSSamplingGrain) / CMSSamplingGrain;
 617     _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
 618   }
 619 
 620   // Support for parallelizing survivor space rescan
 621   if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
 622     const size_t max_plab_samples =
 623       _young_gen->max_survivor_size() / (PLAB::min_size() * HeapWordSize);
 624 
 625     _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
 626     _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
 627     _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
 628     _survivor_chunk_capacity = max_plab_samples;
 629     for (uint i = 0; i < ParallelGCThreads; i++) {
 630       HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
 631       ChunkArray* cur = ::new (&_survivor_plab_array[i]) ChunkArray(vec, max_plab_samples);
 632       assert(cur->end() == 0, "Should be 0");
 633       assert(cur->array() == vec, "Should be vec");
 634       assert(cur->capacity() == max_plab_samples, "Error");
 635     }
 636   }
 637 
 638   NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
 639   _gc_counters = new CollectorCounters("CMS", 1);
 640   _cgc_counters = new CollectorCounters("CMS stop-the-world phases", 2);
 641   _completed_initialization = true;
 642   _inter_sweep_timer.start();  // start of time
 643 }
 644 
 645 const char* ConcurrentMarkSweepGeneration::name() const {
 646   return "concurrent mark-sweep generation";
 647 }
 648 void ConcurrentMarkSweepGeneration::update_counters() {
 649   if (UsePerfData) {
 650     _space_counters->update_all();
 651     _gen_counters->update_all();
 652   }
 653 }
 654 
 655 // This is an optimized version of update_counters(); it takes the
 656 // used value as a parameter rather than computing it.
 657 //
 658 void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
 659   if (UsePerfData) {
 660     _space_counters->update_used(used);
 661     _space_counters->update_capacity();
 662     _gen_counters->update_all();
 663   }
 664 }
 665 
 666 void ConcurrentMarkSweepGeneration::print() const {
 667   Generation::print();
 668   cmsSpace()->print();
 669 }
 670 
 671 #ifndef PRODUCT
 672 void ConcurrentMarkSweepGeneration::print_statistics() {
 673   cmsSpace()->printFLCensus(0);
 674 }
 675 #endif
 676 
 677 size_t
 678 ConcurrentMarkSweepGeneration::contiguous_available() const {
 679   // dld proposes an improvement in precision here. If the committed
 680   // part of the space ends in a free block we should add that to
 681   // uncommitted size in the calculation below. Will make this
 682   // change later, staying with the approximation below for the
 683   // time being. -- ysr.
 684   return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
 685 }
 686 
 687 size_t
 688 ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
 689   return _cmsSpace->max_alloc_in_words() * HeapWordSize;
 690 }
 691 
 692 size_t ConcurrentMarkSweepGeneration::max_available() const {
 693   return free() + _virtual_space.uncommitted_size();
 694 }
 695 
 696 bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
 697   size_t available = max_available();
 698   size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
 699   bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
 700   log_trace(gc, promotion)("CMS: promo attempt is%s safe: available(" SIZE_FORMAT ") %s av_promo(" SIZE_FORMAT "), max_promo(" SIZE_FORMAT ")",
 701                            res? "":" not", available, res? ">=":"<", av_promo, max_promotion_in_bytes);
 702   return res;
 703 }
 704 
 705 // At a promotion failure, dump information on the block layout in the heap
 706 // (cms old generation).
 707 void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
 708   Log(gc, promotion) log;
 709   if (log.is_trace()) {
 710     LogStream ls(log.trace());
 711     cmsSpace()->dump_at_safepoint_with_locks(collector(), &ls);
 712   }
 713 }
 714 
 715 void ConcurrentMarkSweepGeneration::reset_after_compaction() {
 716   // Clear the promotion information.  These pointers can be adjusted
 717   // along with all the other pointers into the heap, but
 718   // compaction is expected to be a rare event with
 719   // a heap using cms, so don't do it without seeing the need.
 720   for (uint i = 0; i < ParallelGCThreads; i++) {
 721     _par_gc_thread_states[i]->promo.reset();
 722   }
 723 }
 724 
 725 void ConcurrentMarkSweepGeneration::compute_new_size() {
 726   assert_locked_or_safepoint(Heap_lock);
 727 
 728   // If incremental collection failed, we just want to expand
 729   // to the limit.
 730   if (incremental_collection_failed()) {
 731     clear_incremental_collection_failed();
 732     grow_to_reserved();
 733     return;
 734   }
 735 
 736   // The heap has been compacted but not reset yet.
 737   // Any metric such as free() or used() will be incorrect.
 738 
 739   CardGeneration::compute_new_size();
 740 
 741   // Reset again after a possible resizing
 742   if (did_compact()) {
 743     cmsSpace()->reset_after_compaction();
 744   }
 745 }
 746 
 747 void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
 748   assert_locked_or_safepoint(Heap_lock);
 749 
 750   // If incremental collection failed, we just want to expand
 751   // to the limit.
 752   if (incremental_collection_failed()) {
 753     clear_incremental_collection_failed();
 754     grow_to_reserved();
 755     return;
 756   }
 757 
 758   double free_percentage = ((double) free()) / capacity();
 759   double desired_free_percentage = (double) MinHeapFreeRatio / 100;
 760   double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;
 761 
 762   // compute expansion delta needed for reaching desired free percentage
 763   if (free_percentage < desired_free_percentage) {
 764     size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
 765     assert(desired_capacity >= capacity(), "invalid expansion size");
 766     size_t expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
 767     Log(gc) log;
 768     if (log.is_trace()) {
 769       size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
 770       log.trace("From compute_new_size: ");
 771       log.trace("  Free fraction %f", free_percentage);
 772       log.trace("  Desired free fraction %f", desired_free_percentage);
 773       log.trace("  Maximum free fraction %f", maximum_free_percentage);
 774       log.trace("  Capacity " SIZE_FORMAT, capacity() / 1000);
 775       log.trace("  Desired capacity " SIZE_FORMAT, desired_capacity / 1000);
 776       CMSHeap* heap = CMSHeap::heap();
 777       size_t young_size = heap->young_gen()->capacity();
 778       log.trace("  Young gen size " SIZE_FORMAT, young_size / 1000);
 779       log.trace("  unsafe_max_alloc_nogc " SIZE_FORMAT, unsafe_max_alloc_nogc() / 1000);
 780       log.trace("  contiguous available " SIZE_FORMAT, contiguous_available() / 1000);
 781       log.trace("  Expand by " SIZE_FORMAT " (bytes)", expand_bytes);
 782     }
 783     // safe if expansion fails
 784     expand_for_gc_cause(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
 785     log.trace("  Expanded free fraction %f", ((double) free()) / capacity());
 786   } else {
 787     size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
 788     assert(desired_capacity <= capacity(), "invalid expansion size");
 789     size_t shrink_bytes = capacity() - desired_capacity;
 790     // Don't shrink unless the delta is greater than the minimum shrink we want
 791     if (shrink_bytes >= MinHeapDeltaBytes) {
 792       shrink_free_list_by(shrink_bytes);
 793     }
 794   }
 795 }
 796 
 797 Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
 798   return cmsSpace()->freelistLock();
 799 }
 800 
 801 HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size, bool tlab) {
 802   CMSSynchronousYieldRequest yr;
 803   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
 804   return have_lock_and_allocate(size, tlab);
 805 }
 806 
 807 HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
 808                                                                 bool   tlab /* ignored */) {
 809   assert_lock_strong(freelistLock());
 810   size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
 811   HeapWord* res = cmsSpace()->allocate(adjustedSize);
 812   // Allocate the object live (grey) if the background collector has
 813   // started marking. This is necessary because the marker may
 814   // have passed this address and consequently this object will
 815   // not otherwise be greyed and would be incorrectly swept up.
 816   // Note that if this object contains references, the writing
 817   // of those references will dirty the card containing this object
 818   // allowing the object to be blackened (and its references scanned)
 819   // either during a preclean phase or at the final checkpoint.
 820   if (res != NULL) {
 821     // We may block here with an uninitialized object with
 822     // its mark-bit or P-bits not yet set. Such objects need
 823     // to be safely navigable by block_start().
 824     assert(oop(res)->klass_or_null() == NULL, "Object should be uninitialized here.");
 825     assert(!((FreeChunk*)res)->is_free(), "Error, block will look free but show wrong size");
 826     collector()->direct_allocated(res, adjustedSize);
 827     _direct_allocated_words += adjustedSize;
 828     // allocation counters
 829     NOT_PRODUCT(
 830       _numObjectsAllocated++;
 831       _numWordsAllocated += (int)adjustedSize;
 832     )
 833   }
 834   return res;
 835 }
 836 
 837 // In the case of direct allocation by mutators in a generation that
 838 // is being concurrently collected, the object must be allocated
 839 // live (grey) if the background collector has started marking.
 840 // This is necessary because the marker may
 841 // have passed this address and consequently this object will
 842 // not otherwise be greyed and would be incorrectly swept up.
 843 // Note that if this object contains references, the writing
 844 // of those references will dirty the card containing this object
 845 // allowing the object to be blackened (and its references scanned)
 846 // either during a preclean phase or at the final checkpoint.
 847 void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
 848   assert(_markBitMap.covers(start, size), "Out of bounds");
 849   if (_collectorState >= Marking) {
 850     MutexLockerEx y(_markBitMap.lock(),
 851                     Mutex::_no_safepoint_check_flag);
 852     // [see comments preceding SweepClosure::do_blk() below for details]
 853     //
 854     // Can the P-bits be deleted now?  JJJ
 855     //
 856     // 1. need to mark the object as live so it isn't collected
 857     // 2. need to mark the 2nd bit to indicate the object may be uninitialized
 858     // 3. need to mark the end of the object so marking, precleaning or sweeping
 859     //    can skip over uninitialized or unparsable objects. An allocated
 860     //    object is considered uninitialized for our purposes as long as
 861     //    its klass word is NULL.  All old gen objects are parsable
 862     //    as soon as they are initialized.
 863     _markBitMap.mark(start);          // object is live
 864     _markBitMap.mark(start + 1);      // object is potentially uninitialized?
 865     _markBitMap.mark(start + size - 1);
 866                                       // mark end of object
 867   }
 868   // check that oop looks uninitialized
 869   assert(oop(start)->klass_or_null() == NULL, "_klass should be NULL");
 870 }
 871 
 872 void CMSCollector::promoted(bool par, HeapWord* start,
 873                             bool is_obj_array, size_t obj_size) {
 874   assert(_markBitMap.covers(start), "Out of bounds");
 875   // See comment in direct_allocated() about when objects should
 876   // be allocated live.
 877   if (_collectorState >= Marking) {
 878     // we already hold the marking bit map lock, taken in
 879     // the prologue
 880     if (par) {
 881       _markBitMap.par_mark(start);
 882     } else {
 883       _markBitMap.mark(start);
 884     }
 885     // We don't need to mark the object as uninitialized (as
 886     // in direct_allocated above) because this is being done with the
 887     // world stopped and the object will be initialized by the
 888     // time the marking, precleaning or sweeping get to look at it.
 889     // But see the code for copying objects into the CMS generation,
 890     // where we need to ensure that concurrent readers of the
 891     // block offset table are able to safely navigate a block that
 892     // is in flux from being free to being allocated (and in
 893     // transition while being copied into) and subsequently
 894     // becoming a bona-fide object when the copy/promotion is complete.
 895     assert(SafepointSynchronize::is_at_safepoint(),
 896            "expect promotion only at safepoints");
 897 
 898     if (_collectorState < Sweeping) {
 899       // Mark the appropriate cards in the modUnionTable, so that
 900       // this object gets scanned before the sweep. If this is
 901       // not done, CMS generation references in the object might
 902       // not get marked.
 903       // For the case of arrays, which are otherwise precisely
 904       // marked, we need to dirty the entire array, not just its head.
 905       if (is_obj_array) {
 906         // The [par_]mark_range() method expects mr.end() below to
 907         // be aligned to the granularity of a bit's representation
 908         // in the heap. In the case of the MUT below, that's a
 909         // card size.
 910         MemRegion mr(start,
 911                      align_up(start + obj_size,
 912                               CardTable::card_size /* bytes */));
 913         if (par) {
 914           _modUnionTable.par_mark_range(mr);
 915         } else {
 916           _modUnionTable.mark_range(mr);
 917         }
 918       } else {  // not an obj array; we can just mark the head
 919         if (par) {
 920           _modUnionTable.par_mark(start);
 921         } else {
 922           _modUnionTable.mark(start);
 923         }
 924       }
 925     }
 926   }
 927 }
 928 
 929 oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
 930   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
 931   // allocate, copy and if necessary update promoinfo --
 932   // delegate to underlying space.
 933   assert_lock_strong(freelistLock());
 934 
 935 #ifndef PRODUCT
 936   if (CMSHeap::heap()->promotion_should_fail()) {
 937     return NULL;
 938   }
 939 #endif  // #ifndef PRODUCT
 940 
 941   oop res = _cmsSpace->promote(obj, obj_size);
 942   if (res == NULL) {
 943     // expand and retry
 944     size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
 945     expand_for_gc_cause(s*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_promotion);
 946     // Since this is the old generation, we don't try to promote
 947     // into a more senior generation.
 948     res = _cmsSpace->promote(obj, obj_size);
 949   }
 950   if (res != NULL) {
 951     // See comment in allocate() about when objects should
 952     // be allocated live.
 953     assert(oopDesc::is_oop(obj), "Will dereference klass pointer below");
 954     collector()->promoted(false,           // Not parallel
 955                           (HeapWord*)res, obj->is_objArray(), obj_size);
 956     // promotion counters
 957     NOT_PRODUCT(
 958       _numObjectsPromoted++;
 959       _numWordsPromoted +=
 960         (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
 961     )
 962   }
 963   return res;
 964 }
 965 
 966 
 967 // IMPORTANT: Notes on object size recognition in CMS.
 968 // ---------------------------------------------------
 969 // A block of storage in the CMS generation is always in
 970 // one of three states. A free block (FREE), an allocated
 971 // object (OBJECT) whose size() method reports the correct size,
 972 // and an intermediate state (TRANSIENT) in which its size cannot
 973 // be accurately determined.
 974 // STATE IDENTIFICATION:   (32 bit and 64 bit w/o COOPS)
 975 // -----------------------------------------------------
 976 // FREE:      klass_word & 1 == 1; mark_word holds block size
 977 //
 978 // OBJECT:    klass_word installed; klass_word != 0 && klass_word & 1 == 0;
 979 //            obj->size() computes correct size
 980 //
 981 // TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
 982 //
 983 // STATE IDENTIFICATION: (64 bit+COOPS)
 984 // ------------------------------------
 985 // FREE:      mark_word & CMS_FREE_BIT == 1; mark_word & ~CMS_FREE_BIT gives block_size
 986 //
 987 // OBJECT:    klass_word installed; klass_word != 0;
 988 //            obj->size() computes correct size
 989 //
 990 // TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
 991 //
 992 //
 993 // STATE TRANSITION DIAGRAM
 994 //
 995 //        mut / parnew                     mut  /  parnew
 996 // FREE --------------------> TRANSIENT ---------------------> OBJECT --|
 997 //  ^                                                                   |
 998 //  |------------------------ DEAD <------------------------------------|
 999 //         sweep                            mut
1000 //
1001 // While a block is in TRANSIENT state its size cannot be determined
1002 // so readers will either need to come back later or stall until
1003 // the size can be determined. Note that for the case of direct
1004 // allocation, P-bits, when available, may be used to determine the
1005 // size of an object that may not yet have been initialized.
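// Purely illustrative sketch of how a reader might discriminate the states
// above (the authoritative logic lives in FreeChunk and in
// CompactibleFreeListSpace::block_size() and friends):
//
//   if (FreeChunk::indicatesFreeChunk(p)) {
//     // FREE: the block carries its own size
//     sz = ((FreeChunk*)p)->size();
//   } else if (oop(p)->klass_or_null() != NULL) {
//     // OBJECT: the klass word is installed, so size() is reliable
//     sz = oop(p)->size();
//   } else {
//     // TRANSIENT: size indeterminate; retry later (or consult P-bits,
//     // when available, for directly allocated objects)
//   }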
1006 
1007 // Things to support parallel young-gen collection.
1008 oop
1009 ConcurrentMarkSweepGeneration::par_promote(int thread_num,
1010                                            oop old, markOop m,
1011                                            size_t word_sz) {
1012 #ifndef PRODUCT
1013   if (CMSHeap::heap()->promotion_should_fail()) {
1014     return NULL;
1015   }
1016 #endif  // #ifndef PRODUCT
1017 
1018   CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1019   PromotionInfo* promoInfo = &ps->promo;
1020   // If we are tracking promotions, then first ensure space for
1021   // promotion (including spooling space for saving the header if necessary),
1022   // then allocate and copy, and finally track the promoted info if needed.
1023   // When tracking (see PromotionInfo::track()), the mark word may
1024   // be displaced and in this case restoration of the mark word
1025   // occurs in the (oop_since_save_marks_)iterate phase.
1026   if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
1027     // Out of space for allocating spooling buffers;
1028     // try expanding and allocating spooling buffers.
1029     if (!expand_and_ensure_spooling_space(promoInfo)) {
1030       return NULL;
1031     }
1032   }
1033   assert(!promoInfo->tracking() || promoInfo->has_spooling_space(), "Control point invariant");
1034   const size_t alloc_sz = CompactibleFreeListSpace::adjustObjectSize(word_sz);
1035   HeapWord* obj_ptr = ps->lab.alloc(alloc_sz);
1036   if (obj_ptr == NULL) {
1037      obj_ptr = expand_and_par_lab_allocate(ps, alloc_sz);
1038      if (obj_ptr == NULL) {
1039        return NULL;
1040      }
1041   }
1042   oop obj = oop(obj_ptr);
1043   OrderAccess::storestore();
1044   assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1045   assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1046   // IMPORTANT: See note on object initialization for CMS above.
1047   // Otherwise, copy the object.  Here we must be careful to insert the
1048   // klass pointer last, since this marks the block as an allocated object.
1049   // Except with compressed oops it's the mark word.
1050   HeapWord* old_ptr = (HeapWord*)old;
1051   // Restore the mark word copied above.
1052   obj->set_mark_raw(m);
1053   assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1054   assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1055   OrderAccess::storestore();
1056 
1057   if (UseCompressedClassPointers) {
1058     // Copy gap missed by (aligned) header size calculation below
1059     obj->set_klass_gap(old->klass_gap());
1060   }
1061   if (word_sz > (size_t)oopDesc::header_size()) {
1062     Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
1063                                  obj_ptr + oopDesc::header_size(),
1064                                  word_sz - oopDesc::header_size());
1065   }
1066 
1067   // Now we can track the promoted object, if necessary.  We take care
1068   // to delay the transition from uninitialized to full object
1069   // (i.e., insertion of klass pointer) until after, so that it
1070   // atomically becomes a promoted object.
1071   if (promoInfo->tracking()) {
1072     promoInfo->track((PromotedObject*)obj, old->klass());
1073   }
1074   assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
1075   assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
1076   assert(oopDesc::is_oop(old), "Will use and dereference old klass ptr below");
1077 
1078   // Finally, install the klass pointer (this should be volatile).
1079   OrderAccess::storestore();
1080   obj->set_klass(old->klass());
1081   // We should now be able to calculate the right size for this object
1082   assert(oopDesc::is_oop(obj) && obj->size() == (int)word_sz, "Error, incorrect size computed for promoted object");
1083 
1084   collector()->promoted(true,          // parallel
1085                         obj_ptr, old->is_objArray(), word_sz);
1086 
1087   NOT_PRODUCT(
1088     Atomic::inc(&_numObjectsPromoted);
1089     Atomic::add(alloc_sz, &_numWordsPromoted);
1090   )
1091 
1092   return obj;
1093 }
1094 
1095 void
1096 ConcurrentMarkSweepGeneration::
1097 par_promote_alloc_done(int thread_num) {
1098   CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1099   ps->lab.retire(thread_num);
1100 }
1101 
1102 void
1103 ConcurrentMarkSweepGeneration::
1104 par_oop_since_save_marks_iterate_done(int thread_num) {
1105   CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
1106   ParScanWithoutBarrierClosure* dummy_cl = NULL;
1107   ps->promo.promoted_oops_iterate(dummy_cl);
1108 
1109   // Because card-scanning has been completed, subsequent phases
1110   // (e.g., reference processing) will not need to recognize which
1111   // objects have been promoted during this GC. So, we can now disable
1112   // promotion tracking.
1113   ps->promo.stopTrackingPromotions();
1114 }
1115 
1116 bool ConcurrentMarkSweepGeneration::should_collect(bool   full,
1117                                                    size_t size,
1118                                                    bool   tlab)
1119 {
1120   // We allow a STW collection only if a full
1121   // collection was requested.
1122   return full || should_allocate(size, tlab); // FIX ME !!!
1123   // This and promotion failure handling are connected at the
1124   // hip and should be fixed by untying them.
1125 }
1126 
1127 bool CMSCollector::shouldConcurrentCollect() {
1128   LogTarget(Trace, gc) log;
1129 
1130   if (_full_gc_requested) {
1131     log.print("CMSCollector: collect because of explicit gc request (or GCLocker)");
1132     return true;
1133   }
1134 
1135   FreelistLocker x(this);
1136   // ------------------------------------------------------------------
1137   // Print out lots of information which affects the initiation of
1138   // a collection.
1139   if (log.is_enabled() && stats().valid()) {
1140     log.print("CMSCollector shouldConcurrentCollect: ");
1141 
1142     LogStream out(log);
1143     stats().print_on(&out);
1144 
1145     log.print("time_until_cms_gen_full %3.7f", stats().time_until_cms_gen_full());
1146     log.print("free=" SIZE_FORMAT, _cmsGen->free());
1147     log.print("contiguous_available=" SIZE_FORMAT, _cmsGen->contiguous_available());
1148     log.print("promotion_rate=%g", stats().promotion_rate());
1149     log.print("cms_allocation_rate=%g", stats().cms_allocation_rate());
1150     log.print("occupancy=%3.7f", _cmsGen->occupancy());
1151     log.print("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
1152     log.print("cms_time_since_begin=%3.7f", stats().cms_time_since_begin());
1153     log.print("cms_time_since_end=%3.7f", stats().cms_time_since_end());
1154     log.print("metadata initialized %d", MetaspaceGC::should_concurrent_collect());
1155   }
1156   // ------------------------------------------------------------------
1157 
1158   // If the estimated time to complete a cms collection (cms_duration())
1159   // is less than the estimated time remaining until the cms generation
1160   // is full, start a collection.
1161   if (!UseCMSInitiatingOccupancyOnly) {
1162     if (stats().valid()) {
1163       if (stats().time_until_cms_start() == 0.0) {
1164         return true;
1165       }
1166     } else {
1167       // We want to conservatively collect somewhat early in order
1168       // to try and "bootstrap" our CMS/promotion statistics;
1169       // this branch will not fire after the first successful CMS
1170       // collection because the stats should then be valid.
1171       if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
1172         log.print(" CMSCollector: collect for bootstrapping statistics: occupancy = %f, boot occupancy = %f",
1173                   _cmsGen->occupancy(), _bootstrap_occupancy);
1174         return true;
1175       }
1176     }
1177   }
1178 
1179   // Otherwise, we start a collection cycle if the
1180   // old gen wants a collection cycle started. It may use
1181   // an appropriate criterion for making this decision.
1182   // XXX We need to make sure that the gen expansion
1183   // criterion dovetails well with this. XXX NEED TO FIX THIS
1184   if (_cmsGen->should_concurrent_collect()) {
1185     log.print("CMS old gen initiated");
1186     return true;
1187   }
1188 
1189   // We start a collection if we believe an incremental collection may fail;
1190   // this is not likely to be productive in practice because it's probably too
1191   // late anyway.
1192   CMSHeap* heap = CMSHeap::heap();
1193   if (heap->incremental_collection_will_fail(true /* consult_young */)) {
1194     log.print("CMSCollector: collect because incremental collection will fail ");
1195     return true;
1196   }
1197 
1198   if (MetaspaceGC::should_concurrent_collect()) {
1199     log.print("CMSCollector: collect for metadata allocation ");
1200     return true;
1201   }
1202 
1203   // CMSTriggerInterval starts a CMS cycle if enough time has passed.
1204   if (CMSTriggerInterval >= 0) {
1205     if (CMSTriggerInterval == 0) {
1206       // Trigger always
1207       return true;
1208     }
1209 
1210     // Check the CMS time since begin (we do not check the stats validity
1211     // as we want to be able to trigger the first CMS cycle as well)
1212     if (stats().cms_time_since_begin() >= (CMSTriggerInterval / ((double) MILLIUNITS))) {
1213       if (stats().valid()) {
1214         log.print("CMSCollector: collect because of trigger interval (time since last begin %3.7f secs)",
1215                   stats().cms_time_since_begin());
1216       } else {
1217         log.print("CMSCollector: collect because of trigger interval (first collection)");
1218       }
1219       return true;
1220     }
1221   }
1222 
1223   return false;
1224 }
1225 
1226 void CMSCollector::set_did_compact(bool v) { _cmsGen->set_did_compact(v); }
1227 
1228 // Clear _expansion_cause fields of constituent generations
1229 void CMSCollector::clear_expansion_cause() {
1230   _cmsGen->clear_expansion_cause();
1231 }
1232 
1233 // We should be conservative in starting a collection cycle.  To
1234 // start too eagerly runs the risk of collecting too often in the
1235 // extreme.  To collect too rarely falls back on full collections,
1236 // which works, even if not optimum in terms of concurrent work.
1237 // As a workaround for collecting too eagerly, use the flag
1238 // UseCMSInitiatingOccupancyOnly.  This also has the advantage of
1239 // giving the user an easily understandable way of controlling the
1240 // collections.
1241 // We want to start a new collection cycle if any of the following
1242 // conditions hold:
1243 // . our current occupancy exceeds the configured initiating occupancy
1244 //   for this generation, or
1245 // . we recently needed to expand this space and have not, since that
1246 //   expansion, done a collection of this generation, or
1247 // . the underlying space believes that it may be a good idea to initiate
1248 //   a concurrent collection (this may be based on criteria such as the
1249 //   following: the space uses linear allocation and linear allocation is
1250 //   going to fail, or there is believed to be excessive fragmentation in
1251 //   the generation, etc... or ...
1252 // [.(currently done by CMSCollector::shouldConcurrentCollect() only for
1253 //   the case of the old generation; see CR 6543076):
1254 //   we may be approaching a point at which allocation requests may fail because
1255 //   we will be out of sufficient free space given allocation rate estimates.]
1256 bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
1257 
1258   assert_lock_strong(freelistLock());
1259   if (occupancy() > initiating_occupancy()) {
1260     log_trace(gc)(" %s: collect because of occupancy %f / %f  ",
1261                   short_name(), occupancy(), initiating_occupancy());
1262     return true;
1263   }
1264   if (UseCMSInitiatingOccupancyOnly) {
1265     return false;
1266   }
1267   if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
1268     log_trace(gc)(" %s: collect because expanded for allocation ", short_name());
1269     return true;
1270   }
1271   return false;
1272 }
1273 
1274 void ConcurrentMarkSweepGeneration::collect(bool   full,
1275                                             bool   clear_all_soft_refs,
1276                                             size_t size,
1277                                             bool   tlab)
1278 {
1279   collector()->collect(full, clear_all_soft_refs, size, tlab);
1280 }
1281 
1282 void CMSCollector::collect(bool   full,
1283                            bool   clear_all_soft_refs,
1284                            size_t size,
1285                            bool   tlab)
1286 {
1287   // The following "if" branch is present for defensive reasons.
1288   // In the current uses of this interface, it can be replaced with:
1289   // assert(!GCLocker::is_active(), "Can't be called otherwise");
1290   // But I am not placing that assert here to allow future
1291   // generality in invoking this interface.
1292   if (GCLocker::is_active()) {
1293     // A consistency test for GCLocker
1294     assert(GCLocker::needs_gc(), "Should have been set already");
1295     // Skip this foreground collection, instead
1296     // expanding the heap if necessary.
1297     // Need the free list locks for the call to free() in compute_new_size()
1298     compute_new_size();
1299     return;
1300   }
1301   acquire_control_and_collect(full, clear_all_soft_refs);
1302 }
1303 
1304 void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) {
1305   CMSHeap* heap = CMSHeap::heap();
1306   unsigned int gc_count = heap->total_full_collections();
1307   if (gc_count == full_gc_count) {
1308     MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
1309     _full_gc_requested = true;
1310     _full_gc_cause = cause;
1311     CGC_lock->notify();   // nudge CMS thread
1312   } else {
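         // A full collection has already been started (or has completed) since
         // the request was made, so there is nothing further to do here.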
1313     assert(gc_count > full_gc_count, "Error: causal loop");
1314   }
1315 }
1316 
1317 bool CMSCollector::is_external_interruption() {
1318   GCCause::Cause cause = CMSHeap::heap()->gc_cause();
1319   return GCCause::is_user_requested_gc(cause) ||
1320          GCCause::is_serviceability_requested_gc(cause);
1321 }
1322 
1323 void CMSCollector::report_concurrent_mode_interruption() {
1324   if (is_external_interruption()) {
1325     log_debug(gc)("Concurrent mode interrupted");
1326   } else {
1327     log_debug(gc)("Concurrent mode failure");
1328     _gc_tracer_cm->report_concurrent_mode_failure();
1329   }
1330 }
1331 
1332 
1333 // The foreground and background collectors need to coordinate in order
1334 // to make sure that they do not mutually interfere with CMS collections.
1335 // When a background collection is active,
1336 // the foreground collector may need to take over (preempt) and
1337 // synchronously complete an ongoing collection. Depending on the
1338 // frequency of the background collections and the heap usage
1339 // of the application, this preemption can be rare or frequent.
1340 // There are only certain
1341 // points in the background collection at which the "collection-baton"
1342 // can be passed to the foreground collector.
1343 //
1344 // The foreground collector will wait for the baton before
1345 // starting any part of the collection.  The foreground collector
1346 // will only wait at one location.
1347 //
1348 // The background collector will yield the baton before starting a new
1349 // phase of the collection (e.g., before initial marking, marking from roots,
1350 // precleaning, final re-mark, sweep etc.)  This is normally done at the head
1351 // of the loop which switches the phases. The background collector does some
1352 // of the phases (initial mark, final re-mark) with the world stopped.
1353 // Because of locking involved in stopping the world,
1354 // the foreground collector should not block waiting for the background
1355 // collector when it is doing a stop-the-world phase.  The background
1356 // collector will yield the baton at an additional point just before
1357 // it enters a stop-the-world phase.  Once the world is stopped, the
1358 // background collector checks the phase of the collection.  If the
1359 // phase has not changed, it proceeds with the collection.  If the
1360 // phase has changed, it skips that phase of the collection.  See
1361 // the comments on the use of the Heap_lock in collect_in_background().
1362 //
1363 // Variable used in baton passing.
1364 //   _foregroundGCIsActive - Set to true by the foreground collector when
1365 //      it wants the baton.  The foreground clears it when it has finished
1366 //      the collection.
1367 //   _foregroundGCShouldWait - Set to true by the background collector
1368 //      when it is running.  The foreground collector waits while
1369 //      _foregroundGCShouldWait is true.
1370 //  CGC_lock - monitor used to protect access to the above variables
1371 //      and to notify the foreground and background collectors.
1372 //  _collectorState - current state of the CMS collection.
1373 //
1374 // The foreground collector
1375 //   acquires the CGC_lock
1376 //   sets _foregroundGCIsActive
1377 //   waits on the CGC_lock for _foregroundGCShouldWait to be false
1378 //     various locks acquired in preparation for the collection
1379 //     are released so as not to block the background collector
1380 //     that is in the midst of a collection
1381 //   proceeds with the collection
1382 //   clears _foregroundGCIsActive
1383 //   returns
1384 //
1385 // The background collector in a loop iterating on the phases of the
1386 //      collection
1387 //   acquires the CGC_lock
1388 //   sets _foregroundGCShouldWait
1389 //   if _foregroundGCIsActive is set
1390 //     clears _foregroundGCShouldWait, notifies CGC_lock
1391 //     waits on CGC_lock for _foregroundGCIsActive to become false
1392 //     and exits the loop.
1393 //   otherwise
1394 //     proceed with that phase of the collection
1395 //     if the phase is a stop-the-world phase,
1396 //       yield the baton once more just before enqueueing
1397 //       the stop-world CMS operation (executed by the VM thread).
1398 //   returns after all phases of the collection are done
1399 //
1400 
1401 void CMSCollector::acquire_control_and_collect(bool full,
1402         bool clear_all_soft_refs) {
1403   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1404   assert(!Thread::current()->is_ConcurrentGC_thread(),
1405          "shouldn't try to acquire control from self!");
1406 
1407   // Start the protocol for acquiring control of the
1408   // collection from the background collector (aka CMS thread).
1409   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1410          "VM thread should have CMS token");
1411   // Remember the possibly interrupted state of an ongoing
1412   // concurrent collection
1413   CollectorState first_state = _collectorState;
1414 
1415   // Signal to a possibly ongoing concurrent collection that
1416   // we want to do a foreground collection.
1417   _foregroundGCIsActive = true;
1418 
1419   // Release locks and wait for a notify from the background collector.
1420   // Releasing the locks is only necessary for phases which
1421   // yield, to improve the granularity of the collection.
1422   assert_lock_strong(bitMapLock());
1423   // We need to lock the Free list lock for the space that we are
1424   // currently collecting.
1425   assert(haveFreelistLocks(), "Must be holding free list locks");
1426   bitMapLock()->unlock();
1427   releaseFreelistLocks();
1428   {
1429     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1430     if (_foregroundGCShouldWait) {
1431       // We are going to be waiting for action for the CMS thread;
1432       // it had better not be gone (for instance at shutdown)!
1433       assert(ConcurrentMarkSweepThread::cmst() != NULL && !ConcurrentMarkSweepThread::cmst()->has_terminated(),
1434              "CMS thread must be running");
1435       // Wait here until the background collector gives us the go-ahead
1436       ConcurrentMarkSweepThread::clear_CMS_flag(
1437         ConcurrentMarkSweepThread::CMS_vm_has_token);  // release token
1438       // Get a possibly blocked CMS thread going:
1439       //   Note that we set _foregroundGCIsActive true above,
1440       //   without protection of the CGC_lock.
1441       CGC_lock->notify();
1442       assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(),
1443              "Possible deadlock");
1444       while (_foregroundGCShouldWait) {
1445         // wait for notification
1446         CGC_lock->wait(Mutex::_no_safepoint_check_flag);
1447         // Possibility of delay/starvation here, since CMS token does
1448         // not know to give priority to VM thread? Actually, I think
1449         // there wouldn't be any delay/starvation, but the proof of
1450         // that "fact" (?) appears non-trivial. XXX 20011219YSR
1451       }
1452       ConcurrentMarkSweepThread::set_CMS_flag(
1453         ConcurrentMarkSweepThread::CMS_vm_has_token);
1454     }
1455   }
1456   // The CMS_token is already held.  Get back the other locks.
1457   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1458          "VM thread should have CMS token");
1459   getFreelistLocks();
1460   bitMapLock()->lock_without_safepoint_check();
1461   log_debug(gc, state)("CMS foreground collector has asked for control " INTPTR_FORMAT " with first state %d",
1462                        p2i(Thread::current()), first_state);
1463   log_debug(gc, state)("    gets control with state %d", _collectorState);
1464 
1465   // Inform cms gen if this was due to partial collection failing.
1466   // The CMS gen may use this fact to determine its expansion policy.
1467   CMSHeap* heap = CMSHeap::heap();
1468   if (heap->incremental_collection_will_fail(false /* don't consult_young */)) {
1469     assert(!_cmsGen->incremental_collection_failed(),
1470            "Should have been noticed, reacted to and cleared");
1471     _cmsGen->set_incremental_collection_failed();
1472   }
1473 
1474   if (first_state > Idling) {
1475     report_concurrent_mode_interruption();
1476   }
1477 
1478   set_did_compact(true);
1479 
1480   // If the collection is being acquired from the background
1481   // collector, there may be references on the discovered
1482   // references lists.  Abandon those references, since some
1483   // of them may have become unreachable after concurrent
1484   // discovery; the STW compacting collector will redo discovery
1485   // more precisely, without being subject to floating garbage.
1486   // Leaving otherwise unreachable references in the discovered
1487   // lists would require special handling.
1488   ref_processor()->disable_discovery();
1489   ref_processor()->abandon_partial_discovery();
1490   ref_processor()->verify_no_references_recorded();
1491 
1492   if (first_state > Idling) {
1493     save_heap_summary();
1494   }
1495 
1496   do_compaction_work(clear_all_soft_refs);
1497 
1498   // Has the GC time limit been exceeded?
1499   size_t max_eden_size = _young_gen->max_eden_size();
1500   GCCause::Cause gc_cause = heap->gc_cause();
1501   size_policy()->check_gc_overhead_limit(_young_gen->used(),
1502                                          _young_gen->eden()->used(),
1503                                          _cmsGen->max_capacity(),
1504                                          max_eden_size,
1505                                          full,
1506                                          gc_cause,
1507                                          heap->soft_ref_policy());
1508 
1509   // Reset the expansion cause, now that we just completed
1510   // a collection cycle.
1511   clear_expansion_cause();
1512   _foregroundGCIsActive = false;
1513   return;
1514 }
1515 
1516 // Resize the tenured generation
1517 // after obtaining the free list locks for the
1518 // two generations.
1519 void CMSCollector::compute_new_size() {
1520   assert_locked_or_safepoint(Heap_lock);
1521   FreelistLocker z(this);
1522   MetaspaceGC::compute_new_size();
1523   _cmsGen->compute_new_size_free_list();
1524 }
1525 
1526 // A work method used by the foreground collector to do
1527 // a mark-sweep-compact.
1528 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
1529   CMSHeap* heap = CMSHeap::heap();
1530 
1531   STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
1532   gc_timer->register_gc_start();
1533 
1534   SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
1535   gc_tracer->report_gc_start(heap->gc_cause(), gc_timer->gc_start());
1536 
1537   heap->pre_full_gc_dump(gc_timer);
1538 
1539   GCTraceTime(Trace, gc, phases) t("CMS:MSC");
1540 
1541   // Temporarily widen the span of the weak reference processing to
1542   // the entire heap.
1543   MemRegion new_span(CMSHeap::heap()->reserved_region());
1544   ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
1545   // Temporarily, clear the "is_alive_non_header" field of the
1546   // reference processor.
1547   ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL);
1548   // Temporarily make reference _processing_ single threaded (non-MT).
1549   ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false);
1550   // Temporarily make refs discovery atomic
1551   ReferenceProcessorAtomicMutator rp_mut_atomic(ref_processor(), true);
1552   // Temporarily make reference _discovery_ single threaded (non-MT)
1553   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
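     // The *Mutator objects above are stack-allocated RAII helpers: each saves
     // the reference processor's previous setting and restores it when this
     // method returns.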
1554 
1555   ref_processor()->set_enqueuing_is_done(false);
1556   ref_processor()->enable_discovery();
1557   ref_processor()->setup_policy(clear_all_soft_refs);
1558   // If an asynchronous collection finishes, the _modUnionTable is
1559   // all clear.  If we are taking over the collection from an asynchronous
1560   // collection, clear the _modUnionTable.
1561   assert(_collectorState != Idling || _modUnionTable.isAllClear(),
1562     "_modUnionTable should be clear if the baton was not passed");
1563   _modUnionTable.clear_all();
1564   assert(_collectorState != Idling || _ct->cld_rem_set()->mod_union_is_clear(),
1565     "mod union for klasses should be clear if the baton was not passed");
1566   _ct->cld_rem_set()->clear_mod_union();
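     // The clearing above is done unconditionally: it is effectively a no-op
     // when the tables are already clear (the Idling case asserted above), and
     // it is required when we took over from a concurrent cycle in progress.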
1567 
1568 
1569   // We must adjust the allocation statistics being maintained
1570   // in the free list space. We do so by reading and clearing
1571   // the sweep timer and updating the block flux rate estimates below.
1572   assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
1573   if (_inter_sweep_timer.is_active()) {
1574     _inter_sweep_timer.stop();
1575     // Note that we do not use this sample to update the _inter_sweep_estimate.
1576     _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
1577                                             _inter_sweep_estimate.padded_average(),
1578                                             _intra_sweep_estimate.padded_average());
1579   }
1580 
1581   GenMarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs);
1582   #ifdef ASSERT
1583     CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
1584     size_t free_size = cms_space->free();
1585     assert(free_size ==
1586            pointer_delta(cms_space->end(), cms_space->compaction_top())
1587            * HeapWordSize,
1588       "All the free space should be compacted into one chunk at top");
1589     assert(cms_space->dictionary()->total_chunk_size(
1590                                       debug_only(cms_space->freelistLock())) == 0 ||
1591            cms_space->totalSizeInIndexedFreeLists() == 0,
1592       "All the free space should be in a single chunk");
1593     size_t num = cms_space->totalCount();
1594     assert((free_size == 0 && num == 0) ||
1595            (free_size > 0  && (num == 1 || num == 2)),
1596          "There should be at most 2 free chunks after compaction");
1597   #endif // ASSERT
1598   _collectorState = Resetting;
1599   assert(_restart_addr == NULL,
1600          "Should have been NULL'd before baton was passed");
1601   reset_stw();
1602   _cmsGen->reset_after_compaction();
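     // The STW mark-sweep-compact above unloads classes (when class unloading
     // is enabled), so restart the interval tracked by
     // update_should_unload_classes().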
1603   _concurrent_cycles_since_last_unload = 0;
1604 
1605   // Clear any data recorded in the PLAB chunk arrays.
1606   if (_survivor_plab_array != NULL) {
1607     reset_survivor_plab_arrays();
1608   }
1609 
1610   // Adjust the per-size allocation stats for the next epoch.
1611   _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
1612   // Restart the "inter sweep timer" for the next epoch.
1613   _inter_sweep_timer.reset();
1614   _inter_sweep_timer.start();
1615 
1616   // There is no longer a need to do a concurrent collection for Metaspace.
1617   MetaspaceGC::set_should_concurrent_collect(false);
1618 
1619   heap->post_full_gc_dump(gc_timer);
1620 
1621   gc_timer->register_gc_end();
1622 
1623   gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
1624 
1625   // For a mark-sweep-compact, compute_new_size() will be called
1626   // in the heap's do_collection() method.
1627 }
1628 
1629 void CMSCollector::print_eden_and_survivor_chunk_arrays() {
1630   Log(gc, heap) log;
1631   if (!log.is_trace()) {
1632     return;
1633   }
1634 
1635   ContiguousSpace* eden_space = _young_gen->eden();
1636   ContiguousSpace* from_space = _young_gen->from();
1637   ContiguousSpace* to_space   = _young_gen->to();
1638   // Eden
1639   if (_eden_chunk_array != NULL) {
1640     log.trace("eden " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
1641               p2i(eden_space->bottom()), p2i(eden_space->top()),
1642               p2i(eden_space->end()), eden_space->capacity());
1643     log.trace("_eden_chunk_index=" SIZE_FORMAT ", _eden_chunk_capacity=" SIZE_FORMAT,
1644               _eden_chunk_index, _eden_chunk_capacity);
1645     for (size_t i = 0; i < _eden_chunk_index; i++) {
1646       log.trace("_eden_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT, i, p2i(_eden_chunk_array[i]));
1647     }
1648   }
1649   // Survivor
1650   if (_survivor_chunk_array != NULL) {
1651     log.trace("survivor " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
1652               p2i(from_space->bottom()), p2i(from_space->top()),
1653               p2i(from_space->end()), from_space->capacity());
1654     log.trace("_survivor_chunk_index=" SIZE_FORMAT ", _survivor_chunk_capacity=" SIZE_FORMAT,
1655               _survivor_chunk_index, _survivor_chunk_capacity);
1656     for (size_t i = 0; i < _survivor_chunk_index; i++) {
1657       log.trace("_survivor_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT, i, p2i(_survivor_chunk_array[i]));
1658     }
1659   }
1660 }
1661 
1662 void CMSCollector::getFreelistLocks() const {
1663   // Get locks for all free lists in all generations that this
1664   // collector is responsible for
1665   _cmsGen->freelistLock()->lock_without_safepoint_check();
1666 }
1667 
1668 void CMSCollector::releaseFreelistLocks() const {
1669   // Release locks for all free lists in all generations that this
1670   // collector is responsible for
1671   _cmsGen->freelistLock()->unlock();
1672 }
1673 
1674 bool CMSCollector::haveFreelistLocks() const {
1675   // Check locks for all free lists in all generations that this
1676   // collector is responsible for
1677   assert_lock_strong(_cmsGen->freelistLock());
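     // assert_lock_strong() expands to nothing in product builds, so this
     // method is only meaningful in debug builds; it must never be reached
     // in a product build (checked below).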
1678   PRODUCT_ONLY(ShouldNotReachHere());
1679   return true;
1680 }
1681 
1682 // A utility class that is used by the CMS collector to
1683 // temporarily "release" the foreground collector from its
1684 // usual obligation to wait for the background collector to
1685 // complete an ongoing phase before proceeding.
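     // It is used below to bracket work (such as enqueueing the stop-the-world
     // VM operations and the resizing step) during which a blocked foreground
     // collector may safely be allowed to proceed; the destructor re-asserts
     // _foregroundGCShouldWait once that work is done.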
1686 class ReleaseForegroundGC: public StackObj {
1687  private:
1688   CMSCollector* _c;
1689  public:
1690   ReleaseForegroundGC(CMSCollector* c) : _c(c) {
1691     assert(_c->_foregroundGCShouldWait, "Else should not need to call");
1692     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1693     // allow a potentially blocked foreground collector to proceed
1694     _c->_foregroundGCShouldWait = false;
1695     if (_c->_foregroundGCIsActive) {
1696       CGC_lock->notify();
1697     }
1698     assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
1699            "Possible deadlock");
1700   }
1701 
1702   ~ReleaseForegroundGC() {
1703     assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
1704     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1705     _c->_foregroundGCShouldWait = true;
1706   }
1707 };
1708 
1709 void CMSCollector::collect_in_background(GCCause::Cause cause) {
1710   assert(Thread::current()->is_ConcurrentGC_thread(),
1711     "A CMS asynchronous collection is only allowed on a CMS thread.");
1712 
1713   CMSHeap* heap = CMSHeap::heap();
1714   {
1715     bool safepoint_check = Mutex::_no_safepoint_check_flag;
1716     MutexLockerEx hl(Heap_lock, safepoint_check);
1717     FreelistLocker fll(this);
1718     MutexLockerEx x(CGC_lock, safepoint_check);
1719     if (_foregroundGCIsActive) {
1720       // The foreground collector is active. Skip this
1721       // background collection.
1722       assert(!_foregroundGCShouldWait, "Should be clear");
1723       return;
1724     } else {
1725       assert(_collectorState == Idling, "Should be idling before start.");
1726       _collectorState = InitialMarking;
1727       register_gc_start(cause);
1728       // Reset the expansion cause, now that we are about to begin
1729       // a new cycle.
1730       clear_expansion_cause();
1731 
1732       // Clear the MetaspaceGC flag since a concurrent collection
1733       // is starting but also clear it after the collection.
1734       MetaspaceGC::set_should_concurrent_collect(false);
1735     }
1736     // Decide if we want to enable class unloading as part of the
1737     // ensuing concurrent GC cycle.
1738     update_should_unload_classes();
1739     _full_gc_requested = false;           // acks all outstanding full gc requests
1740     _full_gc_cause = GCCause::_no_gc;
1741     // Signal that we are about to start a collection
1742     heap->increment_total_full_collections();  // ... starting a collection cycle
1743     _collection_count_start = heap->total_full_collections();
1744   }
1745 
1746   size_t prev_used = _cmsGen->used();
1747 
1748   // The change of the collection state is normally done at this level;
1749   // the exceptions are phases that are executed while the world is
1750   // stopped.  For those phases the change of state is done while the
1751   // world is stopped.  For baton passing purposes this allows the
1752   // background collector to finish the phase and change state atomically.
1753   // The foreground collector cannot wait on a phase that is done
1754   // while the world is stopped because the foreground collector already
1755   // has the world stopped and would deadlock.
1756   while (_collectorState != Idling) {
1757     log_debug(gc, state)("Thread " INTPTR_FORMAT " in CMS state %d",
1758                          p2i(Thread::current()), _collectorState);
1759     // The foreground collector
1760     //   holds the Heap_lock throughout its collection.
1761     //   holds the CMS token (but not the lock)
1762     //     except while it is waiting for the background collector to yield.
1763     //
1764     // The foreground collector should be blocked (not for long)
1765     //   if the background collector is about to start a phase
1766     //   executed with world stopped.  If the background
1767     //   collector has already started such a phase, the
1768     //   foreground collector is blocked waiting for the
1769     //   Heap_lock.  The stop-world phases (InitialMarking and FinalMarking)
1770     //   are executed in the VM thread.
1771     //
1772     // The locking order is
1773     //   PendingListLock (PLL)  -- if applicable (FinalMarking)
1774     //   Heap_lock  (both this & PLL locked in VM_CMS_Operation::prologue())
1775     //   CMS token  (claimed in
1776     //                stop_world_and_do() -->
1777     //                  safepoint_synchronize() -->
1778     //                    CMSThread::synchronize())
1779 
1780     {
1781       // Check if the FG collector wants us to yield.
1782       CMSTokenSync x(true); // is cms thread
1783       if (waitForForegroundGC()) {
1784         // We yielded to a foreground GC, nothing more to be
1785         // done this round.
1786         assert(_foregroundGCShouldWait == false, "We set it to false in "
1787                "waitForForegroundGC()");
1788         log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " exiting collection CMS state %d",
1789                              p2i(Thread::current()), _collectorState);
1790         return;
1791       } else {
1792         // The background collector can run but check to see if the
1793         // foreground collector has done a collection while the
1794         // background collector was waiting to get the CGC_lock
1795         // above.  If yes, break so that _foregroundGCShouldWait
1796         // is cleared before returning.
1797         if (_collectorState == Idling) {
1798           break;
1799         }
1800       }
1801     }
1802 
1803     assert(_foregroundGCShouldWait, "Foreground collector, if active, "
1804       "should be waiting");
1805 
1806     switch (_collectorState) {
1807       case InitialMarking:
1808         {
1809           ReleaseForegroundGC x(this);
1810           stats().record_cms_begin();
1811           VM_CMS_Initial_Mark initial_mark_op(this);
1812           VMThread::execute(&initial_mark_op);
1813         }
1814         // The collector state may be any legal state at this point
1815         // since the background collector may have yielded to the
1816         // foreground collector.
1817         break;
1818       case Marking:
1819         // initial marking in checkpointRootsInitialWork has been completed
1820         if (markFromRoots()) { // we were successful
1821           assert(_collectorState == Precleaning, "Collector state should "
1822             "have changed");
1823         } else {
1824           assert(_foregroundGCIsActive, "Internal state inconsistency");
1825         }
1826         break;
1827       case Precleaning:
1828         // marking from roots in markFromRoots has been completed
1829         preclean();
1830         assert(_collectorState == AbortablePreclean ||
1831                _collectorState == FinalMarking,
1832                "Collector state should have changed");
1833         break;
1834       case AbortablePreclean:
1835         abortable_preclean();
1836         assert(_collectorState == FinalMarking, "Collector state should "
1837           "have changed");
1838         break;
1839       case FinalMarking:
1840         {
1841           ReleaseForegroundGC x(this);
1842 
1843           VM_CMS_Final_Remark final_remark_op(this);
1844           VMThread::execute(&final_remark_op);
1845         }
1846         assert(_foregroundGCShouldWait, "block post-condition");
1847         break;
1848       case Sweeping:
1849         // final marking in checkpointRootsFinal has been completed
1850         sweep();
1851         assert(_collectorState == Resizing, "Collector state change "
1852           "to Resizing must be done under the free_list_lock");
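             // Fall through: a successful sweep leaves the collector in the
             // Resizing state, which is handled immediately below.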
1853 
1854       case Resizing: {
1855         // Sweeping has been completed...
1856         // At this point the background collection has completed.
1857         // Don't move the call to compute_new_size() down
1858         // into code that might be executed if the background
1859         // collection was preempted.
1860         {
1861           ReleaseForegroundGC x(this);   // unblock FG collection
1862           MutexLockerEx       y(Heap_lock, Mutex::_no_safepoint_check_flag);
1863           CMSTokenSync        z(true);   // not strictly needed.
1864           if (_collectorState == Resizing) {
1865             compute_new_size();
1866             save_heap_summary();
1867             _collectorState = Resetting;
1868           } else {
1869             assert(_collectorState == Idling, "The state should only change"
1870                    " because the foreground collector has finished the collection");
1871           }
1872         }
1873         break;
1874       }
1875       case Resetting:
1876         // CMS heap resizing has been completed
1877         reset_concurrent();
1878         assert(_collectorState == Idling, "Collector state should "
1879           "have changed");
1880 
1881         MetaspaceGC::set_should_concurrent_collect(false);
1882 
1883         stats().record_cms_end();
1884         // Don't move the concurrent_phases_end() and compute_new_size()
1885         // calls to here because a preempted background collection
1886         // has its state set to "Resetting".
1887         break;
1888       case Idling:
1889       default:
1890         ShouldNotReachHere();
1891         break;
1892     }
1893     log_debug(gc, state)("  Thread " INTPTR_FORMAT " done - next CMS state %d",
1894                          p2i(Thread::current()), _collectorState);
1895     assert(_foregroundGCShouldWait, "block post-condition");
1896   }
1897 
1898   // Should this be in gc_epilogue?
1899   heap->counters()->update_counters();
1900 
1901   {
1902     // Clear _foregroundGCShouldWait and, in the event that the
1903     // foreground collector is waiting, notify it, before
1904     // returning.
1905     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1906     _foregroundGCShouldWait = false;
1907     if (_foregroundGCIsActive) {
1908       CGC_lock->notify();
1909     }
1910     assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
1911            "Possible deadlock");
1912   }
1913   log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " exiting collection CMS state %d",
1914                        p2i(Thread::current()), _collectorState);
1915   log_info(gc, heap)("Old: " SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
1916                      prev_used / K, _cmsGen->used()/K, _cmsGen->capacity() /K);
1917 }
1918 
1919 void CMSCollector::register_gc_start(GCCause::Cause cause) {
1920   _cms_start_registered = true;
1921   _gc_timer_cm->register_gc_start();
1922   _gc_tracer_cm->report_gc_start(cause, _gc_timer_cm->gc_start());
1923 }
1924 
1925 void CMSCollector::register_gc_end() {
1926   if (_cms_start_registered) {
1927     report_heap_summary(GCWhen::AfterGC);
1928 
1929     _gc_timer_cm->register_gc_end();
1930     _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
1931     _cms_start_registered = false;
1932   }
1933 }
1934 
1935 void CMSCollector::save_heap_summary() {
1936   CMSHeap* heap = CMSHeap::heap();
1937   _last_heap_summary = heap->create_heap_summary();
1938   _last_metaspace_summary = heap->create_metaspace_summary();
1939 }
1940 
1941 void CMSCollector::report_heap_summary(GCWhen::Type when) {
1942   _gc_tracer_cm->report_gc_heap_summary(when, _last_heap_summary);
1943   _gc_tracer_cm->report_metaspace_summary(when, _last_metaspace_summary);
1944 }
1945 
1946 bool CMSCollector::waitForForegroundGC() {
1947   bool res = false;
1948   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
1949          "CMS thread should have CMS token");
1950   // Block the foreground collector until the
1951   // background collector decides whether to
1952   // yield.
1953   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1954   _foregroundGCShouldWait = true;
1955   if (_foregroundGCIsActive) {
1956     // The background collector yields to the
1957     // foreground collector and returns a value
1958     // indicating that it has yielded.  The foreground
1959     // collector can proceed.
1960     res = true;
1961     _foregroundGCShouldWait = false;
1962     ConcurrentMarkSweepThread::clear_CMS_flag(
1963       ConcurrentMarkSweepThread::CMS_cms_has_token);
1964     ConcurrentMarkSweepThread::set_CMS_flag(
1965       ConcurrentMarkSweepThread::CMS_cms_wants_token);
1966     // Get a possibly blocked foreground thread going
1967     CGC_lock->notify();
1968     log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
1969                          p2i(Thread::current()), _collectorState);
1970     while (_foregroundGCIsActive) {
1971       CGC_lock->wait(Mutex::_no_safepoint_check_flag);
1972     }
1973     ConcurrentMarkSweepThread::set_CMS_flag(
1974       ConcurrentMarkSweepThread::CMS_cms_has_token);
1975     ConcurrentMarkSweepThread::clear_CMS_flag(
1976       ConcurrentMarkSweepThread::CMS_cms_wants_token);
1977   }
1978   log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
1979                        p2i(Thread::current()), _collectorState);
1980   return res;
1981 }
1982 
1983 // Because of the need to lock the free lists and other structures in
1984 // the collector, common to all the generations that the collector is
1985 // collecting, we need the gc_prologues of individual CMS generations
1986 // to delegate to their collector. It may have been simpler had the
1987 // current infrastructure allowed one to call a prologue on a
1988 // collector. In the absence of that we have the generation's
1989 // prologue delegate to the collector, which delegates back
1990 // some "local" work to a worker method in the individual generations
1991 // that it's responsible for collecting, while itself doing any
1992 // work common to all generations it's responsible for. A similar
1993 // comment applies to the gc_epilogue()s.
1994 // The role of the variable _between_prologue_and_epilogue is to
1995 // enforce the invocation protocol.
1996 void CMSCollector::gc_prologue(bool full) {
1997   // Call gc_prologue_work() for the CMSGen
1998   // we are responsible for.
1999 
2000   // The following locking discipline assumes that we are only called
2001   // when the world is stopped.
2002   assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption");
2003 
2004   // The CMSCollector prologue must call the gc_prologues for the
2005   // "generations" that it's responsible
2006   // for.
2007 
2008   assert(   Thread::current()->is_VM_thread()
2009          || (   CMSScavengeBeforeRemark
2010              && Thread::current()->is_ConcurrentGC_thread()),
2011          "Incorrect thread type for prologue execution");
2012 
2013   if (_between_prologue_and_epilogue) {
2014     // We have already been invoked; this is a gc_prologue delegation
2015     // from yet another CMS generation that we are responsible for, just
2016     // ignore it since all relevant work has already been done.
2017     return;
2018   }
2019 
2020   // set a bit saying prologue has been called; cleared in epilogue
2021   _between_prologue_and_epilogue = true;
2022   // Claim locks for common data structures, then call gc_prologue_work()
2023   // for each CMSGen.
2024 
2025   getFreelistLocks();   // gets free list locks on constituent spaces
2026   bitMapLock()->lock_without_safepoint_check();
2027 
2028   // Should call gc_prologue_work() for all cms gens we are responsible for
2029   bool duringMarking =    _collectorState >= Marking
2030                          && _collectorState < Sweeping;
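     // That is, the collector is in one of the concurrent marking/precleaning
     // phases (Marking through FinalMarking).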
2031 
2032   // The young collections clear the modified oops state, which tells if
2033   // there are any modified oops in the class. The remark phase also needs
2034   // that information. Tell the young collection to save the union of all
2035   // modified klasses.
2036   if (duringMarking) {
2037     _ct->cld_rem_set()->set_accumulate_modified_oops(true);
2038   }
2039 
2040   bool registerClosure = duringMarking;
2041 
2042   _cmsGen->gc_prologue_work(full, registerClosure, &_modUnionClosurePar);
2043 
2044   if (!full) {
2045     stats().record_gc0_begin();
2046   }
2047 }
2048 
2049 void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
2050 
2051   _capacity_at_prologue = capacity();
2052   _used_at_prologue = used();
2053 
2054   // We enable promotion tracking so that card-scanning can recognize
2055   // which objects have been promoted during this GC and skip them.
2056   for (uint i = 0; i < ParallelGCThreads; i++) {
2057     _par_gc_thread_states[i]->promo.startTrackingPromotions();
2058   }
2059 
2060   // Delegate to CMSCollector, which knows how to coordinate between
2061   // this and any other CMS generations that it is responsible for
2062   // collecting.
2063   collector()->gc_prologue(full);
2064 }
2065 
2066 // This is a "private" interface for use by this generation's CMSCollector.
2067 // Not to be called directly by any other entity (for instance,
2068 // GenCollectedHeap, which calls the "public" gc_prologue method above).
2069 void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
2070   bool registerClosure, ModUnionClosure* modUnionClosure) {
2071   assert(!incremental_collection_failed(), "Shouldn't be set yet");
2072   assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,
2073     "Should be NULL");
2074   if (registerClosure) {
2075     cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure);
2076   }
2077   cmsSpace()->gc_prologue();
2078   // Clear stat counters
2079   NOT_PRODUCT(
2080     assert(_numObjectsPromoted == 0, "check");
2081     assert(_numWordsPromoted   == 0, "check");
2082     log_develop_trace(gc, alloc)("Allocated " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes concurrently",
2083                                  _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord));
2084     _numObjectsAllocated = 0;
2085     _numWordsAllocated   = 0;
2086   )
2087 }
2088 
2089 void CMSCollector::gc_epilogue(bool full) {
2090   // The following locking discipline assumes that we are only called
2091   // when the world is stopped.
2092   assert(SafepointSynchronize::is_at_safepoint(),
2093          "world is stopped assumption");
2094 
2095   // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks
2096   // if linear allocation blocks need to be appropriately marked to allow
2097   // the blocks to be parsable. We also check here whether we need to nudge the
2098   // CMS collector thread to start a new cycle (if it's not already active).
2099   assert(   Thread::current()->is_VM_thread()
2100          || (   CMSScavengeBeforeRemark
2101              && Thread::current()->is_ConcurrentGC_thread()),
2102          "Incorrect thread type for epilogue execution");
2103 
2104   if (!_between_prologue_and_epilogue) {
2105     // We have already been invoked; this is a gc_epilogue delegation
2106     // from yet another CMS generation that we are responsible for, just
2107     // ignore it since all relevant work has already been done.
2108     return;
2109   }
2110   assert(haveFreelistLocks(), "must have freelist locks");
2111   assert_lock_strong(bitMapLock());
2112 
2113   _ct->cld_rem_set()->set_accumulate_modified_oops(false);
2114 
2115   _cmsGen->gc_epilogue_work(full);
2116 
2117   if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
2118     // in case sampling was not already enabled, enable it
2119     _start_sampling = true;
2120   }
2121   // reset _eden_chunk_array so sampling starts afresh
2122   _eden_chunk_index = 0;
2123 
2124   size_t cms_used   = _cmsGen->cmsSpace()->used();
2125 
2126   // update performance counters - this uses a special version of
2127   // update_counters() that allows the utilization to be passed as a
2128   // parameter, avoiding multiple calls to used().
2129   //
2130   _cmsGen->update_counters(cms_used);
2131 
2132   bitMapLock()->unlock();
2133   releaseFreelistLocks();
2134 
2135   if (!CleanChunkPoolAsync) {
2136     Chunk::clean_chunk_pool();
2137   }
2138 
2139   set_did_compact(false);
2140   _between_prologue_and_epilogue = false;  // ready for next cycle
2141 }
2142 
2143 void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
2144   collector()->gc_epilogue(full);
2145 
2146   // When using ParNew, promotion tracking should have already been
2147   // disabled. However, the prologue (which enables promotion
2148   // tracking) and epilogue are called irrespective of the type of
2149   // GC. So they will also be called before and after Full GCs, during
2150   // which promotion tracking will not be explicitly disabled. So,
2151   // it's safer to also disable it here too (to be symmetric with
2152   // enabling it in the prologue).
2153   for (uint i = 0; i < ParallelGCThreads; i++) {
2154     _par_gc_thread_states[i]->promo.stopTrackingPromotions();
2155   }
2156 }
2157 
2158 void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) {
2159   assert(!incremental_collection_failed(), "Should have been cleared");
2160   cmsSpace()->setPreconsumptionDirtyCardClosure(NULL);
2161   cmsSpace()->gc_epilogue();
2162   // Print stat counters
2163   NOT_PRODUCT(
2164     assert(_numObjectsAllocated == 0, "check");
2165     assert(_numWordsAllocated == 0, "check");
2166     log_develop_trace(gc, promotion)("Promoted " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
2167                                      _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord));
2168     _numObjectsPromoted = 0;
2169     _numWordsPromoted   = 0;
2170   )
2171 
2172   // The call down the chain in contiguous_available() needs the freelistLock,
2173   // so print this out before releasing the freelistLock.
2174   log_develop_trace(gc)(" Contiguous available " SIZE_FORMAT " bytes ", contiguous_available());
2175 }
2176 
2177 #ifndef PRODUCT
2178 bool CMSCollector::have_cms_token() {
2179   Thread* thr = Thread::current();
2180   if (thr->is_VM_thread()) {
2181     return ConcurrentMarkSweepThread::vm_thread_has_cms_token();
2182   } else if (thr->is_ConcurrentGC_thread()) {
2183     return ConcurrentMarkSweepThread::cms_thread_has_cms_token();
2184   } else if (thr->is_GC_task_thread()) {
2185     return ConcurrentMarkSweepThread::vm_thread_has_cms_token() &&
2186            ParGCRareEvent_lock->owned_by_self();
2187   }
2188   return false;
2189 }
2190 
2191 // Check reachability of the given heap address in CMS generation,
2192 // treating all other generations as roots.
2193 bool CMSCollector::is_cms_reachable(HeapWord* addr) {
2194   // We could "guarantee" below, rather than assert, but I'll
2195   // leave these as "asserts" so that an adventurous debugger
2196   // could try this in the product build provided some subset of
2197   // the conditions were met, provided they were interested in the
2198   // results and knew that the computation below wouldn't interfere
2199   // with other concurrent computations mutating the structures
2200   // being read or written.
2201   assert(SafepointSynchronize::is_at_safepoint(),
2202          "Else mutations in object graph will make answer suspect");
2203   assert(have_cms_token(), "Should hold cms token");
2204   assert(haveFreelistLocks(), "must hold free list locks");
2205   assert_lock_strong(bitMapLock());
2206 
2207   // Clear the marking bit map array before starting, but, just
2208   // for kicks, first report if the given address is already marked
2209   tty->print_cr("Start: Address " PTR_FORMAT " is%s marked", p2i(addr),
2210                 _markBitMap.isMarked(addr) ? "" : " not");
2211 
2212   if (verify_after_remark()) {
2213     MutexLockerEx x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2214     bool result = verification_mark_bm()->isMarked(addr);
2215     tty->print_cr("TransitiveMark: Address " PTR_FORMAT " %s marked", p2i(addr),
2216                   result ? "IS" : "is NOT");
2217     return result;
2218   } else {
2219     tty->print_cr("Could not compute result");
2220     return false;
2221   }
2222 }
2223 #endif
2224 
2225 void
2226 CMSCollector::print_on_error(outputStream* st) {
2227   CMSCollector* collector = ConcurrentMarkSweepGeneration::_collector;
2228   if (collector != NULL) {
2229     CMSBitMap* bitmap = &collector->_markBitMap;
2230     st->print_cr("Marking Bits: (CMSBitMap*) " PTR_FORMAT, p2i(bitmap));
2231     bitmap->print_on_error(st, " Bits: ");
2232 
2233     st->cr();
2234 
2235     CMSBitMap* mut_bitmap = &collector->_modUnionTable;
2236     st->print_cr("Mod Union Table: (CMSBitMap*) " PTR_FORMAT, p2i(mut_bitmap));
2237     mut_bitmap->print_on_error(st, " Bits: ");
2238   }
2239 }
2240 
2241 ////////////////////////////////////////////////////////
2242 // CMS Verification Support
2243 ////////////////////////////////////////////////////////
2244 // Following the remark phase, the following invariant
2245 // should hold -- each object in the CMS heap which is
2246 // marked in markBitMap() should be marked in the verification_mark_bm().
2247 
2248 class VerifyMarkedClosure: public BitMapClosure {
2249   CMSBitMap* _marks;
2250   bool       _failed;
2251 
2252  public:
2253   VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
2254 
2255   bool do_bit(size_t offset) {
2256     HeapWord* addr = _marks->offsetToHeapWord(offset);
2257     if (!_marks->isMarked(addr)) {
2258       Log(gc, verify) log;
2259       ResourceMark rm;
2260       LogStream ls(log.error());
2261       oop(addr)->print_on(&ls);
2262       log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
2263       _failed = true;
2264     }
2265     return true;
2266   }
2267 
2268   bool failed() { return _failed; }
2269 };
2270 
2271 bool CMSCollector::verify_after_remark() {
2272   GCTraceTime(Info, gc, phases, verify) tm("Verifying CMS Marking.");
2273   MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2274   static bool init = false;
2275 
2276   assert(SafepointSynchronize::is_at_safepoint(),
2277          "Else mutations in object graph will make answer suspect");
2278   assert(have_cms_token(),
2279          "Else there may be mutual interference in use of "
2280          " verification data structures");
2281   assert(_collectorState > Marking && _collectorState <= Sweeping,
2282          "Else marking info checked here may be obsolete");
2283   assert(haveFreelistLocks(), "must hold free list locks");
2284   assert_lock_strong(bitMapLock());
2285 
2286 
2287   // Allocate marking bit map if not already allocated
2288   if (!init) { // first time
2289     if (!verification_mark_bm()->allocate(_span)) {
2290       return false;
2291     }
2292     init = true;
2293   }
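     // The verification bit map is allocated only once (note the function-local
     // static flag above) and is cleared and reused on subsequent verifications.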
2294 
2295   assert(verification_mark_stack()->isEmpty(), "Should be empty");
2296 
2297   // Turn off refs discovery -- so we will be tracing through refs.
2298   // This is as intended, because by this time
2299   // GC must already have cleared any refs that need to be cleared,
2300   // and traced those that need to be marked; moreover,
2301   // the marking done here is not going to interfere in any
2302   // way with the marking information used by GC.
2303   NoRefDiscovery no_discovery(ref_processor());
2304 
2305 #if COMPILER2_OR_JVMCI
2306   DerivedPointerTableDeactivate dpt_deact;
2307 #endif
2308 
2309   // Clear any marks from a previous round
2310   verification_mark_bm()->clear_all();
2311   assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
2312   verify_work_stacks_empty();
2313 
2314   CMSHeap* heap = CMSHeap::heap();
2315   heap->ensure_parsability(false);  // fill TLABs, but no need to retire them
2316   // Update the saved marks which may affect the root scans.
2317   heap->save_marks();
2318 
2319   if (CMSRemarkVerifyVariant == 1) {
2320     // In this first variant of verification, we complete
2321     // all marking, then check if the new marks-vector is
2322     // a subset of the CMS marks-vector.
2323     verify_after_remark_work_1();
2324   } else {
2325     guarantee(CMSRemarkVerifyVariant == 2, "Range checking for CMSRemarkVerifyVariant should guarantee 1 or 2");
2326     // In this second variant of verification, we flag an error
2327     // (i.e. an object reachable in the new marks-vector not reachable
2328     // in the CMS marks-vector) immediately, also indicating the
2329     // identity of an object (A) that references the unmarked object (B) --
2330     // presumably, a mutation to A failed to be picked up by preclean/remark?
2331     verify_after_remark_work_2();
2332   }
2333 
2334   return true;
2335 }
2336 
2337 void CMSCollector::verify_after_remark_work_1() {
2338   ResourceMark rm;
2339   HandleMark  hm;
2340   CMSHeap* heap = CMSHeap::heap();
2341 
2342   // Get a clear set of claim bits for the roots processing to work with.
2343   ClassLoaderDataGraph::clear_claimed_marks();
2344 
2345   // Mark from roots one level into CMS
2346   MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
2347   heap->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2348 
2349   {
2350     StrongRootsScope srs(1);
2351 
2352     heap->cms_process_roots(&srs,
2353                            true,   // young gen as roots
2354                            GenCollectedHeap::ScanningOption(roots_scanning_options()),
2355                            should_unload_classes(),
2356                            &notOlder,
2357                            NULL);
2358   }
2359 
2360   // Now mark from the roots
2361   MarkFromRootsClosure markFromRootsClosure(this, _span,
2362     verification_mark_bm(), verification_mark_stack(),
2363     false /* don't yield */, true /* verifying */);
2364   assert(_restart_addr == NULL, "Expected pre-condition");
2365   verification_mark_bm()->iterate(&markFromRootsClosure);
2366   while (_restart_addr != NULL) {
2367     // Deal with stack overflow: by restarting at the indicated
2368     // address.
2369     HeapWord* ra = _restart_addr;
2370     markFromRootsClosure.reset(ra);
2371     _restart_addr = NULL;
2372     verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2373   }
2374   assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2375   verify_work_stacks_empty();
2376 
2377   // Marking completed -- now verify that each bit marked in
2378   // verification_mark_bm() is also marked in markBitMap(); flag all
2379   // errors by printing corresponding objects.
2380   VerifyMarkedClosure vcl(markBitMap());
2381   verification_mark_bm()->iterate(&vcl);
2382   if (vcl.failed()) {
2383     Log(gc, verify) log;
2384     log.error("Failed marking verification after remark");
2385     ResourceMark rm;
2386     LogStream ls(log.error());
2387     heap->print_on(&ls);
2388     fatal("CMS: failed marking verification after remark");
2389   }
2390 }
2391 
2392 class VerifyCLDOopsCLDClosure : public CLDClosure {
2393   class VerifyCLDOopsClosure : public OopClosure {
2394     CMSBitMap* _bitmap;
2395    public:
2396     VerifyCLDOopsClosure(CMSBitMap* bitmap) : _bitmap(bitmap) { }
2397     void do_oop(oop* p)       { guarantee(*p == NULL || _bitmap->isMarked((HeapWord*) *p), "Should be marked"); }
2398     void do_oop(narrowOop* p) { ShouldNotReachHere(); }
2399   } _oop_closure;
2400  public:
2401   VerifyCLDOopsCLDClosure(CMSBitMap* bitmap) : _oop_closure(bitmap) {}
2402   void do_cld(ClassLoaderData* cld) {
2403     cld->oops_do(&_oop_closure, ClassLoaderData::_claim_none, false);
2404   }
2405 };
2406 
2407 void CMSCollector::verify_after_remark_work_2() {
2408   ResourceMark rm;
2409   HandleMark  hm;
2410   CMSHeap* heap = CMSHeap::heap();
2411 
2412   // Get a clear set of claim bits for the roots processing to work with.
2413   ClassLoaderDataGraph::clear_claimed_marks();
2414 
2415   // Mark from roots one level into CMS
2416   MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
2417                                      markBitMap());
2418   CLDToOopClosure cld_closure(&notOlder, ClassLoaderData::_claim_strong);
2419 
2420   heap->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2421 
2422   {
2423     StrongRootsScope srs(1);
2424 
2425     heap->cms_process_roots(&srs,
2426                            true,   // young gen as roots
2427                            GenCollectedHeap::ScanningOption(roots_scanning_options()),
2428                            should_unload_classes(),
2429                            &notOlder,
2430                            &cld_closure);
2431   }
2432 
2433   // Now mark from the roots
2434   MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
2435     verification_mark_bm(), markBitMap(), verification_mark_stack());
2436   assert(_restart_addr == NULL, "Expected pre-condition");
2437   verification_mark_bm()->iterate(&markFromRootsClosure);
2438   while (_restart_addr != NULL) {
2439     // Deal with stack overflow: by restarting at the indicated
2440     // address.
2441     HeapWord* ra = _restart_addr;
2442     markFromRootsClosure.reset(ra);
2443     _restart_addr = NULL;
2444     verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2445   }
2446   assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2447   verify_work_stacks_empty();
2448 
2449   VerifyCLDOopsCLDClosure verify_cld_oops(verification_mark_bm());
2450   ClassLoaderDataGraph::cld_do(&verify_cld_oops);
2451 
2452   // Marking completed -- now verify that each bit marked in
2453   // verification_mark_bm() is also marked in markBitMap(); flag all
2454   // errors by printing corresponding objects.
2455   VerifyMarkedClosure vcl(markBitMap());
2456   verification_mark_bm()->iterate(&vcl);
2457   assert(!vcl.failed(), "Else verification above should not have succeeded");
2458 }
2459 
2460 void ConcurrentMarkSweepGeneration::save_marks() {
2461   // delegate to CMS space
2462   cmsSpace()->save_marks();
2463 }
2464 
2465 bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
2466   return cmsSpace()->no_allocs_since_save_marks();
2467 }
2468 
2469 void
2470 ConcurrentMarkSweepGeneration::oop_iterate(OopIterateClosure* cl) {
2471   if (freelistLock()->owned_by_self()) {
2472     Generation::oop_iterate(cl);
2473   } else {
2474     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2475     Generation::oop_iterate(cl);
2476   }
2477 }
2478 
2479 void
2480 ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) {
2481   if (freelistLock()->owned_by_self()) {
2482     Generation::object_iterate(cl);
2483   } else {
2484     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2485     Generation::object_iterate(cl);
2486   }
2487 }
2488 
2489 void
2490 ConcurrentMarkSweepGeneration::safe_object_iterate(ObjectClosure* cl) {
2491   if (freelistLock()->owned_by_self()) {
2492     Generation::safe_object_iterate(cl);
2493   } else {
2494     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2495     Generation::safe_object_iterate(cl);
2496   }
2497 }
2498 
2499 void
2500 ConcurrentMarkSweepGeneration::post_compact() {
2501 }
2502 
2503 void
2504 ConcurrentMarkSweepGeneration::prepare_for_verify() {
2505   // Fix the linear allocation blocks to look like free blocks.
2506 
2507   // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
2508   // are not called when the heap is verified during universe initialization and
2509   // at vm shutdown.
2510   if (freelistLock()->owned_by_self()) {
2511     cmsSpace()->prepare_for_verify();
2512   } else {
2513     MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
2514     cmsSpace()->prepare_for_verify();
2515   }
2516 }
2517 
2518 void
2519 ConcurrentMarkSweepGeneration::verify() {
2520   // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
2521   // are not called when the heap is verified during universe initialization and
2522   // at vm shutdown.
2523   if (freelistLock()->owned_by_self()) {
2524     cmsSpace()->verify();
2525   } else {
2526     MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
2527     cmsSpace()->verify();
2528   }
2529 }
2530 
2531 void CMSCollector::verify() {
2532   _cmsGen->verify();
2533 }
2534 
2535 #ifndef PRODUCT
2536 bool CMSCollector::overflow_list_is_empty() const {
2537   assert(_num_par_pushes >= 0, "Inconsistency");
2538   if (_overflow_list == NULL) {
2539     assert(_num_par_pushes == 0, "Inconsistency");
2540   }
2541   return _overflow_list == NULL;
2542 }
2543 
2544 // The methods verify_work_stacks_empty() and verify_overflow_empty()
2545 // merely consolidate assertion checks that appear to occur together frequently.
2546 void CMSCollector::verify_work_stacks_empty() const {
2547   assert(_markStack.isEmpty(), "Marking stack should be empty");
2548   assert(overflow_list_is_empty(), "Overflow list should be empty");
2549 }
2550 
2551 void CMSCollector::verify_overflow_empty() const {
2552   assert(overflow_list_is_empty(), "Overflow list should be empty");
2553   assert(no_preserved_marks(), "No preserved marks");
2554 }
2555 #endif // PRODUCT
2556 
2557 // Decide if we want to enable class unloading as part of the
2558 // ensuing concurrent GC cycle. We will collect and
2559 // unload classes if it's the case that:
2560 //  (a) class unloading is enabled at the command line, and
2561 //  (b) old gen is getting really full
2562 // NOTE: Provided there is no change in the state of the heap between
2563 // calls to this method, it should have idempotent results. Moreover,
2564 // its results should be monotonically increasing (i.e. going from 0 to 1,
2565 // but not 1 to 0) between successive calls between which the heap was
2566 // not collected. The implementation below therefore relies on
2567 // the property that concurrent_cycles_since_last_unload()
2568 // will not decrease unless a collection cycle happened and that
2569 // _cmsGen->is_too_full() is
2570 // itself also monotonic in that sense. See check_monotonicity()
2571 // below.
2572 void CMSCollector::update_should_unload_classes() {
2573   _should_unload_classes = false;
2574   if (CMSClassUnloadingEnabled) {
2575     _should_unload_classes = (concurrent_cycles_since_last_unload() >=
2576                               CMSClassUnloadingMaxInterval)
2577                            || _cmsGen->is_too_full();
2578   }
2579 }
2580 
2581 bool ConcurrentMarkSweepGeneration::is_too_full() const {
2582   bool res = should_concurrent_collect();
2583   res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
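  // E.g. with the default CMSIsTooFullPercentage of 98, the generation is
  // considered too full only once occupancy exceeds 0.98 (and
  // should_concurrent_collect() already wants a concurrent collection).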
2584   return res;
2585 }
2586 
2587 void CMSCollector::setup_cms_unloading_and_verification_state() {
2588   const  bool should_verify =   VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
2589                              || VerifyBeforeExit;
2590   const  int  rso           =   GenCollectedHeap::SO_AllCodeCache;
2591 
2592   // We set the proper root for this CMS cycle here.
2593   if (should_unload_classes()) {   // Should unload classes this cycle
2594     remove_root_scanning_option(rso);  // Shrink the root set appropriately
2595     set_verifying(should_verify);    // Set verification state for this cycle
2596     return;                            // Nothing else needs to be done at this time
2597   }
2598 
2599   // Not unloading classes this cycle
2600   assert(!should_unload_classes(), "Inconsistency!");
2601 
2602   // If we are not unloading classes then add SO_AllCodeCache to root
2603   // scanning options.
2604   add_root_scanning_option(rso);
2605 
2606   if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
2607     set_verifying(true);
2608   } else if (verifying() && !should_verify) {
2609     // We were verifying, but some verification flags got disabled.
2610     set_verifying(false);
2611     // Exclude symbols, strings and code cache elements from root scanning to
2612     // reduce initial mark (IM) and remark (RM) pauses.
2613     remove_root_scanning_option(rso);
2614   }
2615 }
2616 
2617 
2618 #ifndef PRODUCT
2619 HeapWord* CMSCollector::block_start(const void* p) const {
2620   const HeapWord* addr = (HeapWord*)p;
2621   if (_span.contains(p)) {
2622     if (_cmsGen->cmsSpace()->is_in_reserved(addr)) {
2623       return _cmsGen->cmsSpace()->block_start(p);
2624     }
2625   }
2626   return NULL;
2627 }
2628 #endif
2629 
2630 HeapWord*
2631 ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
2632                                                    bool   tlab,
2633                                                    bool   parallel) {
2634   CMSSynchronousYieldRequest yr;
2635   assert(!tlab, "Can't deal with TLAB allocation");
2636   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2637   expand_for_gc_cause(word_size*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_allocation);
2638   if (GCExpandToAllocateDelayMillis > 0) {
2639     os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
2640   }
2641   return have_lock_and_allocate(word_size, tlab);
2642 }
2643 
2644 void ConcurrentMarkSweepGeneration::expand_for_gc_cause(
2645     size_t bytes,
2646     size_t expand_bytes,
2647     CMSExpansionCause::Cause cause)
2648 {
2649 
2650   bool success = expand(bytes, expand_bytes);
2651 
2652   // remember why we expanded; this information is used
2653   // by shouldConcurrentCollect() when making decisions on whether to start
2654   // a new CMS cycle.
2655   if (success) {
2656     set_expansion_cause(cause);
2657     log_trace(gc)("Expanded CMS gen for %s",  CMSExpansionCause::to_string(cause));
2658   }
2659 }
2660 
2661 HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
2662   HeapWord* res = NULL;
2663   MutexLocker x(ParGCRareEvent_lock);
2664   while (true) {
2665     // Expansion by some other thread might make alloc OK now:
2666     res = ps->lab.alloc(word_sz);
2667     if (res != NULL) return res;
2668     // If there's not enough expansion space available, give up.
2669     if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
2670       return NULL;
2671     }
2672     // Otherwise, we try expansion.
2673     expand_for_gc_cause(word_sz*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_lab);
2674     // Now go around the loop and try alloc again;
2675     // A competing par_promote might beat us to the expansion space,
2676     // so we may go around the loop again if promotion fails again.
2677     if (GCExpandToAllocateDelayMillis > 0) {
2678       os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
2679     }
2680   }
2681 }
2682 
2683 
2684 bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
2685   PromotionInfo* promo) {
2686   MutexLocker x(ParGCRareEvent_lock);
2687   size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
2688   while (true) {
2689     // Expansion by some other thread might make alloc OK now:
2690     if (promo->ensure_spooling_space()) {
2691       assert(promo->has_spooling_space(),
2692              "Post-condition of successful ensure_spooling_space()");
2693       return true;
2694     }
2695     // If there's not enough expansion space available, give up.
2696     if (_virtual_space.uncommitted_size() < refill_size_bytes) {
2697       return false;
2698     }
2699     // Otherwise, we try expansion.
2700     expand_for_gc_cause(refill_size_bytes, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_spooling_space);
2701     // Now go around the loop and try alloc again;
2702     // A competing allocation might beat us to the expansion space,
2703     // so we may go around the loop again if allocation fails again.
2704     if (GCExpandToAllocateDelayMillis > 0) {
2705       os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
2706     }
2707   }
2708 }
2709 
2710 void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
2711   // Only shrink if a compaction was done so that all the free space
2712   // in the generation is in a contiguous block at the end.
2713   if (did_compact()) {
2714     CardGeneration::shrink(bytes);
2715   }
2716 }
2717 
2718 void ConcurrentMarkSweepGeneration::assert_correct_size_change_locking() {
2719   assert_locked_or_safepoint(Heap_lock);
2720 }
2721 
2722 void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) {
2723   assert_locked_or_safepoint(Heap_lock);
2724   assert_lock_strong(freelistLock());
2725   log_trace(gc)("Shrinking of CMS not yet implemented");
2726   return;
2727 }
2728 
2729 
2730 // Simple ctor/dtor wrapper for accounting & timer chores around concurrent
2731 // phases.
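// Typical usage (as in markFromRoots() below):
//
//   GCTraceCPUTime tcpu;
//   CMSPhaseAccounting pa(this, "Concurrent Mark");
//   ... do the concurrent phase work ...
//   // yields and elapsed time are reported when 'pa' goes out of scope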
2732 class CMSPhaseAccounting: public StackObj {
2733  public:
2734   CMSPhaseAccounting(CMSCollector *collector,
2735                      const char *title);
2736   ~CMSPhaseAccounting();
2737 
2738  private:
2739   CMSCollector *_collector;
2740   const char *_title;
2741   GCTraceConcTime(Info, gc) _trace_time;
2742 
2743  public:
2744   // Not MT-safe; so do not pass around these StackObj's
2745   // where they may be accessed by other threads.
2746   double wallclock_millis() {
2747     return TimeHelper::counter_to_millis(os::elapsed_counter() - _trace_time.start_time());
2748   }
2749 };
2750 
2751 CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
2752                                        const char *title) :
2753   _collector(collector), _title(title), _trace_time(title) {
2754 
2755   _collector->resetYields();
2756   _collector->resetTimer();
2757   _collector->startTimer();
2758   _collector->gc_timer_cm()->register_gc_concurrent_start(title);
2759 }
2760 
2761 CMSPhaseAccounting::~CMSPhaseAccounting() {
2762   _collector->gc_timer_cm()->register_gc_concurrent_end();
2763   _collector->stopTimer();
2764   log_debug(gc)("Concurrent active time: %.3fms", TimeHelper::counter_to_millis(_collector->timerTicks()));
2765   log_trace(gc)(" (CMS %s yielded %d times)", _title, _collector->yields());
2766 }
2767 
2768 // CMS work
2769 
2770 // The common parts of CMSParInitialMarkTask and CMSParRemarkTask.
2771 class CMSParMarkTask : public AbstractGangTask {
2772  protected:
2773   CMSCollector*     _collector;
2774   uint              _n_workers;
2775   OopStorage::ParState<false, false> _par_state_string;
2776   CMSParMarkTask(const char* name, CMSCollector* collector, uint n_workers) :
2777       AbstractGangTask(name),
2778       _collector(collector),
2779       _n_workers(n_workers),
2780       _par_state_string(StringTable::weak_storage()) {}
2781   // Work method in support of parallel rescan ... of young gen spaces
2782   void do_young_space_rescan(OopsInGenClosure* cl,
2783                              ContiguousSpace* space,
2784                              HeapWord** chunk_array, size_t chunk_top);
2785   void work_on_young_gen_roots(OopsInGenClosure* cl);
2786 };
2787 
2788 // Parallel initial mark task
2789 class CMSParInitialMarkTask: public CMSParMarkTask {
2790   StrongRootsScope* _strong_roots_scope;
2791  public:
2792   CMSParInitialMarkTask(CMSCollector* collector, StrongRootsScope* strong_roots_scope, uint n_workers) :
2793       CMSParMarkTask("Scan roots and young gen for initial mark in parallel", collector, n_workers),
2794       _strong_roots_scope(strong_roots_scope) {}
2795   void work(uint worker_id);
2796 };
2797 
2798 // Checkpoint the roots into this generation from outside
2799 // this generation. [Note this initial checkpoint need only
2800 // be approximate -- we'll do a catch up phase subsequently.]
2801 void CMSCollector::checkpointRootsInitial() {
2802   assert(_collectorState == InitialMarking, "Wrong collector state");
2803   check_correct_thread_executing();
2804   TraceCMSMemoryManagerStats tms(_collectorState, CMSHeap::heap()->gc_cause());
2805 
2806   save_heap_summary();
2807   report_heap_summary(GCWhen::BeforeGC);
2808 
2809   ReferenceProcessor* rp = ref_processor();
2810   assert(_restart_addr == NULL, "Control point invariant");
2811   {
2812     // acquire locks for subsequent manipulations
2813     MutexLockerEx x(bitMapLock(),
2814                     Mutex::_no_safepoint_check_flag);
2815     checkpointRootsInitialWork();
2816     // enable ("weak") refs discovery
2817     rp->enable_discovery();
2818     _collectorState = Marking;
2819   }
2820 }
2821 
2822 void CMSCollector::checkpointRootsInitialWork() {
2823   assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
2824   assert(_collectorState == InitialMarking, "just checking");
2825 
2826   // Already have locks.
2827   assert_lock_strong(bitMapLock());
2828   assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
2829 
2830   // Setup the verification and class unloading state for this
2831   // CMS collection cycle.
2832   setup_cms_unloading_and_verification_state();
2833 
2834   GCTraceTime(Trace, gc, phases) ts("checkpointRootsInitialWork", _gc_timer_cm);
2835 
2836   // Reset all the PLAB chunk arrays if necessary.
2837   if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
2838     reset_survivor_plab_arrays();
2839   }
2840 
2841   ResourceMark rm;
2842   HandleMark  hm;
2843 
2844   MarkRefsIntoClosure notOlder(_span, &_markBitMap);
2845   CMSHeap* heap = CMSHeap::heap();
2846 
2847   verify_work_stacks_empty();
2848   verify_overflow_empty();
2849 
2850   heap->ensure_parsability(false);  // fill TLABs, but no need to retire them
2851   // Update the saved marks which may affect the root scans.
2852   heap->save_marks();
2853 
2854   // weak reference processing has not started yet.
2855   ref_processor()->set_enqueuing_is_done(false);
2856 
2857   // Need to remember all newly created CLDs,
2858   // so that we can guarantee that the remark finds them.
2859   ClassLoaderDataGraph::remember_new_clds(true);
2860 
2861   // Whenever a CLD is found, it will be claimed before proceeding to mark
2862   // the klasses. The claimed marks need to be cleared before marking starts.
2863   ClassLoaderDataGraph::clear_claimed_marks();
2864 
2865   print_eden_and_survivor_chunk_arrays();
2866 
2867   {
2868 #if COMPILER2_OR_JVMCI
2869     DerivedPointerTableDeactivate dpt_deact;
2870 #endif
2871     if (CMSParallelInitialMarkEnabled) {
2872       // The parallel version.
2873       WorkGang* workers = heap->workers();
2874       assert(workers != NULL, "Need parallel worker threads.");
2875       uint n_workers = workers->active_workers();
2876 
2877       StrongRootsScope srs(n_workers);
2878 
2879       CMSParInitialMarkTask tsk(this, &srs, n_workers);
2880       initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
2881       // If the total number of workers is greater than 1, then multiple workers
2882       // may be used at some time and the initialization has been set
2883       // such that the single threaded path cannot be used.
2884       if (workers->total_workers() > 1) {
2885         workers->run_task(&tsk);
2886       } else {
2887         tsk.work(0);
2888       }
2889     } else {
2890       // The serial version.
2891       CLDToOopClosure cld_closure(&notOlder, ClassLoaderData::_claim_strong);
2892       heap->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2893 
2894       StrongRootsScope srs(1);
2895 
2896       heap->cms_process_roots(&srs,
2897                              true,   // young gen as roots
2898                              GenCollectedHeap::ScanningOption(roots_scanning_options()),
2899                              should_unload_classes(),
2900                              &notOlder,
2901                              &cld_closure);
2902     }
2903   }
2904 
2905   // Clear mod-union table; it will be dirtied in the prologue of
2906   // CMS generation for each young generation collection.
2907 
2908   assert(_modUnionTable.isAllClear(),
2909        "Was cleared in most recent final checkpoint phase"
2910        " or no bits are set in the gc_prologue before the start of the next "
2911        "subsequent marking phase.");
2912 
2913   assert(_ct->cld_rem_set()->mod_union_is_clear(), "Must be");
2914 
2915   // Save the end of the used_region of the constituent generations
2916   // to be used to limit the extent of sweep in each generation.
2917   save_sweep_limits();
2918   verify_overflow_empty();
2919 }
2920 
2921 bool CMSCollector::markFromRoots() {
2922   // we might be tempted to assert that:
2923   // assert(!SafepointSynchronize::is_at_safepoint(),
2924   //        "inconsistent argument?");
2925   // However that wouldn't be right, because it's possible that
2926   // a safepoint is indeed in progress as a young generation
2927   // stop-the-world GC happens even as we mark in this generation.
2928   assert(_collectorState == Marking, "inconsistent state?");
2929   check_correct_thread_executing();
2930   verify_overflow_empty();
2931 
2932   // Weak ref discovery note: We may be discovering weak
2933   // refs in this generation concurrent (but interleaved) with
2934   // weak ref discovery by the young generation collector.
2935 
2936   CMSTokenSyncWithLocks ts(true, bitMapLock());
2937   GCTraceCPUTime tcpu;
2938   CMSPhaseAccounting pa(this, "Concurrent Mark");
2939   bool res = markFromRootsWork();
2940   if (res) {
2941     _collectorState = Precleaning;
2942   } else { // We failed and a foreground collection wants to take over
2943     assert(_foregroundGCIsActive, "internal state inconsistency");
2944     assert(_restart_addr == NULL,  "foreground will restart from scratch");
2945     log_debug(gc)("bailing out to foreground collection");
2946   }
2947   verify_overflow_empty();
2948   return res;
2949 }
2950 
2951 bool CMSCollector::markFromRootsWork() {
2952   // iterate over marked bits in bit map, doing a full scan and mark
2953   // from these roots using the following algorithm:
2954   // . if oop is to the right of the current scan pointer,
2955   //   mark corresponding bit (we'll process it later)
2956   // . else (oop is to left of current scan pointer)
2957   //   push oop on marking stack
2958   // . drain the marking stack
2959 
2960   // Note that when we do a marking step we need to hold the
2961   // bit map lock -- recall that direct allocation (by mutators)
2962   // and promotion (by the young generation collector) is also
2963   // marking the bit map. [the so-called allocate live policy.]
2964   // Because the implementation of bit map marking is not
2965   // robust wrt simultaneous marking of bits in the same word,
2966   // we need to make sure that there is no such interference
2967   // between concurrent such updates.
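  // (markFromRoots() above therefore takes the bit map lock, via
  // CMSTokenSyncWithLocks(true, bitMapLock()), before calling into this
  // method; see the assert just below.)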
2968 
2969   // already have locks
2970   assert_lock_strong(bitMapLock());
2971 
2972   verify_work_stacks_empty();
2973   verify_overflow_empty();
2974   bool result = false;
2975   if (CMSConcurrentMTEnabled && ConcGCThreads > 0) {
2976     result = do_marking_mt();
2977   } else {
2978     result = do_marking_st();
2979   }
2980   return result;
2981 }
2982 
2983 // Forward decl
2984 class CMSConcMarkingTask;
2985 
2986 class CMSConcMarkingParallelTerminator: public ParallelTaskTerminator {
2987   CMSCollector*       _collector;
2988   CMSConcMarkingTask* _task;
2989  public:
2990   virtual void yield();
2991 
2992   // "n_threads" is the number of threads to be terminated.
2993   // "queue_set" is a set of work queues of other threads.
2994   // "collector" is the CMS collector associated with this task terminator.
2995   // "yield" indicates whether we need the gang as a whole to yield.
2996   CMSConcMarkingParallelTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) :
2997     ParallelTaskTerminator(n_threads, queue_set),
2998     _collector(collector) { }
2999 
3000   void set_task(CMSConcMarkingTask* task) {
3001     _task = task;
3002   }
3003 };
3004 
3005 class CMSConcMarkingOWSTTerminator: public OWSTTaskTerminator {
3006   CMSCollector*       _collector;
3007   CMSConcMarkingTask* _task;
3008  public:
3009   virtual void yield();
3010 
3011   // "n_threads" is the number of threads to be terminated.
3012   // "queue_set" is a set of work queues of other threads.
3013   // "collector" is the CMS collector associated with this task terminator.
3014   // "yield" indicates whether we need the gang as a whole to yield.
3015   CMSConcMarkingOWSTTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) :
3016     OWSTTaskTerminator(n_threads, queue_set),
3017     _collector(collector) { }
3018 
3019   void set_task(CMSConcMarkingTask* task) {
3020     _task = task;
3021   }
3022 };
3023 
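// Thin wrapper that picks one of the two terminator implementations above,
// based on UseOWSTTaskTerminator: the OWST terminator or the classic
// ParallelTaskTerminator.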
3024 class CMSConcMarkingTaskTerminator {
3025  private:
3026   ParallelTaskTerminator* _term;
3027  public:
3028   CMSConcMarkingTaskTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) {
3029     if (UseOWSTTaskTerminator) {
3030       _term = new CMSConcMarkingOWSTTerminator(n_threads, queue_set, collector);
3031     } else {
3032       _term = new CMSConcMarkingParallelTerminator(n_threads, queue_set, collector);
3033     }
3034   }
3035   ~CMSConcMarkingTaskTerminator() {
3036     assert(_term != NULL, "Must not be NULL");
3037     delete _term;
3038   }
3039 
3040   void set_task(CMSConcMarkingTask* task);
3041   ParallelTaskTerminator* terminator() const { return _term; }
3042 };
3043 
3044 class CMSConcMarkingTerminatorTerminator: public TerminatorTerminator {
3045   CMSConcMarkingTask* _task;
3046  public:
3047   bool should_exit_termination();
3048   void set_task(CMSConcMarkingTask* task) {
3049     _task = task;
3050   }
3051 };
3052 
3053 // MT Concurrent Marking Task
3054 class CMSConcMarkingTask: public YieldingFlexibleGangTask {
3055   CMSCollector*             _collector;
3056   uint                      _n_workers;      // requested/desired # workers
3057   bool                      _result;
3058   CompactibleFreeListSpace* _cms_space;
3059   char                      _pad_front[64];   // padding to ...
3060   HeapWord* volatile        _global_finger;   // ... avoid sharing cache line
3061   char                      _pad_back[64];
3062   HeapWord*                 _restart_addr;
3063 
3064   //  Exposed here for yielding support
3065   Mutex* const _bit_map_lock;
3066 
3067   // The per thread work queues, available here for stealing
3068   OopTaskQueueSet*  _task_queues;
3069 
3070   // Termination (and yielding) support
3071   CMSConcMarkingTaskTerminator       _term;
3072   CMSConcMarkingTerminatorTerminator _term_term;
3073 
3074  public:
3075   CMSConcMarkingTask(CMSCollector* collector,
3076                  CompactibleFreeListSpace* cms_space,
3077                  YieldingFlexibleWorkGang* workers,
3078                  OopTaskQueueSet* task_queues):
3079     YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
3080     _collector(collector),
3081     _n_workers(0),
3082     _result(true),
3083     _cms_space(cms_space),
3084     _bit_map_lock(collector->bitMapLock()),
3085     _task_queues(task_queues),
3086     _term(_n_workers, task_queues, _collector)
3087   {
3088     _requested_size = _n_workers;
3089     _term.set_task(this);
3090     _term_term.set_task(this);
3091     _restart_addr = _global_finger = _cms_space->bottom();
3092   }
3093 
3094 
3095   OopTaskQueueSet* task_queues()  { return _task_queues; }
3096 
3097   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
3098 
3099   HeapWord* volatile* global_finger_addr() { return &_global_finger; }
3100 
3101   ParallelTaskTerminator* terminator() { return _term.terminator(); }
3102 
3103   virtual void set_for_termination(uint active_workers) {
3104     terminator()->reset_for_reuse(active_workers);
3105   }
3106 
3107   void work(uint worker_id);
3108   bool should_yield() {
3109     return    ConcurrentMarkSweepThread::should_yield()
3110            && !_collector->foregroundGCIsActive();
3111   }
3112 
3113   virtual void coordinator_yield();  // stuff done by coordinator
3114   bool result() { return _result; }
3115 
3116   void reset(HeapWord* ra) {
3117     assert(_global_finger >= _cms_space->end(),  "Postcondition of ::work(i)");
3118     _restart_addr = _global_finger = ra;
3119     _term.terminator()->reset_for_reuse();
3120   }
3121 
3122   static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3123                                            OopTaskQueue* work_q);
3124 
3125  private:
3126   void do_scan_and_mark(int i, CompactibleFreeListSpace* sp);
3127   void do_work_steal(int i);
3128   void bump_global_finger(HeapWord* f);
3129 };
3130 
3131 bool CMSConcMarkingTerminatorTerminator::should_exit_termination() {
3132   assert(_task != NULL, "Error");
3133   return _task->yielding();
3134   // Note that we do not need the disjunct || _task->should_yield() above
3135   // because we want terminating threads to yield only if the task
3136   // is already in the midst of yielding, which happens only after at least one
3137   // thread has yielded.
3138 }
3139 
3140 void CMSConcMarkingParallelTerminator::yield() {
3141   if (_task->should_yield()) {
3142     _task->yield();
3143   } else {
3144     ParallelTaskTerminator::yield();
3145   }
3146 }
3147 
3148 void CMSConcMarkingOWSTTerminator::yield() {
3149   if (_task->should_yield()) {
3150     _task->yield();
3151   } else {
3152     OWSTTaskTerminator::yield();
3153   }
3154 }
3155 
3156 void CMSConcMarkingTaskTerminator::set_task(CMSConcMarkingTask* task) {
3157   if (UseOWSTTaskTerminator) {
3158     ((CMSConcMarkingOWSTTerminator*)_term)->set_task(task);
3159   } else {
3160     ((CMSConcMarkingParallelTerminator*)_term)->set_task(task);
3161   }
3162 }
3163 
3164 ////////////////////////////////////////////////////////////////
3165 // Concurrent Marking Algorithm Sketch
3166 ////////////////////////////////////////////////////////////////
3167 // Until all tasks exhausted (both spaces):
3168 // -- claim next available chunk
3169 // -- bump global finger via CAS
3170 // -- find first object that starts in this chunk
3171 //    and start scanning bitmap from that position
3172 // -- scan marked objects for oops
3173 // -- CAS-mark target, and if successful:
3174 //    . if target oop is above global finger (volatile read)
3175 //      nothing to do
3176 //    . if target oop is in chunk and above local finger
3177 //        then nothing to do
3178 //    . else push on work-queue
3179 // -- Deal with possible overflow issues:
3180 //    . local work-queue overflow causes stuff to be pushed on
3181 //      global (common) overflow queue
3182 //    . always first empty local work queue
3183 //    . then get a batch of oops from global work queue if any
3184 //    . then do work stealing
3185 // -- When all tasks claimed (both spaces)
3186 //    and local work queue empty,
3187 //    then in a loop do:
3188 //    . check global overflow stack; steal a batch of oops and trace
3189 //    . try to steal from other threads if GOS is empty
3190 //    . if neither is available, offer termination
3191 // -- Terminate and return result
3192 //
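// The chunk claiming and finger management sketched above is implemented in
// do_scan_and_mark() below; the overflow draining, work stealing and
// termination offers are implemented in do_work_steal().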
3193 void CMSConcMarkingTask::work(uint worker_id) {
3194   elapsedTimer _timer;
3195   ResourceMark rm;
3196   HandleMark hm;
3197 
3198   DEBUG_ONLY(_collector->verify_overflow_empty();)
3199 
3200   // Before we begin work, our work queue should be empty
3201   assert(work_queue(worker_id)->size() == 0, "Expected to be empty");
3202   // Scan the bitmap covering _cms_space, tracing through grey objects.
3203   _timer.start();
3204   do_scan_and_mark(worker_id, _cms_space);
3205   _timer.stop();
3206   log_trace(gc, task)("Finished cms space scanning in %dth thread: %3.3f sec", worker_id, _timer.seconds());
3207 
3208   // ... do work stealing
3209   _timer.reset();
3210   _timer.start();
3211   do_work_steal(worker_id);
3212   _timer.stop();
3213   log_trace(gc, task)("Finished work stealing in %dth thread: %3.3f sec", worker_id, _timer.seconds());
3214   assert(_collector->_markStack.isEmpty(), "Should have been emptied");
3215   assert(work_queue(worker_id)->size() == 0, "Should have been emptied");
3216   // Note that under the current task protocol, the
3217   // following assertion is true even if the spaces
3218   // expanded since the completion of the concurrent
3219   // marking. XXX This will likely change under a strict
3220   // ABORT semantics.
3221   // After perm removal the comparison was changed to
3222   // greater than or equal to from strictly greater than.
3223   // Before perm removal the highest address sweep would
3224   // have been at the end of perm gen but now is at the
3225   // end of the tenured gen.
3226   assert(_global_finger >=  _cms_space->end(),
3227          "All tasks have been completed");
3228   DEBUG_ONLY(_collector->verify_overflow_empty();)
3229 }
3230 
3231 void CMSConcMarkingTask::bump_global_finger(HeapWord* f) {
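  // Advance _global_finger to at least f using a CAS loop: if the CAS fails
  // because another worker moved the finger concurrently, re-read and retry;
  // we are done as soon as the finger is at or beyond f.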
3232   HeapWord* read = _global_finger;
3233   HeapWord* cur  = read;
3234   while (f > read) {
3235     cur = read;
3236     read = Atomic::cmpxchg(f, &_global_finger, cur);
3237     if (cur == read) {
3238       // our cas succeeded
3239       assert(_global_finger >= f, "protocol consistency");
3240       break;
3241     }
3242   }
3243 }
3244 
3245 // This is really inefficient, and should be redone by
3246 // using (not yet available) block-read and -write interfaces to the
3247 // stack and the work_queue. XXX FIX ME !!!
3248 bool CMSConcMarkingTask::get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3249                                                       OopTaskQueue* work_q) {
3250   // Fast lock-free check
3251   if (ovflw_stk->length() == 0) {
3252     return false;
3253   }
3254   assert(work_q->size() == 0, "Shouldn't steal");
3255   MutexLockerEx ml(ovflw_stk->par_lock(),
3256                    Mutex::_no_safepoint_check_flag);
3257   // Grab up to 1/4 the size of the work queue
3258   size_t num = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
3259                     (size_t)ParGCDesiredObjsFromOverflowList);
3260   num = MIN2(num, ovflw_stk->length());
3261   for (int i = (int) num; i > 0; i--) {
3262     oop cur = ovflw_stk->pop();
3263     assert(cur != NULL, "Counted wrong?");
3264     work_q->push(cur);
3265   }
3266   return num > 0;
3267 }
3268 
3269 void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
3270   SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
3271   int n_tasks = pst->n_tasks();
3272   // We allow that there may be no tasks to do here because
3273   // we are restarting after a stack overflow.
3274   assert(pst->valid() || n_tasks == 0, "Uninitialized use?");
3275   uint nth_task = 0;
3276 
3277   HeapWord* aligned_start = sp->bottom();
3278   if (sp->used_region().contains(_restart_addr)) {
3279     // Align down to a card boundary for the start of 0th task
3280     // for this space.
3281     aligned_start = align_down(_restart_addr, CardTable::card_size);
3282   }
3283 
3284   size_t chunk_size = sp->marking_task_size();
3285   while (pst->try_claim_task(/* reference */ nth_task)) {
3286     // Having claimed the nth task in this space,
3287     // compute the chunk that it corresponds to:
3288     MemRegion span = MemRegion(aligned_start + nth_task*chunk_size,
3289                                aligned_start + (nth_task+1)*chunk_size);
3290     // Try and bump the global finger via a CAS;
3291     // note that we need to do the global finger bump
3292     // _before_ taking the intersection below, because
3293     // the task corresponding to that region will be
3294     // deemed done even if the used_region() expands
3295     // because of allocation -- as it almost certainly will
3296     // during start-up while the threads yield in the
3297     // closure below.
3298     HeapWord* finger = span.end();
3299     bump_global_finger(finger);   // atomically
3300     // There are null tasks here corresponding to chunks
3301     // beyond the "top" address of the space.
3302     span = span.intersection(sp->used_region());
3303     if (!span.is_empty()) {  // Non-null task
3304       HeapWord* prev_obj;
3305       assert(!span.contains(_restart_addr) || nth_task == 0,
3306              "Inconsistency");
3307       if (nth_task == 0) {
3308         // For the 0th task, we'll not need to compute a block_start.
3309         if (span.contains(_restart_addr)) {
3310           // In the case of a restart because of stack overflow,
3311           // we might additionally skip a chunk prefix.
3312           prev_obj = _restart_addr;
3313         } else {
3314           prev_obj = span.start();
3315         }
3316       } else {
3317         // We want to skip the first object because
3318         // the protocol is to scan any object in its entirety
3319         // that _starts_ in this span; a fortiori, any
3320         // object starting in an earlier span is scanned
3321         // as part of an earlier claimed task.
3322         // Below we use the "careful" version of block_start
3323         // so we do not try to navigate uninitialized objects.
3324         prev_obj = sp->block_start_careful(span.start());
3325         // Below we use a variant of block_size that uses the
3326         // Printezis bits to avoid waiting for allocated
3327         // objects to become initialized/parsable.
3328         while (prev_obj < span.start()) {
3329           size_t sz = sp->block_size_no_stall(prev_obj, _collector);
3330           if (sz > 0) {
3331             prev_obj += sz;
3332           } else {
3333             // In this case we may end up doing a bit of redundant
3334             // scanning, but that appears unavoidable, short of
3335             // locking the free list locks; see bug 6324141.
3336             break;
3337           }
3338         }
3339       }
3340       if (prev_obj < span.end()) {
3341         MemRegion my_span = MemRegion(prev_obj, span.end());
3342         // Do the marking work within a non-empty span --
3343         // the last argument to the constructor indicates whether the
3344         // iteration should be incremental with periodic yields.
3345         ParMarkFromRootsClosure cl(this, _collector, my_span,
3346                                    &_collector->_markBitMap,
3347                                    work_queue(i),
3348                                    &_collector->_markStack);
3349         _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
3350       } // else nothing to do for this task
3351     }   // else nothing to do for this task
3352   }
3353   // We'd be tempted to assert here that since there are no
3354   // more tasks left to claim in this space, the global_finger
3355   // must exceed space->top() and a fortiori space->end(). However,
3356   // that would not quite be correct because the bumping of
3357   // global_finger occurs strictly after the claiming of a task,
3358   // so by the time we reach here the global finger may not yet
3359   // have been bumped up by the thread that claimed the last
3360   // task.
3361   pst->all_tasks_completed();
3362 }
3363 
3364 class ParConcMarkingClosure: public MetadataVisitingOopIterateClosure {
3365  private:
3366   CMSCollector* _collector;
3367   CMSConcMarkingTask* _task;
3368   MemRegion     _span;
3369   CMSBitMap*    _bit_map;
3370   CMSMarkStack* _overflow_stack;
3371   OopTaskQueue* _work_queue;
3372  protected:
3373   DO_OOP_WORK_DEFN
3374  public:
3375   ParConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue,
3376                         CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
3377     MetadataVisitingOopIterateClosure(collector->ref_processor()),
3378     _collector(collector),
3379     _task(task),
3380     _span(collector->_span),
3381     _bit_map(bit_map),
3382     _overflow_stack(overflow_stack),
3383     _work_queue(work_queue)
3384   { }
3385   virtual void do_oop(oop* p);
3386   virtual void do_oop(narrowOop* p);
3387 
3388   void trim_queue(size_t max);
3389   void handle_stack_overflow(HeapWord* lost);
3390   void do_yield_check() {
3391     if (_task->should_yield()) {
3392       _task->yield();
3393     }
3394   }
3395 };
3396 
3397 DO_OOP_WORK_IMPL(ParConcMarkingClosure)
3398 
3399 // Grey object scanning during work stealing phase --
3400 // the salient assumption here is that any references
3401 // that are in these stolen objects being scanned must
3402 // already have been initialized (else they would not have
3403 // been published), so we do not need to check for
3404 // uninitialized objects before pushing here.
3405 void ParConcMarkingClosure::do_oop(oop obj) {
3406   assert(oopDesc::is_oop_or_null(obj, true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
3407   HeapWord* addr = (HeapWord*)obj;
3408   // Check if oop points into the CMS generation
3409   // and is not marked
3410   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
3411     // a white object ...
3412     // If we manage to "claim" the object, by being the
3413     // first thread to mark it, then we push it on our
3414     // marking stack
3415     if (_bit_map->par_mark(addr)) {     // ... now grey
3416       // push on work queue (grey set)
3417       bool simulate_overflow = false;
3418       NOT_PRODUCT(
3419         if (CMSMarkStackOverflowALot &&
3420             _collector->simulate_overflow()) {
3421           // simulate a stack overflow
3422           simulate_overflow = true;
3423         }
3424       )
3425       if (simulate_overflow ||
3426           !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
3427         // stack overflow
3428         log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _overflow_stack->capacity());
3429         // We cannot assert that the overflow stack is full because
3430         // it may have been emptied since.
3431         assert(simulate_overflow ||
3432                _work_queue->size() == _work_queue->max_elems(),
3433               "Else push should have succeeded");
3434         handle_stack_overflow(addr);
3435       }
3436     } // Else, some other thread got there first
3437     do_yield_check();
3438   }
3439 }
3440 
3441 void ParConcMarkingClosure::trim_queue(size_t max) {
3442   while (_work_queue->size() > max) {
3443     oop new_oop;
3444     if (_work_queue->pop_local(new_oop)) {
3445       assert(oopDesc::is_oop(new_oop), "Should be an oop");
3446       assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object");
3447       assert(_span.contains((HeapWord*)new_oop), "Not in span");
3448       new_oop->oop_iterate(this);  // do_oop() above
3449       do_yield_check();
3450     }
3451   }
3452 }
3453 
3454 // Upon stack overflow, we discard (part of) the stack,
3455 // remembering the least address amongst those discarded
3456 // in CMSCollector's _restart_address.
3457 void ParConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
3458   // We need to do this under a mutex to prevent other
3459   // workers from interfering with the work done below.
3460   MutexLockerEx ml(_overflow_stack->par_lock(),
3461                    Mutex::_no_safepoint_check_flag);
3462   // Remember the least grey address discarded
3463   HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
3464   _collector->lower_restart_addr(ra);
3465   _overflow_stack->reset();  // discard stack contents
3466   _overflow_stack->expand(); // expand the stack if possible
3467 }
3468 
3469 
3470 void CMSConcMarkingTask::do_work_steal(int i) {
3471   OopTaskQueue* work_q = work_queue(i);
3472   oop obj_to_scan;
3473   CMSBitMap* bm = &(_collector->_markBitMap);
3474   CMSMarkStack* ovflw = &(_collector->_markStack);
3475   ParConcMarkingClosure cl(_collector, this, work_q, bm, ovflw);
3476   while (true) {
3477     cl.trim_queue(0);
3478     assert(work_q->size() == 0, "Should have been emptied above");
3479     if (get_work_from_overflow_stack(ovflw, work_q)) {
3480       // Can't assert below because the work obtained from the
3481       // overflow stack may already have been stolen from us.
3482       // assert(work_q->size() > 0, "Work from overflow stack");
3483       continue;
3484     } else if (task_queues()->steal(i, /* reference */ obj_to_scan)) {
3485       assert(oopDesc::is_oop(obj_to_scan), "Should be an oop");
3486       assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object");
3487       obj_to_scan->oop_iterate(&cl);
3488     } else if (terminator()->offer_termination(&_term_term)) {
3489       assert(work_q->size() == 0, "Impossible!");
3490       break;
3491     } else if (yielding() || should_yield()) {
3492       yield();
3493     }
3494   }
3495 }
3496 
3497 // This is run by the CMS (coordinator) thread.
3498 void CMSConcMarkingTask::coordinator_yield() {
3499   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
3500          "CMS thread should hold CMS token");
3501   // First give up the locks, then yield, then re-lock
3502   // We should probably use a constructor/destructor idiom to
3503   // do this unlock/lock or modify the MutexUnlocker class to
3504   // serve our purpose. XXX
3505   assert_lock_strong(_bit_map_lock);
3506   _bit_map_lock->unlock();
3507   ConcurrentMarkSweepThread::desynchronize(true);
3508   _collector->stopTimer();
3509   _collector->incrementYields();
3510 
3511   // It is possible for whichever thread initiated the yield request
3512   // not to get a chance to wake up and take the bitmap lock between
3513   // this thread releasing it and reacquiring it. So, while the
3514   // should_yield() flag is on, let's sleep for a bit to give the
3515   // other thread a chance to wake up. The limit imposed on the number
3516   // of iterations is defensive, to avoid any unforeseen circumstances
3517   // putting us into an infinite loop. Since it's always been this
3518   // (coordinator_yield()) method that was observed to cause the
3519   // problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
3520   // which is by default non-zero. For the other seven methods that
3521   // also perform the yield operation, we are using a different
3522   // parameter (CMSYieldSleepCount) which is by default zero. This way we
3523   // can enable the sleeping for those methods too, if necessary.
3524   // See 6442774.
3525   //
3526   // We really need to reconsider the synchronization between the GC
3527   // thread and the yield-requesting threads in the future and we
3528   // should really use wait/notify, which is the recommended
3529   // way of doing this type of interaction. Additionally, we should
3530   // consolidate the eight methods that do the yield operation, as they
3531   // are almost identical, into one for better maintainability and
3532   // readability. See 6445193.
3533   //
3534   // Tony 2006.06.29
3535   for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
3536                    ConcurrentMarkSweepThread::should_yield() &&
3537                    !CMSCollector::foregroundGCIsActive(); ++i) {
3538     os::sleep(Thread::current(), 1, false);
3539   }
3540 
3541   ConcurrentMarkSweepThread::synchronize(true);
3542   _bit_map_lock->lock_without_safepoint_check();
3543   _collector->startTimer();
3544 }
3545 
3546 bool CMSCollector::do_marking_mt() {
3547   assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
3548   uint num_workers = WorkerPolicy::calc_active_conc_workers(conc_workers()->total_workers(),
3549                                                             conc_workers()->active_workers(),
3550                                                             Threads::number_of_non_daemon_threads());
3551   num_workers = conc_workers()->update_active_workers(num_workers);
3552   log_info(gc,task)("Using %u workers of %u for marking", num_workers, conc_workers()->total_workers());
3553 
3554   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
3555 
3556   CMSConcMarkingTask tsk(this,
3557                          cms_space,
3558                          conc_workers(),
3559                          task_queues());
3560 
3561   // Since the actual number of workers we get may be different
3562   // from the number we requested above, do we need to do anything different
3563   // below? In particular, maybe we need to subclass the SequentialSubTasksDone
3564   // class?? XXX
3565   cms_space ->initialize_sequential_subtasks_for_marking(num_workers);
3566 
3567   // Refs discovery is already non-atomic.
3568   assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
3569   assert(ref_processor()->discovery_is_mt(), "Discovery should be MT");
3570   conc_workers()->start_task(&tsk);
3571   while (tsk.yielded()) {
3572     tsk.coordinator_yield();
3573     conc_workers()->continue_task(&tsk);
3574   }
3575   // If the task was aborted, _restart_addr will be non-NULL
3576   assert(tsk.completed() || _restart_addr != NULL, "Inconsistency");
3577   while (_restart_addr != NULL) {
3578     // XXX For now we do not make use of ABORTED state and have not
3579     // yet implemented the right abort semantics (even in the original
3580     // single-threaded CMS case). That needs some more investigation
3581     // and is deferred for now; see CR# TBF. 07252005YSR. XXX
3582     assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency");
3583     // If _restart_addr is non-NULL, a marking stack overflow
3584     // occurred; we need to do a fresh marking iteration from the
3585     // indicated restart address.
3586     if (_foregroundGCIsActive) {
3587       // We may be running into repeated stack overflows, having
3588       // reached the limit of the stack size, while making very
3589       // slow forward progress. It may be best to bail out and
3590       // let the foreground collector do its job.
3591       // Clear _restart_addr, so that foreground GC
3592       // works from scratch. This avoids the headache of
3593       // a "rescan" which would otherwise be needed because
3594       // of the dirty mod union table & card table.
3595       _restart_addr = NULL;
3596       return false;
3597     }
3598     // Adjust the task to restart from _restart_addr
3599     tsk.reset(_restart_addr);
3600     cms_space ->initialize_sequential_subtasks_for_marking(num_workers,
3601                   _restart_addr);
3602     _restart_addr = NULL;
3603     // Get the workers going again
3604     conc_workers()->start_task(&tsk);
3605     while (tsk.yielded()) {
3606       tsk.coordinator_yield();
3607       conc_workers()->continue_task(&tsk);
3608     }
3609   }
3610   assert(tsk.completed(), "Inconsistency");
3611   assert(tsk.result() == true, "Inconsistency");
3612   return true;
3613 }
3614 
3615 bool CMSCollector::do_marking_st() {
3616   ResourceMark rm;
3617   HandleMark   hm;
3618 
3619   // Temporarily make refs discovery single threaded (non-MT)
3620   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
3621   MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
3622     &_markStack, CMSYield);
3623   // the last argument to iterate indicates whether the iteration
3624   // should be incremental with periodic yields.
3625   _markBitMap.iterate(&markFromRootsClosure);
3626   // If _restart_addr is non-NULL, a marking stack overflow
3627   // occurred; we need to do a fresh iteration from the
3628   // indicated restart address.
3629   while (_restart_addr != NULL) {
3630     if (_foregroundGCIsActive) {
3631       // We may be running into repeated stack overflows, having
3632       // reached the limit of the stack size, while making very
3633       // slow forward progress. It may be best to bail out and
3634       // let the foreground collector do its job.
3635       // Clear _restart_addr, so that foreground GC
3636       // works from scratch. This avoids the headache of
3637       // a "rescan" which would otherwise be needed because
3638       // of the dirty mod union table & card table.
3639       _restart_addr = NULL;
3640       return false;  // indicating failure to complete marking
3641     }
3642     // Deal with stack overflow:
3643     // we restart marking from _restart_addr
3644     HeapWord* ra = _restart_addr;
3645     markFromRootsClosure.reset(ra);
3646     _restart_addr = NULL;
3647     _markBitMap.iterate(&markFromRootsClosure, ra, _span.end());
3648   }
3649   return true;
3650 }
3651 
3652 void CMSCollector::preclean() {
3653   check_correct_thread_executing();
3654   assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
3655   verify_work_stacks_empty();
3656   verify_overflow_empty();
3657   _abort_preclean = false;
3658   if (CMSPrecleaningEnabled) {
3659     if (!CMSEdenChunksRecordAlways) {
3660       _eden_chunk_index = 0;
3661     }
3662     size_t used = get_eden_used();
3663     size_t capacity = get_eden_capacity();
3664     // Don't start sampling unless we will get sufficiently
3665     // many samples.
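    // For example, assuming the default CMSScheduleRemarkSamplingRatio of 5
    // and CMSScheduleRemarkEdenPenetration of 50, sampling is started only
    // while eden is below ((capacity / 5) / 100) * 50, i.e. 10% full.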
3666     if (used < (((capacity / CMSScheduleRemarkSamplingRatio) / 100)
3667                 * CMSScheduleRemarkEdenPenetration)) {
3668       _start_sampling = true;
3669     } else {
3670       _start_sampling = false;
3671     }
3672     GCTraceCPUTime tcpu;
3673     CMSPhaseAccounting pa(this, "Concurrent Preclean");
3674     preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
3675   }
3676   CMSTokenSync x(true); // is cms thread
3677   if (CMSPrecleaningEnabled) {
3678     sample_eden();
3679     _collectorState = AbortablePreclean;
3680   } else {
3681     _collectorState = FinalMarking;
3682   }
3683   verify_work_stacks_empty();
3684   verify_overflow_empty();
3685 }
3686 
3687 // Try and schedule the remark such that young gen
3688 // occupancy is CMSScheduleRemarkEdenPenetration %.
3689 void CMSCollector::abortable_preclean() {
3690   check_correct_thread_executing();
3691   assert(CMSPrecleaningEnabled,  "Inconsistent control state");
3692   assert(_collectorState == AbortablePreclean, "Inconsistent control state");
3693 
3694   // If Eden's current occupancy is below this threshold,
3695   // immediately schedule the remark; else preclean
3696   // past the next scavenge in an effort to
3697   // schedule the pause as described above. By choosing
3698   // CMSScheduleRemarkEdenSizeThreshold >= max eden size
3699   // we will never do an actual abortable preclean cycle.
3700   if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
3701     GCTraceCPUTime tcpu;
3702     CMSPhaseAccounting pa(this, "Concurrent Abortable Preclean");
3703     // We need more smarts in the abortable preclean
3704     // loop below to deal with cases where allocation
3705     // in young gen is very very slow, and our precleaning
3706     // is running a losing race against a horde of
3707     // mutators intent on flooding us with CMS updates
3708     // (dirty cards).
3709     // One, admittedly dumb, strategy is to give up
3710     // after a certain number of abortable precleaning loops
3711     // or after a certain maximum time. We want to make
3712     // this smarter in the next iteration.
3713     // XXX FIX ME!!! YSR
3714     size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
3715     while (!(should_abort_preclean() ||
3716              ConcurrentMarkSweepThread::cmst()->should_terminate())) {
3717       workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
3718       cumworkdone += workdone;
3719       loops++;
3720       // Voluntarily terminate abortable preclean phase if we have
3721       // been at it for too long.
3722       if ((CMSMaxAbortablePrecleanLoops != 0) &&
3723           loops >= CMSMaxAbortablePrecleanLoops) {
3724         log_debug(gc)(" CMS: abort preclean due to loops ");
3725         break;
3726       }
3727       if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) {
3728         log_debug(gc)(" CMS: abort preclean due to time ");
3729         break;
3730       }
3731       // If we are doing little work each iteration, we should
3732       // take a short break.
3733       if (workdone < CMSAbortablePrecleanMinWorkPerIteration) {
3734         // Sleep for some time, waiting for work to accumulate
3735         stopTimer();
3736         cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis);
3737         startTimer();
3738         waited++;
3739       }
3740     }
3741     log_trace(gc)(" [" SIZE_FORMAT " iterations, " SIZE_FORMAT " waits, " SIZE_FORMAT " cards] ",
3742                                loops, waited, cumworkdone);
3743   }
3744   CMSTokenSync x(true); // is cms thread
3745   if (_collectorState != Idling) {
3746     assert(_collectorState == AbortablePreclean,
3747            "Spontaneous state transition?");
3748     _collectorState = FinalMarking;
3749   } // Else, a foreground collection completed this CMS cycle.
3750   return;
3751 }
3752 
3753 // Respond to an Eden sampling opportunity
3754 void CMSCollector::sample_eden() {
3755   // Make sure a young gc cannot sneak in between our
3756   // reading and recording of a sample.
3757   assert(Thread::current()->is_ConcurrentGC_thread(),
3758          "Only the cms thread may collect Eden samples");
3759   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
3760          "Should collect samples while holding CMS token");
3761   if (!_start_sampling) {
3762     return;
3763   }
3764   // When CMSEdenChunksRecordAlways is true, the eden chunk array
3765   // is populated by the young generation.
3766   if (_eden_chunk_array != NULL && !CMSEdenChunksRecordAlways) {
3767     if (_eden_chunk_index < _eden_chunk_capacity) {
3768       _eden_chunk_array[_eden_chunk_index] = *_top_addr;   // take sample
3769       assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
3770              "Unexpected state of Eden");
3771       // We'd like to check that what we just sampled is an oop-start address;
3772       // however, we cannot do that here since the object may not yet have been
3773       // initialized. So we'll instead do the check when we _use_ this sample
3774       // later.
3775       if (_eden_chunk_index == 0 ||
3776           (pointer_delta(_eden_chunk_array[_eden_chunk_index],
3777                          _eden_chunk_array[_eden_chunk_index-1])
3778            >= CMSSamplingGrain)) {
3779         _eden_chunk_index++;  // commit sample
3780       }
3781     }
3782   }
3783   if ((_collectorState == AbortablePreclean) && !_abort_preclean) {
3784     size_t used = get_eden_used();
3785     size_t capacity = get_eden_capacity();
3786     assert(used <= capacity, "Unexpected state of Eden");
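    // Abort the (abortable) preclean -- and thereby schedule the remark --
    // once eden occupancy exceeds CMSScheduleRemarkEdenPenetration percent
    // of capacity, e.g. half full with the default value of 50.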
3787     if (used >  (capacity/100 * CMSScheduleRemarkEdenPenetration)) {
3788       _abort_preclean = true;
3789     }
3790   }
3791 }
3792 
3793 size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
3794   assert(_collectorState == Precleaning ||
3795          _collectorState == AbortablePreclean, "incorrect state");
3796   ResourceMark rm;
3797   HandleMark   hm;
3798 
3799   // Precleaning is currently not MT but the reference processor
3800   // may be set for MT.  Disable it temporarily here.
3801   ReferenceProcessor* rp = ref_processor();
3802   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
3803 
3804   // Do one pass of scrubbing the discovered reference lists
3805   // to remove any reference objects with strongly-reachable
3806   // referents.
3807   if (clean_refs) {
3808     CMSPrecleanRefsYieldClosure yield_cl(this);
3809     assert(_span_based_discoverer.span().equals(_span), "Spans should be equal");
3810     CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
3811                                    &_markStack, true /* preclean */);
3812     CMSDrainMarkingStackClosure complete_trace(this,
3813                                    _span, &_markBitMap, &_markStack,
3814                                    &keep_alive, true /* preclean */);
3815 
3816     // We don't want this step to interfere with a young
3817     // collection because we don't want to take CPU
3818     // or memory bandwidth away from the young GC threads
3819     // (which may be as many as there are CPUs).
3820     // Note that we don't need to protect ourselves from
3821     // interference with mutators because they can't
3822     // manipulate the discovered reference lists nor affect
3823     // the computed reachability of the referents, the
3824     // only properties manipulated by the precleaning
3825     // of these reference lists.
3826     stopTimer();
3827     CMSTokenSyncWithLocks x(true /* is cms thread */,
3828                             bitMapLock());
3829     startTimer();
3830     sample_eden();
3831 
3832     // The following will yield to allow foreground
3833     // collection to proceed promptly. XXX YSR:
3834     // The code in this method may need further
3835     // tweaking for better performance and some restructuring
3836     // for cleaner interfaces.
3837     GCTimer *gc_timer = NULL; // Currently not tracing concurrent phases
3838     rp->preclean_discovered_references(
3839           rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl,
3840           gc_timer);
3841   }
3842 
3843   if (clean_survivor) {  // preclean the active survivor space(s)
3844     PushAndMarkClosure pam_cl(this, _span, ref_processor(),
3845                              &_markBitMap, &_modUnionTable,
3846                              &_markStack, true /* precleaning phase */);
3847     stopTimer();
3848     CMSTokenSyncWithLocks ts(true /* is cms thread */,
3849                              bitMapLock());
3850     startTimer();
3851     unsigned int before_count =
3852       CMSHeap::heap()->total_collections();
3853     SurvivorSpacePrecleanClosure
3854       sss_cl(this, _span, &_markBitMap, &_markStack,
3855              &pam_cl, before_count, CMSYield);
3856     _young_gen->from()->object_iterate_careful(&sss_cl);
3857     _young_gen->to()->object_iterate_careful(&sss_cl);
3858   }
3859   MarkRefsIntoAndScanClosure
3860     mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
3861              &_markStack, this, CMSYield,
3862              true /* precleaning phase */);
3863   // CAUTION: The following closure has persistent state that may need to
3864   // be reset upon a decrease in the sequence of addresses it
3865   // processes.
3866   ScanMarkedObjectsAgainCarefullyClosure
3867     smoac_cl(this, _span,
3868       &_markBitMap, &_markStack, &mrias_cl, CMSYield);
3869 
3870   // Preclean dirty cards in ModUnionTable and CardTable using
3871   // appropriate convergence criterion;
3872   // repeat CMSPrecleanIter times unless we find that
3873   // we are losing.
3874   assert(CMSPrecleanIter < 10, "CMSPrecleanIter is too large");
3875   assert(CMSPrecleanNumerator < CMSPrecleanDenominator,
3876          "Bad convergence multiplier");
3877   assert(CMSPrecleanThreshold >= 100,
3878          "Unreasonably low CMSPrecleanThreshold");
3879 
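       // numIter counts preclean passes; curNumCards and lastNumCards hold the
       // dirty-card counts of the current and previous pass; cumNumCards is the
       // running total. A pass is "losing" when curNumCards/lastNumCards exceeds
       // CMSPrecleanNumerator/CMSPrecleanDenominator (e.g. with a 2/3 ratio we
       // stop once a pass reduces the dirty-card count by less than one third).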
3880   size_t numIter, cumNumCards, lastNumCards, curNumCards;
3881   for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0;
3882        numIter < CMSPrecleanIter;
3883        numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
3884     curNumCards  = preclean_mod_union_table(_cmsGen, &smoac_cl);
3885     log_trace(gc)(" (modUnionTable: " SIZE_FORMAT " cards)", curNumCards);
3886     // Either there are very few dirty cards, so re-mark
3887     // pause will be small anyway, or our pre-cleaning isn't
3888     // that much faster than the rate at which cards are being
3889     // dirtied, so we might as well stop and re-mark since
3890     // precleaning won't improve our re-mark time by much.
3891     if (curNumCards <= CMSPrecleanThreshold ||
3892         (numIter > 0 &&
3893          (curNumCards * CMSPrecleanDenominator >
3894          lastNumCards * CMSPrecleanNumerator))) {
3895       numIter++;
3896       cumNumCards += curNumCards;
3897       break;
3898     }
3899   }
3900 
3901   preclean_cld(&mrias_cl, _cmsGen->freelistLock());
3902 
3903   curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
3904   cumNumCards += curNumCards;
3905   log_trace(gc)(" (cardTable: " SIZE_FORMAT " cards, re-scanned " SIZE_FORMAT " cards, " SIZE_FORMAT " iterations)",
3906                              curNumCards, cumNumCards, numIter);
3907   return cumNumCards;   // as a measure of useful work done
3908 }
3909 
3910 // PRECLEANING NOTES:
3911 // Precleaning involves:
3912 // . reading the bits of the modUnionTable and clearing the set bits.
3913 // . For the cards corresponding to the set bits, we scan the
3914 //   objects on those cards. This means we need the free_list_lock
3915 //   so that we can safely iterate over the CMS space when scanning
3916 //   for oops.
3917 // . When we scan the objects, we'll be both reading and setting
3918 //   marks in the marking bit map, so we'll need the marking bit map.
3919 // . For protecting _collector_state transitions, we take the CGC_lock.
3920 //   Note that any races in the reading of card table entries by the
3921 //   CMS thread on the one hand and the clearing of those entries by the
3922 //   VM thread or the setting of those entries by the mutator threads on the
3923 //   other are quite benign. However, for efficiency it makes sense to keep
3924 //   the VM thread from racing with the CMS thread while the latter is
3925 //   transferring dirty card info to the modUnionTable. We therefore also
3926 //   use the CGC_lock to protect the reading of the card table and the mod
3927 //   union table by the CMS thread.
3928 // . We run concurrently with mutator updates, so scanning
3929 //   needs to be done carefully  -- we should not try to scan
3930 //   potentially uninitialized objects.
3931 //
3932 // Locking strategy: While holding the CGC_lock, we scan over and
3933 // reset a maximal dirty range of the mod union / card tables, then lock
3934 // the free_list_lock and bitmap lock to do a full marking, then
3935 // release these locks; and repeat the cycle. This allows for a
3936 // certain amount of fairness in the sharing of these locks between
3937 // the CMS collector on the one hand, and the VM thread and the
3938 // mutators on the other.
3939 
3940 // NOTE: preclean_mod_union_table() and preclean_card_table()
3941 // further below are largely identical; if you need to modify
3942 // one of these methods, please check the other method too.
3943 
3944 size_t CMSCollector::preclean_mod_union_table(
3945   ConcurrentMarkSweepGeneration* old_gen,
3946   ScanMarkedObjectsAgainCarefullyClosure* cl) {
3947   verify_work_stacks_empty();
3948   verify_overflow_empty();
3949 
3950   // strategy: starting with the first card, accumulate contiguous
3951   // ranges of dirty cards; clear these cards, then scan the region
3952   // covered by these cards.
3953 
3954   // Since all of the MUT is committed ahead, we can just use
3955   // that, in case the generations expand while we are precleaning.
3956   // It might also be fine to just use the committed part of the
3957   // generation, but we might potentially miss cards when the
3958   // generation is rapidly expanding while we are in the midst
3959   // of precleaning.
3960   HeapWord* startAddr = old_gen->reserved().start();
3961   HeapWord* endAddr   = old_gen->reserved().end();
3962 
3963   cl->setFreelistLock(old_gen->freelistLock());   // needed for yielding
3964 
3965   size_t numDirtyCards, cumNumDirtyCards;
3966   HeapWord *nextAddr, *lastAddr;
3967   for (cumNumDirtyCards = numDirtyCards = 0,
3968        nextAddr = lastAddr = startAddr;
3969        nextAddr < endAddr;
3970        nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
3971 
3972     ResourceMark rm;
3973     HandleMark   hm;
3974 
3975     MemRegion dirtyRegion;
3976     {
3977       stopTimer();
3978       // Potential yield point
3979       CMSTokenSync ts(true);
3980       startTimer();
3981       sample_eden();
3982       // Get dirty region starting at nextAddr (inclusive),
3983       // simultaneously clearing it.
3984       dirtyRegion =
3985         _modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr);
3986       assert(dirtyRegion.start() >= nextAddr,
3987              "returned region inconsistent?");
3988     }
3989     // Remember where the next search should begin.
3990     // The returned region (if non-empty) is a right open interval,
3991     // so lastAddr is obtained from the right end of that
3992     // interval.
3993     lastAddr = dirtyRegion.end();
3994     // Should do something more transparent and less hacky XXX
3995     numDirtyCards =
3996       _modUnionTable.heapWordDiffToOffsetDiff(dirtyRegion.word_size());
3997 
3998     // We'll scan the cards in the dirty region (with periodic
3999     // yields for foreground GC as needed).
4000     if (!dirtyRegion.is_empty()) {
4001       assert(numDirtyCards > 0, "consistency check");
4002       HeapWord* stop_point = NULL;
4003       stopTimer();
4004       // Potential yield point
4005       CMSTokenSyncWithLocks ts(true, old_gen->freelistLock(),
4006                                bitMapLock());
4007       startTimer();
4008       {
4009         verify_work_stacks_empty();
4010         verify_overflow_empty();
4011         sample_eden();
4012         stop_point =
4013           old_gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4014       }
4015       if (stop_point != NULL) {
4016         // The careful iteration stopped early either because it found an
4017         // uninitialized object, or because we were in the midst of an
4018         // "abortable preclean", which should now be aborted. Redirty
4019         // the bits corresponding to the partially-scanned or unscanned
4020         // cards. We'll either restart at the next block boundary or
4021         // abort the preclean.
4022         assert((_collectorState == AbortablePreclean && should_abort_preclean()),
4023                "Should only be AbortablePreclean.");
4024         _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end()));
4025         if (should_abort_preclean()) {
4026           break; // out of preclean loop
4027         } else {
4028           // Compute the next address at which preclean should pick up;
4029           // might need bitMapLock in order to read P-bits.
4030           lastAddr = next_card_start_after_block(stop_point);
4031         }
4032       }
4033     } else {
4034       assert(lastAddr == endAddr, "consistency check");
4035       assert(numDirtyCards == 0, "consistency check");
4036       break;
4037     }
4038   }
4039   verify_work_stacks_empty();
4040   verify_overflow_empty();
4041   return cumNumDirtyCards;
4042 }
4043 
4044 // NOTE: preclean_mod_union_table() above and preclean_card_table()
4045 // below are largely identical; if you need to modify
4046 // one of these methods, please check the other method too.
4047 
4048 size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* old_gen,
4049   ScanMarkedObjectsAgainCarefullyClosure* cl) {
4050   // strategy: it's similar to preclean_mod_union_table above, in that
4051   // we accumulate contiguous ranges of dirty cards, mark these cards
4052   // precleaned, then scan the region covered by these cards.
4053   HeapWord* endAddr   = (HeapWord*)(old_gen->_virtual_space.high());
4054   HeapWord* startAddr = (HeapWord*)(old_gen->_virtual_space.low());
4055 
4056   cl->setFreelistLock(old_gen->freelistLock());   // needed for yielding
4057 
4058   size_t numDirtyCards, cumNumDirtyCards;
4059   HeapWord *lastAddr, *nextAddr;
4060 
4061   for (cumNumDirtyCards = numDirtyCards = 0,
4062        nextAddr = lastAddr = startAddr;
4063        nextAddr < endAddr;
4064        nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4065 
4066     ResourceMark rm;
4067     HandleMark   hm;
4068 
4069     MemRegion dirtyRegion;
4070     {
4071       // See comments in "Precleaning notes" above on why we
4072       // do this locking. XXX Could the locking overheads be
4073       // too high when dirty cards are sparse? [I don't think so.]
4074       stopTimer();
4075       CMSTokenSync x(true); // is cms thread
4076       startTimer();
4077       sample_eden();
4078       // Get and clear dirty region from card table
4079       dirtyRegion = _ct->dirty_card_range_after_reset(MemRegion(nextAddr, endAddr),
4080                                                       true,
4081                                                       CardTable::precleaned_card_val());
4082 
4083       assert(dirtyRegion.start() >= nextAddr,
4084              "returned region inconsistent?");
4085     }
4086     lastAddr = dirtyRegion.end();
4087     numDirtyCards =
4088       dirtyRegion.word_size()/CardTable::card_size_in_words;
4089 
4090     if (!dirtyRegion.is_empty()) {
4091       stopTimer();
4092       CMSTokenSyncWithLocks ts(true, old_gen->freelistLock(), bitMapLock());
4093       startTimer();
4094       sample_eden();
4095       verify_work_stacks_empty();
4096       verify_overflow_empty();
4097       HeapWord* stop_point =
4098         old_gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4099       if (stop_point != NULL) {
4100         assert((_collectorState == AbortablePreclean && should_abort_preclean()),
4101                "Should only be AbortablePreclean.");
4102         _ct->invalidate(MemRegion(stop_point, dirtyRegion.end()));
4103         if (should_abort_preclean()) {
4104           break; // out of preclean loop
4105         } else {
4106           // Compute the next address at which preclean should pick up.
4107           lastAddr = next_card_start_after_block(stop_point);
4108         }
4109       }
4110     } else {
4111       break;
4112     }
4113   }
4114   verify_work_stacks_empty();
4115   verify_overflow_empty();
4116   return cumNumDirtyCards;
4117 }
4118 
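     // Analogue of dirty-card precleaning for class loader data: apply the
     // marking closure only to CLDs whose accumulated-modified-oops flag is
     // set, clearing that flag as we go.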
4119 class PrecleanCLDClosure : public CLDClosure {
4120   MetadataVisitingOopsInGenClosure* _cm_closure;
4121  public:
4122   PrecleanCLDClosure(MetadataVisitingOopsInGenClosure* oop_closure) : _cm_closure(oop_closure) {}
4123   void do_cld(ClassLoaderData* cld) {
4124     if (cld->has_accumulated_modified_oops()) {
4125       cld->clear_accumulated_modified_oops();
4126 
4127       _cm_closure->do_cld(cld);
4128     }
4129   }
4130 };
4131 
4132 // The freelist lock is needed to prevent asserts; is it really needed?
4133 void CMSCollector::preclean_cld(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock) {
4134   // Needed to walk CLDG
4135   MutexLocker ml(ClassLoaderDataGraph_lock);
4136 
4137   cl->set_freelistLock(freelistLock);
4138 
4139   CMSTokenSyncWithLocks ts(true, freelistLock, bitMapLock());
4140 
4141   // SSS: Add equivalent to ScanMarkedObjectsAgainCarefullyClosure::do_yield_check and should_abort_preclean?
4142   // SSS: We should probably check if precleaning should be aborted, at suitable intervals?
4143   PrecleanCLDClosure preclean_closure(cl);
4144   ClassLoaderDataGraph::cld_do(&preclean_closure);
4145 
4146   verify_work_stacks_empty();
4147   verify_overflow_empty();
4148 }
4149 
4150 void CMSCollector::checkpointRootsFinal() {
4151   assert(_collectorState == FinalMarking, "incorrect state transition?");
4152   check_correct_thread_executing();
4153   // world is stopped at this checkpoint
4154   assert(SafepointSynchronize::is_at_safepoint(),
4155          "world should be stopped");
4156   TraceCMSMemoryManagerStats tms(_collectorState, CMSHeap::heap()->gc_cause());
4157 
4158   verify_work_stacks_empty();
4159   verify_overflow_empty();
4160 
4161   log_debug(gc)("YG occupancy: " SIZE_FORMAT " K (" SIZE_FORMAT " K)",
4162                 _young_gen->used() / K, _young_gen->capacity() / K);
4163   {
4164     if (CMSScavengeBeforeRemark) {
4165       CMSHeap* heap = CMSHeap::heap();
4166       // Temporarily set flag to false, GCH->do_collection will
4167       // expect it to be false and set to true
4168       FlagSetting fl(heap->_is_gc_active, false);
4169 
4170       heap->do_collection(true,                      // full (i.e. force, see below)
4171                           false,                     // !clear_all_soft_refs
4172                           0,                         // size
4173                           false,                     // is_tlab
4174                           GenCollectedHeap::YoungGen // type
4175         );
4176     }
4177     FreelistLocker x(this);
4178     MutexLockerEx y(bitMapLock(),
4179                     Mutex::_no_safepoint_check_flag);
4180     checkpointRootsFinalWork();
4181   }
4182   verify_work_stacks_empty();
4183   verify_overflow_empty();
4184 }
4185 
4186 void CMSCollector::checkpointRootsFinalWork() {
4187   GCTraceTime(Trace, gc, phases) tm("checkpointRootsFinalWork", _gc_timer_cm);
4188 
4189   assert(haveFreelistLocks(), "must have free list locks");
4190   assert_lock_strong(bitMapLock());
4191 
4192   ResourceMark rm;
4193   HandleMark   hm;
4194 
4195   CMSHeap* heap = CMSHeap::heap();
4196 
4197   if (should_unload_classes()) {
4198     CodeCache::gc_prologue();
4199   }
4200   assert(haveFreelistLocks(), "must have free list locks");
4201   assert_lock_strong(bitMapLock());
4202 
4203   // We might assume that we need not fill TLAB's when
4204   // CMSScavengeBeforeRemark is set, because we may have just done
4205   // a scavenge which would have filled all TLAB's -- and besides
4206   // Eden would be empty. This however may not always be the case --
4207   // for instance although we asked for a scavenge, it may not have
4208   // happened because of a JNI critical section. We probably need
4209   // a policy for deciding whether we can in that case wait until
4210   // the critical section releases and then do the remark following
4211   // the scavenge, and skip it here. In the absence of that policy,
4212   // or of an indication of whether the scavenge did indeed occur,
4213   // we cannot rely on TLAB's having been filled and must do
4214   // so here just in case a scavenge did not happen.
4215   heap->ensure_parsability(false);  // fill TLAB's, but no need to retire them
4216   // Update the saved marks which may affect the root scans.
4217   heap->save_marks();
4218 
4219   print_eden_and_survivor_chunk_arrays();
4220 
4221   {
4222 #if COMPILER2_OR_JVMCI
4223     DerivedPointerTableDeactivate dpt_deact;
4224 #endif
4225 
4226     // Note on the role of the mod union table:
4227     // Since the marker in "markFromRoots" marks concurrently with
4228     // mutators, it is possible for some reachable objects not to have been
4229 //     scanned. For instance, the only reference to an object A may have been
4230     // placed in object B after the marker scanned B. Unless B is rescanned,
4231     // A would be collected. Such updates to references in marked objects
4232     // are detected via the mod union table which is the set of all cards
4233     // dirtied since the first checkpoint in this GC cycle and prior to
4234     // the most recent young generation GC, minus those cleaned up by the
4235     // concurrent precleaning.
4236     if (CMSParallelRemarkEnabled) {
4237       GCTraceTime(Debug, gc, phases) t("Rescan (parallel)", _gc_timer_cm);
4238       do_remark_parallel();
4239     } else {
4240       GCTraceTime(Debug, gc, phases) t("Rescan (non-parallel)", _gc_timer_cm);
4241       do_remark_non_parallel();
4242     }
4243   }
4244   verify_work_stacks_empty();
4245   verify_overflow_empty();
4246 
4247   {
4248     GCTraceTime(Trace, gc, phases) ts("refProcessingWork", _gc_timer_cm);
4249     refProcessingWork();
4250   }
4251   verify_work_stacks_empty();
4252   verify_overflow_empty();
4253 
4254   if (should_unload_classes()) {
4255     CodeCache::gc_epilogue();
4256   }
4257   JvmtiExport::gc_epilogue();
4258 
4259   // If we encountered any (marking stack / work queue) overflow
4260   // events during the current CMS cycle, take appropriate
4261   // remedial measures, where possible, so as to try and avoid
4262   // recurrence of that condition.
4263   assert(_markStack.isEmpty(), "No grey objects");
4264   size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
4265                      _ser_kac_ovflw        + _ser_kac_preclean_ovflw;
4266   if (ser_ovflw > 0) {
4267     log_trace(gc)("Marking stack overflow (benign) (pmc_pc=" SIZE_FORMAT ", pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT ", kac_preclean=" SIZE_FORMAT ")",
4268                          _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw, _ser_kac_ovflw, _ser_kac_preclean_ovflw);
4269     _markStack.expand();
4270     _ser_pmc_remark_ovflw = 0;
4271     _ser_pmc_preclean_ovflw = 0;
4272     _ser_kac_preclean_ovflw = 0;
4273     _ser_kac_ovflw = 0;
4274   }
4275   if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
4276      log_trace(gc)("Work queue overflow (benign) (pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT ")",
4277                           _par_pmc_remark_ovflw, _par_kac_ovflw);
4278      _par_pmc_remark_ovflw = 0;
4279     _par_kac_ovflw = 0;
4280   }
4281    if (_markStack._hit_limit > 0) {
4282      log_trace(gc)(" (benign) Hit max stack size limit (" SIZE_FORMAT ")",
4283                           _markStack._hit_limit);
4284    }
4285    if (_markStack._failed_double > 0) {
4286      log_trace(gc)(" (benign) Failed stack doubling (" SIZE_FORMAT "), current capacity " SIZE_FORMAT,
4287                           _markStack._failed_double, _markStack.capacity());
4288    }
4289   _markStack._hit_limit = 0;
4290   _markStack._failed_double = 0;
4291 
4292   if ((VerifyAfterGC || VerifyDuringGC) &&
4293       CMSHeap::heap()->total_collections() >= VerifyGCStartAt) {
4294     verify_after_remark();
4295   }
4296 
4297   _gc_tracer_cm->report_object_count_after_gc(&_is_alive_closure);
4298 
4299   // Change under the freelistLocks.
4300   _collectorState = Sweeping;
4301   // Call isAllClear() under bitMapLock
4302   assert(_modUnionTable.isAllClear(),
4303       "Should be clear by end of the final marking");
4304   assert(_ct->cld_rem_set()->mod_union_is_clear(),
4305       "Should be clear by end of the final marking");
4306 }
4307 
4308 void CMSParInitialMarkTask::work(uint worker_id) {
4309   elapsedTimer _timer;
4310   ResourceMark rm;
4311   HandleMark   hm;
4312 
4313   // ---------- scan from roots --------------
4314   _timer.start();
4315   CMSHeap* heap = CMSHeap::heap();
4316   ParMarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
4317 
4318   // ---------- young gen roots --------------
4319   {
4320     work_on_young_gen_roots(&par_mri_cl);
4321     _timer.stop();
4322     log_trace(gc, task)("Finished young gen initial mark scan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4323   }
4324 
4325   // ---------- remaining roots --------------
4326   _timer.reset();
4327   _timer.start();
4328 
4329   CLDToOopClosure cld_closure(&par_mri_cl, ClassLoaderData::_claim_strong);
4330 
4331   heap->cms_process_roots(_strong_roots_scope,
4332                           false,     // yg was scanned above
4333                           GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
4334                           _collector->should_unload_classes(),
4335                           &par_mri_cl,
4336                           &cld_closure,
4337                           &_par_state_string);
4338 
4339   assert(_collector->should_unload_classes()
4340          || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
4341          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
4342   _timer.stop();
4343   log_trace(gc, task)("Finished remaining root initial mark scan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4344 }
4345 
4346 // Parallel remark task
4347 class CMSParRemarkTask: public CMSParMarkTask {
4348   CompactibleFreeListSpace* _cms_space;
4349 
4350   // The per-thread work queues, available here for stealing.
4351   OopTaskQueueSet*       _task_queues;
4352   TaskTerminator         _term;
4353   StrongRootsScope*      _strong_roots_scope;
4354 
4355  public:
4356   // A value of 0 passed to n_workers will cause the number of
4357   // workers to be taken from the active workers in the work gang.
4358   CMSParRemarkTask(CMSCollector* collector,
4359                    CompactibleFreeListSpace* cms_space,
4360                    uint n_workers, WorkGang* workers,
4361                    OopTaskQueueSet* task_queues,
4362                    StrongRootsScope* strong_roots_scope):
4363     CMSParMarkTask("Rescan roots and grey objects in parallel",
4364                    collector, n_workers),
4365     _cms_space(cms_space),
4366     _task_queues(task_queues),
4367     _term(n_workers, task_queues),
4368     _strong_roots_scope(strong_roots_scope) { }
4369 
4370   OopTaskQueueSet* task_queues() { return _task_queues; }
4371 
4372   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
4373 
4374   ParallelTaskTerminator* terminator() { return _term.terminator(); }
4375   uint n_workers() { return _n_workers; }
4376 
4377   void work(uint worker_id);
4378 
4379  private:
4380   // ... of dirty cards in old space
4381   void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
4382                                   ParMarkRefsIntoAndScanClosure* cl);
4383 
4384   // ... work stealing for the above
4385   void do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl);
4386 };
4387 
4388 class RemarkCLDClosure : public CLDClosure {
4389   CLDToOopClosure _cm_closure;
4390  public:
4391   RemarkCLDClosure(OopClosure* oop_closure) : _cm_closure(oop_closure, ClassLoaderData::_claim_strong) {}
4392   void do_cld(ClassLoaderData* cld) {
4393     // Check if we have modified any oops in the CLD during the concurrent marking.
4394     if (cld->has_accumulated_modified_oops()) {
4395       cld->clear_accumulated_modified_oops();
4396 
4397     // We could have transferred the current modified marks to the accumulated marks,
4398       // like we do with the Card Table to Mod Union Table. But it's not really necessary.
4399     } else if (cld->has_modified_oops()) {
4400       // Don't clear anything, this info is needed by the next young collection.
4401     } else {
4402       // No modified oops in the ClassLoaderData.
4403       return;
4404     }
4405 
4406     // The CLD has modified oops; we need to scan it.
4407     _cm_closure.do_cld(cld);
4408   }
4409 };
4410 
4411 void CMSParMarkTask::work_on_young_gen_roots(OopsInGenClosure* cl) {
4412   ParNewGeneration* young_gen = _collector->_young_gen;
4413   ContiguousSpace* eden_space = young_gen->eden();
4414   ContiguousSpace* from_space = young_gen->from();
4415   ContiguousSpace* to_space   = young_gen->to();
4416 
4417   HeapWord** eca = _collector->_eden_chunk_array;
4418   size_t     ect = _collector->_eden_chunk_index;
4419   HeapWord** sca = _collector->_survivor_chunk_array;
4420   size_t     sct = _collector->_survivor_chunk_index;
4421 
4422   assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
4423   assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
4424 
4425   do_young_space_rescan(cl, to_space, NULL, 0);
4426   do_young_space_rescan(cl, from_space, sca, sct);
4427   do_young_space_rescan(cl, eden_space, eca, ect);
4428 }
4429 
4430 // work_queue(i) is passed to the closure
4431 // ParMarkRefsIntoAndScanClosure.  The "i" parameter
4432 // also is passed to do_dirty_card_rescan_tasks() and to
4433 // do_work_steal() to select the i-th task_queue.
4434 
4435 void CMSParRemarkTask::work(uint worker_id) {
4436   elapsedTimer _timer;
4437   ResourceMark rm;
4438   HandleMark   hm;
4439 
4440   // ---------- rescan from roots --------------
4441   _timer.start();
4442   CMSHeap* heap = CMSHeap::heap();
4443   ParMarkRefsIntoAndScanClosure par_mrias_cl(_collector,
4444     _collector->_span, _collector->ref_processor(),
4445     &(_collector->_markBitMap),
4446     work_queue(worker_id));
4447 
4448   // Rescan young gen roots first since these are likely
4449   // coarsely partitioned and may, on that account, constitute
4450   // the critical path; thus, it's best to start off that
4451   // work first.
4452   // ---------- young gen roots --------------
4453   {
4454     work_on_young_gen_roots(&par_mrias_cl);
4455     _timer.stop();
4456     log_trace(gc, task)("Finished young gen rescan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4457   }
4458 
4459   // ---------- remaining roots --------------
4460   _timer.reset();
4461   _timer.start();
4462   heap->cms_process_roots(_strong_roots_scope,
4463                           false,     // yg was scanned above
4464                           GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
4465                           _collector->should_unload_classes(),
4466                           &par_mrias_cl,
4467                           NULL,     // The dirty klasses will be handled below
4468                           &_par_state_string);
4469 
4470   assert(_collector->should_unload_classes()
4471          || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
4472          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
4473   _timer.stop();
4474   log_trace(gc, task)("Finished remaining root rescan work in %dth thread: %3.3f sec",  worker_id, _timer.seconds());
4475 
4476   // ---------- unhandled CLD scanning ----------
4477   if (worker_id == 0) { // Single threaded at the moment.
4478     _timer.reset();
4479     _timer.start();
4480 
4481     // Scan all new class loader data objects and new dependencies that were
4482     // introduced during concurrent marking.
4483     ResourceMark rm;
4484     GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
4485     for (int i = 0; i < array->length(); i++) {
4486       Devirtualizer::do_cld(&par_mrias_cl, array->at(i));
4487     }
4488 
4489     // We don't need to keep track of new CLDs anymore.
4490     ClassLoaderDataGraph::remember_new_clds(false);
4491 
4492     _timer.stop();
4493     log_trace(gc, task)("Finished unhandled CLD scanning work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4494   }
4495 
4496   // We might have added oops to ClassLoaderData::_handles during the
4497   // concurrent marking phase. These oops do not always point to newly allocated objects
4498   // that are guaranteed to be kept alive.  Hence,
4499   // we do have to revisit the _handles block during the remark phase.
4500 
4501   // ---------- dirty CLD scanning ----------
4502   if (worker_id == 0) { // Single threaded at the moment.
4503     _timer.reset();
4504     _timer.start();
4505 
4506     // Scan all classes that were dirtied during the concurrent marking phase.
4507     RemarkCLDClosure remark_closure(&par_mrias_cl);
4508     ClassLoaderDataGraph::cld_do(&remark_closure);
4509 
4510     _timer.stop();
4511     log_trace(gc, task)("Finished dirty CLD scanning work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4512   }
4513 
4514 
4515   // ---------- rescan dirty cards ------------
4516   _timer.reset();
4517   _timer.start();
4518 
4519   // Do the rescan tasks for each of the two spaces
4520   // (cms_space) in turn.
4521   // "worker_id" is passed to select the task_queue for "worker_id"
4522   do_dirty_card_rescan_tasks(_cms_space, worker_id, &par_mrias_cl);
4523   _timer.stop();
4524   log_trace(gc, task)("Finished dirty card rescan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4525 
4526   // ---------- steal work from other threads ...
4527   // ---------- ... and drain overflow list.
4528   _timer.reset();
4529   _timer.start();
4530   do_work_steal(worker_id, &par_mrias_cl);
4531   _timer.stop();
4532   log_trace(gc, task)("Finished work stealing in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4533 }
4534 
4535 void
4536 CMSParMarkTask::do_young_space_rescan(
4537   OopsInGenClosure* cl, ContiguousSpace* space,
4538   HeapWord** chunk_array, size_t chunk_top) {
4539   // Until all tasks completed:
4540   // . claim an unclaimed task
4541   // . compute region boundaries corresponding to task claimed
4542   //   using chunk_array
4543   // . par_oop_iterate(cl) over that region
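       // chunk_array[0, chunk_top) holds sampled object-boundary addresses that
       // split the space into chunk_top + 1 tasks: task 0 starts at bottom(),
       // task k (0 < k < chunk_top) spans [chunk_array[k-1], chunk_array[k]),
       // and the final task ends at top().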
4544 
4545   ResourceMark rm;
4546   HandleMark   hm;
4547 
4548   SequentialSubTasksDone* pst = space->par_seq_tasks();
4549 
4550   uint nth_task = 0;
4551   uint n_tasks  = pst->n_tasks();
4552 
4553   if (n_tasks > 0) {
4554     assert(pst->valid(), "Uninitialized use?");
4555     HeapWord *start, *end;
4556     while (pst->try_claim_task(/* reference */ nth_task)) {
4557       // We claimed task # nth_task; compute its boundaries.
4558       if (chunk_top == 0) {  // no samples were taken
4559         assert(nth_task == 0 && n_tasks == 1, "Can have only 1 eden task");
4560         start = space->bottom();
4561         end   = space->top();
4562       } else if (nth_task == 0) {
4563         start = space->bottom();
4564         end   = chunk_array[nth_task];
4565       } else if (nth_task < (uint)chunk_top) {
4566         assert(nth_task >= 1, "Control point invariant");
4567         start = chunk_array[nth_task - 1];
4568         end   = chunk_array[nth_task];
4569       } else {
4570         assert(nth_task == (uint)chunk_top, "Control point invariant");
4571         start = chunk_array[chunk_top - 1];
4572         end   = space->top();
4573       }
4574       MemRegion mr(start, end);
4575       // Verify that mr is in space
4576       assert(mr.is_empty() || space->used_region().contains(mr),
4577              "Should be in space");
4578       // Verify that "start" is an object boundary
4579       assert(mr.is_empty() || oopDesc::is_oop(oop(mr.start())),
4580              "Should be an oop");
4581       space->par_oop_iterate(mr, cl);
4582     }
4583     pst->all_tasks_completed();
4584   }
4585 }
4586 
4587 void
4588 CMSParRemarkTask::do_dirty_card_rescan_tasks(
4589   CompactibleFreeListSpace* sp, int i,
4590   ParMarkRefsIntoAndScanClosure* cl) {
4591   // Until all tasks completed:
4592   // . claim an unclaimed task
4593   // . compute region boundaries corresponding to task claimed
4594   // . transfer dirty bits ct->mut for that region
4595   // . apply rescanclosure to dirty mut bits for that region
4596 
4597   ResourceMark rm;
4598   HandleMark   hm;
4599 
4600   OopTaskQueue* work_q = work_queue(i);
4601   ModUnionClosure modUnionClosure(&(_collector->_modUnionTable));
4602   // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
4603   // CAUTION: This closure has state that persists across calls to
4604   // the work method dirty_range_iterate_clear() in that it has
4605   // embedded in it a (subtype of) UpwardsObjectClosure. The
4606   // use of that state in the embedded UpwardsObjectClosure instance
4607   // assumes that the cards are always iterated (even if in parallel
4608   // by several threads) in monotonically increasing order per each
4609   // thread. This is true of the implementation below which picks
4610   // card ranges (chunks) in monotonically increasing order globally
4611   // and, a-fortiori, in monotonically increasing order per thread
4612   // (the latter order being a subsequence of the former).
4613   // If the work code below is ever reorganized into a more chaotic
4614   // work-partitioning form than the current "sequential tasks"
4615   // paradigm, the use of that persistent state will have to be
4616   // revisited and modified appropriately. See also the related
4617   // bug 4756801; work on that bug should examine this code to make
4618   // sure that the changes there do not run counter to the
4619   // assumptions made here and necessary for correctness and
4620   // efficiency. Note also that this code might yield inefficient
4621   // behavior in the case of very large objects that span one or
4622   // more work chunks. Such objects would potentially be scanned
4623   // several times redundantly. Work on 4756801 should try and
4624   // address that performance anomaly if at all possible. XXX
4625   MemRegion  full_span  = _collector->_span;
4626   CMSBitMap* bm    = &(_collector->_markBitMap);     // shared
4627   MarkFromDirtyCardsClosure
4628     greyRescanClosure(_collector, full_span, // entire span of interest
4629                       sp, bm, work_q, cl);
4630 
4631   SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
4632   assert(pst->valid(), "Uninitialized use?");
4633   uint nth_task = 0;
4634   const int alignment = CardTable::card_size * BitsPerWord;
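       // Each word of the mod union table covers BitsPerWord cards, i.e.
       // card_size * BitsPerWord bytes of the heap, so chunks aligned to this
       // granularity map onto whole MUT words.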
4635   MemRegion span = sp->used_region();
4636   HeapWord* start_addr = span.start();
4637   HeapWord* end_addr = align_up(span.end(), alignment);
4638   const size_t chunk_size = sp->rescan_task_size(); // in HeapWord units
4639   assert(is_aligned(start_addr, alignment), "Check alignment");
4640   assert(is_aligned(chunk_size, alignment), "Check alignment");
4641 
4642   while (pst->try_claim_task(/* reference */ nth_task)) {
4643     // Having claimed the nth_task, compute corresponding mem-region,
4644     // which is a-fortiori aligned correctly (i.e. at a MUT boundary).
4645     // The alignment restriction ensures that we do not need any
4646     // synchronization with other gang-workers while setting or
4647     // clearing bits in this chunk of the MUT.
4648     MemRegion this_span = MemRegion(start_addr + nth_task*chunk_size,
4649                                     start_addr + (nth_task+1)*chunk_size);
4650     // The last chunk's end might be way beyond end of the
4651     // used region. In that case pull back appropriately.
4652     if (this_span.end() > end_addr) {
4653       this_span.set_end(end_addr);
4654       assert(!this_span.is_empty(), "Program logic (calculation of n_tasks)");
4655     }
4656     // Iterate over the dirty cards covering this chunk, marking them
4657     // precleaned, and setting the corresponding bits in the mod union
4658     // table. Since we have been careful to partition at Card and MUT-word
4659     // boundaries no synchronization is needed between parallel threads.
4660     _collector->_ct->dirty_card_iterate(this_span,
4661                                                  &modUnionClosure);
4662 
4663     // Having transferred these marks into the modUnionTable,
4664     // rescan the marked objects on the dirty cards in the modUnionTable.
4665     // Even if this is at a synchronous collection, the initial marking
4666     // may have been done during an asynchronous collection so there
4667     // may be dirty bits in the mod-union table.
4668     _collector->_modUnionTable.dirty_range_iterate_clear(
4669                   this_span, &greyRescanClosure);
4670     _collector->_modUnionTable.verifyNoOneBitsInRange(
4671                                  this_span.start(),
4672                                  this_span.end());
4673   }
4674   pst->all_tasks_completed();  // declare that I am done
4675 }
4676 
4677 // . see if we can share work_queues with ParNew? XXX
4678 void
4679 CMSParRemarkTask::do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl) {
4680   OopTaskQueue* work_q = work_queue(i);
4681   NOT_PRODUCT(int num_steals = 0;)
4682   oop obj_to_scan;
4683   CMSBitMap* bm = &(_collector->_markBitMap);
4684 
4685   while (true) {
4686     // Completely finish any left over work from (an) earlier round(s)
4687     cl->trim_queue(0);
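         // Take at most a quarter of the remaining local queue capacity from
         // the global overflow list, bounded by ParGCDesiredObjsFromOverflowList.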
4688     size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
4689                                          (size_t)ParGCDesiredObjsFromOverflowList);
4690     // Now check if there's any work in the overflow list
4691     // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
4692     // only affects the number of attempts made to get work from the
4693     // overflow list and does not affect the number of workers.  Just
4694     // pass ParallelGCThreads so this behavior is unchanged.
4695     if (_collector->par_take_from_overflow_list(num_from_overflow_list,
4696                                                 work_q,
4697                                                 ParallelGCThreads)) {
4698       // found something in global overflow list;
4699       // not yet ready to go stealing work from others.
4700       // We'd like to assert(work_q->size() != 0, ...)
4701       // because we just took work from the overflow list,
4702       // but of course we can't since all of that could have
4703       // been already stolen from us.
4704       // "He giveth and He taketh away."
4705       continue;
4706     }
4707     // Verify that we have no work before we resort to stealing
4708     assert(work_q->size() == 0, "Have work, shouldn't steal");
4709     // Try to steal from other queues that have work
4710     if (task_queues()->steal(i, /* reference */ obj_to_scan)) {
4711       NOT_PRODUCT(num_steals++;)
4712       assert(oopDesc::is_oop(obj_to_scan), "Oops, not an oop!");
4713       assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
4714       // Do scanning work
4715       obj_to_scan->oop_iterate(cl);
4716       // Loop around, finish this work, and try to steal some more
4717     } else if (terminator()->offer_termination()) {
4718         break;  // nirvana from the infinite cycle
4719     }
4720   }
4721   log_develop_trace(gc, task)("\t(%d: stole %d oops)", i, num_steals);
4722   assert(work_q->size() == 0 && _collector->overflow_list_is_empty(),
4723          "Else our work is not yet done");
4724 }
4725 
4726 // Record object boundaries in _eden_chunk_array by sampling the eden
4727 // top in the slow-path eden object allocation code path, if
4728 // CMSEdenChunksRecordAlways is true. If CMSEdenChunksRecordAlways is
4729 // false, we instead rely on the asynchronous sampling done by
4730 // sample_eden(), which is active during the preclean phase.
4732 void CMSCollector::sample_eden_chunk() {
4733   if (CMSEdenChunksRecordAlways && _eden_chunk_array != NULL) {
4734     if (_eden_chunk_lock->try_lock()) {
4735       // Record a sample. This is the critical section. The contents
4736       // of the _eden_chunk_array have to be non-decreasing in the
4737       // address order.
4738       _eden_chunk_array[_eden_chunk_index] = *_top_addr;
4739       assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
4740              "Unexpected state of Eden");
4741       if (_eden_chunk_index == 0 ||
4742           ((_eden_chunk_array[_eden_chunk_index] > _eden_chunk_array[_eden_chunk_index-1]) &&
4743            (pointer_delta(_eden_chunk_array[_eden_chunk_index],
4744                           _eden_chunk_array[_eden_chunk_index-1]) >= CMSSamplingGrain))) {
4745         _eden_chunk_index++;  // commit sample
4746       }
4747       _eden_chunk_lock->unlock();
4748     }
4749   }
4750 }
4751 
4752 // Return a thread-local PLAB recording array, as appropriate.
4753 void* CMSCollector::get_data_recorder(int thr_num) {
4754   if (_survivor_plab_array != NULL &&
4755       (CMSPLABRecordAlways ||
4756        (_collectorState > Marking && _collectorState < FinalMarking))) {
4757     assert(thr_num < (int)ParallelGCThreads, "thr_num is out of bounds");
4758     ChunkArray* ca = &_survivor_plab_array[thr_num];
4759     ca->reset();   // clear it so that fresh data is recorded
4760     return (void*) ca;
4761   } else {
4762     return NULL;
4763   }
4764 }
4765 
4766 // Reset all the thread-local PLAB recording arrays
4767 void CMSCollector::reset_survivor_plab_arrays() {
4768   for (uint i = 0; i < ParallelGCThreads; i++) {
4769     _survivor_plab_array[i].reset();
4770   }
4771 }
4772 
4773 // Merge the per-thread plab arrays into the global survivor chunk
4774 // array which will provide the partitioning of the survivor space
4775 // for CMS initial scan and rescan.
4776 void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv,
4777                                               int no_of_gc_threads) {
4778   assert(_survivor_plab_array  != NULL, "Error");
4779   assert(_survivor_chunk_array != NULL, "Error");
4780   assert(_collectorState == FinalMarking ||
4781          (CMSParallelInitialMarkEnabled && _collectorState == InitialMarking), "Error");
4782   for (int j = 0; j < no_of_gc_threads; j++) {
4783     _cursor[j] = 0;
4784   }
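       // This is effectively an n-way merge of the per-thread PLAB boundary
       // arrays into the single sorted _survivor_chunk_array: each round commits
       // the smallest not-yet-consumed address across all the threads.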
4785   HeapWord* top = surv->top();
4786   size_t i;
4787   for (i = 0; i < _survivor_chunk_capacity; i++) {  // all sca entries
4788     HeapWord* min_val = top;          // Higher than any PLAB address
4789     uint      min_tid = 0;            // position of min_val this round
4790     for (int j = 0; j < no_of_gc_threads; j++) {
4791       ChunkArray* cur_sca = &_survivor_plab_array[j];
4792       if (_cursor[j] == cur_sca->end()) {
4793         continue;
4794       }
4795       assert(_cursor[j] < cur_sca->end(), "ctl pt invariant");
4796       HeapWord* cur_val = cur_sca->nth(_cursor[j]);
4797       assert(surv->used_region().contains(cur_val), "Out of bounds value");
4798       if (cur_val < min_val) {
4799         min_tid = j;
4800         min_val = cur_val;
4801       } else {
4802         assert(cur_val < top, "All recorded addresses should be less");
4803       }
4804     }
4805     // At this point min_val and min_tid are respectively
4806     // the least address in _survivor_plab_array[j]->nth(_cursor[j])
4807     // and the thread (j) that witnesses that address.
4808     // We record this address in the _survivor_chunk_array[i]
4809     // and increment _cursor[min_tid] prior to the next round i.
4810     if (min_val == top) {
4811       break;
4812     }
4813     _survivor_chunk_array[i] = min_val;
4814     _cursor[min_tid]++;
4815   }
4816   // We are all done; record the size of the _survivor_chunk_array
4817   _survivor_chunk_index = i; // exclusive: [0, i)
4818   log_trace(gc, survivor)(" (Survivor:" SIZE_FORMAT " chunks) ", i);
4819   // Verify that we used up all the recorded entries
4820   #ifdef ASSERT
4821     size_t total = 0;
4822     for (int j = 0; j < no_of_gc_threads; j++) {
4823       assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");
4824       total += _cursor[j];
4825     }
4826     assert(total == _survivor_chunk_index, "Ctl Pt Invariant");
4827     // Check that the merged array is in sorted order
4828     if (total > 0) {
4829       for (size_t i = 0; i < total - 1; i++) {
4830         log_develop_trace(gc, survivor)(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
4831                                      i, p2i(_survivor_chunk_array[i]));
4832         assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
4833                "Not sorted");
4834       }
4835     }
4836   #endif // ASSERT
4837 }
4838 
4839 // Set up the space's par_seq_tasks structure for work claiming
4840 // for parallel initial scan and rescan of young gen.
4841 // See ParRescanTask where this is currently used.
4842 void
4843 CMSCollector::
4844 initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
4845   assert(n_threads > 0, "Unexpected n_threads argument");
4846 
4847   // Eden space
4848   if (!_young_gen->eden()->is_empty()) {
4849     SequentialSubTasksDone* pst = _young_gen->eden()->par_seq_tasks();
4850     assert(!pst->valid(), "Clobbering existing data?");
4851     // The sampled boundaries in [0, _eden_chunk_index) split eden into _eden_chunk_index + 1 tasks.
4852     size_t n_tasks = _eden_chunk_index + 1;
4853     assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error");
4854     // Sets the condition for completion of the subtask (how many threads
4855     // need to finish in order to be done).
4856     pst->set_n_threads(n_threads);
4857     pst->set_n_tasks((int)n_tasks);
4858   }
4859 
4860   // Merge the survivor plab arrays into _survivor_chunk_array
4861   if (_survivor_plab_array != NULL) {
4862     merge_survivor_plab_arrays(_young_gen->from(), n_threads);
4863   } else {
4864     assert(_survivor_chunk_index == 0, "Error");
4865   }
4866 
4867   // To space
4868   {
4869     SequentialSubTasksDone* pst = _young_gen->to()->par_seq_tasks();
4870     assert(!pst->valid(), "Clobbering existing data?");
4871     // Sets the condition for completion of the subtask (how many threads
4872     // need to finish in order to be done).
4873     pst->set_n_threads(n_threads);
4874     pst->set_n_tasks(1);
4875     assert(pst->valid(), "Error");
4876   }
4877 
4878   // From space
4879   {
4880     SequentialSubTasksDone* pst = _young_gen->from()->par_seq_tasks();
4881     assert(!pst->valid(), "Clobbering existing data?");
4882     size_t n_tasks = _survivor_chunk_index + 1;
4883     assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error");
4884     // Sets the condition for completion of the subtask (how many threads
4885     // need to finish in order to be done).
4886     pst->set_n_threads(n_threads);
4887     pst->set_n_tasks((int)n_tasks);
4888     assert(pst->valid(), "Error");
4889   }
4890 }
4891 
4892 // Parallel version of remark
4893 void CMSCollector::do_remark_parallel() {
4894   CMSHeap* heap = CMSHeap::heap();
4895   WorkGang* workers = heap->workers();
4896   assert(workers != NULL, "Need parallel worker threads.");
4897   // Choose to use the number of GC workers most recently set
4898   // into "active_workers".
4899   uint n_workers = workers->active_workers();
4900 
4901   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
4902 
4903   StrongRootsScope srs(n_workers);
4904 
4905   CMSParRemarkTask tsk(this, cms_space, n_workers, workers, task_queues(), &srs);
4906 
4907   // We won't be iterating over the cards in the card table updating
4908   // the younger_gen cards, so we shouldn't call the following else
4909   // the verification code as well as subsequent younger_refs_iterate
4910   // code would get confused. XXX
4911   // heap->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
4912 
4913   // The young gen rescan work will not be done as part of
4914   // process_roots (which currently doesn't know how to
4915   // parallelize such a scan), but rather will be broken up into
4916   // a set of parallel tasks (via the sampling that the [abortable]
4917   // preclean phase did of eden, plus the [two] tasks of
4918 // scanning the [two] survivor spaces). Further fine-grain
4919   // parallelization of the scanning of the survivor spaces
4920   // themselves, and of precleaning of the young gen itself
4921   // is deferred to the future.
4922   initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
4923 
4924   // The dirty card rescan work is broken up into a "sequence"
4925   // of parallel tasks (per constituent space) that are dynamically
4926   // claimed by the parallel threads.
4927   cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
4928 
4929   // It turns out that even when we're using 1 thread, doing the work in a
4930   // separate thread causes wide variance in run times.  We can't help this
4931   // in the multi-threaded case, but we special-case n=1 here to get
4932   // repeatable measurements of the 1-thread overhead of the parallel code.
4933   if (n_workers > 1) {
4934     // Make refs discovery MT-safe, if it isn't already: it may not
4935     // necessarily be so, since it's possible that we are doing
4936     // ST marking.
4937     ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true);
4938     workers->run_task(&tsk);
4939   } else {
4940     ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
4941     tsk.work(0);
4942   }
4943 
4944   // restore, single-threaded for now, any preserved marks
4945   // as a result of work_q overflow
4946   restore_preserved_marks_if_any();
4947 }
4948 
4949 // Non-parallel version of remark
4950 void CMSCollector::do_remark_non_parallel() {
4951   ResourceMark rm;
4952   HandleMark   hm;
4953   CMSHeap* heap = CMSHeap::heap();
4954   ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
4955 
4956   MarkRefsIntoAndScanClosure
4957     mrias_cl(_span, ref_processor(), &_markBitMap, NULL /* not precleaning */,
4958              &_markStack, this,
4959              false /* should_yield */, false /* not precleaning */);
4960   MarkFromDirtyCardsClosure
4961     markFromDirtyCardsClosure(this, _span,
4962                               NULL,  // space is set further below
4963                               &_markBitMap, &_markStack, &mrias_cl);
4964   {
4965     GCTraceTime(Trace, gc, phases) t("Grey Object Rescan", _gc_timer_cm);
4966     // Iterate over the dirty cards, setting the corresponding bits in the
4967     // mod union table.
4968     {
4969       ModUnionClosure modUnionClosure(&_modUnionTable);
4970       _ct->dirty_card_iterate(_cmsGen->used_region(),
4971                               &modUnionClosure);
4972     }
4973     // Having transferred these marks into the modUnionTable, we just need
4974     // to rescan the marked objects on the dirty cards in the modUnionTable.
4975     // The initial marking may have been done during an asynchronous
4976     // collection so there may be dirty bits in the mod-union table.
4977     const int alignment = CardTable::card_size * BitsPerWord;
4978     {
4979       // ... First handle dirty cards in CMS gen
4980       markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
4981       MemRegion ur = _cmsGen->used_region();
4982       HeapWord* lb = ur.start();
4983       HeapWord* ub = align_up(ur.end(), alignment);
4984       MemRegion cms_span(lb, ub);
4985       _modUnionTable.dirty_range_iterate_clear(cms_span,
4986                                                &markFromDirtyCardsClosure);
4987       verify_work_stacks_empty();
4988       log_trace(gc)(" (re-scanned " SIZE_FORMAT " dirty cards in cms gen) ", markFromDirtyCardsClosure.num_dirty_cards());
4989     }
4990   }
4991   if (VerifyDuringGC &&
4992       CMSHeap::heap()->total_collections() >= VerifyGCStartAt) {
4993     HandleMark hm;  // Discard invalid handles created during verification
4994     Universe::verify();
4995   }
4996   {
4997     GCTraceTime(Trace, gc, phases) t("Root Rescan", _gc_timer_cm);
4998 
4999     verify_work_stacks_empty();
5000 
5001     heap->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
5002     StrongRootsScope srs(1);
5003 
5004     heap->cms_process_roots(&srs,
5005                             true,  // young gen as roots
5006                             GenCollectedHeap::ScanningOption(roots_scanning_options()),
5007                             should_unload_classes(),
5008                             &mrias_cl,
5009                             NULL); // The dirty klasses will be handled below
5010 
5011     assert(should_unload_classes()
5012            || (roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
5013            "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5014   }
5015 
5016   {
5017     GCTraceTime(Trace, gc, phases) t("Visit Unhandled CLDs", _gc_timer_cm);
5018 
5019     verify_work_stacks_empty();
5020 
5021     // Scan all class loader data objects that might have been introduced
5022     // during concurrent marking.
5023     ResourceMark rm;
5024     GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
5025     for (int i = 0; i < array->length(); i++) {
5026       Devirtualizer::do_cld(&mrias_cl, array->at(i));
5027     }
5028 
5029     // We don't need to keep track of new CLDs anymore.
5030     ClassLoaderDataGraph::remember_new_clds(false);
5031 
5032     verify_work_stacks_empty();
5033   }
5034 
5035   // We might have added oops to ClassLoaderData::_handles during the
5036   // concurrent marking phase. These oops do not always point to newly allocated objects
5037   // that are guaranteed to be kept alive.  Hence,
5038   // we do have to revisit the _handles block during the remark phase.
5039   {
5040     GCTraceTime(Trace, gc, phases) t("Dirty CLD Scan", _gc_timer_cm);
5041 
5042     verify_work_stacks_empty();
5043 
5044     RemarkCLDClosure remark_closure(&mrias_cl);
5045     ClassLoaderDataGraph::cld_do(&remark_closure);
5046 
5047     verify_work_stacks_empty();
5048   }
5049 
5050   verify_work_stacks_empty();
5051   // Restore evacuated mark words, if any, used for overflow list links
5052   restore_preserved_marks_if_any();
5053 
5054   verify_overflow_empty();
5055 }
5056 
5057 ////////////////////////////////////////////////////////
5058 // Parallel Reference Processing Task Proxy Class
5059 ////////////////////////////////////////////////////////
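     // Helper base class that couples a gang task with a set of work queues
     // and a terminator sized for n_threads, so that reference processing
     // workers can steal work from one another and detect termination.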
5060 class AbstractGangTaskWOopQueues : public AbstractGangTask {
5061   OopTaskQueueSet*       _queues;
5062   TaskTerminator         _terminator;
5063  public:
5064   AbstractGangTaskWOopQueues(const char* name, OopTaskQueueSet* queues, uint n_threads) :
5065     AbstractGangTask(name), _queues(queues), _terminator(n_threads, _queues) {}
5066   ParallelTaskTerminator* terminator() { return _terminator.terminator(); }
5067   OopTaskQueueSet* queues() { return _queues; }
5068 };
5069 
5070 class CMSRefProcTaskProxy: public AbstractGangTaskWOopQueues {
5071   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
5072   CMSCollector*          _collector;
5073   CMSBitMap*             _mark_bit_map;
5074   const MemRegion        _span;
5075   ProcessTask&           _task;
5076 
5077 public:
5078   CMSRefProcTaskProxy(ProcessTask&     task,
5079                       CMSCollector*    collector,
5080                       const MemRegion& span,
5081                       CMSBitMap*       mark_bit_map,
5082                       AbstractWorkGang* workers,
5083                       OopTaskQueueSet* task_queues):
5084     AbstractGangTaskWOopQueues("Process referents by policy in parallel",
5085       task_queues,
5086       workers->active_workers()),
5087     _collector(collector),
5088     _mark_bit_map(mark_bit_map),
5089     _span(span),
5090     _task(task)
5091   {
5092     assert(_collector->_span.equals(_span) && !_span.is_empty(),
5093            "Inconsistency in _span");
5094   }
5095 
5096   OopTaskQueueSet* task_queues() { return queues(); }
5097 
5098   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
5099 
5100   void do_work_steal(int i,
5101                      CMSParDrainMarkingStackClosure* drain,
5102                      CMSParKeepAliveClosure* keep_alive);
5103 
5104   virtual void work(uint worker_id);
5105 };
5106 
5107 void CMSRefProcTaskProxy::work(uint worker_id) {
5108   ResourceMark rm;
5109   HandleMark hm;
5110   assert(_collector->_span.equals(_span), "Inconsistency in _span");
5111   CMSParKeepAliveClosure par_keep_alive(_collector, _span,
5112                                         _mark_bit_map,
5113                                         work_queue(worker_id));
5114   CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
5115                                                  _mark_bit_map,
5116                                                  work_queue(worker_id));
5117   CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map);
5118   _task.work(worker_id, is_alive_closure, par_keep_alive, par_drain_stack);
5119   if (_task.marks_oops_alive()) {
5120     do_work_steal(worker_id, &par_drain_stack, &par_keep_alive);
5121   }
5122   assert(work_queue(worker_id)->size() == 0, "work_queue should be empty");
5123   assert(_collector->_overflow_list == NULL, "non-empty _overflow_list");
5124 }
5125 
5126 CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
5127   MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue):
5128    _span(span),
5129    _work_queue(work_queue),
5130    _bit_map(bit_map),
5131    _mark_and_push(collector, span, bit_map, work_queue),
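   // Threshold at which the work queue is drained: the smaller of a quarter
   // of the queue's capacity and CMSWorkQueueDrainThreshold * ParallelGCThreads.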
5132    _low_water_mark(MIN2((work_queue->max_elems()/4),
5133                         ((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads)))
5134 { }
5135 
5136 // . see if we can share work_queues with ParNew? XXX
5137 void CMSRefProcTaskProxy::do_work_steal(int i,
5138   CMSParDrainMarkingStackClosure* drain,
5139   CMSParKeepAliveClosure* keep_alive) {
5140   OopTaskQueue* work_q = work_queue(i);
5141   NOT_PRODUCT(int num_steals = 0;)
5142   oop obj_to_scan;
5143 
5144   while (true) {
5145     // Completely finish any left over work from (an) earlier round(s)
5146     drain->trim_queue(0);
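    // Take at most a quarter of the space remaining in this worker's queue
    // from the global overflow list, capped by ParGCDesiredObjsFromOverflowList.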
5147     size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
5148                                          (size_t)ParGCDesiredObjsFromOverflowList);
5149     // Now check if there's any work in the overflow list
5150     // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
5151     // only affects the number of attempts made to get work from the
5152     // overflow list and does not affect the number of workers.  Just
5153     // pass ParallelGCThreads so this behavior is unchanged.
5154     if (_collector->par_take_from_overflow_list(num_from_overflow_list,
5155                                                 work_q,
5156                                                 ParallelGCThreads)) {
5157       // Found something in global overflow list;
5158       // not yet ready to go stealing work from others.
5159       // We'd like to assert(work_q->size() != 0, ...)
5160       // because we just took work from the overflow list,
5161       // but of course we can't, since all of that might have
5162       // been already stolen from us.
5163       continue;
5164     }
5165     // Verify that we have no work before we resort to stealing
5166     assert(work_q->size() == 0, "Have work, shouldn't steal");
5167     // Try to steal from other queues that have work
5168     if (task_queues()->steal(i, /* reference */ obj_to_scan)) {
5169       NOT_PRODUCT(num_steals++;)
5170       assert(oopDesc::is_oop(obj_to_scan), "Oops, not an oop!");
5171       assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5172       // Do scanning work
5173       obj_to_scan->oop_iterate(keep_alive);
5174       // Loop around, finish this work, and try to steal some more
5175     } else if (terminator()->offer_termination()) {
5176       break;  // nirvana from the infinite cycle
5177     }
5178   }
5179   log_develop_trace(gc, task)("\t(%d: stole %d oops)", i, num_steals);
5180 }
5181 
5182 void CMSRefProcTaskExecutor::execute(ProcessTask& task, uint ergo_workers) {
5183   CMSHeap* heap = CMSHeap::heap();
5184   WorkGang* workers = heap->workers();
5185   assert(workers != NULL, "Need parallel worker threads.");
5186   assert(workers->active_workers() == ergo_workers,
5187          "Ergonomically chosen workers (%u) must be equal to active workers (%u)",
5188          ergo_workers, workers->active_workers());
5189   CMSRefProcTaskProxy rp_task(task, &_collector,
5190                               _collector.ref_processor_span(),
5191                               _collector.markBitMap(),
5192                               workers, _collector.task_queues());
5193   workers->run_task(&rp_task, workers->active_workers());
5194 }
5195 
5196 void CMSCollector::refProcessingWork() {
5197   ResourceMark rm;
5198   HandleMark   hm;
5199 
5200   ReferenceProcessor* rp = ref_processor();
5201   assert(_span_based_discoverer.span().equals(_span), "Spans should be equal");
5202   assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
5203   // Process weak references.
5204   rp->setup_policy(false);
5205   verify_work_stacks_empty();
5206 
5207   ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->max_num_queues());
5208   {
5209     GCTraceTime(Debug, gc, phases) t("Reference Processing", _gc_timer_cm);
5210 
5211     // Setup keep_alive and complete closures.
5212     CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
5213                                             &_markStack, false /* !preclean */);
5214     CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
5215                                   _span, &_markBitMap, &_markStack,
5216                                   &cmsKeepAliveClosure, false /* !preclean */);
5217 
5218     ReferenceProcessorStats stats;
5219     if (rp->processing_is_mt()) {
5220       // Set the degree of MT here.  If the discovery is done MT, there
5221       // may have been a different number of threads doing the discovery
5222       // and a different number of discovered lists may have Ref objects.
5223       // That is OK as long as the Reference lists are balanced (see
5224       // balance_all_queues() and balance_queues()).
5225       CMSHeap* heap = CMSHeap::heap();
5226       uint active_workers = ParallelGCThreads;
5227       WorkGang* workers = heap->workers();
5228       if (workers != NULL) {
5229         active_workers = workers->active_workers();
5230         // The expectation is that active_workers will have already
5231         // been set to a reasonable value.  If it has not been set,
5232         // investigate.
5233         assert(active_workers > 0, "Should have been set during scavenge");
5234       }
5235       rp->set_active_mt_degree(active_workers);
5236       CMSRefProcTaskExecutor task_executor(*this);
5237       stats = rp->process_discovered_references(&_is_alive_closure,
5238                                         &cmsKeepAliveClosure,
5239                                         &cmsDrainMarkingStackClosure,
5240                                         &task_executor,
5241                                         &pt);
5242     } else {
5243       stats = rp->process_discovered_references(&_is_alive_closure,
5244                                         &cmsKeepAliveClosure,
5245                                         &cmsDrainMarkingStackClosure,
5246                                         NULL,
5247                                         &pt);
5248     }
5249     _gc_tracer_cm->report_gc_reference_stats(stats);
5250     pt.print_all_references();
5251   }
5252 
5253   // This is the point where the entire marking should have completed.
5254   verify_work_stacks_empty();
5255 
5256   {
5257     GCTraceTime(Debug, gc, phases) t("Weak Processing", _gc_timer_cm);
5258     WeakProcessor::weak_oops_do(&_is_alive_closure, &do_nothing_cl);
5259   }
5260 
5261   if (should_unload_classes()) {
5262     {
5263       GCTraceTime(Debug, gc, phases) t("Class Unloading", _gc_timer_cm);
5264 
5265       // Unload classes and purge the SystemDictionary.
5266       bool purged_class = SystemDictionary::do_unloading(_gc_timer_cm);
5267 
5268       // Unload nmethods.
5269       CodeCache::do_unloading(&_is_alive_closure, purged_class);
5270 
5271       // Prune dead klasses from subklass/sibling/implementor lists.
5272       Klass::clean_weak_klass_links(purged_class);
5273     }
5274 
5275     {
5276       GCTraceTime(Debug, gc, phases) t("Scrub Symbol Table", _gc_timer_cm);
5277       // Clean up unreferenced symbols in symbol table.
5278       SymbolTable::unlink();
5279     }
5280 
5281     {
5282       GCTraceTime(Debug, gc, phases) t("Scrub String Table", _gc_timer_cm);
5283       // Delete entries for dead interned strings.
5284       StringTable::unlink(&_is_alive_closure);
5285     }
5286   }
5287 
5288   // Restore any preserved marks as a result of mark stack or
5289   // work queue overflow
5290   restore_preserved_marks_if_any();  // done single-threaded for now
5291 
5292   rp->set_enqueuing_is_done(true);
5293   rp->verify_no_references_recorded();
5294 }
5295 
5296 #ifndef PRODUCT
5297 void CMSCollector::check_correct_thread_executing() {
5298   Thread* t = Thread::current();
5299   // Only the VM thread or the CMS thread should be here.
5300   assert(t->is_ConcurrentGC_thread() || t->is_VM_thread(),
5301          "Unexpected thread type");
5302   // If this is the vm thread, the foreground process
5303   // should not be waiting.  Note that _foregroundGCIsActive is
5304   // true while the foreground collector is waiting.
5305   if (_foregroundGCShouldWait) {
5306     // We cannot be the VM thread
5307     assert(t->is_ConcurrentGC_thread(),
5308            "Should be CMS thread");
5309   } else {
5310     // We can be the CMS thread only if we are in a stop-world
5311     // phase of CMS collection.
5312     if (t->is_ConcurrentGC_thread()) {
5313       assert(_collectorState == InitialMarking ||
5314              _collectorState == FinalMarking,
5315              "Should be a stop-world phase");
5316       // The CMS thread should be holding the CMS_token.
5317       assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5318              "Potential interference with concurrently "
5319              "executing VM thread");
5320     }
5321   }
5322 }
5323 #endif
5324 
5325 void CMSCollector::sweep() {
5326   assert(_collectorState == Sweeping, "just checking");
5327   check_correct_thread_executing();
5328   verify_work_stacks_empty();
5329   verify_overflow_empty();
5330   increment_sweep_count();
5331   TraceCMSMemoryManagerStats tms(_collectorState, CMSHeap::heap()->gc_cause());
5332 
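  // Sample the interval since the previous sweep completed; this inter-sweep
  // estimate feeds the free list census started in sweepWork() below.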
5333   _inter_sweep_timer.stop();
5334   _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
5335 
5336   assert(!_intra_sweep_timer.is_active(), "Should not be active");
5337   _intra_sweep_timer.reset();
5338   _intra_sweep_timer.start();
5339   {
5340     GCTraceCPUTime tcpu;
5341     CMSPhaseAccounting pa(this, "Concurrent Sweep");
5342     // First sweep the old gen
5343     {
5344       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5345                                bitMapLock());
5346       sweepWork(_cmsGen);
5347     }
5348 
5349     // Update Universe::_heap_*_at_gc figures.
5350     // We need all the free list locks to make the abstract state
5351     // transition from Sweeping to Resetting. See detailed note
5352     // further below.
5353     {
5354       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock());
5355       // Update heap occupancy information which is used as
5356       // input to soft ref clearing policy at the next gc.
5357       Universe::update_heap_info_at_gc();
5358       _collectorState = Resizing;
5359     }
5360   }
5361   verify_work_stacks_empty();
5362   verify_overflow_empty();
5363 
5364   if (should_unload_classes()) {
5365     // Delay purge to the beginning of the next safepoint.  Metaspace::contains
5366     // requires that the virtual spaces are stable and not deleted.
5367     ClassLoaderDataGraph::set_should_purge(true);
5368   }
5369 
5370   _intra_sweep_timer.stop();
5371   _intra_sweep_estimate.sample(_intra_sweep_timer.seconds());
5372 
5373   _inter_sweep_timer.reset();
5374   _inter_sweep_timer.start();
5375 
5376   // We need to use a monotonically non-decreasing time in ms,
5377   // because os::javaTimeMillis() does not guarantee monotonicity;
5378   // otherwise we will see time-warp warnings.
5379   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
5380   update_time_of_last_gc(now);
5381 
5382   // NOTE on abstract state transitions:
5383   // Mutators allocate-live and/or mark the mod-union table dirty
5384   // based on the state of the collection.  The former is done in
5385   // the interval [Marking, Sweeping] and the latter in the interval
5386   // [Marking, Sweeping).  Thus the transitions into the Marking state
5387   // and out of the Sweeping state must be synchronously visible
5388   // globally to the mutators.
5389   // The transition into the Marking state happens with the world
5390   // stopped so the mutators will globally see it.  Sweeping is
5391   // done asynchronously by the background collector so the transition
5392   // from the Sweeping state to the Resizing state must be done
5393   // under the freelistLock (as is the check for whether to
5394   // allocate-live and whether to dirty the mod-union table).
5395   assert(_collectorState == Resizing, "Change of collector state to"
5396     " Resizing must be done under the freelistLocks (plural)");
5397 
5398   // Now that sweeping has been completed, we clear
5399   // the incremental_collection_failed flag,
5400   // thus inviting a younger gen collection to promote into
5401   // this generation. If such a promotion may still fail,
5402   // the flag will be set again when a young collection is
5403   // attempted.
5404   CMSHeap* heap = CMSHeap::heap();
5405   heap->clear_incremental_collection_failed();  // Worth retrying as fresh space may have been freed up
5406   heap->update_full_collections_completed(_collection_count_start);
5407 }
5408 
5409 // FIX ME!!! Looks like this belongs in CFLSpace, with
5410 // CMSGen merely delegating to it.
5411 void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
5412   double nearLargestPercent = FLSLargestBlockCoalesceProximity;
5413   HeapWord*  minAddr        = _cmsSpace->bottom();
5414   HeapWord*  largestAddr    =
5415     (HeapWord*) _cmsSpace->dictionary()->find_largest_dict();
5416   if (largestAddr == NULL) {
5417     // The dictionary appears to be empty.  In this case
5418     // try to coalesce at the end of the heap.
5419     largestAddr = _cmsSpace->end();
5420   }
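  // Place the "near largest chunk" boundary nearLargestPercent of the way from
  // the bottom of the space to the largest free block, backed off by MinChunkSize.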
5421   size_t largestOffset     = pointer_delta(largestAddr, minAddr);
5422   size_t nearLargestOffset =
5423     (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
5424   log_debug(gc, freelist)("CMS: Large Block: " PTR_FORMAT "; Proximity: " PTR_FORMAT " -> " PTR_FORMAT,
5425                           p2i(largestAddr), p2i(_cmsSpace->nearLargestChunk()), p2i(minAddr + nearLargestOffset));
5426   _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
5427 }
5428 
5429 bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
5430   return addr >= _cmsSpace->nearLargestChunk();
5431 }
5432 
5433 FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
5434   return _cmsSpace->find_chunk_at_end();
5435 }
5436 
5437 void ConcurrentMarkSweepGeneration::update_gc_stats(Generation* current_generation,
5438                                                     bool full) {
5439   // If the young generation has been collected, gather any statistics
5440   // that are of interest at this point.
5441   bool current_is_young = CMSHeap::heap()->is_young_gen(current_generation);
5442   if (!full && current_is_young) {
5443     // Gather statistics on the young generation collection.
5444     collector()->stats().record_gc0_end(used());
5445   }
5446 }
5447 
5448 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* old_gen) {
5449   // We iterate over the space(s) underlying this generation,
5450   // checking the mark bit map to see if the bits corresponding
5451   // to specific blocks are marked or not. Blocks that are
5452   // marked are live and are not swept up. All remaining blocks
5453   // are swept up, with coalescing on-the-fly as we sweep up
5454   // contiguous free and/or garbage blocks:
5455   // We need to ensure that the sweeper synchronizes with allocators
5456   // and stop-the-world collectors. In particular, the following
5457   // locks are used:
5458   // . CMS token: if this is held, a stop the world collection cannot occur
5459   // . freelistLock: if this is held no allocation can occur from this
5460   //                 generation by another thread
5461   // . bitMapLock: if this is held, no other thread can access or update
5462   //               the marking bit map
5463 
5464   // Note that we need to hold the freelistLock if we use
5465   // block iterate below; else the iterator might go awry if
5466   // a mutator (or promotion) causes block contents to change
5467   // (for instance if the allocator divvies up a block).
5468   // If we hold the free list lock, for all practical purposes
5469   // young generation GC's can't occur (they'll usually need to
5470   // promote), so we might as well prevent all young generation
5471   // GC's while we do a sweeping step. For the same reason, we might
5472   // as well take the bit map lock for the entire duration
5473 
5474   // check that we hold the requisite locks
5475   assert(have_cms_token(), "Should hold cms token");
5476   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), "Should possess CMS token to sweep");
5477   assert_lock_strong(old_gen->freelistLock());
5478   assert_lock_strong(bitMapLock());
5479 
5480   assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context");
5481   assert(_intra_sweep_timer.is_active(),  "Was switched on  in an outer context");
5482   old_gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
5483                                           _inter_sweep_estimate.padded_average(),
5484                                           _intra_sweep_estimate.padded_average());
5485   old_gen->setNearLargestChunk();
5486 
5487   {
5488     SweepClosure sweepClosure(this, old_gen, &_markBitMap, CMSYield);
5489     old_gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
5490     // We need to free-up/coalesce garbage/blocks from a
5491     // co-terminal free run. This is done in the SweepClosure
5492     // destructor; so, do not remove this scope, else the
5493     // end-of-sweep-census below will be off by a little bit.
5494   }
5495   old_gen->cmsSpace()->sweep_completed();
5496   old_gen->cmsSpace()->endSweepFLCensus(sweep_count());
5497   if (should_unload_classes()) {                // unloaded classes this cycle,
5498     _concurrent_cycles_since_last_unload = 0;   // ... reset count
5499   } else {                                      // did not unload classes,
5500     _concurrent_cycles_since_last_unload++;     // ... increment count
5501   }
5502 }
5503 
5504 // Reset CMS data structures (for now just the marking bit map)
5505 // preparatory for the next cycle.
5506 void CMSCollector::reset_concurrent() {
5507   CMSTokenSyncWithLocks ts(true, bitMapLock());
5508 
5509   // If the state is not "Resetting", the foreground thread
5510   // has done a collection and the resetting.
5511   if (_collectorState != Resetting) {
5512     assert(_collectorState == Idling, "The state should only change"
5513       " because the foreground collector has finished the collection");
5514     return;
5515   }
5516 
5517   {
5518     // Clear the mark bitmap (no grey objects to start with)
5519     // for the next cycle.
5520     GCTraceCPUTime tcpu;
5521     CMSPhaseAccounting cmspa(this, "Concurrent Reset");
5522 
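    // Clear the bit map in chunks of CMSBitMapYieldQuantum words so that the
    // CMS thread can periodically yield to the foreground collector below.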
5523     HeapWord* curAddr = _markBitMap.startWord();
5524     while (curAddr < _markBitMap.endWord()) {
5525       size_t remaining  = pointer_delta(_markBitMap.endWord(), curAddr);
5526       MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
5527       _markBitMap.clear_large_range(chunk);
5528       if (ConcurrentMarkSweepThread::should_yield() &&
5529           !foregroundGCIsActive() &&
5530           CMSYield) {
5531         assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5532                "CMS thread should hold CMS token");
5533         assert_lock_strong(bitMapLock());
5534         bitMapLock()->unlock();
5535         ConcurrentMarkSweepThread::desynchronize(true);
5536         stopTimer();
5537         incrementYields();
5538 
5539         // See the comment in coordinator_yield()
5540         for (unsigned i = 0; i < CMSYieldSleepCount &&
5541                          ConcurrentMarkSweepThread::should_yield() &&
5542                          !CMSCollector::foregroundGCIsActive(); ++i) {
5543           os::sleep(Thread::current(), 1, false);
5544         }
5545 
5546         ConcurrentMarkSweepThread::synchronize(true);
5547         bitMapLock()->lock_without_safepoint_check();
5548         startTimer();
5549       }
5550       curAddr = chunk.end();
5551     }
5552     // A successful mostly concurrent collection has been done.
5553     // Because only the full (i.e., concurrent mode failure) collections
5554     // are being measured for gc overhead limits, clean the "near" flag
5555     // and count.
5556     size_policy()->reset_gc_overhead_limit_count();
5557     _collectorState = Idling;
5558   }
5559 
5560   register_gc_end();
5561 }
5562 
5563 // Same as above but for STW paths
5564 void CMSCollector::reset_stw() {
5565   // already have the lock
5566   assert(_collectorState == Resetting, "just checking");
5567   assert_lock_strong(bitMapLock());
5568   GCIdMark gc_id_mark(_cmsThread->gc_id());
5569   _markBitMap.clear_all();
5570   _collectorState = Idling;
5571   register_gc_end();
5572 }
5573 
5574 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
5575   GCTraceCPUTime tcpu;
5576   TraceCollectorStats tcs_cgc(cgc_counters());
5577 
5578   switch (op) {
5579     case CMS_op_checkpointRootsInitial: {
5580       GCTraceTime(Info, gc) t("Pause Initial Mark", NULL, GCCause::_no_gc, true);
5581       SvcGCMarker sgcm(SvcGCMarker::CONCURRENT);
5582       checkpointRootsInitial();
5583       break;
5584     }
5585     case CMS_op_checkpointRootsFinal: {
5586       GCTraceTime(Info, gc) t("Pause Remark", NULL, GCCause::_no_gc, true);
5587       SvcGCMarker sgcm(SvcGCMarker::CONCURRENT);
5588       checkpointRootsFinal();
5589       break;
5590     }
5591     default:
5592       fatal("No such CMS_op");
5593   }
5594 }
5595 
5596 #ifndef PRODUCT
5597 size_t const CMSCollector::skip_header_HeapWords() {
5598   return FreeChunk::header_size();
5599 }
5600 
5601 // Try to collect here conditions that should hold when
5602 // CMS thread is exiting. The idea is that the foreground GC
5603 // thread should not be blocked if it wants to terminate
5604 // the CMS thread and yet continue to run the VM for a while
5605 // after that.
5606 void CMSCollector::verify_ok_to_terminate() const {
5607   assert(Thread::current()->is_ConcurrentGC_thread(),
5608          "should be called by CMS thread");
5609   assert(!_foregroundGCShouldWait, "should be false");
5610   // We could check here that all the various low-level locks
5611   // are not held by the CMS thread, but that is overkill; see
5612   // also CMSThread::verify_ok_to_terminate() where the CGC_lock
5613   // is checked.
5614 }
5615 #endif
5616 
5617 size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const {
5618    assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1),
5619           "missing Printezis mark?");
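  // Printezis marks: the bits at addr and addr+1, together with a third bit at
  // the last word of the block, let us recover the size of an uninitialized block.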
5620   HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
5621   size_t size = pointer_delta(nextOneAddr + 1, addr);
5622   assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
5623          "alignment problem");
5624   assert(size >= 3, "Necessary for Printezis marks to work");
5625   return size;
5626 }
5627 
5628 // A variant of the above (block_size_using_printezis_bits()) except
5629 // that we return 0 if the P-bits are not yet set.
5630 size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const {
5631   if (_markBitMap.isMarked(addr + 1)) {
5632     assert(_markBitMap.isMarked(addr), "P-bit can be set only for marked objects");
5633     HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
5634     size_t size = pointer_delta(nextOneAddr + 1, addr);
5635     assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
5636            "alignment problem");
5637     assert(size >= 3, "Necessary for Printezis marks to work");
5638     return size;
5639   }
5640   return 0;
5641 }
5642 
5643 HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
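  // Use the object's size if its klass pointer has been installed; otherwise
  // fall back on the Printezis bits to determine the block size.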
5644   size_t sz = 0;
5645   oop p = (oop)addr;
5646   if (p->klass_or_null_acquire() != NULL) {
5647     sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
5648   } else {
5649     sz = block_size_using_printezis_bits(addr);
5650   }
5651   assert(sz > 0, "size must be nonzero");
5652   HeapWord* next_block = addr + sz;
5653   HeapWord* next_card  = align_up(next_block, CardTable::card_size);
5654   assert(align_down((uintptr_t)addr,      CardTable::card_size) <
5655          align_down((uintptr_t)next_card, CardTable::card_size),
5656          "must be different cards");
5657   return next_card;
5658 }
5659 
5660 
5661 // CMS Bit Map Wrapper /////////////////////////////////////////
5662 
5663 // Construct a CMS bit map infrastructure, but don't create the
5664 // bit vector itself. That is done by a separate call CMSBitMap::allocate()
5665 // further below.
5666 CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
5667   _shifter(shifter),
5668   _bm(),
5669   _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true,
5670                                     Monitor::_safepoint_check_sometimes) : NULL)
5671 {
5672   _bmStartWord = 0;
5673   _bmWordSize  = 0;
5674 }
5675 
5676 bool CMSBitMap::allocate(MemRegion mr) {
5677   _bmStartWord = mr.start();
5678   _bmWordSize  = mr.word_size();
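  // One bit covers (1 << _shifter) heap words; convert the bit count to bytes,
  // rounding up, to size the reservation.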
5679   ReservedSpace brs(ReservedSpace::allocation_align_size_up(
5680                      (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
5681   if (!brs.is_reserved()) {
5682     log_warning(gc)("CMS bit map allocation failure");
5683     return false;
5684   }
5685   // For now we'll just commit all of the bit map up front.
5686   // Later on we'll try to be more parsimonious with swap.
5687   if (!_virtual_space.initialize(brs, brs.size())) {
5688     log_warning(gc)("CMS bit map backing store failure");
5689     return false;
5690   }
5691   assert(_virtual_space.committed_size() == brs.size(),
5692          "didn't reserve backing store for all of CMS bit map?");
5693   assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
5694          _bmWordSize, "inconsistency in bit map sizing");
5695   _bm = BitMapView((BitMap::bm_word_t*)_virtual_space.low(), _bmWordSize >> _shifter);
5696 
5697   // bm.clear(); // can we rely on getting zero'd memory? verify below
5698   assert(isAllClear(),
5699          "Expected zero'd memory from ReservedSpace constructor");
5700   assert(_bm.size() == heapWordDiffToOffsetDiff(sizeInWords()),
5701          "consistency check");
5702   return true;
5703 }
5704 
5705 void CMSBitMap::dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl) {
5706   HeapWord *next_addr, *end_addr, *last_addr;
5707   assert_locked();
5708   assert(covers(mr), "out-of-range error");
5709   // XXX assert that start and end are appropriately aligned
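  // Hand each maximal run of set bits (a dirty region) to the closure and clear
  // it; an empty region means the remainder of mr is already clean.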
5710   for (next_addr = mr.start(), end_addr = mr.end();
5711        next_addr < end_addr; next_addr = last_addr) {
5712     MemRegion dirty_region = getAndClearMarkedRegion(next_addr, end_addr);
5713     last_addr = dirty_region.end();
5714     if (!dirty_region.is_empty()) {
5715       cl->do_MemRegion(dirty_region);
5716     } else {
5717       assert(last_addr == end_addr, "program logic");
5718       return;
5719     }
5720   }
5721 }
5722 
5723 void CMSBitMap::print_on_error(outputStream* st, const char* prefix) const {
5724   _bm.print_on_error(st, prefix);
5725 }
5726 
5727 #ifndef PRODUCT
5728 void CMSBitMap::assert_locked() const {
5729   CMSLockVerifier::assert_locked(lock());
5730 }
5731 
5732 bool CMSBitMap::covers(MemRegion mr) const {
5733   // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
5734   assert((size_t)_bm.size() == (_bmWordSize >> _shifter),
5735          "size inconsistency");
5736   return (mr.start() >= _bmStartWord) &&
5737          (mr.end()   <= endWord());
5738 }
5739 
5740 bool CMSBitMap::covers(HeapWord* start, size_t size) const {
5741   return (start >= _bmStartWord && (start + size) <= endWord());
5742 }
5743 
5744 void CMSBitMap::verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) {
5745   // verify that there are no 1 bits in the interval [left, right)
5746   FalseBitMapClosure falseBitMapClosure;
5747   iterate(&falseBitMapClosure, left, right);
5748 }
5749 
5750 void CMSBitMap::region_invariant(MemRegion mr)
5751 {
5752   assert_locked();
5753   // mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
5754   assert(!mr.is_empty(), "unexpected empty region");
5755   assert(covers(mr), "mr should be covered by bit map");
5756   // convert address range into offset range
5757   size_t start_ofs = heapWordToOffset(mr.start());
5758   // Make sure that end() is appropriately aligned
5759   assert(mr.end() == align_up(mr.end(), (1 << (_shifter+LogHeapWordSize))),
5760          "Misaligned mr.end()");
5761   size_t end_ofs   = heapWordToOffset(mr.end());
5762   assert(end_ofs > start_ofs, "Should mark at least one bit");
5763 }
5764 
5765 #endif
5766 
5767 bool CMSMarkStack::allocate(size_t size) {
5768   // allocate a stack of the requisite depth
5769   ReservedSpace rs(ReservedSpace::allocation_align_size_up(
5770                    size * sizeof(oop)));
5771   if (!rs.is_reserved()) {
5772     log_warning(gc)("CMSMarkStack allocation failure");
5773     return false;
5774   }
5775   if (!_virtual_space.initialize(rs, rs.size())) {
5776     log_warning(gc)("CMSMarkStack backing store failure");
5777     return false;
5778   }
5779   assert(_virtual_space.committed_size() == rs.size(),
5780          "didn't reserve backing store for all of CMS stack?");
5781   _base = (oop*)(_virtual_space.low());
5782   _index = 0;
5783   _capacity = size;
5784   NOT_PRODUCT(_max_depth = 0);
5785   return true;
5786 }
5787 
5788 // XXX FIX ME !!! In the MT case we come in here holding a
5789 // leaf lock. For printing we need to take a further lock
5790 // which has lower rank. We need to recalibrate the two
5791 // lock-ranks involved in order to be able to print the
5792 // messages below. (Or defer the printing to the caller.
5793 // For now we take the expedient path of just disabling the
5794 // messages for the problematic case.)
5795 void CMSMarkStack::expand() {
5796   assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted");
5797   if (_capacity == MarkStackSizeMax) {
5798     if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled) {
5799       // We print a warning message only once per CMS cycle.
5800       log_debug(gc)(" (benign) Hit CMSMarkStack max size limit");
5801     }
5802     return;
5803   }
5804   // Double capacity if possible
5805   size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax);
5806   // Do not give up existing stack until we have managed to
5807   // get the double capacity that we desired.
5808   ReservedSpace rs(ReservedSpace::allocation_align_size_up(
5809                    new_capacity * sizeof(oop)));
5810   if (rs.is_reserved()) {
5811     // Release the backing store associated with old stack
5812     _virtual_space.release();
5813     // Reinitialize virtual space for new stack
5814     if (!_virtual_space.initialize(rs, rs.size())) {
5815       fatal("Not enough swap for expanded marking stack");
5816     }
5817     _base = (oop*)(_virtual_space.low());
5818     _index = 0;
5819     _capacity = new_capacity;
5820   } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled) {
5821     // Failed to double capacity, continue;
5822     // we print a detail message only once per CMS cycle.
5823     log_debug(gc)(" (benign) Failed to expand marking stack from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
5824                         _capacity / K, new_capacity / K);
5825   }
5826 }
5827 
5828 
5829 // Closures
5830 // XXX: there seems to be a lot of code duplication here;
5831 // should refactor and consolidate common code.
5832 
5833 // This closure is used to mark refs into the CMS generation in
5834 // the CMS bit map. Called at the first checkpoint. This closure
5835 // assumes that we do not need to re-mark dirty cards; if the CMS
5836 // generation on which this is used is not an oldest
5837 // generation then this will lose younger_gen cards!
5838 
5839 MarkRefsIntoClosure::MarkRefsIntoClosure(
5840   MemRegion span, CMSBitMap* bitMap):
5841     _span(span),
5842     _bitMap(bitMap)
5843 {
5844   assert(ref_discoverer() == NULL, "deliberately left NULL");
5845   assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
5846 }
5847 
5848 void MarkRefsIntoClosure::do_oop(oop obj) {
5849   // if p points into _span, then mark corresponding bit in _markBitMap
5850   assert(oopDesc::is_oop(obj), "expected an oop");
5851   HeapWord* addr = (HeapWord*)obj;
5852   if (_span.contains(addr)) {
5853     // this should be made more efficient
5854     _bitMap->mark(addr);
5855   }
5856 }
5857 
5858 ParMarkRefsIntoClosure::ParMarkRefsIntoClosure(
5859   MemRegion span, CMSBitMap* bitMap):
5860     _span(span),
5861     _bitMap(bitMap)
5862 {
5863   assert(ref_discoverer() == NULL, "deliberately left NULL");
5864   assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
5865 }
5866 
5867 void ParMarkRefsIntoClosure::do_oop(oop obj) {
5868   // if p points into _span, then mark corresponding bit in _markBitMap
5869   assert(oopDesc::is_oop(obj), "expected an oop");
5870   HeapWord* addr = (HeapWord*)obj;
5871   if (_span.contains(addr)) {
5872     // this should be made more efficient
5873     _bitMap->par_mark(addr);
5874   }
5875 }
5876 
5877 // A variant of the above, used for CMS marking verification.
5878 MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
5879   MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
5880     _span(span),
5881     _verification_bm(verification_bm),
5882     _cms_bm(cms_bm)
5883 {
5884   assert(ref_discoverer() == NULL, "deliberately left NULL");
5885   assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
5886 }
5887 
5888 void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
5889   // if p points into _span, then mark corresponding bit in _markBitMap
5890   assert(oopDesc::is_oop(obj), "expected an oop");
5891   HeapWord* addr = (HeapWord*)obj;
5892   if (_span.contains(addr)) {
5893     _verification_bm->mark(addr);
5894     if (!_cms_bm->isMarked(addr)) {
5895       Log(gc, verify) log;
5896       ResourceMark rm;
5897       LogStream ls(log.error());
5898       oop(addr)->print_on(&ls);
5899       log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
5900       fatal("... aborting");
5901     }
5902   }
5903 }
5904 
5905 //////////////////////////////////////////////////
5906 // MarkRefsIntoAndScanClosure
5907 //////////////////////////////////////////////////
5908 
5909 MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
5910                                                        ReferenceDiscoverer* rd,
5911                                                        CMSBitMap* bit_map,
5912                                                        CMSBitMap* mod_union_table,
5913                                                        CMSMarkStack*  mark_stack,
5914                                                        CMSCollector* collector,
5915                                                        bool should_yield,
5916                                                        bool concurrent_precleaning):
5917   _span(span),
5918   _bit_map(bit_map),
5919   _mark_stack(mark_stack),
5920   _pushAndMarkClosure(collector, span, rd, bit_map, mod_union_table,
5921                       mark_stack, concurrent_precleaning),
5922   _collector(collector),
5923   _freelistLock(NULL),
5924   _yield(should_yield),
5925   _concurrent_precleaning(concurrent_precleaning)
5926 {
5927   // FIXME: Should initialize in base class constructor.
5928   assert(rd != NULL, "ref_discoverer shouldn't be NULL");
5929   set_ref_discoverer_internal(rd);
5930 }
5931 
5932 // This closure is used to mark refs into the CMS generation at the
5933 // second (final) checkpoint, and to scan and transitively follow
5934 // the unmarked oops. It is also used during the concurrent precleaning
5935 // phase while scanning objects on dirty cards in the CMS generation.
5936 // The marks are made in the marking bit map and the marking stack is
5937 // used for keeping the (newly) grey objects during the scan.
5938 // The parallel version (Par_...) appears further below.
5939 void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
5940   if (obj != NULL) {
5941     assert(oopDesc::is_oop(obj), "expected an oop");
5942     HeapWord* addr = (HeapWord*)obj;
5943     assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
5944     assert(_collector->overflow_list_is_empty(),
5945            "overflow list should be empty");
5946     if (_span.contains(addr) &&
5947         !_bit_map->isMarked(addr)) {
5948       // mark bit map (object is now grey)
5949       _bit_map->mark(addr);
5950       // push on marking stack (stack should be empty), and drain the
5951       // stack by applying this closure to the oops in the oops popped
5952       // from the stack (i.e. blacken the grey objects)
5953       bool res = _mark_stack->push(obj);
5954       assert(res, "Should have space to push on empty stack");
5955       do {
5956         oop new_oop = _mark_stack->pop();
5957         assert(new_oop != NULL && oopDesc::is_oop(new_oop), "Expected an oop");
5958         assert(_bit_map->isMarked((HeapWord*)new_oop),
5959                "only grey objects on this stack");
5960         // iterate over the oops in this oop, marking and pushing
5961         // the ones in CMS heap (i.e. in _span).
5962         new_oop->oop_iterate(&_pushAndMarkClosure);
5963         // check if it's time to yield
5964         do_yield_check();
5965       } while (!_mark_stack->isEmpty() ||
5966                (!_concurrent_precleaning && take_from_overflow_list()));
5967         // if marking stack is empty, and we are not doing this
5968         // during precleaning, then check the overflow list
5969     }
5970     assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
5971     assert(_collector->overflow_list_is_empty(),
5972            "overflow list was drained above");
5973 
5974     assert(_collector->no_preserved_marks(),
5975            "All preserved marks should have been restored above");
5976   }
5977 }
5978 
5979 void MarkRefsIntoAndScanClosure::do_yield_work() {
5980   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5981          "CMS thread should hold CMS token");
5982   assert_lock_strong(_freelistLock);
5983   assert_lock_strong(_bit_map->lock());
5984   // relinquish the free_list_lock and bitMaplock()
5985   _bit_map->lock()->unlock();
5986   _freelistLock->unlock();
5987   ConcurrentMarkSweepThread::desynchronize(true);
5988   _collector->stopTimer();
5989   _collector->incrementYields();
5990 
5991   // See the comment in coordinator_yield()
5992   for (unsigned i = 0;
5993        i < CMSYieldSleepCount &&
5994        ConcurrentMarkSweepThread::should_yield() &&
5995        !CMSCollector::foregroundGCIsActive();
5996        ++i) {
5997     os::sleep(Thread::current(), 1, false);
5998   }
5999 
6000   ConcurrentMarkSweepThread::synchronize(true);
6001   _freelistLock->lock_without_safepoint_check();
6002   _bit_map->lock()->lock_without_safepoint_check();
6003   _collector->startTimer();
6004 }
6005 
6006 ///////////////////////////////////////////////////////////
6007 // ParMarkRefsIntoAndScanClosure: a parallel version of
6008 //                                MarkRefsIntoAndScanClosure
6009 ///////////////////////////////////////////////////////////
6010 ParMarkRefsIntoAndScanClosure::ParMarkRefsIntoAndScanClosure(
6011   CMSCollector* collector, MemRegion span, ReferenceDiscoverer* rd,
6012   CMSBitMap* bit_map, OopTaskQueue* work_queue):
6013   _span(span),
6014   _bit_map(bit_map),
6015   _work_queue(work_queue),
6016   _low_water_mark(MIN2((work_queue->max_elems()/4),
6017                        ((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads))),
6018   _parPushAndMarkClosure(collector, span, rd, bit_map, work_queue)
6019 {
6020   // FIXME: Should initialize in base class constructor.
6021   assert(rd != NULL, "ref_discoverer shouldn't be NULL");
6022   set_ref_discoverer_internal(rd);
6023 }
6024 
6025 // This closure is used to mark refs into the CMS generation at the
6026 // second (final) checkpoint, and to scan and transitively follow
6027 // the unmarked oops. The marks are made in the marking bit map and
6028 // the work_queue is used for keeping the (newly) grey objects during
6029 // the scan phase whence they are also available for stealing by parallel
6030 // threads. Since the marking bit map is shared, updates are
6031 // synchronized (via CAS).
6032 void ParMarkRefsIntoAndScanClosure::do_oop(oop obj) {
6033   if (obj != NULL) {
6034     // Ignore mark word because this could be an already marked oop
6035     // that may be chained at the end of the overflow list.
6036     assert(oopDesc::is_oop(obj, true), "expected an oop");
6037     HeapWord* addr = (HeapWord*)obj;
6038     if (_span.contains(addr) &&
6039         !_bit_map->isMarked(addr)) {
6040       // mark bit map (object will become grey):
6041       // It is possible for several threads to be
6042       // trying to "claim" this object concurrently;
6043       // the unique thread that succeeds in marking the
6044       // object first will do the subsequent push on
6045       // to the work queue (or overflow list).
6046       if (_bit_map->par_mark(addr)) {
6047         // push on work_queue (which may not be empty), and trim the
6048         // queue to an appropriate length by applying this closure to
6049         // the oops in the oops popped from the stack (i.e. blacken the
6050         // grey objects)
6051         bool res = _work_queue->push(obj);
6052         assert(res, "Low water mark should be less than capacity?");
6053         trim_queue(_low_water_mark);
6054       } // Else, another thread claimed the object
6055     }
6056   }
6057 }
6058 
6059 // This closure is used to rescan the marked objects on the dirty cards
6060 // in the mod union table and the card table proper.
6061 size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
6062   oop p, MemRegion mr) {
6063 
6064   size_t size = 0;
6065   HeapWord* addr = (HeapWord*)p;
6066   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6067   assert(_span.contains(addr), "we are scanning the CMS generation");
6068   // check if it's time to yield
6069   if (do_yield_check()) {
6070     // We yielded for some foreground stop-world work,
6071     // and we have been asked to abort this ongoing preclean cycle.
6072     return 0;
6073   }
6074   if (_bitMap->isMarked(addr)) {
6075     // it's marked; is it potentially uninitialized?
6076     if (p->klass_or_null_acquire() != NULL) {
6077         // an initialized object; ignore mark word in verification below
6078         // since we are running concurrent with mutators
6079         assert(oopDesc::is_oop(p, true), "should be an oop");
6080         if (p->is_objArray()) {
6081           // objArrays are precisely marked; restrict scanning
6082           // to dirty cards only.
6083           size = CompactibleFreeListSpace::adjustObjectSize(
6084                    p->oop_iterate_size(_scanningClosure, mr));
6085         } else {
6086           // A non-array may have been imprecisely marked; we need
6087           // to scan object in its entirety.
6088           size = CompactibleFreeListSpace::adjustObjectSize(
6089                    p->oop_iterate_size(_scanningClosure));
6090         }
6091       #ifdef ASSERT
6092         size_t direct_size =
6093           CompactibleFreeListSpace::adjustObjectSize(p->size());
6094         assert(size == direct_size, "Inconsistency in size");
6095         assert(size >= 3, "Necessary for Printezis marks to work");
6096         HeapWord* start_pbit = addr + 1;
6097         HeapWord* end_pbit = addr + size - 1;
6098         assert(_bitMap->isMarked(start_pbit) == _bitMap->isMarked(end_pbit),
6099                "inconsistent Printezis mark");
6100         // Verify inner mark bits (between Printezis bits) are clear,
6101         // but don't repeat if there are multiple dirty regions for
6102         // the same object, to avoid potential O(N^2) performance.
6103         if (addr != _last_scanned_object) {
6104           _bitMap->verifyNoOneBitsInRange(start_pbit + 1, end_pbit);
6105           _last_scanned_object = addr;
6106         }
6107       #endif // ASSERT
6108     } else {
6109       // An uninitialized object.
6110       assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
6111       HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
6112       size = pointer_delta(nextOneAddr + 1, addr);
6113       assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6114              "alignment problem");
6115       // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
6116       // will dirty the card when the klass pointer is installed in the
6117       // object (signaling the completion of initialization).
6118     }
6119   } else {
6120     // Either a not yet marked object or an uninitialized object
6121     if (p->klass_or_null_acquire() == NULL) {
6122       // An uninitialized object, skip to the next card, since
6123       // we may not be able to read its P-bits yet.
6124       assert(size == 0, "Initial value");
6125     } else {
6126       // An object not (yet) reached by marking: we merely need to
6127       // compute its size so as to go look at the next block.
6128       assert(oopDesc::is_oop(p, true), "should be an oop");
6129       size = CompactibleFreeListSpace::adjustObjectSize(p->size());
6130     }
6131   }
6132   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6133   return size;
6134 }
6135 
6136 void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
6137   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6138          "CMS thread should hold CMS token");
6139   assert_lock_strong(_freelistLock);
6140   assert_lock_strong(_bitMap->lock());
6141   // relinquish the free_list_lock and bitMaplock()
6142   _bitMap->lock()->unlock();
6143   _freelistLock->unlock();
6144   ConcurrentMarkSweepThread::desynchronize(true);
6145   _collector->stopTimer();
6146   _collector->incrementYields();
6147 
6148   // See the comment in coordinator_yield()
6149   for (unsigned i = 0; i < CMSYieldSleepCount &&
6150                    ConcurrentMarkSweepThread::should_yield() &&
6151                    !CMSCollector::foregroundGCIsActive(); ++i) {
6152     os::sleep(Thread::current(), 1, false);
6153   }
6154 
6155   ConcurrentMarkSweepThread::synchronize(true);
6156   _freelistLock->lock_without_safepoint_check();
6157   _bitMap->lock()->lock_without_safepoint_check();
6158   _collector->startTimer();
6159 }
6160 
6161 
6162 //////////////////////////////////////////////////////////////////
6163 // SurvivorSpacePrecleanClosure
6164 //////////////////////////////////////////////////////////////////
6165 // This (single-threaded) closure is used to preclean the oops in
6166 // the survivor spaces.
6167 size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
6168 
6169   HeapWord* addr = (HeapWord*)p;
6170   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6171   assert(!_span.contains(addr), "we are scanning the survivor spaces");
6172   assert(p->klass_or_null() != NULL, "object should be initialized");
6173   // an initialized object; ignore mark word in verification below
6174   // since we are running concurrent with mutators
6175   assert(oopDesc::is_oop(p, true), "should be an oop");
6176   // Note that we do not yield while we iterate over
6177   // the interior oops of p, pushing the relevant ones
6178   // on our marking stack.
6179   size_t size = p->oop_iterate_size(_scanning_closure);
6180   do_yield_check();
6181   // Observe that below, we do not abandon the preclean
6182   // phase as soon as we should; rather we empty the
6183   // marking stack before returning. This is to satisfy
6184   // some existing assertions. In general, it may be a
6185   // good idea to abort immediately and complete the marking
6186   // from the grey objects at a later time.
6187   while (!_mark_stack->isEmpty()) {
6188     oop new_oop = _mark_stack->pop();
6189     assert(new_oop != NULL && oopDesc::is_oop(new_oop), "Expected an oop");
6190     assert(_bit_map->isMarked((HeapWord*)new_oop),
6191            "only grey objects on this stack");
6192     // iterate over the oops in this oop, marking and pushing
6193     // the ones in CMS heap (i.e. in _span).
6194     new_oop->oop_iterate(_scanning_closure);
6195     // check if it's time to yield
6196     do_yield_check();
6197   }
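  // If any GC has occurred since we started (the collection count changed), or
  // an abort has been requested, return 0 so the caller abandons this preclean pass.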
6198   unsigned int after_count =
6199     CMSHeap::heap()->total_collections();
6200   bool abort = (_before_count != after_count) ||
6201                _collector->should_abort_preclean();
6202   return abort ? 0 : size;
6203 }
6204 
6205 void SurvivorSpacePrecleanClosure::do_yield_work() {
6206   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6207          "CMS thread should hold CMS token");
6208   assert_lock_strong(_bit_map->lock());
6209   // Relinquish the bit map lock
6210   _bit_map->lock()->unlock();
6211   ConcurrentMarkSweepThread::desynchronize(true);
6212   _collector->stopTimer();
6213   _collector->incrementYields();
6214 
6215   // See the comment in coordinator_yield()
6216   for (unsigned i = 0; i < CMSYieldSleepCount &&
6217                        ConcurrentMarkSweepThread::should_yield() &&
6218                        !CMSCollector::foregroundGCIsActive(); ++i) {
6219     os::sleep(Thread::current(), 1, false);
6220   }
6221 
6222   ConcurrentMarkSweepThread::synchronize(true);
6223   _bit_map->lock()->lock_without_safepoint_check();
6224   _collector->startTimer();
6225 }
6226 
6227 // This closure is used to rescan the marked objects on the dirty cards
6228 // in the mod union table and the card table proper. In the parallel
6229 // case, although the bitMap is shared, we do a single read so the
6230 // isMarked() query is "safe".
6231 bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
6232   // Ignore mark word because we are running concurrent with mutators
6233   assert(oopDesc::is_oop_or_null(p, true), "Expected an oop or NULL at " PTR_FORMAT, p2i(p));
6234   HeapWord* addr = (HeapWord*)p;
6235   assert(_span.contains(addr), "we are scanning the CMS generation");
6236   bool is_obj_array = false;
6237   #ifdef ASSERT
6238     if (!_parallel) {
6239       assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
6240       assert(_collector->overflow_list_is_empty(),
6241              "overflow list should be empty");
6242 
6243     }
6244   #endif // ASSERT
6245   if (_bit_map->isMarked(addr)) {
6246     // Obj arrays are precisely marked, non-arrays are not;
6247     // so we scan objArrays precisely and non-arrays in their
6248     // entirety.
6249     if (p->is_objArray()) {
6250       is_obj_array = true;
6251       if (_parallel) {
6252         p->oop_iterate(_par_scan_closure, mr);
6253       } else {
6254         p->oop_iterate(_scan_closure, mr);
6255       }
6256     } else {
6257       if (_parallel) {
6258         p->oop_iterate(_par_scan_closure);
6259       } else {
6260         p->oop_iterate(_scan_closure);
6261       }
6262     }
6263   }
6264   #ifdef ASSERT
6265     if (!_parallel) {
6266       assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
6267       assert(_collector->overflow_list_is_empty(),
6268              "overflow list should be empty");
6269 
6270     }
6271   #endif // ASSERT
6272   return is_obj_array;
6273 }
6274 
6275 MarkFromRootsClosure::MarkFromRootsClosure(CMSCollector* collector,
6276                         MemRegion span,
6277                         CMSBitMap* bitMap, CMSMarkStack*  markStack,
6278                         bool should_yield, bool verifying):
6279   _collector(collector),
6280   _span(span),
6281   _bitMap(bitMap),
6282   _mut(&collector->_modUnionTable),
6283   _markStack(markStack),
6284   _yield(should_yield),
6285   _skipBits(0)
6286 {
6287   assert(_markStack->isEmpty(), "stack should be empty");
6288   _finger = _bitMap->startWord();
6289   _threshold = _finger;
6290   assert(_collector->_restart_addr == NULL, "Sanity check");
6291   assert(_span.contains(_finger), "Out of bounds _finger?");
6292   DEBUG_ONLY(_verifying = verifying;)
6293 }
6294 
6295 void MarkFromRootsClosure::reset(HeapWord* addr) {
6296   assert(_markStack->isEmpty(), "would cause duplicates on stack");
6297   assert(_span.contains(addr), "Out of bounds _finger?");
6298   _finger = addr;
6299   _threshold = align_up(_finger, CardTable::card_size);
6300 }
6301 
6302 // Should revisit to see if this should be restructured for
6303 // greater efficiency.
6304 bool MarkFromRootsClosure::do_bit(size_t offset) {
6305   if (_skipBits > 0) {
6306     _skipBits--;
6307     return true;
6308   }
6309   // convert offset into a HeapWord*
6310   HeapWord* addr = _bitMap->startWord() + offset;
6311   assert(_bitMap->endWord() && addr < _bitMap->endWord(),
6312          "address out of range");
6313   assert(_bitMap->isMarked(addr), "tautology");
6314   if (_bitMap->isMarked(addr+1)) {
6315     // this is an allocated but not yet initialized object
6316     assert(_skipBits == 0, "tautology");
6317     _skipBits = 2;  // skip next two marked bits ("Printezis-marks")
6318     oop p = oop(addr);
6319     if (p->klass_or_null_acquire() == NULL) {
6320       DEBUG_ONLY(if (!_verifying) {)
6321         // We re-dirty the cards on which this object lies and increase
6322         // the _threshold so that we'll come back to scan this object
6323         // during the preclean or remark phase. (CMSCleanOnEnter)
6324         if (CMSCleanOnEnter) {
6325           size_t sz = _collector->block_size_using_printezis_bits(addr);
6326           HeapWord* end_card_addr = align_up(addr + sz, CardTable::card_size);
6327           MemRegion redirty_range = MemRegion(addr, end_card_addr);
6328           assert(!redirty_range.is_empty(), "Arithmetical tautology");
6329           // Bump _threshold to end_card_addr; note that
6330           // _threshold cannot possibly exceed end_card_addr, anyhow.
6331           // This prevents future clearing of the card as the scan proceeds
6332           // to the right.
6333           assert(_threshold <= end_card_addr,
6334                  "Because we are just scanning into this object");
6335           if (_threshold < end_card_addr) {
6336             _threshold = end_card_addr;
6337           }
6338           if (p->klass_or_null_acquire() != NULL) {
6339             // Redirty the range of cards...
6340             _mut->mark_range(redirty_range);
6341           } // ...else the setting of klass will dirty the card anyway.
6342         }
6343       DEBUG_ONLY(})
6344       return true;
6345     }
6346   }
6347   scanOopsInOop(addr);
6348   return true;
6349 }
6350 
6351 // We take a break if we've been at this for a while,
6352 // so as to avoid monopolizing the locks involved.
6353 void MarkFromRootsClosure::do_yield_work() {
6354   // First give up the locks, then yield, then re-lock
6355   // We should probably use a constructor/destructor idiom to
6356   // do this unlock/lock or modify the MutexUnlocker class to
6357   // serve our purpose. XXX
6358   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6359          "CMS thread should hold CMS token");
6360   assert_lock_strong(_bitMap->lock());
6361   _bitMap->lock()->unlock();
6362   ConcurrentMarkSweepThread::desynchronize(true);
6363   _collector->stopTimer();
6364   _collector->incrementYields();
6365 
6366   // See the comment in coordinator_yield()
6367   for (unsigned i = 0; i < CMSYieldSleepCount &&
6368                        ConcurrentMarkSweepThread::should_yield() &&
6369                        !CMSCollector::foregroundGCIsActive(); ++i) {
6370     os::sleep(Thread::current(), 1, false);
6371   }
6372 
6373   ConcurrentMarkSweepThread::synchronize(true);
6374   _bitMap->lock()->lock_without_safepoint_check();
6375   _collector->startTimer();
6376 }
6377 
6378 void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
6379   assert(_bitMap->isMarked(ptr), "expected bit to be set");
6380   assert(_markStack->isEmpty(),
6381          "should drain stack to limit stack usage");
6382   // convert ptr to an oop preparatory to scanning
6383   oop obj = oop(ptr);
6384   // Ignore mark word in verification below, since we
6385   // may be running concurrent with mutators.
6386   assert(oopDesc::is_oop(obj, true), "should be an oop");
6387   assert(_finger <= ptr, "_finger runneth ahead");
6388   // advance the finger to right end of this object
6389   _finger = ptr + obj->size();
6390   assert(_finger > ptr, "we just incremented it above");
6391   // On large heaps, it may take us some time to get through
6392   // the marking phase. During
6393   // this time it's possible that a lot of mutations have
6394   // accumulated in the card table and the mod union table --
6395   // these mutation records are redundant until we have
6396   // actually traced into the corresponding card.
6397   // Here, we check whether advancing the finger would make
6398   // us cross into a new card, and if so clear corresponding
6399   // cards in the MUT (preclean them in the card-table in the
6400   // future).
6401 
6402   DEBUG_ONLY(if (!_verifying) {)
6403     // The clean-on-enter optimization is disabled by default,
6404     // until we fix 6178663.
6405     if (CMSCleanOnEnter && (_finger > _threshold)) {
6406       // [_threshold, _finger) represents the interval
6407       // of cards to be cleared  in MUT (or precleaned in card table).
6408       // The set of cards to be cleared is all those that overlap
6409       // with the interval [_threshold, _finger); note that
6410       // _threshold is always kept card-aligned but _finger isn't
6411       // always card-aligned.
6412       HeapWord* old_threshold = _threshold;
6413       assert(is_aligned(old_threshold, CardTable::card_size),
6414              "_threshold should always be card-aligned");
6415       _threshold = align_up(_finger, CardTable::card_size);
6416       MemRegion mr(old_threshold, _threshold);
6417       assert(!mr.is_empty(), "Control point invariant");
6418       assert(_span.contains(mr), "Should clear within span");
6419       _mut->clear_range(mr);
6420     }
6421   DEBUG_ONLY(})
6422   // Note: the finger doesn't advance while we drain
6423   // the stack below.
6424   PushOrMarkClosure pushOrMarkClosure(_collector,
6425                                       _span, _bitMap, _markStack,
6426                                       _finger, this);
6427   bool res = _markStack->push(obj);
6428   assert(res, "Empty non-zero size stack should have space for single push");
6429   while (!_markStack->isEmpty()) {
6430     oop new_oop = _markStack->pop();
6431     // Skip verifying header mark word below because we are
6432     // running concurrent with mutators.
6433     assert(oopDesc::is_oop(new_oop, true), "Oops! expected to pop an oop");
6434     // now scan this oop's oops
6435     new_oop->oop_iterate(&pushOrMarkClosure);
6436     do_yield_check();
6437   }
6438   assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
6439 }
6440 
6441 ParMarkFromRootsClosure::ParMarkFromRootsClosure(CMSConcMarkingTask* task,
6442                        CMSCollector* collector, MemRegion span,
6443                        CMSBitMap* bit_map,
6444                        OopTaskQueue* work_queue,
6445                        CMSMarkStack*  overflow_stack):
6446   _collector(collector),
6447   _whole_span(collector->_span),
6448   _span(span),
6449   _bit_map(bit_map),
6450   _mut(&collector->_modUnionTable),
6451   _work_queue(work_queue),
6452   _overflow_stack(overflow_stack),
6453   _skip_bits(0),
6454   _task(task)
6455 {
6456   assert(_work_queue->size() == 0, "work_queue should be empty");
6457   _finger = span.start();
6458   _threshold = _finger;     // XXX Defer clear-on-enter optimization for now
6459   assert(_span.contains(_finger), "Out of bounds _finger?");
6460 }
6461 
6462 // Should revisit to see if this should be restructured for
6463 // greater efficiency.
6464 bool ParMarkFromRootsClosure::do_bit(size_t offset) {
6465   if (_skip_bits > 0) {
6466     _skip_bits--;
6467     return true;
6468   }
6469   // convert offset into a HeapWord*
6470   HeapWord* addr = _bit_map->startWord() + offset;
6471   assert(_bit_map->endWord() && addr < _bit_map->endWord(),
6472          "address out of range");
6473   assert(_bit_map->isMarked(addr), "tautology");
6474   if (_bit_map->isMarked(addr+1)) {
6475     // this is an allocated object that might not yet be initialized
6476     assert(_skip_bits == 0, "tautology");
6477     _skip_bits = 2;  // skip next two marked bits ("Printezis-marks")
6478     oop p = oop(addr);
6479     if (p->klass_or_null_acquire() == NULL) {
6480       // With the clean-on-enter optimization we would redirty the card and
6481       // avoid clearing it by increasing the threshold (deferred here).
6482       return true;
6483     }
6484   }
6485   scan_oops_in_oop(addr);
6486   return true;
6487 }
6488 
6489 void ParMarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
6490   assert(_bit_map->isMarked(ptr), "expected bit to be set");
6491   // Should we assert that our work queue is empty or
6492   // below some drain limit?
6493   assert(_work_queue->size() == 0,
6494          "should drain stack to limit stack usage");
6495   // convert ptr to an oop preparatory to scanning
6496   oop obj = oop(ptr);
6497   // Ignore mark word in verification below, since we
6498   // may be running concurrent with mutators.
6499   assert(oopDesc::is_oop(obj, true), "should be an oop");
6500   assert(_finger <= ptr, "_finger runneth ahead");
6501   // advance the finger to right end of this object
6502   _finger = ptr + obj->size();
6503   assert(_finger > ptr, "we just incremented it above");
6504   // On large heaps, it may take us some time to get through
6505   // the marking phase. During
6506   // this time it's possible that a lot of mutations have
6507   // accumulated in the card table and the mod union table --
6508   // these mutation records are redundant until we have
6509   // actually traced into the corresponding card.
6510   // Here, we check whether advancing the finger would make
6511   // us cross into a new card, and if so clear corresponding
6512   // cards in the MUT (preclean them in the card-table in the
6513   // future).
6514 
6515   // The clean-on-enter optimization is disabled by default,
6516   // until we fix 6178663.
6517   if (CMSCleanOnEnter && (_finger > _threshold)) {
6518     // [_threshold, _finger) represents the interval
6519     // of cards to be cleared  in MUT (or precleaned in card table).
6520     // The set of cards to be cleared is all those that overlap
6521     // with the interval [_threshold, _finger); note that
6522     // _threshold is always kept card-aligned but _finger isn't
6523     // always card-aligned.
6524     HeapWord* old_threshold = _threshold;
6525     assert(is_aligned(old_threshold, CardTable::card_size),
6526            "_threshold should always be card-aligned");
6527     _threshold = align_up(_finger, CardTable::card_size);
6528     MemRegion mr(old_threshold, _threshold);
6529     assert(!mr.is_empty(), "Control point invariant");
6530     assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
6531     _mut->clear_range(mr);
6532   }
6533 
6534   // Note: the local finger doesn't advance while we drain
6535   // the stack below, but the global finger sure can and will.
6536   HeapWord* volatile* gfa = _task->global_finger_addr();
6537   ParPushOrMarkClosure pushOrMarkClosure(_collector,
6538                                          _span, _bit_map,
6539                                          _work_queue,
6540                                          _overflow_stack,
6541                                          _finger,
6542                                          gfa, this);
6543   bool res = _work_queue->push(obj);   // overflow could occur here
6544   assert(res, "Will hold once we use workqueues");
6545   while (true) {
6546     oop new_oop;
6547     if (!_work_queue->pop_local(new_oop)) {
6548       // We emptied our work_queue; check if there's stuff that can
6549       // be gotten from the overflow stack.
6550       if (CMSConcMarkingTask::get_work_from_overflow_stack(
6551             _overflow_stack, _work_queue)) {
6552         do_yield_check();
6553         continue;
6554       } else {  // done
6555         break;
6556       }
6557     }
6558     // Skip verifying header mark word below because we are
6559     // running concurrent with mutators.
6560     assert(oopDesc::is_oop(new_oop, true), "Oops! expected to pop an oop");
6561     // now scan this oop's oops
6562     new_oop->oop_iterate(&pushOrMarkClosure);
6563     do_yield_check();
6564   }
6565   assert(_work_queue->size() == 0, "tautology, emphasizing post-condition");
6566 }
6567 
6568 // Yield in response to a request from VM Thread or
6569 // from mutators.
6570 void ParMarkFromRootsClosure::do_yield_work() {
6571   assert(_task != NULL, "sanity");
6572   _task->yield();
6573 }
6574 
6575 // A variant of the above used for verifying CMS marking work.
6576 MarkFromRootsVerifyClosure::MarkFromRootsVerifyClosure(CMSCollector* collector,
6577                         MemRegion span,
6578                         CMSBitMap* verification_bm, CMSBitMap* cms_bm,
6579                         CMSMarkStack*  mark_stack):
6580   _collector(collector),
6581   _span(span),
6582   _verification_bm(verification_bm),
6583   _cms_bm(cms_bm),
6584   _mark_stack(mark_stack),
6585   _pam_verify_closure(collector, span, verification_bm, cms_bm,
6586                       mark_stack)
6587 {
6588   assert(_mark_stack->isEmpty(), "stack should be empty");
6589   _finger = _verification_bm->startWord();
6590   assert(_collector->_restart_addr == NULL, "Sanity check");
6591   assert(_span.contains(_finger), "Out of bounds _finger?");
6592 }
6593 
6594 void MarkFromRootsVerifyClosure::reset(HeapWord* addr) {
6595   assert(_mark_stack->isEmpty(), "would cause duplicates on stack");
6596   assert(_span.contains(addr), "Out of bounds _finger?");
6597   _finger = addr;
6598 }
6599 
6600 // Should revisit to see if this should be restructured for
6601 // greater efficiency.
6602 bool MarkFromRootsVerifyClosure::do_bit(size_t offset) {
6603   // convert offset into a HeapWord*
6604   HeapWord* addr = _verification_bm->startWord() + offset;
6605   assert(_verification_bm->endWord() && addr < _verification_bm->endWord(),
6606          "address out of range");
6607   assert(_verification_bm->isMarked(addr), "tautology");
6608   assert(_cms_bm->isMarked(addr), "tautology");
6609 
6610   assert(_mark_stack->isEmpty(),
6611          "should drain stack to limit stack usage");
6612   // convert addr to an oop preparatory to scanning
6613   oop obj = oop(addr);
6614   assert(oopDesc::is_oop(obj), "should be an oop");
6615   assert(_finger <= addr, "_finger runneth ahead");
6616   // advance the finger to right end of this object
6617   _finger = addr + obj->size();
6618   assert(_finger > addr, "we just incremented it above");
6619   // Note: the finger doesn't advance while we drain
6620   // the stack below.
6621   bool res = _mark_stack->push(obj);
6622   assert(res, "Empty non-zero size stack should have space for single push");
6623   while (!_mark_stack->isEmpty()) {
6624     oop new_oop = _mark_stack->pop();
6625     assert(oopDesc::is_oop(new_oop), "Oops! expected to pop an oop");
6626     // now scan this oop's oops
6627     new_oop->oop_iterate(&_pam_verify_closure);
6628   }
6629   assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
6630   return true;
6631 }
6632 
6633 PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
6634   CMSCollector* collector, MemRegion span,
6635   CMSBitMap* verification_bm, CMSBitMap* cms_bm,
6636   CMSMarkStack*  mark_stack):
6637   MetadataVisitingOopIterateClosure(collector->ref_processor()),
6638   _collector(collector),
6639   _span(span),
6640   _verification_bm(verification_bm),
6641   _cms_bm(cms_bm),
6642   _mark_stack(mark_stack)
6643 { }
6644 
6645 template <class T> void PushAndMarkVerifyClosure::do_oop_work(T *p) {
6646   oop obj = RawAccess<>::oop_load(p);
6647   do_oop(obj);
6648 }
6649 
6650 void PushAndMarkVerifyClosure::do_oop(oop* p)       { PushAndMarkVerifyClosure::do_oop_work(p); }
6651 void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
6652 
6653 // Upon stack overflow, we discard (part of) the stack,
6654 // remembering the least address amongst those discarded
6655 // in CMSCollector's _restart_addr.
6656 void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
6657   // Remember the least grey address discarded
6658   HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
6659   _collector->lower_restart_addr(ra);
6660   _mark_stack->reset();  // discard stack contents
6661   _mark_stack->expand(); // expand the stack if possible
6662 }
6663 
6664 void PushAndMarkVerifyClosure::do_oop(oop obj) {
6665   assert(oopDesc::is_oop_or_null(obj), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6666   HeapWord* addr = (HeapWord*)obj;
6667   if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
6668     // Oop lies in _span and isn't yet grey or black
6669     _verification_bm->mark(addr);            // now grey
6670     if (!_cms_bm->isMarked(addr)) {
6671       Log(gc, verify) log;
6672       ResourceMark rm;
6673       LogStream ls(log.error());
6674       oop(addr)->print_on(&ls);
6675       log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
6676       fatal("... aborting");
6677     }
6678 
6679     if (!_mark_stack->push(obj)) { // stack overflow
6680       log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _mark_stack->capacity());
6681       assert(_mark_stack->isFull(), "Else push should have succeeded");
6682       handle_stack_overflow(addr);
6683     }
6684     // anything including and to the right of _finger
6685     // will be scanned as we iterate over the remainder of the
6686     // bit map
6687   }
6688 }
6689 
6690 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
6691                      MemRegion span,
6692                      CMSBitMap* bitMap, CMSMarkStack*  markStack,
6693                      HeapWord* finger, MarkFromRootsClosure* parent) :
6694   MetadataVisitingOopIterateClosure(collector->ref_processor()),
6695   _collector(collector),
6696   _span(span),
6697   _bitMap(bitMap),
6698   _markStack(markStack),
6699   _finger(finger),
6700   _parent(parent)
6701 { }
6702 
6703 ParPushOrMarkClosure::ParPushOrMarkClosure(CMSCollector* collector,
6704                                            MemRegion span,
6705                                            CMSBitMap* bit_map,
6706                                            OopTaskQueue* work_queue,
6707                                            CMSMarkStack*  overflow_stack,
6708                                            HeapWord* finger,
6709                                            HeapWord* volatile* global_finger_addr,
6710                                            ParMarkFromRootsClosure* parent) :
6711   MetadataVisitingOopIterateClosure(collector->ref_processor()),
6712   _collector(collector),
6713   _whole_span(collector->_span),
6714   _span(span),
6715   _bit_map(bit_map),
6716   _work_queue(work_queue),
6717   _overflow_stack(overflow_stack),
6718   _finger(finger),
6719   _global_finger_addr(global_finger_addr),
6720   _parent(parent)
6721 { }
6722 
6723 // Assumes thread-safe access by callers, who are
6724 // responsible for mutual exclusion.
6725 void CMSCollector::lower_restart_addr(HeapWord* low) {
6726   assert(_span.contains(low), "Out of bounds addr");
6727   if (_restart_addr == NULL) {
6728     _restart_addr = low;
6729   } else {
6730     _restart_addr = MIN2(_restart_addr, low);
6731   }
6732 }
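
     // A note on the overflow protocol (a sketch only; names below are
     // illustrative and the actual restart loop lives in the marking code
     // earlier in this file): when a marking stack or work queue overflows,
     // handle_stack_overflow() discards the stack contents and records the
     // least discarded grey address here. Once the current bit-map iteration
     // completes, marking is restarted from that address, roughly:
     //
     //   while (_restart_addr != NULL) {
     //     HeapWord* ra = _restart_addr;
     //     _restart_addr = NULL;               // clear before rescanning
     //     mark_from_roots_closure.reset(ra);  // resume the iteration at ra
     //     // ... iterate the mark bit map again from ra to the end of _span ...
     //   }
     //
     // This is also why MarkFromRootsClosure::reset() insists that the mark
     // stack be empty: restarting with leftover entries could push duplicates.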
6733 
6734 // Upon stack overflow, we discard (part of) the stack,
6735 // remembering the least address amongst those discarded
6736 // in CMSCollector's _restart_addr.
6737 void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
6738   // Remember the least grey address discarded
6739   HeapWord* ra = (HeapWord*)_markStack->least_value(lost);
6740   _collector->lower_restart_addr(ra);
6741   _markStack->reset();  // discard stack contents
6742   _markStack->expand(); // expand the stack if possible
6743 }
6744 
6745 // Upon stack overflow, we discard (part of) the stack,
6746 // remembering the least address amongst those discarded
6747 // in CMSCollector's _restart_addr.
6748 void ParPushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
6749   // We need to do this under a mutex to prevent other
6750   // workers from interfering with the work done below.
6751   MutexLockerEx ml(_overflow_stack->par_lock(),
6752                    Mutex::_no_safepoint_check_flag);
6753   // Remember the least grey address discarded
6754   HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
6755   _collector->lower_restart_addr(ra);
6756   _overflow_stack->reset();  // discard stack contents
6757   _overflow_stack->expand(); // expand the stack if possible
6758 }
6759 
6760 void PushOrMarkClosure::do_oop(oop obj) {
6761   // Ignore mark word because we are running concurrent with mutators.
6762   assert(oopDesc::is_oop_or_null(obj, true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6763   HeapWord* addr = (HeapWord*)obj;
6764   if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
6765     // Oop lies in _span and isn't yet grey or black
6766     _bitMap->mark(addr);            // now grey
6767     if (addr < _finger) {
6768       // the bit map iteration has already either passed, or
6769       // sampled, this bit in the bit map; we'll need to
6770       // use the marking stack to scan this oop's oops.
6771       bool simulate_overflow = false;
6772       NOT_PRODUCT(
6773         if (CMSMarkStackOverflowALot &&
6774             _collector->simulate_overflow()) {
6775           // simulate a stack overflow
6776           simulate_overflow = true;
6777         }
6778       )
6779       if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
6780         log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _markStack->capacity());
6781         assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
6782         handle_stack_overflow(addr);
6783       }
6784     }
6785     // anything including and to the right of _finger
6786     // will be scanned as we iterate over the remainder of the
6787     // bit map
6788     do_yield_check();
6789   }
6790 }
6791 
6792 void ParPushOrMarkClosure::do_oop(oop obj) {
6793   // Ignore mark word because we are running concurrent with mutators.
6794   assert(oopDesc::is_oop_or_null(obj, true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6795   HeapWord* addr = (HeapWord*)obj;
6796   if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
6797     // Oop lies in _span and isn't yet grey or black
6798     // We read the global_finger (volatile read) strictly after marking oop
6799     bool res = _bit_map->par_mark(addr);    // now grey
6800     volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
6801     // Should we push this marked oop on our stack?
6802     // -- if someone else marked it, nothing to do
6803     // -- if target oop is above global finger nothing to do
6804     // -- if target oop is in chunk and above local finger
6805     //      then nothing to do
6806     // -- else push on work queue
6807     if (   !res       // someone else marked it, they will deal with it
6808         || (addr >= *gfa)  // will be scanned in a later task
6809         || (_span.contains(addr) && addr >= _finger)) { // later in this chunk
6810       return;
6811     }
6812     // the bit map iteration has already either passed, or
6813     // sampled, this bit in the bit map; we'll need to
6814     // use the marking stack to scan this oop's oops.
6815     bool simulate_overflow = false;
6816     NOT_PRODUCT(
6817       if (CMSMarkStackOverflowALot &&
6818           _collector->simulate_overflow()) {
6819         // simulate a stack overflow
6820         simulate_overflow = true;
6821       }
6822     )
6823     if (simulate_overflow ||
6824         !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
6825       // stack overflow
6826       log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _overflow_stack->capacity());
6827       // We cannot assert that the overflow stack is full because
6828       // it may have been emptied since.
6829       assert(simulate_overflow ||
6830              _work_queue->size() == _work_queue->max_elems(),
6831             "Else push should have succeeded");
6832       handle_stack_overflow(addr);
6833     }
6834     do_yield_check();
6835   }
6836 }
6837 
6838 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
6839                                        MemRegion span,
6840                                        ReferenceDiscoverer* rd,
6841                                        CMSBitMap* bit_map,
6842                                        CMSBitMap* mod_union_table,
6843                                        CMSMarkStack*  mark_stack,
6844                                        bool           concurrent_precleaning):
6845   MetadataVisitingOopIterateClosure(rd),
6846   _collector(collector),
6847   _span(span),
6848   _bit_map(bit_map),
6849   _mod_union_table(mod_union_table),
6850   _mark_stack(mark_stack),
6851   _concurrent_precleaning(concurrent_precleaning)
6852 {
6853   assert(ref_discoverer() != NULL, "ref_discoverer shouldn't be NULL");
6854 }
6855 
6856 // Grey object rescan during pre-cleaning and second checkpoint phases --
6857 // the non-parallel version (the parallel version appears further below.)
6858 void PushAndMarkClosure::do_oop(oop obj) {
6859   // Ignore mark word verification. During concurrent precleaning,
6860   // the object monitor may be locked. During the checkpoint
6861   // phases, the object may already have been reached by a different
6862   // path and may be at the end of the global overflow list (so
6863   // the mark word may be NULL).
6864   assert(oopDesc::is_oop_or_null(obj, true /* ignore mark word */),
6865          "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6866   HeapWord* addr = (HeapWord*)obj;
6867   // Check if oop points into the CMS generation
6868   // and is not marked
6869   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
6870     // a white object ...
6871     _bit_map->mark(addr);         // ... now grey
6872     // push on the marking stack (grey set)
6873     bool simulate_overflow = false;
6874     NOT_PRODUCT(
6875       if (CMSMarkStackOverflowALot &&
6876           _collector->simulate_overflow()) {
6877         // simulate a stack overflow
6878         simulate_overflow = true;
6879       }
6880     )
6881     if (simulate_overflow || !_mark_stack->push(obj)) {
6882       if (_concurrent_precleaning) {
6883          // During precleaning we can just dirty the appropriate card(s)
6884          // in the mod union table, thus ensuring that the object remains
6885          // in the grey set  and continue. In the case of object arrays
6886          // we need to dirty all of the cards that the object spans,
6887          // since the rescan of object arrays will be limited to the
6888          // dirty cards.
6889          // Note that no one can be interfering with us in this action
6890          // of dirtying the mod union table, so no locking or atomics
6891          // are required.
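              // For illustration (assuming the default 512-byte cards and
              // 8-byte HeapWords): an objArray at 0x1000 of 100 words ends at
              // 0x1320, so end_card_addr = align_up(0x1320, 512) = 0x1400 and
              // the two cards covering [0x1000, 0x1400) are redirtied, while a
              // non-array would only get its header card at 0x1000 dirtied.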
6892          if (obj->is_objArray()) {
6893            size_t sz = obj->size();
6894            HeapWord* end_card_addr = align_up(addr + sz, CardTable::card_size);
6895            MemRegion redirty_range = MemRegion(addr, end_card_addr);
6896            assert(!redirty_range.is_empty(), "Arithmetical tautology");
6897            _mod_union_table->mark_range(redirty_range);
6898          } else {
6899            _mod_union_table->mark(addr);
6900          }
6901          _collector->_ser_pmc_preclean_ovflw++;
6902       } else {
6903          // During the remark phase, we need to remember this oop
6904          // in the overflow list.
6905          _collector->push_on_overflow_list(obj);
6906          _collector->_ser_pmc_remark_ovflw++;
6907       }
6908     }
6909   }
6910 }
6911 
6912 ParPushAndMarkClosure::ParPushAndMarkClosure(CMSCollector* collector,
6913                                              MemRegion span,
6914                                              ReferenceDiscoverer* rd,
6915                                              CMSBitMap* bit_map,
6916                                              OopTaskQueue* work_queue):
6917   MetadataVisitingOopIterateClosure(rd),
6918   _collector(collector),
6919   _span(span),
6920   _bit_map(bit_map),
6921   _work_queue(work_queue)
6922 {
6923   assert(ref_discoverer() != NULL, "ref_discoverer shouldn't be NULL");
6924 }
6925 
6926 // Grey object rescan during second checkpoint phase --
6927 // the parallel version.
6928 void ParPushAndMarkClosure::do_oop(oop obj) {
6929   // In the assert below, we ignore the mark word because
6930   // this oop may point to an already visited object that is
6931   // on the overflow stack (in which case the mark word has
6932   // been hijacked for chaining into the overflow stack --
6933   // if this is the last object in the overflow stack then
6934   // its mark word will be NULL). Because this object may
6935   // have been subsequently popped off the global overflow
6936   // stack, and the mark word possibly restored to the prototypical
6937   // value, by the time we get to examine this failing assert in
6938   // the debugger, is_oop_or_null(false) may subsequently start
6939   // to hold.
6940   assert(oopDesc::is_oop_or_null(obj, true),
6941          "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6942   HeapWord* addr = (HeapWord*)obj;
6943   // Check if oop points into the CMS generation
6944   // and is not marked
6945   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
6946     // a white object ...
6947     // If we manage to "claim" the object, by being the
6948     // first thread to mark it, then we push it on our
6949     // marking stack
6950     if (_bit_map->par_mark(addr)) {     // ... now grey
6951       // push on work queue (grey set)
6952       bool simulate_overflow = false;
6953       NOT_PRODUCT(
6954         if (CMSMarkStackOverflowALot &&
6955             _collector->par_simulate_overflow()) {
6956           // simulate a stack overflow
6957           simulate_overflow = true;
6958         }
6959       )
6960       if (simulate_overflow || !_work_queue->push(obj)) {
6961         _collector->par_push_on_overflow_list(obj);
6962         _collector->_par_pmc_remark_ovflw++; //  imprecise OK: no need to CAS
6963       }
6964     } // Else, some other thread got there first
6965   }
6966 }
6967 
6968 void CMSPrecleanRefsYieldClosure::do_yield_work() {
6969   Mutex* bml = _collector->bitMapLock();
6970   assert_lock_strong(bml);
6971   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6972          "CMS thread should hold CMS token");
6973 
6974   bml->unlock();
6975   ConcurrentMarkSweepThread::desynchronize(true);
6976 
6977   _collector->stopTimer();
6978   _collector->incrementYields();
6979 
6980   // See the comment in coordinator_yield()
6981   for (unsigned i = 0; i < CMSYieldSleepCount &&
6982                        ConcurrentMarkSweepThread::should_yield() &&
6983                        !CMSCollector::foregroundGCIsActive(); ++i) {
6984     os::sleep(Thread::current(), 1, false);
6985   }
6986 
6987   ConcurrentMarkSweepThread::synchronize(true);
6988   bml->lock();
6989 
6990   _collector->startTimer();
6991 }
6992 
6993 bool CMSPrecleanRefsYieldClosure::should_return() {
6994   if (ConcurrentMarkSweepThread::should_yield()) {
6995     do_yield_work();
6996   }
6997   return _collector->foregroundGCIsActive();
6998 }
6999 
7000 void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
7001   assert(((size_t)mr.start())%CardTable::card_size_in_words == 0,
7002          "mr should be aligned to start at a card boundary");
7003   // We'd like to assert:
7004   // assert(mr.word_size()%CardTable::card_size_in_words == 0,
7005   //        "mr should be a range of cards");
7006   // However, that would be too strong in one case -- the last
7007   // partition ends at _unallocated_block which, in general, can be
7008   // an arbitrary boundary, not necessarily card aligned.
7009   _num_dirty_cards += mr.word_size()/CardTable::card_size_in_words;
7010   _space->object_iterate_mem(mr, &_scan_cl);
7011 }
7012 
7013 SweepClosure::SweepClosure(CMSCollector* collector,
7014                            ConcurrentMarkSweepGeneration* g,
7015                            CMSBitMap* bitMap, bool should_yield) :
7016   _collector(collector),
7017   _g(g),
7018   _sp(g->cmsSpace()),
7019   _limit(_sp->sweep_limit()),
7020   _freelistLock(_sp->freelistLock()),
7021   _bitMap(bitMap),
7022   _inFreeRange(false),           // No free range at beginning of sweep
7023   _freeRangeInFreeLists(false),  // No free range at beginning of sweep
7024   _lastFreeRangeCoalesced(false),
7025   _yield(should_yield),
7026   _freeFinger(g->used_region().start())
7027 {
7028   NOT_PRODUCT(
7029     _numObjectsFreed = 0;
7030     _numWordsFreed   = 0;
7031     _numObjectsLive = 0;
7032     _numWordsLive = 0;
7033     _numObjectsAlreadyFree = 0;
7034     _numWordsAlreadyFree = 0;
7035     _last_fc = NULL;
7036 
7037     _sp->initializeIndexedFreeListArrayReturnedBytes();
7038     _sp->dictionary()->initialize_dict_returned_bytes();
7039   )
7040   assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7041          "sweep _limit out of bounds");
7042   log_develop_trace(gc, sweep)("====================");
7043   log_develop_trace(gc, sweep)("Starting new sweep with limit " PTR_FORMAT, p2i(_limit));
7044 }
7045 
7046 void SweepClosure::print_on(outputStream* st) const {
7047   st->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")",
7048                p2i(_sp->bottom()), p2i(_sp->end()));
7049   st->print_cr("_limit = " PTR_FORMAT, p2i(_limit));
7050   st->print_cr("_freeFinger = " PTR_FORMAT, p2i(_freeFinger));
7051   NOT_PRODUCT(st->print_cr("_last_fc = " PTR_FORMAT, p2i(_last_fc));)
7052   st->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d",
7053                _inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced);
7054 }
7055 
7056 #ifndef PRODUCT
7057 // Assertion checking only:  no useful work in product mode --
7058 // however, if any of the flags below become product flags,
7059 // you may need to review this code to see if it needs to be
7060 // enabled in product mode.
7061 SweepClosure::~SweepClosure() {
7062   assert_lock_strong(_freelistLock);
7063   assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7064          "sweep _limit out of bounds");
7065   if (inFreeRange()) {
7066     Log(gc, sweep) log;
7067     log.error("inFreeRange() should have been reset; dumping state of SweepClosure");
7068     ResourceMark rm;
7069     LogStream ls(log.error());
7070     print_on(&ls);
7071     ShouldNotReachHere();
7072   }
7073 
7074   if (log_is_enabled(Debug, gc, sweep)) {
7075     log_debug(gc, sweep)("Collected " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
7076                          _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
7077     log_debug(gc, sweep)("Live " SIZE_FORMAT " objects,  " SIZE_FORMAT " bytes  Already free " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
7078                          _numObjectsLive, _numWordsLive*sizeof(HeapWord), _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
7079     size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree) * sizeof(HeapWord);
7080     log_debug(gc, sweep)("Total sweep: " SIZE_FORMAT " bytes", totalBytes);
7081   }
7082 
7083   if (log_is_enabled(Trace, gc, sweep) && CMSVerifyReturnedBytes) {
7084     size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
7085     size_t dict_returned_bytes = _sp->dictionary()->sum_dict_returned_bytes();
7086     size_t returned_bytes = indexListReturnedBytes + dict_returned_bytes;
7087     log_trace(gc, sweep)("Returned " SIZE_FORMAT " bytes   Indexed List Returned " SIZE_FORMAT " bytes        Dictionary Returned " SIZE_FORMAT " bytes",
7088                          returned_bytes, indexListReturnedBytes, dict_returned_bytes);
7089   }
7090   log_develop_trace(gc, sweep)("end of sweep with _limit = " PTR_FORMAT, p2i(_limit));
7091   log_develop_trace(gc, sweep)("================");
7092 }
7093 #endif  // PRODUCT
7094 
7095 void SweepClosure::initialize_free_range(HeapWord* freeFinger,
7096     bool freeRangeInFreeLists) {
7097   log_develop_trace(gc, sweep)("---- Start free range at " PTR_FORMAT " with free block (%d)",
7098                                p2i(freeFinger), freeRangeInFreeLists);
7099   assert(!inFreeRange(), "Trampling existing free range");
7100   set_inFreeRange(true);
7101   set_lastFreeRangeCoalesced(false);
7102 
7103   set_freeFinger(freeFinger);
7104   set_freeRangeInFreeLists(freeRangeInFreeLists);
7105   if (CMSTestInFreeList) {
7106     if (freeRangeInFreeLists) {
7107       FreeChunk* fc = (FreeChunk*) freeFinger;
7108       assert(fc->is_free(), "A chunk on the free list should be free.");
7109       assert(fc->size() > 0, "Free range should have a size");
7110       assert(_sp->verify_chunk_in_free_list(fc), "Chunk is not in free lists");
7111     }
7112   }
7113 }
7114 
7115 // Note that the sweeper runs concurrently with mutators. Thus,
7116 // it is possible for direct allocation in this generation to happen
7117 // in the middle of the sweep. Note that the sweeper also coalesces
7118 // contiguous free blocks. Thus, unless the sweeper and the allocator
7119 // synchronize appropriately, freshly allocated blocks may get swept up.
7120 // This is accomplished by the sweeper locking the free lists while
7121 // it is sweeping. Thus blocks that are determined to be free are
7122 // indeed free. There is however one additional complication:
7123 // blocks that have been allocated since the final checkpoint and
7124 // mark, will not have been marked and so would be treated as
7125 // unreachable and swept up. To prevent this, the allocator marks
7126 // the bit map when allocating during the sweep phase. This leads,
7127 // however, to a further complication -- objects may have been allocated
7128 // but not yet initialized -- in the sense that the header isn't yet
7129 // installed. The sweeper cannot then determine the size of the block
7130 // in order to skip over it. To deal with this case, we use a technique
7131 // (due to Printezis) to encode such uninitialized block sizes in the
7132 // bit map. Since the bit map uses one bit per HeapWord and the
7133 // CMS generation has a minimum object size of 3 HeapWords, it follows
7134 // that "normal marks" won't be adjacent in the bit map (there will
7135 // always be at least two 0 bits between successive 1 bits). We make use
7136 // of these "unused" bits to represent uninitialized blocks -- the bit
7137 // corresponding to the start of the uninitialized object and the next
7138 // bit are both set. Finally, a 1 bit marks the end of the object that
7139 // started with the two consecutive 1 bits to indicate its potentially
7140 // uninitialized state.
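     //
     // For illustration only (a sketch of the encoding, not code used below):
     // an uninitialized block starting at word index k with size s >= 3 words
     // is represented by setting bits k, k+1 and k+s-1 in the bit map:
     //
     //   bit index:  k  k+1  k+2 ... k+s-2  k+s-1
     //   bit value:  1   1    0  ...   0      1
     //
     // Given k, the block size can be recovered from the bit map alone, as
     // do_live_chunk() below does:
     //
     //   HeapWord* next = _bitMap->getNextMarkedWordAddress(addr + 2);
     //   size_t size    = pointer_delta(next + 1, addr);   // == s
     //
     // The minimum object size of 3 words ensures that two adjacent set bits
     // can never arise from two ordinary (initialized-object) marks.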
7141 
7142 size_t SweepClosure::do_blk_careful(HeapWord* addr) {
7143   FreeChunk* fc = (FreeChunk*)addr;
7144   size_t res;
7145 
7146   // Check if we are done sweeping. Below we check "addr >= _limit" rather
7147   // than "addr == _limit" because although _limit was a block boundary when
7148   // we started the sweep, it may no longer be one because heap expansion
7149   // may have caused us to coalesce the block ending at the address _limit
7150   // with a newly expanded chunk (this happens when _limit was set to the
7151   // previous _end of the space), so we may have stepped past _limit:
7152   // see the following Zeno-like trail of CRs 6977970, 7008136, 7042740.
7153   if (addr >= _limit) { // we have swept up to or past the limit: finish up
7154     assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7155            "sweep _limit out of bounds");
7156     assert(addr < _sp->end(), "addr out of bounds");
7157     // Flush any free range we might be holding as a single
7158     // coalesced chunk to the appropriate free list.
7159     if (inFreeRange()) {
7160       assert(freeFinger() >= _sp->bottom() && freeFinger() < _limit,
7161              "freeFinger() " PTR_FORMAT " is out of bounds", p2i(freeFinger()));
7162       flush_cur_free_chunk(freeFinger(),
7163                            pointer_delta(addr, freeFinger()));
7164       log_develop_trace(gc, sweep)("Sweep: last chunk: put_free_blk " PTR_FORMAT " (" SIZE_FORMAT ") [coalesced:%d]",
7165                                    p2i(freeFinger()), pointer_delta(addr, freeFinger()),
7166                                    lastFreeRangeCoalesced() ? 1 : 0);
7167     }
7168 
7169     // help the iterator loop finish
7170     return pointer_delta(_sp->end(), addr);
7171   }
7172 
7173   assert(addr < _limit, "sweep invariant");
7174   // check if we should yield
7175   do_yield_check(addr);
7176   if (fc->is_free()) {
7177     // Chunk that is already free
7178     res = fc->size();
7179     do_already_free_chunk(fc);
7180     debug_only(_sp->verifyFreeLists());
7181     // If we flush the chunk at hand in lookahead_and_flush()
7182     // and it's coalesced with a preceding chunk, then the
7183     // process of "mangling" the payload of the coalesced block
7184     // will cause erasure of the size information from the
7185     // (erstwhile) header of all the coalesced blocks but the
7186     // first, so the first disjunct in the assert will not hold
7187     // in that specific case (in which case the second disjunct
7188     // will hold).
7189     assert(res == fc->size() || ((HeapWord*)fc) + res >= _limit,
7190            "Otherwise the size info doesn't change at this step");
7191     NOT_PRODUCT(
7192       _numObjectsAlreadyFree++;
7193       _numWordsAlreadyFree += res;
7194     )
7195     NOT_PRODUCT(_last_fc = fc;)
7196   } else if (!_bitMap->isMarked(addr)) {
7197     // Chunk is fresh garbage
7198     res = do_garbage_chunk(fc);
7199     debug_only(_sp->verifyFreeLists());
7200     NOT_PRODUCT(
7201       _numObjectsFreed++;
7202       _numWordsFreed += res;
7203     )
7204   } else {
7205     // Chunk that is alive.
7206     res = do_live_chunk(fc);
7207     debug_only(_sp->verifyFreeLists());
7208     NOT_PRODUCT(
7209         _numObjectsLive++;
7210         _numWordsLive += res;
7211     )
7212   }
7213   return res;
7214 }
7215 
7216 // For the smart allocation, record the following:
7217 //  split death - a free chunk is removed from its free list because
7218 //      it is being split into two or more chunks.
7219 //  split birth - a free chunk is being added to its free list because
7220 //      a larger free chunk has been split and resulted in this free chunk.
7221 //  coal death - a free chunk is being removed from its free list because
7222 //      it is being coalesced into a larger free chunk.
7223 //  coal birth - a free chunk is being added to its free list because
7224 //      it was created when two or more free chunks were coalesced into
7225 //      this free chunk.
7226 //
7227 // These statistics are used to determine the desired number of free
7228 // chunks of a given size.  The desired number is chosen to be relative
7229 // to the end of a CMS sweep.  The desired number at the end of a sweep
7230 // is the
7231 //      count-at-end-of-previous-sweep (an amount that was enough)
7232 //              - count-at-beginning-of-current-sweep  (the excess)
7233 //              + split-births  (gains in this size during interval)
7234 //              - split-deaths  (demands on this size during interval)
7235 // where the interval is from the end of one sweep to the end of the
7236 // next.
7237 //
7238 // When sweeping, the sweeper maintains an accumulated chunk which is
7239 // the chunk that is made up of chunks that have been coalesced.  That
7240 // will be termed the left-hand chunk.  A new chunk of garbage that
7241 // is being considered for coalescing will be referred to as the
7242 // right-hand chunk.
7243 //
7244 // When making a decision on whether to coalesce a right-hand chunk with
7245 // the current left-hand chunk, the current count vs. the desired count
7246 // of the left-hand chunk is considered.  Also if the right-hand chunk
7247 // is near the large chunk at the end of the heap (see
7248 // ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the
7249 // left-hand chunk is coalesced.
7250 //
7251 // When making a decision about whether to split a chunk, the desired count
7252 // vs. the current count of the candidate to be split is also considered.
7253 // If the candidate is underpopulated (currently fewer chunks than desired),
7254 // a chunk of an overpopulated (currently more chunks than desired) size may
7255 // be chosen.  The "hint" associated with a free list, if non-null, points
7256 // to a free list which may be overpopulated.
7257 //
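     // A worked example of the formula above (illustrative numbers only): if a
     // given size ended the previous sweep with 100 chunks on its free list
     // (an amount that proved sufficient), begins the current sweep with 40,
     // and the interval saw 25 split-births and 10 split-deaths, then the
     // desired count at the end of this sweep is 100 - 40 + 25 - 10 = 75.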
7258 
7259 void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
7260   const size_t size = fc->size();
7261   // Chunks that cannot be coalesced are not in the
7262   // free lists.
7263   if (CMSTestInFreeList && !fc->cantCoalesce()) {
7264     assert(_sp->verify_chunk_in_free_list(fc),
7265            "free chunk should be in free lists");
7266   }
7267   // a chunk that is already free, should not have been
7268   // marked in the bit map
7269   HeapWord* const addr = (HeapWord*) fc;
7270   assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
7271   // Verify that the bit map has no bits marked between
7272   // addr and purported end of this block.
7273   _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7274 
7275   // Some chunks cannot be coalesced under any circumstances.
7276   // See the definition of cantCoalesce().
7277   if (!fc->cantCoalesce()) {
7278     // This chunk can potentially be coalesced.
7279     // All the work is done in
7280     do_post_free_or_garbage_chunk(fc, size);
7281     // Note that if the chunk is not coalescable (the else arm
7282     // below), we unconditionally flush, without needing to do
7283     // a "lookahead," as we do below.
7284     if (inFreeRange()) lookahead_and_flush(fc, size);
7285   } else {
7286     // Code path common to both original and adaptive free lists.
7287 
7288     // can't coalesce with the previous block; this should be treated
7289     // as the end of a free run if any
7290     if (inFreeRange()) {
7291       // we kicked some butt; time to pick up the garbage
7292       assert(freeFinger() < addr, "freeFinger points too high");
7293       flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
7294     }
7295     // else, nothing to do, just continue
7296   }
7297 }
7298 
7299 size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
7300   // This is a chunk of garbage.  It is not in any free list.
7301   // Add it to a free list or let it possibly be coalesced into
7302   // a larger chunk.
7303   HeapWord* const addr = (HeapWord*) fc;
7304   const size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
7305 
7306   // Verify that the bit map has no bits marked between
7307   // addr and purported end of the just-dead object.
7308   _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7309   do_post_free_or_garbage_chunk(fc, size);
7310 
7311   assert(_limit >= addr + size,
7312          "A freshly garbage chunk can't possibly straddle over _limit");
7313   if (inFreeRange()) lookahead_and_flush(fc, size);
7314   return size;
7315 }
7316 
7317 size_t SweepClosure::do_live_chunk(FreeChunk* fc) {
7318   HeapWord* addr = (HeapWord*) fc;
7319   // The sweeper has just found a live object. Return any accumulated
7320   // left hand chunk to the free lists.
7321   if (inFreeRange()) {
7322     assert(freeFinger() < addr, "freeFinger points too high");
7323     flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
7324   }
7325 
7326   // This object is live: we'd normally expect this to be
7327   // an oop, and like to assert the following:
7328   // assert(oopDesc::is_oop(oop(addr)), "live block should be an oop");
7329   // However, as we commented above, this may be an object whose
7330   // header hasn't yet been initialized.
7331   size_t size;
7332   assert(_bitMap->isMarked(addr), "Tautology for this control point");
7333   if (_bitMap->isMarked(addr + 1)) {
7334     // Determine the size from the bit map, rather than trying to
7335     // compute it from the object header.
7336     HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
7337     size = pointer_delta(nextOneAddr + 1, addr);
7338     assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
7339            "alignment problem");
7340 
7341 #ifdef ASSERT
7342       if (oop(addr)->klass_or_null_acquire() != NULL) {
7343         // Ignore mark word because we are running concurrent with mutators
7344         assert(oopDesc::is_oop(oop(addr), true), "live block should be an oop");
7345         assert(size ==
7346                CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
7347                "P-mark and computed size do not agree");
7348       }
7349 #endif
7350 
7351   } else {
7352     // This should be an initialized object that's alive.
7353     assert(oop(addr)->klass_or_null_acquire() != NULL,
7354            "Should be an initialized object");
7355     // Ignore mark word because we are running concurrent with mutators
7356     assert(oopDesc::is_oop(oop(addr), true), "live block should be an oop");
7357     // Verify that the bit map has no bits marked between
7358     // addr and purported end of this block.
7359     size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
7360     assert(size >= 3, "Necessary for Printezis marks to work");
7361     assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
7362     DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
7363   }
7364   return size;
7365 }
7366 
7367 void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
7368                                                  size_t chunkSize) {
7369   // do_post_free_or_garbage_chunk() should only be called in the case
7370   // of the adaptive free list allocator.
7371   const bool fcInFreeLists = fc->is_free();
7372   assert((HeapWord*)fc <= _limit, "sweep invariant");
7373   if (CMSTestInFreeList && fcInFreeLists) {
7374     assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists");
7375   }
7376 
7377   log_develop_trace(gc, sweep)("  -- pick up another chunk at " PTR_FORMAT " (" SIZE_FORMAT ")", p2i(fc), chunkSize);
7378 
7379   HeapWord* const fc_addr = (HeapWord*) fc;
7380 
7381   bool coalesce = false;
7382   const size_t left  = pointer_delta(fc_addr, freeFinger());
7383   const size_t right = chunkSize;
7384   switch (FLSCoalescePolicy) {
7385     // numeric value forms a coalition aggressiveness metric
7386     case 0:  { // never coalesce
7387       coalesce = false;
7388       break;
7389     }
7390     case 1: { // coalesce if left & right chunks on overpopulated lists
7391       coalesce = _sp->coalOverPopulated(left) &&
7392                  _sp->coalOverPopulated(right);
7393       break;
7394     }
7395     case 2: { // coalesce if left chunk on overpopulated list (default)
7396       coalesce = _sp->coalOverPopulated(left);
7397       break;
7398     }
7399     case 3: { // coalesce if left OR right chunk on overpopulated list
7400       coalesce = _sp->coalOverPopulated(left) ||
7401                  _sp->coalOverPopulated(right);
7402       break;
7403     }
7404     case 4: { // always coalesce
7405       coalesce = true;
7406       break;
7407     }
7408     default:
7409      ShouldNotReachHere();
7410   }
7411 
7412   // Should the current free range be coalesced?
7413   // If the chunk is in a free range and either we decided to coalesce above
7414   // or the chunk is near the large block at the end of the heap
7415   // (isNearLargestChunk() returns true), then coalesce this chunk.
7416   const bool doCoalesce = inFreeRange()
7417                           && (coalesce || _g->isNearLargestChunk(fc_addr));
7418   if (doCoalesce) {
7419     // Coalesce the current free range on the left with the new
7420     // chunk on the right.  If either is on a free list,
7421     // it must be removed from the list and stashed in the closure.
7422     if (freeRangeInFreeLists()) {
7423       FreeChunk* const ffc = (FreeChunk*)freeFinger();
7424       assert(ffc->size() == pointer_delta(fc_addr, freeFinger()),
7425              "Size of free range is inconsistent with chunk size.");
7426       if (CMSTestInFreeList) {
7427         assert(_sp->verify_chunk_in_free_list(ffc),
7428                "Chunk is not in free lists");
7429       }
7430       _sp->coalDeath(ffc->size());
7431       _sp->removeFreeChunkFromFreeLists(ffc);
7432       set_freeRangeInFreeLists(false);
7433     }
7434     if (fcInFreeLists) {
7435       _sp->coalDeath(chunkSize);
7436       assert(fc->size() == chunkSize,
7437         "The chunk has the wrong size or is not in the free lists");
7438       _sp->removeFreeChunkFromFreeLists(fc);
7439     }
7440     set_lastFreeRangeCoalesced(true);
7441     print_free_block_coalesced(fc);
7442   } else {  // not in a free range and/or should not coalesce
7443     // Return the current free range and start a new one.
7444     if (inFreeRange()) {
7445       // In a free range but cannot coalesce with the right hand chunk.
7446       // Put the current free range into the free lists.
7447       flush_cur_free_chunk(freeFinger(),
7448                            pointer_delta(fc_addr, freeFinger()));
7449     }
7450     // Set up for new free range.  Pass along whether the right hand
7451     // chunk is in the free lists.
7452     initialize_free_range((HeapWord*)fc, fcInFreeLists);
7453   }
7454 }
7455 
7456 // Lookahead flush:
7457 // If we are tracking a free range, and this is the last chunk that
7458 // we'll look at because its end crosses past _limit, we'll preemptively
7459 // flush it along with any free range we may be holding on to. Note that
7460 // this can be the case only for an already free or freshly garbage
7461 // chunk. If this block is an object, it can never straddle
7462 // over _limit. The "straddling" occurs when _limit is set at
7463 // the previous end of the space when this cycle started, and
7464 // a subsequent heap expansion caused the previously co-terminal
7465 // free block to be coalesced with the newly expanded portion,
7466 // thus rendering _limit a non-block-boundary, making it dangerous
7467 // for the sweeper to step over and examine.
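     // A concrete (hypothetical) instance: _limit was set to the space's old
     // end E when this sweep started, and a free block [F, E) was co-terminal
     // with the space; the space then expanded to E' > E and the co-terminal
     // block was coalesced into [F, E'). When the sweeper reaches F, eob == E'
     // crosses _limit == E, so the free range being tracked is flushed here
     // rather than waiting for a block boundary at _limit, which no longer
     // exists.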
7468 void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
7469   assert(inFreeRange(), "Should only be called if currently in a free range.");
7470   HeapWord* const eob = ((HeapWord*)fc) + chunk_size;
7471   assert(_sp->used_region().contains(eob - 1),
7472          "eob = " PTR_FORMAT " eob-1 = " PTR_FORMAT " _limit = " PTR_FORMAT
7473          " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
7474          " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
7475          p2i(eob), p2i(eob-1), p2i(_limit), p2i(_sp->bottom()), p2i(_sp->end()), p2i(fc), chunk_size);
7476   if (eob >= _limit) {
7477     assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit");
7478     log_develop_trace(gc, sweep)("_limit " PTR_FORMAT " reached or crossed by block "
7479                                  "[" PTR_FORMAT "," PTR_FORMAT ") in space "
7480                                  "[" PTR_FORMAT "," PTR_FORMAT ")",
7481                                  p2i(_limit), p2i(fc), p2i(eob), p2i(_sp->bottom()), p2i(_sp->end()));
7482     // Return the storage we are tracking back into the free lists.
7483     log_develop_trace(gc, sweep)("Flushing ... ");
7484     assert(freeFinger() < eob, "Error");
7485     flush_cur_free_chunk( freeFinger(), pointer_delta(eob, freeFinger()));
7486   }
7487 }
7488 
7489 void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
7490   assert(inFreeRange(), "Should only be called if currently in a free range.");
7491   assert(size > 0,
7492     "A zero sized chunk cannot be added to the free lists.");
7493   if (!freeRangeInFreeLists()) {
7494     if (CMSTestInFreeList) {
7495       FreeChunk* fc = (FreeChunk*) chunk;
7496       fc->set_size(size);
7497       assert(!_sp->verify_chunk_in_free_list(fc),
7498              "chunk should not be in free lists yet");
7499     }
7500     log_develop_trace(gc, sweep)(" -- add free block " PTR_FORMAT " (" SIZE_FORMAT ") to free lists", p2i(chunk), size);
7501     // A new free range is going to be starting.  The current
7502     // free range has not been added to the free lists yet or
7503     // was removed so add it back.
7504     // If the current free range was coalesced, then the death
7505     // of the free range was recorded.  Record a birth now.
7506     if (lastFreeRangeCoalesced()) {
7507       _sp->coalBirth(size);
7508     }
7509     _sp->addChunkAndRepairOffsetTable(chunk, size,
7510             lastFreeRangeCoalesced());
7511   } else {
7512     log_develop_trace(gc, sweep)("Already in free list: nothing to flush");
7513   }
7514   set_inFreeRange(false);
7515   set_freeRangeInFreeLists(false);
7516 }
7517 
7518 // We take a break if we've been at this for a while,
7519 // so as to avoid monopolizing the locks involved.
7520 void SweepClosure::do_yield_work(HeapWord* addr) {
7521   // Return current free chunk being used for coalescing (if any)
7522   // to the appropriate freelist.  After yielding, the next
7523   // free block encountered will start a coalescing range of
7524   // free blocks.  If the next free block is adjacent to the
7525   // chunk just flushed, they will need to wait for the next
7526   // sweep to be coalesced.
7527   if (inFreeRange()) {
7528     flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
7529   }
7530 
7531   // First give up the locks, then yield, then re-lock.
7532   // We should probably use a constructor/destructor idiom to
7533   // do this unlock/lock or modify the MutexUnlocker class to
7534   // serve our purpose. XXX
7535   assert_lock_strong(_bitMap->lock());
7536   assert_lock_strong(_freelistLock);
7537   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7538          "CMS thread should hold CMS token");
7539   _bitMap->lock()->unlock();
7540   _freelistLock->unlock();
7541   ConcurrentMarkSweepThread::desynchronize(true);
7542   _collector->stopTimer();
7543   _collector->incrementYields();
7544 
7545   // See the comment in coordinator_yield()
7546   for (unsigned i = 0; i < CMSYieldSleepCount &&
7547                        ConcurrentMarkSweepThread::should_yield() &&
7548                        !CMSCollector::foregroundGCIsActive(); ++i) {
7549     os::sleep(Thread::current(), 1, false);
7550   }
7551 
7552   ConcurrentMarkSweepThread::synchronize(true);
7553   _freelistLock->lock();
7554   _bitMap->lock()->lock_without_safepoint_check();
7555   _collector->startTimer();
7556 }
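
// The XXX note in do_yield_work() above suggests replacing the manual
// unlock/relock pair with a constructor/destructor (RAII) idiom.  A
// minimal sketch of what such a helper could look like follows.  It is
// guarded out and purely illustrative: the class name is hypothetical,
// not an existing HotSpot utility.
#if 0
class SweepYieldUnlocker : public StackObj {
  CMSBitMap* const _bit_map;
  Mutex*     const _freelist_lock;
 public:
  SweepYieldUnlocker(CMSBitMap* bit_map, Mutex* freelist_lock)
    : _bit_map(bit_map), _freelist_lock(freelist_lock) {
    // Release in the same order do_yield_work() does today.
    assert_lock_strong(_bit_map->lock());
    assert_lock_strong(_freelist_lock);
    _bit_map->lock()->unlock();
    _freelist_lock->unlock();
  }
  ~SweepYieldUnlocker() {
    // Reacquire in the order do_yield_work() uses after the yield.
    _freelist_lock->lock();
    _bit_map->lock()->lock_without_safepoint_check();
  }
};
#endif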
7557 
7558 #ifndef PRODUCT
7559 // This is actually very useful in a product build if it can
7560 // be called from the debugger.  Compile it into the product
7561 // as needed.
7562 bool debug_verify_chunk_in_free_list(FreeChunk* fc) {
7563   return debug_cms_space->verify_chunk_in_free_list(fc);
7564 }
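// For example, with the VM stopped in gdb one might evaluate something
// like the following (the chunk address is of course hypothetical):
//   (gdb) call debug_verify_chunk_in_free_list((FreeChunk*)0x00007f30d4000000)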
7565 #endif
7566 
7567 void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const {
7568   log_develop_trace(gc, sweep)("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")",
7569                                p2i(fc), fc->size());
7570 }
7571 
7572 // CMSIsAliveClosure
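// A non-NULL object is treated as live if it lies outside the span being
// collected, or if it lies inside the span and is marked in the CMS bit map.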
7573 bool CMSIsAliveClosure::do_object_b(oop obj) {
7574   HeapWord* addr = (HeapWord*)obj;
7575   return addr != NULL &&
7576          (!_span.contains(addr) || _bit_map->isMarked(addr));
7577 }
7578 
7579 CMSKeepAliveClosure::CMSKeepAliveClosure(CMSCollector* collector,
7580                       MemRegion span,
7581                       CMSBitMap* bit_map, CMSMarkStack* mark_stack,
7582                       bool cpc):
7583   _collector(collector),
7584   _span(span),
7585   _mark_stack(mark_stack),
7586   _bit_map(bit_map),
7587   _concurrent_precleaning(cpc) {
7588   assert(!_span.is_empty(), "Empty span could spell trouble");
7589 }
7590 
7591 
7592 // CMSKeepAliveClosure: the serial version
7593 void CMSKeepAliveClosure::do_oop(oop obj) {
7594   HeapWord* addr = (HeapWord*)obj;
7595   if (_span.contains(addr) &&
7596       !_bit_map->isMarked(addr)) {
7597     _bit_map->mark(addr);
7598     bool simulate_overflow = false;
7599     NOT_PRODUCT(
7600       if (CMSMarkStackOverflowALot &&
7601           _collector->simulate_overflow()) {
7602         // simulate a stack overflow
7603         simulate_overflow = true;
7604       }
7605     )
7606     if (simulate_overflow || !_mark_stack->push(obj)) {
7607       if (_concurrent_precleaning) {
7608         // We dirty the overflown object and let the remark
7609         // phase deal with it.
7610         assert(_collector->overflow_list_is_empty(), "Error");
7611         // In the case of object arrays, we need to dirty all of
7612         // the cards that the object spans. No locking or atomics
7613         // are needed since no one else can be mutating the mod union
7614         // table.
7615         if (obj->is_objArray()) {
7616           size_t sz = obj->size();
7617           HeapWord* end_card_addr = align_up(addr + sz, CardTable::card_size);
7618           MemRegion redirty_range = MemRegion(addr, end_card_addr);
7619           assert(!redirty_range.is_empty(), "Arithmetical tautology");
7620           _collector->_modUnionTable.mark_range(redirty_range);
7621         } else {
7622           _collector->_modUnionTable.mark(addr);
7623         }
7624         _collector->_ser_kac_preclean_ovflw++;
7625       } else {
7626         _collector->push_on_overflow_list(obj);
7627         _collector->_ser_kac_ovflw++;
7628       }
7629     }
7630   }
7631 }
7632 
7633 // CMSParKeepAliveClosure: a parallel version of the above.
7634 // The work queues are private to each closure (thread),
7635 // but (may be) available for stealing by other threads.
7636 void CMSParKeepAliveClosure::do_oop(oop obj) {
7637   HeapWord* addr = (HeapWord*)obj;
7638   if (_span.contains(addr) &&
7639       !_bit_map->isMarked(addr)) {
7640     // In general, during recursive tracing, several threads
7641     // may be concurrently getting here; the first one to
7642     // "tag" it, claims it.
7643     if (_bit_map->par_mark(addr)) {
7644       bool res = _work_queue->push(obj);
7645       assert(res, "Low water mark should be much less than capacity");
7646       // Do a recursive trim in the hope that this will keep
7647       // stack usage lower, but leave some oops for potential stealers
7648       trim_queue(_low_water_mark);
7649     } // Else, another thread got there first
7650   }
7651 }
7652 
7653 void CMSParKeepAliveClosure::trim_queue(uint max) {
7654   while (_work_queue->size() > max) {
7655     oop new_oop;
7656     if (_work_queue->pop_local(new_oop)) {
7657       assert(new_oop != NULL && oopDesc::is_oop(new_oop), "Expected an oop");
7658       assert(_bit_map->isMarked((HeapWord*)new_oop),
7659              "no white objects on this stack!");
7660       assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
7661       // iterate over the oops in this oop, marking and pushing
7662       // the ones in CMS heap (i.e. in _span).
7663       new_oop->oop_iterate(&_mark_and_push);
7664     }
7665   }
7666 }
7667 
7668 CMSInnerParMarkAndPushClosure::CMSInnerParMarkAndPushClosure(
7669                                 CMSCollector* collector,
7670                                 MemRegion span, CMSBitMap* bit_map,
7671                                 OopTaskQueue* work_queue):
7672   _collector(collector),
7673   _span(span),
7674   _work_queue(work_queue),
7675   _bit_map(bit_map) { }
7676 
7677 void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
7678   HeapWord* addr = (HeapWord*)obj;
7679   if (_span.contains(addr) &&
7680       !_bit_map->isMarked(addr)) {
7681     if (_bit_map->par_mark(addr)) {
7682       bool simulate_overflow = false;
7683       NOT_PRODUCT(
7684         if (CMSMarkStackOverflowALot &&
7685             _collector->par_simulate_overflow()) {
7686           // simulate a stack overflow
7687           simulate_overflow = true;
7688         }
7689       )
7690       if (simulate_overflow || !_work_queue->push(obj)) {
7691         _collector->par_push_on_overflow_list(obj);
7692         _collector->_par_kac_ovflw++;
7693       }
7694     } // Else another thread got there already
7695   }
7696 }
7697 
7698 //////////////////////////////////////////////////////////////////
7699 //  CMSExpansionCause                /////////////////////////////
7700 //////////////////////////////////////////////////////////////////
7701 const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) {
7702   switch (cause) {
7703     case _no_expansion:
7704       return "No expansion";
7705     case _satisfy_free_ratio:
7706       return "Free ratio";
7707     case _satisfy_promotion:
7708       return "Satisfy promotion";
7709     case _satisfy_allocation:
7710       return "allocation";
7711     case _allocate_par_lab:
7712       return "Par LAB";
7713     case _allocate_par_spooling_space:
7714       return "Par Spooling Space";
7715     case _adaptive_size_policy:
7716       return "Ergonomics";
7717     default:
7718       return "unknown";
7719   }
7720 }
7721 
7722 void CMSDrainMarkingStackClosure::do_void() {
7723   // the max number to take from overflow list at a time
7724   const size_t num = _mark_stack->capacity()/4;
7725   assert(!_concurrent_precleaning || _collector->overflow_list_is_empty(),
7726          "Overflow list should be NULL during concurrent phases");
7727   while (!_mark_stack->isEmpty() ||
7728          // if stack is empty, check the overflow list
7729          _collector->take_from_overflow_list(num, _mark_stack)) {
7730     oop obj = _mark_stack->pop();
7731     HeapWord* addr = (HeapWord*)obj;
7732     assert(_span.contains(addr), "Should be within span");
7733     assert(_bit_map->isMarked(addr), "Should be marked");
7734     assert(oopDesc::is_oop(obj), "Should be an oop");
7735     obj->oop_iterate(_keep_alive);
7736   }
7737 }
7738 
7739 void CMSParDrainMarkingStackClosure::do_void() {
7740   // drain queue
7741   trim_queue(0);
7742 }
7743 
7744 // Trim our work_queue so its length is below max at return
7745 void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
7746   while (_work_queue->size() > max) {
7747     oop new_oop;
7748     if (_work_queue->pop_local(new_oop)) {
7749       assert(oopDesc::is_oop(new_oop), "Expected an oop");
7750       assert(_bit_map->isMarked((HeapWord*)new_oop),
7751              "no white objects on this stack!");
7752       assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
7753       // iterate over the oops in this oop, marking and pushing
7754       // the ones in CMS heap (i.e. in _span).
7755       new_oop->oop_iterate(&_mark_and_push);
7756     }
7757   }
7758 }
7759 
7760 ////////////////////////////////////////////////////////////////////
7761 // Support for Marking Stack Overflow list handling and related code
7762 ////////////////////////////////////////////////////////////////////
7763 // Much of the following code is similar in shape and spirit to the
7764 // code used in ParNewGC. We should try and share that code
7765 // as much as possible in the future.
7766 
7767 #ifndef PRODUCT
7768 // Debugging support for CMSStackOverflowALot
7769 
7770 // It's OK to call this multi-threaded; the worst that
7771 // can happen is that we'll get a bunch of closely
7772 // spaced simulated overflows. That's fine, and in fact
7773 // probably good, as it exercises the overflow code
7774 // under contention.
7775 bool CMSCollector::simulate_overflow() {
7776   if (_overflow_counter-- <= 0) { // just being defensive
7777     _overflow_counter = CMSMarkStackOverflowInterval;
7778     return true;
7779   } else {
7780     return false;
7781   }
7782 }
7783 
7784 bool CMSCollector::par_simulate_overflow() {
7785   return simulate_overflow();
7786 }
7787 #endif
7788 
7789 // Single-threaded
7790 bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
7791   assert(stack->isEmpty(), "Expected precondition");
7792   assert(stack->capacity() > num, "Shouldn't bite more than can chew");
7793   size_t i = num;
7794   oop  cur = _overflow_list;
7795   const markOop proto = markOopDesc::prototype();
7796   NOT_PRODUCT(ssize_t n = 0;)
7797   for (oop next; i > 0 && cur != NULL; cur = next, i--) {
7798     next = oop(cur->mark_raw());
7799     cur->set_mark_raw(proto);   // until proven otherwise
7800     assert(oopDesc::is_oop(cur), "Should be an oop");
7801     bool res = stack->push(cur);
7802     assert(res, "Bit off more than can chew?");
7803     NOT_PRODUCT(n++;)
7804   }
7805   _overflow_list = cur;
7806 #ifndef PRODUCT
7807   assert(_num_par_pushes >= n, "Too many pops?");
7808   _num_par_pushes -= n;
7809 #endif
7810   return !stack->isEmpty();
7811 }
7812 
7813 #define BUSY  (cast_to_oop<intptr_t>(0x1aff1aff))
7814 // (MT-safe) Get a prefix of at most "num" from the list.
7815 // The overflow list is chained through the mark word of
7816 // each object in the list. We fetch the entire list,
7817 // break off a prefix of the right size and return the
7818 // remainder. If other threads try to take objects from
7819 // the overflow list at that time, they will wait for
7820 // some time to see if data becomes available. If (and
7821 // only if) another thread places one or more object(s)
7822 // on the global list before we have returned the suffix
7823 // to the global list, we will walk down our local list
7824 // to find its end and append the global list to
7825 // our suffix before returning it. This suffix walk can
7826 // prove to be expensive (quadratic in the amount of traffic)
7827 // when there are many objects in the overflow list and
7828 // there is much producer-consumer contention on the list.
7829 // *NOTE*: The overflow list manipulation code here and
7830 // in ParNewGeneration:: are very similar in shape,
7831 // except that in the ParNew case we use the old (from/eden)
7832 // copy of the object to thread the list via its klass word.
7833 // Because of the common code, if you make any changes in
7834 // the code below, please check the ParNew version to see if
7835 // similar changes might be needed.
7836 // CR 6797058 has been filed to consolidate the common code.
7837 bool CMSCollector::par_take_from_overflow_list(size_t num,
7838                                                OopTaskQueue* work_q,
7839                                                int no_of_gc_threads) {
7840   assert(work_q->size() == 0, "First empty local work queue");
7841   assert(num < work_q->max_elems(), "Can't bite more than we can chew");
7842   if (_overflow_list == NULL) {
7843     return false;
7844   }
7845   // Grab the entire list; we'll put back a suffix
7846   oop prefix = cast_to_oop(Atomic::xchg((oopDesc*)BUSY, &_overflow_list));
7847   Thread* tid = Thread::current();
7848   // Before "no_of_gc_threads" was introduced CMSOverflowSpinCount was
7849   // set to ParallelGCThreads.
7850   size_t CMSOverflowSpinCount = (size_t) no_of_gc_threads; // was ParallelGCThreads;
7851   size_t sleep_time_millis = MAX2((size_t)1, num/100);
7852   // If the list is busy, we spin for a short while,
7853   // sleeping between attempts to get the list.
7854   for (size_t spin = 0; prefix == BUSY && spin < CMSOverflowSpinCount; spin++) {
7855     os::sleep(tid, sleep_time_millis, false);
7856     if (_overflow_list == NULL) {
7857       // Nothing left to take
7858       return false;
7859     } else if (_overflow_list != BUSY) {
7860       // Try and grab the prefix
7861       prefix = cast_to_oop(Atomic::xchg((oopDesc*)BUSY, &_overflow_list));
7862     }
7863   }
7864   // If the list was found to be empty, or we spun long
7865   // enough, we give up and return empty-handed. If we leave
7866   // the list in the BUSY state below, it must be the case that
7867   // some other thread holds the overflow list and will set it
7868   // to a non-BUSY state in the future.
7869   if (prefix == NULL || prefix == BUSY) {
7870     // Nothing to take or waited long enough
7871     if (prefix == NULL) {
7872       // Write back the NULL in case we overwrote it with BUSY above
7873       // and it is still the same value.
7874       Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
7875     }
7876     return false;
7877   }
7878   assert(prefix != NULL && prefix != BUSY, "Error");
7879   size_t i = num;
7880   oop cur = prefix;
7881   // Walk down the first "num" objects, unless we reach the end.
7882   for (; i > 1 && cur->mark_raw() != NULL; cur = oop(cur->mark_raw()), i--);
7883   if (cur->mark_raw() == NULL) {
7884     // We have "num" or fewer elements in the list, so there
7885     // is nothing to return to the global list.
7886     // Write back the NULL in lieu of the BUSY we wrote
7887     // above, if it is still the same value.
7888     if (_overflow_list == BUSY) {
7889       Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
7890     }
7891   } else {
7892     // Chop off the suffix and return it to the global list.
7893     assert(cur->mark_raw() != BUSY, "Error");
7894     oop suffix_head = cur->mark_raw(); // suffix will be put back on global list
7895     cur->set_mark_raw(NULL);           // break off suffix
7896     // It's possible that the list is still in the empty(busy) state
7897     // we left it in a short while ago; in that case we may be
7898     // able to place back the suffix without incurring the cost
7899     // of a walk down the list.
7900     oop observed_overflow_list = _overflow_list;
7901     oop cur_overflow_list = observed_overflow_list;
7902     bool attached = false;
7903     while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
7904       observed_overflow_list =
7905         Atomic::cmpxchg((oopDesc*)suffix_head, &_overflow_list, (oopDesc*)cur_overflow_list);
7906       if (cur_overflow_list == observed_overflow_list) {
7907         attached = true;
7908         break;
7909       } else cur_overflow_list = observed_overflow_list;
7910     }
7911     if (!attached) {
7912       // Too bad, someone else sneaked in (at least) an element; we'll need
7913       // to do a splice. Find tail of suffix so we can prepend suffix to global
7914       // list.
7915       for (cur = suffix_head; cur->mark_raw() != NULL; cur = (oop)(cur->mark_raw()));
7916       oop suffix_tail = cur;
7917       assert(suffix_tail != NULL && suffix_tail->mark_raw() == NULL,
7918              "Tautology");
7919       observed_overflow_list = _overflow_list;
7920       do {
7921         cur_overflow_list = observed_overflow_list;
7922         if (cur_overflow_list != BUSY) {
7923           // Do the splice ...
7924           suffix_tail->set_mark_raw(markOop(cur_overflow_list));
7925         } else { // cur_overflow_list == BUSY
7926           suffix_tail->set_mark_raw(NULL);
7927         }
7928         // ... and try to place spliced list back on overflow_list ...
7929         observed_overflow_list =
7930           Atomic::cmpxchg((oopDesc*)suffix_head, &_overflow_list, (oopDesc*)cur_overflow_list);
7931       } while (cur_overflow_list != observed_overflow_list);
7932       // ... until we have succeeded in doing so.
7933     }
7934   }
7935 
7936   // Push the prefix elements on work_q
7937   assert(prefix != NULL, "control point invariant");
7938   const markOop proto = markOopDesc::prototype();
7939   oop next;
7940   NOT_PRODUCT(ssize_t n = 0;)
7941   for (cur = prefix; cur != NULL; cur = next) {
7942     next = oop(cur->mark_raw());
7943     cur->set_mark_raw(proto);   // until proven otherwise
7944     assert(oopDesc::is_oop(cur), "Should be an oop");
7945     bool res = work_q->push(cur);
7946     assert(res, "Bit off more than we can chew?");
7947     NOT_PRODUCT(n++;)
7948   }
7949 #ifndef PRODUCT
7950   assert(_num_par_pushes >= n, "Too many pops?");
7951   Atomic::sub(n, &_num_par_pushes);
7952 #endif
7953   return true;
7954 }
7955 
7956 // Single-threaded
7957 void CMSCollector::push_on_overflow_list(oop p) {
7958   NOT_PRODUCT(_num_par_pushes++;)
7959   assert(oopDesc::is_oop(p), "Not an oop");
7960   preserve_mark_if_necessary(p);
7961   p->set_mark_raw((markOop)_overflow_list);
7962   _overflow_list = p;
7963 }
7964 
7965 // Multi-threaded; use CAS to prepend to overflow list
7966 void CMSCollector::par_push_on_overflow_list(oop p) {
7967   NOT_PRODUCT(Atomic::inc(&_num_par_pushes);)
7968   assert(oopDesc::is_oop(p), "Not an oop");
7969   par_preserve_mark_if_necessary(p);
7970   oop observed_overflow_list = _overflow_list;
7971   oop cur_overflow_list;
7972   do {
7973     cur_overflow_list = observed_overflow_list;
7974     if (cur_overflow_list != BUSY) {
7975       p->set_mark_raw(markOop(cur_overflow_list));
7976     } else {
7977       p->set_mark_raw(NULL);
7978     }
7979     observed_overflow_list =
7980       Atomic::cmpxchg((oopDesc*)p, &_overflow_list, (oopDesc*)cur_overflow_list);
7981   } while (cur_overflow_list != observed_overflow_list);
7982 }
7983 #undef BUSY
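
// A simplified, self-contained model of the BUSY-marker protocol used by
// par_push_on_overflow_list() and par_take_from_overflow_list() above.  It
// is guarded out and purely illustrative: it threads the list through an
// explicit 'next' field instead of through object mark words, uses
// std::atomic instead of HotSpot's Atomic class, and omits the spin/sleep,
// mark preservation and accounting details.  All names in the sketch are
// hypothetical.
#if 0
#include <atomic>
#include <cstddef>

struct Node { Node* next; };

static Node* const LIST_BUSY = reinterpret_cast<Node*>(0x1);
static std::atomic<Node*> overflow_head{nullptr};

// MT-safe prepend (cf. par_push_on_overflow_list): never link a node to
// the BUSY marker; publish the new head with a CAS.
static void overflow_push(Node* p) {
  Node* observed = overflow_head.load();
  for (;;) {
    p->next = (observed == LIST_BUSY) ? nullptr : observed;
    if (overflow_head.compare_exchange_weak(observed, p)) {
      return;                         // successfully prepended
    }
    // CAS failure reloads the current head into 'observed'; retry.
  }
}

// MT-safe "take a prefix of at most num" (cf. par_take_from_overflow_list):
// grab the whole list by exchanging in the BUSY marker, keep a prefix, and
// splice the remainder back onto the global list.  num is assumed to be at
// least 1, as in the caller above.
static Node* overflow_take_prefix(std::size_t num) {
  Node* prefix = overflow_head.exchange(LIST_BUSY);
  if (prefix == nullptr || prefix == LIST_BUSY) {
    if (prefix == nullptr) {
      // We overwrote an empty list with BUSY; put the nullptr back if the
      // head is still BUSY.  (A real implementation would also spin/sleep
      // for a while when another thread holds the list BUSY.)
      Node* expected = LIST_BUSY;
      overflow_head.compare_exchange_strong(expected, nullptr);
    }
    return nullptr;
  }
  // Walk down at most 'num' nodes to find the end of the prefix.
  Node* cur = prefix;
  for (std::size_t i = num; i > 1 && cur->next != nullptr; i--) {
    cur = cur->next;
  }
  Node* suffix = cur->next;
  cur->next = nullptr;                // break the prefix off the list
  if (suffix == nullptr) {
    // The whole list fit in the prefix; restore the empty list if the head
    // is still BUSY.
    Node* expected = LIST_BUSY;
    overflow_head.compare_exchange_strong(expected, nullptr);
  } else {
    // Fast path: if nobody pushed in the meantime, install the suffix over
    // the BUSY marker (or over a NULL head) directly.
    Node* expected = LIST_BUSY;
    if (!overflow_head.compare_exchange_strong(expected, suffix)) {
      expected = nullptr;
      if (!overflow_head.compare_exchange_strong(expected, suffix)) {
        // Slow path: someone pushed new nodes; find the tail of the suffix
        // and splice whatever is on the global list behind it.
        Node* tail = suffix;
        while (tail->next != nullptr) {
          tail = tail->next;
        }
        Node* observed = overflow_head.load();
        for (;;) {
          tail->next = (observed == LIST_BUSY) ? nullptr : observed;
          if (overflow_head.compare_exchange_weak(observed, suffix)) {
            break;
          }
        }
      }
    }
  }
  return prefix;
}
#endif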
7984 
7985 // Single threaded
7986 // General Note on GrowableArray: pushes may silently fail
7987 // because we are (temporarily) out of C-heap for expanding
7988 // the stack. The problem is quite ubiquitous and affects
7989 // a lot of code in the JVM. The prudent thing for GrowableArray
7990 // to do (for now) is to exit with an error. However, that may
7991 // be too draconian in some cases because the caller may be
7992 // able to recover without much harm. For such cases, we
7993 // should probably introduce a "soft_push" method which returns
7994 // an indication of success or failure with the assumption that
7995 // the caller may be able to recover from a failure; code in
7996 // the VM can then be changed, incrementally, to deal with such
7997 // failures where possible, thus, incrementally hardening the VM
7998 // in such low resource situations.
7999 void CMSCollector::preserve_mark_work(oop p, markOop m) {
8000   _preserved_oop_stack.push(p);
8001   _preserved_mark_stack.push(m);
8002   assert(m == p->mark_raw(), "Mark word changed");
8003   assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
8004          "bijection");
8005 }
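
// A minimal sketch of the "soft_push" idea floated in the note above: a
// push that reports C-heap exhaustion to the caller instead of exiting.
// It is guarded out and purely illustrative; this is not HotSpot's
// GrowableArray, all names below are hypothetical, and the sketch assumes
// trivially copyable elements.
#if 0
#include <cstddef>
#include <cstdlib>

template <typename E>
class SoftStack {
  E*          _data;
  std::size_t _len;
  std::size_t _cap;
 public:
  SoftStack() : _data(nullptr), _len(0), _cap(0) {}
  ~SoftStack() { ::free(_data); }

  // Returns false if expanding the backing storage fails, so the caller
  // can recover (e.g. fall back to an overflow list) rather than abort.
  bool soft_push(const E& elem) {
    if (_len == _cap) {
      std::size_t new_cap = (_cap == 0) ? 8 : 2 * _cap;
      E* new_data = static_cast<E*>(::realloc(_data, new_cap * sizeof(E)));
      if (new_data == nullptr) {
        return false;                 // out of C-heap: report, don't exit
      }
      _data = new_data;
      _cap  = new_cap;
    }
    _data[_len++] = elem;
    return true;
  }
};
#endif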
8006 
8007 // Single threaded
8008 void CMSCollector::preserve_mark_if_necessary(oop p) {
8009   markOop m = p->mark_raw();
8010   if (m->must_be_preserved(p)) {
8011     preserve_mark_work(p, m);
8012   }
8013 }
8014 
8015 void CMSCollector::par_preserve_mark_if_necessary(oop p) {
8016   markOop m = p->mark_raw();
8017   if (m->must_be_preserved(p)) {
8018     MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
8019     // Even though we read the mark word without holding
8020     // the lock, we are assured that it will not change
8021     // because we "own" this oop, so no other thread can
8022     // be trying to push it on the overflow list; see
8023     // the assertion in preserve_mark_work() that checks
8024     // that m == p->mark_raw().
8025     preserve_mark_work(p, m);
8026   }
8027 }
8028 
8029 // We should be able to do this multi-threaded,
8030 // a chunk of stack being a task (this is
8031 // correct because each oop only ever appears
8032 // once in the overflow list). However, it's
8033 // not very easy to completely overlap this with
8034 // other operations, so it will generally not be done
8035 // until all the work has been completed. Because we
8036 // expect the preserved oop stack (set) to be small,
8037 // it's probably fine to do this single-threaded.
8038 // We can explore cleverer concurrent/overlapped/parallel
8039 // processing of preserved marks if we feel the
8040 // need for this in the future. Stack overflow should
8041 // be so rare in practice and, when it happens, its
8042 // effect on performance so great that this will
8043 // likely just be in the noise anyway.
8044 void CMSCollector::restore_preserved_marks_if_any() {
8045   assert(SafepointSynchronize::is_at_safepoint(),
8046          "world should be stopped");
8047   assert(Thread::current()->is_ConcurrentGC_thread() ||
8048          Thread::current()->is_VM_thread(),
8049          "should be single-threaded");
8050   assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
8051          "bijection");
8052 
8053   while (!_preserved_oop_stack.is_empty()) {
8054     oop p = _preserved_oop_stack.pop();
8055     assert(oopDesc::is_oop(p), "Should be an oop");
8056     assert(_span.contains(p), "oop should be in _span");
8057     assert(p->mark_raw() == markOopDesc::prototype(),
8058            "Set when taken from overflow list");
8059     markOop m = _preserved_mark_stack.pop();
8060     p->set_mark_raw(m);
8061   }
8062   assert(_preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty(),
8063          "stacks were cleared above");
8064 }
8065 
8066 #ifndef PRODUCT
8067 bool CMSCollector::no_preserved_marks() const {
8068   return _preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty();
8069 }
8070 #endif
8071 
8072 // Transfer some number of overflown objects to usual marking
8073 // stack. Return true if some objects were transferred.
8074 bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
8075   size_t num = MIN2((size_t)(_mark_stack->capacity() - _mark_stack->length())/4,
8076                     (size_t)ParGCDesiredObjsFromOverflowList);
8077 
8078   bool res = _collector->take_from_overflow_list(num, _mark_stack);
8079   assert(_collector->overflow_list_is_empty() || res,
8080          "If list is not empty, we should have taken something");
8081   assert(!res || !_mark_stack->isEmpty(),
8082          "If we took something, it should now be on our stack");
8083   return res;
8084 }
8085 
8086 size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
8087   size_t res = _sp->block_size_no_stall(addr, _collector);
8088   if (_sp->block_is_obj(addr)) {
8089     if (_live_bit_map->isMarked(addr)) {
8090       // It can't have been dead in a previous cycle
8091       guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");
8092     } else {
8093       _dead_bit_map->mark(addr);      // mark the dead object
8094     }
8095   }
8096   // Could be 0, if the block size could not be computed without stalling.
8097   return res;
8098 }
8099 
8100 TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause): TraceMemoryManagerStats() {
8101   GCMemoryManager* manager = CMSHeap::heap()->old_manager();
8102   switch (phase) {
8103     case CMSCollector::InitialMarking:
8104       initialize(manager /* GC manager */,
8105                  cause   /* cause of the GC */,
8106                  true    /* allMemoryPoolsAffected */,
8107                  true    /* recordGCBeginTime */,
8108                  true    /* recordPreGCUsage */,
8109                  false   /* recordPeakUsage */,
8110                  false   /* recordPostGCusage */,
8111                  true    /* recordAccumulatedGCTime */,
8112                  false   /* recordGCEndTime */,
8113                  false   /* countCollection */  );
8114       break;
8115 
8116     case CMSCollector::FinalMarking:
8117       initialize(manager /* GC manager */,
8118                  cause   /* cause of the GC */,
8119                  true    /* allMemoryPoolsAffected */,
8120                  false   /* recordGCBeginTime */,
8121                  false   /* recordPreGCUsage */,
8122                  false   /* recordPeakUsage */,
8123                  false   /* recordPostGCusage */,
8124                  true    /* recordAccumulatedGCTime */,
8125                  false   /* recordGCEndTime */,
8126                  false   /* countCollection */  );
8127       break;
8128 
8129     case CMSCollector::Sweeping:
8130       initialize(manager /* GC manager */,
8131                  cause   /* cause of the GC */,
8132                  true    /* allMemoryPoolsAffected */,
8133                  false   /* recordGCBeginTime */,
8134                  false   /* recordPreGCUsage */,
8135                  true    /* recordPeakUsage */,
8136                  true    /* recordPostGCusage */,
8137                  false   /* recordAccumulatedGCTime */,
8138                  true    /* recordGCEndTime */,
8139                  true    /* countCollection */  );
8140       break;
8141 
8142     default:
8143       ShouldNotReachHere();
8144   }
8145 }