/*
 * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc/cms/cmsCollectorPolicy.hpp"
#include "gc/cms/cmsOopClosures.inline.hpp"
#include "gc/cms/compactibleFreeListSpace.hpp"
#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
#include "gc/cms/concurrentMarkSweepThread.hpp"
#include "gc/cms/parNewGeneration.hpp"
#include "gc/cms/vmCMSOperations.hpp"
#include "gc/serial/genMarkSweep.hpp"
#include "gc/serial/tenuredGeneration.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/cardGeneration.inline.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/padded.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/timer.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "services/runtimeService.hpp"
#include "utilities/align.hpp"
#include "utilities/stack.inline.hpp"

// statics
CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
bool CMSCollector::_full_gc_requested = false;
GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc;

//////////////////////////////////////////////////////////////////
// In support of CMS/VM thread synchronization
//////////////////////////////////////////////////////////////////
// We split use of the CGC_lock into 2 "levels".
// The low-level locking is of the usual CGC_lock monitor. We introduce
// a higher level "token" (hereafter "CMS token") built on top of the
// low level monitor (hereafter "CGC lock").
// The token-passing protocol gives priority to the VM thread. The
// CMS-lock doesn't provide any fairness guarantees, but clients
// should ensure that it is only held for very short, bounded
// durations.
//
// When either the CMS thread or the VM thread is involved in
// collection operations during which it does not want the other
// thread to interfere, it obtains the CMS token.
//
// If either thread tries to get the token while the other has
// it, that thread waits. However, if the VM thread and CMS thread
// both want the token, then the VM thread gets priority while the
// CMS thread waits. This ensures, for instance, that the "concurrent"
// phases of the CMS thread's work do not block out the VM thread
// for long periods of time as the CMS thread continues to hog
// the token. (See bug 4616232).
//
// The baton-passing functions are, however, controlled by the
// flags _foregroundGCShouldWait and _foregroundGCIsActive,
// and here the low-level CMS lock, not the high level token,
// ensures mutual exclusion.
//
// Two important conditions that we have to satisfy:
// 1. if a thread does a low-level wait on the CMS lock, then it
//    relinquishes the CMS token if it was holding that token
//    when it acquired the low-level CMS lock.
// 2. any low-level notifications on the low-level lock
//    should only be sent when a thread has relinquished the token.
//
// In the absence of either property, we'd have potential deadlock.
//
// We protect each of the CMS (concurrent and sequential) phases
// with the CMS _token_, not the CMS _lock_.
//
// The only code protected by the CMS lock is the token acquisition code
// itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the
// baton-passing code.
//
// Unfortunately, I couldn't come up with a good abstraction to factor and
// hide the naked CGC_lock manipulation in the baton-passing code
// further below. That's something we should try to do. Also, the proof
// of correctness of this 2-level locking scheme is far from obvious,
// and potentially quite slippery. We have an uneasy suspicion, for instance,
// that there may be a theoretical possibility of delay/starvation in the
// low-level lock/wait/notify scheme used for the baton-passing because of
// potential interference with the priority scheme embodied in the
// CMS-token-passing protocol. See related comments at a CGC_lock->wait()
// invocation further below and marked with "XXX 20011219YSR".
// Indeed, as we note elsewhere, this may become yet more slippery
// in the presence of multiple CMS and/or multiple VM threads. XXX

class CMSTokenSync: public StackObj {
 private:
  bool _is_cms_thread;
 public:
  CMSTokenSync(bool is_cms_thread):
    _is_cms_thread(is_cms_thread) {
    assert(is_cms_thread == Thread::current()->is_ConcurrentGC_thread(),
           "Incorrect argument to constructor");
    ConcurrentMarkSweepThread::synchronize(_is_cms_thread);
  }

  ~CMSTokenSync() {
    assert(_is_cms_thread ?
             ConcurrentMarkSweepThread::cms_thread_has_cms_token() :
             ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
          "Incorrect state");
    ConcurrentMarkSweepThread::desynchronize(_is_cms_thread);
  }
};

// Convenience class that does a CMSTokenSync, and then acquires
// up to three locks.
class CMSTokenSyncWithLocks: public CMSTokenSync {
 private:
  // Note: locks are acquired in textual declaration order
  // and released in the opposite order
  MutexLockerEx _locker1, _locker2, _locker3;
 public:
  CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
                        Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
    CMSTokenSync(is_cms_thread),
    _locker1(mutex1, Mutex::_no_safepoint_check_flag),
    _locker2(mutex2, Mutex::_no_safepoint_check_flag),
    _locker3(mutex3, Mutex::_no_safepoint_check_flag)
  { }
};
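
// Illustrative usage sketch (comments only, not compiled): a phase run by
// the CMS thread that must exclude the VM thread and hold the bit map lock
// for its duration could be bracketed as follows, where do_phase_work() is
// a hypothetical helper:
//
//   {
//     CMSTokenSyncWithLocks ts(true /* is_cms_thread */, bitMapLock());
//     do_phase_work();  // runs with the CMS token and the lock held
//   }
//   // lock released first, then the CMS token is relinquished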


//////////////////////////////////////////////////////////////////
//  Concurrent Mark-Sweep Generation /////////////////////////////
//////////////////////////////////////////////////////////////////

NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)

// This struct contains per-thread things necessary to support parallel
// young-gen collection.
class CMSParGCThreadState: public CHeapObj<mtGC> {
 public:
  CompactibleFreeListSpaceLAB lab;
  PromotionInfo promo;

  // Constructor.
  CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
    promo.setSpace(cfls);
  }
};

ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
     ReservedSpace rs, size_t initial_byte_size, CardTableRS* ct) :
  CardGeneration(rs, initial_byte_size, ct),
  _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
  _did_compact(false)
{
  HeapWord* bottom = (HeapWord*) _virtual_space.low();
  HeapWord* end    = (HeapWord*) _virtual_space.high();

  _direct_allocated_words = 0;
  NOT_PRODUCT(
    _numObjectsPromoted = 0;
    _numWordsPromoted = 0;
    _numObjectsAllocated = 0;
    _numWordsAllocated = 0;
  )

  _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end));
  NOT_PRODUCT(debug_cms_space = _cmsSpace;)
  _cmsSpace->_old_gen = this;

  _gc_stats = new CMSGCStats();

  // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
  // offsets match. The ability to tell free chunks from objects
  // depends on this property.
  debug_only(
    FreeChunk* junk = NULL;
    assert(UseCompressedClassPointers ||
           junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
           "Offset of FreeChunk::_prev within FreeChunk must match"
           "  that of OopDesc::_klass within OopDesc");
  )

  _par_gc_thread_states = NEW_C_HEAP_ARRAY(CMSParGCThreadState*, ParallelGCThreads, mtGC);
  for (uint i = 0; i < ParallelGCThreads; i++) {
    _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
  }

  _incremental_collection_failed = false;
  // The "dilatation_factor" is the expansion that can occur on
  // account of the fact that the minimum object size in the CMS
  // generation may be larger than that in, say, a contiguous young
  // generation.
  // Ideally, in the calculation below, we'd compute the dilatation
  // factor as: MinChunkSize/(promoting_gen's min object size)
  // Since we do not have such a general query interface for the
  // promoting generation, we'll instead just use the minimum
  // object size (which today is a header's worth of space);
  // note that all arithmetic is in units of HeapWords.
  assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
  assert(_dilatation_factor >= 1.0, "from previous assert");
}


// The field "_initiating_occupancy" represents the occupancy percentage
// at which we trigger a new collection cycle.  Unless explicitly specified
// via CMSInitiatingOccupancyFraction (argument "io" below), it
// is calculated by:
//
//   Let "f" be MinHeapFreeRatio; then
//
//    _initiating_occupancy = (100 - f) +
//                            f * (CMSTriggerRatio/100)
//
//   where CMSTriggerRatio is the argument "tr" below.
//
// That is, if we assume the heap is at its desired maximum occupancy at the
// end of a collection, we let CMSTriggerRatio percent of the (purported) free
// space be allocated before initiating a new collection cycle.
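//
// A worked example (illustrative): with MinHeapFreeRatio = 40,
// CMSTriggerRatio = 80 and no explicit CMSInitiatingOccupancyFraction,
//
//    _initiating_occupancy = (100 - 40) + 40 * (80/100)
//                          = 60 + 32 = 92
//
// i.e. a new cycle is initiated once the generation is 92% occupied
// (stored internally as the fraction 0.92).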
//
void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, uintx tr) {
  assert(io <= 100 && tr <= 100, "Check the arguments");
  if (io >= 0) {
    _initiating_occupancy = (double)io / 100.0;
  } else {
    _initiating_occupancy = ((100 - MinHeapFreeRatio) +
                             (double)(tr * MinHeapFreeRatio) / 100.0)
                            / 100.0;
  }
}

void ConcurrentMarkSweepGeneration::ref_processor_init() {
  assert(collector() != NULL, "no collector");
  collector()->ref_processor_init();
}

void CMSCollector::ref_processor_init() {
  if (_ref_processor == NULL) {
    // Allocate and initialize a reference processor
    _ref_processor =
      new ReferenceProcessor(_span,                               // span
                             (ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing
                             ParallelGCThreads,                   // mt processing degree
                             _cmsGen->refs_discovery_is_mt(),     // mt discovery
                             MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
                             _cmsGen->refs_discovery_is_atomic(), // whether discovery is atomic
                             &_is_alive_closure);                 // closure for liveness info
    // Initialize the _ref_processor field of CMSGen
    _cmsGen->set_ref_processor(_ref_processor);

  }
}

AdaptiveSizePolicy* CMSCollector::size_policy() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  return gch->gen_policy()->size_policy();
}

void ConcurrentMarkSweepGeneration::initialize_performance_counters() {

  const char* gen_name = "old";
  GenCollectorPolicy* gcp = GenCollectedHeap::heap()->gen_policy();
  // Generation Counters - generation 1, 1 subspace
  _gen_counters = new GenerationCounters(gen_name, 1, 1,
      gcp->min_old_size(), gcp->max_old_size(), &_virtual_space);

  _space_counters = new GSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       this, _gen_counters);
}

CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
  _cms_gen(cms_gen)
{
  assert(alpha <= 100, "bad value");
  _saved_alpha = alpha;

  // Initialize the alphas to the bootstrap value of 100.
  _gc0_alpha = _cms_alpha = 100;

  _cms_begin_time.update();
  _cms_end_time.update();

  _gc0_duration = 0.0;
  _gc0_period = 0.0;
  _gc0_promoted = 0;

  _cms_duration = 0.0;
  _cms_period = 0.0;
  _cms_allocated = 0;

  _cms_used_at_gc0_begin = 0;
  _cms_used_at_gc0_end = 0;
  _allow_duty_cycle_reduction = false;
  _valid_bits = 0;
}

double CMSStats::cms_free_adjustment_factor(size_t free) const {
  // TBD: CR 6909490
  return 1.0;
}

void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
}

// If promotion failure handling is on, use
// the padded average size of the promotion for each
// young generation collection.
double CMSStats::time_until_cms_gen_full() const {
  size_t cms_free = _cms_gen->cmsSpace()->free();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  size_t expected_promotion = MIN2(gch->young_gen()->capacity(),
                                   (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
  if (cms_free > expected_promotion) {
    // Start a cms collection if there isn't enough space to promote
    // for the next young collection.  Use the padded average as
    // a safety factor.
    cms_free -= expected_promotion;

    // Adjust by the safety factor.
    double cms_free_dbl = (double)cms_free;
    double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor) / 100.0;
    // Apply a further correction factor which tries to adjust
    // for recent occurrences of concurrent mode failures.
    cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
    cms_free_dbl = cms_free_dbl * cms_adjustment;

    log_trace(gc)("CMSStats::time_until_cms_gen_full: cms_free " SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
                  cms_free, expected_promotion);
    log_trace(gc)("  cms_free_dbl %f cms_consumption_rate %f", cms_free_dbl, cms_consumption_rate() + 1.0);
    // Add 1 in case the consumption rate goes to zero.
    return cms_free_dbl / (cms_consumption_rate() + 1.0);
  }
  return 0.0;
}
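
// A worked example (illustrative numbers only): with cms_free = 120M, an
// expected (padded) promotion of 20M and CMSIncrementalSafetyFactor = 10,
// the adjusted free space is (120M - 20M) * 0.9 = 90M; at a measured
// cms_consumption_rate() of 10M per second the estimate returned above is
// roughly 90M / (10M/s + 1) ~ 9 seconds until the generation fills up.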

// Compare the duration of the cms collection to the
// time remaining before the cms generation is full.
// Note that the time from the start of the cms collection
// to the start of the cms sweep (less than the total
// duration of the cms collection) can be used.  This
// has been tried and some applications experienced
// promotion failures early in execution.  This was
// possibly because the averages were not accurate
// enough at the beginning.
double CMSStats::time_until_cms_start() const {
  // We add "gc0_period" to the "work" calculation
  // below because this query is done (mostly) at the
  // end of a scavenge, so we need to conservatively
  // account for that much possible delay
  // in the query so as to avoid concurrent mode failures
  // due to starting the collection just a wee bit too
  // late.
  double work = cms_duration() + gc0_period();
  double deadline = time_until_cms_gen_full();
  // If the estimated work (collection duration plus one scavenge period)
  // exceeds the estimated time remaining until the cms generation is full,
  // start a collection immediately.
  if (work > deadline) {
    log_develop_trace(gc)("CMSCollector: collect because of anticipated promotion before full %3.7f + %3.7f > %3.7f ",
                          cms_duration(), gc0_period(), time_until_cms_gen_full());
    return 0.0;
  }
  return deadline - work;
}
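
// For example (illustrative): if a cms cycle is expected to take 4s
// (cms_duration), scavenges are 2s apart (gc0_period) and the generation
// is expected to fill in 9s (time_until_cms_gen_full), there are
// 9 - (4 + 2) = 3 seconds of slack before a cycle must be started.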

#ifndef PRODUCT
void CMSStats::print_on(outputStream *st) const {
  st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
  st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
               gc0_duration(), gc0_period(), gc0_promoted());
  st->print(",cms_dur=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
            cms_duration(), cms_period(), cms_allocated());
  st->print(",cms_since_beg=%g,cms_since_end=%g",
            cms_time_since_begin(), cms_time_since_end());
  st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
            _cms_used_at_gc0_begin, _cms_used_at_gc0_end);

  if (valid()) {
    st->print(",promo_rate=%g,cms_alloc_rate=%g",
              promotion_rate(), cms_allocation_rate());
    st->print(",cms_consumption_rate=%g,time_until_full=%g",
              cms_consumption_rate(), time_until_cms_gen_full());
  }
  st->cr();
}
#endif // #ifndef PRODUCT

CMSCollector::CollectorState CMSCollector::_collectorState =
                             CMSCollector::Idling;
bool CMSCollector::_foregroundGCIsActive = false;
bool CMSCollector::_foregroundGCShouldWait = false;

CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
                           CardTableRS*                   ct,
                           ConcurrentMarkSweepPolicy*     cp):
  _cmsGen(cmsGen),
  _ct(ct),
  _ref_processor(NULL),    // will be set later
  _conc_workers(NULL),     // may be set later
  _abort_preclean(false),
  _start_sampling(false),
  _between_prologue_and_epilogue(false),
  _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
  _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize),
                 -1 /* lock-free */, "No_lock" /* dummy */),
  _modUnionClosurePar(&_modUnionTable),
  // Adjust my span to cover old (cms) gen
  _span(cmsGen->reserved()),
  // Construct the is_alive_closure with _span & markBitMap
  _is_alive_closure(_span, &_markBitMap),
  _restart_addr(NULL),
  _overflow_list(NULL),
  _stats(cmsGen),
  _eden_chunk_lock(new Mutex(Mutex::leaf + 1, "CMS_eden_chunk_lock", true,
                             // verify that this lock should be acquired with safepoint check.
                             Monitor::_safepoint_check_sometimes)),
  _eden_chunk_array(NULL),     // may be set in ctor body
  _eden_chunk_capacity(0),     // -- ditto --
  _eden_chunk_index(0),        // -- ditto --
  _survivor_plab_array(NULL),  // -- ditto --
  _survivor_chunk_array(NULL), // -- ditto --
  _survivor_chunk_capacity(0), // -- ditto --
  _survivor_chunk_index(0),    // -- ditto --
  _ser_pmc_preclean_ovflw(0),
  _ser_kac_preclean_ovflw(0),
  _ser_pmc_remark_ovflw(0),
  _par_pmc_remark_ovflw(0),
  _ser_kac_ovflw(0),
  _par_kac_ovflw(0),
#ifndef PRODUCT
  _num_par_pushes(0),
#endif
  _collection_count_start(0),
  _verifying(false),
  _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
  _completed_initialization(false),
  _collector_policy(cp),
  _should_unload_classes(CMSClassUnloadingEnabled),
  _concurrent_cycles_since_last_unload(0),
  _roots_scanning_options(GenCollectedHeap::SO_None),
  _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
  _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _cms_start_registered(false)
{
  // Now expand the span and allocate the collection support structures
  // (MUT, marking bit map etc.) to cover both generations subject to
  // collection.

  // For use by dirty card to oop closures.
  _cmsGen->cmsSpace()->set_collector(this);

  // Allocate MUT and marking bit map
  {
    MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
    if (!_markBitMap.allocate(_span)) {
      log_warning(gc)("Failed to allocate CMS Bit Map");
      return;
    }
    assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
  }
  {
    _modUnionTable.allocate(_span);
    assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
  }

  if (!_markStack.allocate(MarkStackSize)) {
    log_warning(gc)("Failed to allocate CMS Marking Stack");
    return;
  }

  // Support for multi-threaded concurrent phases
  if (CMSConcurrentMTEnabled) {
    if (FLAG_IS_DEFAULT(ConcGCThreads)) {
      // just for now
      FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3) / 4);
    }
    if (ConcGCThreads > 1) {
      _conc_workers = new YieldingFlexibleWorkGang("CMS Thread",
                                 ConcGCThreads, true);
      if (_conc_workers == NULL) {
        log_warning(gc)("GC/CMS: _conc_workers allocation failure: forcing -CMSConcurrentMTEnabled");
        CMSConcurrentMTEnabled = false;
      } else {
        _conc_workers->initialize_workers();
      }
    } else {
      CMSConcurrentMTEnabled = false;
    }
  }
  if (!CMSConcurrentMTEnabled) {
    ConcGCThreads = 0;
  } else {
    // Turn off CMSCleanOnEnter optimization temporarily for
    // the MT case where it's not fixed yet; see 6178663.
    CMSCleanOnEnter = false;
  }
  assert((_conc_workers != NULL) == (ConcGCThreads > 1),
         "Inconsistency");
  log_debug(gc)("ConcGCThreads: %u", ConcGCThreads);
  log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);

  // Parallel task queues; these are shared for the
  // concurrent and stop-world phases of CMS, but
  // are not shared with parallel scavenge (ParNew).
  {
    uint i;
    uint num_queues = MAX2(ParallelGCThreads, ConcGCThreads);

    if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
         || ParallelRefProcEnabled)
        && num_queues > 0) {
      _task_queues = new OopTaskQueueSet(num_queues);
      if (_task_queues == NULL) {
        log_warning(gc)("task_queues allocation failure.");
        return;
      }
      _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues, mtGC);
      typedef Padded<OopTaskQueue> PaddedOopTaskQueue;
      for (i = 0; i < num_queues; i++) {
        PaddedOopTaskQueue *q = new PaddedOopTaskQueue();
        if (q == NULL) {
          log_warning(gc)("work_queue allocation failure.");
          return;
        }
        _task_queues->register_queue(i, q);
      }
      for (i = 0; i < num_queues; i++) {
        _task_queues->queue(i)->initialize();
        _hash_seed[i] = 17;  // copied from ParNew
      }
    }
  }

  _cmsGen->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);

  // CMSBootstrapOccupancy is a percentage in [0, 100]; convert it to a fraction.
  _bootstrap_occupancy = CMSBootstrapOccupancy / 100.0;

  // Now tell CMS generations the identity of their collector
  ConcurrentMarkSweepGeneration::set_collector(this);

  // Create & start a CMS thread for this CMS collector
  _cmsThread = ConcurrentMarkSweepThread::start(this);
  assert(cmsThread() != NULL, "CMS Thread should have been created");
  assert(cmsThread()->collector() == this,
         "CMS Thread should refer to this gen");
  assert(CGC_lock != NULL, "Where's the CGC_lock?");

  // Support for parallelizing young gen rescan
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->young_gen()->kind() == Generation::ParNew, "CMS can only be used with ParNew");
  _young_gen = (ParNewGeneration*)gch->young_gen();
  if (gch->supports_inline_contig_alloc()) {
    _top_addr = gch->top_addr();
    _end_addr = gch->end_addr();
    assert(_young_gen != NULL, "no _young_gen");
    _eden_chunk_index = 0;
    _eden_chunk_capacity = (_young_gen->max_capacity() + CMSSamplingGrain) / CMSSamplingGrain;
    _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
  }

  // Support for parallelizing survivor space rescan
  if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
    const size_t max_plab_samples =
      _young_gen->max_survivor_size() / (PLAB::min_size() * HeapWordSize);

    _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
    _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
    _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
    _survivor_chunk_capacity = max_plab_samples;
    for (uint i = 0; i < ParallelGCThreads; i++) {
      HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
      ChunkArray* cur = ::new (&_survivor_plab_array[i]) ChunkArray(vec, max_plab_samples);
      assert(cur->end() == 0, "Should be 0");
      assert(cur->array() == vec, "Should be vec");
      assert(cur->capacity() == max_plab_samples, "Error");
    }
  }

  NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
  _gc_counters = new CollectorCounters("CMS", 1);
  _completed_initialization = true;
  _inter_sweep_timer.start();  // start of time
}

const char* ConcurrentMarkSweepGeneration::name() const {
  return "concurrent mark-sweep generation";
}
void ConcurrentMarkSweepGeneration::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

// This is an optimized version of update_counters(). It takes the
// used value as a parameter rather than computing it.
//
void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
  if (UsePerfData) {
    _space_counters->update_used(used);
    _space_counters->update_capacity();
    _gen_counters->update_all();
  }
}

void ConcurrentMarkSweepGeneration::print() const {
  Generation::print();
  cmsSpace()->print();
}

#ifndef PRODUCT
void ConcurrentMarkSweepGeneration::print_statistics() {
  cmsSpace()->printFLCensus(0);
}
#endif

size_t
ConcurrentMarkSweepGeneration::contiguous_available() const {
  // dld proposes an improvement in precision here. If the committed
  // part of the space ends in a free block we should add that to
  // uncommitted size in the calculation below. Will make this
  // change later, staying with the approximation below for the
  // time being. -- ysr.
  return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
}

size_t
ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
  return _cmsSpace->max_alloc_in_words() * HeapWordSize;
}

size_t ConcurrentMarkSweepGeneration::max_available() const {
  return free() + _virtual_space.uncommitted_size();
}

bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
  size_t available = max_available();
  size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
  bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
  log_trace(gc, promotion)("CMS: promo attempt is%s safe: available(" SIZE_FORMAT ") %s av_promo(" SIZE_FORMAT "), max_promo(" SIZE_FORMAT ")",
                           res? "":" not", available, res? ">=":"<", av_promo, max_promotion_in_bytes);
  return res;
}

// At a promotion failure, dump information on block layout in the heap
// (cms old generation).
void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
  Log(gc, promotion) log;
  if (log.is_trace()) {
    ResourceMark rm;
    cmsSpace()->dump_at_safepoint_with_locks(collector(), log.trace_stream());
  }
}

void ConcurrentMarkSweepGeneration::reset_after_compaction() {
  // Clear the promotion information.  These pointers can be adjusted
  // along with all the other pointers into the heap, but compaction is
  // expected to be a rare event for a heap using CMS, so we don't do it
  // without seeing the need.
  for (uint i = 0; i < ParallelGCThreads; i++) {
    _par_gc_thread_states[i]->promo.reset();
  }
}

void ConcurrentMarkSweepGeneration::compute_new_size() {
  assert_locked_or_safepoint(Heap_lock);

  // If incremental collection failed, we just want to expand
  // to the limit.
  if (incremental_collection_failed()) {
    clear_incremental_collection_failed();
    grow_to_reserved();
    return;
  }

  // The heap has been compacted but not reset yet.
  // Any metric such as free() or used() will be incorrect.

  CardGeneration::compute_new_size();

  // Reset again after a possible resizing
  if (did_compact()) {
    cmsSpace()->reset_after_compaction();
  }
}

void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
  assert_locked_or_safepoint(Heap_lock);

  // If incremental collection failed, we just want to expand
  // to the limit.
  if (incremental_collection_failed()) {
    clear_incremental_collection_failed();
    grow_to_reserved();
    return;
  }

  double free_percentage = ((double) free()) / capacity();
  double desired_free_percentage = (double) MinHeapFreeRatio / 100;
  double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;

  // compute expansion delta needed for reaching desired free percentage
  if (free_percentage < desired_free_percentage) {
    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
    assert(desired_capacity >= capacity(), "invalid expansion size");
    size_t expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
    Log(gc) log;
    if (log.is_trace()) {
 758       log.trace("From compute_new_size: ");
 759       log.trace("  Free fraction %f", free_percentage);
 760       log.trace("  Desired free fraction %f", desired_free_percentage);
 761       log.trace("  Maximum free fraction %f", maximum_free_percentage);
 762       log.trace("  Capacity " SIZE_FORMAT, capacity() / 1000);
 763       log.trace("  Desired capacity " SIZE_FORMAT, desired_capacity / 1000);
 764       GenCollectedHeap* gch = GenCollectedHeap::heap();
 765       assert(gch->is_old_gen(this), "The CMS generation should always be the old generation");
 766       size_t young_size = gch->young_gen()->capacity();
 767       log.trace("  Young gen size " SIZE_FORMAT, young_size / 1000);
 768       log.trace("  unsafe_max_alloc_nogc " SIZE_FORMAT, unsafe_max_alloc_nogc() / 1000);
 769       log.trace("  contiguous available " SIZE_FORMAT, contiguous_available() / 1000);
 770       log.trace("  Expand by " SIZE_FORMAT " (bytes)", expand_bytes);
 771     }
 772     // safe if expansion fails
 773     expand_for_gc_cause(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
 774     log.trace("  Expanded free fraction %f", ((double) free()) / capacity());
 775   } else {
 776     size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
 777     assert(desired_capacity <= capacity(), "invalid expansion size");
 778     size_t shrink_bytes = capacity() - desired_capacity;
 779     // Don't shrink unless the delta is greater than the minimum shrink we want
 780     if (shrink_bytes >= MinHeapDeltaBytes) {
 781       shrink_free_list_by(shrink_bytes);
 782     }
 783   }
 784 }
 785 
 786 Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
 787   return cmsSpace()->freelistLock();
 788 }
 789 
 790 HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size, bool tlab) {
 791   CMSSynchronousYieldRequest yr;
 792   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
 793   return have_lock_and_allocate(size, tlab);
 794 }
 795 
 796 HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
 797                                                                 bool   tlab /* ignored */) {
 798   assert_lock_strong(freelistLock());
 799   size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
 800   HeapWord* res = cmsSpace()->allocate(adjustedSize);
 801   // Allocate the object live (grey) if the background collector has
 802   // started marking. This is necessary because the marker may
 803   // have passed this address and consequently this object will
 804   // not otherwise be greyed and would be incorrectly swept up.
 805   // Note that if this object contains references, the writing
 806   // of those references will dirty the card containing this object
 807   // allowing the object to be blackened (and its references scanned)
 808   // either during a preclean phase or at the final checkpoint.
 809   if (res != NULL) {
 810     // We may block here with an uninitialized object with
 811     // its mark-bit or P-bits not yet set. Such objects need
 812     // to be safely navigable by block_start().
 813     assert(oop(res)->klass_or_null() == NULL, "Object should be uninitialized here.");
 814     assert(!((FreeChunk*)res)->is_free(), "Error, block will look free but show wrong size");
 815     collector()->direct_allocated(res, adjustedSize);
 816     _direct_allocated_words += adjustedSize;
 817     // allocation counters
 818     NOT_PRODUCT(
 819       _numObjectsAllocated++;
 820       _numWordsAllocated += (int)adjustedSize;
 821     )
 822   }
 823   return res;
 824 }
 825 
 826 // In the case of direct allocation by mutators in a generation that
 827 // is being concurrently collected, the object must be allocated
 828 // live (grey) if the background collector has started marking.
 829 // This is necessary because the marker may
 830 // have passed this address and consequently this object will
 831 // not otherwise be greyed and would be incorrectly swept up.
 832 // Note that if this object contains references, the writing
 833 // of those references will dirty the card containing this object
 834 // allowing the object to be blackened (and its references scanned)
 835 // either during a preclean phase or at the final checkpoint.
 836 void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
 837   assert(_markBitMap.covers(start, size), "Out of bounds");
 838   if (_collectorState >= Marking) {
 839     MutexLockerEx y(_markBitMap.lock(),
 840                     Mutex::_no_safepoint_check_flag);
 841     // [see comments preceding SweepClosure::do_blk() below for details]
 842     //
 843     // Can the P-bits be deleted now?  JJJ
 844     //
 845     // 1. need to mark the object as live so it isn't collected
 846     // 2. need to mark the 2nd bit to indicate the object may be uninitialized
 847     // 3. need to mark the end of the object so marking, precleaning or sweeping
 848     //    can skip over uninitialized or unparsable objects. An allocated
 849     //    object is considered uninitialized for our purposes as long as
 850     //    its klass word is NULL.  All old gen objects are parsable
    //    as soon as they are initialized.
    _markBitMap.mark(start);          // object is live
    _markBitMap.mark(start + 1);      // object is potentially uninitialized?
    _markBitMap.mark(start + size - 1);
                                      // mark end of object
  }
  // check that oop looks uninitialized
  assert(oop(start)->klass_or_null() == NULL, "_klass should be NULL");
}

void CMSCollector::promoted(bool par, HeapWord* start,
                            bool is_obj_array, size_t obj_size) {
  assert(_markBitMap.covers(start), "Out of bounds");
  // See comment in direct_allocated() about when objects should
  // be allocated live.
  if (_collectorState >= Marking) {
    // we already hold the marking bit map lock, taken in
    // the prologue
    if (par) {
      _markBitMap.par_mark(start);
    } else {
      _markBitMap.mark(start);
    }
    // We don't need to mark the object as uninitialized (as
    // in direct_allocated above) because this is being done with the
    // world stopped and the object will be initialized by the
    // time the marking, precleaning or sweeping get to look at it.
    // But see the code for copying objects into the CMS generation,
    // where we need to ensure that concurrent readers of the
    // block offset table are able to safely navigate a block that
    // is in flux from being free to being allocated (and in
    // transition while being copied into) and subsequently
    // becoming a bona-fide object when the copy/promotion is complete.
    assert(SafepointSynchronize::is_at_safepoint(),
           "expect promotion only at safepoints");

    if (_collectorState < Sweeping) {
      // Mark the appropriate cards in the modUnionTable, so that
      // this object gets scanned before the sweep. If this is
      // not done, CMS generation references in the object might
      // not get marked.
      // For the case of arrays, which are otherwise precisely
      // marked, we need to dirty the entire array, not just its head.
      if (is_obj_array) {
        // The [par_]mark_range() method expects mr.end() below to
        // be aligned to the granularity of a bit's representation
        // in the heap. In the case of the MUT below, that's a
        // card size.
        MemRegion mr(start,
                     align_up(start + obj_size,
                        CardTableModRefBS::card_size /* bytes */));
        if (par) {
          _modUnionTable.par_mark_range(mr);
        } else {
          _modUnionTable.mark_range(mr);
        }
      } else {  // not an obj array; we can just mark the head
        if (par) {
          _modUnionTable.par_mark(start);
        } else {
          _modUnionTable.mark(start);
        }
      }
    }
  }
}

oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  // allocate, copy and if necessary update promoinfo --
  // delegate to underlying space.
  assert_lock_strong(freelistLock());

#ifndef PRODUCT
  if (GenCollectedHeap::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT

  oop res = _cmsSpace->promote(obj, obj_size);
  if (res == NULL) {
    // expand and retry
    size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
    expand_for_gc_cause(s*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_promotion);
    // Since this is the old generation, we don't try to promote
    // into a more senior generation.
    res = _cmsSpace->promote(obj, obj_size);
  }
  if (res != NULL) {
    // See comment in allocate() about when objects should
    // be allocated live.
    assert(obj->is_oop(), "Will dereference klass pointer below");
    collector()->promoted(false,           // Not parallel
                          (HeapWord*)res, obj->is_objArray(), obj_size);
    // promotion counters
    NOT_PRODUCT(
      _numObjectsPromoted++;
      _numWordsPromoted +=
        (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
    )
  }
  return res;
}


// IMPORTANT: Notes on object size recognition in CMS.
// ---------------------------------------------------
// A block of storage in the CMS generation is always in
// one of three states. A free block (FREE), an allocated
// object (OBJECT) whose size() method reports the correct size,
// and an intermediate state (TRANSIENT) in which its size cannot
// be accurately determined.
// STATE IDENTIFICATION:   (32 bit and 64 bit w/o COOPS)
// -----------------------------------------------------
// FREE:      klass_word & 1 == 1; mark_word holds block size
//
// OBJECT:    klass_word installed; klass_word != 0 && klass_word & 1 == 0;
//            obj->size() computes correct size
//
// TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
//
// STATE IDENTIFICATION: (64 bit+COOPS)
// ------------------------------------
// FREE:      mark_word & CMS_FREE_BIT == 1; mark_word & ~CMS_FREE_BIT gives block_size
//
// OBJECT:    klass_word installed; klass_word != 0;
//            obj->size() computes correct size
//
// TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
//
//
// STATE TRANSITION DIAGRAM
//
//        mut / parnew                     mut  /  parnew
// FREE --------------------> TRANSIENT ---------------------> OBJECT --|
//  ^                                                                   |
//  |------------------------ DEAD <------------------------------------|
//         sweep                            mut
//
// While a block is in TRANSIENT state its size cannot be determined
// so readers will either need to come back later or stall until
// the size can be determined. Note that for the case of direct
// allocation, P-bits, when available, may be used to determine the
// size of an object that may not yet have been initialized.
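//
// A classification sketch along the lines described above (illustrative
// only; the authoritative logic lives in CompactibleFreeListSpace's
// block_size() and related methods):
//
//   if (FreeChunk::indicatesFreeChunk(p)) {
//     // FREE: the size is recorded in the chunk itself
//   } else if (oop(p)->klass_or_null() != NULL) {
//     // OBJECT: oop(p)->size() is reliable
//   } else {
//     // TRANSIENT: size indeterminate; retry later or stall
//   }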

// Things to support parallel young-gen collection.
oop
ConcurrentMarkSweepGeneration::par_promote(int thread_num,
                                           oop old, markOop m,
                                           size_t word_sz) {
#ifndef PRODUCT
  if (GenCollectedHeap::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT

  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  PromotionInfo* promoInfo = &ps->promo;
  // If we are tracking promotions, then first ensure space for
  // promotion (including spooling space for saving the header if necessary);
  // then allocate and copy, then track promoted info if needed.
  // When tracking (see PromotionInfo::track()), the mark word may
  // be displaced and in this case restoration of the mark word
  // occurs in the (oop_since_save_marks_)iterate phase.
  if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
    // Out of space for allocating spooling buffers;
    // try expanding and allocating spooling buffers.
    if (!expand_and_ensure_spooling_space(promoInfo)) {
      return NULL;
    }
  }
  assert(!promoInfo->tracking() || promoInfo->has_spooling_space(), "Control point invariant");
  const size_t alloc_sz = CompactibleFreeListSpace::adjustObjectSize(word_sz);
  HeapWord* obj_ptr = ps->lab.alloc(alloc_sz);
  if (obj_ptr == NULL) {
     obj_ptr = expand_and_par_lab_allocate(ps, alloc_sz);
     if (obj_ptr == NULL) {
       return NULL;
     }
  }
  oop obj = oop(obj_ptr);
  OrderAccess::storestore();
  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
  // IMPORTANT: See note on object initialization for CMS above.
  // Otherwise, copy the object.  Here we must be careful to insert the
  // klass pointer last, since this marks the block as an allocated object.
  // Except with compressed oops it's the mark word.
  HeapWord* old_ptr = (HeapWord*)old;
  // Restore the mark word copied above.
  obj->set_mark(m);
  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
  OrderAccess::storestore();

  if (UseCompressedClassPointers) {
    // Copy gap missed by (aligned) header size calculation below
    obj->set_klass_gap(old->klass_gap());
  }
  if (word_sz > (size_t)oopDesc::header_size()) {
    Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
                                 obj_ptr + oopDesc::header_size(),
                                 word_sz - oopDesc::header_size());
  }

  // Now we can track the promoted object, if necessary.  We take care
  // to delay the transition from uninitialized to full object
  // (i.e., insertion of klass pointer) until after, so that it
  // atomically becomes a promoted object.
  if (promoInfo->tracking()) {
    promoInfo->track((PromotedObject*)obj, old->klass());
  }
  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
  assert(old->is_oop(), "Will use and dereference old klass ptr below");

  // Finally, install the klass pointer (this should be volatile).
  OrderAccess::storestore();
  obj->set_klass(old->klass());
  // We should now be able to calculate the right size for this object
  assert(obj->is_oop() && obj->size() == (int)word_sz, "Error, incorrect size computed for promoted object");

  collector()->promoted(true,          // parallel
                        obj_ptr, old->is_objArray(), word_sz);

  NOT_PRODUCT(
    Atomic::inc_ptr(&_numObjectsPromoted);
    Atomic::add_ptr(alloc_sz, &_numWordsPromoted);
  )

  return obj;
}

void
ConcurrentMarkSweepGeneration::
par_promote_alloc_done(int thread_num) {
  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  ps->lab.retire(thread_num);
}

void
ConcurrentMarkSweepGeneration::
par_oop_since_save_marks_iterate_done(int thread_num) {
  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  ParScanWithoutBarrierClosure* dummy_cl = NULL;
  ps->promo.promoted_oops_iterate_nv(dummy_cl);

  // Because card-scanning has been completed, subsequent phases
  // (e.g., reference processing) will not need to recognize which
  // objects have been promoted during this GC. So, we can now disable
  // promotion tracking.
  ps->promo.stopTrackingPromotions();
}

bool ConcurrentMarkSweepGeneration::should_collect(bool   full,
                                                   size_t size,
                                                   bool   tlab)
{
  // We allow a STW collection only if a full
  // collection was requested.
  return full || should_allocate(size, tlab); // FIX ME !!!
  // This and promotion failure handling are connected at the
  // hip and should be fixed by untying them.
}

bool CMSCollector::shouldConcurrentCollect() {
  LogTarget(Trace, gc) log;

  if (_full_gc_requested) {
1120     log.print("CMSCollector: collect because of explicit  gc request (or GCLocker)");
    return true;
  }

  FreelistLocker x(this);
  // ------------------------------------------------------------------
  // Print out lots of information which affects the initiation of
  // a collection.
  if (log.is_enabled() && stats().valid()) {
    log.print("CMSCollector shouldConcurrentCollect: ");

    LogStream out(log);
    stats().print_on(&out);

    log.print("time_until_cms_gen_full %3.7f", stats().time_until_cms_gen_full());
    log.print("free=" SIZE_FORMAT, _cmsGen->free());
    log.print("contiguous_available=" SIZE_FORMAT, _cmsGen->contiguous_available());
    log.print("promotion_rate=%g", stats().promotion_rate());
    log.print("cms_allocation_rate=%g", stats().cms_allocation_rate());
    log.print("occupancy=%3.7f", _cmsGen->occupancy());
    log.print("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
    log.print("cms_time_since_begin=%3.7f", stats().cms_time_since_begin());
    log.print("cms_time_since_end=%3.7f", stats().cms_time_since_end());
    log.print("metadata initialized %d", MetaspaceGC::should_concurrent_collect());
  }
  // ------------------------------------------------------------------

  // If the estimated time to complete a cms collection (cms_duration())
  // is less than the estimated time remaining until the cms generation
  // is full, start a collection.
  if (!UseCMSInitiatingOccupancyOnly) {
    if (stats().valid()) {
      if (stats().time_until_cms_start() == 0.0) {
        return true;
      }
    } else {
      // We want to conservatively collect somewhat early in order
      // to try and "bootstrap" our CMS/promotion statistics;
      // this branch will not fire after the first successful CMS
      // collection because the stats should then be valid.
      if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
        log.print(" CMSCollector: collect for bootstrapping statistics: occupancy = %f, boot occupancy = %f",
                  _cmsGen->occupancy(), _bootstrap_occupancy);
        return true;
      }
    }
  }

  // Otherwise, we start a collection cycle if the
  // old gen wants a collection cycle started, using
  // its own criterion for making this decision.
  // XXX We need to make sure that the gen expansion
  // criterion dovetails well with this. XXX NEED TO FIX THIS
1173   if (_cmsGen->should_concurrent_collect()) {
1174     log.print("CMS old gen initiated");
1175     return true;
1176   }
1177 
1178   // We start a collection if we believe an incremental collection may fail;
1179   // this is not likely to be productive in practice because it's probably too
1180   // late anyway.
1181   GenCollectedHeap* gch = GenCollectedHeap::heap();
1182   assert(gch->collector_policy()->is_generation_policy(),
1183          "You may want to check the correctness of the following");
1184   if (gch->incremental_collection_will_fail(true /* consult_young */)) {
1185     log.print("CMSCollector: collect because incremental collection will fail ");
1186     return true;
1187   }
1188 
1189   if (MetaspaceGC::should_concurrent_collect()) {
1190     log.print("CMSCollector: collect for metadata allocation ");
1191     return true;
1192   }
1193 
1194   // CMSTriggerInterval starts a CMS cycle if enough time has passed.
1195   if (CMSTriggerInterval >= 0) {
1196     if (CMSTriggerInterval == 0) {
1197       // Trigger always
1198       return true;
1199     }
1200 
1201     // Check the CMS time since begin (we do not check the stats validity
1202     // as we want to be able to trigger the first CMS cycle as well)
1203     if (stats().cms_time_since_begin() >= (CMSTriggerInterval / ((double) MILLIUNITS))) {
1204       if (stats().valid()) {
1205         log.print("CMSCollector: collect because of trigger interval (time since last begin %3.7f secs)",
1206                   stats().cms_time_since_begin());
1207       } else {
1208         log.print("CMSCollector: collect because of trigger interval (first collection)");
1209       }
1210       return true;
1211     }
1212   }
1213 
1214   return false;
1215 }
1216 
1217 void CMSCollector::set_did_compact(bool v) { _cmsGen->set_did_compact(v); }
1218 
1219 // Clear _expansion_cause fields of constituent generations
1220 void CMSCollector::clear_expansion_cause() {
1221   _cmsGen->clear_expansion_cause();
1222 }
1223 
1224 // We should be conservative in starting a collection cycle.  To
1225 // start too eagerly runs the risk of collecting too often in the
1226 // extreme.  To collect too rarely falls back on full collections,
1227 // which works, even if not optimum in terms of concurrent work.
// As a workaround for too eagerly collecting, use the flag
1229 // UseCMSInitiatingOccupancyOnly.  This also has the advantage of
1230 // giving the user an easily understandable way of controlling the
1231 // collections.
1232 // We want to start a new collection cycle if any of the following
1233 // conditions hold:
1234 // . our current occupancy exceeds the configured initiating occupancy
1235 //   for this generation, or
1236 // . we recently needed to expand this space and have not, since that
1237 //   expansion, done a collection of this generation, or
1238 // . the underlying space believes that it may be a good idea to initiate
1239 //   a concurrent collection (this may be based on criteria such as the
1240 //   following: the space uses linear allocation and linear allocation is
1241 //   going to fail, or there is believed to be excessive fragmentation in
1242 //   the generation, etc... or ...
1243 // [.(currently done by CMSCollector::shouldConcurrentCollect() only for
1244 //   the case of the old generation; see CR 6543076):
1245 //   we may be approaching a point at which allocation requests may fail because
1246 //   we will be out of sufficient free space given allocation rate estimates.]
1247 bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
1248 
1249   assert_lock_strong(freelistLock());
1250   if (occupancy() > initiating_occupancy()) {
1251     log_trace(gc)(" %s: collect because of occupancy %f / %f  ",
1252                   short_name(), occupancy(), initiating_occupancy());
1253     return true;
1254   }
1255   if (UseCMSInitiatingOccupancyOnly) {
1256     return false;
1257   }
1258   if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
1259     log_trace(gc)(" %s: collect because expanded for allocation ", short_name());
1260     return true;
1261   }
1262   return false;
1263 }
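     // A note on the method above: an illustrative (not prescriptive) tuning
     // such as -XX:CMSInitiatingOccupancyFraction=70 together with
     // -XX:+UseCMSInitiatingOccupancyOnly makes the occupancy test the sole
     // trigger here, at the cost of disabling the expansion-based heuristic.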
1264 
1265 void ConcurrentMarkSweepGeneration::collect(bool   full,
1266                                             bool   clear_all_soft_refs,
1267                                             size_t size,
1268                                             bool   tlab)
1269 {
1270   collector()->collect(full, clear_all_soft_refs, size, tlab);
1271 }
1272 
1273 void CMSCollector::collect(bool   full,
1274                            bool   clear_all_soft_refs,
1275                            size_t size,
1276                            bool   tlab)
1277 {
1278   // The following "if" branch is present for defensive reasons.
1279   // In the current uses of this interface, it can be replaced with:
1280   // assert(!GCLocker::is_active(), "Can't be called otherwise");
1281   // But I am not placing that assert here to allow future
1282   // generality in invoking this interface.
1283   if (GCLocker::is_active()) {
1284     // A consistency test for GCLocker
1285     assert(GCLocker::needs_gc(), "Should have been set already");
1286     // Skip this foreground collection, instead
1287     // expanding the heap if necessary.
1288     // Need the free list locks for the call to free() in compute_new_size()
1289     compute_new_size();
1290     return;
1291   }
1292   acquire_control_and_collect(full, clear_all_soft_refs);
1293 }
1294 
1295 void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) {
1296   GenCollectedHeap* gch = GenCollectedHeap::heap();
1297   unsigned int gc_count = gch->total_full_collections();
1298   if (gc_count == full_gc_count) {
1299     MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
1300     _full_gc_requested = true;
1301     _full_gc_cause = cause;
1302     CGC_lock->notify();   // nudge CMS thread
1303   } else {
1304     assert(gc_count > full_gc_count, "Error: causal loop");
1305   }
1306 }
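     // A note on request_full_gc() above: the caller passes the count of full
     // collections it observed; if another full collection completed in the
     // interim (gc_count > full_gc_count), the request is treated as already
     // satisfied and dropped.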
1307 
1308 bool CMSCollector::is_external_interruption() {
1309   GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
1310   return GCCause::is_user_requested_gc(cause) ||
1311          GCCause::is_serviceability_requested_gc(cause);
1312 }
1313 
1314 void CMSCollector::report_concurrent_mode_interruption() {
1315   if (is_external_interruption()) {
1316     log_debug(gc)("Concurrent mode interrupted");
1317   } else {
1318     log_debug(gc)("Concurrent mode failure");
1319     _gc_tracer_cm->report_concurrent_mode_failure();
1320   }
1321 }
1322 
1323 
1324 // The foreground and background collectors need to coordinate in order
1325 // to make sure that they do not mutually interfere with CMS collections.
1326 // When a background collection is active,
1327 // the foreground collector may need to take over (preempt) and
1328 // synchronously complete an ongoing collection. Depending on the
1329 // frequency of the background collections and the heap usage
1330 // of the application, this preemption can be seldom or frequent.
1331 // There are only certain
1332 // points in the background collection at which the "collection baton"
1333 // can be passed to the foreground collector.
1334 //
1335 // The foreground collector will wait for the baton before
1336 // starting any part of the collection.  The foreground collector
1337 // will only wait at one location.
1338 //
1339 // The background collector will yield the baton before starting a new
1340 // phase of the collection (e.g., before initial marking, marking from roots,
1341 // precleaning, final re-mark, sweep etc.)  This is normally done at the head
1342 // of the loop which switches the phases. The background collector does some
1343 // of the phases (initial mark, final re-mark) with the world stopped.
1344 // Because of locking involved in stopping the world,
1345 // the foreground collector should not block waiting for the background
1346 // collector when it is doing a stop-the-world phase.  The background
1347 // collector will yield the baton at an additional point just before
1348 // it enters a stop-the-world phase.  Once the world is stopped, the
1349 // background collector checks the phase of the collection.  If the
1350 // phase has not changed, it proceeds with the collection.  If the
1351 // phase has changed, it skips that phase of the collection.  See
1352 // the comments on the use of the Heap_lock in collect_in_background().
1353 //
1354 // Variable used in baton passing.
1355 //   _foregroundGCIsActive - Set to true by the foreground collector when
1356 //      it wants the baton.  The foreground clears it when it has finished
1357 //      the collection.
1358 //   _foregroundGCShouldWait - Set to true by the background collector
1359 //      when it is running.  The foreground collector waits while
1360 //      _foregroundGCShouldWait is true.
1361 //  CGC_lock - monitor used to protect access to the above variables
1362 //      and to notify the foreground and background collectors.
1363 //  _collectorState - current state of the CMS collection.
1364 //
1365 // The foreground collector
1366 //   acquires the CGC_lock
1367 //   sets _foregroundGCIsActive
1368 //   waits on the CGC_lock for _foregroundGCShouldWait to be false
1369 //     various locks acquired in preparation for the collection
1370 //     are released so as not to block the background collector
1371 //     that is in the midst of a collection
1372 //   proceeds with the collection
1373 //   clears _foregroundGCIsActive
1374 //   returns
1375 //
1376 // The background collector in a loop iterating on the phases of the
1377 //      collection
1378 //   acquires the CGC_lock
1379 //   sets _foregroundGCShouldWait
1380 //   if _foregroundGCIsActive is set
1381 //     clears _foregroundGCShouldWait, notifies CGC_lock
1382 //     waits on CGC_lock for _foregroundGCIsActive to become false
1383 //     and exits the loop.
1384 //   otherwise
1385 //     proceed with that phase of the collection
1386 //     if the phase is a stop-the-world phase,
1387 //       yield the baton once more just before enqueueing
1388 //       the stop-world CMS operation (executed by the VM thread).
1389 //   returns after all phases of the collection are done
1390 //
1391 
1392 void CMSCollector::acquire_control_and_collect(bool full,
1393         bool clear_all_soft_refs) {
1394   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1395   assert(!Thread::current()->is_ConcurrentGC_thread(),
1396          "shouldn't try to acquire control from self!");
1397 
1398   // Start the protocol for acquiring control of the
1399   // collection from the background collector (aka CMS thread).
1400   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1401          "VM thread should have CMS token");
1402   // Remember the possibly interrupted state of an ongoing
1403   // concurrent collection
1404   CollectorState first_state = _collectorState;
1405 
1406   // Signal to a possibly ongoing concurrent collection that
1407   // we want to do a foreground collection.
1408   _foregroundGCIsActive = true;
1409 
1410   // Release locks and wait for a notify from the background collector.
1411   // Releasing the locks is only necessary for phases which
1412   // yield to improve the granularity of the collection.
1413   assert_lock_strong(bitMapLock());
1414   // We need to lock the Free list lock for the space that we are
1415   // currently collecting.
1416   assert(haveFreelistLocks(), "Must be holding free list locks");
1417   bitMapLock()->unlock();
1418   releaseFreelistLocks();
1419   {
1420     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1421     if (_foregroundGCShouldWait) {
1422       // We are going to be waiting for action from the CMS thread;
1423       // it had better not be gone (for instance at shutdown)!
1424       assert(ConcurrentMarkSweepThread::cmst() != NULL && !ConcurrentMarkSweepThread::cmst()->has_terminated(),
1425              "CMS thread must be running");
1426       // Wait here until the background collector gives us the go-ahead
1427       ConcurrentMarkSweepThread::clear_CMS_flag(
1428         ConcurrentMarkSweepThread::CMS_vm_has_token);  // release token
1429       // Get a possibly blocked CMS thread going:
1430       //   Note that we set _foregroundGCIsActive true above,
1431       //   without protection of the CGC_lock.
1432       CGC_lock->notify();
1433       assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(),
1434              "Possible deadlock");
1435       while (_foregroundGCShouldWait) {
1436         // wait for notification
1437         CGC_lock->wait(Mutex::_no_safepoint_check_flag);
1438         // Possibility of delay/starvation here, since the CMS token does
1439         // not know to give priority to the VM thread? Actually, I think
1440         // there wouldn't be any delay/starvation, but the proof of
1441         // that "fact" (?) appears non-trivial. XXX 20011219YSR
1442       }
1443       ConcurrentMarkSweepThread::set_CMS_flag(
1444         ConcurrentMarkSweepThread::CMS_vm_has_token);
1445     }
1446   }
1447   // The CMS_token is already held.  Get back the other locks.
1448   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1449          "VM thread should have CMS token");
1450   getFreelistLocks();
1451   bitMapLock()->lock_without_safepoint_check();
1452   log_debug(gc, state)("CMS foreground collector has asked for control " INTPTR_FORMAT " with first state %d",
1453                        p2i(Thread::current()), first_state);
1454   log_debug(gc, state)("    gets control with state %d", _collectorState);
1455 
1456   // Inform cms gen if this was due to partial collection failing.
1457   // The CMS gen may use this fact to determine its expansion policy.
1458   GenCollectedHeap* gch = GenCollectedHeap::heap();
1459   if (gch->incremental_collection_will_fail(false /* don't consult_young */)) {
1460     assert(!_cmsGen->incremental_collection_failed(),
1461            "Should have been noticed, reacted to and cleared");
1462     _cmsGen->set_incremental_collection_failed();
1463   }
1464 
1465   if (first_state > Idling) {
1466     report_concurrent_mode_interruption();
1467   }
1468 
1469   set_did_compact(true);
1470 
1471   // If the collection is being acquired from the background
1472   // collector, there may be references on the discovered
1473   // references lists.  Abandon those references, since some
1474   // of them may have become unreachable after concurrent
1475   // discovery; the STW compacting collector will redo discovery
1476   // more precisely, without being subject to floating garbage.
1477   // Leaving otherwise unreachable references in the discovered
1478   // lists would require special handling.
1479   ref_processor()->disable_discovery();
1480   ref_processor()->abandon_partial_discovery();
1481   ref_processor()->verify_no_references_recorded();
1482 
1483   if (first_state > Idling) {
1484     save_heap_summary();
1485   }
1486 
1487   do_compaction_work(clear_all_soft_refs);
1488 
1489   // Has the GC time limit been exceeded?
1490   size_t max_eden_size = _young_gen->max_eden_size();
1491   GCCause::Cause gc_cause = gch->gc_cause();
1492   size_policy()->check_gc_overhead_limit(_young_gen->used(),
1493                                          _young_gen->eden()->used(),
1494                                          _cmsGen->max_capacity(),
1495                                          max_eden_size,
1496                                          full,
1497                                          gc_cause,
1498                                          gch->collector_policy());
1499 
1500   // Reset the expansion cause, now that we just completed
1501   // a collection cycle.
1502   clear_expansion_cause();
1503   _foregroundGCIsActive = false;
1504   return;
1505 }
1506 
1507 // Resize the tenured generation
1508 // after obtaining the free list locks for the
1509 // two generations.
1510 void CMSCollector::compute_new_size() {
1511   assert_locked_or_safepoint(Heap_lock);
1512   FreelistLocker z(this);
1513   MetaspaceGC::compute_new_size();
1514   _cmsGen->compute_new_size_free_list();
1515 }
1516 
1517 // A work method used by the foreground collector to do
1518 // a mark-sweep-compact.
1519 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
1520   GenCollectedHeap* gch = GenCollectedHeap::heap();
1521 
1522   STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
1523   gc_timer->register_gc_start();
1524 
1525   SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
1526   gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
1527 
1528   gch->pre_full_gc_dump(gc_timer);
1529 
1530   GCTraceTime(Trace, gc, phases) t("CMS:MSC");
1531 
1532   // Temporarily widen the span of the weak reference processing to
1533   // the entire heap.
1534   MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
1535   ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
1536   // Temporarily, clear the "is_alive_non_header" field of the
1537   // reference processor.
1538   ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL);
1539   // Temporarily make reference _processing_ single threaded (non-MT).
1540   ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false);
1541   // Temporarily make refs discovery atomic
1542   ReferenceProcessorAtomicMutator rp_mut_atomic(ref_processor(), true);
1543   // Temporarily make reference _discovery_ single threaded (non-MT)
1544   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
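       // Each of the *Mutator objects above is a stack-allocated RAII helper
       // that records the previous setting in its constructor and restores it
       // in its destructor, so all of these adjustments revert automatically
       // when this method returns.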
1545 
1546   ref_processor()->set_enqueuing_is_done(false);
1547   ref_processor()->enable_discovery();
1548   ref_processor()->setup_policy(clear_all_soft_refs);
1549   // If an asynchronous collection finishes, the _modUnionTable is
1550   // all clear.  If we are taking over the collection from an ongoing
1551   // asynchronous collection, clear the _modUnionTable.
1552   assert(_collectorState != Idling || _modUnionTable.isAllClear(),
1553     "_modUnionTable should be clear if the baton was not passed");
1554   _modUnionTable.clear_all();
1555   assert(_collectorState != Idling || _ct->klass_rem_set()->mod_union_is_clear(),
1556     "mod union for klasses should be clear if the baton was passed");
1557   _ct->klass_rem_set()->clear_mod_union();
1558 
1559   // We must adjust the allocation statistics being maintained
1560   // in the free list space. We do so by reading and clearing
1561   // the sweep timer and updating the block flux rate estimates below.
1562   assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
1563   if (_inter_sweep_timer.is_active()) {
1564     _inter_sweep_timer.stop();
1565     // Note that we do not use this sample to update the _inter_sweep_estimate.
1566     _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
1567                                             _inter_sweep_estimate.padded_average(),
1568                                             _intra_sweep_estimate.padded_average());
1569   }
1570 
1571   GenMarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs);
1572   #ifdef ASSERT
1573     CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
1574     size_t free_size = cms_space->free();
1575     assert(free_size ==
1576            pointer_delta(cms_space->end(), cms_space->compaction_top())
1577            * HeapWordSize,
1578       "All the free space should be compacted into one chunk at top");
1579     assert(cms_space->dictionary()->total_chunk_size(
1580                                       debug_only(cms_space->freelistLock())) == 0 ||
1581            cms_space->totalSizeInIndexedFreeLists() == 0,
1582       "All the free space should be in a single chunk");
1583     size_t num = cms_space->totalCount();
1584     assert((free_size == 0 && num == 0) ||
1585            (free_size > 0  && (num == 1 || num == 2)),
1586          "There should be at most 2 free chunks after compaction");
1587   #endif // ASSERT
1588   _collectorState = Resetting;
1589   assert(_restart_addr == NULL,
1590          "Should have been NULL'd before baton was passed");
1591   reset_stw();
1592   _cmsGen->reset_after_compaction();
1593   _concurrent_cycles_since_last_unload = 0;
1594 
1595   // Clear any data recorded in the PLAB chunk arrays.
1596   if (_survivor_plab_array != NULL) {
1597     reset_survivor_plab_arrays();
1598   }
1599 
1600   // Adjust the per-size allocation stats for the next epoch.
1601   _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
1602   // Restart the "inter sweep timer" for the next epoch.
1603   _inter_sweep_timer.reset();
1604   _inter_sweep_timer.start();
1605 
1606   // No longer a need to do a concurrent collection for Metaspace.
1607   MetaspaceGC::set_should_concurrent_collect(false);
1608 
1609   gch->post_full_gc_dump(gc_timer);
1610 
1611   gc_timer->register_gc_end();
1612 
1613   gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
1614 
1615   // For a mark-sweep-compact, compute_new_size() will be called
1616   // in the heap's do_collection() method.
1617 }
1618 
1619 void CMSCollector::print_eden_and_survivor_chunk_arrays() {
1620   Log(gc, heap) log;
1621   if (!log.is_trace()) {
1622     return;
1623   }
1624 
1625   ContiguousSpace* eden_space = _young_gen->eden();
1626   ContiguousSpace* from_space = _young_gen->from();
1627   ContiguousSpace* to_space   = _young_gen->to();
1628   // Eden
1629   if (_eden_chunk_array != NULL) {
1630     log.trace("eden " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
1631               p2i(eden_space->bottom()), p2i(eden_space->top()),
1632               p2i(eden_space->end()), eden_space->capacity());
1633     log.trace("_eden_chunk_index=" SIZE_FORMAT ", _eden_chunk_capacity=" SIZE_FORMAT,
1634               _eden_chunk_index, _eden_chunk_capacity);
1635     for (size_t i = 0; i < _eden_chunk_index; i++) {
1636       log.trace("_eden_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT, i, p2i(_eden_chunk_array[i]));
1637     }
1638   }
1639   // Survivor
1640   if (_survivor_chunk_array != NULL) {
1641     log.trace("survivor " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
1642               p2i(from_space->bottom()), p2i(from_space->top()),
1643               p2i(from_space->end()), from_space->capacity());
1644     log.trace("_survivor_chunk_index=" SIZE_FORMAT ", _survivor_chunk_capacity=" SIZE_FORMAT,
1645               _survivor_chunk_index, _survivor_chunk_capacity);
1646     for (size_t i = 0; i < _survivor_chunk_index; i++) {
1647       log.trace("_survivor_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT, i, p2i(_survivor_chunk_array[i]));
1648     }
1649   }
1650 }
1651 
1652 void CMSCollector::getFreelistLocks() const {
1653   // Get locks for all free lists in all generations that this
1654   // collector is responsible for
1655   _cmsGen->freelistLock()->lock_without_safepoint_check();
1656 }
1657 
1658 void CMSCollector::releaseFreelistLocks() const {
1659   // Release locks for all free lists in all generations that this
1660   // collector is responsible for
1661   _cmsGen->freelistLock()->unlock();
1662 }
1663 
1664 bool CMSCollector::haveFreelistLocks() const {
1665   // Check locks for all free lists in all generations that this
1666   // collector is responsible for
1667   assert_lock_strong(_cmsGen->freelistLock());
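       // assert_lock_strong() compiles away in product builds, so this check
       // is meaningful only in debug builds; the line below trips an error if
       // this method is ever reached in a product build.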
1668   PRODUCT_ONLY(ShouldNotReachHere());
1669   return true;
1670 }
1671 
1672 // A utility class that is used by the CMS collector to
1673 // temporarily "release" the foreground collector from its
1674 // usual obligation to wait for the background collector to
1675 // complete an ongoing phase before proceeding.
1676 class ReleaseForegroundGC: public StackObj {
1677  private:
1678   CMSCollector* _c;
1679  public:
1680   ReleaseForegroundGC(CMSCollector* c) : _c(c) {
1681     assert(_c->_foregroundGCShouldWait, "Else should not need to call");
1682     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1683     // allow a potentially blocked foreground collector to proceed
1684     _c->_foregroundGCShouldWait = false;
1685     if (_c->_foregroundGCIsActive) {
1686       CGC_lock->notify();
1687     }
1688     assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
1689            "Possible deadlock");
1690   }
1691 
1692   ~ReleaseForegroundGC() {
1693     assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
1694     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1695     _c->_foregroundGCShouldWait = true;
1696   }
1697 };
1698 
1699 void CMSCollector::collect_in_background(GCCause::Cause cause) {
1700   assert(Thread::current()->is_ConcurrentGC_thread(),
1701     "A CMS asynchronous collection is only allowed on a CMS thread.");
1702 
1703   GenCollectedHeap* gch = GenCollectedHeap::heap();
1704   {
1705     bool safepoint_check = Mutex::_no_safepoint_check_flag;
1706     MutexLockerEx hl(Heap_lock, safepoint_check);
1707     FreelistLocker fll(this);
1708     MutexLockerEx x(CGC_lock, safepoint_check);
1709     if (_foregroundGCIsActive) {
1710       // The foreground collector is active or will be. Skip this
1711       // background collection.
1712       assert(!_foregroundGCShouldWait, "Should be clear");
1713       return;
1714     } else {
1715       assert(_collectorState == Idling, "Should be idling before start.");
1716       _collectorState = InitialMarking;
1717       register_gc_start(cause);
1718       // Reset the expansion cause, now that we are about to begin
1719       // a new cycle.
1720       clear_expansion_cause();
1721 
1722       // Clear the MetaspaceGC flag since a concurrent collection
1723       // is starting; it is cleared again after the collection completes.
1724       MetaspaceGC::set_should_concurrent_collect(false);
1725     }
1726     // Decide if we want to enable class unloading as part of the
1727     // ensuing concurrent GC cycle.
1728     update_should_unload_classes();
1729     _full_gc_requested = false;           // acks all outstanding full gc requests
1730     _full_gc_cause = GCCause::_no_gc;
1731     // Signal that we are about to start a collection
1732     gch->increment_total_full_collections();  // ... starting a collection cycle
1733     _collection_count_start = gch->total_full_collections();
1734   }
1735 
1736   size_t prev_used = _cmsGen->used();
1737 
1738   // The change of the collection state is normally done at this level;
1739   // the exceptions are phases that are executed while the world is
1740   // stopped.  For those phases the change of state is done while the
1741   // world is stopped.  For baton passing purposes this allows the
1742   // background collector to finish the phase and change state atomically.
1743   // The foreground collector cannot wait on a phase that is done
1744   // while the world is stopped because the foreground collector already
1745   // has the world stopped and would deadlock.
1746   while (_collectorState != Idling) {
1747     log_debug(gc, state)("Thread " INTPTR_FORMAT " in CMS state %d",
1748                          p2i(Thread::current()), _collectorState);
1749     // The foreground collector
1750     //   holds the Heap_lock throughout its collection.
1751     //   holds the CMS token (but not the lock)
1752     //     except while it is waiting for the background collector to yield.
1753     //
1754     // The foreground collector should be blocked (not for long)
1755     //   if the background collector is about to start a phase
1756     //   executed with world stopped.  If the background
1757     //   collector has already started such a phase, the
1758     //   foreground collector is blocked waiting for the
1759     //   Heap_lock.  The stop-world phases (InitialMarking and FinalMarking)
1760     //   are executed in the VM thread.
1761     //
1762     // The locking order is
1763     //   PendingListLock (PLL)  -- if applicable (FinalMarking)
1764     //   Heap_lock  (both this & PLL locked in VM_CMS_Operation::prologue())
1765     //   CMS token  (claimed in
1766     //                stop_world_and_do() -->
1767     //                  safepoint_synchronize() -->
1768     //                    CMSThread::synchronize())
1769 
1770     {
1771       // Check if the FG collector wants us to yield.
1772       CMSTokenSync x(true); // is cms thread
1773       if (waitForForegroundGC()) {
1774         // We yielded to a foreground GC, nothing more to be
1775         // done this round.
1776         assert(_foregroundGCShouldWait == false, "We set it to false in "
1777                "waitForForegroundGC()");
1778         log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " exiting collection CMS state %d",
1779                              p2i(Thread::current()), _collectorState);
1780         return;
1781       } else {
1782         // The background collector can run but check to see if the
1783         // foreground collector has done a collection while the
1784         // background collector was waiting to get the CGC_lock
1785         // above.  If yes, break so that _foregroundGCShouldWait
1786         // is cleared before returning.
1787         if (_collectorState == Idling) {
1788           break;
1789         }
1790       }
1791     }
1792 
1793     assert(_foregroundGCShouldWait, "Foreground collector, if active, "
1794       "should be waiting");
1795 
1796     switch (_collectorState) {
1797       case InitialMarking:
1798         {
1799           ReleaseForegroundGC x(this);
1800           stats().record_cms_begin();
1801           VM_CMS_Initial_Mark initial_mark_op(this);
1802           VMThread::execute(&initial_mark_op);
1803         }
1804         // The collector state may be any legal state at this point
1805         // since the background collector may have yielded to the
1806         // foreground collector.
1807         break;
1808       case Marking:
1809         // initial marking in checkpointRootsInitialWork has been completed
1810         if (markFromRoots()) { // we were successful
1811           assert(_collectorState == Precleaning, "Collector state should "
1812             "have changed");
1813         } else {
1814           assert(_foregroundGCIsActive, "Internal state inconsistency");
1815         }
1816         break;
1817       case Precleaning:
1818         // marking from roots in markFromRoots has been completed
1819         preclean();
1820         assert(_collectorState == AbortablePreclean ||
1821                _collectorState == FinalMarking,
1822                "Collector state should have changed");
1823         break;
1824       case AbortablePreclean:
1825         abortable_preclean();
1826         assert(_collectorState == FinalMarking, "Collector state should "
1827           "have changed");
1828         break;
1829       case FinalMarking:
1830         {
1831           ReleaseForegroundGC x(this);
1832 
1833           VM_CMS_Final_Remark final_remark_op(this);
1834           VMThread::execute(&final_remark_op);
1835         }
1836         assert(_foregroundGCShouldWait, "block post-condition");
1837         break;
1838       case Sweeping:
1839         // final marking in checkpointRootsFinal has been completed
1840         sweep();
1841         assert(_collectorState == Resizing, "Collector state change "
1842           "to Resizing must be done under the free_list_lock");
1843 
1844       case Resizing: {
1845         // Sweeping has been completed...
1846         // At this point the background collection has completed.
1847         // Don't move the call to compute_new_size() down
1848         // into code that might be executed if the background
1849         // collection was preempted.
1850         {
1851           ReleaseForegroundGC x(this);   // unblock FG collection
1852           MutexLockerEx       y(Heap_lock, Mutex::_no_safepoint_check_flag);
1853           CMSTokenSync        z(true);   // not strictly needed.
1854           if (_collectorState == Resizing) {
1855             compute_new_size();
1856             save_heap_summary();
1857             _collectorState = Resetting;
1858           } else {
1859             assert(_collectorState == Idling, "The state should only change"
1860                    " because the foreground collector has finished the collection");
1861           }
1862         }
1863         break;
1864       }
1865       case Resetting:
1866         // CMS heap resizing has been completed
1867         reset_concurrent();
1868         assert(_collectorState == Idling, "Collector state should "
1869           "have changed");
1870 
1871         MetaspaceGC::set_should_concurrent_collect(false);
1872 
1873         stats().record_cms_end();
1874         // Don't move the concurrent_phases_end() and compute_new_size()
1875         // calls to here because a preempted background collection
1876         // has its state set to "Resetting".
1877         break;
1878       case Idling:
1879       default:
1880         ShouldNotReachHere();
1881         break;
1882     }
1883     log_debug(gc, state)("  Thread " INTPTR_FORMAT " done - next CMS state %d",
1884                          p2i(Thread::current()), _collectorState);
1885     assert(_foregroundGCShouldWait, "block post-condition");
1886   }
1887 
1888   // Should this be in gc_epilogue?
1889   collector_policy()->counters()->update_counters();
1890 
1891   {
1892     // Clear _foregroundGCShouldWait and, in the event that the
1893     // foreground collector is waiting, notify it, before
1894     // returning.
1895     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1896     _foregroundGCShouldWait = false;
1897     if (_foregroundGCIsActive) {
1898       CGC_lock->notify();
1899     }
1900     assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
1901            "Possible deadlock");
1902   }
1903   log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " exiting collection CMS state %d",
1904                        p2i(Thread::current()), _collectorState);
1905   log_info(gc, heap)("Old: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)",
1906                      prev_used / K, _cmsGen->used() / K, _cmsGen->capacity() / K);
1907 }
1908 
1909 void CMSCollector::register_gc_start(GCCause::Cause cause) {
1910   _cms_start_registered = true;
1911   _gc_timer_cm->register_gc_start();
1912   _gc_tracer_cm->report_gc_start(cause, _gc_timer_cm->gc_start());
1913 }
1914 
1915 void CMSCollector::register_gc_end() {
1916   if (_cms_start_registered) {
1917     report_heap_summary(GCWhen::AfterGC);
1918 
1919     _gc_timer_cm->register_gc_end();
1920     _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
1921     _cms_start_registered = false;
1922   }
1923 }
1924 
1925 void CMSCollector::save_heap_summary() {
1926   GenCollectedHeap* gch = GenCollectedHeap::heap();
1927   _last_heap_summary = gch->create_heap_summary();
1928   _last_metaspace_summary = gch->create_metaspace_summary();
1929 }
1930 
1931 void CMSCollector::report_heap_summary(GCWhen::Type when) {
1932   _gc_tracer_cm->report_gc_heap_summary(when, _last_heap_summary);
1933   _gc_tracer_cm->report_metaspace_summary(when, _last_metaspace_summary);
1934 }
1935 
1936 bool CMSCollector::waitForForegroundGC() {
1937   bool res = false;
1938   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
1939          "CMS thread should have CMS token");
1940   // Block the foreground collector until the
1941   // background collector decides whether to
1942   // yield.
1943   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1944   _foregroundGCShouldWait = true;
1945   if (_foregroundGCIsActive) {
1946     // The background collector yields to the
1947     // foreground collector and returns a value
1948     // indicating that it has yielded.  The foreground
1949     // collector can proceed.
1950     res = true;
1951     _foregroundGCShouldWait = false;
1952     ConcurrentMarkSweepThread::clear_CMS_flag(
1953       ConcurrentMarkSweepThread::CMS_cms_has_token);
1954     ConcurrentMarkSweepThread::set_CMS_flag(
1955       ConcurrentMarkSweepThread::CMS_cms_wants_token);
1956     // Get a possibly blocked foreground thread going
1957     CGC_lock->notify();
1958     log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
1959                          p2i(Thread::current()), _collectorState);
1960     while (_foregroundGCIsActive) {
1961       CGC_lock->wait(Mutex::_no_safepoint_check_flag);
1962     }
1963     ConcurrentMarkSweepThread::set_CMS_flag(
1964       ConcurrentMarkSweepThread::CMS_cms_has_token);
1965     ConcurrentMarkSweepThread::clear_CMS_flag(
1966       ConcurrentMarkSweepThread::CMS_cms_wants_token);
1967   }
1968   log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
1969                        p2i(Thread::current()), _collectorState);
1970   return res;
1971 }
1972 
1973 // Because of the need to lock the free lists and other structures in
1974 // the collector, common to all the generations that the collector is
1975 // collecting, we need the gc_prologues of individual CMS generations
1976 // to delegate to their collector. It may have been simpler had the
1977 // current infrastructure allowed one to call a prologue on a
1978 // collector. In the absence of that we have the generation's
1979 // prologue delegate to the collector, which delegates back
1980 // some "local" work to a worker method in the individual generations
1981 // that it's responsible for collecting, while itself doing any
1982 // work common to all generations it's responsible for. A similar
1983 // comment applies to the gc_epilogue()s.
1984 // The role of the variable _between_prologue_and_epilogue is to
1985 // enforce the invocation protocol.
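     // The resulting call chain, sketched here for the prologue (the epilogue
     // is symmetric):
     //   GenCollectedHeap
     //     -> ConcurrentMarkSweepGeneration::gc_prologue(full)
     //          -> CMSCollector::gc_prologue(full)   // locks shared structures
     //               -> ConcurrentMarkSweepGeneration::gc_prologue_work(...)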
1986 void CMSCollector::gc_prologue(bool full) {
1987   // Call gc_prologue_work() for the CMSGen
1988   // we are responsible for.
1989 
1990   // The following locking discipline assumes that we are only called
1991   // when the world is stopped.
1992   assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption");
1993 
1994   // The CMSCollector prologue must call the gc_prologues for the
1995   // "generations" that it's responsible for.
1997 
1998   assert(   Thread::current()->is_VM_thread()
1999          || (   CMSScavengeBeforeRemark
2000              && Thread::current()->is_ConcurrentGC_thread()),
2001          "Incorrect thread type for prologue execution");
2002 
2003   if (_between_prologue_and_epilogue) {
2004     // We have already been invoked; this is a gc_prologue delegation
2005     // from yet another CMS generation that we are responsible for, just
2006     // ignore it since all relevant work has already been done.
2007     return;
2008   }
2009 
2010   // set a bit saying prologue has been called; cleared in epilogue
2011   _between_prologue_and_epilogue = true;
2012   // Claim locks for common data structures, then call gc_prologue_work()
2013   // for each CMSGen.
2014 
2015   getFreelistLocks();   // gets free list locks on constituent spaces
2016   bitMapLock()->lock_without_safepoint_check();
2017 
2018   // Should call gc_prologue_work() for all cms gens we are responsible for
2019   bool duringMarking = _collectorState >= Marking
2020                     && _collectorState < Sweeping;
2021 
2022   // The young collections clear the modified oops state, which tells if
2023   // there are any modified oops in the class. The remark phase also needs
2024   // that information. Tell the young collection to save the union of all
2025   // modified klasses.
2026   if (duringMarking) {
2027     _ct->klass_rem_set()->set_accumulate_modified_oops(true);
2028   }
2029 
2030   bool registerClosure = duringMarking;
2031 
2032   _cmsGen->gc_prologue_work(full, registerClosure, &_modUnionClosurePar);
2033 
2034   if (!full) {
2035     stats().record_gc0_begin();
2036   }
2037 }
2038 
2039 void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
2040 
2041   _capacity_at_prologue = capacity();
2042   _used_at_prologue = used();
2043 
2044   // We enable promotion tracking so that card-scanning can recognize
2045   // which objects have been promoted during this GC and skip them.
2046   for (uint i = 0; i < ParallelGCThreads; i++) {
2047     _par_gc_thread_states[i]->promo.startTrackingPromotions();
2048   }
2049 
2050   // Delegate to the CMSCollector, which knows how to coordinate between
2051   // this and any other CMS generations that it is responsible for
2052   // collecting.
2053   collector()->gc_prologue(full);
2054 }
2055 
2056 // This is a "private" interface for use by this generation's CMSCollector.
2057 // Not to be called directly by any other entity (for instance,
2058 // GenCollectedHeap, which calls the "public" gc_prologue method above).
2059 void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
2060   bool registerClosure, ModUnionClosure* modUnionClosure) {
2061   assert(!incremental_collection_failed(), "Shouldn't be set yet");
2062   assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,
2063     "Should be NULL");
2064   if (registerClosure) {
2065     cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure);
2066   }
2067   cmsSpace()->gc_prologue();
2068   // Clear stat counters
2069   NOT_PRODUCT(
2070     assert(_numObjectsPromoted == 0, "check");
2071     assert(_numWordsPromoted   == 0, "check");
2072     log_develop_trace(gc, alloc)("Allocated " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes concurrently",
2073                                  _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord));
2074     _numObjectsAllocated = 0;
2075     _numWordsAllocated   = 0;
2076   )
2077 }
2078 
2079 void CMSCollector::gc_epilogue(bool full) {
2080   // The following locking discipline assumes that we are only called
2081   // when the world is stopped.
2082   assert(SafepointSynchronize::is_at_safepoint(),
2083          "world is stopped assumption");
2084 
2085   // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks
2086   // if linear allocation blocks need to be appropriately marked to allow
2087   // the blocks to be parsable. We also check here whether we need to nudge the
2088   // CMS collector thread to start a new cycle (if it's not already active).
2089   assert(   Thread::current()->is_VM_thread()
2090          || (   CMSScavengeBeforeRemark
2091              && Thread::current()->is_ConcurrentGC_thread()),
2092          "Incorrect thread type for epilogue execution");
2093 
2094   if (!_between_prologue_and_epilogue) {
2095     // We have already been invoked; this is a gc_epilogue delegation
2096     // from yet another CMS generation that we are responsible for, just
2097     // ignore it since all relevant work has already been done.
2098     return;
2099   }
2100   assert(haveFreelistLocks(), "must have freelist locks");
2101   assert_lock_strong(bitMapLock());
2102 
2103   _ct->klass_rem_set()->set_accumulate_modified_oops(false);
2104 
2105   _cmsGen->gc_epilogue_work(full);
2106 
2107   if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
2108     // in case sampling was not already enabled, enable it
2109     _start_sampling = true;
2110   }
2111   // reset _eden_chunk_array so sampling starts afresh
2112   _eden_chunk_index = 0;
2113 
2114   size_t cms_used   = _cmsGen->cmsSpace()->used();
2115 
2116   // update performance counters - this uses a special version of
2117   // update_counters() that allows the utilization to be passed as a
2118   // parameter, avoiding multiple calls to used().
2119   //
2120   _cmsGen->update_counters(cms_used);
2121 
2122   bitMapLock()->unlock();
2123   releaseFreelistLocks();
2124 
2125   if (!CleanChunkPoolAsync) {
2126     Chunk::clean_chunk_pool();
2127   }
2128 
2129   set_did_compact(false);
2130   _between_prologue_and_epilogue = false;  // ready for next cycle
2131 }
2132 
2133 void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
2134   collector()->gc_epilogue(full);
2135 
2136   // When using ParNew, promotion tracking should have already been
2137   // disabled. However, the prologue (which enables promotion
2138   // tracking) and epilogue are called irrespective of the type of
2139   // GC. So they will also be called before and after Full GCs, during
2140   // which promotion tracking will not be explicitly disabled. So,
2141   // it's safer to also disable it here too (to be symmetric with
2142   // enabling it in the prologue).
2143   for (uint i = 0; i < ParallelGCThreads; i++) {
2144     _par_gc_thread_states[i]->promo.stopTrackingPromotions();
2145   }
2146 }
2147 
2148 void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) {
2149   assert(!incremental_collection_failed(), "Should have been cleared");
2150   cmsSpace()->setPreconsumptionDirtyCardClosure(NULL);
2151   cmsSpace()->gc_epilogue();
2152   // Print stat counters
2153   NOT_PRODUCT(
2154     assert(_numObjectsAllocated == 0, "check");
2155     assert(_numWordsAllocated == 0, "check");
2156     log_develop_trace(gc, promotion)("Promoted " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
2157                                      _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord));
2158     _numObjectsPromoted = 0;
2159     _numWordsPromoted   = 0;
2160   )
2161 
2162   // The call down the chain in contiguous_available() needs the freelistLock,
2163   // so print this out before releasing the freelistLock.
2164   log_develop_trace(gc)(" Contiguous available " SIZE_FORMAT " bytes ", contiguous_available());
2165 }
2166 
2167 #ifndef PRODUCT
2168 bool CMSCollector::have_cms_token() {
2169   Thread* thr = Thread::current();
2170   if (thr->is_VM_thread()) {
2171     return ConcurrentMarkSweepThread::vm_thread_has_cms_token();
2172   } else if (thr->is_ConcurrentGC_thread()) {
2173     return ConcurrentMarkSweepThread::cms_thread_has_cms_token();
2174   } else if (thr->is_GC_task_thread()) {
2175     return ConcurrentMarkSweepThread::vm_thread_has_cms_token() &&
2176            ParGCRareEvent_lock->owned_by_self();
2177   }
2178   return false;
2179 }
2180 
2181 // Check reachability of the given heap address in CMS generation,
2182 // treating all other generations as roots.
2183 bool CMSCollector::is_cms_reachable(HeapWord* addr) {
2184   // We could "guarantee" below, rather than assert, but I'll
2185   // leave these as "asserts" so that an adventurous debugger
2186   // could try this in the product build, provided some subset of
2187   // the conditions were met, they were interested in the
2188   // results, and knew that the computation below wouldn't interfere
2189   // with other concurrent computations mutating the structures
2190   // being read or written.
2191   assert(SafepointSynchronize::is_at_safepoint(),
2192          "Else mutations in object graph will make answer suspect");
2193   assert(have_cms_token(), "Should hold cms token");
2194   assert(haveFreelistLocks(), "must hold free list locks");
2195   assert_lock_strong(bitMapLock());
2196 
2197   // Clear the marking bit map array before starting, but, just
2198   // for kicks, first report if the given address is already marked
2199   tty->print_cr("Start: Address " PTR_FORMAT " is%s marked", p2i(addr),
2200                 _markBitMap.isMarked(addr) ? "" : " not");
2201 
2202   if (verify_after_remark()) {
2203     MutexLockerEx x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2204     bool result = verification_mark_bm()->isMarked(addr);
2205     tty->print_cr("TransitiveMark: Address " PTR_FORMAT " %s marked", p2i(addr),
2206                   result ? "IS" : "is NOT");
2207     return result;
2208   } else {
2209     tty->print_cr("Could not compute result");
2210     return false;
2211   }
2212 }
2213 #endif
2214 
2215 void
2216 CMSCollector::print_on_error(outputStream* st) {
2217   CMSCollector* collector = ConcurrentMarkSweepGeneration::_collector;
2218   if (collector != NULL) {
2219     CMSBitMap* bitmap = &collector->_markBitMap;
2220     st->print_cr("Marking Bits: (CMSBitMap*) " PTR_FORMAT, p2i(bitmap));
2221     bitmap->print_on_error(st, " Bits: ");
2222 
2223     st->cr();
2224 
2225     CMSBitMap* mut_bitmap = &collector->_modUnionTable;
2226     st->print_cr("Mod Union Table: (CMSBitMap*) " PTR_FORMAT, p2i(mut_bitmap));
2227     mut_bitmap->print_on_error(st, " Bits: ");
2228   }
2229 }
2230 
2231 ////////////////////////////////////////////////////////
2232 // CMS Verification Support
2233 ////////////////////////////////////////////////////////
2234 // Following the remark phase, the following invariant
2235 // should hold -- each object in the CMS heap which is
2236 // marked in the verification_mark_bm() should be marked in markBitMap().
2237 
2238 class VerifyMarkedClosure: public BitMapClosure {
2239   CMSBitMap* _marks;
2240   bool       _failed;
2241 
2242  public:
2243   VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
2244 
2245   bool do_bit(size_t offset) {
2246     HeapWord* addr = _marks->offsetToHeapWord(offset);
2247     if (!_marks->isMarked(addr)) {
2248       Log(gc, verify) log;
2249       ResourceMark rm;
2250       oop(addr)->print_on(log.error_stream());
2251       log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
2252       _failed = true;
2253     }
2254     return true;
2255   }
2256 
2257   bool failed() { return _failed; }
2258 };
2259 
2260 bool CMSCollector::verify_after_remark() {
2261   GCTraceTime(Info, gc, phases, verify) tm("Verifying CMS Marking.");
2262   MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2263   static bool init = false;
2264 
2265   assert(SafepointSynchronize::is_at_safepoint(),
2266          "Else mutations in object graph will make answer suspect");
2267   assert(have_cms_token(),
2268          "Else there may be mutual interference in use of "
2269          " verification data structures");
2270   assert(_collectorState > Marking && _collectorState <= Sweeping,
2271          "Else marking info checked here may be obsolete");
2272   assert(haveFreelistLocks(), "must hold free list locks");
2273   assert_lock_strong(bitMapLock());
2274 
2275 
2276   // Allocate marking bit map if not already allocated
2277   if (!init) { // first time
2278     if (!verification_mark_bm()->allocate(_span)) {
2279       return false;
2280     }
2281     init = true;
2282   }
2283 
2284   assert(verification_mark_stack()->isEmpty(), "Should be empty");
2285 
2286   // Turn off refs discovery -- so we will be tracing through refs.
2287   // This is as intended, because by this time
2288   // GC must already have cleared any refs that need to be cleared,
2289   // and traced those that need to be marked; moreover,
2290   // the marking done here is not going to interfere in any
2291   // way with the marking information used by GC.
2292   NoRefDiscovery no_discovery(ref_processor());
2293 
2294 #if defined(COMPILER2) || INCLUDE_JVMCI
2295   DerivedPointerTableDeactivate dpt_deact;
2296 #endif
2297 
2298   // Clear any marks from a previous round
2299   verification_mark_bm()->clear_all();
2300   assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
2301   verify_work_stacks_empty();
2302 
2303   GenCollectedHeap* gch = GenCollectedHeap::heap();
2304   gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
2305   // Update the saved marks which may affect the root scans.
2306   gch->save_marks();
2307 
2308   if (CMSRemarkVerifyVariant == 1) {
2309     // In this first variant of verification, we complete
2310     // all marking, then check if the new marks-vector is
2311     // a subset of the CMS marks-vector.
2312     verify_after_remark_work_1();
2313   } else {
2314     guarantee(CMSRemarkVerifyVariant == 2, "Range checking for CMSRemarkVerifyVariant should guarantee 1 or 2");
2315     // In this second variant of verification, we flag an error
2316     // (i.e. an object reachable in the new marks-vector not reachable
2317     // in the CMS marks-vector) immediately, also indicating the
2318     // identity of an object (A) that references the unmarked object (B) --
2319     // presumably, a mutation to A failed to be picked up by preclean/remark?
2320     verify_after_remark_work_2();
2321   }
2322 
2323   return true;
2324 }
2325 
2326 void CMSCollector::verify_after_remark_work_1() {
2327   ResourceMark rm;
2328   HandleMark  hm;
2329   GenCollectedHeap* gch = GenCollectedHeap::heap();
2330 
2331   // Get a clear set of claim bits for the roots processing to work with.
2332   ClassLoaderDataGraph::clear_claimed_marks();
2333 
2334   // Mark from roots one level into CMS
2335   MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
2336   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2337 
2338   {
2339     StrongRootsScope srs(1);
2340 
2341     gch->cms_process_roots(&srs,
2342                            true,   // young gen as roots
2343                            GenCollectedHeap::ScanningOption(roots_scanning_options()),
2344                            should_unload_classes(),
2345                            &notOlder,
2346                            NULL);
2347   }
2348 
2349   // Now mark from the roots
2350   MarkFromRootsClosure markFromRootsClosure(this, _span,
2351     verification_mark_bm(), verification_mark_stack(),
2352     false /* don't yield */, true /* verifying */);
2353   assert(_restart_addr == NULL, "Expected pre-condition");
2354   verification_mark_bm()->iterate(&markFromRootsClosure);
2355   while (_restart_addr != NULL) {
2356     // Deal with stack overflow: by restarting at the indicated
2357     // address.
2358     HeapWord* ra = _restart_addr;
2359     markFromRootsClosure.reset(ra);
2360     _restart_addr = NULL;
2361     verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2362   }
2363   assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2364   verify_work_stacks_empty();
2365 
2366   // Marking completed -- now verify that each bit marked in
2367   // verification_mark_bm() is also marked in markBitMap(); flag all
2368   // errors by printing corresponding objects.
2369   VerifyMarkedClosure vcl(markBitMap());
2370   verification_mark_bm()->iterate(&vcl);
2371   if (vcl.failed()) {
2372     Log(gc, verify) log;
2373     log.error("Failed marking verification after remark");
2374     ResourceMark rm;
2375     gch->print_on(log.error_stream());
2376     fatal("CMS: failed marking verification after remark");
2377   }
2378 }
2379 
2380 class VerifyKlassOopsKlassClosure : public KlassClosure {
2381   class VerifyKlassOopsClosure : public OopClosure {
2382     CMSBitMap* _bitmap;
2383    public:
2384     VerifyKlassOopsClosure(CMSBitMap* bitmap) : _bitmap(bitmap) { }
2385     void do_oop(oop* p)       { guarantee(*p == NULL || _bitmap->isMarked((HeapWord*) *p), "Should be marked"); }
2386     void do_oop(narrowOop* p) { ShouldNotReachHere(); }
2387   } _oop_closure;
2388  public:
2389   VerifyKlassOopsKlassClosure(CMSBitMap* bitmap) : _oop_closure(bitmap) {}
2390   void do_klass(Klass* k) {
2391     k->oops_do(&_oop_closure);
2392   }
2393 };
2394 
2395 void CMSCollector::verify_after_remark_work_2() {
2396   ResourceMark rm;
2397   HandleMark  hm;
2398   GenCollectedHeap* gch = GenCollectedHeap::heap();
2399 
2400   // Get a clear set of claim bits for the roots processing to work with.
2401   ClassLoaderDataGraph::clear_claimed_marks();
2402 
2403   // Mark from roots one level into CMS
2404   MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
2405                                      markBitMap());
2406   CLDToOopClosure cld_closure(&notOlder, true);
2407 
2408   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2409 
2410   {
2411     StrongRootsScope srs(1);
2412 
2413     gch->cms_process_roots(&srs,
2414                            true,   // young gen as roots
2415                            GenCollectedHeap::ScanningOption(roots_scanning_options()),
2416                            should_unload_classes(),
2417                            &notOlder,
2418                            &cld_closure);
2419   }
2420 
2421   // Now mark from the roots
2422   MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
2423     verification_mark_bm(), markBitMap(), verification_mark_stack());
2424   assert(_restart_addr == NULL, "Expected pre-condition");
2425   verification_mark_bm()->iterate(&markFromRootsClosure);
2426   while (_restart_addr != NULL) {
2427     // Deal with stack overflow: by restarting at the indicated
2428     // address.
2429     HeapWord* ra = _restart_addr;
2430     markFromRootsClosure.reset(ra);
2431     _restart_addr = NULL;
2432     verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2433   }
2434   assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2435   verify_work_stacks_empty();
2436 
2437   VerifyKlassOopsKlassClosure verify_klass_oops(verification_mark_bm());
2438   ClassLoaderDataGraph::classes_do(&verify_klass_oops);
2439 
2440   // Marking completed -- now verify that each bit marked in
2441   // verification_mark_bm() is also marked in markBitMap(); flag all
2442   // errors by printing corresponding objects.
2443   VerifyMarkedClosure vcl(markBitMap());
2444   verification_mark_bm()->iterate(&vcl);
2445   assert(!vcl.failed(), "Else verification above should not have succeeded");
2446 }
2447 
2448 void ConcurrentMarkSweepGeneration::save_marks() {
2449   // delegate to CMS space
2450   cmsSpace()->save_marks();
2451 }
2452 
2453 bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
2454   return cmsSpace()->no_allocs_since_save_marks();
2455 }
2456 
2457 #define CMS_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)    \
2458                                                                 \
2459 void ConcurrentMarkSweepGeneration::                            \
2460 oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
2461   cl->set_generation(this);                                     \
2462   cmsSpace()->oop_since_save_marks_iterate##nv_suffix(cl);      \
2463   cl->reset_generation();                                       \
2464   save_marks();                                                 \
2465 }
2466 
2467 ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN)
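     // The macro above is instantiated once per (closure type, suffix) pair
     // supplied by ALL_SINCE_SAVE_MARKS_CLOSURES, defining, for example (a
     // sketch of the non-virtual "_nv" variant):
     //   void ConcurrentMarkSweepGeneration::
     //   oop_since_save_marks_iterate_nv(ScanClosure* cl) { ... }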
2468 
2469 void
2470 ConcurrentMarkSweepGeneration::oop_iterate(ExtendedOopClosure* cl) {
2471   if (freelistLock()->owned_by_self()) {
2472     Generation::oop_iterate(cl);
2473   } else {
2474     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2475     Generation::oop_iterate(cl);
2476   }
2477 }
2478 
2479 void
2480 ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) {
2481   if (freelistLock()->owned_by_self()) {
2482     Generation::object_iterate(cl);
2483   } else {
2484     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2485     Generation::object_iterate(cl);
2486   }
2487 }
2488 
2489 void
2490 ConcurrentMarkSweepGeneration::safe_object_iterate(ObjectClosure* cl) {
2491   if (freelistLock()->owned_by_self()) {
2492     Generation::safe_object_iterate(cl);
2493   } else {
2494     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2495     Generation::safe_object_iterate(cl);
2496   }
2497 }
2498 
2499 void
2500 ConcurrentMarkSweepGeneration::post_compact() {
2501 }
2502 
2503 void
2504 ConcurrentMarkSweepGeneration::prepare_for_verify() {
2505   // Fix the linear allocation blocks to look like free blocks.
2506 
2507   // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
2508   // are not called when the heap is verified during universe initialization and
2509   // at vm shutdown.
2510   if (freelistLock()->owned_by_self()) {
2511     cmsSpace()->prepare_for_verify();
2512   } else {
2513     MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
2514     cmsSpace()->prepare_for_verify();
2515   }
2516 }
2517 
2518 void
2519 ConcurrentMarkSweepGeneration::verify() {
2520   // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
2521   // are not called when the heap is verified during universe initialization and
2522   // at vm shutdown.
2523   if (freelistLock()->owned_by_self()) {
2524     cmsSpace()->verify();
2525   } else {
2526     MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
2527     cmsSpace()->verify();
2528   }
2529 }
2530 
2531 void CMSCollector::verify() {
2532   _cmsGen->verify();
2533 }
2534 
2535 #ifndef PRODUCT
2536 bool CMSCollector::overflow_list_is_empty() const {
2537   assert(_num_par_pushes >= 0, "Inconsistency");
2538   if (_overflow_list == NULL) {
2539     assert(_num_par_pushes == 0, "Inconsistency");
2540   }
2541   return _overflow_list == NULL;
2542 }
2543 
2544 // The methods verify_work_stacks_empty() and verify_overflow_empty()
2545 // merely consolidate assertion checks that appear to occur together frequently.
2546 void CMSCollector::verify_work_stacks_empty() const {
2547   assert(_markStack.isEmpty(), "Marking stack should be empty");
2548   assert(overflow_list_is_empty(), "Overflow list should be empty");
2549 }
2550 
2551 void CMSCollector::verify_overflow_empty() const {
2552   assert(overflow_list_is_empty(), "Overflow list should be empty");
2553   assert(no_preserved_marks(), "No preserved marks");
2554 }
2555 #endif // PRODUCT
2556 
2557 // Decide if we want to enable class unloading as part of the
2558 // ensuing concurrent GC cycle. We will collect and
2559 // unload classes if it's the case that:
2560 //  (a) class unloading is enabled at the command line, and
2561 //  (b) old gen is getting really full
2562 // NOTE: Provided there is no change in the state of the heap between
2563 // calls to this method, it should have idempotent results. Moreover,
2564 // its results should be monotonically non-decreasing (i.e. they may go
2565 // from false to true, but not from true to false) between successive
2566 // calls between which the heap was not collected. The implementation
2567 // below must thus rely on the property that
2568 // concurrent_cycles_since_last_unload() will not decrease unless a
2569 // collection cycle happens, and that _cmsGen->is_too_full() is itself
2570 // also monotonic in that sense.
2572 void CMSCollector::update_should_unload_classes() {
2573   _should_unload_classes = false;
2574   if (CMSClassUnloadingEnabled) {
2575     _should_unload_classes = (concurrent_cycles_since_last_unload() >=
2576                               CMSClassUnloadingMaxInterval)
2577                            || _cmsGen->is_too_full();
2578   }
2579 }
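     // Note: with the default value of CMSClassUnloadingMaxInterval (0), the
     // interval clause above is trivially satisfied, so with
     // CMSClassUnloadingEnabled set, classes are unloaded on every concurrent
     // cycle, independently of how full the old gen is.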
2580 
2581 bool ConcurrentMarkSweepGeneration::is_too_full() const {
2582   bool res = should_concurrent_collect();
2583   res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
2584   return res;
2585 }
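     // For example, assuming the default CMSIsTooFullPercentage of 98, the
     // generation reports itself "too full" only when a concurrent collection
     // is already indicated and occupancy exceeds 98%.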
2586 
2587 void CMSCollector::setup_cms_unloading_and_verification_state() {
2588   const  bool should_verify =   VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
2589                              || VerifyBeforeExit;
2590   const  int  rso           =   GenCollectedHeap::SO_AllCodeCache;
2591 
2592   // We set the proper root for this CMS cycle here.
2593   if (should_unload_classes()) {   // Should unload classes this cycle
2594     remove_root_scanning_option(rso);  // Shrink the root set appropriately
2595     set_verifying(should_verify);    // Set verification state for this cycle
2596     return;                            // Nothing else needs to be done at this time
2597   }
2598 
2599   // Not unloading classes this cycle
2600   assert(!should_unload_classes(), "Inconsistency!");
2601 
2602   // If we are not unloading classes then add SO_AllCodeCache to root
2603   // scanning options.
2604   add_root_scanning_option(rso);
2605 
2606   if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
2607     set_verifying(true);
2608   } else if (verifying() && !should_verify) {
2609     // We were verifying, but some verification flags got disabled.
2610     set_verifying(false);
2611     // Exclude symbols, strings and code cache elements from root scanning to
2612     // reduce initial mark (IM) and remark (RM) pauses.
2613     remove_root_scanning_option(rso);
2614   }
2615 }
2616 
2617 
2618 #ifndef PRODUCT
2619 HeapWord* CMSCollector::block_start(const void* p) const {
2620   const HeapWord* addr = (HeapWord*)p;
2621   if (_span.contains(p)) {
2622     if (_cmsGen->cmsSpace()->is_in_reserved(addr)) {
2623       return _cmsGen->cmsSpace()->block_start(p);
2624     }
2625   }
2626   return NULL;
2627 }
2628 #endif
2629 
2630 HeapWord*
2631 ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
2632                                                    bool   tlab,
2633                                                    bool   parallel) {
2634   CMSSynchronousYieldRequest yr;
2635   assert(!tlab, "Can't deal with TLAB allocation");
2636   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2637   expand_for_gc_cause(word_size*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_allocation);
2638   if (GCExpandToAllocateDelayMillis > 0) {
2639     os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
2640   }
2641   return have_lock_and_allocate(word_size, tlab);
2642 }
2643 
2644 void ConcurrentMarkSweepGeneration::expand_for_gc_cause(
2645     size_t bytes,
2646     size_t expand_bytes,
2647     CMSExpansionCause::Cause cause)
2648 {
2649 
2650   bool success = expand(bytes, expand_bytes);
2651 
2652   // remember why we expanded; this information is used
2653   // by shouldConcurrentCollect() when making decisions on whether to start
2654   // a new CMS cycle.
2655   if (success) {
2656     set_expansion_cause(cause);
2657     log_trace(gc)("Expanded CMS gen for %s",  CMSExpansionCause::to_string(cause));
2658   }
2659 }
2660 
2661 HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
2662   HeapWord* res = NULL;
2663   MutexLocker x(ParGCRareEvent_lock);
2664   while (true) {
2665     // Expansion by some other thread might make alloc OK now:
2666     res = ps->lab.alloc(word_sz);
2667     if (res != NULL) return res;
2668     // If there's not enough expansion space available, give up.
2669     if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
2670       return NULL;
2671     }
2672     // Otherwise, we try expansion.
2673     expand_for_gc_cause(word_sz*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_lab);
2674     // Now go around the loop and try alloc again;
2675     // A competing par_promote might beat us to the expansion space,
2676     // so we may go around the loop again if promotion fails again.
2677     if (GCExpandToAllocateDelayMillis > 0) {
2678       os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
2679     }
2680   }
2681 }
2682 
2683 
2684 bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
2685   PromotionInfo* promo) {
2686   MutexLocker x(ParGCRareEvent_lock);
2687   size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
2688   while (true) {
2689     // Expansion by some other thread might make alloc OK now:
2690     if (promo->ensure_spooling_space()) {
2691       assert(promo->has_spooling_space(),
2692              "Post-condition of successful ensure_spooling_space()");
2693       return true;
2694     }
2695     // If there's not enough expansion space available, give up.
2696     if (_virtual_space.uncommitted_size() < refill_size_bytes) {
2697       return false;
2698     }
2699     // Otherwise, we try expansion.
2700     expand_for_gc_cause(refill_size_bytes, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_spooling_space);
2701     // Now go around the loop and try alloc again;
2702     // A competing allocation might beat us to the expansion space,
2703     // so we may go around the loop again if allocation fails again.
2704     if (GCExpandToAllocateDelayMillis > 0) {
2705       os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
2706     }
2707   }
2708 }
2709 
2710 void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
2711   // Only shrink if a compaction was done so that all the free space
2712   // in the generation is in a contiguous block at the end.
2713   if (did_compact()) {
2714     CardGeneration::shrink(bytes);
2715   }
2716 }
2717 
2718 void ConcurrentMarkSweepGeneration::assert_correct_size_change_locking() {
2719   assert_locked_or_safepoint(Heap_lock);
2720 }
2721 
2722 void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) {
2723   assert_locked_or_safepoint(Heap_lock);
2724   assert_lock_strong(freelistLock());
2725   log_trace(gc)("Shrinking of CMS not yet implemented");
2726   return;
2727 }
2728 
2729 
2730 // Simple ctor/dtor wrapper for accounting & timer chores around concurrent
2731 // phases.
2732 class CMSPhaseAccounting: public StackObj {
2733  public:
2734   CMSPhaseAccounting(CMSCollector *collector,
2735                      const char *title);
2736   ~CMSPhaseAccounting();
2737 
2738  private:
2739   CMSCollector *_collector;
2740   const char *_title;
2741   GCTraceConcTime(Info, gc) _trace_time;
2742 
2743  public:
2744   // Not MT-safe; so do not pass around these StackObj's
2745   // where they may be accessed by other threads.
2746   double wallclock_millis() {
2747     return TimeHelper::counter_to_millis(os::elapsed_counter() - _trace_time.start_time());
2748   }
2749 };
2750 
2751 CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
2752                                        const char *title) :
2753   _collector(collector), _title(title), _trace_time(title) {
2754 
2755   _collector->resetYields();
2756   _collector->resetTimer();
2757   _collector->startTimer();
2758   _collector->gc_timer_cm()->register_gc_concurrent_start(title);
2759 }
2760 
2761 CMSPhaseAccounting::~CMSPhaseAccounting() {
2762   _collector->gc_timer_cm()->register_gc_concurrent_end();
2763   _collector->stopTimer();
2764   log_debug(gc)("Concurrent active time: %.3fms", TimeHelper::counter_to_millis(_collector->timerTicks()));
2765   log_trace(gc)(" (CMS %s yielded %d times)", _title, _collector->yields());
2766 }
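     // Typical usage, as in the concurrent phases further below:
     //
     //   GCTraceCPUTime tcpu;
     //   CMSPhaseAccounting pa(this, "Concurrent Mark");
     //   ... phase work, possibly consulting pa.wallclock_millis() ...
     //
     // Construction resets and starts the collector's timer and registers the
     // concurrent start with the GC timer; destruction registers the end,
     // stops the timer and logs the active time and yield count.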
2767 
2768 // CMS work
2769 
2770 // The common parts of CMSParInitialMarkTask and CMSParRemarkTask.
2771 class CMSParMarkTask : public AbstractGangTask {
2772  protected:
2773   CMSCollector*     _collector;
2774   uint              _n_workers;
2775   CMSParMarkTask(const char* name, CMSCollector* collector, uint n_workers) :
2776       AbstractGangTask(name),
2777       _collector(collector),
2778       _n_workers(n_workers) {}
2779   // Work method in support of parallel rescan ... of young gen spaces
2780   void do_young_space_rescan(OopsInGenClosure* cl,
2781                              ContiguousSpace* space,
2782                              HeapWord** chunk_array, size_t chunk_top);
2783   void work_on_young_gen_roots(OopsInGenClosure* cl);
2784 };
2785 
2786 // Parallel initial mark task
2787 class CMSParInitialMarkTask: public CMSParMarkTask {
2788   StrongRootsScope* _strong_roots_scope;
2789  public:
2790   CMSParInitialMarkTask(CMSCollector* collector, StrongRootsScope* strong_roots_scope, uint n_workers) :
2791       CMSParMarkTask("Scan roots and young gen for initial mark in parallel", collector, n_workers),
2792       _strong_roots_scope(strong_roots_scope) {}
2793   void work(uint worker_id);
2794 };
2795 
2796 // Checkpoint the roots into this generation from outside
2797 // this generation. [Note this initial checkpoint need only
2798 // be approximate -- we'll do a catch up phase subsequently.]
2799 void CMSCollector::checkpointRootsInitial() {
2800   assert(_collectorState == InitialMarking, "Wrong collector state");
2801   check_correct_thread_executing();
2802   TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
2803 
2804   save_heap_summary();
2805   report_heap_summary(GCWhen::BeforeGC);
2806 
2807   ReferenceProcessor* rp = ref_processor();
2808   assert(_restart_addr == NULL, "Control point invariant");
2809   {
2810     // acquire locks for subsequent manipulations
2811     MutexLockerEx x(bitMapLock(),
2812                     Mutex::_no_safepoint_check_flag);
2813     checkpointRootsInitialWork();
2814     // enable ("weak") refs discovery
2815     rp->enable_discovery();
2816     _collectorState = Marking;
2817   }
2818 }
2819 
2820 void CMSCollector::checkpointRootsInitialWork() {
2821   assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
2822   assert(_collectorState == InitialMarking, "just checking");
2823 
2824   // Already have locks.
2825   assert_lock_strong(bitMapLock());
2826   assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
2827 
2828   // Setup the verification and class unloading state for this
2829   // CMS collection cycle.
2830   setup_cms_unloading_and_verification_state();
2831 
2832   GCTraceTime(Trace, gc, phases) ts("checkpointRootsInitialWork", _gc_timer_cm);
2833 
2834   // Reset all the PLAB chunk arrays if necessary.
2835   if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
2836     reset_survivor_plab_arrays();
2837   }
2838 
2839   ResourceMark rm;
2840   HandleMark  hm;
2841 
2842   MarkRefsIntoClosure notOlder(_span, &_markBitMap);
2843   GenCollectedHeap* gch = GenCollectedHeap::heap();
2844 
2845   verify_work_stacks_empty();
2846   verify_overflow_empty();
2847 
2848   gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
2849   // Update the saved marks which may affect the root scans.
2850   gch->save_marks();
2851 
2852   // weak reference processing has not started yet.
2853   ref_processor()->set_enqueuing_is_done(false);
2854 
2855   // Need to remember all newly created CLDs,
2856   // so that we can guarantee that the remark finds them.
2857   ClassLoaderDataGraph::remember_new_clds(true);
2858 
2859   // Whenever a CLD is found, it will be claimed before proceeding to mark
2860   // the klasses. The claimed marks need to be cleared before marking starts.
2861   ClassLoaderDataGraph::clear_claimed_marks();
2862 
2863   print_eden_and_survivor_chunk_arrays();
2864 
2865   {
2866 #if defined(COMPILER2) || INCLUDE_JVMCI
2867     DerivedPointerTableDeactivate dpt_deact;
2868 #endif
2869     if (CMSParallelInitialMarkEnabled) {
2870       // The parallel version.
2871       WorkGang* workers = gch->workers();
2872       assert(workers != NULL, "Need parallel worker threads.");
2873       uint n_workers = workers->active_workers();
2874 
2875       StrongRootsScope srs(n_workers);
2876 
2877       CMSParInitialMarkTask tsk(this, &srs, n_workers);
2878       initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
2879       // If the total number of workers is greater than 1, then more than
2880       // one worker may be used at some point and the initialization has
2881       // been set up such that the single-threaded path cannot be used.
2882       if (workers->total_workers() > 1) {
2883         workers->run_task(&tsk);
2884       } else {
2885         tsk.work(0);
2886       }
2887     } else {
2888       // The serial version.
2889       CLDToOopClosure cld_closure(&notOlder, true);
2890       gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2891 
2892       StrongRootsScope srs(1);
2893 
2894       gch->cms_process_roots(&srs,
2895                              true,   // young gen as roots
2896                              GenCollectedHeap::ScanningOption(roots_scanning_options()),
2897                              should_unload_classes(),
2898                              &notOlder,
2899                              &cld_closure);
2900     }
2901   }
2902 
2903   // Clear mod-union table; it will be dirtied in the prologue of
2904   // the CMS generation for each young generation collection.
2905 
2906   assert(_modUnionTable.isAllClear(),
2907        "Was cleared in most recent final checkpoint phase"
2908        " or no bits are set in the gc_prologue before the start of the"
2909        " next marking phase.");
2910 
2911   assert(_ct->klass_rem_set()->mod_union_is_clear(), "Must be");
2912 
2913   // Save the end of the used_region of the constituent generations
2914   // to be used to limit the extent of sweep in each generation.
2915   save_sweep_limits();
2916   verify_overflow_empty();
2917 }
2918 
2919 bool CMSCollector::markFromRoots() {
2920   // we might be tempted to assert that:
2921   // assert(!SafepointSynchronize::is_at_safepoint(),
2922   //        "inconsistent argument?");
2923   // However, that wouldn't be right, because it's possible that
2924   // a safepoint is indeed in progress as a young generation
2925   // stop-the-world GC happens even as we mark in this generation.
2926   assert(_collectorState == Marking, "inconsistent state?");
2927   check_correct_thread_executing();
2928   verify_overflow_empty();
2929 
2930   // Weak ref discovery note: We may be discovering weak
2931   // refs in this generation concurrent (but interleaved) with
2932   // weak ref discovery by the young generation collector.
2933 
2934   CMSTokenSyncWithLocks ts(true, bitMapLock());
2935   GCTraceCPUTime tcpu;
2936   CMSPhaseAccounting pa(this, "Concurrent Mark");
2937   bool res = markFromRootsWork();
2938   if (res) {
2939     _collectorState = Precleaning;
2940   } else { // We failed and a foreground collection wants to take over
2941     assert(_foregroundGCIsActive, "internal state inconsistency");
2942     assert(_restart_addr == NULL,  "foreground will restart from scratch");
2943     log_debug(gc)("bailing out to foreground collection");
2944   }
2945   verify_overflow_empty();
2946   return res;
2947 }
2948 
2949 bool CMSCollector::markFromRootsWork() {
2950   // iterate over marked bits in bit map, doing a full scan and mark
2951   // from these roots using the following algorithm:
2952   // . if oop is to the right of the current scan pointer,
2953   //   mark corresponding bit (we'll process it later)
2954   // . else (oop is to left of current scan pointer)
2955   //   push oop on marking stack
2956   // . drain the marking stack
2957 
2958   // Note that when we do a marking step we need to hold the
2959   // bit map lock -- recall that direct allocation (by mutators)
2960   // and promotion (by the young generation collector) is also
2961   // marking the bit map. [the so-called allocate live policy.]
2962   // Because the implementation of bit map marking is not
2963   // robust wrt simultaneous marking of bits in the same word,
2964   // we need to make sure that there is no such interference
2965   // between concurrent such updates.
2966 
2967   // already have locks
2968   assert_lock_strong(bitMapLock());
2969 
2970   verify_work_stacks_empty();
2971   verify_overflow_empty();
2972   bool result = false;
2973   if (CMSConcurrentMTEnabled && ConcGCThreads > 0) {
2974     result = do_marking_mt();
2975   } else {
2976     result = do_marking_st();
2977   }
2978   return result;
2979 }
2980 
2981 // Forward decl
2982 class CMSConcMarkingTask;
2983 
2984 class CMSConcMarkingTerminator: public ParallelTaskTerminator {
2985   CMSCollector*       _collector;
2986   CMSConcMarkingTask* _task;
2987  public:
2988   virtual void yield();
2989 
2990   // "n_threads" is the number of threads to be terminated.
2991   // "queue_set" is a set of work queues of other threads.
2992   // "collector" is the CMS collector associated with this task terminator.
2993   // "yield" indicates whether we need the gang as a whole to yield.
2994   CMSConcMarkingTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) :
2995     ParallelTaskTerminator(n_threads, queue_set),
2996     _collector(collector) { }
2997 
2998   void set_task(CMSConcMarkingTask* task) {
2999     _task = task;
3000   }
3001 };
3002 
3003 class CMSConcMarkingTerminatorTerminator: public TerminatorTerminator {
3004   CMSConcMarkingTask* _task;
3005  public:
3006   bool should_exit_termination();
3007   void set_task(CMSConcMarkingTask* task) {
3008     _task = task;
3009   }
3010 };
3011 
3012 // MT Concurrent Marking Task
3013 class CMSConcMarkingTask: public YieldingFlexibleGangTask {
3014   CMSCollector*             _collector;
3015   uint                      _n_workers;      // requested/desired # workers
3016   bool                      _result;
3017   CompactibleFreeListSpace* _cms_space;
3018   char                      _pad_front[64];   // padding to ...
3019   HeapWord* volatile        _global_finger;   // ... avoid sharing cache line
3020   char                      _pad_back[64];
3021   HeapWord*                 _restart_addr;
3022 
3023   //  Exposed here for yielding support
3024   Mutex* const _bit_map_lock;
3025 
3026   // The per thread work queues, available here for stealing
3027   OopTaskQueueSet*  _task_queues;
3028 
3029   // Termination (and yielding) support
3030   CMSConcMarkingTerminator _term;
3031   CMSConcMarkingTerminatorTerminator _term_term;
3032 
3033  public:
3034   CMSConcMarkingTask(CMSCollector* collector,
3035                  CompactibleFreeListSpace* cms_space,
3036                  YieldingFlexibleWorkGang* workers,
3037                  OopTaskQueueSet* task_queues):
3038     YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
3039     _collector(collector),
3040     _cms_space(cms_space),
3041     _n_workers(0), _result(true),
3042     _task_queues(task_queues),
3043     _term(_n_workers, task_queues, _collector),
3044     _bit_map_lock(collector->bitMapLock())
3045   {
3046     _requested_size = _n_workers;
3047     _term.set_task(this);
3048     _term_term.set_task(this);
3049     _restart_addr = _global_finger = _cms_space->bottom();
3050   }
3051 
3052 
3053   OopTaskQueueSet* task_queues()  { return _task_queues; }
3054 
3055   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
3056 
3057   HeapWord* volatile* global_finger_addr() { return &_global_finger; }
3058 
3059   CMSConcMarkingTerminator* terminator() { return &_term; }
3060 
3061   virtual void set_for_termination(uint active_workers) {
3062     terminator()->reset_for_reuse(active_workers);
3063   }
3064 
3065   void work(uint worker_id);
3066   bool should_yield() {
3067     return    ConcurrentMarkSweepThread::should_yield()
3068            && !_collector->foregroundGCIsActive();
3069   }
3070 
3071   virtual void coordinator_yield();  // stuff done by coordinator
3072   bool result() { return _result; }
3073 
3074   void reset(HeapWord* ra) {
3075     assert(_global_finger >= _cms_space->end(),  "Postcondition of ::work(i)");
3076     _restart_addr = _global_finger = ra;
3077     _term.reset_for_reuse();
3078   }
3079 
3080   static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3081                                            OopTaskQueue* work_q);
3082 
3083  private:
3084   void do_scan_and_mark(int i, CompactibleFreeListSpace* sp);
3085   void do_work_steal(int i);
3086   void bump_global_finger(HeapWord* f);
3087 };
3088 
3089 bool CMSConcMarkingTerminatorTerminator::should_exit_termination() {
3090   assert(_task != NULL, "Error");
3091   return _task->yielding();
3092   // Note that we do not need the disjunct || _task->should_yield() above
3093   // because we want terminating threads to yield only if the task
3094   // is already in the midst of yielding, which happens only after at least one
3095   // thread has yielded.
3096 }
3097 
3098 void CMSConcMarkingTerminator::yield() {
3099   if (_task->should_yield()) {
3100     _task->yield();
3101   } else {
3102     ParallelTaskTerminator::yield();
3103   }
3104 }
3105 
3106 ////////////////////////////////////////////////////////////////
3107 // Concurrent Marking Algorithm Sketch
3108 ////////////////////////////////////////////////////////////////
3109 // Until all tasks exhausted (both spaces):
3110 // -- claim next available chunk
3111 // -- bump global finger via CAS
3112 // -- find first object that starts in this chunk
3113 //    and start scanning bitmap from that position
3114 // -- scan marked objects for oops
3115 // -- CAS-mark target, and if successful:
3116 //    . if target oop is above global finger (volatile read)
3117 //      nothing to do
3118 //    . if target oop is in chunk and above local finger
3119 //        then nothing to do
3120 //    . else push on work-queue
3121 // -- Deal with possible overflow issues:
3122 //    . local work-queue overflow causes stuff to be pushed on
3123 //      global (common) overflow queue
3124 //    . always first empty local work queue
3125 //    . then get a batch of oops from global work queue if any
3126 //    . then do work stealing
3127 // -- When all tasks claimed (both spaces)
3128 //    and local work queue empty,
3129 //    then in a loop do:
3130 //    . check global overflow stack; steal a batch of oops and trace
3131 //    . try to steal from other threads if GOS is empty
3132 //    . if neither is available, offer termination
3133 // -- Terminate and return result
3134 //
3135 void CMSConcMarkingTask::work(uint worker_id) {
3136   elapsedTimer _timer;
3137   ResourceMark rm;
3138   HandleMark hm;
3139 
3140   DEBUG_ONLY(_collector->verify_overflow_empty();)
3141 
3142   // Before we begin work, our work queue should be empty
3143   assert(work_queue(worker_id)->size() == 0, "Expected to be empty");
3144   // Scan the bitmap covering _cms_space, tracing through grey objects.
3145   _timer.start();
3146   do_scan_and_mark(worker_id, _cms_space);
3147   _timer.stop();
3148   log_trace(gc, task)("Finished cms space scanning in %dth thread: %3.3f sec", worker_id, _timer.seconds());
3149 
3150   // ... do work stealing
3151   _timer.reset();
3152   _timer.start();
3153   do_work_steal(worker_id);
3154   _timer.stop();
3155   log_trace(gc, task)("Finished work stealing in %dth thread: %3.3f sec", worker_id, _timer.seconds());
3156   assert(_collector->_markStack.isEmpty(), "Should have been emptied");
3157   assert(work_queue(worker_id)->size() == 0, "Should have been emptied");
3158   // Note that under the current task protocol, the
3159   // following assertion holds true even if the spaces
3160   // have been expanded since the completion of the
3161   // concurrent marking. XXX This will likely change under
3162   // a strict ABORT semantics.
3163   // After perm removal the comparison was changed to
3164   // greater than or equal to from strictly greater than.
3165   // Before perm removal the highest address sweep would
3166   // have been at the end of perm gen but now is at the
3167   // end of the tenured gen.
3168   assert(_global_finger >=  _cms_space->end(),
3169          "All tasks have been completed");
3170   DEBUG_ONLY(_collector->verify_overflow_empty();)
3171 }
3172 
3173 void CMSConcMarkingTask::bump_global_finger(HeapWord* f) {
3174   HeapWord* read = _global_finger;
3175   HeapWord* cur  = read;
3176   while (f > read) {
3177     cur = read;
3178     read = (HeapWord*) Atomic::cmpxchg_ptr(f, &_global_finger, cur);
3179     if (cur == read) {
3180       // our cas succeeded
3181       assert(_global_finger >= f, "protocol consistency");
3182       break;
3183     }
3184   }
3185 }
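     // The loop above is the usual lock-free "advance to maximum" idiom:
     // re-read the published value and retry the CAS until either our value
     // is installed or some other thread has published a value >= f. A
     // minimal generic sketch of the same idiom (illustration only):
     //
     //   HeapWord* old = *addr;
     //   while (f > old) {
     //     HeapWord* prev = (HeapWord*)Atomic::cmpxchg_ptr(f, addr, old);
     //     if (prev == old) break;  // our CAS installed f
     //     old = prev;              // lost the race; retry against new value
     //   }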
3186 
3187 // This is really inefficient, and should be redone by
3188 // using (not yet available) block-read and -write interfaces to the
3189 // stack and the work_queue. XXX FIX ME !!!
3190 bool CMSConcMarkingTask::get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3191                                                       OopTaskQueue* work_q) {
3192   // Fast lock-free check
3193   if (ovflw_stk->length() == 0) {
3194     return false;
3195   }
3196   assert(work_q->size() == 0, "Shouldn't steal");
3197   MutexLockerEx ml(ovflw_stk->par_lock(),
3198                    Mutex::_no_safepoint_check_flag);
3199   // Grab up to 1/4 the size of the work queue
3200   size_t num = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
3201                     (size_t)ParGCDesiredObjsFromOverflowList);
3202   num = MIN2(num, ovflw_stk->length());
3203   for (int i = (int) num; i > 0; i--) {
3204     oop cur = ovflw_stk->pop();
3205     assert(cur != NULL, "Counted wrong?");
3206     work_q->push(cur);
3207   }
3208   return num > 0;
3209 }
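     // The batch size above is min(free queue capacity / 4,
     // ParGCDesiredObjsFromOverflowList, overflow stack length). Assuming the
     // default ParGCDesiredObjsFromOverflowList of 20 and a mostly-empty
     // work queue, the flag is normally the binding limit, so at most 20
     // oops move per call.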
3210 
3211 void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
3212   SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
3213   int n_tasks = pst->n_tasks();
3214   // We allow that there may be no tasks to do here because
3215   // we are restarting after a stack overflow.
3216   assert(pst->valid() || n_tasks == 0, "Uninitialized use?");
3217   uint nth_task = 0;
3218 
3219   HeapWord* aligned_start = sp->bottom();
3220   if (sp->used_region().contains(_restart_addr)) {
3221     // Align down to a card boundary for the start of 0th task
3222     // for this space.
3223     aligned_start = align_down(_restart_addr, CardTableModRefBS::card_size);
3224   }
3225 
3226   size_t chunk_size = sp->marking_task_size();
3227   while (!pst->is_task_claimed(/* reference */ nth_task)) {
3228     // Having claimed the nth task in this space,
3229     // compute the chunk that it corresponds to:
3230     MemRegion span = MemRegion(aligned_start + nth_task*chunk_size,
3231                                aligned_start + (nth_task+1)*chunk_size);
3232     // Try and bump the global finger via a CAS;
3233     // note that we need to do the global finger bump
3234     // _before_ taking the intersection below, because
3235     // the task corresponding to that region will be
3236     // deemed done even if the used_region() expands
3237     // because of allocation -- as it almost certainly will
3238     // during start-up while the threads yield in the
3239     // closure below.
3240     HeapWord* finger = span.end();
3241     bump_global_finger(finger);   // atomically
3242     // There are null tasks here corresponding to chunks
3243     // beyond the "top" address of the space.
3244     span = span.intersection(sp->used_region());
3245     if (!span.is_empty()) {  // Non-null task
3246       HeapWord* prev_obj;
3247       assert(!span.contains(_restart_addr) || nth_task == 0,
3248              "Inconsistency");
3249       if (nth_task == 0) {
3250         // For the 0th task, we'll not need to compute a block_start.
3251         if (span.contains(_restart_addr)) {
3252           // In the case of a restart because of stack overflow,
3253           // we might additionally skip a chunk prefix.
3254           prev_obj = _restart_addr;
3255         } else {
3256           prev_obj = span.start();
3257         }
3258       } else {
3259         // We want to skip the first object because
3260         // the protocol is to scan any object in its entirety
3261         // that _starts_ in this span; a fortiori, any
3262         // object starting in an earlier span is scanned
3263         // as part of an earlier claimed task.
3264         // Below we use the "careful" version of block_start
3265         // so we do not try to navigate uninitialized objects.
3266         prev_obj = sp->block_start_careful(span.start());
3267         // Below we use a variant of block_size that uses the
3268         // Printezis bits to avoid waiting for allocated
3269         // objects to become initialized/parsable.
3270         while (prev_obj < span.start()) {
3271           size_t sz = sp->block_size_no_stall(prev_obj, _collector);
3272           if (sz > 0) {
3273             prev_obj += sz;
3274           } else {
3275             // In this case we may end up doing a bit of redundant
3276             // scanning, but that appears unavoidable, short of
3277             // locking the free list locks; see bug 6324141.
3278             break;
3279           }
3280         }
3281       }
3282       if (prev_obj < span.end()) {
3283         MemRegion my_span = MemRegion(prev_obj, span.end());
3284         // Do the marking work within a non-empty span --
3285         // the last argument to the constructor indicates whether the
3286         // iteration should be incremental with periodic yields.
3287         ParMarkFromRootsClosure cl(this, _collector, my_span,
3288                                    &_collector->_markBitMap,
3289                                    work_queue(i),
3290                                    &_collector->_markStack);
3291         _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
3292       } // else nothing to do for this task
3293     }   // else nothing to do for this task
3294   }
3295   // We'd be tempted to assert here that since there are no
3296   // more tasks left to claim in this space, the global_finger
3297   // must exceed space->top() and a fortiori space->end(). However,
3298   // that would not quite be correct because the bumping of
3299   // global_finger occurs strictly after the claiming of a task,
3300   // so by the time we reach here the global finger may not yet
3301   // have been bumped up by the thread that claimed the last
3302   // task.
3303   pst->all_tasks_completed();
3304 }
3305 
3306 class ParConcMarkingClosure: public MetadataAwareOopClosure {
3307  private:
3308   CMSCollector* _collector;
3309   CMSConcMarkingTask* _task;
3310   MemRegion     _span;
3311   CMSBitMap*    _bit_map;
3312   CMSMarkStack* _overflow_stack;
3313   OopTaskQueue* _work_queue;
3314  protected:
3315   DO_OOP_WORK_DEFN
3316  public:
3317   ParConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue,
3318                         CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
3319     MetadataAwareOopClosure(collector->ref_processor()),
3320     _collector(collector),
3321     _task(task),
3322     _span(collector->_span),
3323     _work_queue(work_queue),
3324     _bit_map(bit_map),
3325     _overflow_stack(overflow_stack)
3326   { }
3327   virtual void do_oop(oop* p);
3328   virtual void do_oop(narrowOop* p);
3329 
3330   void trim_queue(size_t max);
3331   void handle_stack_overflow(HeapWord* lost);
3332   void do_yield_check() {
3333     if (_task->should_yield()) {
3334       _task->yield();
3335     }
3336   }
3337 };
3338 
3339 DO_OOP_WORK_IMPL(ParConcMarkingClosure)
3340 
3341 // Grey object scanning during work stealing phase --
3342 // the salient assumption here is that any references
3343 // that are in these stolen objects being scanned must
3344 // already have been initialized (else they would not have
3345 // been published), so we do not need to check for
3346 // uninitialized objects before pushing here.
3347 void ParConcMarkingClosure::do_oop(oop obj) {
3348   assert(obj->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
3349   HeapWord* addr = (HeapWord*)obj;
3350   // Check if oop points into the CMS generation
3351   // and is not marked
3352   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
3353     // a white object ...
3354     // If we manage to "claim" the object, by being the
3355     // first thread to mark it, then we push it on our
3356     // marking stack
3357     if (_bit_map->par_mark(addr)) {     // ... now grey
3358       // push on work queue (grey set)
3359       bool simulate_overflow = false;
3360       NOT_PRODUCT(
3361         if (CMSMarkStackOverflowALot &&
3362             _collector->simulate_overflow()) {
3363           // simulate a stack overflow
3364           simulate_overflow = true;
3365         }
3366       )
3367       if (simulate_overflow ||
3368           !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
3369         // stack overflow
3370         log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _overflow_stack->capacity());
3371         // We cannot assert that the overflow stack is full because
3372         // it may have been emptied since.
3373         assert(simulate_overflow ||
3374                _work_queue->size() == _work_queue->max_elems(),
3375               "Else push should have succeeded");
3376         handle_stack_overflow(addr);
3377       }
3378     } // Else, some other thread got there first
3379     do_yield_check();
3380   }
3381 }
3382 
3383 void ParConcMarkingClosure::do_oop(oop* p)       { ParConcMarkingClosure::do_oop_work(p); }
3384 void ParConcMarkingClosure::do_oop(narrowOop* p) { ParConcMarkingClosure::do_oop_work(p); }
3385 
3386 void ParConcMarkingClosure::trim_queue(size_t max) {
3387   while (_work_queue->size() > max) {
3388     oop new_oop;
3389     if (_work_queue->pop_local(new_oop)) {
3390       assert(new_oop->is_oop(), "Should be an oop");
3391       assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object");
3392       assert(_span.contains((HeapWord*)new_oop), "Not in span");
3393       new_oop->oop_iterate(this);  // do_oop() above
3394       do_yield_check();
3395     }
3396   }
3397 }
3398 
3399 // Upon stack overflow, we discard (part of) the stack,
3400 // remembering the least address amongst those discarded
3401 // in CMSCollector's _restart_addr.
3402 void ParConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
3403   // We need to do this under a mutex to prevent other
3404   // workers from interfering with the work done below.
3405   MutexLockerEx ml(_overflow_stack->par_lock(),
3406                    Mutex::_no_safepoint_check_flag);
3407   // Remember the least grey address discarded
3408   HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
3409   _collector->lower_restart_addr(ra);
3410   _overflow_stack->reset();  // discard stack contents
3411   _overflow_stack->expand(); // expand the stack if possible
3412 }
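     // The overflow/restart protocol in brief: an overflowing worker discards
     // the overflow stack wholesale, records the least discarded address in
     // _restart_addr (via lower_restart_addr()), and marking is subsequently
     // re-run from that address -- see the "while (_restart_addr != NULL)"
     // loops in do_marking_mt() and do_marking_st() below. Discarding is safe
     // because every discarded oop was already marked in the bit map, so the
     // restarted bitmap scan will revisit it.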
3413 
3414 
3415 void CMSConcMarkingTask::do_work_steal(int i) {
3416   OopTaskQueue* work_q = work_queue(i);
3417   oop obj_to_scan;
3418   CMSBitMap* bm = &(_collector->_markBitMap);
3419   CMSMarkStack* ovflw = &(_collector->_markStack);
3420   int* seed = _collector->hash_seed(i);
3421   ParConcMarkingClosure cl(_collector, this, work_q, bm, ovflw);
3422   while (true) {
3423     cl.trim_queue(0);
3424     assert(work_q->size() == 0, "Should have been emptied above");
3425     if (get_work_from_overflow_stack(ovflw, work_q)) {
3426       // Can't assert below because the work obtained from the
3427       // overflow stack may already have been stolen from us.
3428       // assert(work_q->size() > 0, "Work from overflow stack");
3429       continue;
3430     } else if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
3431       assert(obj_to_scan->is_oop(), "Should be an oop");
3432       assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object");
3433       obj_to_scan->oop_iterate(&cl);
3434     } else if (terminator()->offer_termination(&_term_term)) {
3435       assert(work_q->size() == 0, "Impossible!");
3436       break;
3437     } else if (yielding() || should_yield()) {
3438       yield();
3439     }
3440   }
3441 }
3442 
3443 // This is run by the CMS (coordinator) thread.
3444 void CMSConcMarkingTask::coordinator_yield() {
3445   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
3446          "CMS thread should hold CMS token");
3447   // First give up the locks, then yield, then re-lock
3448   // We should probably use a constructor/destructor idiom to
3449   // do this unlock/lock or modify the MutexUnlocker class to
3450   // serve our purpose. XXX
3451   assert_lock_strong(_bit_map_lock);
3452   _bit_map_lock->unlock();
3453   ConcurrentMarkSweepThread::desynchronize(true);
3454   _collector->stopTimer();
3455   _collector->incrementYields();
3456 
3457   // It is possible for whichever thread initiated the yield request
3458   // not to get a chance to wake up and take the bitmap lock between
3459   // this thread releasing it and reacquiring it. So, while the
3460   // should_yield() flag is on, let's sleep for a bit to give the
3461   // other thread a chance to wake up. The limit imposed on the number
3462 // of iterations is defensive, to avoid any unforeseen circumstances
3463   // putting us into an infinite loop. Since it's always been this
3464   // (coordinator_yield()) method that was observed to cause the
3465   // problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
3466   // which is by default non-zero. For the other seven methods that
3467 // also perform the yield operation, we are using a different
3468   // parameter (CMSYieldSleepCount) which is by default zero. This way we
3469   // can enable the sleeping for those methods too, if necessary.
3470   // See 6442774.
3471   //
3472   // We really need to reconsider the synchronization between the GC
3473   // thread and the yield-requesting threads in the future and we
3474   // should really use wait/notify, which is the recommended
3475   // way of doing this type of interaction. Additionally, we should
3476   // consolidate the eight methods that do the yield operation and they
3477   // are almost identical into one for better maintainability and
3478   // readability. See 6445193.
3479   //
3480   // Tony 2006.06.29
3481   for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
3482                    ConcurrentMarkSweepThread::should_yield() &&
3483                    !CMSCollector::foregroundGCIsActive(); ++i) {
3484     os::sleep(Thread::current(), 1, false);
3485   }
3486 
3487   ConcurrentMarkSweepThread::synchronize(true);
3488   _bit_map_lock->lock_without_safepoint_check();
3489   _collector->startTimer();
3490 }
3491 
3492 bool CMSCollector::do_marking_mt() {
3493   assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
3494   uint num_workers = AdaptiveSizePolicy::calc_active_conc_workers(conc_workers()->total_workers(),
3495                                                                   conc_workers()->active_workers(),
3496                                                                   Threads::number_of_non_daemon_threads());
3497   num_workers = conc_workers()->update_active_workers(num_workers);
3498   log_info(gc,task)("Using %u workers of %u for marking", num_workers, conc_workers()->total_workers());
3499 
3500   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
3501 
3502   CMSConcMarkingTask tsk(this,
3503                          cms_space,
3504                          conc_workers(),
3505                          task_queues());
3506 
3507   // Since the actual number of workers we get may be different
3508   // from the number we requested above, do we need to do anything different
3509 // below? In particular, maybe we need to subclass the SequentialSubTasksDone
3510   // class?? XXX
3511   cms_space->initialize_sequential_subtasks_for_marking(num_workers);
3512 
3513   // Refs discovery is already non-atomic.
3514   assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
3515   assert(ref_processor()->discovery_is_mt(), "Discovery should be MT");
3516   conc_workers()->start_task(&tsk);
3517   while (tsk.yielded()) {
3518     tsk.coordinator_yield();
3519     conc_workers()->continue_task(&tsk);
3520   }
3521   // If the task was aborted, _restart_addr will be non-NULL
3522   assert(tsk.completed() || _restart_addr != NULL, "Inconsistency");
3523   while (_restart_addr != NULL) {
3524     // XXX For now we do not make use of ABORTED state and have not
3525     // yet implemented the right abort semantics (even in the original
3526     // single-threaded CMS case). That needs some more investigation
3527     // and is deferred for now; see CR# TBF. 07252005YSR. XXX
3528     assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency");
3529     // If _restart_addr is non-NULL, a marking stack overflow
3530     // occurred; we need to do a fresh marking iteration from the
3531     // indicated restart address.
3532     if (_foregroundGCIsActive) {
3533       // We may be running into repeated stack overflows, having
3534       // reached the limit of the stack size, while making very
3535       // slow forward progress. It may be best to bail out and
3536       // let the foreground collector do its job.
3537       // Clear _restart_addr, so that foreground GC
3538       // works from scratch. This avoids the headache of
3539       // a "rescan" which would otherwise be needed because
3540       // of the dirty mod union table & card table.
3541       _restart_addr = NULL;
3542       return false;
3543     }
3544     // Adjust the task to restart from _restart_addr
3545     tsk.reset(_restart_addr);
3546     cms_space->initialize_sequential_subtasks_for_marking(num_workers,
3547                   _restart_addr);
3548     _restart_addr = NULL;
3549     // Get the workers going again
3550     conc_workers()->start_task(&tsk);
3551     while (tsk.yielded()) {
3552       tsk.coordinator_yield();
3553       conc_workers()->continue_task(&tsk);
3554     }
3555   }
3556   assert(tsk.completed(), "Inconsistency");
3557   assert(tsk.result() == true, "Inconsistency");
3558   return true;
3559 }
3560 
3561 bool CMSCollector::do_marking_st() {
3562   ResourceMark rm;
3563   HandleMark   hm;
3564 
3565   // Temporarily make refs discovery single threaded (non-MT)
3566   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
3567   MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
3568     &_markStack, CMSYield);
3569   // the last argument to iterate indicates whether the iteration
3570   // should be incremental with periodic yields.
3571   _markBitMap.iterate(&markFromRootsClosure);
3572   // If _restart_addr is non-NULL, a marking stack overflow
3573   // occurred; we need to do a fresh iteration from the
3574   // indicated restart address.
3575   while (_restart_addr != NULL) {
3576     if (_foregroundGCIsActive) {
3577       // We may be running into repeated stack overflows, having
3578       // reached the limit of the stack size, while making very
3579       // slow forward progress. It may be best to bail out and
3580       // let the foreground collector do its job.
3581       // Clear _restart_addr, so that foreground GC
3582       // works from scratch. This avoids the headache of
3583       // a "rescan" which would otherwise be needed because
3584       // of the dirty mod union table & card table.
3585       _restart_addr = NULL;
3586       return false;  // indicating failure to complete marking
3587     }
3588     // Deal with stack overflow:
3589     // we restart marking from _restart_addr
3590     HeapWord* ra = _restart_addr;
3591     markFromRootsClosure.reset(ra);
3592     _restart_addr = NULL;
3593     _markBitMap.iterate(&markFromRootsClosure, ra, _span.end());
3594   }
3595   return true;
3596 }
3597 
3598 void CMSCollector::preclean() {
3599   check_correct_thread_executing();
3600   assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
3601   verify_work_stacks_empty();
3602   verify_overflow_empty();
3603   _abort_preclean = false;
3604   if (CMSPrecleaningEnabled) {
3605     if (!CMSEdenChunksRecordAlways) {
3606       _eden_chunk_index = 0;
3607     }
3608     size_t used = get_eden_used();
3609     size_t capacity = get_eden_capacity();
3610     // Don't start sampling unless we will get sufficiently
3611     // many samples.
3612     if (used < (((capacity / CMSScheduleRemarkSamplingRatio) / 100)
3613                 * CMSScheduleRemarkEdenPenetration)) {
3614       _start_sampling = true;
3615     } else {
3616       _start_sampling = false;
3617     }
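     // Worked example, assuming the default CMSScheduleRemarkSamplingRatio
     // of 5 and CMSScheduleRemarkEdenPenetration of 50: sampling starts only
     // if eden occupancy is below (capacity / 5 / 100) * 50 = 10% of
     // capacity, i.e. only when enough of eden remains unallocated for the
     // samples to be worth taking.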
3618     GCTraceCPUTime tcpu;
3619     CMSPhaseAccounting pa(this, "Concurrent Preclean");
3620     preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
3621   }
3622   CMSTokenSync x(true); // is cms thread
3623   if (CMSPrecleaningEnabled) {
3624     sample_eden();
3625     _collectorState = AbortablePreclean;
3626   } else {
3627     _collectorState = FinalMarking;
3628   }
3629   verify_work_stacks_empty();
3630   verify_overflow_empty();
3631 }
3632 
3633 // Try to schedule the remark such that young gen
3634 // occupancy is CMSScheduleRemarkEdenPenetration %.
3635 void CMSCollector::abortable_preclean() {
3636   check_correct_thread_executing();
3637   assert(CMSPrecleaningEnabled,  "Inconsistent control state");
3638   assert(_collectorState == AbortablePreclean, "Inconsistent control state");
3639 
3640   // If Eden's current occupancy is below this threshold,
3641   // immediately schedule the remark; else preclean
3642   // past the next scavenge in an effort to
3643   // schedule the pause as described above. By choosing
3644   // CMSScheduleRemarkEdenSizeThreshold >= max eden size
3645   // we will never do an actual abortable preclean cycle.
3646   if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
3647     GCTraceCPUTime tcpu;
3648     CMSPhaseAccounting pa(this, "Concurrent Abortable Preclean");
3649     // We need more smarts in the abortable preclean
3650     // loop below to deal with cases where allocation
3651     // in young gen is very very slow, and our precleaning
3652     // is running a losing race against a horde of
3653     // mutators intent on flooding us with CMS updates
3654     // (dirty cards).
3655     // One, admittedly dumb, strategy is to give up
3656     // after a certain number of abortable precleaning loops
3657     // or after a certain maximum time. We want to make
3658     // this smarter in the next iteration.
3659     // XXX FIX ME!!! YSR
3660     size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
3661     while (!(should_abort_preclean() ||
3662              ConcurrentMarkSweepThread::cmst()->should_terminate())) {
3663       workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
3664       cumworkdone += workdone;
3665       loops++;
3666       // Voluntarily terminate abortable preclean phase if we have
3667       // been at it for too long.
3668       if ((CMSMaxAbortablePrecleanLoops != 0) &&
3669           loops >= CMSMaxAbortablePrecleanLoops) {
3670         log_debug(gc)(" CMS: abort preclean due to loops ");
3671         break;
3672       }
3673       if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) {
3674         log_debug(gc)(" CMS: abort preclean due to time ");
3675         break;
3676       }
3677       // If we are doing little work each iteration, we should
3678       // take a short break.
3679       if (workdone < CMSAbortablePrecleanMinWorkPerIteration) {
3680         // Sleep for some time, waiting for work to accumulate
3681         stopTimer();
3682         cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis);
3683         startTimer();
3684         waited++;
3685       }
3686     }
3687   log_trace(gc)(" [" SIZE_FORMAT " iterations, " SIZE_FORMAT " waits, " SIZE_FORMAT " cards] ",
3688                                loops, waited, cumworkdone);
3689   }
3690   CMSTokenSync x(true); // is cms thread
3691   if (_collectorState != Idling) {
3692     assert(_collectorState == AbortablePreclean,
3693            "Spontaneous state transition?");
3694     _collectorState = FinalMarking;
3695   } // Else, a foreground collection completed this CMS cycle.
3696   return;
3697 }
3698 
3699 // Respond to an Eden sampling opportunity
3700 void CMSCollector::sample_eden() {
3701   // Make sure a young gc cannot sneak in between our
3702   // reading and recording of a sample.
3703   assert(Thread::current()->is_ConcurrentGC_thread(),
3704          "Only the cms thread may collect Eden samples");
3705   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
3706          "Should collect samples while holding CMS token");
3707   if (!_start_sampling) {
3708     return;
3709   }
3710   // When CMSEdenChunksRecordAlways is true, the eden chunk array
3711   // is populated by the young generation.
3712   if (_eden_chunk_array != NULL && !CMSEdenChunksRecordAlways) {
3713     if (_eden_chunk_index < _eden_chunk_capacity) {
3714       _eden_chunk_array[_eden_chunk_index] = *_top_addr;   // take sample
3715       assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
3716              "Unexpected state of Eden");
3717       // We'd like to check that what we just sampled is an oop-start address;
3718       // however, we cannot do that here since the object may not yet have been
3719       // initialized. So we'll instead do the check when we _use_ this sample
3720       // later.
3721       if (_eden_chunk_index == 0 ||
3722           (pointer_delta(_eden_chunk_array[_eden_chunk_index],
3723                          _eden_chunk_array[_eden_chunk_index-1])
3724            >= CMSSamplingGrain)) {
3725         _eden_chunk_index++;  // commit sample
3726       }
3727     }
3728   }
3729   if ((_collectorState == AbortablePreclean) && !_abort_preclean) {
3730     size_t used = get_eden_used();
3731     size_t capacity = get_eden_capacity();
3732     assert(used <= capacity, "Unexpected state of Eden");
3733     if (used >  (capacity/100 * CMSScheduleRemarkEdenPenetration)) {
3734       _abort_preclean = true;
3735     }
3736   }
3737 }
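     // For example, assuming the default CMSScheduleRemarkEdenPenetration of
     // 50, the abortable preclean phase is flagged for abort (and the remark
     // thereby scheduled) as soon as eden occupancy crosses 50% of capacity.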
3738 
3739 
3740 size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
3741   assert(_collectorState == Precleaning ||
3742          _collectorState == AbortablePreclean, "incorrect state");
3743   ResourceMark rm;
3744   HandleMark   hm;
3745 
3746   // Precleaning is currently not MT but the reference processor
3747   // may be set for MT.  Disable it temporarily here.
3748   ReferenceProcessor* rp = ref_processor();
3749   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
3750 
3751   // Do one pass of scrubbing the discovered reference lists
3752   // to remove any reference objects with strongly-reachable
3753   // referents.
3754   if (clean_refs) {
3755     CMSPrecleanRefsYieldClosure yield_cl(this);
3756     assert(rp->span().equals(_span), "Spans should be equal");
3757     CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
3758                                    &_markStack, true /* preclean */);
3759     CMSDrainMarkingStackClosure complete_trace(this,
3760                                    _span, &_markBitMap, &_markStack,
3761                                    &keep_alive, true /* preclean */);
3762 
3763     // We don't want this step to interfere with a young
3764     // collection because we don't want to take CPU
3765     // or memory bandwidth away from the young GC threads
3766     // (which may be as many as there are CPUs).
3767     // Note that we don't need to protect ourselves from
3768     // interference with mutators because they can't
3769     // manipulate the discovered reference lists nor affect
3770     // the computed reachability of the referents, the
3771     // only properties manipulated by the precleaning
3772     // of these reference lists.
3773     stopTimer();
3774     CMSTokenSyncWithLocks x(true /* is cms thread */,
3775                             bitMapLock());
3776     startTimer();
3777     sample_eden();
3778 
3779     // The following will yield to allow foreground
3780     // collection to proceed promptly. XXX YSR:
3781     // The code in this method may need further
3782     // tweaking for better performance and some restructuring
3783     // for cleaner interfaces.
3784     GCTimer *gc_timer = NULL; // Currently not tracing concurrent phases
3785     rp->preclean_discovered_references(
3786           rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl,
3787           gc_timer);
3788   }
3789 
3790   if (clean_survivor) {  // preclean the active survivor space(s)
3791     PushAndMarkClosure pam_cl(this, _span, ref_processor(),
3792                              &_markBitMap, &_modUnionTable,
3793                              &_markStack, true /* precleaning phase */);
3794     stopTimer();
3795     CMSTokenSyncWithLocks ts(true /* is cms thread */,
3796                              bitMapLock());
3797     startTimer();
3798     unsigned int before_count =
3799       GenCollectedHeap::heap()->total_collections();
3800     SurvivorSpacePrecleanClosure
3801       sss_cl(this, _span, &_markBitMap, &_markStack,
3802              &pam_cl, before_count, CMSYield);
3803     _young_gen->from()->object_iterate_careful(&sss_cl);
3804     _young_gen->to()->object_iterate_careful(&sss_cl);
3805   }
3806   MarkRefsIntoAndScanClosure
3807     mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
3808              &_markStack, this, CMSYield,
3809              true /* precleaning phase */);
3810   // CAUTION: The following closure has persistent state that may need to
3811   // be reset if the sequence of addresses it processes ever decreases,
3812   // i.e. if addresses are not visited in monotonically increasing order.
3813   ScanMarkedObjectsAgainCarefullyClosure
3814     smoac_cl(this, _span,
3815       &_markBitMap, &_markStack, &mrias_cl, CMSYield);
3816 
3817   // Preclean dirty cards in ModUnionTable and CardTable using
3818   // appropriate convergence criterion;
3819   // repeat CMSPrecleanIter times unless we find that
3820   // we are losing.
3821   assert(CMSPrecleanIter < 10, "CMSPrecleanIter is too large");
3822   assert(CMSPrecleanNumerator < CMSPrecleanDenominator,
3823          "Bad convergence multiplier");
3824   assert(CMSPrecleanThreshold >= 100,
3825          "Unreasonably low CMSPrecleanThreshold");
3826 
3827   size_t numIter, cumNumCards, lastNumCards, curNumCards;
3828   for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0;
3829        numIter < CMSPrecleanIter;
3830        numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
3831     curNumCards  = preclean_mod_union_table(_cmsGen, &smoac_cl);
3832     log_trace(gc)(" (modUnionTable: " SIZE_FORMAT " cards)", curNumCards);
3833     // Either there are very few dirty cards, so the re-mark
3834     // pause will be small anyway, or our pre-cleaning isn't
3835     // that much faster than the rate at which cards are being
3836     // dirtied, so we might as well stop and re-mark since
3837     // precleaning won't improve our re-mark time by much.
3838     if (curNumCards <= CMSPrecleanThreshold ||
3839         (numIter > 0 &&
3840          (curNumCards * CMSPrecleanDenominator >
3841          lastNumCards * CMSPrecleanNumerator))) {
3842       numIter++;
3843       cumNumCards += curNumCards;
3844       break;
3845     }
3846   }
3847 
3848   preclean_klasses(&mrias_cl, _cmsGen->freelistLock());
3849 
3850   curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
3851   cumNumCards += curNumCards;
3852   log_trace(gc)(" (cardTable: " SIZE_FORMAT " cards, re-scanned " SIZE_FORMAT " cards, " SIZE_FORMAT " iterations)",
3853                              curNumCards, cumNumCards, numIter);
3854   return cumNumCards;   // as a measure of useful work done
3855 }
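// A minimal sketch (comment only, not compiled) of the convergence test that
// terminates the preclean iterations in preclean_work() above; "cur" and
// "last" are the dirty-card counts of the current and previous iteration,
// and the helper name is hypothetical.
//
//   static bool preclean_converged(size_t cur, size_t last, size_t iteration) {
//     // Stop when few cards remain, or (after the first pass) when the card
//     // count is no longer shrinking by at least the configured ratio, i.e.
//     // when cur / last > CMSPrecleanNumerator / CMSPrecleanDenominator.
//     return cur <= CMSPrecleanThreshold ||
//            (iteration > 0 &&
//             cur * CMSPrecleanDenominator > last * CMSPrecleanNumerator);
//   }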
3856 
3857 // PRECLEANING NOTES:
3858 // Precleaning involves:
3859 // . reading the bits of the modUnionTable and clearing the set bits.
3860 // . For the cards corresponding to the set bits, we scan the
3861 //   objects on those cards. This means we need the free_list_lock
3862 //   so that we can safely iterate over the CMS space when scanning
3863 //   for oops.
3864 // . When we scan the objects, we'll be both reading and setting
3865 //   marks in the marking bit map, so we'll need the marking bit map.
3866 // . For protecting _collector_state transitions, we take the CGC_lock.
3867 //   Note that any races in the reading of card table entries by the
3868 //   CMS thread on the one hand and the clearing of those entries by the
3869 //   VM thread or the setting of those entries by the mutator threads on the
3870 //   other are quite benign. However, for efficiency it makes sense to keep
3871 //   the VM thread from racing with the CMS thread while the latter is
3872 //   transferring dirty card info to the modUnionTable. We therefore also use
3873 //   the CGC_lock to protect the reading of the card table and the mod union
3874 //   table by the CMS thread.
3875 // . We run concurrently with mutator updates, so scanning
3876 //   needs to be done carefully  -- we should not try to scan
3877 //   potentially uninitialized objects.
3878 //
3879 // Locking strategy: While holding the CGC_lock, we scan over and
3880 // reset a maximal dirty range of the mod union / card tables, then lock
3881 // the free_list_lock and bitmap lock to do a full marking, then
3882 // release these locks; and repeat the cycle. This allows for a
3883 // certain amount of fairness in the sharing of these locks between
3884 // the CMS collector on the one hand, and the VM thread and the
3885 // mutators on the other.
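// A schematic outline (comment only) of the lock/scan cycle described above;
// it is not the exact code of preclean_mod_union_table() or
// preclean_card_table() below, just the shape of one iteration.
//
//   while (more dirty cards && !should_abort_preclean()) {
//     {
//       CMSTokenSync ts(true);                        // hold the CGC_lock token
//       region = get_and_clear_next_dirty_range();    // MUT or card table
//     }                                               // release; VM thread may run
//     {
//       CMSTokenSyncWithLocks ts(true, freelistLock(), bitMapLock());
//       scan_objects_on(region);                      // mark under both locks
//     }                                               // release; yield to foreground GC
//   }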
3886 
3887 // NOTE: preclean_mod_union_table() and preclean_card_table()
3888 // further below are largely identical; if you need to modify
3889 // one of these methods, please check the other method too.
3890 
3891 size_t CMSCollector::preclean_mod_union_table(
3892   ConcurrentMarkSweepGeneration* old_gen,
3893   ScanMarkedObjectsAgainCarefullyClosure* cl) {
3894   verify_work_stacks_empty();
3895   verify_overflow_empty();
3896 
3897   // strategy: starting with the first card, accumulate contiguous
3898   // ranges of dirty cards; clear these cards, then scan the region
3899   // covered by these cards.
3900 
3901   // Since all of the MUT is committed ahead, we can just use
3902   // that, in case the generations expand while we are precleaning.
3903   // It might also be fine to just use the committed part of the
3904   // generation, but we might potentially miss cards when the
3905   // generation is rapidly expanding while we are in the midst
3906   // of precleaning.
3907   HeapWord* startAddr = old_gen->reserved().start();
3908   HeapWord* endAddr   = old_gen->reserved().end();
3909 
3910   cl->setFreelistLock(old_gen->freelistLock());   // needed for yielding
3911 
3912   size_t numDirtyCards, cumNumDirtyCards;
3913   HeapWord *nextAddr, *lastAddr;
3914   for (cumNumDirtyCards = numDirtyCards = 0,
3915        nextAddr = lastAddr = startAddr;
3916        nextAddr < endAddr;
3917        nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
3918 
3919     ResourceMark rm;
3920     HandleMark   hm;
3921 
3922     MemRegion dirtyRegion;
3923     {
3924       stopTimer();
3925       // Potential yield point
3926       CMSTokenSync ts(true);
3927       startTimer();
3928       sample_eden();
3929       // Get dirty region starting at nextAddr (inclusive),
3930       // simultaneously clearing it.
3931       dirtyRegion =
3932         _modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr);
3933       assert(dirtyRegion.start() >= nextAddr,
3934              "returned region inconsistent?");
3935     }
3936     // Remember where the next search should begin.
3937     // The returned region (if non-empty) is a right open interval,
3938     // so lastAddr is obtained from the right end of that
3939     // interval.
3940     lastAddr = dirtyRegion.end();
3941     // Should do something more transparent and less hacky XXX
3942     numDirtyCards =
3943       _modUnionTable.heapWordDiffToOffsetDiff(dirtyRegion.word_size());
3944 
3945     // We'll scan the cards in the dirty region (with periodic
3946     // yields for foreground GC as needed).
3947     if (!dirtyRegion.is_empty()) {
3948       assert(numDirtyCards > 0, "consistency check");
3949       HeapWord* stop_point = NULL;
3950       stopTimer();
3951       // Potential yield point
3952       CMSTokenSyncWithLocks ts(true, old_gen->freelistLock(),
3953                                bitMapLock());
3954       startTimer();
3955       {
3956         verify_work_stacks_empty();
3957         verify_overflow_empty();
3958         sample_eden();
3959         stop_point =
3960           old_gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
3961       }
3962       if (stop_point != NULL) {
3963         // The careful iteration stopped early either because it found an
3964         // uninitialized object, or because we were in the midst of an
3965         // "abortable preclean", which should now be aborted. Redirty
3966         // the bits corresponding to the partially-scanned or unscanned
3967         // cards. We'll either restart at the next block boundary or
3968         // abort the preclean.
3969         assert((_collectorState == AbortablePreclean && should_abort_preclean()),
3970                "Should only be AbortablePreclean.");
3971         _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end()));
3972         if (should_abort_preclean()) {
3973           break; // out of preclean loop
3974         } else {
3975           // Compute the next address at which preclean should pick up;
3976           // might need bitMapLock in order to read P-bits.
3977           lastAddr = next_card_start_after_block(stop_point);
3978         }
3979       }
3980     } else {
3981       assert(lastAddr == endAddr, "consistency check");
3982       assert(numDirtyCards == 0, "consistency check");
3983       break;
3984     }
3985   }
3986   verify_work_stacks_empty();
3987   verify_overflow_empty();
3988   return cumNumDirtyCards;
3989 }
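// A minimal sketch (comment only, not compiled): the number of dirty cards
// covered by a dirty region is its word size divided by the card size in
// words; heapWordDiffToOffsetDiff() above performs the equivalent computation
// in terms of the mod union table's bit offsets. The helper name is
// hypothetical.
//
//   static size_t dirty_cards_in(MemRegion mr) {
//     return mr.word_size() / CardTableModRefBS::card_size_in_words;
//   }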
3990 
3991 // NOTE: preclean_mod_union_table() above and preclean_card_table()
3992 // below are largely identical; if you need to modify
3993 // one of these methods, please check the other method too.
3994 
3995 size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* old_gen,
3996   ScanMarkedObjectsAgainCarefullyClosure* cl) {
3997   // strategy: it's similar to preclean_mod_union_table above, in that
3998   // we accumulate contiguous ranges of dirty cards, mark these cards
3999   // precleaned, then scan the region covered by these cards.
4000   HeapWord* endAddr   = (HeapWord*)(old_gen->_virtual_space.high());
4001   HeapWord* startAddr = (HeapWord*)(old_gen->_virtual_space.low());
4002 
4003   cl->setFreelistLock(old_gen->freelistLock());   // needed for yielding
4004 
4005   size_t numDirtyCards, cumNumDirtyCards;
4006   HeapWord *lastAddr, *nextAddr;
4007 
4008   for (cumNumDirtyCards = numDirtyCards = 0,
4009        nextAddr = lastAddr = startAddr;
4010        nextAddr < endAddr;
4011        nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4012 
4013     ResourceMark rm;
4014     HandleMark   hm;
4015 
4016     MemRegion dirtyRegion;
4017     {
4018       // See comments in "Precleaning notes" above on why we
4019       // do this locking. XXX Could the locking overheads be
4020       // too high when dirty cards are sparse? [I don't think so.]
4021       stopTimer();
4022       CMSTokenSync x(true); // is cms thread
4023       startTimer();
4024       sample_eden();
4025       // Get and clear dirty region from card table
4026       dirtyRegion = _ct->ct_bs()->dirty_card_range_after_reset(
4027                                     MemRegion(nextAddr, endAddr),
4028                                     true,
4029                                     CardTableModRefBS::precleaned_card_val());
4030 
4031       assert(dirtyRegion.start() >= nextAddr,
4032              "returned region inconsistent?");
4033     }
4034     lastAddr = dirtyRegion.end();
4035     numDirtyCards =
4036       dirtyRegion.word_size()/CardTableModRefBS::card_size_in_words;
4037 
4038     if (!dirtyRegion.is_empty()) {
4039       stopTimer();
4040       CMSTokenSyncWithLocks ts(true, old_gen->freelistLock(), bitMapLock());
4041       startTimer();
4042       sample_eden();
4043       verify_work_stacks_empty();
4044       verify_overflow_empty();
4045       HeapWord* stop_point =
4046         old_gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4047       if (stop_point != NULL) {
4048         assert((_collectorState == AbortablePreclean && should_abort_preclean()),
4049                "Should only be AbortablePreclean.");
4050         _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end()));
4051         if (should_abort_preclean()) {
4052           break; // out of preclean loop
4053         } else {
4054           // Compute the next address at which preclean should pick up.
4055           lastAddr = next_card_start_after_block(stop_point);
4056         }
4057       }
4058     } else {
4059       break;
4060     }
4061   }
4062   verify_work_stacks_empty();
4063   verify_overflow_empty();
4064   return cumNumDirtyCards;
4065 }
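// A minimal sketch (comment only) of the careful-iteration contract relied on
// by both preclean methods above: object_iterate_careful_m() returns NULL if
// the whole dirty region was scanned, or the address at which scanning stopped
// (an uninitialized object, or an abort request during abortable preclean).
// Here redirty() is a hypothetical stand-in for _modUnionTable.mark_range()
// or ct_bs()->invalidate(), respectively.
//
//   HeapWord* stop_point = space->object_iterate_careful_m(dirtyRegion, cl);
//   if (stop_point != NULL) {
//     redirty(MemRegion(stop_point, dirtyRegion.end()));     // rescanned at remark
//     if (!should_abort_preclean()) {
//       lastAddr = next_card_start_after_block(stop_point);  // resume past this block
//     }
//   }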
4066 
4067 class PrecleanKlassClosure : public KlassClosure {
4068   KlassToOopClosure _cm_klass_closure;
4069  public:
4070   PrecleanKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
4071   void do_klass(Klass* k) {
4072     if (k->has_accumulated_modified_oops()) {
4073       k->clear_accumulated_modified_oops();
4074 
4075       _cm_klass_closure.do_klass(k);
4076     }
4077   }
4078 };
4079 
4080 // The freelist lock is needed to prevent asserts; is it really needed?
4081 void CMSCollector::preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock) {
4082 
4083   cl->set_freelistLock(freelistLock);
4084 
4085   CMSTokenSyncWithLocks ts(true, freelistLock, bitMapLock());
4086 
4087   // SSS: Add equivalent to ScanMarkedObjectsAgainCarefullyClosure::do_yield_check and should_abort_preclean?
4088   // SSS: We should probably check if precleaning should be aborted, at suitable intervals?
4089   PrecleanKlassClosure preclean_klass_closure(cl);
4090   ClassLoaderDataGraph::classes_do(&preclean_klass_closure);
4091 
4092   verify_work_stacks_empty();
4093   verify_overflow_empty();
4094 }
4095 
4096 void CMSCollector::checkpointRootsFinal() {
4097   assert(_collectorState == FinalMarking, "incorrect state transition?");
4098   check_correct_thread_executing();
4099   // world is stopped at this checkpoint
4100   assert(SafepointSynchronize::is_at_safepoint(),
4101          "world should be stopped");
4102   TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
4103 
4104   verify_work_stacks_empty();
4105   verify_overflow_empty();
4106 
4107   log_debug(gc)("YG occupancy: " SIZE_FORMAT " K (" SIZE_FORMAT " K)",
4108                 _young_gen->used() / K, _young_gen->capacity() / K);
4109   {
4110     if (CMSScavengeBeforeRemark) {
4111       GenCollectedHeap* gch = GenCollectedHeap::heap();
4112       // Temporarily set the flag to false; GCH->do_collection() expects
4113       // it to be false and will set it back to true.
4114       FlagSetting fl(gch->_is_gc_active, false);
4115 
4116       gch->do_collection(true,                      // full (i.e. force, see below)
4117                          false,                     // !clear_all_soft_refs
4118                          0,                         // size
4119                          false,                     // is_tlab
4120                          GenCollectedHeap::YoungGen // type
4121         );
4122     }
4123     FreelistLocker x(this);
4124     MutexLockerEx y(bitMapLock(),
4125                     Mutex::_no_safepoint_check_flag);
4126     checkpointRootsFinalWork();
4127   }
4128   verify_work_stacks_empty();
4129   verify_overflow_empty();
4130 }
4131 
4132 void CMSCollector::checkpointRootsFinalWork() {
4133   GCTraceTime(Trace, gc, phases) tm("checkpointRootsFinalWork", _gc_timer_cm);
4134 
4135   assert(haveFreelistLocks(), "must have free list locks");
4136   assert_lock_strong(bitMapLock());
4137 
4138   ResourceMark rm;
4139   HandleMark   hm;
4140 
4141   GenCollectedHeap* gch = GenCollectedHeap::heap();
4142 
4143   if (should_unload_classes()) {
4144     CodeCache::gc_prologue();
4145   }
4146   assert(haveFreelistLocks(), "must have free list locks");
4147   assert_lock_strong(bitMapLock());
4148 
4149   // We might assume that we need not fill TLAB's when
4150   // CMSScavengeBeforeRemark is set, because we may have just done
4151   // a scavenge which would have filled all TLAB's -- and besides
4152   // Eden would be empty. This however may not always be the case --
4153   // for instance although we asked for a scavenge, it may not have
4154   // happened because of a JNI critical section. We probably need
4155   // a policy for deciding whether we can in that case wait until
4156   // the critical section releases and then do the remark following
4157   // the scavenge, and skip it here. In the absence of that policy,
4158   // or of an indication of whether the scavenge did indeed occur,
4159   // we cannot rely on TLAB's having been filled and must do
4160   // so here just in case a scavenge did not happen.
4161   gch->ensure_parsability(false);  // fill TLAB's, but no need to retire them
4162   // Update the saved marks which may affect the root scans.
4163   gch->save_marks();
4164 
4165   print_eden_and_survivor_chunk_arrays();
4166 
4167   {
4168 #if defined(COMPILER2) || INCLUDE_JVMCI
4169     DerivedPointerTableDeactivate dpt_deact;
4170 #endif
4171 
4172     // Note on the role of the mod union table:
4173     // Since the marker in "markFromRoots" marks concurrently with
4174     // mutators, it is possible for some reachable objects not to have been
4175     // scanned. For instance, an only reference to an object A was
4176     // placed in object B after the marker scanned B. Unless B is rescanned,
4177     // A would be collected. Such updates to references in marked objects
4178     // are detected via the mod union table which is the set of all cards
4179     // dirtied since the first checkpoint in this GC cycle and prior to
4180     // the most recent young generation GC, minus those cleaned up by the
4181     // concurrent precleaning.
4182     if (CMSParallelRemarkEnabled) {
4183       GCTraceTime(Debug, gc, phases) t("Rescan (parallel)", _gc_timer_cm);
4184       do_remark_parallel();
4185     } else {
4186       GCTraceTime(Debug, gc, phases) t("Rescan (non-parallel)", _gc_timer_cm);
4187       do_remark_non_parallel();
4188     }
4189   }
4190   verify_work_stacks_empty();
4191   verify_overflow_empty();
4192 
4193   {
4194     GCTraceTime(Trace, gc, phases) ts("refProcessingWork", _gc_timer_cm);
4195     refProcessingWork();
4196   }
4197   verify_work_stacks_empty();
4198   verify_overflow_empty();
4199 
4200   if (should_unload_classes()) {
4201     CodeCache::gc_epilogue();
4202   }
4203   JvmtiExport::gc_epilogue();
4204 
4205   // If we encountered any (marking stack / work queue) overflow
4206   // events during the current CMS cycle, take appropriate
4207   // remedial measures, where possible, so as to try and avoid
4208   // recurrence of that condition.
4209   assert(_markStack.isEmpty(), "No grey objects");
4210   size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
4211                      _ser_kac_ovflw        + _ser_kac_preclean_ovflw;
4212   if (ser_ovflw > 0) {
4213     log_trace(gc)("Marking stack overflow (benign) (pmc_pc=" SIZE_FORMAT ", pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT ", kac_preclean=" SIZE_FORMAT ")",
4214                          _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw, _ser_kac_ovflw, _ser_kac_preclean_ovflw);
4215     _markStack.expand();
4216     _ser_pmc_remark_ovflw = 0;
4217     _ser_pmc_preclean_ovflw = 0;
4218     _ser_kac_preclean_ovflw = 0;
4219     _ser_kac_ovflw = 0;
4220   }
4221   if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
4222     log_trace(gc)("Work queue overflow (benign) (pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT ")",
4223                   _par_pmc_remark_ovflw, _par_kac_ovflw);
4224     _par_pmc_remark_ovflw = 0;
4225     _par_kac_ovflw = 0;
4226   }
4227   if (_markStack._hit_limit > 0) {
4228     log_trace(gc)(" (benign) Hit max stack size limit (" SIZE_FORMAT ")",
4229                   _markStack._hit_limit);
4230   }
4231   if (_markStack._failed_double > 0) {
4232     log_trace(gc)(" (benign) Failed stack doubling (" SIZE_FORMAT "), current capacity " SIZE_FORMAT,
4233                   _markStack._failed_double, _markStack.capacity());
4234   }
4235   _markStack._hit_limit = 0;
4236   _markStack._failed_double = 0;
4237 
4238   if ((VerifyAfterGC || VerifyDuringGC) &&
4239       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
4240     verify_after_remark();
4241   }
4242 
4243   _gc_tracer_cm->report_object_count_after_gc(&_is_alive_closure);
4244 
4245   // Change under the freelistLocks.
4246   _collectorState = Sweeping;
4247   // Call isAllClear() under bitMapLock
4248   assert(_modUnionTable.isAllClear(),
4249       "Should be clear by end of the final marking");
4250   assert(_ct->klass_rem_set()->mod_union_is_clear(),
4251       "Should be clear by end of the final marking");
4252 }
4253 
4254 void CMSParInitialMarkTask::work(uint worker_id) {
4255   elapsedTimer _timer;
4256   ResourceMark rm;
4257   HandleMark   hm;
4258 
4259   // ---------- scan from roots --------------
4260   _timer.start();
4261   GenCollectedHeap* gch = GenCollectedHeap::heap();
4262   ParMarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
4263 
4264   // ---------- young gen roots --------------
4265   {
4266     work_on_young_gen_roots(&par_mri_cl);
4267     _timer.stop();
4268     log_trace(gc, task)("Finished young gen initial mark scan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4269   }
4270 
4271   // ---------- remaining roots --------------
4272   _timer.reset();
4273   _timer.start();
4274 
4275   CLDToOopClosure cld_closure(&par_mri_cl, true);
4276 
4277   gch->cms_process_roots(_strong_roots_scope,
4278                          false,     // yg was scanned above
4279                          GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
4280                          _collector->should_unload_classes(),
4281                          &par_mri_cl,
4282                          &cld_closure);
4283   assert(_collector->should_unload_classes()
4284          || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
4285          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
4286   _timer.stop();
4287   log_trace(gc, task)("Finished remaining root initial mark scan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4288 }
4289 
4290 // Parallel remark task
4291 class CMSParRemarkTask: public CMSParMarkTask {
4292   CompactibleFreeListSpace* _cms_space;
4293 
4294   // The per-thread work queues, available here for stealing.
4295   OopTaskQueueSet*       _task_queues;
4296   ParallelTaskTerminator _term;
4297   StrongRootsScope*      _strong_roots_scope;
4298 
4299  public:
4300   // A value of 0 passed to n_workers will cause the number of
4301   // workers to be taken from the active workers in the work gang.
4302   CMSParRemarkTask(CMSCollector* collector,
4303                    CompactibleFreeListSpace* cms_space,
4304                    uint n_workers, WorkGang* workers,
4305                    OopTaskQueueSet* task_queues,
4306                    StrongRootsScope* strong_roots_scope):
4307     CMSParMarkTask("Rescan roots and grey objects in parallel",
4308                    collector, n_workers),
4309     _cms_space(cms_space),
4310     _task_queues(task_queues),
4311     _term(n_workers, task_queues),
4312     _strong_roots_scope(strong_roots_scope) { }
4313 
4314   OopTaskQueueSet* task_queues() { return _task_queues; }
4315 
4316   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
4317 
4318   ParallelTaskTerminator* terminator() { return &_term; }
4319   uint n_workers() { return _n_workers; }
4320 
4321   void work(uint worker_id);
4322 
4323  private:
4324   // ... of  dirty cards in old space
4325   void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
4326                                   ParMarkRefsIntoAndScanClosure* cl);
4327 
4328   // ... work stealing for the above
4329   void do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl, int* seed);
4330 };
4331 
4332 class RemarkKlassClosure : public KlassClosure {
4333   KlassToOopClosure _cm_klass_closure;
4334  public:
4335   RemarkKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
4336   void do_klass(Klass* k) {
4337     // Check if we have modified any oops in the Klass during the concurrent marking.
4338     if (k->has_accumulated_modified_oops()) {
4339       k->clear_accumulated_modified_oops();
4340 
4341       // We could have transferred the current modified marks to the accumulated marks,
4342       // as we do from the Card Table to the Mod Union Table, but it's not really necessary.
4343     } else if (k->has_modified_oops()) {
4344       // Don't clear anything, this info is needed by the next young collection.
4345     } else {
4346       // No modified oops in the Klass.
4347       return;
4348     }
4349 
4350     // The klass has modified fields, need to scan the klass.
4351     _cm_klass_closure.do_klass(k);
4352   }
4353 };
4354 
4355 void CMSParMarkTask::work_on_young_gen_roots(OopsInGenClosure* cl) {
4356   ParNewGeneration* young_gen = _collector->_young_gen;
4357   ContiguousSpace* eden_space = young_gen->eden();
4358   ContiguousSpace* from_space = young_gen->from();
4359   ContiguousSpace* to_space   = young_gen->to();
4360 
4361   HeapWord** eca = _collector->_eden_chunk_array;
4362   size_t     ect = _collector->_eden_chunk_index;
4363   HeapWord** sca = _collector->_survivor_chunk_array;
4364   size_t     sct = _collector->_survivor_chunk_index;
4365 
4366   assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
4367   assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
4368 
4369   do_young_space_rescan(cl, to_space, NULL, 0);
4370   do_young_space_rescan(cl, from_space, sca, sct);
4371   do_young_space_rescan(cl, eden_space, eca, ect);
4372 }
4373 
4374 // work_queue(i) is passed to the closure
4375 // ParMarkRefsIntoAndScanClosure.  The "i" parameter
4376 // also is passed to do_dirty_card_rescan_tasks() and to
4377 // do_work_steal() to select the i-th task_queue.
4378 
4379 void CMSParRemarkTask::work(uint worker_id) {
4380   elapsedTimer _timer;
4381   ResourceMark rm;
4382   HandleMark   hm;
4383 
4384   // ---------- rescan from roots --------------
4385   _timer.start();
4386   GenCollectedHeap* gch = GenCollectedHeap::heap();
4387   ParMarkRefsIntoAndScanClosure par_mrias_cl(_collector,
4388     _collector->_span, _collector->ref_processor(),
4389     &(_collector->_markBitMap),
4390     work_queue(worker_id));
4391 
4392   // Rescan young gen roots first since these are likely
4393   // coarsely partitioned and may, on that account, constitute
4394   // the critical path; thus, it's best to start off that
4395   // work first.
4396   // ---------- young gen roots --------------
4397   {
4398     work_on_young_gen_roots(&par_mrias_cl);
4399     _timer.stop();
4400     log_trace(gc, task)("Finished young gen rescan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4401   }
4402 
4403   // ---------- remaining roots --------------
4404   _timer.reset();
4405   _timer.start();
4406   gch->cms_process_roots(_strong_roots_scope,
4407                          false,     // yg was scanned above
4408                          GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
4409                          _collector->should_unload_classes(),
4410                          &par_mrias_cl,
4411                          NULL);     // The dirty klasses will be handled below
4412 
4413   assert(_collector->should_unload_classes()
4414          || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
4415          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
4416   _timer.stop();
4417   log_trace(gc, task)("Finished remaining root rescan work in %dth thread: %3.3f sec",  worker_id, _timer.seconds());
4418 
4419   // ---------- unhandled CLD scanning ----------
4420   if (worker_id == 0) { // Single threaded at the moment.
4421     _timer.reset();
4422     _timer.start();
4423 
4424     // Scan all new class loader data objects and new dependencies that were
4425     // introduced during concurrent marking.
4426     ResourceMark rm;
4427     GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
4428     for (int i = 0; i < array->length(); i++) {
4429       par_mrias_cl.do_cld_nv(array->at(i));
4430     }
4431 
4432     // We don't need to keep track of new CLDs anymore.
4433     ClassLoaderDataGraph::remember_new_clds(false);
4434 
4435     _timer.stop();
4436     log_trace(gc, task)("Finished unhandled CLD scanning work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4437   }
4438 
4439   // ---------- dirty klass scanning ----------
4440   if (worker_id == 0) { // Single threaded at the moment.
4441     _timer.reset();
4442     _timer.start();
4443 
4444     // Scan all classes that were dirtied during the concurrent marking phase.
4445     RemarkKlassClosure remark_klass_closure(&par_mrias_cl);
4446     ClassLoaderDataGraph::classes_do(&remark_klass_closure);
4447 
4448     _timer.stop();
4449     log_trace(gc, task)("Finished dirty klass scanning work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4450   }
4451 
4452   // We might have added oops to ClassLoaderData::_handles during the
4453   // concurrent marking phase. These oops point to newly allocated objects
4454   // that are guaranteed to be kept alive. Either by the direct allocation
4455   // code, or when the young collector processes the roots. Hence,
4456   // we don't have to revisit the _handles block during the remark phase.
4457 
4458   // ---------- rescan dirty cards ------------
4459   _timer.reset();
4460   _timer.start();
4461 
4462   // Do the rescan tasks for each of the two spaces
4463   // (cms_space) in turn.
4464   // "worker_id" is passed to select the task_queue for "worker_id"
4465   do_dirty_card_rescan_tasks(_cms_space, worker_id, &par_mrias_cl);
4466   _timer.stop();
4467   log_trace(gc, task)("Finished dirty card rescan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4468 
4469   // ---------- steal work from other threads ...
4470   // ---------- ... and drain overflow list.
4471   _timer.reset();
4472   _timer.start();
4473   do_work_steal(worker_id, &par_mrias_cl, _collector->hash_seed(worker_id));
4474   _timer.stop();
4475   log_trace(gc, task)("Finished work stealing in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4476 }
4477 
4478 void
4479 CMSParMarkTask::do_young_space_rescan(
4480   OopsInGenClosure* cl, ContiguousSpace* space,
4481   HeapWord** chunk_array, size_t chunk_top) {
4482   // Until all tasks completed:
4483   // . claim an unclaimed task
4484   // . compute region boundaries corresponding to task claimed
4485   //   using chunk_array
4486   // . par_oop_iterate(cl) over that region
4487 
4488   ResourceMark rm;
4489   HandleMark   hm;
4490 
4491   SequentialSubTasksDone* pst = space->par_seq_tasks();
4492 
4493   uint nth_task = 0;
4494   uint n_tasks  = pst->n_tasks();
4495 
4496   if (n_tasks > 0) {
4497     assert(pst->valid(), "Uninitialized use?");
4498     HeapWord *start, *end;
4499     while (!pst->is_task_claimed(/* reference */ nth_task)) {
4500       // We claimed task # nth_task; compute its boundaries.
4501       if (chunk_top == 0) {  // no samples were taken
4502         assert(nth_task == 0 && n_tasks == 1, "Can have only 1 eden task");
4503         start = space->bottom();
4504         end   = space->top();
4505       } else if (nth_task == 0) {
4506         start = space->bottom();
4507         end   = chunk_array[nth_task];
4508       } else if (nth_task < (uint)chunk_top) {
4509         assert(nth_task >= 1, "Control point invariant");
4510         start = chunk_array[nth_task - 1];
4511         end   = chunk_array[nth_task];
4512       } else {
4513         assert(nth_task == (uint)chunk_top, "Control point invariant");
4514         start = chunk_array[chunk_top - 1];
4515         end   = space->top();
4516       }
4517       MemRegion mr(start, end);
4518       // Verify that mr is in space
4519       assert(mr.is_empty() || space->used_region().contains(mr),
4520              "Should be in space");
4521       // Verify that "start" is an object boundary
4522       assert(mr.is_empty() || oop(mr.start())->is_oop(),
4523              "Should be an oop");
4524       space->par_oop_iterate(mr, cl);
4525     }
4526     pst->all_tasks_completed();
4527   }
4528 }
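// A worked example (comment only): with chunk_top == 3 recorded boundaries
// {c0, c1, c2} in a space [bottom, top), the tasks claimed above cover
//   task 0: [bottom, c0)
//   task 1: [c0, c1)
//   task 2: [c1, c2)
//   task 3: [c2, top)
// i.e. chunk_top + 1 tasks in all, and every boundary is an object start.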
4529 
4530 void
4531 CMSParRemarkTask::do_dirty_card_rescan_tasks(
4532   CompactibleFreeListSpace* sp, int i,
4533   ParMarkRefsIntoAndScanClosure* cl) {
4534   // Until all tasks completed:
4535   // . claim an unclaimed task
4536   // . compute region boundaries corresponding to task claimed
4537   // . transfer dirty bits ct->mut for that region
4538   // . apply rescanclosure to dirty mut bits for that region
4539 
4540   ResourceMark rm;
4541   HandleMark   hm;
4542 
4543   OopTaskQueue* work_q = work_queue(i);
4544   ModUnionClosure modUnionClosure(&(_collector->_modUnionTable));
4545   // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
4546   // CAUTION: This closure has state that persists across calls to
4547   // the work method dirty_range_iterate_clear() in that it has
4548   // embedded in it a (subtype of) UpwardsObjectClosure. The
4549   // use of that state in the embedded UpwardsObjectClosure instance
4550   // assumes that the cards are always iterated (even if in parallel
4551   // by several threads) in monotonically increasing order per each
4552   // thread. This is true of the implementation below which picks
4553   // card ranges (chunks) in monotonically increasing order globally
4554   // and, a-fortiori, in monotonically increasing order per thread
4555   // (the latter order being a subsequence of the former).
4556   // If the work code below is ever reorganized into a more chaotic
4557   // work-partitioning form than the current "sequential tasks"
4558   // paradigm, the use of that persistent state will have to be
4559   // revisited and modified appropriately. See also related
4560   // bug 4756801 work on which should examine this code to make
4561   // sure that the changes there do not run counter to the
4562   // assumptions made here and necessary for correctness and
4563   // efficiency. Note also that this code might yield inefficient
4564   // behavior in the case of very large objects that span one or
4565   // more work chunks. Such objects would potentially be scanned
4566   // several times redundantly. Work on 4756801 should try and
4567   // address that performance anomaly if at all possible. XXX
4568   MemRegion  full_span  = _collector->_span;
4569   CMSBitMap* bm    = &(_collector->_markBitMap);     // shared
4570   MarkFromDirtyCardsClosure
4571     greyRescanClosure(_collector, full_span, // entire span of interest
4572                       sp, bm, work_q, cl);
4573 
4574   SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
4575   assert(pst->valid(), "Uninitialized use?");
4576   uint nth_task = 0;
4577   const int alignment = CardTableModRefBS::card_size * BitsPerWord;
4578   MemRegion span = sp->used_region();
4579   HeapWord* start_addr = span.start();
4580   HeapWord* end_addr = align_up(span.end(), alignment);
4581   const size_t chunk_size = sp->rescan_task_size(); // in HeapWord units
4582   assert(is_aligned(start_addr, alignment), "Check alignment");
4583   assert(is_aligned(chunk_size, alignment), "Check alignment");
4584 
4585   while (!pst->is_task_claimed(/* reference */ nth_task)) {
4586     // Having claimed the nth_task, compute corresponding mem-region,
4587     // which is a-fortiori aligned correctly (i.e. at a MUT boundary).
4588     // The alignment restriction ensures that we do not need any
4589     // synchronization with other gang-workers while setting or
4590     // clearing bits in this chunk of the MUT.
4591     MemRegion this_span = MemRegion(start_addr + nth_task*chunk_size,
4592                                     start_addr + (nth_task+1)*chunk_size);
4593     // The last chunk's end might be way beyond end of the
4594     // used region. In that case pull back appropriately.
4595     if (this_span.end() > end_addr) {
4596       this_span.set_end(end_addr);
4597       assert(!this_span.is_empty(), "Program logic (calculation of n_tasks)");
4598     }
4599     // Iterate over the dirty cards covering this chunk, marking them
4600     // precleaned, and setting the corresponding bits in the mod union
4601     // table. Since we have been careful to partition at Card and MUT-word
4602     // boundaries no synchronization is needed between parallel threads.
4603     _collector->_ct->ct_bs()->dirty_card_iterate(this_span,
4604                                                  &modUnionClosure);
4605 
4606     // Having transferred these marks into the modUnionTable,
4607     // rescan the marked objects on the dirty cards in the modUnionTable.
4608     // Even if this is at a synchronous collection, the initial marking
4609     // may have been done during an asynchronous collection so there
4610     // may be dirty bits in the mod-union table.
4611     _collector->_modUnionTable.dirty_range_iterate_clear(
4612                   this_span, &greyRescanClosure);
4613     _collector->_modUnionTable.verifyNoOneBitsInRange(
4614                                  this_span.start(),
4615                                  this_span.end());
4616   }
4617   pst->all_tasks_completed();  // declare that i am done
4618 }
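// A minimal sketch (comment only, not compiled) of the chunk computation used
// above: each claimed task n rescans a fixed-size, card- and MUT-word-aligned
// chunk, clipped to the end of the used region, so no two workers ever touch
// the same MUT word. The helper name is hypothetical.
//
//   static MemRegion chunk_for_task(uint n, HeapWord* start, HeapWord* end,
//                                   size_t chunk_size) {
//     MemRegion span(start + n * chunk_size, start + (n + 1) * chunk_size);
//     if (span.end() > end) {
//       span.set_end(end);   // the last chunk may extend past the used region
//     }
//     return span;
//   }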
4619 
4620 // . see if we can share work_queues with ParNew? XXX
4621 void
4622 CMSParRemarkTask::do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl,
4623                                 int* seed) {
4624   OopTaskQueue* work_q = work_queue(i);
4625   NOT_PRODUCT(int num_steals = 0;)
4626   oop obj_to_scan;
4627   CMSBitMap* bm = &(_collector->_markBitMap);
4628 
4629   while (true) {
4630     // Completely finish any left over work from (an) earlier round(s)
4631     cl->trim_queue(0);
4632     size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
4633                                          (size_t)ParGCDesiredObjsFromOverflowList);
4634     // Now check if there's any work in the overflow list
4635     // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
4636     // only affects the number of attempts made to get work from the
4637     // overflow list and does not affect the number of workers.  Just
4638     // pass ParallelGCThreads so this behavior is unchanged.
4639     if (_collector->par_take_from_overflow_list(num_from_overflow_list,
4640                                                 work_q,
4641                                                 ParallelGCThreads)) {
4642       // found something in global overflow list;
4643       // not yet ready to go stealing work from others.
4644       // We'd like to assert(work_q->size() != 0, ...)
4645       // because we just took work from the overflow list,
4646       // but of course we can't since all of that could have
4647       // been already stolen from us.
4648       // "He giveth and He taketh away."
4649       continue;
4650     }
4651     // Verify that we have no work before we resort to stealing
4652     assert(work_q->size() == 0, "Have work, shouldn't steal");
4653     // Try to steal from other queues that have work
4654     if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
4655       NOT_PRODUCT(num_steals++;)
4656       assert(obj_to_scan->is_oop(), "Oops, not an oop!");
4657       assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
4658       // Do scanning work
4659       obj_to_scan->oop_iterate(cl);
4660       // Loop around, finish this work, and try to steal some more
4661     } else if (terminator()->offer_termination()) {
4662         break;  // nirvana from the infinite cycle
4663     }
4664   }
4665   log_develop_trace(gc, task)("\t(%d: stole %d oops)", i, num_steals);
4666   assert(work_q->size() == 0 && _collector->overflow_list_is_empty(),
4667          "Else our work is not yet done");
4668 }
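// A schematic outline (comment only) of the drain/steal loop above; the same
// structure is used by CMSRefProcTaskProxy::do_work_steal() further below.
//
//   while (true) {
//     drain_local_queue();                              // finish our own work first
//     if (take_from_global_overflow_list()) continue;   // refill, then drain again
//     if (steal_from_another_queue(&obj)) {
//       scan(obj);                                      // may push more local work
//     } else if (terminator()->offer_termination()) {
//       break;                                          // every queue is empty
//     }
//   }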
4669 
4670 // If CMSEdenChunksRecordAlways is true, record object boundaries in
4671 // _eden_chunk_array by sampling the eden top in the slow-path eden
4672 // object allocation code path. If CMSEdenChunksRecordAlways is false,
4673 // we instead rely on the asynchronous sampling in sample_eden(),
4674 // which is active during part of the preclean phase.
4676 void CMSCollector::sample_eden_chunk() {
4677   if (CMSEdenChunksRecordAlways && _eden_chunk_array != NULL) {
4678     if (_eden_chunk_lock->try_lock()) {
4679       // Record a sample. This is the critical section. The contents
4680       // of the _eden_chunk_array have to be non-decreasing in the
4681       // address order.
4682       _eden_chunk_array[_eden_chunk_index] = *_top_addr;
4683       assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
4684              "Unexpected state of Eden");
4685       if (_eden_chunk_index == 0 ||
4686           ((_eden_chunk_array[_eden_chunk_index] > _eden_chunk_array[_eden_chunk_index-1]) &&
4687            (pointer_delta(_eden_chunk_array[_eden_chunk_index],
4688                           _eden_chunk_array[_eden_chunk_index-1]) >= CMSSamplingGrain))) {
4689         _eden_chunk_index++;  // commit sample
4690       }
4691       _eden_chunk_lock->unlock();
4692     }
4693   }
4694 }
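// A minimal sketch (comment only, not compiled) of the commit test used above:
// a sample is kept only if it keeps _eden_chunk_array strictly increasing and
// the new chunk is at least CMSSamplingGrain words wide, which bounds the
// number of tiny rescan tasks. The helper name is hypothetical.
//
//   static bool commit_sample(HeapWord** arr, size_t idx, HeapWord* sample) {
//     return idx == 0 ||
//            (sample > arr[idx - 1] &&
//             pointer_delta(sample, arr[idx - 1]) >= CMSSamplingGrain);
//   }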
4695 
4696 // Return a thread-local PLAB recording array, as appropriate.
4697 void* CMSCollector::get_data_recorder(int thr_num) {
4698   if (_survivor_plab_array != NULL &&
4699       (CMSPLABRecordAlways ||
4700        (_collectorState > Marking && _collectorState < FinalMarking))) {
4701     assert(thr_num < (int)ParallelGCThreads, "thr_num is out of bounds");
4702     ChunkArray* ca = &_survivor_plab_array[thr_num];
4703     ca->reset();   // clear it so that fresh data is recorded
4704     return (void*) ca;
4705   } else {
4706     return NULL;
4707   }
4708 }
4709 
4710 // Reset all the thread-local PLAB recording arrays
4711 void CMSCollector::reset_survivor_plab_arrays() {
4712   for (uint i = 0; i < ParallelGCThreads; i++) {
4713     _survivor_plab_array[i].reset();
4714   }
4715 }
4716 
4717 // Merge the per-thread plab arrays into the global survivor chunk
4718 // array which will provide the partitioning of the survivor space
4719 // for CMS initial scan and rescan.
4720 void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv,
4721                                               int no_of_gc_threads) {
4722   assert(_survivor_plab_array  != NULL, "Error");
4723   assert(_survivor_chunk_array != NULL, "Error");
4724   assert(_collectorState == FinalMarking ||
4725          (CMSParallelInitialMarkEnabled && _collectorState == InitialMarking), "Error");
4726   for (int j = 0; j < no_of_gc_threads; j++) {
4727     _cursor[j] = 0;
4728   }
4729   HeapWord* top = surv->top();
4730   size_t i;
4731   for (i = 0; i < _survivor_chunk_capacity; i++) {  // all sca entries
4732     HeapWord* min_val = top;          // Higher than any PLAB address
4733     uint      min_tid = 0;            // position of min_val this round
4734     for (int j = 0; j < no_of_gc_threads; j++) {
4735       ChunkArray* cur_sca = &_survivor_plab_array[j];
4736       if (_cursor[j] == cur_sca->end()) {
4737         continue;
4738       }
4739       assert(_cursor[j] < cur_sca->end(), "ctl pt invariant");
4740       HeapWord* cur_val = cur_sca->nth(_cursor[j]);
4741       assert(surv->used_region().contains(cur_val), "Out of bounds value");
4742       if (cur_val < min_val) {
4743         min_tid = j;
4744         min_val = cur_val;
4745       } else {
4746         assert(cur_val < top, "All recorded addresses should be less");
4747       }
4748     }
4749     // At this point min_val and min_tid are respectively
4750     // the least address in _survivor_plab_array[j]->nth(_cursor[j])
4751     // and the thread (j) that witnesses that address.
4752     // We record this address in the _survivor_chunk_array[i]
4753     // and increment _cursor[min_tid] prior to the next round i.
4754     if (min_val == top) {
4755       break;
4756     }
4757     _survivor_chunk_array[i] = min_val;
4758     _cursor[min_tid]++;
4759   }
4760   // We are all done; record the size of the _survivor_chunk_array
4761   _survivor_chunk_index = i; // exclusive: [0, i)
4762   log_trace(gc, survivor)(" (Survivor: " SIZE_FORMAT " chunks) ", i);
4763   // Verify that we used up all the recorded entries
4764   #ifdef ASSERT
4765     size_t total = 0;
4766     for (int j = 0; j < no_of_gc_threads; j++) {
4767       assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");
4768       total += _cursor[j];
4769     }
4770     assert(total == _survivor_chunk_index, "Ctl Pt Invariant");
4771     // Check that the merged array is in sorted order
4772     if (total > 0) {
4773       for (size_t i = 0; i < total - 1; i++) {
4774         log_develop_trace(gc, survivor)(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
4775                                      i, p2i(_survivor_chunk_array[i]));
4776         assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
4777                "Not sorted");
4778       }
4779     }
4780   #endif // ASSERT
4781 }
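// A schematic outline (comment only) of the merge above: it is a k-way merge
// of the per-thread PLAB arrays (each already sorted by address) into one
// sorted _survivor_chunk_array, using one cursor per thread.
//
//   for (i = 0; i < capacity; i++) {
//     pick the thread j whose next unconsumed entry arr[j][cursor[j]] is smallest;
//     if (no such entry below top) break;          // all cursors exhausted
//     _survivor_chunk_array[i] = arr[j][cursor[j]++];
//   }
//   _survivor_chunk_index = i;                     // entries [0, i) are sorted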
4782 
4783 // Set up the space's par_seq_tasks structure for work claiming
4784 // for parallel initial scan and rescan of young gen.
4785 // See ParRescanTask where this is currently used.
4786 void
4787 CMSCollector::
4788 initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
4789   assert(n_threads > 0, "Unexpected n_threads argument");
4790 
4791   // Eden space
4792   if (!_young_gen->eden()->is_empty()) {
4793     SequentialSubTasksDone* pst = _young_gen->eden()->par_seq_tasks();
4794     assert(!pst->valid(), "Clobbering existing data?");
4795     // Each boundary in [0, _eden_chunk_index) delimits a task; one more task covers the tail of eden.
4796     size_t n_tasks = _eden_chunk_index + 1;
4797     assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error");
4798     // Sets the condition for completion of the subtask (how many threads
4799     // need to finish in order to be done).
4800     pst->set_n_threads(n_threads);
4801     pst->set_n_tasks((int)n_tasks);
4802   }
4803 
4804   // Merge the survivor plab arrays into _survivor_chunk_array
4805   if (_survivor_plab_array != NULL) {
4806     merge_survivor_plab_arrays(_young_gen->from(), n_threads);
4807   } else {
4808     assert(_survivor_chunk_index == 0, "Error");
4809   }
4810 
4811   // To space
4812   {
4813     SequentialSubTasksDone* pst = _young_gen->to()->par_seq_tasks();
4814     assert(!pst->valid(), "Clobbering existing data?");
4815     // Sets the condition for completion of the subtask (how many threads
4816     // need to finish in order to be done).
4817     pst->set_n_threads(n_threads);
4818     pst->set_n_tasks(1);
4819     assert(pst->valid(), "Error");
4820   }
4821 
4822   // From space
4823   {
4824     SequentialSubTasksDone* pst = _young_gen->from()->par_seq_tasks();
4825     assert(!pst->valid(), "Clobbering existing data?");
4826     size_t n_tasks = _survivor_chunk_index + 1;
4827     assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error");
4828     // Sets the condition for completion of the subtask (how many threads
4829     // need to finish in order to be done).
4830     pst->set_n_threads(n_threads);
4831     pst->set_n_tasks((int)n_tasks);
4832     assert(pst->valid(), "Error");
4833   }
4834 }
4835 
4836 // Parallel version of remark
4837 void CMSCollector::do_remark_parallel() {
4838   GenCollectedHeap* gch = GenCollectedHeap::heap();
4839   WorkGang* workers = gch->workers();
4840   assert(workers != NULL, "Need parallel worker threads.");
4841   // Choose to use the number of GC workers most recently set
4842   // into "active_workers".
4843   uint n_workers = workers->active_workers();
4844 
4845   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
4846 
4847   StrongRootsScope srs(n_workers);
4848 
4849   CMSParRemarkTask tsk(this, cms_space, n_workers, workers, task_queues(), &srs);
4850 
4851   // We won't be iterating over the cards in the card table updating
4852   // the younger_gen cards, so we shouldn't call the following else
4853   // the verification code as well as subsequent younger_refs_iterate
4854   // code would get confused. XXX
4855   // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
4856 
4857   // The young gen rescan work will not be done as part of
4858   // process_roots (which currently doesn't know how to
4859   // parallelize such a scan), but rather will be broken up into
4860 // a set of parallel tasks (via the sampling that the [abortable]
4861 // preclean phase did of eden), plus the [two] tasks of
4862   // scanning the [two] survivor spaces. Further fine-grain
4863   // parallelization of the scanning of the survivor spaces
4864   // themselves, and of precleaning of the young gen itself
4865   // is deferred to the future.
4866   initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
4867 
4868   // The dirty card rescan work is broken up into a "sequence"
4869   // of parallel tasks (per constituent space) that are dynamically
4870   // claimed by the parallel threads.
4871   cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
4872 
4873   // It turns out that even when we're using 1 thread, doing the work in a
4874   // separate thread causes wide variance in run times.  We can't help this
4875   // in the multi-threaded case, but we special-case n=1 here to get
4876   // repeatable measurements of the 1-thread overhead of the parallel code.
4877   if (n_workers > 1) {
4878     // Make refs discovery MT-safe, if it isn't already: it may not
4879     // necessarily be so, since it's possible that we are doing
4880     // ST marking.
4881     ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true);
4882     workers->run_task(&tsk);
4883   } else {
4884     ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
4885     tsk.work(0);
4886   }
4887 
4888   // restore, single-threaded for now, any preserved marks
4889   // as a result of work_q overflow
4890   restore_preserved_marks_if_any();
4891 }
4892 
4893 // Non-parallel version of remark
4894 void CMSCollector::do_remark_non_parallel() {
4895   ResourceMark rm;
4896   HandleMark   hm;
4897   GenCollectedHeap* gch = GenCollectedHeap::heap();
4898   ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
4899 
4900   MarkRefsIntoAndScanClosure
4901     mrias_cl(_span, ref_processor(), &_markBitMap, NULL /* not precleaning */,
4902              &_markStack, this,
4903              false /* should_yield */, false /* not precleaning */);
4904   MarkFromDirtyCardsClosure
4905     markFromDirtyCardsClosure(this, _span,
4906                               NULL,  // space is set further below
4907                               &_markBitMap, &_markStack, &mrias_cl);
4908   {
4909     GCTraceTime(Trace, gc, phases) t("Grey Object Rescan", _gc_timer_cm);
4910     // Iterate over the dirty cards, setting the corresponding bits in the
4911     // mod union table.
4912     {
4913       ModUnionClosure modUnionClosure(&_modUnionTable);
4914       _ct->ct_bs()->dirty_card_iterate(
4915                       _cmsGen->used_region(),
4916                       &modUnionClosure);
4917     }
4918     // Having transferred these marks into the modUnionTable, we just need
4919     // to rescan the marked objects on the dirty cards in the modUnionTable.
4920     // The initial marking may have been done during an asynchronous
4921     // collection so there may be dirty bits in the mod-union table.
4922     const int alignment =
4923       CardTableModRefBS::card_size * BitsPerWord;
4924     {
4925       // ... First handle dirty cards in CMS gen
4926       markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
4927       MemRegion ur = _cmsGen->used_region();
4928       HeapWord* lb = ur.start();
4929       HeapWord* ub = align_up(ur.end(), alignment);
4930       MemRegion cms_span(lb, ub);
4931       _modUnionTable.dirty_range_iterate_clear(cms_span,
4932                                                &markFromDirtyCardsClosure);
4933       verify_work_stacks_empty();
4934       log_trace(gc)(" (re-scanned " SIZE_FORMAT " dirty cards in cms gen) ", markFromDirtyCardsClosure.num_dirty_cards());
4935     }
4936   }
4937   if (VerifyDuringGC &&
4938       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
4939     HandleMark hm;  // Discard invalid handles created during verification
4940     Universe::verify();
4941   }
4942   {
4943     GCTraceTime(Trace, gc, phases) t("Root Rescan", _gc_timer_cm);
4944 
4945     verify_work_stacks_empty();
4946 
4947     gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
4948     StrongRootsScope srs(1);
4949 
4950     gch->cms_process_roots(&srs,
4951                            true,  // young gen as roots
4952                            GenCollectedHeap::ScanningOption(roots_scanning_options()),
4953                            should_unload_classes(),
4954                            &mrias_cl,
4955                            NULL); // The dirty klasses will be handled below
4956 
4957     assert(should_unload_classes()
4958            || (roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
4959            "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
4960   }
4961 
4962   {
4963     GCTraceTime(Trace, gc, phases) t("Visit Unhandled CLDs", _gc_timer_cm);
4964 
4965     verify_work_stacks_empty();
4966 
4967     // Scan all class loader data objects that might have been introduced
4968     // during concurrent marking.
4969     ResourceMark rm;
4970     GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
4971     for (int i = 0; i < array->length(); i++) {
4972       mrias_cl.do_cld_nv(array->at(i));
4973     }
4974 
4975     // We don't need to keep track of new CLDs anymore.
4976     ClassLoaderDataGraph::remember_new_clds(false);
4977 
4978     verify_work_stacks_empty();
4979   }
4980 
4981   {
4982     GCTraceTime(Trace, gc, phases) t("Dirty Klass Scan", _gc_timer_cm);
4983 
4984     verify_work_stacks_empty();
4985 
4986     RemarkKlassClosure remark_klass_closure(&mrias_cl);
4987     ClassLoaderDataGraph::classes_do(&remark_klass_closure);
4988 
4989     verify_work_stacks_empty();
4990   }
4991 
4992   // We might have added oops to ClassLoaderData::_handles during the
4993   // concurrent marking phase. These oops point to newly allocated objects
4994   // that are guaranteed to be kept alive. Either by the direct allocation
4995   // code, or when the young collector processes the roots. Hence,
4996   // we don't have to revisit the _handles block during the remark phase.
4997 
4998   verify_work_stacks_empty();
4999   // Restore evacuated mark words, if any, used for overflow list links
5000   restore_preserved_marks_if_any();
5001 
5002   verify_overflow_empty();
5003 }
5004 
5005 ////////////////////////////////////////////////////////
5006 // Parallel Reference Processing Task Proxy Class
5007 ////////////////////////////////////////////////////////
5008 class AbstractGangTaskWOopQueues : public AbstractGangTask {
5009   OopTaskQueueSet*       _queues;
5010   ParallelTaskTerminator _terminator;
5011  public:
5012   AbstractGangTaskWOopQueues(const char* name, OopTaskQueueSet* queues, uint n_threads) :
5013     AbstractGangTask(name), _queues(queues), _terminator(n_threads, _queues) {}
5014   ParallelTaskTerminator* terminator() { return &_terminator; }
5015   OopTaskQueueSet* queues() { return _queues; }
5016 };
5017 
5018 class CMSRefProcTaskProxy: public AbstractGangTaskWOopQueues {
5019   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
5020   CMSCollector*          _collector;
5021   CMSBitMap*             _mark_bit_map;
5022   const MemRegion        _span;
5023   ProcessTask&           _task;
5024 
5025 public:
5026   CMSRefProcTaskProxy(ProcessTask&     task,
5027                       CMSCollector*    collector,
5028                       const MemRegion& span,
5029                       CMSBitMap*       mark_bit_map,
5030                       AbstractWorkGang* workers,
5031                       OopTaskQueueSet* task_queues):
5032     AbstractGangTaskWOopQueues("Process referents by policy in parallel",
5033       task_queues,
5034       workers->active_workers()),
5035     _task(task),
5036     _collector(collector), _span(span), _mark_bit_map(mark_bit_map)
5037   {
5038     assert(_collector->_span.equals(_span) && !_span.is_empty(),
5039            "Inconsistency in _span");
5040   }
5041 
5042   OopTaskQueueSet* task_queues() { return queues(); }
5043 
5044   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
5045 
5046   void do_work_steal(int i,
5047                      CMSParDrainMarkingStackClosure* drain,
5048                      CMSParKeepAliveClosure* keep_alive,
5049                      int* seed);
5050 
5051   virtual void work(uint worker_id);
5052 };
5053 
5054 void CMSRefProcTaskProxy::work(uint worker_id) {
5055   ResourceMark rm;
5056   HandleMark hm;
5057   assert(_collector->_span.equals(_span), "Inconsistency in _span");
5058   CMSParKeepAliveClosure par_keep_alive(_collector, _span,
5059                                         _mark_bit_map,
5060                                         work_queue(worker_id));
5061   CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
5062                                                  _mark_bit_map,
5063                                                  work_queue(worker_id));
5064   CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map);
5065   _task.work(worker_id, is_alive_closure, par_keep_alive, par_drain_stack);
5066   if (_task.marks_oops_alive()) {
5067     do_work_steal(worker_id, &par_drain_stack, &par_keep_alive,
5068                   _collector->hash_seed(worker_id));
5069   }
5070   assert(work_queue(worker_id)->size() == 0, "work_queue should be empty");
5071   assert(_collector->_overflow_list == NULL, "non-empty _overflow_list");
5072 }
5073 
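     // Trivial proxy: each gang worker simply runs its share of the
     // reference processor's EnqueueTask; no CMS-specific closures are
     // needed for enqueueing.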
5074 class CMSRefEnqueueTaskProxy: public AbstractGangTask {
5075   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
5076   EnqueueTask& _task;
5077 
5078 public:
5079   CMSRefEnqueueTaskProxy(EnqueueTask& task)
5080     : AbstractGangTask("Enqueue reference objects in parallel"),
5081       _task(task)
5082   { }
5083 
5084   virtual void work(uint worker_id)
5085   {
5086     _task.work(worker_id);
5087   }
5088 };
5089 
5090 CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
5091   MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue):
5092    _span(span),
5093    _bit_map(bit_map),
5094    _work_queue(work_queue),
5095    _mark_and_push(collector, span, bit_map, work_queue),
5096    _low_water_mark(MIN2((work_queue->max_elems()/4),
5097                         ((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads)))
5098 { }
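
     // Illustrative sizing only (assumes the default
     // CMSWorkQueueDrainThreshold of 10): with ParallelGCThreads == 4 the
     // low-water mark above is MIN2(max_elems/4, 40), so draining trims
     // the queue back to 40 entries, or to a quarter of its capacity,
     // whichever is smaller.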
5099 
5100 // . see if we can share work_queues with ParNew? XXX
5101 void CMSRefProcTaskProxy::do_work_steal(int i,
5102   CMSParDrainMarkingStackClosure* drain,
5103   CMSParKeepAliveClosure* keep_alive,
5104   int* seed) {
5105   OopTaskQueue* work_q = work_queue(i);
5106   NOT_PRODUCT(int num_steals = 0;)
5107   oop obj_to_scan;
5108 
5109   while (true) {
5110     // Completely finish any leftover work from (an) earlier round(s)
5111     drain->trim_queue(0);
5112     size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
5113                                          (size_t)ParGCDesiredObjsFromOverflowList);
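         // Illustrative only: assuming the default
         // ParGCDesiredObjsFromOverflowList of 20, a mostly-empty queue
         // asks the global overflow list for at most 20 objects per
         // attempt, and never for more than a quarter of the queue's
         // remaining free space.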
5114     // Now check if there's any work in the overflow list
5115     // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
5116     // only affects the number of attempts made to get work from the
5117     // overflow list and does not affect the number of workers.  Just
5118     // pass ParallelGCThreads so this behavior is unchanged.
5119     if (_collector->par_take_from_overflow_list(num_from_overflow_list,
5120                                                 work_q,
5121                                                 ParallelGCThreads)) {
5122       // Found something in global overflow list;
5123       // not yet ready to go stealing work from others.
5124       // We'd like to assert(work_q->size() != 0, ...)
5125       // because we just took work from the overflow list,
5126       // but of course we can't, since all of that might have
5127       // been already stolen from us.
5128       continue;
5129     }
5130     // Verify that we have no work before we resort to stealing
5131     assert(work_q->size() == 0, "Have work, shouldn't steal");
5132     // Try to steal from other queues that have work
5133     if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
5134       NOT_PRODUCT(num_steals++;)
5135       assert(obj_to_scan->is_oop(), "Oops, not an oop!");
5136       assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5137       // Do scanning work
5138       obj_to_scan->oop_iterate(keep_alive);
5139       // Loop around, finish this work, and try to steal some more
5140     } else if (terminator()->offer_termination()) {
5141       break;  // nirvana from the infinite cycle
5142     }
5143   }
5144   log_develop_trace(gc, task)("\t(%d: stole %d oops)", i, num_steals);
5145 }
5146 
5147 void CMSRefProcTaskExecutor::execute(ProcessTask& task)
5148 {
5149   GenCollectedHeap* gch = GenCollectedHeap::heap();
5150   WorkGang* workers = gch->workers();
5151   assert(workers != NULL, "Need parallel worker threads.");
5152   CMSRefProcTaskProxy rp_task(task, &_collector,
5153                               _collector.ref_processor()->span(),
5154                               _collector.markBitMap(),
5155                               workers, _collector.task_queues());
5156   workers->run_task(&rp_task);
5157 }
5158 
5159 void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
5160 {
5162   GenCollectedHeap* gch = GenCollectedHeap::heap();
5163   WorkGang* workers = gch->workers();
5164   assert(workers != NULL, "Need parallel worker threads.");
5165   CMSRefEnqueueTaskProxy enq_task(task);
5166   workers->run_task(&enq_task);
5167 }
5168 
5169 void CMSCollector::refProcessingWork() {
5170   ResourceMark rm;
5171   HandleMark   hm;
5172 
5173   ReferenceProcessor* rp = ref_processor();
5174   assert(rp->span().equals(_span), "Spans should be equal");
5175   assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
5176   // Process weak references.
5177   rp->setup_policy(false);
5178   verify_work_stacks_empty();
5179 
5180   CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
5181                                           &_markStack, false /* !preclean */);
5182   CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
5183                                 _span, &_markBitMap, &_markStack,
5184                                 &cmsKeepAliveClosure, false /* !preclean */);
5185   {
5186     GCTraceTime(Debug, gc, phases) t("Reference Processing", _gc_timer_cm);
5187 
5188     ReferenceProcessorStats stats;
5189     if (rp->processing_is_mt()) {
5190       // Set the degree of MT here.  If the discovery is done MT, there
5191       // may have been a different number of threads doing the discovery
5192       // and a different number of discovered lists may have Ref objects.
5193       // That is OK as long as the Reference lists are balanced (see
5194       // balance_all_queues() and balance_queues()).
5195       GenCollectedHeap* gch = GenCollectedHeap::heap();
5196       uint active_workers = ParallelGCThreads;
5197       WorkGang* workers = gch->workers();
5198       if (workers != NULL) {
5199         active_workers = workers->active_workers();
5200         // The expectation is that active_workers will have already
5201         // been set to a reasonable value.  If it has not been set,
5202         // investigate.
5203         assert(active_workers > 0, "Should have been set during scavenge");
5204       }
5205       rp->set_active_mt_degree(active_workers);
5206       CMSRefProcTaskExecutor task_executor(*this);
5207       stats = rp->process_discovered_references(&_is_alive_closure,
5208                                                 &cmsKeepAliveClosure,
5209                                                 &cmsDrainMarkingStackClosure,
5210                                                 &task_executor,
5211                                                 _gc_timer_cm);
5212     } else {
5213       stats = rp->process_discovered_references(&_is_alive_closure,
5214                                                 &cmsKeepAliveClosure,
5215                                                 &cmsDrainMarkingStackClosure,
5216                                                 NULL,
5217                                                 _gc_timer_cm);
5218     }
5219     _gc_tracer_cm->report_gc_reference_stats(stats);
5220 
5221   }
5222 
5223   // This is the point where the entire marking should have completed.
5224   verify_work_stacks_empty();
5225 
5226   if (should_unload_classes()) {
5227     {
5228       GCTraceTime(Debug, gc, phases) t("Class Unloading", _gc_timer_cm);
5229 
5230       // Unload classes and purge the SystemDictionary.
5231       bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure, _gc_timer_cm);
5232 
5233       // Unload nmethods.
5234       CodeCache::do_unloading(&_is_alive_closure, purged_class);
5235 
5236       // Prune dead klasses from subklass/sibling/implementor lists.
5237       Klass::clean_weak_klass_links(&_is_alive_closure);
5238     }
5239 
5240     {
5241       GCTraceTime(Debug, gc, phases) t("Scrub Symbol Table", _gc_timer_cm);
5242       // Clean up unreferenced symbols in symbol table.
5243       SymbolTable::unlink();
5244     }
5245 
5246     {
5247       GCTraceTime(Debug, gc, phases) t("Scrub String Table", _gc_timer_cm);
5248       // Delete entries for dead interned strings.
5249       StringTable::unlink(&_is_alive_closure);
5250     }
5251   }
5252 
5253   // Restore any preserved marks as a result of mark stack or
5254   // work queue overflow
5255   restore_preserved_marks_if_any();  // done single-threaded for now
5256 
5257   rp->set_enqueuing_is_done(true);
5258   if (rp->processing_is_mt()) {
5259     rp->balance_all_queues();
5260     CMSRefProcTaskExecutor task_executor(*this);
5261     rp->enqueue_discovered_references(&task_executor);
5262   } else {
5263     rp->enqueue_discovered_references(NULL);
5264   }
5265   rp->verify_no_references_recorded();
5266   assert(!rp->discovery_enabled(), "should have been disabled");
5267 }
5268 
5269 #ifndef PRODUCT
5270 void CMSCollector::check_correct_thread_executing() {
5271   Thread* t = Thread::current();
5272   // Only the VM thread or the CMS thread should be here.
5273   assert(t->is_ConcurrentGC_thread() || t->is_VM_thread(),
5274          "Unexpected thread type");
5275   // If this is the VM thread, the foreground collector
5276   // should not be waiting.  Note that _foregroundGCIsActive is
5277   // true while the foreground collector is waiting.
5278   if (_foregroundGCShouldWait) {
5279     // We cannot be the VM thread
5280     assert(t->is_ConcurrentGC_thread(),
5281            "Should be CMS thread");
5282   } else {
5283     // We can be the CMS thread only if we are in a stop-world
5284     // phase of CMS collection.
5285     if (t->is_ConcurrentGC_thread()) {
5286       assert(_collectorState == InitialMarking ||
5287              _collectorState == FinalMarking,
5288              "Should be a stop-world phase");
5289       // The CMS thread should be holding the CMS_token.
5290       assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5291              "Potential interference with concurrently "
5292              "executing VM thread");
5293     }
5294   }
5295 }
5296 #endif
5297 
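     // Concurrent sweep: reclaim unmarked blocks in the CMS generation,
     // then make the abstract state transition from Sweeping to Resizing
     // under the freelist lock (see the note on state transitions below).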
5298 void CMSCollector::sweep() {
5299   assert(_collectorState == Sweeping, "just checking");
5300   check_correct_thread_executing();
5301   verify_work_stacks_empty();
5302   verify_overflow_empty();
5303   increment_sweep_count();
5304   TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
5305 
5306   _inter_sweep_timer.stop();
5307   _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
5308 
5309   assert(!_intra_sweep_timer.is_active(), "Should not be active");
5310   _intra_sweep_timer.reset();
5311   _intra_sweep_timer.start();
5312   {
5313     GCTraceCPUTime tcpu;
5314     CMSPhaseAccounting pa(this, "Concurrent Sweep");
5315     // First sweep the old gen
5316     {
5317       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5318                                bitMapLock());
5319       sweepWork(_cmsGen);
5320     }
5321 
5322     // Update Universe::_heap_*_at_gc figures.
5323     // We need all the free list locks to make the abstract state
5324     // transition from Sweeping to Resizing. See detailed note
5325     // further below.
5326     {
5327       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock());
5328       // Update heap occupancy information which is used as
5329       // input to soft ref clearing policy at the next gc.
5330       Universe::update_heap_info_at_gc();
5331       _collectorState = Resizing;
5332     }
5333   }
5334   verify_work_stacks_empty();
5335   verify_overflow_empty();
5336 
5337   if (should_unload_classes()) {
5338     // Delay purge to the beginning of the next safepoint.  Metaspace::contains
5339     // requires that the virtual spaces are stable and not deleted.
5340     ClassLoaderDataGraph::set_should_purge(true);
5341   }
5342 
5343   _intra_sweep_timer.stop();
5344   _intra_sweep_estimate.sample(_intra_sweep_timer.seconds());
5345 
5346   _inter_sweep_timer.reset();
5347   _inter_sweep_timer.start();
5348 
5349   // We need to use a monotonically non-decreasing time in ms or we
5350   // will see time-warp warnings; os::javaTimeMillis() does not
5351   // guarantee monotonicity, so we derive the time from javaTimeNanos().
5352   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
5353   update_time_of_last_gc(now);
5354 
5355   // NOTE on abstract state transitions:
5356   // Mutators allocate-live and/or mark the mod-union table dirty
5357   // based on the state of the collection.  The former is done in
5358   // the interval [Marking, Sweeping] and the latter in the interval
5359   // [Marking, Sweeping).  Thus the transitions into the Marking state
5360   // and out of the Sweeping state must be synchronously visible
5361   // globally to the mutators.
5362   // The transition into the Marking state happens with the world
5363   // stopped so the mutators will globally see it.  Sweeping is
5364   // done asynchronously by the background collector so the transition
5365   // from the Sweeping state to the Resizing state must be done
5366   // under the freelistLock (as is the check for whether to
5367   // allocate-live and whether to dirty the mod-union table).
5368   assert(_collectorState == Resizing, "Change of collector state to"
5369     " Resizing must be done under the freelistLocks (plural)");
5370 
5371   // Now that sweeping has been completed, we clear
5372   // the incremental_collection_failed flag,
5373   // thus inviting a younger gen collection to promote into
5374   // this generation. If such a promotion may still fail,
5375   // the flag will be set again when a young collection is
5376   // attempted.
5377   GenCollectedHeap* gch = GenCollectedHeap::heap();
5378   gch->clear_incremental_collection_failed();  // Worth retrying as fresh space may have been freed up
5379   gch->update_full_collections_completed(_collection_count_start);
5380 }
5381 
5382 // FIX ME!!! Looks like this belongs in CFLSpace, with
5383 // CMSGen merely delegating to it.
5384 void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
5385   double nearLargestPercent = FLSLargestBlockCoalesceProximity;
5386   HeapWord*  minAddr        = _cmsSpace->bottom();
5387   HeapWord*  largestAddr    =
5388     (HeapWord*) _cmsSpace->dictionary()->find_largest_dict();
5389   if (largestAddr == NULL) {
5390     // The dictionary appears to be empty.  In this case
5391     // try to coalesce at the end of the heap.
5392     largestAddr = _cmsSpace->end();
5393   }
5394   size_t largestOffset     = pointer_delta(largestAddr, minAddr);
5395   size_t nearLargestOffset =
5396     (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
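       // Worked example (illustrative; FLSLargestBlockCoalesceProximity
       // defaults to 0.99): with the largest free block 1M HeapWords
       // above bottom(), nearLargestOffset is roughly 0.99 * 1M words
       // minus MinChunkSize, placing the coalescing target just below
       // the largest block in the hope of growing it further.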
5397   log_debug(gc, freelist)("CMS: Large Block: " PTR_FORMAT "; Proximity: " PTR_FORMAT " -> " PTR_FORMAT,
5398                           p2i(largestAddr), p2i(_cmsSpace->nearLargestChunk()), p2i(minAddr + nearLargestOffset));
5399   _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
5400 }
5401 
5402 bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
5403   return addr >= _cmsSpace->nearLargestChunk();
5404 }
5405 
5406 FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
5407   return _cmsSpace->find_chunk_at_end();
5408 }
5409 
5410 void ConcurrentMarkSweepGeneration::update_gc_stats(Generation* current_generation,
5411                                                     bool full) {
5412   // If the young generation has been collected, gather any statistics
5413   // that are of interest at this point.
5414   bool current_is_young = GenCollectedHeap::heap()->is_young_gen(current_generation);
5415   if (!full && current_is_young) {
5416     // Gather statistics on the young generation collection.
5417     collector()->stats().record_gc0_end(used());
5418   }
5419 }
5420 
5421 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* old_gen) {
5422   // We iterate over the space(s) underlying this generation,
5423   // checking the mark bit map to see if the bits corresponding
5424   // to specific blocks are marked or not. Blocks that are
5425   // marked are live and are not swept up. All remaining blocks
5426   // are swept up, with coalescing on-the-fly as we sweep up
5427   // contiguous free and/or garbage blocks:
5428   // We need to ensure that the sweeper synchronizes with allocators
5429   // and stop-the-world collectors. In particular, the following
5430   // locks are used:
5431   // . CMS token: if this is held, a stop the world collection cannot occur
5432   // . freelistLock: if this is held no allocation can occur from this
5433   //                 generation by another thread
5434   // . bitMapLock: if this is held, no other thread can access or update
5435   //               the mark bit map
5436 
5437   // Note that we need to hold the freelistLock if we use
5438   // block iterate below; else the iterator might go awry if
5439   // a mutator (or promotion) causes block contents to change
5440   // (for instance if the allocator divvies up a block).
5441   // If we hold the free list lock, for all practical purposes
5442   // young generation GC's can't occur (they'll usually need to
5443   // promote), so we might as well prevent all young generation
5444   // GC's while we do a sweeping step. For the same reason, we might
5445   // as well take the bit map lock for the entire duration
5446 
5447   // check that we hold the requisite locks
5448   assert(have_cms_token(), "Should hold cms token");
5449   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), "Should possess CMS token to sweep");
5450   assert_lock_strong(old_gen->freelistLock());
5451   assert_lock_strong(bitMapLock());
5452 
5453   assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context");
5454   assert(_intra_sweep_timer.is_active(),  "Was switched on  in an outer context");
5455   old_gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
5456                                           _inter_sweep_estimate.padded_average(),
5457                                           _intra_sweep_estimate.padded_average());
5458   old_gen->setNearLargestChunk();
5459 
5460   {
5461     SweepClosure sweepClosure(this, old_gen, &_markBitMap, CMSYield);
5462     old_gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
5463     // We need to free-up/coalesce garbage/blocks from a
5464     // co-terminal free run. This is done in the SweepClosure
5465     // destructor; so, do not remove this scope, else the
5466     // end-of-sweep-census below will be off by a little bit.
5467   }
5468   old_gen->cmsSpace()->sweep_completed();
5469   old_gen->cmsSpace()->endSweepFLCensus(sweep_count());
5470   if (should_unload_classes()) {                // unloaded classes this cycle,
5471     _concurrent_cycles_since_last_unload = 0;   // ... reset count
5472   } else {                                      // did not unload classes,
5473     _concurrent_cycles_since_last_unload++;     // ... increment count
5474   }
5475 }
5476 
5477 // Reset CMS data structures (for now just the marking bit map)
5478 // preparatory for the next cycle.
5479 void CMSCollector::reset_concurrent() {
5480   CMSTokenSyncWithLocks ts(true, bitMapLock());
5481 
5482   // If the state is not "Resetting", the foreground thread
5483   // has already done the collection and the resetting.
5484   if (_collectorState != Resetting) {
5485     assert(_collectorState == Idling, "The state should only change"
5486       " because the foreground collector has finished the collection");
5487     return;
5488   }
5489 
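       // The bitmap is cleared in chunks of at most CMSBitMapYieldQuantum
       // words (10M by default; illustrative) so that the loop below can
       // give up the bitMapLock between chunks if a foreground collection
       // wants to run.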
5490   {
5491     // Clear the mark bitmap (no grey objects to start with)
5492     // for the next cycle.
5493     GCTraceCPUTime tcpu;
5494     CMSPhaseAccounting cmspa(this, "Concurrent Reset");
5495 
5496     HeapWord* curAddr = _markBitMap.startWord();
5497     while (curAddr < _markBitMap.endWord()) {
5498       size_t remaining  = pointer_delta(_markBitMap.endWord(), curAddr);
5499       MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
5500       _markBitMap.clear_large_range(chunk);
5501       if (ConcurrentMarkSweepThread::should_yield() &&
5502           !foregroundGCIsActive() &&
5503           CMSYield) {
5504         assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5505                "CMS thread should hold CMS token");
5506         assert_lock_strong(bitMapLock());
5507         bitMapLock()->unlock();
5508         ConcurrentMarkSweepThread::desynchronize(true);
5509         stopTimer();
5510         incrementYields();
5511 
5512         // See the comment in coordinator_yield()
5513         for (unsigned i = 0; i < CMSYieldSleepCount &&
5514                          ConcurrentMarkSweepThread::should_yield() &&
5515                          !CMSCollector::foregroundGCIsActive(); ++i) {
5516           os::sleep(Thread::current(), 1, false);
5517         }
5518 
5519         ConcurrentMarkSweepThread::synchronize(true);
5520         bitMapLock()->lock_without_safepoint_check();
5521         startTimer();
5522       }
5523       curAddr = chunk.end();
5524     }
5525     // A successful mostly concurrent collection has been done.
5526     // Because only the full (i.e., concurrent mode failure) collections
5527     // are being measured for gc overhead limits, clear the "near" flag
5528     // and count.
5529     size_policy()->reset_gc_overhead_limit_count();
5530     _collectorState = Idling;
5531   }
5532 
5533   register_gc_end();
5534 }
5535 
5536 // Same as above but for STW paths
5537 void CMSCollector::reset_stw() {
5538   // already have the lock
5539   assert(_collectorState == Resetting, "just checking");
5540   assert_lock_strong(bitMapLock());
5541   GCIdMarkAndRestore gc_id_mark(_cmsThread->gc_id());
5542   _markBitMap.clear_all();
5543   _collectorState = Idling;
5544   register_gc_end();
5545 }
5546 
5547 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
5548   GCTraceCPUTime tcpu;
5549   TraceCollectorStats tcs(counters());
5550 
5551   switch (op) {
5552     case CMS_op_checkpointRootsInitial: {
5553       GCTraceTime(Info, gc) t("Pause Initial Mark", NULL, GCCause::_no_gc, true);
5554       SvcGCMarker sgcm(SvcGCMarker::OTHER);
5555       checkpointRootsInitial();
5556       break;
5557     }
5558     case CMS_op_checkpointRootsFinal: {
5559       GCTraceTime(Info, gc) t("Pause Remark", NULL, GCCause::_no_gc, true);
5560       SvcGCMarker sgcm(SvcGCMarker::OTHER);
5561       checkpointRootsFinal();
5562       break;
5563     }
5564     default:
5565       fatal("No such CMS_op");
5566   }
5567 }
5568 
5569 #ifndef PRODUCT
5570 size_t const CMSCollector::skip_header_HeapWords() {
5571   return FreeChunk::header_size();
5572 }
5573 
5574 // Try and collect here conditions that should hold when
5575 // CMS thread is exiting. The idea is that the foreground GC
5576 // thread should not be blocked if it wants to terminate
5577 // the CMS thread and yet continue to run the VM for a while
5578 // after that.
5579 void CMSCollector::verify_ok_to_terminate() const {
5580   assert(Thread::current()->is_ConcurrentGC_thread(),
5581          "should be called by CMS thread");
5582   assert(!_foregroundGCShouldWait, "should be false");
5583   // We could check here that all the various low-level locks
5584   // are not held by the CMS thread, but that is overkill; see
5585   // also CMSThread::verify_ok_to_terminate() where the CGC_lock
5586   // is checked.
5587 }
5588 #endif
5589 
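     // Background on the scheme used here: when CMS allocates or promotes
     // an object whose klass pointer is not yet installed, it sets mark
     // bits at addr, addr + 1 and addr + size - 1; the two extra "P-bits"
     // let us recover the block size before the object is initialized.
     // Illustrative example: for a 5-word block at addr, bits are set at
     // addr, addr+1 and addr+4; getNextMarkedWordAddress(addr + 2) finds
     // addr+4, and pointer_delta(addr+4 + 1, addr) == 5. A block must
     // span at least 3 words for the two P-bits to be unambiguous, hence
     // the size assert below.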
5590 size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const {
5591   assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1),
5592          "missing Printezis mark?");
5593   HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
5594   size_t size = pointer_delta(nextOneAddr + 1, addr);
5595   assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
5596          "alignment problem");
5597   assert(size >= 3, "Necessary for Printezis marks to work");
5598   return size;
5599 }
5600 
5601 // A variant of the above (block_size_using_printezis_bits()) except
5602 // that we return 0 if the P-bits are not yet set.
5603 size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const {
5604   if (_markBitMap.isMarked(addr + 1)) {
5605     assert(_markBitMap.isMarked(addr), "P-bit can be set only for marked objects");
5606     HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
5607     size_t size = pointer_delta(nextOneAddr + 1, addr);
5608     assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
5609            "alignment problem");
5610     assert(size >= 3, "Necessary for Printezis marks to work");
5611     return size;
5612   }
5613   return 0;
5614 }
5615 
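     // Worked example (illustrative; assumes the usual 512-byte cards and
     // 8-byte HeapWords, i.e. 64 words per card): a 10-word block starting
     // at word offset 60 ends at word offset 70, so the next card boundary
     // returned is at word offset 128.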
5616 HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
5617   size_t sz = 0;
5618   oop p = (oop)addr;
5619   if (p->klass_or_null_acquire() != NULL) {
5620     sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
5621   } else {
5622     sz = block_size_using_printezis_bits(addr);
5623   }
5624   assert(sz > 0, "size must be nonzero");
5625   HeapWord* next_block = addr + sz;
5626   HeapWord* next_card  = align_up(next_block, CardTableModRefBS::card_size);
5627   assert(align_down((uintptr_t)addr,      CardTableModRefBS::card_size) <
5628          align_down((uintptr_t)next_card, CardTableModRefBS::card_size),
5629          "must be different cards");
5630   return next_card;
5631 }
5632 
5633 
5634 // CMS Bit Map Wrapper /////////////////////////////////////////
5635 
5636 // Construct a CMS bit map infrastructure, but don't create the
5637 // bit vector itself. That is done by a separate call CMSBitMap::allocate()
5638 // further below.
5639 CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
5640   _bm(),
5641   _shifter(shifter),
5642   _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true,
5643                                     Monitor::_safepoint_check_sometimes) : NULL)
5644 {
5645   _bmStartWord = 0;
5646   _bmWordSize  = 0;
5647 }
5648 
5649 bool CMSBitMap::allocate(MemRegion mr) {
5650   _bmStartWord = mr.start();
5651   _bmWordSize  = mr.word_size();
5652   ReservedSpace brs(ReservedSpace::allocation_align_size_up(
5653                      (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
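       // Sizing sketch (illustrative): with _shifter == 0 each bit covers
       // one HeapWord, so covering a 1 GB generation (128M words on a
       // 64-bit VM) takes 128M bits == 16 MB of backing store, which is
       // what the shift expression above computes, in bytes.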
5654   if (!brs.is_reserved()) {
5655     log_warning(gc)("CMS bit map allocation failure");
5656     return false;
5657   }
5658   // For now we'll just commit all of the bit map up front.
5659   // Later on we'll try to be more parsimonious with swap.
5660   if (!_virtual_space.initialize(brs, brs.size())) {
5661     log_warning(gc)("CMS bit map backing store failure");
5662     return false;
5663   }
5664   assert(_virtual_space.committed_size() == brs.size(),
5665          "didn't reserve backing store for all of CMS bit map?");
5666   assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
5667          _bmWordSize, "inconsistency in bit map sizing");
5668   _bm = BitMapView((BitMap::bm_word_t*)_virtual_space.low(), _bmWordSize >> _shifter);
5669 
5670   // bm.clear(); // can we rely on getting zero'd memory? verify below
5671   assert(isAllClear(),
5672          "Expected zero'd memory from ReservedSpace constructor");
5673   assert(_bm.size() == heapWordDiffToOffsetDiff(sizeInWords()),
5674          "consistency check");
5675   return true;
5676 }
5677 
5678 void CMSBitMap::dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl) {
5679   HeapWord *next_addr, *end_addr, *last_addr;
5680   assert_locked();
5681   assert(covers(mr), "out-of-range error");
5682   // XXX assert that start and end are appropriately aligned
5683   for (next_addr = mr.start(), end_addr = mr.end();
5684        next_addr < end_addr; next_addr = last_addr) {
5685     MemRegion dirty_region = getAndClearMarkedRegion(next_addr, end_addr);
5686     last_addr = dirty_region.end();
5687     if (!dirty_region.is_empty()) {
5688       cl->do_MemRegion(dirty_region);
5689     } else {
5690       assert(last_addr == end_addr, "program logic");
5691       return;
5692     }
5693   }
5694 }
5695 
5696 void CMSBitMap::print_on_error(outputStream* st, const char* prefix) const {
5697   _bm.print_on_error(st, prefix);
5698 }
5699 
5700 #ifndef PRODUCT
5701 void CMSBitMap::assert_locked() const {
5702   CMSLockVerifier::assert_locked(lock());
5703 }
5704 
5705 bool CMSBitMap::covers(MemRegion mr) const {
5706   // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
5707   assert((size_t)_bm.size() == (_bmWordSize >> _shifter),
5708          "size inconsistency");
5709   return (mr.start() >= _bmStartWord) &&
5710          (mr.end()   <= endWord());
5711 }
5712 
5713 bool CMSBitMap::covers(HeapWord* start, size_t size) const {
5714   return (start >= _bmStartWord && (start + size) <= endWord());
5715 }
5716 
5717 void CMSBitMap::verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) {
5718   // verify that there are no 1 bits in the interval [left, right)
5719   FalseBitMapClosure falseBitMapClosure;
5720   iterate(&falseBitMapClosure, left, right);
5721 }
5722 
5723 void CMSBitMap::region_invariant(MemRegion mr)
5724 {
5725   assert_locked();
5726   // mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
5727   assert(!mr.is_empty(), "unexpected empty region");
5728   assert(covers(mr), "mr should be covered by bit map");
5729   // convert address range into offset range
5730   size_t start_ofs = heapWordToOffset(mr.start());
5731   // Make sure that end() is appropriately aligned
5732   assert(mr.end() == align_up(mr.end(), (1 << (_shifter+LogHeapWordSize))),
5733          "Misaligned mr.end()");
5734   size_t end_ofs   = heapWordToOffset(mr.end());
5735   assert(end_ofs > start_ofs, "Should mark at least one bit");
5736 }
5737 
5738 #endif
5739 
5740 bool CMSMarkStack::allocate(size_t size) {
5741   // allocate a stack of the requisite depth
5742   ReservedSpace rs(ReservedSpace::allocation_align_size_up(
5743                    size * sizeof(oop)));
5744   if (!rs.is_reserved()) {
5745     log_warning(gc)("CMSMarkStack allocation failure");
5746     return false;
5747   }
5748   if (!_virtual_space.initialize(rs, rs.size())) {
5749     log_warning(gc)("CMSMarkStack backing store failure");
5750     return false;
5751   }
5752   assert(_virtual_space.committed_size() == rs.size(),
5753          "didn't reserve backing store for all of CMS stack?");
5754   _base = (oop*)(_virtual_space.low());
5755   _index = 0;
5756   _capacity = size;
5757   NOT_PRODUCT(_max_depth = 0);
5758   return true;
5759 }
5760 
5761 // XXX FIX ME !!! In the MT case we come in here holding a
5762 // leaf lock. For printing we need to take a further lock
5763 // which has lower rank. We need to recalibrate the two
5764 // lock-ranks involved in order to be able to print the
5765 // messages below. (Or defer the printing to the caller.
5766 // For now we take the expedient path of just disabling the
5767 // messages for the problematic case.)
5768 void CMSMarkStack::expand() {
5769   assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted");
5770   if (_capacity == MarkStackSizeMax) {
5771     if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled) {
5772       // We print a warning message only once per CMS cycle.
5773       log_debug(gc)(" (benign) Hit CMSMarkStack max size limit");
5774     }
5775     return;
5776   }
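       // Doubling sketch (illustrative, with 8-byte oops): a 512K-entry
       // stack grows to MIN2(1M, MarkStackSizeMax) entries; the 8 MB
       // reservation below is attempted while the old 4 MB backing store
       // is still held, and only on success is the old store released.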
5777   // Double capacity if possible
5778   size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax);
5779   // Do not give up existing stack until we have managed to
5780   // get the double capacity that we desired.
5781   ReservedSpace rs(ReservedSpace::allocation_align_size_up(
5782                    new_capacity * sizeof(oop)));
5783   if (rs.is_reserved()) {
5784     // Release the backing store associated with old stack
5785     _virtual_space.release();
5786     // Reinitialize virtual space for new stack
5787     if (!_virtual_space.initialize(rs, rs.size())) {
5788       fatal("Not enough swap for expanded marking stack");
5789     }
5790     _base = (oop*)(_virtual_space.low());
5791     _index = 0;
5792     _capacity = new_capacity;
5793   } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled) {
5794     // Failed to double capacity, continue;
5795     // we print a detail message only once per CMS cycle.
5796     log_debug(gc)(" (benign) Failed to expand marking stack from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
5797                         _capacity / K, new_capacity / K);
5798   }
5799 }
5800 
5801 
5802 // Closures
5803 // XXX: there seems to be a lot of code duplication here;
5804 // should refactor and consolidate common code.
5805 
5806 // This closure is used to mark refs into the CMS generation in
5807 // the CMS bit map. Called at the first checkpoint. This closure
5808 // assumes that we do not need to re-mark dirty cards; if the CMS
5809 // generation on which this is used is not the oldest
5810 // generation then this will lose younger_gen cards!
5811 
5812 MarkRefsIntoClosure::MarkRefsIntoClosure(
5813   MemRegion span, CMSBitMap* bitMap):
5814     _span(span),
5815     _bitMap(bitMap)
5816 {
5817   assert(ref_processor() == NULL, "deliberately left NULL");
5818   assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
5819 }
5820 
5821 void MarkRefsIntoClosure::do_oop(oop obj) {
5822   // if obj points into _span, then mark the corresponding bit in _bitMap
5823   assert(obj->is_oop(), "expected an oop");
5824   HeapWord* addr = (HeapWord*)obj;
5825   if (_span.contains(addr)) {
5826     // this should be made more efficient
5827     _bitMap->mark(addr);
5828   }
5829 }
5830 
5831 void MarkRefsIntoClosure::do_oop(oop* p)       { MarkRefsIntoClosure::do_oop_work(p); }
5832 void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
5833 
5834 ParMarkRefsIntoClosure::ParMarkRefsIntoClosure(
5835   MemRegion span, CMSBitMap* bitMap):
5836     _span(span),
5837     _bitMap(bitMap)
5838 {
5839   assert(ref_processor() == NULL, "deliberately left NULL");
5840   assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
5841 }
5842 
5843 void ParMarkRefsIntoClosure::do_oop(oop obj) {
5844   // if obj points into _span, then mark the corresponding bit in _bitMap
5845   assert(obj->is_oop(), "expected an oop");
5846   HeapWord* addr = (HeapWord*)obj;
5847   if (_span.contains(addr)) {
5848     // this should be made more efficient
5849     _bitMap->par_mark(addr);
5850   }
5851 }
5852 
5853 void ParMarkRefsIntoClosure::do_oop(oop* p)       { ParMarkRefsIntoClosure::do_oop_work(p); }
5854 void ParMarkRefsIntoClosure::do_oop(narrowOop* p) { ParMarkRefsIntoClosure::do_oop_work(p); }
5855 
5856 // A variant of the above, used for CMS marking verification.
5857 MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
5858   MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
5859     _span(span),
5860     _verification_bm(verification_bm),
5861     _cms_bm(cms_bm)
5862 {
5863   assert(ref_processor() == NULL, "deliberately left NULL");
5864   assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
5865 }
5866 
5867 void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
5868   // if obj points into _span, then mark the corresponding bit in _verification_bm
5869   assert(obj->is_oop(), "expected an oop");
5870   HeapWord* addr = (HeapWord*)obj;
5871   if (_span.contains(addr)) {
5872     _verification_bm->mark(addr);
5873     if (!_cms_bm->isMarked(addr)) {
5874       Log(gc, verify) log;
5875       ResourceMark rm;
5876       oop(addr)->print_on(log.error_stream());
5877       log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
5878       fatal("... aborting");
5879     }
5880   }
5881 }
5882 
5883 void MarkRefsIntoVerifyClosure::do_oop(oop* p)       { MarkRefsIntoVerifyClosure::do_oop_work(p); }
5884 void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
5885 
5886 //////////////////////////////////////////////////
5887 // MarkRefsIntoAndScanClosure
5888 //////////////////////////////////////////////////
5889 
5890 MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
5891                                                        ReferenceProcessor* rp,
5892                                                        CMSBitMap* bit_map,
5893                                                        CMSBitMap* mod_union_table,
5894                                                        CMSMarkStack*  mark_stack,
5895                                                        CMSCollector* collector,
5896                                                        bool should_yield,
5897                                                        bool concurrent_precleaning):
5898   _collector(collector),
5899   _span(span),
5900   _bit_map(bit_map),
5901   _mark_stack(mark_stack),
5902   _pushAndMarkClosure(collector, span, rp, bit_map, mod_union_table,
5903                       mark_stack, concurrent_precleaning),
5904   _yield(should_yield),
5905   _concurrent_precleaning(concurrent_precleaning),
5906   _freelistLock(NULL)
5907 {
5908   // FIXME: Should initialize in base class constructor.
5909   assert(rp != NULL, "ref_processor shouldn't be NULL");
5910   set_ref_processor_internal(rp);
5911 }
5912 
5913 // This closure is used to mark refs into the CMS generation at the
5914 // second (final) checkpoint, and to scan and transitively follow
5915 // the unmarked oops. It is also used during the concurrent precleaning
5916 // phase while scanning objects on dirty cards in the CMS generation.
5917 // The marks are made in the marking bit map and the marking stack is
5918 // used for keeping the (newly) grey objects during the scan.
5919 // The parallel version (ParMarkRefsIntoAndScanClosure) appears further below.
5920 void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
5921   if (obj != NULL) {
5922     assert(obj->is_oop(), "expected an oop");
5923     HeapWord* addr = (HeapWord*)obj;
5924     assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
5925     assert(_collector->overflow_list_is_empty(),
5926            "overflow list should be empty");
5927     if (_span.contains(addr) &&
5928         !_bit_map->isMarked(addr)) {
5929       // mark bit map (object is now grey)
5930       _bit_map->mark(addr);
5931       // push on marking stack (stack should be empty), and drain the
5932       // stack by applying this closure to the oops in the oops popped
5933       // from the stack (i.e. blacken the grey objects)
5934       bool res = _mark_stack->push(obj);
5935       assert(res, "Should have space to push on empty stack");
5936       do {
5937         oop new_oop = _mark_stack->pop();
5938         assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
5939         assert(_bit_map->isMarked((HeapWord*)new_oop),
5940                "only grey objects on this stack");
5941         // iterate over the oops in this oop, marking and pushing
5942         // the ones in CMS heap (i.e. in _span).
5943         new_oop->oop_iterate(&_pushAndMarkClosure);
5944         // check if it's time to yield
5945         do_yield_check();
5946       } while (!_mark_stack->isEmpty() ||
5947                (!_concurrent_precleaning && take_from_overflow_list()));
5948         // if marking stack is empty, and we are not doing this
5949         // during precleaning, then check the overflow list
5950     }
5951     assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
5952     assert(_collector->overflow_list_is_empty(),
5953            "overflow list was drained above");
5954 
5955     assert(_collector->no_preserved_marks(),
5956            "All preserved marks should have been restored above");
5957   }
5958 }
5959 
5960 void MarkRefsIntoAndScanClosure::do_oop(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
5961 void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
5962 
5963 void MarkRefsIntoAndScanClosure::do_yield_work() {
5964   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5965          "CMS thread should hold CMS token");
5966   assert_lock_strong(_freelistLock);
5967   assert_lock_strong(_bit_map->lock());
5968   // relinquish the free_list_lock and bitMaplock()
5969   _bit_map->lock()->unlock();
5970   _freelistLock->unlock();
5971   ConcurrentMarkSweepThread::desynchronize(true);
5972   _collector->stopTimer();
5973   _collector->incrementYields();
5974 
5975   // See the comment in coordinator_yield()
5976   for (unsigned i = 0;
5977        i < CMSYieldSleepCount &&
5978        ConcurrentMarkSweepThread::should_yield() &&
5979        !CMSCollector::foregroundGCIsActive();
5980        ++i) {
5981     os::sleep(Thread::current(), 1, false);
5982   }
5983 
5984   ConcurrentMarkSweepThread::synchronize(true);
5985   _freelistLock->lock_without_safepoint_check();
5986   _bit_map->lock()->lock_without_safepoint_check();
5987   _collector->startTimer();
5988 }
5989 
5990 ///////////////////////////////////////////////////////////
5991 // ParMarkRefsIntoAndScanClosure: a parallel version of
5992 //                                MarkRefsIntoAndScanClosure
5993 ///////////////////////////////////////////////////////////
5994 ParMarkRefsIntoAndScanClosure::ParMarkRefsIntoAndScanClosure(
5995   CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
5996   CMSBitMap* bit_map, OopTaskQueue* work_queue):
5997   _span(span),
5998   _bit_map(bit_map),
5999   _work_queue(work_queue),
6000   _low_water_mark(MIN2((work_queue->max_elems()/4),
6001                        ((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads))),
6002   _parPushAndMarkClosure(collector, span, rp, bit_map, work_queue)
6003 {
6004   // FIXME: Should initialize in base class constructor.
6005   assert(rp != NULL, "ref_processor shouldn't be NULL");
6006   set_ref_processor_internal(rp);
6007 }
6008 
6009 // This closure is used to mark refs into the CMS generation at the
6010 // second (final) checkpoint, and to scan and transitively follow
6011 // the unmarked oops. The marks are made in the marking bit map and
6012 // the work_queue is used for keeping the (newly) grey objects during
6013 // the scan phase whence they are also available for stealing by parallel
6014 // threads. Since the marking bit map is shared, updates are
6015 // synchronized (via CAS).
6016 void ParMarkRefsIntoAndScanClosure::do_oop(oop obj) {
6017   if (obj != NULL) {
6018     // Ignore mark word because this could be an already marked oop
6019     // that may be chained at the end of the overflow list.
6020     assert(obj->is_oop(true), "expected an oop");
6021     HeapWord* addr = (HeapWord*)obj;
6022     if (_span.contains(addr) &&
6023         !_bit_map->isMarked(addr)) {
6024       // mark bit map (object will become grey):
6025       // It is possible for several threads to be
6026       // trying to "claim" this object concurrently;
6027       // the unique thread that succeeds in marking the
6028       // object first will do the subsequent push on
6029       // to the work queue (or overflow list).
6030       if (_bit_map->par_mark(addr)) {
6031         // push on work_queue (which may not be empty), and trim the
6032         // queue to an appropriate length by applying this closure to
6033         // the oops in the oops popped from the stack (i.e. blacken the
6034         // grey objects)
6035         bool res = _work_queue->push(obj);
6036         assert(res, "Low water mark should be less than capacity?");
6037         trim_queue(_low_water_mark);
6038       } // Else, another thread claimed the object
6039     }
6040   }
6041 }
6042 
6043 void ParMarkRefsIntoAndScanClosure::do_oop(oop* p)       { ParMarkRefsIntoAndScanClosure::do_oop_work(p); }
6044 void ParMarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { ParMarkRefsIntoAndScanClosure::do_oop_work(p); }
6045 
6046 // This closure is used to rescan the marked objects on the dirty cards
6047 // in the mod union table and the card table proper.
6048 size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
6049   oop p, MemRegion mr) {
6050 
6051   size_t size = 0;
6052   HeapWord* addr = (HeapWord*)p;
6053   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6054   assert(_span.contains(addr), "we are scanning the CMS generation");
6055   // check if it's time to yield
6056   if (do_yield_check()) {
6057     // We yielded for some foreground stop-world work,
6058     // and we have been asked to abort this ongoing preclean cycle.
6059     return 0;
6060   }
6061   if (_bitMap->isMarked(addr)) {
6062     // it's marked; is it potentially uninitialized?
6063     if (p->klass_or_null_acquire() != NULL) {
6064         // an initialized object; ignore mark word in verification below
6065         // since we are running concurrent with mutators
6066         assert(p->is_oop(true), "should be an oop");
6067         if (p->is_objArray()) {
6068           // objArrays are precisely marked; restrict scanning
6069           // to dirty cards only.
6070           size = CompactibleFreeListSpace::adjustObjectSize(
6071                    p->oop_iterate_size(_scanningClosure, mr));
6072         } else {
6073           // A non-array may have been imprecisely marked; we need
6074           // to scan object in its entirety.
6075           size = CompactibleFreeListSpace::adjustObjectSize(
6076                    p->oop_iterate_size(_scanningClosure));
6077         }
6078       #ifdef ASSERT
6079         size_t direct_size =
6080           CompactibleFreeListSpace::adjustObjectSize(p->size());
6081         assert(size == direct_size, "Inconsistency in size");
6082         assert(size >= 3, "Necessary for Printezis marks to work");
6083         HeapWord* start_pbit = addr + 1;
6084         HeapWord* end_pbit = addr + size - 1;
6085         assert(_bitMap->isMarked(start_pbit) == _bitMap->isMarked(end_pbit),
6086                "inconsistent Printezis mark");
6087         // Verify inner mark bits (between Printezis bits) are clear,
6088         // but don't repeat if there are multiple dirty regions for
6089         // the same object, to avoid potential O(N^2) performance.
6090         if (addr != _last_scanned_object) {
6091           _bitMap->verifyNoOneBitsInRange(start_pbit + 1, end_pbit);
6092           _last_scanned_object = addr;
6093         }
6094       #endif // ASSERT
6095     } else {
6096       // An uninitialized object.
6097       assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
6098       HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
6099       size = pointer_delta(nextOneAddr + 1, addr);
6100       assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6101              "alignment problem");
6102       // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
6103       // will dirty the card when the klass pointer is installed in the
6104       // object (signaling the completion of initialization).
6105     }
6106   } else {
6107     // Either a not yet marked object or an uninitialized object
6108     if (p->klass_or_null_acquire() == NULL) {
6109       // An uninitialized object, skip to the next card, since
6110       // we may not be able to read its P-bits yet.
6111       assert(size == 0, "Initial value");
6112     } else {
6113       // An object not (yet) reached by marking: we merely need to
6114       // compute its size so as to go look at the next block.
6115       assert(p->is_oop(true), "should be an oop");
6116       size = CompactibleFreeListSpace::adjustObjectSize(p->size());
6117     }
6118   }
6119   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6120   return size;
6121 }
6122 
6123 void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
6124   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6125          "CMS thread should hold CMS token");
6126   assert_lock_strong(_freelistLock);
6127   assert_lock_strong(_bitMap->lock());
6128   // relinquish the free_list_lock and bitMaplock()
6129   _bitMap->lock()->unlock();
6130   _freelistLock->unlock();
6131   ConcurrentMarkSweepThread::desynchronize(true);
6132   _collector->stopTimer();
6133   _collector->incrementYields();
6134 
6135   // See the comment in coordinator_yield()
6136   for (unsigned i = 0; i < CMSYieldSleepCount &&
6137                    ConcurrentMarkSweepThread::should_yield() &&
6138                    !CMSCollector::foregroundGCIsActive(); ++i) {
6139     os::sleep(Thread::current(), 1, false);
6140   }
6141 
6142   ConcurrentMarkSweepThread::synchronize(true);
6143   _freelistLock->lock_without_safepoint_check();
6144   _bitMap->lock()->lock_without_safepoint_check();
6145   _collector->startTimer();
6146 }
6147 
6148 
6149 //////////////////////////////////////////////////////////////////
6150 // SurvivorSpacePrecleanClosure
6151 //////////////////////////////////////////////////////////////////
6152 // This (single-threaded) closure is used to preclean the oops in
6153 // the survivor spaces.
6154 size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
6155 
6156   HeapWord* addr = (HeapWord*)p;
6157   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6158   assert(!_span.contains(addr), "we are scanning the survivor spaces");
6159   assert(p->klass_or_null() != NULL, "object should be initialized");
6160   // an initialized object; ignore mark word in verification below
6161   // since we are running concurrent with mutators
6162   assert(p->is_oop(true), "should be an oop");
6163   // Note that we do not yield while we iterate over
6164   // the interior oops of p, pushing the relevant ones
6165   // on our marking stack.
6166   size_t size = p->oop_iterate_size(_scanning_closure);
6167   do_yield_check();
6168   // Observe that below, we do not abandon the preclean
6169   // phase as soon as we should; rather we empty the
6170   // marking stack before returning. This is to satisfy
6171   // some existing assertions. In general, it may be a
6172   // good idea to abort immediately and complete the marking
6173   // from the grey objects at a later time.
6174   while (!_mark_stack->isEmpty()) {
6175     oop new_oop = _mark_stack->pop();
6176     assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
6177     assert(_bit_map->isMarked((HeapWord*)new_oop),
6178            "only grey objects on this stack");
6179     // iterate over the oops in this oop, marking and pushing
6180     // the ones in CMS heap (i.e. in _span).
6181     new_oop->oop_iterate(_scanning_closure);
6182     // check if it's time to yield
6183     do_yield_check();
6184   }
6185   unsigned int after_count =
6186     GenCollectedHeap::heap()->total_collections();
6187   bool abort = (_before_count != after_count) ||
6188                _collector->should_abort_preclean();
6189   return abort ? 0 : size;
6190 }
6191 
6192 void SurvivorSpacePrecleanClosure::do_yield_work() {
6193   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6194          "CMS thread should hold CMS token");
6195   assert_lock_strong(_bit_map->lock());
6196   // Relinquish the bit map lock
6197   _bit_map->lock()->unlock();
6198   ConcurrentMarkSweepThread::desynchronize(true);
6199   _collector->stopTimer();
6200   _collector->incrementYields();
6201 
6202   // See the comment in coordinator_yield()
6203   for (unsigned i = 0; i < CMSYieldSleepCount &&
6204                        ConcurrentMarkSweepThread::should_yield() &&
6205                        !CMSCollector::foregroundGCIsActive(); ++i) {
6206     os::sleep(Thread::current(), 1, false);
6207   }
6208 
6209   ConcurrentMarkSweepThread::synchronize(true);
6210   _bit_map->lock()->lock_without_safepoint_check();
6211   _collector->startTimer();
6212 }
6213 
6214 // This closure is used to rescan the marked objects on the dirty cards
6215 // in the mod union table and the card table proper. In the parallel
6216 // case, although the bitMap is shared, we do a single read so the
6217 // isMarked() query is "safe".
6218 bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
6219   // Ignore mark word because we are running concurrent with mutators
6220   assert(p->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(p));
6221   HeapWord* addr = (HeapWord*)p;
6222   assert(_span.contains(addr), "we are scanning the CMS generation");
6223   bool is_obj_array = false;
6224   #ifdef ASSERT
6225     if (!_parallel) {
6226       assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
6227       assert(_collector->overflow_list_is_empty(),
6228              "overflow list should be empty");
6229 
6230     }
6231   #endif // ASSERT
6232   if (_bit_map->isMarked(addr)) {
6233     // Obj arrays are precisely marked, non-arrays are not;
6234     // so we scan objArrays precisely and non-arrays in their
6235     // entirety.
6236     if (p->is_objArray()) {
6237       is_obj_array = true;
6238       if (_parallel) {
6239         p->oop_iterate(_par_scan_closure, mr);
6240       } else {
6241         p->oop_iterate(_scan_closure, mr);
6242       }
6243     } else {
6244       if (_parallel) {
6245         p->oop_iterate(_par_scan_closure);
6246       } else {
6247         p->oop_iterate(_scan_closure);
6248       }
6249     }
6250   }
6251   #ifdef ASSERT
6252     if (!_parallel) {
6253       assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
6254       assert(_collector->overflow_list_is_empty(),
6255              "overflow list should be empty");
6256 
6257     }
6258   #endif // ASSERT
6259   return is_obj_array;
6260 }
6261 
6262 MarkFromRootsClosure::MarkFromRootsClosure(CMSCollector* collector,
6263                         MemRegion span,
6264                         CMSBitMap* bitMap, CMSMarkStack*  markStack,
6265                         bool should_yield, bool verifying):
6266   _collector(collector),
6267   _span(span),
6268   _bitMap(bitMap),
6269   _mut(&collector->_modUnionTable),
6270   _markStack(markStack),
6271   _yield(should_yield),
6272   _skipBits(0)
6273 {
6274   assert(_markStack->isEmpty(), "stack should be empty");
6275   _finger = _bitMap->startWord();
6276   _threshold = _finger;
6277   assert(_collector->_restart_addr == NULL, "Sanity check");
6278   assert(_span.contains(_finger), "Out of bounds _finger?");
6279   DEBUG_ONLY(_verifying = verifying;)
6280 }
6281 
6282 void MarkFromRootsClosure::reset(HeapWord* addr) {
6283   assert(_markStack->isEmpty(), "would cause duplicates on stack");
6284   assert(_span.contains(addr), "Out of bounds _finger?");
6285   _finger = addr;
6286   _threshold = align_up(_finger, CardTableModRefBS::card_size);
6287 }
6288 
6289 // Should revisit to see if this should be restructured for
6290 // greater efficiency.
6291 bool MarkFromRootsClosure::do_bit(size_t offset) {
6292   if (_skipBits > 0) {
6293     _skipBits--;
6294     return true;
6295   }
6296   // convert offset into a HeapWord*
6297   HeapWord* addr = _bitMap->startWord() + offset;
6298   assert(_bitMap->endWord() && addr < _bitMap->endWord(),
6299          "address out of range");
6300   assert(_bitMap->isMarked(addr), "tautology");
6301   if (_bitMap->isMarked(addr+1)) {
6302     // this is an allocated but not yet initialized object
6303     assert(_skipBits == 0, "tautology");
6304     _skipBits = 2;  // skip next two marked bits ("Printezis-marks")
6305     oop p = oop(addr);
6306     if (p->klass_or_null_acquire() == NULL) {
6307       DEBUG_ONLY(if (!_verifying) {)
6308         // We re-dirty the cards on which this object lies and increase
6309         // the _threshold so that we'll come back to scan this object
6310         // during the preclean or remark phase. (CMSCleanOnEnter)
6311         if (CMSCleanOnEnter) {
6312           size_t sz = _collector->block_size_using_printezis_bits(addr);
6313           HeapWord* end_card_addr = align_up(addr + sz, CardTableModRefBS::card_size);
6314           MemRegion redirty_range = MemRegion(addr, end_card_addr);
6315           assert(!redirty_range.is_empty(), "Arithmetical tautology");
6316           // Bump _threshold to end_card_addr; note that
6317           // _threshold cannot possibly exceed end_card_addr, anyhow.
6318           // This prevents future clearing of the card as the scan proceeds
6319           // to the right.
6320           assert(_threshold <= end_card_addr,
6321                  "Because we are just scanning into this object");
6322           if (_threshold < end_card_addr) {
6323             _threshold = end_card_addr;
6324           }
6325           if (p->klass_or_null_acquire() != NULL) {
6326             // Redirty the range of cards...
6327             _mut->mark_range(redirty_range);
6328           } // ...else the setting of klass will dirty the card anyway.
6329         }
6330       DEBUG_ONLY(})
6331       return true;
6332     }
6333   }
6334   scanOopsInOop(addr);
6335   return true;
6336 }
6337 
6338 // We take a break if we've been at this for a while,
6339 // so as to avoid monopolizing the locks involved.
6340 void MarkFromRootsClosure::do_yield_work() {
6341   // First give up the locks, then yield, then re-lock
6342   // We should probably use a constructor/destructor idiom to
6343   // do this unlock/lock or modify the MutexUnlocker class to
6344   // serve our purpose. XXX
6345   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6346          "CMS thread should hold CMS token");
6347   assert_lock_strong(_bitMap->lock());
6348   _bitMap->lock()->unlock();
6349   ConcurrentMarkSweepThread::desynchronize(true);
6350   _collector->stopTimer();
6351   _collector->incrementYields();
6352 
6353   // See the comment in coordinator_yield()
6354   for (unsigned i = 0; i < CMSYieldSleepCount &&
6355                        ConcurrentMarkSweepThread::should_yield() &&
6356                        !CMSCollector::foregroundGCIsActive(); ++i) {
6357     os::sleep(Thread::current(), 1, false);
6358   }
6359 
6360   ConcurrentMarkSweepThread::synchronize(true);
6361   _bitMap->lock()->lock_without_safepoint_check();
6362   _collector->startTimer();
6363 }
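
// A minimal sketch of the constructor/destructor (RAII) idiom alluded to
// in the XXX comment in do_yield_work() above. The class name is
// hypothetical and it is not used by the collector; a stack-allocated
// instance would bracket the sleep loop so that the unlock is always
// paired with the re-lock, even on early returns.
class YieldLockDropper : public StackObj {
 public:
  YieldLockDropper(Mutex* m) : _m(m) { _m->unlock(); }
  ~YieldLockDropper()                { _m->lock_without_safepoint_check(); }
 private:
  Mutex* _m;
};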
6364 
6365 void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
6366   assert(_bitMap->isMarked(ptr), "expected bit to be set");
6367   assert(_markStack->isEmpty(),
6368          "should drain stack to limit stack usage");
6369   // convert ptr to an oop preparatory to scanning
6370   oop obj = oop(ptr);
6371   // Ignore mark word in verification below, since we
6372   // may be running concurrent with mutators.
6373   assert(obj->is_oop(true), "should be an oop");
6374   assert(_finger <= ptr, "_finger runneth ahead");
6375   // advance the finger to right end of this object
6376   _finger = ptr + obj->size();
6377   assert(_finger > ptr, "we just incremented it above");
6378   // On large heaps, it may take us some time to get through
6379   // the marking phase. During
6380   // this time it's possible that a lot of mutations have
6381   // accumulated in the card table and the mod union table --
6382   // these mutation records are redundant until we have
6383   // actually traced into the corresponding card.
6384   // Here, we check whether advancing the finger would make
6385   // us cross into a new card, and if so clear corresponding
6386   // cards in the MUT (preclean them in the card-table in the
6387   // future).
6388 
6389   DEBUG_ONLY(if (!_verifying) {)
6390     // The clean-on-enter optimization is disabled by default,
6391     // until we fix 6178663.
6392     if (CMSCleanOnEnter && (_finger > _threshold)) {
6393       // [_threshold, _finger) represents the interval
6394       // of cards to be cleared in MUT (or precleaned in card table).
6395       // The set of cards to be cleared is all those that overlap
6396       // with the interval [_threshold, _finger); note that
6397       // _threshold is always kept card-aligned but _finger isn't
6398       // always card-aligned.
6399       HeapWord* old_threshold = _threshold;
6400       assert(is_aligned(old_threshold, CardTableModRefBS::card_size),
6401              "_threshold should always be card-aligned");
6402       _threshold = align_up(_finger, CardTableModRefBS::card_size);
6403       MemRegion mr(old_threshold, _threshold);
6404       assert(!mr.is_empty(), "Control point invariant");
6405       assert(_span.contains(mr), "Should clear within span");
6406       _mut->clear_range(mr);
6407     }
6408   DEBUG_ONLY(})
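  // Worked example (addresses and card size hypothetical): with 512-byte
  // cards, if old_threshold is 0x10000 and the finger has just advanced
  // to 0x10234, _threshold is bumped to the card boundary 0x10400 and the
  // MUT entries overlapping [0x10000, 0x10400) are cleared -- their
  // mutation records are redundant because those cards are traced now.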
6409   // Note: the finger doesn't advance while we drain
6410   // the stack below.
6411   PushOrMarkClosure pushOrMarkClosure(_collector,
6412                                       _span, _bitMap, _markStack,
6413                                       _finger, this);
6414   bool res = _markStack->push(obj);
6415   assert(res, "Empty non-zero size stack should have space for single push");
6416   while (!_markStack->isEmpty()) {
6417     oop new_oop = _markStack->pop();
6418     // Skip verifying header mark word below because we are
6419     // running concurrent with mutators.
6420     assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
6421     // now scan this oop's oops
6422     new_oop->oop_iterate(&pushOrMarkClosure);
6423     do_yield_check();
6424   }
6425   assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
6426 }
6427 
6428 ParMarkFromRootsClosure::ParMarkFromRootsClosure(CMSConcMarkingTask* task,
6429                        CMSCollector* collector, MemRegion span,
6430                        CMSBitMap* bit_map,
6431                        OopTaskQueue* work_queue,
6432                        CMSMarkStack*  overflow_stack):
6433   _collector(collector),
6434   _whole_span(collector->_span),
6435   _span(span),
6436   _bit_map(bit_map),
6437   _mut(&collector->_modUnionTable),
6438   _work_queue(work_queue),
6439   _overflow_stack(overflow_stack),
6440   _skip_bits(0),
6441   _task(task)
6442 {
6443   assert(_work_queue->size() == 0, "work_queue should be empty");
6444   _finger = span.start();
6445   _threshold = _finger;     // XXX Defer clear-on-enter optimization for now
6446   assert(_span.contains(_finger), "Out of bounds _finger?");
6447 }
6448 
6449 // Should revisit to see if this should be restructured for
6450 // greater efficiency.
6451 bool ParMarkFromRootsClosure::do_bit(size_t offset) {
6452   if (_skip_bits > 0) {
6453     _skip_bits--;
6454     return true;
6455   }
6456   // convert offset into a HeapWord*
6457   HeapWord* addr = _bit_map->startWord() + offset;
6458   assert(_bit_map->startWord() <= addr && addr < _bit_map->endWord(),
6459          "address out of range");
6460   assert(_bit_map->isMarked(addr), "tautology");
6461   if (_bit_map->isMarked(addr+1)) {
6462     // this is an allocated object that might not yet be initialized
6463     assert(_skip_bits == 0, "tautology");
6464     _skip_bits = 2;  // skip next two marked bits ("Printezis-marks")
6465     oop p = oop(addr);
6466     if (p->klass_or_null_acquire() == NULL) {
6467       // in the case of the Clean-on-Enter optimization, we would redirty
6468       // the card and avoid clearing it by increasing the threshold.
6469       return true;
6470     }
6471   }
6472   scan_oops_in_oop(addr);
6473   return true;
6474 }
6475 
6476 void ParMarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
6477   assert(_bit_map->isMarked(ptr), "expected bit to be set");
6478   // Should we assert that our work queue is empty or
6479   // below some drain limit?
6480   assert(_work_queue->size() == 0,
6481          "should drain stack to limit stack usage");
6482   // convert ptr to an oop preparatory to scanning
6483   oop obj = oop(ptr);
6484   // Ignore mark word in verification below, since we
6485   // may be running concurrent with mutators.
6486   assert(obj->is_oop(true), "should be an oop");
6487   assert(_finger <= ptr, "_finger runneth ahead");
6488   // advance the finger to right end of this object
6489   _finger = ptr + obj->size();
6490   assert(_finger > ptr, "we just incremented it above");
6491   // On large heaps, it may take us some time to get through
6492   // the marking phase. During
6493   // this time it's possible that a lot of mutations have
6494   // accumulated in the card table and the mod union table --
6495   // these mutation records are redundant until we have
6496   // actually traced into the corresponding card.
6497   // Here, we check whether advancing the finger would make
6498   // us cross into a new card, and if so clear corresponding
6499   // cards in the MUT (preclean them in the card-table in the
6500   // future).
6501 
6502   // The clean-on-enter optimization is disabled by default,
6503   // until we fix 6178663.
6504   if (CMSCleanOnEnter && (_finger > _threshold)) {
6505     // [_threshold, _finger) represents the interval
6506     // of cards to be cleared in MUT (or precleaned in card table).
6507     // The set of cards to be cleared is all those that overlap
6508     // with the interval [_threshold, _finger); note that
6509     // _threshold is always kept card-aligned but _finger isn't
6510     // always card-aligned.
6511     HeapWord* old_threshold = _threshold;
6512     assert(is_aligned(old_threshold, CardTableModRefBS::card_size),
6513            "_threshold should always be card-aligned");
6514     _threshold = align_up(_finger, CardTableModRefBS::card_size);
6515     MemRegion mr(old_threshold, _threshold);
6516     assert(!mr.is_empty(), "Control point invariant");
6517     assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
6518     _mut->clear_range(mr);
6519   }
6520 
6521   // Note: the local finger doesn't advance while we drain
6522   // the stack below, but the global finger sure can and will.
6523   HeapWord* volatile* gfa = _task->global_finger_addr();
6524   ParPushOrMarkClosure pushOrMarkClosure(_collector,
6525                                          _span, _bit_map,
6526                                          _work_queue,
6527                                          _overflow_stack,
6528                                          _finger,
6529                                          gfa, this);
6530   bool res = _work_queue->push(obj);   // overflow could occur here
6531   assert(res, "Will hold once we use workqueues");
6532   while (true) {
6533     oop new_oop;
6534     if (!_work_queue->pop_local(new_oop)) {
6535       // We emptied our work_queue; check if there's stuff that can
6536       // be gotten from the overflow stack.
6537       if (CMSConcMarkingTask::get_work_from_overflow_stack(
6538             _overflow_stack, _work_queue)) {
6539         do_yield_check();
6540         continue;
6541       } else {  // done
6542         break;
6543       }
6544     }
6545     // Skip verifying header mark word below because we are
6546     // running concurrent with mutators.
6547     assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
6548     // now scan this oop's oops
6549     new_oop->oop_iterate(&pushOrMarkClosure);
6550     do_yield_check();
6551   }
6552   assert(_work_queue->size() == 0, "tautology, emphasizing post-condition");
6553 }
6554 
6555 // Yield in response to a request from VM Thread or
6556 // from mutators.
6557 void ParMarkFromRootsClosure::do_yield_work() {
6558   assert(_task != NULL, "sanity");
6559   _task->yield();
6560 }
6561 
6562 // A variant of the above used for verifying CMS marking work.
6563 MarkFromRootsVerifyClosure::MarkFromRootsVerifyClosure(CMSCollector* collector,
6564                         MemRegion span,
6565                         CMSBitMap* verification_bm, CMSBitMap* cms_bm,
6566                         CMSMarkStack*  mark_stack):
6567   _collector(collector),
6568   _span(span),
6569   _verification_bm(verification_bm),
6570   _cms_bm(cms_bm),
6571   _mark_stack(mark_stack),
6572   _pam_verify_closure(collector, span, verification_bm, cms_bm,
6573                       mark_stack)
6574 {
6575   assert(_mark_stack->isEmpty(), "stack should be empty");
6576   _finger = _verification_bm->startWord();
6577   assert(_collector->_restart_addr == NULL, "Sanity check");
6578   assert(_span.contains(_finger), "Out of bounds _finger?");
6579 }
6580 
6581 void MarkFromRootsVerifyClosure::reset(HeapWord* addr) {
6582   assert(_mark_stack->isEmpty(), "would cause duplicates on stack");
6583   assert(_span.contains(addr), "Out of bounds _finger?");
6584   _finger = addr;
6585 }
6586 
6587 // Should revisit to see if this should be restructured for
6588 // greater efficiency.
6589 bool MarkFromRootsVerifyClosure::do_bit(size_t offset) {
6590   // convert offset into a HeapWord*
6591   HeapWord* addr = _verification_bm->startWord() + offset;
6592   assert(_verification_bm->startWord() <= addr &&
6593          addr < _verification_bm->endWord(), "address out of range");
6594   assert(_verification_bm->isMarked(addr), "tautology");
6595   assert(_cms_bm->isMarked(addr), "tautology");
6596 
6597   assert(_mark_stack->isEmpty(),
6598          "should drain stack to limit stack usage");
6599   // convert addr to an oop preparatory to scanning
6600   oop obj = oop(addr);
6601   assert(obj->is_oop(), "should be an oop");
6602   assert(_finger <= addr, "_finger runneth ahead");
6603   // advance the finger to right end of this object
6604   _finger = addr + obj->size();
6605   assert(_finger > addr, "we just incremented it above");
6606   // Note: the finger doesn't advance while we drain
6607   // the stack below.
6608   bool res = _mark_stack->push(obj);
6609   assert(res, "Empty non-zero size stack should have space for single push");
6610   while (!_mark_stack->isEmpty()) {
6611     oop new_oop = _mark_stack->pop();
6612     assert(new_oop->is_oop(), "Oops! expected to pop an oop");
6613     // now scan this oop's oops
6614     new_oop->oop_iterate(&_pam_verify_closure);
6615   }
6616   assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
6617   return true;
6618 }
6619 
6620 PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
6621   CMSCollector* collector, MemRegion span,
6622   CMSBitMap* verification_bm, CMSBitMap* cms_bm,
6623   CMSMarkStack*  mark_stack):
6624   MetadataAwareOopClosure(collector->ref_processor()),
6625   _collector(collector),
6626   _span(span),
6627   _verification_bm(verification_bm),
6628   _cms_bm(cms_bm),
6629   _mark_stack(mark_stack)
6630 { }
6631 
6632 void PushAndMarkVerifyClosure::do_oop(oop* p)       { PushAndMarkVerifyClosure::do_oop_work(p); }
6633 void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
6634 
6635 // Upon stack overflow, we discard (part of) the stack,
6636 // remembering the least address amongst those discarded
6637 // in CMSCollector's _restart_addr.
6638 void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
6639   // Remember the least grey address discarded
6640   HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
6641   _collector->lower_restart_addr(ra);
6642   _mark_stack->reset();  // discard stack contents
6643   _mark_stack->expand(); // expand the stack if possible
6644 }
6645 
6646 void PushAndMarkVerifyClosure::do_oop(oop obj) {
6647   assert(obj->is_oop_or_null(), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6648   HeapWord* addr = (HeapWord*)obj;
6649   if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
6650     // Oop lies in _span and isn't yet grey or black
6651     _verification_bm->mark(addr);            // now grey
6652     if (!_cms_bm->isMarked(addr)) {
6653       Log(gc, verify) log;
6654       ResourceMark rm;
6655       oop(addr)->print_on(log.error_stream());
6656       log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
6657       fatal("... aborting");
6658     }
6659 
6660     if (!_mark_stack->push(obj)) { // stack overflow
6661       log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _mark_stack->capacity());
6662       assert(_mark_stack->isFull(), "Else push should have succeeded");
6663       handle_stack_overflow(addr);
6664     }
6665     // anything including and to the right of _finger
6666     // will be scanned as we iterate over the remainder of the
6667     // bit map
6668   }
6669 }
6670 
6671 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
6672                      MemRegion span,
6673                      CMSBitMap* bitMap, CMSMarkStack*  markStack,
6674                      HeapWord* finger, MarkFromRootsClosure* parent) :
6675   MetadataAwareOopClosure(collector->ref_processor()),
6676   _collector(collector),
6677   _span(span),
6678   _bitMap(bitMap),
6679   _markStack(markStack),
6680   _finger(finger),
6681   _parent(parent)
6682 { }
6683 
6684 ParPushOrMarkClosure::ParPushOrMarkClosure(CMSCollector* collector,
6685                                            MemRegion span,
6686                                            CMSBitMap* bit_map,
6687                                            OopTaskQueue* work_queue,
6688                                            CMSMarkStack*  overflow_stack,
6689                                            HeapWord* finger,
6690                                            HeapWord* volatile* global_finger_addr,
6691                                            ParMarkFromRootsClosure* parent) :
6692   MetadataAwareOopClosure(collector->ref_processor()),
6693   _collector(collector),
6694   _whole_span(collector->_span),
6695   _span(span),
6696   _bit_map(bit_map),
6697   _work_queue(work_queue),
6698   _overflow_stack(overflow_stack),
6699   _finger(finger),
6700   _global_finger_addr(global_finger_addr),
6701   _parent(parent)
6702 { }
6703 
6704 // Assumes thread-safe access by callers, who are
6705 // responsible for mutual exclusion.
6706 void CMSCollector::lower_restart_addr(HeapWord* low) {
6707   assert(_span.contains(low), "Out of bounds addr");
6708   if (_restart_addr == NULL) {
6709     _restart_addr = low;
6710   } else {
6711     _restart_addr = MIN2(_restart_addr, low);
6712   }
6713 }
6714 
6715 // Upon stack overflow, we discard (part of) the stack,
6716 // remembering the least address amongst those discarded
6717 // in CMSCollector's _restart_addr.
6718 void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
6719   // Remember the least grey address discarded
6720   HeapWord* ra = (HeapWord*)_markStack->least_value(lost);
6721   _collector->lower_restart_addr(ra);
6722   _markStack->reset();  // discard stack contents
6723   _markStack->expand(); // expand the stack if possible
6724 }
6725 
6726 // Upon stack overflow, we discard (part of) the stack,
6727 // remembering the least address amongst those discarded
6728 // in CMSCollector's _restart_addr.
6729 void ParPushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
6730   // We need to do this under a mutex to prevent other
6731   // workers from interfering with the work done below.
6732   MutexLockerEx ml(_overflow_stack->par_lock(),
6733                    Mutex::_no_safepoint_check_flag);
6734   // Remember the least grey address discarded
6735   HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
6736   _collector->lower_restart_addr(ra);
6737   _overflow_stack->reset();  // discard stack contents
6738   _overflow_stack->expand(); // expand the stack if possible
6739 }
6740 
6741 void PushOrMarkClosure::do_oop(oop obj) {
6742   // Ignore mark word because we are running concurrent with mutators.
6743   assert(obj->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6744   HeapWord* addr = (HeapWord*)obj;
6745   if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
6746     // Oop lies in _span and isn't yet grey or black
6747     _bitMap->mark(addr);            // now grey
6748     if (addr < _finger) {
6749       // the bit map iteration has already either passed, or
6750       // sampled, this bit in the bit map; we'll need to
6751       // use the marking stack to scan this oop's oops.
6752       bool simulate_overflow = false;
6753       NOT_PRODUCT(
6754         if (CMSMarkStackOverflowALot &&
6755             _collector->simulate_overflow()) {
6756           // simulate a stack overflow
6757           simulate_overflow = true;
6758         }
6759       )
6760       if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
6761         log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _markStack->capacity());
6762         assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
6763         handle_stack_overflow(addr);
6764       }
6765     }
6766     // anything including and to the right of _finger
6767     // will be scanned as we iterate over the remainder of the
6768     // bit map
6769     do_yield_check();
6770   }
6771 }
6772 
6773 void PushOrMarkClosure::do_oop(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
6774 void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
6775 
6776 void ParPushOrMarkClosure::do_oop(oop obj) {
6777   // Ignore mark word because we are running concurrent with mutators.
6778   assert(obj->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6779   HeapWord* addr = (HeapWord*)obj;
6780   if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
6781     // Oop lies in _whole_span and isn't yet grey or black
6782     // We read the global_finger (volatile read) strictly after marking oop
6783     bool res = _bit_map->par_mark(addr);    // now grey
6784     volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
6785     // Should we push this marked oop on our stack?
6786     // -- if someone else marked it, nothing to do
6787     // -- if target oop is above global finger nothing to do
6788     // -- if target oop is in chunk and above local finger
6789     //      then nothing to do
6790     // -- else push on work queue
6791     if (   !res       // someone else marked it, they will deal with it
6792         || (addr >= *gfa)  // will be scanned in a later task
6793         || (_span.contains(addr) && addr >= _finger)) { // later in this chunk
6794       return;
6795     }
6796     // the bit map iteration has already either passed, or
6797     // sampled, this bit in the bit map; we'll need to
6798     // use the marking stack to scan this oop's oops.
6799     bool simulate_overflow = false;
6800     NOT_PRODUCT(
6801       if (CMSMarkStackOverflowALot &&
6802           _collector->simulate_overflow()) {
6803         // simulate a stack overflow
6804         simulate_overflow = true;
6805       }
6806     )
6807     if (simulate_overflow ||
6808         !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
6809       // stack overflow
6810       log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _overflow_stack->capacity());
6811       // We cannot assert that the overflow stack is full because
6812       // it may have been emptied since.
6813       assert(simulate_overflow ||
6814              _work_queue->size() == _work_queue->max_elems(),
6815             "Else push should have succeeded");
6816       handle_stack_overflow(addr);
6817     }
6818     do_yield_check();
6819   }
6820 }
6821 
6822 void ParPushOrMarkClosure::do_oop(oop* p)       { ParPushOrMarkClosure::do_oop_work(p); }
6823 void ParPushOrMarkClosure::do_oop(narrowOop* p) { ParPushOrMarkClosure::do_oop_work(p); }
6824 
6825 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
6826                                        MemRegion span,
6827                                        ReferenceProcessor* rp,
6828                                        CMSBitMap* bit_map,
6829                                        CMSBitMap* mod_union_table,
6830                                        CMSMarkStack*  mark_stack,
6831                                        bool           concurrent_precleaning):
6832   MetadataAwareOopClosure(rp),
6833   _collector(collector),
6834   _span(span),
6835   _bit_map(bit_map),
6836   _mod_union_table(mod_union_table),
6837   _mark_stack(mark_stack),
6838   _concurrent_precleaning(concurrent_precleaning)
6839 {
6840   assert(ref_processor() != NULL, "ref_processor shouldn't be NULL");
6841 }
6842 
6843 // Grey object rescan during pre-cleaning and second checkpoint phases --
6844 // the non-parallel version (the parallel version appears further below).
6845 void PushAndMarkClosure::do_oop(oop obj) {
6846   // Ignore the mark word in verification: during concurrent precleaning,
6847   // the object monitor may be locked; during the checkpoint
6848   // phases, the object may already have been reached by a different
6849   // path and may be at the end of the global overflow list (so
6850   // the mark word may be NULL).
6851   assert(obj->is_oop_or_null(true /* ignore mark word */),
6852          "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6853   HeapWord* addr = (HeapWord*)obj;
6854   // Check if oop points into the CMS generation
6855   // and is not marked
6856   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
6857     // a white object ...
6858     _bit_map->mark(addr);         // ... now grey
6859     // push on the marking stack (grey set)
6860     bool simulate_overflow = false;
6861     NOT_PRODUCT(
6862       if (CMSMarkStackOverflowALot &&
6863           _collector->simulate_overflow()) {
6864         // simulate a stack overflow
6865         simulate_overflow = true;
6866       }
6867     )
6868     if (simulate_overflow || !_mark_stack->push(obj)) {
6869       if (_concurrent_precleaning) {
6870          // During precleaning we can just dirty the appropriate card(s)
6871          // in the mod union table, thus ensuring that the object remains
6872          // in the grey set, and continue. In the case of object arrays
6873          // we need to dirty all of the cards that the object spans,
6874          // since the rescan of object arrays will be limited to the
6875          // dirty cards.
6876          // Note that no one can be interfering with us in this action
6877          // of dirtying the mod union table, so no locking or atomics
6878          // are required.
6879          if (obj->is_objArray()) {
6880            size_t sz = obj->size();
6881            HeapWord* end_card_addr = align_up(addr + sz, CardTableModRefBS::card_size);
6882            MemRegion redirty_range = MemRegion(addr, end_card_addr);
6883            assert(!redirty_range.is_empty(), "Arithmetical tautology");
6884            _mod_union_table->mark_range(redirty_range);
6885          } else {
6886            _mod_union_table->mark(addr);
6887          }
6888          _collector->_ser_pmc_preclean_ovflw++;
6889       } else {
6890          // During the remark phase, we need to remember this oop
6891          // in the overflow list.
6892          _collector->push_on_overflow_list(obj);
6893          _collector->_ser_pmc_remark_ovflw++;
6894       }
6895     }
6896   }
6897 }
6898 
6899 ParPushAndMarkClosure::ParPushAndMarkClosure(CMSCollector* collector,
6900                                              MemRegion span,
6901                                              ReferenceProcessor* rp,
6902                                              CMSBitMap* bit_map,
6903                                              OopTaskQueue* work_queue):
6904   MetadataAwareOopClosure(rp),
6905   _collector(collector),
6906   _span(span),
6907   _bit_map(bit_map),
6908   _work_queue(work_queue)
6909 {
6910   assert(ref_processor() != NULL, "ref_processor shouldn't be NULL");
6911 }
6912 
6913 void PushAndMarkClosure::do_oop(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
6914 void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
6915 
6916 // Grey object rescan during second checkpoint phase --
6917 // the parallel version.
6918 void ParPushAndMarkClosure::do_oop(oop obj) {
6919   // In the assert below, we ignore the mark word because
6920   // this oop may point to an already visited object that is
6921   // on the overflow stack (in which case the mark word has
6922   // been hijacked for chaining into the overflow stack --
6923   // if this is the last object in the overflow stack then
6924   // its mark word will be NULL). Because this object may
6925   // have been subsequently popped off the global overflow
6926   // stack, and the mark word possibly restored to the prototypical
6927   // value, by the time we get to examine this failing assert in
6928   // the debugger, is_oop_or_null(false) may subsequently start
6929   // to hold.
6930   assert(obj->is_oop_or_null(true),
6931          "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6932   HeapWord* addr = (HeapWord*)obj;
6933   // Check if oop points into the CMS generation
6934   // and is not marked
6935   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
6936     // a white object ...
6937     // If we manage to "claim" the object, by being the
6938     // first thread to mark it, then we push it on our
6939     // marking stack
6940     if (_bit_map->par_mark(addr)) {     // ... now grey
6941       // push on work queue (grey set)
6942       bool simulate_overflow = false;
6943       NOT_PRODUCT(
6944         if (CMSMarkStackOverflowALot &&
6945             _collector->par_simulate_overflow()) {
6946           // simulate a stack overflow
6947           simulate_overflow = true;
6948         }
6949       )
6950       if (simulate_overflow || !_work_queue->push(obj)) {
6951         _collector->par_push_on_overflow_list(obj);
6952         _collector->_par_pmc_remark_ovflw++; //  imprecise OK: no need to CAS
6953       }
6954     } // Else, some other thread got there first
6955   }
6956 }
6957 
6958 void ParPushAndMarkClosure::do_oop(oop* p)       { ParPushAndMarkClosure::do_oop_work(p); }
6959 void ParPushAndMarkClosure::do_oop(narrowOop* p) { ParPushAndMarkClosure::do_oop_work(p); }
6960 
6961 void CMSPrecleanRefsYieldClosure::do_yield_work() {
6962   Mutex* bml = _collector->bitMapLock();
6963   assert_lock_strong(bml);
6964   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6965          "CMS thread should hold CMS token");
6966 
6967   bml->unlock();
6968   ConcurrentMarkSweepThread::desynchronize(true);
6969 
6970   _collector->stopTimer();
6971   _collector->incrementYields();
6972 
6973   // See the comment in coordinator_yield()
6974   for (unsigned i = 0; i < CMSYieldSleepCount &&
6975                        ConcurrentMarkSweepThread::should_yield() &&
6976                        !CMSCollector::foregroundGCIsActive(); ++i) {
6977     os::sleep(Thread::current(), 1, false);
6978   }
6979 
6980   ConcurrentMarkSweepThread::synchronize(true);
6981   bml->lock();
6982 
6983   _collector->startTimer();
6984 }
6985 
6986 bool CMSPrecleanRefsYieldClosure::should_return() {
6987   if (ConcurrentMarkSweepThread::should_yield()) {
6988     do_yield_work();
6989   }
6990   return _collector->foregroundGCIsActive();
6991 }
6992 
6993 void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
6994   assert(is_aligned(mr.start(), CardTableModRefBS::card_size),
6995          "mr should be aligned to start at a card boundary");
6996   // We'd like to assert:
6997   // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0,
6998   //        "mr should be a range of cards");
6999   // However, that would be too strong in one case -- the last
7000   // partition ends at _unallocated_block which, in general, can be
7001   // an arbitrary boundary, not necessarily card aligned.
7002   _num_dirty_cards += mr.word_size()/CardTableModRefBS::card_size_in_words;
7003   _space->object_iterate_mem(mr, &_scan_cl);
7004 }
7005 
7006 SweepClosure::SweepClosure(CMSCollector* collector,
7007                            ConcurrentMarkSweepGeneration* g,
7008                            CMSBitMap* bitMap, bool should_yield) :
7009   _collector(collector),
7010   _g(g),
7011   _sp(g->cmsSpace()),
7012   _limit(_sp->sweep_limit()),
7013   _freelistLock(_sp->freelistLock()),
7014   _bitMap(bitMap),
7015   _yield(should_yield),
7016   _inFreeRange(false),           // No free range at beginning of sweep
7017   _freeRangeInFreeLists(false),  // No free range at beginning of sweep
7018   _lastFreeRangeCoalesced(false),
7019   _freeFinger(g->used_region().start())
7020 {
7021   NOT_PRODUCT(
7022     _numObjectsFreed = 0;
7023     _numWordsFreed   = 0;
7024     _numObjectsLive = 0;
7025     _numWordsLive = 0;
7026     _numObjectsAlreadyFree = 0;
7027     _numWordsAlreadyFree = 0;
7028     _last_fc = NULL;
7029 
7030     _sp->initializeIndexedFreeListArrayReturnedBytes();
7031     _sp->dictionary()->initialize_dict_returned_bytes();
7032   )
7033   assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7034          "sweep _limit out of bounds");
7035   log_develop_trace(gc, sweep)("====================");
7036   log_develop_trace(gc, sweep)("Starting new sweep with limit " PTR_FORMAT, p2i(_limit));
7037 }
7038 
7039 void SweepClosure::print_on(outputStream* st) const {
7040   st->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")",
7041                p2i(_sp->bottom()), p2i(_sp->end()));
7042   st->print_cr("_limit = " PTR_FORMAT, p2i(_limit));
7043   st->print_cr("_freeFinger = " PTR_FORMAT, p2i(_freeFinger));
7044   NOT_PRODUCT(st->print_cr("_last_fc = " PTR_FORMAT, p2i(_last_fc));)
7045   st->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d",
7046                _inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced);
7047 }
7048 
7049 #ifndef PRODUCT
7050 // Assertion checking only:  no useful work in product mode --
7051 // however, if any of the flags below become product flags,
7052 // you may need to review this code to see if it needs to be
7053 // enabled in product mode.
7054 SweepClosure::~SweepClosure() {
7055   assert_lock_strong(_freelistLock);
7056   assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7057          "sweep _limit out of bounds");
7058   if (inFreeRange()) {
7059     Log(gc, sweep) log;
7060     log.error("inFreeRange() should have been reset; dumping state of SweepClosure");
7061     ResourceMark rm;
7062     print_on(log.error_stream());
7063     ShouldNotReachHere();
7064   }
7065 
7066   if (log_is_enabled(Debug, gc, sweep)) {
7067     log_debug(gc, sweep)("Collected " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
7068                          _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
7069     log_debug(gc, sweep)("Live " SIZE_FORMAT " objects,  " SIZE_FORMAT " bytes  Already free " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
7070                          _numObjectsLive, _numWordsLive*sizeof(HeapWord), _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
7071     size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree) * sizeof(HeapWord);
7072     log_debug(gc, sweep)("Total sweep: " SIZE_FORMAT " bytes", totalBytes);
7073   }
7074 
7075   if (log_is_enabled(Trace, gc, sweep) && CMSVerifyReturnedBytes) {
7076     size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
7077     size_t dict_returned_bytes = _sp->dictionary()->sum_dict_returned_bytes();
7078     size_t returned_bytes = indexListReturnedBytes + dict_returned_bytes;
7079     log_trace(gc, sweep)("Returned " SIZE_FORMAT " bytes   Indexed List Returned " SIZE_FORMAT " bytes        Dictionary Returned " SIZE_FORMAT " bytes",
7080                          returned_bytes, indexListReturnedBytes, dict_returned_bytes);
7081   }
7082   log_develop_trace(gc, sweep)("end of sweep with _limit = " PTR_FORMAT, p2i(_limit));
7083   log_develop_trace(gc, sweep)("================");
7084 }
7085 #endif  // PRODUCT
7086 
7087 void SweepClosure::initialize_free_range(HeapWord* freeFinger,
7088     bool freeRangeInFreeLists) {
7089   log_develop_trace(gc, sweep)("---- Start free range at " PTR_FORMAT " with free block (%d)",
7090                                p2i(freeFinger), freeRangeInFreeLists);
7091   assert(!inFreeRange(), "Trampling existing free range");
7092   set_inFreeRange(true);
7093   set_lastFreeRangeCoalesced(false);
7094 
7095   set_freeFinger(freeFinger);
7096   set_freeRangeInFreeLists(freeRangeInFreeLists);
7097   if (CMSTestInFreeList) {
7098     if (freeRangeInFreeLists) {
7099       FreeChunk* fc = (FreeChunk*) freeFinger;
7100       assert(fc->is_free(), "A chunk on the free list should be free.");
7101       assert(fc->size() > 0, "Free range should have a size");
7102       assert(_sp->verify_chunk_in_free_list(fc), "Chunk is not in free lists");
7103     }
7104   }
7105 }
7106 
7107 // Note that the sweeper runs concurrently with mutators. Thus,
7108 // it is possible for direct allocation in this generation to happen
7109 // in the middle of the sweep. Note that the sweeper also coalesces
7110 // contiguous free blocks. Thus, unless the sweeper and the allocator
7111 // synchronize appropriately, freshly allocated blocks may get swept up.
7112 // This is accomplished by the sweeper locking the free lists while
7113 // it is sweeping. Thus blocks that are determined to be free are
7114 // indeed free. There is however one additional complication:
7115 // blocks that have been allocated since the final checkpoint and
7116 // mark, will not have been marked and so would be treated as
7117 // unreachable and swept up. To prevent this, the allocator marks
7118 // the bit map when allocating during the sweep phase. This leads,
7119 // however, to a further complication -- objects may have been allocated
7120 // but not yet initialized -- in the sense that the header isn't yet
7121 // installed. The sweeper cannot then determine the size of the block
7122 // in order to skip over it. To deal with this case, we use a technique
7123 // (due to Printezis) to encode such uninitialized block sizes in the
7124 // bit map. Since the bit map uses a bit per every HeapWord, but the
7125 // CMS generation has a minimum object size of 3 HeapWords, it follows
7126 // that "normal marks" won't be adjacent in the bit map (there will
7127 // always be at least two 0 bits between successive 1 bits). We make use
7128 // of these "unused" bits to represent uninitialized blocks -- the bit
7129 // corresponding to the start of the uninitialized object and the next
7130 // bit are both set. Finally, a 1 bit marks the end of the object that
7131 // started with the two consecutive 1 bits to indicate its potentially
7132 // uninitialized state.
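// Worked example of the encoding above (bit positions hypothetical): an
// uninitialized block of 5 HeapWords starting at bit i is represented as
//
//   bit:   i  i+1  i+2  i+3  i+4
//   value: 1   1    0    0    1
//
// The pair of 1 bits at (i, i+1) flags the block as uninitialized, and
// the trailing 1 bit at i+4 marks its last word, so its size can be
// recovered as (i+4) - i + 1 = 5 words without reading the (absent)
// header; this is what block_size_using_printezis_bits() relies on.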
7133 
7134 size_t SweepClosure::do_blk_careful(HeapWord* addr) {
7135   FreeChunk* fc = (FreeChunk*)addr;
7136   size_t res;
7137 
7138   // Check if we are done sweeping. Below we check "addr >= _limit" rather
7139   // than "addr == _limit" because although _limit was a block boundary when
7140   // we started the sweep, it may no longer be one because heap expansion
7141   // may have caused us to coalesce the block ending at the address _limit
7142   // with a newly expanded chunk (this happens when _limit was set to the
7143   // previous _end of the space), so we may have stepped past _limit:
7144   // see the following Zeno-like trail of CRs 6977970, 7008136, 7042740.
7145   if (addr >= _limit) { // we have swept up to or past the limit: finish up
7146     assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7147            "sweep _limit out of bounds");
7148     assert(addr < _sp->end(), "addr out of bounds");
7149     // Flush any free range we might be holding as a single
7150     // coalesced chunk to the appropriate free list.
7151     if (inFreeRange()) {
7152       assert(freeFinger() >= _sp->bottom() && freeFinger() < _limit,
7153              "freeFinger() " PTR_FORMAT " is out-of-bounds", p2i(freeFinger()));
7154       flush_cur_free_chunk(freeFinger(),
7155                            pointer_delta(addr, freeFinger()));
7156       log_develop_trace(gc, sweep)("Sweep: last chunk: put_free_blk " PTR_FORMAT " (" SIZE_FORMAT ") [coalesced:%d]",
7157                                    p2i(freeFinger()), pointer_delta(addr, freeFinger()),
7158                                    lastFreeRangeCoalesced() ? 1 : 0);
7159     }
7160 
7161     // help the iterator loop finish
7162     return pointer_delta(_sp->end(), addr);
7163   }
7164 
7165   assert(addr < _limit, "sweep invariant");
7166   // check if we should yield
7167   do_yield_check(addr);
7168   if (fc->is_free()) {
7169     // Chunk that is already free
7170     res = fc->size();
7171     do_already_free_chunk(fc);
7172     debug_only(_sp->verifyFreeLists());
7173     // If we flush the chunk at hand in lookahead_and_flush()
7174     // and it's coalesced with a preceding chunk, then the
7175     // process of "mangling" the payload of the coalesced block
7176     // will cause erasure of the size information from the
7177     // (erstwhile) header of all the coalesced blocks but the
7178     // first, so the first disjunct in the assert will not hold
7179     // in that specific case (in which case the second disjunct
7180     // will hold).
7181     assert(res == fc->size() || ((HeapWord*)fc) + res >= _limit,
7182            "Otherwise the size info doesn't change at this step");
7183     NOT_PRODUCT(
7184       _numObjectsAlreadyFree++;
7185       _numWordsAlreadyFree += res;
7186     )
7187     NOT_PRODUCT(_last_fc = fc;)
7188   } else if (!_bitMap->isMarked(addr)) {
7189     // Chunk is fresh garbage
7190     res = do_garbage_chunk(fc);
7191     debug_only(_sp->verifyFreeLists());
7192     NOT_PRODUCT(
7193       _numObjectsFreed++;
7194       _numWordsFreed += res;
7195     )
7196   } else {
7197     // Chunk that is alive.
7198     res = do_live_chunk(fc);
7199     debug_only(_sp->verifyFreeLists());
7200     NOT_PRODUCT(
7201         _numObjectsLive++;
7202         _numWordsLive += res;
7203     )
7204   }
7205   return res;
7206 }
7207 
7208 // For smart allocation, record the following:
7209 //  split deaths - a free chunk is removed from its free list because
7210 //      it is being split into two or more chunks.
7211 //  split birth - a free chunk is being added to its free list because
7212 //      a larger free chunk has been split and resulted in this free chunk.
7213 //  coal death - a free chunk is being removed from its free list because
7214 //      it is being coalesced into a large free chunk.
7215 //  coal birth - a free chunk is being added to its free list because
7216 //      it was created when two or more free chunks were coalesced into
7217 //      this free chunk.
7218 //
7219 // These statistics are used to determine the desired number of free
7220 // chunks of a given size.  The desired number is chosen to be relative
7221 // to the end of a CMS sweep.  The desired number at the end of a sweep
7222 // is the
7223 //      count-at-end-of-previous-sweep (an amount that was enough)
7224 //              - count-at-beginning-of-current-sweep  (the excess)
7225 //              + split-births  (gains in this size during interval)
7226 //              - split-deaths  (demands on this size during interval)
7227 // where the interval is from the end of one sweep to the end of the
7228 // next.
7229 //
7230 // When sweeping, the sweeper maintains an accumulated chunk which is
7231 // the chunk that is made up of chunks that have been coalesced.  That
7232 // will be termed the left-hand chunk.  A new chunk of garbage that
7233 // is being considered for coalescing will be referred to as the
7234 // right-hand chunk.
7235 //
7236 // When making a decision on whether to coalesce a right-hand chunk with
7237 // the current left-hand chunk, the current count vs. the desired count
7238 // of the left-hand chunk is considered.  Also if the right-hand chunk
7239 // is near the large chunk at the end of the heap (see
7240 // ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the
7241 // left-hand chunk is coalesced.
7242 //
7243 // When making a decision about whether to split a chunk, the desired count
7244 // vs. the current count of the candidate to be split is also considered.
7245 // If the candidate is underpopulated (currently fewer chunks than desired)
7246 // a chunk of an overpopulated (currently more chunks than desired) size may
7247 // be chosen.  The "hint" associated with a free list, if non-null, points
7248 // to a free list which may be overpopulated.
7249 //
7250 
7251 void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
7252   const size_t size = fc->size();
7253   // Chunks that cannot be coalesced are not in the
7254   // free lists.
7255   if (CMSTestInFreeList && !fc->cantCoalesce()) {
7256     assert(_sp->verify_chunk_in_free_list(fc),
7257            "free chunk should be in free lists");
7258   }
7259   // a chunk that is already free, should not have been
7260   // marked in the bit map
7261   HeapWord* const addr = (HeapWord*) fc;
7262   assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
7263   // Verify that the bit map has no bits marked between
7264   // addr and purported end of this block.
7265   _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7266 
7267   // Some chunks cannot be coalesced under any circumstances.
7268   // See the definition of cantCoalesce().
7269   if (!fc->cantCoalesce()) {
7270     // This chunk can potentially be coalesced.
7271     // All the work is done in do_post_free_or_garbage_chunk() below.
7272     do_post_free_or_garbage_chunk(fc, size);
7273     // Note that if the chunk is not coalescable (the else arm
7274     // below), we unconditionally flush, without needing to do
7275     // a "lookahead," as we do below.
7276     if (inFreeRange()) lookahead_and_flush(fc, size);
7277   } else {
7278     // Code path common to both original and adaptive free lists.
7279 
7280     // can't coalesce with the previous block; this should be treated
7281     // as the end of a free run, if any
7282     if (inFreeRange()) {
7283       // we kicked some butt; time to pick up the garbage
7284       assert(freeFinger() < addr, "freeFinger points too high");
7285       flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
7286     }
7287     // else, nothing to do, just continue
7288   }
7289 }
7290 
7291 size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
7292   // This is a chunk of garbage.  It is not in any free list.
7293   // Add it to a free list or let it possibly be coalesced into
7294   // a larger chunk.
7295   HeapWord* const addr = (HeapWord*) fc;
7296   const size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
7297 
7298   // Verify that the bit map has no bits marked between
7299   // addr and purported end of just dead object.
7300   _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7301   do_post_free_or_garbage_chunk(fc, size);
7302 
7303   assert(_limit >= addr + size,
7304          "A freshly garbage chunk can't possibly straddle over _limit");
7305   if (inFreeRange()) lookahead_and_flush(fc, size);
7306   return size;
7307 }
7308 
7309 size_t SweepClosure::do_live_chunk(FreeChunk* fc) {
7310   HeapWord* addr = (HeapWord*) fc;
7311   // The sweeper has just found a live object. Return any accumulated
7312   // left hand chunk to the free lists.
7313   if (inFreeRange()) {
7314     assert(freeFinger() < addr, "freeFinger points too high");
7315     flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
7316   }
7317 
7318   // This object is live: we'd normally expect this to be
7319   // an oop, and like to assert the following:
7320   // assert(oop(addr)->is_oop(), "live block should be an oop");
7321   // However, as we commented above, this may be an object whose
7322   // header hasn't yet been initialized.
7323   size_t size;
7324   assert(_bitMap->isMarked(addr), "Tautology for this control point");
7325   if (_bitMap->isMarked(addr + 1)) {
7326     // Determine the size from the bit map, rather than trying to
7327     // compute it from the object header.
7328     HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
7329     size = pointer_delta(nextOneAddr + 1, addr);
7330     assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
7331            "alignment problem");
7332 
7333 #ifdef ASSERT
7334       if (oop(addr)->klass_or_null_acquire() != NULL) {
7335         // Ignore mark word because we are running concurrent with mutators
7336         assert(oop(addr)->is_oop(true), "live block should be an oop");
7337         assert(size ==
7338                CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
7339                "P-mark and computed size do not agree");
7340       }
7341 #endif
7342 
7343   } else {
7344     // This should be an initialized object that's alive.
7345     assert(oop(addr)->klass_or_null_acquire() != NULL,
7346            "Should be an initialized object");
7347     // Ignore mark word because we are running concurrent with mutators
7348     assert(oop(addr)->is_oop(true), "live block should be an oop");
7349     // Verify that the bit map has no bits marked between
7350     // addr and purported end of this block.
7351     size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
7352     assert(size >= 3, "Necessary for Printezis marks to work");
7353     assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
7354     DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
7355   }
7356   return size;
7357 }
7358 
7359 void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
7360                                                  size_t chunkSize) {
7361   // do_post_free_or_garbage_chunk() should only be called in the case
7362   // of the adaptive free list allocator.
7363   const bool fcInFreeLists = fc->is_free();
7364   assert((HeapWord*)fc <= _limit, "sweep invariant");
7365   if (CMSTestInFreeList && fcInFreeLists) {
7366     assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists");
7367   }
7368 
7369   log_develop_trace(gc, sweep)("  -- pick up another chunk at " PTR_FORMAT " (" SIZE_FORMAT ")", p2i(fc), chunkSize);
7370 
7371   HeapWord* const fc_addr = (HeapWord*) fc;
7372 
7373   bool coalesce = false;
7374   const size_t left  = pointer_delta(fc_addr, freeFinger());
7375   const size_t right = chunkSize;
7376   switch (FLSCoalescePolicy) {
7377     // numeric value forms a coalescing aggressiveness metric
7378     case 0:  { // never coalesce
7379       coalesce = false;
7380       break;
7381     }
7382     case 1: { // coalesce if left & right chunks on overpopulated lists
7383       coalesce = _sp->coalOverPopulated(left) &&
7384                  _sp->coalOverPopulated(right);
7385       break;
7386     }
7387     case 2: { // coalesce if left chunk on overpopulated list (default)
7388       coalesce = _sp->coalOverPopulated(left);
7389       break;
7390     }
7391     case 3: { // coalesce if left OR right chunk on overpopulated list
7392       coalesce = _sp->coalOverPopulated(left) ||
7393                  _sp->coalOverPopulated(right);
7394       break;
7395     }
7396     case 4: { // always coalesce
7397       coalesce = true;
7398       break;
7399     }
7400     default:
7401      ShouldNotReachHere();
7402   }
7403 
7404   // Should the current free range be coalesced?
7405   // If the chunk is in a free range and either we decided to coalesce above
7406   // or the chunk is near the large block at the end of the heap
7407   // (isNearLargestChunk() returns true), then coalesce this chunk.
7408   const bool doCoalesce = inFreeRange()
7409                           && (coalesce || _g->isNearLargestChunk(fc_addr));
7410   if (doCoalesce) {
7411     // Coalesce the current free range on the left with the new
7412     // chunk on the right.  If either is on a free list,
7413     // it must be removed from the list and stashed in the closure.
7414     if (freeRangeInFreeLists()) {
7415       FreeChunk* const ffc = (FreeChunk*)freeFinger();
7416       assert(ffc->size() == pointer_delta(fc_addr, freeFinger()),
7417              "Size of free range is inconsistent with chunk size.");
7418       if (CMSTestInFreeList) {
7419         assert(_sp->verify_chunk_in_free_list(ffc),
7420                "Chunk is not in free lists");
7421       }
7422       _sp->coalDeath(ffc->size());
7423       _sp->removeFreeChunkFromFreeLists(ffc);
7424       set_freeRangeInFreeLists(false);
7425     }
7426     if (fcInFreeLists) {
7427       _sp->coalDeath(chunkSize);
7428       assert(fc->size() == chunkSize,
7429         "The chunk has the wrong size or is not in the free lists");
7430       _sp->removeFreeChunkFromFreeLists(fc);
7431     }
7432     set_lastFreeRangeCoalesced(true);
7433     print_free_block_coalesced(fc);
7434   } else {  // not in a free range and/or should not coalesce
7435     // Return the current free range and start a new one.
7436     if (inFreeRange()) {
7437       // In a free range but cannot coalesce with the right hand chunk.
7438       // Put the current free range into the free lists.
7439       flush_cur_free_chunk(freeFinger(),
7440                            pointer_delta(fc_addr, freeFinger()));
7441     }
7442     // Set up for new free range.  Pass along whether the right hand
7443     // chunk is in the free lists.
7444     initialize_free_range((HeapWord*)fc, fcInFreeLists);
7445   }
7446 }
7447 
7448 // Lookahead flush:
7449 // If we are tracking a free range, and this is the last chunk that
7450 // we'll look at because its end crosses past _limit, we'll preemptively
7451 // flush it along with any free range we may be holding on to. Note that
7452 // this can be the case only for an already free or freshly garbage
7453 // chunk. If this block is an object, it can never straddle
7454 // over _limit. The "straddling" occurs when _limit is set at
7455 // the previous end of the space when this cycle started, and
7456 // a subsequent heap expansion caused the previously co-terminal
7457 // free block to be coalesced with the newly expanded portion,
7458 // thus rendering _limit a non-block-boundary making it dangerous
7459 // for the sweeper to step over and examine.
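// Worked example (addresses hypothetical): suppose _limit was set to the
// old end of the space, 0x9000, and a subsequent expansion caused the
// free block that used to end at 0x9000 to be coalesced with the new
// storage into a single chunk [0x8000, 0xa000). When the sweeper reaches
// 0x8000, the chunk's end 0xa000 crosses _limit, so we flush the free
// range here instead of stepping to the non-block-boundary 0x9000.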
7460 void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
7461   assert(inFreeRange(), "Should only be called if currently in a free range.");
7462   HeapWord* const eob = ((HeapWord*)fc) + chunk_size;
7463   assert(_sp->used_region().contains(eob - 1),
7464          "eob = " PTR_FORMAT " eob-1 = " PTR_FORMAT " _limit = " PTR_FORMAT
7465          " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
7466          " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
7467          p2i(eob), p2i(eob-1), p2i(_limit), p2i(_sp->bottom()), p2i(_sp->end()), p2i(fc), chunk_size);
7468   if (eob >= _limit) {
7469     assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit");
7470     log_develop_trace(gc, sweep)("_limit " PTR_FORMAT " reached or crossed by block "
7471                                  "[" PTR_FORMAT "," PTR_FORMAT ") in space "
7472                                  "[" PTR_FORMAT "," PTR_FORMAT ")",
7473                                  p2i(_limit), p2i(fc), p2i(eob), p2i(_sp->bottom()), p2i(_sp->end()));
7474     // Return the storage we are tracking back into the free lists.
7475     log_develop_trace(gc, sweep)("Flushing ... ");
7476     assert(freeFinger() < eob, "Error");
7477     flush_cur_free_chunk(freeFinger(), pointer_delta(eob, freeFinger()));
7478   }
7479 }
7480 
7481 void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
7482   assert(inFreeRange(), "Should only be called if currently in a free range.");
7483   assert(size > 0,
7484     "A zero sized chunk cannot be added to the free lists.");
7485   if (!freeRangeInFreeLists()) {
7486     if (CMSTestInFreeList) {
7487       FreeChunk* fc = (FreeChunk*) chunk;
7488       fc->set_size(size);
7489       assert(!_sp->verify_chunk_in_free_list(fc),
7490              "chunk should not be in free lists yet");
7491     }
7492     log_develop_trace(gc, sweep)(" -- add free block " PTR_FORMAT " (" SIZE_FORMAT ") to free lists", p2i(chunk), size);
7493     // A new free range is going to be starting.  The current
7494     // free range has not been added to the free lists yet or
7495     // was removed so add it back.
7496     // If the current free range was coalesced, then the death
7497     // of the free range was recorded.  Record a birth now.
7498     if (lastFreeRangeCoalesced()) {
7499       _sp->coalBirth(size);
7500     }
7501     _sp->addChunkAndRepairOffsetTable(chunk, size,
7502             lastFreeRangeCoalesced());
7503   } else {
7504     log_develop_trace(gc, sweep)("Already in free list: nothing to flush");
7505   }
7506   set_inFreeRange(false);
7507   set_freeRangeInFreeLists(false);
7508 }
7509 
7510 // We take a break if we've been at this for a while,
7511 // so as to avoid monopolizing the locks involved.
7512 void SweepClosure::do_yield_work(HeapWord* addr) {
7513   // Return current free chunk being used for coalescing (if any)
7514   // to the appropriate freelist.  After yielding, the next
7515   // free block encountered will start a coalescing range of
7516   // free blocks.  If the next free block is adjacent to the
7517   // chunk just flushed, they will need to wait for the next
7518   // sweep to be coalesced.
7519   if (inFreeRange()) {
7520     flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
7521   }
7522 
7523   // First give up the locks, then yield, then re-lock.
7524   // We should probably use a constructor/destructor idiom to
7525   // do this unlock/lock or modify the MutexUnlocker class to
7526   // serve our purpose. XXX
7527   assert_lock_strong(_bitMap->lock());
7528   assert_lock_strong(_freelistLock);
7529   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7530          "CMS thread should hold CMS token");
7531   _bitMap->lock()->unlock();
7532   _freelistLock->unlock();
7533   ConcurrentMarkSweepThread::desynchronize(true);
7534   _collector->stopTimer();
7535   _collector->incrementYields();
7536 
7537   // See the comment in coordinator_yield()
7538   for (unsigned i = 0; i < CMSYieldSleepCount &&
7539                        ConcurrentMarkSweepThread::should_yield() &&
7540                        !CMSCollector::foregroundGCIsActive(); ++i) {
7541     os::sleep(Thread::current(), 1, false);
7542   }
7543 
7544   ConcurrentMarkSweepThread::synchronize(true);
7545   _freelistLock->lock();
7546   _bitMap->lock()->lock_without_safepoint_check();
7547   _collector->startTimer();
7548 }
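
// The constructor/destructor (RAII) idiom proposed by the XXX comment
// above could look like the following sketch. The class name and shape
// are hypothetical (no such helper exists here); it merely pairs the
// unlock/relock sequence from do_yield_work() so the two halves cannot
// get out of sync.
class ReleaseSweepLocksForYield : public StackObj {
  CMSBitMap* const _bm;
  Mutex*     const _fl;
 public:
  ReleaseSweepLocksForYield(CMSBitMap* bm, Mutex* fl) : _bm(bm), _fl(fl) {
    _bm->lock()->unlock();  // release in the same order as above
    _fl->unlock();
  }
  ~ReleaseSweepLocksForYield() {
    _fl->lock();            // reacquire in the reverse order
    _bm->lock()->lock_without_safepoint_check();
  }
};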
7549 
7550 #ifndef PRODUCT
7551 // This is actually very useful in a product build if it can
7552 // be called from the debugger.  Compile it into the product
7553 // as needed.
7554 bool debug_verify_chunk_in_free_list(FreeChunk* fc) {
7555   return debug_cms_space->verify_chunk_in_free_list(fc);
7556 }
7557 #endif
7558 
7559 void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const {
7560   log_develop_trace(gc, sweep)("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")",
7561                                p2i(fc), fc->size());
7562 }
7563 
7564 // CMSIsAliveClosure
7565 bool CMSIsAliveClosure::do_object_b(oop obj) {
7566   HeapWord* addr = (HeapWord*)obj;
7567   return addr != NULL &&
7568          (!_span.contains(addr) || _bit_map->isMarked(addr));
7569 }
7570 
7571 
7572 CMSKeepAliveClosure::CMSKeepAliveClosure(CMSCollector* collector,
7573                                          MemRegion span, CMSBitMap* bit_map,
7574                                          CMSMarkStack* mark_stack,
7575                                          bool cpc):
7576   _collector(collector),
7577   _span(span),
7578   _bit_map(bit_map),
7579   _mark_stack(mark_stack),
7580   _concurrent_precleaning(cpc) {
7581   assert(!_span.is_empty(), "Empty span could spell trouble");
7582 }
7583 
7584 
7585 // CMSKeepAliveClosure: the serial version
7586 void CMSKeepAliveClosure::do_oop(oop obj) {
7587   HeapWord* addr = (HeapWord*)obj;
7588   if (_span.contains(addr) &&
7589       !_bit_map->isMarked(addr)) {
7590     _bit_map->mark(addr);
7591     bool simulate_overflow = false;
7592     NOT_PRODUCT(
7593       if (CMSMarkStackOverflowALot &&
7594           _collector->simulate_overflow()) {
7595         // simulate a stack overflow
7596         simulate_overflow = true;
7597       }
7598     )
7599     if (simulate_overflow || !_mark_stack->push(obj)) {
7600       if (_concurrent_precleaning) {
7601         // We dirty the overflowed object and let the remark
7602         // phase deal with it.
7603         assert(_collector->overflow_list_is_empty(), "Error");
7604         // In the case of object arrays, we need to dirty all of
7605         // the cards that the object spans. No locking or atomics
7606         // are needed since no one else can be mutating the mod union
7607         // table.
7608         if (obj->is_objArray()) {
7609           size_t sz = obj->size();
7610           HeapWord* end_card_addr = align_up(addr + sz, CardTableModRefBS::card_size);
7611           MemRegion redirty_range = MemRegion(addr, end_card_addr);
7612           assert(!redirty_range.is_empty(), "Arithmetical tautology");
7613           _collector->_modUnionTable.mark_range(redirty_range);
7614         } else {
7615           _collector->_modUnionTable.mark(addr);
7616         }
7617         _collector->_ser_kac_preclean_ovflw++;
7618       } else {
7619         _collector->push_on_overflow_list(obj);
7620         _collector->_ser_kac_ovflw++;
7621       }
7622     }
7623   }
7624 }
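
// A worked sketch of the object-array redirtying computation above, as a
// hypothetical helper (not used by the collector). Dirtying must cover
// every card the array spans, so the end address is rounded up to the
// next card boundary before the region is formed; e.g. an array whose
// last word lands mid-card still gets that final card redirtied.
static MemRegion obj_array_redirty_range(HeapWord* addr, size_t sz) {
  HeapWord* end_card_addr = align_up(addr + sz, CardTableModRefBS::card_size);
  return MemRegion(addr, end_card_addr);  // [addr, end of last spanned card)
}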
7625 
7626 void CMSKeepAliveClosure::do_oop(oop* p)       { CMSKeepAliveClosure::do_oop_work(p); }
7627 void CMSKeepAliveClosure::do_oop(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
7628 
7629 // CMSParKeepAliveClosure: a parallel version of the above.
7630 // The work queues are private to each closure (thread),
7631 // but may be available for stealing by other threads.
7632 void CMSParKeepAliveClosure::do_oop(oop obj) {
7633   HeapWord* addr = (HeapWord*)obj;
7634   if (_span.contains(addr) &&
7635       !_bit_map->isMarked(addr)) {
7636     // In general, during recursive tracing, several threads
7637     // may be concurrently getting here; the first one to
7638     // "tag" it claims it.
7639     if (_bit_map->par_mark(addr)) {
7640       bool res = _work_queue->push(obj);
7641       assert(res, "Low water mark should be much less than capacity");
7642       // Do a recursive trim in the hope that this will keep
7643       // stack usage lower, but leave some oops for potential stealers
7644       trim_queue(_low_water_mark);
7645     } // Else, another thread got there first
7646   }
7647 }
7648 
7649 void CMSParKeepAliveClosure::do_oop(oop* p)       { CMSParKeepAliveClosure::do_oop_work(p); }
7650 void CMSParKeepAliveClosure::do_oop(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
7651 
7652 void CMSParKeepAliveClosure::trim_queue(uint max) {
7653   while (_work_queue->size() > max) {
7654     oop new_oop;
7655     if (_work_queue->pop_local(new_oop)) {
7656       assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
7657       assert(_bit_map->isMarked((HeapWord*)new_oop),
7658              "no white objects on this stack!");
7659       assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
7660       // iterate over the oops in this oop, marking and pushing
7661       // the ones in CMS heap (i.e. in _span).
7662       new_oop->oop_iterate(&_mark_and_push);
7663     }
7664   }
7665 }
7666 
7667 CMSInnerParMarkAndPushClosure::CMSInnerParMarkAndPushClosure(
7668                                 CMSCollector* collector,
7669                                 MemRegion span, CMSBitMap* bit_map,
7670                                 OopTaskQueue* work_queue):
7671   _collector(collector),
7672   _span(span),
7673   _bit_map(bit_map),
7674   _work_queue(work_queue) { }
7675 
7676 void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
7677   HeapWord* addr = (HeapWord*)obj;
7678   if (_span.contains(addr) &&
7679       !_bit_map->isMarked(addr)) {
7680     if (_bit_map->par_mark(addr)) {
7681       bool simulate_overflow = false;
7682       NOT_PRODUCT(
7683         if (CMSMarkStackOverflowALot &&
7684             _collector->par_simulate_overflow()) {
7685           // simulate a stack overflow
7686           simulate_overflow = true;
7687         }
7688       )
7689       if (simulate_overflow || !_work_queue->push(obj)) {
7690         _collector->par_push_on_overflow_list(obj);
7691         _collector->_par_kac_ovflw++;
7692       }
7693     } // Else another thread got there already
7694   }
7695 }
7696 
7697 void CMSInnerParMarkAndPushClosure::do_oop(oop* p)       { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
7698 void CMSInnerParMarkAndPushClosure::do_oop(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
7699 
7700 //////////////////////////////////////////////////////////////////
7701 //  CMSExpansionCause                /////////////////////////////
7702 //////////////////////////////////////////////////////////////////
7703 const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) {
7704   switch (cause) {
7705     case _no_expansion:
7706       return "No expansion";
7707     case _satisfy_free_ratio:
7708       return "Free ratio";
7709     case _satisfy_promotion:
7710       return "Satisfy promotion";
7711     case _satisfy_allocation:
7712       return "Allocation";
7713     case _allocate_par_lab:
7714       return "Par LAB";
7715     case _allocate_par_spooling_space:
7716       return "Par Spooling Space";
7717     case _adaptive_size_policy:
7718       return "Ergonomics";
7719     default:
7720       return "unknown";
7721   }
7722 }
7723 
7724 void CMSDrainMarkingStackClosure::do_void() {
7725   // the max number to take from overflow list at a time
7726   const size_t num = _mark_stack->capacity()/4;
7727   assert(!_concurrent_precleaning || _collector->overflow_list_is_empty(),
7728          "Overflow list should be NULL during concurrent phases");
7729   while (!_mark_stack->isEmpty() ||
7730          // if stack is empty, check the overflow list
7731          _collector->take_from_overflow_list(num, _mark_stack)) {
7732     oop obj = _mark_stack->pop();
7733     HeapWord* addr = (HeapWord*)obj;
7734     assert(_span.contains(addr), "Should be within span");
7735     assert(_bit_map->isMarked(addr), "Should be marked");
7736     assert(obj->is_oop(), "Should be an oop");
7737     obj->oop_iterate(_keep_alive);
7738   }
7739 }
7740 
7741 void CMSParDrainMarkingStackClosure::do_void() {
7742   // drain queue
7743   trim_queue(0);
7744 }
7745 
7746 // Trim our work_queue so its length is below max at return
7747 void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
7748   while (_work_queue->size() > max) {
7749     oop new_oop;
7750     if (_work_queue->pop_local(new_oop)) {
7751       assert(new_oop->is_oop(), "Expected an oop");
7752       assert(_bit_map->isMarked((HeapWord*)new_oop),
7753              "no white objects on this stack!");
7754       assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
7755       // iterate over the oops in this oop, marking and pushing
7756       // the ones in CMS heap (i.e. in _span).
7757       new_oop->oop_iterate(&_mark_and_push);
7758     }
7759   }
7760 }
7761 
7762 ////////////////////////////////////////////////////////////////////
7763 // Support for Marking Stack Overflow list handling and related code
7764 ////////////////////////////////////////////////////////////////////
7765 // Much of the following code is similar in shape and spirit to the
7766 // code used in ParNewGC. We should try and share that code
7767 // as much as possible in the future.
7768 
7769 #ifndef PRODUCT
7770 // Debugging support for CMSStackOverflowALot
7771 
7772 // It's OK to call this multi-threaded; the worst thing
7773 // that can happen is that we'll get a bunch of closely
7774 // spaced simulated overflows, which is fine; in fact it's
7775 // probably good, as it exercises the overflow code
7776 // under contention.
7777 bool CMSCollector::simulate_overflow() {
7778   if (_overflow_counter-- <= 0) { // just being defensive
7779     _overflow_counter = CMSMarkStackOverflowInterval;
7780     return true;
7781   } else {
7782     return false;
7783   }
7784 }
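
// The countdown pattern above, restated as a hypothetical stand-alone
// helper: roughly every CMSMarkStackOverflowInterval-th call reports a
// simulated overflow, and the counter is reset each time it fires.
static bool countdown_fires(ssize_t* counter, ssize_t interval) {
  if ((*counter)-- <= 0) {  // just being defensive, as above
    *counter = interval;
    return true;            // report a simulated overflow
  }
  return false;
}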
7785 
7786 bool CMSCollector::par_simulate_overflow() {
7787   return simulate_overflow();
7788 }
7789 #endif
7790 
7791 // Single-threaded
7792 bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
7793   assert(stack->isEmpty(), "Expected precondition");
7794   assert(stack->capacity() > num, "Shouldn't bite off more than we can chew");
7795   size_t i = num;
7796   oop  cur = _overflow_list;
7797   const markOop proto = markOopDesc::prototype();
7798   NOT_PRODUCT(ssize_t n = 0;)
7799   for (oop next; i > 0 && cur != NULL; cur = next, i--) {
7800     next = oop(cur->mark());
7801     cur->set_mark(proto);   // until proven otherwise
7802     assert(cur->is_oop(), "Should be an oop");
7803     bool res = stack->push(cur);
7804     assert(res, "Bit off more than we can chew?");
7805     NOT_PRODUCT(n++;)
7806   }
7807   _overflow_list = cur;
7808 #ifndef PRODUCT
7809   assert(_num_par_pushes >= n, "Too many pops?");
7810   _num_par_pushes -= n;
7811 #endif
7812   return !stack->isEmpty();
7813 }
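
// How the overflow list is threaded through mark words, reduced to a
// hypothetical pop helper (the loop above is the batched equivalent):
// each object's mark slot holds the next element of the list, and is
// restored to the prototype mark as the object is unlinked.
static oop pop_from_mark_threaded_list(oop* list_head) {
  oop cur = *list_head;
  if (cur != NULL) {
    *list_head = oop(cur->mark());           // next element lives in the mark
    cur->set_mark(markOopDesc::prototype()); // restore a sane mark word
  }
  return cur;
}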
7814 
7815 #define BUSY  (cast_to_oop<intptr_t>(0x1aff1aff))
7816 // (MT-safe) Get a prefix of at most "num" from the list.
7817 // The overflow list is chained through the mark word of
7818 // each object in the list. We fetch the entire list,
7819 // break off a prefix of the right size and return the
7820 // remainder. If other threads try to take objects from
7821 // the overflow list at that time, they will spin, sleeping
7822 // between attempts, to see if data becomes available. If (and
7823 // only if) another thread places one or more object(s)
7824 // on the global list before we have returned the suffix
7825 // to the global list, we will walk down our local list
7826 // to find its end and append the global list to
7827 // our suffix before returning it. This suffix walk can
7828 // prove to be expensive (quadratic in the amount of traffic)
7829 // when there are many objects in the overflow list and
7830 // there is much producer-consumer contention on the list.
7831 // *NOTE*: The overflow list manipulation code here and
7832 // in ParNewGeneration:: are very similar in shape,
7833 // except that in the ParNew case we use the old (from/eden)
7834 // copy of the object to thread the list via its klass word.
7835 // Because of the common code, if you make any changes in
7836 // the code below, please check the ParNew version to see if
7837 // similar changes might be needed.
7838 // CR 6797058 has been filed to consolidate the common code.
7839 bool CMSCollector::par_take_from_overflow_list(size_t num,
7840                                                OopTaskQueue* work_q,
7841                                                int no_of_gc_threads) {
7842   assert(work_q->size() == 0, "First empty local work queue");
7843   assert(num < work_q->max_elems(), "Can't bite more than we can chew");
7844   if (_overflow_list == NULL) {
7845     return false;
7846   }
7847   // Grab the entire list; we'll put back a suffix
7848   oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
7849   Thread* tid = Thread::current();
7850   // Before "no_of_gc_threads" was introduced, CMSOverflowSpinCount was
7851   // set to ParallelGCThreads.
7852   size_t CMSOverflowSpinCount = (size_t) no_of_gc_threads; // was ParallelGCThreads;
7853   size_t sleep_time_millis = MAX2((size_t)1, num/100);
7854   // If the list is busy, we spin for a short while,
7855   // sleeping between attempts to get the list.
7856   for (size_t spin = 0; prefix == BUSY && spin < CMSOverflowSpinCount; spin++) {
7857     os::sleep(tid, sleep_time_millis, false);
7858     if (_overflow_list == NULL) {
7859       // Nothing left to take
7860       return false;
7861     } else if (_overflow_list != BUSY) {
7862       // Try and grab the prefix
7863       prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
7864     }
7865   }
7866   // If the list was found to be empty, or we spun long
7867   // enough, we give up and return empty-handed. If we leave
7868   // the list in the BUSY state below, it must be the case that
7869   // some other thread holds the overflow list and will set it
7870   // to a non-BUSY state in the future.
7871   if (prefix == NULL || prefix == BUSY) {
7872      // Nothing to take or waited long enough
7873      if (prefix == NULL) {
7874        // Write back the NULL in case we overwrote it with BUSY above
7875        // and it is still the same value.
7876        (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
7877      }
7878      return false;
7879   }
7880   assert(prefix != NULL && prefix != BUSY, "Error");
7881   size_t i = num;
7882   oop cur = prefix;
7883   // Walk down the first "num" objects, unless we reach the end.
7884   for (; i > 1 && cur->mark() != NULL; cur = oop(cur->mark()), i--);
7885   if (cur->mark() == NULL) {
7886     // We have "num" or fewer elements in the list, so there
7887     // is nothing to return to the global list.
7888     // Write back the NULL in lieu of the BUSY we wrote
7889     // above, if it is still the same value.
7890     if (_overflow_list == BUSY) {
7891       (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
7892     }
7893   } else {
7894     // Chop off the suffix and return it to the global list.
7895     assert(cur->mark() != BUSY, "Error");
7896     oop suffix_head = cur->mark(); // suffix will be put back on global list
7897     cur->set_mark(NULL);           // break off suffix
7898     // It's possible that the list is still in the empty (BUSY) state
7899     // we left it in a short while ago; in that case we may be
7900     // able to place back the suffix without incurring the cost
7901     // of a walk down the list.
7902     oop observed_overflow_list = _overflow_list;
7903     oop cur_overflow_list = observed_overflow_list;
7904     bool attached = false;
7905     while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
7906       observed_overflow_list =
7907         (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
7908       if (cur_overflow_list == observed_overflow_list) {
7909         attached = true;
7910         break;
7911       } else cur_overflow_list = observed_overflow_list;
7912     }
7913     if (!attached) {
7914       // Too bad, someone else sneaked in (at least) an element; we'll need
7915       // to do a splice. Find tail of suffix so we can prepend suffix to global
7916       // list.
7917       for (cur = suffix_head; cur->mark() != NULL; cur = (oop)(cur->mark()));
7918       oop suffix_tail = cur;
7919       assert(suffix_tail != NULL && suffix_tail->mark() == NULL,
7920              "Tautology");
7921       observed_overflow_list = _overflow_list;
7922       do {
7923         cur_overflow_list = observed_overflow_list;
7924         if (cur_overflow_list != BUSY) {
7925           // Do the splice ...
7926           suffix_tail->set_mark(markOop(cur_overflow_list));
7927         } else { // cur_overflow_list == BUSY
7928           suffix_tail->set_mark(NULL);
7929         }
7930         // ... and try to place spliced list back on overflow_list ...
7931         observed_overflow_list =
7932           (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
7933       } while (cur_overflow_list != observed_overflow_list);
7934       // ... until we have succeeded in doing so.
7935     }
7936   }
7937 
7938   // Push the prefix elements on work_q
7939   assert(prefix != NULL, "control point invariant");
7940   const markOop proto = markOopDesc::prototype();
7941   oop next;
7942   NOT_PRODUCT(ssize_t n = 0;)
7943   for (cur = prefix; cur != NULL; cur = next) {
7944     next = oop(cur->mark());
7945     cur->set_mark(proto);   // until proven otherwise
7946     assert(cur->is_oop(), "Should be an oop");
7947     bool res = work_q->push(cur);
7948     assert(res, "Bit off more than we can chew?");
7949     NOT_PRODUCT(n++;)
7950   }
7951 #ifndef PRODUCT
7952   assert(_num_par_pushes >= n, "Too many pops?");
7953   Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
7954 #endif
7955   return true;
7956 }
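
// The claim/return protocol described above, condensed into two
// hypothetical helpers (prefix splitting, the spin/sleep loop and the
// splice path are omitted; see the loops above for those). A taker
// claims the whole list by swapping in the BUSY sentinel; a suffix can
// be handed back cheaply only while the list is still NULL or BUSY,
// otherwise the taker must splice its suffix onto whatever has been
// pushed in the interim.
static oop claim_entire_overflow_list(oop* list) {
  return cast_to_oop(Atomic::xchg_ptr(BUSY, list));
}
static bool try_return_suffix(oop* list, oop suffix_head, oop expected) {
  // 'expected' is NULL or BUSY; on failure the caller re-reads and splices.
  return (oop) Atomic::cmpxchg_ptr(suffix_head, list, expected) == expected;
}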
7957 
7958 // Single-threaded
7959 void CMSCollector::push_on_overflow_list(oop p) {
7960   NOT_PRODUCT(_num_par_pushes++;)
7961   assert(p->is_oop(), "Not an oop");
7962   preserve_mark_if_necessary(p);
7963   p->set_mark((markOop)_overflow_list);
7964   _overflow_list = p;
7965 }
7966 
7967 // Multi-threaded; use CAS to prepend to overflow list
7968 void CMSCollector::par_push_on_overflow_list(oop p) {
7969   NOT_PRODUCT(Atomic::inc_ptr(&_num_par_pushes);)
7970   assert(p->is_oop(), "Not an oop");
7971   par_preserve_mark_if_necessary(p);
7972   oop observed_overflow_list = _overflow_list;
7973   oop cur_overflow_list;
7974   do {
7975     cur_overflow_list = observed_overflow_list;
7976     if (cur_overflow_list != BUSY) {
7977       p->set_mark(markOop(cur_overflow_list));
7978     } else {
7979       p->set_mark(NULL);
7980     }
7981     observed_overflow_list =
7982       (oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
7983   } while (cur_overflow_list != observed_overflow_list);
7984 }
7985 #undef BUSY
7986 
7987 // Single threaded
7988 // General Note on GrowableArray: pushes may silently fail
7989 // because we are (temporarily) out of C-heap for expanding
7990 // the stack. The problem is quite ubiquitous and affects
7991 // a lot of code in the JVM. The prudent thing for GrowableArray
7992 // to do (for now) is to exit with an error. However, that may
7993 // be too draconian in some cases because the caller may be
7994 // able to recover without much harm. For such cases, we
7995 // should probably introduce a "soft_push" method which returns
7996 // an indication of success or failure with the assumption that
7997 // the caller may be able to recover from a failure; code in
7998 // the VM can then be changed, incrementally, to deal with such
7999 // failures where possible, thus hardening the VM in such
8000 // low-resource situations.
8001 void CMSCollector::preserve_mark_work(oop p, markOop m) {
8002   _preserved_oop_stack.push(p);
8003   _preserved_mark_stack.push(m);
8004   assert(m == p->mark(), "Mark word changed");
8005   assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
8006          "bijection");
8007 }
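
// The "soft_push" idea from the note above, sketched on a toy fixed-size
// array so as not to presume GrowableArray internals (this class and its
// shape are hypothetical): the push reports failure instead of exiting,
// giving a caller that can recover, e.g. by diverting to a slower path,
// the chance to do so.
class SoftPushArraySketch : public StackObj {
  oop _elems[256];
  int _len;
 public:
  SoftPushArraySketch() : _len(0) { }
  bool soft_push(oop e) {
    if (_len == (int)(sizeof(_elems) / sizeof(_elems[0]))) {
      return false;  // out of room: report it, don't exit the VM
    }
    _elems[_len++] = e;
    return true;
  }
};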
8008 
8009 // Single threaded
8010 void CMSCollector::preserve_mark_if_necessary(oop p) {
8011   markOop m = p->mark();
8012   if (m->must_be_preserved(p)) {
8013     preserve_mark_work(p, m);
8014   }
8015 }
8016 
8017 void CMSCollector::par_preserve_mark_if_necessary(oop p) {
8018   markOop m = p->mark();
8019   if (m->must_be_preserved(p)) {
8020     MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
8021     // Even though we read the mark word without holding
8022     // the lock, we are assured that it will not change
8023     // because we "own" this oop, so no other thread can
8024     // be trying to push it on the overflow list; see
8025     // the assertion in preserve_mark_work() that checks
8026     // that m == p->mark().
8027     preserve_mark_work(p, m);
8028   }
8029 }
8030 
8031 // We should be able to do this multi-threaded,
8032 // a chunk of stack being a task (this is
8033 // correct because each oop only ever appears
8034 // once in the overflow list). However, it's
8035 // not very easy to completely overlap this with
8036 // other operations, so it will generally not be done
8037 // until all work's been completed. Because we
8038 // expect the preserved oop stack (set) to be small,
8039 // it's probably fine to do this single-threaded.
8040 // We can explore cleverer concurrent/overlapped/parallel
8041 // processing of preserved marks if we feel the
8042 // need for this in the future. Stack overflow should
8043 // be so rare in practice, and its effect on performance
8044 // when it does happen so great, that the cost of this
8045 // pass will likely just be in the noise anyway.
8046 void CMSCollector::restore_preserved_marks_if_any() {
8047   assert(SafepointSynchronize::is_at_safepoint(),
8048          "world should be stopped");
8049   assert(Thread::current()->is_ConcurrentGC_thread() ||
8050          Thread::current()->is_VM_thread(),
8051          "should be single-threaded");
8052   assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
8053          "bijection");
8054 
8055   while (!_preserved_oop_stack.is_empty()) {
8056     oop p = _preserved_oop_stack.pop();
8057     assert(p->is_oop(), "Should be an oop");
8058     assert(_span.contains(p), "oop should be in _span");
8059     assert(p->mark() == markOopDesc::prototype(),
8060            "Set when taken from overflow list");
8061     markOop m = _preserved_mark_stack.pop();
8062     p->set_mark(m);
8063   }
8064   assert(_preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty(),
8065          "stacks were cleared above");
8066 }
8067 
8068 #ifndef PRODUCT
8069 bool CMSCollector::no_preserved_marks() const {
8070   return _preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty();
8071 }
8072 #endif
8073 
8074 // Transfer some number of overflown objects to usual marking
8075 // stack. Return true if some objects were transferred.
8076 bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
8077   size_t num = MIN2((size_t)(_mark_stack->capacity() - _mark_stack->length())/4,
8078                     (size_t)ParGCDesiredObjsFromOverflowList);
8079 
8080   bool res = _collector->take_from_overflow_list(num, _mark_stack);
8081   assert(_collector->overflow_list_is_empty() || res,
8082          "If list is not empty, we should have taken something");
8083   assert(!res || !_mark_stack->isEmpty(),
8084          "If we took something, it should now be on our stack");
8085   return res;
8086 }
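
// The refill batch computation above, restated as a hypothetical helper
// with a worked example: with capacity 4096, length 1024 and a cap of
// ParGCDesiredObjsFromOverflowList == 20, we get
// num = MIN2((4096 - 1024) / 4, 20) = 20 objects per refill.
static size_t overflow_refill_batch(size_t capacity, size_t length, size_t cap) {
  return MIN2((capacity - length) / 4, cap);
}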
8087 
8088 size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
8089   size_t res = _sp->block_size_no_stall(addr, _collector);
8090   if (_sp->block_is_obj(addr)) {
8091     if (_live_bit_map->isMarked(addr)) {
8092       // It can't have been dead in a previous cycle
8093       guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");
8094     } else {
8095       _dead_bit_map->mark(addr);      // mark the dead object
8096     }
8097   }
8098   // Could be 0, if the block size could not be computed without stalling.
8099   return res;
8100 }
8101 
8102 TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause): TraceMemoryManagerStats() {
8103 
8104   switch (phase) {
8105     case CMSCollector::InitialMarking:
8106       initialize(true  /* fullGC */,
8107                  cause /* cause of the GC */,
8108                  true  /* recordGCBeginTime */,
8109                  true  /* recordPreGCUsage */,
8110                  false /* recordPeakUsage */,
8111                  false /* recordPostGCusage */,
8112                  true  /* recordAccumulatedGCTime */,
8113                  false /* recordGCEndTime */,
8114                  false /* countCollection */);
8115       break;
8116 
8117     case CMSCollector::FinalMarking:
8118       initialize(true  /* fullGC */,
8119                  cause /* cause of the GC */,
8120                  false /* recordGCBeginTime */,
8121                  false /* recordPreGCUsage */,
8122                  false /* recordPeakUsage */,
8123                  false /* recordPostGCusage */,
8124                  true  /* recordAccumulatedGCTime */,
8125                  false /* recordGCEndTime */,
8126                  false /* countCollection */);
8127       break;
8128 
8129     case CMSCollector::Sweeping:
8130       initialize(true  /* fullGC */,
8131                  cause /* cause of the GC */,
8132                  false /* recordGCBeginTime */,
8133                  false /* recordPreGCUsage */,
8134                  true  /* recordPeakUsage */,
8135                  true  /* recordPostGCusage */,
8136                  false /* recordAccumulatedGCTime */,
8137                  true  /* recordGCEndTime */,
8138                  true  /* countCollection */);
8139       break;
8140 
8141     default:
8142       ShouldNotReachHere();
8143   }
8144 }