/*
 * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc/cms/cmsCollectorPolicy.hpp"
#include "gc/cms/cmsOopClosures.inline.hpp"
#include "gc/cms/compactibleFreeListSpace.hpp"
#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
#include "gc/cms/concurrentMarkSweepThread.hpp"
#include "gc/cms/parNewGeneration.hpp"
#include "gc/cms/vmCMSOperations.hpp"
#include "gc/serial/genMarkSweep.hpp"
#include "gc/serial/tenuredGeneration.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/cardGeneration.inline.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/padded.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/timer.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "services/runtimeService.hpp"
#include "utilities/stack.inline.hpp"

// statics
CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
bool CMSCollector::_full_gc_requested = false;
GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc;

//////////////////////////////////////////////////////////////////
// In support of CMS/VM thread synchronization
//////////////////////////////////////////////////////////////////
// We split use of the CGC_lock into 2 "levels".
// The low-level locking is of the usual CGC_lock monitor. We introduce
// a higher level "token" (hereafter "CMS token") built on top of the
// low level monitor (hereafter "CGC lock").
// The token-passing protocol gives priority to the VM thread. The
// CMS-lock doesn't provide any fairness guarantees, but clients
// should ensure that it is only held for very short, bounded
// durations.
//
// When either of the CMS thread or the VM thread is involved in
// collection operations during which it does not want the other
// thread to interfere, it obtains the CMS token.
//
// If either thread tries to get the token while the other has
// it, that thread waits. However, if the VM thread and CMS thread
// both want the token, then the VM thread gets priority while the
// CMS thread waits. This ensures, for instance, that the "concurrent"
// phases of the CMS thread's work do not block out the VM thread
// for long periods of time as the CMS thread continues to hog
// the token. (See bug 4616232).
//
// The baton-passing functions are, however, controlled by the
// flags _foregroundGCShouldWait and _foregroundGCIsActive,
// and here the low-level CMS lock, not the high level token,
// ensures mutual exclusion.
//
// Two important conditions that we have to satisfy:
// 1. if a thread does a low-level wait on the CMS lock, then it
//    relinquishes the CMS token if it were holding that token
//    when it acquired the low-level CMS lock.
// 2. any low-level notifications on the low-level lock
//    should only be sent when a thread has relinquished the token.
//
// In the absence of either property, we'd have potential deadlock.
//
// We protect each of the CMS (concurrent and sequential) phases
// with the CMS _token_, not the CMS _lock_.
//
// The only code protected by CMS lock is the token acquisition code
// itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the
// baton-passing code.
//
// Unfortunately, I couldn't come up with a good abstraction to factor and
// hide the naked CGC_lock manipulation in the baton-passing code
// further below. That's something we should try to do. Also, the proof
// of correctness of this 2-level locking scheme is far from obvious,
// and potentially quite slippery. We have an uneasy suspicion, for instance,
// that there may be a theoretical possibility of delay/starvation in the
// low-level lock/wait/notify scheme used for the baton-passing because of
// potential interference with the priority scheme embodied in the
// CMS-token-passing protocol. See related comments at a CGC_lock->wait()
// invocation further below and marked with "XXX 20011219YSR".
// Indeed, as we note elsewhere, this may become yet more slippery
// in the presence of multiple CMS and/or multiple VM threads. XXX

class CMSTokenSync: public StackObj {
 private:
  bool _is_cms_thread;
 public:
  CMSTokenSync(bool is_cms_thread):
    _is_cms_thread(is_cms_thread) {
    assert(is_cms_thread == Thread::current()->is_ConcurrentGC_thread(),
           "Incorrect argument to constructor");
    ConcurrentMarkSweepThread::synchronize(_is_cms_thread);
  }

  ~CMSTokenSync() {
    assert(_is_cms_thread ?
             ConcurrentMarkSweepThread::cms_thread_has_cms_token() :
             ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
          "Incorrect state");
    ConcurrentMarkSweepThread::desynchronize(_is_cms_thread);
  }
};

// Convenience class that does a CMSTokenSync, and then acquires
// up to three locks.
class CMSTokenSyncWithLocks: public CMSTokenSync {
 private:
  // Note: locks are acquired in textual declaration order
  // and released in the opposite order
  MutexLockerEx _locker1, _locker2, _locker3;
 public:
  CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
                        Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
    CMSTokenSync(is_cms_thread),
    _locker1(mutex1, Mutex::_no_safepoint_check_flag),
    _locker2(mutex2, Mutex::_no_safepoint_check_flag),
    _locker3(mutex3, Mutex::_no_safepoint_check_flag)
  { }
};
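
// Illustrative sketch only (not used by the collector): a phase that must
// not be interleaved with the other thread typically brackets its work with
// one of the stack objects above; the particular lock passed here is a
// hypothetical example, not a prescription:
//
//   {
//     CMSTokenSyncWithLocks ts(true /* is_cms_thread */, bitMapLock());
//     // ... phase work; the lock(s) and then the token are released
//     // automatically, in that order, when ts goes out of scope ...
//   }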


//////////////////////////////////////////////////////////////////
//  Concurrent Mark-Sweep Generation /////////////////////////////
//////////////////////////////////////////////////////////////////

NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)

// This struct contains per-thread things necessary to support parallel
// young-gen collection.
class CMSParGCThreadState: public CHeapObj<mtGC> {
 public:
  CompactibleFreeListSpaceLAB lab;
  PromotionInfo promo;

  // Constructor.
  CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
    promo.setSpace(cfls);
  }
};

ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
     ReservedSpace rs, size_t initial_byte_size, CardTableRS* ct) :
  CardGeneration(rs, initial_byte_size, ct),
  _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
  _did_compact(false)
{
  HeapWord* bottom = (HeapWord*) _virtual_space.low();
  HeapWord* end    = (HeapWord*) _virtual_space.high();

  _direct_allocated_words = 0;
  NOT_PRODUCT(
    _numObjectsPromoted = 0;
    _numWordsPromoted = 0;
    _numObjectsAllocated = 0;
    _numWordsAllocated = 0;
  )

  _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end));
  NOT_PRODUCT(debug_cms_space = _cmsSpace;)
  _cmsSpace->_old_gen = this;

  _gc_stats = new CMSGCStats();

  // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
  // offsets match. The ability to tell free chunks from objects
  // depends on this property.
  debug_only(
    FreeChunk* junk = NULL;
    assert(UseCompressedClassPointers ||
           junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
           "Offset of FreeChunk::_prev within FreeChunk must match"
           "  that of OopDesc::_klass within OopDesc");
  )

  _par_gc_thread_states = NEW_C_HEAP_ARRAY(CMSParGCThreadState*, ParallelGCThreads, mtGC);
  for (uint i = 0; i < ParallelGCThreads; i++) {
    _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
  }

  _incremental_collection_failed = false;
  // The "dilatation_factor" is the expansion that can occur on
  // account of the fact that the minimum object size in the CMS
  // generation may be larger than that in, say, a contiguous young
  // generation.
  // Ideally, in the calculation below, we'd compute the dilatation
  // factor as: MinChunkSize/(promoting_gen's min object size)
  // Since we do not have such a general query interface for the
  // promoting generation, we'll instead just use the minimum
  // object size (which today is a header's worth of space);
  // note that all arithmetic is in units of HeapWords.
  assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
  assert(_dilatation_factor >= 1.0, "from previous assert");
}


// The field "_initiating_occupancy" represents the occupancy percentage
// at which we trigger a new collection cycle.  Unless explicitly specified
// via CMSInitiatingOccupancyFraction (argument "io" below), it
// is calculated by:
//
//   Let "f" be MinHeapFreeRatio in
//
//    _initiating_occupancy = 100-f +
//                           f * (CMSTriggerRatio/100)
//   where CMSTriggerRatio is the argument "tr" below.
//
// That is, if we assume the heap is at its desired maximum occupancy at the
// end of a collection, we let CMSTriggerRatio of the (purported) free
// space be allocated before initiating a new collection cycle.
//
void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, uintx tr) {
  assert(io <= 100 && tr <= 100, "Check the arguments");
  if (io >= 0) {
    _initiating_occupancy = (double)io / 100.0;
  } else {
    _initiating_occupancy = ((100 - MinHeapFreeRatio) +
                             (double)(tr * MinHeapFreeRatio) / 100.0)
                            / 100.0;
  }
}
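
// For example (illustrative values, not defaults): with MinHeapFreeRatio
// f = 40 and CMSTriggerRatio tr = 80, the computation above yields
//   _initiating_occupancy = ((100 - 40) + (80 * 40) / 100) / 100
//                         = (60 + 32) / 100 = 0.92
// i.e. a new cycle is initiated once the generation is 92% occupied.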

void ConcurrentMarkSweepGeneration::ref_processor_init() {
  assert(collector() != NULL, "no collector");
  collector()->ref_processor_init();
}

void CMSCollector::ref_processor_init() {
  if (_ref_processor == NULL) {
    // Allocate and initialize a reference processor
    _ref_processor =
      new ReferenceProcessor(_span,                               // span
                             (ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing
                             ParallelGCThreads,                   // mt processing degree
                             _cmsGen->refs_discovery_is_mt(),     // mt discovery
                             MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
                             _cmsGen->refs_discovery_is_atomic(), // is discovery atomic?
                             &_is_alive_closure);                 // closure for liveness info
    // Initialize the _ref_processor field of CMSGen
    _cmsGen->set_ref_processor(_ref_processor);

  }
}

AdaptiveSizePolicy* CMSCollector::size_policy() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  return gch->gen_policy()->size_policy();
}

void ConcurrentMarkSweepGeneration::initialize_performance_counters() {

  const char* gen_name = "old";
  GenCollectorPolicy* gcp = GenCollectedHeap::heap()->gen_policy();
  // Generation Counters - generation 1, 1 subspace
  _gen_counters = new GenerationCounters(gen_name, 1, 1,
      gcp->min_old_size(), gcp->max_old_size(), &_virtual_space);

  _space_counters = new GSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       this, _gen_counters);
}

CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
  _cms_gen(cms_gen)
{
  assert(alpha <= 100, "bad value");
  _saved_alpha = alpha;

  // Initialize the alphas to the bootstrap value of 100.
  _gc0_alpha = _cms_alpha = 100;

  _cms_begin_time.update();
  _cms_end_time.update();

  _gc0_duration = 0.0;
  _gc0_period = 0.0;
  _gc0_promoted = 0;

  _cms_duration = 0.0;
  _cms_period = 0.0;
  _cms_allocated = 0;

  _cms_used_at_gc0_begin = 0;
  _cms_used_at_gc0_end = 0;
  _allow_duty_cycle_reduction = false;
  _valid_bits = 0;
}

double CMSStats::cms_free_adjustment_factor(size_t free) const {
  // TBD: CR 6909490
  return 1.0;
}

void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
}

// If promotion failure handling is on, use
// the padded average size of the promotion for each
// young generation collection.
double CMSStats::time_until_cms_gen_full() const {
  size_t cms_free = _cms_gen->cmsSpace()->free();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  size_t expected_promotion = MIN2(gch->young_gen()->capacity(),
                                   (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
  if (cms_free > expected_promotion) {
    // Start a cms collection if there isn't enough space to promote
    // for the next young collection.  Use the padded average as
    // a safety factor.
    cms_free -= expected_promotion;

    // Adjust by the safety factor.
    double cms_free_dbl = (double)cms_free;
    double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor) / 100.0;
    // Apply a further correction factor which tries to adjust
    // for recent occurrence of concurrent mode failures.
    cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
    cms_free_dbl = cms_free_dbl * cms_adjustment;

    log_trace(gc)("CMSStats::time_until_cms_gen_full: cms_free " SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
                  cms_free, expected_promotion);
    log_trace(gc)("  cms_free_dbl %f cms_consumption_rate %f", cms_free_dbl, cms_consumption_rate() + 1.0);
    // Add 1 in case the consumption rate goes to zero.
    return cms_free_dbl / (cms_consumption_rate() + 1.0);
  }
  return 0.0;
}
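
// A worked example (illustrative numbers only): with cms_free = 100 MB,
// expected_promotion = 20 MB, CMSIncrementalSafetyFactor = 10, an
// adjustment factor of 1.0 and a consumption rate of 4 MB/s, the usable
// free space is (100 - 20) * 0.9 = 72 MB, so the estimated time until the
// generation fills is 72 / (4 + 1) = 14.4 seconds.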

// Compare the duration of the cms collection to the
// time remaining before the cms generation is full.
// Note that the time from the start of the cms collection
// to the start of the cms sweep (less than the total
// duration of the cms collection) can be used.  This
// has been tried and some applications experienced
// promotion failures early in execution.  This was
// possibly because the averages were not accurate
// enough at the beginning.
double CMSStats::time_until_cms_start() const {
  // We add "gc0_period" to the "work" calculation
  // below because this query is done (mostly) at the
  // end of a scavenge, so we need to conservatively
  // account for that much possible delay
  // in the query so as to avoid concurrent mode failures
  // due to starting the collection just a wee bit too
  // late.
  double work = cms_duration() + gc0_period();
  double deadline = time_until_cms_gen_full();
  // If a concurrent mode failure occurred recently, we want to be
  // more conservative and halve our expected time_until_cms_gen_full()
  if (work > deadline) {
    log_develop_trace(gc)("CMSCollector: collect because of anticipated promotion before full %3.7f + %3.7f > %3.7f ",
                          cms_duration(), gc0_period(), time_until_cms_gen_full());
    return 0.0;
  }
  return deadline - work;
}
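
// Continuing the illustrative numbers above: if cms_duration() = 2.5 s and
// gc0_period() = 5.0 s, then work = 7.5 s against a deadline of 14.4 s, so
// roughly 6.9 seconds remain before a cycle should be started.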

#ifndef PRODUCT
void CMSStats::print_on(outputStream *st) const {
  st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
  st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
               gc0_duration(), gc0_period(), gc0_promoted());
  st->print(",cms_dur=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
            cms_duration(), cms_period(), cms_allocated());
  st->print(",cms_since_beg=%g,cms_since_end=%g",
            cms_time_since_begin(), cms_time_since_end());
  st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
            _cms_used_at_gc0_begin, _cms_used_at_gc0_end);

  if (valid()) {
    st->print(",promo_rate=%g,cms_alloc_rate=%g",
              promotion_rate(), cms_allocation_rate());
    st->print(",cms_consumption_rate=%g,time_until_full=%g",
              cms_consumption_rate(), time_until_cms_gen_full());
  }
  st->print(" ");
}
#endif // #ifndef PRODUCT

CMSCollector::CollectorState CMSCollector::_collectorState =
                             CMSCollector::Idling;
bool CMSCollector::_foregroundGCIsActive = false;
bool CMSCollector::_foregroundGCShouldWait = false;

CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
                           CardTableRS*                   ct,
                           ConcurrentMarkSweepPolicy*     cp):
  _cmsGen(cmsGen),
  _ct(ct),
  _ref_processor(NULL),    // will be set later
  _conc_workers(NULL),     // may be set later
  _abort_preclean(false),
  _start_sampling(false),
  _between_prologue_and_epilogue(false),
  _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
  _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize),
                 -1 /* lock-free */, "No_lock" /* dummy */),
  _modUnionClosurePar(&_modUnionTable),
  // Adjust my span to cover old (cms) gen
  _span(cmsGen->reserved()),
  // Construct the is_alive_closure with _span & markBitMap
  _is_alive_closure(_span, &_markBitMap),
  _restart_addr(NULL),
  _overflow_list(NULL),
  _stats(cmsGen),
  _eden_chunk_lock(new Mutex(Mutex::leaf + 1, "CMS_eden_chunk_lock", true,
                             // verify that this lock should be acquired with safepoint check.
                             Monitor::_safepoint_check_sometimes)),
  _eden_chunk_array(NULL),     // may be set in ctor body
  _eden_chunk_capacity(0),     // -- ditto --
  _eden_chunk_index(0),        // -- ditto --
  _survivor_plab_array(NULL),  // -- ditto --
  _survivor_chunk_array(NULL), // -- ditto --
  _survivor_chunk_capacity(0), // -- ditto --
  _survivor_chunk_index(0),    // -- ditto --
  _ser_pmc_preclean_ovflw(0),
  _ser_kac_preclean_ovflw(0),
  _ser_pmc_remark_ovflw(0),
  _par_pmc_remark_ovflw(0),
  _ser_kac_ovflw(0),
  _par_kac_ovflw(0),
#ifndef PRODUCT
  _num_par_pushes(0),
#endif
  _collection_count_start(0),
  _verifying(false),
  _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
  _completed_initialization(false),
  _collector_policy(cp),
  _should_unload_classes(CMSClassUnloadingEnabled),
  _concurrent_cycles_since_last_unload(0),
  _roots_scanning_options(GenCollectedHeap::SO_None),
  _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
  _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _cms_start_registered(false)
{
  if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
    ExplicitGCInvokesConcurrent = true;
  }
  // Now expand the span and allocate the collection support structures
  // (MUT, marking bit map etc.) to cover both generations subject to
  // collection.

  // For use by dirty card to oop closures.
  _cmsGen->cmsSpace()->set_collector(this);

  // Allocate MUT and marking bit map
  {
    MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
    if (!_markBitMap.allocate(_span)) {
      warning("Failed to allocate CMS Bit Map");
      return;
    }
    assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
  }
  {
    _modUnionTable.allocate(_span);
    assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
  }

  if (!_markStack.allocate(MarkStackSize)) {
    warning("Failed to allocate CMS Marking Stack");
    return;
  }

  // Support for multi-threaded concurrent phases
  if (CMSConcurrentMTEnabled) {
    if (FLAG_IS_DEFAULT(ConcGCThreads)) {
      // just for now
      FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3) / 4);
    }
    if (ConcGCThreads > 1) {
      _conc_workers = new YieldingFlexibleWorkGang("CMS Thread",
                                 ConcGCThreads, true);
      if (_conc_workers == NULL) {
        warning("GC/CMS: _conc_workers allocation failure: "
              "forcing -CMSConcurrentMTEnabled");
        CMSConcurrentMTEnabled = false;
      } else {
        _conc_workers->initialize_workers();
      }
    } else {
      CMSConcurrentMTEnabled = false;
    }
  }
  if (!CMSConcurrentMTEnabled) {
    ConcGCThreads = 0;
  } else {
    // Turn off CMSCleanOnEnter optimization temporarily for
    // the MT case where it's not fixed yet; see 6178663.
    CMSCleanOnEnter = false;
  }
  assert((_conc_workers != NULL) == (ConcGCThreads > 1),
         "Inconsistency");

  // Parallel task queues; these are shared for the
  // concurrent and stop-world phases of CMS, but
  // are not shared with parallel scavenge (ParNew).
  {
    uint i;
    uint num_queues = MAX2(ParallelGCThreads, ConcGCThreads);

    if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
         || ParallelRefProcEnabled)
        && num_queues > 0) {
      _task_queues = new OopTaskQueueSet(num_queues);
      if (_task_queues == NULL) {
        warning("task_queues allocation failure.");
        return;
      }
      _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues, mtGC);
      typedef Padded<OopTaskQueue> PaddedOopTaskQueue;
      for (i = 0; i < num_queues; i++) {
        PaddedOopTaskQueue *q = new PaddedOopTaskQueue();
        if (q == NULL) {
          warning("work_queue allocation failure.");
          return;
        }
        _task_queues->register_queue(i, q);
      }
      for (i = 0; i < num_queues; i++) {
        _task_queues->queue(i)->initialize();
        _hash_seed[i] = 17;  // copied from ParNew
      }
    }
  }

  _cmsGen->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);

  // CMSBootstrapOccupancy is a percentage in [0, 100]; convert it to a fraction.
  _bootstrap_occupancy = CMSBootstrapOccupancy / 100.0;

  // Now tell CMS generations the identity of their collector
  ConcurrentMarkSweepGeneration::set_collector(this);

  // Create & start a CMS thread for this CMS collector
  _cmsThread = ConcurrentMarkSweepThread::start(this);
  assert(cmsThread() != NULL, "CMS Thread should have been created");
  assert(cmsThread()->collector() == this,
         "CMS Thread should refer to this gen");
  assert(CGC_lock != NULL, "Where's the CGC_lock?");

  // Support for parallelizing young gen rescan
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->young_gen()->kind() == Generation::ParNew, "CMS can only be used with ParNew");
  _young_gen = (ParNewGeneration*)gch->young_gen();
  if (gch->supports_inline_contig_alloc()) {
    _top_addr = gch->top_addr();
    _end_addr = gch->end_addr();
    assert(_young_gen != NULL, "no _young_gen");
    _eden_chunk_index = 0;
    _eden_chunk_capacity = (_young_gen->max_capacity() + CMSSamplingGrain) / CMSSamplingGrain;
    _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
  }

  // Support for parallelizing survivor space rescan
  if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
    const size_t max_plab_samples =
      _young_gen->max_survivor_size() / (PLAB::min_size() * HeapWordSize);

    _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
    _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
    _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
    _survivor_chunk_capacity = max_plab_samples;
    for (uint i = 0; i < ParallelGCThreads; i++) {
      HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
      ChunkArray* cur = ::new (&_survivor_plab_array[i]) ChunkArray(vec, max_plab_samples);
      assert(cur->end() == 0, "Should be 0");
      assert(cur->array() == vec, "Should be vec");
      assert(cur->capacity() == max_plab_samples, "Error");
    }
  }

  NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
  _gc_counters = new CollectorCounters("CMS", 1);
  _completed_initialization = true;
  _inter_sweep_timer.start();  // start of time
}

const char* ConcurrentMarkSweepGeneration::name() const {
  return "concurrent mark-sweep generation";
}
void ConcurrentMarkSweepGeneration::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

// This is an optimized version of update_counters(). It takes the
// used value as a parameter rather than computing it.
//
void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
  if (UsePerfData) {
    _space_counters->update_used(used);
    _space_counters->update_capacity();
    _gen_counters->update_all();
  }
}

void ConcurrentMarkSweepGeneration::print() const {
  Generation::print();
  cmsSpace()->print();
}

#ifndef PRODUCT
void ConcurrentMarkSweepGeneration::print_statistics() {
  cmsSpace()->printFLCensus(0);
}
#endif

size_t
ConcurrentMarkSweepGeneration::contiguous_available() const {
  // dld proposes an improvement in precision here. If the committed
  // part of the space ends in a free block we should add that to
  // uncommitted size in the calculation below. Will make this
  // change later, staying with the approximation below for the
  // time being. -- ysr.
  return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
}

size_t
ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
  return _cmsSpace->max_alloc_in_words() * HeapWordSize;
}

size_t ConcurrentMarkSweepGeneration::max_available() const {
  return free() + _virtual_space.uncommitted_size();
}

bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
  size_t available = max_available();
  size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
  bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
  log_trace(gc, promotion)("CMS: promo attempt is%s safe: available(" SIZE_FORMAT ") %s av_promo(" SIZE_FORMAT "), max_promo(" SIZE_FORMAT ")",
                           res? "":" not", available, res? ">=":"<", av_promo, max_promotion_in_bytes);
  return res;
}

// At a promotion failure dump information on block layout in heap
// (cms old generation).
void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
  LogHandle(gc, promotion) log;
  if (log.is_trace()) {
    ResourceMark rm;
    cmsSpace()->dump_at_safepoint_with_locks(collector(), log.trace_stream());
  }
}

void ConcurrentMarkSweepGeneration::reset_after_compaction() {
  // Clear the promotion information.  These pointers can be adjusted
  // along with all the other pointers into the heap but
  // compaction is expected to be a rare event with
  // a heap using cms so don't do it without seeing the need.
  for (uint i = 0; i < ParallelGCThreads; i++) {
    _par_gc_thread_states[i]->promo.reset();
  }
}

void ConcurrentMarkSweepGeneration::compute_new_size() {
  assert_locked_or_safepoint(Heap_lock);

  // If incremental collection failed, we just want to expand
  // to the limit.
  if (incremental_collection_failed()) {
    clear_incremental_collection_failed();
    grow_to_reserved();
    return;
  }

  // The heap has been compacted but not reset yet.
  // Any metric such as free() or used() will be incorrect.

  CardGeneration::compute_new_size();

  // Reset again after a possible resizing
  if (did_compact()) {
    cmsSpace()->reset_after_compaction();
  }
}

void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
  assert_locked_or_safepoint(Heap_lock);

  // If incremental collection failed, we just want to expand
  // to the limit.
  if (incremental_collection_failed()) {
    clear_incremental_collection_failed();
    grow_to_reserved();
    return;
  }

  double free_percentage = ((double) free()) / capacity();
  double desired_free_percentage = (double) MinHeapFreeRatio / 100;
  double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;

  // compute expansion delta needed for reaching desired free percentage
  if (free_percentage < desired_free_percentage) {
    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
    assert(desired_capacity >= capacity(), "invalid expansion size");
    size_t expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
    LogHandle(gc) log;
    if (log.is_trace()) {
      log.trace("From compute_new_size: ");
      log.trace("  Free fraction %f", free_percentage);
      log.trace("  Desired free fraction %f", desired_free_percentage);
      log.trace("  Maximum free fraction %f", maximum_free_percentage);
      log.trace("  Capacity " SIZE_FORMAT, capacity() / 1000);
      log.trace("  Desired capacity " SIZE_FORMAT, desired_capacity / 1000);
      GenCollectedHeap* gch = GenCollectedHeap::heap();
      assert(gch->is_old_gen(this), "The CMS generation should always be the old generation");
      size_t young_size = gch->young_gen()->capacity();
      log.trace("  Young gen size " SIZE_FORMAT, young_size / 1000);
      log.trace("  unsafe_max_alloc_nogc " SIZE_FORMAT, unsafe_max_alloc_nogc() / 1000);
      log.trace("  contiguous available " SIZE_FORMAT, contiguous_available() / 1000);
      log.trace("  Expand by " SIZE_FORMAT " (bytes)", expand_bytes);
    }
    // safe if expansion fails
    expand_for_gc_cause(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
    log.trace("  Expanded free fraction %f", ((double) free()) / capacity());
  } else {
    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
    assert(desired_capacity <= capacity(), "invalid expansion size");
    size_t shrink_bytes = capacity() - desired_capacity;
    // Don't shrink unless the delta is greater than the minimum shrink we want
    if (shrink_bytes >= MinHeapDeltaBytes) {
      shrink_free_list_by(shrink_bytes);
    }
  }
}

Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
  return cmsSpace()->freelistLock();
}

HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size, bool tlab) {
  CMSSynchronousYieldRequest yr;
  MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
  return have_lock_and_allocate(size, tlab);
}

HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
                                                                bool   tlab /* ignored */) {
  assert_lock_strong(freelistLock());
  size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
  HeapWord* res = cmsSpace()->allocate(adjustedSize);
  // Allocate the object live (grey) if the background collector has
  // started marking. This is necessary because the marker may
  // have passed this address and consequently this object will
  // not otherwise be greyed and would be incorrectly swept up.
  // Note that if this object contains references, the writing
  // of those references will dirty the card containing this object
  // allowing the object to be blackened (and its references scanned)
  // either during a preclean phase or at the final checkpoint.
  if (res != NULL) {
    // We may block here with an uninitialized object with
    // its mark-bit or P-bits not yet set. Such objects need
    // to be safely navigable by block_start().
    assert(oop(res)->klass_or_null() == NULL, "Object should be uninitialized here.");
    assert(!((FreeChunk*)res)->is_free(), "Error, block will look free but show wrong size");
    collector()->direct_allocated(res, adjustedSize);
    _direct_allocated_words += adjustedSize;
    // allocation counters
    NOT_PRODUCT(
      _numObjectsAllocated++;
      _numWordsAllocated += (int)adjustedSize;
    )
  }
  return res;
}

// In the case of direct allocation by mutators in a generation that
// is being concurrently collected, the object must be allocated
// live (grey) if the background collector has started marking.
// This is necessary because the marker may
// have passed this address and consequently this object will
// not otherwise be greyed and would be incorrectly swept up.
// Note that if this object contains references, the writing
// of those references will dirty the card containing this object
// allowing the object to be blackened (and its references scanned)
// either during a preclean phase or at the final checkpoint.
void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
  assert(_markBitMap.covers(start, size), "Out of bounds");
  if (_collectorState >= Marking) {
    MutexLockerEx y(_markBitMap.lock(),
                    Mutex::_no_safepoint_check_flag);
    // [see comments preceding SweepClosure::do_blk() below for details]
    //
    // Can the P-bits be deleted now?  JJJ
    //
    // 1. need to mark the object as live so it isn't collected
    // 2. need to mark the 2nd bit to indicate the object may be uninitialized
    // 3. need to mark the end of the object so marking, precleaning or sweeping
    //    can skip over uninitialized or unparsable objects. An allocated
    //    object is considered uninitialized for our purposes as long as
    //    its klass word is NULL.  All old gen objects are parsable
    //    as soon as they are initialized.)
    _markBitMap.mark(start);          // object is live
    _markBitMap.mark(start + 1);      // object is potentially uninitialized?
    _markBitMap.mark(start + size - 1);
                                      // mark end of object
  }
  // check that oop looks uninitialized
  assert(oop(start)->klass_or_null() == NULL, "_klass should be NULL");
}

void CMSCollector::promoted(bool par, HeapWord* start,
                            bool is_obj_array, size_t obj_size) {
  assert(_markBitMap.covers(start), "Out of bounds");
  // See comment in direct_allocated() about when objects should
  // be allocated live.
  if (_collectorState >= Marking) {
    // we already hold the marking bit map lock, taken in
    // the prologue
    if (par) {
      _markBitMap.par_mark(start);
    } else {
      _markBitMap.mark(start);
    }
    // We don't need to mark the object as uninitialized (as
    // in direct_allocated above) because this is being done with the
    // world stopped and the object will be initialized by the
    // time the marking, precleaning or sweeping get to look at it.
    // But see the code for copying objects into the CMS generation,
    // where we need to ensure that concurrent readers of the
    // block offset table are able to safely navigate a block that
    // is in flux from being free to being allocated (and in
    // transition while being copied into) and subsequently
    // becoming a bona-fide object when the copy/promotion is complete.
    assert(SafepointSynchronize::is_at_safepoint(),
           "expect promotion only at safepoints");

    if (_collectorState < Sweeping) {
      // Mark the appropriate cards in the modUnionTable, so that
      // this object gets scanned before the sweep. If this is
      // not done, CMS generation references in the object might
      // not get marked.
      // For the case of arrays, which are otherwise precisely
      // marked, we need to dirty the entire array, not just its head.
      if (is_obj_array) {
        // The [par_]mark_range() method expects mr.end() below to
        // be aligned to the granularity of a bit's representation
        // in the heap. In the case of the MUT below, that's a
        // card size.
        MemRegion mr(start,
                     (HeapWord*)round_to((intptr_t)(start + obj_size),
                        CardTableModRefBS::card_size /* bytes */));
        if (par) {
          _modUnionTable.par_mark_range(mr);
        } else {
          _modUnionTable.mark_range(mr);
        }
      } else {  // not an obj array; we can just mark the head
        if (par) {
          _modUnionTable.par_mark(start);
        } else {
          _modUnionTable.mark(start);
        }
      }
    }
  }
}

oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  // allocate, copy and if necessary update promoinfo --
  // delegate to underlying space.
  assert_lock_strong(freelistLock());

#ifndef PRODUCT
  if (GenCollectedHeap::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT

  oop res = _cmsSpace->promote(obj, obj_size);
  if (res == NULL) {
    // expand and retry
    size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
    expand_for_gc_cause(s*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_promotion);
    // Since this is the old generation, we don't try to promote
    // into a more senior generation.
    res = _cmsSpace->promote(obj, obj_size);
  }
  if (res != NULL) {
    // See comment in allocate() about when objects should
    // be allocated live.
    assert(obj->is_oop(), "Will dereference klass pointer below");
    collector()->promoted(false,           // Not parallel
                          (HeapWord*)res, obj->is_objArray(), obj_size);
    // promotion counters
    NOT_PRODUCT(
      _numObjectsPromoted++;
      _numWordsPromoted +=
        (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
    )
  }
  return res;
}


// IMPORTANT: Notes on object size recognition in CMS.
// ---------------------------------------------------
// A block of storage in the CMS generation is always in
// one of three states. A free block (FREE), an allocated
// object (OBJECT) whose size() method reports the correct size,
// and an intermediate state (TRANSIENT) in which its size cannot
// be accurately determined.
// STATE IDENTIFICATION:   (32 bit and 64 bit w/o COOPS)
// -----------------------------------------------------
// FREE:      klass_word & 1 == 1; mark_word holds block size
//
// OBJECT:    klass_word installed; klass_word != 0 && klass_word & 1 == 0;
//            obj->size() computes correct size
//
// TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
//
// STATE IDENTIFICATION: (64 bit+COOPS)
// ------------------------------------
// FREE:      mark_word & CMS_FREE_BIT == 1; mark_word & ~CMS_FREE_BIT gives block_size
//
// OBJECT:    klass_word installed; klass_word != 0;
//            obj->size() computes correct size
//
// TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
//
//
// STATE TRANSITION DIAGRAM
//
//        mut / parnew                     mut  /  parnew
// FREE --------------------> TRANSIENT ---------------------> OBJECT --|
//  ^                                                                   |
//  |------------------------ DEAD <------------------------------------|
//         sweep                            mut
//
// While a block is in TRANSIENT state its size cannot be determined
// so readers will either need to come back later or stall until
// the size can be determined. Note that for the case of direct
// allocation, P-bits, when available, may be used to determine the
// size of an object that may not yet have been initialized.

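// Illustrative sketch only: a reader that needs a conservative block size
// (cf. the block_size variants in CompactibleFreeListSpace) must therefore
// be prepared for all three states, along these lines:
//
//   if (FreeChunk::indicatesFreeChunk(p)) {
//     size = ((FreeChunk*)p)->size();     // FREE: size stored in the chunk
//   } else if (oop(p)->klass_or_null() != NULL) {
//     size = oop(p)->size();              // OBJECT: ask the oop
//   } else {
//     // TRANSIENT: retry later or stall; for direct allocation,
//     // consult the P-bits if they are available.
//   }
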
// Things to support parallel young-gen collection.
oop
ConcurrentMarkSweepGeneration::par_promote(int thread_num,
                                           oop old, markOop m,
                                           size_t word_sz) {
#ifndef PRODUCT
  if (GenCollectedHeap::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT

  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  PromotionInfo* promoInfo = &ps->promo;
  // if we are tracking promotions, then first ensure space for
  // promotion (including spooling space for saving header if necessary).
  // then allocate and copy, then track promoted info if needed.
  // When tracking (see PromotionInfo::track()), the mark word may
  // be displaced and in this case restoration of the mark word
  // occurs in the (oop_since_save_marks_)iterate phase.
  if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
    // Out of space for allocating spooling buffers;
    // try expanding and allocating spooling buffers.
    if (!expand_and_ensure_spooling_space(promoInfo)) {
      return NULL;
    }
  }
  assert(promoInfo->has_spooling_space(), "Control point invariant");
  const size_t alloc_sz = CompactibleFreeListSpace::adjustObjectSize(word_sz);
  HeapWord* obj_ptr = ps->lab.alloc(alloc_sz);
  if (obj_ptr == NULL) {
     obj_ptr = expand_and_par_lab_allocate(ps, alloc_sz);
     if (obj_ptr == NULL) {
       return NULL;
     }
  }
  oop obj = oop(obj_ptr);
  OrderAccess::storestore();
  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
  // IMPORTANT: See note on object initialization for CMS above.
  // Otherwise, copy the object.  Here we must be careful to insert the
  // klass pointer last, since this marks the block as an allocated object.
  // Except with compressed oops it's the mark word.
  HeapWord* old_ptr = (HeapWord*)old;
  // Restore the mark word copied above.
  obj->set_mark(m);
  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
  OrderAccess::storestore();

  if (UseCompressedClassPointers) {
    // Copy gap missed by (aligned) header size calculation below
    obj->set_klass_gap(old->klass_gap());
  }
  if (word_sz > (size_t)oopDesc::header_size()) {
    Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
                                 obj_ptr + oopDesc::header_size(),
                                 word_sz - oopDesc::header_size());
  }

  // Now we can track the promoted object, if necessary.  We take care
  // to delay the transition from uninitialized to full object
  // (i.e., insertion of klass pointer) until after, so that it
  // atomically becomes a promoted object.
  if (promoInfo->tracking()) {
    promoInfo->track((PromotedObject*)obj, old->klass());
  }
  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
  assert(old->is_oop(), "Will use and dereference old klass ptr below");

  // Finally, install the klass pointer (this should be volatile).
  OrderAccess::storestore();
  obj->set_klass(old->klass());
  // We should now be able to calculate the right size for this object
  assert(obj->is_oop() && obj->size() == (int)word_sz, "Error, incorrect size computed for promoted object");

  collector()->promoted(true,          // parallel
                        obj_ptr, old->is_objArray(), word_sz);

  NOT_PRODUCT(
    Atomic::inc_ptr(&_numObjectsPromoted);
    Atomic::add_ptr(alloc_sz, &_numWordsPromoted);
  )

  return obj;
}

void
ConcurrentMarkSweepGeneration::
par_promote_alloc_done(int thread_num) {
  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  ps->lab.retire(thread_num);
}

void
ConcurrentMarkSweepGeneration::
par_oop_since_save_marks_iterate_done(int thread_num) {
  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  ParScanWithoutBarrierClosure* dummy_cl = NULL;
  ps->promo.promoted_oops_iterate_nv(dummy_cl);
}

bool ConcurrentMarkSweepGeneration::should_collect(bool   full,
                                                   size_t size,
                                                   bool   tlab)
{
  // We allow a STW collection only if a full
  // collection was requested.
  return full || should_allocate(size, tlab); // FIX ME !!!
  // This and promotion failure handling are connected at the
  // hip and should be fixed by untying them.
}

bool CMSCollector::shouldConcurrentCollect() {
  if (_full_gc_requested) {
    log_trace(gc)("CMSCollector: collect because of explicit gc request (or GCLocker)");
    return true;
  }

  FreelistLocker x(this);
  // ------------------------------------------------------------------
  // Print out lots of information which affects the initiation of
  // a collection.
  LogHandle(gc) log;
  if (log.is_trace() && stats().valid()) {
    log.trace("CMSCollector shouldConcurrentCollect: ");
    ResourceMark rm;
    stats().print_on(log.trace_stream());
    log.trace("time_until_cms_gen_full %3.7f", stats().time_until_cms_gen_full());
    log.trace("free=" SIZE_FORMAT, _cmsGen->free());
    log.trace("contiguous_available=" SIZE_FORMAT, _cmsGen->contiguous_available());
    log.trace("promotion_rate=%g", stats().promotion_rate());
    log.trace("cms_allocation_rate=%g", stats().cms_allocation_rate());
    log.trace("occupancy=%3.7f", _cmsGen->occupancy());
    log.trace("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
    log.trace("cms_time_since_begin=%3.7f", stats().cms_time_since_begin());
    log.trace("cms_time_since_end=%3.7f", stats().cms_time_since_end());
    log.trace("metadata initialized %d", MetaspaceGC::should_concurrent_collect());
  }
  // ------------------------------------------------------------------

  // If the estimated time to complete a cms collection (cms_duration())
  // is less than the estimated time remaining until the cms generation
  // is full, start a collection.
  if (!UseCMSInitiatingOccupancyOnly) {
    if (stats().valid()) {
      if (stats().time_until_cms_start() == 0.0) {
        return true;
      }
    } else {
      // We want to conservatively collect somewhat early in order
      // to try and "bootstrap" our CMS/promotion statistics;
      // this branch will not fire after the first successful CMS
      // collection because the stats should then be valid.
      if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
        log_trace(gc)(" CMSCollector: collect for bootstrapping statistics: occupancy = %f, boot occupancy = %f",
                      _cmsGen->occupancy(), _bootstrap_occupancy);
        return true;
      }
    }
  }

  // Otherwise, we start a collection cycle if the
  // old gen wants a collection cycle started. Each may use
  // an appropriate criterion for making this decision.
  // XXX We need to make sure that the gen expansion
  // criterion dovetails well with this. XXX NEED TO FIX THIS
  if (_cmsGen->should_concurrent_collect()) {
    log_trace(gc)("CMS old gen initiated");
    return true;
  }

  // We start a collection if we believe an incremental collection may fail;
  // this is not likely to be productive in practice because it's probably too
  // late anyway.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->collector_policy()->is_generation_policy(),
         "You may want to check the correctness of the following");
  if (gch->incremental_collection_will_fail(true /* consult_young */)) {
    log_trace(gc)("CMSCollector: collect because incremental collection will fail ");
    return true;
  }

  if (MetaspaceGC::should_concurrent_collect()) {
    log_trace(gc)("CMSCollector: collect for metadata allocation ");
    return true;
  }

  // CMSTriggerInterval starts a CMS cycle if enough time has passed.
  if (CMSTriggerInterval >= 0) {
    if (CMSTriggerInterval == 0) {
      // Trigger always
      return true;
    }

    // Check the CMS time since begin (we do not check the stats validity
    // as we want to be able to trigger the first CMS cycle as well)
    if (stats().cms_time_since_begin() >= (CMSTriggerInterval / ((double) MILLIUNITS))) {
      if (stats().valid()) {
        log_trace(gc)("CMSCollector: collect because of trigger interval (time since last begin %3.7f secs)",
                      stats().cms_time_since_begin());
      } else {
        log_trace(gc)("CMSCollector: collect because of trigger interval (first collection)");
      }
      return true;
    }
  }

  return false;
}

void CMSCollector::set_did_compact(bool v) { _cmsGen->set_did_compact(v); }

// Clear _expansion_cause fields of constituent generations
void CMSCollector::clear_expansion_cause() {
  _cmsGen->clear_expansion_cause();
}

// We should be conservative in starting a collection cycle.  Starting
// too eagerly runs the risk of collecting too often in the
// extreme.  Collecting too rarely falls back on full collections,
// which works, even if not optimal in terms of concurrent work.
// As a workaround for collecting too eagerly, use the flag
// UseCMSInitiatingOccupancyOnly.  This also has the advantage of
// giving the user an easily understandable way of controlling the
// collections.
// We want to start a new collection cycle if any of the following
// conditions hold:
// . our current occupancy exceeds the configured initiating occupancy
//   for this generation, or
// . we recently needed to expand this space and have not, since that
//   expansion, done a collection of this generation, or
// . the underlying space believes that it may be a good idea to initiate
//   a concurrent collection (this may be based on criteria such as the
//   following: the space uses linear allocation and linear allocation is
//   going to fail, or there is believed to be excessive fragmentation in
//   the generation, etc... or ...
// [.(currently done by CMSCollector::shouldConcurrentCollect() only for
//   the case of the old generation; see CR 6543076):
//   we may be approaching a point at which allocation requests may fail because
//   we will be out of sufficient free space given allocation rate estimates.]
bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {

  assert_lock_strong(freelistLock());
  if (occupancy() > initiating_occupancy()) {
    log_trace(gc)(" %s: collect because of occupancy %f / %f  ",
                  short_name(), occupancy(), initiating_occupancy());
    return true;
  }
  if (UseCMSInitiatingOccupancyOnly) {
    return false;
  }
  if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
    log_trace(gc)(" %s: collect because expanded for allocation ", short_name());
    return true;
  }
  return false;
}

void ConcurrentMarkSweepGeneration::collect(bool   full,
                                            bool   clear_all_soft_refs,
                                            size_t size,
                                            bool   tlab)
{
  collector()->collect(full, clear_all_soft_refs, size, tlab);
}

void CMSCollector::collect(bool   full,
                           bool   clear_all_soft_refs,
                           size_t size,
                           bool   tlab)
{
  // The following "if" branch is present for defensive reasons.
  // In the current uses of this interface, it can be replaced with:
1272   // assert(!GCLocker::is_active(), "Can't be called otherwise");
1273   // But I am not placing that assert here to allow future
1274   // generality in invoking this interface.
1275   if (GCLocker::is_active()) {
1276     // A consistency test for GCLocker
1277     assert(GCLocker::needs_gc(), "Should have been set already");
1278     // Skip this foreground collection, instead
1279     // expanding the heap if necessary.
1280     // Need the free list locks for the call to free() in compute_new_size()
1281     compute_new_size();
1282     return;
1283   }
1284   acquire_control_and_collect(full, clear_all_soft_refs);
1285 }
1286 
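     // Note on the protocol below: the request is honored only if the caller's
     // snapshot of the full collection count (full_gc_count) is still current.
     // If another full collection has started in the interim, the heap's count
     // will have advanced past the snapshot and the request is dropped as
     // already satisfied.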
1287 void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) {
1288   GenCollectedHeap* gch = GenCollectedHeap::heap();
1289   unsigned int gc_count = gch->total_full_collections();
1290   if (gc_count == full_gc_count) {
1291     MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
1292     _full_gc_requested = true;
1293     _full_gc_cause = cause;
1294     CGC_lock->notify();   // nudge CMS thread
1295   } else {
1296     assert(gc_count > full_gc_count, "Error: causal loop");
1297   }
1298 }
1299 
1300 bool CMSCollector::is_external_interruption() {
1301   GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
1302   return GCCause::is_user_requested_gc(cause) ||
1303          GCCause::is_serviceability_requested_gc(cause);
1304 }
1305 
1306 void CMSCollector::report_concurrent_mode_interruption() {
1307   if (is_external_interruption()) {
1308     log_debug(gc)("Concurrent mode interrupted");
1309   } else {
1310     log_debug(gc)("Concurrent mode failure");
1311     _gc_tracer_cm->report_concurrent_mode_failure();
1312   }
1313 }
1314 
1315 
1316 // The foreground and background collectors need to coordinate in order
1317 // to make sure that they do not mutually interfere with CMS collections.
1318 // When a background collection is active,
1319 // the foreground collector may need to take over (preempt) and
1320 // synchronously complete an ongoing collection. Depending on the
1321 // frequency of the background collections and the heap usage
1322 // of the application, this preemption can be rare or frequent.
1323 // There are only certain
1324 // points in the background collection at which the "collection-baton"
1325 // can be passed to the foreground collector.
1326 //
1327 // The foreground collector will wait for the baton before
1328 // starting any part of the collection.  The foreground collector
1329 // will only wait at one location.
1330 //
1331 // The background collector will yield the baton before starting a new
1332 // phase of the collection (e.g., before initial marking, marking from roots,
1333 // precleaning, final re-mark, sweep etc.)  This is normally done at the head
1334 // of the loop which switches the phases. The background collector does some
1335 // of the phases (initial mark, final re-mark) with the world stopped.
1336 // Because of locking involved in stopping the world,
1337 // the foreground collector should not block waiting for the background
1338 // collector when it is doing a stop-the-world phase.  The background
1339 // collector will yield the baton at an additional point just before
1340 // it enters a stop-the-world phase.  Once the world is stopped, the
1341 // background collector checks the phase of the collection.  If the
1342 // phase has not changed, it proceeds with the collection.  If the
1343 // phase has changed, it skips that phase of the collection.  See
1344 // the comments on the use of the Heap_lock in collect_in_background().
1345 //
1346 // Variables used in baton passing.
1347 //   _foregroundGCIsActive - Set to true by the foreground collector when
1348 //      it wants the baton.  The foreground clears it when it has finished
1349 //      the collection.
1350 //   _foregroundGCShouldWait - Set to true by the background collector
1351 //      when it is running.  The foreground collector waits while
1352 //      _foregroundGCShouldWait is true.
1353 //  CGC_lock - monitor used to protect access to the above variables
1354 //      and to notify the foreground and background collectors.
1355 //  _collectorState - current state of the CMS collection.
1356 //
1357 // The foreground collector
1358 //   acquires the CGC_lock
1359 //   sets _foregroundGCIsActive
1360 //   waits on the CGC_lock for _foregroundGCShouldWait to be false
1361 //     various locks acquired in preparation for the collection
1362 //     are released so as not to block the background collector
1363 //     that is in the midst of a collection
1364 //   proceeds with the collection
1365 //   clears _foregroundGCIsActive
1366 //   returns
1367 //
1368 // The background collector in a loop iterating on the phases of the
1369 //      collection
1370 //   acquires the CGC_lock
1371 //   sets _foregroundGCShouldWait
1372 //   if _foregroundGCIsActive is set
1373 //     clears _foregroundGCShouldWait, notifies CGC_lock
1374 //     waits on CGC_lock for _foregroundGCIsActive to become false
1375 //     and exits the loop.
1376 //   otherwise
1377 //     proceed with that phase of the collection
1378 //     if the phase is a stop-the-world phase,
1379 //       yield the baton once more just before enqueueing
1380 //       the stop-world CMS operation (executed by the VM thread).
1381 //   returns after all phases of the collection are done
1382 //
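     // In sketch form, the foreground side of this handshake reduces to the
     // following (CMS-token bookkeeping and assertions elided; see
     // acquire_control_and_collect() below for the real thing):
     //
     //   _foregroundGCIsActive = true;
     //   {
     //     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
     //     CGC_lock->notify();           // wake a possibly blocked CMS thread
     //     while (_foregroundGCShouldWait) {
     //       CGC_lock->wait(Mutex::_no_safepoint_check_flag);
     //     }
     //   }
     //   ... do the foreground collection ...
     //   _foregroundGCIsActive = false;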
1383 
1384 void CMSCollector::acquire_control_and_collect(bool full,
1385         bool clear_all_soft_refs) {
1386   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1387   assert(!Thread::current()->is_ConcurrentGC_thread(),
1388          "shouldn't try to acquire control from self!");
1389 
1390   // Start the protocol for acquiring control of the
1391   // collection from the background collector (aka CMS thread).
1392   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1393          "VM thread should have CMS token");
1394   // Remember the possibly interrupted state of an ongoing
1395   // concurrent collection
1396   CollectorState first_state = _collectorState;
1397 
1398   // Signal to a possibly ongoing concurrent collection that
1399   // we want to do a foreground collection.
1400   _foregroundGCIsActive = true;
1401 
1402   // Release locks and wait for a notify from the background collector;
1403   // releasing the locks is only necessary for phases which
1404   // perform yields, to improve the granularity of the collection.
1405   assert_lock_strong(bitMapLock());
1406   // We need to lock the Free list lock for the space that we are
1407   // currently collecting.
1408   assert(haveFreelistLocks(), "Must be holding free list locks");
1409   bitMapLock()->unlock();
1410   releaseFreelistLocks();
1411   {
1412     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1413     if (_foregroundGCShouldWait) {
1414       // We are going to be waiting for action from the CMS thread;
1415       // it had better not be gone (for instance at shutdown)!
1416       assert(ConcurrentMarkSweepThread::cmst() != NULL,
1417              "CMS thread must be running");
1418       // Wait here until the background collector gives us the go-ahead
1419       ConcurrentMarkSweepThread::clear_CMS_flag(
1420         ConcurrentMarkSweepThread::CMS_vm_has_token);  // release token
1421       // Get a possibly blocked CMS thread going:
1422       //   Note that we set _foregroundGCIsActive true above,
1423       //   without protection of the CGC_lock.
1424       CGC_lock->notify();
1425       assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(),
1426              "Possible deadlock");
1427       while (_foregroundGCShouldWait) {
1428         // wait for notification
1429         CGC_lock->wait(Mutex::_no_safepoint_check_flag);
1430         // Possibility of delay/starvation here, since the CMS token does
1431         // not know to give priority to the VM thread? Actually, I think
1432         // there wouldn't be any delay/starvation, but the proof of
1433         // that "fact" (?) appears non-trivial. XXX 20011219YSR
1434       }
1435       ConcurrentMarkSweepThread::set_CMS_flag(
1436         ConcurrentMarkSweepThread::CMS_vm_has_token);
1437     }
1438   }
1439   // The CMS_token is already held.  Get back the other locks.
1440   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1441          "VM thread should have CMS token");
1442   getFreelistLocks();
1443   bitMapLock()->lock_without_safepoint_check();
1444   log_debug(gc, state)("CMS foreground collector has asked for control " INTPTR_FORMAT " with first state %d",
1445                        p2i(Thread::current()), first_state);
1446   log_debug(gc, state)("    gets control with state %d", _collectorState);
1447 
1448   // Inform the cms gen if this was due to a partial collection failing.
1449   // The CMS gen may use this fact to determine its expansion policy.
1450   GenCollectedHeap* gch = GenCollectedHeap::heap();
1451   if (gch->incremental_collection_will_fail(false /* don't consult_young */)) {
1452     assert(!_cmsGen->incremental_collection_failed(),
1453            "Should have been noticed, reacted to and cleared");
1454     _cmsGen->set_incremental_collection_failed();
1455   }
1456 
1457   if (first_state > Idling) {
1458     report_concurrent_mode_interruption();
1459   }
1460 
1461   set_did_compact(true);
1462 
1463   // If the collection is being acquired from the background
1464   // collector, there may be references on the discovered
1465   // references lists.  Abandon those references, since some
1466   // of them may have become unreachable after concurrent
1467   // discovery; the STW compacting collector will redo discovery
1468   // more precisely, without being subject to floating garbage.
1469   // Leaving otherwise unreachable references in the discovered
1470   // lists would require special handling.
1471   ref_processor()->disable_discovery();
1472   ref_processor()->abandon_partial_discovery();
1473   ref_processor()->verify_no_references_recorded();
1474 
1475   if (first_state > Idling) {
1476     save_heap_summary();
1477   }
1478 
1479   do_compaction_work(clear_all_soft_refs);
1480 
1481   // Has the GC time limit been exceeded?
1482   size_t max_eden_size = _young_gen->max_eden_size();
1483   GCCause::Cause gc_cause = gch->gc_cause();
1484   size_policy()->check_gc_overhead_limit(_young_gen->used(),
1485                                          _young_gen->eden()->used(),
1486                                          _cmsGen->max_capacity(),
1487                                          max_eden_size,
1488                                          full,
1489                                          gc_cause,
1490                                          gch->collector_policy());
1491 
1492   // Reset the expansion cause, now that we just completed
1493   // a collection cycle.
1494   clear_expansion_cause();
1495   _foregroundGCIsActive = false;
1496   return;
1497 }
1498 
1499 // Resize the tenured generation
1500 // after obtaining the free list locks for the
1501 // two generations.
1502 void CMSCollector::compute_new_size() {
1503   assert_locked_or_safepoint(Heap_lock);
1504   FreelistLocker z(this);
1505   MetaspaceGC::compute_new_size();
1506   _cmsGen->compute_new_size_free_list();
1507 }
1508 
1509 // A work method used by the foreground collector to do
1510 // a mark-sweep-compact.
1511 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
1512   GenCollectedHeap* gch = GenCollectedHeap::heap();
1513 
1514   STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
1515   gc_timer->register_gc_start();
1516 
1517   SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
1518   gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
1519 
1520   gch->pre_full_gc_dump(gc_timer);
1521 
1522   GCTraceTime(Trace, gc) t("CMS:MSC");
1523 
1524   // Temporarily widen the span of the weak reference processing to
1525   // the entire heap.
1526   MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
1527   ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
1528   // Temporarily, clear the "is_alive_non_header" field of the
1529   // reference processor.
1530   ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL);
1531   // Temporarily make reference _processing_ single threaded (non-MT).
1532   ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false);
1533   // Temporarily make refs discovery atomic
1534   ReferenceProcessorAtomicMutator rp_mut_atomic(ref_processor(), true);
1535   // Temporarily make reference _discovery_ single threaded (non-MT)
1536   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
1537 
1538   ref_processor()->set_enqueuing_is_done(false);
1539   ref_processor()->enable_discovery();
1540   ref_processor()->setup_policy(clear_all_soft_refs);
1541   // If an asynchronous collection finishes, the _modUnionTable is
1542   // all clear.  If we are taking over from an asynchronous (background)
1543   // collection, clear the _modUnionTable.
1544   assert(_collectorState != Idling || _modUnionTable.isAllClear(),
1545     "_modUnionTable should be clear if the baton was not passed");
1546   _modUnionTable.clear_all();
1547   assert(_collectorState != Idling || _ct->klass_rem_set()->mod_union_is_clear(),
1548     "mod union for klasses should be clear if the baton was passed");
1549   _ct->klass_rem_set()->clear_mod_union();
1550 
1551   // We must adjust the allocation statistics being maintained
1552   // in the free list space. We do so by reading and clearing
1553   // the sweep timer and updating the block flux rate estimates below.
1554   assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
1555   if (_inter_sweep_timer.is_active()) {
1556     _inter_sweep_timer.stop();
1557     // Note that we do not use this sample to update the _inter_sweep_estimate.
1558     _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
1559                                             _inter_sweep_estimate.padded_average(),
1560                                             _intra_sweep_estimate.padded_average());
1561   }
1562 
1563   GenMarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs);
1564   #ifdef ASSERT
1565     CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
1566     size_t free_size = cms_space->free();
1567     assert(free_size ==
1568            pointer_delta(cms_space->end(), cms_space->compaction_top())
1569            * HeapWordSize,
1570       "All the free space should be compacted into one chunk at top");
1571     assert(cms_space->dictionary()->total_chunk_size(
1572                                       debug_only(cms_space->freelistLock())) == 0 ||
1573            cms_space->totalSizeInIndexedFreeLists() == 0,
1574       "All the free space should be in a single chunk");
1575     size_t num = cms_space->totalCount();
1576     assert((free_size == 0 && num == 0) ||
1577            (free_size > 0  && (num == 1 || num == 2)),
1578          "There should be at most 2 free chunks after compaction");
1579   #endif // ASSERT
1580   _collectorState = Resetting;
1581   assert(_restart_addr == NULL,
1582          "Should have been NULL'd before baton was passed");
1583   reset_stw();
1584   _cmsGen->reset_after_compaction();
1585   _concurrent_cycles_since_last_unload = 0;
1586 
1587   // Clear any data recorded in the PLAB chunk arrays.
1588   if (_survivor_plab_array != NULL) {
1589     reset_survivor_plab_arrays();
1590   }
1591 
1592   // Adjust the per-size allocation stats for the next epoch.
1593   _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
1594   // Restart the "inter sweep timer" for the next epoch.
1595   _inter_sweep_timer.reset();
1596   _inter_sweep_timer.start();
1597 
1598   gch->post_full_gc_dump(gc_timer);
1599 
1600   gc_timer->register_gc_end();
1601 
1602   gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
1603 
1604   // For a mark-sweep-compact, compute_new_size() will be called
1605   // in the heap's do_collection() method.
1606 }
1607 
1608 void CMSCollector::print_eden_and_survivor_chunk_arrays() {
1609   LogHandle(gc, heap) log;
1610   if (!log.is_trace()) {
1611     return;
1612   }
1613 
1614   ContiguousSpace* eden_space = _young_gen->eden();
1615   ContiguousSpace* from_space = _young_gen->from();
1616   ContiguousSpace* to_space   = _young_gen->to();
1617   // Eden
1618   if (_eden_chunk_array != NULL) {
1619     log.trace("eden " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
1620               p2i(eden_space->bottom()), p2i(eden_space->top()),
1621               p2i(eden_space->end()), eden_space->capacity());
1622     log.trace("_eden_chunk_index=" SIZE_FORMAT ", _eden_chunk_capacity=" SIZE_FORMAT,
1623               _eden_chunk_index, _eden_chunk_capacity);
1624     for (size_t i = 0; i < _eden_chunk_index; i++) {
1625       log.trace("_eden_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT, i, p2i(_eden_chunk_array[i]));
1626     }
1627   }
1628   // Survivor
1629   if (_survivor_chunk_array != NULL) {
1630     log.trace("survivor " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
1631               p2i(from_space->bottom()), p2i(from_space->top()),
1632               p2i(from_space->end()), from_space->capacity());
1633     log.trace("_survivor_chunk_index=" SIZE_FORMAT ", _survivor_chunk_capacity=" SIZE_FORMAT,
1634               _survivor_chunk_index, _survivor_chunk_capacity);
1635     for (size_t i = 0; i < _survivor_chunk_index; i++) {
1636       log.trace("_survivor_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT, i, p2i(_survivor_chunk_array[i]));
1637     }
1638   }
1639 }
1640 
1641 void CMSCollector::getFreelistLocks() const {
1642   // Get locks for all free lists in all generations that this
1643   // collector is responsible for
1644   _cmsGen->freelistLock()->lock_without_safepoint_check();
1645 }
1646 
1647 void CMSCollector::releaseFreelistLocks() const {
1648   // Release locks for all free lists in all generations that this
1649   // collector is responsible for
1650   _cmsGen->freelistLock()->unlock();
1651 }
1652 
1653 bool CMSCollector::haveFreelistLocks() const {
1654   // Check locks for all free lists in all generations that this
1655   // collector is responsible for
1656   assert_lock_strong(_cmsGen->freelistLock());
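       // The strong-lock assertion above is the only check we can make; the
       // method itself is meant for use only from within asserts, so in a
       // product build it must never be reached at all: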
1657   PRODUCT_ONLY(ShouldNotReachHere());
1658   return true;
1659 }
1660 
1661 // A utility class that is used by the CMS collector to
1662 // temporarily "release" the foreground collector from its
1663 // usual obligation to wait for the background collector to
1664 // complete an ongoing phase before proceeding.
1665 class ReleaseForegroundGC: public StackObj {
1666  private:
1667   CMSCollector* _c;
1668  public:
1669   ReleaseForegroundGC(CMSCollector* c) : _c(c) {
1670     assert(_c->_foregroundGCShouldWait, "Else should not need to call");
1671     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1672     // allow a potentially blocked foreground collector to proceed
1673     _c->_foregroundGCShouldWait = false;
1674     if (_c->_foregroundGCIsActive) {
1675       CGC_lock->notify();
1676     }
1677     assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
1678            "Possible deadlock");
1679   }
1680 
1681   ~ReleaseForegroundGC() {
1682     assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
1683     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1684     _c->_foregroundGCShouldWait = true;
1685   }
1686 };
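     // Typical usage, as in collect_in_background() below, is to scope one of
     // the stop-the-world operations so that the foreground collector may run:
     //
     //   {
     //     ReleaseForegroundGC x(this);   // clears _foregroundGCShouldWait
     //     VM_CMS_Initial_Mark initial_mark_op(this);
     //     VMThread::execute(&initial_mark_op);
     //   }   // destructor re-raises _foregroundGCShouldWait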
1687 
1688 void CMSCollector::collect_in_background(GCCause::Cause cause) {
1689   assert(Thread::current()->is_ConcurrentGC_thread(),
1690     "A CMS asynchronous collection is only allowed on a CMS thread.");
1691 
1692   GenCollectedHeap* gch = GenCollectedHeap::heap();
1693   {
1694     bool safepoint_check = Mutex::_no_safepoint_check_flag;
1695     MutexLockerEx hl(Heap_lock, safepoint_check);
1696     FreelistLocker fll(this);
1697     MutexLockerEx x(CGC_lock, safepoint_check);
1698     if (_foregroundGCIsActive) {
1699       // The foreground collector is active. Skip this
1700       // background collection.
1701       assert(!_foregroundGCShouldWait, "Should be clear");
1702       return;
1703     } else {
1704       assert(_collectorState == Idling, "Should be idling before start.");
1705       _collectorState = InitialMarking;
1706       register_gc_start(cause);
1707       // Reset the expansion cause, now that we are about to begin
1708       // a new cycle.
1709       clear_expansion_cause();
1710 
1711       // Clear the MetaspaceGC flag since a concurrent collection is
1712       // starting; it is cleared again in the Resetting phase below.
1713       MetaspaceGC::set_should_concurrent_collect(false);
1714     }
1715     // Decide if we want to enable class unloading as part of the
1716     // ensuing concurrent GC cycle.
1717     update_should_unload_classes();
1718     _full_gc_requested = false;           // acks all outstanding full gc requests
1719     _full_gc_cause = GCCause::_no_gc;
1720     // Signal that we are about to start a collection
1721     gch->increment_total_full_collections();  // ... starting a collection cycle
1722     _collection_count_start = gch->total_full_collections();
1723   }
1724 
1725   size_t prev_used = _cmsGen->used();
1726 
1727   // The change of the collection state is normally done at this level;
1728   // the exceptions are phases that are executed while the world is
1729   // stopped.  For those phases the change of state is done while the
1730   // world is stopped.  For baton passing purposes this allows the
1731   // background collector to finish the phase and change state atomically.
1732   // The foreground collector cannot wait on a phase that is done
1733   // while the world is stopped because the foreground collector already
1734   // has the world stopped and would deadlock.
1735   while (_collectorState != Idling) {
1736     log_debug(gc, state)("Thread " INTPTR_FORMAT " in CMS state %d",
1737                          p2i(Thread::current()), _collectorState);
1738     // The foreground collector
1739     //   holds the Heap_lock throughout its collection.
1740     //   holds the CMS token (but not the lock)
1741     //     except while it is waiting for the background collector to yield.
1742     //
1743     // The foreground collector should be blocked (not for long)
1744     //   if the background collector is about to start a phase
1745     //   executed with world stopped.  If the background
1746     //   collector has already started such a phase, the
1747     //   foreground collector is blocked waiting for the
1748     //   Heap_lock.  The stop-world phases (InitialMarking and FinalMarking)
1749     //   are executed in the VM thread.
1750     //
1751     // The locking order is
1752     //   PendingListLock (PLL)  -- if applicable (FinalMarking)
1753     //   Heap_lock  (both this & PLL locked in VM_CMS_Operation::prologue())
1754     //   CMS token  (claimed in
1755     //                stop_world_and_do() -->
1756     //                  safepoint_synchronize() -->
1757     //                    CMSThread::synchronize())
1758 
1759     {
1760       // Check if the FG collector wants us to yield.
1761       CMSTokenSync x(true); // is cms thread
1762       if (waitForForegroundGC()) {
1763         // We yielded to a foreground GC, nothing more to be
1764         // done this round.
1765         assert(_foregroundGCShouldWait == false, "We set it to false in "
1766                "waitForForegroundGC()");
1767         log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " exiting collection CMS state %d",
1768                              p2i(Thread::current()), _collectorState);
1769         return;
1770       } else {
1771         // The background collector can run but check to see if the
1772         // foreground collector has done a collection while the
1773         // background collector was waiting to get the CGC_lock
1774         // above.  If yes, break so that _foregroundGCShouldWait
1775         // is cleared before returning.
1776         if (_collectorState == Idling) {
1777           break;
1778         }
1779       }
1780     }
1781 
1782     assert(_foregroundGCShouldWait, "Foreground collector, if active, "
1783       "should be waiting");
1784 
1785     switch (_collectorState) {
1786       case InitialMarking:
1787         {
1788           ReleaseForegroundGC x(this);
1789           stats().record_cms_begin();
1790           VM_CMS_Initial_Mark initial_mark_op(this);
1791           VMThread::execute(&initial_mark_op);
1792         }
1793         // The collector state may be any legal state at this point
1794         // since the background collector may have yielded to the
1795         // foreground collector.
1796         break;
1797       case Marking:
1798         // initial marking in checkpointRootsInitialWork has been completed
1799         if (markFromRoots()) { // we were successful
1800           assert(_collectorState == Precleaning, "Collector state should "
1801             "have changed");
1802         } else {
1803           assert(_foregroundGCIsActive, "Internal state inconsistency");
1804         }
1805         break;
1806       case Precleaning:
1807         // marking from roots in markFromRoots has been completed
1808         preclean();
1809         assert(_collectorState == AbortablePreclean ||
1810                _collectorState == FinalMarking,
1811                "Collector state should have changed");
1812         break;
1813       case AbortablePreclean:
1814         abortable_preclean();
1815         assert(_collectorState == FinalMarking, "Collector state should "
1816           "have changed");
1817         break;
1818       case FinalMarking:
1819         {
1820           ReleaseForegroundGC x(this);
1821 
1822           VM_CMS_Final_Remark final_remark_op(this);
1823           VMThread::execute(&final_remark_op);
1824         }
1825         assert(_foregroundGCShouldWait, "block post-condition");
1826         break;
1827       case Sweeping:
1828         // final marking in checkpointRootsFinal has been completed
1829         sweep();
1830         assert(_collectorState == Resizing, "Collector state change "
1831           "to Resizing must be done under the free_list_lock");
1832 
1833       case Resizing: {
1834         // Sweeping has been completed...
1835         // At this point the background collection has completed.
1836         // Don't move the call to compute_new_size() down
1837         // into code that might be executed if the background
1838         // collection was preempted.
1839         {
1840           ReleaseForegroundGC x(this);   // unblock FG collection
1841           MutexLockerEx       y(Heap_lock, Mutex::_no_safepoint_check_flag);
1842           CMSTokenSync        z(true);   // not strictly needed.
1843           if (_collectorState == Resizing) {
1844             compute_new_size();
1845             save_heap_summary();
1846             _collectorState = Resetting;
1847           } else {
1848             assert(_collectorState == Idling, "The state should only change"
1849                    " because the foreground collector has finished the collection");
1850           }
1851         }
1852         break;
1853       }
1854       case Resetting:
1855         // CMS heap resizing has been completed
1856         reset_concurrent();
1857         assert(_collectorState == Idling, "Collector state should "
1858           "have changed");
1859 
1860         MetaspaceGC::set_should_concurrent_collect(false);
1861 
1862         stats().record_cms_end();
1863         // Don't move the concurrent_phases_end() and compute_new_size()
1864         // calls to here because a preempted background collection
1865         // has its state set to "Resetting".
1866         break;
1867       case Idling:
1868       default:
1869         ShouldNotReachHere();
1870         break;
1871     }
1872     log_debug(gc, state)("  Thread " INTPTR_FORMAT " done - next CMS state %d",
1873                          p2i(Thread::current()), _collectorState);
1874     assert(_foregroundGCShouldWait, "block post-condition");
1875   }
1876 
1877   // Should this be in gc_epilogue?
1878   collector_policy()->counters()->update_counters();
1879 
1880   {
1881     // Clear _foregroundGCShouldWait and, in the event that the
1882     // foreground collector is waiting, notify it, before
1883     // returning.
1884     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1885     _foregroundGCShouldWait = false;
1886     if (_foregroundGCIsActive) {
1887       CGC_lock->notify();
1888     }
1889     assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
1890            "Possible deadlock");
1891   }
1892   log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " exiting collection CMS state %d",
1893                        p2i(Thread::current()), _collectorState);
1894   log_info(gc, heap)("Old: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)",
1895                      prev_used / K, _cmsGen->used() / K, _cmsGen->capacity() / K);
1896 }
1897 
1898 void CMSCollector::register_gc_start(GCCause::Cause cause) {
1899   _cms_start_registered = true;
1900   _gc_timer_cm->register_gc_start();
1901   _gc_tracer_cm->report_gc_start(cause, _gc_timer_cm->gc_start());
1902 }
1903 
1904 void CMSCollector::register_gc_end() {
1905   if (_cms_start_registered) {
1906     report_heap_summary(GCWhen::AfterGC);
1907 
1908     _gc_timer_cm->register_gc_end();
1909     _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
1910     _cms_start_registered = false;
1911   }
1912 }
1913 
1914 void CMSCollector::save_heap_summary() {
1915   GenCollectedHeap* gch = GenCollectedHeap::heap();
1916   _last_heap_summary = gch->create_heap_summary();
1917   _last_metaspace_summary = gch->create_metaspace_summary();
1918 }
1919 
1920 void CMSCollector::report_heap_summary(GCWhen::Type when) {
1921   _gc_tracer_cm->report_gc_heap_summary(when, _last_heap_summary);
1922   _gc_tracer_cm->report_metaspace_summary(when, _last_metaspace_summary);
1923 }
1924 
1925 bool CMSCollector::waitForForegroundGC() {
1926   bool res = false;
1927   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
1928          "CMS thread should have CMS token");
1929   // Block the foreground collector until the
1930   // background collector decides whether to
1931   // yield.
1932   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1933   _foregroundGCShouldWait = true;
1934   if (_foregroundGCIsActive) {
1935     // The background collector yields to the
1936     // foreground collector and returns a value
1937     // indicating that it has yielded.  The foreground
1938     // collector can proceed.
1939     res = true;
1940     _foregroundGCShouldWait = false;
1941     ConcurrentMarkSweepThread::clear_CMS_flag(
1942       ConcurrentMarkSweepThread::CMS_cms_has_token);
1943     ConcurrentMarkSweepThread::set_CMS_flag(
1944       ConcurrentMarkSweepThread::CMS_cms_wants_token);
1945     // Get a possibly blocked foreground thread going
1946     CGC_lock->notify();
1947     log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
1948                          p2i(Thread::current()), _collectorState);
1949     while (_foregroundGCIsActive) {
1950       CGC_lock->wait(Mutex::_no_safepoint_check_flag);
1951     }
1952     ConcurrentMarkSweepThread::set_CMS_flag(
1953       ConcurrentMarkSweepThread::CMS_cms_has_token);
1954     ConcurrentMarkSweepThread::clear_CMS_flag(
1955       ConcurrentMarkSweepThread::CMS_cms_wants_token);
1956   }
1957   log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
1958                        p2i(Thread::current()), _collectorState);
1959   return res;
1960 }
1961 
1962 // Because of the need to lock the free lists and other structures in
1963 // the collector, common to all the generations that the collector is
1964 // collecting, we need the gc_prologues of individual CMS generations
1965 // to delegate to their collector. It may have been simpler had the
1966 // current infrastructure allowed one to call a prologue on a
1967 // collector. In the absence of that we have the generation's
1968 // prologue delegate to the collector, which delegates back
1969 // some "local" work to a worker method in the individual generations
1970 // that it's responsible for collecting, while itself doing any
1971 // work common to all generations it's responsible for. A similar
1972 // comment applies to the gc_epilogue()s.
1973 // The role of the variable _between_prologue_and_epilogue is to
1974 // enforce the invocation protocol.
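     // The resulting call chain for a prologue is thus, in sketch form:
     //
     //   GenCollectedHeap
     //     -> ConcurrentMarkSweepGeneration::gc_prologue(full)
     //          -> CMSCollector::gc_prologue(full)        // common work, once
     //               -> ConcurrentMarkSweepGeneration::gc_prologue_work(...)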
1975 void CMSCollector::gc_prologue(bool full) {
1976   // Call gc_prologue_work() for the CMSGen
1977   // we are responsible for.
1978 
1979   // The following locking discipline assumes that we are only called
1980   // when the world is stopped.
1981   assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption");
1982 
1983   // The CMSCollector prologue must call the gc_prologues for the
1984   // "generations" that it's responsible for.
1985
1987   assert(   Thread::current()->is_VM_thread()
1988          || (   CMSScavengeBeforeRemark
1989              && Thread::current()->is_ConcurrentGC_thread()),
1990          "Incorrect thread type for prologue execution");
1991 
1992   if (_between_prologue_and_epilogue) {
1993     // We have already been invoked; this is a gc_prologue delegation
1994     // from yet another CMS generation that we are responsible for, just
1995     // ignore it since all relevant work has already been done.
1996     return;
1997   }
1998 
1999   // set a bit saying prologue has been called; cleared in epilogue
2000   _between_prologue_and_epilogue = true;
2001   // Claim locks for common data structures, then call gc_prologue_work()
2002   // for each CMSGen.
2003 
2004   getFreelistLocks();   // gets free list locks on constituent spaces
2005   bitMapLock()->lock_without_safepoint_check();
2006 
2007   // Should call gc_prologue_work() for all cms gens we are responsible for
2008   bool duringMarking = _collectorState >= Marking
2009                     && _collectorState < Sweeping;
2010 
2011   // The young collections clear the modified oops state, which tells if
2012   // there are any modified oops in the class. The remark phase also needs
2013   // that information. Tell the young collection to save the union of all
2014   // modified klasses.
2015   if (duringMarking) {
2016     _ct->klass_rem_set()->set_accumulate_modified_oops(true);
2017   }
2018 
2019   bool registerClosure = duringMarking;
2020 
2021   _cmsGen->gc_prologue_work(full, registerClosure, &_modUnionClosurePar);
2022 
2023   if (!full) {
2024     stats().record_gc0_begin();
2025   }
2026 }
2027 
2028 void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
2029 
2030   _capacity_at_prologue = capacity();
2031   _used_at_prologue = used();
2032 
2033   // Delegate to CMSCollector which knows how to coordinate between
2034   // this and any other CMS generations that it is responsible for
2035   // collecting.
2036   collector()->gc_prologue(full);
2037 }
2038 
2039 // This is a "private" interface for use by this generation's CMSCollector.
2040 // Not to be called directly by any other entity (for instance,
2041 // GenCollectedHeap, which calls the "public" gc_prologue method above).
2042 void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
2043   bool registerClosure, ModUnionClosure* modUnionClosure) {
2044   assert(!incremental_collection_failed(), "Shouldn't be set yet");
2045   assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,
2046     "Should be NULL");
2047   if (registerClosure) {
2048     cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure);
2049   }
2050   cmsSpace()->gc_prologue();
2051   // Clear stat counters
2052   NOT_PRODUCT(
2053     assert(_numObjectsPromoted == 0, "check");
2054     assert(_numWordsPromoted   == 0, "check");
2055     log_develop_trace(gc, alloc)("Allocated " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes concurrently",
2056                                  _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord));
2057     _numObjectsAllocated = 0;
2058     _numWordsAllocated   = 0;
2059   )
2060 }
2061 
2062 void CMSCollector::gc_epilogue(bool full) {
2063   // The following locking discipline assumes that we are only called
2064   // when the world is stopped.
2065   assert(SafepointSynchronize::is_at_safepoint(),
2066          "world is stopped assumption");
2067 
2068   // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks
2069   // if linear allocation blocks need to be appropriately marked to allow
2070   // the blocks to be parsable. We also check here whether we need to nudge the
2071   // CMS collector thread to start a new cycle (if it's not already active).
2072   assert(   Thread::current()->is_VM_thread()
2073          || (   CMSScavengeBeforeRemark
2074              && Thread::current()->is_ConcurrentGC_thread()),
2075          "Incorrect thread type for epilogue execution");
2076 
2077   if (!_between_prologue_and_epilogue) {
2078     // We have already been invoked; this is a gc_epilogue delegation
2079     // from yet another CMS generation that we are responsible for, just
2080     // ignore it since all relevant work has already been done.
2081     return;
2082   }
2083   assert(haveFreelistLocks(), "must have freelist locks");
2084   assert_lock_strong(bitMapLock());
2085 
2086   _ct->klass_rem_set()->set_accumulate_modified_oops(false);
2087 
2088   _cmsGen->gc_epilogue_work(full);
2089 
2090   if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
2091     // in case sampling was not already enabled, enable it
2092     _start_sampling = true;
2093   }
2094   // reset _eden_chunk_array so sampling starts afresh
2095   _eden_chunk_index = 0;
2096 
2097   size_t cms_used   = _cmsGen->cmsSpace()->used();
2098 
2099   // update performance counters - this uses a special version of
2100   // update_counters() that allows the utilization to be passed as a
2101   // parameter, avoiding multiple calls to used().
2102   //
2103   _cmsGen->update_counters(cms_used);
2104 
2105   bitMapLock()->unlock();
2106   releaseFreelistLocks();
2107 
2108   if (!CleanChunkPoolAsync) {
2109     Chunk::clean_chunk_pool();
2110   }
2111 
2112   set_did_compact(false);
2113   _between_prologue_and_epilogue = false;  // ready for next cycle
2114 }
2115 
2116 void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
2117   collector()->gc_epilogue(full);
2118 
2119   // Also reset promotion tracking in par gc thread states.
2120   for (uint i = 0; i < ParallelGCThreads; i++) {
2121     _par_gc_thread_states[i]->promo.stopTrackingPromotions(i);
2122   }
2123 }
2124 
2125 void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) {
2126   assert(!incremental_collection_failed(), "Should have been cleared");
2127   cmsSpace()->setPreconsumptionDirtyCardClosure(NULL);
2128   cmsSpace()->gc_epilogue();
2129   // Print stat counters
2130   NOT_PRODUCT(
2131     assert(_numObjectsAllocated == 0, "check");
2132     assert(_numWordsAllocated == 0, "check");
2133     log_develop_trace(gc, promotion)("Promoted " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
2134                                      _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord));
2135     _numObjectsPromoted = 0;
2136     _numWordsPromoted   = 0;
2137   )
2138 
2139   // The call down the chain in contiguous_available() needs the freelistLock,
2140   // so print this out before releasing the freelistLock.
2141   log_develop_trace(gc)(" Contiguous available " SIZE_FORMAT " bytes ", contiguous_available());
2142 }
2143 
2144 #ifndef PRODUCT
2145 bool CMSCollector::have_cms_token() {
2146   Thread* thr = Thread::current();
2147   if (thr->is_VM_thread()) {
2148     return ConcurrentMarkSweepThread::vm_thread_has_cms_token();
2149   } else if (thr->is_ConcurrentGC_thread()) {
2150     return ConcurrentMarkSweepThread::cms_thread_has_cms_token();
2151   } else if (thr->is_GC_task_thread()) {
2152     return ConcurrentMarkSweepThread::vm_thread_has_cms_token() &&
2153            ParGCRareEvent_lock->owned_by_self();
2154   }
2155   return false;
2156 }
2157 
2158 // Check reachability of the given heap address in CMS generation,
2159 // treating all other generations as roots.
2160 bool CMSCollector::is_cms_reachable(HeapWord* addr) {
2161   // We could "guarantee" below, rather than assert, but I'll
2162   // leave these as "asserts" so that an adventurous debugger
2163   // could try this in the product build provided some subset of
2164   // the conditions were met, provided they were interested in the
2165   // results and knew that the computation below wouldn't interfere
2166   // with other concurrent computations mutating the structures
2167   // being read or written.
2168   assert(SafepointSynchronize::is_at_safepoint(),
2169          "Else mutations in object graph will make answer suspect");
2170   assert(have_cms_token(), "Should hold cms token");
2171   assert(haveFreelistLocks(), "must hold free list locks");
2172   assert_lock_strong(bitMapLock());
2173 
2174   // Clear the marking bit map array before starting, but, just
2175   // for kicks, first report if the given address is already marked
2176   tty->print_cr("Start: Address " PTR_FORMAT " is%s marked", p2i(addr),
2177                 _markBitMap.isMarked(addr) ? "" : " not");
2178 
2179   if (verify_after_remark()) {
2180     MutexLockerEx x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2181     bool result = verification_mark_bm()->isMarked(addr);
2182     tty->print_cr("TransitiveMark: Address " PTR_FORMAT " %s marked", p2i(addr),
2183                   result ? "IS" : "is NOT");
2184     return result;
2185   } else {
2186     tty->print_cr("Could not compute result");
2187     return false;
2188   }
2189 }
2190 #endif
2191 
2192 void
2193 CMSCollector::print_on_error(outputStream* st) {
2194   CMSCollector* collector = ConcurrentMarkSweepGeneration::_collector;
2195   if (collector != NULL) {
2196     CMSBitMap* bitmap = &collector->_markBitMap;
2197     st->print_cr("Marking Bits: (CMSBitMap*) " PTR_FORMAT, p2i(bitmap));
2198     bitmap->print_on_error(st, " Bits: ");
2199 
2200     st->cr();
2201 
2202     CMSBitMap* mut_bitmap = &collector->_modUnionTable;
2203     st->print_cr("Mod Union Table: (CMSBitMap*) " PTR_FORMAT, p2i(mut_bitmap));
2204     mut_bitmap->print_on_error(st, " Bits: ");
2205   }
2206 }
2207 
2208 ////////////////////////////////////////////////////////
2209 // CMS Verification Support
2210 ////////////////////////////////////////////////////////
2211 // Following the remark phase, the following invariant
2212 // should hold -- each object in the CMS heap which is marked in
2213 // the verification_mark_bm() should also be marked in markBitMap().
2214 
2215 class VerifyMarkedClosure: public BitMapClosure {
2216   CMSBitMap* _marks;
2217   bool       _failed;
2218 
2219  public:
2220   VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
2221 
2222   bool do_bit(size_t offset) {
2223     HeapWord* addr = _marks->offsetToHeapWord(offset);
2224     if (!_marks->isMarked(addr)) {
2225       LogHandle(gc, verify) log;
2226       ResourceMark rm;
2227       oop(addr)->print_on(log.error_stream());
2228       log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
2229       _failed = true;
2230     }
2231     return true;
2232   }
2233 
2234   bool failed() { return _failed; }
2235 };
2236 
2237 bool CMSCollector::verify_after_remark() {
2238   GCTraceTime(Info, gc, verify) tm("Verifying CMS Marking.");
2239   MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2240   static bool init = false;
2241 
2242   assert(SafepointSynchronize::is_at_safepoint(),
2243          "Else mutations in object graph will make answer suspect");
2244   assert(have_cms_token(),
2245          "Else there may be mutual interference in use of "
2246          " verification data structures");
2247   assert(_collectorState > Marking && _collectorState <= Sweeping,
2248          "Else marking info checked here may be obsolete");
2249   assert(haveFreelistLocks(), "must hold free list locks");
2250   assert_lock_strong(bitMapLock());
2251 
2252 
2253   // Allocate marking bit map if not already allocated
2254   if (!init) { // first time
2255     if (!verification_mark_bm()->allocate(_span)) {
2256       return false;
2257     }
2258     init = true;
2259   }
2260 
2261   assert(verification_mark_stack()->isEmpty(), "Should be empty");
2262 
2263   // Turn off refs discovery -- so we will be tracing through refs.
2264   // This is as intended, because by this time
2265   // GC must already have cleared any refs that need to be cleared,
2266   // and traced those that need to be marked; moreover,
2267   // the marking done here is not going to interfere in any
2268   // way with the marking information used by GC.
2269   NoRefDiscovery no_discovery(ref_processor());
2270 
2271 #if defined(COMPILER2) || INCLUDE_JVMCI
2272   DerivedPointerTableDeactivate dpt_deact;
2273 #endif
2274 
2275   // Clear any marks from a previous round
2276   verification_mark_bm()->clear_all();
2277   assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
2278   verify_work_stacks_empty();
2279 
2280   GenCollectedHeap* gch = GenCollectedHeap::heap();
2281   gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
2282   // Update the saved marks which may affect the root scans.
2283   gch->save_marks();
2284 
2285   if (CMSRemarkVerifyVariant == 1) {
2286     // In this first variant of verification, we complete
2287     // all marking, then check if the new marks-vector is
2288     // a subset of the CMS marks-vector.
2289     verify_after_remark_work_1();
2290   } else {
2291     guarantee(CMSRemarkVerifyVariant == 2, "Range checking for CMSRemarkVerifyVariant should guarantee 1 or 2");
2292     // In this second variant of verification, we flag an error
2293     // (i.e. an object reachable in the new marks-vector not reachable
2294     // in the CMS marks-vector) immediately, also indicating the
2295     // identity of an object (A) that references the unmarked object (B) --
2296     // presumably, a mutation to A failed to be picked up by preclean/remark?
2297     verify_after_remark_work_2();
2298   }
2299 
2300   return true;
2301 }
2302 
2303 void CMSCollector::verify_after_remark_work_1() {
2304   ResourceMark rm;
2305   HandleMark  hm;
2306   GenCollectedHeap* gch = GenCollectedHeap::heap();
2307 
2308   // Get a clear set of claim bits for the roots processing to work with.
2309   ClassLoaderDataGraph::clear_claimed_marks();
2310 
2311   // Mark from roots one level into CMS
2312   MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
2313   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2314 
2315   {
2316     StrongRootsScope srs(1);
2317 
2318     gch->gen_process_roots(&srs,
2319                            GenCollectedHeap::OldGen,
2320                            true,   // young gen as roots
2321                            GenCollectedHeap::ScanningOption(roots_scanning_options()),
2322                            should_unload_classes(),
2323                            &notOlder,
2324                            NULL,
2325                            NULL);
2326   }
2327 
2328   // Now mark from the roots
2329   MarkFromRootsClosure markFromRootsClosure(this, _span,
2330     verification_mark_bm(), verification_mark_stack(),
2331     false /* don't yield */, true /* verifying */);
2332   assert(_restart_addr == NULL, "Expected pre-condition");
2333   verification_mark_bm()->iterate(&markFromRootsClosure);
2334   while (_restart_addr != NULL) {
2335     // Deal with stack overflow: by restarting at the indicated
2336     // address.
2337     HeapWord* ra = _restart_addr;
2338     markFromRootsClosure.reset(ra);
2339     _restart_addr = NULL;
2340     verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2341   }
2342   assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2343   verify_work_stacks_empty();
2344 
2345   // Marking completed -- now verify that each bit marked in
2346   // verification_mark_bm() is also marked in markBitMap(); flag all
2347   // errors by printing corresponding objects.
2348   VerifyMarkedClosure vcl(markBitMap());
2349   verification_mark_bm()->iterate(&vcl);
2350   if (vcl.failed()) {
2351     LogHandle(gc, verify) log;
2352     log.error("Failed marking verification after remark");
2353     ResourceMark rm;
2354     gch->print_on(log.error_stream());
2355     fatal("CMS: failed marking verification after remark");
2356   }
2357 }
2358 
2359 class VerifyKlassOopsKlassClosure : public KlassClosure {
2360   class VerifyKlassOopsClosure : public OopClosure {
2361     CMSBitMap* _bitmap;
2362    public:
2363     VerifyKlassOopsClosure(CMSBitMap* bitmap) : _bitmap(bitmap) { }
2364     void do_oop(oop* p)       { guarantee(*p == NULL || _bitmap->isMarked((HeapWord*) *p), "Should be marked"); }
2365     void do_oop(narrowOop* p) { ShouldNotReachHere(); }
2366   } _oop_closure;
2367  public:
2368   VerifyKlassOopsKlassClosure(CMSBitMap* bitmap) : _oop_closure(bitmap) {}
2369   void do_klass(Klass* k) {
2370     k->oops_do(&_oop_closure);
2371   }
2372 };
2373 
2374 void CMSCollector::verify_after_remark_work_2() {
2375   ResourceMark rm;
2376   HandleMark  hm;
2377   GenCollectedHeap* gch = GenCollectedHeap::heap();
2378 
2379   // Get a clear set of claim bits for the roots processing to work with.
2380   ClassLoaderDataGraph::clear_claimed_marks();
2381 
2382   // Mark from roots one level into CMS
2383   MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
2384                                      markBitMap());
2385   CLDToOopClosure cld_closure(&notOlder, true);
2386 
2387   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2388 
2389   {
2390     StrongRootsScope srs(1);
2391 
2392     gch->gen_process_roots(&srs,
2393                            GenCollectedHeap::OldGen,
2394                            true,   // young gen as roots
2395                            GenCollectedHeap::ScanningOption(roots_scanning_options()),
2396                            should_unload_classes(),
2397                            &notOlder,
2398                            NULL,
2399                            &cld_closure);
2400   }
2401 
2402   // Now mark from the roots
2403   MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
2404     verification_mark_bm(), markBitMap(), verification_mark_stack());
2405   assert(_restart_addr == NULL, "Expected pre-condition");
2406   verification_mark_bm()->iterate(&markFromRootsClosure);
2407   while (_restart_addr != NULL) {
2408     // Deal with stack overflow: by restarting at the indicated
2409     // address.
2410     HeapWord* ra = _restart_addr;
2411     markFromRootsClosure.reset(ra);
2412     _restart_addr = NULL;
2413     verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2414   }
2415   assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2416   verify_work_stacks_empty();
2417 
2418   VerifyKlassOopsKlassClosure verify_klass_oops(verification_mark_bm());
2419   ClassLoaderDataGraph::classes_do(&verify_klass_oops);
2420 
2421   // Marking completed -- now verify that each bit marked in
2422   // verification_mark_bm() is also marked in markBitMap(); flag all
2423   // errors by printing corresponding objects.
2424   VerifyMarkedClosure vcl(markBitMap());
2425   verification_mark_bm()->iterate(&vcl);
2426   assert(!vcl.failed(), "Else verification above should not have succeeded");
2427 }
2428 
2429 void ConcurrentMarkSweepGeneration::save_marks() {
2430   // delegate to CMS space
2431   cmsSpace()->save_marks();
2432   for (uint i = 0; i < ParallelGCThreads; i++) {
2433     _par_gc_thread_states[i]->promo.startTrackingPromotions();
2434   }
2435 }
2436 
2437 bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
2438   return cmsSpace()->no_allocs_since_save_marks();
2439 }
2440 
2441 #define CMS_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)    \
2442                                                                 \
2443 void ConcurrentMarkSweepGeneration::                            \
2444 oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
2445   cl->set_generation(this);                                     \
2446   cmsSpace()->oop_since_save_marks_iterate##nv_suffix(cl);      \
2447   cl->reset_generation();                                       \
2448   save_marks();                                                 \
2449 }
2450 
2451 ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN)
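     // The ALL_SINCE_SAVE_MARKS_CLOSURES macro above instantiates
     // CMS_SINCE_SAVE_MARKS_DEFN once per (OopClosureType, nv_suffix) pair,
     // generating the specialized non-virtual (_nv) variants as well as the
     // generic virtual (_v) variant of oop_since_save_marks_iterate.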
2452 
2453 void
2454 ConcurrentMarkSweepGeneration::oop_iterate(ExtendedOopClosure* cl) {
2455   if (freelistLock()->owned_by_self()) {
2456     Generation::oop_iterate(cl);
2457   } else {
2458     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2459     Generation::oop_iterate(cl);
2460   }
2461 }
2462 
2463 void
2464 ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) {
2465   if (freelistLock()->owned_by_self()) {
2466     Generation::object_iterate(cl);
2467   } else {
2468     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2469     Generation::object_iterate(cl);
2470   }
2471 }
2472 
2473 void
2474 ConcurrentMarkSweepGeneration::safe_object_iterate(ObjectClosure* cl) {
2475   if (freelistLock()->owned_by_self()) {
2476     Generation::safe_object_iterate(cl);
2477   } else {
2478     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2479     Generation::safe_object_iterate(cl);
2480   }
2481 }
2482 
2483 void
2484 ConcurrentMarkSweepGeneration::post_compact() {
2485 }
2486 
2487 void
2488 ConcurrentMarkSweepGeneration::prepare_for_verify() {
2489   // Fix the linear allocation blocks to look like free blocks.
2490 
2491   // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
2492   // are not called when the heap is verified during universe initialization and
2493   // at vm shutdown.
2494   if (freelistLock()->owned_by_self()) {
2495     cmsSpace()->prepare_for_verify();
2496   } else {
2497     MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
2498     cmsSpace()->prepare_for_verify();
2499   }
2500 }
2501 
2502 void
2503 ConcurrentMarkSweepGeneration::verify() {
2504   // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
2505   // are not called when the heap is verified during universe initialization and
2506   // at vm shutdown.
2507   if (freelistLock()->owned_by_self()) {
2508     cmsSpace()->verify();
2509   } else {
2510     MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
2511     cmsSpace()->verify();
2512   }
2513 }
2514 
2515 void CMSCollector::verify() {
2516   _cmsGen->verify();
2517 }
2518 
2519 #ifndef PRODUCT
2520 bool CMSCollector::overflow_list_is_empty() const {
2521   assert(_num_par_pushes >= 0, "Inconsistency");
2522   if (_overflow_list == NULL) {
2523     assert(_num_par_pushes == 0, "Inconsistency");
2524   }
2525   return _overflow_list == NULL;
2526 }
2527 
2528 // The methods verify_work_stacks_empty() and verify_overflow_empty()
2529 // merely consolidate assertion checks that appear to occur together frequently.
2530 void CMSCollector::verify_work_stacks_empty() const {
2531   assert(_markStack.isEmpty(), "Marking stack should be empty");
2532   assert(overflow_list_is_empty(), "Overflow list should be empty");
2533 }
2534 
2535 void CMSCollector::verify_overflow_empty() const {
2536   assert(overflow_list_is_empty(), "Overflow list should be empty");
2537   assert(no_preserved_marks(), "No preserved marks");
2538 }
2539 #endif // PRODUCT
2540 
2541 // Decide if we want to enable class unloading as part of the
2542 // ensuing concurrent GC cycle. We will collect and
2543 // unload classes if it's the case that:
2544 // (1) an explicit gc request has been made and the flag
2545 //     ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR
2546 // (2) (a) class unloading is enabled at the command line, and
2547 //     (b) either the unloading interval has expired, or old gen is getting really full
2548 // NOTE: Provided there is no change in the state of the heap between
2549 // calls to this method, it should have idempotent results. Moreover,
2550 // its results should be monotonically increasing (i.e. going from 0 to 1,
2551 // but not 1 to 0) between successive calls between which the heap was
2552 // not collected. The implementation below thus relies on the
2553 // property that concurrent_cycles_since_last_unload()
2554 // will not decrease unless a collection cycle happened, and that
2555 // _cmsGen->is_too_full() is
2556 // itself also monotonic in that sense.
2558 void CMSCollector::update_should_unload_classes() {
2559   _should_unload_classes = false;
2560   // Condition 1 above
2561   if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
2562     _should_unload_classes = true;
2563   } else if (CMSClassUnloadingEnabled) { // Condition 2.a above
2564     // Disjuncts of 2.b above
2565     _should_unload_classes = (concurrent_cycles_since_last_unload() >=
2566                               CMSClassUnloadingMaxInterval)
2567                            || _cmsGen->is_too_full();
2568   }
2569 }
2570 
2571 bool ConcurrentMarkSweepGeneration::is_too_full() const {
2572   bool res = should_concurrent_collect();
2573   res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
2574   return res;
2575 }
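// Illustrative arithmetic for the predicate above, assuming the default
// CMSIsTooFullPercentage of 98: with a 1 GB old gen, is_too_full() can
// return true only once occupancy() exceeds 0.98, i.e. more than
// ~980 MB is in use, and then only if should_concurrent_collect()
// also holds.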
2576 
2577 void CMSCollector::setup_cms_unloading_and_verification_state() {
2578   const  bool should_verify =   VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
2579                              || VerifyBeforeExit;
2580   const  int  rso           =   GenCollectedHeap::SO_AllCodeCache;
2581 
2582   // We set the proper root for this CMS cycle here.
2583   if (should_unload_classes()) {   // Should unload classes this cycle
2584     remove_root_scanning_option(rso);  // Shrink the root set appropriately
2585     set_verifying(should_verify);    // Set verification state for this cycle
2586     return;                            // Nothing else needs to be done at this time
2587   }
2588 
2589   // Not unloading classes this cycle
2590   assert(!should_unload_classes(), "Inconsistency!");
2591 
2592   // If we are not unloading classes then add SO_AllCodeCache to root
2593   // scanning options.
2594   add_root_scanning_option(rso);
2595 
2596   if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
2597     set_verifying(true);
2598   } else if (verifying() && !should_verify) {
2599     // We were verifying, but some verification flags got disabled.
2600     set_verifying(false);
2601     // Exclude symbols, strings and code cache elements from root scanning to
2602   // reduce the initial mark (IM) and remark (RM) pauses.
2603     remove_root_scanning_option(rso);
2604   }
2605 }
2606 
2607 
2608 #ifndef PRODUCT
2609 HeapWord* CMSCollector::block_start(const void* p) const {
2610   const HeapWord* addr = (HeapWord*)p;
2611   if (_span.contains(p)) {
2612     if (_cmsGen->cmsSpace()->is_in_reserved(addr)) {
2613       return _cmsGen->cmsSpace()->block_start(p);
2614     }
2615   }
2616   return NULL;
2617 }
2618 #endif
2619 
2620 HeapWord*
2621 ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
2622                                                    bool   tlab,
2623                                                    bool   parallel) {
2624   CMSSynchronousYieldRequest yr;
2625   assert(!tlab, "Can't deal with TLAB allocation");
2626   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2627   expand_for_gc_cause(word_size*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_allocation);
2628   if (GCExpandToAllocateDelayMillis > 0) {
2629     os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
2630   }
2631   return have_lock_and_allocate(word_size, tlab);
2632 }
2633 
2634 void ConcurrentMarkSweepGeneration::expand_for_gc_cause(
2635     size_t bytes,
2636     size_t expand_bytes,
2637     CMSExpansionCause::Cause cause)
2638 {
2639 
2640   bool success = expand(bytes, expand_bytes);
2641 
2642   // remember why we expanded; this information is used
2643   // by shouldConcurrentCollect() when making decisions on whether to start
2644   // a new CMS cycle.
2645   if (success) {
2646     set_expansion_cause(cause);
2647     log_trace(gc)("Expanded CMS gen for %s",  CMSExpansionCause::to_string(cause));
2648   }
2649 }
2650 
2651 HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
2652   HeapWord* res = NULL;
2653   MutexLocker x(ParGCRareEvent_lock);
2654   while (true) {
2655     // Expansion by some other thread might make alloc OK now:
2656     res = ps->lab.alloc(word_sz);
2657     if (res != NULL) return res;
2658     // If there's not enough expansion space available, give up.
2659     if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
2660       return NULL;
2661     }
2662     // Otherwise, we try expansion.
2663     expand_for_gc_cause(word_sz*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_lab);
2664     // Now go around the loop and try alloc again;
2665     // A competing par_promote might beat us to the expansion space,
2666     // so we may go around the loop again if promotion fails again.
2667     if (GCExpandToAllocateDelayMillis > 0) {
2668       os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
2669     }
2670   }
2671 }
2672 
2673 
2674 bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
2675   PromotionInfo* promo) {
2676   MutexLocker x(ParGCRareEvent_lock);
2677   size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
2678   while (true) {
2679     // Expansion by some other thread might make alloc OK now:
2680     if (promo->ensure_spooling_space()) {
2681       assert(promo->has_spooling_space(),
2682              "Post-condition of successful ensure_spooling_space()");
2683       return true;
2684     }
2685     // If there's not enough expansion space available, give up.
2686     if (_virtual_space.uncommitted_size() < refill_size_bytes) {
2687       return false;
2688     }
2689     // Otherwise, we try expansion.
2690     expand_for_gc_cause(refill_size_bytes, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_spooling_space);
2691     // Now go around the loop and try alloc again;
2692     // A competing allocation might beat us to the expansion space,
2693     // so we may go around the loop again if allocation fails again.
2694     if (GCExpandToAllocateDelayMillis > 0) {
2695       os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
2696     }
2697   }
2698 }
2699 
2700 void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
2701   // Only shrink if a compaction was done so that all the free space
2702   // in the generation is in a contiguous block at the end.
2703   if (did_compact()) {
2704     CardGeneration::shrink(bytes);
2705   }
2706 }
2707 
2708 void ConcurrentMarkSweepGeneration::assert_correct_size_change_locking() {
2709   assert_locked_or_safepoint(Heap_lock);
2710 }
2711 
2712 void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) {
2713   assert_locked_or_safepoint(Heap_lock);
2714   assert_lock_strong(freelistLock());
2715   log_trace(gc)("Shrinking of CMS not yet implemented");
2716   return;
2717 }
2718 
2719 
2720 // Simple ctor/dtor wrapper for accounting & timer chores around concurrent
2721 // phases.
2722 class CMSPhaseAccounting: public StackObj {
2723  public:
2724   CMSPhaseAccounting(CMSCollector *collector,
2725                      const char *title);
2726   ~CMSPhaseAccounting();
2727 
2728  private:
2729   CMSCollector *_collector;
2730   const char *_title;
2731   GCTraceConcTime(Info, gc) _trace_time;
2732 
2733  public:
2734   // Not MT-safe; so do not pass around these StackObj's
2735   // where they may be accessed by other threads.
2736   double wallclock_millis() {
2737     return TimeHelper::counter_to_millis(os::elapsed_counter() - _trace_time.start_time());
2738   }
2739 };
2740 
2741 CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
2742                                        const char *title) :
2743   _collector(collector), _title(title), _trace_time(title) {
2744 
2745   _collector->resetYields();
2746   _collector->resetTimer();
2747   _collector->startTimer();
2748   _collector->gc_timer_cm()->register_gc_concurrent_start(title);
2749 }
2750 
2751 CMSPhaseAccounting::~CMSPhaseAccounting() {
2752   _collector->gc_timer_cm()->register_gc_concurrent_end();
2753   _collector->stopTimer();
2754   log_debug(gc)("Concurrent active time: %.3fms", TimeHelper::counter_to_millis(_collector->timerTicks()));
2755   log_trace(gc)(" (CMS %s yielded %d times)", _title, _collector->yields());
2756 }
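// Typical use, as in markFromRoots() further below: declare the
// accounting object on the stack for the duration of a concurrent
// phase. Construction resets and starts the collector's timer and
// registers the concurrent phase start; destruction registers the end
// and logs the active time and yield count.
//
//   GCTraceCPUTime tcpu;
//   CMSPhaseAccounting pa(this, "Concurrent Mark");
//   ... do the phase's work, consulting pa.wallclock_millis() if a
//       time-based cutoff is needed (see abortable_preclean()) ...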
2757 
2758 // CMS work
2759 
2760 // The common parts of CMSParInitialMarkTask and CMSParRemarkTask.
2761 class CMSParMarkTask : public AbstractGangTask {
2762  protected:
2763   CMSCollector*     _collector;
2764   uint              _n_workers;
2765   CMSParMarkTask(const char* name, CMSCollector* collector, uint n_workers) :
2766       AbstractGangTask(name),
2767       _collector(collector),
2768       _n_workers(n_workers) {}
2769   // Work method in support of parallel rescan ... of young gen spaces
2770   void do_young_space_rescan(uint worker_id, OopsInGenClosure* cl,
2771                              ContiguousSpace* space,
2772                              HeapWord** chunk_array, size_t chunk_top);
2773   void work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl);
2774 };
2775 
2776 // Parallel initial mark task
2777 class CMSParInitialMarkTask: public CMSParMarkTask {
2778   StrongRootsScope* _strong_roots_scope;
2779  public:
2780   CMSParInitialMarkTask(CMSCollector* collector, StrongRootsScope* strong_roots_scope, uint n_workers) :
2781       CMSParMarkTask("Scan roots and young gen for initial mark in parallel", collector, n_workers),
2782       _strong_roots_scope(strong_roots_scope) {}
2783   void work(uint worker_id);
2784 };
2785 
2786 // Checkpoint the roots into this generation from outside
2787 // this generation. [Note this initial checkpoint need only
2788 // be approximate -- we'll do a catch up phase subsequently.]
2789 void CMSCollector::checkpointRootsInitial() {
2790   assert(_collectorState == InitialMarking, "Wrong collector state");
2791   check_correct_thread_executing();
2792   TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
2793 
2794   save_heap_summary();
2795   report_heap_summary(GCWhen::BeforeGC);
2796 
2797   ReferenceProcessor* rp = ref_processor();
2798   assert(_restart_addr == NULL, "Control point invariant");
2799   {
2800     // acquire locks for subsequent manipulations
2801     MutexLockerEx x(bitMapLock(),
2802                     Mutex::_no_safepoint_check_flag);
2803     checkpointRootsInitialWork();
2804     // enable ("weak") refs discovery
2805     rp->enable_discovery();
2806     _collectorState = Marking;
2807   }
2808 }
2809 
2810 void CMSCollector::checkpointRootsInitialWork() {
2811   assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
2812   assert(_collectorState == InitialMarking, "just checking");
2813 
2814   // Already have locks.
2815   assert_lock_strong(bitMapLock());
2816   assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
2817 
2818   // Setup the verification and class unloading state for this
2819   // CMS collection cycle.
2820   setup_cms_unloading_and_verification_state();
2821 
2822   GCTraceTime(Trace, gc) ts("checkpointRootsInitialWork", _gc_timer_cm);
2823 
2824   // Reset all the PLAB chunk arrays if necessary.
2825   if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
2826     reset_survivor_plab_arrays();
2827   }
2828 
2829   ResourceMark rm;
2830   HandleMark  hm;
2831 
2832   MarkRefsIntoClosure notOlder(_span, &_markBitMap);
2833   GenCollectedHeap* gch = GenCollectedHeap::heap();
2834 
2835   verify_work_stacks_empty();
2836   verify_overflow_empty();
2837 
2838   gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
2839   // Update the saved marks which may affect the root scans.
2840   gch->save_marks();
2841 
2842   // weak reference processing has not started yet.
2843   ref_processor()->set_enqueuing_is_done(false);
2844 
2845   // Need to remember all newly created CLDs,
2846   // so that we can guarantee that the remark finds them.
2847   ClassLoaderDataGraph::remember_new_clds(true);
2848 
2849   // Whenever a CLD is found, it will be claimed before proceeding to mark
2850   // the klasses. The claimed marks need to be cleared before marking starts.
2851   ClassLoaderDataGraph::clear_claimed_marks();
2852 
2853   print_eden_and_survivor_chunk_arrays();
2854 
2855   {
2856 #if defined(COMPILER2) || INCLUDE_JVMCI
2857     DerivedPointerTableDeactivate dpt_deact;
2858 #endif
2859     if (CMSParallelInitialMarkEnabled) {
2860       // The parallel version.
2861       WorkGang* workers = gch->workers();
2862       assert(workers != NULL, "Need parallel worker threads.");
2863       uint n_workers = workers->active_workers();
2864 
2865       StrongRootsScope srs(n_workers);
2866 
2867       CMSParInitialMarkTask tsk(this, &srs, n_workers);
2868       initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
2869       if (n_workers > 1) {
2870         workers->run_task(&tsk);
2871       } else {
2872         tsk.work(0);
2873       }
2874     } else {
2875       // The serial version.
2876       CLDToOopClosure cld_closure(&notOlder, true);
2877       gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2878 
2879       StrongRootsScope srs(1);
2880 
2881       gch->gen_process_roots(&srs,
2882                              GenCollectedHeap::OldGen,
2883                              true,   // young gen as roots
2884                              GenCollectedHeap::ScanningOption(roots_scanning_options()),
2885                              should_unload_classes(),
2886                              &notOlder,
2887                              NULL,
2888                              &cld_closure);
2889     }
2890   }
2891 
2892   // The mod-union table should already be clear; it is dirtied again
2893   // in the gc_prologue of the CMS generation on each young generation
2894   // collection.
2895   assert(_modUnionTable.isAllClear(),
2896        "Was cleared in most recent final checkpoint phase"
2897        " or no bits are set in the gc_prologue before the start of the"
2898        " next marking phase.");
2899 
2900   assert(_ct->klass_rem_set()->mod_union_is_clear(), "Must be");
2901 
2902   // Save the end of the used_region of the constituent generations
2903   // to be used to limit the extent of sweep in each generation.
2904   save_sweep_limits();
2905   verify_overflow_empty();
2906 }
2907 
2908 bool CMSCollector::markFromRoots() {
2909   // we might be tempted to assert that:
2910   // assert(!SafepointSynchronize::is_at_safepoint(),
2911   //        "inconsistent argument?");
2912   // However that wouldn't be right, because it's possible that
2913   // a safepoint is indeed in progress as a young generation
2914   // stop-the-world GC happens even as we mark in this generation.
2915   assert(_collectorState == Marking, "inconsistent state?");
2916   check_correct_thread_executing();
2917   verify_overflow_empty();
2918 
2919   // Weak ref discovery note: We may be discovering weak
2920   // refs in this generation concurrent (but interleaved) with
2921   // weak ref discovery by the young generation collector.
2922 
2923   CMSTokenSyncWithLocks ts(true, bitMapLock());
2924   GCTraceCPUTime tcpu;
2925   CMSPhaseAccounting pa(this, "Concurrent Mark");
2926   bool res = markFromRootsWork();
2927   if (res) {
2928     _collectorState = Precleaning;
2929   } else { // We failed and a foreground collection wants to take over
2930     assert(_foregroundGCIsActive, "internal state inconsistency");
2931     assert(_restart_addr == NULL,  "foreground will restart from scratch");
2932     log_debug(gc)("bailing out to foreground collection");
2933   }
2934   verify_overflow_empty();
2935   return res;
2936 }
2937 
2938 bool CMSCollector::markFromRootsWork() {
2939   // iterate over marked bits in bit map, doing a full scan and mark
2940   // from these roots using the following algorithm:
2941   // . if oop is to the right of the current scan pointer,
2942   //   mark corresponding bit (we'll process it later)
2943   // . else (oop is to left of current scan pointer)
2944   //   push oop on marking stack
2945   // . drain the marking stack
2946 
2947   // Note that when we do a marking step we need to hold the
2948   // bit map lock -- recall that direct allocation (by mutators)
2949   // and promotion (by the young generation collector) is also
2950   // marking the bit map. [the so-called allocate live policy.]
2951   // Because the implementation of bit map marking is not
2952   // robust wrt simultaneous marking of bits in the same word,
2953   // we need to make sure that there is no such interference
2954   // between concurrent such updates.
2955 
2956   // already have locks
2957   assert_lock_strong(bitMapLock());
2958 
2959   verify_work_stacks_empty();
2960   verify_overflow_empty();
2961   bool result = false;
2962   if (CMSConcurrentMTEnabled && ConcGCThreads > 0) {
2963     result = do_marking_mt();
2964   } else {
2965     result = do_marking_st();
2966   }
2967   return result;
2968 }
2969 
2970 // Forward decl
2971 class CMSConcMarkingTask;
2972 
2973 class CMSConcMarkingTerminator: public ParallelTaskTerminator {
2974   CMSCollector*       _collector;
2975   CMSConcMarkingTask* _task;
2976  public:
2977   virtual void yield();
2978 
2979   // "n_threads" is the number of threads to be terminated.
2980   // "queue_set" is a set of work queues of other threads.
2981   // "collector" is the CMS collector associated with this task terminator.
2982   // "yield" indicates whether we need the gang as a whole to yield.
2983   CMSConcMarkingTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) :
2984     ParallelTaskTerminator(n_threads, queue_set),
2985     _collector(collector) { }
2986 
2987   void set_task(CMSConcMarkingTask* task) {
2988     _task = task;
2989   }
2990 };
2991 
2992 class CMSConcMarkingTerminatorTerminator: public TerminatorTerminator {
2993   CMSConcMarkingTask* _task;
2994  public:
2995   bool should_exit_termination();
2996   void set_task(CMSConcMarkingTask* task) {
2997     _task = task;
2998   }
2999 };
3000 
3001 // MT Concurrent Marking Task
3002 class CMSConcMarkingTask: public YieldingFlexibleGangTask {
3003   CMSCollector* _collector;
3004   uint          _n_workers;       // requested/desired # workers
3005   bool          _result;
3006   CompactibleFreeListSpace*  _cms_space;
3007   char          _pad_front[64];   // padding to ...
3008   HeapWord*     _global_finger;   // ... avoid sharing cache line
3009   char          _pad_back[64];
3010   HeapWord*     _restart_addr;
3011 
3012   //  Exposed here for yielding support
3013   Mutex* const _bit_map_lock;
3014 
3015   // The per thread work queues, available here for stealing
3016   OopTaskQueueSet*  _task_queues;
3017 
3018   // Termination (and yielding) support
3019   CMSConcMarkingTerminator _term;
3020   CMSConcMarkingTerminatorTerminator _term_term;
3021 
3022  public:
3023   CMSConcMarkingTask(CMSCollector* collector,
3024                  CompactibleFreeListSpace* cms_space,
3025                  YieldingFlexibleWorkGang* workers,
3026                  OopTaskQueueSet* task_queues):
3027     YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
3028     _collector(collector),
3029     _cms_space(cms_space),
3030     _n_workers(0), _result(true),
3031     _task_queues(task_queues),
3032     _term(_n_workers, task_queues, _collector),
3033     _bit_map_lock(collector->bitMapLock())
3034   {
3035     _requested_size = _n_workers;
3036     _term.set_task(this);
3037     _term_term.set_task(this);
3038     _restart_addr = _global_finger = _cms_space->bottom();
3039   }
3040 
3041 
3042   OopTaskQueueSet* task_queues()  { return _task_queues; }
3043 
3044   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
3045 
3046   HeapWord** global_finger_addr() { return &_global_finger; }
3047 
3048   CMSConcMarkingTerminator* terminator() { return &_term; }
3049 
3050   virtual void set_for_termination(uint active_workers) {
3051     terminator()->reset_for_reuse(active_workers);
3052   }
3053 
3054   void work(uint worker_id);
3055   bool should_yield() {
3056     return    ConcurrentMarkSweepThread::should_yield()
3057            && !_collector->foregroundGCIsActive();
3058   }
3059 
3060   virtual void coordinator_yield();  // stuff done by coordinator
3061   bool result() { return _result; }
3062 
3063   void reset(HeapWord* ra) {
3064     assert(_global_finger >= _cms_space->end(),  "Postcondition of ::work(i)");
3065     _restart_addr = _global_finger = ra;
3066     _term.reset_for_reuse();
3067   }
3068 
3069   static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3070                                            OopTaskQueue* work_q);
3071 
3072  private:
3073   void do_scan_and_mark(int i, CompactibleFreeListSpace* sp);
3074   void do_work_steal(int i);
3075   void bump_global_finger(HeapWord* f);
3076 };
3077 
3078 bool CMSConcMarkingTerminatorTerminator::should_exit_termination() {
3079   assert(_task != NULL, "Error");
3080   return _task->yielding();
3081   // Note that we do not need the disjunct || _task->should_yield() above
3082   // because we want terminating threads to yield only if the task
3083   // is already in the midst of yielding, which happens only after at least one
3084   // thread has yielded.
3085 }
3086 
3087 void CMSConcMarkingTerminator::yield() {
3088   if (_task->should_yield()) {
3089     _task->yield();
3090   } else {
3091     ParallelTaskTerminator::yield();
3092   }
3093 }
3094 
3095 ////////////////////////////////////////////////////////////////
3096 // Concurrent Marking Algorithm Sketch
3097 ////////////////////////////////////////////////////////////////
3098 // Until all tasks exhausted (both spaces):
3099 // -- claim next available chunk
3100 // -- bump global finger via CAS
3101 // -- find first object that starts in this chunk
3102 //    and start scanning bitmap from that position
3103 // -- scan marked objects for oops
3104 // -- CAS-mark target, and if successful:
3105 //    . if target oop is above global finger (volatile read)
3106 //      nothing to do
3107 //    . if target oop is in chunk and above local finger
3108 //        then nothing to do
3109 //    . else push on work-queue
3110 // -- Deal with possible overflow issues:
3111 //    . local work-queue overflow causes stuff to be pushed on
3112 //      global (common) overflow queue
3113 //    . always first empty local work queue
3114 //    . then get a batch of oops from global work queue if any
3115 //    . then do work stealing
3116 // -- When all tasks claimed (both spaces)
3117 //    and local work queue empty,
3118 //    then in a loop do:
3119 //    . check global overflow stack; steal a batch of oops and trace
3120 //    . try to steal from other threads if GOS is empty
3121 //    . if neither is available, offer termination
3122 // -- Terminate and return result
3123 //
3124 void CMSConcMarkingTask::work(uint worker_id) {
3125   elapsedTimer _timer;
3126   ResourceMark rm;
3127   HandleMark hm;
3128 
3129   DEBUG_ONLY(_collector->verify_overflow_empty();)
3130 
3131   // Before we begin work, our work queue should be empty
3132   assert(work_queue(worker_id)->size() == 0, "Expected to be empty");
3133   // Scan the bitmap covering _cms_space, tracing through grey objects.
3134   _timer.start();
3135   do_scan_and_mark(worker_id, _cms_space);
3136   _timer.stop();
3137   log_trace(gc, task)("Finished cms space scanning in %dth thread: %3.3f sec", worker_id, _timer.seconds());
3138 
3139   // ... do work stealing
3140   _timer.reset();
3141   _timer.start();
3142   do_work_steal(worker_id);
3143   _timer.stop();
3144   log_trace(gc, task)("Finished work stealing in %dth thread: %3.3f sec", worker_id, _timer.seconds());
3145   assert(_collector->_markStack.isEmpty(), "Should have been emptied");
3146   assert(work_queue(worker_id)->size() == 0, "Should have been emptied");
3147   // Note that under the current task protocol, the
3148   // following assertion is true even if the spaces have
3149   // expanded since the completion of the concurrent
3150   // marking. XXX This will likely change under a strict
3151   // ABORT semantics.
3152   // After perm removal the comparison was changed to
3153   // greater than or equal to from strictly greater than.
3154   // Before perm removal the highest address sweep would
3155   // have been at the end of perm gen but now is at the
3156   // end of the tenured gen.
3157   assert(_global_finger >=  _cms_space->end(),
3158          "All tasks have been completed");
3159   DEBUG_ONLY(_collector->verify_overflow_empty();)
3160 }
3161 
3162 void CMSConcMarkingTask::bump_global_finger(HeapWord* f) {
3163   HeapWord* read = _global_finger;
3164   HeapWord* cur  = read;
3165   while (f > read) {
3166     cur = read;
3167     read = (HeapWord*) Atomic::cmpxchg_ptr(f, &_global_finger, cur);
3168     if (cur == read) {
3169       // our cas succeeded
3170       assert(_global_finger >= f, "protocol consistency");
3171       break;
3172     }
3173   }
3174 }
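// The loop above is the standard CAS idiom for an atomic "advance to
// maximum": a failed cmpxchg returns the competing value, and we retry
// only while our candidate f is still larger. For example, if two
// threads race to publish fingers f1 < f2, _global_finger ends up at
// f2 regardless of the order in which their CAS operations land.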
3175 
3176 // This is really inefficient, and should be redone by
3177 // using (not yet available) block-read and -write interfaces to the
3178 // stack and the work_queue. XXX FIX ME !!!
3179 bool CMSConcMarkingTask::get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3180                                                       OopTaskQueue* work_q) {
3181   // Fast lock-free check
3182   if (ovflw_stk->length() == 0) {
3183     return false;
3184   }
3185   assert(work_q->size() == 0, "Shouldn't steal");
3186   MutexLockerEx ml(ovflw_stk->par_lock(),
3187                    Mutex::_no_safepoint_check_flag);
3188   // Grab up to 1/4 the size of the work queue
3189   size_t num = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
3190                     (size_t)ParGCDesiredObjsFromOverflowList);
3191   num = MIN2(num, ovflw_stk->length());
3192   for (int i = (int) num; i > 0; i--) {
3193     oop cur = ovflw_stk->pop();
3194     assert(cur != NULL, "Counted wrong?");
3195     work_q->push(cur);
3196   }
3197   return num > 0;
3198 }
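// Worked example of the sizing above (the queue size is illustrative):
// with an empty work queue of max_elems = 16384 and
// ParGCDesiredObjsFromOverflowList at its default of 20, we grab
// num = MIN2(16384/4, 20) = 20 oops, further clipped to the overflow
// stack's current length.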
3199 
3200 void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
3201   SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
3202   int n_tasks = pst->n_tasks();
3203   // We allow that there may be no tasks to do here because
3204   // we are restarting after a stack overflow.
3205   assert(pst->valid() || n_tasks == 0, "Uninitialized use?");
3206   uint nth_task = 0;
3207 
3208   HeapWord* aligned_start = sp->bottom();
3209   if (sp->used_region().contains(_restart_addr)) {
3210     // Align down to a card boundary for the start of 0th task
3211     // for this space.
3212     aligned_start =
3213       (HeapWord*)align_size_down((uintptr_t)_restart_addr,
3214                                  CardTableModRefBS::card_size);
3215   }
3216 
3217   size_t chunk_size = sp->marking_task_size();
3218   while (!pst->is_task_claimed(/* reference */ nth_task)) {
3219     // Having claimed the nth task in this space,
3220     // compute the chunk that it corresponds to:
3221     MemRegion span = MemRegion(aligned_start + nth_task*chunk_size,
3222                                aligned_start + (nth_task+1)*chunk_size);
3223     // Try and bump the global finger via a CAS;
3224     // note that we need to do the global finger bump
3225     // _before_ taking the intersection below, because
3226     // the task corresponding to that region will be
3227     // deemed done even if the used_region() expands
3228     // because of allocation -- as it almost certainly will
3229     // during start-up while the threads yield in the
3230     // closure below.
3231     HeapWord* finger = span.end();
3232     bump_global_finger(finger);   // atomically
3233     // There are null tasks here corresponding to chunks
3234     // beyond the "top" address of the space.
3235     span = span.intersection(sp->used_region());
3236     if (!span.is_empty()) {  // Non-null task
3237       HeapWord* prev_obj;
3238       assert(!span.contains(_restart_addr) || nth_task == 0,
3239              "Inconsistency");
3240       if (nth_task == 0) {
3241         // For the 0th task, we'll not need to compute a block_start.
3242         if (span.contains(_restart_addr)) {
3243           // In the case of a restart because of stack overflow,
3244           // we might additionally skip a chunk prefix.
3245           prev_obj = _restart_addr;
3246         } else {
3247           prev_obj = span.start();
3248         }
3249       } else {
3250         // We want to skip the first object because
3251         // the protocol is to scan any object in its entirety
3252         // that _starts_ in this span; a fortiori, any
3253         // object starting in an earlier span is scanned
3254         // as part of an earlier claimed task.
3255         // Below we use the "careful" version of block_start
3256         // so we do not try to navigate uninitialized objects.
3257         prev_obj = sp->block_start_careful(span.start());
3258         // Below we use a variant of block_size that uses the
3259         // Printezis bits to avoid waiting for allocated
3260         // objects to become initialized/parsable.
3261         while (prev_obj < span.start()) {
3262           size_t sz = sp->block_size_no_stall(prev_obj, _collector);
3263           if (sz > 0) {
3264             prev_obj += sz;
3265           } else {
3266             // In this case we may end up doing a bit of redundant
3267             // scanning, but that appears unavoidable, short of
3268             // locking the free list locks; see bug 6324141.
3269             break;
3270           }
3271         }
3272       }
3273       if (prev_obj < span.end()) {
3274         MemRegion my_span = MemRegion(prev_obj, span.end());
3275         // Do the marking work within a non-empty span --
3276         // the last argument to the constructor indicates whether the
3277         // iteration should be incremental with periodic yields.
3278         ParMarkFromRootsClosure cl(this, _collector, my_span,
3279                                    &_collector->_markBitMap,
3280                                    work_queue(i),
3281                                    &_collector->_markStack);
3282         _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
3283       } // else nothing to do for this task
3284     }   // else nothing to do for this task
3285   }
3286   // We'd be tempted to assert here that since there are no
3287   // more tasks left to claim in this space, the global_finger
3288   // must exceed space->top() and a fortiori space->end(). However,
3289   // that would not quite be correct because the bumping of
3290   // global_finger occurs strictly after the claiming of a task,
3291   // so by the time we reach here the global finger may not yet
3292   // have been bumped up by the thread that claimed the last
3293   // task.
3294   pst->all_tasks_completed();
3295 }
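// Worked example of the task-to-chunk mapping above (numbers are
// illustrative): with a marking_task_size of 0x4000 HeapWords, the
// nth claimed task covers the half-open interval
//
//   [aligned_start + n * 0x4000, aligned_start + (n + 1) * 0x4000)
//
// in HeapWord* arithmetic, which is then intersected with
// used_region(), so chunks lying beyond top() degenerate into empty
// (null) tasks.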
3296 
3297 class ParConcMarkingClosure: public MetadataAwareOopClosure {
3298  private:
3299   CMSCollector* _collector;
3300   CMSConcMarkingTask* _task;
3301   MemRegion     _span;
3302   CMSBitMap*    _bit_map;
3303   CMSMarkStack* _overflow_stack;
3304   OopTaskQueue* _work_queue;
3305  protected:
3306   DO_OOP_WORK_DEFN
3307  public:
3308   ParConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue,
3309                         CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
3310     MetadataAwareOopClosure(collector->ref_processor()),
3311     _collector(collector),
3312     _task(task),
3313     _span(collector->_span),
3314     _work_queue(work_queue),
3315     _bit_map(bit_map),
3316     _overflow_stack(overflow_stack)
3317   { }
3318   virtual void do_oop(oop* p);
3319   virtual void do_oop(narrowOop* p);
3320 
3321   void trim_queue(size_t max);
3322   void handle_stack_overflow(HeapWord* lost);
3323   void do_yield_check() {
3324     if (_task->should_yield()) {
3325       _task->yield();
3326     }
3327   }
3328 };
3329 
3330 DO_OOP_WORK_IMPL(ParConcMarkingClosure)
3331 
3332 // Grey object scanning during work stealing phase --
3333 // the salient assumption here is that any references
3334 // that are in these stolen objects being scanned must
3335 // already have been initialized (else they would not have
3336 // been published), so we do not need to check for
3337 // uninitialized objects before pushing here.
3338 void ParConcMarkingClosure::do_oop(oop obj) {
3339   assert(obj->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
3340   HeapWord* addr = (HeapWord*)obj;
3341   // Check if oop points into the CMS generation
3342   // and is not marked
3343   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
3344     // a white object ...
3345     // If we manage to "claim" the object, by being the
3346     // first thread to mark it, then we push it on our
3347     // marking stack
3348     if (_bit_map->par_mark(addr)) {     // ... now grey
3349       // push on work queue (grey set)
3350       bool simulate_overflow = false;
3351       NOT_PRODUCT(
3352         if (CMSMarkStackOverflowALot &&
3353             _collector->simulate_overflow()) {
3354           // simulate a stack overflow
3355           simulate_overflow = true;
3356         }
3357       )
3358       if (simulate_overflow ||
3359           !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
3360         // stack overflow
3361         log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _overflow_stack->capacity());
3362         // We cannot assert that the overflow stack is full because
3363         // it may have been emptied since.
3364         assert(simulate_overflow ||
3365                _work_queue->size() == _work_queue->max_elems(),
3366               "Else push should have succeeded");
3367         handle_stack_overflow(addr);
3368       }
3369     } // Else, some other thread got there first
3370     do_yield_check();
3371   }
3372 }
3373 
3374 void ParConcMarkingClosure::do_oop(oop* p)       { ParConcMarkingClosure::do_oop_work(p); }
3375 void ParConcMarkingClosure::do_oop(narrowOop* p) { ParConcMarkingClosure::do_oop_work(p); }
3376 
3377 void ParConcMarkingClosure::trim_queue(size_t max) {
3378   while (_work_queue->size() > max) {
3379     oop new_oop;
3380     if (_work_queue->pop_local(new_oop)) {
3381       assert(new_oop->is_oop(), "Should be an oop");
3382       assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object");
3383       assert(_span.contains((HeapWord*)new_oop), "Not in span");
3384       new_oop->oop_iterate(this);  // do_oop() above
3385       do_yield_check();
3386     }
3387   }
3388 }
3389 
3390 // Upon stack overflow, we discard (part of) the stack,
3391 // remembering the least address amongst those discarded
3392 // in CMSCollector's _restart_address.
3393 void ParConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
3394   // We need to do this under a mutex to prevent other
3395   // workers from interfering with the work done below.
3396   MutexLockerEx ml(_overflow_stack->par_lock(),
3397                    Mutex::_no_safepoint_check_flag);
3398   // Remember the least grey address discarded
3399   HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
3400   _collector->lower_restart_addr(ra);
3401   _overflow_stack->reset();  // discard stack contents
3402   _overflow_stack->expand(); // expand the stack if possible
3403 }
3404 
3405 
3406 void CMSConcMarkingTask::do_work_steal(int i) {
3407   OopTaskQueue* work_q = work_queue(i);
3408   oop obj_to_scan;
3409   CMSBitMap* bm = &(_collector->_markBitMap);
3410   CMSMarkStack* ovflw = &(_collector->_markStack);
3411   int* seed = _collector->hash_seed(i);
3412   ParConcMarkingClosure cl(_collector, this, work_q, bm, ovflw);
3413   while (true) {
3414     cl.trim_queue(0);
3415     assert(work_q->size() == 0, "Should have been emptied above");
3416     if (get_work_from_overflow_stack(ovflw, work_q)) {
3417       // Can't assert below because the work obtained from the
3418       // overflow stack may already have been stolen from us.
3419       // assert(work_q->size() > 0, "Work from overflow stack");
3420       continue;
3421     } else if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
3422       assert(obj_to_scan->is_oop(), "Should be an oop");
3423       assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object");
3424       obj_to_scan->oop_iterate(&cl);
3425     } else if (terminator()->offer_termination(&_term_term)) {
3426       assert(work_q->size() == 0, "Impossible!");
3427       break;
3428     } else if (yielding() || should_yield()) {
3429       yield();
3430     }
3431   }
3432 }
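// The stealing loop above consults its sources in a fixed priority
// order: (1) drain the local work queue completely, (2) refill it from
// the shared overflow stack, (3) steal from a randomly chosen peer
// queue, (4) offer termination, and only then (5) yield if the task as
// a whole is in the midst of yielding.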
3433 
3434 // This is run by the CMS (coordinator) thread.
3435 void CMSConcMarkingTask::coordinator_yield() {
3436   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
3437          "CMS thread should hold CMS token");
3438   // First give up the locks, then yield, then re-lock
3439   // We should probably use a constructor/destructor idiom to
3440   // do this unlock/lock or modify the MutexUnlocker class to
3441   // serve our purpose. XXX
3442   assert_lock_strong(_bit_map_lock);
3443   _bit_map_lock->unlock();
3444   ConcurrentMarkSweepThread::desynchronize(true);
3445   _collector->stopTimer();
3446   _collector->incrementYields();
3447 
3448   // It is possible for whichever thread initiated the yield request
3449   // not to get a chance to wake up and take the bitmap lock between
3450   // this thread releasing it and reacquiring it. So, while the
3451   // should_yield() flag is on, let's sleep for a bit to give the
3452   // other thread a chance to wake up. The limit imposed on the number
3453   // of iterations is defensive, to avoid any unforeseen circumstances
3454   // putting us into an infinite loop. Since it's always been this
3455   // (coordinator_yield()) method that was observed to cause the
3456   // problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
3457   // which is by default non-zero. For the other seven methods that
3458   // also perform the yield operation, we are using a different
3459   // parameter (CMSYieldSleepCount) which is by default zero. This way we
3460   // can enable the sleeping for those methods too, if necessary.
3461   // See 6442774.
3462   //
3463   // We really need to reconsider the synchronization between the GC
3464   // thread and the yield-requesting threads in the future and we
3465   // should really use wait/notify, which is the recommended
3466   // way of doing this type of interaction. Additionally, we should
3467   // consolidate the eight methods that do the yield operation and they
3468   // are almost identical into one for better maintainability and
3469   // readability. See 6445193.
3470   //
3471   // Tony 2006.06.29
3472   for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
3473                    ConcurrentMarkSweepThread::should_yield() &&
3474                    !CMSCollector::foregroundGCIsActive(); ++i) {
3475     os::sleep(Thread::current(), 1, false);
3476   }
3477 
3478   ConcurrentMarkSweepThread::synchronize(true);
3479   _bit_map_lock->lock_without_safepoint_check();
3480   _collector->startTimer();
3481 }
3482 
3483 bool CMSCollector::do_marking_mt() {
3484   assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
3485   uint num_workers = AdaptiveSizePolicy::calc_active_conc_workers(conc_workers()->total_workers(),
3486                                                                   conc_workers()->active_workers(),
3487                                                                   Threads::number_of_non_daemon_threads());
3488   conc_workers()->set_active_workers(num_workers);
3489 
3490   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
3491 
3492   CMSConcMarkingTask tsk(this,
3493                          cms_space,
3494                          conc_workers(),
3495                          task_queues());
3496 
3497   // Since the actual number of workers we get may be different
3498   // from the number we requested above, do we need to do anything different
3499   // below? In particular, maybe we need to subclass the SequentialSubTasksDone
3500   // class?? XXX
3501   cms_space->initialize_sequential_subtasks_for_marking(num_workers);
3502 
3503   // Refs discovery is already non-atomic.
3504   assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
3505   assert(ref_processor()->discovery_is_mt(), "Discovery should be MT");
3506   conc_workers()->start_task(&tsk);
3507   while (tsk.yielded()) {
3508     tsk.coordinator_yield();
3509     conc_workers()->continue_task(&tsk);
3510   }
3511   // If the task was aborted, _restart_addr will be non-NULL
3512   assert(tsk.completed() || _restart_addr != NULL, "Inconsistency");
3513   while (_restart_addr != NULL) {
3514     // XXX For now we do not make use of ABORTED state and have not
3515     // yet implemented the right abort semantics (even in the original
3516     // single-threaded CMS case). That needs some more investigation
3517     // and is deferred for now; see CR# TBF. 07252005YSR. XXX
3518     assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency");
3519     // If _restart_addr is non-NULL, a marking stack overflow
3520     // occurred; we need to do a fresh marking iteration from the
3521     // indicated restart address.
3522     if (_foregroundGCIsActive) {
3523       // We may be running into repeated stack overflows, having
3524       // reached the limit of the stack size, while making very
3525       // slow forward progress. It may be best to bail out and
3526       // let the foreground collector do its job.
3527       // Clear _restart_addr, so that foreground GC
3528       // works from scratch. This avoids the headache of
3529       // a "rescan" which would otherwise be needed because
3530       // of the dirty mod union table & card table.
3531       _restart_addr = NULL;
3532       return false;
3533     }
3534     // Adjust the task to restart from _restart_addr
3535     tsk.reset(_restart_addr);
3536     cms_space->initialize_sequential_subtasks_for_marking(num_workers,
3537                                                            _restart_addr);
3538     _restart_addr = NULL;
3539     // Get the workers going again
3540     conc_workers()->start_task(&tsk);
3541     while (tsk.yielded()) {
3542       tsk.coordinator_yield();
3543       conc_workers()->continue_task(&tsk);
3544     }
3545   }
3546   assert(tsk.completed(), "Inconsistency");
3547   assert(tsk.result() == true, "Inconsistency");
3548   return true;
3549 }
3550 
3551 bool CMSCollector::do_marking_st() {
3552   ResourceMark rm;
3553   HandleMark   hm;
3554 
3555   // Temporarily make refs discovery single threaded (non-MT)
3556   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
3557   MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
3558     &_markStack, CMSYield);
3559   // the last argument to iterate indicates whether the iteration
3560   // should be incremental with periodic yields.
3561   _markBitMap.iterate(&markFromRootsClosure);
3562   // If _restart_addr is non-NULL, a marking stack overflow
3563   // occurred; we need to do a fresh iteration from the
3564   // indicated restart address.
3565   while (_restart_addr != NULL) {
3566     if (_foregroundGCIsActive) {
3567       // We may be running into repeated stack overflows, having
3568       // reached the limit of the stack size, while making very
3569       // slow forward progress. It may be best to bail out and
3570       // let the foreground collector do its job.
3571       // Clear _restart_addr, so that foreground GC
3572       // works from scratch. This avoids the headache of
3573       // a "rescan" which would otherwise be needed because
3574       // of the dirty mod union table & card table.
3575       _restart_addr = NULL;
3576       return false;  // indicating failure to complete marking
3577     }
3578     // Deal with stack overflow:
3579     // we restart marking from _restart_addr
3580     HeapWord* ra = _restart_addr;
3581     markFromRootsClosure.reset(ra);
3582     _restart_addr = NULL;
3583     _markBitMap.iterate(&markFromRootsClosure, ra, _span.end());
3584   }
3585   return true;
3586 }
3587 
3588 void CMSCollector::preclean() {
3589   check_correct_thread_executing();
3590   assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
3591   verify_work_stacks_empty();
3592   verify_overflow_empty();
3593   _abort_preclean = false;
3594   if (CMSPrecleaningEnabled) {
3595     if (!CMSEdenChunksRecordAlways) {
3596       _eden_chunk_index = 0;
3597     }
3598     size_t used = get_eden_used();
3599     size_t capacity = get_eden_capacity();
3600     // Don't start sampling unless we will get sufficiently
3601     // many samples.
3602     if (used < (capacity/(CMSScheduleRemarkSamplingRatio * 100)
3603                 * CMSScheduleRemarkEdenPenetration)) {
3604       _start_sampling = true;
3605     } else {
3606       _start_sampling = false;
3607     }
3608     GCTraceCPUTime tcpu;
3609     CMSPhaseAccounting pa(this, "Concurrent Preclean");
3610     preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
3611   }
3612   CMSTokenSync x(true); // is cms thread
3613   if (CMSPrecleaningEnabled) {
3614     sample_eden();
3615     _collectorState = AbortablePreclean;
3616   } else {
3617     _collectorState = FinalMarking;
3618   }
3619   verify_work_stacks_empty();
3620   verify_overflow_empty();
3621 }
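// Worked example of the sampling decision above, assuming the default
// CMSScheduleRemarkSamplingRatio of 5 and
// CMSScheduleRemarkEdenPenetration of 50: sampling starts only while
//
//   used < (capacity / (5 * 100)) * 50  ==  capacity / 10
//
// i.e. while eden is less than 10% full, so that enough samples can
// accumulate before the target remark occupancy is reached.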
3622 
3623 // Try and schedule the remark such that young gen
3624 // occupancy is CMSScheduleRemarkEdenPenetration %.
3625 void CMSCollector::abortable_preclean() {
3626   check_correct_thread_executing();
3627   assert(CMSPrecleaningEnabled,  "Inconsistent control state");
3628   assert(_collectorState == AbortablePreclean, "Inconsistent control state");
3629 
3630   // If Eden's current occupancy is below this threshold,
3631   // immediately schedule the remark; else preclean
3632   // past the next scavenge in an effort to
3633   // schedule the pause as described above. By choosing
3634   // CMSScheduleRemarkEdenSizeThreshold >= max eden size
3635   // we will never do an actual abortable preclean cycle.
3636   if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
3637     GCTraceCPUTime tcpu;
3638     CMSPhaseAccounting pa(this, "Concurrent Abortable Preclean");
3639     // We need more smarts in the abortable preclean
3640     // loop below to deal with cases where allocation
3641     // in young gen is very very slow, and our precleaning
3642     // is running a losing race against a horde of
3643     // mutators intent on flooding us with CMS updates
3644     // (dirty cards).
3645     // One, admittedly dumb, strategy is to give up
3646     // after a certain number of abortable precleaning loops
3647     // or after a certain maximum time. We want to make
3648     // this smarter in the next iteration.
3649     // XXX FIX ME!!! YSR
3650     size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
3651     while (!(should_abort_preclean() ||
3652              ConcurrentMarkSweepThread::should_terminate())) {
3653       workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
3654       cumworkdone += workdone;
3655       loops++;
3656       // Voluntarily terminate abortable preclean phase if we have
3657       // been at it for too long.
3658       if ((CMSMaxAbortablePrecleanLoops != 0) &&
3659           loops >= CMSMaxAbortablePrecleanLoops) {
3660         log_debug(gc)(" CMS: abort preclean due to loops ");
3661         break;
3662       }
3663       if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) {
3664         log_debug(gc)(" CMS: abort preclean due to time ");
3665         break;
3666       }
3667       // If we are doing little work each iteration, we should
3668       // take a short break.
3669       if (workdone < CMSAbortablePrecleanMinWorkPerIteration) {
3670         // Sleep for some time, waiting for work to accumulate
3671         stopTimer();
3672         cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis);
3673         startTimer();
3674         waited++;
3675       }
3676     }
3677     log_trace(gc)(" [" SIZE_FORMAT " iterations, " SIZE_FORMAT " waits, " SIZE_FORMAT " cards] ",
3678                                loops, waited, cumworkdone);
3679   }
3680   CMSTokenSync x(true); // is cms thread
3681   if (_collectorState != Idling) {
3682     assert(_collectorState == AbortablePreclean,
3683            "Spontaneous state transition?");
3684     _collectorState = FinalMarking;
3685   } // Else, a foreground collection completed this CMS cycle.
3686   return;
3687 }
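// Worked example of the cutoffs above, assuming the default values
// CMSMaxAbortablePrecleanLoops = 0 (no loop limit),
// CMSMaxAbortablePrecleanTime = 5000 ms and
// CMSAbortablePrecleanMinWorkPerIteration = 100: an iteration that
// precleans fewer than 100 cards sleeps for
// CMSAbortablePrecleanWaitMillis before retrying, and the phase as a
// whole gives up after 5 seconds of wallclock time unless a scavenge
// aborts it sooner via should_abort_preclean().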
3688 
3689 // Respond to an Eden sampling opportunity
3690 void CMSCollector::sample_eden() {
3691   // Make sure a young gc cannot sneak in between our
3692   // reading and recording of a sample.
3693   assert(Thread::current()->is_ConcurrentGC_thread(),
3694          "Only the cms thread may collect Eden samples");
3695   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
3696          "Should collect samples while holding CMS token");
3697   if (!_start_sampling) {
3698     return;
3699   }
3700   // When CMSEdenChunksRecordAlways is true, the eden chunk array
3701   // is populated by the young generation.
3702   if (_eden_chunk_array != NULL && !CMSEdenChunksRecordAlways) {
3703     if (_eden_chunk_index < _eden_chunk_capacity) {
3704       _eden_chunk_array[_eden_chunk_index] = *_top_addr;   // take sample
3705       assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
3706              "Unexpected state of Eden");
3707       // We'd like to check that what we just sampled is an oop-start address;
3708       // however, we cannot do that here since the object may not yet have been
3709       // initialized. So we'll instead do the check when we _use_ this sample
3710       // later.
3711       if (_eden_chunk_index == 0 ||
3712           (pointer_delta(_eden_chunk_array[_eden_chunk_index],
3713                          _eden_chunk_array[_eden_chunk_index-1])
3714            >= CMSSamplingGrain)) {
3715         _eden_chunk_index++;  // commit sample
3716       }
3717     }
3718   }
3719   if ((_collectorState == AbortablePreclean) && !_abort_preclean) {
3720     size_t used = get_eden_used();
3721     size_t capacity = get_eden_capacity();
3722     assert(used <= capacity, "Unexpected state of Eden");
3723     if (used >  (capacity/100 * CMSScheduleRemarkEdenPenetration)) {
3724       _abort_preclean = true;
3725     }
3726   }
3727 }
3728 
3729 
3730 size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
3731   assert(_collectorState == Precleaning ||
3732          _collectorState == AbortablePreclean, "incorrect state");
3733   ResourceMark rm;
3734   HandleMark   hm;
3735 
3736   // Precleaning is currently not MT but the reference processor
3737   // may be set for MT.  Disable it temporarily here.
3738   ReferenceProcessor* rp = ref_processor();
3739   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
3740 
3741   // Do one pass of scrubbing the discovered reference lists
3742   // to remove any reference objects with strongly-reachable
3743   // referents.
3744   if (clean_refs) {
3745     CMSPrecleanRefsYieldClosure yield_cl(this);
3746     assert(rp->span().equals(_span), "Spans should be equal");
3747     CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
3748                                    &_markStack, true /* preclean */);
3749     CMSDrainMarkingStackClosure complete_trace(this,
3750                                    _span, &_markBitMap, &_markStack,
3751                                    &keep_alive, true /* preclean */);
3752 
3753     // We don't want this step to interfere with a young
3754     // collection because we don't want to take CPU
3755     // or memory bandwidth away from the young GC threads
3756     // (which may be as many as there are CPUs).
3757     // Note that we don't need to protect ourselves from
3758     // interference with mutators because they can't
3759     // manipulate the discovered reference lists nor affect
3760     // the computed reachability of the referents, the
3761     // only properties manipulated by the precleaning
3762     // of these reference lists.
3763     stopTimer();
3764     CMSTokenSyncWithLocks x(true /* is cms thread */,
3765                             bitMapLock());
3766     startTimer();
3767     sample_eden();
3768 
3769     // The following will yield to allow foreground
3770     // collection to proceed promptly. XXX YSR:
3771     // The code in this method may need further
3772     // tweaking for better performance and some restructuring
3773     // for cleaner interfaces.
3774     GCTimer *gc_timer = NULL; // Currently not tracing concurrent phases
3775     rp->preclean_discovered_references(
3776           rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl,
3777           gc_timer);
3778   }
3779 
3780   if (clean_survivor) {  // preclean the active survivor space(s)
3781     PushAndMarkClosure pam_cl(this, _span, ref_processor(),
3782                              &_markBitMap, &_modUnionTable,
3783                              &_markStack, true /* precleaning phase */);
3784     stopTimer();
3785     CMSTokenSyncWithLocks ts(true /* is cms thread */,
3786                              bitMapLock());
3787     startTimer();
3788     unsigned int before_count =
3789       GenCollectedHeap::heap()->total_collections();
3790     SurvivorSpacePrecleanClosure
3791       sss_cl(this, _span, &_markBitMap, &_markStack,
3792              &pam_cl, before_count, CMSYield);
3793     _young_gen->from()->object_iterate_careful(&sss_cl);
3794     _young_gen->to()->object_iterate_careful(&sss_cl);
3795   }
3796   MarkRefsIntoAndScanClosure
3797     mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
3798              &_markStack, this, CMSYield,
3799              true /* precleaning phase */);
3800   // CAUTION: The following closure has persistent state that may need to
3801   // be reset upon a decrease in the sequence of addresses it
3802   // processes.
3803   ScanMarkedObjectsAgainCarefullyClosure
3804     smoac_cl(this, _span,
3805       &_markBitMap, &_markStack, &mrias_cl, CMSYield);
3806 
3807   // Preclean dirty cards in ModUnionTable and CardTable using
3808   // appropriate convergence criterion;
3809   // repeat CMSPrecleanIter times unless we find that
3810   // we are losing.
3811   assert(CMSPrecleanIter < 10, "CMSPrecleanIter is too large");
3812   assert(CMSPrecleanNumerator < CMSPrecleanDenominator,
3813          "Bad convergence multiplier");
3814   assert(CMSPrecleanThreshold >= 100,
3815          "Unreasonably low CMSPrecleanThreshold");
3816 
3817   size_t numIter, cumNumCards, lastNumCards, curNumCards;
3818   for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0;
3819        numIter < CMSPrecleanIter;
3820        numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
3821     curNumCards  = preclean_mod_union_table(_cmsGen, &smoac_cl);
3822     log_trace(gc)(" (modUnionTable: " SIZE_FORMAT " cards)", curNumCards);
3823     // Either there are very few dirty cards, so re-mark
3824     // pause will be small anyway, or our pre-cleaning isn't
3825     // that much faster than the rate at which cards are being
3826     // dirtied, so we might as well stop and re-mark since
3827     // precleaning won't improve our re-mark time by much.
3828     if (curNumCards <= CMSPrecleanThreshold ||
3829         (numIter > 0 &&
3830          (curNumCards * CMSPrecleanDenominator >
3831          lastNumCards * CMSPrecleanNumerator))) {
3832       numIter++;
3833       cumNumCards += curNumCards;
3834       break;
3835     }
3836   }
3837 
3838   preclean_klasses(&mrias_cl, _cmsGen->freelistLock());
3839 
3840   curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
3841   cumNumCards += curNumCards;
3842   log_trace(gc)(" (cardTable: " SIZE_FORMAT " cards, re-scanned " SIZE_FORMAT " cards, " SIZE_FORMAT " iterations)",
3843                              curNumCards, cumNumCards, numIter);
3844   return cumNumCards;   // as a measure of useful work done
3845 }
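
     // A minimal standalone sketch (illustrative only; not called by the
     // collector) of the convergence test applied in the loop above. The
     // parameters mirror CMSPrecleanThreshold, CMSPrecleanNumerator and
     // CMSPrecleanDenominator; the numbers in the comments are examples.
     static bool preclean_has_converged_sketch(size_t cur_cards,
                                               size_t last_cards,
                                               size_t iter,
                                               size_t threshold,
                                               size_t numerator,
                                               size_t denominator) {
       // Stop when few dirty cards remain (the re-mark pause will be short
       // anyway), or when the dirty-card count is no longer shrinking by at
       // least a factor of numerator/denominator per iteration. E.g. with
       // numerator/denominator = 2/3, going from last=1000 to cur=900 gives
       // 900*3 > 1000*2, so precleaning is judged to be losing and we stop.
       return cur_cards <= threshold ||
              (iter > 0 && cur_cards * denominator > last_cards * numerator);
     }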
3846 
3847 // PRECLEANING NOTES:
3848 // Precleaning involves:
3849 // . reading the bits of the modUnionTable and clearing the set bits.
3850 // . For the cards corresponding to the set bits, we scan the
3851 //   objects on those cards. This means we need the free_list_lock
3852 //   so that we can safely iterate over the CMS space when scanning
3853 //   for oops.
3854 // . When we scan the objects, we'll be both reading and setting
3855 //   marks in the marking bit map, so we'll need the marking bit map.
3856 // . For protecting _collector_state transitions, we take the CGC_lock.
3857 //   Note that any races in the reading of card table entries by the
3858 //   CMS thread on the one hand and the clearing of those entries by the
3859 //   VM thread or the setting of those entries by the mutator threads on the
3860 //   other are quite benign. However, for efficiency it makes sense to keep
3861 //   the VM thread from racing with the CMS thread while the latter is
3862 //   transferring dirty card info to the modUnionTable. We therefore also
3863 //   use the CGC_lock to protect the reading of the card table and the mod
3864 //   union table by the CMS thread.
3865 // . We run concurrently with mutator updates, so scanning
3866 //   needs to be done carefully  -- we should not try to scan
3867 //   potentially uninitialized objects.
3868 //
3869 // Locking strategy: While holding the CGC_lock, we scan over and
3870 // reset a maximal dirty range of the mod union / card tables, then lock
3871 // the free_list_lock and bitmap lock to do a full marking, then
3872 // release these locks; and repeat the cycle. This allows for a
3873 // certain amount of fairness in the sharing of these locks between
3874 // the CMS collector on the one hand, and the VM thread and the
3875 // mutators on the other.
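     //
     // Schematically, one round of that cycle looks like the following
     // (illustrative pseudo-code; the real loop is in
     // preclean_mod_union_table() below):
     //
     //   { CMSTokenSync ts(true);                 // CGC_lock token held
     //     dirty = getAndClearMarkedRegion(next, end);
     //   }                                        // token released
     //   { CMSTokenSyncWithLocks ts(true, freelistLock(), bitMapLock());
     //     object_iterate_careful_m(dirty, cl);   // full marking over 'dirty'
     //   }                                        // locks released; repeat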
3876 
3877 // NOTE: preclean_mod_union_table() and preclean_card_table()
3878 // further below are largely identical; if you need to modify
3879 // one of these methods, please check the other method too.
3880 
3881 size_t CMSCollector::preclean_mod_union_table(
3882   ConcurrentMarkSweepGeneration* old_gen,
3883   ScanMarkedObjectsAgainCarefullyClosure* cl) {
3884   verify_work_stacks_empty();
3885   verify_overflow_empty();
3886 
3887   // strategy: starting with the first card, accumulate contiguous
3888   // ranges of dirty cards; clear these cards, then scan the region
3889   // covered by these cards.
3890 
3891   // Since all of the MUT is committed ahead, we can just use
3892   // that, in case the generations expand while we are precleaning.
3893   // It might also be fine to just use the committed part of the
3894   // generation, but we might potentially miss cards when the
3895   // generation is rapidly expanding while we are in the midst
3896   // of precleaning.
3897   HeapWord* startAddr = old_gen->reserved().start();
3898   HeapWord* endAddr   = old_gen->reserved().end();
3899 
3900   cl->setFreelistLock(old_gen->freelistLock());   // needed for yielding
3901 
3902   size_t numDirtyCards, cumNumDirtyCards;
3903   HeapWord *nextAddr, *lastAddr;
3904   for (cumNumDirtyCards = numDirtyCards = 0,
3905        nextAddr = lastAddr = startAddr;
3906        nextAddr < endAddr;
3907        nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
3908 
3909     ResourceMark rm;
3910     HandleMark   hm;
3911 
3912     MemRegion dirtyRegion;
3913     {
3914       stopTimer();
3915       // Potential yield point
3916       CMSTokenSync ts(true);
3917       startTimer();
3918       sample_eden();
3919       // Get dirty region starting at nextAddr (inclusive),
3920       // simultaneously clearing it.
3921       dirtyRegion =
3922         _modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr);
3923       assert(dirtyRegion.start() >= nextAddr,
3924              "returned region inconsistent?");
3925     }
3926     // Remember where the next search should begin.
3927     // The returned region (if non-empty) is a right-open interval,
3928     // so lastAddr is obtained from the right end of that
3929     // interval.
3930     lastAddr = dirtyRegion.end();
3931     // Should do something more transparent and less hacky XXX
3932     numDirtyCards =
3933       _modUnionTable.heapWordDiffToOffsetDiff(dirtyRegion.word_size());
3934 
3935     // We'll scan the cards in the dirty region (with periodic
3936     // yields for foreground GC as needed).
3937     if (!dirtyRegion.is_empty()) {
3938       assert(numDirtyCards > 0, "consistency check");
3939       HeapWord* stop_point = NULL;
3940       stopTimer();
3941       // Potential yield point
3942       CMSTokenSyncWithLocks ts(true, old_gen->freelistLock(),
3943                                bitMapLock());
3944       startTimer();
3945       {
3946         verify_work_stacks_empty();
3947         verify_overflow_empty();
3948         sample_eden();
3949         stop_point =
3950           old_gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
3951       }
3952       if (stop_point != NULL) {
3953         // The careful iteration stopped early either because it found an
3954         // uninitialized object, or because we were in the midst of an
3955         // "abortable preclean", which should now be aborted. Redirty
3956         // the bits corresponding to the partially-scanned or unscanned
3957         // cards. We'll either restart at the next block boundary or
3958         // abort the preclean.
3959         assert((_collectorState == AbortablePreclean && should_abort_preclean()),
3960                "Should only be AbortablePreclean.");
3961         _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end()));
3962         if (should_abort_preclean()) {
3963           break; // out of preclean loop
3964         } else {
3965           // Compute the next address at which preclean should pick up;
3966           // might need bitMapLock in order to read P-bits.
3967           lastAddr = next_card_start_after_block(stop_point);
3968         }
3969       }
3970     } else {
3971       assert(lastAddr == endAddr, "consistency check");
3972       assert(numDirtyCards == 0, "consistency check");
3973       break;
3974     }
3975   }
3976   verify_work_stacks_empty();
3977   verify_overflow_empty();
3978   return cumNumDirtyCards;
3979 }
3980 
3981 // NOTE: preclean_mod_union_table() above and preclean_card_table()
3982 // below are largely identical; if you need to modify
3983 // one of these methods, please check the other method too.
3984 
3985 size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* old_gen,
3986   ScanMarkedObjectsAgainCarefullyClosure* cl) {
3987   // strategy: it's similar to preclean_mod_union_table() above, in that
3988   // we accumulate contiguous ranges of dirty cards, mark these cards
3989   // precleaned, then scan the region covered by these cards.
3990   HeapWord* endAddr   = (HeapWord*)(old_gen->_virtual_space.high());
3991   HeapWord* startAddr = (HeapWord*)(old_gen->_virtual_space.low());
3992 
3993   cl->setFreelistLock(old_gen->freelistLock());   // needed for yielding
3994 
3995   size_t numDirtyCards, cumNumDirtyCards;
3996   HeapWord *lastAddr, *nextAddr;
3997 
3998   for (cumNumDirtyCards = numDirtyCards = 0,
3999        nextAddr = lastAddr = startAddr;
4000        nextAddr < endAddr;
4001        nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4002 
4003     ResourceMark rm;
4004     HandleMark   hm;
4005 
4006     MemRegion dirtyRegion;
4007     {
4008       // See comments in "Precleaning notes" above on why we
4009       // do this locking. XXX Could the locking overheads be
4010       // too high when dirty cards are sparse? [I don't think so.]
4011       stopTimer();
4012       CMSTokenSync x(true); // is cms thread
4013       startTimer();
4014       sample_eden();
4015       // Get and clear dirty region from card table
4016       dirtyRegion = _ct->ct_bs()->dirty_card_range_after_reset(
4017                                     MemRegion(nextAddr, endAddr),
4018                                     true,
4019                                     CardTableModRefBS::precleaned_card_val());
4020 
4021       assert(dirtyRegion.start() >= nextAddr,
4022              "returned region inconsistent?");
4023     }
4024     lastAddr = dirtyRegion.end();
4025     numDirtyCards =
4026       dirtyRegion.word_size()/CardTableModRefBS::card_size_in_words;
4027 
4028     if (!dirtyRegion.is_empty()) {
4029       stopTimer();
4030       CMSTokenSyncWithLocks ts(true, old_gen->freelistLock(), bitMapLock());
4031       startTimer();
4032       sample_eden();
4033       verify_work_stacks_empty();
4034       verify_overflow_empty();
4035       HeapWord* stop_point =
4036         old_gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4037       if (stop_point != NULL) {
4038         assert((_collectorState == AbortablePreclean && should_abort_preclean()),
4039                "Should only be AbortablePreclean.");
4040         _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end()));
4041         if (should_abort_preclean()) {
4042           break; // out of preclean loop
4043         } else {
4044           // Compute the next address at which preclean should pick up.
4045           lastAddr = next_card_start_after_block(stop_point);
4046         }
4047       }
4048     } else {
4049       break;
4050     }
4051   }
4052   verify_work_stacks_empty();
4053   verify_overflow_empty();
4054   return cumNumDirtyCards;
4055 }
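
     // The card arithmetic used above, shown standalone (illustrative; the
     // real constant is CardTableModRefBS::card_size_in_words). Dirty regions
     // handed back by the card table are card-aligned, so the card count is
     // just the region's word size divided by the words-per-card ratio.
     static size_t dirty_region_card_count_sketch(size_t region_word_size,
                                                  size_t card_size_in_words) {
       // E.g. with 512-byte cards and 8-byte heap words, card_size_in_words
       // is 64, so a region of 4096 heap words spans 64 cards.
       return region_word_size / card_size_in_words;
     }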
4056 
4057 class PrecleanKlassClosure : public KlassClosure {
4058   KlassToOopClosure _cm_klass_closure;
4059  public:
4060   PrecleanKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
4061   void do_klass(Klass* k) {
4062     if (k->has_accumulated_modified_oops()) {
4063       k->clear_accumulated_modified_oops();
4064 
4065       _cm_klass_closure.do_klass(k);
4066     }
4067   }
4068 };
4069 
4070 // The freelist lock is needed to prevent asserts; is it really needed?
4071 void CMSCollector::preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock) {
4072 
4073   cl->set_freelistLock(freelistLock);
4074 
4075   CMSTokenSyncWithLocks ts(true, freelistLock, bitMapLock());
4076 
4077   // SSS: Add equivalent to ScanMarkedObjectsAgainCarefullyClosure::do_yield_check and should_abort_preclean?
4078   // SSS: We should probably check if precleaning should be aborted, at suitable intervals?
4079   PrecleanKlassClosure preclean_klass_closure(cl);
4080   ClassLoaderDataGraph::classes_do(&preclean_klass_closure);
4081 
4082   verify_work_stacks_empty();
4083   verify_overflow_empty();
4084 }
4085 
4086 void CMSCollector::checkpointRootsFinal() {
4087   assert(_collectorState == FinalMarking, "incorrect state transition?");
4088   check_correct_thread_executing();
4089   // world is stopped at this checkpoint
4090   assert(SafepointSynchronize::is_at_safepoint(),
4091          "world should be stopped");
4092   TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
4093 
4094   verify_work_stacks_empty();
4095   verify_overflow_empty();
4096 
4097   log_debug(gc)("YG occupancy: " SIZE_FORMAT " K (" SIZE_FORMAT " K)",
4098                 _young_gen->used() / K, _young_gen->capacity() / K);
4099   {
4100     if (CMSScavengeBeforeRemark) {
4101       GenCollectedHeap* gch = GenCollectedHeap::heap();
4102       // Temporarily set flag to false; GCH->do_collection will
4103       // expect it to be false and will set it to true.
4104       FlagSetting fl(gch->_is_gc_active, false);
4105 
4106       GCTraceTime(Trace, gc) tm("Pause Scavenge Before Remark", _gc_timer_cm);
4107 
4108       gch->do_collection(true,                      // full (i.e. force, see below)
4109                          false,                     // !clear_all_soft_refs
4110                          0,                         // size
4111                          false,                     // is_tlab
4112                          GenCollectedHeap::YoungGen // type
4113         );
4114     }
4115     FreelistLocker x(this);
4116     MutexLockerEx y(bitMapLock(),
4117                     Mutex::_no_safepoint_check_flag);
4118     checkpointRootsFinalWork();
4119   }
4120   verify_work_stacks_empty();
4121   verify_overflow_empty();
4122 }
4123 
4124 void CMSCollector::checkpointRootsFinalWork() {
4125   GCTraceTime(Trace, gc) tm("checkpointRootsFinalWork", _gc_timer_cm);
4126 
4127   assert(haveFreelistLocks(), "must have free list locks");
4128   assert_lock_strong(bitMapLock());
4129 
4130   ResourceMark rm;
4131   HandleMark   hm;
4132 
4133   GenCollectedHeap* gch = GenCollectedHeap::heap();
4134 
4135   if (should_unload_classes()) {
4136     CodeCache::gc_prologue();
4137   }
4138   assert(haveFreelistLocks(), "must have free list locks");
4139   assert_lock_strong(bitMapLock());
4140 
4141   // We might assume that we need not fill TLAB's when
4142   // CMSScavengeBeforeRemark is set, because we may have just done
4143   // a scavenge which would have filled all TLAB's -- and besides
4144   // Eden would be empty. This however may not always be the case --
4145   // for instance although we asked for a scavenge, it may not have
4146   // happened because of a JNI critical section. We probably need
4147   // a policy for deciding whether we can in that case wait until
4148   // the critical section releases and then do the remark following
4149   // the scavenge, and skip it here. In the absence of that policy,
4150   // or of an indication of whether the scavenge did indeed occur,
4151   // we cannot rely on TLAB's having been filled and must do
4152   // so here just in case a scavenge did not happen.
4153   gch->ensure_parsability(false);  // fill TLAB's, but no need to retire them
4154   // Update the saved marks which may affect the root scans.
4155   gch->save_marks();
4156 
4157   print_eden_and_survivor_chunk_arrays();
4158 
4159   {
4160 #if defined(COMPILER2) || INCLUDE_JVMCI
4161     DerivedPointerTableDeactivate dpt_deact;
4162 #endif
4163 
4164     // Note on the role of the mod union table:
4165     // Since the marker in "markFromRoots" marks concurrently with
4166     // mutators, it is possible for some reachable objects not to have been
4167     // scanned. For instance, an only reference to an object A was
4168     // placed in object B after the marker scanned B. Unless B is rescanned,
4169     // A would be collected. Such updates to references in marked objects
4170     // are detected via the mod union table which is the set of all cards
4171     // dirtied since the first checkpoint in this GC cycle and prior to
4172     // the most recent young generation GC, minus those cleaned up by the
4173     // concurrent precleaning.
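         // An illustrative timeline of the race being closed here:
         //   t0: the concurrent marker scans B (no reference to A yet)
         //   t1: a mutator stores the only reference to A into a field of B;
         //       the card covering that field is dirtied and later folded
         //       into the mod union table
         //   t2: the remark below rescans the dirty mod-union cards,
         //       re-traces B, and marks A, so A is not lost.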
4174     if (CMSParallelRemarkEnabled) {
4175       GCTraceTime(Debug, gc) t("Rescan (parallel)", _gc_timer_cm);
4176       do_remark_parallel();
4177     } else {
4178       GCTraceTime(Debug, gc) t("Rescan (non-parallel)", _gc_timer_cm);
4179       do_remark_non_parallel();
4180     }
4181   }
4182   verify_work_stacks_empty();
4183   verify_overflow_empty();
4184 
4185   {
4186     GCTraceTime(Trace, gc) ts("refProcessingWork", _gc_timer_cm);
4187     refProcessingWork();
4188   }
4189   verify_work_stacks_empty();
4190   verify_overflow_empty();
4191 
4192   if (should_unload_classes()) {
4193     CodeCache::gc_epilogue();
4194   }
4195   JvmtiExport::gc_epilogue();
4196 
4197   // If we encountered any (marking stack / work queue) overflow
4198   // events during the current CMS cycle, take appropriate
4199   // remedial measures, where possible, so as to try and avoid
4200   // recurrence of that condition.
4201   assert(_markStack.isEmpty(), "No grey objects");
4202   size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
4203                      _ser_kac_ovflw        + _ser_kac_preclean_ovflw;
4204   if (ser_ovflw > 0) {
4205     log_trace(gc)("Marking stack overflow (benign) (pmc_pc=" SIZE_FORMAT ", pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT ", kac_preclean=" SIZE_FORMAT ")",
4206                          _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw, _ser_kac_ovflw, _ser_kac_preclean_ovflw);
4207     _markStack.expand();
4208     _ser_pmc_remark_ovflw = 0;
4209     _ser_pmc_preclean_ovflw = 0;
4210     _ser_kac_preclean_ovflw = 0;
4211     _ser_kac_ovflw = 0;
4212   }
4213   if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
4214     log_trace(gc)("Work queue overflow (benign) (pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT ")",
4215                   _par_pmc_remark_ovflw, _par_kac_ovflw);
4216     _par_pmc_remark_ovflw = 0;
4217     _par_kac_ovflw = 0;
4218   }
4219   if (_markStack._hit_limit > 0) {
4220     log_trace(gc)(" (benign) Hit max stack size limit (" SIZE_FORMAT ")",
4221                   _markStack._hit_limit);
4222   }
4223   if (_markStack._failed_double > 0) {
4224     log_trace(gc)(" (benign) Failed stack doubling (" SIZE_FORMAT "), current capacity " SIZE_FORMAT,
4225                   _markStack._failed_double, _markStack.capacity());
4226   }
4227   _markStack._hit_limit = 0;
4228   _markStack._failed_double = 0;
4229 
4230   if ((VerifyAfterGC || VerifyDuringGC) &&
4231       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
4232     verify_after_remark();
4233   }
4234 
4235   _gc_tracer_cm->report_object_count_after_gc(&_is_alive_closure);
4236 
4237   // Change under the freelistLocks.
4238   _collectorState = Sweeping;
4239   // Call isAllClear() under bitMapLock
4240   assert(_modUnionTable.isAllClear(),
4241       "Should be clear by end of the final marking");
4242   assert(_ct->klass_rem_set()->mod_union_is_clear(),
4243       "Should be clear by end of the final marking");
4244 }
4245 
4246 void CMSParInitialMarkTask::work(uint worker_id) {
4247   elapsedTimer _timer;
4248   ResourceMark rm;
4249   HandleMark   hm;
4250 
4251   // ---------- scan from roots --------------
4252   _timer.start();
4253   GenCollectedHeap* gch = GenCollectedHeap::heap();
4254   ParMarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
4255 
4256   // ---------- young gen roots --------------
4257   {
4258     work_on_young_gen_roots(worker_id, &par_mri_cl);
4259     _timer.stop();
4260     log_trace(gc, task)("Finished young gen initial mark scan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4261   }
4262 
4263   // ---------- remaining roots --------------
4264   _timer.reset();
4265   _timer.start();
4266 
4267   CLDToOopClosure cld_closure(&par_mri_cl, true);
4268 
4269   gch->gen_process_roots(_strong_roots_scope,
4270                          GenCollectedHeap::OldGen,
4271                          false,     // yg was scanned above
4272                          GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
4273                          _collector->should_unload_classes(),
4274                          &par_mri_cl,
4275                          NULL,
4276                          &cld_closure);
4277   assert(_collector->should_unload_classes()
4278          || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
4279          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
4280   _timer.stop();
4281   log_trace(gc, task)("Finished remaining root initial mark scan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4282 }
4283 
4284 // Parallel remark task
4285 class CMSParRemarkTask: public CMSParMarkTask {
4286   CompactibleFreeListSpace* _cms_space;
4287 
4288   // The per-thread work queues, available here for stealing.
4289   OopTaskQueueSet*       _task_queues;
4290   ParallelTaskTerminator _term;
4291   StrongRootsScope*      _strong_roots_scope;
4292 
4293  public:
4294   // A value of 0 passed to n_workers will cause the number of
4295   // workers to be taken from the active workers in the work gang.
4296   CMSParRemarkTask(CMSCollector* collector,
4297                    CompactibleFreeListSpace* cms_space,
4298                    uint n_workers, WorkGang* workers,
4299                    OopTaskQueueSet* task_queues,
4300                    StrongRootsScope* strong_roots_scope):
4301     CMSParMarkTask("Rescan roots and grey objects in parallel",
4302                    collector, n_workers),
4303     _cms_space(cms_space),
4304     _task_queues(task_queues),
4305     _term(n_workers, task_queues),
4306     _strong_roots_scope(strong_roots_scope) { }
4307 
4308   OopTaskQueueSet* task_queues() { return _task_queues; }
4309 
4310   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
4311 
4312   ParallelTaskTerminator* terminator() { return &_term; }
4313   uint n_workers() { return _n_workers; }
4314 
4315   void work(uint worker_id);
4316 
4317  private:
4318   // ... of  dirty cards in old space
4319   void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
4320                                   ParMarkRefsIntoAndScanClosure* cl);
4321 
4322   // ... work stealing for the above
4323   void do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl, int* seed);
4324 };
4325 
4326 class RemarkKlassClosure : public KlassClosure {
4327   KlassToOopClosure _cm_klass_closure;
4328  public:
4329   RemarkKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
4330   void do_klass(Klass* k) {
4331     // Check if we have modified any oops in the Klass during the concurrent marking.
4332     if (k->has_accumulated_modified_oops()) {
4333       k->clear_accumulated_modified_oops();
4334 
4335     // We could have transferred the current modified marks to the accumulated marks,
4336       // like we do with the Card Table to Mod Union Table. But it's not really necessary.
4337     } else if (k->has_modified_oops()) {
4338       // Don't clear anything, this info is needed by the next young collection.
4339     } else {
4340       // No modified oops in the Klass.
4341       return;
4342     }
4343 
4344     // The klass has modified fields, need to scan the klass.
4345     _cm_klass_closure.do_klass(k);
4346   }
4347 };
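
     // The three cases in RemarkKlassClosure::do_klass() above, summarized
     // informally (derived directly from the code):
     //
     //   accumulated_modified | modified (current) | action
     //   ---------------------+--------------------+------------------------
     //   true                 | (any)              | clear accumulated, scan
     //   false                | true               | scan, clear nothing
     //   false                | false              | skip
     //
     // The current-modified bit is left alone in the middle case because the
     // next young collection still needs it.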
4348 
4349 void CMSParMarkTask::work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl) {
4350   ParNewGeneration* young_gen = _collector->_young_gen;
4351   ContiguousSpace* eden_space = young_gen->eden();
4352   ContiguousSpace* from_space = young_gen->from();
4353   ContiguousSpace* to_space   = young_gen->to();
4354 
4355   HeapWord** eca = _collector->_eden_chunk_array;
4356   size_t     ect = _collector->_eden_chunk_index;
4357   HeapWord** sca = _collector->_survivor_chunk_array;
4358   size_t     sct = _collector->_survivor_chunk_index;
4359 
4360   assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
4361   assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
4362 
4363   do_young_space_rescan(worker_id, cl, to_space, NULL, 0);
4364   do_young_space_rescan(worker_id, cl, from_space, sca, sct);
4365   do_young_space_rescan(worker_id, cl, eden_space, eca, ect);
4366 }
4367 
4368 // work_queue(i) is passed to the closure
4369 // ParMarkRefsIntoAndScanClosure.  The "i" parameter
4370 // also is passed to do_dirty_card_rescan_tasks() and to
4371 // do_work_steal() to select the i-th task_queue.
4372 
4373 void CMSParRemarkTask::work(uint worker_id) {
4374   elapsedTimer _timer;
4375   ResourceMark rm;
4376   HandleMark   hm;
4377 
4378   // ---------- rescan from roots --------------
4379   _timer.start();
4380   GenCollectedHeap* gch = GenCollectedHeap::heap();
4381   ParMarkRefsIntoAndScanClosure par_mrias_cl(_collector,
4382     _collector->_span, _collector->ref_processor(),
4383     &(_collector->_markBitMap),
4384     work_queue(worker_id));
4385 
4386   // Rescan young gen roots first since these are likely
4387   // coarsely partitioned and may, on that account, constitute
4388   // the critical path; thus, it's best to start off that
4389   // work first.
4390   // ---------- young gen roots --------------
4391   {
4392     work_on_young_gen_roots(worker_id, &par_mrias_cl);
4393     _timer.stop();
4394     log_trace(gc, task)("Finished young gen rescan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4395   }
4396 
4397   // ---------- remaining roots --------------
4398   _timer.reset();
4399   _timer.start();
4400   gch->gen_process_roots(_strong_roots_scope,
4401                          GenCollectedHeap::OldGen,
4402                          false,     // yg was scanned above
4403                          GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
4404                          _collector->should_unload_classes(),
4405                          &par_mrias_cl,
4406                          NULL,
4407                          NULL);     // The dirty klasses will be handled below
4408 
4409   assert(_collector->should_unload_classes()
4410          || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
4411          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
4412   _timer.stop();
4413   log_trace(gc, task)("Finished remaining root rescan work in %dth thread: %3.3f sec",  worker_id, _timer.seconds());
4414 
4415   // ---------- unhandled CLD scanning ----------
4416   if (worker_id == 0) { // Single threaded at the moment.
4417     _timer.reset();
4418     _timer.start();
4419 
4420     // Scan all new class loader data objects and new dependencies that were
4421     // introduced during concurrent marking.
4422     ResourceMark rm;
4423     GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
4424     for (int i = 0; i < array->length(); i++) {
4425       par_mrias_cl.do_cld_nv(array->at(i));
4426     }
4427 
4428     // We don't need to keep track of new CLDs anymore.
4429     ClassLoaderDataGraph::remember_new_clds(false);
4430 
4431     _timer.stop();
4432     log_trace(gc, task)("Finished unhandled CLD scanning work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4433   }
4434 
4435   // ---------- dirty klass scanning ----------
4436   if (worker_id == 0) { // Single threaded at the moment.
4437     _timer.reset();
4438     _timer.start();
4439 
4440     // Scan all classes that were dirtied during the concurrent marking phase.
4441     RemarkKlassClosure remark_klass_closure(&par_mrias_cl);
4442     ClassLoaderDataGraph::classes_do(&remark_klass_closure);
4443 
4444     _timer.stop();
4445     log_trace(gc, task)("Finished dirty klass scanning work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4446   }
4447 
4448   // We might have added oops to ClassLoaderData::_handles during the
4449   // concurrent marking phase. These oops point to newly allocated objects
4450   // that are guaranteed to be kept alive. Either by the direct allocation
4451   // code, or when the young collector processes the roots. Hence,
4452   // we don't have to revisit the _handles block during the remark phase.
4453 
4454   // ---------- rescan dirty cards ------------
4455   _timer.reset();
4456   _timer.start();
4457 
4458   // Do the rescan tasks for each of the two spaces
4459   // (cms_space) in turn.
4460   // "worker_id" is passed to select the task_queue for "worker_id"
4461   do_dirty_card_rescan_tasks(_cms_space, worker_id, &par_mrias_cl);
4462   _timer.stop();
4463   log_trace(gc, task)("Finished dirty card rescan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4464 
4465   // ---------- steal work from other threads ...
4466   // ---------- ... and drain overflow list.
4467   _timer.reset();
4468   _timer.start();
4469   do_work_steal(worker_id, &par_mrias_cl, _collector->hash_seed(worker_id));
4470   _timer.stop();
4471   log_trace(gc, task)("Finished work stealing in %dth thread: %3.3f sec", worker_id, _timer.seconds());
4472 }
4473 
4474 // Note that parameter "worker_id" is not used.
4475 void
4476 CMSParMarkTask::do_young_space_rescan(uint worker_id,
4477   OopsInGenClosure* cl, ContiguousSpace* space,
4478   HeapWord** chunk_array, size_t chunk_top) {
4479   // Until all tasks completed:
4480   // . claim an unclaimed task
4481   // . compute region boundaries corresponding to task claimed
4482   //   using chunk_array
4483   // . par_oop_iterate(cl) over that region
4484 
4485   ResourceMark rm;
4486   HandleMark   hm;
4487 
4488   SequentialSubTasksDone* pst = space->par_seq_tasks();
4489 
4490   uint nth_task = 0;
4491   uint n_tasks  = pst->n_tasks();
4492 
4493   if (n_tasks > 0) {
4494     assert(pst->valid(), "Uninitialized use?");
4495     HeapWord *start, *end;
4496     while (!pst->is_task_claimed(/* reference */ nth_task)) {
4497       // We claimed task # nth_task; compute its boundaries.
4498       if (chunk_top == 0) {  // no samples were taken
4499         assert(nth_task == 0 && n_tasks == 1, "Can have only 1 eden task");
4500         start = space->bottom();
4501         end   = space->top();
4502       } else if (nth_task == 0) {
4503         start = space->bottom();
4504         end   = chunk_array[nth_task];
4505       } else if (nth_task < (uint)chunk_top) {
4506         assert(nth_task >= 1, "Control point invariant");
4507         start = chunk_array[nth_task - 1];
4508         end   = chunk_array[nth_task];
4509       } else {
4510         assert(nth_task == (uint)chunk_top, "Control point invariant");
4511         start = chunk_array[chunk_top - 1];
4512         end   = space->top();
4513       }
4514       MemRegion mr(start, end);
4515       // Verify that mr is in space
4516       assert(mr.is_empty() || space->used_region().contains(mr),
4517              "Should be in space");
4518       // Verify that "start" is an object boundary
4519       assert(mr.is_empty() || oop(mr.start())->is_oop(),
4520              "Should be an oop");
4521       space->par_oop_iterate(mr, cl);
4522     }
4523     pst->all_tasks_completed();
4524   }
4525 }
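
     // The task-to-region mapping used above, as a standalone sketch
     // (hypothetical helper, not called by the collector; 'chunk_array'
     // holds sampled object boundaries and 'chunk_top' the sample count).
     // Task k scans a right-open interval whose endpoints come from the
     // samples, with the first and last tasks pinned to the space's bottom
     // and top respectively.
     static MemRegion nth_task_region_sketch(uint nth_task,
                                             HeapWord** chunk_array,
                                             uint chunk_top,
                                             HeapWord* bottom,
                                             HeapWord* top) {
       if (chunk_top == 0) {       // no samples: a single task covers it all
         return MemRegion(bottom, top);
       }
       HeapWord* start = (nth_task == 0) ? bottom : chunk_array[nth_task - 1];
       HeapWord* end   = (nth_task < chunk_top) ? chunk_array[nth_task] : top;
       return MemRegion(start, end);
     }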
4526 
4527 void
4528 CMSParRemarkTask::do_dirty_card_rescan_tasks(
4529   CompactibleFreeListSpace* sp, int i,
4530   ParMarkRefsIntoAndScanClosure* cl) {
4531   // Until all tasks completed:
4532   // . claim an unclaimed task
4533   // . compute region boundaries corresponding to task claimed
4534   // . transfer dirty bits ct->mut for that region
4535   // . apply rescanclosure to dirty mut bits for that region
4536 
4537   ResourceMark rm;
4538   HandleMark   hm;
4539 
4540   OopTaskQueue* work_q = work_queue(i);
4541   ModUnionClosure modUnionClosure(&(_collector->_modUnionTable));
4542   // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
4543   // CAUTION: This closure has state that persists across calls to
4544   // the work method dirty_range_iterate_clear() in that it has
4545   // embedded in it a (subtype of) UpwardsObjectClosure. The
4546   // use of that state in the embedded UpwardsObjectClosure instance
4547   // assumes that the cards are always iterated (even if in parallel
4548   // by several threads) in monotonically increasing order per each
4549   // thread. This is true of the implementation below which picks
4550   // card ranges (chunks) in monotonically increasing order globally
4551   // and, a-fortiori, in monotonically increasing order per thread
4552   // (the latter order being a subsequence of the former).
4553   // If the work code below is ever reorganized into a more chaotic
4554   // work-partitioning form than the current "sequential tasks"
4555   // paradigm, the use of that persistent state will have to be
4556   // revisited and modified appropriately. See also related
4557   // bug 4756801 work on which should examine this code to make
4558   // sure that the changes there do not run counter to the
4559   // assumptions made here and necessary for correctness and
4560   // efficiency. Note also that this code might yield inefficient
4561   // behavior in the case of very large objects that span one or
4562   // more work chunks. Such objects would potentially be scanned
4563   // several times redundantly. Work on 4756801 should try and
4564   // address that performance anomaly if at all possible. XXX
4565   MemRegion  full_span  = _collector->_span;
4566   CMSBitMap* bm    = &(_collector->_markBitMap);     // shared
4567   MarkFromDirtyCardsClosure
4568     greyRescanClosure(_collector, full_span, // entire span of interest
4569                       sp, bm, work_q, cl);
4570 
4571   SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
4572   assert(pst->valid(), "Uninitialized use?");
4573   uint nth_task = 0;
4574   const int alignment = CardTableModRefBS::card_size * BitsPerWord;
4575   MemRegion span = sp->used_region();
4576   HeapWord* start_addr = span.start();
4577   HeapWord* end_addr = (HeapWord*)round_to((intptr_t)span.end(),
4578                                            alignment);
4579   const size_t chunk_size = sp->rescan_task_size(); // in HeapWord units
4580   assert((HeapWord*)round_to((intptr_t)start_addr, alignment) ==
4581          start_addr, "Check alignment");
4582   assert((size_t)round_to((intptr_t)chunk_size, alignment) ==
4583          chunk_size, "Check alignment");
4584 
4585   while (!pst->is_task_claimed(/* reference */ nth_task)) {
4586     // Having claimed the nth_task, compute corresponding mem-region,
4587     // which is a-fortiori aligned correctly (i.e. at a MUT boundary).
4588     // The alignment restriction ensures that we do not need any
4589     // synchronization with other gang-workers while setting or
4590     // clearing bits in this chunk of the MUT.
4591     MemRegion this_span = MemRegion(start_addr + nth_task*chunk_size,
4592                                     start_addr + (nth_task+1)*chunk_size);
4593     // The last chunk's end might be way beyond end of the
4594     // used region. In that case pull back appropriately.
4595     if (this_span.end() > end_addr) {
4596       this_span.set_end(end_addr);
4597       assert(!this_span.is_empty(), "Program logic (calculation of n_tasks)");
4598     }
4599     // Iterate over the dirty cards covering this chunk, marking them
4600     // precleaned, and setting the corresponding bits in the mod union
4601     // table. Since we have been careful to partition at Card and MUT-word
4602     // boundaries no synchronization is needed between parallel threads.
4603     _collector->_ct->ct_bs()->dirty_card_iterate(this_span,
4604                                                  &modUnionClosure);
4605 
4606     // Having transferred these marks into the modUnionTable,
4607     // rescan the marked objects on the dirty cards in the modUnionTable.
4608     // Even if this is at a synchronous collection, the initial marking
4609     // may have been done during an asynchronous collection so there
4610     // may be dirty bits in the mod-union table.
4611     _collector->_modUnionTable.dirty_range_iterate_clear(
4612                   this_span, &greyRescanClosure);
4613     _collector->_modUnionTable.verifyNoOneBitsInRange(
4614                                  this_span.start(),
4615                                  this_span.end());
4616   }
4617   pst->all_tasks_completed();  // declare that i am done
4618 }
4619 
4620 // . see if we can share work_queues with ParNew? XXX
4621 void
4622 CMSParRemarkTask::do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl,
4623                                 int* seed) {
4624   OopTaskQueue* work_q = work_queue(i);
4625   NOT_PRODUCT(int num_steals = 0;)
4626   oop obj_to_scan;
4627   CMSBitMap* bm = &(_collector->_markBitMap);
4628 
4629   while (true) {
4630     // Completely finish any left over work from (an) earlier round(s)
4631     cl->trim_queue(0);
4632     size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
4633                                          (size_t)ParGCDesiredObjsFromOverflowList);
4634     // Now check if there's any work in the overflow list
4635     // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
4636     // only affects the number of attempts made to get work from the
4637     // overflow list and does not affect the number of workers.  Just
4638     // pass ParallelGCThreads so this behavior is unchanged.
4639     if (_collector->par_take_from_overflow_list(num_from_overflow_list,
4640                                                 work_q,
4641                                                 ParallelGCThreads)) {
4642       // found something in global overflow list;
4643       // not yet ready to go stealing work from others.
4644       // We'd like to assert(work_q->size() != 0, ...)
4645       // because we just took work from the overflow list,
4646       // but of course we can't since all of that could have
4647       // been already stolen from us.
4648       // "He giveth and He taketh away."
4649       continue;
4650     }
4651     // Verify that we have no work before we resort to stealing
4652     assert(work_q->size() == 0, "Have work, shouldn't steal");
4653     // Try to steal from other queues that have work
4654     if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
4655       NOT_PRODUCT(num_steals++;)
4656       assert(obj_to_scan->is_oop(), "Oops, not an oop!");
4657       assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
4658       // Do scanning work
4659       obj_to_scan->oop_iterate(cl);
4660       // Loop around, finish this work, and try to steal some more
4661     } else if (terminator()->offer_termination()) {
4662         break;  // nirvana from the infinite cycle
4663     }
4664   }
4665   log_develop_trace(gc, task)("\t(%d: stole %d oops)", i, num_steals);
4666   assert(work_q->size() == 0 && _collector->overflow_list_is_empty(),
4667          "Else our work is not yet done");
4668 }
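
     // The stealing protocol above, in outline: (1) drain the local queue;
     // (2) try to refill it from the global overflow list; (3) only once the
     // local queue is verifiably empty, try to steal from another worker's
     // queue; (4) when nothing is left anywhere, offer termination and exit
     // once all workers have agreed to terminate.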
4669 
4670 // Record object boundaries in _eden_chunk_array by sampling the eden
4671 // top in the slow-path eden object allocation code path and record
4672 // the boundaries, if CMSEdenChunksRecordAlways is true. If
4673 // CMSEdenChunksRecordAlways is false, we use the other asynchronous
4674 // sampling in sample_eden() that activates during part of the
4675 // preclean phase.
4676 void CMSCollector::sample_eden_chunk() {
4677   if (CMSEdenChunksRecordAlways && _eden_chunk_array != NULL) {
4678     if (_eden_chunk_lock->try_lock()) {
4679       // Record a sample. This is the critical section. The contents
4680       // of the _eden_chunk_array have to be non-decreasing in the
4681       // address order.
4682       _eden_chunk_array[_eden_chunk_index] = *_top_addr;
4683       assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
4684              "Unexpected state of Eden");
4685       if (_eden_chunk_index == 0 ||
4686           ((_eden_chunk_array[_eden_chunk_index] > _eden_chunk_array[_eden_chunk_index-1]) &&
4687            (pointer_delta(_eden_chunk_array[_eden_chunk_index],
4688                           _eden_chunk_array[_eden_chunk_index-1]) >= CMSSamplingGrain))) {
4689         _eden_chunk_index++;  // commit sample
4690       }
4691       _eden_chunk_lock->unlock();
4692     }
4693   }
4694 }
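
     // For example (illustrative, assuming the default CMSSamplingGrain of
     // 16K heap words): if the last committed sample is at address p and the
     // current eden top is q, the tentative entry at _eden_chunk_index is
     // committed only when q > p and pointer_delta(q, p) >= 16K words;
     // smaller advances simply overwrite the tentative entry on the next
     // call, keeping the array sorted and the chunks usefully coarse.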
4695 
4696 // Return a thread-local PLAB recording array, as appropriate.
4697 void* CMSCollector::get_data_recorder(int thr_num) {
4698   if (_survivor_plab_array != NULL &&
4699       (CMSPLABRecordAlways ||
4700        (_collectorState > Marking && _collectorState < FinalMarking))) {
4701     assert(thr_num < (int)ParallelGCThreads, "thr_num is out of bounds");
4702     ChunkArray* ca = &_survivor_plab_array[thr_num];
4703     ca->reset();   // clear it so that fresh data is recorded
4704     return (void*) ca;
4705   } else {
4706     return NULL;
4707   }
4708 }
4709 
4710 // Reset all the thread-local PLAB recording arrays
4711 void CMSCollector::reset_survivor_plab_arrays() {
4712   for (uint i = 0; i < ParallelGCThreads; i++) {
4713     _survivor_plab_array[i].reset();
4714   }
4715 }
4716 
4717 // Merge the per-thread plab arrays into the global survivor chunk
4718 // array which will provide the partitioning of the survivor space
4719 // for CMS initial scan and rescan.
4720 void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv,
4721                                               int no_of_gc_threads) {
4722   assert(_survivor_plab_array  != NULL, "Error");
4723   assert(_survivor_chunk_array != NULL, "Error");
4724   assert(_collectorState == FinalMarking ||
4725          (CMSParallelInitialMarkEnabled && _collectorState == InitialMarking), "Error");
4726   for (int j = 0; j < no_of_gc_threads; j++) {
4727     _cursor[j] = 0;
4728   }
4729   HeapWord* top = surv->top();
4730   size_t i;
4731   for (i = 0; i < _survivor_chunk_capacity; i++) {  // all sca entries
4732     HeapWord* min_val = top;          // Higher than any PLAB address
4733     uint      min_tid = 0;            // position of min_val this round
4734     for (int j = 0; j < no_of_gc_threads; j++) {
4735       ChunkArray* cur_sca = &_survivor_plab_array[j];
4736       if (_cursor[j] == cur_sca->end()) {
4737         continue;
4738       }
4739       assert(_cursor[j] < cur_sca->end(), "ctl pt invariant");
4740       HeapWord* cur_val = cur_sca->nth(_cursor[j]);
4741       assert(surv->used_region().contains(cur_val), "Out of bounds value");
4742       if (cur_val < min_val) {
4743         min_tid = j;
4744         min_val = cur_val;
4745       } else {
4746         assert(cur_val < top, "All recorded addresses should be less");
4747       }
4748     }
4749     // At this point min_val and min_tid are respectively
4750     // the least address among _survivor_plab_array[j].nth(_cursor[j])
4751     // over all threads j, and the thread that contributed that address.
4752     // We record this address in the _survivor_chunk_array[i]
4753     // and increment _cursor[min_tid] prior to the next round i.
4754     if (min_val == top) {
4755       break;
4756     }
4757     _survivor_chunk_array[i] = min_val;
4758     _cursor[min_tid]++;
4759   }
4760   // We are all done; record the size of the _survivor_chunk_array
4761   _survivor_chunk_index = i; // exclusive: [0, i)
4762   log_trace(gc, survivor)(" (Survivor: " SIZE_FORMAT " chunks) ", i);
4763   // Verify that we used up all the recorded entries
4764   #ifdef ASSERT
4765     size_t total = 0;
4766     for (int j = 0; j < no_of_gc_threads; j++) {
4767       assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");
4768       total += _cursor[j];
4769     }
4770     assert(total == _survivor_chunk_index, "Ctl Pt Invariant");
4771     // Check that the merged array is in sorted order
4772     if (total > 0) {
4773       for (size_t i = 0; i < total - 1; i++) {
4774         log_develop_trace(gc, survivor)(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
4775                                      i, p2i(_survivor_chunk_array[i]));
4776         assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
4777                "Not sorted");
4778       }
4779     }
4780   #endif // ASSERT
4781 }
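
     // The loop above is a k-way merge of the per-thread sorted PLAB sample
     // arrays into one sorted chunk array. The same idea in a minimal
     // standalone form (hypothetical helper; 'cursors' must be
     // zero-initialized, and 'sentinel' plays the role that 'top' plays
     // above, as a value greater than any recorded address):
     static size_t kway_merge_sketch(HeapWord** arrays[], size_t* lens,
                                     size_t* cursors, int k,
                                     HeapWord* sentinel, HeapWord** out) {
       size_t n = 0;
       for (;;) {
         HeapWord* min_val = sentinel;  // higher than any recorded address
         int       min_j   = -1;
         for (int j = 0; j < k; j++) {  // find the least unconsumed head
           if (cursors[j] < lens[j] && arrays[j][cursors[j]] < min_val) {
             min_val = arrays[j][cursors[j]];
             min_j   = j;
           }
         }
         if (min_j < 0) break;          // every input array is exhausted
         out[n++] = min_val;            // emit, then advance that cursor
         cursors[min_j]++;
       }
       return n;                        // number of merged entries
     }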
4782 
4783 // Set up the space's par_seq_tasks structure for work claiming
4784 // for parallel initial scan and rescan of young gen.
4785 // See ParRescanTask where this is currently used.
4786 void
4787 CMSCollector::
4788 initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
4789   assert(n_threads > 0, "Unexpected n_threads argument");
4790 
4791   // Eden space
4792   if (!_young_gen->eden()->is_empty()) {
4793     SequentialSubTasksDone* pst = _young_gen->eden()->par_seq_tasks();
4794     assert(!pst->valid(), "Clobbering existing data?");
4795     // Each valid entry in [0, _eden_chunk_index) represents a task.
4796     size_t n_tasks = _eden_chunk_index + 1;
4797     assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error");
4798     // Sets the condition for completion of the subtask (how many threads
4799     // need to finish in order to be done).
4800     pst->set_n_threads(n_threads);
4801     pst->set_n_tasks((int)n_tasks);
4802   }
4803 
4804   // Merge the survivor plab arrays into _survivor_chunk_array
4805   if (_survivor_plab_array != NULL) {
4806     merge_survivor_plab_arrays(_young_gen->from(), n_threads);
4807   } else {
4808     assert(_survivor_chunk_index == 0, "Error");
4809   }
4810 
4811   // To space
4812   {
4813     SequentialSubTasksDone* pst = _young_gen->to()->par_seq_tasks();
4814     assert(!pst->valid(), "Clobbering existing data?");
4815     // Sets the condition for completion of the subtask (how many threads
4816     // need to finish in order to be done).
4817     pst->set_n_threads(n_threads);
4818     pst->set_n_tasks(1);
4819     assert(pst->valid(), "Error");
4820   }
4821 
4822   // From space
4823   {
4824     SequentialSubTasksDone* pst = _young_gen->from()->par_seq_tasks();
4825     assert(!pst->valid(), "Clobbering existing data?");
4826     size_t n_tasks = _survivor_chunk_index + 1;
4827     assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error");
4828     // Sets the condition for completion of the subtask (how many threads
4829     // need to finish in order to be done).
4830     pst->set_n_threads(n_threads);
4831     pst->set_n_tasks((int)n_tasks);
4832     assert(pst->valid(), "Error");
4833   }
4834 }
4835 
4836 // Parallel version of remark
4837 void CMSCollector::do_remark_parallel() {
4838   GenCollectedHeap* gch = GenCollectedHeap::heap();
4839   WorkGang* workers = gch->workers();
4840   assert(workers != NULL, "Need parallel worker threads.");
4841   // Choose to use the number of GC workers most recently set
4842   // into "active_workers".
4843   uint n_workers = workers->active_workers();
4844 
4845   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
4846 
4847   StrongRootsScope srs(n_workers);
4848 
4849   CMSParRemarkTask tsk(this, cms_space, n_workers, workers, task_queues(), &srs);
4850 
4851   // We won't be iterating over the cards in the card table updating
4852   // the younger_gen cards, so we shouldn't call the following else
4853   // the verification code as well as subsequent younger_refs_iterate
4854   // code would get confused. XXX
4855   // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
4856 
4857   // The young gen rescan work will not be done as part of
4858   // process_roots (which currently doesn't know how to
4859   // parallelize such a scan), but rather will be broken up into
4860   // a set of parallel tasks (via the sampling that the [abortable]
4861   // preclean phase did of eden, plus the [two] tasks of
4862   // scanning the [two] survivor spaces). Further fine-grain
4863   // parallelization of the scanning of the survivor spaces
4864   // themselves, and of precleaning of the young gen itself
4865   // is deferred to the future.
4866   initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
4867 
4868   // The dirty card rescan work is broken up into a "sequence"
4869   // of parallel tasks (per constituent space) that are dynamically
4870   // claimed by the parallel threads.
4871   cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
4872 
4873   // It turns out that even when we're using 1 thread, doing the work in a
4874   // separate thread causes wide variance in run times.  We can't help this
4875   // in the multi-threaded case, but we special-case n=1 here to get
4876   // repeatable measurements of the 1-thread overhead of the parallel code.
4877   if (n_workers > 1) {
4878     // Make refs discovery MT-safe, if it isn't already: it may not
4879     // necessarily be so, since it's possible that we are doing
4880     // ST marking.
4881     ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true);
4882     workers->run_task(&tsk);
4883   } else {
4884     ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
4885     tsk.work(0);
4886   }
4887 
4888   // restore, single-threaded for now, any preserved marks
4889   // as a result of work_q overflow
4890   restore_preserved_marks_if_any();
4891 }
4892 
4893 // Non-parallel version of remark
4894 void CMSCollector::do_remark_non_parallel() {
4895   ResourceMark rm;
4896   HandleMark   hm;
4897   GenCollectedHeap* gch = GenCollectedHeap::heap();
4898   ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
4899 
4900   MarkRefsIntoAndScanClosure
4901     mrias_cl(_span, ref_processor(), &_markBitMap, NULL /* not precleaning */,
4902              &_markStack, this,
4903              false /* should_yield */, false /* not precleaning */);
4904   MarkFromDirtyCardsClosure
4905     markFromDirtyCardsClosure(this, _span,
4906                               NULL,  // space is set further below
4907                               &_markBitMap, &_markStack, &mrias_cl);
4908   {
4909     GCTraceTime(Trace, gc) t("Grey Object Rescan", _gc_timer_cm);
4910     // Iterate over the dirty cards, setting the corresponding bits in the
4911     // mod union table.
4912     {
4913       ModUnionClosure modUnionClosure(&_modUnionTable);
4914       _ct->ct_bs()->dirty_card_iterate(
4915                       _cmsGen->used_region(),
4916                       &modUnionClosure);
4917     }
4918     // Having transferred these marks into the modUnionTable, we just need
4919     // to rescan the marked objects on the dirty cards in the modUnionTable.
4920     // The initial marking may have been done during an asynchronous
4921     // collection so there may be dirty bits in the mod-union table.
4922     const int alignment =
4923       CardTableModRefBS::card_size * BitsPerWord;
4924     {
4925       // ... First handle dirty cards in CMS gen
4926       markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
4927       MemRegion ur = _cmsGen->used_region();
4928       HeapWord* lb = ur.start();
4929       HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
4930       MemRegion cms_span(lb, ub);
4931       _modUnionTable.dirty_range_iterate_clear(cms_span,
4932                                                &markFromDirtyCardsClosure);
4933       verify_work_stacks_empty();
4934       log_trace(gc)(" (re-scanned " SIZE_FORMAT " dirty cards in cms gen) ", markFromDirtyCardsClosure.num_dirty_cards());
4935     }
4936   }
4937   if (VerifyDuringGC &&
4938       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
4939     HandleMark hm;  // Discard invalid handles created during verification
4940     Universe::verify();
4941   }
4942   {
4943     GCTraceTime(Trace, gc) t("Root Rescan", _gc_timer_cm);
4944 
4945     verify_work_stacks_empty();
4946 
4947     gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
4948     StrongRootsScope srs(1);
4949 
4950     gch->gen_process_roots(&srs,
4951                            GenCollectedHeap::OldGen,
4952                            true,  // young gen as roots
4953                            GenCollectedHeap::ScanningOption(roots_scanning_options()),
4954                            should_unload_classes(),
4955                            &mrias_cl,
4956                            NULL,
4957                            NULL); // The dirty klasses will be handled below
4958 
4959     assert(should_unload_classes()
4960            || (roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
4961            "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
4962   }
4963 
4964   {
4965     GCTraceTime(Trace, gc) t("Visit Unhandled CLDs", _gc_timer_cm);
4966 
4967     verify_work_stacks_empty();
4968 
4969     // Scan all class loader data objects that might have been introduced
4970     // during concurrent marking.
4971     ResourceMark rm;
4972     GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
4973     for (int i = 0; i < array->length(); i++) {
4974       mrias_cl.do_cld_nv(array->at(i));
4975     }
4976 
4977     // We don't need to keep track of new CLDs anymore.
4978     ClassLoaderDataGraph::remember_new_clds(false);
4979 
4980     verify_work_stacks_empty();
4981   }
4982 
4983   {
4984     GCTraceTime(Trace, gc) t("Dirty Klass Scan", _gc_timer_cm);
4985 
4986     verify_work_stacks_empty();
4987 
4988     RemarkKlassClosure remark_klass_closure(&mrias_cl);
4989     ClassLoaderDataGraph::classes_do(&remark_klass_closure);
4990 
4991     verify_work_stacks_empty();
4992   }
4993 
4994   // We might have added oops to ClassLoaderData::_handles during the
4995   // concurrent marking phase. These oops point to newly allocated objects
4996   // that are guaranteed to be kept alive. Either by the direct allocation
4997   // code, or when the young collector processes the roots. Hence,
4998   // we don't have to revisit the _handles block during the remark phase.
4999 
5000   verify_work_stacks_empty();
5001   // Restore evacuated mark words, if any, used for overflow list links
5002   restore_preserved_marks_if_any();
5003 
5004   verify_overflow_empty();
5005 }
5006 
5007 ////////////////////////////////////////////////////////
5008 // Parallel Reference Processing Task Proxy Class
5009 ////////////////////////////////////////////////////////
5010 class AbstractGangTaskWOopQueues : public AbstractGangTask {
5011   OopTaskQueueSet*       _queues;
5012   ParallelTaskTerminator _terminator;
5013  public:
5014   AbstractGangTaskWOopQueues(const char* name, OopTaskQueueSet* queues, uint n_threads) :
5015     AbstractGangTask(name), _queues(queues), _terminator(n_threads, _queues) {}
5016   ParallelTaskTerminator* terminator() { return &_terminator; }
5017   OopTaskQueueSet* queues() { return _queues; }
5018 };
5019 
5020 class CMSRefProcTaskProxy: public AbstractGangTaskWOopQueues {
5021   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
5022   CMSCollector*          _collector;
5023   CMSBitMap*             _mark_bit_map;
5024   const MemRegion        _span;
5025   ProcessTask&           _task;
5026 
5027 public:
5028   CMSRefProcTaskProxy(ProcessTask&     task,
5029                       CMSCollector*    collector,
5030                       const MemRegion& span,
5031                       CMSBitMap*       mark_bit_map,
5032                       AbstractWorkGang* workers,
5033                       OopTaskQueueSet* task_queues):
5034     AbstractGangTaskWOopQueues("Process referents by policy in parallel",
5035       task_queues,
5036       workers->active_workers()),
5037     _task(task),
5038     _collector(collector), _span(span), _mark_bit_map(mark_bit_map)
5039   {
5040     assert(_collector->_span.equals(_span) && !_span.is_empty(),
5041            "Inconsistency in _span");
5042   }
5043 
5044   OopTaskQueueSet* task_queues() { return queues(); }
5045 
5046   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
5047 
5048   void do_work_steal(int i,
5049                      CMSParDrainMarkingStackClosure* drain,
5050                      CMSParKeepAliveClosure* keep_alive,
5051                      int* seed);
5052 
5053   virtual void work(uint worker_id);
5054 };
5055 
5056 void CMSRefProcTaskProxy::work(uint worker_id) {
5057   ResourceMark rm;
5058   HandleMark hm;
5059   assert(_collector->_span.equals(_span), "Inconsistency in _span");
5060   CMSParKeepAliveClosure par_keep_alive(_collector, _span,
5061                                         _mark_bit_map,
5062                                         work_queue(worker_id));
5063   CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
5064                                                  _mark_bit_map,
5065                                                  work_queue(worker_id));
5066   CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map);
5067   _task.work(worker_id, is_alive_closure, par_keep_alive, par_drain_stack);
5068   if (_task.marks_oops_alive()) {
5069     do_work_steal(worker_id, &par_drain_stack, &par_keep_alive,
5070                   _collector->hash_seed(worker_id));
5071   }
5072   assert(work_queue(worker_id)->size() == 0, "work_queue should be empty");
5073   assert(_collector->_overflow_list == NULL, "non-empty _overflow_list");
5074 }
5075 
5076 class CMSRefEnqueueTaskProxy: public AbstractGangTask {
5077   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
5078   EnqueueTask& _task;
5079 
5080 public:
5081   CMSRefEnqueueTaskProxy(EnqueueTask& task)
5082     : AbstractGangTask("Enqueue reference objects in parallel"),
5083       _task(task)
5084   { }
5085 
5086   virtual void work(uint worker_id)
5087   {
5088     _task.work(worker_id);
5089   }
5090 };
5091 
5092 CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
5093   MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue):
5094    _span(span),
5095    _bit_map(bit_map),
5096    _work_queue(work_queue),
5097    _mark_and_push(collector, span, bit_map, work_queue),
5098    _low_water_mark(MIN2((work_queue->max_elems()/4),
5099                         ((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads)))
5100 { }
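     // Commentary on _low_water_mark above (illustrative numbers only):
     // trim_queue() drains the local queue down to this mark, so the mark
     // balances two concerns -- keep enough entries queued that idle workers
     // can steal, while bounding the local backlog. E.g. with a 16K-element
     // queue and CMSWorkQueueDrainThreshold * ParallelGCThreads == 80, the
     // flag-derived term 80 is the smaller of the two and wins the MIN2.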
5101 
5102 // . see if we can share work_queues with ParNew? XXX
5103 void CMSRefProcTaskProxy::do_work_steal(int i,
5104   CMSParDrainMarkingStackClosure* drain,
5105   CMSParKeepAliveClosure* keep_alive,
5106   int* seed) {
5107   OopTaskQueue* work_q = work_queue(i);
5108   NOT_PRODUCT(int num_steals = 0;)
5109   oop obj_to_scan;
5110 
5111   while (true) {
5112     // Completely finish any leftover work from earlier rounds
5113     drain->trim_queue(0);
5114     size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
5115                                          (size_t)ParGCDesiredObjsFromOverflowList);
5116     // Now check if there's any work in the overflow list
5117     // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
5118     // only affects the number of attempts made to get work from the
5119     // overflow list and does not affect the number of workers.  Just
5120     // pass ParallelGCThreads so this behavior is unchanged.
5121     if (_collector->par_take_from_overflow_list(num_from_overflow_list,
5122                                                 work_q,
5123                                                 ParallelGCThreads)) {
5124       // Found something in global overflow list;
5125       // not yet ready to go stealing work from others.
5126       // We'd like to assert(work_q->size() != 0, ...)
5127       // because we just took work from the overflow list,
5128       // but of course we can't, since all of that might have
5129       // been already stolen from us.
5130       continue;
5131     }
5132     // Verify that we have no work before we resort to stealing
5133     assert(work_q->size() == 0, "Have work, shouldn't steal");
5134     // Try to steal from other queues that have work
5135     if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
5136       NOT_PRODUCT(num_steals++;)
5137       assert(obj_to_scan->is_oop(), "Oops, not an oop!");
5138       assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5139       // Do scanning work
5140       obj_to_scan->oop_iterate(keep_alive);
5141       // Loop around, finish this work, and try to steal some more
5142     } else if (terminator()->offer_termination()) {
5143       break;  // nirvana from the infinite cycle
5144     }
5145   }
5146   log_develop_trace(gc, task)("\t(%d: stole %d oops)", i, num_steals);
5147 }
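     // A sketch of the per-worker protocol above (commentary, not product
     // code): each worker repeats
     //
     //   drain local queue --> refill from the global overflow list -->
     //   steal from another queue --> offer termination
     //
     // and leaves the loop only when the ParallelTaskTerminator has seen
     // every worker offer termination, i.e. when all queues and the
     // overflow list are empty.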
5148 
5149 void CMSRefProcTaskExecutor::execute(ProcessTask& task)
5150 {
5151   GenCollectedHeap* gch = GenCollectedHeap::heap();
5152   WorkGang* workers = gch->workers();
5153   assert(workers != NULL, "Need parallel worker threads.");
5154   CMSRefProcTaskProxy rp_task(task, &_collector,
5155                               _collector.ref_processor()->span(),
5156                               _collector.markBitMap(),
5157                               workers, _collector.task_queues());
5158   workers->run_task(&rp_task);
5159 }
5160 
5161 void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
5162 {
5163 
5164   GenCollectedHeap* gch = GenCollectedHeap::heap();
5165   WorkGang* workers = gch->workers();
5166   assert(workers != NULL, "Need parallel worker threads.");
5167   CMSRefEnqueueTaskProxy enq_task(task);
5168   workers->run_task(&enq_task);
5169 }
5170 
5171 void CMSCollector::refProcessingWork() {
5172   ResourceMark rm;
5173   HandleMark   hm;
5174 
5175   ReferenceProcessor* rp = ref_processor();
5176   assert(rp->span().equals(_span), "Spans should be equal");
5177   assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
5178   // Process weak references.
5179   rp->setup_policy(false);
5180   verify_work_stacks_empty();
5181 
5182   CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
5183                                           &_markStack, false /* !preclean */);
5184   CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
5185                                 _span, &_markBitMap, &_markStack,
5186                                 &cmsKeepAliveClosure, false /* !preclean */);
5187   {
5188     GCTraceTime(Debug, gc) t("Weak Refs Processing", _gc_timer_cm);
5189 
5190     ReferenceProcessorStats stats;
5191     if (rp->processing_is_mt()) {
5192       // Set the degree of MT here.  If the discovery is done MT, there
5193       // may have been a different number of threads doing the discovery
5194       // and a different number of discovered lists may have Ref objects.
5195       // That is OK as long as the Reference lists are balanced (see
5196       // balance_all_queues() and balance_queues()).
5197       GenCollectedHeap* gch = GenCollectedHeap::heap();
5198       uint active_workers = ParallelGCThreads;
5199       WorkGang* workers = gch->workers();
5200       if (workers != NULL) {
5201         active_workers = workers->active_workers();
5202         // The expectation is that active_workers will have already
5203         // been set to a reasonable value.  If it has not been set,
5204         // investigate.
5205         assert(active_workers > 0, "Should have been set during scavenge");
5206       }
5207       rp->set_active_mt_degree(active_workers);
5208       CMSRefProcTaskExecutor task_executor(*this);
5209       stats = rp->process_discovered_references(&_is_alive_closure,
5210                                         &cmsKeepAliveClosure,
5211                                         &cmsDrainMarkingStackClosure,
5212                                         &task_executor,
5213                                         _gc_timer_cm);
5214     } else {
5215       stats = rp->process_discovered_references(&_is_alive_closure,
5216                                         &cmsKeepAliveClosure,
5217                                         &cmsDrainMarkingStackClosure,
5218                                         NULL,
5219                                         _gc_timer_cm);
5220     }
5221     _gc_tracer_cm->report_gc_reference_stats(stats);
5222 
5223   }
5224 
5225   // This is the point where the entire marking should have completed.
5226   verify_work_stacks_empty();
5227 
5228   if (should_unload_classes()) {
5229     {
5230       GCTraceTime(Debug, gc) t("Class Unloading", _gc_timer_cm);
5231 
5232       // Unload classes and purge the SystemDictionary.
5233       bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
5234 
5235       // Unload nmethods.
5236       CodeCache::do_unloading(&_is_alive_closure, purged_class);
5237 
5238       // Prune dead klasses from subklass/sibling/implementor lists.
5239       Klass::clean_weak_klass_links(&_is_alive_closure);
5240     }
5241 
5242     {
5243       GCTraceTime(Debug, gc) t("Scrub Symbol Table", _gc_timer_cm);
5244       // Clean up unreferenced symbols in symbol table.
5245       SymbolTable::unlink();
5246     }
5247 
5248     {
5249       GCTraceTime(Debug, gc) t("Scrub String Table", _gc_timer_cm);
5250       // Delete entries for dead interned strings.
5251       StringTable::unlink(&_is_alive_closure);
5252     }
5253   }
5254 
5255 
5256   // Restore any preserved marks as a result of mark stack or
5257   // work queue overflow
5258   restore_preserved_marks_if_any();  // done single-threaded for now
5259 
5260   rp->set_enqueuing_is_done(true);
5261   if (rp->processing_is_mt()) {
5262     rp->balance_all_queues();
5263     CMSRefProcTaskExecutor task_executor(*this);
5264     rp->enqueue_discovered_references(&task_executor);
5265   } else {
5266     rp->enqueue_discovered_references(NULL);
5267   }
5268   rp->verify_no_references_recorded();
5269   assert(!rp->discovery_enabled(), "should have been disabled");
5270 }
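     // Ordering note (commentary): reference processing must complete before
     // the class-unloading work above, since SystemDictionary::do_unloading()
     // and friends consult the same _is_alive_closure; enqueueing the
     // discovered references comes last, and by then discovery must already
     // have been disabled (see the final assert).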
5271 
5272 #ifndef PRODUCT
5273 void CMSCollector::check_correct_thread_executing() {
5274   Thread* t = Thread::current();
5275   // Only the VM thread or the CMS thread should be here.
5276   assert(t->is_ConcurrentGC_thread() || t->is_VM_thread(),
5277          "Unexpected thread type");
5278   // If this is the vm thread, the foreground process
5279   // should not be waiting.  Note that _foregroundGCIsActive is
5280   // true while the foreground collector is waiting.
5281   if (_foregroundGCShouldWait) {
5282     // We cannot be the VM thread
5283     assert(t->is_ConcurrentGC_thread(),
5284            "Should be CMS thread");
5285   } else {
5286     // We can be the CMS thread only if we are in a stop-world
5287     // phase of CMS collection.
5288     if (t->is_ConcurrentGC_thread()) {
5289       assert(_collectorState == InitialMarking ||
5290              _collectorState == FinalMarking,
5291              "Should be a stop-world phase");
5292       // The CMS thread should be holding the CMS_token.
5293       assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5294              "Potential interference with concurrently "
5295              "executing VM thread");
5296     }
5297   }
5298 }
5299 #endif
5300 
5301 void CMSCollector::sweep() {
5302   assert(_collectorState == Sweeping, "just checking");
5303   check_correct_thread_executing();
5304   verify_work_stacks_empty();
5305   verify_overflow_empty();
5306   increment_sweep_count();
5307   TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
5308 
5309   _inter_sweep_timer.stop();
5310   _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
5311 
5312   assert(!_intra_sweep_timer.is_active(), "Should not be active");
5313   _intra_sweep_timer.reset();
5314   _intra_sweep_timer.start();
5315   {
5316     GCTraceCPUTime tcpu;
5317     CMSPhaseAccounting pa(this, "Concurrent Sweep");
5318     // First sweep the old gen
5319     {
5320       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5321                                bitMapLock());
5322       sweepWork(_cmsGen);
5323     }
5324 
5325     // Update Universe::_heap_*_at_gc figures.
5326     // We need all the free list locks to make the abstract state
5327     // transition from Sweeping to Resetting. See detailed note
5328     // further below.
5329     {
5330       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock());
5331       // Update heap occupancy information which is used as
5332       // input to soft ref clearing policy at the next gc.
5333       Universe::update_heap_info_at_gc();
5334       _collectorState = Resizing;
5335     }
5336   }
5337   verify_work_stacks_empty();
5338   verify_overflow_empty();
5339 
5340   if (should_unload_classes()) {
5341     // Delay purge to the beginning of the next safepoint.  Metaspace::contains
5342     // requires that the virtual spaces are stable and not deleted.
5343     ClassLoaderDataGraph::set_should_purge(true);
5344   }
5345 
5346   _intra_sweep_timer.stop();
5347   _intra_sweep_estimate.sample(_intra_sweep_timer.seconds());
5348 
5349   _inter_sweep_timer.reset();
5350   _inter_sweep_timer.start();
5351 
5352   // We need to use a monotonically non-decreasing time in ms,
5353   // because os::javaTimeMillis() does not guarantee monotonicity
5354   // and we would otherwise see time-warp warnings.
5355   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
5356   update_time_of_last_gc(now);
5357 
5358   // NOTE on abstract state transitions:
5359   // Mutators allocate-live and/or mark the mod-union table dirty
5360   // based on the state of the collection.  The former is done in
5361   // the interval [Marking, Sweeping] and the latter in the interval
5362   // [Marking, Sweeping).  Thus the transitions into the Marking state
5363   // and out of the Sweeping state must be synchronously visible
5364   // globally to the mutators.
5365   // The transition into the Marking state happens with the world
5366   // stopped so the mutators will globally see it.  Sweeping is
5367   // done asynchronously by the background collector so the transition
5368   // from the Sweeping state to the Resizing state must be done
5369   // under the freelistLock (as is the check for whether to
5370   // allocate-live and whether to dirty the mod-union table).
5371   assert(_collectorState == Resizing, "Change of collector state to"
5372     " Resizing must be done under the freelistLocks (plural)");
5373 
5374   // Now that sweeping has been completed, we clear
5375   // the incremental_collection_failed flag,
5376   // thus inviting a younger gen collection to promote into
5377   // this generation. If such a promotion may still fail,
5378   // the flag will be set again when a young collection is
5379   // attempted.
5380   GenCollectedHeap* gch = GenCollectedHeap::heap();
5381   gch->clear_incremental_collection_failed();  // Worth retrying as fresh space may have been freed up
5382   gch->update_full_collections_completed(_collection_count_start);
5383 }
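     // State-machine sketch for the tail of a cycle (commentary), per the
     // NOTE above:
     //
     //   Sweeping --(under freelistLock)--> Resizing --> Resetting --> Idling
     //
     // Only the first edge is taken in this method; the Resetting -> Idling
     // edge appears in reset_concurrent()/reset_stw() further below.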
5384 
5385 // FIX ME!!! Looks like this belongs in CFLSpace, with
5386 // CMSGen merely delegating to it.
5387 void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
5388   double nearLargestPercent = FLSLargestBlockCoalesceProximity;
5389   HeapWord*  minAddr        = _cmsSpace->bottom();
5390   HeapWord*  largestAddr    =
5391     (HeapWord*) _cmsSpace->dictionary()->find_largest_dict();
5392   if (largestAddr == NULL) {
5393     // The dictionary appears to be empty.  In this case
5394     // try to coalesce at the end of the heap.
5395     largestAddr = _cmsSpace->end();
5396   }
5397   size_t largestOffset     = pointer_delta(largestAddr, minAddr);
5398   size_t nearLargestOffset =
5399     (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
5400   log_debug(gc, freelist)("CMS: Large Block: " PTR_FORMAT "; Proximity: " PTR_FORMAT " -> " PTR_FORMAT,
5401                           p2i(largestAddr), p2i(_cmsSpace->nearLargestChunk()), p2i(minAddr + nearLargestOffset));
5402   _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
5403 }
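     // Worked example for the arithmetic above (commentary; illustrative
     // numbers): with nearLargestPercent == 0.99 and the largest free block
     // 1,000,000 words past bottom(), nearLargestChunk is set to
     //   bottom() + (size_t)(1000000 * 0.99) - MinChunkSize
     // i.e. slightly before the largest block, so that blocks freed near it
     // tend to coalesce into it.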
5404 
5405 bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
5406   return addr >= _cmsSpace->nearLargestChunk();
5407 }
5408 
5409 FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
5410   return _cmsSpace->find_chunk_at_end();
5411 }
5412 
5413 void ConcurrentMarkSweepGeneration::update_gc_stats(Generation* current_generation,
5414                                                     bool full) {
5415   // If the young generation has been collected, gather any statistics
5416   // that are of interest at this point.
5417   bool current_is_young = GenCollectedHeap::heap()->is_young_gen(current_generation);
5418   if (!full && current_is_young) {
5419     // Gather statistics on the young generation collection.
5420     collector()->stats().record_gc0_end(used());
5421   }
5422 }
5423 
5424 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* old_gen) {
5425   // We iterate over the space(s) underlying this generation,
5426   // checking the mark bit map to see if the bits corresponding
5427   // to specific blocks are marked or not. Blocks that are
5428   // marked are live and are not swept up. All remaining blocks
5429   // are swept up, with coalescing on-the-fly as we sweep up
5430   // contiguous free and/or garbage blocks:
5431   // We need to ensure that the sweeper synchronizes with allocators
5432   // and stop-the-world collectors. In particular, the following
5433   // locks are used:
5434   // . CMS token: if this is held, a stop the world collection cannot occur
5435   // . freelistLock: if this is held no allocation can occur from this
5436   //                 generation by another thread
5437   // . bitMapLock: if this is held, no other thread can access or update
5438   //               the marking bit map
5439 
5440   // Note that we need to hold the freelistLock if we use
5441   // block iterate below; else the iterator might go awry if
5442   // a mutator (or promotion) causes block contents to change
5443   // (for instance if the allocator divvies up a block).
5444   // If we hold the free list lock, for all practical purposes
5445   // young generation GC's can't occur (they'll usually need to
5446   // promote), so we might as well prevent all young generation
5447   // GC's while we do a sweeping step. For the same reason, we might
5448   // as well take the bit map lock for the entire duration.
5449 
5450   // check that we hold the requisite locks
5451   assert(have_cms_token(), "Should hold cms token");
5452   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), "Should possess CMS token to sweep");
5453   assert_lock_strong(old_gen->freelistLock());
5454   assert_lock_strong(bitMapLock());
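       // (Commentary: the locks asserted above are taken by the caller --
       // see CMSCollector::sweep() -- via the CMSTokenSyncWithLocks helper:
       //   CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(), bitMapLock());
       // which also synchronizes on the CMS token.)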
5455 
5456   assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context");
5457   assert(_intra_sweep_timer.is_active(),  "Was switched on  in an outer context");
5458   old_gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
5459                                           _inter_sweep_estimate.padded_average(),
5460                                           _intra_sweep_estimate.padded_average());
5461   old_gen->setNearLargestChunk();
5462 
5463   {
5464     SweepClosure sweepClosure(this, old_gen, &_markBitMap, CMSYield);
5465     old_gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
5466     // We need to free-up/coalesce garbage/blocks from a
5467     // co-terminal free run. This is done in the SweepClosure
5468     // destructor; so, do not remove this scope, else the
5469     // end-of-sweep-census below will be off by a little bit.
5470   }
5471   old_gen->cmsSpace()->sweep_completed();
5472   old_gen->cmsSpace()->endSweepFLCensus(sweep_count());
5473   if (should_unload_classes()) {                // unloaded classes this cycle,
5474     _concurrent_cycles_since_last_unload = 0;   // ... reset count
5475   } else {                                      // did not unload classes,
5476     _concurrent_cycles_since_last_unload++;     // ... increment count
5477   }
5478 }
5479 
5480 // Reset CMS data structures (for now just the marking bit map)
5481 // preparatory for the next cycle.
5482 void CMSCollector::reset_concurrent() {
5483   CMSTokenSyncWithLocks ts(true, bitMapLock());
5484 
5485   // If the state is not "Resetting", the foreground thread
5486   // has already done the collection and the resetting.
5487   if (_collectorState != Resetting) {
5488     assert(_collectorState == Idling, "The state should only change"
5489       " because the foreground collector has finished the collection");
5490     return;
5491   }
5492 
5493   {
5494     // Clear the mark bitmap (no grey objects to start with)
5495     // for the next cycle.
5496     GCTraceCPUTime tcpu;
5497     CMSPhaseAccounting cmspa(this, "Concurrent Reset");
5498 
5499     HeapWord* curAddr = _markBitMap.startWord();
5500     while (curAddr < _markBitMap.endWord()) {
5501       size_t remaining  = pointer_delta(_markBitMap.endWord(), curAddr);
5502       MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
5503       _markBitMap.clear_large_range(chunk);
5504       if (ConcurrentMarkSweepThread::should_yield() &&
5505           !foregroundGCIsActive() &&
5506           CMSYield) {
5507         assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5508                "CMS thread should hold CMS token");
5509         assert_lock_strong(bitMapLock());
5510         bitMapLock()->unlock();
5511         ConcurrentMarkSweepThread::desynchronize(true);
5512         stopTimer();
5513         incrementYields();
5514 
5515         // See the comment in coordinator_yield()
5516         for (unsigned i = 0; i < CMSYieldSleepCount &&
5517                          ConcurrentMarkSweepThread::should_yield() &&
5518                          !CMSCollector::foregroundGCIsActive(); ++i) {
5519           os::sleep(Thread::current(), 1, false);
5520         }
5521 
5522         ConcurrentMarkSweepThread::synchronize(true);
5523         bitMapLock()->lock_without_safepoint_check();
5524         startTimer();
5525       }
5526       curAddr = chunk.end();
5527     }
5528     // A successful mostly concurrent collection has been done.
5529     // Because only the full (i.e., concurrent mode failure) collections
5530     // are being measured for gc overhead limits, clean the "near" flag
5531     // and count.
5532     size_policy()->reset_gc_overhead_limit_count();
5533     _collectorState = Idling;
5534   }
5535 
5536   register_gc_end();
5537 }
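     // Commentary on the clearing loop above: the bit map is cleared in
     // CMSBitMapYieldQuantum-sized chunks rather than with one clear_all()
     // call so that the CMS thread can drop the bitMapLock and yield to the
     // VM thread between chunks; contrast reset_stw() below, which runs at a
     // safepoint and can simply call clear_all().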
5538 
5539 // Same as above but for STW paths
5540 void CMSCollector::reset_stw() {
5541   // already have the lock
5542   assert(_collectorState == Resetting, "just checking");
5543   assert_lock_strong(bitMapLock());
5544   GCIdMarkAndRestore gc_id_mark(_cmsThread->gc_id());
5545   _markBitMap.clear_all();
5546   _collectorState = Idling;
5547   register_gc_end();
5548 }
5549 
5550 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
5551   GCTraceCPUTime tcpu;
5552   TraceCollectorStats tcs(counters());
5553 
5554   switch (op) {
5555     case CMS_op_checkpointRootsInitial: {
5556       GCTraceTime(Info, gc) t("Pause Initial Mark", NULL, GCCause::_no_gc, true);
5557       SvcGCMarker sgcm(SvcGCMarker::OTHER);
5558       checkpointRootsInitial();
5559       break;
5560     }
5561     case CMS_op_checkpointRootsFinal: {
5562       GCTraceTime(Info, gc) t("Pause Remark", NULL, GCCause::_no_gc, true);
5563       SvcGCMarker sgcm(SvcGCMarker::OTHER);
5564       checkpointRootsFinal();
5565       break;
5566     }
5567     default:
5568       fatal("No such CMS_op");
5569   }
5570 }
5571 
5572 #ifndef PRODUCT
5573 size_t const CMSCollector::skip_header_HeapWords() {
5574   return FreeChunk::header_size();
5575 }
5576 
5577 // Try to collect here the conditions that should hold when
5578 // CMS thread is exiting. The idea is that the foreground GC
5579 // thread should not be blocked if it wants to terminate
5580 // the CMS thread and yet continue to run the VM for a while
5581 // after that.
5582 void CMSCollector::verify_ok_to_terminate() const {
5583   assert(Thread::current()->is_ConcurrentGC_thread(),
5584          "should be called by CMS thread");
5585   assert(!_foregroundGCShouldWait, "should be false");
5586   // We could check here that all the various low-level locks
5587   // are not held by the CMS thread, but that is overkill; see
5588   // also CMSThread::verify_ok_to_terminate() where the CGC_lock
5589   // is checked.
5590 }
5591 #endif
5592 
5593 size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const {
5594   assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1),
5595          "missing Printezis mark?");
5596   HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
5597   size_t size = pointer_delta(nextOneAddr + 1, addr);
5598   assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
5599          "alignment problem");
5600   assert(size >= 3, "Necessary for Printezis marks to work");
5601   return size;
5602 }
5603 
5604 // A variant of the above (block_size_using_printezis_bits()) except
5605 // that we return 0 if the P-bits are not yet set.
5606 size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const {
5607   if (_markBitMap.isMarked(addr + 1)) {
5608     assert(_markBitMap.isMarked(addr), "P-bit can be set only for marked objects");
5609     HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
5610     size_t size = pointer_delta(nextOneAddr + 1, addr);
5611     assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
5612            "alignment problem");
5613     assert(size >= 3, "Necessary for Printezis marks to work");
5614     return size;
5615   }
5616   return 0;
5617 }
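     // Printezis marks, pictorially (commentary): for an allocated-but-
     // uninitialized block of N words at addr, the bits at addr and addr+1
     // are both set, and a third bit marks the last word:
     //
     //   bit index:  addr  addr+1  ...   addr+N-1
     //   value:       1      1     0...0    1
     //
     // so getNextMarkedWordAddress(addr + 2) returns addr+N-1 and
     // pointer_delta(nextOneAddr + 1, addr) == N. This is also why the
     // asserts above require size >= 3: the pattern needs three distinct bits.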
5618 
5619 HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
5620   size_t sz = 0;
5621   oop p = (oop)addr;
5622   if (p->klass_or_null() != NULL) {
5623     sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
5624   } else {
5625     sz = block_size_using_printezis_bits(addr);
5626   }
5627   assert(sz > 0, "size must be nonzero");
5628   HeapWord* next_block = addr + sz;
5629   HeapWord* next_card  = (HeapWord*)round_to((uintptr_t)next_block,
5630                                              CardTableModRefBS::card_size);
5631   assert(round_down((uintptr_t)addr,      CardTableModRefBS::card_size) <
5632          round_down((uintptr_t)next_card, CardTableModRefBS::card_size),
5633          "must be different cards");
5634   return next_card;
5635 }
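     // Worked example (commentary; assuming 512-byte cards and 8-byte heap
     // words): a 10-word (80-byte) block at address 0x1000 gives
     // next_block == 0x1050 and next_card == round_to(0x1050, 512) == 0x1200;
     // 0x1000 and 0x1200 round down to the distinct cards 0x1000 and 0x1200,
     // satisfying the assert above.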
5636 
5637 
5638 // CMS Bit Map Wrapper /////////////////////////////////////////
5639 
5640 // Construct a CMS bit map infrastructure, but don't create the
5641 // bit vector itself. That is done by a separate call CMSBitMap::allocate()
5642 // further below.
5643 CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
5644   _bm(),
5645   _shifter(shifter),
5646   _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true,
5647                                     Monitor::_safepoint_check_sometimes) : NULL)
5648 {
5649   _bmStartWord = 0;
5650   _bmWordSize  = 0;
5651 }
5652 
5653 bool CMSBitMap::allocate(MemRegion mr) {
5654   _bmStartWord = mr.start();
5655   _bmWordSize  = mr.word_size();
5656   ReservedSpace brs(ReservedSpace::allocation_align_size_up(
5657                      (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
5658   if (!brs.is_reserved()) {
5659     warning("CMS bit map allocation failure");
5660     return false;
5661   }
5662   // For now we'll just commit all of the bit map up front.
5663   // Later on we'll try to be more parsimonious with swap.
5664   if (!_virtual_space.initialize(brs, brs.size())) {
5665     warning("CMS bit map backing store failure");
5666     return false;
5667   }
5668   assert(_virtual_space.committed_size() == brs.size(),
5669          "didn't reserve backing store for all of CMS bit map?");
5670   _bm.set_map((BitMap::bm_word_t*)_virtual_space.low());
5671   assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
5672          _bmWordSize, "inconsistency in bit map sizing");
5673   _bm.set_size(_bmWordSize >> _shifter);
5674 
5675   // bm.clear(); // can we rely on getting zero'd memory? verify below
5676   assert(isAllClear(),
5677          "Expected zero'd memory from ReservedSpace constructor");
5678   assert(_bm.size() == heapWordDiffToOffsetDiff(sizeInWords()),
5679          "consistency check");
5680   return true;
5681 }
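     // Sizing arithmetic (commentary): one map bit covers 2^_shifter heap
     // words, so the map needs (_bmWordSize >> _shifter) bits, which is
     // (_bmWordSize >> (_shifter + LogBitsPerByte)) bytes -- plus one byte
     // for rounding, as reserved above. E.g. (illustrative) with _shifter
     // == 0 and 8-byte heap words, a 1 GB span (2^27 words) needs a 16 MB
     // (2^24-byte) bit map.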
5682 
5683 void CMSBitMap::dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl) {
5684   HeapWord *next_addr, *end_addr, *last_addr;
5685   assert_locked();
5686   assert(covers(mr), "out-of-range error");
5687   // XXX assert that start and end are appropriately aligned
5688   for (next_addr = mr.start(), end_addr = mr.end();
5689        next_addr < end_addr; next_addr = last_addr) {
5690     MemRegion dirty_region = getAndClearMarkedRegion(next_addr, end_addr);
5691     last_addr = dirty_region.end();
5692     if (!dirty_region.is_empty()) {
5693       cl->do_MemRegion(dirty_region);
5694     } else {
5695       assert(last_addr == end_addr, "program logic");
5696       return;
5697     }
5698   }
5699 }
5700 
5701 void CMSBitMap::print_on_error(outputStream* st, const char* prefix) const {
5702   _bm.print_on_error(st, prefix);
5703 }
5704 
5705 #ifndef PRODUCT
5706 void CMSBitMap::assert_locked() const {
5707   CMSLockVerifier::assert_locked(lock());
5708 }
5709 
5710 bool CMSBitMap::covers(MemRegion mr) const {
5711   // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
5712   assert((size_t)_bm.size() == (_bmWordSize >> _shifter),
5713          "size inconsistency");
5714   return (mr.start() >= _bmStartWord) &&
5715          (mr.end()   <= endWord());
5716 }
5717 
5718 bool CMSBitMap::covers(HeapWord* start, size_t size) const {
5719   return (start >= _bmStartWord && (start + size) <= endWord());
5720 }
5721 
5722 void CMSBitMap::verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) {
5723   // verify that there are no 1 bits in the interval [left, right)
5724   FalseBitMapClosure falseBitMapClosure;
5725   iterate(&falseBitMapClosure, left, right);
5726 }
5727 
5728 void CMSBitMap::region_invariant(MemRegion mr)
5729 {
5730   assert_locked();
5731   // mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
5732   assert(!mr.is_empty(), "unexpected empty region");
5733   assert(covers(mr), "mr should be covered by bit map");
5734   // convert address range into offset range
5735   size_t start_ofs = heapWordToOffset(mr.start());
5736   // Make sure that end() is appropriately aligned
5737   assert(mr.end() == (HeapWord*)round_to((intptr_t)mr.end(),
5738                         (1 << (_shifter+LogHeapWordSize))),
5739          "Misaligned mr.end()");
5740   size_t end_ofs   = heapWordToOffset(mr.end());
5741   assert(end_ofs > start_ofs, "Should mark at least one bit");
5742 }
5743 
5744 #endif
5745 
5746 bool CMSMarkStack::allocate(size_t size) {
5747   // allocate a stack of the requisite depth
5748   ReservedSpace rs(ReservedSpace::allocation_align_size_up(
5749                    size * sizeof(oop)));
5750   if (!rs.is_reserved()) {
5751     warning("CMSMarkStack allocation failure");
5752     return false;
5753   }
5754   if (!_virtual_space.initialize(rs, rs.size())) {
5755     warning("CMSMarkStack backing store failure");
5756     return false;
5757   }
5758   assert(_virtual_space.committed_size() == rs.size(),
5759          "didn't reserve backing store for all of CMS stack?");
5760   _base = (oop*)(_virtual_space.low());
5761   _index = 0;
5762   _capacity = size;
5763   NOT_PRODUCT(_max_depth = 0);
5764   return true;
5765 }
5766 
5767 // XXX FIX ME !!! In the MT case we come in here holding a
5768 // leaf lock. For printing we need to take a further lock
5769 // which has lower rank. We need to recalibrate the two
5770 // lock-ranks involved in order to be able to print the
5771 // messages below. (Or defer the printing to the caller.
5772 // For now we take the expedient path of just disabling the
5773 // messages for the problematic case.)
5774 void CMSMarkStack::expand() {
5775   assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted");
5776   if (_capacity == MarkStackSizeMax) {
5777     if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled) {
5778       // We print a warning message only once per CMS cycle.
5779       log_debug(gc)(" (benign) Hit CMSMarkStack max size limit");
5780     }
5781     return;
5782   }
5783   // Double capacity if possible
5784   size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax);
5785   // Do not give up existing stack until we have managed to
5786   // get the double capacity that we desired.
5787   ReservedSpace rs(ReservedSpace::allocation_align_size_up(
5788                    new_capacity * sizeof(oop)));
5789   if (rs.is_reserved()) {
5790     // Release the backing store associated with old stack
5791     _virtual_space.release();
5792     // Reinitialize virtual space for new stack
5793     if (!_virtual_space.initialize(rs, rs.size())) {
5794       fatal("Not enough swap for expanded marking stack");
5795     }
5796     _base = (oop*)(_virtual_space.low());
5797     _index = 0;
5798     _capacity = new_capacity;
5799   } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled) {
5800     // Failed to double capacity; continue.
5801     // We print a detail message only once per CMS cycle.
5802     log_debug(gc)(" (benign) Failed to expand marking stack from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
5803                         _capacity / K, new_capacity / K);
5804   }
5805 }
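     // Commentary on the expansion policy above: capacity doubles per call,
     // capped at MarkStackSizeMax, and the old backing store is released
     // only after the larger reservation succeeds, so a failed expansion
     // leaves the current stack intact. Note that a successful expansion
     // resets _index to 0, i.e. it assumes the stack is logically empty at
     // this point.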
5806 
5807 
5808 // Closures
5809 // XXX: there seems to be a lot of code duplication here;
5810 // should refactor and consolidate common code.
5811 
5812 // This closure is used to mark refs into the CMS generation in
5813 // the CMS bit map. Called at the first checkpoint. This closure
5814 // assumes that we do not need to re-mark dirty cards; if the CMS
5815 // generation on which this is used is not the oldest
5816 // generation, then this will lose younger_gen cards!
5817 
5818 MarkRefsIntoClosure::MarkRefsIntoClosure(
5819   MemRegion span, CMSBitMap* bitMap):
5820     _span(span),
5821     _bitMap(bitMap)
5822 {
5823   assert(ref_processor() == NULL, "deliberately left NULL");
5824   assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
5825 }
5826 
5827 void MarkRefsIntoClosure::do_oop(oop obj) {
5828   // if p points into _span, then mark corresponding bit in _markBitMap
5829   assert(obj->is_oop(), "expected an oop");
5830   HeapWord* addr = (HeapWord*)obj;
5831   if (_span.contains(addr)) {
5832     // this should be made more efficient
5833     _bitMap->mark(addr);
5834   }
5835 }
5836 
5837 void MarkRefsIntoClosure::do_oop(oop* p)       { MarkRefsIntoClosure::do_oop_work(p); }
5838 void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
5839 
5840 ParMarkRefsIntoClosure::ParMarkRefsIntoClosure(
5841   MemRegion span, CMSBitMap* bitMap):
5842     _span(span),
5843     _bitMap(bitMap)
5844 {
5845   assert(ref_processor() == NULL, "deliberately left NULL");
5846   assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
5847 }
5848 
5849 void ParMarkRefsIntoClosure::do_oop(oop obj) {
5850   // if p points into _span, then mark corresponding bit in _markBitMap
5851   assert(obj->is_oop(), "expected an oop");
5852   HeapWord* addr = (HeapWord*)obj;
5853   if (_span.contains(addr)) {
5854     // this should be made more efficient
5855     _bitMap->par_mark(addr);
5856   }
5857 }
5858 
5859 void ParMarkRefsIntoClosure::do_oop(oop* p)       { ParMarkRefsIntoClosure::do_oop_work(p); }
5860 void ParMarkRefsIntoClosure::do_oop(narrowOop* p) { ParMarkRefsIntoClosure::do_oop_work(p); }
5861 
5862 // A variant of the above, used for CMS marking verification.
5863 MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
5864   MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
5865     _span(span),
5866     _verification_bm(verification_bm),
5867     _cms_bm(cms_bm)
5868 {
5869   assert(ref_processor() == NULL, "deliberately left NULL");
5870   assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
5871 }
5872 
5873 void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
5874   // if p points into _span, then mark the corresponding bit in _verification_bm
5875   assert(obj->is_oop(), "expected an oop");
5876   HeapWord* addr = (HeapWord*)obj;
5877   if (_span.contains(addr)) {
5878     _verification_bm->mark(addr);
5879     if (!_cms_bm->isMarked(addr)) {
5880       LogHandle(gc, verify) log;
5881       ResourceMark rm;
5882       oop(addr)->print_on(log.error_stream());
5883       log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
5884       fatal("... aborting");
5885     }
5886   }
5887 }
5888 
5889 void MarkRefsIntoVerifyClosure::do_oop(oop* p)       { MarkRefsIntoVerifyClosure::do_oop_work(p); }
5890 void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
5891 
5892 //////////////////////////////////////////////////
5893 // MarkRefsIntoAndScanClosure
5894 //////////////////////////////////////////////////
5895 
5896 MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
5897                                                        ReferenceProcessor* rp,
5898                                                        CMSBitMap* bit_map,
5899                                                        CMSBitMap* mod_union_table,
5900                                                        CMSMarkStack*  mark_stack,
5901                                                        CMSCollector* collector,
5902                                                        bool should_yield,
5903                                                        bool concurrent_precleaning):
5904   _collector(collector),
5905   _span(span),
5906   _bit_map(bit_map),
5907   _mark_stack(mark_stack),
5908   _pushAndMarkClosure(collector, span, rp, bit_map, mod_union_table,
5909                       mark_stack, concurrent_precleaning),
5910   _yield(should_yield),
5911   _concurrent_precleaning(concurrent_precleaning),
5912   _freelistLock(NULL)
5913 {
5914   // FIXME: Should initialize in base class constructor.
5915   assert(rp != NULL, "ref_processor shouldn't be NULL");
5916   set_ref_processor_internal(rp);
5917 }
5918 
5919 // This closure is used to mark refs into the CMS generation at the
5920 // second (final) checkpoint, and to scan and transitively follow
5921 // the unmarked oops. It is also used during the concurrent precleaning
5922 // phase while scanning objects on dirty cards in the CMS generation.
5923 // The marks are made in the marking bit map and the marking stack is
5924 // used for keeping the (newly) grey objects during the scan.
5925 // The parallel version (ParMarkRefsIntoAndScanClosure) appears further below.
5926 void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
5927   if (obj != NULL) {
5928     assert(obj->is_oop(), "expected an oop");
5929     HeapWord* addr = (HeapWord*)obj;
5930     assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
5931     assert(_collector->overflow_list_is_empty(),
5932            "overflow list should be empty");
5933     if (_span.contains(addr) &&
5934         !_bit_map->isMarked(addr)) {
5935       // mark bit map (object is now grey)
5936       _bit_map->mark(addr);
5937       // push on marking stack (stack should be empty), and drain the
5938       // stack by applying this closure to the oops in the oops popped
5939       // from the stack (i.e. blacken the grey objects)
5940       bool res = _mark_stack->push(obj);
5941       assert(res, "Should have space to push on empty stack");
5942       do {
5943         oop new_oop = _mark_stack->pop();
5944         assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
5945         assert(_bit_map->isMarked((HeapWord*)new_oop),
5946                "only grey objects on this stack");
5947         // iterate over the oops in this oop, marking and pushing
5948         // the ones in CMS heap (i.e. in _span).
5949         new_oop->oop_iterate(&_pushAndMarkClosure);
5950         // check if it's time to yield
5951         do_yield_check();
5952       } while (!_mark_stack->isEmpty() ||
5953                (!_concurrent_precleaning && take_from_overflow_list()));
5954         // if marking stack is empty, and we are not doing this
5955         // during precleaning, then check the overflow list
5956     }
5957     assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
5958     assert(_collector->overflow_list_is_empty(),
5959            "overflow list was drained above");
5960 
5961     assert(_collector->no_preserved_marks(),
5962            "All preserved marks should have been restored above");
5963   }
5964 }
5965 
5966 void MarkRefsIntoAndScanClosure::do_oop(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
5967 void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
5968 
5969 void MarkRefsIntoAndScanClosure::do_yield_work() {
5970   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5971          "CMS thread should hold CMS token");
5972   assert_lock_strong(_freelistLock);
5973   assert_lock_strong(_bit_map->lock());
5974   // relinquish the free_list_lock and bitMaplock()
5975   _bit_map->lock()->unlock();
5976   _freelistLock->unlock();
5977   ConcurrentMarkSweepThread::desynchronize(true);
5978   _collector->stopTimer();
5979   _collector->incrementYields();
5980 
5981   // See the comment in coordinator_yield()
5982   for (unsigned i = 0;
5983        i < CMSYieldSleepCount &&
5984        ConcurrentMarkSweepThread::should_yield() &&
5985        !CMSCollector::foregroundGCIsActive();
5986        ++i) {
5987     os::sleep(Thread::current(), 1, false);
5988   }
5989 
5990   ConcurrentMarkSweepThread::synchronize(true);
5991   _freelistLock->lock_without_safepoint_check();
5992   _bit_map->lock()->lock_without_safepoint_check();
5993   _collector->startTimer();
5994 }
5995 
5996 ///////////////////////////////////////////////////////////
5997 // ParMarkRefsIntoAndScanClosure: a parallel version of
5998 //                                MarkRefsIntoAndScanClosure
5999 ///////////////////////////////////////////////////////////
6000 ParMarkRefsIntoAndScanClosure::ParMarkRefsIntoAndScanClosure(
6001   CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
6002   CMSBitMap* bit_map, OopTaskQueue* work_queue):
6003   _span(span),
6004   _bit_map(bit_map),
6005   _work_queue(work_queue),
6006   _low_water_mark(MIN2((work_queue->max_elems()/4),
6007                        ((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads))),
6008   _parPushAndMarkClosure(collector, span, rp, bit_map, work_queue)
6009 {
6010   // FIXME: Should initialize in base class constructor.
6011   assert(rp != NULL, "ref_processor shouldn't be NULL");
6012   set_ref_processor_internal(rp);
6013 }
6014 
6015 // This closure is used to mark refs into the CMS generation at the
6016 // second (final) checkpoint, and to scan and transitively follow
6017 // the unmarked oops. The marks are made in the marking bit map and
6018 // the work_queue is used for keeping the (newly) grey objects during
6019 // the scan phase, from which they are also available for stealing by
6020 // parallel threads. Since the marking bit map is shared, updates are
6021 // synchronized (via CAS).
6022 void ParMarkRefsIntoAndScanClosure::do_oop(oop obj) {
6023   if (obj != NULL) {
6024     // Ignore mark word because this could be an already marked oop
6025     // that may be chained at the end of the overflow list.
6026     assert(obj->is_oop(true), "expected an oop");
6027     HeapWord* addr = (HeapWord*)obj;
6028     if (_span.contains(addr) &&
6029         !_bit_map->isMarked(addr)) {
6030       // mark bit map (object will become grey):
6031       // It is possible for several threads to be
6032       // trying to "claim" this object concurrently;
6033       // the unique thread that succeeds in marking the
6034       // object first will do the subsequent push on
6035       // to the work queue (or overflow list).
6036       if (_bit_map->par_mark(addr)) {
6037         // push on work_queue (which may not be empty), and trim the
6038         // queue to an appropriate length by applying this closure to
6039         // the oops in the oops popped from the stack (i.e. blacken the
6040         // grey objects)
6041         bool res = _work_queue->push(obj);
6042         assert(res, "Low water mark should be less than capacity?");
6043         trim_queue(_low_water_mark);
6044       } // Else, another thread claimed the object
6045     }
6046   }
6047 }
6048 
6049 void ParMarkRefsIntoAndScanClosure::do_oop(oop* p)       { ParMarkRefsIntoAndScanClosure::do_oop_work(p); }
6050 void ParMarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { ParMarkRefsIntoAndScanClosure::do_oop_work(p); }
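     // The claiming protocol above, in outline (commentary, not product
     // code): par_mark() is a CAS on the shared bit map, so when several
     // threads race on the same object exactly one sees it return true:
     //
     //   if (_bit_map->par_mark(addr)) {    // won the race; object was white
     //     _work_queue->push(obj);          // now grey, visible to stealers
     //     trim_queue(_low_water_mark);     // blacken until queue is short
     //   }                                  // else: another thread owns it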
6051 
6052 // This closure is used to rescan the marked objects on the dirty cards
6053 // in the mod union table and the card table proper.
6054 size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
6055   oop p, MemRegion mr) {
6056 
6057   size_t size = 0;
6058   HeapWord* addr = (HeapWord*)p;
6059   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6060   assert(_span.contains(addr), "we are scanning the CMS generation");
6061   // check if it's time to yield
6062   if (do_yield_check()) {
6063     // We yielded for some foreground stop-world work,
6064     // and we have been asked to abort this ongoing preclean cycle.
6065     return 0;
6066   }
6067   if (_bitMap->isMarked(addr)) {
6068     // it's marked; is it potentially uninitialized?
6069     if (p->klass_or_null() != NULL) {
6070         // an initialized object; ignore mark word in verification below
6071         // since we are running concurrent with mutators
6072         assert(p->is_oop(true), "should be an oop");
6073         if (p->is_objArray()) {
6074           // objArrays are precisely marked; restrict scanning
6075           // to dirty cards only.
6076           size = CompactibleFreeListSpace::adjustObjectSize(
6077                    p->oop_iterate_size(_scanningClosure, mr));
6078         } else {
6079           // A non-array may have been imprecisely marked; we need
6080           // to scan object in its entirety.
6081           size = CompactibleFreeListSpace::adjustObjectSize(
6082                    p->oop_iterate_size(_scanningClosure));
6083         }
6084         #ifdef ASSERT
6085           size_t direct_size =
6086             CompactibleFreeListSpace::adjustObjectSize(p->size());
6087           assert(size == direct_size, "Inconsistency in size");
6088           assert(size >= 3, "Necessary for Printezis marks to work");
6089           if (!_bitMap->isMarked(addr+1)) {
6090             _bitMap->verifyNoOneBitsInRange(addr+2, addr+size);
6091           } else {
6092             _bitMap->verifyNoOneBitsInRange(addr+2, addr+size-1);
6093             assert(_bitMap->isMarked(addr+size-1),
6094                    "inconsistent Printezis mark");
6095           }
6096         #endif // ASSERT
6097     } else {
6098       // An uninitialized object.
6099       assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
6100       HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
6101       size = pointer_delta(nextOneAddr + 1, addr);
6102       assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6103              "alignment problem");
6104       // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
6105       // will dirty the card when the klass pointer is installed in the
6106       // object (signaling the completion of initialization).
6107     }
6108   } else {
6109     // Either a not yet marked object or an uninitialized object
6110     if (p->klass_or_null() == NULL) {
6111       // An uninitialized object, skip to the next card, since
6112       // we may not be able to read its P-bits yet.
6113       assert(size == 0, "Initial value");
6114     } else {
6115       // An object not (yet) reached by marking: we merely need to
6116       // compute its size so as to go look at the next block.
6117       assert(p->is_oop(true), "should be an oop");
6118       size = CompactibleFreeListSpace::adjustObjectSize(p->size());
6119     }
6120   }
6121   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6122   return size;
6123 }
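     // Commentary on precise vs. imprecise card marking above: stores into
     // objArrays dirty the card of the updated element (precise marking), so
     // it is sound to rescan only the dirty region mr for arrays; for other
     // objects a dirty card merely means some field changed, so the object
     // is rescanned in its entirety.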
6124 
6125 void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
6126   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6127          "CMS thread should hold CMS token");
6128   assert_lock_strong(_freelistLock);
6129   assert_lock_strong(_bitMap->lock());
6130   // relinquish the free_list_lock and bitMaplock()
6131   _bitMap->lock()->unlock();
6132   _freelistLock->unlock();
6133   ConcurrentMarkSweepThread::desynchronize(true);
6134   _collector->stopTimer();
6135   _collector->incrementYields();
6136 
6137   // See the comment in coordinator_yield()
6138   for (unsigned i = 0; i < CMSYieldSleepCount &&
6139                    ConcurrentMarkSweepThread::should_yield() &&
6140                    !CMSCollector::foregroundGCIsActive(); ++i) {
6141     os::sleep(Thread::current(), 1, false);
6142   }
6143 
6144   ConcurrentMarkSweepThread::synchronize(true);
6145   _freelistLock->lock_without_safepoint_check();
6146   _bitMap->lock()->lock_without_safepoint_check();
6147   _collector->startTimer();
6148 }
6149 
6150 
6151 //////////////////////////////////////////////////////////////////
6152 // SurvivorSpacePrecleanClosure
6153 //////////////////////////////////////////////////////////////////
6154 // This (single-threaded) closure is used to preclean the oops in
6155 // the survivor spaces.
6156 size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
6157 
6158   HeapWord* addr = (HeapWord*)p;
6159   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6160   assert(!_span.contains(addr), "we are scanning the survivor spaces");
6161   assert(p->klass_or_null() != NULL, "object should be initialized");
6162   // an initialized object; ignore mark word in verification below
6163   // since we are running concurrent with mutators
6164   assert(p->is_oop(true), "should be an oop");
6165   // Note that we do not yield while we iterate over
6166   // the interior oops of p, pushing the relevant ones
6167   // on our marking stack.
6168   size_t size = p->oop_iterate_size(_scanning_closure);
6169   do_yield_check();
6170   // Observe that below, we do not abandon the preclean
6171   // phase as soon as we should; rather we empty the
6172   // marking stack before returning. This is to satisfy
6173   // some existing assertions. In general, it may be a
6174   // good idea to abort immediately and complete the marking
6175   // from the grey objects at a later time.
6176   while (!_mark_stack->isEmpty()) {
6177     oop new_oop = _mark_stack->pop();
6178     assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
6179     assert(_bit_map->isMarked((HeapWord*)new_oop),
6180            "only grey objects on this stack");
6181     // iterate over the oops in this oop, marking and pushing
6182     // the ones in CMS heap (i.e. in _span).
6183     new_oop->oop_iterate(_scanning_closure);
6184     // check if it's time to yield
6185     do_yield_check();
6186   }
6187   unsigned int after_count =
6188     GenCollectedHeap::heap()->total_collections();
6189   bool abort = (_before_count != after_count) ||
6190                _collector->should_abort_preclean();
6191   return abort ? 0 : size;
6192 }
6193 
6194 void SurvivorSpacePrecleanClosure::do_yield_work() {
6195   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6196          "CMS thread should hold CMS token");
6197   assert_lock_strong(_bit_map->lock());
6198   // Relinquish the bit map lock
6199   _bit_map->lock()->unlock();
6200   ConcurrentMarkSweepThread::desynchronize(true);
6201   _collector->stopTimer();
6202   _collector->incrementYields();
6203 
6204   // See the comment in coordinator_yield()
6205   for (unsigned i = 0; i < CMSYieldSleepCount &&
6206                        ConcurrentMarkSweepThread::should_yield() &&
6207                        !CMSCollector::foregroundGCIsActive(); ++i) {
6208     os::sleep(Thread::current(), 1, false);
6209   }
6210 
6211   ConcurrentMarkSweepThread::synchronize(true);
6212   _bit_map->lock()->lock_without_safepoint_check();
6213   _collector->startTimer();
6214 }
6215 
6216 // This closure is used to rescan the marked objects on the dirty cards
6217 // in the mod union table and the card table proper. In the parallel
6218 // case, although the bitMap is shared, we do a single read so the
6219 // isMarked() query is "safe".
6220 bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
6221   // Ignore mark word because we are running concurrent with mutators
6222   assert(p->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(p));
6223   HeapWord* addr = (HeapWord*)p;
6224   assert(_span.contains(addr), "we are scanning the CMS generation");
6225   bool is_obj_array = false;
6226   #ifdef ASSERT
6227     if (!_parallel) {
6228       assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
6229       assert(_collector->overflow_list_is_empty(),
6230              "overflow list should be empty");
6231 
6232     }
6233   #endif // ASSERT
6234   if (_bit_map->isMarked(addr)) {
6235     // Obj arrays are precisely marked, non-arrays are not;
6236     // so we scan objArrays precisely and non-arrays in their
6237     // entirety.
6238     if (p->is_objArray()) {
6239       is_obj_array = true;
6240       if (_parallel) {
6241         p->oop_iterate(_par_scan_closure, mr);
6242       } else {
6243         p->oop_iterate(_scan_closure, mr);
6244       }
6245     } else {
6246       if (_parallel) {
6247         p->oop_iterate(_par_scan_closure);
6248       } else {
6249         p->oop_iterate(_scan_closure);
6250       }
6251     }
6252   }
6253   #ifdef ASSERT
6254     if (!_parallel) {
6255       assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
6256       assert(_collector->overflow_list_is_empty(),
6257              "overflow list should be empty");
6258 
6259     }
6260   #endif // ASSERT
6261   return is_obj_array;
6262 }
6263 
6264 MarkFromRootsClosure::MarkFromRootsClosure(CMSCollector* collector,
6265                         MemRegion span,
6266                         CMSBitMap* bitMap, CMSMarkStack*  markStack,
6267                         bool should_yield, bool verifying):
6268   _collector(collector),
6269   _span(span),
6270   _bitMap(bitMap),
6271   _mut(&collector->_modUnionTable),
6272   _markStack(markStack),
6273   _yield(should_yield),
6274   _skipBits(0)
6275 {
6276   assert(_markStack->isEmpty(), "stack should be empty");
6277   _finger = _bitMap->startWord();
6278   _threshold = _finger;
6279   assert(_collector->_restart_addr == NULL, "Sanity check");
6280   assert(_span.contains(_finger), "Out of bounds _finger?");
6281   DEBUG_ONLY(_verifying = verifying;)
6282 }
6283 
6284 void MarkFromRootsClosure::reset(HeapWord* addr) {
6285   assert(_markStack->isEmpty(), "would cause duplicates on stack");
6286   assert(_span.contains(addr), "Out of bounds _finger?");
6287   _finger = addr;
6288   _threshold = (HeapWord*)round_to(
6289                  (intptr_t)_finger, CardTableModRefBS::card_size);
6290 }
6291 
6292 // Should revisit to see if this should be restructured for
6293 // greater efficiency.
6294 bool MarkFromRootsClosure::do_bit(size_t offset) {
6295   if (_skipBits > 0) {
6296     _skipBits--;
6297     return true;
6298   }
6299   // convert offset into a HeapWord*
6300   HeapWord* addr = _bitMap->startWord() + offset;
6301   assert(_bitMap->endWord() && addr < _bitMap->endWord(),
6302          "address out of range");
6303   assert(_bitMap->isMarked(addr), "tautology");
6304   if (_bitMap->isMarked(addr+1)) {
6305     // this is an allocated but not yet initialized object
6306     assert(_skipBits == 0, "tautology");
6307     _skipBits = 2;  // skip next two marked bits ("Printezis-marks")
6308     oop p = oop(addr);
6309     if (p->klass_or_null() == NULL) {
6310       DEBUG_ONLY(if (!_verifying) {)
6311         // We re-dirty the cards on which this object lies and increase
6312         // the _threshold so that we'll come back to scan this object
6313         // during the preclean or remark phase. (CMSCleanOnEnter)
6314         if (CMSCleanOnEnter) {
6315           size_t sz = _collector->block_size_using_printezis_bits(addr);
6316           HeapWord* end_card_addr   = (HeapWord*)round_to(
6317                                          (intptr_t)(addr+sz), CardTableModRefBS::card_size);
6318           MemRegion redirty_range = MemRegion(addr, end_card_addr);
6319           assert(!redirty_range.is_empty(), "Arithmetical tautology");
6320           // Bump _threshold to end_card_addr; note that
6321           // _threshold cannot possibly exceed end_card_addr, anyhow.
6322           // This prevents future clearing of the card as the scan proceeds
6323           // to the right.
6324           assert(_threshold <= end_card_addr,
6325                  "Because we are just scanning into this object");
6326           if (_threshold < end_card_addr) {
6327             _threshold = end_card_addr;
6328           }
6329           if (p->klass_or_null() != NULL) {
6330             // Redirty the range of cards...
6331             _mut->mark_range(redirty_range);
6332           } // ...else the setting of klass will dirty the card anyway.
6333         }
6334       DEBUG_ONLY(})
6335       return true;
6336     }
6337   }
6338   scanOopsInOop(addr);
6339   return true;
6340 }
6341 
6342 // We take a break if we've been at this for a while,
6343 // so as to avoid monopolizing the locks involved.
6344 void MarkFromRootsClosure::do_yield_work() {
6345   // First give up the locks, then yield, then re-lock
6346   // We should probably use a constructor/destructor idiom to
6347   // do this unlock/lock or modify the MutexUnlocker class to
6348   // serve our purpose. XXX
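       // A sketch of such an idiom (a hypothetical helper, not defined in
       // this file), modeled on MutexUnlocker but re-locking without a
       // safepoint check, so the unlock/re-lock pair below becomes scoped:
       //
       //   class ReverseMutexLocker : public StackObj {
       //     Mutex* _m;
       //    public:
       //     ReverseMutexLocker(Mutex* m) : _m(m) { _m->unlock(); }
       //     ~ReverseMutexLocker() { _m->lock_without_safepoint_check(); }
       //   };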
6349   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6350          "CMS thread should hold CMS token");
6351   assert_lock_strong(_bitMap->lock());
6352   _bitMap->lock()->unlock();
6353   ConcurrentMarkSweepThread::desynchronize(true);
6354   _collector->stopTimer();
6355   _collector->incrementYields();
6356 
6357   // See the comment in coordinator_yield()
6358   for (unsigned i = 0; i < CMSYieldSleepCount &&
6359                        ConcurrentMarkSweepThread::should_yield() &&
6360                        !CMSCollector::foregroundGCIsActive(); ++i) {
6361     os::sleep(Thread::current(), 1, false);
6362   }
6363 
6364   ConcurrentMarkSweepThread::synchronize(true);
6365   _bitMap->lock()->lock_without_safepoint_check();
6366   _collector->startTimer();
6367 }
6368 
6369 void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
6370   assert(_bitMap->isMarked(ptr), "expected bit to be set");
6371   assert(_markStack->isEmpty(),
6372          "should drain stack to limit stack usage");
6373   // convert ptr to an oop preparatory to scanning
6374   oop obj = oop(ptr);
6375   // Ignore mark word in verification below, since we
6376   // may be running concurrent with mutators.
6377   assert(obj->is_oop(true), "should be an oop");
6378   assert(_finger <= ptr, "_finger runneth ahead");
6379   // advance the finger to right end of this object
6380   _finger = ptr + obj->size();
6381   assert(_finger > ptr, "we just incremented it above");
6382   // On large heaps, it may take us some time to get through
6383   // the marking phase. During this time it's possible that
6384   // a lot of mutations have accumulated in the card table
6385   // and the mod union table -- these mutation records are
6386   // redundant until we have actually traced into the
6387   // corresponding card.
6388   // Here, we check whether advancing the finger would make
6389   // us cross into a new card, and if so clear corresponding
6390   // cards in the MUT (preclean them in the card-table in the
6391   // future).
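       // An illustrative example (hypothetical addresses; assuming the
       // default 512-byte card size): if _threshold is card-aligned at
       // 0x1000 and the finger advances to 0x1250, the code below clears
       // the two cards overlapping [0x1000, 0x1400) in the MUT and bumps
       // _threshold to the card-aligned 0x1400.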
6392 
6393   DEBUG_ONLY(if (!_verifying) {)
6394     // The clean-on-enter optimization is disabled by default,
6395     // until we fix 6178663.
6396     if (CMSCleanOnEnter && (_finger > _threshold)) {
6397       // [_threshold, _finger) represents the interval
6398     // of cards to be cleared in MUT (or precleaned in card table).
6399       // The set of cards to be cleared is all those that overlap
6400       // with the interval [_threshold, _finger); note that
6401       // _threshold is always kept card-aligned but _finger isn't
6402       // always card-aligned.
6403       HeapWord* old_threshold = _threshold;
6404       assert(old_threshold == (HeapWord*)round_to(
6405               (intptr_t)old_threshold, CardTableModRefBS::card_size),
6406              "_threshold should always be card-aligned");
6407       _threshold = (HeapWord*)round_to(
6408                      (intptr_t)_finger, CardTableModRefBS::card_size);
6409       MemRegion mr(old_threshold, _threshold);
6410       assert(!mr.is_empty(), "Control point invariant");
6411       assert(_span.contains(mr), "Should clear within span");
6412       _mut->clear_range(mr);
6413     }
6414   DEBUG_ONLY(})
6415   // Note: the finger doesn't advance while we drain
6416   // the stack below.
6417   PushOrMarkClosure pushOrMarkClosure(_collector,
6418                                       _span, _bitMap, _markStack,
6419                                       _finger, this);
6420   bool res = _markStack->push(obj);
6421   assert(res, "Empty non-zero size stack should have space for single push");
6422   while (!_markStack->isEmpty()) {
6423     oop new_oop = _markStack->pop();
6424     // Skip verifying header mark word below because we are
6425     // running concurrent with mutators.
6426     assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
6427     // now scan this oop's oops
6428     new_oop->oop_iterate(&pushOrMarkClosure);
6429     do_yield_check();
6430   }
6431   assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
6432 }
6433 
6434 ParMarkFromRootsClosure::ParMarkFromRootsClosure(CMSConcMarkingTask* task,
6435                        CMSCollector* collector, MemRegion span,
6436                        CMSBitMap* bit_map,
6437                        OopTaskQueue* work_queue,
6438                        CMSMarkStack*  overflow_stack):
6439   _collector(collector),
6440   _whole_span(collector->_span),
6441   _span(span),
6442   _bit_map(bit_map),
6443   _mut(&collector->_modUnionTable),
6444   _work_queue(work_queue),
6445   _overflow_stack(overflow_stack),
6446   _skip_bits(0),
6447   _task(task)
6448 {
6449   assert(_work_queue->size() == 0, "work_queue should be empty");
6450   _finger = span.start();
6451   _threshold = _finger;     // XXX Defer clear-on-enter optimization for now
6452   assert(_span.contains(_finger), "Out of bounds _finger?");
6453 }
6454 
6455 // Should revisit to see if this should be restructured for
6456 // greater efficiency.
6457 bool ParMarkFromRootsClosure::do_bit(size_t offset) {
6458   if (_skip_bits > 0) {
6459     _skip_bits--;
6460     return true;
6461   }
6462   // convert offset into a HeapWord*
6463   HeapWord* addr = _bit_map->startWord() + offset;
6464   assert(_bit_map->endWord() != NULL && addr < _bit_map->endWord(),
6465          "address out of range");
6466   assert(_bit_map->isMarked(addr), "tautology");
6467   if (_bit_map->isMarked(addr+1)) {
6468     // this is an allocated object that might not yet be initialized
6469     assert(_skip_bits == 0, "tautology");
6470     _skip_bits = 2;  // skip next two marked bits ("Printezis-marks")
6471     oop p = oop(addr);
6472     if (p->klass_or_null() == NULL) {
6473       // in the case of the Clean-on-Enter optimization, redirty the
6474       // card and avoid clearing it by increasing the threshold.
6475       return true;
6476     }
6477   }
6478   scan_oops_in_oop(addr);
6479   return true;
6480 }
6481 
6482 void ParMarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
6483   assert(_bit_map->isMarked(ptr), "expected bit to be set");
6484   // Should we assert that our work queue is empty or
6485   // below some drain limit?
6486   assert(_work_queue->size() == 0,
6487          "should drain stack to limit stack usage");
6488   // convert ptr to an oop preparatory to scanning
6489   oop obj = oop(ptr);
6490   // Ignore mark word in verification below, since we
6491   // may be running concurrent with mutators.
6492   assert(obj->is_oop(true), "should be an oop");
6493   assert(_finger <= ptr, "_finger runneth ahead");
6494   // advance the finger to right end of this object
6495   _finger = ptr + obj->size();
6496   assert(_finger > ptr, "we just incremented it above");
6497   // On large heaps, it may take us some time to get through
6498   // the marking phase. During this time it's possible that
6499   // a lot of mutations have accumulated in the card table
6500   // and the mod union table -- these mutation records are
6501   // redundant until we have actually traced into the
6502   // corresponding card.
6503   // Here, we check whether advancing the finger would make
6504   // us cross into a new card, and if so clear corresponding
6505   // cards in the MUT (preclean them in the card-table in the
6506   // future).
6507 
6508   // The clean-on-enter optimization is disabled by default,
6509   // until we fix 6178663.
6510   if (CMSCleanOnEnter && (_finger > _threshold)) {
6511     // [_threshold, _finger) represents the interval
6512     // of cards to be cleared in MUT (or precleaned in card table).
6513     // The set of cards to be cleared is all those that overlap
6514     // with the interval [_threshold, _finger); note that
6515     // _threshold is always kept card-aligned but _finger isn't
6516     // always card-aligned.
6517     HeapWord* old_threshold = _threshold;
6518     assert(old_threshold == (HeapWord*)round_to(
6519             (intptr_t)old_threshold, CardTableModRefBS::card_size),
6520            "_threshold should always be card-aligned");
6521     _threshold = (HeapWord*)round_to(
6522                    (intptr_t)_finger, CardTableModRefBS::card_size);
6523     MemRegion mr(old_threshold, _threshold);
6524     assert(!mr.is_empty(), "Control point invariant");
6525     assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
6526     _mut->clear_range(mr);
6527   }
6528 
6529   // Note: the local finger doesn't advance while we drain
6530   // the stack below, but the global finger sure can and will.
6531   HeapWord** gfa = _task->global_finger_addr();
6532   ParPushOrMarkClosure pushOrMarkClosure(_collector,
6533                                          _span, _bit_map,
6534                                          _work_queue,
6535                                          _overflow_stack,
6536                                          _finger,
6537                                          gfa, this);
6538   bool res = _work_queue->push(obj);   // overflow could occur here
6539   assert(res, "Will hold once we use workqueues");
6540   while (true) {
6541     oop new_oop;
6542     if (!_work_queue->pop_local(new_oop)) {
6543       // We emptied our work_queue; check if there's stuff that can
6544       // be gotten from the overflow stack.
6545       if (CMSConcMarkingTask::get_work_from_overflow_stack(
6546             _overflow_stack, _work_queue)) {
6547         do_yield_check();
6548         continue;
6549       } else {  // done
6550         break;
6551       }
6552     }
6553     // Skip verifying header mark word below because we are
6554     // running concurrent with mutators.
6555     assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
6556     // now scan this oop's oops
6557     new_oop->oop_iterate(&pushOrMarkClosure);
6558     do_yield_check();
6559   }
6560   assert(_work_queue->size() == 0, "tautology, emphasizing post-condition");
6561 }
6562 
6563 // Yield in response to a request from VM Thread or
6564 // from mutators.
6565 void ParMarkFromRootsClosure::do_yield_work() {
6566   assert(_task != NULL, "sanity");
6567   _task->yield();
6568 }
6569 
6570 // A variant of the above used for verifying CMS marking work.
6571 MarkFromRootsVerifyClosure::MarkFromRootsVerifyClosure(CMSCollector* collector,
6572                         MemRegion span,
6573                         CMSBitMap* verification_bm, CMSBitMap* cms_bm,
6574                         CMSMarkStack*  mark_stack):
6575   _collector(collector),
6576   _span(span),
6577   _verification_bm(verification_bm),
6578   _cms_bm(cms_bm),
6579   _mark_stack(mark_stack),
6580   _pam_verify_closure(collector, span, verification_bm, cms_bm,
6581                       mark_stack)
6582 {
6583   assert(_mark_stack->isEmpty(), "stack should be empty");
6584   _finger = _verification_bm->startWord();
6585   assert(_collector->_restart_addr == NULL, "Sanity check");
6586   assert(_span.contains(_finger), "Out of bounds _finger?");
6587 }
6588 
6589 void MarkFromRootsVerifyClosure::reset(HeapWord* addr) {
6590   assert(_mark_stack->isEmpty(), "would cause duplicates on stack");
6591   assert(_span.contains(addr), "Out of bounds _finger?");
6592   _finger = addr;
6593 }
6594 
6595 // Should revisit to see if this should be restructured for
6596 // greater efficiency.
6597 bool MarkFromRootsVerifyClosure::do_bit(size_t offset) {
6598   // convert offset into a HeapWord*
6599   HeapWord* addr = _verification_bm->startWord() + offset;
6600   assert(_verification_bm->endWord() != NULL && addr < _verification_bm->endWord(),
6601          "address out of range");
6602   assert(_verification_bm->isMarked(addr), "tautology");
6603   assert(_cms_bm->isMarked(addr), "tautology");
6604 
6605   assert(_mark_stack->isEmpty(),
6606          "should drain stack to limit stack usage");
6607   // convert addr to an oop preparatory to scanning
6608   oop obj = oop(addr);
6609   assert(obj->is_oop(), "should be an oop");
6610   assert(_finger <= addr, "_finger runneth ahead");
6611   // advance the finger to right end of this object
6612   _finger = addr + obj->size();
6613   assert(_finger > addr, "we just incremented it above");
6614   // Note: the finger doesn't advance while we drain
6615   // the stack below.
6616   bool res = _mark_stack->push(obj);
6617   assert(res, "Empty non-zero size stack should have space for single push");
6618   while (!_mark_stack->isEmpty()) {
6619     oop new_oop = _mark_stack->pop();
6620     assert(new_oop->is_oop(), "Oops! expected to pop an oop");
6621     // now scan this oop's oops
6622     new_oop->oop_iterate(&_pam_verify_closure);
6623   }
6624   assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
6625   return true;
6626 }
6627 
6628 PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
6629   CMSCollector* collector, MemRegion span,
6630   CMSBitMap* verification_bm, CMSBitMap* cms_bm,
6631   CMSMarkStack*  mark_stack):
6632   MetadataAwareOopClosure(collector->ref_processor()),
6633   _collector(collector),
6634   _span(span),
6635   _verification_bm(verification_bm),
6636   _cms_bm(cms_bm),
6637   _mark_stack(mark_stack)
6638 { }
6639 
6640 void PushAndMarkVerifyClosure::do_oop(oop* p)       { PushAndMarkVerifyClosure::do_oop_work(p); }
6641 void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
6642 
6643 // Upon stack overflow, we discard (part of) the stack,
6644 // remembering the least address amongst those discarded
6645 // in CMSCollector's _restart_addr.
6646 void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
6647   // Remember the least grey address discarded
6648   HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
6649   _collector->lower_restart_addr(ra);
6650   _mark_stack->reset();  // discard stack contents
6651   _mark_stack->expand(); // expand the stack if possible
6652 }
6653 
6654 void PushAndMarkVerifyClosure::do_oop(oop obj) {
6655   assert(obj->is_oop_or_null(), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6656   HeapWord* addr = (HeapWord*)obj;
6657   if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
6658     // Oop lies in _span and isn't yet grey or black
6659     _verification_bm->mark(addr);            // now grey
6660     if (!_cms_bm->isMarked(addr)) {
6661       LogHandle(gc, verify) log;
6662       ResourceMark rm;
6663       oop(addr)->print_on(log.error_stream());
6664       log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
6665       fatal("... aborting");
6666     }
6667 
6668     if (!_mark_stack->push(obj)) { // stack overflow
6669       log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _mark_stack->capacity());
6670       assert(_mark_stack->isFull(), "Else push should have succeeded");
6671       handle_stack_overflow(addr);
6672     }
6673     // anything including and to the right of _finger
6674     // will be scanned as we iterate over the remainder of the
6675     // bit map
6676   }
6677 }
6678 
6679 PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
6680                      MemRegion span,
6681                      CMSBitMap* bitMap, CMSMarkStack*  markStack,
6682                      HeapWord* finger, MarkFromRootsClosure* parent) :
6683   MetadataAwareOopClosure(collector->ref_processor()),
6684   _collector(collector),
6685   _span(span),
6686   _bitMap(bitMap),
6687   _markStack(markStack),
6688   _finger(finger),
6689   _parent(parent)
6690 { }
6691 
6692 ParPushOrMarkClosure::ParPushOrMarkClosure(CMSCollector* collector,
6693                                            MemRegion span,
6694                                            CMSBitMap* bit_map,
6695                                            OopTaskQueue* work_queue,
6696                                            CMSMarkStack*  overflow_stack,
6697                                            HeapWord* finger,
6698                                            HeapWord** global_finger_addr,
6699                                            ParMarkFromRootsClosure* parent) :
6700   MetadataAwareOopClosure(collector->ref_processor()),
6701   _collector(collector),
6702   _whole_span(collector->_span),
6703   _span(span),
6704   _bit_map(bit_map),
6705   _work_queue(work_queue),
6706   _overflow_stack(overflow_stack),
6707   _finger(finger),
6708   _global_finger_addr(global_finger_addr),
6709   _parent(parent)
6710 { }
6711 
6712 // Assumes thread-safe access by callers, who are
6713 // responsible for mutual exclusion.
6714 void CMSCollector::lower_restart_addr(HeapWord* low) {
6715   assert(_span.contains(low), "Out of bounds addr");
6716   if (_restart_addr == NULL) {
6717     _restart_addr = low;
6718   } else {
6719     _restart_addr = MIN2(_restart_addr, low);
6720   }
6721 }
6722 
6723 // Upon stack overflow, we discard (part of) the stack,
6724 // remembering the least address amongst those discarded
6725 // in CMSCollector's _restart_addr.
6726 void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
6727   // Remember the least grey address discarded
6728   HeapWord* ra = (HeapWord*)_markStack->least_value(lost);
6729   _collector->lower_restart_addr(ra);
6730   _markStack->reset();  // discard stack contents
6731   _markStack->expand(); // expand the stack if possible
6732 }
6733 
6734 // Upon stack overflow, we discard (part of) the stack,
6735 // remembering the least address amongst those discarded
6736 // in CMSCollector's _restart_addr.
6737 void ParPushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
6738   // We need to do this under a mutex to prevent other
6739   // workers from interfering with the work done below.
6740   MutexLockerEx ml(_overflow_stack->par_lock(),
6741                    Mutex::_no_safepoint_check_flag);
6742   // Remember the least grey address discarded
6743   HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
6744   _collector->lower_restart_addr(ra);
6745   _overflow_stack->reset();  // discard stack contents
6746   _overflow_stack->expand(); // expand the stack if possible
6747 }
6748 
6749 void PushOrMarkClosure::do_oop(oop obj) {
6750   // Ignore mark word because we are running concurrent with mutators.
6751   assert(obj->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6752   HeapWord* addr = (HeapWord*)obj;
6753   if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
6754     // Oop lies in _span and isn't yet grey or black
6755     _bitMap->mark(addr);            // now grey
6756     if (addr < _finger) {
6757       // the bit map iteration has already either passed, or
6758       // sampled, this bit in the bit map; we'll need to
6759       // use the marking stack to scan this oop's oops.
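           // For instance (hypothetical addresses): with _finger at 0x2000,
           // a newly-greyed object at 0x1800 lies behind the bit-map cursor
           // and would never be revisited from the bit map, so pushing it
           // on the stack is the only way its fields get traced; an object
           // at 0x2400 needs no push, since the iteration will reach its
           // (now set) bit later.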
6760       bool simulate_overflow = false;
6761       NOT_PRODUCT(
6762         if (CMSMarkStackOverflowALot &&
6763             _collector->simulate_overflow()) {
6764           // simulate a stack overflow
6765           simulate_overflow = true;
6766         }
6767       )
6768       if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
6769         log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _markStack->capacity());
6770         assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
6771         handle_stack_overflow(addr);
6772       }
6773     }
6774     // anything including and to the right of _finger
6775     // will be scanned as we iterate over the remainder of the
6776     // bit map
6777     do_yield_check();
6778   }
6779 }
6780 
6781 void PushOrMarkClosure::do_oop(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
6782 void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }
6783 
6784 void ParPushOrMarkClosure::do_oop(oop obj) {
6785   // Ignore mark word because we are running concurrent with mutators.
6786   assert(obj->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6787   HeapWord* addr = (HeapWord*)obj;
6788   if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
6789     // Oop lies in _span and isn't yet grey or black
6790     // We read the global_finger (volatile read) strictly after marking oop
6791     bool res = _bit_map->par_mark(addr);    // now grey
6792     volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
6793     // Should we push this marked oop on our stack?
6794     // -- if someone else marked it, nothing to do
6795     // -- if target oop is above global finger nothing to do
6796     // -- if target oop is in chunk and above local finger
6797     //      then nothing to do
6798     // -- else push on work queue
6799     if (   !res       // someone else marked it, they will deal with it
6800         || (addr >= *gfa)  // will be scanned in a later task
6801         || (_span.contains(addr) && addr >= _finger)) { // later in this chunk
6802       return;
6803     }
6804     // the bit map iteration has already either passed, or
6805     // sampled, this bit in the bit map; we'll need to
6806     // use the marking stack to scan this oop's oops.
6807     bool simulate_overflow = false;
6808     NOT_PRODUCT(
6809       if (CMSMarkStackOverflowALot &&
6810           _collector->simulate_overflow()) {
6811         // simulate a stack overflow
6812         simulate_overflow = true;
6813       }
6814     )
6815     if (simulate_overflow ||
6816         !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
6817       // stack overflow
6818       log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _overflow_stack->capacity());
6819       // We cannot assert that the overflow stack is full because
6820       // it may have been emptied since.
6821       assert(simulate_overflow ||
6822              _work_queue->size() == _work_queue->max_elems(),
6823             "Else push should have succeeded");
6824       handle_stack_overflow(addr);
6825     }
6826     do_yield_check();
6827   }
6828 }
6829 
6830 void ParPushOrMarkClosure::do_oop(oop* p)       { ParPushOrMarkClosure::do_oop_work(p); }
6831 void ParPushOrMarkClosure::do_oop(narrowOop* p) { ParPushOrMarkClosure::do_oop_work(p); }
6832 
6833 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
6834                                        MemRegion span,
6835                                        ReferenceProcessor* rp,
6836                                        CMSBitMap* bit_map,
6837                                        CMSBitMap* mod_union_table,
6838                                        CMSMarkStack*  mark_stack,
6839                                        bool           concurrent_precleaning):
6840   MetadataAwareOopClosure(rp),
6841   _collector(collector),
6842   _span(span),
6843   _bit_map(bit_map),
6844   _mod_union_table(mod_union_table),
6845   _mark_stack(mark_stack),
6846   _concurrent_precleaning(concurrent_precleaning)
6847 {
6848   assert(ref_processor() != NULL, "ref_processor shouldn't be NULL");
6849 }
6850 
6851 // Grey object rescan during pre-cleaning and second checkpoint phases --
6852 // the non-parallel version (the parallel version appears further below.)
6853 void PushAndMarkClosure::do_oop(oop obj) {
6854   // Ignore mark word verification. If during concurrent precleaning,
6855   // the object monitor may be locked. If during the checkpoint
6856   // phases, the object may already have been reached by a different
6857   // path and may be at the end of the global overflow list (so
6858   // the mark word may be NULL).
6859   assert(obj->is_oop_or_null(true /* ignore mark word */),
6860          "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6861   HeapWord* addr = (HeapWord*)obj;
6862   // Check if oop points into the CMS generation
6863   // and is not marked
6864   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
6865     // a white object ...
6866     _bit_map->mark(addr);         // ... now grey
6867     // push on the marking stack (grey set)
6868     bool simulate_overflow = false;
6869     NOT_PRODUCT(
6870       if (CMSMarkStackOverflowALot &&
6871           _collector->simulate_overflow()) {
6872         // simulate a stack overflow
6873         simulate_overflow = true;
6874       }
6875     )
6876     if (simulate_overflow || !_mark_stack->push(obj)) {
6877       if (_concurrent_precleaning) {
6878          // During precleaning we can just dirty the appropriate card(s)
6879          // in the mod union table, thus ensuring that the object remains
6880          // in the grey set, and continue. In the case of object arrays
6881          // we need to dirty all of the cards that the object spans,
6882          // since the rescan of object arrays will be limited to the
6883          // dirty cards.
6884          // Note that no one can be interfering with us in this action
6885          // of dirtying the mod union table, so no locking or atomics
6886          // are required.
6887          if (obj->is_objArray()) {
6888            size_t sz = obj->size();
6889            HeapWord* end_card_addr = (HeapWord*)round_to(
6890                                         (intptr_t)(addr+sz), CardTableModRefBS::card_size);
6891            MemRegion redirty_range = MemRegion(addr, end_card_addr);
6892            assert(!redirty_range.is_empty(), "Arithmetical tautology");
6893            _mod_union_table->mark_range(redirty_range);
6894          } else {
6895            _mod_union_table->mark(addr);
6896          }
6897          _collector->_ser_pmc_preclean_ovflw++;
6898       } else {
6899          // During the remark phase, we need to remember this oop
6900          // in the overflow list.
6901          _collector->push_on_overflow_list(obj);
6902          _collector->_ser_pmc_remark_ovflw++;
6903       }
6904     }
6905   }
6906 }
6907 
6908 ParPushAndMarkClosure::ParPushAndMarkClosure(CMSCollector* collector,
6909                                              MemRegion span,
6910                                              ReferenceProcessor* rp,
6911                                              CMSBitMap* bit_map,
6912                                              OopTaskQueue* work_queue):
6913   MetadataAwareOopClosure(rp),
6914   _collector(collector),
6915   _span(span),
6916   _bit_map(bit_map),
6917   _work_queue(work_queue)
6918 {
6919   assert(ref_processor() != NULL, "ref_processor shouldn't be NULL");
6920 }
6921 
6922 void PushAndMarkClosure::do_oop(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
6923 void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }
6924 
6925 // Grey object rescan during second checkpoint phase --
6926 // the parallel version.
6927 void ParPushAndMarkClosure::do_oop(oop obj) {
6928   // In the assert below, we ignore the mark word because
6929   // this oop may point to an already visited object that is
6930   // on the overflow stack (in which case the mark word has
6931   // been hijacked for chaining into the overflow stack --
6932   // if this is the last object in the overflow stack then
6933   // its mark word will be NULL). Because this object may
6934   // have been subsequently popped off the global overflow
6935   // stack, and the mark word possibly restored to the prototypical
6936   // value, by the time we get to examine this failing assert in
6937   // the debugger, is_oop_or_null(false) may have started
6938   // to hold.
6939   assert(obj->is_oop_or_null(true),
6940          "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
6941   HeapWord* addr = (HeapWord*)obj;
6942   // Check if oop points into the CMS generation
6943   // and is not marked
6944   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
6945     // a white object ...
6946     // If we manage to "claim" the object, by being the
6947     // first thread to mark it, then we push it on our
6948     // marking stack
6949     if (_bit_map->par_mark(addr)) {     // ... now grey
6950       // push on work queue (grey set)
6951       bool simulate_overflow = false;
6952       NOT_PRODUCT(
6953         if (CMSMarkStackOverflowALot &&
6954             _collector->par_simulate_overflow()) {
6955           // simulate a stack overflow
6956           simulate_overflow = true;
6957         }
6958       )
6959       if (simulate_overflow || !_work_queue->push(obj)) {
6960         _collector->par_push_on_overflow_list(obj);
6961         _collector->_par_pmc_remark_ovflw++; //  imprecise OK: no need to CAS
6962       }
6963     } // Else, some other thread got there first
6964   }
6965 }
6966 
6967 void ParPushAndMarkClosure::do_oop(oop* p)       { ParPushAndMarkClosure::do_oop_work(p); }
6968 void ParPushAndMarkClosure::do_oop(narrowOop* p) { ParPushAndMarkClosure::do_oop_work(p); }
6969 
6970 void CMSPrecleanRefsYieldClosure::do_yield_work() {
6971   Mutex* bml = _collector->bitMapLock();
6972   assert_lock_strong(bml);
6973   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6974          "CMS thread should hold CMS token");
6975 
6976   bml->unlock();
6977   ConcurrentMarkSweepThread::desynchronize(true);
6978 
6979   _collector->stopTimer();
6980   _collector->incrementYields();
6981 
6982   // See the comment in coordinator_yield()
6983   for (unsigned i = 0; i < CMSYieldSleepCount &&
6984                        ConcurrentMarkSweepThread::should_yield() &&
6985                        !CMSCollector::foregroundGCIsActive(); ++i) {
6986     os::sleep(Thread::current(), 1, false);
6987   }
6988 
6989   ConcurrentMarkSweepThread::synchronize(true);
6990   bml->lock();
6991 
6992   _collector->startTimer();
6993 }
6994 
6995 bool CMSPrecleanRefsYieldClosure::should_return() {
6996   if (ConcurrentMarkSweepThread::should_yield()) {
6997     do_yield_work();
6998   }
6999   return _collector->foregroundGCIsActive();
7000 }
7001 
7002 void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
7003   assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0,
7004          "mr should be aligned to start at a card boundary");
7005   // We'd like to assert:
7006   // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0,
7007   //        "mr should be a range of cards");
7008   // However, that would be too strong in one case -- the last
7009   // partition ends at _unallocated_block which, in general, can be
7010   // an arbitrary boundary, not necessarily card aligned.
7011   _num_dirty_cards += mr.word_size()/CardTableModRefBS::card_size_in_words;
7012   _space->object_iterate_mem(mr, &_scan_cl);
7013 }
7014 
7015 SweepClosure::SweepClosure(CMSCollector* collector,
7016                            ConcurrentMarkSweepGeneration* g,
7017                            CMSBitMap* bitMap, bool should_yield) :
7018   _collector(collector),
7019   _g(g),
7020   _sp(g->cmsSpace()),
7021   _limit(_sp->sweep_limit()),
7022   _freelistLock(_sp->freelistLock()),
7023   _bitMap(bitMap),
7024   _yield(should_yield),
7025   _inFreeRange(false),           // No free range at beginning of sweep
7026   _freeRangeInFreeLists(false),  // No free range at beginning of sweep
7027   _lastFreeRangeCoalesced(false),
7028   _freeFinger(g->used_region().start())
7029 {
7030   NOT_PRODUCT(
7031     _numObjectsFreed = 0;
7032     _numWordsFreed   = 0;
7033     _numObjectsLive = 0;
7034     _numWordsLive = 0;
7035     _numObjectsAlreadyFree = 0;
7036     _numWordsAlreadyFree = 0;
7037     _last_fc = NULL;
7038 
7039     _sp->initializeIndexedFreeListArrayReturnedBytes();
7040     _sp->dictionary()->initialize_dict_returned_bytes();
7041   )
7042   assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7043          "sweep _limit out of bounds");
7044   log_develop_trace(gc, sweep)("====================");
7045   log_develop_trace(gc, sweep)("Starting new sweep with limit " PTR_FORMAT, p2i(_limit));
7046 }
7047 
7048 void SweepClosure::print_on(outputStream* st) const {
7049   st->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")",
7050                p2i(_sp->bottom()), p2i(_sp->end()));
7051   st->print_cr("_limit = " PTR_FORMAT, p2i(_limit));
7052   st->print_cr("_freeFinger = " PTR_FORMAT, p2i(_freeFinger));
7053   NOT_PRODUCT(st->print_cr("_last_fc = " PTR_FORMAT, p2i(_last_fc));)
7054   st->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d",
7055                _inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced);
7056 }
7057 
7058 #ifndef PRODUCT
7059 // Assertion checking only:  no useful work in product mode --
7060 // however, if any of the flags below become product flags,
7061 // you may need to review this code to see if it needs to be
7062 // enabled in product mode.
7063 SweepClosure::~SweepClosure() {
7064   assert_lock_strong(_freelistLock);
7065   assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7066          "sweep _limit out of bounds");
7067   if (inFreeRange()) {
7068     LogHandle(gc, sweep) log;
7069     log.error("inFreeRange() should have been reset; dumping state of SweepClosure");
7070     ResourceMark rm;
7071     print_on(log.error_stream());
7072     ShouldNotReachHere();
7073   }
7074 
7075   if (log_is_enabled(Debug, gc, sweep)) {
7076     log_debug(gc, sweep)("Collected " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
7077                          _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
7078     log_debug(gc, sweep)("Live " SIZE_FORMAT " objects,  " SIZE_FORMAT " bytes  Already free " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
7079                          _numObjectsLive, _numWordsLive*sizeof(HeapWord), _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
7080     size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree) * sizeof(HeapWord);
7081     log_debug(gc, sweep)("Total sweep: " SIZE_FORMAT " bytes", totalBytes);
7082   }
7083 
7084   if (log_is_enabled(Trace, gc, sweep) && CMSVerifyReturnedBytes) {
7085     size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
7086     size_t dict_returned_bytes = _sp->dictionary()->sum_dict_returned_bytes();
7087     size_t returned_bytes = indexListReturnedBytes + dict_returned_bytes;
7088     log_trace(gc, sweep)("Returned " SIZE_FORMAT " bytes   Indexed List Returned " SIZE_FORMAT " bytes        Dictionary Returned " SIZE_FORMAT " bytes",
7089                          returned_bytes, indexListReturnedBytes, dict_returned_bytes);
7090   }
7091   log_develop_trace(gc, sweep)("end of sweep with _limit = " PTR_FORMAT, p2i(_limit));
7092   log_develop_trace(gc, sweep)("================");
7093 }
7094 #endif  // PRODUCT
7095 
7096 void SweepClosure::initialize_free_range(HeapWord* freeFinger,
7097     bool freeRangeInFreeLists) {
7098   log_develop_trace(gc, sweep)("---- Start free range at " PTR_FORMAT " with free block (%d)",
7099                                p2i(freeFinger), freeRangeInFreeLists);
7100   assert(!inFreeRange(), "Trampling existing free range");
7101   set_inFreeRange(true);
7102   set_lastFreeRangeCoalesced(false);
7103 
7104   set_freeFinger(freeFinger);
7105   set_freeRangeInFreeLists(freeRangeInFreeLists);
7106   if (CMSTestInFreeList) {
7107     if (freeRangeInFreeLists) {
7108       FreeChunk* fc = (FreeChunk*) freeFinger;
7109       assert(fc->is_free(), "A chunk on the free list should be free.");
7110       assert(fc->size() > 0, "Free range should have a size");
7111       assert(_sp->verify_chunk_in_free_list(fc), "Chunk is not in free lists");
7112     }
7113   }
7114 }
7115 
7116 // Note that the sweeper runs concurrently with mutators. Thus,
7117 // it is possible for direct allocation in this generation to happen
7118 // in the middle of the sweep. Note that the sweeper also coalesces
7119 // contiguous free blocks. Thus, unless the sweeper and the allocator
7120 // synchronize appropriately, freshly allocated blocks may get swept up.
7121 // This is accomplished by the sweeper locking the free lists while
7122 // it is sweeping. Thus blocks that are determined to be free are
7123 // indeed free. There is, however, one additional complication:
7124 // blocks that have been allocated since the final checkpoint and
7125 // mark, will not have been marked and so would be treated as
7126 // unreachable and swept up. To prevent this, the allocator marks
7127 // the bit map when allocating during the sweep phase. This leads,
7128 // however, to a further complication -- objects may have been allocated
7129 // but not yet initialized -- in the sense that the header isn't yet
7130 // installed. The sweeper cannot then determine the size of the block
7131 // in order to skip over it. To deal with this case, we use a technique
7132 // (due to Printezis) to encode such uninitialized block sizes in the
7133 // bit map. Since the bit map uses a bit per every HeapWord, but the
7134 // CMS generation has a minimum object size of 3 HeapWords, it follows
7135 // that "normal marks" won't be adjacent in the bit map (there will
7136 // always be at least two 0 bits between successive 1 bits). We make use
7137 // of these "unused" bits to represent uninitialized blocks -- the bit
7138 // corresponding to the start of the uninitialized object and the next
7139 // bit are both set. Finally, a 1 bit marks the end of the object that
7140 // started with the two consecutive 1 bits to indicate its potentially
7141 // uninitialized state.
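     //
     // Schematically (an illustrative sketch, not drawn from a real heap;
     // suppose an uninitialized block 8 words long):
     //
     //   word offset:  0  1  2  3  4  5  6  7
     //   bit map:      1  1  0  0  0  0  0  1
     //                 ^  ^                 ^
     //                 |  |                 +-- end-of-block mark on the
     //                 |  |                     block's last word
     //                 |  +-- second ("Printezis") bit
     //                 +-- ordinary mark bit for the block's start
     //
     // block_size_using_printezis_bits() can then recover the block's size
     // (8 words here) as the distance from the first bit to one past the
     // trailing bit.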
7142 
7143 size_t SweepClosure::do_blk_careful(HeapWord* addr) {
7144   FreeChunk* fc = (FreeChunk*)addr;
7145   size_t res;
7146 
7147   // Check if we are done sweeping. Below we check "addr >= _limit" rather
7148   // than "addr == _limit" because although _limit was a block boundary when
7149   // we started the sweep, it may no longer be one because heap expansion
7150   // may have caused us to coalesce the block ending at the address _limit
7151   // with a newly expanded chunk (this happens when _limit was set to the
7152   // previous _end of the space), so we may have stepped past _limit:
7153   // see the following Zeno-like trail of CRs 6977970, 7008136, 7042740.
7154   if (addr >= _limit) { // we have swept up to or past the limit: finish up
7155     assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
7156            "sweep _limit out of bounds");
7157     assert(addr < _sp->end(), "addr out of bounds");
7158     // Flush any free range we might be holding as a single
7159     // coalesced chunk to the appropriate free list.
7160     if (inFreeRange()) {
7161       assert(freeFinger() >= _sp->bottom() && freeFinger() < _limit,
7162              "freeFinger() " PTR_FORMAT " is out-of-bounds", p2i(freeFinger()));
7163       flush_cur_free_chunk(freeFinger(),
7164                            pointer_delta(addr, freeFinger()));
7165       log_develop_trace(gc, sweep)("Sweep: last chunk: put_free_blk " PTR_FORMAT " (" SIZE_FORMAT ") [coalesced:%d]",
7166                                    p2i(freeFinger()), pointer_delta(addr, freeFinger()),
7167                                    lastFreeRangeCoalesced() ? 1 : 0);
7168     }
7169 
7170     // help the iterator loop finish
7171     return pointer_delta(_sp->end(), addr);
7172   }
7173 
7174   assert(addr < _limit, "sweep invariant");
7175   // check if we should yield
7176   do_yield_check(addr);
7177   if (fc->is_free()) {
7178     // Chunk that is already free
7179     res = fc->size();
7180     do_already_free_chunk(fc);
7181     debug_only(_sp->verifyFreeLists());
7182     // If we flush the chunk at hand in lookahead_and_flush()
7183     // and it's coalesced with a preceding chunk, then the
7184     // process of "mangling" the payload of the coalesced block
7185     // will cause erasure of the size information from the
7186     // (erstwhile) header of all the coalesced blocks but the
7187     // first, so the first disjunct in the assert will not hold
7188     // in that specific case (in which case the second disjunct
7189     // will hold).
7190     assert(res == fc->size() || ((HeapWord*)fc) + res >= _limit,
7191            "Otherwise the size info doesn't change at this step");
7192     NOT_PRODUCT(
7193       _numObjectsAlreadyFree++;
7194       _numWordsAlreadyFree += res;
7195     )
7196     NOT_PRODUCT(_last_fc = fc;)
7197   } else if (!_bitMap->isMarked(addr)) {
7198     // Chunk is fresh garbage
7199     res = do_garbage_chunk(fc);
7200     debug_only(_sp->verifyFreeLists());
7201     NOT_PRODUCT(
7202       _numObjectsFreed++;
7203       _numWordsFreed += res;
7204     )
7205   } else {
7206     // Chunk that is alive.
7207     res = do_live_chunk(fc);
7208     debug_only(_sp->verifyFreeLists());
7209     NOT_PRODUCT(
7210         _numObjectsLive++;
7211         _numWordsLive += res;
7212     )
7213   }
7214   return res;
7215 }
7216 
7217 // For the smart allocation, record the following:
7218 //  split deaths - a free chunk is removed from its free list because
7219 //      it is being split into two or more chunks.
7220 //  split birth - a free chunk is being added to its free list because
7221 //      a larger free chunk has been split and resulted in this free chunk.
7222 //  coal death - a free chunk is being removed from its free list because
7223 //      it is being coalesced into a large free chunk.
7224 //  coal birth - a free chunk is being added to its free list because
7225 //      it was created when two or more free chunks were coalesced into
7226 //      this free chunk.
7227 //
7228 // These statistics are used to determine the desired number of free
7229 // chunks of a given size.  The desired number is chosen to be relative
7230 // to the end of a CMS sweep.  The desired number at the end of a sweep
7231 // is the
7232 //      count-at-end-of-previous-sweep (an amount that was enough)
7233 //              - count-at-beginning-of-current-sweep  (the excess)
7234 //              + split-births  (gains in this size during interval)
7235 //              - split-deaths  (demands on this size during interval)
7236 // where the interval is from the end of one sweep to the end of the
7237 // next.
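     // A purely illustrative example of the arithmetic above: if the
     // previous sweep ended with 100 free chunks of some size, this sweep
     // began with 20 of them still on the list, and the interval saw 30
     // split-births and 10 split-deaths, then the desired count at the end
     // of this sweep is 100 - 20 + 30 - 10 = 100.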
7238 //
7239 // When sweeping the sweeper maintains an accumulated chunk which is
7240 // the chunk that is made up of chunks that have been coalesced.  That
7241 // will be termed the left-hand chunk.  A new chunk of garbage that
7242 // is being considered for coalescing will be referred to as the
7243 // right-hand chunk.
7244 //
7245 // When making a decision on whether to coalesce a right-hand chunk with
7246 // the current left-hand chunk, the current count vs. the desired count
7247 // of the left-hand chunk is considered.  Also if the right-hand chunk
7248 // is near the large chunk at the end of the heap (see
7249 // ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the
7250 // left-hand chunk is coalesced.
7251 //
7252 // When making a decision about whether to split a chunk, the desired count
7253 // vs. the current count of the candidate to be split is also considered.
7254 // If the candidate is underpopulated (currently fewer chunks than desired)
7255 // a chunk of an overpopulated (currently more chunks than desired) size may
7256 // be chosen.  The "hint" associated with a free list, if non-null, points
7257 // to a free list which may be overpopulated.
7258 //
7259 
7260 void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
7261   const size_t size = fc->size();
7262   // Chunks that cannot be coalesced are not in the
7263   // free lists.
7264   if (CMSTestInFreeList && !fc->cantCoalesce()) {
7265     assert(_sp->verify_chunk_in_free_list(fc),
7266            "free chunk should be in free lists");
7267   }
7268   // a chunk that is already free should not have been
7269   // marked in the bit map
7270   HeapWord* const addr = (HeapWord*) fc;
7271   assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
7272   // Verify that the bit map has no bits marked between
7273   // addr and purported end of this block.
7274   _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7275 
7276   // Some chunks cannot be coalesced under any circumstances.
7277   // See the definition of cantCoalesce().
7278   if (!fc->cantCoalesce()) {
7279     // This chunk can potentially be coalesced.
7280     // All the work is done in
7281     do_post_free_or_garbage_chunk(fc, size);
7282     // Note that if the chunk is not coalescable (the else arm
7283     // below), we unconditionally flush, without needing to do
7284     // a "lookahead," as we do below.
7285     if (inFreeRange()) lookahead_and_flush(fc, size);
7286   } else {
7287     // Code path common to both original and adaptive free lists.
7288 
7289     // can't coalesce with previous block; this should be treated
7290     // as the end of a free run, if any
7291     if (inFreeRange()) {
7292       // we kicked some butt; time to pick up the garbage
7293       assert(freeFinger() < addr, "freeFinger points too high");
7294       flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
7295     }
7296     // else, nothing to do, just continue
7297   }
7298 }
7299 
7300 size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
7301   // This is a chunk of garbage.  It is not in any free list.
7302   // Add it to a free list or let it possibly be coalesced into
7303   // a larger chunk.
7304   HeapWord* const addr = (HeapWord*) fc;
7305   const size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
7306 
7307   // Verify that the bit map has no bits marked between
7308   // addr and purported end of just dead object.
7309   _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7310   do_post_free_or_garbage_chunk(fc, size);
7311 
7312   assert(_limit >= addr + size,
7313          "A freshly garbage chunk can't possibly straddle over _limit");
7314   if (inFreeRange()) lookahead_and_flush(fc, size);
7315   return size;
7316 }
7317 
7318 size_t SweepClosure::do_live_chunk(FreeChunk* fc) {
7319   HeapWord* addr = (HeapWord*) fc;
7320   // The sweeper has just found a live object. Return any accumulated
7321   // left hand chunk to the free lists.
7322   if (inFreeRange()) {
7323     assert(freeFinger() < addr, "freeFinger points too high");
7324     flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
7325   }
7326 
7327   // This object is live: we'd normally expect this to be
7328   // an oop, and would like to assert the following:
7329   // assert(oop(addr)->is_oop(), "live block should be an oop");
7330   // However, as we commented above, this may be an object whose
7331   // header hasn't yet been initialized.
7332   size_t size;
7333   assert(_bitMap->isMarked(addr), "Tautology for this control point");
7334   if (_bitMap->isMarked(addr + 1)) {
7335     // Determine the size from the bit map, rather than trying to
7336     // compute it from the object header.
7337     HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
7338     size = pointer_delta(nextOneAddr + 1, addr);
7339     assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
7340            "alignment problem");
7341 
7342 #ifdef ASSERT
7343       if (oop(addr)->klass_or_null() != NULL) {
7344         // Ignore mark word because we are running concurrent with mutators
7345         assert(oop(addr)->is_oop(true), "live block should be an oop");
7346         assert(size ==
7347                CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
7348                "P-mark and computed size do not agree");
7349       }
7350 #endif
7351 
7352   } else {
7353     // This should be an initialized object that's alive.
7354     assert(oop(addr)->klass_or_null() != NULL,
7355            "Should be an initialized object");
7356     // Ignore mark word because we are running concurrent with mutators
7357     assert(oop(addr)->is_oop(true), "live block should be an oop");
7358     // Verify that the bit map has no bits marked between
7359     // addr and purported end of this block.
7360     size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
7361     assert(size >= 3, "Necessary for Printezis marks to work");
7362     assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
7363     DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
7364   }
7365   return size;
7366 }
7367 
7368 void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
7369                                                  size_t chunkSize) {
7370   // do_post_free_or_garbage_chunk() should only be called in the case
7371   // of the adaptive free list allocator.
7372   const bool fcInFreeLists = fc->is_free();
7373   assert((HeapWord*)fc <= _limit, "sweep invariant");
7374   if (CMSTestInFreeList && fcInFreeLists) {
7375     assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists");
7376   }
7377 
7378   log_develop_trace(gc, sweep)("  -- pick up another chunk at " PTR_FORMAT " (" SIZE_FORMAT ")", p2i(fc), chunkSize);
7379 
7380   HeapWord* const fc_addr = (HeapWord*) fc;
7381 
7382   bool coalesce = false;
7383   const size_t left  = pointer_delta(fc_addr, freeFinger());
7384   const size_t right = chunkSize;
7385   switch (FLSCoalescePolicy) {
7386     // numeric value forms a coalescing aggressiveness metric
7387     case 0:  { // never coalesce
7388       coalesce = false;
7389       break;
7390     }
7391     case 1: { // coalesce if left & right chunks on overpopulated lists
7392       coalesce = _sp->coalOverPopulated(left) &&
7393                  _sp->coalOverPopulated(right);
7394       break;
7395     }
7396     case 2: { // coalesce if left chunk on overpopulated list (default)
7397       coalesce = _sp->coalOverPopulated(left);
7398       break;
7399     }
7400     case 3: { // coalesce if left OR right chunk on overpopulated list
7401       coalesce = _sp->coalOverPopulated(left) ||
7402                  _sp->coalOverPopulated(right);
7403       break;
7404     }
7405     case 4: { // always coalesce
7406       coalesce = true;
7407       break;
7408     }
7409     default:
7410      ShouldNotReachHere();
7411   }
7412 
7413   // Should the current free range be coalesced?
7414   // If the chunk is in a free range and either we decided to coalesce above
7415   // or the chunk is near the large block at the end of the heap
7416   // (isNearLargestChunk() returns true), then coalesce this chunk.
7417   const bool doCoalesce = inFreeRange()
7418                           && (coalesce || _g->isNearLargestChunk(fc_addr));
7419   if (doCoalesce) {
7420     // Coalesce the current free range on the left with the new
7421     // chunk on the right.  If either is on a free list,
7422     // it must be removed from the list and stashed in the closure.
7423     if (freeRangeInFreeLists()) {
7424       FreeChunk* const ffc = (FreeChunk*)freeFinger();
7425       assert(ffc->size() == pointer_delta(fc_addr, freeFinger()),
7426              "Size of free range is inconsistent with chunk size.");
7427       if (CMSTestInFreeList) {
7428         assert(_sp->verify_chunk_in_free_list(ffc),
7429                "Chunk is not in free lists");
7430       }
7431       _sp->coalDeath(ffc->size());
7432       _sp->removeFreeChunkFromFreeLists(ffc);
7433       set_freeRangeInFreeLists(false);
7434     }
7435     if (fcInFreeLists) {
7436       _sp->coalDeath(chunkSize);
7437       assert(fc->size() == chunkSize,
7438         "The chunk has the wrong size or is not in the free lists");
7439       _sp->removeFreeChunkFromFreeLists(fc);
7440     }
7441     set_lastFreeRangeCoalesced(true);
7442     print_free_block_coalesced(fc);
7443   } else {  // not in a free range and/or should not coalesce
7444     // Return the current free range and start a new one.
7445     if (inFreeRange()) {
7446       // In a free range but cannot coalesce with the right hand chunk.
7447       // Put the current free range into the free lists.
7448       flush_cur_free_chunk(freeFinger(),
7449                            pointer_delta(fc_addr, freeFinger()));
7450     }
7451     // Set up for new free range.  Pass along whether the right hand
7452     // chunk is in the free lists.
7453     initialize_free_range((HeapWord*)fc, fcInFreeLists);
7454   }
7455 }
7456 
7457 // Lookahead flush:
7458 // If we are tracking a free range, and this is the last chunk that
7459 // we'll look at because its end crosses past _limit, we'll preemptively
7460 // flush it along with any free range we may be holding on to. Note that
7461 // this can be the case only for an already free or freshly garbage
7462 // chunk. If this block is an object, it can never straddle
7463 // over _limit. The "straddling" occurs when _limit is set at
7464 // the previous end of the space when this cycle started, and
7465 // a subsequent heap expansion caused the previously co-terminal
7466 // free block to be coalesced with the newly expanded portion,
7467 // thus rendering _limit a non-block-boundary making it dangerous
7468 // for the sweeper to step over and examine.
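     //
     // Illustration (hypothetical addresses): suppose the sweep began with
     // _limit at the then-end of the space, 0x1000, and a free block ended
     // exactly there. If the heap later expanded to 0x2000 and that block
     // was coalesced with the newly added portion, a single free chunk may
     // now span, say, [0xf00, 0x2000), so its end (eob == 0x2000) lies past
     // _limit and the range we are tracking must be flushed preemptively.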
7469 void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
7470   assert(inFreeRange(), "Should only be called if currently in a free range.");
7471   HeapWord* const eob = ((HeapWord*)fc) + chunk_size;
7472   assert(_sp->used_region().contains(eob - 1),
7473          "eob = " PTR_FORMAT " eob-1 = " PTR_FORMAT " _limit = " PTR_FORMAT
7474          " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
7475          " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
7476          p2i(eob), p2i(eob-1), p2i(_limit), p2i(_sp->bottom()), p2i(_sp->end()), p2i(fc), chunk_size);
7477   if (eob >= _limit) {
7478     assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit");
7479     log_develop_trace(gc, sweep)("_limit " PTR_FORMAT " reached or crossed by block "
7480                                  "[" PTR_FORMAT "," PTR_FORMAT ") in space "
7481                                  "[" PTR_FORMAT "," PTR_FORMAT ")",
7482                                  p2i(_limit), p2i(fc), p2i(eob), p2i(_sp->bottom()), p2i(_sp->end()));
7483     // Return the storage we are tracking back into the free lists.
7484     log_develop_trace(gc, sweep)("Flushing ... ");
7485     assert(freeFinger() < eob, "Error");
7486     flush_cur_free_chunk(freeFinger(), pointer_delta(eob, freeFinger()));
7487   }
7488 }
7489 
7490 void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
7491   assert(inFreeRange(), "Should only be called if currently in a free range.");
7492   assert(size > 0,
7493     "A zero sized chunk cannot be added to the free lists.");
7494   if (!freeRangeInFreeLists()) {
7495     if (CMSTestInFreeList) {
7496       FreeChunk* fc = (FreeChunk*) chunk;
7497       fc->set_size(size);
7498       assert(!_sp->verify_chunk_in_free_list(fc),
7499              "chunk should not be in free lists yet");
7500     }
7501     log_develop_trace(gc, sweep)(" -- add free block " PTR_FORMAT " (" SIZE_FORMAT ") to free lists", p2i(chunk), size);
7502     // A new free range is going to be starting.  The current
7503     // free range has not been added to the free lists yet or
7504     // was removed, so add it back.
7505     // If the current free range was coalesced, then the death
7506     // of the free range was recorded.  Record a birth now.
7507     if (lastFreeRangeCoalesced()) {
7508       _sp->coalBirth(size);
7509     }
7510     _sp->addChunkAndRepairOffsetTable(chunk, size,
7511             lastFreeRangeCoalesced());
7512   } else {
7513     log_develop_trace(gc, sweep)("Already in free list: nothing to flush");
7514   }
7515   set_inFreeRange(false);
7516   set_freeRangeInFreeLists(false);
7517 }
7518 
7519 // We take a break if we've been at this for a while,
7520 // so as to avoid monopolizing the locks involved.
7521 void SweepClosure::do_yield_work(HeapWord* addr) {
7522   // Return current free chunk being used for coalescing (if any)
7523   // to the appropriate freelist.  After yielding, the next
7524   // free block encountered will start a coalescing range of
7525   // free blocks.  If the next free block is adjacent to the
7526   // chunk just flushed, they will need to wait for the next
7527   // sweep to be coalesced.
7528   if (inFreeRange()) {
7529     flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
7530   }
7531 
7532   // First give up the locks, then yield, then re-lock.
7533   // We should probably use a constructor/destructor idiom to
7534   // do this unlock/lock or modify the MutexUnlocker class to
7535   // serve our purpose. XXX
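  // (Sketch only, following the XXX above; no such helper exists in the
  //  code today. A hypothetical scoped unlocker might look like:
  //
  //    class CMSYieldScope : public StackObj {   // hypothetical name
  //     public:
  //      CMSYieldScope()  { /* unlock bitmap/freelist locks, desynchronize, stop timer */ }
  //      ~CMSYieldScope() { /* synchronize, relock, restart timer */ }
  //    };
  //
  //  For now, the sequence is performed explicitly below.)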
7536   assert_lock_strong(_bitMap->lock());
7537   assert_lock_strong(_freelistLock);
7538   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7539          "CMS thread should hold CMS token");
7540   _bitMap->lock()->unlock();
7541   _freelistLock->unlock();
7542   ConcurrentMarkSweepThread::desynchronize(true);
7543   _collector->stopTimer();
7544   _collector->incrementYields();
7545 
7546   // See the comment in coordinator_yield()
7547   for (unsigned i = 0; i < CMSYieldSleepCount &&
7548                        ConcurrentMarkSweepThread::should_yield() &&
7549                        !CMSCollector::foregroundGCIsActive(); ++i) {
7550     os::sleep(Thread::current(), 1, false);
7551   }
7552 
7553   ConcurrentMarkSweepThread::synchronize(true);
7554   _freelistLock->lock();
7555   _bitMap->lock()->lock_without_safepoint_check();
7556   _collector->startTimer();
7557 }
7558 
7559 #ifndef PRODUCT
7560 // This is actually very useful in a product build if it can
7561 // be called from the debugger.  Compile it into the product
7562 // as needed.
7563 bool debug_verify_chunk_in_free_list(FreeChunk* fc) {
7564   return debug_cms_space->verify_chunk_in_free_list(fc);
7565 }
7566 #endif
7567 
7568 void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const {
7569   log_develop_trace(gc, sweep)("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")",
7570                                p2i(fc), fc->size());
7571 }
7572 
7573 // CMSIsAliveClosure
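// An object is treated as dead only if it lies within the CMS span and
// is not marked in the bit map; anything outside the span is
// conservatively considered live by this closure.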
7574 bool CMSIsAliveClosure::do_object_b(oop obj) {
7575   HeapWord* addr = (HeapWord*)obj;
7576   return addr != NULL &&
7577          (!_span.contains(addr) || _bit_map->isMarked(addr));
7578 }
7579 
7580 
CMSKeepAliveClosure::CMSKeepAliveClosure(CMSCollector* collector,
7582                       MemRegion span,
7583                       CMSBitMap* bit_map, CMSMarkStack* mark_stack,
7584                       bool cpc):
7585   _collector(collector),
7586   _span(span),
7587   _bit_map(bit_map),
7588   _mark_stack(mark_stack),
7589   _concurrent_precleaning(cpc) {
7590   assert(!_span.is_empty(), "Empty span could spell trouble");
7591 }
7592 
7593 
7594 // CMSKeepAliveClosure: the serial version
7595 void CMSKeepAliveClosure::do_oop(oop obj) {
7596   HeapWord* addr = (HeapWord*)obj;
7597   if (_span.contains(addr) &&
7598       !_bit_map->isMarked(addr)) {
7599     _bit_map->mark(addr);
7600     bool simulate_overflow = false;
7601     NOT_PRODUCT(
7602       if (CMSMarkStackOverflowALot &&
7603           _collector->simulate_overflow()) {
7604         // simulate a stack overflow
7605         simulate_overflow = true;
7606       }
7607     )
7608     if (simulate_overflow || !_mark_stack->push(obj)) {
7609       if (_concurrent_precleaning) {
7610         // We dirty the overflown object and let the remark
7611         // phase deal with it.
7612         assert(_collector->overflow_list_is_empty(), "Error");
7613         // In the case of object arrays, we need to dirty all of
7614         // the cards that the object spans. No locking or atomics
7615         // are needed since no one else can be mutating the mod union
7616         // table.
7617         if (obj->is_objArray()) {
7618           size_t sz = obj->size();
7619           HeapWord* end_card_addr =
7620             (HeapWord*)round_to((intptr_t)(addr+sz), CardTableModRefBS::card_size);
7621           MemRegion redirty_range = MemRegion(addr, end_card_addr);
7622           assert(!redirty_range.is_empty(), "Arithmetical tautology");
7623           _collector->_modUnionTable.mark_range(redirty_range);
7624         } else {
7625           _collector->_modUnionTable.mark(addr);
7626         }
7627         _collector->_ser_kac_preclean_ovflw++;
7628       } else {
7629         _collector->push_on_overflow_list(obj);
7630         _collector->_ser_kac_ovflw++;
7631       }
7632     }
7633   }
7634 }
7635 
7636 void CMSKeepAliveClosure::do_oop(oop* p)       { CMSKeepAliveClosure::do_oop_work(p); }
7637 void CMSKeepAliveClosure::do_oop(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
7638 
7639 // CMSParKeepAliveClosure: a parallel version of the above.
// The work queues are private to each closure (thread),
// but may be available for stealing by other threads.
7642 void CMSParKeepAliveClosure::do_oop(oop obj) {
7643   HeapWord* addr = (HeapWord*)obj;
7644   if (_span.contains(addr) &&
7645       !_bit_map->isMarked(addr)) {
7646     // In general, during recursive tracing, several threads
7647     // may be concurrently getting here; the first one to
7648     // "tag" it, claims it.
7649     if (_bit_map->par_mark(addr)) {
7650       bool res = _work_queue->push(obj);
7651       assert(res, "Low water mark should be much less than capacity");
7652       // Do a recursive trim in the hope that this will keep
7653       // stack usage lower, but leave some oops for potential stealers
7654       trim_queue(_low_water_mark);
7655     } // Else, another thread got there first
7656   }
7657 }
7658 
7659 void CMSParKeepAliveClosure::do_oop(oop* p)       { CMSParKeepAliveClosure::do_oop_work(p); }
7660 void CMSParKeepAliveClosure::do_oop(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
7661 
7662 void CMSParKeepAliveClosure::trim_queue(uint max) {
7663   while (_work_queue->size() > max) {
7664     oop new_oop;
7665     if (_work_queue->pop_local(new_oop)) {
7666       assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
7667       assert(_bit_map->isMarked((HeapWord*)new_oop),
7668              "no white objects on this stack!");
7669       assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
7670       // iterate over the oops in this oop, marking and pushing
7671       // the ones in CMS heap (i.e. in _span).
7672       new_oop->oop_iterate(&_mark_and_push);
7673     }
7674   }
7675 }
7676 
7677 CMSInnerParMarkAndPushClosure::CMSInnerParMarkAndPushClosure(
7678                                 CMSCollector* collector,
7679                                 MemRegion span, CMSBitMap* bit_map,
7680                                 OopTaskQueue* work_queue):
7681   _collector(collector),
7682   _span(span),
7683   _bit_map(bit_map),
7684   _work_queue(work_queue) { }
7685 
7686 void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
7687   HeapWord* addr = (HeapWord*)obj;
7688   if (_span.contains(addr) &&
7689       !_bit_map->isMarked(addr)) {
7690     if (_bit_map->par_mark(addr)) {
7691       bool simulate_overflow = false;
7692       NOT_PRODUCT(
7693         if (CMSMarkStackOverflowALot &&
7694             _collector->par_simulate_overflow()) {
7695           // simulate a stack overflow
7696           simulate_overflow = true;
7697         }
7698       )
7699       if (simulate_overflow || !_work_queue->push(obj)) {
7700         _collector->par_push_on_overflow_list(obj);
7701         _collector->_par_kac_ovflw++;
7702       }
7703     } // Else another thread got there already
7704   }
7705 }
7706 
7707 void CMSInnerParMarkAndPushClosure::do_oop(oop* p)       { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
7708 void CMSInnerParMarkAndPushClosure::do_oop(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
7709 
7710 //////////////////////////////////////////////////////////////////
7711 //  CMSExpansionCause                /////////////////////////////
7712 //////////////////////////////////////////////////////////////////
7713 const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) {
7714   switch (cause) {
7715     case _no_expansion:
7716       return "No expansion";
7717     case _satisfy_free_ratio:
7718       return "Free ratio";
7719     case _satisfy_promotion:
7720       return "Satisfy promotion";
7721     case _satisfy_allocation:
      return "Allocation";
7723     case _allocate_par_lab:
7724       return "Par LAB";
7725     case _allocate_par_spooling_space:
7726       return "Par Spooling Space";
7727     case _adaptive_size_policy:
7728       return "Ergonomics";
7729     default:
7730       return "unknown";
7731   }
7732 }
7733 
7734 void CMSDrainMarkingStackClosure::do_void() {
7735   // the max number to take from overflow list at a time
7736   const size_t num = _mark_stack->capacity()/4;
7737   assert(!_concurrent_precleaning || _collector->overflow_list_is_empty(),
7738          "Overflow list should be NULL during concurrent phases");
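  // Note the short-circuit in the loop condition: the overflow list is
  // consulted only once the mark stack has been drained, at which point
  // up to 'num' entries are transferred back onto the stack.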
7739   while (!_mark_stack->isEmpty() ||
7740          // if stack is empty, check the overflow list
7741          _collector->take_from_overflow_list(num, _mark_stack)) {
7742     oop obj = _mark_stack->pop();
7743     HeapWord* addr = (HeapWord*)obj;
7744     assert(_span.contains(addr), "Should be within span");
7745     assert(_bit_map->isMarked(addr), "Should be marked");
7746     assert(obj->is_oop(), "Should be an oop");
7747     obj->oop_iterate(_keep_alive);
7748   }
7749 }
7750 
7751 void CMSParDrainMarkingStackClosure::do_void() {
7752   // drain queue
7753   trim_queue(0);
7754 }
7755 
7756 // Trim our work_queue so its length is below max at return
7757 void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
7758   while (_work_queue->size() > max) {
7759     oop new_oop;
7760     if (_work_queue->pop_local(new_oop)) {
7761       assert(new_oop->is_oop(), "Expected an oop");
7762       assert(_bit_map->isMarked((HeapWord*)new_oop),
7763              "no white objects on this stack!");
7764       assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
7765       // iterate over the oops in this oop, marking and pushing
7766       // the ones in CMS heap (i.e. in _span).
7767       new_oop->oop_iterate(&_mark_and_push);
7768     }
7769   }
7770 }
7771 
7772 ////////////////////////////////////////////////////////////////////
7773 // Support for Marking Stack Overflow list handling and related code
7774 ////////////////////////////////////////////////////////////////////
7775 // Much of the following code is similar in shape and spirit to the
7776 // code used in ParNewGC. We should try and share that code
7777 // as much as possible in the future.
7778 
7779 #ifndef PRODUCT
7780 // Debugging support for CMSStackOverflowALot
7781 
// It's OK to call this multi-threaded; the worst thing
// that can happen is that we'll get a bunch of closely
// spaced simulated overflows, but that's fine; in fact it's
// probably good, as it would exercise the overflow code
// under contention.
7787 bool CMSCollector::simulate_overflow() {
7788   if (_overflow_counter-- <= 0) { // just being defensive
7789     _overflow_counter = CMSMarkStackOverflowInterval;
7790     return true;
7791   } else {
7792     return false;
7793   }
7794 }
7795 
7796 bool CMSCollector::par_simulate_overflow() {
7797   return simulate_overflow();
7798 }
7799 #endif
7800 
7801 // Single-threaded
7802 bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
7803   assert(stack->isEmpty(), "Expected precondition");
7804   assert(stack->capacity() > num, "Shouldn't bite more than can chew");
7805   size_t i = num;
7806   oop  cur = _overflow_list;
7807   const markOop proto = markOopDesc::prototype();
7808   NOT_PRODUCT(ssize_t n = 0;)
7809   for (oop next; i > 0 && cur != NULL; cur = next, i--) {
7810     next = oop(cur->mark());
7811     cur->set_mark(proto);   // until proven otherwise
7812     assert(cur->is_oop(), "Should be an oop");
7813     bool res = stack->push(cur);
7814     assert(res, "Bit off more than can chew?");
7815     NOT_PRODUCT(n++;)
7816   }
7817   _overflow_list = cur;
7818 #ifndef PRODUCT
7819   assert(_num_par_pushes >= n, "Too many pops?");
  _num_par_pushes -= n;
7821 #endif
7822   return !stack->isEmpty();
7823 }
7824 
7825 #define BUSY  (cast_to_oop<intptr_t>(0x1aff1aff))
7826 // (MT-safe) Get a prefix of at most "num" from the list.
7827 // The overflow list is chained through the mark word of
7828 // each object in the list. We fetch the entire list,
7829 // break off a prefix of the right size and return the
7830 // remainder. If other threads try to take objects from
7831 // the overflow list at that time, they will wait for
7832 // some time to see if data becomes available. If (and
7833 // only if) another thread places one or more object(s)
7834 // on the global list before we have returned the suffix
7835 // to the global list, we will walk down our local list
7836 // to find its end and append the global list to
7837 // our suffix before returning it. This suffix walk can
7838 // prove to be expensive (quadratic in the amount of traffic)
7839 // when there are many objects in the overflow list and
7840 // there is much producer-consumer contention on the list.
7841 // *NOTE*: The overflow list manipulation code here and
7842 // in ParNewGeneration:: are very similar in shape,
7843 // except that in the ParNew case we use the old (from/eden)
7844 // copy of the object to thread the list via its klass word.
7845 // Because of the common code, if you make any changes in
7846 // the code below, please check the ParNew version to see if
7847 // similar changes might be needed.
7848 // CR 6797058 has been filed to consolidate the common code.
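// Schematically, the list is threaded through the mark words
// (illustration only):
//
//   _overflow_list -> [objA: mark=objB] -> [objB: mark=objC] -> ... -> [objZ: mark=NULL]
//
// and BUSY is a distinguished non-oop sentinel swapped into
// _overflow_list while a thread holds the detached chain.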
7849 bool CMSCollector::par_take_from_overflow_list(size_t num,
7850                                                OopTaskQueue* work_q,
7851                                                int no_of_gc_threads) {
7852   assert(work_q->size() == 0, "First empty local work queue");
7853   assert(num < work_q->max_elems(), "Can't bite more than we can chew");
7854   if (_overflow_list == NULL) {
7855     return false;
7856   }
7857   // Grab the entire list; we'll put back a suffix
7858   oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
7859   Thread* tid = Thread::current();
7860   // Before "no_of_gc_threads" was introduced CMSOverflowSpinCount was
7861   // set to ParallelGCThreads.
7862   size_t CMSOverflowSpinCount = (size_t) no_of_gc_threads; // was ParallelGCThreads;
7863   size_t sleep_time_millis = MAX2((size_t)1, num/100);
7864   // If the list is busy, we spin for a short while,
7865   // sleeping between attempts to get the list.
7866   for (size_t spin = 0; prefix == BUSY && spin < CMSOverflowSpinCount; spin++) {
7867     os::sleep(tid, sleep_time_millis, false);
7868     if (_overflow_list == NULL) {
7869       // Nothing left to take
7870       return false;
7871     } else if (_overflow_list != BUSY) {
7872       // Try and grab the prefix
7873       prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
7874     }
7875   }
7876   // If the list was found to be empty, or we spun long
7877   // enough, we give up and return empty-handed. If we leave
7878   // the list in the BUSY state below, it must be the case that
7879   // some other thread holds the overflow list and will set it
7880   // to a non-BUSY state in the future.
7881   if (prefix == NULL || prefix == BUSY) {
7882      // Nothing to take or waited long enough
7883      if (prefix == NULL) {
7884        // Write back the NULL in case we overwrote it with BUSY above
7885        // and it is still the same value.
7886        (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
7887      }
7888      return false;
7889   }
7890   assert(prefix != NULL && prefix != BUSY, "Error");
7891   size_t i = num;
7892   oop cur = prefix;
7893   // Walk down the first "num" objects, unless we reach the end.
7894   for (; i > 1 && cur->mark() != NULL; cur = oop(cur->mark()), i--);
7895   if (cur->mark() == NULL) {
7896     // We have "num" or fewer elements in the list, so there
7897     // is nothing to return to the global list.
7898     // Write back the NULL in lieu of the BUSY we wrote
7899     // above, if it is still the same value.
7900     if (_overflow_list == BUSY) {
7901       (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
7902     }
7903   } else {
7904     // Chop off the suffix and return it to the global list.
7905     assert(cur->mark() != BUSY, "Error");
7906     oop suffix_head = cur->mark(); // suffix will be put back on global list
7907     cur->set_mark(NULL);           // break off suffix
    // It's possible that the list is still in the empty (BUSY) state
7909     // we left it in a short while ago; in that case we may be
7910     // able to place back the suffix without incurring the cost
7911     // of a walk down the list.
7912     oop observed_overflow_list = _overflow_list;
7913     oop cur_overflow_list = observed_overflow_list;
7914     bool attached = false;
7915     while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
7916       observed_overflow_list =
7917         (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
      if (cur_overflow_list == observed_overflow_list) {
        attached = true;
        break;
      } else {
        cur_overflow_list = observed_overflow_list;
      }
7922     }
7923     if (!attached) {
7924       // Too bad, someone else sneaked in (at least) an element; we'll need
7925       // to do a splice. Find tail of suffix so we can prepend suffix to global
7926       // list.
7927       for (cur = suffix_head; cur->mark() != NULL; cur = (oop)(cur->mark()));
7928       oop suffix_tail = cur;
7929       assert(suffix_tail != NULL && suffix_tail->mark() == NULL,
7930              "Tautology");
7931       observed_overflow_list = _overflow_list;
7932       do {
7933         cur_overflow_list = observed_overflow_list;
7934         if (cur_overflow_list != BUSY) {
7935           // Do the splice ...
7936           suffix_tail->set_mark(markOop(cur_overflow_list));
7937         } else { // cur_overflow_list == BUSY
7938           suffix_tail->set_mark(NULL);
7939         }
7940         // ... and try to place spliced list back on overflow_list ...
7941         observed_overflow_list =
7942           (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
7943       } while (cur_overflow_list != observed_overflow_list);
7944       // ... until we have succeeded in doing so.
7945     }
7946   }
7947 
7948   // Push the prefix elements on work_q
7949   assert(prefix != NULL, "control point invariant");
7950   const markOop proto = markOopDesc::prototype();
7951   oop next;
7952   NOT_PRODUCT(ssize_t n = 0;)
7953   for (cur = prefix; cur != NULL; cur = next) {
7954     next = oop(cur->mark());
7955     cur->set_mark(proto);   // until proven otherwise
7956     assert(cur->is_oop(), "Should be an oop");
7957     bool res = work_q->push(cur);
7958     assert(res, "Bit off more than we can chew?");
7959     NOT_PRODUCT(n++;)
7960   }
7961 #ifndef PRODUCT
7962   assert(_num_par_pushes >= n, "Too many pops?");
7963   Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
7964 #endif
7965   return true;
7966 }
7967 
7968 // Single-threaded
7969 void CMSCollector::push_on_overflow_list(oop p) {
7970   NOT_PRODUCT(_num_par_pushes++;)
7971   assert(p->is_oop(), "Not an oop");
7972   preserve_mark_if_necessary(p);
7973   p->set_mark((markOop)_overflow_list);
7974   _overflow_list = p;
7975 }
7976 
7977 // Multi-threaded; use CAS to prepend to overflow list
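// The CAS loop below is a standard lock-free prepend: link the new head
// to the observed head (or to NULL when the observed head is the BUSY
// sentinel, which must never leak into a mark word), then retry until
// the CAS succeeds against an unchanged list head.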
7978 void CMSCollector::par_push_on_overflow_list(oop p) {
7979   NOT_PRODUCT(Atomic::inc_ptr(&_num_par_pushes);)
7980   assert(p->is_oop(), "Not an oop");
7981   par_preserve_mark_if_necessary(p);
7982   oop observed_overflow_list = _overflow_list;
7983   oop cur_overflow_list;
7984   do {
7985     cur_overflow_list = observed_overflow_list;
7986     if (cur_overflow_list != BUSY) {
7987       p->set_mark(markOop(cur_overflow_list));
7988     } else {
7989       p->set_mark(NULL);
7990     }
7991     observed_overflow_list =
7992       (oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
7993   } while (cur_overflow_list != observed_overflow_list);
7994 }
7995 #undef BUSY
7996 
7997 // Single threaded
7998 // General Note on GrowableArray: pushes may silently fail
7999 // because we are (temporarily) out of C-heap for expanding
8000 // the stack. The problem is quite ubiquitous and affects
8001 // a lot of code in the JVM. The prudent thing for GrowableArray
8002 // to do (for now) is to exit with an error. However, that may
8003 // be too draconian in some cases because the caller may be
8004 // able to recover without much harm. For such cases, we
8005 // should probably introduce a "soft_push" method which returns
8006 // an indication of success or failure with the assumption that
8007 // the caller may be able to recover from a failure; code in
8008 // the VM can then be changed, incrementally, to deal with such
8009 // failures where possible, thus, incrementally hardening the VM
8010 // in such low resource situations.
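// (Sketch only: such a hypothetical method might have the shape
//    bool soft_push(const E& elem);  // false on allocation failure
//  letting callers recover instead of exiting; no such method exists yet.)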
8011 void CMSCollector::preserve_mark_work(oop p, markOop m) {
8012   _preserved_oop_stack.push(p);
8013   _preserved_mark_stack.push(m);
8014   assert(m == p->mark(), "Mark word changed");
8015   assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
8016          "bijection");
8017 }
8018 
8019 // Single threaded
8020 void CMSCollector::preserve_mark_if_necessary(oop p) {
8021   markOop m = p->mark();
8022   if (m->must_be_preserved(p)) {
8023     preserve_mark_work(p, m);
8024   }
8025 }
8026 
8027 void CMSCollector::par_preserve_mark_if_necessary(oop p) {
8028   markOop m = p->mark();
8029   if (m->must_be_preserved(p)) {
8030     MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
8031     // Even though we read the mark word without holding
8032     // the lock, we are assured that it will not change
8033     // because we "own" this oop, so no other thread can
8034     // be trying to push it on the overflow list; see
8035     // the assertion in preserve_mark_work() that checks
8036     // that m == p->mark().
8037     preserve_mark_work(p, m);
8038   }
8039 }
8040 
// We should be able to do this multi-threaded,
// a chunk of stack being a task (this is
// correct because each oop only ever appears
// once in the overflow list). However, it's
// not very easy to completely overlap this with
// other operations, so it will generally not be done
// until all work's been completed. Because we
// expect the preserved oop stack (set) to be small,
// it's probably fine to do this single-threaded.
8050 // We can explore cleverer concurrent/overlapped/parallel
8051 // processing of preserved marks if we feel the
// need for this in the future. Stack overflow is
// expected to be so rare in practice, and its overall
// effect on performance when it does occur so large,
// that this restore cost will likely be lost in the noise anyway.
8056 void CMSCollector::restore_preserved_marks_if_any() {
8057   assert(SafepointSynchronize::is_at_safepoint(),
8058          "world should be stopped");
8059   assert(Thread::current()->is_ConcurrentGC_thread() ||
8060          Thread::current()->is_VM_thread(),
8061          "should be single-threaded");
8062   assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
8063          "bijection");
8064 
8065   while (!_preserved_oop_stack.is_empty()) {
8066     oop p = _preserved_oop_stack.pop();
8067     assert(p->is_oop(), "Should be an oop");
8068     assert(_span.contains(p), "oop should be in _span");
8069     assert(p->mark() == markOopDesc::prototype(),
8070            "Set when taken from overflow list");
8071     markOop m = _preserved_mark_stack.pop();
8072     p->set_mark(m);
8073   }
8074   assert(_preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty(),
8075          "stacks were cleared above");
8076 }
8077 
8078 #ifndef PRODUCT
8079 bool CMSCollector::no_preserved_marks() const {
8080   return _preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty();
8081 }
8082 #endif
8083 
8084 // Transfer some number of overflown objects to usual marking
8085 // stack. Return true if some objects were transferred.
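// At most a quarter of the stack's remaining capacity is transferred,
// capped by ParGCDesiredObjsFromOverflowList.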
8086 bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
8087   size_t num = MIN2((size_t)(_mark_stack->capacity() - _mark_stack->length())/4,
8088                     (size_t)ParGCDesiredObjsFromOverflowList);
8089 
8090   bool res = _collector->take_from_overflow_list(num, _mark_stack);
8091   assert(_collector->overflow_list_is_empty() || res,
8092          "If list is not empty, we should have taken something");
8093   assert(!res || !_mark_stack->isEmpty(),
8094          "If we took something, it should now be on our stack");
8095   return res;
8096 }
8097 
8098 size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
8099   size_t res = _sp->block_size_no_stall(addr, _collector);
8100   if (_sp->block_is_obj(addr)) {
8101     if (_live_bit_map->isMarked(addr)) {
8102       // It can't have been dead in a previous cycle
8103       guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");
8104     } else {
8105       _dead_bit_map->mark(addr);      // mark the dead object
8106     }
8107   }
8108   // Could be 0, if the block size could not be computed without stalling.
8109   return res;
8110 }
8111 
TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause): TraceMemoryManagerStats() {
8114   switch (phase) {
8115     case CMSCollector::InitialMarking:
8116       initialize(true  /* fullGC */ ,
8117                  cause /* cause of the GC */,
8118                  true  /* recordGCBeginTime */,
8119                  true  /* recordPreGCUsage */,
8120                  false /* recordPeakUsage */,
8121                  false /* recordPostGCusage */,
8122                  true  /* recordAccumulatedGCTime */,
8123                  false /* recordGCEndTime */,
8124                  false /* countCollection */  );
8125       break;
8126 
8127     case CMSCollector::FinalMarking:
8128       initialize(true  /* fullGC */ ,
8129                  cause /* cause of the GC */,
8130                  false /* recordGCBeginTime */,
8131                  false /* recordPreGCUsage */,
8132                  false /* recordPeakUsage */,
8133                  false /* recordPostGCusage */,
8134                  true  /* recordAccumulatedGCTime */,
8135                  false /* recordGCEndTime */,
8136                  false /* countCollection */  );
8137       break;
8138 
8139     case CMSCollector::Sweeping:
8140       initialize(true  /* fullGC */ ,
8141                  cause /* cause of the GC */,
8142                  false /* recordGCBeginTime */,
8143                  false /* recordPreGCUsage */,
8144                  true  /* recordPeakUsage */,
8145                  true  /* recordPostGCusage */,
8146                  false /* recordAccumulatedGCTime */,
8147                  true  /* recordGCEndTime */,
8148                  true  /* countCollection */  );
8149       break;
8150 
8151     default:
8152       ShouldNotReachHere();
8153   }
8154 }