/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc/cms/cmsCollectorPolicy.hpp"
#include "gc/cms/cmsOopClosures.inline.hpp"
#include "gc/cms/compactibleFreeListSpace.hpp"
#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
#include "gc/cms/concurrentMarkSweepThread.hpp"
#include "gc/cms/parNewGeneration.hpp"
#include "gc/cms/vmCMSOperations.hpp"
#include "gc/serial/genMarkSweep.hpp"
#include "gc/serial/tenuredGeneration.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/cardGeneration.inline.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "memory/allocation.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/padded.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "services/runtimeService.hpp"
#include "utilities/stack.inline.hpp"

// statics
CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
bool CMSCollector::_full_gc_requested = false;
GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc;

//////////////////////////////////////////////////////////////////
// In support of CMS/VM thread synchronization
//////////////////////////////////////////////////////////////////
// We split use of the CGC_lock into 2 "levels".
// The low-level locking is of the usual CGC_lock monitor. We introduce
// a higher level "token" (hereafter "CMS token") built on top of the
// low level monitor (hereafter "CGC lock").
// The token-passing protocol gives priority to the VM thread. The
// CMS-lock doesn't provide any fairness guarantees, but clients
// should ensure that it is only held for very short, bounded
// durations.
//
// When either the CMS thread or the VM thread is involved in
// collection operations during which it does not want the other
// thread to interfere, it obtains the CMS token.
//
// If either thread tries to get the token while the other has
// it, that thread waits. However, if the VM thread and CMS thread
// both want the token, then the VM thread gets priority while the
// CMS thread waits. This ensures, for instance, that the "concurrent"
// phases of the CMS thread's work do not block out the VM thread
// for long periods of time as the CMS thread continues to hog
// the token. (See bug 4616232).
//
// The baton-passing functions are, however, controlled by the
// flags _foregroundGCShouldWait and _foregroundGCIsActive,
// and here the low-level CMS lock, not the high level token,
// ensures mutual exclusion.
//
// Two important conditions that we have to satisfy:
// 1. if a thread does a low-level wait on the CMS lock, then it
//    relinquishes the CMS token if it were holding that token
//    when it acquired the low-level CMS lock.
// 2. any low-level notifications on the low-level lock
//    should only be sent when a thread has relinquished the token.
//
// In the absence of either property, we'd have potential deadlock.
//
// We protect each of the CMS (concurrent and sequential) phases
// with the CMS _token_, not the CMS _lock_.
//
// The only code protected by CMS lock is the token acquisition code
// itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the
// baton-passing code.
//
// Unfortunately, I couldn't come up with a good abstraction to factor and
// hide the naked CGC_lock manipulation in the baton-passing code
// further below. That's something we should try to do. Also, the proof
// of correctness of this 2-level locking scheme is far from obvious,
// and potentially quite slippery. We have an uneasy suspicion, for instance,
// that there may be a theoretical possibility of delay/starvation in the
// low-level lock/wait/notify scheme used for the baton-passing because of
// potential interference with the priority scheme embodied in the
// CMS-token-passing protocol. See related comments at a CGC_lock->wait()
// invocation further below and marked with "XXX 20011219YSR".
// Indeed, as we note elsewhere, this may become yet more slippery
// in the presence of multiple CMS and/or multiple VM threads. XXX

class CMSTokenSync: public StackObj {
 private:
  bool _is_cms_thread;
 public:
  CMSTokenSync(bool is_cms_thread):
    _is_cms_thread(is_cms_thread) {
    assert(is_cms_thread == Thread::current()->is_ConcurrentGC_thread(),
           "Incorrect argument to constructor");
    ConcurrentMarkSweepThread::synchronize(_is_cms_thread);
  }

  ~CMSTokenSync() {
    assert(_is_cms_thread ?
             ConcurrentMarkSweepThread::cms_thread_has_cms_token() :
             ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
          "Incorrect state");
    ConcurrentMarkSweepThread::desynchronize(_is_cms_thread);
  }
};
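
// Illustrative use of CMSTokenSync (a sketch only; the actual call sites
// appear throughout this file): a phase that must not be interleaved with
// the other thread takes the token for its dynamic extent, e.g.
//
//   {
//     CMSTokenSync ts(true /* is_cms_thread */);
//     ... work the VM thread must not interleave with ...
//   }  // token relinquished when 'ts' goes out of scope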

// Convenience class that does a CMSTokenSync, and then acquires
// up to three locks.
class CMSTokenSyncWithLocks: public CMSTokenSync {
 private:
  // Note: locks are acquired in textual declaration order
  // and released in the opposite order
  MutexLockerEx _locker1, _locker2, _locker3;
 public:
  CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
                        Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
    CMSTokenSync(is_cms_thread),
    _locker1(mutex1, Mutex::_no_safepoint_check_flag),
    _locker2(mutex2, Mutex::_no_safepoint_check_flag),
    _locker3(mutex3, Mutex::_no_safepoint_check_flag)
  { }
};


//////////////////////////////////////////////////////////////////
//  Concurrent Mark-Sweep Generation /////////////////////////////
//////////////////////////////////////////////////////////////////

NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)

// This struct contains per-thread things necessary to support parallel
// young-gen collection.
class CMSParGCThreadState: public CHeapObj<mtGC> {
 public:
  CFLS_LAB lab;
  PromotionInfo promo;

  // Constructor.
  CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
    promo.setSpace(cfls);
  }
};

ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
     ReservedSpace rs, size_t initial_byte_size, CardTableRS* ct) :
  CardGeneration(rs, initial_byte_size, ct),
  _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
  _did_compact(false)
{
  HeapWord* bottom = (HeapWord*) _virtual_space.low();
  HeapWord* end    = (HeapWord*) _virtual_space.high();

  _direct_allocated_words = 0;
  NOT_PRODUCT(
    _numObjectsPromoted = 0;
    _numWordsPromoted = 0;
    _numObjectsAllocated = 0;
    _numWordsAllocated = 0;
  )

  _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end));
  NOT_PRODUCT(debug_cms_space = _cmsSpace;)
  _cmsSpace->_old_gen = this;

  _gc_stats = new CMSGCStats();

  // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
  // offsets match. The ability to tell free chunks from objects
  // depends on this property.
  debug_only(
    FreeChunk* junk = NULL;
    assert(UseCompressedClassPointers ||
           junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
           "Offset of FreeChunk::_prev within FreeChunk must match"
           "  that of OopDesc::_klass within OopDesc");
  )

  _par_gc_thread_states = NEW_C_HEAP_ARRAY(CMSParGCThreadState*, ParallelGCThreads, mtGC);
  for (uint i = 0; i < ParallelGCThreads; i++) {
    _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
  }

  _incremental_collection_failed = false;
  // The "dilatation_factor" is the expansion that can occur on
  // account of the fact that the minimum object size in the CMS
  // generation may be larger than that in, say, a contiguous young
  // generation.
  // Ideally, in the calculation below, we'd compute the dilatation
  // factor as: MinChunkSize/(promoting_gen's min object size)
  // Since we do not have such a general query interface for the
  // promoting generation, we'll instead just use the minimum
  // object size (which today is a header's worth of space);
  // note that all arithmetic is in units of HeapWords.
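  // (Illustrative, hypothetical numbers: if MinChunkSize were 4 words and
  // the promoting generation's minimum object size 2 words, the factor
  // would be 4/2 = 2.0, i.e. promoted data could occupy up to twice the
  // space it used in the young generation.)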
  assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
  assert(_dilatation_factor >= 1.0, "from previous assert");
}


// The field "_initiating_occupancy" represents the occupancy percentage
// at which we trigger a new collection cycle.  Unless explicitly specified
// via CMSInitiatingOccupancyFraction (argument "io" below), it
// is calculated by:
//
//   Let "f" be MinHeapFreeRatio in
//
//    _initiating_occupancy = 100-f +
//                           f * (CMSTriggerRatio/100)
//   where CMSTriggerRatio is the argument "tr" below.
//
// That is, if we assume the heap is at its desired maximum occupancy at the
// end of a collection, we let CMSTriggerRatio of the (purported) free
// space be allocated before initiating a new collection cycle.
//
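// For example, with the default MinHeapFreeRatio of 40 and the default
// CMSTriggerRatio of 80, this works out to
//   _initiating_occupancy = (100 - 40 + 40 * 80/100) / 100 = 0.92,
// i.e. a new cycle is initiated once the old generation is 92% occupied.
//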
void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, uintx tr) {
  assert(io <= 100 && tr <= 100, "Check the arguments");
  if (io >= 0) {
    _initiating_occupancy = (double)io / 100.0;
  } else {
    _initiating_occupancy = ((100 - MinHeapFreeRatio) +
                             (double)(tr * MinHeapFreeRatio) / 100.0)
                            / 100.0;
  }
}

void ConcurrentMarkSweepGeneration::ref_processor_init() {
  assert(collector() != NULL, "no collector");
  collector()->ref_processor_init();
}

void CMSCollector::ref_processor_init() {
  if (_ref_processor == NULL) {
    // Allocate and initialize a reference processor
    _ref_processor =
      new ReferenceProcessor(_span,                               // span
                             (ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing
                             ParallelGCThreads,                   // mt processing degree
                             _cmsGen->refs_discovery_is_mt(),     // mt discovery
                             MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
                             _cmsGen->refs_discovery_is_atomic(), // discovery is not atomic
                             &_is_alive_closure);                 // closure for liveness info
    // Initialize the _ref_processor field of CMSGen
    _cmsGen->set_ref_processor(_ref_processor);

  }
}

AdaptiveSizePolicy* CMSCollector::size_policy() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  return gch->gen_policy()->size_policy();
}

void ConcurrentMarkSweepGeneration::initialize_performance_counters() {

  const char* gen_name = "old";
  GenCollectorPolicy* gcp = GenCollectedHeap::heap()->gen_policy();
  // Generation Counters - generation 1, 1 subspace
  _gen_counters = new GenerationCounters(gen_name, 1, 1,
      gcp->min_old_size(), gcp->max_old_size(), &_virtual_space);

  _space_counters = new GSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       this, _gen_counters);
}

CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
  _cms_gen(cms_gen)
{
  assert(alpha <= 100, "bad value");
  _saved_alpha = alpha;

  // Initialize the alphas to the bootstrap value of 100.
  _gc0_alpha = _cms_alpha = 100;
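  // (Note, approximate: the alphas act as weights in exponential averages
  // of the form new_avg = ((100 - alpha) * avg + alpha * sample) / 100,
  // so a bootstrap value of 100 lets the first recorded sample replace
  // the average outright.)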

  _cms_begin_time.update();
  _cms_end_time.update();

  _gc0_duration = 0.0;
  _gc0_period = 0.0;
  _gc0_promoted = 0;

  _cms_duration = 0.0;
  _cms_period = 0.0;
  _cms_allocated = 0;

  _cms_used_at_gc0_begin = 0;
  _cms_used_at_gc0_end = 0;
  _allow_duty_cycle_reduction = false;
  _valid_bits = 0;
}

double CMSStats::cms_free_adjustment_factor(size_t free) const {
  // TBD: CR 6909490
  return 1.0;
}

void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
}

// If promotion failure handling is on, use
// the padded average size of the promotion for each
// young generation collection.
double CMSStats::time_until_cms_gen_full() const {
  size_t cms_free = _cms_gen->cmsSpace()->free();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  size_t expected_promotion = MIN2(gch->young_gen()->capacity(),
                                   (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
  if (cms_free > expected_promotion) {
    // Start a cms collection if there isn't enough space to promote
    // for the next young collection.  Use the padded average as
    // a safety factor.
    cms_free -= expected_promotion;

    // Adjust by the safety factor.
    double cms_free_dbl = (double)cms_free;
    double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor) / 100.0;
    // Apply a further correction factor which tries to adjust
    // for recent occurrence of concurrent mode failures.
    cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
    cms_free_dbl = cms_free_dbl * cms_adjustment;

    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("CMSStats::time_until_cms_gen_full: cms_free "
        SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
        cms_free, expected_promotion);
      gclog_or_tty->print_cr("  cms_free_dbl %f cms_consumption_rate %f",
        cms_free_dbl, cms_consumption_rate() + 1.0);
    }
    // Add 1 in case the consumption rate goes to zero.
    return cms_free_dbl / (cms_consumption_rate() + 1.0);
  }
  return 0.0;
}
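
// (Illustrative numbers: with cms_free = 200M, expected_promotion = 50M and
// CMSIncrementalSafetyFactor = 10, the adjusted free space is
// (200M - 50M) * 0.90 = 135M; at a consumption rate of ~27M/s the estimate
// is roughly 135M / 27M/s, i.e. about 5 seconds until the generation fills.)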

// Compare the duration of the cms collection to the
// time remaining before the cms generation is full.
// Note that the time from the start of the cms collection
// to the start of the cms sweep (less than the total
// duration of the cms collection) can be used.  This
// has been tried and some applications experienced
// promotion failures early in execution.  This was
// possibly because the averages were not accurate
// enough at the beginning.
double CMSStats::time_until_cms_start() const {
  // We add "gc0_period" to the "work" calculation
  // below because this query is done (mostly) at the
  // end of a scavenge, so we need to conservatively
  // account for that much possible delay
  // in the query so as to avoid concurrent mode failures
  // due to starting the collection just a wee bit too
  // late.
  double work = cms_duration() + gc0_period();
  double deadline = time_until_cms_gen_full();
  // If a concurrent mode failure occurred recently, we want to be
  // more conservative and halve our expected time_until_cms_gen_full()
  if (work > deadline) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print(
        " CMSCollector: collect because of anticipated promotion "
        "before full %3.7f + %3.7f > %3.7f ", cms_duration(),
        gc0_period(), time_until_cms_gen_full());
    }
    return 0.0;
  }
  return deadline - work;
}
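
// (Illustrative numbers: with cms_duration() = 2.5 s and gc0_period() =
// 1.0 s, work = 3.5 s; if the generation is expected to fill in 5.0 s,
// the next cycle should start in about 1.5 s.)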

#ifndef PRODUCT
void CMSStats::print_on(outputStream *st) const {
  st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
  st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
               gc0_duration(), gc0_period(), gc0_promoted());
  st->print(",cms_dur=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
            cms_duration(), cms_period(), cms_allocated());
  st->print(",cms_since_beg=%g,cms_since_end=%g",
            cms_time_since_begin(), cms_time_since_end());
  st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
            _cms_used_at_gc0_begin, _cms_used_at_gc0_end);

  if (valid()) {
    st->print(",promo_rate=%g,cms_alloc_rate=%g",
              promotion_rate(), cms_allocation_rate());
    st->print(",cms_consumption_rate=%g,time_until_full=%g",
              cms_consumption_rate(), time_until_cms_gen_full());
  }
  st->print(" ");
}
#endif // #ifndef PRODUCT

CMSCollector::CollectorState CMSCollector::_collectorState =
                             CMSCollector::Idling;
bool CMSCollector::_foregroundGCIsActive = false;
bool CMSCollector::_foregroundGCShouldWait = false;

CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
                           CardTableRS*                   ct,
                           ConcurrentMarkSweepPolicy*     cp):
  _cmsGen(cmsGen),
  _ct(ct),
  _ref_processor(NULL),    // will be set later
  _conc_workers(NULL),     // may be set later
  _abort_preclean(false),
  _start_sampling(false),
  _between_prologue_and_epilogue(false),
  _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
  _modUnionTable((CardTableModRefBS::card_shift - LogHeapWordSize),
                 -1 /* lock-free */, "No_lock" /* dummy */),
  _modUnionClosurePar(&_modUnionTable),
  // Adjust my span to cover old (cms) gen
  _span(cmsGen->reserved()),
  // Construct the is_alive_closure with _span & markBitMap
  _is_alive_closure(_span, &_markBitMap),
  _restart_addr(NULL),
  _overflow_list(NULL),
  _stats(cmsGen),
  _eden_chunk_lock(new Mutex(Mutex::leaf + 1, "CMS_eden_chunk_lock", true,
                             // verify that this lock should be acquired with safepoint check.
                             Monitor::_safepoint_check_sometimes)),
  _eden_chunk_array(NULL),     // may be set in ctor body
  _eden_chunk_capacity(0),     // -- ditto --
  _eden_chunk_index(0),        // -- ditto --
  _survivor_plab_array(NULL),  // -- ditto --
  _survivor_chunk_array(NULL), // -- ditto --
  _survivor_chunk_capacity(0), // -- ditto --
  _survivor_chunk_index(0),    // -- ditto --
  _ser_pmc_preclean_ovflw(0),
  _ser_kac_preclean_ovflw(0),
  _ser_pmc_remark_ovflw(0),
  _par_pmc_remark_ovflw(0),
  _ser_kac_ovflw(0),
  _par_kac_ovflw(0),
#ifndef PRODUCT
  _num_par_pushes(0),
#endif
  _collection_count_start(0),
  _verifying(false),
  _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
  _completed_initialization(false),
  _collector_policy(cp),
  _should_unload_classes(CMSClassUnloadingEnabled),
  _concurrent_cycles_since_last_unload(0),
  _roots_scanning_options(GenCollectedHeap::SO_None),
  _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
  _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _cms_start_registered(false)
{
  if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
    ExplicitGCInvokesConcurrent = true;
  }
  // Now expand the span and allocate the collection support structures
  // (MUT, marking bit map etc.) to cover both generations subject to
  // collection.

  // For use by dirty card to oop closures.
  _cmsGen->cmsSpace()->set_collector(this);

  // Allocate MUT and marking bit map
  {
    MutexLockerEx x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
    if (!_markBitMap.allocate(_span)) {
      warning("Failed to allocate CMS Bit Map");
      return;
    }
    assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
  }
  {
    _modUnionTable.allocate(_span);
    assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
  }

  if (!_markStack.allocate(MarkStackSize)) {
    warning("Failed to allocate CMS Marking Stack");
    return;
  }

  // Support for multi-threaded concurrent phases
  if (CMSConcurrentMTEnabled) {
    if (FLAG_IS_DEFAULT(ConcGCThreads)) {
      // just for now
      FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3) / 4);
    }
    if (ConcGCThreads > 1) {
      _conc_workers = new YieldingFlexibleWorkGang("CMS Thread",
                                 ConcGCThreads, true);
      if (_conc_workers == NULL) {
        warning("GC/CMS: _conc_workers allocation failure: "
              "forcing -CMSConcurrentMTEnabled");
        CMSConcurrentMTEnabled = false;
      } else {
        _conc_workers->initialize_workers();
      }
    } else {
      CMSConcurrentMTEnabled = false;
    }
  }
  if (!CMSConcurrentMTEnabled) {
    ConcGCThreads = 0;
  } else {
    // Turn off CMSCleanOnEnter optimization temporarily for
    // the MT case where it's not fixed yet; see 6178663.
    CMSCleanOnEnter = false;
  }
  assert((_conc_workers != NULL) == (ConcGCThreads > 1),
         "Inconsistency");

  // Parallel task queues; these are shared for the
  // concurrent and stop-world phases of CMS, but
  // are not shared with parallel scavenge (ParNew).
  {
    uint i;
    uint num_queues = MAX2(ParallelGCThreads, ConcGCThreads);

    if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
         || ParallelRefProcEnabled)
        && num_queues > 0) {
      _task_queues = new OopTaskQueueSet(num_queues);
      if (_task_queues == NULL) {
        warning("task_queues allocation failure.");
        return;
      }
      _hash_seed = NEW_C_HEAP_ARRAY(int, num_queues, mtGC);
      typedef Padded<OopTaskQueue> PaddedOopTaskQueue;
      for (i = 0; i < num_queues; i++) {
        PaddedOopTaskQueue *q = new PaddedOopTaskQueue();
        if (q == NULL) {
          warning("work_queue allocation failure.");
          return;
        }
        _task_queues->register_queue(i, q);
      }
      for (i = 0; i < num_queues; i++) {
        _task_queues->queue(i)->initialize();
        _hash_seed[i] = 17;  // copied from ParNew
      }
    }
  }

  _cmsGen->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);

  // CMSBootstrapOccupancy is a percentage (0..100); convert it to a fraction.
  _bootstrap_occupancy = CMSBootstrapOccupancy / 100.0;

  // Now tell CMS generations the identity of their collector
  ConcurrentMarkSweepGeneration::set_collector(this);

  // Create & start a CMS thread for this CMS collector
  _cmsThread = ConcurrentMarkSweepThread::start(this);
  assert(cmsThread() != NULL, "CMS Thread should have been created");
  assert(cmsThread()->collector() == this,
         "CMS Thread should refer to this gen");
  assert(CGC_lock != NULL, "Where's the CGC_lock?");

  // Support for parallelizing young gen rescan
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->young_gen()->kind() == Generation::ParNew, "CMS can only be used with ParNew");
  _young_gen = (ParNewGeneration*)gch->young_gen();
  if (gch->supports_inline_contig_alloc()) {
    _top_addr = gch->top_addr();
    _end_addr = gch->end_addr();
    assert(_young_gen != NULL, "no _young_gen");
    _eden_chunk_index = 0;
    _eden_chunk_capacity = (_young_gen->max_capacity() + CMSSamplingGrain) / CMSSamplingGrain;
    _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
  }

  // Support for parallelizing survivor space rescan
  if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
    const size_t max_plab_samples =
      _young_gen->max_survivor_size() / (PLAB::min_size() * HeapWordSize);

    _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
    _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
    _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
    _survivor_chunk_capacity = max_plab_samples;
    for (uint i = 0; i < ParallelGCThreads; i++) {
      HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
      ChunkArray* cur = ::new (&_survivor_plab_array[i]) ChunkArray(vec, max_plab_samples);
      assert(cur->end() == 0, "Should be 0");
      assert(cur->array() == vec, "Should be vec");
      assert(cur->capacity() == max_plab_samples, "Error");
    }
  }

  NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
  _gc_counters = new CollectorCounters("CMS", 1);
  _completed_initialization = true;
  _inter_sweep_timer.start();  // start of time
}

const char* ConcurrentMarkSweepGeneration::name() const {
  return "concurrent mark-sweep generation";
}
void ConcurrentMarkSweepGeneration::update_counters() {
  if (UsePerfData) {
    _space_counters->update_all();
    _gen_counters->update_all();
  }
}

// This is an optimized version of update_counters(). It takes the
// used value as a parameter rather than computing it.
//
void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
  if (UsePerfData) {
    _space_counters->update_used(used);
    _space_counters->update_capacity();
    _gen_counters->update_all();
  }
}

void ConcurrentMarkSweepGeneration::print() const {
  Generation::print();
  cmsSpace()->print();
}

#ifndef PRODUCT
void ConcurrentMarkSweepGeneration::print_statistics() {
  cmsSpace()->printFLCensus(0);
}
#endif

void ConcurrentMarkSweepGeneration::printOccupancy(const char *s) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (PrintGCDetails) {
    // I didn't want to change the logging when removing the level concept,
    // but I guess this logging could say "old" or something instead of "1".
    assert(gch->is_old_gen(this),
           "The CMS generation should be the old generation");
    uint level = 1;
    if (Verbose) {
      gclog_or_tty->print("[%u %s-%s: " SIZE_FORMAT "(" SIZE_FORMAT ")]",
        level, short_name(), s, used(), capacity());
    } else {
      gclog_or_tty->print("[%u %s-%s: " SIZE_FORMAT "K(" SIZE_FORMAT "K)]",
        level, short_name(), s, used() / K, capacity() / K);
    }
  }
  if (Verbose) {
    gclog_or_tty->print(" " SIZE_FORMAT "(" SIZE_FORMAT ")",
              gch->used(), gch->capacity());
  } else {
    gclog_or_tty->print(" " SIZE_FORMAT "K(" SIZE_FORMAT "K)",
              gch->used() / K, gch->capacity() / K);
  }
}

size_t
ConcurrentMarkSweepGeneration::contiguous_available() const {
  // dld proposes an improvement in precision here. If the committed
  // part of the space ends in a free block we should add that to
  // uncommitted size in the calculation below. Will make this
  // change later, staying with the approximation below for the
  // time being. -- ysr.
  return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
}

size_t
ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
  return _cmsSpace->max_alloc_in_words() * HeapWordSize;
}

size_t ConcurrentMarkSweepGeneration::max_available() const {
  return free() + _virtual_space.uncommitted_size();
}

bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
  size_t available = max_available();
  size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
  bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
  if (Verbose && PrintGCDetails) {
    gclog_or_tty->print_cr(
      "CMS: promo attempt is%s safe: available(" SIZE_FORMAT ") %s av_promo(" SIZE_FORMAT "),"
      "max_promo(" SIZE_FORMAT ")",
      res? "":" not", available, res? ">=":"<",
      av_promo, max_promotion_in_bytes);
  }
  return res;
}

// At a promotion failure dump information on block layout in heap
// (cms old generation).
void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
  if (CMSDumpAtPromotionFailure) {
    cmsSpace()->dump_at_safepoint_with_locks(collector(), gclog_or_tty);
  }
}

void ConcurrentMarkSweepGeneration::reset_after_compaction() {
  // Clear the promotion information.  These pointers can be adjusted
  // along with all the other pointers into the heap but
  // compaction is expected to be a rare event with
  // a heap using cms so don't do it without seeing the need.
  for (uint i = 0; i < ParallelGCThreads; i++) {
    _par_gc_thread_states[i]->promo.reset();
  }
}

void ConcurrentMarkSweepGeneration::compute_new_size() {
  assert_locked_or_safepoint(Heap_lock);

  // If incremental collection failed, we just want to expand
  // to the limit.
  if (incremental_collection_failed()) {
    clear_incremental_collection_failed();
    grow_to_reserved();
    return;
  }

  // The heap has been compacted but not reset yet.
  // Any metric such as free() or used() will be incorrect.

  CardGeneration::compute_new_size();

  // Reset again after a possible resizing
  if (did_compact()) {
    cmsSpace()->reset_after_compaction();
  }
}

void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
  assert_locked_or_safepoint(Heap_lock);

  // If incremental collection failed, we just want to expand
  // to the limit.
  if (incremental_collection_failed()) {
    clear_incremental_collection_failed();
    grow_to_reserved();
    return;
  }

  double free_percentage = ((double) free()) / capacity();
  double desired_free_percentage = (double) MinHeapFreeRatio / 100;
  double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;

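  // (Illustrative numbers: with used() = 600M and MinHeapFreeRatio = 40,
  // desired_capacity = 600M / (1 - 0.40) = 1000M; with capacity() = 800M
  // we would request an expansion of 200M, subject to the MinHeapDeltaBytes
  // floor below.)
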
  // compute expansion delta needed for reaching desired free percentage
  if (free_percentage < desired_free_percentage) {
    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
    assert(desired_capacity >= capacity(), "invalid expansion size");
    size_t expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("\nFrom compute_new_size: ");
      gclog_or_tty->print_cr("  Free fraction %f", free_percentage);
      gclog_or_tty->print_cr("  Desired free fraction %f", desired_free_percentage);
      gclog_or_tty->print_cr("  Maximum free fraction %f", maximum_free_percentage);
      gclog_or_tty->print_cr("  Capacity " SIZE_FORMAT, capacity() / 1000);
      gclog_or_tty->print_cr("  Desired capacity " SIZE_FORMAT, desired_capacity / 1000);
      GenCollectedHeap* gch = GenCollectedHeap::heap();
      assert(gch->is_old_gen(this), "The CMS generation should always be the old generation");
      size_t young_size = gch->young_gen()->capacity();
      gclog_or_tty->print_cr("  Young gen size " SIZE_FORMAT, young_size / 1000);
      gclog_or_tty->print_cr("  unsafe_max_alloc_nogc " SIZE_FORMAT, unsafe_max_alloc_nogc() / 1000);
      gclog_or_tty->print_cr("  contiguous available " SIZE_FORMAT, contiguous_available() / 1000);
      gclog_or_tty->print_cr("  Expand by " SIZE_FORMAT " (bytes)", expand_bytes);
    }
    // safe if expansion fails
    expand_for_gc_cause(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
    if (PrintGCDetails && Verbose) {
      gclog_or_tty->print_cr("  Expanded free fraction %f", ((double) free()) / capacity());
    }
  } else {
    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
    assert(desired_capacity <= capacity(), "invalid expansion size");
    size_t shrink_bytes = capacity() - desired_capacity;
    // Don't shrink unless the delta is greater than the minimum shrink we want
    if (shrink_bytes >= MinHeapDeltaBytes) {
      shrink_free_list_by(shrink_bytes);
    }
  }
}

Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
  return cmsSpace()->freelistLock();
}

HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size, bool tlab) {
  CMSSynchronousYieldRequest yr;
  MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
  return have_lock_and_allocate(size, tlab);
}

HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
                                                                bool   tlab /* ignored */) {
  assert_lock_strong(freelistLock());
  size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
  HeapWord* res = cmsSpace()->allocate(adjustedSize);
  // Allocate the object live (grey) if the background collector has
  // started marking. This is necessary because the marker may
  // have passed this address and consequently this object will
  // not otherwise be greyed and would be incorrectly swept up.
  // Note that if this object contains references, the writing
  // of those references will dirty the card containing this object
  // allowing the object to be blackened (and its references scanned)
  // either during a preclean phase or at the final checkpoint.
  if (res != NULL) {
    // We may block here with an uninitialized object with
    // its mark-bit or P-bits not yet set. Such objects need
    // to be safely navigable by block_start().
    assert(oop(res)->klass_or_null() == NULL, "Object should be uninitialized here.");
    assert(!((FreeChunk*)res)->is_free(), "Error, block will look free but show wrong size");
    collector()->direct_allocated(res, adjustedSize);
    _direct_allocated_words += adjustedSize;
    // allocation counters
    NOT_PRODUCT(
      _numObjectsAllocated++;
      _numWordsAllocated += (int)adjustedSize;
    )
  }
  return res;
}

// In the case of direct allocation by mutators in a generation that
// is being concurrently collected, the object must be allocated
// live (grey) if the background collector has started marking.
// This is necessary because the marker may
// have passed this address and consequently this object will
// not otherwise be greyed and would be incorrectly swept up.
// Note that if this object contains references, the writing
// of those references will dirty the card containing this object
// allowing the object to be blackened (and its references scanned)
// either during a preclean phase or at the final checkpoint.
void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
  assert(_markBitMap.covers(start, size), "Out of bounds");
  if (_collectorState >= Marking) {
    MutexLockerEx y(_markBitMap.lock(),
                    Mutex::_no_safepoint_check_flag);
    // [see comments preceding SweepClosure::do_blk() below for details]
    //
    // Can the P-bits be deleted now?  JJJ
    //
    // 1. need to mark the object as live so it isn't collected
    // 2. need to mark the 2nd bit to indicate the object may be uninitialized
    // 3. need to mark the end of the object so marking, precleaning or sweeping
    //    can skip over uninitialized or unparsable objects. An allocated
    //    object is considered uninitialized for our purposes as long as
    //    its klass word is NULL.  All old gen objects are parsable
    //    as soon as they are initialized.
    _markBitMap.mark(start);          // object is live
    _markBitMap.mark(start + 1);      // object is potentially uninitialized?
    _markBitMap.mark(start + size - 1);
                                      // mark end of object
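    // (Illustration: for a block of 5 words at 'start', the bits set are
    //    word offset: 0 1 2 3 4
    //    marked:      X X . . X
    //  i.e. the live bit, the "may be uninitialized" bit, and the
    //  end-of-object bit.)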
  }
  // check that oop looks uninitialized
  assert(oop(start)->klass_or_null() == NULL, "_klass should be NULL");
}

void CMSCollector::promoted(bool par, HeapWord* start,
                            bool is_obj_array, size_t obj_size) {
  assert(_markBitMap.covers(start), "Out of bounds");
  // See comment in direct_allocated() about when objects should
  // be allocated live.
  if (_collectorState >= Marking) {
    // we already hold the marking bit map lock, taken in
    // the prologue
    if (par) {
      _markBitMap.par_mark(start);
    } else {
      _markBitMap.mark(start);
    }
    // We don't need to mark the object as uninitialized (as
    // in direct_allocated above) because this is being done with the
    // world stopped and the object will be initialized by the
    // time the marking, precleaning or sweeping get to look at it.
    // But see the code for copying objects into the CMS generation,
    // where we need to ensure that concurrent readers of the
    // block offset table are able to safely navigate a block that
    // is in flux from being free to being allocated (and in
    // transition while being copied into) and subsequently
    // becoming a bona-fide object when the copy/promotion is complete.
    assert(SafepointSynchronize::is_at_safepoint(),
           "expect promotion only at safepoints");

    if (_collectorState < Sweeping) {
      // Mark the appropriate cards in the modUnionTable, so that
      // this object gets scanned before the sweep. If this is
      // not done, CMS generation references in the object might
      // not get marked.
      // For the case of arrays, which are otherwise precisely
      // marked, we need to dirty the entire array, not just its head.
      if (is_obj_array) {
        // The [par_]mark_range() method expects mr.end() below to
        // be aligned to the granularity of a bit's representation
        // in the heap. In the case of the MUT below, that's a
        // card size.
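        // (Illustrative, with the default 512-byte cards: an array starting
        // at 0x1000 and spanning 2400 bytes yields mr = [0x1000, 0x1A00),
        // so five cards are dirtied rather than just the head's card.)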
        MemRegion mr(start,
                     (HeapWord*)round_to((intptr_t)(start + obj_size),
                        CardTableModRefBS::card_size /* bytes */));
        if (par) {
          _modUnionTable.par_mark_range(mr);
        } else {
          _modUnionTable.mark_range(mr);
        }
      } else {  // not an obj array; we can just mark the head
        if (par) {
          _modUnionTable.par_mark(start);
        } else {
          _modUnionTable.mark(start);
        }
      }
    }
  }
}

oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  // allocate, copy and if necessary update promoinfo --
  // delegate to underlying space.
  assert_lock_strong(freelistLock());

#ifndef PRODUCT
  if (GenCollectedHeap::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT

  oop res = _cmsSpace->promote(obj, obj_size);
  if (res == NULL) {
    // expand and retry
    size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
    expand_for_gc_cause(s*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_promotion);
    // Since this is the old generation, we don't try to promote
    // into a more senior generation.
    res = _cmsSpace->promote(obj, obj_size);
  }
  if (res != NULL) {
    // See comment in allocate() about when objects should
    // be allocated live.
    assert(obj->is_oop(), "Will dereference klass pointer below");
    collector()->promoted(false,           // Not parallel
                          (HeapWord*)res, obj->is_objArray(), obj_size);
    // promotion counters
    NOT_PRODUCT(
      _numObjectsPromoted++;
      _numWordsPromoted +=
        (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
    )
  }
  return res;
}


// IMPORTANT: Notes on object size recognition in CMS.
// ---------------------------------------------------
// A block of storage in the CMS generation is always in
// one of three states. A free block (FREE), an allocated
// object (OBJECT) whose size() method reports the correct size,
// and an intermediate state (TRANSIENT) in which its size cannot
// be accurately determined.
// STATE IDENTIFICATION:   (32 bit and 64 bit w/o COOPS)
// -----------------------------------------------------
// FREE:      klass_word & 1 == 1; mark_word holds block size
//
// OBJECT:    klass_word installed; klass_word != 0 && klass_word & 1 == 0;
//            obj->size() computes correct size
//
// TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
//
// STATE IDENTIFICATION: (64 bit+COOPS)
// ------------------------------------
// FREE:      mark_word & CMS_FREE_BIT == 1; mark_word & ~CMS_FREE_BIT gives block_size
//
// OBJECT:    klass_word installed; klass_word != 0;
//            obj->size() computes correct size
//
// TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
//
//
// STATE TRANSITION DIAGRAM
//
//        mut / parnew                     mut  /  parnew
// FREE --------------------> TRANSIENT ---------------------> OBJECT --|
//  ^                                                                   |
//  |------------------------ DEAD <------------------------------------|
//         sweep                            mut
//
// While a block is in TRANSIENT state its size cannot be determined
// so readers will either need to come back later or stall until
// the size can be determined. Note that for the case of direct
// allocation, P-bits, when available, may be used to determine the
// size of an object that may not yet have been initialized.
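
// A sketch of the size-discrimination logic the states above imply (the
// real code lives in CompactibleFreeListSpace::block_size() and related
// methods; this is only an illustration of the idea):
//
//   if (FreeChunk::indicatesFreeChunk(p)) {
//     size = ((FreeChunk*)p)->size();        // FREE: size coded in the chunk
//   } else if (oop(p)->klass_or_null() != NULL) {
//     size = oop(p)->size();                 // OBJECT: klass word installed
//   } else {
//     // TRANSIENT: retry later, or consult the P-bits where available
//   }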

// Things to support parallel young-gen collection.
oop
ConcurrentMarkSweepGeneration::par_promote(int thread_num,
                                           oop old, markOop m,
                                           size_t word_sz) {
#ifndef PRODUCT
  if (GenCollectedHeap::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT

  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  PromotionInfo* promoInfo = &ps->promo;
  // if we are tracking promotions, then first ensure space for
  // promotion (including spooling space for saving header if necessary).
  // then allocate and copy, then track promoted info if needed.
  // When tracking (see PromotionInfo::track()), the mark word may
  // be displaced and in this case restoration of the mark word
  // occurs in the (oop_since_save_marks_)iterate phase.
  if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
    // Out of space for allocating spooling buffers;
    // try expanding and allocating spooling buffers.
    if (!expand_and_ensure_spooling_space(promoInfo)) {
      return NULL;
    }
  }
  assert(promoInfo->has_spooling_space(), "Control point invariant");
  const size_t alloc_sz = CompactibleFreeListSpace::adjustObjectSize(word_sz);
  HeapWord* obj_ptr = ps->lab.alloc(alloc_sz);
  if (obj_ptr == NULL) {
     obj_ptr = expand_and_par_lab_allocate(ps, alloc_sz);
     if (obj_ptr == NULL) {
       return NULL;
     }
  }
  oop obj = oop(obj_ptr);
  OrderAccess::storestore();
  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
  // IMPORTANT: See note on object initialization for CMS above.
  // Otherwise, copy the object.  Here we must be careful to insert the
  // klass pointer last, since this marks the block as an allocated object.
  // Except with compressed oops it's the mark word.
  HeapWord* old_ptr = (HeapWord*)old;
  // Restore the mark word copied above.
  obj->set_mark(m);
  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
  OrderAccess::storestore();

  if (UseCompressedClassPointers) {
    // Copy gap missed by (aligned) header size calculation below
    obj->set_klass_gap(old->klass_gap());
  }
  if (word_sz > (size_t)oopDesc::header_size()) {
    Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
                                 obj_ptr + oopDesc::header_size(),
                                 word_sz - oopDesc::header_size());
  }

  // Now we can track the promoted object, if necessary.  We take care
  // to delay the transition from uninitialized to full object
  // (i.e., insertion of klass pointer) until after, so that it
  // atomically becomes a promoted object.
  if (promoInfo->tracking()) {
    promoInfo->track((PromotedObject*)obj, old->klass());
  }
  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
  assert(old->is_oop(), "Will use and dereference old klass ptr below");

  // Finally, install the klass pointer (this should be volatile).
  OrderAccess::storestore();
  obj->set_klass(old->klass());
  // We should now be able to calculate the right size for this object
  assert(obj->is_oop() && obj->size() == (int)word_sz, "Error, incorrect size computed for promoted object");

  collector()->promoted(true,          // parallel
                        obj_ptr, old->is_objArray(), word_sz);

  NOT_PRODUCT(
    Atomic::inc_ptr(&_numObjectsPromoted);
    Atomic::add_ptr(alloc_sz, &_numWordsPromoted);
  )

  return obj;
}

void
ConcurrentMarkSweepGeneration::
par_promote_alloc_done(int thread_num) {
  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  ps->lab.retire(thread_num);
}

void
ConcurrentMarkSweepGeneration::
par_oop_since_save_marks_iterate_done(int thread_num) {
  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
  ParScanWithoutBarrierClosure* dummy_cl = NULL;
  ps->promo.promoted_oops_iterate_nv(dummy_cl);
}

bool ConcurrentMarkSweepGeneration::should_collect(bool   full,
                                                   size_t size,
                                                   bool   tlab)
{
  // We allow a STW collection only if a full
  // collection was requested.
  return full || should_allocate(size, tlab); // FIX ME !!!
  // This and promotion failure handling are connected at the
  // hip and should be fixed by untying them.
}

bool CMSCollector::shouldConcurrentCollect() {
  if (_full_gc_requested) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print_cr("CMSCollector: collect because of explicit "
                             "gc request (or gc_locker)");
    }
    return true;
  }

  FreelistLocker x(this);
  // ------------------------------------------------------------------
  // Print out lots of information which affects the initiation of
  // a collection.
  if (PrintCMSInitiationStatistics && stats().valid()) {
    gclog_or_tty->print("CMSCollector shouldConcurrentCollect: ");
    gclog_or_tty->stamp();
    gclog_or_tty->cr();
    stats().print_on(gclog_or_tty);
    gclog_or_tty->print_cr("time_until_cms_gen_full %3.7f",
      stats().time_until_cms_gen_full());
    gclog_or_tty->print_cr("free=" SIZE_FORMAT, _cmsGen->free());
    gclog_or_tty->print_cr("contiguous_available=" SIZE_FORMAT,
                           _cmsGen->contiguous_available());
    gclog_or_tty->print_cr("promotion_rate=%g", stats().promotion_rate());
    gclog_or_tty->print_cr("cms_allocation_rate=%g", stats().cms_allocation_rate());
    gclog_or_tty->print_cr("occupancy=%3.7f", _cmsGen->occupancy());
    gclog_or_tty->print_cr("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
    gclog_or_tty->print_cr("cms_time_since_begin=%3.7f", stats().cms_time_since_begin());
    gclog_or_tty->print_cr("cms_time_since_end=%3.7f", stats().cms_time_since_end());
    gclog_or_tty->print_cr("metadata initialized %d",
      MetaspaceGC::should_concurrent_collect());
  }
  // ------------------------------------------------------------------

  // If the estimated time to complete a cms collection (cms_duration())
  // is less than the estimated time remaining until the cms generation
  // is full, start a collection.
  if (!UseCMSInitiatingOccupancyOnly) {
    if (stats().valid()) {
      if (stats().time_until_cms_start() == 0.0) {
        return true;
      }
    } else {
      // We want to conservatively collect somewhat early in order
      // to try and "bootstrap" our CMS/promotion statistics;
      // this branch will not fire after the first successful CMS
      // collection because the stats should then be valid.
      if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
        if (Verbose && PrintGCDetails) {
          gclog_or_tty->print_cr(
            " CMSCollector: collect for bootstrapping statistics:"
            " occupancy = %f, boot occupancy = %f", _cmsGen->occupancy(),
            _bootstrap_occupancy);
        }
        return true;
      }
    }
  }

  // Otherwise, we start a collection cycle if the
  // old gen wants a collection cycle started, using
  // an appropriate criterion for making this decision.
  // XXX We need to make sure that the gen expansion
  // criterion dovetails well with this. XXX NEED TO FIX THIS
  if (_cmsGen->should_concurrent_collect()) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print_cr("CMS old gen initiated");
    }
    return true;
  }

  // We start a collection if we believe an incremental collection may fail;
  // this is not likely to be productive in practice because it's probably too
  // late anyway.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(gch->collector_policy()->is_generation_policy(),
         "You may want to check the correctness of the following");
  if (gch->incremental_collection_will_fail(true /* consult_young */)) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print("CMSCollector: collect because incremental collection will fail ");
    }
    return true;
  }

  if (MetaspaceGC::should_concurrent_collect()) {
    if (Verbose && PrintGCDetails) {
      gclog_or_tty->print("CMSCollector: collect for metadata allocation ");
    }
    return true;
  }

  // CMSTriggerInterval starts a CMS cycle if enough time has passed.
  if (CMSTriggerInterval >= 0) {
    if (CMSTriggerInterval == 0) {
      // Trigger always
      return true;
    }

    // Check the CMS time since begin (we do not check the stats validity
    // as we want to be able to trigger the first CMS cycle as well)
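    // (CMSTriggerInterval is in milliseconds; e.g. a value of 60000 makes
    // this branch fire once at least 60.0 s have elapsed since the last
    // cycle began.)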
1245     if (stats().cms_time_since_begin() >= (CMSTriggerInterval / ((double) MILLIUNITS))) {
1246       if (Verbose && PrintGCDetails) {
1247         if (stats().valid()) {
1248           gclog_or_tty->print_cr("CMSCollector: collect because of trigger interval (time since last begin %3.7f secs)",
1249                                  stats().cms_time_since_begin());
1250         } else {
1251           gclog_or_tty->print_cr("CMSCollector: collect because of trigger interval (first collection)");
1252         }
1253       }
1254       return true;
1255     }
1256   }
1257 
1258   return false;
1259 }
1260 
1261 void CMSCollector::set_did_compact(bool v) { _cmsGen->set_did_compact(v); }
1262 
1263 // Clear _expansion_cause fields of constituent generations
1264 void CMSCollector::clear_expansion_cause() {
1265   _cmsGen->clear_expansion_cause();
1266 }
1267 
// We should be conservative in starting a collection cycle.  Starting
// too eagerly risks collecting too often in the extreme; collecting
// too rarely falls back on full collections, which works, even if not
// optimal in terms of concurrent work.
// As a workaround for collecting too eagerly, use the flag
// UseCMSInitiatingOccupancyOnly.  This also has the advantage of
// giving the user an easily understandable way of controlling the
// collections.
1276 // We want to start a new collection cycle if any of the following
1277 // conditions hold:
1278 // . our current occupancy exceeds the configured initiating occupancy
1279 //   for this generation, or
1280 // . we recently needed to expand this space and have not, since that
1281 //   expansion, done a collection of this generation, or
1282 // . the underlying space believes that it may be a good idea to initiate
1283 //   a concurrent collection (this may be based on criteria such as the
1284 //   following: the space uses linear allocation and linear allocation is
1285 //   going to fail, or there is believed to be excessive fragmentation in
1286 //   the generation, etc... or ...
1287 // [.(currently done by CMSCollector::shouldConcurrentCollect() only for
1288 //   the case of the old generation; see CR 6543076):
1289 //   we may be approaching a point at which allocation requests may fail because
1290 //   we will be out of sufficient free space given allocation rate estimates.]
1291 bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
1292 
1293   assert_lock_strong(freelistLock());
1294   if (occupancy() > initiating_occupancy()) {
1295     if (PrintGCDetails && Verbose) {
1296       gclog_or_tty->print(" %s: collect because of occupancy %f / %f  ",
1297         short_name(), occupancy(), initiating_occupancy());
1298     }
1299     return true;
1300   }
1301   if (UseCMSInitiatingOccupancyOnly) {
1302     return false;
1303   }
1304   if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
1305     if (PrintGCDetails && Verbose) {
1306       gclog_or_tty->print(" %s: collect because expanded for allocation ",
1307         short_name());
1308     }
1309     return true;
1310   }
1311   return false;
1312 }
1313 
1314 void ConcurrentMarkSweepGeneration::collect(bool   full,
1315                                             bool   clear_all_soft_refs,
1316                                             size_t size,
1317                                             bool   tlab)
1318 {
1319   collector()->collect(full, clear_all_soft_refs, size, tlab);
1320 }
1321 
1322 void CMSCollector::collect(bool   full,
1323                            bool   clear_all_soft_refs,
1324                            size_t size,
1325                            bool   tlab)
1326 {
1327   // The following "if" branch is present for defensive reasons.
1328   // In the current uses of this interface, it can be replaced with:
  // assert(!GC_locker::is_active(), "Can't be called otherwise");
1330   // But I am not placing that assert here to allow future
1331   // generality in invoking this interface.
1332   if (GC_locker::is_active()) {
1333     // A consistency test for GC_locker
1334     assert(GC_locker::needs_gc(), "Should have been set already");
1335     // Skip this foreground collection, instead
1336     // expanding the heap if necessary.
1337     // Need the free list locks for the call to free() in compute_new_size()
1338     compute_new_size();
1339     return;
1340   }
1341   acquire_control_and_collect(full, clear_all_soft_refs);
1342 }
1343 
1344 void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) {
1345   GenCollectedHeap* gch = GenCollectedHeap::heap();
1346   unsigned int gc_count = gch->total_full_collections();
1347   if (gc_count == full_gc_count) {
1348     MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
1349     _full_gc_requested = true;
1350     _full_gc_cause = cause;
1351     CGC_lock->notify();   // nudge CMS thread
1352   } else {
1353     assert(gc_count > full_gc_count, "Error: causal loop");
1354   }
1355 }
1356 
1357 bool CMSCollector::is_external_interruption() {
1358   GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
1359   return GCCause::is_user_requested_gc(cause) ||
1360          GCCause::is_serviceability_requested_gc(cause);
1361 }
1362 
1363 void CMSCollector::report_concurrent_mode_interruption() {
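  // An external interruption means a user- or tool-requested
  // stop-the-world collection preempted the concurrent cycle; anything
  // else is a genuine concurrent mode failure, which is reported to
  // the GC tracer.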
1364   if (is_external_interruption()) {
1365     if (PrintGCDetails) {
1366       gclog_or_tty->print(" (concurrent mode interrupted)");
1367     }
1368   } else {
1369     if (PrintGCDetails) {
1370       gclog_or_tty->print(" (concurrent mode failure)");
1371     }
1372     _gc_tracer_cm->report_concurrent_mode_failure();
1373   }
1374 }
1375 
1376 
1377 // The foreground and background collectors need to coordinate in order
1378 // to make sure that they do not mutually interfere with CMS collections.
1379 // When a background collection is active,
1380 // the foreground collector may need to take over (preempt) and
1381 // synchronously complete an ongoing collection. Depending on the
1382 // frequency of the background collections and the heap usage
1383 // of the application, this preemption can be seldom or frequent.
// There are only certain
// points in the background collection at which the "collection-baton"
// can be passed to the foreground collector.
1387 //
1388 // The foreground collector will wait for the baton before
1389 // starting any part of the collection.  The foreground collector
1390 // will only wait at one location.
1391 //
1392 // The background collector will yield the baton before starting a new
1393 // phase of the collection (e.g., before initial marking, marking from roots,
1394 // precleaning, final re-mark, sweep etc.)  This is normally done at the head
1395 // of the loop which switches the phases. The background collector does some
1396 // of the phases (initial mark, final re-mark) with the world stopped.
1397 // Because of locking involved in stopping the world,
1398 // the foreground collector should not block waiting for the background
1399 // collector when it is doing a stop-the-world phase.  The background
1400 // collector will yield the baton at an additional point just before
1401 // it enters a stop-the-world phase.  Once the world is stopped, the
1402 // background collector checks the phase of the collection.  If the
1403 // phase has not changed, it proceeds with the collection.  If the
1404 // phase has changed, it skips that phase of the collection.  See
1405 // the comments on the use of the Heap_lock in collect_in_background().
1406 //
// Variables used in baton passing.
1408 //   _foregroundGCIsActive - Set to true by the foreground collector when
1409 //      it wants the baton.  The foreground clears it when it has finished
1410 //      the collection.
//   _foregroundGCShouldWait - Set to true by the background collector
//      when it is running.  The foreground collector waits while
//      _foregroundGCShouldWait is true.
1414 //  CGC_lock - monitor used to protect access to the above variables
1415 //      and to notify the foreground and background collectors.
1416 //  _collectorState - current state of the CMS collection.
1417 //
1418 // The foreground collector
1419 //   acquires the CGC_lock
1420 //   sets _foregroundGCIsActive
1421 //   waits on the CGC_lock for _foregroundGCShouldWait to be false
1422 //     various locks acquired in preparation for the collection
1423 //     are released so as not to block the background collector
1424 //     that is in the midst of a collection
1425 //   proceeds with the collection
1426 //   clears _foregroundGCIsActive
1427 //   returns
1428 //
1429 // The background collector in a loop iterating on the phases of the
1430 //      collection
1431 //   acquires the CGC_lock
1432 //   sets _foregroundGCShouldWait
1433 //   if _foregroundGCIsActive is set
//     clears _foregroundGCShouldWait, notifies CGC_lock
//     waits on CGC_lock for _foregroundGCIsActive to become false
//     and exits the loop.
//   otherwise
//     proceeds with that phase of the collection
1439 //     if the phase is a stop-the-world phase,
1440 //       yield the baton once more just before enqueueing
1441 //       the stop-world CMS operation (executed by the VM thread).
1442 //   returns after all phases of the collection are done
1443 //
1444 
1445 void CMSCollector::acquire_control_and_collect(bool full,
1446         bool clear_all_soft_refs) {
1447   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
1448   assert(!Thread::current()->is_ConcurrentGC_thread(),
1449          "shouldn't try to acquire control from self!");
1450 
1451   // Start the protocol for acquiring control of the
1452   // collection from the background collector (aka CMS thread).
1453   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1454          "VM thread should have CMS token");
1455   // Remember the possibly interrupted state of an ongoing
1456   // concurrent collection
1457   CollectorState first_state = _collectorState;
1458 
1459   // Signal to a possibly ongoing concurrent collection that
1460   // we want to do a foreground collection.
1461   _foregroundGCIsActive = true;
1462 
  // Release locks and wait for a notify from the background collector.
  // Releasing the locks is only necessary for phases which
  // yield to improve the granularity of the collection.
1466   assert_lock_strong(bitMapLock());
1467   // We need to lock the Free list lock for the space that we are
1468   // currently collecting.
1469   assert(haveFreelistLocks(), "Must be holding free list locks");
1470   bitMapLock()->unlock();
1471   releaseFreelistLocks();
1472   {
1473     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1474     if (_foregroundGCShouldWait) {
1475       // We are going to be waiting for action for the CMS thread;
1476       // it had better not be gone (for instance at shutdown)!
1477       assert(ConcurrentMarkSweepThread::cmst() != NULL,
1478              "CMS thread must be running");
1479       // Wait here until the background collector gives us the go-ahead
1480       ConcurrentMarkSweepThread::clear_CMS_flag(
1481         ConcurrentMarkSweepThread::CMS_vm_has_token);  // release token
1482       // Get a possibly blocked CMS thread going:
1483       //   Note that we set _foregroundGCIsActive true above,
1484       //   without protection of the CGC_lock.
1485       CGC_lock->notify();
1486       assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(),
1487              "Possible deadlock");
1488       while (_foregroundGCShouldWait) {
1489         // wait for notification
1490         CGC_lock->wait(Mutex::_no_safepoint_check_flag);
        // Possibility of delay/starvation here, since the CMS token does
        // not know to give priority to the VM thread? Actually, I think
        // there wouldn't be any delay/starvation, but the proof of
        // that "fact" (?) appears non-trivial. XXX 20011219YSR
1495       }
1496       ConcurrentMarkSweepThread::set_CMS_flag(
1497         ConcurrentMarkSweepThread::CMS_vm_has_token);
1498     }
1499   }
1500   // The CMS_token is already held.  Get back the other locks.
1501   assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
1502          "VM thread should have CMS token");
1503   getFreelistLocks();
1504   bitMapLock()->lock_without_safepoint_check();
1505   if (TraceCMSState) {
1506     gclog_or_tty->print_cr("CMS foreground collector has asked for control "
1507       INTPTR_FORMAT " with first state %d", p2i(Thread::current()), first_state);
1508     gclog_or_tty->print_cr("    gets control with state %d", _collectorState);
1509   }
1510 
1511   // Inform cms gen if this was due to partial collection failing.
1512   // The CMS gen may use this fact to determine its expansion policy.
1513   GenCollectedHeap* gch = GenCollectedHeap::heap();
1514   if (gch->incremental_collection_will_fail(false /* don't consult_young */)) {
1515     assert(!_cmsGen->incremental_collection_failed(),
1516            "Should have been noticed, reacted to and cleared");
1517     _cmsGen->set_incremental_collection_failed();
1518   }
1519 
1520   if (first_state > Idling) {
1521     report_concurrent_mode_interruption();
1522   }
1523 
1524   set_did_compact(true);
1525 
1526   // If the collection is being acquired from the background
1527   // collector, there may be references on the discovered
1528   // references lists.  Abandon those references, since some
1529   // of them may have become unreachable after concurrent
1530   // discovery; the STW compacting collector will redo discovery
1531   // more precisely, without being subject to floating garbage.
1532   // Leaving otherwise unreachable references in the discovered
1533   // lists would require special handling.
1534   ref_processor()->disable_discovery();
1535   ref_processor()->abandon_partial_discovery();
1536   ref_processor()->verify_no_references_recorded();
1537 
1538   if (first_state > Idling) {
1539     save_heap_summary();
1540   }
1541 
1542   do_compaction_work(clear_all_soft_refs);
1543 
1544   // Has the GC time limit been exceeded?
1545   size_t max_eden_size = _young_gen->max_eden_size();
1546   GCCause::Cause gc_cause = gch->gc_cause();
1547   size_policy()->check_gc_overhead_limit(_young_gen->used(),
1548                                          _young_gen->eden()->used(),
1549                                          _cmsGen->max_capacity(),
1550                                          max_eden_size,
1551                                          full,
1552                                          gc_cause,
1553                                          gch->collector_policy());
1554 
1555   // Reset the expansion cause, now that we just completed
1556   // a collection cycle.
1557   clear_expansion_cause();
1558   _foregroundGCIsActive = false;
1559   return;
1560 }
1561 
// Resize the tenured generation
// after obtaining the free list locks for
// the constituent generations.
1565 void CMSCollector::compute_new_size() {
1566   assert_locked_or_safepoint(Heap_lock);
1567   FreelistLocker z(this);
1568   MetaspaceGC::compute_new_size();
1569   _cmsGen->compute_new_size_free_list();
1570 }
1571 
1572 // A work method used by the foreground collector to do
1573 // a mark-sweep-compact.
1574 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
1575   GenCollectedHeap* gch = GenCollectedHeap::heap();
1576 
1577   STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
1578   gc_timer->register_gc_start();
1579 
1580   SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
1581   gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
1582 
1583   GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL);
1584 
1585   // Temporarily widen the span of the weak reference processing to
1586   // the entire heap.
1587   MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
1588   ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
1589   // Temporarily, clear the "is_alive_non_header" field of the
1590   // reference processor.
1591   ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL);
1592   // Temporarily make reference _processing_ single threaded (non-MT).
1593   ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false);
1594   // Temporarily make refs discovery atomic
1595   ReferenceProcessorAtomicMutator rp_mut_atomic(ref_processor(), true);
1596   // Temporarily make reference _discovery_ single threaded (non-MT)
1597   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
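  // All of the rp_mut_* objects above are stack-allocated mutators that
  // restore the reference processor's previous settings when they go
  // out of scope at the end of this method.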
1598 
1599   ref_processor()->set_enqueuing_is_done(false);
1600   ref_processor()->enable_discovery();
1601   ref_processor()->setup_policy(clear_all_soft_refs);
  // If an asynchronous collection finishes, the _modUnionTable is
  // all clear.  If we are taking over the collection from an
  // asynchronous collection, clear the _modUnionTable.
1605   assert(_collectorState != Idling || _modUnionTable.isAllClear(),
1606     "_modUnionTable should be clear if the baton was not passed");
1607   _modUnionTable.clear_all();
  assert(_collectorState != Idling || _ct->klass_rem_set()->mod_union_is_clear(),
    "mod union for klasses should be clear if the baton was not passed");
1610   _ct->klass_rem_set()->clear_mod_union();
1611 
1612   // We must adjust the allocation statistics being maintained
1613   // in the free list space. We do so by reading and clearing
1614   // the sweep timer and updating the block flux rate estimates below.
1615   assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
1616   if (_inter_sweep_timer.is_active()) {
1617     _inter_sweep_timer.stop();
1618     // Note that we do not use this sample to update the _inter_sweep_estimate.
1619     _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
1620                                             _inter_sweep_estimate.padded_average(),
1621                                             _intra_sweep_estimate.padded_average());
1622   }
1623 
1624   GenMarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs);
1625   #ifdef ASSERT
1626     CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
1627     size_t free_size = cms_space->free();
1628     assert(free_size ==
1629            pointer_delta(cms_space->end(), cms_space->compaction_top())
1630            * HeapWordSize,
1631       "All the free space should be compacted into one chunk at top");
1632     assert(cms_space->dictionary()->total_chunk_size(
1633                                       debug_only(cms_space->freelistLock())) == 0 ||
1634            cms_space->totalSizeInIndexedFreeLists() == 0,
1635       "All the free space should be in a single chunk");
1636     size_t num = cms_space->totalCount();
1637     assert((free_size == 0 && num == 0) ||
1638            (free_size > 0  && (num == 1 || num == 2)),
1639          "There should be at most 2 free chunks after compaction");
1640   #endif // ASSERT
1641   _collectorState = Resetting;
1642   assert(_restart_addr == NULL,
1643          "Should have been NULL'd before baton was passed");
1644   reset_stw();
1645   _cmsGen->reset_after_compaction();
1646   _concurrent_cycles_since_last_unload = 0;
1647 
1648   // Clear any data recorded in the PLAB chunk arrays.
1649   if (_survivor_plab_array != NULL) {
1650     reset_survivor_plab_arrays();
1651   }
1652 
1653   // Adjust the per-size allocation stats for the next epoch.
1654   _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
1655   // Restart the "inter sweep timer" for the next epoch.
1656   _inter_sweep_timer.reset();
1657   _inter_sweep_timer.start();
1658 
1659   gc_timer->register_gc_end();
1660 
1661   gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
1662 
1663   // For a mark-sweep-compact, compute_new_size() will be called
1664   // in the heap's do_collection() method.
1665 }
1666 
1667 void CMSCollector::print_eden_and_survivor_chunk_arrays() {
1668   ContiguousSpace* eden_space = _young_gen->eden();
1669   ContiguousSpace* from_space = _young_gen->from();
1670   ContiguousSpace* to_space   = _young_gen->to();
1671   // Eden
1672   if (_eden_chunk_array != NULL) {
1673     gclog_or_tty->print_cr("eden " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
1674                            p2i(eden_space->bottom()), p2i(eden_space->top()),
1675                            p2i(eden_space->end()), eden_space->capacity());
1676     gclog_or_tty->print_cr("_eden_chunk_index=" SIZE_FORMAT ", "
1677                            "_eden_chunk_capacity=" SIZE_FORMAT,
1678                            _eden_chunk_index, _eden_chunk_capacity);
1679     for (size_t i = 0; i < _eden_chunk_index; i++) {
1680       gclog_or_tty->print_cr("_eden_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT,
1681                              i, p2i(_eden_chunk_array[i]));
1682     }
1683   }
1684   // Survivor
1685   if (_survivor_chunk_array != NULL) {
1686     gclog_or_tty->print_cr("survivor " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
1687                            p2i(from_space->bottom()), p2i(from_space->top()),
1688                            p2i(from_space->end()), from_space->capacity());
1689     gclog_or_tty->print_cr("_survivor_chunk_index=" SIZE_FORMAT ", "
1690                            "_survivor_chunk_capacity=" SIZE_FORMAT,
1691                            _survivor_chunk_index, _survivor_chunk_capacity);
1692     for (size_t i = 0; i < _survivor_chunk_index; i++) {
1693       gclog_or_tty->print_cr("_survivor_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT,
1694                              i, p2i(_survivor_chunk_array[i]));
1695     }
1696   }
1697 }
1698 
1699 void CMSCollector::getFreelistLocks() const {
1700   // Get locks for all free lists in all generations that this
1701   // collector is responsible for
1702   _cmsGen->freelistLock()->lock_without_safepoint_check();
1703 }
1704 
1705 void CMSCollector::releaseFreelistLocks() const {
1706   // Release locks for all free lists in all generations that this
1707   // collector is responsible for
1708   _cmsGen->freelistLock()->unlock();
1709 }
1710 
1711 bool CMSCollector::haveFreelistLocks() const {
1712   // Check locks for all free lists in all generations that this
1713   // collector is responsible for
1714   assert_lock_strong(_cmsGen->freelistLock());
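  // The assert above compiles away in product builds, so this method
  // must never be called there; the PRODUCT_ONLY line below enforces that.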
1715   PRODUCT_ONLY(ShouldNotReachHere());
1716   return true;
1717 }
1718 
1719 // A utility class that is used by the CMS collector to
1720 // temporarily "release" the foreground collector from its
1721 // usual obligation to wait for the background collector to
1722 // complete an ongoing phase before proceeding.
1723 class ReleaseForegroundGC: public StackObj {
1724  private:
1725   CMSCollector* _c;
1726  public:
1727   ReleaseForegroundGC(CMSCollector* c) : _c(c) {
1728     assert(_c->_foregroundGCShouldWait, "Else should not need to call");
1729     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1730     // allow a potentially blocked foreground collector to proceed
1731     _c->_foregroundGCShouldWait = false;
1732     if (_c->_foregroundGCIsActive) {
1733       CGC_lock->notify();
1734     }
1735     assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
1736            "Possible deadlock");
1737   }
1738 
1739   ~ReleaseForegroundGC() {
1740     assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
1741     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1742     _c->_foregroundGCShouldWait = true;
1743   }
1744 };
1745 
1746 void CMSCollector::collect_in_background(GCCause::Cause cause) {
1747   assert(Thread::current()->is_ConcurrentGC_thread(),
1748     "A CMS asynchronous collection is only allowed on a CMS thread.");
1749 
1750   GenCollectedHeap* gch = GenCollectedHeap::heap();
1751   {
1752     bool safepoint_check = Mutex::_no_safepoint_check_flag;
1753     MutexLockerEx hl(Heap_lock, safepoint_check);
1754     FreelistLocker fll(this);
1755     MutexLockerEx x(CGC_lock, safepoint_check);
1756     if (_foregroundGCIsActive) {
      // The foreground collector is active. Skip this
      // background collection.
1759       assert(!_foregroundGCShouldWait, "Should be clear");
1760       return;
1761     } else {
1762       assert(_collectorState == Idling, "Should be idling before start.");
1763       _collectorState = InitialMarking;
1764       register_gc_start(cause);
1765       // Reset the expansion cause, now that we are about to begin
1766       // a new cycle.
1767       clear_expansion_cause();
1768 
1769       // Clear the MetaspaceGC flag since a concurrent collection
1770       // is starting but also clear it after the collection.
1771       MetaspaceGC::set_should_concurrent_collect(false);
1772     }
1773     // Decide if we want to enable class unloading as part of the
1774     // ensuing concurrent GC cycle.
1775     update_should_unload_classes();
1776     _full_gc_requested = false;           // acks all outstanding full gc requests
1777     _full_gc_cause = GCCause::_no_gc;
1778     // Signal that we are about to start a collection
1779     gch->increment_total_full_collections();  // ... starting a collection cycle
1780     _collection_count_start = gch->total_full_collections();
1781   }
1782 
1783   // Used for PrintGC
1784   size_t prev_used = 0;
1785   if (PrintGC && Verbose) {
1786     prev_used = _cmsGen->used();
1787   }
1788 
1789   // The change of the collection state is normally done at this level;
1790   // the exceptions are phases that are executed while the world is
1791   // stopped.  For those phases the change of state is done while the
1792   // world is stopped.  For baton passing purposes this allows the
1793   // background collector to finish the phase and change state atomically.
1794   // The foreground collector cannot wait on a phase that is done
1795   // while the world is stopped because the foreground collector already
1796   // has the world stopped and would deadlock.
1797   while (_collectorState != Idling) {
1798     if (TraceCMSState) {
1799       gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
1800         p2i(Thread::current()), _collectorState);
1801     }
1802     // The foreground collector
1803     //   holds the Heap_lock throughout its collection.
1804     //   holds the CMS token (but not the lock)
1805     //     except while it is waiting for the background collector to yield.
1806     //
1807     // The foreground collector should be blocked (not for long)
1808     //   if the background collector is about to start a phase
1809     //   executed with world stopped.  If the background
1810     //   collector has already started such a phase, the
1811     //   foreground collector is blocked waiting for the
1812     //   Heap_lock.  The stop-world phases (InitialMarking and FinalMarking)
1813     //   are executed in the VM thread.
1814     //
1815     // The locking order is
1816     //   PendingListLock (PLL)  -- if applicable (FinalMarking)
1817     //   Heap_lock  (both this & PLL locked in VM_CMS_Operation::prologue())
1818     //   CMS token  (claimed in
1819     //                stop_world_and_do() -->
1820     //                  safepoint_synchronize() -->
1821     //                    CMSThread::synchronize())
1822 
1823     {
1824       // Check if the FG collector wants us to yield.
1825       CMSTokenSync x(true); // is cms thread
1826       if (waitForForegroundGC()) {
1827         // We yielded to a foreground GC, nothing more to be
1828         // done this round.
1829         assert(_foregroundGCShouldWait == false, "We set it to false in "
1830                "waitForForegroundGC()");
1831         if (TraceCMSState) {
1832           gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
1833             " exiting collection CMS state %d",
1834             p2i(Thread::current()), _collectorState);
1835         }
1836         return;
1837       } else {
1838         // The background collector can run but check to see if the
1839         // foreground collector has done a collection while the
1840         // background collector was waiting to get the CGC_lock
1841         // above.  If yes, break so that _foregroundGCShouldWait
1842         // is cleared before returning.
1843         if (_collectorState == Idling) {
1844           break;
1845         }
1846       }
1847     }
1848 
1849     assert(_foregroundGCShouldWait, "Foreground collector, if active, "
1850       "should be waiting");
1851 
1852     switch (_collectorState) {
1853       case InitialMarking:
1854         {
1855           ReleaseForegroundGC x(this);
1856           stats().record_cms_begin();
1857           VM_CMS_Initial_Mark initial_mark_op(this);
1858           VMThread::execute(&initial_mark_op);
1859         }
1860         // The collector state may be any legal state at this point
1861         // since the background collector may have yielded to the
1862         // foreground collector.
1863         break;
1864       case Marking:
1865         // initial marking in checkpointRootsInitialWork has been completed
1866         if (markFromRoots()) { // we were successful
1867           assert(_collectorState == Precleaning, "Collector state should "
1868             "have changed");
1869         } else {
1870           assert(_foregroundGCIsActive, "Internal state inconsistency");
1871         }
1872         break;
1873       case Precleaning:
1874         // marking from roots in markFromRoots has been completed
1875         preclean();
1876         assert(_collectorState == AbortablePreclean ||
1877                _collectorState == FinalMarking,
1878                "Collector state should have changed");
1879         break;
1880       case AbortablePreclean:
1881         abortable_preclean();
1882         assert(_collectorState == FinalMarking, "Collector state should "
1883           "have changed");
1884         break;
1885       case FinalMarking:
1886         {
1887           ReleaseForegroundGC x(this);
1888 
1889           VM_CMS_Final_Remark final_remark_op(this);
1890           VMThread::execute(&final_remark_op);
1891         }
1892         assert(_foregroundGCShouldWait, "block post-condition");
1893         break;
1894       case Sweeping:
1895         // final marking in checkpointRootsFinal has been completed
1896         sweep();
1897         assert(_collectorState == Resizing, "Collector state change "
1898           "to Resizing must be done under the free_list_lock");
1899 
1900       case Resizing: {
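        // Note: there is no break at the end of the Sweeping case
        // above -- when sweep() completes, control falls directly
        // through to here in the Resizing state.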
1901         // Sweeping has been completed...
1902         // At this point the background collection has completed.
1903         // Don't move the call to compute_new_size() down
1904         // into code that might be executed if the background
1905         // collection was preempted.
1906         {
1907           ReleaseForegroundGC x(this);   // unblock FG collection
1908           MutexLockerEx       y(Heap_lock, Mutex::_no_safepoint_check_flag);
1909           CMSTokenSync        z(true);   // not strictly needed.
1910           if (_collectorState == Resizing) {
1911             compute_new_size();
1912             save_heap_summary();
1913             _collectorState = Resetting;
1914           } else {
1915             assert(_collectorState == Idling, "The state should only change"
1916                    " because the foreground collector has finished the collection");
1917           }
1918         }
1919         break;
1920       }
1921       case Resetting:
1922         // CMS heap resizing has been completed
1923         reset_concurrent();
1924         assert(_collectorState == Idling, "Collector state should "
1925           "have changed");
1926 
1927         MetaspaceGC::set_should_concurrent_collect(false);
1928 
1929         stats().record_cms_end();
        // Don't move the concurrent_phases_end() and compute_new_size()
        // calls to here because a preempted background collection
        // has its state set to "Resetting".
1933         break;
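      // Idling is never dispatched on here: the while-loop condition
      // and the check after waitForForegroundGC() filter it out, so
      // reaching the labels below indicates a state inconsistency.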
1934       case Idling:
1935       default:
1936         ShouldNotReachHere();
1937         break;
1938     }
1939     if (TraceCMSState) {
1940       gclog_or_tty->print_cr("  Thread " INTPTR_FORMAT " done - next CMS state %d",
1941         p2i(Thread::current()), _collectorState);
1942     }
1943     assert(_foregroundGCShouldWait, "block post-condition");
1944   }
1945 
1946   // Should this be in gc_epilogue?
1947   collector_policy()->counters()->update_counters();
1948 
1949   {
1950     // Clear _foregroundGCShouldWait and, in the event that the
1951     // foreground collector is waiting, notify it, before
1952     // returning.
1953     MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
1954     _foregroundGCShouldWait = false;
1955     if (_foregroundGCIsActive) {
1956       CGC_lock->notify();
1957     }
1958     assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
1959            "Possible deadlock");
1960   }
1961   if (TraceCMSState) {
1962     gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
1963       " exiting collection CMS state %d",
1964       p2i(Thread::current()), _collectorState);
1965   }
1966   if (PrintGC && Verbose) {
1967     _cmsGen->print_heap_change(prev_used);
1968   }
1969 }
1970 
1971 void CMSCollector::register_gc_start(GCCause::Cause cause) {
1972   _cms_start_registered = true;
1973   _gc_timer_cm->register_gc_start();
1974   _gc_tracer_cm->report_gc_start(cause, _gc_timer_cm->gc_start());
1975 }
1976 
1977 void CMSCollector::register_gc_end() {
1978   if (_cms_start_registered) {
1979     report_heap_summary(GCWhen::AfterGC);
1980 
1981     _gc_timer_cm->register_gc_end();
1982     _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
1983     _cms_start_registered = false;
1984   }
1985 }
1986 
1987 void CMSCollector::save_heap_summary() {
1988   GenCollectedHeap* gch = GenCollectedHeap::heap();
1989   _last_heap_summary = gch->create_heap_summary();
1990   _last_metaspace_summary = gch->create_metaspace_summary();
1991 }
1992 
1993 void CMSCollector::report_heap_summary(GCWhen::Type when) {
1994   _gc_tracer_cm->report_gc_heap_summary(when, _last_heap_summary);
1995   _gc_tracer_cm->report_metaspace_summary(when, _last_metaspace_summary);
1996 }
1997 
1998 bool CMSCollector::waitForForegroundGC() {
1999   bool res = false;
2000   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
2001          "CMS thread should have CMS token");
  // Block the foreground collector until the
  // background collector decides whether to
  // yield.
2005   MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
2006   _foregroundGCShouldWait = true;
2007   if (_foregroundGCIsActive) {
2008     // The background collector yields to the
2009     // foreground collector and returns a value
2010     // indicating that it has yielded.  The foreground
2011     // collector can proceed.
2012     res = true;
2013     _foregroundGCShouldWait = false;
2014     ConcurrentMarkSweepThread::clear_CMS_flag(
2015       ConcurrentMarkSweepThread::CMS_cms_has_token);
2016     ConcurrentMarkSweepThread::set_CMS_flag(
2017       ConcurrentMarkSweepThread::CMS_cms_wants_token);
2018     // Get a possibly blocked foreground thread going
2019     CGC_lock->notify();
2020     if (TraceCMSState) {
2021       gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
2022         p2i(Thread::current()), _collectorState);
2023     }
2024     while (_foregroundGCIsActive) {
2025       CGC_lock->wait(Mutex::_no_safepoint_check_flag);
2026     }
2027     ConcurrentMarkSweepThread::set_CMS_flag(
2028       ConcurrentMarkSweepThread::CMS_cms_has_token);
2029     ConcurrentMarkSweepThread::clear_CMS_flag(
2030       ConcurrentMarkSweepThread::CMS_cms_wants_token);
2031   }
2032   if (TraceCMSState) {
2033     gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
2034       p2i(Thread::current()), _collectorState);
2035   }
2036   return res;
2037 }
2038 
// Because of the need to lock the free lists and other structures in
// the collector, common to all the generations that the collector is
// collecting, we need the gc_prologues of individual CMS generations
// to delegate to their collector. It may have been simpler had the
// current infrastructure allowed one to call a prologue on a
// collector. In the absence of that we have the generation's
// prologue delegate to the collector, which delegates back
// some "local" work to a worker method in the individual generations
// that it's responsible for collecting, while itself doing any
// work common to all generations it's responsible for. A similar
// comment applies to the gc_epilogue() methods.
// The role of the variable _between_prologue_and_epilogue is to
// enforce the invocation protocol.
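// The resulting call chain for a CMS generation is roughly:
//   GenCollectedHeap -> ConcurrentMarkSweepGeneration::gc_prologue()
//     -> CMSCollector::gc_prologue() -> gc_prologue_work().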
2052 void CMSCollector::gc_prologue(bool full) {
2053   // Call gc_prologue_work() for the CMSGen
2054   // we are responsible for.
2055 
2056   // The following locking discipline assumes that we are only called
2057   // when the world is stopped.
2058   assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption");
2059 
  // The CMSCollector prologue must call the gc_prologues for the
  // "generations" that it's responsible for.
2063 
2064   assert(   Thread::current()->is_VM_thread()
2065          || (   CMSScavengeBeforeRemark
2066              && Thread::current()->is_ConcurrentGC_thread()),
2067          "Incorrect thread type for prologue execution");
2068 
2069   if (_between_prologue_and_epilogue) {
2070     // We have already been invoked; this is a gc_prologue delegation
2071     // from yet another CMS generation that we are responsible for, just
2072     // ignore it since all relevant work has already been done.
2073     return;
2074   }
2075 
2076   // set a bit saying prologue has been called; cleared in epilogue
2077   _between_prologue_and_epilogue = true;
2078   // Claim locks for common data structures, then call gc_prologue_work()
2079   // for each CMSGen.
2080 
2081   getFreelistLocks();   // gets free list locks on constituent spaces
2082   bitMapLock()->lock_without_safepoint_check();
2083 
2084   // Should call gc_prologue_work() for all cms gens we are responsible for
2085   bool duringMarking =    _collectorState >= Marking
2086                          && _collectorState < Sweeping;
2087 
2088   // The young collections clear the modified oops state, which tells if
2089   // there are any modified oops in the class. The remark phase also needs
2090   // that information. Tell the young collection to save the union of all
2091   // modified klasses.
2092   if (duringMarking) {
2093     _ct->klass_rem_set()->set_accumulate_modified_oops(true);
2094   }
2095 
2096   bool registerClosure = duringMarking;
2097 
2098   _cmsGen->gc_prologue_work(full, registerClosure, &_modUnionClosurePar);
2099 
2100   if (!full) {
2101     stats().record_gc0_begin();
2102   }
2103 }
2104 
2105 void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
2106 
2107   _capacity_at_prologue = capacity();
2108   _used_at_prologue = used();
2109 
  // Delegate to CMSCollector which knows how to coordinate between
2111   // this and any other CMS generations that it is responsible for
2112   // collecting.
2113   collector()->gc_prologue(full);
2114 }
2115 
2116 // This is a "private" interface for use by this generation's CMSCollector.
2117 // Not to be called directly by any other entity (for instance,
2118 // GenCollectedHeap, which calls the "public" gc_prologue method above).
2119 void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
2120   bool registerClosure, ModUnionClosure* modUnionClosure) {
2121   assert(!incremental_collection_failed(), "Shouldn't be set yet");
2122   assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,
2123     "Should be NULL");
2124   if (registerClosure) {
2125     cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure);
2126   }
2127   cmsSpace()->gc_prologue();
2128   // Clear stat counters
2129   NOT_PRODUCT(
2130     assert(_numObjectsPromoted == 0, "check");
2131     assert(_numWordsPromoted   == 0, "check");
2132     if (Verbose && PrintGC) {
2133       gclog_or_tty->print("Allocated " SIZE_FORMAT " objects, "
2134                           SIZE_FORMAT " bytes concurrently",
2135       _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord));
2136     }
2137     _numObjectsAllocated = 0;
2138     _numWordsAllocated   = 0;
2139   )
2140 }
2141 
2142 void CMSCollector::gc_epilogue(bool full) {
2143   // The following locking discipline assumes that we are only called
2144   // when the world is stopped.
2145   assert(SafepointSynchronize::is_at_safepoint(),
2146          "world is stopped assumption");
2147 
  // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks
  // if linear allocation blocks need to be appropriately marked to allow
  // the blocks to be parsable. We also check here whether we need to nudge the
  // CMS collector thread to start a new cycle (if it's not already active).
2152   assert(   Thread::current()->is_VM_thread()
2153          || (   CMSScavengeBeforeRemark
2154              && Thread::current()->is_ConcurrentGC_thread()),
2155          "Incorrect thread type for epilogue execution");
2156 
2157   if (!_between_prologue_and_epilogue) {
2158     // We have already been invoked; this is a gc_epilogue delegation
2159     // from yet another CMS generation that we are responsible for, just
2160     // ignore it since all relevant work has already been done.
2161     return;
2162   }
2163   assert(haveFreelistLocks(), "must have freelist locks");
2164   assert_lock_strong(bitMapLock());
2165 
2166   _ct->klass_rem_set()->set_accumulate_modified_oops(false);
2167 
2168   _cmsGen->gc_epilogue_work(full);
2169 
2170   if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
2171     // in case sampling was not already enabled, enable it
2172     _start_sampling = true;
2173   }
2174   // reset _eden_chunk_array so sampling starts afresh
2175   _eden_chunk_index = 0;
2176 
2177   size_t cms_used   = _cmsGen->cmsSpace()->used();
2178 
2179   // update performance counters - this uses a special version of
2180   // update_counters() that allows the utilization to be passed as a
2181   // parameter, avoiding multiple calls to used().
2182   //
2183   _cmsGen->update_counters(cms_used);
2184 
2185   bitMapLock()->unlock();
2186   releaseFreelistLocks();
2187 
2188   if (!CleanChunkPoolAsync) {
2189     Chunk::clean_chunk_pool();
2190   }
2191 
2192   set_did_compact(false);
2193   _between_prologue_and_epilogue = false;  // ready for next cycle
2194 }
2195 
2196 void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
2197   collector()->gc_epilogue(full);
2198 
2199   // Also reset promotion tracking in par gc thread states.
2200   for (uint i = 0; i < ParallelGCThreads; i++) {
2201     _par_gc_thread_states[i]->promo.stopTrackingPromotions(i);
2202   }
2203 }
2204 
2205 void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) {
2206   assert(!incremental_collection_failed(), "Should have been cleared");
2207   cmsSpace()->setPreconsumptionDirtyCardClosure(NULL);
2208   cmsSpace()->gc_epilogue();
  // Print stat counters
2210   NOT_PRODUCT(
2211     assert(_numObjectsAllocated == 0, "check");
2212     assert(_numWordsAllocated == 0, "check");
2213     if (Verbose && PrintGC) {
2214       gclog_or_tty->print("Promoted " SIZE_FORMAT " objects, "
2215                           SIZE_FORMAT " bytes",
2216                  _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord));
2217     }
2218     _numObjectsPromoted = 0;
2219     _numWordsPromoted   = 0;
2220   )
2221 
2222   if (PrintGC && Verbose) {
    // The call down the chain in contiguous_available() needs the
    // freelistLock, so print this out before releasing the freelistLock.
2225     gclog_or_tty->print(" Contiguous available " SIZE_FORMAT " bytes ",
2226                         contiguous_available());
2227   }
2228 }
2229 
2230 #ifndef PRODUCT
2231 bool CMSCollector::have_cms_token() {
2232   Thread* thr = Thread::current();
2233   if (thr->is_VM_thread()) {
2234     return ConcurrentMarkSweepThread::vm_thread_has_cms_token();
2235   } else if (thr->is_ConcurrentGC_thread()) {
2236     return ConcurrentMarkSweepThread::cms_thread_has_cms_token();
2237   } else if (thr->is_GC_task_thread()) {
2238     return ConcurrentMarkSweepThread::vm_thread_has_cms_token() &&
2239            ParGCRareEvent_lock->owned_by_self();
2240   }
2241   return false;
2242 }
2243 
2244 // Check reachability of the given heap address in CMS generation,
2245 // treating all other generations as roots.
2246 bool CMSCollector::is_cms_reachable(HeapWord* addr) {
2247   // We could "guarantee" below, rather than assert, but I'll
2248   // leave these as "asserts" so that an adventurous debugger
2249   // could try this in the product build provided some subset of
2250   // the conditions were met, provided they were interested in the
2251   // results and knew that the computation below wouldn't interfere
2252   // with other concurrent computations mutating the structures
2253   // being read or written.
2254   assert(SafepointSynchronize::is_at_safepoint(),
2255          "Else mutations in object graph will make answer suspect");
2256   assert(have_cms_token(), "Should hold cms token");
2257   assert(haveFreelistLocks(), "must hold free list locks");
2258   assert_lock_strong(bitMapLock());
2259 
2260   // Clear the marking bit map array before starting, but, just
2261   // for kicks, first report if the given address is already marked
2262   tty->print_cr("Start: Address " PTR_FORMAT " is%s marked", p2i(addr),
2263                 _markBitMap.isMarked(addr) ? "" : " not");
2264 
2265   if (verify_after_remark()) {
2266     MutexLockerEx x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2267     bool result = verification_mark_bm()->isMarked(addr);
2268     tty->print_cr("TransitiveMark: Address " PTR_FORMAT " %s marked", p2i(addr),
2269                   result ? "IS" : "is NOT");
2270     return result;
2271   } else {
2272     tty->print_cr("Could not compute result");
2273     return false;
2274   }
2275 }
2276 #endif
2277 
2278 void
2279 CMSCollector::print_on_error(outputStream* st) {
2280   CMSCollector* collector = ConcurrentMarkSweepGeneration::_collector;
2281   if (collector != NULL) {
2282     CMSBitMap* bitmap = &collector->_markBitMap;
2283     st->print_cr("Marking Bits: (CMSBitMap*) " PTR_FORMAT, p2i(bitmap));
2284     bitmap->print_on_error(st, " Bits: ");
2285 
2286     st->cr();
2287 
2288     CMSBitMap* mut_bitmap = &collector->_modUnionTable;
2289     st->print_cr("Mod Union Table: (CMSBitMap*) " PTR_FORMAT, p2i(mut_bitmap));
2290     mut_bitmap->print_on_error(st, " Bits: ");
2291   }
2292 }
2293 
2294 ////////////////////////////////////////////////////////
2295 // CMS Verification Support
2296 ////////////////////////////////////////////////////////
// Following the remark phase, the following invariant
// should hold -- each object in the CMS heap which is
// marked in the verification_mark_bm() should also be
// marked in the markBitMap().
2300 
2301 class VerifyMarkedClosure: public BitMapClosure {
2302   CMSBitMap* _marks;
2303   bool       _failed;
2304 
2305  public:
2306   VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
2307 
2308   bool do_bit(size_t offset) {
2309     HeapWord* addr = _marks->offsetToHeapWord(offset);
2310     if (!_marks->isMarked(addr)) {
2311       oop(addr)->print_on(gclog_or_tty);
2312       gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
2313       _failed = true;
2314     }
2315     return true;
2316   }
2317 
2318   bool failed() { return _failed; }
2319 };
2320 
2321 bool CMSCollector::verify_after_remark(bool silent) {
2322   if (!silent) gclog_or_tty->print(" [Verifying CMS Marking... ");
2323   MutexLockerEx ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
2324   static bool init = false;
2325 
2326   assert(SafepointSynchronize::is_at_safepoint(),
2327          "Else mutations in object graph will make answer suspect");
  assert(have_cms_token(),
         "Else there may be mutual interference in use of "
         "verification data structures");
2331   assert(_collectorState > Marking && _collectorState <= Sweeping,
2332          "Else marking info checked here may be obsolete");
2333   assert(haveFreelistLocks(), "must hold free list locks");
2334   assert_lock_strong(bitMapLock());
2335 
2336 
2337   // Allocate marking bit map if not already allocated
2338   if (!init) { // first time
2339     if (!verification_mark_bm()->allocate(_span)) {
2340       return false;
2341     }
2342     init = true;
2343   }
2344 
2345   assert(verification_mark_stack()->isEmpty(), "Should be empty");
2346 
2347   // Turn off refs discovery -- so we will be tracing through refs.
2348   // This is as intended, because by this time
2349   // GC must already have cleared any refs that need to be cleared,
2350   // and traced those that need to be marked; moreover,
2351   // the marking done here is not going to interfere in any
2352   // way with the marking information used by GC.
2353   NoRefDiscovery no_discovery(ref_processor());
2354 
2355 #if defined(COMPILER2) || INCLUDE_JVMCI
2356   DerivedPointerTableDeactivate dpt_deact;
2357 #endif
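  // (The DerivedPointerTableDeactivate above keeps derived pointers
  // from being recorded while we mark for verification; nothing is
  // moved here, so they are not needed.)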
2358 
2359   // Clear any marks from a previous round
2360   verification_mark_bm()->clear_all();
2361   assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
2362   verify_work_stacks_empty();
2363 
2364   GenCollectedHeap* gch = GenCollectedHeap::heap();
2365   gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
2366   // Update the saved marks which may affect the root scans.
2367   gch->save_marks();
2368 
2369   if (CMSRemarkVerifyVariant == 1) {
2370     // In this first variant of verification, we complete
2371     // all marking, then check if the new marks-vector is
2372     // a subset of the CMS marks-vector.
2373     verify_after_remark_work_1();
2374   } else if (CMSRemarkVerifyVariant == 2) {
2375     // In this second variant of verification, we flag an error
2376     // (i.e. an object reachable in the new marks-vector not reachable
2377     // in the CMS marks-vector) immediately, also indicating the
    // identity of an object (A) that references the unmarked object (B) --
2379     // presumably, a mutation to A failed to be picked up by preclean/remark?
2380     verify_after_remark_work_2();
2381   } else {
2382     warning("Unrecognized value " UINTX_FORMAT " for CMSRemarkVerifyVariant",
2383             CMSRemarkVerifyVariant);
2384   }
2385   if (!silent) gclog_or_tty->print(" done] ");
2386   return true;
2387 }
2388 
2389 void CMSCollector::verify_after_remark_work_1() {
2390   ResourceMark rm;
2391   HandleMark  hm;
2392   GenCollectedHeap* gch = GenCollectedHeap::heap();
2393 
2394   // Get a clear set of claim bits for the roots processing to work with.
2395   ClassLoaderDataGraph::clear_claimed_marks();
2396 
2397   // Mark from roots one level into CMS
2398   MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
2399   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2400 
2401   {
2402     StrongRootsScope srs(1);
2403 
2404     gch->gen_process_roots(&srs,
2405                            GenCollectedHeap::OldGen,
2406                            true,   // young gen as roots
2407                            GenCollectedHeap::ScanningOption(roots_scanning_options()),
2408                            should_unload_classes(),
2409                            &notOlder,
2410                            NULL,
2411                            NULL);
2412   }
2413 
2414   // Now mark from the roots
2415   MarkFromRootsClosure markFromRootsClosure(this, _span,
2416     verification_mark_bm(), verification_mark_stack(),
2417     false /* don't yield */, true /* verifying */);
2418   assert(_restart_addr == NULL, "Expected pre-condition");
2419   verification_mark_bm()->iterate(&markFromRootsClosure);
2420   while (_restart_addr != NULL) {
    // Deal with stack overflow by restarting at the indicated
    // address.
2423     HeapWord* ra = _restart_addr;
2424     markFromRootsClosure.reset(ra);
2425     _restart_addr = NULL;
2426     verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2427   }
2428   assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2429   verify_work_stacks_empty();
2430 
2431   // Marking completed -- now verify that each bit marked in
2432   // verification_mark_bm() is also marked in markBitMap(); flag all
2433   // errors by printing corresponding objects.
2434   VerifyMarkedClosure vcl(markBitMap());
2435   verification_mark_bm()->iterate(&vcl);
2436   if (vcl.failed()) {
2437     gclog_or_tty->print("Verification failed");
2438     gch->print_on(gclog_or_tty);
2439     fatal("CMS: failed marking verification after remark");
2440   }
2441 }
2442 
2443 class VerifyKlassOopsKlassClosure : public KlassClosure {
2444   class VerifyKlassOopsClosure : public OopClosure {
2445     CMSBitMap* _bitmap;
2446    public:
2447     VerifyKlassOopsClosure(CMSBitMap* bitmap) : _bitmap(bitmap) { }
2448     void do_oop(oop* p)       { guarantee(*p == NULL || _bitmap->isMarked((HeapWord*) *p), "Should be marked"); }
2449     void do_oop(narrowOop* p) { ShouldNotReachHere(); }
2450   } _oop_closure;
2451  public:
2452   VerifyKlassOopsKlassClosure(CMSBitMap* bitmap) : _oop_closure(bitmap) {}
2453   void do_klass(Klass* k) {
2454     k->oops_do(&_oop_closure);
2455   }
2456 };
2457 
2458 void CMSCollector::verify_after_remark_work_2() {
2459   ResourceMark rm;
2460   HandleMark  hm;
2461   GenCollectedHeap* gch = GenCollectedHeap::heap();
2462 
2463   // Get a clear set of claim bits for the roots processing to work with.
2464   ClassLoaderDataGraph::clear_claimed_marks();
2465 
2466   // Mark from roots one level into CMS
2467   MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
2468                                      markBitMap());
2469   CLDToOopClosure cld_closure(&notOlder, true);
2470 
2471   gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2472 
2473   {
2474     StrongRootsScope srs(1);
2475 
2476     gch->gen_process_roots(&srs,
2477                            GenCollectedHeap::OldGen,
2478                            true,   // young gen as roots
2479                            GenCollectedHeap::ScanningOption(roots_scanning_options()),
2480                            should_unload_classes(),
2481                            &notOlder,
2482                            NULL,
2483                            &cld_closure);
2484   }
2485 
2486   // Now mark from the roots
2487   MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
2488     verification_mark_bm(), markBitMap(), verification_mark_stack());
2489   assert(_restart_addr == NULL, "Expected pre-condition");
2490   verification_mark_bm()->iterate(&markFromRootsClosure);
2491   while (_restart_addr != NULL) {
    // Deal with stack overflow by restarting at the indicated
    // address.
2494     HeapWord* ra = _restart_addr;
2495     markFromRootsClosure.reset(ra);
2496     _restart_addr = NULL;
2497     verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
2498   }
2499   assert(verification_mark_stack()->isEmpty(), "Should have been drained");
2500   verify_work_stacks_empty();
2501 
2502   VerifyKlassOopsKlassClosure verify_klass_oops(verification_mark_bm());
2503   ClassLoaderDataGraph::classes_do(&verify_klass_oops);
2504 
2505   // Marking completed -- now verify that each bit marked in
2506   // verification_mark_bm() is also marked in markBitMap(); flag all
2507   // errors by printing corresponding objects.
2508   VerifyMarkedClosure vcl(markBitMap());
2509   verification_mark_bm()->iterate(&vcl);
2510   assert(!vcl.failed(), "Else verification above should not have succeeded");
2511 }
2512 
2513 void ConcurrentMarkSweepGeneration::save_marks() {
2514   // delegate to CMS space
2515   cmsSpace()->save_marks();
2516   for (uint i = 0; i < ParallelGCThreads; i++) {
2517     _par_gc_thread_states[i]->promo.startTrackingPromotions();
2518   }
2519 }
2520 
2521 bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
2522   return cmsSpace()->no_allocs_since_save_marks();
2523 }
2524 
2525 #define CMS_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)    \
2526                                                                 \
2527 void ConcurrentMarkSweepGeneration::                            \
2528 oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) {   \
2529   cl->set_generation(this);                                     \
2530   cmsSpace()->oop_since_save_marks_iterate##nv_suffix(cl);      \
2531   cl->reset_generation();                                       \
2532   save_marks();                                                 \
2533 }
2534 
2535 ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN)
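
// For illustration (a sketch, not generated code): for a closure type
// such as ScanClosure with the "_nv" (non-virtual) suffix, the macro
// above expands to roughly:
//
//   void ConcurrentMarkSweepGeneration::
//   oop_since_save_marks_iterate_nv(ScanClosure* cl) {
//     cl->set_generation(this);
//     cmsSpace()->oop_since_save_marks_iterate_nv(cl);
//     cl->reset_generation();
//     save_marks();
//   }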
2536 
2537 void
2538 ConcurrentMarkSweepGeneration::oop_iterate(ExtendedOopClosure* cl) {
2539   if (freelistLock()->owned_by_self()) {
2540     Generation::oop_iterate(cl);
2541   } else {
2542     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2543     Generation::oop_iterate(cl);
2544   }
2545 }
2546 
2547 void
2548 ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) {
2549   if (freelistLock()->owned_by_self()) {
2550     Generation::object_iterate(cl);
2551   } else {
2552     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2553     Generation::object_iterate(cl);
2554   }
2555 }
2556 
2557 void
2558 ConcurrentMarkSweepGeneration::safe_object_iterate(ObjectClosure* cl) {
2559   if (freelistLock()->owned_by_self()) {
2560     Generation::safe_object_iterate(cl);
2561   } else {
2562     MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2563     Generation::safe_object_iterate(cl);
2564   }
2565 }
2566 
2567 void
2568 ConcurrentMarkSweepGeneration::post_compact() {
2569 }
2570 
2571 void
2572 ConcurrentMarkSweepGeneration::prepare_for_verify() {
2573   // Fix the linear allocation blocks to look like free blocks.
2574 
2575   // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
2576   // are not called when the heap is verified during universe initialization and
2577   // at vm shutdown.
2578   if (freelistLock()->owned_by_self()) {
2579     cmsSpace()->prepare_for_verify();
2580   } else {
2581     MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
2582     cmsSpace()->prepare_for_verify();
2583   }
2584 }
2585 
2586 void
2587 ConcurrentMarkSweepGeneration::verify() {
2588   // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
2589   // are not called when the heap is verified during universe initialization and
2590   // at vm shutdown.
2591   if (freelistLock()->owned_by_self()) {
2592     cmsSpace()->verify();
2593   } else {
2594     MutexLockerEx fll(freelistLock(), Mutex::_no_safepoint_check_flag);
2595     cmsSpace()->verify();
2596   }
2597 }
2598 
2599 void CMSCollector::verify() {
2600   _cmsGen->verify();
2601 }
2602 
2603 #ifndef PRODUCT
2604 bool CMSCollector::overflow_list_is_empty() const {
2605   assert(_num_par_pushes >= 0, "Inconsistency");
2606   if (_overflow_list == NULL) {
2607     assert(_num_par_pushes == 0, "Inconsistency");
2608   }
2609   return _overflow_list == NULL;
2610 }
2611 
2612 // The methods verify_work_stacks_empty() and verify_overflow_empty()
2613 // merely consolidate assertion checks that appear to occur together frequently.
2614 void CMSCollector::verify_work_stacks_empty() const {
2615   assert(_markStack.isEmpty(), "Marking stack should be empty");
2616   assert(overflow_list_is_empty(), "Overflow list should be empty");
2617 }
2618 
2619 void CMSCollector::verify_overflow_empty() const {
2620   assert(overflow_list_is_empty(), "Overflow list should be empty");
2621   assert(no_preserved_marks(), "No preserved marks");
2622 }
2623 #endif // PRODUCT
2624 
2625 // Decide if we want to enable class unloading as part of the
2626 // ensuing concurrent GC cycle. We will collect and
2627 // unload classes if it's the case that:
2628 // (1) an explicit gc request has been made and the flag
2629 //     ExplicitGCInvokesConcurrentAndUnloadsClasses is set, OR
2630 // (2) (a) class unloading is enabled at the command line, and
2631 //     (b) (i) enough concurrent cycles have elapsed since classes
2632 //         were last unloaded, or (ii) old gen is getting really full
2633 // NOTE: Provided there is no change in the state of the heap between
2634 // calls to this method, it should have idempotent results. Moreover,
2635 // its results should be monotonically increasing (i.e. going from
2636 // false to true, but not from true to false) between successive calls
2637 // between which the heap was not collected. The implementation below
2638 // therefore relies on the properties that
2639 // concurrent_cycles_since_last_unload() will not decrease unless a
2640 // collection cycle happened, and that _cmsGen->is_too_full() is
2641 // itself also monotonic in that sense.
2642 void CMSCollector::update_should_unload_classes() {
2643   _should_unload_classes = false;
2644   // Condition 1 above
2645   if (_full_gc_requested && ExplicitGCInvokesConcurrentAndUnloadsClasses) {
2646     _should_unload_classes = true;
2647   } else if (CMSClassUnloadingEnabled) { // Condition 2.a above
2648     // Disjuncts 2.b.(i,ii) above
2649     _should_unload_classes = (concurrent_cycles_since_last_unload() >=
2650                               CMSClassUnloadingMaxInterval)
2651                            || _cmsGen->is_too_full();
2652   }
2653 }
2654 
2655 bool ConcurrentMarkSweepGeneration::is_too_full() const {
2656   bool res = should_concurrent_collect();
2657   res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
2658   return res;
2659 }
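
// A worked instance of the predicate above (assuming the default
// CMSIsTooFullPercentage of 98): is_too_full() answers true only when
// should_concurrent_collect() already holds AND occupancy() > 0.98,
// i.e. the CMS generation is more than 98% occupied.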
2660 
2661 void CMSCollector::setup_cms_unloading_and_verification_state() {
2662   const  bool should_verify =   VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
2663                              || VerifyBeforeExit;
2664   const  int  rso           =   GenCollectedHeap::SO_AllCodeCache;
2665 
2666   // We set the proper root for this CMS cycle here.
2667   if (should_unload_classes()) {   // Should unload classes this cycle
2668     remove_root_scanning_option(rso);  // Shrink the root set appropriately
2669     set_verifying(should_verify);    // Set verification state for this cycle
2670     return;                            // Nothing else needs to be done at this time
2671   }
2672 
2673   // Not unloading classes this cycle
2674   assert(!should_unload_classes(), "Inconsistency!");
2675 
2676   // If we are not unloading classes then add SO_AllCodeCache to root
2677   // scanning options.
2678   add_root_scanning_option(rso);
2679 
2680   if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
2681     set_verifying(true);
2682   } else if (verifying() && !should_verify) {
2683     // We were verifying, but some verification flags got disabled.
2684     set_verifying(false);
2685     // Exclude symbols, strings and code cache elements from root scanning to
2686     // reduce IM and RM pauses.
2687     remove_root_scanning_option(rso);
2688   }
2689 }
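
// For example: in a cycle that will unload classes while VerifyBeforeGC
// is set, the code above removes SO_AllCodeCache from the root-scanning
// options and enables verification; in a cycle that will not unload
// classes, SO_AllCodeCache is added back so code-cache roots are scanned,
// and verification is switched off again if all the verification flags
// have since been disabled.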
2690 
2691 
2692 #ifndef PRODUCT
2693 HeapWord* CMSCollector::block_start(const void* p) const {
2694   const HeapWord* addr = (HeapWord*)p;
2695   if (_span.contains(p)) {
2696     if (_cmsGen->cmsSpace()->is_in_reserved(addr)) {
2697       return _cmsGen->cmsSpace()->block_start(p);
2698     }
2699   }
2700   return NULL;
2701 }
2702 #endif
2703 
2704 HeapWord*
2705 ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
2706                                                    bool   tlab,
2707                                                    bool   parallel) {
2708   CMSSynchronousYieldRequest yr;
2709   assert(!tlab, "Can't deal with TLAB allocation");
2710   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
2711   expand_for_gc_cause(word_size*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_allocation);
2712   if (GCExpandToAllocateDelayMillis > 0) {
2713     os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
2714   }
2715   return have_lock_and_allocate(word_size, tlab);
2716 }
2717 
2718 void ConcurrentMarkSweepGeneration::expand_for_gc_cause(
2719     size_t bytes,
2720     size_t expand_bytes,
2721     CMSExpansionCause::Cause cause)
2722 {
2723 
2724   bool success = expand(bytes, expand_bytes);
2725 
2726   // remember why we expanded; this information is used
2727   // by shouldConcurrentCollect() when making decisions on whether to start
2728   // a new CMS cycle.
2729   if (success) {
2730     set_expansion_cause(cause);
2731     if (PrintGCDetails && Verbose) {
2732       gclog_or_tty->print_cr("Expanded CMS gen for %s",
2733         CMSExpansionCause::to_string(cause));
2734     }
2735   }
2736 }
2737 
2738 HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
2739   HeapWord* res = NULL;
2740   MutexLocker x(ParGCRareEvent_lock);
2741   while (true) {
2742     // Expansion by some other thread might make alloc OK now:
2743     res = ps->lab.alloc(word_sz);
2744     if (res != NULL) return res;
2745     // If there's not enough expansion space available, give up.
2746     if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
2747       return NULL;
2748     }
2749     // Otherwise, we try expansion.
2750     expand_for_gc_cause(word_sz*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_lab);
2751     // Now go around the loop and try alloc again;
2752     // A competing par_promote might beat us to the expansion space,
2753     // so we may go around the loop again if promotion fails again.
2754     if (GCExpandToAllocateDelayMillis > 0) {
2755       os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
2756     }
2757   }
2758 }
2759 
2760 
2761 bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
2762   PromotionInfo* promo) {
2763   MutexLocker x(ParGCRareEvent_lock);
2764   size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
2765   while (true) {
2766     // Expansion by some other thread might make alloc OK now:
2767     if (promo->ensure_spooling_space()) {
2768       assert(promo->has_spooling_space(),
2769              "Post-condition of successful ensure_spooling_space()");
2770       return true;
2771     }
2772     // If there's not enough expansion space available, give up.
2773     if (_virtual_space.uncommitted_size() < refill_size_bytes) {
2774       return false;
2775     }
2776     // Otherwise, we try expansion.
2777     expand_for_gc_cause(refill_size_bytes, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_spooling_space);
2778     // Now go around the loop and try alloc again;
2779     // A competing allocation might beat us to the expansion space,
2780     // so we may go around the loop again if allocation fails again.
2781     if (GCExpandToAllocateDelayMillis > 0) {
2782       os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
2783     }
2784   }
2785 }
2786 
2787 void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
2788   // Only shrink if a compaction was done so that all the free space
2789   // in the generation is in a contiguous block at the end.
2790   if (did_compact()) {
2791     CardGeneration::shrink(bytes);
2792   }
2793 }
2794 
2795 void ConcurrentMarkSweepGeneration::assert_correct_size_change_locking() {
2796   assert_locked_or_safepoint(Heap_lock);
2797 }
2798 
2799 void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) {
2800   assert_locked_or_safepoint(Heap_lock);
2801   assert_lock_strong(freelistLock());
2802   if (PrintGCDetails && Verbose) {
2803     warning("Shrinking of CMS not yet implemented");
2804   }
2805   return;
2806 }
2807 
2808 
2809 // Simple ctor/dtor wrapper for accounting & timer chores around concurrent
2810 // phases.
2811 class CMSPhaseAccounting: public StackObj {
2812  public:
2813   CMSPhaseAccounting(CMSCollector *collector,
2814                      const char *phase,
2815                      bool print_cr = true);
2816   ~CMSPhaseAccounting();
2817 
2818  private:
2819   CMSCollector *_collector;
2820   const char *_phase;
2821   elapsedTimer _wallclock;
2822   bool _print_cr;
2823 
2824  public:
2825   // Not MT-safe; so do not pass around these StackObj's
2826   // where they may be accessed by other threads.
2827   jlong wallclock_millis() {
2828     assert(_wallclock.is_active(), "Wall clock should not stop");
2829     _wallclock.stop();  // to record time
2830     jlong ret = _wallclock.milliseconds();
2831     _wallclock.start(); // restart
2832     return ret;
2833   }
2834 };
2835 
2836 CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
2837                                        const char *phase,
2838                                        bool print_cr) :
2839   _collector(collector), _phase(phase), _print_cr(print_cr) {
2840 
2841   if (PrintCMSStatistics != 0) {
2842     _collector->resetYields();
2843   }
2844   if (PrintGCDetails) {
2845     gclog_or_tty->gclog_stamp();
2846     gclog_or_tty->print_cr("[%s-concurrent-%s-start]",
2847       _collector->cmsGen()->short_name(), _phase);
2848   }
2849   _collector->resetTimer();
2850   _wallclock.start();
2851   _collector->startTimer();
2852 }
2853 
2854 CMSPhaseAccounting::~CMSPhaseAccounting() {
2855   assert(_wallclock.is_active(), "Wall clock should not have stopped");
2856   _collector->stopTimer();
2857   _wallclock.stop();
2858   if (PrintGCDetails) {
2859     gclog_or_tty->gclog_stamp();
2860     gclog_or_tty->print("[%s-concurrent-%s: %3.3f/%3.3f secs]",
2861                  _collector->cmsGen()->short_name(),
2862                  _phase, _collector->timerValue(), _wallclock.seconds());
2863     if (_print_cr) {
2864       gclog_or_tty->cr();
2865     }
2866     if (PrintCMSStatistics != 0) {
2867       gclog_or_tty->print_cr(" (CMS-concurrent-%s yielded %d times)", _phase,
2868                     _collector->yields());
2869     }
2870   }
2871 }
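
// Illustrative usage (a sketch of the pattern the concurrent phases
// below follow; see e.g. markFromRoots()):
//
//   {
//     CMSTokenSyncWithLocks ts(true, bitMapLock());
//     CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
//     ... do the concurrent work for the phase ...
//   } // pa's destructor prints "[CMS-concurrent-mark: <cpu>/<wall> secs]"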
2872 
2873 // CMS work
2874 
2875 // The common parts of CMSParInitialMarkTask and CMSParRemarkTask.
2876 class CMSParMarkTask : public AbstractGangTask {
2877  protected:
2878   CMSCollector*     _collector;
2879   uint              _n_workers;
2880   CMSParMarkTask(const char* name, CMSCollector* collector, uint n_workers) :
2881       AbstractGangTask(name),
2882       _collector(collector),
2883       _n_workers(n_workers) {}
2884   // Work method in support of parallel rescan of young gen spaces
2885   void do_young_space_rescan(uint worker_id, OopsInGenClosure* cl,
2886                              ContiguousSpace* space,
2887                              HeapWord** chunk_array, size_t chunk_top);
2888   void work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl);
2889 };
2890 
2891 // Parallel initial mark task
2892 class CMSParInitialMarkTask: public CMSParMarkTask {
2893   StrongRootsScope* _strong_roots_scope;
2894  public:
2895   CMSParInitialMarkTask(CMSCollector* collector, StrongRootsScope* strong_roots_scope, uint n_workers) :
2896       CMSParMarkTask("Scan roots and young gen for initial mark in parallel", collector, n_workers),
2897       _strong_roots_scope(strong_roots_scope) {}
2898   void work(uint worker_id);
2899 };
2900 
2901 // Checkpoint the roots into this generation from outside
2902 // this generation. [Note this initial checkpoint need only
2903 // be approximate -- we'll do a catch up phase subsequently.]
2904 void CMSCollector::checkpointRootsInitial() {
2905   assert(_collectorState == InitialMarking, "Wrong collector state");
2906   check_correct_thread_executing();
2907   TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
2908 
2909   save_heap_summary();
2910   report_heap_summary(GCWhen::BeforeGC);
2911 
2912   ReferenceProcessor* rp = ref_processor();
2913   assert(_restart_addr == NULL, "Control point invariant");
2914   {
2915     // acquire locks for subsequent manipulations
2916     MutexLockerEx x(bitMapLock(),
2917                     Mutex::_no_safepoint_check_flag);
2918     checkpointRootsInitialWork();
2919     // enable ("weak") refs discovery
2920     rp->enable_discovery();
2921     _collectorState = Marking;
2922   }
2923 }
2924 
2925 void CMSCollector::checkpointRootsInitialWork() {
2926   assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
2927   assert(_collectorState == InitialMarking, "just checking");
2928 
2929   // Already have locks.
2930   assert_lock_strong(bitMapLock());
2931   assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
2932 
2933   // Setup the verification and class unloading state for this
2934   // CMS collection cycle.
2935   setup_cms_unloading_and_verification_state();
2936 
2937   NOT_PRODUCT(GCTraceTime t("\ncheckpointRootsInitialWork",
2938     PrintGCDetails && Verbose, true, _gc_timer_cm);)
2939 
2940   // Reset all the PLAB chunk arrays if necessary.
2941   if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
2942     reset_survivor_plab_arrays();
2943   }
2944 
2945   ResourceMark rm;
2946   HandleMark  hm;
2947 
2948   MarkRefsIntoClosure notOlder(_span, &_markBitMap);
2949   GenCollectedHeap* gch = GenCollectedHeap::heap();
2950 
2951   verify_work_stacks_empty();
2952   verify_overflow_empty();
2953 
2954   gch->ensure_parsability(false);  // fill TLABs, but no need to retire them
2955   // Update the saved marks which may affect the root scans.
2956   gch->save_marks();
2957 
2958   // weak reference processing has not started yet.
2959   ref_processor()->set_enqueuing_is_done(false);
2960 
2961   // Need to remember all newly created CLDs,
2962   // so that we can guarantee that the remark finds them.
2963   ClassLoaderDataGraph::remember_new_clds(true);
2964 
2965   // Whenever a CLD is found, it will be claimed before proceeding to mark
2966   // the klasses. The claimed marks need to be cleared before marking starts.
2967   ClassLoaderDataGraph::clear_claimed_marks();
2968 
2969   if (CMSPrintEdenSurvivorChunks) {
2970     print_eden_and_survivor_chunk_arrays();
2971   }
2972 
2973   {
2974 #if defined(COMPILER2) || INCLUDE_JVMCI
2975     DerivedPointerTableDeactivate dpt_deact;
2976 #endif
2977     if (CMSParallelInitialMarkEnabled) {
2978       // The parallel version.
2979       WorkGang* workers = gch->workers();
2980       assert(workers != NULL, "Need parallel worker threads.");
2981       uint n_workers = workers->active_workers();
2982 
2983       StrongRootsScope srs(n_workers);
2984 
2985       CMSParInitialMarkTask tsk(this, &srs, n_workers);
2986       initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
2987       if (n_workers > 1) {
2988         workers->run_task(&tsk);
2989       } else {
2990         tsk.work(0);
2991       }
2992     } else {
2993       // The serial version.
2994       CLDToOopClosure cld_closure(&notOlder, true);
2995       gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
2996 
2997       StrongRootsScope srs(1);
2998 
2999       gch->gen_process_roots(&srs,
3000                              GenCollectedHeap::OldGen,
3001                              true,   // young gen as roots
3002                              GenCollectedHeap::ScanningOption(roots_scanning_options()),
3003                              should_unload_classes(),
3004                              &notOlder,
3005                              NULL,
3006                              &cld_closure);
3007     }
3008   }
3009 
3010   // The mod-union table is clear at this point; it will be dirtied in
3011   // the CMS generation's gc_prologue on each young generation collection.
3012 
3013   assert(_modUnionTable.isAllClear(),
3014        "Was cleared in most recent final checkpoint phase,"
3015        " or no bits were set in the gc_prologue before the start"
3016        " of the next marking phase.");
3017 
3018   assert(_ct->klass_rem_set()->mod_union_is_clear(), "Must be");
3019 
3020   // Save the end of the used_region of the constituent generations
3021   // to be used to limit the extent of sweep in each generation.
3022   save_sweep_limits();
3023   verify_overflow_empty();
3024 }
3025 
3026 bool CMSCollector::markFromRoots() {
3027   // we might be tempted to assert that:
3028   // assert(!SafepointSynchronize::is_at_safepoint(),
3029   //        "inconsistent argument?");
3030   // However that wouldn't be right, because it's possible that
3031   // a safepoint is indeed in progress as a young generation
3032   // stop-the-world GC happens even as we mark in this generation.
3033   assert(_collectorState == Marking, "inconsistent state?");
3034   check_correct_thread_executing();
3035   verify_overflow_empty();
3036 
3037   // Weak ref discovery note: We may be discovering weak
3038   // refs in this generation concurrently (but interleaved) with
3039   // weak ref discovery by the young generation collector.
3040 
3041   CMSTokenSyncWithLocks ts(true, bitMapLock());
3042   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3043   CMSPhaseAccounting pa(this, "mark", !PrintGCDetails);
3044   bool res = markFromRootsWork();
3045   if (res) {
3046     _collectorState = Precleaning;
3047   } else { // We failed and a foreground collection wants to take over
3048     assert(_foregroundGCIsActive, "internal state inconsistency");
3049     assert(_restart_addr == NULL,  "foreground will restart from scratch");
3050     if (PrintGCDetails) {
3051       gclog_or_tty->print_cr("bailing out to foreground collection");
3052     }
3053   }
3054   verify_overflow_empty();
3055   return res;
3056 }
3057 
3058 bool CMSCollector::markFromRootsWork() {
3059   // iterate over marked bits in bit map, doing a full scan and mark
3060   // from these roots using the following algorithm:
3061   // . if oop is to the right of the current scan pointer,
3062   //   mark corresponding bit (we'll process it later)
3063   // . else (oop is to left of current scan pointer)
3064   //   push oop on marking stack
3065   // . drain the marking stack
3066 
3067   // Note that when we do a marking step we need to hold the
3068   // bit map lock -- recall that direct allocation (by mutators)
3069   // and promotion (by the young generation collector) is also
3070   // marking the bit map. [the so-called allocate live policy.]
3071   // Because the implementation of bit map marking is not
3072   // robust wrt simultaneous marking of bits in the same word,
3073   // we need to make sure that there is no such interference
3074   // between concurrent such updates.
3075 
3076   // already have locks
3077   assert_lock_strong(bitMapLock());
3078 
3079   verify_work_stacks_empty();
3080   verify_overflow_empty();
3081   bool result = false;
3082   if (CMSConcurrentMTEnabled && ConcGCThreads > 0) {
3083     result = do_marking_mt();
3084   } else {
3085     result = do_marking_st();
3086   }
3087   return result;
3088 }
3089 
3090 // Forward decl
3091 class CMSConcMarkingTask;
3092 
3093 class CMSConcMarkingTerminator: public ParallelTaskTerminator {
3094   CMSCollector*       _collector;
3095   CMSConcMarkingTask* _task;
3096  public:
3097   virtual void yield();
3098 
3099   // "n_threads" is the number of threads to be terminated.
3100   // "queue_set" is a set of work queues of other threads.
3101   // "collector" is the CMS collector associated with this task terminator.
3102   // The yield() override above makes the gang as a whole yield when needed.
3103   CMSConcMarkingTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) :
3104     ParallelTaskTerminator(n_threads, queue_set),
3105     _collector(collector) { }
3106 
3107   void set_task(CMSConcMarkingTask* task) {
3108     _task = task;
3109   }
3110 };
3111 
3112 class CMSConcMarkingTerminatorTerminator: public TerminatorTerminator {
3113   CMSConcMarkingTask* _task;
3114  public:
3115   bool should_exit_termination();
3116   void set_task(CMSConcMarkingTask* task) {
3117     _task = task;
3118   }
3119 };
3120 
3121 // MT Concurrent Marking Task
3122 class CMSConcMarkingTask: public YieldingFlexibleGangTask {
3123   CMSCollector* _collector;
3124   uint          _n_workers;       // requested/desired # workers
3125   bool          _result;
3126   CompactibleFreeListSpace*  _cms_space;
3127   char          _pad_front[64];   // padding to ...
3128   HeapWord*     _global_finger;   // ... avoid sharing cache line
3129   char          _pad_back[64];
3130   HeapWord*     _restart_addr;
3131 
3132   //  Exposed here for yielding support
3133   Mutex* const _bit_map_lock;
3134 
3135   // The per thread work queues, available here for stealing
3136   OopTaskQueueSet*  _task_queues;
3137 
3138   // Termination (and yielding) support
3139   CMSConcMarkingTerminator _term;
3140   CMSConcMarkingTerminatorTerminator _term_term;
3141 
3142  public:
3143   CMSConcMarkingTask(CMSCollector* collector,
3144                  CompactibleFreeListSpace* cms_space,
3145                  YieldingFlexibleWorkGang* workers,
3146                  OopTaskQueueSet* task_queues):
3147     YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
3148     _collector(collector),
3149     _cms_space(cms_space),
3150     _n_workers(0), _result(true),
3151     _task_queues(task_queues),
3152     _term(_n_workers, task_queues, _collector),
3153     _bit_map_lock(collector->bitMapLock())
3154   {
3155     _requested_size = _n_workers;
3156     _term.set_task(this);
3157     _term_term.set_task(this);
3158     _restart_addr = _global_finger = _cms_space->bottom();
3159   }
3160 
3161 
3162   OopTaskQueueSet* task_queues()  { return _task_queues; }
3163 
3164   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
3165 
3166   HeapWord** global_finger_addr() { return &_global_finger; }
3167 
3168   CMSConcMarkingTerminator* terminator() { return &_term; }
3169 
3170   virtual void set_for_termination(uint active_workers) {
3171     terminator()->reset_for_reuse(active_workers);
3172   }
3173 
3174   void work(uint worker_id);
3175   bool should_yield() {
3176     return    ConcurrentMarkSweepThread::should_yield()
3177            && !_collector->foregroundGCIsActive();
3178   }
3179 
3180   virtual void coordinator_yield();  // stuff done by coordinator
3181   bool result() { return _result; }
3182 
3183   void reset(HeapWord* ra) {
3184     assert(_global_finger >= _cms_space->end(),  "Postcondition of ::work(i)");
3185     _restart_addr = _global_finger = ra;
3186     _term.reset_for_reuse();
3187   }
3188 
3189   static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3190                                            OopTaskQueue* work_q);
3191 
3192  private:
3193   void do_scan_and_mark(int i, CompactibleFreeListSpace* sp);
3194   void do_work_steal(int i);
3195   void bump_global_finger(HeapWord* f);
3196 };
3197 
3198 bool CMSConcMarkingTerminatorTerminator::should_exit_termination() {
3199   assert(_task != NULL, "Error");
3200   return _task->yielding();
3201   // Note that we do not need the disjunct || _task->should_yield() above
3202   // because we want terminating threads to yield only if the task
3203   // is already in the midst of yielding, which happens only after at least one
3204   // thread has yielded.
3205 }
3206 
3207 void CMSConcMarkingTerminator::yield() {
3208   if (_task->should_yield()) {
3209     _task->yield();
3210   } else {
3211     ParallelTaskTerminator::yield();
3212   }
3213 }
3214 
3215 ////////////////////////////////////////////////////////////////
3216 // Concurrent Marking Algorithm Sketch
3217 ////////////////////////////////////////////////////////////////
3218 // Until all tasks exhausted (both spaces):
3219 // -- claim next available chunk
3220 // -- bump global finger via CAS
3221 // -- find first object that starts in this chunk
3222 //    and start scanning bitmap from that position
3223 // -- scan marked objects for oops
3224 // -- CAS-mark target, and if successful:
3225 //    . if target oop is above global finger (volatile read)
3226 //      nothing to do
3227 //    . if target oop is in chunk and above local finger
3228 //        then nothing to do
3229 //    . else push on work-queue
3230 // -- Deal with possible overflow issues:
3231 //    . local work-queue overflow causes stuff to be pushed on
3232 //      global (common) overflow queue
3233 //    . always first empty local work queue
3234 //    . then get a batch of oops from global work queue if any
3235 //    . then do work stealing
3236 // -- When all tasks claimed (both spaces)
3237 //    and local work queue empty,
3238 //    then in a loop do:
3239 //    . check global overflow stack; steal a batch of oops and trace
3240 //    . try to steal from other threads if GOS is empty
3241 //    . if neither is available, offer termination
3242 // -- Terminate and return result
3243 //
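// A concrete instance of the push rule above (illustrative addresses,
// not from any real run): suppose the global finger is at 0x8000 and a
// worker's current chunk is [0x4000, 0x5000) with its local finger at
// 0x4800. A freshly CAS-marked oop at 0x9000 lies above the global
// finger, so nothing need be done (the finger will get there); one at
// 0x4c00 lies in-chunk above the local finger, likewise nothing; one at
// 0x3000 lies behind both fingers and must be pushed on the work queue.
//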
3244 void CMSConcMarkingTask::work(uint worker_id) {
3245   elapsedTimer _timer;
3246   ResourceMark rm;
3247   HandleMark hm;
3248 
3249   DEBUG_ONLY(_collector->verify_overflow_empty();)
3250 
3251   // Before we begin work, our work queue should be empty
3252   assert(work_queue(worker_id)->size() == 0, "Expected to be empty");
3253   // Scan the bitmap covering _cms_space, tracing through grey objects.
3254   _timer.start();
3255   do_scan_and_mark(worker_id, _cms_space);
3256   _timer.stop();
3257   if (PrintCMSStatistics != 0) {
3258     gclog_or_tty->print_cr("Finished cms space scanning in %dth thread: %3.3f sec",
3259       worker_id, _timer.seconds());
3260       // XXX: need xxx/xxx type of notation, two timers
3261   }
3262 
3263   // ... do work stealing
3264   _timer.reset();
3265   _timer.start();
3266   do_work_steal(worker_id);
3267   _timer.stop();
3268   if (PrintCMSStatistics != 0) {
3269     gclog_or_tty->print_cr("Finished work stealing in %dth thread: %3.3f sec",
3270       worker_id, _timer.seconds());
3271       // XXX: need xxx/xxx type of notation, two timers
3272   }
3273   assert(_collector->_markStack.isEmpty(), "Should have been emptied");
3274   assert(work_queue(worker_id)->size() == 0, "Should have been emptied");
3275   // Note that under the current task protocol, the
3276   // following assertion is true even of the spaces
3277   // expanded since the completion of the concurrent
3278   // marking. XXX This will likely change under a strict
3279   // ABORT semantics.
3280   // After perm removal the comparison was changed to
3281   // greater than or equal to from strictly greater than.
3282   // Before perm removal the highest address sweep would
3283   // have been at the end of perm gen but now is at the
3284   // end of the tenured gen.
3285   assert(_global_finger >=  _cms_space->end(),
3286          "All tasks have been completed");
3287   DEBUG_ONLY(_collector->verify_overflow_empty();)
3288 }
3289 
3290 void CMSConcMarkingTask::bump_global_finger(HeapWord* f) {
3291   HeapWord* read = _global_finger;
3292   HeapWord* cur  = read;
3293   while (f > read) {
3294     cur = read;
3295     read = (HeapWord*) Atomic::cmpxchg_ptr(f, &_global_finger, cur);
3296     if (cur == read) {
3297       // our cas succeeded
3298       assert(_global_finger >= f, "protocol consistency");
3299       break;
3300     }
3301   }
3302 }
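
// For example (illustrative addresses): if _global_finger reads 0x1000
// and two workers race to bump it to 0x2000 and 0x1800 respectively, the
// cmpxchg loop above leaves it at 0x2000: a worker retries only while its
// f is still strictly greater than the value it last read, so the finger
// moves monotonically forward and never goes backwards.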
3303 
3304 // This is really inefficient, and should be redone by
3305 // using (not yet available) block-read and -write interfaces to the
3306 // stack and the work_queue. XXX FIX ME !!!
3307 bool CMSConcMarkingTask::get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
3308                                                       OopTaskQueue* work_q) {
3309   // Fast lock-free check
3310   if (ovflw_stk->length() == 0) {
3311     return false;
3312   }
3313   assert(work_q->size() == 0, "Shouldn't steal");
3314   MutexLockerEx ml(ovflw_stk->par_lock(),
3315                    Mutex::_no_safepoint_check_flag);
3316   // Grab up to 1/4 the size of the work queue
3317   size_t num = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
3318                     (size_t)ParGCDesiredObjsFromOverflowList);
3319   num = MIN2(num, ovflw_stk->length());
3320   for (int i = (int) num; i > 0; i--) {
3321     oop cur = ovflw_stk->pop();
3322     assert(cur != NULL, "Counted wrong?");
3323     work_q->push(cur);
3324   }
3325   return num > 0;
3326 }
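
// A worked instance of the batch-size arithmetic above (illustrative
// numbers, assuming the default ParGCDesiredObjsFromOverflowList of 20):
// with an empty work queue of max_elems 16384 and 100 oops on the
// overflow stack, num = MIN2(16384/4, 20) = 20, then MIN2(20, 100) = 20,
// so 20 oops are popped from the overflow stack and pushed on the queue.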
3327 
3328 void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
3329   SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
3330   int n_tasks = pst->n_tasks();
3331   // We allow that there may be no tasks to do here because
3332   // we are restarting after a stack overflow.
3333   assert(pst->valid() || n_tasks == 0, "Uninitialized use?");
3334   uint nth_task = 0;
3335 
3336   HeapWord* aligned_start = sp->bottom();
3337   if (sp->used_region().contains(_restart_addr)) {
3338     // Align down to a card boundary for the start of 0th task
3339     // for this space.
3340     aligned_start =
3341       (HeapWord*)align_size_down((uintptr_t)_restart_addr,
3342                                  CardTableModRefBS::card_size);
3343   }
3344 
3345   size_t chunk_size = sp->marking_task_size();
3346   while (!pst->is_task_claimed(/* reference */ nth_task)) {
3347     // Having claimed the nth task in this space,
3348     // compute the chunk that it corresponds to:
3349     MemRegion span = MemRegion(aligned_start + nth_task*chunk_size,
3350                                aligned_start + (nth_task+1)*chunk_size);
3351     // Try and bump the global finger via a CAS;
3352     // note that we need to do the global finger bump
3353     // _before_ taking the intersection below, because
3354     // the task corresponding to that region will be
3355     // deemed done even if the used_region() expands
3356     // because of allocation -- as it almost certainly will
3357     // during start-up while the threads yield in the
3358     // closure below.
3359     HeapWord* finger = span.end();
3360     bump_global_finger(finger);   // atomically
3361     // There are null tasks here corresponding to chunks
3362     // beyond the "top" address of the space.
3363     span = span.intersection(sp->used_region());
3364     if (!span.is_empty()) {  // Non-null task
3365       HeapWord* prev_obj;
3366       assert(!span.contains(_restart_addr) || nth_task == 0,
3367              "Inconsistency");
3368       if (nth_task == 0) {
3369         // For the 0th task, we'll not need to compute a block_start.
3370         if (span.contains(_restart_addr)) {
3371           // In the case of a restart because of stack overflow,
3372           // we might additionally skip a chunk prefix.
3373           prev_obj = _restart_addr;
3374         } else {
3375           prev_obj = span.start();
3376         }
3377       } else {
3378         // We want to skip the first object because
3379         // the protocol is to scan any object in its entirety
3380         // that _starts_ in this span; a fortiori, any
3381         // object starting in an earlier span is scanned
3382         // as part of an earlier claimed task.
3383         // Below we use the "careful" version of block_start
3384         // so we do not try to navigate uninitialized objects.
3385         prev_obj = sp->block_start_careful(span.start());
3386         // Below we use a variant of block_size that uses the
3387         // Printezis bits to avoid waiting for allocated
3388         // objects to become initialized/parsable.
3389         while (prev_obj < span.start()) {
3390           size_t sz = sp->block_size_no_stall(prev_obj, _collector);
3391           if (sz > 0) {
3392             prev_obj += sz;
3393           } else {
3394             // In this case we may end up doing a bit of redundant
3395             // scanning, but that appears unavoidable, short of
3396             // locking the free list locks; see bug 6324141.
3397             break;
3398           }
3399         }
3400       }
3401       if (prev_obj < span.end()) {
3402         MemRegion my_span = MemRegion(prev_obj, span.end());
3403         // Do the marking work within a non-empty span --
3404         // the last argument to the constructor indicates whether the
3405         // iteration should be incremental with periodic yields.
3406         Par_MarkFromRootsClosure cl(this, _collector, my_span,
3407                                     &_collector->_markBitMap,
3408                                     work_queue(i),
3409                                     &_collector->_markStack);
3410         _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
3411       } // else nothing to do for this task
3412     }   // else nothing to do for this task
3413   }
3414   // We'd be tempted to assert here that since there are no
3415   // more tasks left to claim in this space, the global_finger
3416   // must exceed space->top() and a fortiori space->end(). However,
3417   // that would not quite be correct because the bumping of
3418   // global_finger occurs strictly after the claiming of a task,
3419   // so by the time we reach here the global finger may not yet
3420   // have been bumped up by the thread that claimed the last
3421   // task.
3422   pst->all_tasks_completed();
3423 }
3424 
3425 class Par_ConcMarkingClosure: public MetadataAwareOopClosure {
3426  private:
3427   CMSCollector* _collector;
3428   CMSConcMarkingTask* _task;
3429   MemRegion     _span;
3430   CMSBitMap*    _bit_map;
3431   CMSMarkStack* _overflow_stack;
3432   OopTaskQueue* _work_queue;
3433  protected:
3434   DO_OOP_WORK_DEFN
3435  public:
3436   Par_ConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue,
3437                          CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
3438     MetadataAwareOopClosure(collector->ref_processor()),
3439     _collector(collector),
3440     _task(task),
3441     _span(collector->_span),
3442     _work_queue(work_queue),
3443     _bit_map(bit_map),
3444     _overflow_stack(overflow_stack)
3445   { }
3446   virtual void do_oop(oop* p);
3447   virtual void do_oop(narrowOop* p);
3448 
3449   void trim_queue(size_t max);
3450   void handle_stack_overflow(HeapWord* lost);
3451   void do_yield_check() {
3452     if (_task->should_yield()) {
3453       _task->yield();
3454     }
3455   }
3456 };
3457 
3458 // Grey object scanning during work stealing phase --
3459 // the salient assumption here is that any references
3460 // that are in these stolen objects being scanned must
3461 // already have been initialized (else they would not have
3462 // been published), so we do not need to check for
3463 // uninitialized objects before pushing here.
3464 void Par_ConcMarkingClosure::do_oop(oop obj) {
3465   assert(obj->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
3466   HeapWord* addr = (HeapWord*)obj;
3467   // Check if oop points into the CMS generation
3468   // and is not marked
3469   if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
3470     // a white object ...
3471     // If we manage to "claim" the object, by being the
3472     // first thread to mark it, then we push it on our
3473     // marking stack
3474     if (_bit_map->par_mark(addr)) {     // ... now grey
3475       // push on work queue (grey set)
3476       bool simulate_overflow = false;
3477       NOT_PRODUCT(
3478         if (CMSMarkStackOverflowALot &&
3479             _collector->simulate_overflow()) {
3480           // simulate a stack overflow
3481           simulate_overflow = true;
3482         }
3483       )
3484       if (simulate_overflow ||
3485           !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
3486         // stack overflow
3487         if (PrintCMSStatistics != 0) {
3488           gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
3489                                  SIZE_FORMAT, _overflow_stack->capacity());
3490         }
3491         // We cannot assert that the overflow stack is full because
3492         // it may have been emptied since.
3493         assert(simulate_overflow ||
3494                _work_queue->size() == _work_queue->max_elems(),
3495               "Else push should have succeeded");
3496         handle_stack_overflow(addr);
3497       }
3498     } // Else, some other thread got there first
3499     do_yield_check();
3500   }
3501 }
3502 
3503 void Par_ConcMarkingClosure::do_oop(oop* p)       { Par_ConcMarkingClosure::do_oop_work(p); }
3504 void Par_ConcMarkingClosure::do_oop(narrowOop* p) { Par_ConcMarkingClosure::do_oop_work(p); }
3505 
3506 void Par_ConcMarkingClosure::trim_queue(size_t max) {
3507   while (_work_queue->size() > max) {
3508     oop new_oop;
3509     if (_work_queue->pop_local(new_oop)) {
3510       assert(new_oop->is_oop(), "Should be an oop");
3511       assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object");
3512       assert(_span.contains((HeapWord*)new_oop), "Not in span");
3513       new_oop->oop_iterate(this);  // do_oop() above
3514       do_yield_check();
3515     }
3516   }
3517 }
3518 
3519 // Upon stack overflow, we discard (part of) the stack,
3520 // remembering the least address amongst those discarded
3521 // in CMSCollector's _restart_address.
3522 void Par_ConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
3523   // We need to do this under a mutex to prevent other
3524   // workers from interfering with the work done below.
3525   MutexLockerEx ml(_overflow_stack->par_lock(),
3526                    Mutex::_no_safepoint_check_flag);
3527   // Remember the least grey address discarded
3528   HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
3529   _collector->lower_restart_addr(ra);
3530   _overflow_stack->reset();  // discard stack contents
3531   _overflow_stack->expand(); // expand the stack if possible
3532 }
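
// For example: if two workers overflow and the least grey addresses they
// discard are 0x7000 and 0x5000 respectively, _restart_addr ends up at
// 0x5000 (lower_restart_addr() keeps the minimum), and the restart loops
// in do_marking_mt()/do_marking_st() below re-iterate the mark bitmap
// from there to _span.end(), so no discarded object is missed.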
3533 
3534 
3535 void CMSConcMarkingTask::do_work_steal(int i) {
3536   OopTaskQueue* work_q = work_queue(i);
3537   oop obj_to_scan;
3538   CMSBitMap* bm = &(_collector->_markBitMap);
3539   CMSMarkStack* ovflw = &(_collector->_markStack);
3540   int* seed = _collector->hash_seed(i);
3541   Par_ConcMarkingClosure cl(_collector, this, work_q, bm, ovflw);
3542   while (true) {
3543     cl.trim_queue(0);
3544     assert(work_q->size() == 0, "Should have been emptied above");
3545     if (get_work_from_overflow_stack(ovflw, work_q)) {
3546       // Can't assert below because the work obtained from the
3547       // overflow stack may already have been stolen from us.
3548       // assert(work_q->size() > 0, "Work from overflow stack");
3549       continue;
3550     } else if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
3551       assert(obj_to_scan->is_oop(), "Should be an oop");
3552       assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object");
3553       obj_to_scan->oop_iterate(&cl);
3554     } else if (terminator()->offer_termination(&_term_term)) {
3555       assert(work_q->size() == 0, "Impossible!");
3556       break;
3557     } else if (yielding() || should_yield()) {
3558       yield();
3559     }
3560   }
3561 }
3562 
3563 // This is run by the CMS (coordinator) thread.
3564 void CMSConcMarkingTask::coordinator_yield() {
3565   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
3566          "CMS thread should hold CMS token");
3567   // First give up the locks, then yield, then re-lock
3568   // We should probably use a constructor/destructor idiom to
3569   // do this unlock/lock or modify the MutexUnlocker class to
3570   // serve our purpose. XXX
3571   assert_lock_strong(_bit_map_lock);
3572   _bit_map_lock->unlock();
3573   ConcurrentMarkSweepThread::desynchronize(true);
3574   _collector->stopTimer();
3575   if (PrintCMSStatistics != 0) {
3576     _collector->incrementYields();
3577   }
3578 
3579   // It is possible for whichever thread initiated the yield request
3580   // not to get a chance to wake up and take the bitmap lock between
3581   // this thread releasing it and reacquiring it. So, while the
3582   // should_yield() flag is on, let's sleep for a bit to give the
3583   // other thread a chance to wake up. The limit imposed on the number
3584 // of iterations is defensive, to avoid any unforeseen circumstances
3585   // putting us into an infinite loop. Since it's always been this
3586   // (coordinator_yield()) method that was observed to cause the
3587   // problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
3588 // which is by default non-zero. For the other seven methods that
3589 // also perform the yield operation, we are using a different
3590 // parameter (CMSYieldSleepCount), which is by default zero. This way
3591 // we can enable the sleeping for those methods too, if necessary.
3592   // See 6442774.
3593   //
3594   // We really need to reconsider the synchronization between the GC
3595   // thread and the yield-requesting threads in the future and we
3596   // should really use wait/notify, which is the recommended
3597   // way of doing this type of interaction. Additionally, we should
3598   // consolidate the eight methods that do the yield operation and they
3599   // are almost identical into one for better maintainability and
3600   // readability. See 6445193.
3601   //
3602   // Tony 2006.06.29
3603   for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
3604                    ConcurrentMarkSweepThread::should_yield() &&
3605                    !CMSCollector::foregroundGCIsActive(); ++i) {
3606     os::sleep(Thread::current(), 1, false);
3607   }
3608 
3609   ConcurrentMarkSweepThread::synchronize(true);
3610   _bit_map_lock->lock_without_safepoint_check();
3611   _collector->startTimer();
3612 }
3613 
3614 bool CMSCollector::do_marking_mt() {
3615   assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
3616   uint num_workers = AdaptiveSizePolicy::calc_active_conc_workers(conc_workers()->total_workers(),
3617                                                                   conc_workers()->active_workers(),
3618                                                                   Threads::number_of_non_daemon_threads());
3619   conc_workers()->set_active_workers(num_workers);
3620 
3621   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
3622 
3623   CMSConcMarkingTask tsk(this,
3624                          cms_space,
3625                          conc_workers(),
3626                          task_queues());
3627 
3628   // Since the actual number of workers we get may be different
3629   // from the number we requested above, do we need to do anything different
3630 // below? In particular, maybe we need to subclass the SequentialSubTasksDone
3631   // class?? XXX
3632   cms_space->initialize_sequential_subtasks_for_marking(num_workers);
3633 
3634   // Refs discovery is already non-atomic.
3635   assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
3636   assert(ref_processor()->discovery_is_mt(), "Discovery should be MT");
3637   conc_workers()->start_task(&tsk);
3638   while (tsk.yielded()) {
3639     tsk.coordinator_yield();
3640     conc_workers()->continue_task(&tsk);
3641   }
3642   // If the task was aborted, _restart_addr will be non-NULL
3643   assert(tsk.completed() || _restart_addr != NULL, "Inconsistency");
3644   while (_restart_addr != NULL) {
3645     // XXX For now we do not make use of ABORTED state and have not
3646     // yet implemented the right abort semantics (even in the original
3647     // single-threaded CMS case). That needs some more investigation
3648     // and is deferred for now; see CR# TBF. 07252005YSR. XXX
3649     assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency");
3650     // If _restart_addr is non-NULL, a marking stack overflow
3651     // occurred; we need to do a fresh marking iteration from the
3652     // indicated restart address.
3653     if (_foregroundGCIsActive) {
3654       // We may be running into repeated stack overflows, having
3655       // reached the limit of the stack size, while making very
3656       // slow forward progress. It may be best to bail out and
3657       // let the foreground collector do its job.
3658       // Clear _restart_addr, so that foreground GC
3659       // works from scratch. This avoids the headache of
3660       // a "rescan" which would otherwise be needed because
3661       // of the dirty mod union table & card table.
3662       _restart_addr = NULL;
3663       return false;
3664     }
3665     // Adjust the task to restart from _restart_addr
3666     tsk.reset(_restart_addr);
3667     cms_space->initialize_sequential_subtasks_for_marking(num_workers,
3668                   _restart_addr);
3669     _restart_addr = NULL;
3670     // Get the workers going again
3671     conc_workers()->start_task(&tsk);
3672     while (tsk.yielded()) {
3673       tsk.coordinator_yield();
3674       conc_workers()->continue_task(&tsk);
3675     }
3676   }
3677   assert(tsk.completed(), "Inconsistency");
3678   assert(tsk.result() == true, "Inconsistency");
3679   return true;
3680 }
3681 
3682 bool CMSCollector::do_marking_st() {
3683   ResourceMark rm;
3684   HandleMark   hm;
3685 
3686   // Temporarily make refs discovery single threaded (non-MT)
3687   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
3688   MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
3689     &_markStack, CMSYield);
3690   // the last argument to iterate indicates whether the iteration
3691   // should be incremental with periodic yields.
3692   _markBitMap.iterate(&markFromRootsClosure);
3693   // If _restart_addr is non-NULL, a marking stack overflow
3694   // occurred; we need to do a fresh iteration from the
3695   // indicated restart address.
3696   while (_restart_addr != NULL) {
3697     if (_foregroundGCIsActive) {
3698       // We may be running into repeated stack overflows, having
3699       // reached the limit of the stack size, while making very
3700       // slow forward progress. It may be best to bail out and
3701       // let the foreground collector do its job.
3702       // Clear _restart_addr, so that foreground GC
3703       // works from scratch. This avoids the headache of
3704       // a "rescan" which would otherwise be needed because
3705       // of the dirty mod union table & card table.
3706       _restart_addr = NULL;
3707       return false;  // indicating failure to complete marking
3708     }
3709     // Deal with stack overflow:
3710     // we restart marking from _restart_addr
3711     HeapWord* ra = _restart_addr;
3712     markFromRootsClosure.reset(ra);
3713     _restart_addr = NULL;
3714     _markBitMap.iterate(&markFromRootsClosure, ra, _span.end());
3715   }
3716   return true;
3717 }
3718 
3719 void CMSCollector::preclean() {
3720   check_correct_thread_executing();
3721   assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
3722   verify_work_stacks_empty();
3723   verify_overflow_empty();
3724   _abort_preclean = false;
3725   if (CMSPrecleaningEnabled) {
3726     if (!CMSEdenChunksRecordAlways) {
3727       _eden_chunk_index = 0;
3728     }
3729     size_t used = get_eden_used();
3730     size_t capacity = get_eden_capacity();
3731     // Don't start sampling unless we will get sufficiently
3732     // many samples.
3733     if (used < (capacity/(CMSScheduleRemarkSamplingRatio * 100)
3734                 * CMSScheduleRemarkEdenPenetration)) {
3735       _start_sampling = true;
3736     } else {
3737       _start_sampling = false;
3738     }
3739     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3740     CMSPhaseAccounting pa(this, "preclean", !PrintGCDetails);
3741     preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
3742   }
3743   CMSTokenSync x(true); // is cms thread
3744   if (CMSPrecleaningEnabled) {
3745     sample_eden();
3746     _collectorState = AbortablePreclean;
3747   } else {
3748     _collectorState = FinalMarking;
3749   }
3750   verify_work_stacks_empty();
3751   verify_overflow_empty();
3752 }
3753 
3754 // Try and schedule the remark such that young gen
3755 // occupancy is CMSScheduleRemarkEdenPenetration %.
3756 void CMSCollector::abortable_preclean() {
3757   check_correct_thread_executing();
3758   assert(CMSPrecleaningEnabled,  "Inconsistent control state");
3759   assert(_collectorState == AbortablePreclean, "Inconsistent control state");
3760 
3761   // If Eden's current occupancy is below this threshold,
3762   // immediately schedule the remark; else preclean
3763   // past the next scavenge in an effort to
3764   // schedule the pause as described above. By choosing
3765   // CMSScheduleRemarkEdenSizeThreshold >= max eden size
3766   // we will never do an actual abortable preclean cycle.
3767   if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
3768     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
3769     CMSPhaseAccounting pa(this, "abortable-preclean", !PrintGCDetails);
3770     // We need more smarts in the abortable preclean
3771     // loop below to deal with cases where allocation
3772     // in young gen is very very slow, and our precleaning
3773     // is running a losing race against a horde of
3774     // mutators intent on flooding us with CMS updates
3775     // (dirty cards).
3776     // One, admittedly dumb, strategy is to give up
3777     // after a certain number of abortable precleaning loops
3778     // or after a certain maximum time. We want to make
3779     // this smarter in the next iteration.
3780     // XXX FIX ME!!! YSR
3781     size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
3782     while (!(should_abort_preclean() ||
3783              ConcurrentMarkSweepThread::should_terminate())) {
3784       workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
3785       cumworkdone += workdone;
3786       loops++;
3787       // Voluntarily terminate abortable preclean phase if we have
3788       // been at it for too long.
3789       if ((CMSMaxAbortablePrecleanLoops != 0) &&
3790           loops >= CMSMaxAbortablePrecleanLoops) {
3791         if (PrintGCDetails) {
3792           gclog_or_tty->print(" CMS: abort preclean due to loops ");
3793         }
3794         break;
3795       }
3796       if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) {
3797         if (PrintGCDetails) {
3798           gclog_or_tty->print(" CMS: abort preclean due to time ");
3799         }
3800         break;
3801       }
3802       // If we are doing little work each iteration, we should
3803       // take a short break.
3804       if (workdone < CMSAbortablePrecleanMinWorkPerIteration) {
3805         // Sleep for some time, waiting for work to accumulate
3806         stopTimer();
3807         cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis);
3808         startTimer();
3809         waited++;
3810       }
3811     }
3812     if (PrintCMSStatistics > 0) {
3813       gclog_or_tty->print(" [" SIZE_FORMAT " iterations, " SIZE_FORMAT " waits, " SIZE_FORMAT " cards] ",
3814                           loops, waited, cumworkdone);
3815     }
3816   }
3817   CMSTokenSync x(true); // is cms thread
3818   if (_collectorState != Idling) {
3819     assert(_collectorState == AbortablePreclean,
3820            "Spontaneous state transition?");
3821     _collectorState = FinalMarking;
3822   } // Else, a foreground collection completed this CMS cycle.
3823   return;
3824 }
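
// A worked scheduling example (assuming the default values
// CMSScheduleRemarkEdenSizeThreshold=2M and
// CMSScheduleRemarkEdenPenetration=50): with a 64M eden, the abortable
// preclean loop above runs only once eden usage exceeds 2M, and
// sample_eden() below sets _abort_preclean once usage crosses
// 64M * 50/100 = 32M, at which point the remark pause is scheduled.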
3825 
3826 // Respond to an Eden sampling opportunity
3827 void CMSCollector::sample_eden() {
3828   // Make sure a young gc cannot sneak in between our
3829   // reading and recording of a sample.
3830   assert(Thread::current()->is_ConcurrentGC_thread(),
3831          "Only the cms thread may collect Eden samples");
3832   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
3833          "Should collect samples while holding CMS token");
3834   if (!_start_sampling) {
3835     return;
3836   }
3837   // When CMSEdenChunksRecordAlways is true, the eden chunk array
3838   // is populated by the young generation.
3839   if (_eden_chunk_array != NULL && !CMSEdenChunksRecordAlways) {
3840     if (_eden_chunk_index < _eden_chunk_capacity) {
3841       _eden_chunk_array[_eden_chunk_index] = *_top_addr;   // take sample
3842       assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
3843              "Unexpected state of Eden");
3844       // We'd like to check that what we just sampled is an oop-start address;
3845       // however, we cannot do that here since the object may not yet have been
3846       // initialized. So we'll instead do the check when we _use_ this sample
3847       // later.
3848       if (_eden_chunk_index == 0 ||
3849           (pointer_delta(_eden_chunk_array[_eden_chunk_index],
3850                          _eden_chunk_array[_eden_chunk_index-1])
3851            >= CMSSamplingGrain)) {
3852         _eden_chunk_index++;  // commit sample
3853       }
3854     }
3855   }
3856   if ((_collectorState == AbortablePreclean) && !_abort_preclean) {
3857     size_t used = get_eden_used();
3858     size_t capacity = get_eden_capacity();
3859     assert(used <= capacity, "Unexpected state of Eden");
3860     if (used >  (capacity/100 * CMSScheduleRemarkEdenPenetration)) {
3861       _abort_preclean = true;
3862     }
3863   }
3864 }
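
// A small worked example (made-up offsets) of the sampling-grain filter
// in sample_eden() above: with CMSSamplingGrain taken to be 1024 heap
// words, successive top samples at eden offsets 0, 400, 1200, 1300 and
// 2500 words commit only 0, 1200 and 2500 to _eden_chunk_array. The
// samples at 400 and 1300 merely overwrite slot _eden_chunk_index
// without advancing it, since each lies fewer than CMSSamplingGrain
// words past the previously committed sample; the array thus stays
// coarse enough to yield usefully sized chunks for parallel rescanning.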
3865 
3866 
3867 size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
3868   assert(_collectorState == Precleaning ||
3869          _collectorState == AbortablePreclean, "incorrect state");
3870   ResourceMark rm;
3871   HandleMark   hm;
3872 
3873   // Precleaning is currently not MT but the reference processor
3874   // may be set for MT.  Disable it temporarily here.
3875   ReferenceProcessor* rp = ref_processor();
3876   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
3877 
3878   // Do one pass of scrubbing the discovered reference lists
3879   // to remove any reference objects with strongly-reachable
3880   // referents.
3881   if (clean_refs) {
3882     CMSPrecleanRefsYieldClosure yield_cl(this);
3883     assert(rp->span().equals(_span), "Spans should be equal");
3884     CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
3885                                    &_markStack, true /* preclean */);
3886     CMSDrainMarkingStackClosure complete_trace(this,
3887                                    _span, &_markBitMap, &_markStack,
3888                                    &keep_alive, true /* preclean */);
3889 
3890     // We don't want this step to interfere with a young
3891     // collection because we don't want to take CPU
3892     // or memory bandwidth away from the young GC threads
3893     // (which may be as many as there are CPUs).
3894     // Note that we don't need to protect ourselves from
3895     // interference with mutators because they can't
3896     // manipulate the discovered reference lists nor affect
3897     // the computed reachability of the referents, the
3898     // only properties manipulated by the precleaning
3899     // of these reference lists.
3900     stopTimer();
3901     CMSTokenSyncWithLocks x(true /* is cms thread */,
3902                             bitMapLock());
3903     startTimer();
3904     sample_eden();
3905 
3906     // The following will yield to allow foreground
3907     // collection to proceed promptly. XXX YSR:
3908     // The code in this method may need further
3909     // tweaking for better performance and some restructuring
3910     // for cleaner interfaces.
3911     GCTimer *gc_timer = NULL; // Currently not tracing concurrent phases
3912     rp->preclean_discovered_references(
3913           rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl,
3914           gc_timer);
3915   }
3916 
3917   if (clean_survivor) {  // preclean the active survivor space(s)
3918     PushAndMarkClosure pam_cl(this, _span, ref_processor(),
3919                              &_markBitMap, &_modUnionTable,
3920                              &_markStack, true /* precleaning phase */);
3921     stopTimer();
3922     CMSTokenSyncWithLocks ts(true /* is cms thread */,
3923                              bitMapLock());
3924     startTimer();
3925     unsigned int before_count =
3926       GenCollectedHeap::heap()->total_collections();
3927     SurvivorSpacePrecleanClosure
3928       sss_cl(this, _span, &_markBitMap, &_markStack,
3929              &pam_cl, before_count, CMSYield);
3930     _young_gen->from()->object_iterate_careful(&sss_cl);
3931     _young_gen->to()->object_iterate_careful(&sss_cl);
3932   }
3933   MarkRefsIntoAndScanClosure
3934     mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
3935              &_markStack, this, CMSYield,
3936              true /* precleaning phase */);
3937   // CAUTION: The following closure has persistent state that may need to
3938   // be reset upon a decrease in the sequence of addresses it
3939   // processes.
3940   ScanMarkedObjectsAgainCarefullyClosure
3941     smoac_cl(this, _span,
3942       &_markBitMap, &_markStack, &mrias_cl, CMSYield);
3943 
3944   // Preclean dirty cards in ModUnionTable and CardTable using
3945   // appropriate convergence criterion;
3946   // repeat CMSPrecleanIter times unless we find that
3947   // we are losing.
3948   assert(CMSPrecleanIter < 10, "CMSPrecleanIter is too large");
3949   assert(CMSPrecleanNumerator < CMSPrecleanDenominator,
3950          "Bad convergence multiplier");
3951   assert(CMSPrecleanThreshold >= 100,
3952          "Unreasonably low CMSPrecleanThreshold");
3953 
3954   size_t numIter, cumNumCards, lastNumCards, curNumCards;
3955   for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0;
3956        numIter < CMSPrecleanIter;
3957        numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
3958     curNumCards  = preclean_mod_union_table(_cmsGen, &smoac_cl);
3959     if (Verbose && PrintGCDetails) {
3960       gclog_or_tty->print(" (modUnionTable: " SIZE_FORMAT " cards)", curNumCards);
3961     }
3962     // Either there are very few dirty cards, so re-mark
3963     // pause will be small anyway, or our pre-cleaning isn't
3964     // that much faster than the rate at which cards are being
3965     // dirtied, so we might as well stop and re-mark since
3966     // precleaning won't improve our re-mark time by much.
3967     if (curNumCards <= CMSPrecleanThreshold ||
3968         (numIter > 0 &&
3969          (curNumCards * CMSPrecleanDenominator >
3970          lastNumCards * CMSPrecleanNumerator))) {
3971       numIter++;
3972       cumNumCards += curNumCards;
3973       break;
3974     }
3975   }
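  // A worked example (made-up counts) of the convergence test above:
  // with CMSPrecleanNumerator = 2 and CMSPrecleanDenominator = 3, and
  // assuming CMSPrecleanThreshold is below the counts involved, the
  // iteration continues only while curNumCards * 3 <= lastNumCards * 2,
  // i.e. while each pass shrinks the dirty-card population to at most
  // two thirds of the previous pass. Passes yielding 3000, 1800 and
  // 1500 cards stop after the third, since 1500 * 3 = 4500 exceeds
  // 1800 * 2 = 3600: precleaning is no longer gaining on the rate at
  // which the mutators dirty cards.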
3976 
3977   preclean_klasses(&mrias_cl, _cmsGen->freelistLock());
3978 
3979   curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
3980   cumNumCards += curNumCards;
3981   if (PrintGCDetails && PrintCMSStatistics != 0) {
3982     gclog_or_tty->print_cr(" (cardTable: " SIZE_FORMAT " cards, re-scanned " SIZE_FORMAT " cards, " SIZE_FORMAT " iterations)",
3983                   curNumCards, cumNumCards, numIter);
3984   }
3985   return cumNumCards;   // as a measure of useful work done
3986 }
3987 
3988 // PRECLEANING NOTES:
3989 // Precleaning involves:
3990 // . reading the bits of the modUnionTable and clearing the set bits.
3991 // . For the cards corresponding to the set bits, we scan the
3992 //   objects on those cards. This means we need the free_list_lock
3993 //   so that we can safely iterate over the CMS space when scanning
3994 //   for oops.
3995 // . When we scan the objects, we'll be both reading and setting
3996 //   marks in the marking bit map, so we'll need the marking bit map.
3997 // . For protecting _collectorState transitions, we take the CGC_lock.
3998 //   Note that any races in the reading of card table entries by the
3999 //   CMS thread on the one hand and the clearing of those entries by the
4000 //   VM thread or the setting of those entries by the mutator threads on the
4001 //   other are quite benign. However, for efficiency it makes sense to keep
4002 //   the VM thread from racing with the CMS thread while the latter is
4003 //   transferring dirty card info to the modUnionTable. We therefore also use the
4004 //   CGC_lock to protect the reading of the card table and the mod union
4005 //   table by the CMS thread.
4006 // . We run concurrently with mutator updates, so scanning
4007 //   needs to be done carefully  -- we should not try to scan
4008 //   potentially uninitialized objects.
4009 //
4010 // Locking strategy: While holding the CGC_lock, we scan over and
4011 // reset a maximal dirty range of the mod union / card tables, then lock
4012 // the free_list_lock and bitmap lock to do a full marking, then
4013 // release these locks; and repeat the cycle. This allows for a
4014 // certain amount of fairness in the sharing of these locks between
4015 // the CMS collector on the one hand, and the VM thread and the
4016 // mutators on the other.
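
// The locking cycle described above, as a schematic sketch (not
// compilable; in the real code the handshakes are mediated by
// CMSTokenSync and CMSTokenSyncWithLocks):
//
//   loop {
//     with CGC_lock:
//       dirtyRegion = get-and-reset a maximal dirty range of the
//                     mod union / card table;
//     with free_list_lock + bitMapLock:
//       scan and mark the objects on the cards of dirtyRegion;
//   }
//
// Dropping both sets of locks between rounds is what provides the
// fairness described above.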
4017 
4018 // NOTE: preclean_mod_union_table() and preclean_card_table()
4019 // further below are largely identical; if you need to modify
4020 // one of these methods, please check the other method too.
4021 
4022 size_t CMSCollector::preclean_mod_union_table(
4023   ConcurrentMarkSweepGeneration* old_gen,
4024   ScanMarkedObjectsAgainCarefullyClosure* cl) {
4025   verify_work_stacks_empty();
4026   verify_overflow_empty();
4027 
4028   // strategy: starting with the first card, accumulate contiguous
4029   // ranges of dirty cards; clear these cards, then scan the region
4030   // covered by these cards.
4031 
4032   // Since all of the MUT is committed up front, we can just use
4033   // its full span; this covers the case where the generation expands while we are precleaning.
4034   // It might also be fine to just use the committed part of the
4035   // generation, but we might potentially miss cards when the
4036   // generation is rapidly expanding while we are in the midst
4037   // of precleaning.
4038   HeapWord* startAddr = old_gen->reserved().start();
4039   HeapWord* endAddr   = old_gen->reserved().end();
4040 
4041   cl->setFreelistLock(old_gen->freelistLock());   // needed for yielding
4042 
4043   size_t numDirtyCards, cumNumDirtyCards;
4044   HeapWord *nextAddr, *lastAddr;
4045   for (cumNumDirtyCards = numDirtyCards = 0,
4046        nextAddr = lastAddr = startAddr;
4047        nextAddr < endAddr;
4048        nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4049 
4050     ResourceMark rm;
4051     HandleMark   hm;
4052 
4053     MemRegion dirtyRegion;
4054     {
4055       stopTimer();
4056       // Potential yield point
4057       CMSTokenSync ts(true);
4058       startTimer();
4059       sample_eden();
4060       // Get dirty region starting at nextOffset (inclusive),
4061       // simultaneously clearing it.
4062       dirtyRegion =
4063         _modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr);
4064       assert(dirtyRegion.start() >= nextAddr,
4065              "returned region inconsistent?");
4066     }
4067     // Remember where the next search should begin.
4068     // The returned region (if non-empty) is a right open interval,
4069     // so lastOffset is obtained from the right end of that
4070     // interval.
4071     lastAddr = dirtyRegion.end();
4072     // Should do something more transparent and less hacky XXX
4073     numDirtyCards =
4074       _modUnionTable.heapWordDiffToOffsetDiff(dirtyRegion.word_size());
4075 
4076     // We'll scan the cards in the dirty region (with periodic
4077     // yields for foreground GC as needed).
4078     if (!dirtyRegion.is_empty()) {
4079       assert(numDirtyCards > 0, "consistency check");
4080       HeapWord* stop_point = NULL;
4081       stopTimer();
4082       // Potential yield point
4083       CMSTokenSyncWithLocks ts(true, old_gen->freelistLock(),
4084                                bitMapLock());
4085       startTimer();
4086       {
4087         verify_work_stacks_empty();
4088         verify_overflow_empty();
4089         sample_eden();
4090         stop_point =
4091           old_gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4092       }
4093       if (stop_point != NULL) {
4094         // The careful iteration stopped early either because it found an
4095         // uninitialized object, or because we were in the midst of an
4096         // "abortable preclean", which should now be aborted. Redirty
4097         // the bits corresponding to the partially-scanned or unscanned
4098         // cards. We'll either restart at the next block boundary or
4099         // abort the preclean.
4100         assert((_collectorState == AbortablePreclean && should_abort_preclean()),
4101                "Should only be AbortablePreclean.");
4102         _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end()));
4103         if (should_abort_preclean()) {
4104           break; // out of preclean loop
4105         } else {
4106           // Compute the next address at which preclean should pick up;
4107           // might need bitMapLock in order to read P-bits.
4108           lastAddr = next_card_start_after_block(stop_point);
4109         }
4110       }
4111     } else {
4112       assert(lastAddr == endAddr, "consistency check");
4113       assert(numDirtyCards == 0, "consistency check");
4114       break;
4115     }
4116   }
4117   verify_work_stacks_empty();
4118   verify_overflow_empty();
4119   return cumNumDirtyCards;
4120 }
4121 
4122 // NOTE: preclean_mod_union_table() above and preclean_card_table()
4123 // below are largely identical; if you need to modify
4124 // one of these methods, please check the other method too.
4125 
4126 size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* old_gen,
4127   ScanMarkedObjectsAgainCarefullyClosure* cl) {
4128   // strategy: it's similar to preclean_mod_union_table() above, in that
4129   // we accumulate contiguous ranges of dirty cards, mark these cards
4130   // precleaned, then scan the region covered by these cards.
4131   HeapWord* endAddr   = (HeapWord*)(old_gen->_virtual_space.high());
4132   HeapWord* startAddr = (HeapWord*)(old_gen->_virtual_space.low());
4133 
4134   cl->setFreelistLock(old_gen->freelistLock());   // needed for yielding
4135 
4136   size_t numDirtyCards, cumNumDirtyCards;
4137   HeapWord *lastAddr, *nextAddr;
4138 
4139   for (cumNumDirtyCards = numDirtyCards = 0,
4140        nextAddr = lastAddr = startAddr;
4141        nextAddr < endAddr;
4142        nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
4143 
4144     ResourceMark rm;
4145     HandleMark   hm;
4146 
4147     MemRegion dirtyRegion;
4148     {
4149       // See comments in "Precleaning notes" above on why we
4150       // do this locking. XXX Could the locking overheads be
4151       // too high when dirty cards are sparse? [I don't think so.]
4152       stopTimer();
4153       CMSTokenSync x(true); // is cms thread
4154       startTimer();
4155       sample_eden();
4156       // Get and clear dirty region from card table
4157       dirtyRegion = _ct->ct_bs()->dirty_card_range_after_reset(
4158                                     MemRegion(nextAddr, endAddr),
4159                                     true,
4160                                     CardTableModRefBS::precleaned_card_val());
4161 
4162       assert(dirtyRegion.start() >= nextAddr,
4163              "returned region inconsistent?");
4164     }
4165     lastAddr = dirtyRegion.end();
4166     numDirtyCards =
4167       dirtyRegion.word_size()/CardTableModRefBS::card_size_in_words;
4168 
4169     if (!dirtyRegion.is_empty()) {
4170       stopTimer();
4171       CMSTokenSyncWithLocks ts(true, old_gen->freelistLock(), bitMapLock());
4172       startTimer();
4173       sample_eden();
4174       verify_work_stacks_empty();
4175       verify_overflow_empty();
4176       HeapWord* stop_point =
4177         old_gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
4178       if (stop_point != NULL) {
4179         assert((_collectorState == AbortablePreclean && should_abort_preclean()),
4180                "Should only be AbortablePreclean.");
4181         _ct->ct_bs()->invalidate(MemRegion(stop_point, dirtyRegion.end()));
4182         if (should_abort_preclean()) {
4183           break; // out of preclean loop
4184         } else {
4185           // Compute the next address at which preclean should pick up.
4186           lastAddr = next_card_start_after_block(stop_point);
4187         }
4188       }
4189     } else {
4190       break;
4191     }
4192   }
4193   verify_work_stacks_empty();
4194   verify_overflow_empty();
4195   return cumNumDirtyCards;
4196 }
4197 
4198 class PrecleanKlassClosure : public KlassClosure {
4199   KlassToOopClosure _cm_klass_closure;
4200  public:
4201   PrecleanKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
4202   void do_klass(Klass* k) {
4203     if (k->has_accumulated_modified_oops()) {
4204       k->clear_accumulated_modified_oops();
4205 
4206       _cm_klass_closure.do_klass(k);
4207     }
4208   }
4209 };
4210 
4211 // The freelist lock is needed to prevent asserts; is it really needed?
4212 void CMSCollector::preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock) {
4213 
4214   cl->set_freelistLock(freelistLock);
4215 
4216   CMSTokenSyncWithLocks ts(true, freelistLock, bitMapLock());
4217 
4218   // SSS: Add equivalent to ScanMarkedObjectsAgainCarefullyClosure::do_yield_check and should_abort_preclean?
4219   // SSS: We should probably check if precleaning should be aborted, at suitable intervals?
4220   PrecleanKlassClosure preclean_klass_closure(cl);
4221   ClassLoaderDataGraph::classes_do(&preclean_klass_closure);
4222 
4223   verify_work_stacks_empty();
4224   verify_overflow_empty();
4225 }
4226 
4227 void CMSCollector::checkpointRootsFinal() {
4228   assert(_collectorState == FinalMarking, "incorrect state transition?");
4229   check_correct_thread_executing();
4230   // world is stopped at this checkpoint
4231   assert(SafepointSynchronize::is_at_safepoint(),
4232          "world should be stopped");
4233   TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
4234 
4235   verify_work_stacks_empty();
4236   verify_overflow_empty();
4237 
4238   if (PrintGCDetails) {
4239     gclog_or_tty->print("[YG occupancy: " SIZE_FORMAT " K (" SIZE_FORMAT " K)]",
4240                         _young_gen->used() / K,
4241                         _young_gen->capacity() / K);
4242   }
4243   {
4244     if (CMSScavengeBeforeRemark) {
4245       GenCollectedHeap* gch = GenCollectedHeap::heap();
4246       // Temporarily set flag to false, GCH->do_collection will
4247       // expect it to be false and set to true
4248       FlagSetting fl(gch->_is_gc_active, false);
4249       NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark",
4250         PrintGCDetails && Verbose, true, _gc_timer_cm);)
4251       gch->do_collection(true,                      // full (i.e. force, see below)
4252                          false,                     // !clear_all_soft_refs
4253                          0,                         // size
4254                          false,                     // is_tlab
4255                          GenCollectedHeap::YoungGen // type
4256         );
4257     }
4258     FreelistLocker x(this);
4259     MutexLockerEx y(bitMapLock(),
4260                     Mutex::_no_safepoint_check_flag);
4261     checkpointRootsFinalWork();
4262   }
4263   verify_work_stacks_empty();
4264   verify_overflow_empty();
4265 }
4266 
4267 void CMSCollector::checkpointRootsFinalWork() {
4268   NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm);)
4269 
4270   assert(haveFreelistLocks(), "must have free list locks");
4271   assert_lock_strong(bitMapLock());
4272 
4273   ResourceMark rm;
4274   HandleMark   hm;
4275 
4276   GenCollectedHeap* gch = GenCollectedHeap::heap();
4277 
4278   if (should_unload_classes()) {
4279     CodeCache::gc_prologue();
4280   }
4281   assert(haveFreelistLocks(), "must have free list locks");
4282   assert_lock_strong(bitMapLock());
4283 
4284   // We might assume that we need not fill TLAB's when
4285   // CMSScavengeBeforeRemark is set, because we may have just done
4286   // a scavenge which would have filled all TLAB's -- and besides
4287   // Eden would be empty. This however may not always be the case --
4288   // for instance although we asked for a scavenge, it may not have
4289   // happened because of a JNI critical section. We probably need
4290   // a policy for deciding whether we can in that case wait until
4291   // the critical section releases and then do the remark following
4292   // the scavenge, and skip it here. In the absence of that policy,
4293   // or of an indication of whether the scavenge did indeed occur,
4294   // we cannot rely on TLAB's having been filled and must do
4295   // so here just in case a scavenge did not happen.
4296   gch->ensure_parsability(false);  // fill TLAB's, but no need to retire them
4297   // Update the saved marks which may affect the root scans.
4298   gch->save_marks();
4299 
4300   if (CMSPrintEdenSurvivorChunks) {
4301     print_eden_and_survivor_chunk_arrays();
4302   }
4303 
4304   {
4305 #if defined(COMPILER2) || INCLUDE_JVMCI
4306     DerivedPointerTableDeactivate dpt_deact;
4307 #endif
4308 
4309     // Note on the role of the mod union table:
4310     // Since the marker in "markFromRoots" marks concurrently with
4311     // mutators, it is possible for some reachable objects not to have been
4312     // scanned. For instance, the only reference to an object A may be
4313     // placed in object B after the marker has scanned B. Unless B is rescanned,
4314     // A would be collected. Such updates to references in marked objects
4315     // are detected via the mod union table which is the set of all cards
4316     // dirtied since the first checkpoint in this GC cycle and prior to
4317     // the most recent young generation GC, minus those cleaned up by the
4318     // concurrent precleaning.
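    //
    // Schematically (set notation, not code):
    //   mod_union = { cards dirtied in [initial checkpoint,
    //                                   most recent young GC) }
    //               minus { cards cleaned by concurrent precleaning }
    // while cards dirtied since the most recent young GC still sit in
    // the card table itself; rescanning both sets (see do_remark_parallel
    // and do_remark_non_parallel below) recovers updates such as the
    // B-holds-the-only-reference-to-A example above.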
4319     if (CMSParallelRemarkEnabled) {
4320       GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm);
4321       do_remark_parallel();
4322     } else {
4323       GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false, _gc_timer_cm);
4324       do_remark_non_parallel();
4325     }
4326   }
4327   verify_work_stacks_empty();
4328   verify_overflow_empty();
4329 
4330   {
4331     NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm);)
4332     refProcessingWork();
4333   }
4334   verify_work_stacks_empty();
4335   verify_overflow_empty();
4336 
4337   if (should_unload_classes()) {
4338     CodeCache::gc_epilogue();
4339   }
4340   JvmtiExport::gc_epilogue();
4341 
4342   // If we encountered any (marking stack / work queue) overflow
4343   // events during the current CMS cycle, take appropriate
4344   // remedial measures, where possible, so as to try and avoid
4345   // recurrence of that condition.
4346   assert(_markStack.isEmpty(), "No grey objects");
4347   size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
4348                      _ser_kac_ovflw        + _ser_kac_preclean_ovflw;
4349   if (ser_ovflw > 0) {
4350     if (PrintCMSStatistics != 0) {
4351       gclog_or_tty->print_cr("Marking stack overflow (benign) "
4352         "(pmc_pc=" SIZE_FORMAT ", pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT
4353         ", kac_preclean=" SIZE_FORMAT ")",
4354         _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw,
4355         _ser_kac_ovflw, _ser_kac_preclean_ovflw);
4356     }
4357     _markStack.expand();
4358     _ser_pmc_remark_ovflw = 0;
4359     _ser_pmc_preclean_ovflw = 0;
4360     _ser_kac_preclean_ovflw = 0;
4361     _ser_kac_ovflw = 0;
4362   }
4363   if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
4364     if (PrintCMSStatistics != 0) {
4365       gclog_or_tty->print_cr("Work queue overflow (benign) "
4366         "(pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT ")",
4367         _par_pmc_remark_ovflw, _par_kac_ovflw);
4368     }
4369     _par_pmc_remark_ovflw = 0;
4370     _par_kac_ovflw = 0;
4371   }
4372   if (PrintCMSStatistics != 0) {
4373      if (_markStack._hit_limit > 0) {
4374        gclog_or_tty->print_cr(" (benign) Hit max stack size limit (" SIZE_FORMAT ")",
4375                               _markStack._hit_limit);
4376      }
4377      if (_markStack._failed_double > 0) {
4378        gclog_or_tty->print_cr(" (benign) Failed stack doubling (" SIZE_FORMAT "),"
4379                               " current capacity " SIZE_FORMAT,
4380                               _markStack._failed_double,
4381                               _markStack.capacity());
4382      }
4383   }
4384   _markStack._hit_limit = 0;
4385   _markStack._failed_double = 0;
4386 
4387   if ((VerifyAfterGC || VerifyDuringGC) &&
4388       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
4389     verify_after_remark();
4390   }
4391 
4392   _gc_tracer_cm->report_object_count_after_gc(&_is_alive_closure);
4393 
4394   // Change under the freelistLocks.
4395   _collectorState = Sweeping;
4396   // Call isAllClear() under bitMapLock
4397   assert(_modUnionTable.isAllClear(),
4398       "Should be clear by end of the final marking");
4399   assert(_ct->klass_rem_set()->mod_union_is_clear(),
4400       "Should be clear by end of the final marking");
4401 }
4402 
4403 void CMSParInitialMarkTask::work(uint worker_id) {
4404   elapsedTimer _timer;
4405   ResourceMark rm;
4406   HandleMark   hm;
4407 
4408   // ---------- scan from roots --------------
4409   _timer.start();
4410   GenCollectedHeap* gch = GenCollectedHeap::heap();
4411   Par_MarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
4412 
4413   // ---------- young gen roots --------------
4414   {
4415     work_on_young_gen_roots(worker_id, &par_mri_cl);
4416     _timer.stop();
4417     if (PrintCMSStatistics != 0) {
4418       gclog_or_tty->print_cr(
4419         "Finished young gen initial mark scan work in %dth thread: %3.3f sec",
4420         worker_id, _timer.seconds());
4421     }
4422   }
4423 
4424   // ---------- remaining roots --------------
4425   _timer.reset();
4426   _timer.start();
4427 
4428   CLDToOopClosure cld_closure(&par_mri_cl, true);
4429 
4430   gch->gen_process_roots(_strong_roots_scope,
4431                          GenCollectedHeap::OldGen,
4432                          false,     // yg was scanned above
4433                          GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
4434                          _collector->should_unload_classes(),
4435                          &par_mri_cl,
4436                          NULL,
4437                          &cld_closure);
4438   assert(_collector->should_unload_classes()
4439          || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
4440          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
4441   _timer.stop();
4442   if (PrintCMSStatistics != 0) {
4443     gclog_or_tty->print_cr(
4444       "Finished remaining root initial mark scan work in %dth thread: %3.3f sec",
4445       worker_id, _timer.seconds());
4446   }
4447 }
4448 
4449 // Parallel remark task
4450 class CMSParRemarkTask: public CMSParMarkTask {
4451   CompactibleFreeListSpace* _cms_space;
4452 
4453   // The per-thread work queues, available here for stealing.
4454   OopTaskQueueSet*       _task_queues;
4455   ParallelTaskTerminator _term;
4456   StrongRootsScope*      _strong_roots_scope;
4457 
4458  public:
4459   // A value of 0 passed to n_workers will cause the number of
4460   // workers to be taken from the active workers in the work gang.
4461   CMSParRemarkTask(CMSCollector* collector,
4462                    CompactibleFreeListSpace* cms_space,
4463                    uint n_workers, WorkGang* workers,
4464                    OopTaskQueueSet* task_queues,
4465                    StrongRootsScope* strong_roots_scope):
4466     CMSParMarkTask("Rescan roots and grey objects in parallel",
4467                    collector, n_workers),
4468     _cms_space(cms_space),
4469     _task_queues(task_queues),
4470     _term(n_workers, task_queues),
4471     _strong_roots_scope(strong_roots_scope) { }
4472 
4473   OopTaskQueueSet* task_queues() { return _task_queues; }
4474 
4475   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
4476 
4477   ParallelTaskTerminator* terminator() { return &_term; }
4478   uint n_workers() { return _n_workers; }
4479 
4480   void work(uint worker_id);
4481 
4482  private:
4483   // ... of  dirty cards in old space
4484   void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
4485                                   Par_MarkRefsIntoAndScanClosure* cl);
4486 
4487   // ... work stealing for the above
4488   void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed);
4489 };
4490 
4491 class RemarkKlassClosure : public KlassClosure {
4492   KlassToOopClosure _cm_klass_closure;
4493  public:
4494   RemarkKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
4495   void do_klass(Klass* k) {
4496     // Check if we have modified any oops in the Klass during the concurrent marking.
4497     if (k->has_accumulated_modified_oops()) {
4498       k->clear_accumulated_modified_oops();
4499 
4500       // We could have transferred the current modified marks to the accumulated marks,
4501       // like we do with the Card Table to Mod Union Table. But it's not really necessary.
4502     } else if (k->has_modified_oops()) {
4503       // Don't clear anything, this info is needed by the next young collection.
4504     } else {
4505       // No modified oops in the Klass.
4506       return;
4507     }
4508 
4509     // The klass has modified fields, need to scan the klass.
4510     _cm_klass_closure.do_klass(k);
4511   }
4512 };
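
// The three cases in RemarkKlassClosure::do_klass above, tabulated (the
// flags are the Klass' accumulated-modified and modified oop marks):
//
//   accumulated | modified | action
//   ------------+----------+------------------------------------------
//   set         | any      | clear accumulated flag; scan the klass
//   clear       | set      | scan the klass; leave the modified flag
//               |          | for the next young collection to consume
//   clear       | clear    | nothing to scan; return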
4513 
4514 void CMSParMarkTask::work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl) {
4515   ParNewGeneration* young_gen = _collector->_young_gen;
4516   ContiguousSpace* eden_space = young_gen->eden();
4517   ContiguousSpace* from_space = young_gen->from();
4518   ContiguousSpace* to_space   = young_gen->to();
4519 
4520   HeapWord** eca = _collector->_eden_chunk_array;
4521   size_t     ect = _collector->_eden_chunk_index;
4522   HeapWord** sca = _collector->_survivor_chunk_array;
4523   size_t     sct = _collector->_survivor_chunk_index;
4524 
4525   assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
4526   assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
4527 
4528   do_young_space_rescan(worker_id, cl, to_space, NULL, 0);
4529   do_young_space_rescan(worker_id, cl, from_space, sca, sct);
4530   do_young_space_rescan(worker_id, cl, eden_space, eca, ect);
4531 }
4532 
4533 // work_queue(i) is passed to the closure
4534 // Par_MarkRefsIntoAndScanClosure.  The "i" parameter
4535 // also is passed to do_dirty_card_rescan_tasks() and to
4536 // do_work_steal() to select the i-th task_queue.
4537 
4538 void CMSParRemarkTask::work(uint worker_id) {
4539   elapsedTimer _timer;
4540   ResourceMark rm;
4541   HandleMark   hm;
4542 
4543   // ---------- rescan from roots --------------
4544   _timer.start();
4545   GenCollectedHeap* gch = GenCollectedHeap::heap();
4546   Par_MarkRefsIntoAndScanClosure par_mrias_cl(_collector,
4547     _collector->_span, _collector->ref_processor(),
4548     &(_collector->_markBitMap),
4549     work_queue(worker_id));
4550 
4551   // Rescan young gen roots first since these are likely
4552   // coarsely partitioned and may, on that account, constitute
4553   // the critical path; thus, it's best to start off that
4554   // work first.
4555   // ---------- young gen roots --------------
4556   {
4557     work_on_young_gen_roots(worker_id, &par_mrias_cl);
4558     _timer.stop();
4559     if (PrintCMSStatistics != 0) {
4560       gclog_or_tty->print_cr(
4561         "Finished young gen rescan work in %dth thread: %3.3f sec",
4562         worker_id, _timer.seconds());
4563     }
4564   }
4565 
4566   // ---------- remaining roots --------------
4567   _timer.reset();
4568   _timer.start();
4569   gch->gen_process_roots(_strong_roots_scope,
4570                          GenCollectedHeap::OldGen,
4571                          false,     // yg was scanned above
4572                          GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
4573                          _collector->should_unload_classes(),
4574                          &par_mrias_cl,
4575                          NULL,
4576                          NULL);     // The dirty klasses will be handled below
4577 
4578   assert(_collector->should_unload_classes()
4579          || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
4580          "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
4581   _timer.stop();
4582   if (PrintCMSStatistics != 0) {
4583     gclog_or_tty->print_cr(
4584       "Finished remaining root rescan work in %dth thread: %3.3f sec",
4585       worker_id, _timer.seconds());
4586   }
4587 
4588   // ---------- unhandled CLD scanning ----------
4589   if (worker_id == 0) { // Single threaded at the moment.
4590     _timer.reset();
4591     _timer.start();
4592 
4593     // Scan all new class loader data objects and new dependencies that were
4594     // introduced during concurrent marking.
4595     ResourceMark rm;
4596     GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
4597     for (int i = 0; i < array->length(); i++) {
4598       par_mrias_cl.do_cld_nv(array->at(i));
4599     }
4600 
4601     // We don't need to keep track of new CLDs anymore.
4602     ClassLoaderDataGraph::remember_new_clds(false);
4603 
4604     _timer.stop();
4605     if (PrintCMSStatistics != 0) {
4606       gclog_or_tty->print_cr(
4607           "Finished unhandled CLD scanning work in %dth thread: %3.3f sec",
4608           worker_id, _timer.seconds());
4609     }
4610   }
4611 
4612   // ---------- dirty klass scanning ----------
4613   if (worker_id == 0) { // Single threaded at the moment.
4614     _timer.reset();
4615     _timer.start();
4616 
4617     // Scan all classes that were dirtied during the concurrent marking phase.
4618     RemarkKlassClosure remark_klass_closure(&par_mrias_cl);
4619     ClassLoaderDataGraph::classes_do(&remark_klass_closure);
4620 
4621     _timer.stop();
4622     if (PrintCMSStatistics != 0) {
4623       gclog_or_tty->print_cr(
4624           "Finished dirty klass scanning work in %dth thread: %3.3f sec",
4625           worker_id, _timer.seconds());
4626     }
4627   }
4628 
4629   // We might have added oops to ClassLoaderData::_handles during the
4630   // concurrent marking phase. These oops point to newly allocated objects
4631   // that are guaranteed to be kept alive. Either by the direct allocation
4632   // code, or when the young collector processes the roots. Hence,
4633   // we don't have to revisit the _handles block during the remark phase.
4634 
4635   // ---------- rescan dirty cards ------------
4636   _timer.reset();
4637   _timer.start();
4638 
4639   // Do the rescan tasks for each of the two spaces
4640   // (cms_space) in turn.
4641   // "worker_id" is passed to select the task_queue for "worker_id"
4642   do_dirty_card_rescan_tasks(_cms_space, worker_id, &par_mrias_cl);
4643   _timer.stop();
4644   if (PrintCMSStatistics != 0) {
4645     gclog_or_tty->print_cr(
4646       "Finished dirty card rescan work in %dth thread: %3.3f sec",
4647       worker_id, _timer.seconds());
4648   }
4649 
4650   // ---------- steal work from other threads ...
4651   // ---------- ... and drain overflow list.
4652   _timer.reset();
4653   _timer.start();
4654   do_work_steal(worker_id, &par_mrias_cl, _collector->hash_seed(worker_id));
4655   _timer.stop();
4656   if (PrintCMSStatistics != 0) {
4657     gclog_or_tty->print_cr(
4658       "Finished work stealing in %dth thread: %3.3f sec",
4659       worker_id, _timer.seconds());
4660   }
4661 }
4662 
4663 // Note that the worker_id parameter is not used.
4664 void
4665 CMSParMarkTask::do_young_space_rescan(uint worker_id,
4666   OopsInGenClosure* cl, ContiguousSpace* space,
4667   HeapWord** chunk_array, size_t chunk_top) {
4668   // Until all tasks completed:
4669   // . claim an unclaimed task
4670   // . compute region boundaries corresponding to task claimed
4671   //   using chunk_array
4672   // . par_oop_iterate(cl) over that region
4673 
4674   ResourceMark rm;
4675   HandleMark   hm;
4676 
4677   SequentialSubTasksDone* pst = space->par_seq_tasks();
4678 
4679   uint nth_task = 0;
4680   uint n_tasks  = pst->n_tasks();
4681 
4682   if (n_tasks > 0) {
4683     assert(pst->valid(), "Uninitialized use?");
4684     HeapWord *start, *end;
4685     while (!pst->is_task_claimed(/* reference */ nth_task)) {
4686       // We claimed task # nth_task; compute its boundaries.
4687       if (chunk_top == 0) {  // no samples were taken
4688         assert(nth_task == 0 && n_tasks == 1, "Can have only 1 eden task");
4689         start = space->bottom();
4690         end   = space->top();
4691       } else if (nth_task == 0) {
4692         start = space->bottom();
4693         end   = chunk_array[nth_task];
4694       } else if (nth_task < (uint)chunk_top) {
4695         assert(nth_task >= 1, "Control point invariant");
4696         start = chunk_array[nth_task - 1];
4697         end   = chunk_array[nth_task];
4698       } else {
4699         assert(nth_task == (uint)chunk_top, "Control point invariant");
4700         start = chunk_array[chunk_top - 1];
4701         end   = space->top();
4702       }
4703       MemRegion mr(start, end);
4704       // Verify that mr is in space
4705       assert(mr.is_empty() || space->used_region().contains(mr),
4706              "Should be in space");
4707       // Verify that "start" is an object boundary
4708       assert(mr.is_empty() || oop(mr.start())->is_oop(),
4709              "Should be an oop");
4710       space->par_oop_iterate(mr, cl);
4711     }
4712     pst->all_tasks_completed();
4713   }
4714 }
4715 
4716 void
4717 CMSParRemarkTask::do_dirty_card_rescan_tasks(
4718   CompactibleFreeListSpace* sp, int i,
4719   Par_MarkRefsIntoAndScanClosure* cl) {
4720   // Until all tasks completed:
4721   // . claim an unclaimed task
4722   // . compute region boundaries corresponding to task claimed
4723   // . transfer dirty bits ct->mut for that region
4724   // . apply rescanclosure to dirty mut bits for that region
4725 
4726   ResourceMark rm;
4727   HandleMark   hm;
4728 
4729   OopTaskQueue* work_q = work_queue(i);
4730   ModUnionClosure modUnionClosure(&(_collector->_modUnionTable));
4731   // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
4732   // CAUTION: This closure has state that persists across calls to
4733   // the work method dirty_range_iterate_clear() in that it has
4734   // embedded in it a (subtype of) UpwardsObjectClosure. The
4735   // use of that state in the embedded UpwardsObjectClosure instance
4736   // assumes that the cards are always iterated (even if in parallel
4737   // by several threads) in monotonically increasing order per each
4738   // thread. This is true of the implementation below which picks
4739   // card ranges (chunks) in monotonically increasing order globally
4740   // and, a-fortiori, in monotonically increasing order per thread
4741   // (the latter order being a subsequence of the former).
4742   // If the work code below is ever reorganized into a more chaotic
4743   // work-partitioning form than the current "sequential tasks"
4744   // paradigm, the use of that persistent state will have to be
4745   // revisited and modified appropriately. See also related
4746   // bug 4756801 work on which should examine this code to make
4747   // sure that the changes there do not run counter to the
4748   // assumptions made here and necessary for correctness and
4749   // efficiency. Note also that this code might yield inefficient
4750   // behavior in the case of very large objects that span one or
4751   // more work chunks. Such objects would potentially be scanned
4752   // several times redundantly. Work on 4756801 should try and
4753   // address that performance anomaly if at all possible. XXX
4754   MemRegion  full_span  = _collector->_span;
4755   CMSBitMap* bm    = &(_collector->_markBitMap);     // shared
4756   MarkFromDirtyCardsClosure
4757     greyRescanClosure(_collector, full_span, // entire span of interest
4758                       sp, bm, work_q, cl);
4759 
4760   SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
4761   assert(pst->valid(), "Uninitialized use?");
4762   uint nth_task = 0;
4763   const int alignment = CardTableModRefBS::card_size * BitsPerWord;
4764   MemRegion span = sp->used_region();
4765   HeapWord* start_addr = span.start();
4766   HeapWord* end_addr = (HeapWord*)round_to((intptr_t)span.end(),
4767                                            alignment);
4768   const size_t chunk_size = sp->rescan_task_size(); // in HeapWord units
4769   assert((HeapWord*)round_to((intptr_t)start_addr, alignment) ==
4770          start_addr, "Check alignment");
4771   assert((size_t)round_to((intptr_t)chunk_size, alignment) ==
4772          chunk_size, "Check alignment");
4773 
4774   while (!pst->is_task_claimed(/* reference */ nth_task)) {
4775     // Having claimed the nth_task, compute corresponding mem-region,
4776     // which is a-fortiori aligned correctly (i.e. at a MUT boundary).
4777     // The alignment restriction ensures that we do not need any
4778     // synchronization with other gang-workers while setting or
4779     // clearing bits in this chunk of the MUT.
4780     MemRegion this_span = MemRegion(start_addr + nth_task*chunk_size,
4781                                     start_addr + (nth_task+1)*chunk_size);
4782     // The last chunk's end might be way beyond the end of the
4783     // used region. In that case pull back appropriately.
4784     if (this_span.end() > end_addr) {
4785       this_span.set_end(end_addr);
4786       assert(!this_span.is_empty(), "Program logic (calculation of n_tasks)");
4787     }
4788     // Iterate over the dirty cards covering this chunk, marking them
4789     // precleaned, and setting the corresponding bits in the mod union
4790     // table. Since we have been careful to partition at Card and MUT-word
4791     // boundaries no synchronization is needed between parallel threads.
4792     _collector->_ct->ct_bs()->dirty_card_iterate(this_span,
4793                                                  &modUnionClosure);
4794 
4795     // Having transferred these marks into the modUnionTable,
4796     // rescan the marked objects on the dirty cards in the modUnionTable.
4797     // Even if this is at a synchronous collection, the initial marking
4798     // may have been done during an asynchronous collection so there
4799     // may be dirty bits in the mod-union table.
4800     _collector->_modUnionTable.dirty_range_iterate_clear(
4801                   this_span, &greyRescanClosure);
4802     _collector->_modUnionTable.verifyNoOneBitsInRange(
4803                                  this_span.start(),
4804                                  this_span.end());
4805   }
4806   pst->all_tasks_completed();  // declare that i am done
4807 }
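
// A worked example of the chunk alignment above: with the default
// 512-byte card size and BitsPerWord = 64, alignment = 512 * 64 = 32K
// bytes, exactly the span covered by one mod-union-table word. A used
// region ending 100,000 bytes past an aligned start_addr thus gets its
// end_addr rounded up to 131,072 bytes (four MUT words), and because
// every chunk boundary falls on a 32K multiple, no two gang-workers
// ever set or clear bits in the same MUT word -- which is why the loop
// above needs no synchronization between workers.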
4808 
4809 // . see if we can share work_queues with ParNew? XXX
4810 void
4811 CMSParRemarkTask::do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl,
4812                                 int* seed) {
4813   OopTaskQueue* work_q = work_queue(i);
4814   NOT_PRODUCT(int num_steals = 0;)
4815   oop obj_to_scan;
4816   CMSBitMap* bm = &(_collector->_markBitMap);
4817 
4818   while (true) {
4819     // Completely finish any left over work from (an) earlier round(s)
4820     cl->trim_queue(0);
4821     size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
4822                                          (size_t)ParGCDesiredObjsFromOverflowList);
4823     // Now check if there's any work in the overflow list
4824     // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
4825     // only affects the number of attempts made to get work from the
4826     // overflow list and does not affect the number of workers.  Just
4827     // pass ParallelGCThreads so this behavior is unchanged.
4828     if (_collector->par_take_from_overflow_list(num_from_overflow_list,
4829                                                 work_q,
4830                                                 ParallelGCThreads)) {
4831       // found something in global overflow list;
4832       // not yet ready to go stealing work from others.
4833       // We'd like to assert(work_q->size() != 0, ...)
4834       // because we just took work from the overflow list,
4835       // but of course we can't since all of that could have
4836       // been already stolen from us.
4837       // "He giveth and He taketh away."
4838       continue;
4839     }
4840     // Verify that we have no work before we resort to stealing
4841     assert(work_q->size() == 0, "Have work, shouldn't steal");
4842     // Try to steal from other queues that have work
4843     if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
4844       NOT_PRODUCT(num_steals++;)
4845       assert(obj_to_scan->is_oop(), "Oops, not an oop!");
4846       assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
4847       // Do scanning work
4848       obj_to_scan->oop_iterate(cl);
4849       // Loop around, finish this work, and try to steal some more
4850     } else if (terminator()->offer_termination()) {
4851         break;  // nirvana from the infinite cycle
4852     }
4853   }
4854   NOT_PRODUCT(
4855     if (PrintCMSStatistics != 0) {
4856       gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
4857     }
4858   )
4859   assert(work_q->size() == 0 && _collector->overflow_list_is_empty(),
4860          "Else our work is not yet done");
4861 }
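
// A schematic sketch (not compilable) of the drain/steal protocol above,
// whose shape is shared by the other parallel CMS tasks:
//
//   loop {
//     drain my own work queue completely;
//     if (overflow list yields work)       continue;  // go drain again
//     if (steal from some victim succeeds) continue;  // scan, then drain
//     if (terminator->offer_termination()) break;     // everyone is idle
//     // offer failed: some worker still has work; loop and try again
//   }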
4862 
4863 // Record object boundaries in _eden_chunk_array by sampling the eden
4864 // top in the slow-path eden object allocation code path, if
4865 // CMSEdenChunksRecordAlways is true. If CMSEdenChunksRecordAlways is
4866 // false, we instead rely on the asynchronous sampling in sample_eden()
4867 // above, which is active during the [abortable] preclean phase of the
4868 // collection.
4869 void CMSCollector::sample_eden_chunk() {
4870   if (CMSEdenChunksRecordAlways && _eden_chunk_array != NULL) {
4871     if (_eden_chunk_lock->try_lock()) {
4872       // Record a sample. This is the critical section. The contents
4873       // of the _eden_chunk_array have to be non-decreasing in
4874       // address order.
4875       _eden_chunk_array[_eden_chunk_index] = *_top_addr;
4876       assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
4877              "Unexpected state of Eden");
4878       if (_eden_chunk_index == 0 ||
4879           ((_eden_chunk_array[_eden_chunk_index] > _eden_chunk_array[_eden_chunk_index-1]) &&
4880            (pointer_delta(_eden_chunk_array[_eden_chunk_index],
4881                           _eden_chunk_array[_eden_chunk_index-1]) >= CMSSamplingGrain))) {
4882         _eden_chunk_index++;  // commit sample
4883       }
4884       _eden_chunk_lock->unlock();
4885     }
4886   }
4887 }
4888 
4889 // Return a thread-local PLAB recording array, as appropriate.
4890 void* CMSCollector::get_data_recorder(int thr_num) {
4891   if (_survivor_plab_array != NULL &&
4892       (CMSPLABRecordAlways ||
4893        (_collectorState > Marking && _collectorState < FinalMarking))) {
4894     assert(thr_num < (int)ParallelGCThreads, "thr_num is out of bounds");
4895     ChunkArray* ca = &_survivor_plab_array[thr_num];
4896     ca->reset();   // clear it so that fresh data is recorded
4897     return (void*) ca;
4898   } else {
4899     return NULL;
4900   }
4901 }
4902 
4903 // Reset all the thread-local PLAB recording arrays
4904 void CMSCollector::reset_survivor_plab_arrays() {
4905   for (uint i = 0; i < ParallelGCThreads; i++) {
4906     _survivor_plab_array[i].reset();
4907   }
4908 }
4909 
4910 // Merge the per-thread plab arrays into the global survivor chunk
4911 // array which will provide the partitioning of the survivor space
4912 // for CMS initial scan and rescan.
4913 void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv,
4914                                               int no_of_gc_threads) {
4915   assert(_survivor_plab_array  != NULL, "Error");
4916   assert(_survivor_chunk_array != NULL, "Error");
4917   assert(_collectorState == FinalMarking ||
4918          (CMSParallelInitialMarkEnabled && _collectorState == InitialMarking), "Error");
4919   for (int j = 0; j < no_of_gc_threads; j++) {
4920     _cursor[j] = 0;
4921   }
4922   HeapWord* top = surv->top();
4923   size_t i;
4924   for (i = 0; i < _survivor_chunk_capacity; i++) {  // all sca entries
4925     HeapWord* min_val = top;          // Higher than any PLAB address
4926     uint      min_tid = 0;            // position of min_val this round
4927     for (int j = 0; j < no_of_gc_threads; j++) {
4928       ChunkArray* cur_sca = &_survivor_plab_array[j];
4929       if (_cursor[j] == cur_sca->end()) {
4930         continue;
4931       }
4932       assert(_cursor[j] < cur_sca->end(), "ctl pt invariant");
4933       HeapWord* cur_val = cur_sca->nth(_cursor[j]);
4934       assert(surv->used_region().contains(cur_val), "Out of bounds value");
4935       if (cur_val < min_val) {
4936         min_tid = j;
4937         min_val = cur_val;
4938       } else {
4939         assert(cur_val < top, "All recorded addresses should be less");
4940       }
4941     }
4942     // At this point min_val and min_tid are respectively
4943     // the least address among the candidates _survivor_plab_array[j]->nth(_cursor[j])
4944     // (over all j) and the thread (j) that contributed that address.
4945     // We record this address in the _survivor_chunk_array[i]
4946     // and increment _cursor[min_tid] prior to the next round i.
4947     if (min_val == top) {
4948       break;
4949     }
4950     _survivor_chunk_array[i] = min_val;
4951     _cursor[min_tid]++;
4952   }
4953   // We are all done; record the size of the _survivor_chunk_array
4954   _survivor_chunk_index = i; // exclusive: [0, i)
4955   if (PrintCMSStatistics > 0) {
4956     gclog_or_tty->print(" (Survivor:" SIZE_FORMAT " chunks) ", i);
4957   }
4958   // Verify that we used up all the recorded entries
4959   #ifdef ASSERT
4960     size_t total = 0;
4961     for (int j = 0; j < no_of_gc_threads; j++) {
4962       assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");
4963       total += _cursor[j];
4964     }
4965     assert(total == _survivor_chunk_index, "Ctl Pt Invariant");
4966     // Check that the merged array is in sorted order
4967     if (total > 0) {
4968       for (size_t i = 0; i < total - 1; i++) {
4969         if (PrintCMSStatistics > 0) {
4970           gclog_or_tty->print(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
4971                               i, p2i(_survivor_chunk_array[i]));
4972         }
4973         assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
4974                "Not sorted");
4975       }
4976     }
4977   #endif // ASSERT
4978 }
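
// The loop above is a k-way merge of k sorted per-thread arrays into a
// single sorted array. A minimal free-standing sketch of the same idea
// (hypothetical helper, not used by the collector; assumes k <= 16):
//
//   size_t kway_merge(HeapWord** in[], size_t len[], int k, HeapWord** out) {
//     size_t cursor[16] = {0};              // one cursor per input array
//     size_t n = 0;
//     for (;;) {
//       int min_j = -1;                     // input holding the least head
//       for (int j = 0; j < k; j++) {
//         if (cursor[j] < len[j] &&
//             (min_j < 0 || in[j][cursor[j]] < in[min_j][cursor[min_j]])) {
//           min_j = j;
//         }
//       }
//       if (min_j < 0) break;               // every input is exhausted
//       out[n++] = in[min_j][cursor[min_j]++];
//     }
//     return n;                             // entries written to out
//   }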
4979 
4980 // Set up the space's par_seq_tasks structure for work claiming
4981 // for parallel initial scan and rescan of young gen.
4982 // See ParRescanTask where this is currently used.
4983 void
4984 CMSCollector::
4985 initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
4986   assert(n_threads > 0, "Unexpected n_threads argument");
4987 
4988   // Eden space
4989   if (!_young_gen->eden()->is_empty()) {
4990     SequentialSubTasksDone* pst = _young_gen->eden()->par_seq_tasks();
4991     assert(!pst->valid(), "Clobbering existing data?");
4992     // Each valid entry in [0, _eden_chunk_index) represents a task.
4993     size_t n_tasks = _eden_chunk_index + 1;
4994     assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error");
4995     // Sets the condition for completion of the subtask (how many threads
4996     // need to finish in order to be done).
4997     pst->set_n_threads(n_threads);
4998     pst->set_n_tasks((int)n_tasks);
4999   }
5000 
5001   // Merge the survivor plab arrays into _survivor_chunk_array
5002   if (_survivor_plab_array != NULL) {
5003     merge_survivor_plab_arrays(_young_gen->from(), n_threads);
5004   } else {
5005     assert(_survivor_chunk_index == 0, "Error");
5006   }
5007 
5008   // To space
5009   {
5010     SequentialSubTasksDone* pst = _young_gen->to()->par_seq_tasks();
5011     assert(!pst->valid(), "Clobbering existing data?");
5012     // Sets the condition for completion of the subtask (how many threads
5013     // need to finish in order to be done).
5014     pst->set_n_threads(n_threads);
5015     pst->set_n_tasks(1);
5016     assert(pst->valid(), "Error");
5017   }
5018 
5019   // From space
5020   {
5021     SequentialSubTasksDone* pst = _young_gen->from()->par_seq_tasks();
5022     assert(!pst->valid(), "Clobbering existing data?");
5023     size_t n_tasks = _survivor_chunk_index + 1;
5024     assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error");
5025     // Sets the condition for completion of the subtask (how many threads
5026     // need to finish in order to be done).
5027     pst->set_n_threads(n_threads);
5028     pst->set_n_tasks((int)n_tasks);
5029     assert(pst->valid(), "Error");
5030   }
5031 }
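
// A schematic sketch (not compilable as-is) of how workers consume a
// SequentialSubTasksDone structure set up above; each of the n_threads
// workers runs the same loop and every task is claimed exactly once
// (compare do_young_space_rescan() above):
//
//   uint nth_task = 0;
//   while (!pst->is_task_claimed(/* reference */ nth_task)) {
//     ... process task number nth_task ...
//   }
//   pst->all_tasks_completed();  // last of the n_threads workers resets pst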
5032 
5033 // Parallel version of remark
5034 void CMSCollector::do_remark_parallel() {
5035   GenCollectedHeap* gch = GenCollectedHeap::heap();
5036   WorkGang* workers = gch->workers();
5037   assert(workers != NULL, "Need parallel worker threads.");
5038   // Choose to use the number of GC workers most recently set
5039   // into "active_workers".
5040   uint n_workers = workers->active_workers();
5041 
5042   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
5043 
5044   StrongRootsScope srs(n_workers);
5045 
5046   CMSParRemarkTask tsk(this, cms_space, n_workers, workers, task_queues(), &srs);
5047 
5048   // We won't be iterating over the cards in the card table updating
5049   // the younger_gen cards, so we shouldn't call the following; otherwise
5050   // the verification code as well as subsequent younger_refs_iterate
5051   // code would get confused. XXX
5052   // gch->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
5053 
5054   // The young gen rescan work will not be done as part of
5055   // process_roots (which currently doesn't know how to
5056   // parallelize such a scan), but rather will be broken up into
5057   // a set of parallel tasks (via the sampling that the [abortable]
5058   // preclean phase did of eden, plus the [two] tasks of
5059   // scanning the [two] survivor spaces). Further fine-grain
5060   // parallelization of the scanning of the survivor spaces
5061   // themselves, and of precleaning of the young gen itself
5062   // is deferred to the future.
5063   initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
5064 
5065   // The dirty card rescan work is broken up into a "sequence"
5066   // of parallel tasks (per constituent space) that are dynamically
5067   // claimed by the parallel threads.
5068   cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
5069 
5070   // It turns out that even when we're using 1 thread, doing the work in a
5071   // separate thread causes wide variance in run times.  We can't help this
5072   // in the multi-threaded case, but we special-case n=1 here to get
5073   // repeatable measurements of the 1-thread overhead of the parallel code.
5074   if (n_workers > 1) {
5075     // Make refs discovery MT-safe, if it isn't already: it may not
5076     // necessarily be so, since it's possible that we are doing
5077     // ST marking.
5078     ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true);
5079     workers->run_task(&tsk);
5080   } else {
5081     ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5082     tsk.work(0);
5083   }
5084 
5085   // restore, single-threaded for now, any preserved marks
5086   // as a result of work_q overflow
5087   restore_preserved_marks_if_any();
5088 }
5089 
5090 // Non-parallel version of remark
5091 void CMSCollector::do_remark_non_parallel() {
5092   ResourceMark rm;
5093   HandleMark   hm;
5094   GenCollectedHeap* gch = GenCollectedHeap::heap();
5095   ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
5096 
5097   MarkRefsIntoAndScanClosure
5098     mrias_cl(_span, ref_processor(), &_markBitMap, NULL /* not precleaning */,
5099              &_markStack, this,
5100              false /* should_yield */, false /* not precleaning */);
5101   MarkFromDirtyCardsClosure
5102     markFromDirtyCardsClosure(this, _span,
5103                               NULL,  // space is set further below
5104                               &_markBitMap, &_markStack, &mrias_cl);
5105   {
5106     GCTraceTime t("grey object rescan", PrintGCDetails, false, _gc_timer_cm);
5107     // Iterate over the dirty cards, setting the corresponding bits in the
5108     // mod union table.
5109     {
5110       ModUnionClosure modUnionClosure(&_modUnionTable);
5111       _ct->ct_bs()->dirty_card_iterate(
5112                       _cmsGen->used_region(),
5113                       &modUnionClosure);
5114     }
5115     // Having transferred these marks into the modUnionTable, we just need
5116     // to rescan the marked objects on the dirty cards in the modUnionTable.
5117     // The initial marking may have been done during an asynchronous
5118     // collection so there may be dirty bits in the mod-union table.
5119     const int alignment =
5120       CardTableModRefBS::card_size * BitsPerWord;
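         // Why this alignment: one bit in the mod union table covers one
         // card, and dirty_range_iterate_clear() below walks the table a
         // BitMap word at a time, so we round the upper bound up to
         // card_size * BitsPerWord bytes, i.e. to a whole bitmap word's
         // worth of cards.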
5121     {
5122       // ... First handle dirty cards in CMS gen
5123       markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
5124       MemRegion ur = _cmsGen->used_region();
5125       HeapWord* lb = ur.start();
5126       HeapWord* ub = (HeapWord*)round_to((intptr_t)ur.end(), alignment);
5127       MemRegion cms_span(lb, ub);
5128       _modUnionTable.dirty_range_iterate_clear(cms_span,
5129                                                &markFromDirtyCardsClosure);
5130       verify_work_stacks_empty();
5131       if (PrintCMSStatistics != 0) {
5132         gclog_or_tty->print(" (re-scanned " SIZE_FORMAT " dirty cards in cms gen) ",
5133           markFromDirtyCardsClosure.num_dirty_cards());
5134       }
5135     }
5136   }
5137   if (VerifyDuringGC &&
5138       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
5139     HandleMark hm;  // Discard invalid handles created during verification
5140     Universe::verify();
5141   }
5142   {
5143     GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm);
5144 
5145     verify_work_stacks_empty();
5146 
5147     gch->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
5148     StrongRootsScope srs(1);
5149 
5150     gch->gen_process_roots(&srs,
5151                            GenCollectedHeap::OldGen,
5152                            true,  // young gen as roots
5153                            GenCollectedHeap::ScanningOption(roots_scanning_options()),
5154                            should_unload_classes(),
5155                            &mrias_cl,
5156                            NULL,
5157                            NULL); // The dirty klasses will be handled below
5158 
5159     assert(should_unload_classes()
5160            || (roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
5161            "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
5162   }
5163 
5164   {
5165     GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm);
5166 
5167     verify_work_stacks_empty();
5168 
5169     // Scan all class loader data objects that might have been introduced
5170     // during concurrent marking.
5171     ResourceMark rm;
5172     GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
5173     for (int i = 0; i < array->length(); i++) {
5174       mrias_cl.do_cld_nv(array->at(i));
5175     }
5176 
5177     // We don't need to keep track of new CLDs anymore.
5178     ClassLoaderDataGraph::remember_new_clds(false);
5179 
5180     verify_work_stacks_empty();
5181   }
5182 
5183   {
5184     GCTraceTime t("dirty klass scan", PrintGCDetails, false, _gc_timer_cm);
5185 
5186     verify_work_stacks_empty();
5187 
5188     RemarkKlassClosure remark_klass_closure(&mrias_cl);
5189     ClassLoaderDataGraph::classes_do(&remark_klass_closure);
5190 
5191     verify_work_stacks_empty();
5192   }
5193 
5194   // We might have added oops to ClassLoaderData::_handles during the
5195   // concurrent marking phase. These oops point to newly allocated objects
5196   // that are guaranteed to be kept alive, either by the direct allocation
5197   // code or when the young collector processes the roots. Hence,
5198   // we don't have to revisit the _handles block during the remark phase.
5199 
5200   verify_work_stacks_empty();
5201   // Restore evacuated mark words, if any, used for overflow list links
5202   restore_preserved_marks_if_any();
5203 
5204   verify_overflow_empty();
5205 }
5206 
5207 ////////////////////////////////////////////////////////
5208 // Parallel Reference Processing Task Proxy Class
5209 ////////////////////////////////////////////////////////
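     // Helper task bundling the shared OopTaskQueueSet with a
     // ParallelTaskTerminator sized to the number of workers; the proxy
     // tasks below use it to coordinate work stealing and termination
     // among the gang threads.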
5210 class AbstractGangTaskWOopQueues : public AbstractGangTask {
5211   OopTaskQueueSet*       _queues;
5212   ParallelTaskTerminator _terminator;
5213  public:
5214   AbstractGangTaskWOopQueues(const char* name, OopTaskQueueSet* queues, uint n_threads) :
5215     AbstractGangTask(name), _queues(queues), _terminator(n_threads, _queues) {}
5216   ParallelTaskTerminator* terminator() { return &_terminator; }
5217   OopTaskQueueSet* queues() { return _queues; }
5218 };
5219 
5220 class CMSRefProcTaskProxy: public AbstractGangTaskWOopQueues {
5221   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
5222   CMSCollector*          _collector;
5223   CMSBitMap*             _mark_bit_map;
5224   const MemRegion        _span;
5225   ProcessTask&           _task;
5226 
5227 public:
5228   CMSRefProcTaskProxy(ProcessTask&     task,
5229                       CMSCollector*    collector,
5230                       const MemRegion& span,
5231                       CMSBitMap*       mark_bit_map,
5232                       AbstractWorkGang* workers,
5233                       OopTaskQueueSet* task_queues):
5234     AbstractGangTaskWOopQueues("Process referents by policy in parallel",
5235       task_queues,
5236       workers->active_workers()),
5237     _task(task),
5238     _collector(collector), _span(span), _mark_bit_map(mark_bit_map)
5239   {
5240     assert(_collector->_span.equals(_span) && !_span.is_empty(),
5241            "Inconsistency in _span");
5242   }
5243 
5244   OopTaskQueueSet* task_queues() { return queues(); }
5245 
5246   OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
5247 
5248   void do_work_steal(int i,
5249                      CMSParDrainMarkingStackClosure* drain,
5250                      CMSParKeepAliveClosure* keep_alive,
5251                      int* seed);
5252 
5253   virtual void work(uint worker_id);
5254 };
5255 
5256 void CMSRefProcTaskProxy::work(uint worker_id) {
5257   ResourceMark rm;
5258   HandleMark hm;
5259   assert(_collector->_span.equals(_span), "Inconsistency in _span");
5260   CMSParKeepAliveClosure par_keep_alive(_collector, _span,
5261                                         _mark_bit_map,
5262                                         work_queue(worker_id));
5263   CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
5264                                                  _mark_bit_map,
5265                                                  work_queue(worker_id));
5266   CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map);
5267   _task.work(worker_id, is_alive_closure, par_keep_alive, par_drain_stack);
5268   if (_task.marks_oops_alive()) {
5269     do_work_steal(worker_id, &par_drain_stack, &par_keep_alive,
5270                   _collector->hash_seed(worker_id));
5271   }
5272   assert(work_queue(worker_id)->size() == 0, "work_queue should be empty");
5273   assert(_collector->_overflow_list == NULL, "non-empty _overflow_list");
5274 }
5275 
5276 class CMSRefEnqueueTaskProxy: public AbstractGangTask {
5277   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
5278   EnqueueTask& _task;
5279 
5280 public:
5281   CMSRefEnqueueTaskProxy(EnqueueTask& task)
5282     : AbstractGangTask("Enqueue reference objects in parallel"),
5283       _task(task)
5284   { }
5285 
5286   virtual void work(uint worker_id)
5287   {
5288     _task.work(worker_id);
5289   }
5290 };
5291 
5292 CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
5293   MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue):
5294    _span(span),
5295    _bit_map(bit_map),
5296    _work_queue(work_queue),
5297    _mark_and_push(collector, span, bit_map, work_queue),
5298    _low_water_mark(MIN2((work_queue->max_elems()/4),
5299                         ((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads)))
5300 { }
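     // A note on _low_water_mark: the queue is trimmed down to this mark
     // when it grows too long, which bounds local queue growth while still
     // leaving a few entries behind for idle workers to steal.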
5301 
5302 // . see if we can share work_queues with ParNew? XXX
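     // The loop below follows the usual CMS work-stealing protocol; roughly:
     //   loop:
     //     drain the local queue completely;
     //     if work could be taken from the global overflow list, continue;
     //     else if an oop could be stolen from a sibling queue, scan it;
     //     else if terminator()->offer_termination() succeeds, we are done.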
5303 void CMSRefProcTaskProxy::do_work_steal(int i,
5304   CMSParDrainMarkingStackClosure* drain,
5305   CMSParKeepAliveClosure* keep_alive,
5306   int* seed) {
5307   OopTaskQueue* work_q = work_queue(i);
5308   NOT_PRODUCT(int num_steals = 0;)
5309   oop obj_to_scan;
5310 
5311   while (true) {
5312     // Completely finish any left-over work from earlier rounds
5313     drain->trim_queue(0);
5314     size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
5315                                          (size_t)ParGCDesiredObjsFromOverflowList);
5316     // Now check if there's any work in the overflow list
5317     // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
5318     // only affects the number of attempts made to get work from the
5319     // overflow list and does not affect the number of workers.  Just
5320     // pass ParallelGCThreads so this behavior is unchanged.
5321     if (_collector->par_take_from_overflow_list(num_from_overflow_list,
5322                                                 work_q,
5323                                                 ParallelGCThreads)) {
5324       // Found something in global overflow list;
5325       // not yet ready to go stealing work from others.
5326       // We'd like to assert(work_q->size() != 0, ...)
5327       // because we just took work from the overflow list,
5328       // but of course we can't, since all of that might have
5329       // been already stolen from us.
5330       continue;
5331     }
5332     // Verify that we have no work before we resort to stealing
5333     assert(work_q->size() == 0, "Have work, shouldn't steal");
5334     // Try to steal from other queues that have work
5335     if (task_queues()->steal(i, seed, /* reference */ obj_to_scan)) {
5336       NOT_PRODUCT(num_steals++;)
5337       assert(obj_to_scan->is_oop(), "Oops, not an oop!");
5338       assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
5339       // Do scanning work
5340       obj_to_scan->oop_iterate(keep_alive);
5341       // Loop around, finish this work, and try to steal some more
5342     } else if (terminator()->offer_termination()) {
5343       break;  // nirvana from the infinite cycle
5344     }
5345   }
5346   NOT_PRODUCT(
5347     if (PrintCMSStatistics != 0) {
5348       gclog_or_tty->print("\n\t(%d: stole %d oops)", i, num_steals);
5349     }
5350   )
5351 }
5352 
5353 void CMSRefProcTaskExecutor::execute(ProcessTask& task)
5354 {
5355   GenCollectedHeap* gch = GenCollectedHeap::heap();
5356   WorkGang* workers = gch->workers();
5357   assert(workers != NULL, "Need parallel worker threads.");
5358   CMSRefProcTaskProxy rp_task(task, &_collector,
5359                               _collector.ref_processor()->span(),
5360                               _collector.markBitMap(),
5361                               workers, _collector.task_queues());
5362   workers->run_task(&rp_task);
5363 }
5364 
5365 void CMSRefProcTaskExecutor::execute(EnqueueTask& task)
5366 {
5368   GenCollectedHeap* gch = GenCollectedHeap::heap();
5369   WorkGang* workers = gch->workers();
5370   assert(workers != NULL, "Need parallel worker threads.");
5371   CMSRefEnqueueTaskProxy enq_task(task);
5372   workers->run_task(&enq_task);
5373 }
5374 
5375 void CMSCollector::refProcessingWork() {
5376   ResourceMark rm;
5377   HandleMark   hm;
5378 
5379   ReferenceProcessor* rp = ref_processor();
5380   assert(rp->span().equals(_span), "Spans should be equal");
5381   assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
5382   // Process weak references.
5383   rp->setup_policy(false);
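       // Note: setup_policy(false) asks for the currently installed soft
       // reference clearing policy rather than unconditional clearing of
       // all soft references.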
5384   verify_work_stacks_empty();
5385 
5386   CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
5387                                           &_markStack, false /* !preclean */);
5388   CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
5389                                 _span, &_markBitMap, &_markStack,
5390                                 &cmsKeepAliveClosure, false /* !preclean */);
5391   {
5392     GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm);
5393 
5394     ReferenceProcessorStats stats;
5395     if (rp->processing_is_mt()) {
5396       // Set the degree of MT here.  If the discovery is done MT, there
5397       // may have been a different number of threads doing the discovery
5398       // and a different number of discovered lists may have Ref objects.
5399       // That is OK as long as the Reference lists are balanced (see
5400       // balance_all_queues() and balance_queues()).
5401       GenCollectedHeap* gch = GenCollectedHeap::heap();
5402       uint active_workers = ParallelGCThreads;
5403       WorkGang* workers = gch->workers();
5404       if (workers != NULL) {
5405         active_workers = workers->active_workers();
5406         // The expectation is that active_workers will have already
5407         // been set to a reasonable value.  If it has not been set,
5408         // investigate.
5409         assert(active_workers > 0, "Should have been set during scavenge");
5410       }
5411       rp->set_active_mt_degree(active_workers);
5412       CMSRefProcTaskExecutor task_executor(*this);
5413       stats = rp->process_discovered_references(&_is_alive_closure,
5414                                         &cmsKeepAliveClosure,
5415                                         &cmsDrainMarkingStackClosure,
5416                                         &task_executor,
5417                                         _gc_timer_cm);
5418     } else {
5419       stats = rp->process_discovered_references(&_is_alive_closure,
5420                                         &cmsKeepAliveClosure,
5421                                         &cmsDrainMarkingStackClosure,
5422                                         NULL,
5423                                         _gc_timer_cm);
5424     }
5425     _gc_tracer_cm->report_gc_reference_stats(stats);
5426 
5427   }
5428 
5429   // This is the point where the entire marking should have completed.
5430   verify_work_stacks_empty();
5431 
5432   if (should_unload_classes()) {
5433     {
5434       GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm);
5435 
5436       // Unload classes and purge the SystemDictionary.
5437       bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
5438 
5439       // Unload nmethods.
5440       CodeCache::do_unloading(&_is_alive_closure, purged_class);
5441 
5442       // Prune dead klasses from subklass/sibling/implementor lists.
5443       Klass::clean_weak_klass_links(&_is_alive_closure);
5444     }
5445 
5446     {
5447       GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm);
5448       // Clean up unreferenced symbols in symbol table.
5449       SymbolTable::unlink();
5450     }
5451 
5452     {
5453       GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm);
5454       // Delete entries for dead interned strings.
5455       StringTable::unlink(&_is_alive_closure);
5456     }
5457   }
5458
5460   // Restore any preserved marks as a result of mark stack or
5461   // work queue overflow
5462   restore_preserved_marks_if_any();  // done single-threaded for now
5463 
5464   rp->set_enqueuing_is_done(true);
5465   if (rp->processing_is_mt()) {
5466     rp->balance_all_queues();
5467     CMSRefProcTaskExecutor task_executor(*this);
5468     rp->enqueue_discovered_references(&task_executor);
5469   } else {
5470     rp->enqueue_discovered_references(NULL);
5471   }
5472   rp->verify_no_references_recorded();
5473   assert(!rp->discovery_enabled(), "should have been disabled");
5474 }
5475 
5476 #ifndef PRODUCT
5477 void CMSCollector::check_correct_thread_executing() {
5478   Thread* t = Thread::current();
5479   // Only the VM thread or the CMS thread should be here.
5480   assert(t->is_ConcurrentGC_thread() || t->is_VM_thread(),
5481          "Unexpected thread type");
5482   // If this is the VM thread, the foreground collector
5483   // should not be waiting.  Note that _foregroundGCIsActive is
5484   // true while the foreground collector is waiting.
5485   if (_foregroundGCShouldWait) {
5486     // We cannot be the VM thread
5487     assert(t->is_ConcurrentGC_thread(),
5488            "Should be CMS thread");
5489   } else {
5490     // We can be the CMS thread only if we are in a stop-the-world
5491     // phase of CMS collection.
5492     if (t->is_ConcurrentGC_thread()) {
5493       assert(_collectorState == InitialMarking ||
5494              _collectorState == FinalMarking,
5495              "Should be a stop-world phase");
5496       // The CMS thread should be holding the CMS_token.
5497       assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5498              "Potential interference with concurrently "
5499              "executing VM thread");
5500     }
5501   }
5502 }
5503 #endif
5504 
5505 void CMSCollector::sweep() {
5506   assert(_collectorState == Sweeping, "just checking");
5507   check_correct_thread_executing();
5508   verify_work_stacks_empty();
5509   verify_overflow_empty();
5510   increment_sweep_count();
5511   TraceCMSMemoryManagerStats tms(_collectorState, GenCollectedHeap::heap()->gc_cause());
5512 
5513   _inter_sweep_timer.stop();
5514   _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
5515 
5516   assert(!_intra_sweep_timer.is_active(), "Should not be active");
5517   _intra_sweep_timer.reset();
5518   _intra_sweep_timer.start();
5519   {
5520     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5521     CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails);
5522     // First sweep the old gen
5523     {
5524       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
5525                                bitMapLock());
5526       sweepWork(_cmsGen);
5527     }
5528 
5529     // Update Universe::_heap_*_at_gc figures.
5530     // We need all the free list locks to make the abstract state
5531     // transition from Sweeping to Resetting. See detailed note
5532     // further below.
5533     {
5534       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock());
5535       // Update heap occupancy information which is used as
5536       // input to soft ref clearing policy at the next gc.
5537       Universe::update_heap_info_at_gc();
5538       _collectorState = Resizing;
5539     }
5540   }
5541   verify_work_stacks_empty();
5542   verify_overflow_empty();
5543 
5544   if (should_unload_classes()) {
5545     // Delay purge to the beginning of the next safepoint.  Metaspace::contains
5546     // requires that the virtual spaces are stable and not deleted.
5547     ClassLoaderDataGraph::set_should_purge(true);
5548   }
5549 
5550   _intra_sweep_timer.stop();
5551   _intra_sweep_estimate.sample(_intra_sweep_timer.seconds());
5552 
5553   _inter_sweep_timer.reset();
5554   _inter_sweep_timer.start();
5555 
5556   // We need a monotonically non-decreasing time in ms or we will see
5557   // time-warp warnings; os::javaTimeMillis() does not guarantee
5558   // monotonicity, so derive the time from os::javaTimeNanos() instead.
5559   jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
5560   update_time_of_last_gc(now);
5561 
5562   // NOTE on abstract state transitions:
5563   // Mutators allocate-live and/or mark the mod-union table dirty
5564   // based on the state of the collection.  The former is done in
5565   // the interval [Marking, Sweeping] and the latter in the interval
5566   // [Marking, Sweeping).  Thus the transitions into the Marking state
5567   // and out of the Sweeping state must be synchronously visible
5568   // globally to the mutators.
5569   // The transition into the Marking state happens with the world
5570   // stopped so the mutators will globally see it.  Sweeping is
5571   // done asynchronously by the background collector so the transition
5572   // from the Sweeping state to the Resizing state must be done
5573   // under the freelistLock (as is the check for whether to
5574   // allocate-live and whether to dirty the mod-union table).
5575   assert(_collectorState == Resizing, "Change of collector state to"
5576     " Resizing must be done under the freelistLocks (plural)");
5577 
5578   // Now that sweeping has been completed, we clear
5579   // the incremental_collection_failed flag,
5580   // thus inviting a younger gen collection to promote into
5581   // this generation. If such a promotion may still fail,
5582   // the flag will be set again when a young collection is
5583   // attempted.
5584   GenCollectedHeap* gch = GenCollectedHeap::heap();
5585   gch->clear_incremental_collection_failed();  // Worth retrying as fresh space may have been freed up
5586   gch->update_full_collections_completed(_collection_count_start);
5587 }
5588 
5589 // FIX ME!!! Looks like this belongs in CFLSpace, with
5590 // CMSGen merely delegating to it.
5591 void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
5592   double nearLargestPercent = FLSLargestBlockCoalesceProximity;
5593   HeapWord*  minAddr        = _cmsSpace->bottom();
5594   HeapWord*  largestAddr    =
5595     (HeapWord*) _cmsSpace->dictionary()->find_largest_dict();
5596   if (largestAddr == NULL) {
5597     // The dictionary appears to be empty.  In this case
5598     // try to coalesce at the end of the heap.
5599     largestAddr = _cmsSpace->end();
5600   }
5601   size_t largestOffset     = pointer_delta(largestAddr, minAddr);
5602   size_t nearLargestOffset =
5603     (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
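       // A worked sketch, assuming the default FLSLargestBlockCoalesceProximity
       // of 0.99: if the largest free chunk starts 1,000,000 words past
       // minAddr, sweeping will start coalescing aggressively from roughly
       // minAddr + 990,000 - MinChunkSize, i.e. just ahead of the largest
       // chunk, in the hope of growing it further.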
5604   if (PrintFLSStatistics != 0) {
5605     gclog_or_tty->print_cr(
5606       "CMS: Large Block: " PTR_FORMAT ";"
5607       " Proximity: " PTR_FORMAT " -> " PTR_FORMAT,
5608       p2i(largestAddr),
5609       p2i(_cmsSpace->nearLargestChunk()), p2i(minAddr + nearLargestOffset));
5610   }
5611   _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
5612 }
5613 
5614 bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
5615   return addr >= _cmsSpace->nearLargestChunk();
5616 }
5617 
5618 FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
5619   return _cmsSpace->find_chunk_at_end();
5620 }
5621 
5622 void ConcurrentMarkSweepGeneration::update_gc_stats(Generation* current_generation,
5623                                                     bool full) {
5624   // If the young generation has been collected, gather any statistics
5625   // that are of interest at this point.
5626   bool current_is_young = GenCollectedHeap::heap()->is_young_gen(current_generation);
5627   if (!full && current_is_young) {
5628     // Gather statistics on the young generation collection.
5629     collector()->stats().record_gc0_end(used());
5630   }
5631 }
5632 
5633 void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* old_gen) {
5634   // We iterate over the space(s) underlying this generation,
5635   // checking the mark bit map to see if the bits corresponding
5636   // to specific blocks are marked or not. Blocks that are
5637   // marked are live and are not swept up. All remaining blocks
5638   // are swept up, with coalescing on-the-fly as we sweep up
5639   // contiguous free and/or garbage blocks:
5640   // We need to ensure that the sweeper synchronizes with allocators
5641   // and stop-the-world collectors. In particular, the following
5642   // locks are used:
5643   // . CMS token: if this is held, a stop-the-world collection cannot occur
5644   // . freelistLock: if this is held, no allocation can occur from this
5645   //                 generation by another thread
5646   // . bitMapLock: if this is held, no other thread can access or update
5647   //               the marking bit map
5648 
5649   // Note that we need to hold the freelistLock if we use
5650   // block iterate below; else the iterator might go awry if
5651   // a mutator (or promotion) causes block contents to change
5652   // (for instance if the allocator divvies up a block).
5653   // If we hold the free list lock, for all practical purposes
5654   // young generation GC's can't occur (they'll usually need to
5655   // promote), so we might as well prevent all young generation
5656   // GC's while we do a sweeping step. For the same reason, we might
5657   // as well take the bit map lock for the entire duration.
5658 
5659   // check that we hold the requisite locks
5660   assert(have_cms_token(), "Should hold cms token");
5661   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), "Should possess CMS token to sweep");
5662   assert_lock_strong(old_gen->freelistLock());
5663   assert_lock_strong(bitMapLock());
5664 
5665   assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context");
5666   assert(_intra_sweep_timer.is_active(),  "Was switched on  in an outer context");
5667   old_gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
5668                                           _inter_sweep_estimate.padded_average(),
5669                                           _intra_sweep_estimate.padded_average());
5670   old_gen->setNearLargestChunk();
5671 
5672   {
5673     SweepClosure sweepClosure(this, old_gen, &_markBitMap, CMSYield);
5674     old_gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
5675     // We need to free-up/coalesce garbage/blocks from a
5676     // co-terminal free run. This is done in the SweepClosure
5677     // destructor; so, do not remove this scope, else the
5678     // end-of-sweep-census below will be off by a little bit.
5679   }
5680   old_gen->cmsSpace()->sweep_completed();
5681   old_gen->cmsSpace()->endSweepFLCensus(sweep_count());
5682   if (should_unload_classes()) {                // unloaded classes this cycle,
5683     _concurrent_cycles_since_last_unload = 0;   // ... reset count
5684   } else {                                      // did not unload classes,
5685     _concurrent_cycles_since_last_unload++;     // ... increment count
5686   }
5687 }
5688 
5689 // Reset CMS data structures (for now just the marking bit map)
5690 // preparatory for the next cycle.
5691 void CMSCollector::reset_concurrent() {
5692   CMSTokenSyncWithLocks ts(true, bitMapLock());
5693 
5694   // If the state is not "Resetting", the foreground thread
5695   // has already done the collection and the resetting.
5696   if (_collectorState != Resetting) {
5697     assert(_collectorState == Idling, "The state should only change"
5698       " because the foreground collector has finished the collection");
5699     return;
5700   }
5701 
5702   // Clear the mark bitmap (no grey objects to start with)
5703   // for the next cycle.
5704   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5705   CMSPhaseAccounting cmspa(this, "reset", !PrintGCDetails);
5706 
5707   HeapWord* curAddr = _markBitMap.startWord();
5708   while (curAddr < _markBitMap.endWord()) {
5709     size_t remaining  = pointer_delta(_markBitMap.endWord(), curAddr);
5710     MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
5711     _markBitMap.clear_large_range(chunk);
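         // Clearing proceeds one CMSBitMapYieldQuantum-sized chunk at a
         // time so that, between chunks, this thread can surrender the
         // bitMapLock and the CMS token to a waiting foreground collection
         // (the yield protocol below).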
5712     if (ConcurrentMarkSweepThread::should_yield() &&
5713         !foregroundGCIsActive() &&
5714         CMSYield) {
5715       assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
5716              "CMS thread should hold CMS token");
5717       assert_lock_strong(bitMapLock());
5718       bitMapLock()->unlock();
5719       ConcurrentMarkSweepThread::desynchronize(true);
5720       stopTimer();
5721       if (PrintCMSStatistics != 0) {
5722         incrementYields();
5723       }
5724 
5725       // See the comment in coordinator_yield()
5726       for (unsigned i = 0; i < CMSYieldSleepCount &&
5727                        ConcurrentMarkSweepThread::should_yield() &&
5728                        !CMSCollector::foregroundGCIsActive(); ++i) {
5729         os::sleep(Thread::current(), 1, false);
5730       }
5731 
5732       ConcurrentMarkSweepThread::synchronize(true);
5733       bitMapLock()->lock_without_safepoint_check();
5734       startTimer();
5735     }
5736     curAddr = chunk.end();
5737   }
5738   // A successful mostly concurrent collection has been done.
5739   // Because only the full (i.e., concurrent mode failure) collections
5740   // are being measured for gc overhead limits, clean the "near" flag
5741   // and count.
5742   size_policy()->reset_gc_overhead_limit_count();
5743   _collectorState = Idling;
5744 
5745   register_gc_end();
5746 }
5747 
5748 // Same as above but for STW paths
5749 void CMSCollector::reset_stw() {
5750   // already have the lock
5751   assert(_collectorState == Resetting, "just checking");
5752   assert_lock_strong(bitMapLock());
5753   GCIdMarkAndRestore gc_id_mark(_cmsThread->gc_id());
5754   _markBitMap.clear_all();
5755   _collectorState = Idling;
5756   register_gc_end();
5757 }
5758 
5759 void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
5760   TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
5761   GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
5762   TraceCollectorStats tcs(counters());
5763 
5764   switch (op) {
5765     case CMS_op_checkpointRootsInitial: {
5766       SvcGCMarker sgcm(SvcGCMarker::OTHER);
5767       checkpointRootsInitial();
5768       if (PrintGC) {
5769         _cmsGen->printOccupancy("initial-mark");
5770       }
5771       break;
5772     }
5773     case CMS_op_checkpointRootsFinal: {
5774       SvcGCMarker sgcm(SvcGCMarker::OTHER);
5775       checkpointRootsFinal();
5776       if (PrintGC) {
5777         _cmsGen->printOccupancy("remark");
5778       }
5779       break;
5780     }
5781     default:
5782       fatal("No such CMS_op");
5783   }
5784 }
5785 
5786 #ifndef PRODUCT
5787 size_t const CMSCollector::skip_header_HeapWords() {
5788   return FreeChunk::header_size();
5789 }
5790 
5791 // Try to collect here the conditions that should hold when the
5792 // CMS thread is exiting. The idea is that the foreground GC
5793 // thread should not be blocked if it wants to terminate
5794 // the CMS thread and yet continue to run the VM for a while
5795 // after that.
5796 void CMSCollector::verify_ok_to_terminate() const {
5797   assert(Thread::current()->is_ConcurrentGC_thread(),
5798          "should be called by CMS thread");
5799   assert(!_foregroundGCShouldWait, "should be false");
5800   // We could check here that all the various low-level locks
5801   // are not held by the CMS thread, but that is overkill; see
5802   // also CMSThread::verify_ok_to_terminate() where the CGC_lock
5803   // is checked.
5804 }
5805 #endif
5806 
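     // Printezis marks, a sketch: for a block of N >= 3 words whose contents
     // cannot yet be parsed (e.g. the klass pointer is not yet installed),
     // CMS records the block's extent with two extra bits in the mark bit
     // map, at addr + 1 and at the last word, addr + N - 1:
     //
     //   addr      addr+1                 addr+N-1
     //   [mark bit][P-bit][0 ..........0][P-bit]
     //
     // The size is then recovered, as below, by scanning from addr + 2 for
     // the next set bit.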
5807 size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const {
5808   assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1),
5809          "missing Printezis mark?");
5810   HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
5811   size_t size = pointer_delta(nextOneAddr + 1, addr);
5812   assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
5813          "alignment problem");
5814   assert(size >= 3, "Necessary for Printezis marks to work");
5815   return size;
5816 }
5817 
5818 // A variant of the above (block_size_using_printezis_bits()) except
5819 // that we return 0 if the P-bits are not yet set.
5820 size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const {
5821   if (_markBitMap.isMarked(addr + 1)) {
5822     assert(_markBitMap.isMarked(addr), "P-bit can be set only for marked objects");
5823     HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
5824     size_t size = pointer_delta(nextOneAddr + 1, addr);
5825     assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
5826            "alignment problem");
5827     assert(size >= 3, "Necessary for Printezis marks to work");
5828     return size;
5829   }
5830   return 0;
5831 }
5832 
5833 HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
5834   size_t sz = 0;
5835   oop p = (oop)addr;
5836   if (p->klass_or_null() != NULL) {
5837     sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
5838   } else {
5839     sz = block_size_using_printezis_bits(addr);
5840   }
5841   assert(sz > 0, "size must be nonzero");
5842   HeapWord* next_block = addr + sz;
5843   HeapWord* next_card  = (HeapWord*)round_to((uintptr_t)next_block,
5844                                              CardTableModRefBS::card_size);
5845   assert(round_down((uintptr_t)addr,      CardTableModRefBS::card_size) <
5846          round_down((uintptr_t)next_card, CardTableModRefBS::card_size),
5847          "must be different cards");
5848   return next_card;
5849 }
5850 
5851 
5852 // CMS Bit Map Wrapper /////////////////////////////////////////
5853 
5854 // Construct the CMS bit map infrastructure, but don't create the
5855 // bit vector itself. That is done by a separate call to
5856 // CMSBitMap::allocate(), further below.
5857 CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
5858   _bm(),
5859   _shifter(shifter),
5860   _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true,
5861                                     Monitor::_safepoint_check_sometimes) : NULL)
5862 {
5863   _bmStartWord = 0;
5864   _bmWordSize  = 0;
5865 }
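     // A note on granularity: one bit in the map covers 2^_shifter HeapWords.
     // The CMS mark bit map is constructed with a shifter of 0 (one bit per
     // heap word), while the mod union table uses a card-sized shifter so
     // that each of its bits covers a whole card.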
5866 
5867 bool CMSBitMap::allocate(MemRegion mr) {
5868   _bmStartWord = mr.start();
5869   _bmWordSize  = mr.word_size();
5870   ReservedSpace brs(ReservedSpace::allocation_align_size_up(
5871                      (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
5872   if (!brs.is_reserved()) {
5873     warning("CMS bit map allocation failure");
5874     return false;
5875   }
5876   // For now we'll just commit all of the bit map up front.
5877   // Later on we'll try to be more parsimonious with swap.
5878   if (!_virtual_space.initialize(brs, brs.size())) {
5879     warning("CMS bit map backing store failure");
5880     return false;
5881   }
5882   assert(_virtual_space.committed_size() == brs.size(),
5883          "didn't reserve backing store for all of CMS bit map?");
5884   _bm.set_map((BitMap::bm_word_t*)_virtual_space.low());
5885   assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
5886          _bmWordSize, "inconsistency in bit map sizing");
5887   _bm.set_size(_bmWordSize >> _shifter);
5888 
5889   // bm.clear(); // can we rely on getting zero'd memory? verify below
5890   assert(isAllClear(),
5891          "Expected zero'd memory from ReservedSpace constructor");
5892   assert(_bm.size() == heapWordDiffToOffsetDiff(sizeInWords()),
5893          "consistency check");
5894   return true;
5895 }
5896 
5897 void CMSBitMap::dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl) {
5898   HeapWord *next_addr, *end_addr, *last_addr;
5899   assert_locked();
5900   assert(covers(mr), "out-of-range error");
5901   // XXX assert that start and end are appropriately aligned
5902   for (next_addr = mr.start(), end_addr = mr.end();
5903        next_addr < end_addr; next_addr = last_addr) {
5904     MemRegion dirty_region = getAndClearMarkedRegion(next_addr, end_addr);
5905     last_addr = dirty_region.end();
5906     if (!dirty_region.is_empty()) {
5907       cl->do_MemRegion(dirty_region);
5908     } else {
5909       assert(last_addr == end_addr, "program logic");
5910       return;
5911     }
5912   }
5913 }
5914 
5915 void CMSBitMap::print_on_error(outputStream* st, const char* prefix) const {
5916   _bm.print_on_error(st, prefix);
5917 }
5918 
5919 #ifndef PRODUCT
5920 void CMSBitMap::assert_locked() const {
5921   CMSLockVerifier::assert_locked(lock());
5922 }
5923 
5924 bool CMSBitMap::covers(MemRegion mr) const {
5925   // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
5926   assert((size_t)_bm.size() == (_bmWordSize >> _shifter),
5927          "size inconsistency");
5928   return (mr.start() >= _bmStartWord) &&
5929          (mr.end()   <= endWord());
5930 }
5931 
5932 bool CMSBitMap::covers(HeapWord* start, size_t size) const {
5933   return (start >= _bmStartWord && (start + size) <= endWord());
5934 }
5935 
5936 void CMSBitMap::verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) {
5937   // verify that there are no 1 bits in the interval [left, right)
5938   FalseBitMapClosure falseBitMapClosure;
5939   iterate(&falseBitMapClosure, left, right);
5940 }
5941 
5942 void CMSBitMap::region_invariant(MemRegion mr)
5943 {
5944   assert_locked();
5945   // mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
5946   assert(!mr.is_empty(), "unexpected empty region");
5947   assert(covers(mr), "mr should be covered by bit map");
5948   // convert address range into offset range
5949   size_t start_ofs = heapWordToOffset(mr.start());
5950   // Make sure that end() is appropriately aligned
5951   assert(mr.end() == (HeapWord*)round_to((intptr_t)mr.end(),
5952                         (1 << (_shifter+LogHeapWordSize))),
5953          "Misaligned mr.end()");
5954   size_t end_ofs   = heapWordToOffset(mr.end());
5955   assert(end_ofs > start_ofs, "Should mark at least one bit");
5956 }
5957 
5958 #endif
5959 
5960 bool CMSMarkStack::allocate(size_t size) {
5961   // allocate a stack of the requisite depth
5962   ReservedSpace rs(ReservedSpace::allocation_align_size_up(
5963                    size * sizeof(oop)));
5964   if (!rs.is_reserved()) {
5965     warning("CMSMarkStack allocation failure");
5966     return false;
5967   }
5968   if (!_virtual_space.initialize(rs, rs.size())) {
5969     warning("CMSMarkStack backing store failure");
5970     return false;
5971   }
5972   assert(_virtual_space.committed_size() == rs.size(),
5973          "didn't reserve backing store for all of CMS stack?");
5974   _base = (oop*)(_virtual_space.low());
5975   _index = 0;
5976   _capacity = size;
5977   NOT_PRODUCT(_max_depth = 0);
5978   return true;
5979 }
5980 
5981 // XXX FIX ME !!! In the MT case we come in here holding a
5982 // leaf lock. For printing we need to take a further lock
5983 // which has lower rank. We need to recalibrate the two
5984 // lock-ranks involved in order to be able to print the
5985 // messages below. (Or defer the printing to the caller.
5986 // For now we take the expedient path of just disabling the
5987 // messages for the problematic case.)
5988 void CMSMarkStack::expand() {
5989   assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted");
5990   if (_capacity == MarkStackSizeMax) {
5991     if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
5992       // We print a warning message only once per CMS cycle.
5993       gclog_or_tty->print_cr(" (benign) Hit CMSMarkStack max size limit");
5994     }
5995     return;
5996   }
5997   // Double capacity if possible
5998   size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax);
5999   // Do not give up existing stack until we have managed to
6000   // get the double capacity that we desired.
6001   ReservedSpace rs(ReservedSpace::allocation_align_size_up(
6002                    new_capacity * sizeof(oop)));
6003   if (rs.is_reserved()) {
6004     // Release the backing store associated with old stack
6005     _virtual_space.release();
6006     // Reinitialize virtual space for new stack
6007     if (!_virtual_space.initialize(rs, rs.size())) {
6008       fatal("Not enough swap for expanded marking stack");
6009     }
6010     _base = (oop*)(_virtual_space.low());
6011     _index = 0;
6012     _capacity = new_capacity;
6013   } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
6014     // Failed to double the capacity; carry on with the existing stack.
6015     // We print a detail message only once per CMS cycle.
6016     gclog_or_tty->print(" (benign) Failed to expand marking stack from " SIZE_FORMAT "K to "
6017             SIZE_FORMAT "K",
6018             _capacity / K, new_capacity / K);
6019   }
6020 }
6021 
6022 
6023 // Closures
6024 // XXX: there seems to be a lot of code duplication here;
6025 // should refactor and consolidate common code.
6026 
6027 // This closure is used to mark refs into the CMS generation in
6028 // the CMS bit map. Called at the first checkpoint. This closure
6029 // assumes that we do not need to re-mark dirty cards; if the CMS
6030 // generation on which this is used is not the oldest
6031 // generation, then this will lose younger_gen cards!
6032 
6033 MarkRefsIntoClosure::MarkRefsIntoClosure(
6034   MemRegion span, CMSBitMap* bitMap):
6035     _span(span),
6036     _bitMap(bitMap)
6037 {
6038   assert(ref_processor() == NULL, "deliberately left NULL");
6039   assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
6040 }
6041 
6042 void MarkRefsIntoClosure::do_oop(oop obj) {
6043   // If obj points into _span, mark the corresponding bit in _bitMap.
6044   assert(obj->is_oop(), "expected an oop");
6045   HeapWord* addr = (HeapWord*)obj;
6046   if (_span.contains(addr)) {
6047     // this should be made more efficient
6048     _bitMap->mark(addr);
6049   }
6050 }
6051 
6052 void MarkRefsIntoClosure::do_oop(oop* p)       { MarkRefsIntoClosure::do_oop_work(p); }
6053 void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }
6054 
6055 Par_MarkRefsIntoClosure::Par_MarkRefsIntoClosure(
6056   MemRegion span, CMSBitMap* bitMap):
6057     _span(span),
6058     _bitMap(bitMap)
6059 {
6060   assert(ref_processor() == NULL, "deliberately left NULL");
6061   assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
6062 }
6063 
6064 void Par_MarkRefsIntoClosure::do_oop(oop obj) {
6065   // If obj points into _span, mark the corresponding bit in _bitMap.
6066   assert(obj->is_oop(), "expected an oop");
6067   HeapWord* addr = (HeapWord*)obj;
6068   if (_span.contains(addr)) {
6069     // this should be made more efficient
6070     _bitMap->par_mark(addr);
6071   }
6072 }
6073 
6074 void Par_MarkRefsIntoClosure::do_oop(oop* p)       { Par_MarkRefsIntoClosure::do_oop_work(p); }
6075 void Par_MarkRefsIntoClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoClosure::do_oop_work(p); }
6076 
6077 // A variant of the above, used for CMS marking verification.
6078 MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
6079   MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
6080     _span(span),
6081     _verification_bm(verification_bm),
6082     _cms_bm(cms_bm)
6083 {
6084   assert(ref_processor() == NULL, "deliberately left NULL");
6085   assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
6086 }
6087 
6088 void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
6089   // If obj points into _span, mark the corresponding bit in the
       // verification bit map and cross-check it against the CMS bit map.
6090   assert(obj->is_oop(), "expected an oop");
6091   HeapWord* addr = (HeapWord*)obj;
6092   if (_span.contains(addr)) {
6093     _verification_bm->mark(addr);
6094     if (!_cms_bm->isMarked(addr)) {
6095       oop(addr)->print();
6096       gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
6097       fatal("... aborting");
6098     }
6099   }
6100 }
6101 
6102 void MarkRefsIntoVerifyClosure::do_oop(oop* p)       { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6103 void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); }
6104 
6105 //////////////////////////////////////////////////
6106 // MarkRefsIntoAndScanClosure
6107 //////////////////////////////////////////////////
6108 
6109 MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
6110                                                        ReferenceProcessor* rp,
6111                                                        CMSBitMap* bit_map,
6112                                                        CMSBitMap* mod_union_table,
6113                                                        CMSMarkStack*  mark_stack,
6114                                                        CMSCollector* collector,
6115                                                        bool should_yield,
6116                                                        bool concurrent_precleaning):
6117   _collector(collector),
6118   _span(span),
6119   _bit_map(bit_map),
6120   _mark_stack(mark_stack),
6121   _pushAndMarkClosure(collector, span, rp, bit_map, mod_union_table,
6122                       mark_stack, concurrent_precleaning),
6123   _yield(should_yield),
6124   _concurrent_precleaning(concurrent_precleaning),
6125   _freelistLock(NULL)
6126 {
6127   // FIXME: Should initialize in base class constructor.
6128   assert(rp != NULL, "ref_processor shouldn't be NULL");
6129   set_ref_processor_internal(rp);
6130 }
6131 
6132 // This closure is used to mark refs into the CMS generation at the
6133 // second (final) checkpoint, and to scan and transitively follow
6134 // the unmarked oops. It is also used during the concurrent precleaning
6135 // phase while scanning objects on dirty cards in the CMS generation.
6136 // The marks are made in the marking bit map and the marking stack is
6137 // used for keeping the (newly) grey objects during the scan.
6138 // The parallel version (Par_...) appears further below.
6139 void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
6140   if (obj != NULL) {
6141     assert(obj->is_oop(), "expected an oop");
6142     HeapWord* addr = (HeapWord*)obj;
6143     assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
6144     assert(_collector->overflow_list_is_empty(),
6145            "overflow list should be empty");
6146     if (_span.contains(addr) &&
6147         !_bit_map->isMarked(addr)) {
6148       // mark bit map (object is now grey)
6149       _bit_map->mark(addr);
6150       // push on marking stack (stack should be empty), and drain the
6151       // stack by applying this closure to the oops in the oops popped
6152       // from the stack (i.e. blacken the grey objects)
6153       bool res = _mark_stack->push(obj);
6154       assert(res, "Should have space to push on empty stack");
6155       do {
6156         oop new_oop = _mark_stack->pop();
6157         assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
6158         assert(_bit_map->isMarked((HeapWord*)new_oop),
6159                "only grey objects on this stack");
6160         // iterate over the oops in this oop, marking and pushing
6161         // the ones in CMS heap (i.e. in _span).
6162         new_oop->oop_iterate(&_pushAndMarkClosure);
6163         // check if it's time to yield
6164         do_yield_check();
6165       } while (!_mark_stack->isEmpty() ||
6166                (!_concurrent_precleaning && take_from_overflow_list()));
6167         // if marking stack is empty, and we are not doing this
6168         // during precleaning, then check the overflow list
6169     }
6170     assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
6171     assert(_collector->overflow_list_is_empty(),
6172            "overflow list was drained above");
6173 
6174     assert(_collector->no_preserved_marks(),
6175            "All preserved marks should have been restored above");
6176   }
6177 }
6178 
6179 void MarkRefsIntoAndScanClosure::do_oop(oop* p)       { MarkRefsIntoAndScanClosure::do_oop_work(p); }
6180 void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); }
6181 
6182 void MarkRefsIntoAndScanClosure::do_yield_work() {
6183   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6184          "CMS thread should hold CMS token");
6185   assert_lock_strong(_freelistLock);
6186   assert_lock_strong(_bit_map->lock());
6187   // relinquish the free_list_lock and bitMaplock()
6188   _bit_map->lock()->unlock();
6189   _freelistLock->unlock();
6190   ConcurrentMarkSweepThread::desynchronize(true);
6191   _collector->stopTimer();
6192   if (PrintCMSStatistics != 0) {
6193     _collector->incrementYields();
6194   }
6195 
6196   // See the comment in coordinator_yield()
6197   for (unsigned i = 0;
6198        i < CMSYieldSleepCount &&
6199        ConcurrentMarkSweepThread::should_yield() &&
6200        !CMSCollector::foregroundGCIsActive();
6201        ++i) {
6202     os::sleep(Thread::current(), 1, false);
6203   }
6204 
6205   ConcurrentMarkSweepThread::synchronize(true);
6206   _freelistLock->lock_without_safepoint_check();
6207   _bit_map->lock()->lock_without_safepoint_check();
6208   _collector->startTimer();
6209 }
6210 
6211 ///////////////////////////////////////////////////////////
6212 // Par_MarkRefsIntoAndScanClosure: a parallel version of
6213 //                                 MarkRefsIntoAndScanClosure
6214 ///////////////////////////////////////////////////////////
6215 Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
6216   CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
6217   CMSBitMap* bit_map, OopTaskQueue* work_queue):
6218   _span(span),
6219   _bit_map(bit_map),
6220   _work_queue(work_queue),
6221   _low_water_mark(MIN2((work_queue->max_elems()/4),
6222                        ((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads))),
6223   _par_pushAndMarkClosure(collector, span, rp, bit_map, work_queue)
6224 {
6225   // FIXME: Should initialize in base class constructor.
6226   assert(rp != NULL, "ref_processor shouldn't be NULL");
6227   set_ref_processor_internal(rp);
6228 }
6229 
6230 // This closure is used to mark refs into the CMS generation at the
6231 // second (final) checkpoint, and to scan and transitively follow
6232 // the unmarked oops. The marks are made in the marking bit map and
6233 // the work_queue is used for keeping the (newly) grey objects during
6234 // the scan phase whence they are also available for stealing by parallel
6235 // threads. Since the marking bit map is shared, updates are
6236 // synchronized (via CAS).
6237 void Par_MarkRefsIntoAndScanClosure::do_oop(oop obj) {
6238   if (obj != NULL) {
6239     // Ignore mark word because this could be an already marked oop
6240     // that may be chained at the end of the overflow list.
6241     assert(obj->is_oop(true), "expected an oop");
6242     HeapWord* addr = (HeapWord*)obj;
6243     if (_span.contains(addr) &&
6244         !_bit_map->isMarked(addr)) {
6245       // mark bit map (object will become grey):
6246       // It is possible for several threads to be
6247       // trying to "claim" this object concurrently;
6248       // the unique thread that succeeds in marking the
6249       // object first will do the subsequent push on
6250       // to the work queue (or overflow list).
6251       if (_bit_map->par_mark(addr)) {
6252         // push on work_queue (which may not be empty), and trim the
6253         // queue to an appropriate length by applying this closure to
6254         // the oops in the oops popped from the stack (i.e. blacken the
6255         // grey objects)
6256         bool res = _work_queue->push(obj);
6257         assert(res, "Low water mark should be less than capacity?");
6258         trim_queue(_low_water_mark);
6259       } // Else, another thread claimed the object
6260     }
6261   }
6262 }
6263 
6264 void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p)       { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
6265 void Par_MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
6266 
6267 // This closure is used to rescan the marked objects on the dirty cards
6268 // in the mod union table and the card table proper.
6269 size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
6270   oop p, MemRegion mr) {
6271 
6272   size_t size = 0;
6273   HeapWord* addr = (HeapWord*)p;
6274   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6275   assert(_span.contains(addr), "we are scanning the CMS generation");
6276   // check if it's time to yield
6277   if (do_yield_check()) {
6278     // We yielded for some foreground stop-world work,
6279     // and we have been asked to abort this ongoing preclean cycle.
6280     return 0;
6281   }
6282   if (_bitMap->isMarked(addr)) {
6283     // it's marked; is it potentially uninitialized?
6284     if (p->klass_or_null() != NULL) {
6285       // an initialized object; ignore mark word in verification below
6286       // since we are running concurrent with mutators
6287       assert(p->is_oop(true), "should be an oop");
6288       if (p->is_objArray()) {
6289         // objArrays are precisely marked; restrict scanning
6290         // to dirty cards only.
6291         size = CompactibleFreeListSpace::adjustObjectSize(
6292                  p->oop_iterate_size(_scanningClosure, mr));
6293       } else {
6294         // A non-array may have been imprecisely marked; we need
6295         // to scan the object in its entirety.
6296         size = CompactibleFreeListSpace::adjustObjectSize(
6297                  p->oop_iterate_size(_scanningClosure));
6298       }
6299       #ifdef ASSERT
6300         size_t direct_size =
6301           CompactibleFreeListSpace::adjustObjectSize(p->size());
6302         assert(size == direct_size, "Inconsistency in size");
6303         assert(size >= 3, "Necessary for Printezis marks to work");
6304         if (!_bitMap->isMarked(addr+1)) {
6305           _bitMap->verifyNoOneBitsInRange(addr+2, addr+size);
6306         } else {
6307           _bitMap->verifyNoOneBitsInRange(addr+2, addr+size-1);
6308           assert(_bitMap->isMarked(addr+size-1),
6309                  "inconsistent Printezis mark");
6310         }
6311       #endif // ASSERT
6312     } else {
6313       // An uninitialized object.
6314       assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
6315       HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
6316       size = pointer_delta(nextOneAddr + 1, addr);
6317       assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
6318              "alignment problem");
6319       // Note that pre-cleaning needn't redirty the card. OopDesc::set_klass()
6320       // will dirty the card when the klass pointer is installed in the
6321       // object (signaling the completion of initialization).
6322     }
6323   } else {
6324     // Either a not yet marked object or an uninitialized object
6325     if (p->klass_or_null() == NULL) {
6326       // An uninitialized object, skip to the next card, since
6327       // we may not be able to read its P-bits yet.
6328       assert(size == 0, "Initial value");
6329     } else {
6330       // An object not (yet) reached by marking: we merely need to
6331       // compute its size so as to go look at the next block.
6332       assert(p->is_oop(true), "should be an oop");
6333       size = CompactibleFreeListSpace::adjustObjectSize(p->size());
6334     }
6335   }
6336   DEBUG_ONLY(_collector->verify_work_stacks_empty();)
6337   return size;
6338 }
6339 
6340 void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
6341   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
6342          "CMS thread should hold CMS token");
6343   assert_lock_strong(_freelistLock);
6344   assert_lock_strong(_bitMap->lock());
6345   // relinquish the free_list_lock and bitMaplock()
  _bitMap->lock()->unlock();
  _freelistLock->unlock();
  ConcurrentMarkSweepThread::desynchronize(true);
  _collector->stopTimer();
  if (PrintCMSStatistics != 0) {
    _collector->incrementYields();
  }

  // See the comment in coordinator_yield()
  for (unsigned i = 0; i < CMSYieldSleepCount &&
                   ConcurrentMarkSweepThread::should_yield() &&
                   !CMSCollector::foregroundGCIsActive(); ++i) {
    os::sleep(Thread::current(), 1, false);
  }

  ConcurrentMarkSweepThread::synchronize(true);
  _freelistLock->lock_without_safepoint_check();
  _bitMap->lock()->lock_without_safepoint_check();
  _collector->startTimer();
}


//////////////////////////////////////////////////////////////////
// SurvivorSpacePrecleanClosure
//////////////////////////////////////////////////////////////////
// This (single-threaded) closure is used to preclean the oops in
// the survivor spaces.
size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {

  HeapWord* addr = (HeapWord*)p;
  DEBUG_ONLY(_collector->verify_work_stacks_empty();)
  assert(!_span.contains(addr), "we are scanning the survivor spaces");
  assert(p->klass_or_null() != NULL, "object should be initialized");
  // an initialized object; ignore mark word in verification below
  // since we are running concurrent with mutators
  assert(p->is_oop(true), "should be an oop");
  // Note that we do not yield while we iterate over
  // the interior oops of p, pushing the relevant ones
  // on our marking stack.
  size_t size = p->oop_iterate_size(_scanning_closure);
  do_yield_check();
  // Observe that below, we do not abandon the preclean
  // phase as soon as we should; rather we empty the
  // marking stack before returning. This is to satisfy
  // some existing assertions. In general, it may be a
  // good idea to abort immediately and complete the marking
  // from the grey objects at a later time.
  while (!_mark_stack->isEmpty()) {
    oop new_oop = _mark_stack->pop();
    assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
    assert(_bit_map->isMarked((HeapWord*)new_oop),
           "only grey objects on this stack");
    // iterate over the oops in this oop, marking and pushing
    // the ones in CMS heap (i.e. in _span).
    new_oop->oop_iterate(_scanning_closure);
    // check if it's time to yield
    do_yield_check();
  }
  unsigned int after_count =
    GenCollectedHeap::heap()->total_collections();
  bool abort = (_before_count != after_count) ||
               _collector->should_abort_preclean();
  return abort ? 0 : size;
}

void SurvivorSpacePrecleanClosure::do_yield_work() {
  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "CMS thread should hold CMS token");
  assert_lock_strong(_bit_map->lock());
  // Relinquish the bit map lock
  _bit_map->lock()->unlock();
  ConcurrentMarkSweepThread::desynchronize(true);
  _collector->stopTimer();
  if (PrintCMSStatistics != 0) {
    _collector->incrementYields();
  }

  // See the comment in coordinator_yield()
  for (unsigned i = 0; i < CMSYieldSleepCount &&
                       ConcurrentMarkSweepThread::should_yield() &&
                       !CMSCollector::foregroundGCIsActive(); ++i) {
    os::sleep(Thread::current(), 1, false);
  }

  ConcurrentMarkSweepThread::synchronize(true);
  _bit_map->lock()->lock_without_safepoint_check();
  _collector->startTimer();
}

// This closure is used to rescan the marked objects on the dirty cards
// in the mod union table and the card table proper. In the parallel
// case, although the bitMap is shared, we do a single read so the
// isMarked() query is "safe".
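// (A minimal sketch of that idiom, with hypothetical names: read the
// shared bit once into a local and branch on the copy --
//
//   const bool marked = _bit_map->isMarked(addr);  // single racy read
//   if (marked) { ... }                            // all uses agree
//
// -- so concurrent updates to the shared bit map cannot make two
// queries of the same bit disagree within one invocation.)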
bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
  // Ignore mark word because we are running concurrent with mutators
  assert(p->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(p));
  HeapWord* addr = (HeapWord*)p;
  assert(_span.contains(addr), "we are scanning the CMS generation");
  bool is_obj_array = false;
  #ifdef ASSERT
    if (!_parallel) {
      assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
      assert(_collector->overflow_list_is_empty(),
             "overflow list should be empty");
    }
  #endif // ASSERT
  if (_bit_map->isMarked(addr)) {
    // Obj arrays are precisely marked, non-arrays are not;
    // so we scan objArrays precisely and non-arrays in their
    // entirety.
    if (p->is_objArray()) {
      is_obj_array = true;
      if (_parallel) {
        p->oop_iterate(_par_scan_closure, mr);
      } else {
        p->oop_iterate(_scan_closure, mr);
      }
    } else {
      if (_parallel) {
        p->oop_iterate(_par_scan_closure);
      } else {
        p->oop_iterate(_scan_closure);
      }
    }
  }
  #ifdef ASSERT
    if (!_parallel) {
      assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
      assert(_collector->overflow_list_is_empty(),
             "overflow list should be empty");
    }
  #endif // ASSERT
  return is_obj_array;
}

MarkFromRootsClosure::MarkFromRootsClosure(CMSCollector* collector,
                        MemRegion span,
                        CMSBitMap* bitMap, CMSMarkStack*  markStack,
                        bool should_yield, bool verifying):
  _collector(collector),
  _span(span),
  _bitMap(bitMap),
  _mut(&collector->_modUnionTable),
  _markStack(markStack),
  _yield(should_yield),
  _skipBits(0)
{
  assert(_markStack->isEmpty(), "stack should be empty");
  _finger = _bitMap->startWord();
  _threshold = _finger;
  assert(_collector->_restart_addr == NULL, "Sanity check");
  assert(_span.contains(_finger), "Out of bounds _finger?");
  DEBUG_ONLY(_verifying = verifying;)
}

void MarkFromRootsClosure::reset(HeapWord* addr) {
  assert(_markStack->isEmpty(), "would cause duplicates on stack");
  assert(_span.contains(addr), "Out of bounds _finger?");
  _finger = addr;
  _threshold = (HeapWord*)round_to(
                 (intptr_t)_finger, CardTableModRefBS::card_size);
}

// Should revisit to see if this should be restructured for
// greater efficiency.
bool MarkFromRootsClosure::do_bit(size_t offset) {
  if (_skipBits > 0) {
    _skipBits--;
    return true;
  }
  // convert offset into a HeapWord*
  HeapWord* addr = _bitMap->startWord() + offset;
  assert(addr >= _bitMap->startWord() && addr < _bitMap->endWord(),
         "address out of range");
  assert(_bitMap->isMarked(addr), "tautology");
  if (_bitMap->isMarked(addr+1)) {
    // this is an allocated but not yet initialized object
    assert(_skipBits == 0, "tautology");
    _skipBits = 2;  // skip next two marked bits ("Printezis-marks")
    oop p = oop(addr);
    if (p->klass_or_null() == NULL) {
      DEBUG_ONLY(if (!_verifying) {)
        // We re-dirty the cards on which this object lies and increase
        // the _threshold so that we'll come back to scan this object
        // during the preclean or remark phase. (CMSCleanOnEnter)
        if (CMSCleanOnEnter) {
          size_t sz = _collector->block_size_using_printezis_bits(addr);
          HeapWord* end_card_addr = (HeapWord*)round_to(
                                         (intptr_t)(addr+sz), CardTableModRefBS::card_size);
          MemRegion redirty_range = MemRegion(addr, end_card_addr);
          assert(!redirty_range.is_empty(), "Arithmetical tautology");
          // Bump _threshold to end_card_addr; note that
          // _threshold cannot possibly exceed end_card_addr, anyhow.
          // This prevents future clearing of the card as the scan proceeds
          // to the right.
          assert(_threshold <= end_card_addr,
                 "Because we are just scanning into this object");
          if (_threshold < end_card_addr) {
            _threshold = end_card_addr;
          }
          if (p->klass_or_null() != NULL) {
            // Redirty the range of cards...
            _mut->mark_range(redirty_range);
          } // ...else the setting of klass will dirty the card anyway.
        }
      DEBUG_ONLY(})
      return true;
    }
  }
  scanOopsInOop(addr);
  return true;
}

// We take a break if we've been at this for a while,
// so as to avoid monopolizing the locks involved.
void MarkFromRootsClosure::do_yield_work() {
  // First give up the locks, then yield, then re-lock
  // We should probably use a constructor/destructor idiom to
  // do this unlock/lock or modify the MutexUnlocker class to
  // serve our purpose. XXX
  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "CMS thread should hold CMS token");
  assert_lock_strong(_bitMap->lock());
  _bitMap->lock()->unlock();
  ConcurrentMarkSweepThread::desynchronize(true);
  _collector->stopTimer();
  if (PrintCMSStatistics != 0) {
    _collector->incrementYields();
  }

  // See the comment in coordinator_yield()
  for (unsigned i = 0; i < CMSYieldSleepCount &&
                       ConcurrentMarkSweepThread::should_yield() &&
                       !CMSCollector::foregroundGCIsActive(); ++i) {
    os::sleep(Thread::current(), 1, false);
  }

  ConcurrentMarkSweepThread::synchronize(true);
  _bitMap->lock()->lock_without_safepoint_check();
  _collector->startTimer();
}
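
// A hedged sketch of the constructor/destructor idiom suggested by the
// XXX note in do_yield_work() above (hypothetical helper, not part of
// this file): unlock in the constructor and re-lock in the destructor,
// so the unlocked region is delimited by a scope rather than by
// hand-paired unlock()/lock() calls, and early returns still restore
// the lock.
//
//   class ReverseMutexLockerEx : public StackObj {
//     Mutex* const _mutex;
//    public:
//     ReverseMutexLockerEx(Mutex* m) : _mutex(m) { _mutex->unlock(); }
//     ~ReverseMutexLockerEx() { _mutex->lock_without_safepoint_check(); }
//   };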

void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
  assert(_bitMap->isMarked(ptr), "expected bit to be set");
  assert(_markStack->isEmpty(),
         "should drain stack to limit stack usage");
  // convert ptr to an oop preparatory to scanning
  oop obj = oop(ptr);
  // Ignore mark word in verification below, since we
  // may be running concurrent with mutators.
  assert(obj->is_oop(true), "should be an oop");
  assert(_finger <= ptr, "_finger runneth ahead");
  // advance the finger to right end of this object
  _finger = ptr + obj->size();
  assert(_finger > ptr, "we just incremented it above");
  // On large heaps, it may take us some time to get through the
  // marking phase. During this time it's possible that a lot of
  // mutations have accumulated in the card table and the mod union
  // table -- these mutation records are redundant until we have
  // actually traced into the corresponding card. Here, we check
  // whether advancing the finger would make us cross into a new
  // card, and if so clear corresponding cards in the MUT (preclean
  // them in the card-table in the future).

  DEBUG_ONLY(if (!_verifying) {)
    // The clean-on-enter optimization is disabled by default,
    // until we fix 6178663.
    if (CMSCleanOnEnter && (_finger > _threshold)) {
      // [_threshold, _finger) represents the interval
      // of cards to be cleared in MUT (or precleaned in card table).
      // The set of cards to be cleared is all those that overlap
      // with the interval [_threshold, _finger); note that
      // _threshold is always kept card-aligned but _finger isn't
      // always card-aligned.
      HeapWord* old_threshold = _threshold;
      assert(old_threshold == (HeapWord*)round_to(
              (intptr_t)old_threshold, CardTableModRefBS::card_size),
             "_threshold should always be card-aligned");
      _threshold = (HeapWord*)round_to(
                     (intptr_t)_finger, CardTableModRefBS::card_size);
      MemRegion mr(old_threshold, _threshold);
      assert(!mr.is_empty(), "Control point invariant");
      assert(_span.contains(mr), "Should clear within span");
      _mut->clear_range(mr);
    }
  DEBUG_ONLY(})
  // Note: the finger doesn't advance while we drain
  // the stack below.
  PushOrMarkClosure pushOrMarkClosure(_collector,
                                      _span, _bitMap, _markStack,
                                      _finger, this);
  bool res = _markStack->push(obj);
  assert(res, "Empty non-zero size stack should have space for single push");
  while (!_markStack->isEmpty()) {
    oop new_oop = _markStack->pop();
    // Skip verifying header mark word below because we are
    // running concurrent with mutators.
    assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
    // now scan this oop's oops
    new_oop->oop_iterate(&pushOrMarkClosure);
    do_yield_check();
  }
  assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
}

Par_MarkFromRootsClosure::Par_MarkFromRootsClosure(CMSConcMarkingTask* task,
                       CMSCollector* collector, MemRegion span,
                       CMSBitMap* bit_map,
                       OopTaskQueue* work_queue,
                       CMSMarkStack*  overflow_stack):
  _collector(collector),
  _whole_span(collector->_span),
  _span(span),
  _bit_map(bit_map),
  _mut(&collector->_modUnionTable),
  _work_queue(work_queue),
  _overflow_stack(overflow_stack),
  _skip_bits(0),
  _task(task)
{
  assert(_work_queue->size() == 0, "work_queue should be empty");
  _finger = span.start();
  _threshold = _finger;     // XXX Defer clear-on-enter optimization for now
  assert(_span.contains(_finger), "Out of bounds _finger?");
}

// Should revisit to see if this should be restructured for
// greater efficiency.
bool Par_MarkFromRootsClosure::do_bit(size_t offset) {
  if (_skip_bits > 0) {
    _skip_bits--;
    return true;
  }
  // convert offset into a HeapWord*
  HeapWord* addr = _bit_map->startWord() + offset;
  assert(addr >= _bit_map->startWord() && addr < _bit_map->endWord(),
         "address out of range");
  assert(_bit_map->isMarked(addr), "tautology");
  if (_bit_map->isMarked(addr+1)) {
    // this is an allocated object that might not yet be initialized
    assert(_skip_bits == 0, "tautology");
    _skip_bits = 2;  // skip next two marked bits ("Printezis-marks")
    oop p = oop(addr);
    if (p->klass_or_null() == NULL) {
      // In the case of the clean-on-enter optimization, redirty the card
      // and avoid clearing the card by increasing the threshold.
      return true;
    }
  }
  scan_oops_in_oop(addr);
  return true;
}

void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
  assert(_bit_map->isMarked(ptr), "expected bit to be set");
  // Should we assert that our work queue is empty or
  // below some drain limit?
  assert(_work_queue->size() == 0,
         "should drain stack to limit stack usage");
  // convert ptr to an oop preparatory to scanning
  oop obj = oop(ptr);
  // Ignore mark word in verification below, since we
  // may be running concurrent with mutators.
  assert(obj->is_oop(true), "should be an oop");
  assert(_finger <= ptr, "_finger runneth ahead");
  // advance the finger to right end of this object
  _finger = ptr + obj->size();
  assert(_finger > ptr, "we just incremented it above");
  // On large heaps, it may take us some time to get through the
  // marking phase. During this time it's possible that a lot of
  // mutations have accumulated in the card table and the mod union
  // table -- these mutation records are redundant until we have
  // actually traced into the corresponding card. Here, we check
  // whether advancing the finger would make us cross into a new
  // card, and if so clear corresponding cards in the MUT (preclean
  // them in the card-table in the future).

  // The clean-on-enter optimization is disabled by default,
  // until we fix 6178663.
  if (CMSCleanOnEnter && (_finger > _threshold)) {
    // [_threshold, _finger) represents the interval
    // of cards to be cleared in MUT (or precleaned in card table).
    // The set of cards to be cleared is all those that overlap
    // with the interval [_threshold, _finger); note that
    // _threshold is always kept card-aligned but _finger isn't
    // always card-aligned.
    HeapWord* old_threshold = _threshold;
    assert(old_threshold == (HeapWord*)round_to(
            (intptr_t)old_threshold, CardTableModRefBS::card_size),
           "_threshold should always be card-aligned");
    _threshold = (HeapWord*)round_to(
                   (intptr_t)_finger, CardTableModRefBS::card_size);
    MemRegion mr(old_threshold, _threshold);
    assert(!mr.is_empty(), "Control point invariant");
    assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
    _mut->clear_range(mr);
  }

  // Note: the local finger doesn't advance while we drain
  // the stack below, but the global finger sure can and will.
  HeapWord** gfa = _task->global_finger_addr();
  Par_PushOrMarkClosure pushOrMarkClosure(_collector,
                                      _span, _bit_map,
                                      _work_queue,
                                      _overflow_stack,
                                      _finger,
                                      gfa, this);
  bool res = _work_queue->push(obj);   // overflow could occur here
  assert(res, "Will hold once we use workqueues");
  while (true) {
    oop new_oop;
    if (!_work_queue->pop_local(new_oop)) {
      // We emptied our work_queue; check if there's stuff that can
      // be gotten from the overflow stack.
      if (CMSConcMarkingTask::get_work_from_overflow_stack(
            _overflow_stack, _work_queue)) {
        do_yield_check();
        continue;
      } else {  // done
        break;
      }
    }
    // Skip verifying header mark word below because we are
    // running concurrent with mutators.
    assert(new_oop->is_oop(true), "Oops! expected to pop an oop");
    // now scan this oop's oops
    new_oop->oop_iterate(&pushOrMarkClosure);
    do_yield_check();
  }
  assert(_work_queue->size() == 0, "tautology, emphasizing post-condition");
}

// Yield in response to a request from VM Thread or
// from mutators.
void Par_MarkFromRootsClosure::do_yield_work() {
  assert(_task != NULL, "sanity");
  _task->yield();
}

// A variant of the above used for verifying CMS marking work.
MarkFromRootsVerifyClosure::MarkFromRootsVerifyClosure(CMSCollector* collector,
                        MemRegion span,
                        CMSBitMap* verification_bm, CMSBitMap* cms_bm,
                        CMSMarkStack*  mark_stack):
  _collector(collector),
  _span(span),
  _verification_bm(verification_bm),
  _cms_bm(cms_bm),
  _mark_stack(mark_stack),
  _pam_verify_closure(collector, span, verification_bm, cms_bm,
                      mark_stack)
{
  assert(_mark_stack->isEmpty(), "stack should be empty");
  _finger = _verification_bm->startWord();
  assert(_collector->_restart_addr == NULL, "Sanity check");
  assert(_span.contains(_finger), "Out of bounds _finger?");
}

void MarkFromRootsVerifyClosure::reset(HeapWord* addr) {
  assert(_mark_stack->isEmpty(), "would cause duplicates on stack");
  assert(_span.contains(addr), "Out of bounds _finger?");
  _finger = addr;
}

// Should revisit to see if this should be restructured for
// greater efficiency.
bool MarkFromRootsVerifyClosure::do_bit(size_t offset) {
  // convert offset into a HeapWord*
  HeapWord* addr = _verification_bm->startWord() + offset;
  assert(addr >= _verification_bm->startWord() && addr < _verification_bm->endWord(),
         "address out of range");
  assert(_verification_bm->isMarked(addr), "tautology");
  assert(_cms_bm->isMarked(addr), "tautology");

  assert(_mark_stack->isEmpty(),
         "should drain stack to limit stack usage");
  // convert addr to an oop preparatory to scanning
  oop obj = oop(addr);
  assert(obj->is_oop(), "should be an oop");
  assert(_finger <= addr, "_finger runneth ahead");
  // advance the finger to right end of this object
  _finger = addr + obj->size();
  assert(_finger > addr, "we just incremented it above");
  // Note: the finger doesn't advance while we drain
  // the stack below.
  bool res = _mark_stack->push(obj);
  assert(res, "Empty non-zero size stack should have space for single push");
  while (!_mark_stack->isEmpty()) {
    oop new_oop = _mark_stack->pop();
    assert(new_oop->is_oop(), "Oops! expected to pop an oop");
    // now scan this oop's oops
    new_oop->oop_iterate(&_pam_verify_closure);
  }
  assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
  return true;
}

PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
  CMSCollector* collector, MemRegion span,
  CMSBitMap* verification_bm, CMSBitMap* cms_bm,
  CMSMarkStack*  mark_stack):
  MetadataAwareOopClosure(collector->ref_processor()),
  _collector(collector),
  _span(span),
  _verification_bm(verification_bm),
  _cms_bm(cms_bm),
  _mark_stack(mark_stack)
{ }

void PushAndMarkVerifyClosure::do_oop(oop* p)       { PushAndMarkVerifyClosure::do_oop_work(p); }
void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }

// Upon stack overflow, we discard (part of) the stack,
// remembering the least address amongst those discarded
// in CMSCollector's _restart_address.
void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
  // Remember the least grey address discarded
  HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
  _collector->lower_restart_addr(ra);
  _mark_stack->reset();  // discard stack contents
  _mark_stack->expand(); // expand the stack if possible
}

void PushAndMarkVerifyClosure::do_oop(oop obj) {
  assert(obj->is_oop_or_null(), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
  HeapWord* addr = (HeapWord*)obj;
  if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
    // Oop lies in _span and isn't yet grey or black
    _verification_bm->mark(addr);            // now grey
    if (!_cms_bm->isMarked(addr)) {
      oop(addr)->print();
      gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)",
                             p2i(addr));
      fatal("... aborting");
    }

    if (!_mark_stack->push(obj)) { // stack overflow
      if (PrintCMSStatistics != 0) {
        gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
                               SIZE_FORMAT, _mark_stack->capacity());
      }
      assert(_mark_stack->isFull(), "Else push should have succeeded");
      handle_stack_overflow(addr);
    }
    // anything including and to the right of _finger
    // will be scanned as we iterate over the remainder of the
    // bit map
  }
}

PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
                     MemRegion span,
                     CMSBitMap* bitMap, CMSMarkStack*  markStack,
                     HeapWord* finger, MarkFromRootsClosure* parent) :
  MetadataAwareOopClosure(collector->ref_processor()),
  _collector(collector),
  _span(span),
  _bitMap(bitMap),
  _markStack(markStack),
  _finger(finger),
  _parent(parent)
{ }

Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
                     MemRegion span,
                     CMSBitMap* bit_map,
                     OopTaskQueue* work_queue,
                     CMSMarkStack*  overflow_stack,
                     HeapWord* finger,
                     HeapWord** global_finger_addr,
                     Par_MarkFromRootsClosure* parent) :
  MetadataAwareOopClosure(collector->ref_processor()),
  _collector(collector),
  _whole_span(collector->_span),
  _span(span),
  _bit_map(bit_map),
  _work_queue(work_queue),
  _overflow_stack(overflow_stack),
  _finger(finger),
  _global_finger_addr(global_finger_addr),
  _parent(parent)
{ }

// Assumes thread-safe access by callers, who are
// responsible for mutual exclusion.
void CMSCollector::lower_restart_addr(HeapWord* low) {
  assert(_span.contains(low), "Out of bounds addr");
  if (_restart_addr == NULL) {
    _restart_addr = low;
  } else {
    _restart_addr = MIN2(_restart_addr, low);
  }
}

// Upon stack overflow, we discard (part of) the stack,
// remembering the least address amongst those discarded
// in CMSCollector's _restart_address.
void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
  // Remember the least grey address discarded
  HeapWord* ra = (HeapWord*)_markStack->least_value(lost);
  _collector->lower_restart_addr(ra);
  _markStack->reset();  // discard stack contents
  _markStack->expand(); // expand the stack if possible
}

// Upon stack overflow, we discard (part of) the stack,
// remembering the least address amongst those discarded
// in CMSCollector's _restart_address.
void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
  // We need to do this under a mutex to prevent other
  // workers from interfering with the work done below.
  MutexLockerEx ml(_overflow_stack->par_lock(),
                   Mutex::_no_safepoint_check_flag);
  // Remember the least grey address discarded
  HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
  _collector->lower_restart_addr(ra);
  _overflow_stack->reset();  // discard stack contents
  _overflow_stack->expand(); // expand the stack if possible
}

void PushOrMarkClosure::do_oop(oop obj) {
  // Ignore mark word because we are running concurrent with mutators.
  assert(obj->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
  HeapWord* addr = (HeapWord*)obj;
  if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
    // Oop lies in _span and isn't yet grey or black
    _bitMap->mark(addr);            // now grey
    if (addr < _finger) {
      // the bit map iteration has already either passed, or
      // sampled, this bit in the bit map; we'll need to
      // use the marking stack to scan this oop's oops.
      bool simulate_overflow = false;
      NOT_PRODUCT(
        if (CMSMarkStackOverflowALot &&
            _collector->simulate_overflow()) {
          // simulate a stack overflow
          simulate_overflow = true;
        }
      )
      if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
        if (PrintCMSStatistics != 0) {
          gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
                                 SIZE_FORMAT, _markStack->capacity());
        }
        assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
        handle_stack_overflow(addr);
      }
    }
    // anything including and to the right of _finger
    // will be scanned as we iterate over the remainder of the
    // bit map
    do_yield_check();
  }
}

void PushOrMarkClosure::do_oop(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }

void Par_PushOrMarkClosure::do_oop(oop obj) {
  // Ignore mark word because we are running concurrent with mutators.
  assert(obj->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
  HeapWord* addr = (HeapWord*)obj;
  if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
    // Oop lies in _whole_span and isn't yet grey or black
    // We read the global_finger (volatile read) strictly after marking oop
    bool res = _bit_map->par_mark(addr);    // now grey
    volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
    // Should we push this marked oop on our stack?
    // -- if someone else marked it, nothing to do
    // -- if target oop is above global finger nothing to do
    // -- if target oop is in chunk and above local finger
    //      then nothing to do
    // -- else push on work queue
    if (   !res       // someone else marked it, they will deal with it
        || (addr >= *gfa)  // will be scanned in a later task
        || (_span.contains(addr) && addr >= _finger)) { // later in this chunk
      return;
    }
    // the bit map iteration has already either passed, or
    // sampled, this bit in the bit map; we'll need to
    // use the marking stack to scan this oop's oops.
    bool simulate_overflow = false;
    NOT_PRODUCT(
      if (CMSMarkStackOverflowALot &&
          _collector->simulate_overflow()) {
        // simulate a stack overflow
        simulate_overflow = true;
      }
    )
    if (simulate_overflow ||
        !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
      // stack overflow
      if (PrintCMSStatistics != 0) {
        gclog_or_tty->print_cr("CMS marking stack overflow (benign) at "
                               SIZE_FORMAT, _overflow_stack->capacity());
      }
      // We cannot assert that the overflow stack is full because
      // it may have been emptied since.
      assert(simulate_overflow ||
             _work_queue->size() == _work_queue->max_elems(),
             "Else push should have succeeded");
      handle_stack_overflow(addr);
    }
    do_yield_check();
  }
}

void Par_PushOrMarkClosure::do_oop(oop* p)       { Par_PushOrMarkClosure::do_oop_work(p); }
void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }

PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
                                       MemRegion span,
                                       ReferenceProcessor* rp,
                                       CMSBitMap* bit_map,
                                       CMSBitMap* mod_union_table,
                                       CMSMarkStack*  mark_stack,
                                       bool           concurrent_precleaning):
  MetadataAwareOopClosure(rp),
  _collector(collector),
  _span(span),
  _bit_map(bit_map),
  _mod_union_table(mod_union_table),
  _mark_stack(mark_stack),
  _concurrent_precleaning(concurrent_precleaning)
{
  assert(ref_processor() != NULL, "ref_processor shouldn't be NULL");
}

// Grey object rescan during pre-cleaning and second checkpoint phases --
// the non-parallel version (the parallel version appears further below.)
void PushAndMarkClosure::do_oop(oop obj) {
  // Ignore the mark word in verification: during concurrent precleaning
  // the object's monitor may be locked, and during the checkpoint phases
  // the object may already have been reached by a different path and may
  // be at the end of the global overflow list (so the mark word may be
  // NULL).
  assert(obj->is_oop_or_null(true /* ignore mark word */),
         "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
  HeapWord* addr = (HeapWord*)obj;
  // Check if oop points into the CMS generation
  // and is not marked
  if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
    // a white object ...
    _bit_map->mark(addr);         // ... now grey
    // push on the marking stack (grey set)
    bool simulate_overflow = false;
    NOT_PRODUCT(
      if (CMSMarkStackOverflowALot &&
          _collector->simulate_overflow()) {
        // simulate a stack overflow
        simulate_overflow = true;
      }
    )
    if (simulate_overflow || !_mark_stack->push(obj)) {
      if (_concurrent_precleaning) {
         // During precleaning we can just dirty the appropriate card(s)
         // in the mod union table, thus ensuring that the object remains
         // in the grey set, and continue. In the case of object arrays,
         // we need to dirty all of the cards that the object spans,
         // since the rescan of object arrays will be limited to the
         // dirty cards.
         // Note that no one can be interfering with us in this action
         // of dirtying the mod union table, so no locking or atomics
         // are required.
         if (obj->is_objArray()) {
           size_t sz = obj->size();
           HeapWord* end_card_addr = (HeapWord*)round_to(
                                        (intptr_t)(addr+sz), CardTableModRefBS::card_size);
           MemRegion redirty_range = MemRegion(addr, end_card_addr);
           assert(!redirty_range.is_empty(), "Arithmetical tautology");
           _mod_union_table->mark_range(redirty_range);
         } else {
           _mod_union_table->mark(addr);
         }
         _collector->_ser_pmc_preclean_ovflw++;
      } else {
         // During the remark phase, we need to remember this oop
         // in the overflow list.
         _collector->push_on_overflow_list(obj);
         _collector->_ser_pmc_remark_ovflw++;
      }
    }
  }
}

Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
                                               MemRegion span,
                                               ReferenceProcessor* rp,
                                               CMSBitMap* bit_map,
                                               OopTaskQueue* work_queue):
  MetadataAwareOopClosure(rp),
  _collector(collector),
  _span(span),
  _bit_map(bit_map),
  _work_queue(work_queue)
{
  assert(ref_processor() != NULL, "ref_processor shouldn't be NULL");
}

void PushAndMarkClosure::do_oop(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }

// Grey object rescan during second checkpoint phase --
// the parallel version.
void Par_PushAndMarkClosure::do_oop(oop obj) {
  // In the assert below, we ignore the mark word because
  // this oop may point to an already visited object that is
  // on the overflow stack (in which case the mark word has
  // been hijacked for chaining into the overflow stack --
  // if this is the last object in the overflow stack then
  // its mark word will be NULL). Because this object may
  // have been subsequently popped off the global overflow
  // stack, and the mark word possibly restored to the prototypical
  // value, so by the time we get to examine this failing assert in
  // the debugger, is_oop_or_null(false) may already hold.
  assert(obj->is_oop_or_null(true),
         "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
  HeapWord* addr = (HeapWord*)obj;
  // Check if oop points into the CMS generation
  // and is not marked
  if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
    // a white object ...
    // If we manage to "claim" the object, by being the
    // first thread to mark it, then we push it on our
    // marking stack
    if (_bit_map->par_mark(addr)) {     // ... now grey
      // push on work queue (grey set)
      bool simulate_overflow = false;
      NOT_PRODUCT(
        if (CMSMarkStackOverflowALot &&
            _collector->par_simulate_overflow()) {
          // simulate a stack overflow
          simulate_overflow = true;
        }
      )
      if (simulate_overflow || !_work_queue->push(obj)) {
        _collector->par_push_on_overflow_list(obj);
        _collector->_par_pmc_remark_ovflw++; // imprecise OK: no need to CAS
      }
    } // Else, some other thread got there first
  }
}

void Par_PushAndMarkClosure::do_oop(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }

void CMSPrecleanRefsYieldClosure::do_yield_work() {
  Mutex* bml = _collector->bitMapLock();
  assert_lock_strong(bml);
  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "CMS thread should hold CMS token");

  bml->unlock();
  ConcurrentMarkSweepThread::desynchronize(true);

  _collector->stopTimer();
  if (PrintCMSStatistics != 0) {
    _collector->incrementYields();
  }

  // See the comment in coordinator_yield()
  for (unsigned i = 0; i < CMSYieldSleepCount &&
                       ConcurrentMarkSweepThread::should_yield() &&
                       !CMSCollector::foregroundGCIsActive(); ++i) {
    os::sleep(Thread::current(), 1, false);
  }

  ConcurrentMarkSweepThread::synchronize(true);
  bml->lock();

  _collector->startTimer();
}

bool CMSPrecleanRefsYieldClosure::should_return() {
  if (ConcurrentMarkSweepThread::should_yield()) {
    do_yield_work();
  }
  return _collector->foregroundGCIsActive();
}

void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
  assert(((size_t)mr.start())%CardTableModRefBS::card_size_in_words == 0,
         "mr should be aligned to start at a card boundary");
  // We'd like to assert:
  // assert(mr.word_size()%CardTableModRefBS::card_size_in_words == 0,
  //        "mr should be a range of cards");
  // However, that would be too strong in one case -- the last
  // partition ends at _unallocated_block which, in general, can be
  // an arbitrary boundary, not necessarily card aligned.
  if (PrintCMSStatistics != 0) {
    _num_dirty_cards +=
         mr.word_size()/CardTableModRefBS::card_size_in_words;
  }
  _space->object_iterate_mem(mr, &_scan_cl);
}

SweepClosure::SweepClosure(CMSCollector* collector,
                           ConcurrentMarkSweepGeneration* g,
                           CMSBitMap* bitMap, bool should_yield) :
  _collector(collector),
  _g(g),
  _sp(g->cmsSpace()),
  _limit(_sp->sweep_limit()),
  _freelistLock(_sp->freelistLock()),
  _bitMap(bitMap),
  _yield(should_yield),
  _inFreeRange(false),           // No free range at beginning of sweep
  _freeRangeInFreeLists(false),  // No free range at beginning of sweep
  _lastFreeRangeCoalesced(false),
  _freeFinger(g->used_region().start())
{
  NOT_PRODUCT(
    _numObjectsFreed = 0;
    _numWordsFreed   = 0;
    _numObjectsLive = 0;
    _numWordsLive = 0;
    _numObjectsAlreadyFree = 0;
    _numWordsAlreadyFree = 0;
    _last_fc = NULL;

    _sp->initializeIndexedFreeListArrayReturnedBytes();
    _sp->dictionary()->initialize_dict_returned_bytes();
  )
  assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
         "sweep _limit out of bounds");
  if (CMSTraceSweeper) {
    gclog_or_tty->print_cr("\n====================\nStarting new sweep with limit " PTR_FORMAT,
                        p2i(_limit));
  }
}

void SweepClosure::print_on(outputStream* st) const {
  st->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")",
               p2i(_sp->bottom()), p2i(_sp->end()));
  st->print_cr("_limit = " PTR_FORMAT, p2i(_limit));
  st->print_cr("_freeFinger = " PTR_FORMAT, p2i(_freeFinger));
  NOT_PRODUCT(st->print_cr("_last_fc = " PTR_FORMAT, p2i(_last_fc));)
  st->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d",
               _inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced);
}

#ifndef PRODUCT
// Assertion checking only: no useful work in product mode --
// however, if any of the flags below become product flags,
// you may need to review this code to see if it needs to be
// enabled in product mode.
SweepClosure::~SweepClosure() {
  assert_lock_strong(_freelistLock);
  assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
         "sweep _limit out of bounds");
  if (inFreeRange()) {
    warning("inFreeRange() should have been reset; dumping state of SweepClosure");
    print();
    ShouldNotReachHere();
  }
  if (Verbose && PrintGC) {
    gclog_or_tty->print("Collected " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
                        _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
    gclog_or_tty->print_cr("\nLive " SIZE_FORMAT " objects,  "
                           SIZE_FORMAT " bytes  "
      "Already free " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
      _numObjectsLive, _numWordsLive*sizeof(HeapWord),
      _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
    size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree)
                        * sizeof(HeapWord);
    gclog_or_tty->print_cr("Total sweep: " SIZE_FORMAT " bytes", totalBytes);

    if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
      size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
      size_t dict_returned_bytes = _sp->dictionary()->sum_dict_returned_bytes();
      size_t returned_bytes = indexListReturnedBytes + dict_returned_bytes;
      gclog_or_tty->print("Returned " SIZE_FORMAT " bytes", returned_bytes);
      gclog_or_tty->print("   Indexed List Returned " SIZE_FORMAT " bytes",
        indexListReturnedBytes);
      gclog_or_tty->print_cr("        Dictionary Returned " SIZE_FORMAT " bytes",
        dict_returned_bytes);
    }
  }
  if (CMSTraceSweeper) {
    gclog_or_tty->print_cr("end of sweep with _limit = " PTR_FORMAT "\n================",
                           p2i(_limit));
  }
}
#endif  // PRODUCT

void SweepClosure::initialize_free_range(HeapWord* freeFinger,
    bool freeRangeInFreeLists) {
  if (CMSTraceSweeper) {
    gclog_or_tty->print("---- Start free range at " PTR_FORMAT " with free block (%d)\n",
               p2i(freeFinger), freeRangeInFreeLists);
  }
  assert(!inFreeRange(), "Trampling existing free range");
  set_inFreeRange(true);
  set_lastFreeRangeCoalesced(false);

  set_freeFinger(freeFinger);
  set_freeRangeInFreeLists(freeRangeInFreeLists);
  if (CMSTestInFreeList) {
    if (freeRangeInFreeLists) {
      FreeChunk* fc = (FreeChunk*) freeFinger;
      assert(fc->is_free(), "A chunk on the free list should be free.");
      assert(fc->size() > 0, "Free range should have a size");
      assert(_sp->verify_chunk_in_free_list(fc), "Chunk is not in free lists");
    }
  }
}

// Note that the sweeper runs concurrently with mutators. Thus,
// it is possible for direct allocation in this generation to happen
// in the middle of the sweep. Note that the sweeper also coalesces
// contiguous free blocks. Thus, unless the sweeper and the allocator
// synchronize appropriately, freshly allocated blocks may get swept up.
// This is accomplished by the sweeper locking the free lists while
// it is sweeping. Thus blocks that are determined to be free are
// indeed free. There is however one additional complication:
// blocks that have been allocated since the final checkpoint and
// mark will not have been marked, and so would be treated as
// unreachable and swept up. To prevent this, the allocator marks
// the bit map when allocating during the sweep phase. This leads,
// however, to a further complication -- objects may have been allocated
// but not yet initialized -- in the sense that the header isn't yet
// installed. The sweeper cannot then determine the size of the block
// in order to skip over it. To deal with this case, we use a technique
// (due to Printezis) to encode such uninitialized block sizes in the
// bit map. Since the bit map uses a bit per every HeapWord, but the
// CMS generation has a minimum object size of 3 HeapWords, it follows
// that "normal marks" won't be adjacent in the bit map (there will
// always be at least two 0 bits between successive 1 bits). We make use
// of these "unused" bits to represent uninitialized blocks -- the bit
// corresponding to the start of the uninitialized object and the next
// bit are both set. Finally, a 1 bit marks the end of the object that
// started with the two consecutive 1 bits to indicate its potentially
// uninitialized state.
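//
// For instance (a worked example of the encoding above, with made-up
// numbers): an uninitialized block of 5 HeapWords whose first word
// corresponds to bit index k would appear in the bit map as
//
//   bit index: k  k+1  k+2  k+3  k+4
//   bit value: 1   1    0    0    1
//
// The pair of set bits at k and k+1 says "an uninitialized block starts
// here", and the trailing set bit at k+4 marks the block's last word,
// so the size can be recovered without reading the (absent) header,
// essentially as do_live_chunk() below does:
//
//   HeapWord* last = _bitMap->getNextMarkedWordAddress(addr + 2);
//   size_t size = pointer_delta(last + 1, addr);   // == 5 in this example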

size_t SweepClosure::do_blk_careful(HeapWord* addr) {
  FreeChunk* fc = (FreeChunk*)addr;
  size_t res;

  // Check if we are done sweeping. Below we check "addr >= _limit" rather
  // than "addr == _limit" because although _limit was a block boundary when
  // we started the sweep, it may no longer be one because heap expansion
  // may have caused us to coalesce the block ending at the address _limit
  // with a newly expanded chunk (this happens when _limit was set to the
  // previous _end of the space), so we may have stepped past _limit:
  // see the following Zeno-like trail of CRs 6977970, 7008136, 7042740.
  if (addr >= _limit) { // we have swept up to or past the limit: finish up
    assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
           "sweep _limit out of bounds");
    assert(addr < _sp->end(), "addr out of bounds");
    // Flush any free range we might be holding as a single
    // coalesced chunk to the appropriate free list.
    if (inFreeRange()) {
      assert(freeFinger() >= _sp->bottom() && freeFinger() < _limit,
             "freeFinger() " PTR_FORMAT " is out-of-bounds", p2i(freeFinger()));
      flush_cur_free_chunk(freeFinger(),
                           pointer_delta(addr, freeFinger()));
      if (CMSTraceSweeper) {
        gclog_or_tty->print("Sweep: last chunk: ");
        gclog_or_tty->print("put_free_blk " PTR_FORMAT " (" SIZE_FORMAT ") "
                   "[coalesced:%d]\n",
                   p2i(freeFinger()), pointer_delta(addr, freeFinger()),
                   lastFreeRangeCoalesced() ? 1 : 0);
      }
    }

    // help the iterator loop finish
    return pointer_delta(_sp->end(), addr);
  }

  assert(addr < _limit, "sweep invariant");
  // check if we should yield
  do_yield_check(addr);
  if (fc->is_free()) {
    // Chunk that is already free
    res = fc->size();
    do_already_free_chunk(fc);
    debug_only(_sp->verifyFreeLists());
    // If we flush the chunk at hand in lookahead_and_flush()
    // and it's coalesced with a preceding chunk, then the
    // process of "mangling" the payload of the coalesced block
    // will cause erasure of the size information from the
    // (erstwhile) header of all the coalesced blocks but the
    // first, so the first disjunct in the assert will not hold
    // in that specific case (in which case the second disjunct
    // will hold).
    assert(res == fc->size() || ((HeapWord*)fc) + res >= _limit,
           "Otherwise the size info doesn't change at this step");
    NOT_PRODUCT(
      _numObjectsAlreadyFree++;
      _numWordsAlreadyFree += res;
    )
    NOT_PRODUCT(_last_fc = fc;)
  } else if (!_bitMap->isMarked(addr)) {
    // Chunk is fresh garbage
    res = do_garbage_chunk(fc);
    debug_only(_sp->verifyFreeLists());
    NOT_PRODUCT(
      _numObjectsFreed++;
      _numWordsFreed += res;
    )
  } else {
    // Chunk that is alive.
    res = do_live_chunk(fc);
    debug_only(_sp->verifyFreeLists());
    NOT_PRODUCT(
        _numObjectsLive++;
        _numWordsLive += res;
    )
  }
  return res;
}

// For the smart allocation, record the following:
//  split death - a free chunk is removed from its free list because
//      it is being split into two or more chunks.
//  split birth - a free chunk is being added to its free list because
//      a larger free chunk has been split and resulted in this free chunk.
//  coal death - a free chunk is being removed from its free list because
//      it is being coalesced into a larger free chunk.
//  coal birth - a free chunk is being added to its free list because
//      it was created when two or more free chunks were coalesced into
//      this free chunk.
//
// These statistics are used to determine the desired number of free
// chunks of a given size.  The desired number is chosen to be relative
// to the end of a CMS sweep.  The desired number at the end of a sweep
// is the
//      count-at-end-of-previous-sweep (an amount that was enough)
//              - count-at-beginning-of-current-sweep  (the excess)
//              + split-births  (gains in this size during interval)
//              - split-deaths  (demands on this size during interval)
// where the interval is from the end of one sweep to the end of the
// next.
//
// When sweeping the sweeper maintains an accumulated chunk which is
// the chunk that is made up of chunks that have been coalesced.  That
// will be termed the left-hand chunk.  A new chunk of garbage that
// is being considered for coalescing will be referred to as the
// right-hand chunk.
//
// When making a decision on whether to coalesce a right-hand chunk with
// the current left-hand chunk, the current count vs. the desired count
// of the left-hand chunk is considered.  Also if the right-hand chunk
// is near the large chunk at the end of the heap (see
// ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the
// left-hand chunk is coalesced.
//
// When making a decision about whether to split a chunk, the desired count
// vs. the current count of the candidate to be split is also considered.
// If the candidate is underpopulated (currently fewer chunks than desired)
// a chunk of an overpopulated (currently more chunks than desired) size may
// be chosen.  The "hint" associated with a free list, if non-null, points
// to a free list which may be overpopulated.
//
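// A worked example of the desired-count formula above (illustrative
// numbers only): if chunks of a given size numbered 100 at the end of
// the previous sweep, 40 remain at the start of this one, and the
// interval saw 25 split-births and 10 split-deaths, then the desired
// count at the end of the current sweep is 100 - 40 + 25 - 10 = 75.
//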

void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
  const size_t size = fc->size();
  // Chunks that cannot be coalesced are not in the
  // free lists.
  if (CMSTestInFreeList && !fc->cantCoalesce()) {
    assert(_sp->verify_chunk_in_free_list(fc),
           "free chunk should be in free lists");
  }
  // A chunk that is already free should not have been
  // marked in the bit map.
  HeapWord* const addr = (HeapWord*) fc;
  assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
  // Verify that the bit map has no bits marked between
  // addr and purported end of this block.
  _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);

  // Some chunks cannot be coalesced under any circumstances.
  // See the definition of cantCoalesce().
  if (!fc->cantCoalesce()) {
    // This chunk can potentially be coalesced.
    // All the work is done in do_post_free_or_garbage_chunk().
    do_post_free_or_garbage_chunk(fc, size);
    // Note that if the chunk is not coalescable (the else arm
    // below), we unconditionally flush, without needing to do
    // a "lookahead," as we do below.
    if (inFreeRange()) lookahead_and_flush(fc, size);
  } else {
    // Code path common to both original and adaptive free lists.

    // Can't coalesce with the previous block; this should be treated
    // as the end of a free run, if any.
    if (inFreeRange()) {
      // we kicked some butt; time to pick up the garbage
      assert(freeFinger() < addr, "freeFinger points too high");
      flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
    }
    // else, nothing to do, just continue
  }
}

size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
  // This is a chunk of garbage.  It is not in any free list.
  // Add it to a free list or let it possibly be coalesced into
  // a larger chunk.
  HeapWord* const addr = (HeapWord*) fc;
  const size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());

  // Verify that the bit map has no bits marked between
  // addr and purported end of the just-dead object.
  _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
  do_post_free_or_garbage_chunk(fc, size);

  assert(_limit >= addr + size,
         "A fresh chunk of garbage can't possibly straddle _limit");
  if (inFreeRange()) lookahead_and_flush(fc, size);
  return size;
}

size_t SweepClosure::do_live_chunk(FreeChunk* fc) {
  HeapWord* addr = (HeapWord*) fc;
  // The sweeper has just found a live object. Return any accumulated
  // left hand chunk to the free lists.
  if (inFreeRange()) {
    assert(freeFinger() < addr, "freeFinger points too high");
    flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
  }

  // This object is live: we'd normally expect this to be
  // an oop, and like to assert the following:
  // assert(oop(addr)->is_oop(), "live block should be an oop");
  // However, as we commented above, this may be an object whose
  // header hasn't yet been initialized.
  size_t size;
  assert(_bitMap->isMarked(addr), "Tautology for this control point");
  if (_bitMap->isMarked(addr + 1)) {
    // Determine the size from the bit map, rather than trying to
    // compute it from the object header.
    HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
    size = pointer_delta(nextOneAddr + 1, addr);
    assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
           "alignment problem");

#ifdef ASSERT
      if (oop(addr)->klass_or_null() != NULL) {
        // Ignore mark word because we are running concurrent with mutators
        assert(oop(addr)->is_oop(true), "live block should be an oop");
        assert(size ==
               CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
               "P-mark and computed size do not agree");
      }
#endif

  } else {
    // This should be an initialized object that's alive.
    assert(oop(addr)->klass_or_null() != NULL,
           "Should be an initialized object");
    // Ignore mark word because we are running concurrent with mutators
    assert(oop(addr)->is_oop(true), "live block should be an oop");
    // Verify that the bit map has no bits marked between
    // addr and purported end of this block.
    size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
    assert(size >= 3, "Necessary for Printezis marks to work");
    assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
    DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
  }
  return size;
}

void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
                                                 size_t chunkSize) {
  // do_post_free_or_garbage_chunk() should only be called in the case
  // of the adaptive free list allocator.
  const bool fcInFreeLists = fc->is_free();
  assert((HeapWord*)fc <= _limit, "sweep invariant");
  if (CMSTestInFreeList && fcInFreeLists) {
    assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists");
  }

  if (CMSTraceSweeper) {
    gclog_or_tty->print_cr("  -- pick up another chunk at " PTR_FORMAT " (" SIZE_FORMAT ")", p2i(fc), chunkSize);
  }

  HeapWord* const fc_addr = (HeapWord*) fc;

  bool coalesce = false;
  const size_t left  = pointer_delta(fc_addr, freeFinger());
  const size_t right = chunkSize;
  switch (FLSCoalescePolicy) {
7636     // the numeric value forms a coalescing aggressiveness metric
7637     case 0:  { // never coalesce
7638       coalesce = false;
7639       break;
7640     }
7641     case 1: { // coalesce if left & right chunks on overpopulated lists
7642       coalesce = _sp->coalOverPopulated(left) &&
7643                  _sp->coalOverPopulated(right);
7644       break;
7645     }
7646     case 2: { // coalesce if left chunk on overpopulated list (default)
7647       coalesce = _sp->coalOverPopulated(left);
7648       break;
7649     }
7650     case 3: { // coalesce if left OR right chunk on overpopulated list
7651       coalesce = _sp->coalOverPopulated(left) ||
7652                  _sp->coalOverPopulated(right);
7653       break;
7654     }
7655     case 4: { // always coalesce
7656       coalesce = true;
7657       break;
7658     }
7659     default:
7660       ShouldNotReachHere();
7661   }
7662 
7663   // Should the current free range be coalesced?
7664   // If the chunk is in a free range and either we decided to coalesce above
7665   // or the chunk is near the large block at the end of the heap
7666   // (isNearLargestChunk() returns true), then coalesce this chunk.
7667   const bool doCoalesce = inFreeRange()
7668                           && (coalesce || _g->isNearLargestChunk(fc_addr));
7669   if (doCoalesce) {
7670     // Coalesce the current free range on the left with the new
7671     // chunk on the right.  If either is on a free list,
7672     // it must be removed from the list and stashed in the closure.
7673     if (freeRangeInFreeLists()) {
7674       FreeChunk* const ffc = (FreeChunk*)freeFinger();
7675       assert(ffc->size() == pointer_delta(fc_addr, freeFinger()),
7676              "Size of free range is inconsistent with chunk size.");
7677       if (CMSTestInFreeList) {
7678         assert(_sp->verify_chunk_in_free_list(ffc),
7679                "Chunk is not in free lists");
7680       }
7681       _sp->coalDeath(ffc->size());
7682       _sp->removeFreeChunkFromFreeLists(ffc);
7683       set_freeRangeInFreeLists(false);
7684     }
7685     if (fcInFreeLists) {
7686       _sp->coalDeath(chunkSize);
7687       assert(fc->size() == chunkSize,
7688         "The chunk has the wrong size or is not in the free lists");
7689       _sp->removeFreeChunkFromFreeLists(fc);
7690     }
7691     set_lastFreeRangeCoalesced(true);
7692     print_free_block_coalesced(fc);
7693   } else {  // not in a free range and/or should not coalesce
7694     // Return the current free range and start a new one.
7695     if (inFreeRange()) {
7696       // In a free range but cannot coalesce with the right hand chunk.
7697       // Put the current free range into the free lists.
7698       flush_cur_free_chunk(freeFinger(),
7699                            pointer_delta(fc_addr, freeFinger()));
7700     }
7701     // Set up for new free range.  Pass along whether the right hand
7702     // chunk is in the free lists.
7703     initialize_free_range((HeapWord*)fc, fcInFreeLists);
7704   }
7705 }
7706 
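// A minimal illustrative sketch (deliberately excluded from compilation)
// of the FLSCoalescePolicy decision in do_post_free_or_garbage_chunk()
// above, taken in isolation. 'left_over' and 'right_over' stand in for
// _sp->coalOverPopulated(left) and _sp->coalOverPopulated(right), which
// report whether the free list for a given chunk size holds more chunks
// than its demand estimate suggests it needs.
#if 0
static bool should_coalesce(int policy, bool left_over, bool right_over) {
  switch (policy) {
    case 0: return false;                    // never coalesce
    case 1: return left_over && right_over;  // both lists overpopulated
    case 2: return left_over;                // left overpopulated (default)
    case 3: return left_over || right_over;  // either overpopulated
    case 4: return true;                     // always coalesce
    default: return false;                   // ShouldNotReachHere()
  }
}
#endif
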
7707 // Lookahead flush:
7708 // If we are tracking a free range and this is the last chunk we'll
7709 // look at, because its end crosses past _limit, we preemptively
7710 // flush it along with any free range we may be holding on to. Note that
7711 // this can be the case only for an already-free or freshly garbage
7712 // chunk; if this block is an object, it can never straddle
7713 // _limit. The "straddling" occurs when _limit was set to
7714 // the previous end of the space when this cycle started, and
7715 // a subsequent heap expansion caused the previously co-terminal
7716 // free block to be coalesced with the newly expanded portion,
7717 // rendering _limit a non-block-boundary that is dangerous
7718 // for the sweeper to step over and examine.
7719 void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
7720   assert(inFreeRange(), "Should only be called if currently in a free range.");
7721   HeapWord* const eob = ((HeapWord*)fc) + chunk_size;
7722   assert(_sp->used_region().contains(eob - 1),
7723          "eob = " PTR_FORMAT " eob-1 = " PTR_FORMAT " _limit = " PTR_FORMAT
7724          " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
7725          " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
7726          p2i(eob), p2i(eob-1), p2i(_limit), p2i(_sp->bottom()), p2i(_sp->end()), p2i(fc), chunk_size);
7727   if (eob >= _limit) {
7728     assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit");
7729     if (CMSTraceSweeper) {
7730       gclog_or_tty->print_cr("_limit " PTR_FORMAT " reached or crossed by block "
7731                              "[" PTR_FORMAT "," PTR_FORMAT ") in space "
7732                              "[" PTR_FORMAT "," PTR_FORMAT ")",
7733                              p2i(_limit), p2i(fc), p2i(eob), p2i(_sp->bottom()), p2i(_sp->end()));
7734     }
7735     // Return the storage we are tracking back into the free lists.
7736     if (CMSTraceSweeper) {
7737       gclog_or_tty->print_cr("Flushing ... ");
7738     }
7739     assert(freeFinger() < eob, "Error");
7740     flush_cur_free_chunk(freeFinger(), pointer_delta(eob, freeFinger()));
7741   }
7742 }
7743 
7744 void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
7745   assert(inFreeRange(), "Should only be called if currently in a free range.");
7746   assert(size > 0,
7747     "A zero-sized chunk cannot be added to the free lists.");
7748   if (!freeRangeInFreeLists()) {
7749     if (CMSTestInFreeList) {
7750       FreeChunk* fc = (FreeChunk*) chunk;
7751       fc->set_size(size);
7752       assert(!_sp->verify_chunk_in_free_list(fc),
7753              "chunk should not be in free lists yet");
7754     }
7755     if (CMSTraceSweeper) {
7756       gclog_or_tty->print_cr(" -- add free block " PTR_FORMAT " (" SIZE_FORMAT ") to free lists",
7757                     p2i(chunk), size);
7758     }
7759     // A new free range is going to be starting.  The current
7760     // free range has not been added to the free lists yet or
7761     // was removed so add it back.
7762     // If the current free range was coalesced, then the death
7763     // of the free range was recorded.  Record a birth now.
7764     if (lastFreeRangeCoalesced()) {
7765       _sp->coalBirth(size);
7766     }
7767     _sp->addChunkAndRepairOffsetTable(chunk, size,
7768             lastFreeRangeCoalesced());
7769   } else if (CMSTraceSweeper) {
7770     gclog_or_tty->print_cr("Already in free list: nothing to flush");
7771   }
7772   set_inFreeRange(false);
7773   set_freeRangeInFreeLists(false);
7774 }
7775 
7776 // We take a break if we've been at this for a while,
7777 // so as to avoid monopolizing the locks involved.
7778 void SweepClosure::do_yield_work(HeapWord* addr) {
7779   // Return current free chunk being used for coalescing (if any)
7780   // to the appropriate freelist.  After yielding, the next
7781   // free block encountered will start a coalescing range of
7782   // free blocks.  If the next free block is adjacent to the
7783   // chunk just flushed, they will need to wait for the next
7784   // sweep to be coalesced.
7785   if (inFreeRange()) {
7786     flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
7787   }
7788 
7789   // First give up the locks, then yield, then re-lock.
7790   // We should probably use a constructor/destructor idiom to
7791   // do this unlock/lock or modify the MutexUnlocker class to
7792   // serve our purpose. XXX
7793   assert_lock_strong(_bitMap->lock());
7794   assert_lock_strong(_freelistLock);
7795   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
7796          "CMS thread should hold CMS token");
7797   _bitMap->lock()->unlock();
7798   _freelistLock->unlock();
7799   ConcurrentMarkSweepThread::desynchronize(true);
7800   _collector->stopTimer();
7801   if (PrintCMSStatistics != 0) {
7802     _collector->incrementYields();
7803   }
7804 
7805   // See the comment in coordinator_yield()
7806   for (unsigned i = 0; i < CMSYieldSleepCount &&
7807                        ConcurrentMarkSweepThread::should_yield() &&
7808                        !CMSCollector::foregroundGCIsActive(); ++i) {
7809     os::sleep(Thread::current(), 1, false);
7810   }
7811 
7812   ConcurrentMarkSweepThread::synchronize(true);
7813   _freelistLock->lock();
7814   _bitMap->lock()->lock_without_safepoint_check();
7815   _collector->startTimer();
7816 }
7817 
7818 #ifndef PRODUCT
7819 // This is actually very useful in a product build if it can
7820 // be called from the debugger.  Compile it into the product
7821 // as needed.
7822 bool debug_verify_chunk_in_free_list(FreeChunk* fc) {
7823   return debug_cms_space->verify_chunk_in_free_list(fc);
7824 }
7825 #endif
7826 
7827 void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const {
7828   if (CMSTraceSweeper) {
7829     gclog_or_tty->print_cr("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")",
7830                            p2i(fc), fc->size());
7831   }
7832 }
7833 
7834 // CMSIsAliveClosure
7835 bool CMSIsAliveClosure::do_object_b(oop obj) {
7836   HeapWord* addr = (HeapWord*)obj;
7837   return addr != NULL &&
7838          (!_span.contains(addr) || _bit_map->isMarked(addr));
7839 }
7840 
7841 
7842 CMSKeepAliveClosure::CMSKeepAliveClosure(CMSCollector* collector,
7843                                          MemRegion span,
7844                                          CMSBitMap* bit_map, CMSMarkStack* mark_stack,
7845                                          bool cpc):
7846   _collector(collector),
7847   _span(span),
7848   _bit_map(bit_map),
7849   _mark_stack(mark_stack),
7850   _concurrent_precleaning(cpc) {
7851   assert(!_span.is_empty(), "Empty span could spell trouble");
7852 }
7853 
7854 
7855 // CMSKeepAliveClosure: the serial version
7856 void CMSKeepAliveClosure::do_oop(oop obj) {
7857   HeapWord* addr = (HeapWord*)obj;
7858   if (_span.contains(addr) &&
7859       !_bit_map->isMarked(addr)) {
7860     _bit_map->mark(addr);
7861     bool simulate_overflow = false;
7862     NOT_PRODUCT(
7863       if (CMSMarkStackOverflowALot &&
7864           _collector->simulate_overflow()) {
7865         // simulate a stack overflow
7866         simulate_overflow = true;
7867       }
7868     )
7869     if (simulate_overflow || !_mark_stack->push(obj)) {
7870       if (_concurrent_precleaning) {
7871         // We dirty the overflowed object and let the remark
7872         // phase deal with it.
7873         assert(_collector->overflow_list_is_empty(), "Error");
7874         // In the case of object arrays, we need to dirty all of
7875         // the cards that the object spans. No locking or atomics
7876         // are needed since no one else can be mutating the mod union
7877         // table.
7878         if (obj->is_objArray()) {
7879           size_t sz = obj->size();
7880           HeapWord* end_card_addr =
7881             (HeapWord*)round_to((intptr_t)(addr+sz), CardTableModRefBS::card_size);
7882           MemRegion redirty_range = MemRegion(addr, end_card_addr);
7883           assert(!redirty_range.is_empty(), "Arithmetical tautology");
7884           _collector->_modUnionTable.mark_range(redirty_range);
7885         } else {
7886           _collector->_modUnionTable.mark(addr);
7887         }
7888         _collector->_ser_kac_preclean_ovflw++;
7889       } else {
7890         _collector->push_on_overflow_list(obj);
7891         _collector->_ser_kac_ovflw++;
7892       }
7893     }
7894   }
7895 }
7896 
7897 void CMSKeepAliveClosure::do_oop(oop* p)       { CMSKeepAliveClosure::do_oop_work(p); }
7898 void CMSKeepAliveClosure::do_oop(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); }
7899 
7900 // CMSParKeepAliveClosure: a parallel version of the above.
7901 // The work queues are private to each closure (thread),
7902 // but their contents may be stolen by other threads.
7903 void CMSParKeepAliveClosure::do_oop(oop obj) {
7904   HeapWord* addr = (HeapWord*)obj;
7905   if (_span.contains(addr) &&
7906       !_bit_map->isMarked(addr)) {
7907     // In general, during recursive tracing, several threads
7908     // may be concurrently getting here; the first one to
7909     // "tag" it, claims it.
7910     if (_bit_map->par_mark(addr)) {
7911       bool res = _work_queue->push(obj);
7912       assert(res, "Low water mark should be much less than capacity");
7913       // Do a recursive trim in the hope that this will keep
7914       // stack usage lower, but leave some oops for potential stealers
7915       trim_queue(_low_water_mark);
7916     } // Else, another thread got there first
7917   }
7918 }
7919 
7920 void CMSParKeepAliveClosure::do_oop(oop* p)       { CMSParKeepAliveClosure::do_oop_work(p); }
7921 void CMSParKeepAliveClosure::do_oop(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); }
7922 
7923 void CMSParKeepAliveClosure::trim_queue(uint max) {
7924   while (_work_queue->size() > max) {
7925     oop new_oop;
7926     if (_work_queue->pop_local(new_oop)) {
7927       assert(new_oop != NULL && new_oop->is_oop(), "Expected an oop");
7928       assert(_bit_map->isMarked((HeapWord*)new_oop),
7929              "no white objects on this stack!");
7930       assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
7931       // iterate over the oops in this oop, marking and pushing
7932       // the ones in CMS heap (i.e. in _span).
7933       new_oop->oop_iterate(&_mark_and_push);
7934     }
7935   }
7936 }
7937 
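// A minimal illustrative sketch (deliberately excluded from compilation)
// of the trim-to-low-water-mark idea in trim_queue() above: drain only
// the excess, so stack usage stays bounded while some entries remain in
// the queue for other threads to steal. The container and names are
// assumptions local to this example.
#if 0
#include <cstddef>
#include <deque>

template <typename Task, typename Fn>
static void trim_to(std::deque<Task>& q, size_t low_water_mark, Fn process) {
  while (q.size() > low_water_mark) {
    Task t = q.back();  // pop from the owner's private end...
    q.pop_back();
    process(t);         // ...which may push the task's children back on q
  }
}
#endif
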
7938 CMSInnerParMarkAndPushClosure::CMSInnerParMarkAndPushClosure(
7939                                 CMSCollector* collector,
7940                                 MemRegion span, CMSBitMap* bit_map,
7941                                 OopTaskQueue* work_queue):
7942   _collector(collector),
7943   _span(span),
7944   _bit_map(bit_map),
7945   _work_queue(work_queue) { }
7946 
7947 void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
7948   HeapWord* addr = (HeapWord*)obj;
7949   if (_span.contains(addr) &&
7950       !_bit_map->isMarked(addr)) {
7951     if (_bit_map->par_mark(addr)) {
7952       bool simulate_overflow = false;
7953       NOT_PRODUCT(
7954         if (CMSMarkStackOverflowALot &&
7955             _collector->par_simulate_overflow()) {
7956           // simulate a stack overflow
7957           simulate_overflow = true;
7958         }
7959       )
7960       if (simulate_overflow || !_work_queue->push(obj)) {
7961         _collector->par_push_on_overflow_list(obj);
7962         _collector->_par_kac_ovflw++;
7963       }
7964     } // Else another thread got there already
7965   }
7966 }
7967 
7968 void CMSInnerParMarkAndPushClosure::do_oop(oop* p)       { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
7969 void CMSInnerParMarkAndPushClosure::do_oop(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); }
7970 
7971 //////////////////////////////////////////////////////////////////
7972 //  CMSExpansionCause                /////////////////////////////
7973 //////////////////////////////////////////////////////////////////
7974 const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) {
7975   switch (cause) {
7976     case _no_expansion:
7977       return "No expansion";
7978     case _satisfy_free_ratio:
7979       return "Free ratio";
7980     case _satisfy_promotion:
7981       return "Satisfy promotion";
7982     case _satisfy_allocation:
7983       return "Allocation";
7984     case _allocate_par_lab:
7985       return "Par LAB";
7986     case _allocate_par_spooling_space:
7987       return "Par Spooling Space";
7988     case _adaptive_size_policy:
7989       return "Ergonomics";
7990     default:
7991       return "unknown";
7992   }
7993 }
7994 
7995 void CMSDrainMarkingStackClosure::do_void() {
7996   // the max number to take from overflow list at a time
7997   const size_t num = _mark_stack->capacity()/4;
7998   assert(!_concurrent_precleaning || _collector->overflow_list_is_empty(),
7999          "Overflow list should be NULL during concurrent phases");
8000   while (!_mark_stack->isEmpty() ||
8001          // if stack is empty, check the overflow list
8002          _collector->take_from_overflow_list(num, _mark_stack)) {
8003     oop obj = _mark_stack->pop();
8004     HeapWord* addr = (HeapWord*)obj;
8005     assert(_span.contains(addr), "Should be within span");
8006     assert(_bit_map->isMarked(addr), "Should be marked");
8007     assert(obj->is_oop(), "Should be an oop");
8008     obj->oop_iterate(_keep_alive);
8009   }
8010 }
8011 
8012 void CMSParDrainMarkingStackClosure::do_void() {
8013   // drain queue
8014   trim_queue(0);
8015 }
8016 
8017 // Trim our work_queue so its length is below max at return
8018 void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
8019   while (_work_queue->size() > max) {
8020     oop new_oop;
8021     if (_work_queue->pop_local(new_oop)) {
8022       assert(new_oop->is_oop(), "Expected an oop");
8023       assert(_bit_map->isMarked((HeapWord*)new_oop),
8024              "no white objects on this stack!");
8025       assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
8026       // iterate over the oops in this oop, marking and pushing
8027       // the ones in CMS heap (i.e. in _span).
8028       new_oop->oop_iterate(&_mark_and_push);
8029     }
8030   }
8031 }
8032 
8033 ////////////////////////////////////////////////////////////////////
8034 // Support for Marking Stack Overflow list handling and related code
8035 ////////////////////////////////////////////////////////////////////
8036 // Much of the following code is similar in shape and spirit to the
8037 // code used in ParNewGC. We should try to share that code
8038 // as much as possible in the future.
8039 
8040 #ifndef PRODUCT
8041 // Debugging support for CMSStackOverflowALot
8042 
8043 // It's OK to call this multi-threaded; the worst thing
8044 // that can happen is that we'll get a bunch of closely
8045 // spaced simulated overflows. That's OK; in fact it's
8046 // probably good, as it exercises the overflow code
8047 // under contention.
8048 bool CMSCollector::simulate_overflow() {
8049   if (_overflow_counter-- <= 0) { // just being defensive
8050     _overflow_counter = CMSMarkStackOverflowInterval;
8051     return true;
8052   } else {
8053     return false;
8054   }
8055 }
8056 
8057 bool CMSCollector::par_simulate_overflow() {
8058   return simulate_overflow();
8059 }
8060 #endif
8061 
8062 // Single-threaded
8063 bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
8064   assert(stack->isEmpty(), "Expected precondition");
8065   assert(stack->capacity() > num, "Shouldn't bite off more than we can chew");
8066   size_t i = num;
8067   oop  cur = _overflow_list;
8068   const markOop proto = markOopDesc::prototype();
8069   NOT_PRODUCT(ssize_t n = 0;)
8070   for (oop next; i > 0 && cur != NULL; cur = next, i--) {
8071     next = oop(cur->mark());
8072     cur->set_mark(proto);   // until proven otherwise
8073     assert(cur->is_oop(), "Should be an oop");
8074     bool res = stack->push(cur);
8075     assert(res, "Bit off more than we can chew?");
8076     NOT_PRODUCT(n++;)
8077   }
8078   _overflow_list = cur;
8079 #ifndef PRODUCT
8080   assert(_num_par_pushes >= n, "Too many pops?");
8081   _num_par_pushes -= n;
8082 #endif
8083   return !stack->isEmpty();
8084 }
8085 
8086 #define BUSY  (cast_to_oop<intptr_t>(0x1aff1aff))
8087 // (MT-safe) Get a prefix of at most "num" from the list.
8088 // The overflow list is chained through the mark word of
8089 // each object in the list. We fetch the entire list,
8090 // break off a prefix of the right size and return the
8091 // remainder. If other threads try to take objects from
8092 // the overflow list at that time, they will wait for
8093 // some time to see if data becomes available. If (and
8094 // only if) another thread places one or more object(s)
8095 // on the global list before we have returned the suffix
8096 // to the global list, we will walk down our local list
8097 // to find its end and append the global list to
8098 // our suffix before returning it. This suffix walk can
8099 // prove to be expensive (quadratic in the amount of traffic)
8100 // when there are many objects in the overflow list and
8101 // there is much producer-consumer contention on the list.
8102 // *NOTE*: The overflow list manipulation code here and
8103 // in ParNewGeneration:: are very similar in shape,
8104 // except that in the ParNew case we use the old (from/eden)
8105 // copy of the object to thread the list via its klass word.
8106 // Because of the common code, if you make any changes in
8107 // the code below, please check the ParNew version to see if
8108 // similar changes might be needed.
8109 // CR 6797058 has been filed to consolidate the common code.
8110 bool CMSCollector::par_take_from_overflow_list(size_t num,
8111                                                OopTaskQueue* work_q,
8112                                                int no_of_gc_threads) {
8113   assert(work_q->size() == 0, "First empty local work queue");
8114   assert(num < work_q->max_elems(), "Can't bite off more than we can chew");
8115   if (_overflow_list == NULL) {
8116     return false;
8117   }
8118   // Grab the entire list; we'll put back a suffix
8119   oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
8120   Thread* self = Thread::current();
8121   // Before "no_of_gc_threads" was introduced, CMSOverflowSpinCount was
8122   // set to ParallelGCThreads.
8123   size_t CMSOverflowSpinCount = (size_t) no_of_gc_threads;
8124   size_t sleep_time_millis = MAX2((size_t)1, num/100);
8125   // If the list is busy, we spin for a short while,
8126   // sleeping between attempts to get the list.
8127   for (size_t spin = 0; prefix == BUSY && spin < CMSOverflowSpinCount; spin++) {
8128     os::sleep(self, sleep_time_millis, false);
8129     if (_overflow_list == NULL) {
8130       // Nothing left to take
8131       return false;
8132     } else if (_overflow_list != BUSY) {
8133       // Try and grab the prefix
8134       prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
8135     }
8136   }
8137   // If the list was found to be empty, or we spun long
8138   // enough, we give up and return empty-handed. If we leave
8139   // the list in the BUSY state below, it must be the case that
8140   // some other thread holds the overflow list and will set it
8141   // to a non-BUSY state in the future.
8142   if (prefix == NULL || prefix == BUSY) {
8143      // Nothing to take or waited long enough
8144      if (prefix == NULL) {
8145        // Write back the NULL in case we overwrote it with BUSY above
8146        // and it is still the same value.
8147        (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
8148      }
8149      return false;
8150   }
8151   assert(prefix != NULL && prefix != BUSY, "Error");
8152   size_t i = num;
8153   oop cur = prefix;
8154   // Walk down the first "num" objects, unless we reach the end.
8155   for (; i > 1 && cur->mark() != NULL; cur = oop(cur->mark()), i--);
8156   if (cur->mark() == NULL) {
8157     // We have "num" or fewer elements in the list, so there
8158     // is nothing to return to the global list.
8159     // Write back the NULL in lieu of the BUSY we wrote
8160     // above, if it is still the same value.
8161     if (_overflow_list == BUSY) {
8162       (void) Atomic::cmpxchg_ptr(NULL, &_overflow_list, BUSY);
8163     }
8164   } else {
8165     // Chop off the suffix and return it to the global list.
8166     assert(cur->mark() != BUSY, "Error");
8167     oop suffix_head = cur->mark(); // suffix will be put back on global list
8168     cur->set_mark(NULL);           // break off suffix
8169     // It's possible that the list is still in the empty (or BUSY) state
8170     // we left it in a short while ago; in that case we may be
8171     // able to place back the suffix without incurring the cost
8172     // of a walk down the list.
8173     oop observed_overflow_list = _overflow_list;
8174     oop cur_overflow_list = observed_overflow_list;
8175     bool attached = false;
8176     while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
8177       observed_overflow_list =
8178         (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
8179       if (cur_overflow_list == observed_overflow_list) {
8180         attached = true;
8181         break;
8182       } else cur_overflow_list = observed_overflow_list;
8183     }
8184     if (!attached) {
8185       // Too bad, someone else sneaked in (at least) an element; we'll need
8186       // to do a splice. Find tail of suffix so we can prepend suffix to global
8187       // list.
8188       for (cur = suffix_head; cur->mark() != NULL; cur = (oop)(cur->mark()));
8189       oop suffix_tail = cur;
8190       assert(suffix_tail != NULL && suffix_tail->mark() == NULL,
8191              "Tautology");
8192       observed_overflow_list = _overflow_list;
8193       do {
8194         cur_overflow_list = observed_overflow_list;
8195         if (cur_overflow_list != BUSY) {
8196           // Do the splice ...
8197           suffix_tail->set_mark(markOop(cur_overflow_list));
8198         } else { // cur_overflow_list == BUSY
8199           suffix_tail->set_mark(NULL);
8200         }
8201         // ... and try to place spliced list back on overflow_list ...
8202         observed_overflow_list =
8203           (oop) Atomic::cmpxchg_ptr(suffix_head, &_overflow_list, cur_overflow_list);
8204       } while (cur_overflow_list != observed_overflow_list);
8205       // ... until we have succeeded in doing so.
8206     }
8207   }
8208 
8209   // Push the prefix elements on work_q
8210   assert(prefix != NULL, "control point invariant");
8211   const markOop proto = markOopDesc::prototype();
8212   oop next;
8213   NOT_PRODUCT(ssize_t n = 0;)
8214   for (cur = prefix; cur != NULL; cur = next) {
8215     next = oop(cur->mark());
8216     cur->set_mark(proto);   // until proven otherwise
8217     assert(cur->is_oop(), "Should be an oop");
8218     bool res = work_q->push(cur);
8219     assert(res, "Bit off more than we can chew?");
8220     NOT_PRODUCT(n++;)
8221   }
8222 #ifndef PRODUCT
8223   assert(_num_par_pushes >= n, "Too many pops?");
8224   Atomic::add_ptr(-(intptr_t)n, &_num_par_pushes);
8225 #endif
8226   return true;
8227 }
8228 
8229 // Single-threaded
8230 void CMSCollector::push_on_overflow_list(oop p) {
8231   NOT_PRODUCT(_num_par_pushes++;)
8232   assert(p->is_oop(), "Not an oop");
8233   preserve_mark_if_necessary(p);
8234   p->set_mark((markOop)_overflow_list);
8235   _overflow_list = p;
8236 }
8237 
8238 // Multi-threaded; use CAS to prepend to overflow list
8239 void CMSCollector::par_push_on_overflow_list(oop p) {
8240   NOT_PRODUCT(Atomic::inc_ptr(&_num_par_pushes);)
8241   assert(p->is_oop(), "Not an oop");
8242   par_preserve_mark_if_necessary(p);
8243   oop observed_overflow_list = _overflow_list;
8244   oop cur_overflow_list;
8245   do {
8246     cur_overflow_list = observed_overflow_list;
8247     if (cur_overflow_list != BUSY) {
8248       p->set_mark(markOop(cur_overflow_list));
8249     } else {
8250       p->set_mark(NULL);
8251     }
8252     observed_overflow_list =
8253       (oop) Atomic::cmpxchg_ptr(p, &_overflow_list, cur_overflow_list);
8254   } while (cur_overflow_list != observed_overflow_list);
8255 }
8256 #undef BUSY
8257 
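// A minimal illustrative sketch (deliberately excluded from compilation)
// of the lock-free overflow-list protocol above, reduced to std::atomic
// on a Node whose 'mark' field plays the role of the oop mark word. The
// BUSY sentinel is what a taker leaves behind while it owns the detached
// chain; a pusher must never thread the sentinel into the list. All
// names here are assumptions local to this example.
#if 0
#include <atomic>

struct Node { Node* mark; };
static Node* const BUSY_NODE = reinterpret_cast<Node*>(0x1aff1aff);
static std::atomic<Node*> overflow_list_model(nullptr);

static void par_push(Node* p) {
  Node* observed = overflow_list_model.load();
  do {
    // Link p in front of the observed head, but never chain to BUSY.
    p->mark = (observed == BUSY_NODE) ? nullptr : observed;
    // On failure, compare_exchange_weak refreshes 'observed' and the
    // loop re-links p against the new head, mirroring the cmpxchg
    // loop in par_push_on_overflow_list().
  } while (!overflow_list_model.compare_exchange_weak(observed, p));
}

static Node* take_all_or_busy() {
  // A taker detaches the whole chain in one swap, leaving BUSY behind;
  // it later CASes a suffix (or nullptr) back, as in
  // par_take_from_overflow_list().
  return overflow_list_model.exchange(BUSY_NODE);
}
#endif
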
8258 // Single threaded
8259 // General Note on GrowableArray: pushes may silently fail
8260 // because we are (temporarily) out of C-heap for expanding
8261 // the stack. The problem is quite ubiquitous and affects
8262 // a lot of code in the JVM. The prudent thing for GrowableArray
8263 // to do (for now) is to exit with an error. However, that may
8264 // be too draconian in some cases because the caller may be
8265 // able to recover without much harm. For such cases, we
8266 // should probably introduce a "soft_push" method that returns
8267 // an indication of success or failure, on the assumption that
8268 // the caller may be able to recover; code in
8269 // the VM can then be changed, incrementally, to deal with such
8270 // failures where possible, thus incrementally hardening the VM
8271 // in such low-resource situations.
8272 void CMSCollector::preserve_mark_work(oop p, markOop m) {
8273   _preserved_oop_stack.push(p);
8274   _preserved_mark_stack.push(m);
8275   assert(m == p->mark(), "Mark word changed");
8276   assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
8277          "bijection");
8278 }
8279 
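// A minimal illustrative sketch (deliberately excluded from compilation)
// of the "soft_push" idea floated in the note above: a push that reports
// allocation failure instead of exiting, so callers that can recover get
// the chance to. The class and method names are hypothetical, not
// existing GrowableArray API; T is assumed trivially copyable so that
// realloc() is legal.
#if 0
#include <cstddef>
#include <cstdlib>

template <typename T>
class SoftArray {
  T*     _data;
  size_t _len;
  size_t _cap;
 public:
  SoftArray() : _data(nullptr), _len(0), _cap(0) {}
  bool soft_push(const T& e) {
    if (_len == _cap) {
      size_t new_cap = (_cap == 0) ? 4 : _cap * 2;
      T* d = static_cast<T*>(realloc(_data, new_cap * sizeof(T)));
      if (d == nullptr) return false;  // out of C-heap: report, don't abort
      _data = d;
      _cap  = new_cap;
    }
    _data[_len++] = e;
    return true;
  }
};
#endif
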
8280 // Single threaded
8281 void CMSCollector::preserve_mark_if_necessary(oop p) {
8282   markOop m = p->mark();
8283   if (m->must_be_preserved(p)) {
8284     preserve_mark_work(p, m);
8285   }
8286 }
8287 
8288 void CMSCollector::par_preserve_mark_if_necessary(oop p) {
8289   markOop m = p->mark();
8290   if (m->must_be_preserved(p)) {
8291     MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
8292     // Even though we read the mark word without holding
8293     // the lock, we are assured that it will not change
8294     // because we "own" this oop, so no other thread can
8295     // be trying to push it on the overflow list; see
8296     // the assertion in preserve_mark_work() that checks
8297     // that m == p->mark().
8298     preserve_mark_work(p, m);
8299   }
8300 }
8301 
8302 // We should be able to do this multi-threaded,
8303 // a chunk of stack being a task (this is
8304 // correct because each oop only ever appears
8305 // once in the overflow list). However, it's
8306 // not very easy to completely overlap this with
8307 // other operations, so it will generally not be done
8308 // until all work's been completed. Because we
8309 // expect the preserved oop stack (set) to be small,
8310 // it's probably fine to do this single-threaded.
8311 // We can explore cleverer concurrent/overlapped/parallel
8312 // processing of preserved marks if we feel the
8313 // need for this in the future. Stack overflow should
8314 // be so rare in practice and, when it happens, its
8315 // effect on performance so great that this will
8316 // likely just be in the noise anyway.
8317 void CMSCollector::restore_preserved_marks_if_any() {
8318   assert(SafepointSynchronize::is_at_safepoint(),
8319          "world should be stopped");
8320   assert(Thread::current()->is_ConcurrentGC_thread() ||
8321          Thread::current()->is_VM_thread(),
8322          "should be single-threaded");
8323   assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
8324          "bijection");
8325 
8326   while (!_preserved_oop_stack.is_empty()) {
8327     oop p = _preserved_oop_stack.pop();
8328     assert(p->is_oop(), "Should be an oop");
8329     assert(_span.contains(p), "oop should be in _span");
8330     assert(p->mark() == markOopDesc::prototype(),
8331            "Set when taken from overflow list");
8332     markOop m = _preserved_mark_stack.pop();
8333     p->set_mark(m);
8334   }
8335   assert(_preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty(),
8336          "stacks were cleared above");
8337 }
8338 
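// A minimal illustrative sketch (deliberately excluded from compilation)
// of the parallel-stack bijection maintained above, reduced to standard
// containers: every preserved object is pushed together with its
// original mark word at the same index, so restore pops both stacks in
// lockstep. Names are assumptions local to this example.
#if 0
#include <cstdint>
#include <vector>

struct Obj { uintptr_t mark; };

static std::vector<Obj*>      preserved_oops;
static std::vector<uintptr_t> preserved_marks;

static void preserve(Obj* p) {
  preserved_oops.push_back(p);
  preserved_marks.push_back(p->mark);  // same index: the "bijection"
}

static void restore_all() {            // single-threaded, at a safepoint
  while (!preserved_oops.empty()) {
    preserved_oops.back()->mark = preserved_marks.back();
    preserved_oops.pop_back();
    preserved_marks.pop_back();
  }
}
#endif
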
8339 #ifndef PRODUCT
8340 bool CMSCollector::no_preserved_marks() const {
8341   return _preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty();
8342 }
8343 #endif
8344 
8345 // Transfer some number of overflowed objects to the usual marking
8346 // stack. Return true if some objects were transferred.
8347 bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
8348   size_t num = MIN2((size_t)(_mark_stack->capacity() - _mark_stack->length())/4,
8349                     (size_t)ParGCDesiredObjsFromOverflowList);
8350 
8351   bool res = _collector->take_from_overflow_list(num, _mark_stack);
8352   assert(_collector->overflow_list_is_empty() || res,
8353          "If list is not empty, we should have taken something");
8354   assert(!res || !_mark_stack->isEmpty(),
8355          "If we took something, it should now be on our stack");
8356   return res;
8357 }
8358 
8359 size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
8360   size_t res = _sp->block_size_no_stall(addr, _collector);
8361   if (_sp->block_is_obj(addr)) {
8362     if (_live_bit_map->isMarked(addr)) {
8363       // It can't have been dead in a previous cycle
8364       guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");
8365     } else {
8366       _dead_bit_map->mark(addr);      // mark the dead object
8367     }
8368   }
8369   // Could be 0, if the block size could not be computed without stalling.
8370   return res;
8371 }
8372 
8373 TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase,
8374                                                        GCCause::Cause cause): TraceMemoryManagerStats() {
8375   switch (phase) {
8376     case CMSCollector::InitialMarking:
8377       initialize(true  /* fullGC */ ,
8378                  cause /* cause of the GC */,
8379                  true  /* recordGCBeginTime */,
8380                  true  /* recordPreGCUsage */,
8381                  false /* recordPeakUsage */,
8382                  false /* recordPostGCusage */,
8383                  true  /* recordAccumulatedGCTime */,
8384                  false /* recordGCEndTime */,
8385                  false /* countCollection */  );
8386       break;
8387 
8388     case CMSCollector::FinalMarking:
8389       initialize(true  /* fullGC */ ,
8390                  cause /* cause of the GC */,
8391                  false /* recordGCBeginTime */,
8392                  false /* recordPreGCUsage */,
8393                  false /* recordPeakUsage */,
8394                  false /* recordPostGCusage */,
8395                  true  /* recordAccumulatedGCTime */,
8396                  false /* recordGCEndTime */,
8397                  false /* countCollection */  );
8398       break;
8399 
8400     case CMSCollector::Sweeping:
8401       initialize(true  /* fullGC */ ,
8402                  cause /* cause of the GC */,
8403                  false /* recordGCBeginTime */,
8404                  false /* recordPreGCUsage */,
8405                  true  /* recordPeakUsage */,
8406                  true  /* recordPostGCusage */,
8407                  false /* recordAccumulatedGCTime */,
8408                  true  /* recordGCEndTime */,
8409                  true  /* countCollection */  );
8410       break;
8411 
8412     default:
8413       ShouldNotReachHere();
8414   }
8415 }